// NOTE(review): the lines below are repository-listing metadata (path,
// commit, timestamp, file size) left over from extraction, not MQL5 code.
// Kept as comments so the file remains compilable; verify against the
// original alglib.mqh whether they should be removed entirely.
// AlgLib_ver3.19/alglib.mqh
// super.admin 9e263d779c convert
// 2025-05-30 14:39:48 +02:00
// 38345 lines
// 2.4 MiB
// MQL5
//+------------------------------------------------------------------+
//| alglib.mqh |
//| Copyright 2003-2022 Sergey Bochkanov (ALGLIB project) |
//| Copyright 2012-2023, MetaQuotes Ltd. |
//| https://www.mql5.com |
//+------------------------------------------------------------------+
//| Implementation of ALGLIB library in MetaQuotes Language 5 |
//| |
//| The features of the library include: |
//| - Linear algebra (direct algorithms, EVD, SVD) |
//| - Solving systems of linear and non-linear equations |
//| - Interpolation |
//| - Optimization |
//| - FFT (Fast Fourier Transform) |
//| - Numerical integration |
//| - Linear and nonlinear least-squares fitting |
//| - Ordinary differential equations |
//| - Computation of special functions |
//| - Descriptive statistics and hypothesis testing |
//| - Data analysis - classification, regression |
//| - Implementing linear algebra algorithms, interpolation, etc. |
//| in high-precision arithmetic (using MPFR) |
//| |
//| This file is free software; you can redistribute it and/or |
//| modify it under the terms of the GNU General Public License as |
//| published by the Free Software Foundation (www.fsf.org); either |
//| version 2 of the License, or (at your option) any later version. |
//| |
//| This program is distributed in the hope that it will be useful, |
//| but WITHOUT ANY WARRANTY; without even the implied warranty of |
//| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
//| GNU General Public License for more details. |
//+------------------------------------------------------------------+
#include <Object.mqh>
#include "alglibmisc.mqh"
#include "dataanalysis.mqh"
#include "diffequations.mqh"
#include "delegatefunctions.mqh"
#include "fasttransforms.mqh"
#include "integration.mqh"
#include "interpolation.mqh"
//+------------------------------------------------------------------+
//| CAlglib - the main user-facing class of the library.             |
//| All members are static: the class is a stateless facade whose    |
//| methods forward to the implementation classes brought in by the  |
//| includes above. Only declarations appear here; the definitions   |
//| follow later in this file (outside the visible chunk).           |
//+------------------------------------------------------------------+
class CAlglib
{
public:
//--- functions of package alglibmisc
//--- high quality random number generator (HQRND family)
//    NOTE(review): presumably the state object must be initialized via
//    HQRndRandomize() or HQRndSeed() before any draw - confirm in the
//    implementation, which is not visible in this chunk.
static void HQRndRandomize(CHighQualityRandStateShell &state);
static void HQRndSeed(const int s1,const int s2,CHighQualityRandStateShell &state);
static double HQRndUniformR(CHighQualityRandStateShell &state);
static int HQRndUniformI(CHighQualityRandStateShell &state,const int n);
static double HQRndNormal(CHighQualityRandStateShell &state);
//--- vector/matrix normal draws; each has a CRowDouble/CMatrixDouble
//--- overload and a native MQL5 vector<double>/matrix<double> overload
static void HQRndNormalV(CHighQualityRandStateShell &state,int n,CRowDouble &x);
static void HQRndNormalV(CHighQualityRandStateShell &state,int n,vector<double> &x);
static void HQRndNormalM(CHighQualityRandStateShell &state,int m,int n,CMatrixDouble &x);
static void HQRndNormalM(CHighQualityRandStateShell &state,int m,int n,matrix<double> &x);
static void HQRndUnit2(CHighQualityRandStateShell &state,double &x,double &y);
static void HQRndNormal2(CHighQualityRandStateShell &state,double &x1,double &x2);
static double HQRndExponential(CHighQualityRandStateShell &state,const double lambdav);
static double HQRndDiscrete(CHighQualityRandStateShell &state,int n,CRowDouble &x);
static double HQRndDiscrete(CHighQualityRandStateShell &state,int n,vector<double> &x);
static double HQRndContinuous(CHighQualityRandStateShell &state,int n,CRowDouble &x);
static double HQRndContinuous(CHighQualityRandStateShell &state,int n,vector<double> &x);
//--- KD-trees: build, (de)serialization and nearest-neighbor queries.
//    Overloads without an explicit point count n derive it from the
//    container size - presumed from the overload pattern; confirm in impl.
//    The "Ts" variants take a per-thread request buffer created by
//    KDTreeCreateRequestBuffer, the plain variants use internal state.
static void KDTreeSerialize(CKDTreeShell &obj,string &s_out);
static void KDTreeUnserialize(string s_in,CKDTreeShell &obj);
static void KDTreeBuild(CMatrixDouble &xy,const int n,const int nx,const int ny,const int normtype,CKDTreeShell &kdt);
static void KDTreeBuild(CMatrixDouble &xy,const int nx,const int ny,const int normtype,CKDTreeShell &kdt);
static void KDTreeBuildTagged(CMatrixDouble &xy,int &tags[],const int n,const int nx,const int ny,const int normtype,CKDTreeShell &kdt);
static void KDTreeBuildTagged(CMatrixDouble &xy,CRowInt &tags,const int n,const int nx,const int ny,const int normtype,CKDTreeShell &kdt);
static void KDTreeBuildTagged(CMatrixDouble &xy,int &tags[],const int nx,const int ny,const int normtype,CKDTreeShell &kdt);
static void KDTreeBuildTagged(CMatrixDouble &xy,CRowInt &tags,const int nx,const int ny,const int normtype,CKDTreeShell &kdt);
static void KDTreeCreateRequestBuffer(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf);
//--- KNN = k nearest neighbors, RNN = neighbors within radius r,
//--- RNNU = unsorted radius query, AKNN = approximate KNN (eps slack),
//--- Box = axis-aligned box query; each returns the neighbor count
static int KDTreeQueryKNN(CKDTreeShell &kdt,double &x[],const int k,const bool selfmatch=true);
static int KDTreeQueryKNN(CKDTreeShell &kdt,CRowDouble &x,const int k,const bool selfmatch=true);
static int KDTreeQueryKNN(CKDTreeShell &kdt,vector<double> &x,const int k,const bool selfmatch=true);
static int KDTreeTsQueryKNN(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,double &x[],const int k,const bool selfmatch=true);
static int KDTreeTsQueryKNN(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,CRowDouble &x,const int k,const bool selfmatch=true);
static int KDTreeTsQueryKNN(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,vector<double> &x,const int k,const bool selfmatch=true);
static int KDTreeQueryRNN(CKDTreeShell &kdt,double &x[],const double r,const bool selfmatch=true);
static int KDTreeQueryRNN(CKDTreeShell &kdt,CRowDouble &x,const double r,const bool selfmatch=true);
static int KDTreeQueryRNN(CKDTreeShell &kdt,vector<double> &x,const double r,const bool selfmatch=true);
static int KDTreeQueryRNNU(CKDTreeShell &kdt,double &x[],const double r,const bool selfmatch=true);
static int KDTreeQueryRNNU(CKDTreeShell &kdt,CRowDouble &x,const double r,const bool selfmatch=true);
static int KDTreeQueryRNNU(CKDTreeShell &kdt,vector<double> &x,const double r,const bool selfmatch=true);
static int KDTreeTsQueryRNN(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,double &x[],const double r,const bool selfmatch=true);
static int KDTreeTsQueryRNN(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,CRowDouble &x,const double r,const bool selfmatch=true);
static int KDTreeTsQueryRNN(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,vector<double> &x,const double r,const bool selfmatch=true);
static int KDTreeTsQueryRNNU(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,double &x[],const double r,const bool selfmatch=true);
static int KDTreeTsQueryRNNU(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,CRowDouble &x,const double r,const bool selfmatch=true);
static int KDTreeTsQueryRNNU(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,vector<double> &x,const double r,const bool selfmatch=true);
static int KDTreeQueryAKNN(CKDTreeShell &kdt,double &x[],const int k,const bool selfmatch=true,const double eps=0);
static int KDTreeQueryAKNN(CKDTreeShell &kdt,vector<double> &x,const int k,const bool selfmatch=true,const double eps=0);
static int KDTreeQueryAKNN(CKDTreeShell &kdt,CRowDouble &x,const int k,const bool selfmatch=true,const double eps=0);
static int KDTreeQueryBox(CKDTreeShell &kdt,double &boxmin[],double &boxmax[]);
static int KDTreeQueryBox(CKDTreeShell &kdt,vector<double> &boxmin,vector<double> &boxmax);
static int KDTreeQueryBox(CKDTreeShell &kdt,CRowDouble &boxmin,CRowDouble &boxmax);
static int KDTreeTsQueryBox(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,double &boxmin[],double &boxmax[]);
static int KDTreeTsQueryBox(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,vector<double> &boxmin,vector<double> &boxmax);
static int KDTreeTsQueryBox(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,CRowDouble &boxmin,CRowDouble &boxmax);
//--- result retrieval for the most recent query on kdt; the ...I
//--- variants are "independent" copies - presumed from ALGLIB naming
//--- convention (results not tied to internal buffers); confirm in impl
static void KDTreeQueryResultsX(CKDTreeShell &kdt,CMatrixDouble &x);
static void KDTreeQueryResultsXY(CKDTreeShell &kdt,CMatrixDouble &xy);
static void KDTreeQueryResultsTags(CKDTreeShell &kdt,int &tags[]);
static void KDTreeQueryResultsTags(CKDTreeShell &kdt,CRowInt &tags);
static void KDTreeQueryResultsDistances(CKDTreeShell &kdt,double &r[]);
static void KDTreeQueryResultsDistances(CKDTreeShell &kdt,vector<double> &r);
static void KDTreeQueryResultsDistances(CKDTreeShell &kdt,CRowDouble &r);
static void KDTreeQueryResultsXI(CKDTreeShell &kdt,CMatrixDouble &x);
static void KDTreeQueryResultsXYI(CKDTreeShell &kdt,CMatrixDouble &xy);
static void KDTreeQueryResultsTagsI(CKDTreeShell &kdt,int &tags[]);
static void KDTreeQueryResultsTagsI(CKDTreeShell &kdt,CRowInt &tags);
static void KDTreeQueryResultsDistancesI(CKDTreeShell &kdt,double &r[]);
static void KDTreeQueryResultsDistancesI(CKDTreeShell &kdt,vector<double> &r);
static void KDTreeQueryResultsDistancesI(CKDTreeShell &kdt,CRowDouble &r);
//--- functions of package dataanalysis
//--- data analysis: optimal binary splits of a feature a[] against
//--- class labels c[]; outputs split threshold plus quality metrics
//    (info is the ALGLIB-style completion code; confirm codes in impl)
static void DSOptimalSplit2(double &a[],int &c[],const int n,int &info,double &threshold,double &pal,double &pbl,double &par,double &pbr,double &cve);
static void DSOptimalSplit2(CRowDouble &a,CRowInt &c,const int n,int &info,double &threshold,double &pal,double &pbl,double &par,double &pbr,double &cve);
static void DSOptimalSplit2Fast(double &a[],int &c[],int &tiesbuf[],int &cntbuf[],double &bufr[],int &bufi[],const int n,const int nc,const double alpha,int &info,double &threshold,double &rms,double &cvrms);
static void DSOptimalSplit2Fast(CRowDouble &a,CRowInt &c,CRowInt &tiesbuf,CRowInt &cntbuf,CRowDouble &bufr,CRowInt &bufi,const int n,const int nc,const double alpha,int &info,double &threshold,double &rms,double &cvrms);
//--- decision forest: builder-style API (DFBuilder*) plus legacy
//--- one-shot constructors (DFBuildRandomDecisionForest*), inference
//--- (DFProcess*/DFClassify) and error metrics on a dataset xy
static void DFSerialize(CDecisionForestShell &obj,string &s_out);
static void DFUnserialize(const string s_in,CDecisionForestShell &obj);
static void DFCreateBuffer(CDecisionForestShell &model,CDecisionForestBuffer &buf);
static void DFBuilderCreate(CDecisionForestBuilder &s);
static void DFBuilderSetDataset(CDecisionForestBuilder &s,CMatrixDouble &xy,int npoints,int nvars,int nclasses);
static void DFBuilderSetRndVars(CDecisionForestBuilder &s,int rndvars);
static void DFBuilderSetRndVarsRatio(CDecisionForestBuilder &s,double f);
static void DFBuilderSetRndVarsAuto(CDecisionForestBuilder &s);
static void DFBuilderSetSubsampleRatio(CDecisionForestBuilder &s,double f);
static void DFBuilderSetSeed(CDecisionForestBuilder &s,int seedval);
static void DFBuilderSetRDFAlgo(CDecisionForestBuilder &s,int algotype);
static void DFBuilderSetRDFSplitStrength(CDecisionForestBuilder &s,int splitstrength);
//--- variable-importance modes: Gini on training set, Gini on
//--- out-of-bag set, permutation importance, or none
static void DFBuilderSetImportanceTrnGini(CDecisionForestBuilder &s);
static void DFBuilderSetImportanceOOBGini(CDecisionForestBuilder &s);
static void DFBuilderSetImportancePermutation(CDecisionForestBuilder &s);
static void DFBuilderSetImportanceNone(CDecisionForestBuilder &s);
static double DFBuilderGetProgress(CDecisionForestBuilder &s);
static double DFBuilderPeekProgress(CDecisionForestBuilder &s);
static void DFBuilderBuildRandomForest(CDecisionForestBuilder &s,int ntrees,CDecisionForestShell &df,CDFReportShell &rep);
static double DFBinaryCompression(CDecisionForestShell &df);
static void DFProcess(CDecisionForestShell &df,double &x[],double &y[]);
static void DFProcessI(CDecisionForestShell &df,double &x[],double &y[]);
static double DFProcess0(CDecisionForestShell &model,double &x[]);
static double DFProcess0(CDecisionForestShell &model,CRowDouble &x);
static int DFClassify(CDecisionForestShell &model,double &x[]);
static int DFClassify(CDecisionForestShell &model,CRowDouble &x);
static double DFRelClsError(CDecisionForestShell &df,CMatrixDouble &xy,const int npoints);
static double DFAvgCE(CDecisionForestShell &df,CMatrixDouble &xy,const int npoints);
static double DFRMSError(CDecisionForestShell &df,CMatrixDouble &xy,const int npoints);
static double DFAvgError(CDecisionForestShell &df,CMatrixDouble &xy,const int npoints);
static double DFAvgRelError(CDecisionForestShell &df,CMatrixDouble &xy,const int npoints);
static void DFBuildRandomDecisionForest(CMatrixDouble &xy,const int npoints,const int nvars,const int nclasses,const int ntrees,const double r,int &info,CDecisionForestShell &df,CDFReportShell &rep);
static void DFBuildRandomDecisionForestX1(CMatrixDouble &xy,const int npoints,const int nvars,const int nclasses,const int ntrees,int nrndvars,const double r,int &info,CDecisionForestShell &df,CDFReportShell &rep);
//--- clustering: agglomerative hierarchical (AHC) and k-means, driven
//--- through a shared CClusterizerState configured by the setters below
static void ClusterizerCreate(CClusterizerState &s);
static void ClusterizerSetPoints(CClusterizerState &s,CMatrixDouble &xy,int npoints,int nfeatures,int disttype);
static void ClusterizerSetPoints(CClusterizerState &s,CMatrixDouble &xy,int disttype);
static void ClusterizerSetDistances(CClusterizerState &s,CMatrixDouble &d,int npoints,bool IsUpper);
static void ClusterizerSetDistances(CClusterizerState &s,CMatrixDouble &d,bool IsUpper);
static void ClusterizerSetAHCAlgo(CClusterizerState &s,int algo);
static void ClusterizerSetKMeansLimits(CClusterizerState &s,int restarts,int maxits);
static void ClusterizerSetKMeansInit(CClusterizerState &s,int initalgo);
static void ClusterizerSetSeed(CClusterizerState &s,int seed);
static void ClusterizerRunAHC(CClusterizerState &s,CAHCReport &rep);
static void ClusterizerRunKMeans(CClusterizerState &s,int k,CKmeansReport &rep);
static void ClusterizerGetDistances(CMatrixDouble &xy,int npoints,int nfeatures,int disttype,CMatrixDouble &d);
static void ClusterizerGetKClusters(CAHCReport &rep,int k,CRowInt &cidx,CRowInt &cz);
static void ClusterizerSeparatedByDist(CAHCReport &rep,double r,int &k,CRowInt &cidx,CRowInt &cz);
static void ClusterizerSeparatedByCorr(CAHCReport &rep,double r,int &k,CRowInt &cidx,CRowInt &cz);
static void KMeansGenerate(CMatrixDouble &xy,const int npoints,const int nvars,const int k,const int restarts,int &info,CMatrixDouble &c,int &xyc[]);
//--- Fisher LDA functions: single discriminant vector (FisherLDA) or
//--- full basis matrix (FisherLDAN)
static void FisherLDA(CMatrixDouble &xy,const int npoints,const int nvars,const int nclasses,int &info,double &w[]);
static void FisherLDA(CMatrixDouble &xy,const int npoints,const int nvars,const int nclasses,int &info,CRowDouble &w);
static void FisherLDAN(CMatrixDouble &xy,const int npoints,const int nvars,const int nclasses,int &info,CMatrixDouble &w);
//--- linear regression: LRBuild* constructs the model (S = per-point
//--- deviations supplied, Z = zero intercept), LRPack/LRUnpack convert
//--- between coefficient vector and model, LRProcess evaluates it
static void LRBuild(CMatrixDouble &xy,const int npoints,const int nvars,int &info,CLinearModelShell &lm,CLRReportShell &ar);
static void LRBuildS(CMatrixDouble &xy,double &s[],const int npoints,const int nvars,int &info,CLinearModelShell &lm,CLRReportShell &ar);
static void LRBuildS(CMatrixDouble &xy,CRowDouble &s,const int npoints,const int nvars,int &info,CLinearModelShell &lm,CLRReportShell &ar);
static void LRBuildZS(CMatrixDouble &xy,double &s[],const int npoints,const int nvars,int &info,CLinearModelShell &lm,CLRReportShell &ar);
static void LRBuildZS(CMatrixDouble &xy,CRowDouble &s,const int npoints,const int nvars,int &info,CLinearModelShell &lm,CLRReportShell &ar);
static void LRBuildZ(CMatrixDouble &xy,const int npoints,const int nvars,int &info,CLinearModelShell &lm,CLRReportShell &ar);
static void LRUnpack(CLinearModelShell &lm,double &v[],int &nvars);
static void LRUnpack(CLinearModelShell &lm,CRowDouble &v,int &nvars);
static void LRPack(double &v[],const int nvars,CLinearModelShell &lm);
static void LRPack(CRowDouble &v,const int nvars,CLinearModelShell &lm);
static double LRProcess(CLinearModelShell &lm,double &x[]);
static double LRProcess(CLinearModelShell &lm,CRowDouble &x);
static double LRRMSError(CLinearModelShell &lm,CMatrixDouble &xy,const int npoints);
static double LRAvgError(CLinearModelShell &lm,CMatrixDouble &xy,const int npoints);
static double LRAvgRelError(CLinearModelShell &lm,CMatrixDouble &xy,const int npoints);
//--- multilayer perceptron (MLP)
//--- MLPCreateN builds a net with N hidden layers; suffix B = bounded
//--- output [b, b+|d|] or [b-|d|, b] per ALGLIB convention (confirm in
//--- impl), R = output scaled to range [a,b], C = classifier (softmax)
//    NOTE(review): a few by-value parameters are declared without const
//    (e.g. nhid in MLPCreate1, a in MLPCreateR0) unlike their sibling
//    overloads; harmless to callers, but the definitions elsewhere in
//    this file must match, so the prototypes are left as-is.
static void MLPSerialize(CMultilayerPerceptronShell &obj,string &s_out);
static void MLPUnserialize(const string s_in,CMultilayerPerceptronShell &obj);
static void MLPCreate0(const int nin,const int nout,CMultilayerPerceptronShell &network);
static void MLPCreate1(const int nin,int nhid,const int nout,CMultilayerPerceptronShell &network);
static void MLPCreate2(const int nin,const int nhid1,const int nhid2,const int nout,CMultilayerPerceptronShell &network);
static void MLPCreateB0(const int nin,const int nout,const double b,const double d,CMultilayerPerceptronShell &network);
static void MLPCreateB1(const int nin,int nhid,const int nout,const double b,const double d,CMultilayerPerceptronShell &network);
static void MLPCreateB2(const int nin,const int nhid1,const int nhid2,const int nout,const double b,const double d,CMultilayerPerceptronShell &network);
static void MLPCreateR0(const int nin,const int nout,double a,const double b,CMultilayerPerceptronShell &network);
static void MLPCreateR1(const int nin,int nhid,const int nout,const double a,const double b,CMultilayerPerceptronShell &network);
static void MLPCreateR2(const int nin,const int nhid1,const int nhid2,const int nout,const double a,const double b,CMultilayerPerceptronShell &network);
static void MLPCreateC0(const int nin,const int nout,CMultilayerPerceptronShell &network);
static void MLPCreateC1(const int nin,int nhid,const int nout,CMultilayerPerceptronShell &network);
static void MLPCreateC2(const int nin,const int nhid1,const int nhid2,const int nout,CMultilayerPerceptronShell &network);
//--- initialization and introspection of network structure/weights
static void MLPRandomize(CMultilayerPerceptronShell &network);
static void MLPRandomizeFull(CMultilayerPerceptronShell &network);
static void MLPInitPreprocessor(CMultilayerPerceptronShell &network,CMatrixDouble &xy,int ssize);
static void MLPProperties(CMultilayerPerceptronShell &network,int &nin,int &nout,int &wcount);
static int MLPGetInputsCount(CMultilayerPerceptronShell &network);
static int MLPGetOutputsCount(CMultilayerPerceptronShell &network);
static int MLPGetWeightsCount(CMultilayerPerceptronShell &network);
static bool MLPIsSoftMax(CMultilayerPerceptronShell &network);
static int MLPGetLayersCount(CMultilayerPerceptronShell &network);
static int MLPGetLayerSize(CMultilayerPerceptronShell &network,const int k);
static void MLPGetInputScaling(CMultilayerPerceptronShell &network,const int i,double &mean,double &sigma);
static void MLPGetOutputScaling(CMultilayerPerceptronShell &network,const int i,double &mean,double &sigma);
static void MLPGetNeuronInfo(CMultilayerPerceptronShell &network,const int k,const int i,int &fkind,double &threshold);
static double MLPGetWeight(CMultilayerPerceptronShell &network,const int k0,const int i0,const int k1,const int i1);
static void MLPSetInputScaling(CMultilayerPerceptronShell &network,const int i,const double mean,const double sigma);
static void MLPSetOutputScaling(CMultilayerPerceptronShell &network,const int i,const double mean,const double sigma);
static void MLPSetNeuronInfo(CMultilayerPerceptronShell &network,const int k,const int i,int fkind,double threshold);
static void MLPSetWeight(CMultilayerPerceptronShell &network,const int k0,const int i0,const int k1,const int i1,const double w);
static void MLPActivationFunction(const double net,const int k,double &f,double &df,double &d2f);
//--- inference and error metrics; *Sparse variants accept CSparseMatrix
//--- datasets, *Subset variants score only rows listed in idx/subset
static void MLPProcess(CMultilayerPerceptronShell &network,double &x[],double &y[]);
static void MLPProcessI(CMultilayerPerceptronShell &network,double &x[],double &y[]);
static double MLPError(CMultilayerPerceptronShell &network,CMatrixDouble &xy,const int ssize);
static double MLPErrorSparse(CMultilayerPerceptronShell &network,CSparseMatrix &xy,int npoints);
static double MLPErrorN(CMultilayerPerceptronShell &network,CMatrixDouble &xy,const int ssize);
static int MLPClsError(CMultilayerPerceptronShell &network,CMatrixDouble &xy,const int ssize);
static double MLPRelClsError(CMultilayerPerceptronShell &network,CMatrixDouble &xy,const int npoints);
static double MLPRelClsErrorSparse(CMultilayerPerceptronShell &network,CSparseMatrix &xy,int npoints);
static double MLPAvgCE(CMultilayerPerceptronShell &network,CMatrixDouble &xy,const int npoints);
static double MLPAvgCESparse(CMultilayerPerceptronShell &network,CSparseMatrix &xy,int npoints);
static double MLPRMSError(CMultilayerPerceptronShell &network,CMatrixDouble &xy,const int npoints);
static double MLPRMSErrorSparse(CMultilayerPerceptronShell &network,CSparseMatrix &xy,int npoints);
static double MLPAvgError(CMultilayerPerceptronShell &network,CMatrixDouble &xy,const int npoints);
static double MLPAvgErrorSparse(CMultilayerPerceptronShell &network,CSparseMatrix &xy,int npoints);
static double MLPAvgRelError(CMultilayerPerceptronShell &network,CMatrixDouble &xy,const int npoints);
static double MLPAvgRelErrorSparse(CMultilayerPerceptronShell &network,CSparseMatrix &xy,int npoints);
//--- gradient and Hessian computation for training; e receives the
//--- error value, grad/h the derivatives w.r.t. network weights
static void MLPGrad(CMultilayerPerceptronShell &network,double &x[],double &desiredy[],double &e,double &grad[]);
static void MLPGradN(CMultilayerPerceptronShell &network,double &x[],double &desiredy[],double &e,double &grad[]);
static void MLPGradBatch(CMultilayerPerceptronShell &network,CMatrixDouble &xy,const int ssize,double &e,double &grad[]);
static void MLPGradBatch(CMultilayerPerceptronShell &network,CMatrixDouble &xy,const int ssize,double &e,CRowDouble &grad);
static void MLPGradBatchSparse(CMultilayerPerceptronShell &network,CSparseMatrix &xy,int ssize,double &e,CRowDouble &grad);
static void MLPGradBatchSubset(CMultilayerPerceptronShell &network,CMatrixDouble &xy,int setsize,CRowInt &idx,int subsetsize,double &e,CRowDouble &grad);
static void MLPGradBatchSparseSubset(CMultilayerPerceptronShell &network,CSparseMatrix &xy,int setsize,CRowInt &idx,int subsetsize,double &e,CRowDouble &grad);
static void MLPGradNBatch(CMultilayerPerceptronShell &network,CMatrixDouble &xy,const int ssize,double &e,double &grad[]);
static void MLPGradNBatch(CMultilayerPerceptronShell &network,CMatrixDouble &xy,const int ssize,double &e,CRowDouble &grad);
static void MLPHessianNBatch(CMultilayerPerceptronShell &network,CMatrixDouble &xy,const int ssize,double &e,double &grad[],CMatrixDouble &h);
static void MLPHessianNBatch(CMultilayerPerceptronShell &network,CMatrixDouble &xy,const int ssize,double &e,CRowDouble &grad,CMatrixDouble &h);
static void MLPHessianBatch(CMultilayerPerceptronShell &network,CMatrixDouble &xy,const int ssize,double &e,double &grad[],CMatrixDouble &h);
static void MLPHessianBatch(CMultilayerPerceptronShell &network,CMatrixDouble &xy,const int ssize,double &e,CRowDouble &grad,CMatrixDouble &h);
static void MLPAllErrorsSubset(CMultilayerPerceptronShell &network,CMatrixDouble &xy,int setsize,CRowInt &subset,int subsetsize,CModelErrors &rep);
static void MLPAllErrorsSparseSubset(CMultilayerPerceptronShell &network,CSparseMatrix &xy,int setsize,CRowInt &subset,int subsetsize,CModelErrors &rep);
static double MLPErrorSubset(CMultilayerPerceptronShell &network,CMatrixDouble &xy,int setsize,CRowInt &subset,int subsetsize);
static double MLPErrorSparseSubset(CMultilayerPerceptronShell &network,CSparseMatrix &xy,int setsize,CRowInt &subset,int subsetsize);
//--- logit model functions (multinomial logistic regression, MNL):
//--- training, inference and error metrics on a labelled dataset xy
static void MNLTrainH(CMatrixDouble &xy,const int npoints,const int nvars,const int nclasses,int &info,CLogitModelShell &lm,CMNLReportShell &rep);
static void MNLProcess(CLogitModelShell &lm,double &x[],double &y[]);
static void MNLProcess(CLogitModelShell &lm,CRowDouble &x,CRowDouble &y);
static void MNLProcessI(CLogitModelShell &lm,double &x[],double &y[]);
static void MNLProcessI(CLogitModelShell &lm,CRowDouble &x,CRowDouble &y);
static void MNLUnpack(CLogitModelShell &lm,CMatrixDouble &a,int &nvars,int &nclasses);
static void MNLPack(CMatrixDouble &a,const int nvars,const int nclasses,CLogitModelShell &lm);
static double MNLAvgCE(CLogitModelShell &lm,CMatrixDouble &xy,const int npoints);
static double MNLRelClsError(CLogitModelShell &lm,CMatrixDouble &xy,const int npoints);
static double MNLRMSError(CLogitModelShell &lm,CMatrixDouble &xy,const int npoints);
static double MNLAvgError(CLogitModelShell &lm,CMatrixDouble &xy,const int npoints);
static double MNLAvgRelError(CLogitModelShell &lm,CMatrixDouble &xy,const int ssize);
static int MNLClsError(CLogitModelShell &lm,CMatrixDouble &xy,const int npoints);
//--- Markov chains (MCPD: Markov Chains for Population Data).
//--- Create a solver (optionally with entry/exit states), feed tracks,
//--- add equality (EC), bound (BC) and linear (LC) constraints, then
//--- Solve and read the transition matrix p via MCPDResults
static void MCPDCreate(const int n,CMCPDStateShell &s);
static void MCPDCreateEntry(const int n,const int entrystate,CMCPDStateShell &s);
static void MCPDCreateExit(const int n,const int exitstate,CMCPDStateShell &s);
static void MCPDCreateEntryExit(const int n,const int entrystate,const int exitstate,CMCPDStateShell &s);
static void MCPDAddTrack(CMCPDStateShell &s,CMatrixDouble &xy,const int k);
static void MCPDAddTrack(CMCPDStateShell &s,CMatrixDouble &xy);
static void MCPDSetEC(CMCPDStateShell &s,CMatrixDouble &ec);
static void MCPDAddEC(CMCPDStateShell &s,const int i,const int j,const double c);
static void MCPDSetBC(CMCPDStateShell &s,CMatrixDouble &bndl,CMatrixDouble &bndu);
static void MCPDAddBC(CMCPDStateShell &s,const int i,const int j,const double bndl,const double bndu);
static void MCPDSetLC(CMCPDStateShell &s,CMatrixDouble &c,int &ct[],const int k);
static void MCPDSetLC(CMCPDStateShell &s,CMatrixDouble &c,CRowInt &ct,const int k);
static void MCPDSetLC(CMCPDStateShell &s,CMatrixDouble &c,int &ct[]);
static void MCPDSetLC(CMCPDStateShell &s,CMatrixDouble &c,CRowInt &ct);
static void MCPDSetTikhonovRegularizer(CMCPDStateShell &s,const double v);
static void MCPDSetPrior(CMCPDStateShell &s,CMatrixDouble &pp);
static void MCPDSetPredictionWeights(CMCPDStateShell &s,double &pw[]);
static void MCPDSetPredictionWeights(CMCPDStateShell &s,CRowDouble &pw);
static void MCPDSolve(CMCPDStateShell &s);
static void MCPDResults(CMCPDStateShell &s,CMatrixDouble &p,CMCPDReportShell &rep);
//--- training neural networks: one-shot trainers (Levenberg-Marquardt,
//--- L-BFGS, early stopping ES, k-fold cross-validation) plus the
//--- newer CMLPTrainer-based incremental API
static void MLPTrainLM(CMultilayerPerceptronShell &network,CMatrixDouble &xy,const int npoints,const double decay,const int restarts,int &info,CMLPReportShell &rep);
static void MLPTrainLBFGS(CMultilayerPerceptronShell &network,CMatrixDouble &xy,const int npoints,const double decay,const int restarts,const double wstep,int maxits,int &info,CMLPReportShell &rep);
static void MLPTrainES(CMultilayerPerceptronShell &network,CMatrixDouble &trnxy,const int trnsize,CMatrixDouble &valxy,const int valsize,const double decay,const int restarts,int &info,CMLPReportShell &rep);
static void MLPKFoldCVLBFGS(CMultilayerPerceptronShell &network,CMatrixDouble &xy,const int npoints,const double decay,const int restarts,const double wstep,const int maxits,const int foldscount,int &info,CMLPReportShell &rep,CMLPCVReportShell &cvrep);
static void MLPKFoldCVLM(CMultilayerPerceptronShell &network,CMatrixDouble &xy,const int npoints,const double decay,const int restarts,const int foldscount,int &info,CMLPReportShell &rep,CMLPCVReportShell &cvrep);
static void MLPCreateTrainer(int nin,int nout,CMLPTrainer &s);
static void MLPCreateTrainerCls(int nin,int nclasses,CMLPTrainer &s);
static void MLPSetDataset(CMLPTrainer &s,CMatrixDouble &xy,int npoints);
static void MLPSetSparseDataset(CMLPTrainer &s,CSparseMatrix &xy,int npoints);
static void MLPSetDecay(CMLPTrainer &s,double decay);
static void MLPSetCond(CMLPTrainer &s,double wstep,int maxits);
static void MLPSetAlgoBatch(CMLPTrainer &s);
static void MLPTrainNetwork(CMLPTrainer &s,CMultilayerPerceptronShell &network,int nrestarts,CMLPReportShell &rep);
static void MLPStartTraining(CMLPTrainer &s,CMultilayerPerceptronShell &network,bool randomstart);
static bool MLPContinueTraining(CMLPTrainer &s,CMultilayerPerceptronShell &network);
//--- neural network ensemble functions: MLPECreate* mirror the single
//--- network constructors (0/1/2 hidden layers, B/R/C output modes);
//--- bagging (LM/LBFGS) and early-stopping trainers follow
static void MLPECreate0(const int nin,const int nout,const int ensemblesize,CMLPEnsembleShell &ensemble);
static void MLPECreate1(const int nin,int nhid,const int nout,const int ensemblesize,CMLPEnsembleShell &ensemble);
static void MLPECreate2(const int nin,const int nhid1,const int nhid2,const int nout,const int ensemblesize,CMLPEnsembleShell &ensemble);
static void MLPECreateB0(const int nin,const int nout,const double b,const double d,const int ensemblesize,CMLPEnsembleShell &ensemble);
static void MLPECreateB1(const int nin,int nhid,const int nout,const double b,const double d,const int ensemblesize,CMLPEnsembleShell &ensemble);
static void MLPECreateB2(const int nin,const int nhid1,const int nhid2,const int nout,const double b,const double d,const int ensemblesize,CMLPEnsembleShell &ensemble);
static void MLPECreateR0(const int nin,const int nout,const double a,const double b,const int ensemblesize,CMLPEnsembleShell &ensemble);
static void MLPECreateR1(const int nin,int nhid,const int nout,const double a,const double b,const int ensemblesize,CMLPEnsembleShell &ensemble);
static void MLPECreateR2(const int nin,const int nhid1,const int nhid2,const int nout,const double a,const double b,const int ensemblesize,CMLPEnsembleShell &ensemble);
static void MLPECreateC0(const int nin,const int nout,const int ensemblesize,CMLPEnsembleShell &ensemble);
static void MLPECreateC1(const int nin,int nhid,const int nout,const int ensemblesize,CMLPEnsembleShell &ensemble);
static void MLPECreateC2(const int nin,const int nhid1,const int nhid2,const int nout,const int ensemblesize,CMLPEnsembleShell &ensemble);
static void MLPECreateFromNetwork(CMultilayerPerceptronShell &network,const int ensemblesize,CMLPEnsembleShell &ensemble);
static void MLPERandomize(CMLPEnsembleShell &ensemble);
static void MLPEProperties(CMLPEnsembleShell &ensemble,int &nin,int &nout);
static bool MLPEIsSoftMax(CMLPEnsembleShell &ensemble);
static void MLPEProcess(CMLPEnsembleShell &ensemble,double &x[],double &y[]);
static void MLPEProcessI(CMLPEnsembleShell &ensemble,double &x[],double &y[]);
static double MLPERelClsError(CMLPEnsembleShell &ensemble,CMatrixDouble &xy,const int npoints);
static double MLPEAvgCE(CMLPEnsembleShell &ensemble,CMatrixDouble &xy,const int npoints);
static double MLPERMSError(CMLPEnsembleShell &ensemble,CMatrixDouble &xy,const int npoints);
static double MLPEAvgError(CMLPEnsembleShell &ensemble,CMatrixDouble &xy,const int npoints);
static double MLPEAvgRelError(CMLPEnsembleShell &ensemble,CMatrixDouble &xy,const int npoints);
static void MLPEBaggingLM(CMLPEnsembleShell &ensemble,CMatrixDouble &xy,const int npoints,const double decay,const int restarts,int &info,CMLPReportShell &rep,CMLPCVReportShell &ooberrors);
static void MLPEBaggingLBFGS(CMLPEnsembleShell &ensemble,CMatrixDouble &xy,const int npoints,const double decay,const int restarts,const double wstep,const int maxits,int &info,CMLPReportShell &rep,CMLPCVReportShell &ooberrors);
static void MLPETrainES(CMLPEnsembleShell &ensemble,CMatrixDouble &xy,const int npoints,const double decay,const int restarts,int &info,CMLPReportShell &rep);
static void MLPTrainEnsembleES(CMLPTrainer &s,CMLPEnsembleShell &ensemble,int nrestarts,CMLPReportShell &rep);
//--- principal components analysis: full basis (PCABuildBasis) or the
//--- first nneeded components via truncated subspace iteration, for
//--- dense (CMatrixDouble) and sparse (CSparseMatrix) inputs
static void PCABuildBasis(CMatrixDouble &x,const int npoints,const int nvars,int &info,double &s2[],CMatrixDouble &v);
static void PCABuildBasis(CMatrixDouble &x,const int npoints,const int nvars,int &info,CRowDouble &s2,CMatrixDouble &v);
static void PCATruncatedSubspace(CMatrixDouble &x,int npoints,int nvars,int nneeded,double eps,int maxits,CRowDouble &s2,CMatrixDouble &v);
static void PCATruncatedSubspaceSparse(CSparseMatrix &x,int npoints,int nvars,int nneeded,double eps,int maxits,CRowDouble &s2,CMatrixDouble &v);
//--- functions of package diffequations
//--- ODE solver (Runge-Kutta Cash-Karp): reverse-communication style -
//--- iterate via ODESolverIteration or drive with ODESolverSolve and a
//--- CNDimensional_ODE_RP callback, then fetch the table of solutions
static void ODESolverRKCK(double &y[],const int n,double &x[],const int m,const double eps,const double h,CODESolverStateShell &state);
static void ODESolverRKCK(double &y[],double &x[],const double eps,const double h,CODESolverStateShell &state);
static bool ODESolverIteration(CODESolverStateShell &state);
static void ODESolverSolve(CODESolverStateShell &state,CNDimensional_ODE_RP &diff,CObject &obj);
static void ODESolverResults(CODESolverStateShell &state,int &m,double &xtbl[],CMatrixDouble &ytbl,CODESolverReportShell &rep);
//--- filters applied in-place to series x: SMA = simple moving average,
//--- EMA = exponential (smoothing factor alpha), LRMA = linear
//--- regression moving average; overloads without n use the full series
static void FilterSMA(CRowDouble &x,int n,int k);
static void FilterSMA(CRowDouble &x,int k);
static void FilterEMA(CRowDouble &x,int n,double alpha);
static void FilterEMA(CRowDouble &x,double alpha);
static void FilterLRMA(CRowDouble &x,int n,int k);
static void FilterLRMA(CRowDouble &x,int k);
//--- SSA (Singular Spectrum Analysis) models: configure window/algo,
//--- append data incrementally, then analyze (trend/noise split) or
//--- forecast; section continues past the end of this chunk
static void SSACreate(CSSAModel &s);
static void SSASetWindow(CSSAModel &s,int windowwidth);
static void SSASetSeed(CSSAModel &s,int seed);
static void SSASetPowerUpLength(CSSAModel &s,int pwlen);
static void SSASetMemoryLimit(CSSAModel &s,int memlimit);
static void SSAAddSequence(CSSAModel &s,CRowDouble &x,int n);
static void SSAAddSequence(CSSAModel &s,CRowDouble &x);
static void SSAAppendPointAndUpdate(CSSAModel &s,double x,double updateits);
static void SSAAppendSequenceAndUpdate(CSSAModel &s,CRowDouble &x,int nticks,double updateits);
static void SSAAppendSequenceAndUpdate(CSSAModel &s,CRowDouble &x,double updateits);
static void SSASetAlgoPrecomputed(CSSAModel &s,CMatrixDouble &a,int windowwidth,int nbasis);
static void SSASetAlgoPrecomputed(CSSAModel &s,CMatrixDouble &a);
static void SSASetAlgoTopKDirect(CSSAModel &s,int topk);
static void SSASetAlgoTopKRealtime(CSSAModel &s,int topk);
static void SSAClearData(CSSAModel &s);
static void SSAGetBasis(CSSAModel &s,CMatrixDouble &a,CRowDouble &sv,int &windowwidth,int &nbasis);
static void SSAGetLRR(CSSAModel &s,CRowDouble &a,int &windowwidth);
static void SSAAnalyzeLastWindow(CSSAModel &s,CRowDouble &trend,CRowDouble &noise,int &nticks);
static void SSAAnalyzeLast(CSSAModel &s,int nticks,CRowDouble &trend,CRowDouble &noise);
static void SSAAnalyzeSequence(CSSAModel &s,CRowDouble &data,int nticks,CRowDouble &trend,CRowDouble &noise);
static void SSAAnalyzeSequence(CSSAModel &s,CRowDouble &data,CRowDouble &trend,CRowDouble &noise);
static void SSAForecastLast(CSSAModel &s,int nticks,CRowDouble &trend);
static void SSAForecastSequence(CSSAModel &s,CRowDouble &data,int datalen,int forecastlen,bool applysmoothing,CRowDouble &trend);
static void SSAForecastSequence(CSSAModel &s,CRowDouble &data,int forecastlen,CRowDouble &trend);
static void SSAForecastAvgLast(CSSAModel &s,int m,int nticks,CRowDouble &trend);
static void SSAForecastAvgSequence(CSSAModel &s,CRowDouble &data,int datalen,int m,int forecastlen,bool applysmoothing,CRowDouble &trend);
static void SSAForecastAvgSequence(CSSAModel &s,CRowDouble &data,int m,int forecastlen,CRowDouble &trend);
//--- KNN models
static void KNNSerialize(CKNNModel &obj,string &s_out);
static void KNNUnserialize(const string s_in,CKNNModel &obj);
static void KNNCreateBuffer(CKNNModel &model,CKNNBuffer &buf);
static void KNNBuilderCreate(CKNNBuilder &s);
static void KNNBuilderSetDatasetReg(CKNNBuilder &s,CMatrixDouble &xy,int npoints,int nvars,int nout);
static void KNNBuilderSetDatasetCLS(CKNNBuilder &s,CMatrixDouble &xy,int npoints,int nvars,int nclasses);
static void KNNBuilderSetNorm(CKNNBuilder &s,int nrmtype);
static void KNNBuilderBuildKNNModel(CKNNBuilder &s,int k,double eps,CKNNModel &model,CKNNReport &rep);
static void KNNRewriteKEps(CKNNModel &model,int k,double eps);
static void KNNProcess(CKNNModel &model,CRowDouble &x,CRowDouble &y);
static double KNNProcess0(CKNNModel &model,CRowDouble &x);
static int KNNClassify(CKNNModel &model,CRowDouble &x);
static void KNNProcessI(CKNNModel &model,CRowDouble &x,CRowDouble &y);
static void KNNTsProcess(CKNNModel &model,CKNNBuffer &buf,CRowDouble &x,CRowDouble &y);
static double KNNRelClsError(CKNNModel &model,CMatrixDouble &xy,int npoints);
static double KNNAvgCE(CKNNModel &model,CMatrixDouble &xy,int npoints);
static double KNNRMSError(CKNNModel &model,CMatrixDouble &xy,int npoints);
static double KNNAvgError(CKNNModel &model,CMatrixDouble &xy,int npoints);
static double KNNAvgRelError(CKNNModel &model,CMatrixDouble &xy,int npoints);
static void KNNAllErrors(CKNNModel &model,CMatrixDouble &xy,int npoints,CKNNReport &rep);
//--- functions of package fasttransforms
//--- fast Fourier transform
static void FFTC1D(complex &a[],const int n);
static void FFTC1D(complex &a[]);
static void FFTC1DInv(complex &a[],const int n);
static void FFTC1DInv(complex &a[]);
static void FFTR1D(double &a[],const int n,complex &f[]);
static void FFTR1D(double &a[],complex &f[]);
static void FFTR1DInv(complex &f[],const int n,double &a[]);
static void FFTR1DInv(complex &f[],double &a[]);
static void FFTC1D(CRowComplex &a,const int n);
static void FFTC1D(CRowComplex &a);
static void FFTC1DInv(CRowComplex &a,const int n);
static void FFTC1DInv(CRowComplex &a);
static void FFTR1D(CRowDouble &a,const int n,CRowComplex &f);
static void FFTR1D(CRowDouble &a,CRowComplex &f);
static void FFTR1DInv(CRowComplex &f,const int n,CRowDouble &a);
static void FFTR1DInv(CRowComplex &f,CRowDouble &a);
//--- convolution
static void ConvC1D(complex &a[],const int m,complex &b[],const int n,complex &r[]);
static void ConvC1DInv(complex &a[],const int m,complex &b[],const int n,complex &r[]);
static void ConvC1DCircular(complex &s[],const int m,complex &r[],const int n,complex &c[]);
static void ConvC1DCircularInv(complex &a[],const int m,complex &b[],const int n,complex &r[]);
static void ConvR1D(double &a[],const int m,double &b[],const int n,double &r[]);
static void ConvR1DInv(double &a[],const int m,double &b[],const int n,double &r[]);
static void ConvR1DCircular(double &s[],const int m,double &r[],const int n,double &c[]);
static void ConvR1DCircularInv(double &a[],const int m,double &b[],const int n,double &r[]);
static void CorrC1D(complex &signal[],const int n,complex &pattern[],const int m,complex &r[]);
static void CorrC1DCircular(complex &signal[],const int m,complex &pattern[],const int n,complex &c[]);
static void CorrR1D(double &signal[],const int n,double &pattern[],const int m,double &r[]);
static void CorrR1DCircular(double &signal[],const int m,double &pattern[],const int n,double &c[]);
//--- fast Hartley transform
static void FHTR1D(double &a[],const int n);
static void FHTR1DInv(double &a[],const int n);
//--- functions of package integration
//--- Gauss quadrature formula
static void GQGenerateRec(double &alpha[],double &beta[],const double mu0,const int n,int &info,double &x[],double &w[]);
static void GQGenerateGaussLobattoRec(double &alpha[],double &beta[],const double mu0,const double a,const double b,const int n,int &info,double &x[],double &w[]);
static void GQGenerateGaussRadauRec(double &alpha[],double &beta[],const double mu0,const double a,const int n,int &info,double &x[],double &w[]);
static void GQGenerateGaussLegendre(const int n,int &info,double &x[],double &w[]);
static void GQGenerateGaussJacobi(const int n,const double alpha,const double beta,int &info,double &x[],double &w[]);
static void GQGenerateGaussLaguerre(const int n,const double alpha,int &info,double &x[],double &w[]);
static void GQGenerateGaussHermite(const int n,int &info,double &x[],double &w[]);
//--- Gauss-Kronrod quadrature formula
static void GKQGenerateRec(double &alpha[],double &beta[],const double mu0,const int n,int &info,double &x[],double &wkronrod[],double &wgauss[]);
static void GKQGenerateGaussLegendre(const int n,int &info,double &x[],double &wkronrod[],double &wgauss[]);
static void GKQGenerateGaussJacobi(const int n,const double alpha,const double beta,int &info,double &x[],double &wkronrod[],double &wgauss[]);
static void GKQLegendreCalc(const int n,int &info,double &x[],double &wkronrod[],double &wgauss[]);
static void GKQLegendreTbl(const int n,double &x[],double &wkronrod[],double &wgauss[],double &eps);
//--- auto Gauss-Kronrod
static void AutoGKSmooth(const double a,const double b,CAutoGKStateShell &state);
static void AutoGKSmoothW(const double a,const double b,double xwidth,CAutoGKStateShell &state);
static void AutoGKSingular(const double a,const double b,const double alpha,const double beta,CAutoGKStateShell &state);
static bool AutoGKIteration(CAutoGKStateShell &state);
static void AutoGKIntegrate(CAutoGKStateShell &state,CIntegrator1_Func &func,CObject &obj);
static void AutoGKResults(CAutoGKStateShell &state,double &v,CAutoGKReportShell &rep);
//--- functions of package interpolation
//--- inverse distance weighting interpolation
static void IDWSerialize(CIDWModelShell &obj,string &s_out);
static void IDWUnserialize(string s_in,CIDWModelShell &obj);
static void IDWCreateCalcBuffer(CIDWModelShell &s,CIDWCalcBuffer &buf);
static void IDWBuilderCreate(int nx,int ny,CIDWBuilder &state);
static void IDWBuilderSetNLayers(CIDWBuilder &state,int nlayers);
static void IDWBuilderSetPoints(CIDWBuilder &state,CMatrixDouble &xy,int n);
static void IDWBuilderSetPoints(CIDWBuilder &state,CMatrixDouble &xy);
static void IDWBuilderSetAlgoMSTAB(CIDWBuilder &state,double srad);
static void IDWBuilderSetAlgoTextBookShepard(CIDWBuilder &state,double p);
static void IDWBuilderSetAlgoTextBookModShepard(CIDWBuilder &state,double r);
static void IDWBuilderSetUserTerm(CIDWBuilder &state,double v);
static void IDWBuilderSetConstTerm(CIDWBuilder &state);
static void IDWBuilderSetZeroTerm(CIDWBuilder &state);
static double IDWCalc1(CIDWModelShell &s,double x0);
static double IDWCalc2(CIDWModelShell &s,double x0,double x1);
static double IDWCalc3(CIDWModelShell &s,double x0,double x1,double x2);
static void IDWCalc(CIDWModelShell &s,CRowDouble &x,CRowDouble &y);
static void IDWCalcBuf(CIDWModelShell &s,CRowDouble &x,CRowDouble &y);
static void IDWTsCalcBuf(CIDWModelShell &s,CIDWCalcBuffer &buf,CRowDouble &x,CRowDouble &y);
static void IDWFit(CIDWBuilder &state,CIDWModelShell &model,CIDWReport &rep);
//--- rational interpolation
static double BarycentricCalc(CBarycentricInterpolantShell &b,const double t);
static void BarycentricDiff1(CBarycentricInterpolantShell &b,const double t,double &f,double &df);
static void BarycentricDiff2(CBarycentricInterpolantShell &b,const double t,double &f,double &df,double &d2f);
static void BarycentricLinTransX(CBarycentricInterpolantShell &b,const double ca,const double cb);
static void BarycentricLinTransY(CBarycentricInterpolantShell &b,const double ca,const double cb);
static void BarycentricUnpack(CBarycentricInterpolantShell &b,int &n,double &x[],double &y[],double &w[]);
static void BarycentricBuildXYW(double &x[],double &y[],double &w[],const int n,CBarycentricInterpolantShell &b);
static void BarycentricBuildFloaterHormann(double &x[],double &y[],const int n,const int d,CBarycentricInterpolantShell &b);
//--- polynomial interpolant
static void PolynomialBar2Cheb(CBarycentricInterpolantShell &p,const double a,const double b,double &t[]);
static void PolynomialCheb2Bar(double &t[],const int n,const double a,const double b,CBarycentricInterpolantShell &p);
static void PolynomialCheb2Bar(double &t[],const double a,const double b,CBarycentricInterpolantShell &p);
static void PolynomialBar2Pow(CBarycentricInterpolantShell &p,const double c,const double s,double &a[]);
static void PolynomialBar2Pow(CBarycentricInterpolantShell &p,double &a[]);
static void PolynomialPow2Bar(double &a[],const int n,const double c,const double s,CBarycentricInterpolantShell &p);
static void PolynomialPow2Bar(double &a[],CBarycentricInterpolantShell &p);
static void PolynomialBuild(double &x[],double &y[],const int n,CBarycentricInterpolantShell &p);
static void PolynomialBuild(double &x[],double &y[],CBarycentricInterpolantShell &p);
static void PolynomialBuildEqDist(const double a,const double b,double &y[],const int n,CBarycentricInterpolantShell &p);
static void PolynomialBuildEqDist(const double a,const double b,double &y[],CBarycentricInterpolantShell &p);
static void PolynomialBuildCheb1(const double a,const double b,double &y[],const int n,CBarycentricInterpolantShell &p);
static void PolynomialBuildCheb1(const double a,const double b,double &y[],CBarycentricInterpolantShell &p);
static void PolynomialBuildCheb2(const double a,const double b,double &y[],const int n,CBarycentricInterpolantShell &p);
static void PolynomialBuildCheb2(const double a,const double b,double &y[],CBarycentricInterpolantShell &p);
static double PolynomialCalcEqDist(const double a,const double b,double &f[],const int n,const double t);
static double PolynomialCalcEqDist(const double a,const double b,double &f[],const double t);
static double PolynomialCalcCheb1(const double a,const double b,double &f[],const int n,const double t);
static double PolynomialCalcCheb1(const double a,const double b,double &f[],const double t);
static double PolynomialCalcCheb2(const double a,const double b,double &f[],const int n,const double t);
static double PolynomialCalcCheb2(const double a,const double b,double &f[],const double t);
//--- 1-dimensional spline interpolation
static void Spline1DBuildLinear(double &x[],double &y[],const int n,CSpline1DInterpolantShell &c);
static void Spline1DBuildLinear(double &x[],double &y[],CSpline1DInterpolantShell &c);
static void Spline1DBuildCubic(double &x[],double &y[],const int n,const int boundltype,const double boundl,const int boundrtype,const double boundr,CSpline1DInterpolantShell &c);
static void Spline1DBuildCubic(double &x[],double &y[],CSpline1DInterpolantShell &c);
static void Spline1DGridDiffCubic(double &x[],double &y[],const int n,const int boundltype,const double boundl,const int boundrtype,const double boundr,double &d[]);
static void Spline1DGridDiffCubic(double &x[],double &y[],double &d[]);
static void Spline1DGridDiff2Cubic(double &x[],double &y[],const int n,const int boundltype,const double boundl,const int boundrtype,const double boundr,double &d1[],double &d2[]);
static void Spline1DGridDiff2Cubic(double &x[],double &y[],double &d1[],double &d2[]);
static void Spline1DConvCubic(double &x[],double &y[],const int n,const int boundltype,const double boundl,const int boundrtype,const double boundr,double &x2[],int n2,double &y2[]);
static void Spline1DConvCubic(double &x[],double &y[],double &x2[],double &y2[]);
static void Spline1DConvDiffCubic(double &x[],double &y[],const int n,const int boundltype,const double boundl,const int boundrtype,const double boundr,double &x2[],int n2,double &y2[],double &d2[]);
static void Spline1DConvDiffCubic(double &x[],double &y[],double &x2[],double &y2[],double &d2[]);
static void Spline1DConvDiff2Cubic(double &x[],double &y[],const int n,const int boundltype,const double boundl,const int boundrtype,const double boundr,double &x2[],const int n2,double &y2[],double &d2[],double &dd2[]);
static void Spline1DConvDiff2Cubic(double &x[],double &y[],double &x2[],double &y2[],double &d2[],double &dd2[]);
static void Spline1DBuildCatmullRom(double &x[],double &y[],const int n,const int boundtype,const double tension,CSpline1DInterpolantShell &c);
static void Spline1DBuildCatmullRom(double &x[],double &y[],CSpline1DInterpolantShell &c);
static void Spline1DBuildHermite(double &x[],double &y[],double &d[],const int n,CSpline1DInterpolantShell &c);
static void Spline1DBuildHermite(double &x[],double &y[],double &d[],CSpline1DInterpolantShell &c);
static void Spline1DBuildAkima(double &x[],double &y[],const int n,CSpline1DInterpolantShell &c);
static void Spline1DBuildAkima(double &x[],double &y[],CSpline1DInterpolantShell &c);
static double Spline1DCalc(CSpline1DInterpolantShell &c,const double x);
static void Spline1DDiff(CSpline1DInterpolantShell &c,const double x,double &s,double &ds,double &d2s);
static void Spline1DUnpack(CSpline1DInterpolantShell &c,int &n,CMatrixDouble &tbl);
static void Spline1DLinTransX(CSpline1DInterpolantShell &c,const double a,const double b);
static void Spline1DLinTransY(CSpline1DInterpolantShell &c,const double a,const double b);
static double Spline1DIntegrate(CSpline1DInterpolantShell &c,const double x);
static void Spline1DFit(double &x[],double &y[],int n,int m,double lambdans,CSpline1DInterpolantShell &s,CSpline1DFitReportShell &rep);
static void Spline1DFit(double &x[],double &y[],int m,double lambdans,CSpline1DInterpolantShell &s,CSpline1DFitReportShell &rep);
static void Spline1DBuildMonotone(double &x[],double &y[],int n,CSpline1DInterpolantShell &c);
static void Spline1DBuildMonotone(double &x[],double &y[],CSpline1DInterpolantShell &c);
static void Spline1DBuildMonotone(CRowDouble &x,CRowDouble &y,CSpline1DInterpolantShell &c);
//--- least squares fitting
static void PolynomialFit(double &x[],double &y[],const int n,const int m,int &info,CBarycentricInterpolantShell &p,CPolynomialFitReportShell &rep);
static void PolynomialFit(double &x[],double &y[],const int m,int &info,CBarycentricInterpolantShell &p,CPolynomialFitReportShell &rep);
static void PolynomialFitWC(double &x[],double &y[],double &w[],const int n,double &xc[],double &yc[],int &dc[],const int k,const int m,int &info,CBarycentricInterpolantShell &p,CPolynomialFitReportShell &rep);
static void PolynomialFitWC(double &x[],double &y[],double &w[],double &xc[],double &yc[],int &dc[],const int m,int &info,CBarycentricInterpolantShell &p,CPolynomialFitReportShell &rep);
static void BarycentricFitFloaterHormannWC(double &x[],double &y[],double &w[],const int n,double &xc[],double &yc[],int &dc[],const int k,const int m,int &info,CBarycentricInterpolantShell &b,CBarycentricFitReportShell &rep);
static void BarycentricFitFloaterHormann(double &x[],double &y[],const int n,const int m,int &info,CBarycentricInterpolantShell &b,CBarycentricFitReportShell &rep);
static void Spline1DFitPenalized(double &x[],double &y[],const int n,const int m,const double rho,int &info,CSpline1DInterpolantShell &s,CSpline1DFitReportShell &rep);
static void Spline1DFitPenalized(double &x[],double &y[],const int m,const double rho,int &info,CSpline1DInterpolantShell &s,CSpline1DFitReportShell &rep);
static void Spline1DFitPenalizedW(double &x[],double &y[],double &w[],const int n,const int m,const double rho,int &info,CSpline1DInterpolantShell &s,CSpline1DFitReportShell &rep);
static void Spline1DFitPenalizedW(double &x[],double &y[],double &w[],const int m,const double rho,int &info,CSpline1DInterpolantShell &s,CSpline1DFitReportShell &rep);
static void Spline1DFitCubicWC(double &x[],double &y[],double &w[],const int n,double &xc[],double &yc[],int &dc[],const int k,const int m,int &info,CSpline1DInterpolantShell &s,CSpline1DFitReportShell &rep);
static void Spline1DFitCubicWC(double &x[],double &y[],double &w[],double &xc[],double &yc[],int &dc[],const int m,int &info,CSpline1DInterpolantShell &s,CSpline1DFitReportShell &rep);
static void Spline1DFitHermiteWC(double &x[],double &y[],double &w[],const int n,double &xc[],double &yc[],int &dc[],const int k,const int m,int &info,CSpline1DInterpolantShell &s,CSpline1DFitReportShell &rep);
static void Spline1DFitHermiteWC(double &x[],double &y[],double &w[],double &xc[],double &yc[],int &dc[],const int m,int &info,CSpline1DInterpolantShell &s,CSpline1DFitReportShell &rep);
static void Spline1DFitCubic(double &x[],double &y[],const int n,const int m,int &info,CSpline1DInterpolantShell &s,CSpline1DFitReportShell &rep);
static void Spline1DFitCubic(double &x[],double &y[],const int m,int &info,CSpline1DInterpolantShell &s,CSpline1DFitReportShell &rep);
static void Spline1DFitHermite(double &x[],double &y[],const int n,const int m,int &info,CSpline1DInterpolantShell &s,CSpline1DFitReportShell &rep);
static void Spline1DFitHermite(double &x[],double &y[],const int m,int &info,CSpline1DInterpolantShell &s,CSpline1DFitReportShell &rep);
static void LSFitLinearW(double &y[],double &w[],CMatrixDouble &fmatrix,const int n,const int m,int &info,double &c[],CLSFitReportShell &rep);
static void LSFitLinearW(double &y[],double &w[],CMatrixDouble &fmatrix,int &info,double &c[],CLSFitReportShell &rep);
static void LSFitLinearWC(double &y[],double &w[],CMatrixDouble &fmatrix,CMatrixDouble &cmatrix,const int n,const int m,const int k,int &info,double &c[],CLSFitReportShell &rep);
static void LSFitLinearWC(double &y[],double &w[],CMatrixDouble &fmatrix,CMatrixDouble &cmatrix,int &info,double &c[],CLSFitReportShell &rep);
static void LSFitLinear(double &y[],CMatrixDouble &fmatrix,const int n,const int m,int &info,double &c[],CLSFitReportShell &rep);
static void LSFitLinear(double &y[],CMatrixDouble &fmatrix,int &info,double &c[],CLSFitReportShell &rep);
static void LSFitLinearC(double &y[],CMatrixDouble &fmatrix,CMatrixDouble &cmatrix,const int n,const int m,const int k,int &info,double &c[],CLSFitReportShell &rep);
static void LSFitLinearC(double &y[],CMatrixDouble &fmatrix,CMatrixDouble &cmatrix,int &info,double &c[],CLSFitReportShell &rep);
static void LSFitCreateWF(CMatrixDouble &x,double &y[],double &w[],double &c[],const int n,const int m,const int k,const double diffstep,CLSFitStateShell &state);
static void LSFitCreateWF(CMatrixDouble &x,double &y[],double &w[],double &c[],const double diffstep,CLSFitStateShell &state);
static void LSFitCreateF(CMatrixDouble &x,double &y[],double &c[],const int n,const int m,const int k,const double diffstep,CLSFitStateShell &state);
static void LSFitCreateF(CMatrixDouble &x,double &y[],double &c[],const double diffstep,CLSFitStateShell &state);
static void LSFitCreateWFG(CMatrixDouble &x,double &y[],double &w[],double &c[],const int n,const int m,const int k,const bool cheapfg,CLSFitStateShell &state);
static void LSFitCreateWFG(CMatrixDouble &x,double &y[],double &w[],double &c[],const bool cheapfg,CLSFitStateShell &state);
static void LSFitCreateFG(CMatrixDouble &x,double &y[],double &c[],const int n,const int m,const int k,const bool cheapfg,CLSFitStateShell &state);
static void LSFitCreateFG(CMatrixDouble &x,double &y[],double &c[],const bool cheapfg,CLSFitStateShell &state);
static void LSFitCreateWFGH(CMatrixDouble &x,double &y[],double &w[],double &c[],const int n,const int m,const int k,CLSFitStateShell &state);
static void LSFitCreateWFGH(CMatrixDouble &x,double &y[],double &w[],double &c[],CLSFitStateShell &state);
static void LSFitCreateFGH(CMatrixDouble &x,double &y[],double &c[],const int n,const int m,const int k,CLSFitStateShell &state);
static void LSFitCreateFGH(CMatrixDouble &x,double &y[],double &c[],CLSFitStateShell &state);
static void LSFitSetCond(CLSFitStateShell &state,const double epsx,const int maxits);
static void LSFitSetStpMax(CLSFitStateShell &state,const double stpmax);
static void LSFitSetXRep(CLSFitStateShell &state,const bool needxrep);
static void LSFitSetScale(CLSFitStateShell &state,double &s[]);
static void LSFitSetBC(CLSFitStateShell &state,double &bndl[],double &bndu[]);
static bool LSFitIteration(CLSFitStateShell &state);
static void LSFitFit(CLSFitStateShell &state,CNDimensional_PFunc &func,CNDimensional_Rep &rep,bool rep_status,CObject &obj);
static void LSFitFit(CLSFitStateShell &state,CNDimensional_PFunc &func,CNDimensional_PGrad &grad,CNDimensional_Rep &rep,bool rep_status,CObject &obj);
static void LSFitFit(CLSFitStateShell &state,CNDimensional_PFunc &func,CNDimensional_PGrad &grad,CNDimensional_PHess &hess,CNDimensional_Rep &rep,bool rep_status,CObject &obj);
static void LSFitResults(CLSFitStateShell &state,int &info,double &c[],CLSFitReportShell &rep);
static double LogisticCalc4(double x,double a,double b,double c,double d);
static double LogisticCalc5(double x,double a,double b,double c,double d,double g);
static void LogisticFit4(CRowDouble &x,CRowDouble &y,int n,double &a,double &b,double &c,double &d,CLSFitReportShell &rep);
static void LogisticFit4ec(CRowDouble &x,CRowDouble &y,int n,double cnstrleft,double cnstrright,double &a,double &b,double &c,double &d,CLSFitReportShell &rep);
static void LogisticFit5(CRowDouble &x,CRowDouble &y,int n,double &a,double &b,double &c,double &d,double &g,CLSFitReportShell &rep);
static void LogisticFit5ec(CRowDouble &x,CRowDouble &y,int n,double cnstrleft,double cnstrright,double &a,double &b,double &c,double &d,double &g,CLSFitReportShell &rep);
static void LogisticFit45x(CRowDouble &x,CRowDouble &y,int n,double cnstrleft,double cnstrright,bool is4pl,double lambdav,double epsx,int rscnt,double &a,double &b,double &c,double &d,double &g,CLSFitReportShell &rep);
//--- least squares (LS) circle
static void FitSphereLS(CMatrixDouble &xy,int npoints,int nx,CRowDouble &cx,double &r);
static void FitSphereMC(CMatrixDouble &xy,int npoints,int nx,CRowDouble &cx,double &rhi);
static void FitSphereMI(CMatrixDouble &xy,int npoints,int nx,CRowDouble &cx,double &rlo);
static void FitSphereMZ(CMatrixDouble &xy,int npoints,int nx,CRowDouble &cx,double &rlo,double &rhi);
static void FitSphereX(CMatrixDouble &xy,int npoints,int nx,int problemtype,double epsx,int aulits,double penalty,CRowDouble &cx,double &rlo,double &rhi);
//--- parametric spline
static void PSpline2Build(CMatrixDouble &xy,const int n,const int st,const int pt,CPSpline2InterpolantShell &p);
static void PSpline3Build(CMatrixDouble &xy,const int n,const int st,const int pt,CPSpline3InterpolantShell &p);
static void PSpline2BuildPeriodic(CMatrixDouble &xy,const int n,const int st,const int pt,CPSpline2InterpolantShell &p);
static void PSpline3BuildPeriodic(CMatrixDouble &xy,const int n,const int st,const int pt,CPSpline3InterpolantShell &p);
static void PSpline2ParameterValues(CPSpline2InterpolantShell &p,int &n,double &t[]);
static void PSpline3ParameterValues(CPSpline3InterpolantShell &p,int &n,double &t[]);
static void PSpline2Calc(CPSpline2InterpolantShell &p,const double t,double &x,double &y);
static void PSpline3Calc(CPSpline3InterpolantShell &p,const double t,double &x,double &y,double &z);
static void PSpline2Tangent(CPSpline2InterpolantShell &p,const double t,double &x,double &y);
static void PSpline3Tangent(CPSpline3InterpolantShell &p,const double t,double &x,double &y,double &z);
static void PSpline2Diff(CPSpline2InterpolantShell &p,const double t,double &x,double &dx,double &y,double &dy);
static void PSpline3Diff(CPSpline3InterpolantShell &p,const double t,double &x,double &dx,double &y,double &dy,double &z,double &dz);
static void PSpline2Diff2(CPSpline2InterpolantShell &p,const double t,double &x,double &dx,double &d2x,double &y,double &dy,double &d2y);
static void PSpline3Diff2(CPSpline3InterpolantShell &p,const double t,double &x,double &dx,double &d2x,double &y,double &dy,double &d2y,double &z,double &dz,double &d2z);
static double PSpline2ArcLength(CPSpline2InterpolantShell &p,const double a,const double b);
static double PSpline3ArcLength(CPSpline3InterpolantShell &p,const double a,const double b);
static void ParametricRDPFixed(CMatrixDouble &x,int n,int d,int stopm,double stopeps,CMatrixDouble &x2,int &idx2[],int &nsections);
//--- 2-dimensional spline interpolation
static void Spline2DSerialize(CSpline2DInterpolantShell &obj,string &s_out);
static void Spline2DUnserialize(string s_in,CSpline2DInterpolantShell &obj);
static double Spline2DCalc(CSpline2DInterpolantShell &c,const double x,const double y);
static void Spline2DDiff(CSpline2DInterpolantShell &c,const double x,const double y,double &f,double &fx,double &fy,double &fxy);
static void Spline2DCalcVBuf(CSpline2DInterpolantShell &c,double x,double y,CRowDouble &f);
static double Spline2DCalcVi(CSpline2DInterpolantShell &c,double x,double y,int i);
static void Spline2DCalcV(CSpline2DInterpolantShell &c,double x,double y,CRowDouble &f);
static void Spline2DDiffVi(CSpline2DInterpolantShell &c,double x,double y,int i,double &f,double &fx,double &fy,double &fxy);
static void Spline2DLinTransXY(CSpline2DInterpolantShell &c,const double ax,const double bx,const double ay,const double by);
static void Spline2DLinTransF(CSpline2DInterpolantShell &c,const double a,const double b);
static void Spline2DCopy(CSpline2DInterpolantShell &c,CSpline2DInterpolantShell &cc);
static void Spline2DResampleBicubic(CMatrixDouble &a,const int oldheight,const int oldwidth,CMatrixDouble &b,const int newheight,const int newwidth);
static void Spline2DResampleBilinear(CMatrixDouble &a,const int oldheight,const int oldwidth,CMatrixDouble &b,const int newheight,const int newwidth);
static void Spline2DBuildBilinearV(CRowDouble &x,int n,CRowDouble &y,int m,CRowDouble &f,int d,CSpline2DInterpolantShell &c);
static void Spline2DBuildBicubicV(CRowDouble &x,int n,CRowDouble &y,int m,CRowDouble &f,int d,CSpline2DInterpolantShell &c);
static void Spline2DUnpackV(CSpline2DInterpolantShell &c,int &m,int &n,int &d,CMatrixDouble &tbl);
static void Spline2DBuildBilinear(double &x[],double &y[],CMatrixDouble &f,const int m,const int n,CSpline2DInterpolantShell &c);
static void Spline2DBuildBicubic(double &x[],double &y[],CMatrixDouble &f,const int m,const int n,CSpline2DInterpolantShell &c);
static void Spline2DUnpack(CSpline2DInterpolantShell &c,int &m,int &n,CMatrixDouble &tbl);
static void Spline2DBuilderCreate(int d,CSpline2DBuilder &state);
static void Spline2DBuilderSetUserTerm(CSpline2DBuilder &state,double v);
static void Spline2DBuilderSetLinTerm(CSpline2DBuilder &state);
static void Spline2DBuilderSetConstTerm(CSpline2DBuilder &state);
static void Spline2DBuilderSetZeroTerm(CSpline2DBuilder &state);
static void Spline2DBuilderSetPoints(CSpline2DBuilder &state,CMatrixDouble &xy,int n);
static void Spline2DBuilderSetAreaAuto(CSpline2DBuilder &state);
static void Spline2DBuilderSetArea(CSpline2DBuilder &state,double xa,double xb,double ya,double yb);
static void Spline2DBuilderSetGrid(CSpline2DBuilder &state,int kx,int ky);
static void Spline2DBuilderSetAlgoFastDDM(CSpline2DBuilder &state,int nlayers,double lambdav);
static void Spline2DBuilderSetAlgoBlockLLS(CSpline2DBuilder &state,double lambdans);
static void Spline2DBuilderSetAlgoNaiveLLS(CSpline2DBuilder &state,double lambdans);
static void Spline2DFit(CSpline2DBuilder &state,CSpline2DInterpolantShell &s,CSpline2DFitReport &rep);
//--- 3-dimensional spline interpolation
static double Spline3DCalc(CSpline3DInterpolant &c,double x,double y,double z);
static void Spline3DLinTransXYZ(CSpline3DInterpolant &c,double ax,double bx,double ay,double by,double az,double bz);
static void Spline3DLinTransF(CSpline3DInterpolant &c,double a,double b);
static void Spline3DResampleTrilinear(CRowDouble &a,int oldzcount,int oldycount,int oldxcount,int newzcount,int newycount,int newxcount,CRowDouble &b);
static void Spline3DBuildTrilinearV(CRowDouble &x,int n,CRowDouble &y,int m,CRowDouble &z,int l,CRowDouble &f,int d,CSpline3DInterpolant &c);
static void Spline3DCalcVBuf(CSpline3DInterpolant &c,double x,double y,double z,CRowDouble &f);
static void Spline3DCalcV(CSpline3DInterpolant &c,double x,double y,double z,CRowDouble &f);
static void Spline3DUnpackV(CSpline3DInterpolant &c,int &n,int &m,int &l,int &d,int &stype,CMatrixDouble &tbl);
//--- RBF model
static void RBFSerialize(CRBFModel &obj,string &s_out);
static void RBFUnserialize(string s_in,CRBFModel &obj);
static void RBFCreate(int nx,int ny,CRBFModel &s);
static void RBFCreateCalcBuffer(CRBFModel &s,CRBFCalcBuffer &buf);
static void RBFSetPoints(CRBFModel &s,CMatrixDouble &xy,int n);
static void RBFSetPoints(CRBFModel &s,CMatrixDouble &xy);
static void RBFSetPointsAndScales(CRBFModel &r,CMatrixDouble &xy,int n,CRowDouble &s);
static void RBFSetPointsAndScales(CRBFModel &r,CMatrixDouble &xy,CRowDouble &s);
static void RBFSetAlgoQNN(CRBFModel &s,double q=1.0,double z=5.0);
static void RBFSetAlgoMultilayer(CRBFModel &s,double rbase,int nlayers,double lambdav=0.01);
static void RBFSetAlgoHierarchical(CRBFModel &s,double rbase,int nlayers,double lambdans);
static void RBFSetAlgoThinPlateSpline(CRBFModel &s,double lambdav=0.0);
static void RBFSetAlgoMultiQuadricManual(CRBFModel &s,double alpha,double lambdav=0.0);
static void RBFSetAlgoMultiQuadricAuto(CRBFModel &s,double lambdav=0.0);
static void RBFSetAlgoBiharmonic(CRBFModel &s,double lambdav=0.0);
static void RBFSetLinTerm(CRBFModel &s);
static void RBFSetConstTerm(CRBFModel &s);
static void RBFSetZeroTerm(CRBFModel &s);
static void RBFSetV2BF(CRBFModel &s,int bf);
static void RBFSetV2Its(CRBFModel &s,int maxits);
static void RBFSetV2SupportR(CRBFModel &s,double r);
static void RBFBuildModel(CRBFModel &s,CRBFReport &rep);
static double RBFCalc1(CRBFModel &s,double x0);
static double RBFCalc2(CRBFModel &s,double x0,double x1);
static double RBFCalc3(CRBFModel &s,double x0,double x1,double x2);
static void RBFDiff1(CRBFModel &s,double x0,double &y,double &dy0);
static void RBFDiff2(CRBFModel &s,double x0,double x1,double &y,double &dy0,double &dy1);
static void RBFDiff3(CRBFModel &s,double x0,double x1,double x2,double &y,double &dy0,double &dy1,double &dy2);
static void RBFCalc(CRBFModel &s,CRowDouble &x,CRowDouble &y);
static void RBFDiff(CRBFModel &s,CRowDouble &x,CRowDouble &y,CRowDouble &dy);
static void RBFHess(CRBFModel &s,CRowDouble &x,CRowDouble &y,CRowDouble &dy,CRowDouble &d2y);
static void RBFCalcBuf(CRBFModel &s,CRowDouble &x,CRowDouble &y);
static void RBFDiffBuf(CRBFModel &s,CRowDouble &x,CRowDouble &y,CRowDouble &dy);
static void RBFHessBuf(CRBFModel &s,CRowDouble &x,CRowDouble &y,CRowDouble &dy,CRowDouble &d2y);
static void RBFTSCalcBuf(CRBFModel &s,CRBFCalcBuffer &buf,CRowDouble &x,CRowDouble &y);
static void RBFTSDiffBuf(CRBFModel &s,CRBFCalcBuffer &buf,CRowDouble &x,CRowDouble &y,CRowDouble &dy);
static void RBFTSHessBuf(CRBFModel &s,CRBFCalcBuffer &buf,CRowDouble &x,CRowDouble &y,CRowDouble &dy,CRowDouble &d2y);
static void RBFGridCalc2(CRBFModel &s,CRowDouble &x0,int n0,CRowDouble &x1,int n1,CMatrixDouble &y);
static void RBFGridCalc2V(CRBFModel &s,CRowDouble &x0,int n0,CRowDouble &x1,int n1,CRowDouble &y);
static void RBFGridCalc2VSubset(CRBFModel &s,CRowDouble &x0,int n0,CRowDouble &x1,int n1,bool &flagy[],CRowDouble &y);
static void RBFGridCalc3V(CRBFModel &s,CRowDouble &x0,int n0,CRowDouble &x1,int n1,CRowDouble &x2,int n2,CRowDouble &y);
static void RBFGridCalc3VSubset(CRBFModel &s,CRowDouble &x0,int n0,CRowDouble &x1,int n1,CRowDouble &x2,int n2,bool &flagy[],CRowDouble &y);
static void RBFUnpack(CRBFModel &s,int &nx,int &ny,CMatrixDouble &xwr,int &nc,CMatrixDouble &v,int &modelversion);
static int RBFGetModelVersion(CRBFModel &s);
static double RBFPeekProgress(CRBFModel &s);
static void RBFRequestTermination(CRBFModel &s);
//--- functions of package linalg
//--- basic dense matrix operations: transpose, copy, rank-1 updates, matrix-vector
//--- products and Level-3 BLAS-style routines (GEMM, SYRK, triangular solves)
static void CMatrixTranspose(const int m,const int n,CMatrixComplex &a,const int ia,const int ja,CMatrixComplex &b,const int ib,const int jb);
static void RMatrixTranspose(const int m,const int n,CMatrixDouble &a,const int ia,const int ja,CMatrixDouble &b,const int ib,const int jb);
static void RMatrixEnforceSymmetricity(CMatrixDouble &a,int n,bool IsUpper);
static void CMatrixCopy(const int m,const int n,CMatrixComplex &a,const int ia,const int ja,CMatrixComplex &b,const int ib,const int jb);
static void RVectorCopy(int n,CRowDouble &a,int ia,CRowDouble &b,int ib);
static void RMatrixCopy(const int m,const int n,CMatrixDouble &a,const int ia,const int ja,CMatrixDouble &b,const int ib,const int jb);
static void RMatrixGenCopy(int m,int n,double alpha,CMatrixDouble &a,int ia,int ja,double beta,CMatrixDouble &b,int ib,int jb);
static void RMatrixGer(int m,int n,CMatrixDouble &a,int ia,int ja,double alpha,CRowDouble &u,int iu,CRowDouble &v,int iv);
static void CMatrixRank1(const int m,const int n,CMatrixComplex &a,const int ia,const int ja,complex &u[],const int iu,complex &v[],const int iv);
static void RMatrixRank1(const int m,const int n,CMatrixDouble &a,const int ia,const int ja,double &u[],const int iu,double &v[],const int iv);
static void RMatrixGemVect(int m,int n,double alpha,CMatrixDouble &a,int ia,int ja,int opa,CRowDouble &x,int ix,double beta,CRowDouble &y,int iy);
static void CMatrixMVect(const int m,const int n,CMatrixComplex &a,const int ia,const int ja,const int opa,complex &x[],const int ix,complex &y[],const int iy);
static void RMatrixMVect(const int m,const int n,CMatrixDouble &a,const int ia,const int ja,const int opa,double &x[],const int ix,double &y[],const int iy);
static void RMatrixSymVect(int n,double alpha,CMatrixDouble &a,int ia,int ja,bool IsUpper,CRowDouble &x,int ix,double beta,CRowDouble &y,int iy);
static double RMatrixSyvMVect(int n,CMatrixDouble &a,int ia,int ja,bool IsUpper,CRowDouble &x,int ix,CRowDouble &tmp);
static void RMatrixTrsVect(int n,CMatrixDouble &a,int ia,int ja,bool IsUpper,bool IsUnit,int OpType,CRowDouble &x,int ix);
static void CMatrixRightTrsM(const int m,const int n,CMatrixComplex &a,const int i1,const int j1,const bool IsUpper,const bool IsUnit,const int OpType,CMatrixComplex &x,const int i2,const int j2);
static void CMatrixLeftTrsM(const int m,const int n,CMatrixComplex &a,const int i1,const int j1,const bool IsUpper,const bool IsUnit,const int OpType,CMatrixComplex &x,const int i2,const int j2);
static void RMatrixRightTrsM(const int m,const int n,CMatrixDouble &a,const int i1,const int j1,const bool IsUpper,const bool IsUnit,const int OpType,CMatrixDouble &x,const int i2,const int j2);
static void RMatrixLeftTrsM(const int m,const int n,CMatrixDouble &a,const int i1,const int j1,const bool IsUpper,const bool IsUnit,const int OpType,CMatrixDouble &x,const int i2,const int j2);
static void CMatrixSyrk(const int n,const int k,const double alpha,CMatrixComplex &a,const int ia,const int ja,const int optypea,const double beta,CMatrixComplex &c,const int ic,const int jc,const bool IsUpper);
static void RMatrixSyrk(const int n,const int k,const double alpha,CMatrixDouble &a,const int ia,const int ja,const int optypea,const double beta,CMatrixDouble &c,const int ic,const int jc,const bool IsUpper);
static void CMatrixGemm(const int m,const int n,const int k,complex alpha,CMatrixComplex &a,const int ia,const int ja,const int optypea,CMatrixComplex &b,const int ib,const int jb,const int optypeb,complex beta,CMatrixComplex &c,const int ic,const int jc);
static void RMatrixGemm(const int m,const int n,const int k,const double alpha,CMatrixDouble &a,const int ia,const int ja,const int optypea,CMatrixDouble &b,const int ib,const int jb,const int optypeb,const double beta,CMatrixDouble &c,const int ic,const int jc);
//--- orthogonal factorizations
static void RMatrixQR(CMatrixDouble &a,const int m,const int n,double &tau[]);
static void RMatrixLQ(CMatrixDouble &a,const int m,const int n,double &tau[]);
static void CMatrixQR(CMatrixComplex &a,const int m,const int n,complex &tau[]);
static void CMatrixLQ(CMatrixComplex &a,const int m,const int n,complex &tau[]);
static void RMatrixQRUnpackQ(CMatrixDouble &a,const int m,const int n,double &tau[],const int qcolumns,CMatrixDouble &q);
static void RMatrixQRUnpackR(CMatrixDouble &a,const int m,const int n,CMatrixDouble &r);
static void RMatrixLQUnpackQ(CMatrixDouble &a,const int m,const int n,double &tau[],const int qrows,CMatrixDouble &q);
static void RMatrixLQUnpackL(CMatrixDouble &a,const int m,const int n,CMatrixDouble &l);
static void CMatrixQRUnpackQ(CMatrixComplex &a,const int m,const int n,complex &tau[],const int qcolumns,CMatrixComplex &q);
static void CMatrixQRUnpackR(CMatrixComplex &a,const int m,const int n,CMatrixComplex &r);
static void CMatrixLQUnpackQ(CMatrixComplex &a,const int m,const int n,complex &tau[],const int qrows,CMatrixComplex &q);
static void CMatrixLQUnpackL(CMatrixComplex &a,const int m,const int n,CMatrixComplex &l);
static void RMatrixBD(CMatrixDouble &a,const int m,const int n,double &tauq[],double &taup[]);
static void RMatrixBDUnpackQ(CMatrixDouble &qp,const int m,const int n,double &tauq[],const int qcolumns,CMatrixDouble &q);
static void RMatrixBDMultiplyByQ(CMatrixDouble &qp,const int m,const int n,double &tauq[],CMatrixDouble &z,const int zrows,const int zcolumns,const bool fromtheright,const bool dotranspose);
static void RMatrixBDUnpackPT(CMatrixDouble &qp,const int m,const int n,double &taup[],const int ptrows,CMatrixDouble &pt);
static void RMatrixBDMultiplyByP(CMatrixDouble &qp,const int m,const int n,double &taup[],CMatrixDouble &z,const int zrows,const int zcolumns,const bool fromtheright,const bool dotranspose);
static void RMatrixBDUnpackDiagonals(CMatrixDouble &b,const int m,const int n,bool &IsUpper,double &d[],double &e[]);
static void RMatrixHessenberg(CMatrixDouble &a,const int n,double &tau[]);
static void RMatrixHessenbergUnpackQ(CMatrixDouble &a,const int n,double &tau[],CMatrixDouble &q);
static void RMatrixHessenbergUnpackH(CMatrixDouble &a,const int n,CMatrixDouble &h);
static void SMatrixTD(CMatrixDouble &a,const int n,const bool IsUpper,double &tau[],double &d[],double &e[]);
static void SMatrixTDUnpackQ(CMatrixDouble &a,const int n,const bool IsUpper,double &tau[],CMatrixDouble &q);
static void HMatrixTD(CMatrixComplex &a,const int n,const bool IsUpper,complex &tau[],double &d[],double &e[]);
static void HMatrixTDUnpackQ(CMatrixComplex &a,const int n,const bool IsUpper,complex &tau[],CMatrixComplex &q);
//--- eigenvalues and eigenvectors
static void EigSubSpaceCreate(int n,int k,CEigSubSpaceState &state);
static void EigSubSpaceCreateBuf(int n,int k,CEigSubSpaceState &state);
static void EigSubSpaceSetCond(CEigSubSpaceState &state,double eps,int maxits);
static void EigSubSpaceSetWarmStart(CEigSubSpaceState &state,bool usewarmstart);
static void EigSubSpaceOOCStart(CEigSubSpaceState &state,int mtype);
static bool EigSubSpaceOOCContinue(CEigSubSpaceState &state);
static void EigSubSpaceOOCGetRequestInfo(CEigSubSpaceState &state,int &requesttype,int &requestsize);
static void EigSubSpaceOOCGetRequestData(CEigSubSpaceState &state,CMatrixDouble &x);
static void EigSubSpaceOOCSendResult(CEigSubSpaceState &state,CMatrixDouble &ax);
static void EigSubSpaceOOCStop(CEigSubSpaceState &state,CRowDouble &w,CMatrixDouble &z,CEigSubSpaceReport &rep);
static void EigSubSpaceSolveDenses(CEigSubSpaceState &state,CMatrixDouble &a,bool IsUpper,CRowDouble &w,CMatrixDouble &z,CEigSubSpaceReport &rep);
static void EigSubSpaceSolveSparses(CEigSubSpaceState &state,CSparseMatrix &a,bool IsUpper,CRowDouble &w,CMatrixDouble &z,CEigSubSpaceReport &rep);
static bool SMatrixEVD(CMatrixDouble &a,const int n,int zneeded,const bool IsUpper,double &d[],CMatrixDouble &z);
static bool SMatrixEVDR(CMatrixDouble &a,const int n,int zneeded,const bool IsUpper,double b1,double b2,int &m,double &w[],CMatrixDouble &z);
static bool SMatrixEVDI(CMatrixDouble &a,const int n,int zneeded,const bool IsUpper,const int i1,const int i2,double &w[],CMatrixDouble &z);
static bool HMatrixEVD(CMatrixComplex &a,const int n,const int zneeded,const bool IsUpper,double &d[],CMatrixComplex &z);
static bool HMatrixEVDR(CMatrixComplex &a,const int n,const int zneeded,const bool IsUpper,double b1,double b2,int &m,double &w[],CMatrixComplex &z);
static bool HMatrixEVDI(CMatrixComplex &a,const int n,const int zneeded,const bool IsUpper,const int i1,const int i2,double &w[],CMatrixComplex &z);
static bool SMatrixTdEVD(double &d[],double &e[],const int n,const int zneeded,CMatrixDouble &z);
static bool SMatrixTdEVDR(double &d[],double &e[],const int n,const int zneeded,const double a,const double b,int &m,CMatrixDouble &z);
static bool SMatrixTdEVDI(double &d[],double &e[],const int n,const int zneeded,const int i1,const int i2,CMatrixDouble &z);
static bool RMatrixEVD(CMatrixDouble &a,const int n,const int vneeded,double &wr[],double &wi[],CMatrixDouble &vl,CMatrixDouble &vr);
//--- random matrix generation
static void RMatrixRndOrthogonal(const int n,CMatrixDouble &a);
static void RMatrixRndCond(const int n,const double c,CMatrixDouble &a);
static void CMatrixRndOrthogonal(const int n,CMatrixComplex &a);
static void CMatrixRndCond(const int n,const double c,CMatrixComplex &a);
static void SMatrixRndCond(const int n,const double c,CMatrixDouble &a);
static void SPDMatrixRndCond(const int n,const double c,CMatrixDouble &a);
static void HMatrixRndCond(const int n,const double c,CMatrixComplex &a);
static void HPDMatrixRndCond(const int n,const double c,CMatrixComplex &a);
static void RMatrixRndOrthogonalFromTheRight(CMatrixDouble &a,const int m,const int n);
static void RMatrixRndOrthogonalFromTheLeft(CMatrixDouble &a,const int m,const int n);
static void CMatrixRndOrthogonalFromTheRight(CMatrixComplex &a,const int m,const int n);
static void CMatrixRndOrthogonalFromTheLeft(CMatrixComplex &a,const int m,const int n);
static void SMatrixRndMultiply(CMatrixDouble &a,const int n);
static void HMatrixRndMultiply(CMatrixComplex &a,const int n);
//--- sparse matrix storage (hash-table, CRS, SKS formats), conversion and linear operations
//--- NOTE(review): "SparseUunserialize" below has a doubled 'U' (compare RBFUnserialize and
//--- SparseSerialize); renaming it requires updating the matching definition elsewhere in
//--- this file, so only the inconsistency is flagged here.
static void SparseSerialize(CSparseMatrix &obj,string &s_out);
static void SparseUunserialize(string s_in,CSparseMatrix &obj);
static void SparseCreate(int m,int n,int k,CSparseMatrix &s);
static void SparseCreate(int m,int n,CSparseMatrix &s);
static void SparseCreateBuf(int m,int n,int k,CSparseMatrix &s);
static void SparseCreateBuf(int m,int n,CSparseMatrix &s);
static void SparseCreateCRS(int m,int n,CRowInt &ner,CSparseMatrix &s);
static void SparseCreateCRS(int m,int n,int &ner[],CSparseMatrix &s);
static void SparseCreateCRSBuf(int m,int n,CRowInt &ner,CSparseMatrix &s);
static void SparseCreateCRSBuf(int m,int n,int &ner[],CSparseMatrix &s);
static void SparseCreateSKS(int m,int n,CRowInt &d,CRowInt &u,CSparseMatrix &s);
static void SparseCreateSKS(int m,int n,int &d[],int &u[],CSparseMatrix &s);
static void SparseCreateSKSBuf(int m,int n,CRowInt &d,CRowInt &u,CSparseMatrix &s);
static void SparseCreateSKSBuf(int m,int n,int &d[],int &u[],CSparseMatrix &s);
static void SparseCreateSKSBand(int m,int n,int bw,CSparseMatrix &s);
static void SparseCreateSKSBandBuf(int m,int n,int bw,CSparseMatrix &s);
static void SparseCopy(CSparseMatrix &s0,CSparseMatrix &s1);
static void SparseCopyBuf(CSparseMatrix &s0,CSparseMatrix &s1);
static void SparseSwap(CSparseMatrix &s0,CSparseMatrix &s1);
static void SparseAdd(CSparseMatrix &s,int i,int j,double v);
static void SparseSet(CSparseMatrix &s,int i,int j,double v);
static double SparseGet(CSparseMatrix &s,int i,int j);
static bool SparseExists(CSparseMatrix &s,int i,int j);
static double SparseGetDiagonal(CSparseMatrix &s,int i);
static void SparseMV(CSparseMatrix &s,CRowDouble &x,CRowDouble &y);
static void SparseMTV(CSparseMatrix &s,CRowDouble &x,CRowDouble &y);
static void SparseGemV(CSparseMatrix &s,double alpha,int ops,CRowDouble &x,int ix,double beta,CRowDouble &y,int iy);
static void SparseMV2(CSparseMatrix &s,CRowDouble &x,CRowDouble &y0,CRowDouble &y1);
static void SparseSMV(CSparseMatrix &s,bool IsUpper,CRowDouble &x,CRowDouble &y);
static double SparseVSMV(CSparseMatrix &s,bool IsUpper,CRowDouble &x);
static void SparseMM(CSparseMatrix &s,CMatrixDouble &a,int k,CMatrixDouble &b);
static void SparseMTM(CSparseMatrix &s,CMatrixDouble &a,int k,CMatrixDouble &b);
static void SparseMM2(CSparseMatrix &s,CMatrixDouble &a,int k,CMatrixDouble &b0,CMatrixDouble &b1);
static void SparseSMM(CSparseMatrix &s,bool IsUpper,CMatrixDouble &a,int k,CMatrixDouble &b);
static void SparseTRMV(CSparseMatrix &s,bool IsUpper,bool IsUnit,int OpType,CRowDouble &x,CRowDouble &y);
static void SparseTRSV(CSparseMatrix &s,bool IsUpper,bool IsUnit,int OpType,CRowDouble &x);
static void SparseSymmPermTbl(CSparseMatrix &a,bool IsUpper,CRowInt &p,CSparseMatrix &b);
static void SparseSymmPermTblBuf(CSparseMatrix &a,bool IsUpper,CRowInt &p,CSparseMatrix &b);
static void SparseResizeMatrix(CSparseMatrix &s);
static bool SparseEnumerate(CSparseMatrix &s,int &t0,int &t1,int &i,int &j,double &v);
static bool SparseRewriteExisting(CSparseMatrix &s,int i,int j,double v);
static void SparseGetRow(CSparseMatrix &s,int i,CRowDouble &irow);
static void SparseGetCompressedRow(CSparseMatrix &s,int i,CRowInt &colidx,CRowDouble &vals,int &nzcnt);
static void SparseTransposeSKS(CSparseMatrix &s);
static void SparseTransposeCRS(CSparseMatrix &s);
static void SparseCopyTransposeCRS(CSparseMatrix &s0,CSparseMatrix &s1);
static void SparseCopyTransposeCRSBuf(CSparseMatrix &s0,CSparseMatrix &s1);
static void SparseConvertTo(CSparseMatrix &s0,int fmt);
static void SparseCopyToBuf(CSparseMatrix &s0,int fmt,CSparseMatrix &s1);
static void SparseConvertToHash(CSparseMatrix &s);
static void SparseCopyToHash(CSparseMatrix &s0,CSparseMatrix &s1);
static void SparseCopyToHashBuf(CSparseMatrix &s0,CSparseMatrix &s1);
static void SparseConvertToCRS(CSparseMatrix &s);
static void SparseCopyToCRS(CSparseMatrix &s0,CSparseMatrix &s1);
static void SparseCopyToCRSBuf(CSparseMatrix &s0,CSparseMatrix &s1);
static void SparseConvertToSKS(CSparseMatrix &s);
static void SparseCopyToSKS(CSparseMatrix &s0,CSparseMatrix &s1);
static void SparseCopyToSKSBuf(CSparseMatrix &s0,CSparseMatrix &s1);
static int SparseGetMatrixType(CSparseMatrix &s);
static bool SparseIsHash(CSparseMatrix &s);
static bool SparseIsCRS(CSparseMatrix &s);
static bool SparseIsSKS(CSparseMatrix &s);
static void SparseFree(CSparseMatrix &s);
static int SparseGetNRows(CSparseMatrix &s);
static int SparseGetNCols(CSparseMatrix &s);
static int SparseGetUpperCount(CSparseMatrix &s);
static int SparseGetLowerCount(CSparseMatrix &s);
//--- triangular factorizations (LU, Cholesky) for dense and sparse matrices
//--- NOTE(review): "sparsecholeskyanalyze" below is all lower-case, breaking the CamelCase
//--- convention used by every other declaration (e.g. SparseCholeskyFactorize); renaming it
//--- requires updating the matching definition elsewhere in this file.
static void RMatrixLU(CMatrixDouble &a,const int m,const int n,int &pivots[]);
static void CMatrixLU(CMatrixComplex &a,const int m,const int n,int &pivots[]);
static bool HPDMatrixCholesky(CMatrixComplex &a,const int n,const bool IsUpper);
static bool SPDMatrixCholesky(CMatrixDouble &a,const int n,const bool IsUpper);
static void SPDMatrixCholeskyUpdateAdd1(CMatrixDouble &a,int n,bool IsUpper,CRowDouble &u);
static void SPDMatrixCholeskyUpdateFix(CMatrixDouble &a,int n,bool IsUpper,bool &fix[]);
static void SPDMatrixCholeskyUpdateAdd1Buf(CMatrixDouble &a,int n,bool IsUpper,CRowDouble &u,CRowDouble &bufr);
static void SPDMatrixCholeskyUpdateFixBuf(CMatrixDouble &a,int n,bool IsUpper,bool &fix[],CRowDouble &bufr);
static bool SparseLU(CSparseMatrix &a,int pivottype,CRowInt &p,CRowInt &q);
static bool SparseCholeskySkyLine(CSparseMatrix &a,int n,bool IsUpper);
static bool SparseCholesky(CSparseMatrix &a,bool IsUpper);
static bool SparseCholeskyP(CSparseMatrix &a,bool IsUpper,CRowInt &p);
static bool sparsecholeskyanalyze(CSparseMatrix &a,bool IsUpper,int facttype,int permtype,CSparseDecompositionAnalysis &analysis);
static bool SparseCholeskyFactorize(CSparseDecompositionAnalysis &analysis,bool needupper,CSparseMatrix &a,CRowDouble &d,CRowInt &p);
static void SparseCholeskyReload(CSparseDecompositionAnalysis &analysis,CSparseMatrix &a,bool IsUpper);
//--- estimation of condition numbers
static double RMatrixRCond1(CMatrixDouble &a,const int n);
static double RMatrixRCondInf(CMatrixDouble &a,const int n);
static double SPDMatrixRCond(CMatrixDouble &a,const int n,const bool IsUpper);
static double RMatrixTrRCond1(CMatrixDouble &a,const int n,const bool IsUpper,const bool IsUnit);
static double RMatrixTrRCondInf(CMatrixDouble &a,const int n,const bool IsUpper,const bool IsUnit);
static double HPDMatrixRCond(CMatrixComplex &a,const int n,const bool IsUpper);
static double CMatrixRCond1(CMatrixComplex &a,const int n);
static double CMatrixRCondInf(CMatrixComplex &a,const int n);
static double RMatrixLURCond1(CMatrixDouble &lua,const int n);
static double RMatrixLURCondInf(CMatrixDouble &lua,const int n);
static double SPDMatrixCholeskyRCond(CMatrixDouble &a,const int n,const bool IsUpper);
static double HPDMatrixCholeskyRCond(CMatrixComplex &a,const int n,const bool IsUpper);
static double CMatrixLURCond1(CMatrixComplex &lua,const int n);
static double CMatrixLURCondInf(CMatrixComplex &lua,const int n);
static double CMatrixTrRCond1(CMatrixComplex &a,const int n,const bool IsUpper,const bool IsUnit);
static double CMatrixTrRCondInf(CMatrixComplex &a,const int n,const bool IsUpper,const bool IsUnit);
//--- norm estimator
static void NormEstimatorCreate(int m,int n,int nstart,int nits,CNormEstimatorState &state);
static void NormEstimatorSetSeed(CNormEstimatorState &state,int seedval);
static void NormEstimatorEstimateSparse(CNormEstimatorState &state,CSparseMatrix &a);
static void NormEstimatorResults(CNormEstimatorState &state,double &nrm);
//--- matrix inversion
static void RMatrixLUInverse(CMatrixDouble &a,int &pivots[],const int n,int &info,CMatInvReportShell &rep);
static void RMatrixLUInverse(CMatrixDouble &a,int &pivots[],int &info,CMatInvReportShell &rep);
static void RMatrixInverse(CMatrixDouble &a,const int n,int &info,CMatInvReportShell &rep);
static void RMatrixInverse(CMatrixDouble &a,int &info,CMatInvReportShell &rep);
static void CMatrixLUInverse(CMatrixComplex &a,int &pivots[],const int n,int &info,CMatInvReportShell &rep);
static void CMatrixLUInverse(CMatrixComplex &a,int &pivots[],int &info,CMatInvReportShell &rep);
static void CMatrixInverse(CMatrixComplex &a,const int n,int &info,CMatInvReportShell &rep);
static void CMatrixInverse(CMatrixComplex &a,int &info,CMatInvReportShell &rep);
static void SPDMatrixCholeskyInverse(CMatrixDouble &a,const int n,const bool IsUpper,int &info,CMatInvReportShell &rep);
static void SPDMatrixCholeskyInverse(CMatrixDouble &a,int &info,CMatInvReportShell &rep);
static void SPDMatrixInverse(CMatrixDouble &a,const int n,const bool IsUpper,int &info,CMatInvReportShell &rep);
static void SPDMatrixInverse(CMatrixDouble &a,int &info,CMatInvReportShell &rep);
static void HPDMatrixCholeskyInverse(CMatrixComplex &a,const int n,const bool IsUpper,int &info,CMatInvReportShell &rep);
static void HPDMatrixCholeskyInverse(CMatrixComplex &a,int &info,CMatInvReportShell &rep);
static void HPDMatrixInverse(CMatrixComplex &a,const int n,const bool IsUpper,int &info,CMatInvReportShell &rep);
static void HPDMatrixInverse(CMatrixComplex &a,int &info,CMatInvReportShell &rep);
static void RMatrixTrInverse(CMatrixDouble &a,const int n,const bool IsUpper,const bool IsUnit,int &info,CMatInvReportShell &rep);
static void RMatrixTrInverse(CMatrixDouble &a,const bool IsUpper,int &info,CMatInvReportShell &rep);
static void CMatrixTrInverse(CMatrixComplex &a,const int n,const bool IsUpper,const bool IsUnit,int &info,CMatInvReportShell &rep);
static void CMatrixTrInverse(CMatrixComplex &a,const bool IsUpper,int &info,CMatInvReportShell &rep);
//--- singular value decomposition of a bidiagonal matrix
static bool RMatrixBdSVD(double &d[],double &e[],const int n,const bool IsUpper,bool isfractionalaccuracyrequired,CMatrixDouble &u,const int nru,CMatrixDouble &c,const int ncc,CMatrixDouble &vt,const int ncvt);
//--- singular value decomposition
static bool RMatrixSVD(CMatrixDouble &a,const int m,const int n,const int uneeded,const int vtneeded,const int additionalmemory,double &w[],CMatrixDouble &u,CMatrixDouble &vt);
//--- calculation of the matrix determinant
static double RMatrixLUDet(CMatrixDouble &a,int &pivots[],const int n);
static double RMatrixLUDet(CMatrixDouble &a,int &pivots[]);
static double RMatrixDet(CMatrixDouble &a,const int n);
static double RMatrixDet(CMatrixDouble &a);
static complex CMatrixLUDet(CMatrixComplex &a,int &pivots[],const int n);
static complex CMatrixLUDet(CMatrixComplex &a,int &pivots[]);
static complex CMatrixDet(CMatrixComplex &a,const int n);
static complex CMatrixDet(CMatrixComplex &a);
static double SPDMatrixCholeskyDet(CMatrixDouble &a,const int n);
static double SPDMatrixCholeskyDet(CMatrixDouble &a);
static double SPDMatrixDet(CMatrixDouble &a,const int n,const bool IsUpper);
static double SPDMatrixDet(CMatrixDouble &a);
//--- generalized symmetric positive definite eigenproblem
static bool SMatrixGEVD(CMatrixDouble &a,const int n,const bool isuppera,CMatrixDouble &b,const bool isupperb,const int zneeded,const int problemtype,double &d[],CMatrixDouble &z);
static bool SMatrixGEVDReduce(CMatrixDouble &a,const int n,const bool isuppera,CMatrixDouble &b,const bool isupperb,const int problemtype,CMatrixDouble &r,bool &isupperr);
//--- update of the inverse matrix by the Sherman-Morrison formula
static void RMatrixInvUpdateSimple(CMatrixDouble &inva,const int n,const int updrow,const int updcolumn,const double updval);
static void RMatrixInvUpdateRow(CMatrixDouble &inva,const int n,const int updrow,double &v[]);
static void RMatrixInvUpdateColumn(CMatrixDouble &inva,const int n,const int updcolumn,double &u[]);
static void RMatrixInvUpdateUV(CMatrixDouble &inva,const int n,double &u[],double &v[]);
//--- Schur decomposition
static bool RMatrixSchur(CMatrixDouble &a,const int n,CMatrixDouble &s);
//--- functions of package optimization
//--- conjugate gradient method
static void MinCGCreate(const int n,double &x[],CMinCGStateShell &state);
static void MinCGCreate(double &x[],CMinCGStateShell &state);
static void MinCGCreateF(const int n,double &x[],double diffstep,CMinCGStateShell &state);
static void MinCGCreateF(double &x[],double diffstep,CMinCGStateShell &state);
static void MinCGSetCond(CMinCGStateShell &state,double epsg,double epsf,double epsx,int maxits);
static void MinCGSetScale(CMinCGStateShell &state,double &s[]);
static void MinCGSetXRep(CMinCGStateShell &state,bool needxrep);
static void MinCGSetCGType(CMinCGStateShell &state,int cgtype);
static void MinCGSetStpMax(CMinCGStateShell &state,double stpmax);
static void MinCGSuggestStep(CMinCGStateShell &state,double stp);
static void MinCGSetPrecDefault(CMinCGStateShell &state);
static void MinCGSetPrecDiag(CMinCGStateShell &state,double &d[]);
static void MinCGSetPrecScale(CMinCGStateShell &state);
static bool MinCGIteration(CMinCGStateShell &state);
static void MinCGOptimize(CMinCGStateShell &state,CNDimensional_Func &func,CNDimensional_Rep &rep,bool rep_status,CObject &obj);
static void MinCGOptimize(CMinCGStateShell &state,CNDimensional_Grad &grad,CNDimensional_Rep &rep,bool rep_status,CObject &obj);
static void MinCGResults(CMinCGStateShell &state,double &x[],CMinCGReportShell &rep);
static void MinCGResultsBuf(CMinCGStateShell &state,double &x[],CMinCGReportShell &rep);
static void MinCGRestartFrom(CMinCGStateShell &state,double &x[]);
static void MinLBFGSRequestTermination(CMinLBFGSStateShell &state);
//--- NOTE(review): MinLBFGSRequestTermination above is misplaced in the conjugate-gradient
//--- section; it belongs with the L-BFGS group (MinLBFGSCreate and related declarations
//--- further down).
//--- bound constrained optimization with additional linear equality and inequality constraints
static void MinBLEICCreate(const int n,double &x[],CMinBLEICStateShell &state);
static void MinBLEICCreate(double &x[],CMinBLEICStateShell &state);
static void MinBLEICCreateF(const int n,double &x[],double diffstep,CMinBLEICStateShell &state);
static void MinBLEICCreateF(double &x[],double diffstep,CMinBLEICStateShell &state);
static void MinBLEICSetBC(CMinBLEICStateShell &state,double &bndl[],double &bndu[]);
static void MinBLEICSetLC(CMinBLEICStateShell &state,CMatrixDouble &c,int &ct[],const int k);
static void MinBLEICSetLC(CMinBLEICStateShell &state,CMatrixDouble &c,int &ct[]);
static void MinBLEICSetInnerCond(CMinBLEICStateShell &state,const double epsg,const double epsf,const double epsx);
static void MinBLEICSetOuterCond(CMinBLEICStateShell &state,const double epsx,const double epsi);
static void MinBLEICSetScale(CMinBLEICStateShell &state,double &s[]);
static void MinBLEICSetPrecDefault(CMinBLEICStateShell &state);
static void MinBLEICSetPrecDiag(CMinBLEICStateShell &state,double &d[]);
static void MinBLEICSetPrecScale(CMinBLEICStateShell &state);
static void MinBLEICSetMaxIts(CMinBLEICStateShell &state,const int maxits);
static void MinBLEICSetXRep(CMinBLEICStateShell &state,bool needxrep);
static void MinBLEICSetStpMax(CMinBLEICStateShell &state,double stpmax);
static bool MinBLEICIteration(CMinBLEICStateShell &state);
static void MinBLEICOptimize(CMinBLEICStateShell &state,CNDimensional_Func &func,CNDimensional_Rep &rep,bool rep_status,CObject &obj);
static void MinBLEICOptimize(CMinBLEICStateShell &state,CNDimensional_Grad &grad,CNDimensional_Rep &rep,bool rep_status,CObject &obj);
static void MinBLEICOptGuardGradient(CMinBLEICStateShell &state,double teststep);
static void MinBLEICOptGuardSmoothness(CMinBLEICStateShell &state,int level=1);
static void MinBLEICOptGuardResults(CMinBLEICStateShell &state,COptGuardReport &rep);
static void MinBLEICOptGuardNonC1Test0Results(CMinBLEICStateShell &state,COptGuardNonC1Test0Report &strrep,COptGuardNonC1Test0Report &lngrep);
static void MinBLEICOptGuardNonC1Test1Results(CMinBLEICStateShell &state,COptGuardNonC1Test1Report &strrep,COptGuardNonC1Test1Report &lngrep);
static void MinBLEICResults(CMinBLEICStateShell &state,double &x[],CMinBLEICReportShell &rep);
static void MinBLEICResultsBuf(CMinBLEICStateShell &state,double &x[],CMinBLEICReportShell &rep);
static void MinBLEICRestartFrom(CMinBLEICStateShell &state,double &x[]);
static void MinBLEICRequestTermination(CMinBLEICStateShell &state);
//--- limited memory BFGS method for large scale optimization
//--- NOTE(review): "MinLBFGSresultsbuf" below uses inconsistent casing; the convention
//--- elsewhere (MinCGResultsBuf, MinBLEICResultsBuf) would be "MinLBFGSResultsBuf".
//--- Renaming it requires updating the matching definition elsewhere in this file.
static void MinLBFGSCreate(const int n,const int m,double &x[],CMinLBFGSStateShell &state);
static void MinLBFGSCreate(const int m,double &x[],CMinLBFGSStateShell &state);
static void MinLBFGSCreateF(const int n,const int m,double &x[],const double diffstep,CMinLBFGSStateShell &state);
static void MinLBFGSCreateF(const int m,double &x[],const double diffstep,CMinLBFGSStateShell &state);
static void MinLBFGSSetCond(CMinLBFGSStateShell &state,const double epsg,const double epsf,const double epsx,const int maxits);
static void MinLBFGSSetXRep(CMinLBFGSStateShell &state,const bool needxrep);
static void MinLBFGSSetStpMax(CMinLBFGSStateShell &state,const double stpmax);
static void MinLBFGSSetScale(CMinLBFGSStateShell &state,double &s[]);
static void MinLBFGSSetPrecDefault(CMinLBFGSStateShell &state);
static void MinLBFGSSetPrecCholesky(CMinLBFGSStateShell &state,CMatrixDouble &p,const bool IsUpper);
static void MinLBFGSSetPrecDiag(CMinLBFGSStateShell &state,double &d[]);
static void MinLBFGSSetPrecScale(CMinLBFGSStateShell &state);
static bool MinLBFGSIteration(CMinLBFGSStateShell &state);
static void MinLBFGSOptimize(CMinLBFGSStateShell &state,CNDimensional_Func &func,CNDimensional_Rep &rep,bool rep_status,CObject &obj);
static void MinLBFGSOptimize(CMinLBFGSStateShell &state,CNDimensional_Grad &grad,CNDimensional_Rep &rep,bool rep_status,CObject &obj);
static void MinLBFGSResults(CMinLBFGSStateShell &state,double &x[],CMinLBFGSReportShell &rep);
static void MinLBFGSresultsbuf(CMinLBFGSStateShell &state,double &x[],CMinLBFGSReportShell &rep);
static void MinLBFGSRestartFrom(CMinLBFGSStateShell &state,double &x[]);
//--- constrained quadratic programming
static void MinQPCreate(const int n,CMinQPStateShell &state);
static void MinQPSetLinearTerm(CMinQPStateShell &state,double &b[]);
static void MinQPSetQuadraticTerm(CMinQPStateShell &state,CMatrixDouble &a,const bool IsUpper);
static void MinQPSetQuadraticTerm(CMinQPStateShell &state,CMatrixDouble &a);
//--- quadratic term (sparse), starting point, origin and variable scaling
static void MinQPSetQuadraticTermSparse(CMinQPStateShell &state,CSparseMatrix &a,bool IsUpper);
static void MinQPSetStartingPoint(CMinQPStateShell &state,double &x[]);
static void MinQPSetOrigin(CMinQPStateShell &state,double &xorigin[]);
static void MinQPSetScale(CMinQPStateShell &state,CRowDouble &s);
static void MinQPSetScaleAutoDiag(CMinQPStateShell &state);
//--- QP algorithm selection (BLEIC, dense AUL, dense/sparse IPM, QuickQP, Cholesky)
static void MinQPSetAlgoBLEIC(CMinQPStateShell &state,double epsg,double epsf,double epsx,int maxits);
static void MinQPSetAlgoDenseAUL(CMinQPStateShell &state,double epsx,double rho,int itscnt);
static void MinQPSetAlgoDenseIPM(CMinQPStateShell &state,double eps);
static void MinQPSetAlgoSparseIPM(CMinQPStateShell &state,double eps);
static void MinQPSetAlgoQuickQP(CMinQPStateShell &state,double epsg,double epsf,double epsx,int maxouterits,bool usenewton);
static void MinQPSetBCAll(CMinQPStateShell &state,double bndl,double bndu);
static void MinQPSetAlgoCholesky(CMinQPStateShell &state);
//--- box constraints (all variables / per-variable)
static void MinQPSetBC(CMinQPStateShell &state,double &bndl[],double &bndu[]);
static void MinQPSetBCI(CMinQPStateShell &state,int i,double bndl,double bndu);
//--- linear constraints: legacy one-sided form (dense/sparse/mixed)
static void MinQPSetLC(CMinQPStateShell &state,CMatrixDouble &c,CRowInt &ct,int k);
static void MinQPSetLC(CMinQPStateShell &state,CMatrixDouble &c,CRowInt &ct);
static void MinQPSetLCSparse(CMinQPStateShell &state,CSparseMatrix &c,CRowInt &ct,int k);
static void MinQPSetLCMixed(CMinQPStateShell &state,CSparseMatrix &sparsec,CRowInt &sparsect,int sparsek,CMatrixDouble &densec,CRowInt &densect,int densek);
static void MinQPSetLCMixedLegacy(CMinQPStateShell &state,CMatrixDouble &densec,CRowInt &densect,int densek,CSparseMatrix &sparsec,CRowInt &sparsect,int sparsek);
//--- linear constraints: two-sided form AL <= A*x <= AU (dense/sparse/mixed), plus appending single rows
static void MinQPSetLC2Dense(CMinQPStateShell &state,CMatrixDouble &a,CRowDouble &al,CRowDouble &au,int k);
static void MinQPSetLC2Dense(CMinQPStateShell &state,CMatrixDouble &a,CRowDouble &al,CRowDouble &au);
static void MinQPSetLC2(CMinQPStateShell &state,CSparseMatrix &a,CRowDouble &al,CRowDouble &au,int k);
static void MinQPSetLC2Mixed(CMinQPStateShell &state,CSparseMatrix &sparsea,int ksparse,CMatrixDouble &densea,int kdense,CRowDouble &al,CRowDouble &au);
static void MinQPAddLC2Dense(CMinQPStateShell &state,CRowDouble &a,double al,double au);
static void MinQPAddLC2(CMinQPStateShell &state,CRowInt &idxa,CRowDouble &vala,int nnz,double al,double au);
static void MinQPAddLC2SparseFromDense(CMinQPStateShell &state,CRowDouble &da,double al,double au);
//--- run the QP solver and retrieve results
static void MinQPOptimize(CMinQPStateShell &state);
static void MinQPResults(CMinQPStateShell &state,double &x[],CMinQPReportShell &rep);
static void MinQPResultsBuf(CMinQPStateShell &state,double &x[],CMinQPReportShell &rep);
//--- Levenberg-Marquardt method (nonlinear least squares):
//--- solver creation (V = function vector, J = analytic Jacobian,
//--- FGH = function/gradient/Hessian), settings, iteration and results
static void MinLMCreateVJ(const int n,const int m,double &x[],CMinLMStateShell &state);
static void MinLMCreateVJ(const int m,double &x[],CMinLMStateShell &state);
static void MinLMCreateV(const int n,const int m,double &x[],double diffstep,CMinLMStateShell &state);
static void MinLMCreateV(const int m,double &x[],const double diffstep,CMinLMStateShell &state);
static void MinLMCreateFGH(const int n,double &x[],CMinLMStateShell &state);
static void MinLMCreateFGH(double &x[],CMinLMStateShell &state);
static void MinLMSetCond(CMinLMStateShell &state,const double epsx,const int maxits);
static void MinLMSetXRep(CMinLMStateShell &state,const bool needxrep);
static void MinLMSetStpMax(CMinLMStateShell &state,const double stpmax);
static void MinLMSetScale(CMinLMStateShell &state,double &s[]);
static void MinLMSetBC(CMinLMStateShell &state,double &bndl[],double &bndu[]);
static void MinLMSetAccType(CMinLMStateShell &state,const int acctype);
static bool MinLMIteration(CMinLMStateShell &state);
//--- reverse-communication drivers: overloads differ in which callbacks they accept
static void MinLMOptimize(CMinLMStateShell &state,CNDimensional_FVec &fvec,CNDimensional_Rep &rep,bool rep_status,CObject &obj);
static void MinLMOptimize(CMinLMStateShell &state,CNDimensional_FVec &fvec,CNDimensional_Jac &jac,CNDimensional_Rep &rep,bool rep_status,CObject &obj);
static void MinLMOptimize(CMinLMStateShell &state,CNDimensional_Func &func,CNDimensional_Grad &grad,CNDimensional_Hess &hess,CNDimensional_Rep &rep,bool rep_status,CObject &obj);
static void MinLMOptimize(CMinLMStateShell &state,CNDimensional_Func &func,CNDimensional_Jac &jac,CNDimensional_Rep &rep,bool rep_status,CObject &obj);
static void MinLMOptimize(CMinLMStateShell &state,CNDimensional_Func &func,CNDimensional_Grad &grad,CNDimensional_Jac &jac,CNDimensional_Rep &rep,bool rep_status,CObject &obj);
static void MinLMResults(CMinLMStateShell &state,double &x[],CMinLMReportShell &rep);
static void MinLMResultsBuf(CMinLMStateShell &state,double &x[],CMinLMReportShell &rep);
static void MinLMRestartFrom(CMinLMStateShell &state,double &x[]);
//--- legacy creation modes (VGJ/FGJ/FJ)
static void MinLMCreateVGJ(const int n,const int m,double &x[],CMinLMStateShell &state);
static void MinLMCreateVGJ(const int m,double &x[],CMinLMStateShell &state);
static void MinLMCreateFGJ(const int n,const int m,double &x[],CMinLMStateShell &state);
static void MinLMCreateFGJ(const int m,double &x[],CMinLMStateShell &state);
static void MinLMCreateFJ(const int n,const int m,double &x[],CMinLMStateShell &state);
static void MinLMCreateFJ(const int m,double &x[],CMinLMStateShell &state);
//--- linear programming (LP): creation, algorithm (dual simplex / IPM),
//--- cost, scaling, box and linear constraints, optimization and results
static void MinLPCreate(int n,CMinLPState &state);
static void MinLPSetAlgoDSS(CMinLPState &state,double eps);
static void MinLPSetAlgoIPM(CMinLPState &state,double eps=0);
static void MinLPSetCost(CMinLPState &state,CRowDouble &c);
static void MinLPSetScale(CMinLPState &state,CRowDouble &s);
static void MinLPSetBC(CMinLPState &state,CRowDouble &bndl,CRowDouble &bndu);
static void MinLPSetBCAll(CMinLPState &state,double bndl,double bndu);
static void MinLPSetBCi(CMinLPState &state,int i,double bndl,double bndu);
static void MinLPSetLC(CMinLPState &state,CMatrixDouble &a,CRowInt &ct,int k);
static void MinLPSetLC(CMinLPState &state,CMatrixDouble &a,CRowInt &ct);
static void MinLPSetLC2Dense(CMinLPState &state,CMatrixDouble &a,CRowDouble &al,CRowDouble &au,int k);
static void MinLPSetLC2Dense(CMinLPState &state,CMatrixDouble &a,CRowDouble &al,CRowDouble &au);
static void MinLPSetLC2(CMinLPState &state,CSparseMatrix &a,CRowDouble &al,CRowDouble &au,int k);
static void MinLPAddLC2Dense(CMinLPState &state,CRowDouble &a,double al,double au);
static void MinLPAddLC2(CMinLPState &state,CRowInt &idxa,CRowDouble &vala,int nnz,double al,double au);
static void MinLPOptimize(CMinLPState &state);
static void MinLPResults(CMinLPState &state,CRowDouble &x,CMinLPReport &rep);
static void MinLPResultsBuf(CMinLPState &state,CRowDouble &x,CMinLPReport &rep);
//--- nonlinearly-constrained optimization (MinNLC): creation (analytic or
//--- numerical differentiation), constraints, preconditioning, algorithm
//--- selection (AUL/SLP/SQP), iteration, OptGuard diagnostics and results
static void MinNLCCreate(int n,CRowDouble &x,CMinNLCState &state);
static void MinNLCCreate(CRowDouble &x,CMinNLCState &state);
static void MinNLCCreateF(int n,CRowDouble &x,double diffstep,CMinNLCState &state);
static void MinNLCCreateF(CRowDouble &x,double diffstep,CMinNLCState &state);
static void MinNLCSetBC(CMinNLCState &state,CRowDouble &bndl,CRowDouble &bndu);
static void MinNLCSetLC(CMinNLCState &state,CMatrixDouble &c,CRowInt &ct,int k);
static void MinNLCSetLC(CMinNLCState &state,CMatrixDouble &c,CRowInt &ct);
static void MinNLCSetNLC(CMinNLCState &state,int nlec,int nlic);
static void MinNLCSetCond(CMinNLCState &state,double epsx,int maxits);
static void MinNLCSetScale(CMinNLCState &state,CRowDouble &s);
static void MinNLCSetPrecInexact(CMinNLCState &state);
static void MinNLCSetPrecExactLowRank(CMinNLCState &state,int updatefreq);
static void MinNLCSetPrecExactRobust(CMinNLCState &state,int updatefreq);
static void MinNLCSetPrecNone(CMinNLCState &state);
static void MinNLCSetSTPMax(CMinNLCState &state,double stpmax);
static void MinNLCSetAlgoAUL(CMinNLCState &state,double rho,int itscnt);
static void MinNLCSetAlgoSLP(CMinNLCState &state);
static void MinNLCSetAlgoSQP(CMinNLCState &state);
static void MinNLCSetXRep(CMinNLCState &state,bool needxrep);
static bool MinNLCIteration(CMinNLCState &state);
static void MinNLCOptimize(CMinNLCState &state,CNDimensional_FVec &fvec,CNDimensional_Rep &rep,CObject &obj);
static void MinNLCOptimize(CMinNLCState &state,CNDimensional_Jac &jac,CNDimensional_Rep &rep,CObject &obj);
//--- OptGuard: automatic gradient/smoothness verification reports
static void MinNLCOptGuardGradient(CMinNLCState &state,double teststep);
static void MinNLCOptGuardSmoothness(CMinNLCState &state,int level=1);
static void MinNLCOptGuardResults(CMinNLCState &state,COptGuardReport &rep);
static void MinNLCOptGuardNonC1Test0Results(CMinNLCState &state,COptGuardNonC1Test0Report &strrep,COptGuardNonC1Test0Report &lngrep);
static void MinNLCOptGuardNonC1Test1Results(CMinNLCState &state,COptGuardNonC1Test1Report &strrep,COptGuardNonC1Test1Report &lngrep);
static void MinNLCResults(CMinNLCState &state,CRowDouble &x,CMinNLCReport &rep);
static void MinNLCResultsBuf(CMinNLCState &state,CRowDouble &x,CMinNLCReport &rep);
static void MinNLCRequestTermination(CMinNLCState &state);
static void MinNLCRestartFrom(CMinNLCState &state,CRowDouble &x);
//--- non-smooth non-convex optimization (MinNS, AGS algorithm):
//--- creation, constraints, settings, iteration and results
static void MinNSCreate(int n,CRowDouble &x,CMinNSState &state);
static void MinNSCreate(CRowDouble &x,CMinNSState &state);
static void MinNSCreateF(int n,CRowDouble &x,double diffstep,CMinNSState &state);
static void MinNSCreateF(CRowDouble &x,double diffstep,CMinNSState &state);
static void MinNSSetBC(CMinNSState &state,CRowDouble &bndl,CRowDouble &bndu);
static void MinNSSetLC(CMinNSState &state,CMatrixDouble &c,CRowInt &ct,int k);
static void MinNSSetLC(CMinNSState &state,CMatrixDouble &c,CRowInt &ct);
static void MinNSSetNLC(CMinNSState &state,int nlec,int nlic);
static void MinNSSetCond(CMinNSState &state,double epsx,int maxits);
static void MinNSSetScale(CMinNSState &state,CRowDouble &s);
static void MinNSSetAlgoAGS(CMinNSState &state,double radius,double penalty);
static void MinNSSetXRep(CMinNSState &state,bool needxrep);
static void MinNSRequestTermination(CMinNSState &state);
static bool MinNSIteration(CMinNSState &state);
static void MinNSOptimize(CMinNSState &state,CNDimensional_FVec &fvec,CNDimensional_Rep &rep,CObject &obj);
static void MinNSOptimize(CMinNSState &state,CNDimensional_Jac &jac,CNDimensional_Rep &rep,CObject &obj);
static void MinNSResults(CMinNSState &state,CRowDouble &x,CMinNSReport &rep);
static void MinNSResultsBuf(CMinNSState &state,CRowDouble &x,CMinNSReport &rep);
static void MinNSRestartFrom(CMinNSState &state,CRowDouble &x);
//--- box-constrained optimization (MinBC): creation, bounds, stopping
//--- conditions, preconditioning, iteration, OptGuard diagnostics and results
static void MinBCCreate(int n,CRowDouble &x,CMinBCState &state);
static void MinBCCreate(CRowDouble &x,CMinBCState &state);
static void MinBCCreateF(int n,CRowDouble &x,double diffstep,CMinBCState &state);
static void MinBCCreateF(CRowDouble &x,double diffstep,CMinBCState &state);
static void MinBCSetBC(CMinBCState &state,CRowDouble &bndl,CRowDouble &bndu);
static void MinBCSetCond(CMinBCState &state,double epsg,double epsf,double epsx,int maxits);
static void MinBCSetScale(CMinBCState &state,CRowDouble &s);
static void MinBCSetPrecDefault(CMinBCState &state);
static void MinBCSetPrecDiag(CMinBCState &state,CRowDouble &d);
static void MinBCSetPrecScale(CMinBCState &state);
static void MinBCSetXRep(CMinBCState &state,bool needxrep);
static void MinBCSetStpMax(CMinBCState &state,double stpmax);
static bool MinBCIteration(CMinBCState &state);
static void MinBCOptimize(CMinBCState &state,CNDimensional_Func &func,CNDimensional_Rep &rep,CObject &obj);
static void MinBCOptimize(CMinBCState &state,CNDimensional_Grad &grad,CNDimensional_Rep &rep,CObject &obj);
static void MinBCOptGuardGradient(CMinBCState &state,double teststep);
static void MinBCOptGuardSmoothness(CMinBCState &state,int level=1);
static void MinBCOptGuardResults(CMinBCState &state,COptGuardReport &rep);
static void MinBCOptGuardNonC1Test0Results(CMinBCState &state,COptGuardNonC1Test0Report &strrep,COptGuardNonC1Test0Report &lngrep);
static void MinBCOptGuardNonC1Test1Results(CMinBCState &state,COptGuardNonC1Test1Report &strrep,COptGuardNonC1Test1Report &lngrep);
static void MinBCResults(CMinBCState &state,CRowDouble &x,CMinBCReport &rep);
static void MinBCResultsBuf(CMinBCState &state,CRowDouble &x,CMinBCReport &rep);
static void MinBCRestartFrom(CMinBCState &state,CRowDouble &x);
static void MinBCRequestTermination(CMinBCState &state);
//--- additional optimizer settings (L-BFGS/BLEIC preconditioning and
//--- barrier controls) plus the MinASA active-set solver
static void MinLBFGSSetDefaultPreconditioner(CMinLBFGSStateShell &state);
static void MinLBFGSSetCholeskyPreconditioner(CMinLBFGSStateShell &state,CMatrixDouble &p,bool IsUpper);
static void MinBLEICSetBarrierWidth(CMinBLEICStateShell &state,const double mu);
static void MinBLEICSetBarrierDecay(CMinBLEICStateShell &state,const double mudecay);
static void MinASACreate(const int n,double &x[],double &bndl[],double &bndu[],CMinASAStateShell &state);
static void MinASACreate(double &x[],double &bndl[],double &bndu[],CMinASAStateShell &state);
static void MinASASetCond(CMinASAStateShell &state,const double epsg,const double epsf,const double epsx,const int maxits);
static void MinASASetXRep(CMinASAStateShell &state,const bool needxrep);
static void MinASASetAlgorithm(CMinASAStateShell &state,const int algotype);
static void MinASASetStpMax(CMinASAStateShell &state,const double stpmax);
static bool MinASAIteration(CMinASAStateShell &state);
static void MinASAOptimize(CMinASAStateShell &state,CNDimensional_Grad &grad,CNDimensional_Rep &rep,bool rep_status,CObject &obj);
static void MinASAResults(CMinASAStateShell &state,double &x[],CMinASAReportShell &rep);
static void MinASAResultsBuf(CMinASAStateShell &state,double &x[],CMinASAReportShell &rep);
static void MinASARestartFrom(CMinASAStateShell &state,double &x[],double &bndl[],double &bndu[]);
//--- functions of package solvers
//--- polynomial root finding (roots returned as complex values)
static void PolynomialSolve(CRowDouble &a,int n,CRowComplex &x,CPolynomialSolverReport &rep);
//--- dense solvers: general real/complex (plain, LU-factorized and mixed),
//--- SPD/HPD (plain and Cholesky-factorized), and least squares
static void RMatrixSolve(CMatrixDouble &a,const int n,double &b[],int &info,CDenseSolverReportShell &rep,double &x[]);
static void RMatrixSolveM(CMatrixDouble &a,const int n,CMatrixDouble &b,const int m,const bool rfs,int &info,CDenseSolverReportShell &rep,CMatrixDouble &x);
static void RMatrixLUSolve(CMatrixDouble &lua,int &p[],const int n,double &b[],int &info,CDenseSolverReportShell &rep,double &x[]);
static void RMatrixLUSolveM(CMatrixDouble &lua,int &p[],const int n,CMatrixDouble &b,const int m,int &info,CDenseSolverReportShell &rep,CMatrixDouble &x);
static void RMatrixMixedSolve(CMatrixDouble &a,CMatrixDouble &lua,int &p[],const int n,double &b[],int &info,CDenseSolverReportShell &rep,double &x[]);
static void RMatrixMixedSolveM(CMatrixDouble &a,CMatrixDouble &lua,int &p[],const int n,CMatrixDouble &b,const int m,int &info,CDenseSolverReportShell &rep,CMatrixDouble &x);
static void CMatrixSolveM(CMatrixComplex &a,const int n,CMatrixComplex &b,const int m,const bool rfs,int &info,CDenseSolverReportShell &rep,CMatrixComplex &x);
static void CMatrixSolve(CMatrixComplex &a,const int n,complex &b[],int &info,CDenseSolverReportShell &rep,complex &x[]);
static void CMatrixLUSolveM(CMatrixComplex &lua,int &p[],const int n,CMatrixComplex &b,const int m,int &info,CDenseSolverReportShell &rep,CMatrixComplex &x);
static void CMatrixLUSolve(CMatrixComplex &lua,int &p[],const int n,complex &b[],int &info,CDenseSolverReportShell &rep,complex &x[]);
static void CMatrixMixedSolveM(CMatrixComplex &a,CMatrixComplex &lua,int &p[],const int n,CMatrixComplex &b,const int m,int &info,CDenseSolverReportShell &rep,CMatrixComplex &x);
static void CMatrixMixedSolve(CMatrixComplex &a,CMatrixComplex &lua,int &p[],const int n,complex &b[],int &info,CDenseSolverReportShell &rep,complex &x[]);
static void SPDMatrixSolveM(CMatrixDouble &a,const int n,const bool IsUpper,CMatrixDouble &b,const int m,int &info,CDenseSolverReportShell &rep,CMatrixDouble &x);
static void SPDMatrixSolve(CMatrixDouble &a,const int n,const bool IsUpper,double &b[],int &info,CDenseSolverReportShell &rep,double &x[]);
static void SPDMatrixCholeskySolveM(CMatrixDouble &cha,const int n,const bool IsUpper,CMatrixDouble &b,const int m,int &info,CDenseSolverReportShell &rep,CMatrixDouble &x);
static void SPDMatrixCholeskySolve(CMatrixDouble &cha,const int n,const bool IsUpper,double &b[],int &info,CDenseSolverReportShell &rep,double &x[]);
static void HPDMatrixSolveM(CMatrixComplex &a,const int n,const bool IsUpper,CMatrixComplex &b,const int m,int &info,CDenseSolverReportShell &rep,CMatrixComplex &x);
static void HPDMatrixSolve(CMatrixComplex &a,const int n,const bool IsUpper,complex &b[],int &info,CDenseSolverReportShell &rep,complex &x[]);
static void HPDMatrixCholeskySolveM(CMatrixComplex &cha,const int n,const bool IsUpper,CMatrixComplex &b,const int m,int &info,CDenseSolverReportShell &rep,CMatrixComplex &x);
static void HPDMatrixCholeskySolve(CMatrixComplex &cha,const int n,const bool IsUpper,complex &b[],int &info,CDenseSolverReportShell &rep,complex &x[]);
static void RMatrixSolveLS(CMatrixDouble &a,const int nrows,const int ncols,double &b[],const double threshold,int &info,CDenseSolverLSReportShell &rep,double &x[]);
//--- sparse linear solvers: direct SPD (SKS/CRS/Cholesky) and general LU
static void SparseSPDSolveSKS(CSparseMatrix &a,bool IsUpper,CRowDouble &b,CRowDouble &x,CSparseSolverReport &rep);
static void SparseSPDSolve(CSparseMatrix &a,bool IsUpper,CRowDouble &b,CRowDouble &x,CSparseSolverReport &rep);
static void SparseSPDCholeskySolve(CSparseMatrix &a,bool IsUpper,CRowDouble &b,CRowDouble &x,CSparseSolverReport &rep);
static void SparseSolve(CSparseMatrix &a,CRowDouble &b,CRowDouble &x,CSparseSolverReport &rep);
static void SparseLUSolve(CSparseMatrix &a,CRowInt &p,CRowInt &q,CRowDouble &b,CRowDouble &x,CSparseSolverReport &rep);
//--- iterative sparse solvers (GMRES) - convenience calls and the
//--- reusable state object, including the out-of-core (OOC) protocol
static void SparseSolveSymmetricGMRES(CSparseMatrix &a,bool IsUpper,CRowDouble &b,int k,double epsf,int maxits,CRowDouble &x,CSparseSolverReport &rep);
static void SparseSolveGMRES(CSparseMatrix &a,CRowDouble &b,int k,double epsf,int maxits,CRowDouble &x,CSparseSolverReport &rep);
static void SparseSolverCreate(int n,CSparseSolverState &state);
static void SparseSolverSetAlgoGMRES(CSparseSolverState &state,int k);
static void SparseSolverSetStartingPoint(CSparseSolverState &state,CRowDouble &x);
static void SparseSolverSetCond(CSparseSolverState &state,double epsf,int maxits);
static void SparseSolverSolveSymmetric(CSparseSolverState &state,CSparseMatrix &a,bool IsUpper,CRowDouble &b);
static void SparseSolverSolve(CSparseSolverState &state,CSparseMatrix &a,CRowDouble &b);
static void SparseSolverResults(CSparseSolverState &state,CRowDouble &x,CSparseSolverReport &rep);
static void SparseSolverSetXRep(CSparseSolverState &state,bool needxrep);
static void SparseSolverOOCStart(CSparseSolverState &state,CRowDouble &b);
static bool SparseSolverOOCContinue(CSparseSolverState &state);
static void SparseSolverOOCGetRequestInfo(CSparseSolverState &state,int &requesttype);
static void SparseSolverOOCGetRequestData(CSparseSolverState &state,CRowDouble &x);
static void SparseSolverOOCGetRequestData1(CSparseSolverState &state,double &v);
static void SparseSolverOOCSendResult(CSparseSolverState &state,CRowDouble &ax);
static void SparseSolverOOCStop(CSparseSolverState &state,CRowDouble &x,CSparseSolverReport &rep);
static void SparseSolverRequestTermination(CSparseSolverState &state);
//--- linear conjugate-gradient (CG) solver for sparse symmetric systems
static void LinCGCreate(int n,CLinCGState &state);
static void LinCGSetStartingPoint(CLinCGState &state,CRowDouble &x);
static void LinCGSetPrecUnit(CLinCGState &state);
static void LinCGSetPrecDiag(CLinCGState &state);
static void LinCGSetCond(CLinCGState &state,double epsf,int maxits);
static void LinCGSolveSparse(CLinCGState &state,CSparseMatrix &a,bool IsUpper,CRowDouble &b);
static void LinCGResult(CLinCGState &state,CRowDouble &x,CLinCGReport &rep);
static void LinCGSetRestartFreq(CLinCGState &state,int srf);
static void LinCGSetRUpdateFreq(CLinCGState &state,int freq);
static void LinCGSetXRep(CLinCGState &state,bool needxrep);
//--- linear LSQR solver for sparse (possibly regularized) least squares
static void LinLSQRCreate(int m,int n,CLinLSQRState &state);
static void LinLSQRCreateBuf(int m,int n,CLinLSQRState &state);
static void LinLSQRSetPrecUnit(CLinLSQRState &state);
static void LinLSQRSetPrecDiag(CLinLSQRState &state);
static void LinLSQRSetLambdaI(CLinLSQRState &state,double lambdai);
static void LinLSQRSolveSparse(CLinLSQRState &state,CSparseMatrix &a,CRowDouble &b);
static void LinLSQRSetCond(CLinLSQRState &state,double epsa,double epsb,int maxits);
static void LinLSQRResults(CLinLSQRState &state,CRowDouble &x,CLinLSQRReport &rep);
static void LinLSQRSetXRep(CLinLSQRState &state,bool needxrep);
static int LinLSQRPeekIterationsCount(CLinLSQRState &s);
static void LinLSQRRequestTermination(CLinLSQRState &state);
//--- solving systems of nonlinear equations (Levenberg-Marquardt based)
static void NlEqCreateLM(const int n,const int m,double &x[],CNlEqStateShell &state);
static void NlEqCreateLM(const int m,double &x[],CNlEqStateShell &state);
static void NlEqSetCond(CNlEqStateShell &state,const double epsf,const int maxits);
static void NlEqSetXRep(CNlEqStateShell &state,const bool needxrep);
static void NlEqSetStpMax(CNlEqStateShell &state,const double stpmax);
static bool NlEqIteration(CNlEqStateShell &state);
static void NlEqSolve(CNlEqStateShell &state,CNDimensional_Func &func,CNDimensional_Jac &jac,CNDimensional_Rep &rep,bool rep_status,CObject &obj);
static void NlEqResults(CNlEqStateShell &state,double &x[],CNlEqReportShell &rep);
static void NlEqResultsBuf(CNlEqStateShell &state,double &x[],CNlEqReportShell &rep);
static void NlEqRestartFrom(CNlEqStateShell &state,double &x[]);
//--- functions of package specialfunctions
//--- gamma function and its logarithm
static double GammaFunction(const double x);
static double LnGamma(const double x,double &sgngam);
//--- normal distribution, error function and their inverses
static double ErrorFunction(const double x);
static double ErrorFunctionC(const double x);
static double NormalDistribution(const double x);
static double NormalPDF(const double x);
static double NormalCDF(const double x);
static double InvErF(const double e);
static double InvNormalDistribution(double y0);
static double InvNormalCDF(const double y0);
static double BivariateNormalPDF(const double x,const double y,const double rho);
static double BivariateNormalCDF(double x,double y,const double rho);
//--- incomplete gamma function
static double IncompleteGamma(const double a,const double x);
static double IncompleteGammaC(const double a,const double x);
static double InvIncompleteGammaC(const double a,const double y0);
//--- Airy functions Ai/Bi and their derivatives
static void Airy(const double x,double &ai,double &aip,double &bi,double &bip);
//--- Bessel functions of the first (J), second (Y) kinds and modified (I, K)
static double BesselJ0(const double x);
static double BesselJ1(const double x);
static double BesselJN(const int n,const double x);
static double BesselY0(const double x);
static double BesselY1(const double x);
static double BesselYN(const int n,const double x);
static double BesselI0(const double x);
static double BesselI1(const double x);
static double BesselK0(const double x);
static double BesselK1(const double x);
static double BesselKN(const int nn,const double x);
//--- beta function (complete, incomplete and inverse incomplete)
static double Beta(const double a,const double b);
static double IncompleteBeta(const double a,const double b,const double x);
static double InvIncompleteBeta(const double a,const double b,double y);
//--- binomial distribution
static double BinomialDistribution(const int k,const int n,const double p);
static double BinomialComplDistribution(const int k,const int n,const double p);
static double InvBinomialDistribution(const int k,const int n,const double y);
//--- Chebyshev polynomials
static double ChebyshevCalculate(int r,const int n,const double x);
static double ChebyshevSum(double &c[],const int r,const int n,const double x);
static void ChebyshevCoefficients(const int n,double &c[]);
static void FromChebyshev(double &a[],const int n,double &b[]);
//--- chi-square distribution
static double ChiSquareDistribution(const double v,const double x);
static double ChiSquareComplDistribution(const double v,const double x);
static double InvChiSquareDistribution(const double v,const double y);
//--- Dawson's integral
static double DawsonIntegral(const double x);
//--- elliptic integrals (complete and incomplete, first and second kinds)
static double EllipticIntegralK(const double m);
static double EllipticIntegralKhighPrecision(const double m1);
static double IncompleteEllipticIntegralK(const double phi,const double m);
static double EllipticIntegralE(const double m);
static double IncompleteEllipticIntegralE(const double phi,const double m);
//--- exponential integrals Ei and En
static double ExponentialIntegralEi(const double x);
static double ExponentialIntegralEn(const double x,const int n);
//--- F distribution functions
static double FDistribution(const int a,const int b,const double x);
static double FComplDistribution(const int a,const int b,const double x);
static double InvFDistribution(const int a,const int b,const double y);
//--- Fresnel integrals C(x) and S(x)
static void FresnelIntegral(const double x,double &c,double &s);
//--- Hermite polynomials
static double HermiteCalculate(const int n,const double x);
static double HermiteSum(double &c[],const int n,const double x);
static void HermiteCoefficients(const int n,double &c[]);
//--- Jacobian elliptic functions sn, cn, dn and amplitude
static void JacobianEllipticFunctions(const double u,const double m,double &sn,double &cn,double &dn,double &ph);
//--- Laguerre polynomials
static double LaguerreCalculate(const int n,const double x);
static double LaguerreSum(double &c[],const int n,const double x);
static void LaguerreCoefficients(const int n,double &c[]);
//--- Legendre polynomials
static double LegendreCalculate(const int n,const double x);
static double LegendreSum(double &c[],const int n,const double x);
static void LegendreCoefficients(const int n,double &c[]);
//--- Poisson distribution
static double PoissonDistribution(const int k,const double m);
static double PoissonComplDistribution(const int k,const double m);
static double InvPoissonDistribution(const int k,const double y);
//--- psi (digamma) function
static double Psi(const double x);
//--- Student's t distribution
static double StudenttDistribution(const int k,const double t);
static double InvStudenttDistribution(const int k,const double p);
//--- trigonometric and hyperbolic sine/cosine integrals
static void SineCosineIntegrals(const double x,double &si,double &ci);
static void HyperbolicSineCosineIntegrals(const double x,double &shi,double &chi);
//--- functions of package statistics
//--- basic descriptive statistics: moments, mean/variance/skewness/kurtosis,
//--- absolute deviation, median, percentiles
static void SampleMoments(const double &x[],const int n,double &mean,double &variance,double &skewness,double &kurtosis);
static void SampleMoments(const double &x[],double &mean,double &variance,double &skewness,double &kurtosis);
static double SampleMean(CRowDouble &x,int n);
static double SampleMean(CRowDouble &x);
static double SampleVariance(CRowDouble &x,int n);
static double SampleVariance(CRowDouble &x);
static double SampleSkewness(CRowDouble &x,int n);
static double SampleSkewness(CRowDouble &x);
static double SampleKurtosis(CRowDouble &x,int n);
static double SampleKurtosis(CRowDouble &x);
static void SampleAdev(const double &x[],const int n,double &adev);
static void SampleAdev(const double &x[],double &adev);
static void SampleMedian(const double &x[],const int n,double &median);
static void SampleMedian(const double &x[],double &median);
static void SamplePercentile(const double &x[],const int n,const double p,double &v);
static void SamplePercentile(const double &x[],const double p,double &v);
//--- covariance and Pearson/Spearman correlation (pairwise and matrix forms)
static double Cov2(const double &x[],const double &y[],const int n);
static double Cov2(const double &x[],const double &y[]);
static double PearsonCorr2(const double &x[],const double &y[],const int n);
static double PearsonCorr2(const double &x[],const double &y[]);
static double SpearmanCorr2(const double &x[],const double &y[],const int n);
static double SpearmanCorr2(const double &x[],const double &y[]);
static void CovM(const CMatrixDouble &x,const int n,const int m,CMatrixDouble &c);
static void CovM(const CMatrixDouble &x,CMatrixDouble &c);
static void PearsonCorrM(const CMatrixDouble &x,const int n,const int m,CMatrixDouble &c);
static void PearsonCorrM(CMatrixDouble &x,CMatrixDouble &c);
static void SpearmanCorrM(const CMatrixDouble &x,const int n,const int m,CMatrixDouble &c);
static void SpearmanCorrM(const CMatrixDouble &x,CMatrixDouble &c);
static void CovM2(const CMatrixDouble &x,const CMatrixDouble &y,const int n,const int m1,const int m2,CMatrixDouble &c);
static void CovM2(const CMatrixDouble &x,const CMatrixDouble &y,CMatrixDouble &c);
static void PearsonCorrM2(const CMatrixDouble &x,const CMatrixDouble &y,const int n,const int m1,const int m2,CMatrixDouble &c);
static void PearsonCorrM2(const CMatrixDouble &x,const CMatrixDouble &y,CMatrixDouble &c);
static void SpearmanCorrM2(const CMatrixDouble &x,const CMatrixDouble &y,const int n,const int m1,const int m2,CMatrixDouble &c);
static void SpearmanCorrM2(const CMatrixDouble &x,const CMatrixDouble &y,CMatrixDouble &c);
static void RankData(CMatrixDouble &xy,int npoints,int nfeatures);
static void RankData(CMatrixDouble &xy);
static void RankDataCentered(CMatrixDouble &xy,int npoints,int nfeatures);
static void RankDataCentered(CMatrixDouble &xy);
//--- correlation significance tests
static void PearsonCorrelationSignificance(const double r,const int n,double &bothTails,double &leftTail,double &rightTail);
static void SpearmanRankCorrelationSignificance(const double r,const int n,double &bothTails,double &leftTail,double &rightTail);
//--- Jarque-Bera normality test
static void JarqueBeraTest(const double &x[],const int n,double &p);
//--- Mann-Whitney U-test
static void MannWhitneyUTest(const double &x[],const int n,const double &y[],const int m,double &bothTails,double &leftTail,double &rightTail);
//--- sign test
static void OneSampleSignTest(const double &x[],const int n,const double median,double &bothTails,double &leftTail,double &rightTail);
//--- Student t-tests (one-sample, two-sample, unequal variance/Welch form)
static void StudentTest1(const double &x[],const int n,const double mean,double &bothTails,double &leftTail,double &rightTail);
static void StudentTest2(const double &x[],const int n,const double &y[],const int m,double &bothTails,double &leftTail,double &rightTail);
static void UnequalVarianceTest(const double &x[],const int n,const double &y[],const int m,double &bothTails,double &leftTail,double &rightTail);
//--- variance tests (F-test and one-sample chi-square variance test)
static void FTest(const double &x[],const int n,const double &y[],const int m,double &bothTails,double &leftTail,double &rightTail);
static void OneSampleVarianceTest(double &x[],int n,double variance,double &bothTails,double &leftTail,double &rightTail);
//--- Wilcoxon signed-rank test
static void WilcoxonSignedRankTest(const double &x[],const int n,const double e,double &bothTails,double &leftTail,double &rightTail);
};
//+------------------------------------------------------------------+
//| HQRNDState initialization with random values which come from     |
//| standard RNG.                                                    |
//| Thin wrapper: forwards the shell's inner state object to the     |
//| core CHighQualityRand implementation.                            |
//+------------------------------------------------------------------+
void CAlglib::HQRndRandomize(CHighQualityRandStateShell &state)
{
CHighQualityRand::HQRndRandomize(state.GetInnerObj());
}
//+------------------------------------------------------------------+
//| HQRNDState initialization with seed values                       |
//| Thin wrapper: passes both integer seeds to the core              |
//| CHighQualityRand implementation via the shell's inner object.    |
//+------------------------------------------------------------------+
void CAlglib::HQRndSeed(const int s1,const int s2,CHighQualityRandStateShell &state)
{
CHighQualityRand::HQRndSeed(s1,s2,state.GetInnerObj());
}
//+------------------------------------------------------------------+
//| This function generates random real number in (0,1),             |
//| not including interval boundaries.                               |
//| State structure must be initialized with HQRNDRandomize() or     |
//| HQRNDSeed().                                                     |
//+------------------------------------------------------------------+
double CAlglib::HQRndUniformR(CHighQualityRandStateShell &state)
{
//--- delegate to the core generator working on the inner state object
double value=CHighQualityRand::HQRndUniformR(state.GetInnerObj());
//--- return generated number
return(value);
}
//+------------------------------------------------------------------+
//| This function generates random integer number in [0, N)          |
//| 1. N must be less than HQRNDMax-1.                               |
//| 2. State structure must be initialized with HQRNDRandomize() or  |
//|    HQRNDSeed()                                                   |
//+------------------------------------------------------------------+
int CAlglib::HQRndUniformI(CHighQualityRandStateShell &state,const int n)
{
//--- delegate to the core generator working on the inner state object
int value=CHighQualityRand::HQRndUniformI(state.GetInnerObj(),n);
//--- return generated number
return(value);
}
//+------------------------------------------------------------------+
//| Random number generator: normal numbers                          |
//| This function generates one random number from normal            |
//| distribution.                                                    |
//| Its performance is equal to that of HQRNDNormal2()               |
//| State structure must be initialized with HQRNDRandomize() or     |
//| HQRNDSeed().                                                     |
//+------------------------------------------------------------------+
double CAlglib::HQRndNormal(CHighQualityRandStateShell &state)
{
//--- delegate to the core generator working on the inner state object
double value=CHighQualityRand::HQRndNormal(state.GetInnerObj());
//--- return generated number
return(value);
}
//+------------------------------------------------------------------+
//| Random number generator: vector with random entries (normal      |
//| distribution)                                                    |
//| This function generates N random numbers from normal             |
//| distribution and stores them in X.                               |
//| State structure must be initialized with HQRNDRandomize() or     |
//| HQRNDSeed().                                                     |
//+------------------------------------------------------------------+
void CAlglib::HQRndNormalV(CHighQualityRandStateShell &state,
int n,CRowDouble &x)
{
//--- thin wrapper: forward the shell's inner state to the core routine
CHighQualityRand::HQRndNormalV(state.GetInnerObj(),n,x);
}
//+------------------------------------------------------------------+
//| Random number generator: vector with random entries (normal      |
//| distribution)                                                    |
//| This function generates N random numbers from normal             |
//| distribution. Overload for the built-in vector<double> type:     |
//| works through a temporary CRowDouble buffer.                     |
//| State structure must be initialized with HQRNDRandomize() or     |
//| HQRNDSeed().                                                     |
//+------------------------------------------------------------------+
void CAlglib::HQRndNormalV(CHighQualityRandStateShell &state,
int n,vector<double> &x)
{
//--- copy caller's vector into a working row buffer
CRowDouble buf=x;
//--- fill the buffer using the CRowDouble overload
HQRndNormalV(state,n,buf);
//--- write the generated values back into the caller's vector
x=buf.ToVector();
}
//+------------------------------------------------------------------+
//| Random number generator: matrix with random entries (normal |
//| distribution) |
//| This function generates MxN random matrix. |
//| State structure must be initialized with HQRNDRandomize() or |
//| HQRNDSeed(). |
//+------------------------------------------------------------------+
void CAlglib::HQRndNormalM(CHighQualityRandStateShell &state,
                           int m,int n,CMatrixDouble &x)
  {
//--- fill x with an MxN matrix of normally distributed values,
//--- delegating to the computational core
   CHighQualityRand::HQRndNormalM(state.GetInnerObj(),m,n,x);
  }
//+------------------------------------------------------------------+
//| Random number generator: matrix with random entries (normal |
//| distribution) |
//| This function generates MxN random matrix. |
//| State structure must be initialized with HQRNDRandomize() or |
//| HQRNDSeed(). |
//+------------------------------------------------------------------+
void CAlglib::HQRndNormalM(CHighQualityRandStateShell &state,
                           int m,int n,matrix<double> &x)
  {
//--- repack the caller's matrix into a CMatrixDouble working buffer
   CMatrixDouble buf=x;
//--- generate the MxN random matrix into the buffer
   HQRndNormalM(state,m,n,buf);
//--- copy the result back into the caller's matrix
   x=buf.ToMatrix();
  }
//+------------------------------------------------------------------+
//| Random number generator: random X and Y such that X^2+Y^2=1 |
//| State structure must be initialized with HQRNDRandomize() or |
//| HQRNDSeed(). |
//+------------------------------------------------------------------+
void CAlglib::HQRndUnit2(CHighQualityRandStateShell &state,
                         double &x,double &y)
  {
//--- reset output arguments before the call
   y=0;
   x=0;
//--- generate random point (x,y) satisfying x^2+y^2=1
   CHighQualityRand::HQRndUnit2(state.GetInnerObj(),x,y);
  }
//+------------------------------------------------------------------+
//| Random number generator: normal numbers |
//| This function generates two independent random numbers from |
//| normal distribution. Its performance is equal to that of |
//| HQRNDNormal() |
//| State structure must be initialized with HQRNDRandomize() or |
//| HQRNDSeed(). |
//+------------------------------------------------------------------+
void CAlglib::HQRndNormal2(CHighQualityRandStateShell &state,
                           double &x1,double &x2)
  {
//--- reset output arguments before the call
   x2=0;
   x1=0;
//--- draw two independent values from the normal distribution
   CHighQualityRand::HQRndNormal2(state.GetInnerObj(),x1,x2);
  }
//+------------------------------------------------------------------+
//| Random number generator: exponential distribution |
//| State structure must be initialized with HQRNDRandomize() or |
//| HQRNDSeed(). |
//+------------------------------------------------------------------+
double CAlglib::HQRndExponential(CHighQualityRandStateShell &state,
                                 const double lambdav)
  {
//--- draw one value from the exponential distribution with rate lambdav
   double result=CHighQualityRand::HQRndExponential(state.GetInnerObj(),lambdav);
//--- return generated value
   return(result);
  }
//+------------------------------------------------------------------+
//| This function generates random number from discrete distribution|
//| given by finite sample X. |
//| INPUT PARAMETERS |
//| State - high quality random number generator, must be |
//| initialized with HQRNDRandomize() or HQRNDSeed(). |
//| X - finite sample |
//| N - number of elements to use, N>=1 |
//| RESULT |
//| this function returns one of the X[i] for random i=0..N-1 |
//+------------------------------------------------------------------+
double CAlglib::HQRndDiscrete(CHighQualityRandStateShell &state,
                              int n,CRowDouble &x)
  {
//--- pick one of the first n elements of the sample at random
   double result=CHighQualityRand::HQRndDiscrete(state.GetInnerObj(),n,x);
//--- return selected sample value
   return(result);
  }
//+------------------------------------------------------------------+
//| This function generates random number from discrete distribution|
//| given by finite sample X. |
//| INPUT PARAMETERS |
//| State - high quality random number generator, must be |
//| initialized with HQRNDRandomize() or HQRNDSeed(). |
//| X - finite sample |
//| N - number of elements to use, N>=1 |
//| RESULT |
//| this function returns one of the X[i] for random i=0..N-1 |
//+------------------------------------------------------------------+
double CAlglib::HQRndDiscrete(CHighQualityRandStateShell &state,
                              int n,vector<double> &x)
  {
//--- pick one of the first n elements of the sample at random
   double result=CHighQualityRand::HQRndDiscrete(state.GetInnerObj(),n,x);
//--- return selected sample value
   return(result);
  }
//+------------------------------------------------------------------+
//| This function generates random number from continuous |
//| distribution given by finite sample X. |
//| INPUT PARAMETERS |
//| State - high quality random number generator, must be |
//| initialized with HQRNDRandomize() or HQRNDSeed(). |
//| X - finite sample, array[N] (can be larger, in this |
//| case only leading N elements are used). THIS ARRAY |
//| MUST BE SORTED BY ASCENDING. |
//| N - number of elements to use, N>=1 |
//| RESULT |
//| this function returns random number from continuous |
//| distribution which tries to approximate X as much as possible.   |
//| min(X)<=Result<=max(X). |
//+------------------------------------------------------------------+
double CAlglib::HQRndContinuous(CHighQualityRandStateShell &state,
                                int n,CRowDouble &x)
  {
//--- draw from the continuous distribution approximating the
//--- ascending-sorted sample x (first n elements are used)
   double result=CHighQualityRand::HQRndContinuous(state.GetInnerObj(),n,x);
//--- result lies in [min(X),max(X)]
   return(result);
  }
//+------------------------------------------------------------------+
//| This function generates random number from continuous |
//| distribution given by finite sample X. |
//| INPUT PARAMETERS |
//| State - high quality random number generator, must be |
//| initialized with HQRNDRandomize() or HQRNDSeed(). |
//| X - finite sample, array[N] (can be larger, in this |
//| case only leading N elements are used). THIS ARRAY |
//| MUST BE SORTED BY ASCENDING. |
//| N - number of elements to use, N>=1 |
//| RESULT |
//| this function returns random number from continuous |
//| distribution which tries to approximate X as much as possible.   |
//| min(X)<=Result<=max(X). |
//+------------------------------------------------------------------+
double CAlglib::HQRndContinuous(CHighQualityRandStateShell &state,
                                int n,vector<double> &x)
  {
//--- draw from the continuous distribution approximating the
//--- ascending-sorted sample x (first n elements are used)
   double result=CHighQualityRand::HQRndContinuous(state.GetInnerObj(),n,x);
//--- result lies in [min(X),max(X)]
   return(result);
  }
//+------------------------------------------------------------------+
//| This function serializes data structure to string. |
//| Important properties of s_out: |
//| * it contains alphanumeric characters, dots, underscores, minus |
//| signs |
//| * these symbols are grouped into words, which are separated by |
//| spaces and Windows-style (CR+LF) newlines |
//| * although serializer uses spaces and CR+LF as separators, you |
//| can replace any separator character by arbitrary combination |
//| of spaces, tabs, Windows or Unix newlines. It allows flexible |
//| reformatting of the string in case you want to include it into |
//| text or XML file. But you should not insert separators into the|
//| middle of the "words" nor you should change case of letters. |
//| * s_out can be freely moved between 32-bit and 64-bit systems, |
//| little and big endian machines, and so on. You can reference |
//| structure on 32-bit machine and unserialize it on 64-bit one |
//| (or vice versa), or reference it on SPARC and unserialize on |
//| x86. You can also reference it in C# version of ALGLIB and |
//| unserialize in C++ one, and vice versa. |
//+------------------------------------------------------------------+
void CAlglib::KDTreeSerialize(CKDTreeShell &obj,string &s_out)
  {
//--- serializer object; the call sequence below (alloc pass, then
//--- string-mode serialization pass, then Stop) is mandated by the
//--- CSerializer protocol and must not be reordered
   CSerializer s;
//--- first pass: compute required storage size
   s.Alloc_Start();
//--- function call
   CNearestNeighbor::KDTreeAlloc(s,obj.GetInnerObj());
   s.SStart_Str();
//--- second pass: emit the tree into the string stream
   CNearestNeighbor::KDTreeSerialize(s,obj.GetInnerObj());
   s.Stop();
//--- hand the serialized representation back to the caller
   s_out=s.Get_String();
  }
//+------------------------------------------------------------------+
//| This function unserializes data structure from string. |
//+------------------------------------------------------------------+
void CAlglib::KDTreeUnserialize(string s_in,CKDTreeShell &obj)
  {
//--- serializer object; UStart_Str/Unserialize/Stop must run in this
//--- exact order per the CSerializer protocol
   CSerializer s;
   s.UStart_Str(s_in);
//--- rebuild the kd-tree from the string stream
   CNearestNeighbor::KDTreeUnserialize(s,obj.GetInnerObj());
   s.Stop();
  }
//+------------------------------------------------------------------+
//| KD-tree creation |
//| This subroutine creates KD-tree from set of X-values and optional|
//| Y-values |
//| INPUT PARAMETERS |
//| XY - dataset,array[0..N-1, 0..NX+NY-1]. |
//| one row corresponds to one point. |
//| first NX columns contain X-values, next NY (NY |
//| may be zero) |
//| columns may contain associated Y-values |
//| N - number of points, N>=1 |
//| NX - space dimension, NX>=1. |
//| NY - number of optional Y-values, NY>=0. |
//| NormType- norm type: |
//| * 0 denotes infinity-norm |
//| * 1 denotes 1-norm |
//| * 2 denotes 2-norm (Euclidean norm) |
//| OUTPUT PARAMETERS |
//| KDT - KD-tree |
//| NOTES |
//| 1. KD-tree creation have O(N*logN) complexity and |
//| O(N*(2*NX+NY)) memory requirements. |
//| 2. Although KD-trees may be used with any combination of N and |
//| NX, they are more efficient than brute-force search only when |
//| N >> 4^NX. So they are most useful in low-dimensional tasks |
//| (NX=2, NX=3). NX=1 is another inefficient case, because |
//| simple binary search (without additional structures) is |
//| much more efficient in such tasks than KD-trees. |
//+------------------------------------------------------------------+
void CAlglib::KDTreeBuild(CMatrixDouble &xy,const int n,const int nx,
                          const int ny,const int normtype,CKDTreeShell &kdt)
  {
//--- build the kd-tree from the first n rows of xy (NX coordinates
//--- plus NY optional Y-values per row), delegating to the core
   CNearestNeighbor::KDTreeBuild(xy,n,nx,ny,normtype,kdt.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| KD-tree creation |
//| This subroutine creates KD-tree from set of X-values and optional|
//| Y-values |
//| INPUT PARAMETERS |
//| XY - dataset,array[0..N-1, 0..NX+NY-1]. |
//| one row corresponds to one point. |
//| first NX columns contain X-values, next NY (NY |
//| may be zero) |
//| columns may contain associated Y-values |
//| N - number of points, N>=1 |
//| NX - space dimension, NX>=1. |
//| NY - number of optional Y-values, NY>=0. |
//| NormType- norm type: |
//| * 0 denotes infinity-norm |
//| * 1 denotes 1-norm |
//| * 2 denotes 2-norm (Euclidean norm) |
//| OUTPUT PARAMETERS |
//| KDT - KD-tree |
//| NOTES |
//| 1. KD-tree creation have O(N*logN) complexity and |
//| O(N*(2*NX+NY)) memory requirements. |
//| 2. Although KD-trees may be used with any combination of N and |
//| NX, they are more efficient than brute-force search only when |
//| N >> 4^NX. So they are most useful in low-dimensional tasks |
//| (NX=2, NX=3). NX=1 is another inefficient case, because |
//| simple binary search (without additional structures) is |
//| much more efficient in such tasks than KD-trees. |
//+------------------------------------------------------------------+
void CAlglib::KDTreeBuild(CMatrixDouble &xy,const int nx,const int ny,
                          const int normtype,CKDTreeShell &kdt)
  {
//--- infer the number of points from the dataset height
   int npoints=(int)CAp::Rows(xy);
//--- build the kd-tree via the computational core
   CNearestNeighbor::KDTreeBuild(xy,npoints,nx,ny,normtype,kdt.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| KD-tree creation |
//| This subroutine creates KD-tree from set of X-values, integer |
//| tags and optional Y-values |
//| INPUT PARAMETERS |
//| XY - dataset,array[0..N-1, 0..NX+NY-1]. |
//| one row corresponds to one point. |
//| first NX columns contain X-values, next NY (NY |
//| may be zero) |
//| columns may contain associated Y-values |
//| Tags - tags, array[0..N-1], contains integer tags |
//| associated with points. |
//| N - number of points, N>=1 |
//| NX - space dimension, NX>=1. |
//| NY - number of optional Y-values, NY>=0. |
//| NormType- norm type: |
//| * 0 denotes infinity-norm |
//| * 1 denotes 1-norm |
//| * 2 denotes 2-norm (Euclidean norm) |
//| OUTPUT PARAMETERS |
//| KDT - KD-tree |
//| NOTES |
//| 1. KD-tree creation have O(N*logN) complexity and |
//| O(N*(2*NX+NY)) memory requirements. |
//| 2. Although KD-trees may be used with any combination of N and |
//| NX, they are more efficient than brute-force search only when |
//| N >> 4^NX. So they are most useful in low-dimensional tasks |
//| (NX=2, NX=3). NX=1 is another inefficient case, because simple|
//| binary search (without additional structures) is much more |
//| efficient in such tasks than KD-trees. |
//+------------------------------------------------------------------+
void CAlglib::KDTreeBuildTagged(CMatrixDouble &xy,int &tags[],
                                const int n,const int nx,
                                const int ny,const int normtype,
                                CKDTreeShell &kdt)
  {
//--- build the kd-tree from the first n rows of xy with per-point
//--- integer tags, delegating to the computational core
   CNearestNeighbor::KDTreeBuildTagged(xy,tags,n,nx,ny,normtype,kdt.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::KDTreeBuildTagged(CMatrixDouble &xy,CRowInt &tags,
                                const int n,const int nx,
                                const int ny,const int normtype,
                                CKDTreeShell &kdt)
  {
//--- build the kd-tree from the first n rows of xy with per-point
//--- integer tags (CRowInt overload), delegating to the core
   CNearestNeighbor::KDTreeBuildTagged(xy,tags,n,nx,ny,normtype,kdt.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| KD-tree creation |
//| This subroutine creates KD-tree from set of X-values, integer |
//| tags and optional Y-values |
//| INPUT PARAMETERS |
//| XY - dataset,array[0..N-1, 0..NX+NY-1]. |
//| one row corresponds to one point. |
//| first NX columns contain X-values, next NY (NY |
//| may be zero) |
//| columns may contain associated Y-values |
//| Tags - tags, array[0..N-1], contains integer tags |
//| associated with points. |
//| N - number of points, N>=1 |
//| NX - space dimension, NX>=1. |
//| NY - number of optional Y-values, NY>=0. |
//| NormType- norm type: |
//| * 0 denotes infinity-norm |
//| * 1 denotes 1-norm |
//| * 2 denotes 2-norm (Euclidean norm) |
//| OUTPUT PARAMETERS |
//| KDT - KD-tree |
//| NOTES |
//| 1. KD-tree creation have O(N*logN) complexity and |
//| O(N*(2*NX+NY)) memory requirements. |
//| 2. Although KD-trees may be used with any combination of N and |
//| NX, they are more efficient than brute-force search only when |
//| N >> 4^NX. So they are most useful in low-dimensional tasks |
//| (NX=2, NX=3). NX=1 is another inefficient case, because simple|
//| binary search (without additional structures) is much more |
//| efficient in such tasks than KD-trees. |
//+------------------------------------------------------------------+
void CAlglib::KDTreeBuildTagged(CMatrixDouble &xy,int &tags[],
                                const int nx,const int ny,
                                const int normtype,CKDTreeShell &kdt)
  {
//--- guard: every dataset row must have a matching tag
   if(CAp::Rows(xy)!=CAp::Len(tags))
     {
      Print(__FUNCTION__+": looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return;
     }
//--- infer the point count from the dataset height
   int npoints=(int)CAp::Rows(xy);
//--- build the kd-tree via the computational core
   CNearestNeighbor::KDTreeBuildTagged(xy,tags,npoints,nx,ny,normtype,kdt.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::KDTreeBuildTagged(CMatrixDouble &xy,CRowInt &tags,
                                const int nx,const int ny,
                                const int normtype,CKDTreeShell &kdt)
  {
//--- guard: every dataset row must have a matching tag
   if(CAp::Rows(xy)!=CAp::Len(tags))
     {
      Print(__FUNCTION__+": looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return;
     }
//--- infer the point count from the dataset height
   int npoints=(int)CAp::Rows(xy);
//--- build the kd-tree via the computational core
   CNearestNeighbor::KDTreeBuildTagged(xy,tags,npoints,nx,ny,normtype,kdt.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| K-NN query: K nearest neighbors |
//| INPUT PARAMETERS |
//| KDT - KD-tree |
//| X - point, array[0..NX-1]. |
//| K - number of neighbors to return, K>=1 |
//| SelfMatch - whether self-matches are allowed: |
//| * if True, nearest neighbor may be the point |
//| itself (if it exists in original dataset) |
//| * if False, then only points with non-zero |
//| distance are returned |
//| * if not given, considered True |
//| RESULT |
//| number of actual neighbors found (either K or N, if K>N). |
//| This subroutine performs query and stores its result in the |
//| internal structures of the KD-tree. You can use following |
//| subroutines to obtain these results: |
//| * KDTreeQueryResultsX() to get X-values |
//| * KDTreeQueryResultsXY() to get X- and Y-values |
//| * KDTreeQueryResultsTags() to get tag values |
//| * KDTreeQueryResultsDistances() to get distances |
//+------------------------------------------------------------------+
int CAlglib::KDTreeQueryKNN(CKDTreeShell &kdt,double &x[],
                            const int k,const bool selfmatch=true)
  {
//--- run the K-nearest-neighbors query through the core
   int count=CNearestNeighbor::KDTreeQueryKNN(kdt.GetInnerObj(),x,k,selfmatch);
//--- number of neighbors actually found
   return(count);
  }
//+------------------------------------------------------------------+
//| K-NN query: K nearest neighbors |
//| INPUT PARAMETERS |
//| KDT - KD-tree |
//| X - point, array[0..NX-1]. |
//| K - number of neighbors to return, K>=1 |
//| SelfMatch - whether self-matches are allowed: |
//| * if True, nearest neighbor may be the point |
//| itself (if it exists in original dataset) |
//| * if False, then only points with non-zero |
//| distance are returned |
//| * if not given, considered True |
//| RESULT |
//| number of actual neighbors found (either K or N, if K>N). |
//| This subroutine performs query and stores its result in the |
//| internal structures of the KD-tree. You can use following |
//| subroutines to obtain these results: |
//| * KDTreeQueryResultsX() to get X-values |
//| * KDTreeQueryResultsXY() to get X- and Y-values |
//| * KDTreeQueryResultsTags() to get tag values |
//| * KDTreeQueryResultsDistances() to get distances |
//+------------------------------------------------------------------+
int CAlglib::KDTreeQueryKNN(CKDTreeShell &kdt,vector<double> &x,
                            const int k,const bool selfmatch)
  {
//--- NOTE(review): unlike the array/CRowDouble overloads, selfmatch
//--- has no default value here - confirm against the class declaration
//--- repack the query point into a CRowDouble buffer
   CRowDouble point=x;
//--- run the query and report the number of neighbors found
   int count=CNearestNeighbor::KDTreeQueryKNN(kdt.GetInnerObj(),point,k,selfmatch);
   return(count);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
int CAlglib::KDTreeQueryKNN(CKDTreeShell &kdt,CRowDouble &x,
                            const int k,const bool selfmatch=true)
  {
//--- run the K-nearest-neighbors query through the core
   int count=CNearestNeighbor::KDTreeQueryKNN(kdt.GetInnerObj(),x,k,selfmatch);
//--- number of neighbors actually found
   return(count);
  }
//+------------------------------------------------------------------+
//| K-NN query: K nearest neighbors, using external thread-local |
//| buffer. |
//| You can call this function from multiple threads for same kd-tree|
//| instance, assuming that different instances of buffer object are |
//| passed to different threads. |
//| INPUT PARAMETERS |
//| KDT - kd-tree |
//| Buf - request buffer object created for this particular |
//| instance of kd-tree structure with |
//| KDTreeCreateRequestBuffer() function. |
//| X - point, array[0..NX-1]. |
//| K - number of neighbors to return, K>=1 |
//| SelfMatch - whether self-matches are allowed: |
//| * if True, nearest neighbor may be the point |
//| itself (if it exists in original dataset) |
//| * if False, then only points with non-zero |
//| distance are returned |
//| * if not given, considered True |
//| RESULT |
//| number of actual neighbors found (either K or N, if K>N). |
//| This subroutine performs query and stores its result in the |
//| internal structures of the buffer object. You can use following |
//| subroutines to obtain these results (pay attention to "buf" in |
//| their names): |
//| * KDTreeTsQueryResultsX() to get X-values |
//| * KDTreeTsQueryResultsXY() to get X- and Y-values |
//| * KDTreeTsQueryResultsTags() to get tag values |
//| * KDTreeTsQueryResultsDistances() to get distances |
//| IMPORTANT: kd-tree buffer should be used only with KD-tree object|
//| which was used to initialize buffer. Any attempt to use buffer   |
//| with different object is dangerous - you may get integrity check |
//| failure (exception) because sizes of internal arrays do not fit |
//| to dimensions of KD-tree structure. |
//+------------------------------------------------------------------+
int CAlglib::KDTreeTsQueryKNN(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,
                              double &x[],const int k,const bool selfmatch)
  {
//--- repack the query point into a CRowDouble buffer
   CRowDouble point=x;
//--- thread-safe K-NN query using the caller-supplied request buffer
   int count=CNearestNeighbor::KDTreeTsQueryKNN(kdt.GetInnerObj(),buf.GetInnerObj(),point,k,selfmatch);
   return(count);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
int CAlglib::KDTreeTsQueryKNN(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,
                              vector<double> &x,const int k,const bool selfmatch)
  {
//--- repack the query point into a CRowDouble buffer
   CRowDouble point=x;
//--- thread-safe K-NN query using the caller-supplied request buffer
   int count=CNearestNeighbor::KDTreeTsQueryKNN(kdt.GetInnerObj(),buf.GetInnerObj(),point,k,selfmatch);
   return(count);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
int CAlglib::KDTreeTsQueryKNN(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,
                              CRowDouble &x,const int k,const bool selfmatch)
  {
//--- thread-safe K-NN query using the caller-supplied request buffer
   int count=CNearestNeighbor::KDTreeTsQueryKNN(kdt.GetInnerObj(),buf.GetInnerObj(),x,k,selfmatch);
//--- number of neighbors actually found
   return(count);
  }
//+------------------------------------------------------------------+
//| R-NN query: all points within R-sphere centered at X |
//| INPUT PARAMETERS |
//| KDT - KD-tree |
//| X - point, array[0..NX-1]. |
//| R - radius of sphere (in corresponding norm), R>0|
//| SelfMatch - whether self-matches are allowed: |
//| * if True, nearest neighbor may be the point |
//| itself (if it exists in original dataset) |
//| * if False, then only points with non-zero |
//| distance are returned |
//| * if not given, considered True |
//| RESULT |
//| number of neighbors found, >=0 |
//| This subroutine performs query and stores its result in the |
//| internal structures of the KD-tree. You can use following |
//| subroutines to obtain actual results: |
//| * KDTreeQueryResultsX() to get X-values |
//| * KDTreeQueryResultsXY() to get X- and Y-values |
//| * KDTreeQueryResultsTags() to get tag values |
//| * KDTreeQueryResultsDistances() to get distances |
//+------------------------------------------------------------------+
int CAlglib::KDTreeQueryRNN(CKDTreeShell &kdt,double &x[],
                            const double r,const bool selfmatch)
  {
//--- query all points within the R-sphere centered at x
   int count=CNearestNeighbor::KDTreeQueryRNN(kdt.GetInnerObj(),x,r,selfmatch);
//--- number of neighbors found, >=0
   return(count);
  }
//+------------------------------------------------------------------+
//| R-NN query: all points within R-sphere centered at X |
//| INPUT PARAMETERS |
//| KDT - KD-tree |
//| X - point, array[0..NX-1]. |
//| R - radius of sphere (in corresponding norm), R>0|
//| SelfMatch - whether self-matches are allowed: |
//| * if True, nearest neighbor may be the point |
//| itself (if it exists in original dataset) |
//| * if False, then only points with non-zero |
//| distance are returned |
//| * if not given, considered True |
//| RESULT |
//| number of neighbors found, >=0 |
//| This subroutine performs query and stores its result in the |
//| internal structures of the KD-tree. You can use following |
//| subroutines to obtain actual results: |
//| * KDTreeQueryResultsX() to get X-values |
//| * KDTreeQueryResultsXY() to get X- and Y-values |
//| * KDTreeQueryResultsTags() to get tag values |
//| * KDTreeQueryResultsDistances() to get distances |
//+------------------------------------------------------------------+
int CAlglib::KDTreeQueryRNN(CKDTreeShell &kdt,vector<double> &x,
                            const double r,const bool selfmatch)
  {
//--- repack the query point into a CRowDouble buffer
   CRowDouble point=x;
//--- query all points within the R-sphere centered at the point
   int count=CNearestNeighbor::KDTreeQueryRNN(kdt.GetInnerObj(),point,r,selfmatch);
   return(count);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
int CAlglib::KDTreeQueryRNN(CKDTreeShell &kdt,CRowDouble &x,
                            const double r,const bool selfmatch)
  {
//--- query all points within the R-sphere centered at x
   int count=CNearestNeighbor::KDTreeQueryRNN(kdt.GetInnerObj(),x,r,selfmatch);
//--- number of neighbors found, >=0
   return(count);
  }
//+------------------------------------------------------------------+
//| R-NN query: all points within R-sphere centered at X, no ordering|
//| by distance as indicated by "U" suffix (faster than ordered      |
//| query, for large queries - significantly faster). |
//| IMPORTANT: this function can not be used in multithreaded code |
//| because it uses internal temporary buffer of kd-tree |
//| object, which can not be shared between multiple |
//| threads. If you want to perform parallel requests, use|
//| function which uses external request buffer: |
//| KDTreeTsQueryRNN() ("Ts" stands for "thread-safe"). |
//| INPUT PARAMETERS |
//| KDT - KD-tree |
//| X - point, array[0..NX-1]. |
//| R - radius of sphere (in corresponding norm), R>0 |
//| SelfMatch - whether self-matches are allowed: |
//| * if True, nearest neighbor may be the point |
//| itself (if it exists in original dataset) |
//| * if False, then only points with non-zero |
//| distance are returned |
//| * if not given, considered True |
//| RESULT |
//| number of neighbors found, >=0 |
//| This subroutine performs query and stores its result in the |
//| internal structures of the KD-tree. You can use following |
//| subroutines to obtain actual results: |
//| * KDTreeQueryResultsX() to get X-values |
//| * KDTreeQueryResultsXY() to get X- and Y-values |
//| * KDTreeQueryResultsTags() to get tag values |
//| * KDTreeQueryResultsDistances() to get distances |
//| As indicated by "U" suffix, this function returns unordered |
//| results. |
//+------------------------------------------------------------------+
int CAlglib::KDTreeQueryRNNU(CKDTreeShell &kdt,double &x[],const double r,bool selfmatch)
  {
//--- unordered R-sphere query (faster - no sorting by distance)
   int count=CNearestNeighbor::KDTreeQueryRNNU(kdt.GetInnerObj(),x,r,selfmatch);
//--- number of neighbors found, >=0
   return(count);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
int CAlglib::KDTreeQueryRNNU(CKDTreeShell &kdt,vector<double> &x,const double r,bool selfmatch)
  {
//--- repack the query point into a CRowDouble buffer
   CRowDouble point=x;
//--- unordered R-sphere query (faster - no sorting by distance)
   int count=CNearestNeighbor::KDTreeQueryRNNU(kdt.GetInnerObj(),point,r,selfmatch);
   return(count);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
int CAlglib::KDTreeQueryRNNU(CKDTreeShell &kdt,CRowDouble &x,const double r,bool selfmatch)
  {
//--- unordered R-sphere query (faster - no sorting by distance)
   int count=CNearestNeighbor::KDTreeQueryRNNU(kdt.GetInnerObj(),x,r,selfmatch);
//--- number of neighbors found, >=0
   return(count);
  }
//+------------------------------------------------------------------+
//| R-NN query: all points within R-sphere centered at X, using |
//| external thread-local buffer, sorted by distance between point |
//| and X (by ascending) |
//| You can call this function from multiple threads for same kd-tree|
//| instance, assuming that different instances of buffer object are |
//| passed to different threads. |
//| NOTE: it is also possible to perform unordered queries performed |
//| by means of KDTreeQueryRNNU() and KDTreeTsQueryRNNU() functions. |
//| Such queries are faster because we do not have to use heap |
//| structure for sorting. |
//| INPUT PARAMETERS |
//| KDT - KD-tree |
//| Buf - request buffer object created for this particular |
//| instance of kd-tree structure with |
//| KDTreeCreateRequestBuffer() function. |
//| X - point, array[0..NX-1]. |
//| R - radius of sphere (in corresponding norm), R>0 |
//| SelfMatch - whether self-matches are allowed: |
//| * if True, nearest neighbor may be the point itself |
//| (if it exists in original dataset) |
//| * if False, then only points with non-zero distance |
//| are returned |
//| * if not given, considered True |
//| RESULT |
//| number of neighbors found, >=0 |
//| This subroutine performs query and stores its result in the |
//| internal structures of the buffer object. You can use following |
//| subroutines to obtain these results (pay attention to "buf" in |
//| their names): |
//| * KDTreeTsQueryResultsX() to get X-values |
//| * KDTreeTsQueryResultsXY() to get X- and Y-values |
//| * KDTreeTsQueryResultsTags() to get tag values |
//| * KDTreeTsQueryResultsDistances() to get distances |
//| IMPORTANT: kd-tree buffer should be used only with KD-tree object|
//| which was used to initialize buffer. Any attempt to |
//|            use buffer with different object is dangerous - you   |
//| may get integrity check failure (exception) because |
//| sizes of internal arrays do not fit to dimensions of |
//| KD-tree structure. |
//+------------------------------------------------------------------+
int CAlglib::KDTreeTsQueryRNN(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,
                              double &x[],const double r,bool selfmatch)
  {
//--- repack the query point into a CRowDouble buffer
   CRowDouble point=x;
//--- thread-safe R-sphere query using the caller-supplied buffer
   int count=CNearestNeighbor::KDTreeTsQueryRNN(kdt.GetInnerObj(),buf.GetInnerObj(),point,r,selfmatch);
   return(count);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
int CAlglib::KDTreeTsQueryRNN(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,
                              vector<double> &x,const double r,bool selfmatch)
  {
//--- repack the query point into a CRowDouble buffer
   CRowDouble point=x;
//--- thread-safe R-sphere query using the caller-supplied buffer
   int count=CNearestNeighbor::KDTreeTsQueryRNN(kdt.GetInnerObj(),buf.GetInnerObj(),point,r,selfmatch);
   return(count);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
int CAlglib::KDTreeTsQueryRNN(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,
                              CRowDouble &x,const double r,bool selfmatch)
  {
//--- thread-safe R-sphere query using the caller-supplied buffer
   int count=CNearestNeighbor::KDTreeTsQueryRNN(kdt.GetInnerObj(),buf.GetInnerObj(),x,r,selfmatch);
//--- number of neighbors found, >=0
   return(count);
  }
//+------------------------------------------------------------------+
//| R-NN query: all points within R-sphere centered at X, using |
//| external thread-local buffer, no ordering by distance as |
//| indicated by "U" suffix (faster than ordered query, for large    |
//| queries - significantly faster). |
//| You can call this function from multiple threads for same kd-tree|
//| instance, assuming that different instances of buffer object are |
//| passed to different threads. |
//| INPUT PARAMETERS |
//| KDT - KD-tree |
//| Buf - request buffer object created for this particular |
//| instance of kd-tree structure with |
//| KDTreeCreateRequestBuffer() function. |
//| X - point, array[0..NX-1]. |
//| R - radius of sphere (in corresponding norm), R>0 |
//| SelfMatch - whether self-matches are allowed: |
//| * if True, nearest neighbor may be the point itself|
//| (if it exists in original dataset) |
//| * if False, then only points with non-zero distance|
//| are returned |
//| * if not given, considered True |
//| RESULT |
//| number of neighbors found, >=0 |
//| This subroutine performs query and stores its result in the |
//| internal structures of the buffer object. You can use following |
//| subroutines to obtain these results (pay attention to "buf" in |
//| their names): |
//| * KDTreeTsQueryResultsX() to get X-values |
//| * KDTreeTsQueryResultsXY() to get X- and Y-values |
//| * KDTreeTsQueryResultsTags() to get tag values |
//| * KDTreeTsQueryResultsDistances() to get distances |
//| As indicated by "U" suffix, this function returns unordered |
//| results. |
//| IMPORTANT: kd-tree buffer should be used only with KD-tree object|
//| which was used to initialize buffer. Any attempt to |
//|            use buffer with different object is dangerous - you   |
//| may get integrity check failure (exception) because |
//| sizes of internal arrays do not fit to dimensions of |
//| KD-tree structure. |
//+------------------------------------------------------------------+
int CAlglib::KDTreeTsQueryRNNU(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,
                               double &x[],const double r,const bool selfmatch)
  {
//--- repack the query point into a CRowDouble buffer
   CRowDouble point=x;
//--- thread-safe unordered R-sphere query via the caller's buffer
   int count=CNearestNeighbor::KDTreeTsQueryRNNU(kdt.GetInnerObj(),buf.GetInnerObj(),point,r,selfmatch);
   return(count);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
int CAlglib::KDTreeTsQueryRNNU(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,
                               vector<double> &x,const double r,const bool selfmatch)
  {
//--- repack the query point into a CRowDouble buffer
   CRowDouble point=x;
//--- thread-safe unordered R-sphere query via the caller's buffer
   int count=CNearestNeighbor::KDTreeTsQueryRNNU(kdt.GetInnerObj(),buf.GetInnerObj(),point,r,selfmatch);
   return(count);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
int CAlglib::KDTreeTsQueryRNNU(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,
                               CRowDouble &x,const double r,const bool selfmatch)
  {
//--- thread-safe unordered R-sphere query via the caller's buffer
   int count=CNearestNeighbor::KDTreeTsQueryRNNU(kdt.GetInnerObj(),buf.GetInnerObj(),x,r,selfmatch);
//--- number of neighbors found, >=0
   return(count);
  }
//+------------------------------------------------------------------+
//| K-NN query: approximate K nearest neighbors |
//| INPUT PARAMETERS |
//| KDT - KD-tree |
//| X - point, array[0..NX-1]. |
//| K - number of neighbors to return, K>=1 |
//| SelfMatch - whether self-matches are allowed: |
//| * if True, nearest neighbor may be the point |
//| itself (if it exists in original dataset) |
//| * if False, then only points with non-zero |
//| distance are returned |
//| * if not given, considered True |
//| Eps - approximation factor, Eps>=0. eps-approximate|
//| nearest neighbor is a neighbor whose distance|
//| from X is at most (1+eps) times distance of |
//| true nearest neighbor. |
//| RESULT |
//| number of actual neighbors found (either K or N, if K>N). |
//| NOTES |
//| significant performance gain may be achieved only when Eps is|
//| on the order of magnitude of 1 or larger. |
//| This subroutine performs query and stores its result in the |
//| internal structures of the KD-tree. You can use the following    |
//| subroutines to obtain results:                                   |
//| * KDTreeQueryResultsX() to get X-values |
//| * KDTreeQueryResultsXY() to get X- and Y-values |
//| * KDTreeQueryResultsTags() to get tag values |
//| * KDTreeQueryResultsDistances() to get distances |
//+------------------------------------------------------------------+
int CAlglib::KDTreeQueryAKNN(CKDTreeShell &kdt,double &x[],
                             const int k,const bool selfmatch,
                             const double eps)
  {
//--- forward to the approximate K-NN core implementation; the result is
//--- the number of neighbors actually found
   int count=CNearestNeighbor::KDTreeQueryAKNN(kdt.GetInnerObj(),x,k,selfmatch,eps);
   return(count);
  }
//+------------------------------------------------------------------+
//| K-NN query: approximate K nearest neighbors |
//| INPUT PARAMETERS |
//| KDT - KD-tree |
//| X - point, array[0..NX-1]. |
//| K - number of neighbors to return, K>=1 |
//| SelfMatch - whether self-matches are allowed: |
//| * if True, nearest neighbor may be the point |
//| itself (if it exists in original dataset) |
//| * if False, then only points with non-zero |
//| distance are returned |
//| * if not given, considered True |
//| Eps - approximation factor, Eps>=0. eps-approximate|
//| nearest neighbor is a neighbor whose distance|
//| from X is at most (1+eps) times distance of |
//| true nearest neighbor. |
//| RESULT |
//| number of actual neighbors found (either K or N, if K>N). |
//| NOTES |
//| significant performance gain may be achieved only when Eps is|
//| on the order of magnitude of 1 or larger. |
//| This subroutine performs query and stores its result in the |
//| internal structures of the KD-tree. You can use the following    |
//| subroutines to obtain results:                                   |
//| * KDTreeQueryResultsX() to get X-values |
//| * KDTreeQueryResultsXY() to get X- and Y-values |
//| * KDTreeQueryResultsTags() to get tag values |
//| * KDTreeQueryResultsDistances() to get distances |
//+------------------------------------------------------------------+
int CAlglib::KDTreeQueryAKNN(CKDTreeShell &kdt,vector<double> &x,
                             const int k,const bool selfmatch,
                             const double eps=0)
  {
//--- NOTE(review): this overload carries a default for eps in the
//--- definition while the sibling overloads do not - confirm the class
//--- declaration agrees, otherwise the compiler may reject the redefault
//--- convert the vector into the row type expected by the core routine
   CRowDouble point=x;
//--- forward to the approximate K-NN core implementation
   int count=CNearestNeighbor::KDTreeQueryAKNN(kdt.GetInnerObj(),point,k,selfmatch,eps);
   return(count);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
int CAlglib::KDTreeQueryAKNN(CKDTreeShell &kdt,CRowDouble &x,
                             const int k,const bool selfmatch,
                             const double eps)
  {
//--- row overload: no conversion, delegate directly to the core
   int count=CNearestNeighbor::KDTreeQueryAKNN(kdt.GetInnerObj(),x,k,selfmatch,eps);
   return(count);
  }
//+------------------------------------------------------------------+
//| Box query: all points within user-specified box. |
//| IMPORTANT: this function can not be used in multithreaded code |
//| because it uses internal temporary buffer of kd-tree |
//| object, which can not be shared between multiple |
//| threads. If you want to perform parallel requests, |
//| use function which uses external request buffer: |
//| KDTreeTsQueryBox() ("Ts" stands for "thread-safe"). |
//| INPUT PARAMETERS |
//| KDT - KD-tree |
//| BoxMin - lower bounds, array[0..NX-1]. |
//| BoxMax - upper bounds, array[0..NX-1]. |
//| RESULT |
//| number of actual neighbors found (in [0,N]). |
//| This subroutine performs query and stores its result in the |
//| internal structures of the KD-tree. You can use following |
//| subroutines to obtain these results: |
//| * KDTreeQueryResultsX() to get X-values |
//| * KDTreeQueryResultsXY() to get X- and Y-values |
//| * KDTreeQueryResultsTags() to get tag values |
//| * KDTreeQueryResultsDistances() returns zeros for this request |
//| NOTE: this particular query returns unordered results, because |
//| there is no meaningful way of ordering points. Furthermore,|
//| no 'distance' is associated with points - it is either |
//| INSIDE or OUTSIDE (so request for distances will return |
//| zeros). |
//+------------------------------------------------------------------+
int CAlglib::KDTreeQueryBox(CKDTreeShell &kdt,double &boxmin[],double &boxmax[])
  {
//--- forward the bounds straight to the core box-query implementation;
//--- result is the number of points inside the box
   int count=CNearestNeighbor::KDTreeQueryBox(kdt.GetInnerObj(),boxmin,boxmax);
   return(count);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
int CAlglib::KDTreeQueryBox(CKDTreeShell &kdt,vector<double> &boxmin,vector<double> &boxmax)
  {
//--- convert both bound vectors into the row type used by the core
   CRowDouble lower=boxmin;
   CRowDouble upper=boxmax;
//--- delegate and return the number of points inside the box
   return(CNearestNeighbor::KDTreeQueryBox(kdt.GetInnerObj(),lower,upper));
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
int CAlglib::KDTreeQueryBox(CKDTreeShell &kdt,CRowDouble &boxmin,CRowDouble &boxmax)
  {
//--- row overload: pass the bounds through unchanged
   int count=CNearestNeighbor::KDTreeQueryBox(kdt.GetInnerObj(),boxmin,boxmax);
   return(count);
  }
//+------------------------------------------------------------------+
//| Box query: all points within user-specified box, using |
//| thread-local buffer. |
//| You can call this function from multiple threads for same kd-tree|
//| instance, assuming that different instances of buffer object are |
//| passed to different threads. |
//| INPUT PARAMETERS |
//| KDT - KD-tree |
//| Buf - request buffer object created for this particular |
//| instance of kd-tree structure with |
//| KDTreeCreateRequestBuffer() function. |
//| BoxMin - lower bounds, array[0..NX-1]. |
//| BoxMax - upper bounds, array[0..NX-1]. |
//| RESULT |
//| number of actual neighbors found (in [0,N]). |
//| This subroutine performs query and stores its result in the |
//| internal structures of the buffer object. You can use following |
//| subroutines to obtain these results (pay attention to "ts" in |
//| their names): |
//| * KDTreeTsQueryResultsX() to get X-values |
//| * KDTreeTsQueryResultsXY() to get X- and Y-values |
//| * KDTreeTsQueryResultsTags() to get tag values |
//| * KDTreeTsQueryResultsDistances() returns zeros for this query |
//| NOTE: this particular query returns unordered results, because |
//| there is no meaningful way of ordering points. Furthermore,|
//| no 'distance' is associated with points - it is either |
//| INSIDE or OUTSIDE (so request for distances will return |
//| zeros). |
//| IMPORTANT: kd-tree buffer should be used only with KD-tree object|
//| which was used to initialize buffer. Any attempt to |
//|            use buffer with different object is dangerous - you   |
//|            may get integrity check failure (exception) because   |
//| sizes of internal arrays do not fit to dimensions of |
//| KD-tree structure. |
//+------------------------------------------------------------------+
int CAlglib::KDTreeTsQueryBox(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,double &boxmin[],double &boxmax[])
  {
//--- thread-safe box query: results go into the caller-supplied buffer
   int count=CNearestNeighbor::KDTreeTsQueryBox(kdt.GetInnerObj(),buf.GetInnerObj(),boxmin,boxmax);
   return(count);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
int CAlglib::KDTreeTsQueryBox(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,CRowDouble &boxmin,CRowDouble &boxmax)
  {
//--- row overload of the thread-safe box query: delegate directly
   int count=CNearestNeighbor::KDTreeTsQueryBox(kdt.GetInnerObj(),buf.GetInnerObj(),boxmin,boxmax);
   return(count);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
int CAlglib::KDTreeTsQueryBox(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,vector<double> &boxmin,vector<double> &boxmax)
  {
//--- convert both bound vectors into rows before delegating to the
//--- thread-safe box-query core
   CRowDouble lower=boxmin;
   CRowDouble upper=boxmax;
   return(CNearestNeighbor::KDTreeTsQueryBox(kdt.GetInnerObj(),buf.GetInnerObj(),lower,upper));
  }
//+------------------------------------------------------------------+
//| X-values from last query |
//| INPUT PARAMETERS |
//| KDT - KD-tree |
//| X - possibly pre-allocated buffer. If X is too small |
//| to store result, it is resized. If size(X) is |
//| enough to store result, it is left unchanged. |
//| OUTPUT PARAMETERS |
//| X - rows are filled with X-values |
//| NOTES |
//| 1. points are ordered by distance from the query point (first = |
//| closest) |
//| 2. if XY is larger than required to store result, only leading |
//| part will be overwritten; trailing part will be left |
//| unchanged. So if on input XY = [[A,B],[C,D]], and result is |
//| [1,2], then on exit we will get XY = [[1,2],[C,D]]. This is |
//| done purposely to increase performance; if you want function |
//| to resize array according to result size, use function with |
//| same name and suffix 'I'. |
//| SEE ALSO |
//| * KDTreeQueryResultsXY() X- and Y-values |
//| * KDTreeQueryResultsTags() tag values |
//| * KDTreeQueryResultsDistances() distances |
//+------------------------------------------------------------------+
void CAlglib::KDTreeQueryResultsX(CKDTreeShell &kdt,CMatrixDouble &x)
  {
//--- delegate to the computational core: rows of x are filled with the
//--- X-values of the points found by the most recent query on this tree
   CNearestNeighbor::KDTreeQueryResultsX(kdt.GetInnerObj(),x);
  }
//+------------------------------------------------------------------+
//| X- and Y-values from last query |
//| INPUT PARAMETERS |
//| KDT - KD-tree |
//| XY - possibly pre-allocated buffer. If XY is too small|
//| to store result, it is resized. If size(XY) is |
//| enough to store result, it is left unchanged. |
//| OUTPUT PARAMETERS |
//| XY - rows are filled with points: first NX columns |
//| with X-values, next NY columns - with Y-values. |
//| NOTES |
//| 1. points are ordered by distance from the query point (first = |
//| closest) |
//| 2. if XY is larger than required to store result, only leading |
//| part will be overwritten; trailing part will be left |
//| unchanged. So if on input XY = [[A,B],[C,D]], and result is |
//| [1,2], then on exit we will get XY = [[1,2],[C,D]]. This is |
//| done purposely to increase performance; if you want function |
//| to resize array according to result size, use function with |
//| same name and suffix 'I'. |
//| SEE ALSO |
//| * KDTreeQueryResultsX() X-values |
//| * KDTreeQueryResultsTags() tag values |
//| * KDTreeQueryResultsDistances() distances |
//+------------------------------------------------------------------+
void CAlglib::KDTreeQueryResultsXY(CKDTreeShell &kdt,CMatrixDouble &xy)
  {
//--- delegate to the computational core: rows of xy receive the X- and
//--- Y-values of the points found by the most recent query
   CNearestNeighbor::KDTreeQueryResultsXY(kdt.GetInnerObj(),xy);
  }
//+------------------------------------------------------------------+
//| Tags from last query |
//| INPUT PARAMETERS |
//| KDT - KD-tree |
//| Tags - possibly pre-allocated buffer. If X is too small |
//| to store result, it is resized. If size(X) is |
//| enough to store result, it is left unchanged. |
//| OUTPUT PARAMETERS |
//| Tags - filled with tags associated with points, |
//| or, when no tags were supplied, with zeros |
//| NOTES |
//| 1. points are ordered by distance from the query point (first |
//| = closest) |
//| 2. if XY is larger than required to store result, only leading |
//| part will be overwritten; trailing part will be left |
//| unchanged. So if on input XY = [[A,B],[C,D]], and result is |
//| [1,2],then on exit we will get XY = [[1,2], [C,D]]. This is |
//| done purposely to increase performance; if you want function |
//| to resize array according to result size, use function with |
//| same name and suffix 'I'. |
//| SEE ALSO |
//| * KDTreeQueryResultsX() X-values |
//| * KDTreeQueryResultsXY() X- and Y-values |
//| * KDTreeQueryResultsDistances() distances |
//+------------------------------------------------------------------+
void CAlglib::KDTreeQueryResultsTags(CKDTreeShell &kdt,int &tags[])
  {
//--- delegate to the computational core: tags[] receives the tags of the
//--- points found by the most recent query
   CNearestNeighbor::KDTreeQueryResultsTags(kdt.GetInnerObj(),tags);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::KDTreeQueryResultsTags(CKDTreeShell &kdt,CRowInt &tags)
  {
//--- row-typed overload: delegate to the computational core which fills
//--- tags with the tag values from the most recent query
   CNearestNeighbor::KDTreeQueryResultsTags(kdt.GetInnerObj(),tags);
  }
//+------------------------------------------------------------------+
//| Distances from last query |
//| INPUT PARAMETERS |
//| KDT - KD-tree |
//| R - possibly pre-allocated buffer. If X is too small |
//| to store result, it is resized. If size(X) is |
//| enough to store result, it is left unchanged. |
//| OUTPUT PARAMETERS |
//| R - filled with distances (in corresponding norm) |
//| NOTES |
//| 1. points are ordered by distance from the query point (first |
//| = closest) |
//| 2. if XY is larger than required to store result, only leading |
//| part will be overwritten; trailing part will be left |
//|    unchanged. So if on input XY = [[A,B],[C,D]], and result is   |
//|    [1,2], then on exit we will get XY = [[1,2],[C,D]]. This is   |
//| done purposely to increase performance; if you want function |
//| to resize array according to result size, use function with |
//| same name and suffix 'I'. |
//| SEE ALSO |
//| * KDTreeQueryResultsX() X-values |
//| * KDTreeQueryResultsXY() X- and Y-values |
//| * KDTreeQueryResultsTags() tag values |
//+------------------------------------------------------------------+
void CAlglib::KDTreeQueryResultsDistances(CKDTreeShell &kdt,double &r[])
  {
//--- delegate to the computational core: r[] receives the distances
//--- (in the tree's norm) of the points found by the most recent query
   CNearestNeighbor::KDTreeQueryResultsDistances(kdt.GetInnerObj(),r);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::KDTreeQueryResultsDistances(CKDTreeShell &kdt,CRowDouble &r)
  {
//--- row-typed overload: delegate to the computational core which fills
//--- r with the distances from the most recent query
   CNearestNeighbor::KDTreeQueryResultsDistances(kdt.GetInnerObj(),r);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::KDTreeQueryResultsDistances(CKDTreeShell &kdt,vector<double> &r)
  {
//--- temporary row that receives the distances from the core routine
   CRowDouble dist;
   CNearestNeighbor::KDTreeQueryResultsDistances(kdt.GetInnerObj(),dist);
//--- hand the result back to the caller as a vector
   r=dist.ToVector();
  }
//+------------------------------------------------------------------+
//| X-values from last query; 'interactive' variant for languages |
//| like Python which support constructs like "X = |
//| KDTreeQueryResultsXI(KDT)" and interactive mode of interpreter. |
//| This function allocates new array on each call, so it is |
//| significantly slower than its 'non-interactive' counterpart, but |
//| it is more convenient when you call it from command line. |
//+------------------------------------------------------------------+
void CAlglib::KDTreeQueryResultsXI(CKDTreeShell &kdt,CMatrixDouble &x)
  {
//--- 'interactive' variant: the core reallocates x on every call before
//--- filling it with X-values of the last query's results
   CNearestNeighbor::KDTreeQueryResultsXI(kdt.GetInnerObj(),x);
  }
//+------------------------------------------------------------------+
//| XY-values from last query; 'interactive' variant for languages |
//| like Python which support constructs like "XY = |
//| KDTreeQueryResultsXYI(KDT)" and interactive mode of interpreter. |
//| This function allocates new array on each call, so it is |
//| significantly slower than its 'non-interactive' counterpart, but |
//| it is more convenient when you call it from command line. |
//+------------------------------------------------------------------+
void CAlglib::KDTreeQueryResultsXYI(CKDTreeShell &kdt,CMatrixDouble &xy)
  {
//--- 'interactive' variant: the core reallocates xy on every call before
//--- filling it with X- and Y-values of the last query's results
   CNearestNeighbor::KDTreeQueryResultsXYI(kdt.GetInnerObj(),xy);
  }
//+------------------------------------------------------------------+
//| Tags from last query; 'interactive' variant for languages like |
//| Python which support constructs like "Tags = |
//| KDTreeQueryResultsTagsI(KDT)" and interactive mode of |
//| interpreter. |
//| This function allocates new array on each call, so it is |
//| significantly slower than its 'non-interactive' counterpart, but |
//| it is more convenient when you call it from command line. |
//+------------------------------------------------------------------+
void CAlglib::KDTreeQueryResultsTagsI(CKDTreeShell &kdt,int &tags[])
  {
//--- 'interactive' variant: the core reallocates tags[] on every call
//--- before filling it with the tags of the last query's results
   CNearestNeighbor::KDTreeQueryResultsTagsI(kdt.GetInnerObj(),tags);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::KDTreeQueryResultsTagsI(CKDTreeShell &kdt,CRowInt &tags)
  {
//--- row-typed 'interactive' variant: the core reallocates tags on every
//--- call before filling it with the tags of the last query's results
   CNearestNeighbor::KDTreeQueryResultsTagsI(kdt.GetInnerObj(),tags);
  }
//+------------------------------------------------------------------+
//| Distances from last query; 'interactive' variant for languages |
//| like Python which support constructs like "R = |
//| KDTreeQueryResultsDistancesI(KDT)" and interactive mode of |
//| interpreter. |
//| This function allocates new array on each call, so it is |
//| significantly slower than its 'non-interactive' counterpart, but |
//| it is more convenient when you call it from command line. |
//+------------------------------------------------------------------+
void CAlglib::KDTreeQueryResultsDistancesI(CKDTreeShell &kdt,double &r[])
  {
//--- 'interactive' variant: the core reallocates r[] on every call
//--- before filling it with the distances of the last query's results
   CNearestNeighbor::KDTreeQueryResultsDistancesI(kdt.GetInnerObj(),r);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::KDTreeQueryResultsDistancesI(CKDTreeShell &kdt,vector<double> &r)
  {
//--- temporary row that receives the distances from the core routine
   CRowDouble dist;
   CNearestNeighbor::KDTreeQueryResultsDistancesI(kdt.GetInnerObj(),dist);
//--- hand the result back to the caller as a vector
   r=dist.ToVector();
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::KDTreeQueryResultsDistancesI(CKDTreeShell &kdt,CRowDouble &r)
  {
//--- row-typed 'interactive' variant: the core reallocates r on every
//--- call before filling it with the distances of the last query
   CNearestNeighbor::KDTreeQueryResultsDistancesI(kdt.GetInnerObj(),r);
  }
//+------------------------------------------------------------------+
//| Optimal binary classification |
//| Algorithms finds optimal (=with minimal cross-entropy) binary |
//| partition. |
//| Internal subroutine. |
//| INPUT PARAMETERS: |
//| A - array[0..N-1], variable |
//| C - array[0..N-1], class numbers (0 or 1). |
//| N - array size |
//| OUTPUT PARAMETERS: |
//|     Info    -   completion code:                                 |
//| * -3, all values of A[] are same (partition is |
//| impossible) |
//| * -2, one of C[] is incorrect (<0, >1) |
//|                  * -1, incorrect parameters were passed (N<=0).  |
//| * 1, OK |
//|     Threshold- partition boundary. Left part contains values     |
//| which are strictly less than Threshold. Right |
//| part contains values which are greater than or |
//| equal to Threshold. |
//| PAL, PBL- probabilities P(0|v<Threshold) and |
//| P(1|v<Threshold) |
//| PAR, PBR- probabilities P(0|v>=Threshold) and |
//| P(1|v>=Threshold) |
//| CVE - cross-validation estimate of cross-entropy |
//+------------------------------------------------------------------+
void CAlglib::DSOptimalSplit2(double &a[],int &c[],const int n,
                              int &info,double &threshold,
                              double &pal,double &pbl,double &par,
                              double &pbr,double &cve)
  {
//--- clear every output parameter before delegating, so the caller never
//--- observes stale values if the core routine reports an error
   info=0;
   threshold=pal=pbl=par=pbr=cve=0.0;
//--- run the optimal binary split (minimal cross-entropy) algorithm
   CBdSS::DSOptimalSplit2(a,c,n,info,threshold,pal,pbl,par,pbr,cve);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::DSOptimalSplit2(CRowDouble &a,CRowInt &c,const int n,
                              int &info,double &threshold,
                              double &pal,double &pbl,double &par,
                              double &pbr,double &cve)
  {
//--- clear every output parameter before delegating, so the caller never
//--- observes stale values if the core routine reports an error
   info=0;
   threshold=pal=pbl=par=pbr=cve=0.0;
//--- run the optimal binary split (minimal cross-entropy) algorithm
   CBdSS::DSOptimalSplit2(a,c,n,info,threshold,pal,pbl,par,pbr,cve);
  }
//+------------------------------------------------------------------+
//| Optimal partition, internal subroutine. Fast version. |
//| Accepts: |
//| A array[0..N-1] array of attributes array[0..N-1]|
//| C array[0..N-1] array of class labels |
//| TiesBuf array[0..N] temporaries (ties) |
//| CntBuf array[0..2*NC-1] temporaries (counts) |
//| Alpha centering factor (0<=alpha<=1, |
//| recommended value - 0.05) |
//| BufR array[0..N-1] temporaries |
//| BufI array[0..N-1] temporaries |
//| Output: |
//| Info error code (">0"=OK, "<0"=bad) |
//| RMS training set RMS error |
//| CVRMS leave-one-out RMS error |
//| Note: |
//| content of all arrays is changed by subroutine; |
//| it doesn't allocate temporaries. |
//+------------------------------------------------------------------+
void CAlglib::DSOptimalSplit2Fast(double &a[],int &c[],int &tiesbuf[],
                                  int &cntbuf[],double &bufr[],
                                  int &bufi[],const int n,
                                  const int nc,const double alpha,
                                  int &info,double &threshold,
                                  double &rms,double &cvrms)
  {
//--- clear scalar outputs before delegating to the computational core
   info=0;
   threshold=rms=cvrms=0.0;
//--- fast optimal-partition algorithm; caller supplies all temporaries
//--- (tiesbuf/cntbuf/bufr/bufi), whose contents are overwritten
   CBdSS::DSOptimalSplit2Fast(a,c,tiesbuf,cntbuf,bufr,bufi,n,nc,alpha,info,threshold,rms,cvrms);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::DSOptimalSplit2Fast(CRowDouble &a,CRowInt &c,CRowInt &tiesbuf,
                                  CRowInt &cntbuf,CRowDouble &bufr,
                                  CRowInt &bufi,const int n,
                                  const int nc,const double alpha,
                                  int &info,double &threshold,
                                  double &rms,double &cvrms)
  {
//--- clear scalar outputs before delegating to the computational core
   info=0;
   threshold=rms=cvrms=0.0;
//--- fast optimal-partition algorithm; caller supplies all temporaries
//--- (tiesbuf/cntbuf/bufr/bufi), whose contents are overwritten
   CBdSS::DSOptimalSplit2Fast(a,c,tiesbuf,cntbuf,bufr,bufi,n,nc,alpha,info,threshold,rms,cvrms);
  }
//+------------------------------------------------------------------+
//| This function serializes data structure to string. |
//| Important properties of s_out: |
//| * it contains alphanumeric characters, dots, underscores, minus |
//| signs |
//| * these symbols are grouped into words, which are separated by |
//| spaces and Windows-style (CR+LF) newlines |
//| * although serializer uses spaces and CR+LF as separators, you|
//| can replace any separator character by arbitrary combination of|
//| spaces, tabs, Windows or Unix newlines. It allows flexible |
//| reformatting of the string in case you want to include it into |
//| text or XML file. But you should not insert separators into the|
//| middle of the "words" nor you should change case of letters. |
//| * s_out can be freely moved between 32-bit and 64-bit systems, |
//| little and big endian machines, and so on. You can reference |
//| structure on 32-bit machine and unserialize it on 64-bit one |
//| (or vice versa), or reference it on SPARC and unserialize on |
//| x86. You can also reference it in C# version of ALGLIB and |
//| unserialize in C++ one, and vice versa. |
//+------------------------------------------------------------------+
void CAlglib::DFSerialize(CDecisionForestShell &obj,string &s_out)
  {
   CSerializer serializer;
//--- first pass: let the forest report its size so the serializer can
//--- pre-allocate storage
   serializer.Alloc_Start();
   CDForest::DFAlloc(serializer,obj.GetInnerObj());
//--- second pass: emit the portable string representation
   serializer.SStart_Str();
   CDForest::DFSerialize(serializer,obj.GetInnerObj());
   serializer.Stop();
//--- return the serialized text through the output parameter
   s_out=serializer.Get_String();
  }
//+------------------------------------------------------------------+
//| This function unserializes data structure from string. |
//+------------------------------------------------------------------+
void CAlglib::DFUnserialize(const string s_in,CDecisionForestShell &obj)
  {
   CSerializer serializer;
//--- attach the serializer to the input string and rebuild the forest
   serializer.UStart_Str(s_in);
   CDForest::DFUnserialize(serializer,obj.GetInnerObj());
//--- finalize the unserialization session
   serializer.Stop();
  }
//+------------------------------------------------------------------+
//| This function creates buffer structure which can be used to |
//| perform parallel inference requests. |
//| DF subpackage provides two sets of computing functions - ones |
//| which use internal buffer of DF model (these functions are |
//| single-threaded because they use same buffer, which can not |
//| shared between threads), and ones which use external buffer. |
//| This function is used to initialize external buffer. |
//| INPUT PARAMETERS: |
//| Model - DF model which is associated with newly created |
//| buffer |
//| OUTPUT PARAMETERS: |
//| Buf - external buffer. |
//| IMPORTANT: buffer object should be used only with model which was|
//| used to initialize buffer. Any attempt to use buffer |
//| with different object is dangerous - you may get |
//| integrity check failure (exception) because sizes of |
//| internal arrays do not fit to dimensions of the model |
//| structure. |
//+------------------------------------------------------------------+
void CAlglib::DFCreateBuffer(CDecisionForestShell &model,
                             CDecisionForestBuffer &buf)
  {
//--- delegate to the core, which sizes buf to match the dimensions of
//--- this particular model (buf must then be used only with this model)
   CDForest::DFCreateBuffer(model.GetInnerObj(),buf);
  }
//+------------------------------------------------------------------+
//| This subroutine creates CDecisionForestBuilder object which is |
//| used to train decision forests. |
//| By default, new builder stores empty dataset and some reasonable |
//| default settings. At the very least, you should specify dataset |
//| prior to building decision forest. You can also tweak settings of|
//| the forest construction algorithm (recommended, although default |
//| setting should work well). |
//| Following actions are mandatory: |
//| * calling DFBuilderSetDataset() to specify dataset |
//| * calling DFBuilderBuildRandomForest() to build decision forest|
//| using current dataset and default settings |
//| Additionally, you may call: |
//| * DFBuilderSetRndVars() or DFBuilderSetRndVarsRatio() to |
//| specify number of variables randomly chosen for each split |
//| * DFBuilderSetSubsampleRatio() to specify fraction of the |
//| dataset randomly subsampled to build each tree |
//| * DFBuilderSetSeed() to control random seed chosen for tree |
//| construction |
//| INPUT PARAMETERS: |
//| none |
//| OUTPUT PARAMETERS: |
//| S - decision forest builder |
//+------------------------------------------------------------------+
void CAlglib::DFBuilderCreate(CDecisionForestBuilder &s)
  {
//--- delegate to the core: initializes s with an empty dataset and
//--- default forest-construction settings
   CDForest::DFBuilderCreate(s);
  }
//+------------------------------------------------------------------+
//| This subroutine adds dense dataset to the internal storage of the|
//| builder object. Specifying your dataset in the dense format means|
//| that the dense version of the forest construction algorithm will |
//| be invoked. |
//| INPUT PARAMETERS: |
//| S - decision forest builder object |
//| XY - array[NPoints,NVars+1] (minimum size; actual size |
//| can be larger, only leading part is used anyway), |
//| dataset: |
//| * first NVars elements of each row store values of |
//| the independent variables |
//| * last column store class number(in 0...NClasses-1)|
//| or real value of the dependent variable |
//| NPoints - number of rows in the dataset, NPoints>=1 |
//| NVars - number of independent variables, NVars>=1 |
//| NClasses - indicates type of the problem being solved: |
//| * NClasses>=2 means that classification problem is |
//| solved (last column of the dataset stores class |
//| number) |
//| * NClasses=1 means that regression problem is |
//| solved (last column of the dataset stores |
//| variable value) |
//| OUTPUT PARAMETERS: |
//| S - decision forest builder |
//+------------------------------------------------------------------+
void CAlglib::DFBuilderSetDataset(CDecisionForestBuilder &s,CMatrixDouble &xy,
                                  int npoints,int nvars,int nclasses)
  {
//--- delegate to the core: stores the dense dataset xy (npoints rows,
//--- nvars inputs + 1 target column) in the builder; nclasses=1 means
//--- regression, nclasses>=2 means classification
   CDForest::DFBuilderSetDataset(s,xy,npoints,nvars,nclasses);
  }
//+------------------------------------------------------------------+
//| This function sets number of variables (in [1,NVars] range) used |
//| by decision forest construction algorithm. |
//| The default option is to use roughly sqrt(NVars) variables. |
//| INPUT PARAMETERS: |
//| S - decision forest builder object |
//| RndVars - number of randomly selected variables; values |
//| outside of [1,NVars] range are silently clipped. |
//| OUTPUT PARAMETERS: |
//| S - decision forest builder |
//+------------------------------------------------------------------+
void CAlglib::DFBuilderSetRndVars(CDecisionForestBuilder &s,int rndvars)
  {
//--- delegate to the core: sets the number of randomly selected
//--- variables per split (values outside [1,NVars] are clipped)
   CDForest::DFBuilderSetRndVars(s,rndvars);
  }
//+------------------------------------------------------------------+
//| This function sets number of variables used by decision forest |
//| construction algorithm as a fraction of total variable count |
//| (0,1) range. |
//| The default option is to use roughly sqrt(NVars) variables. |
//| INPUT PARAMETERS: |
//| S - decision forest builder object |
//| F - round(NVars*F) variables are selected |
//| OUTPUT PARAMETERS: |
//| S - decision forest builder |
//+------------------------------------------------------------------+
void CAlglib::DFBuilderSetRndVarsRatio(CDecisionForestBuilder &s,double f)
  {
//--- delegate to the core: round(NVars*f) variables will be randomly
//--- selected for each split
   CDForest::DFBuilderSetRndVarsRatio(s,f);
  }
//+------------------------------------------------------------------+
//| This function tells decision forest builder to automatically |
//| choose number of variables used by decision forest construction |
//| algorithm. Roughly sqrt(NVars) variables will be used. |
//| INPUT PARAMETERS: |
//| S - decision forest builder object |
//| OUTPUT PARAMETERS: |
//| S - decision forest builder |
//+------------------------------------------------------------------+
void CAlglib::DFBuilderSetRndVarsAuto(CDecisionForestBuilder &s)
  {
//--- delegate to the core: restores automatic choice of the variable
//--- count (roughly sqrt(NVars) per split)
   CDForest::DFBuilderSetRndVarsAuto(s);
  }
//+------------------------------------------------------------------+
//| This function sets size of dataset subsample generated the |
//| decision forest construction algorithm. Size is specified as a |
//| fraction of total dataset size. |
//| The default option is to use 50% of the dataset for training, |
//| 50% for the OOB estimates. You can decrease fraction F down to |
//| 10%, 1% or even below in order to reduce overfitting. |
//| INPUT PARAMETERS: |
//| S - decision forest builder object |
//| F - fraction of the dataset to use, in (0,1] range. |
//| Values outside of this range will be silently |
//| clipped. At least one element is always selected |
//| for the training set. |
//| OUTPUT PARAMETERS: |
//| S - decision forest builder |
//+------------------------------------------------------------------+
void CAlglib::DFBuilderSetSubsampleRatio(CDecisionForestBuilder &s,double f)
  {
//--- delegate to the core: fraction f in (0,1] of the dataset is
//--- subsampled for each tree (out-of-range values are clipped)
   CDForest::DFBuilderSetSubsampleRatio(s,f);
  }
//+------------------------------------------------------------------+
//| This function sets seed used by internal RNG for random |
//| subsampling and random selection of variable subsets. |
//| By default random seed is used, i.e. every time you build |
//| decision forest, we seed generator with new value obtained from |
//| system-wide RNG. Thus, decision forest builder returns |
//| non-deterministic results. You can change such behavior by |
//| specifying fixed positive seed value.                            |
//| INPUT PARAMETERS: |
//| S - decision forest builder object |
//| SeedVal - seed value: |
//| * positive values are used for seeding RNG with |
//| fixed seed, i.e. subsequent runs on same data |
//| will return same decision forests |
//| * non-positive seed means that random seed is used |
//| for every run of builder, i.e. subsequent runs |
//| on same datasets will return slightly different |
//| decision forests |
//| OUTPUT PARAMETERS: |
//| S - decision forest builder, see |
//+------------------------------------------------------------------+
void CAlglib::DFBuilderSetSeed(CDecisionForestBuilder &s,int seedval)
  {
//--- delegate to the core: positive seedval gives deterministic forests,
//--- non-positive values reseed from the system RNG on every run
   CDForest::DFBuilderSetSeed(s,seedval);
  }
//+------------------------------------------------------------------+
//| This function sets random decision forest construction algorithm.|
//| As for now, only one decision forest construction algorithm is |
//| supported-a dense "baseline" RDF algorithm. |
//| INPUT PARAMETERS: |
//| S - decision forest builder object |
//| AlgoType - algorithm type: |
//| * 0 = baseline dense RDF |
//| OUTPUT PARAMETERS: |
//| S - decision forest builder, see |
//+------------------------------------------------------------------+
void CAlglib::DFBuilderSetRDFAlgo(CDecisionForestBuilder &s,int algotype)
  {
//--- thin wrapper: forward the construction-algorithm choice to CDForest
   CDForest::DFBuilderSetRDFAlgo(s,algotype);
  }
//+------------------------------------------------------------------+
//| This function sets split selection algorithm used by decision |
//| forest classifier. You may choose several algorithms, with |
//| different speed and quality of the results. |
//| INPUT PARAMETERS: |
//| S - decision forest builder object |
//| SplitStrength - split type: |
//| * 0 = split at the random position, fastest one |
//| * 1 = split at the middle of the range |
//| * 2 = strong split at the best point of the range |
//| (default) |
//| OUTPUT PARAMETERS: |
//| S - decision forest builder, see |
//+------------------------------------------------------------------+
void CAlglib::DFBuilderSetRDFSplitStrength(CDecisionForestBuilder &s,
                                           int splitstrength)
  {
//--- thin wrapper: forward the split-selection strategy to CDForest
   CDForest::DFBuilderSetRDFSplitStrength(s,splitstrength);
  }
//+------------------------------------------------------------------+
//| This function tells decision forest construction algorithm to use|
//| Gini impurity based variable importance estimation (also known as|
//| MDI). |
//| This version of importance estimation algorithm analyzes mean |
//| decrease in impurity (MDI) on training sample during splits. The |
//| result is divided by impurity at the root node in order to |
//| produce estimate in [0,1] range. |
//| Such estimates are fast to calculate and beautifully normalized |
//| (sum to one) but have following downsides: |
//| * They ALWAYS sum to 1.0, even if output is completely |
//| unpredictable. I.e. MDI allows to order variables by |
//| importance, but does not tell us about "absolute" |
//| importances of variables |
//| * there exist some bias towards continuous and high-cardinality|
//| categorical variables |
//| NOTE: informally speaking, MDA (permutation importance) rating |
//| answers the question "what part of the model |
//| predictive power is ruined by permuting k-th variable?" |
//| while MDI tells us "what part of the model predictive power|
//| was achieved due to usage of k-th variable". |
//| Thus, MDA rates each variable independently at "0 to 1" scale |
//| while MDI (and OOB-MDI too) tends to divide "unit amount of |
//| importance" between several important variables. |
//| If all variables are equally important, they will have same |
//| MDI/OOB-MDI rating, equal (for OOB-MDI: roughly equal) to |
//| 1/NVars. However, roughly same picture will be produced for |
//| the "all variables provide information no one is critical" |
//| situation and for the "all variables are critical, drop any one, |
//| everything is ruined" situation. |
//| Contrary to that, MDA will rate critical variable as ~1.0 |
//| important, and important but non-critical variable will have less|
//| than unit rating. |
//| NOTE: quite often MDA and MDI return the same results. It       |
//| generally happens on problems with low test set error |
//| (a few percents at most) and large enough training set |
//| to avoid overfitting. |
//| The difference between MDA, MDI and OOB-MDI becomes important |
//| only on "hard" tasks with high test set error and/or small |
//| training set. |
//| INPUT PARAMETERS: |
//| S - decision forest builder object |
//| OUTPUT PARAMETERS: |
//| S - decision forest builder object. Next call to the |
//| forest construction function will produce: |
//| * importance estimates in rep.varimportances field |
//| * variable ranks in rep.topvars field |
//+------------------------------------------------------------------+
void CAlglib::DFBuilderSetImportanceTrnGini(CDecisionForestBuilder &s)
  {
//--- thin wrapper: select training-set Gini (MDI) importance estimation
   CDForest::DFBuilderSetImportanceTrnGini(s);
  }
//+------------------------------------------------------------------+
//| This function tells decision forest construction algorithm to use|
//| out-of-bag version of Gini variable importance estimation (also |
//| known as OOB-MDI). |
//| This version of importance estimation algorithm analyzes mean |
//| decrease in impurity (MDI) on out-of-bag sample during splits. |
//| The result is divided by impurity at the root node in order to |
//| produce estimate in [0,1] range. |
//| Such estimates are fast to calculate and resistant to overfitting|
//| issues (thanks to the out-of-bag estimates used). However, OOB |
//| Gini rating has following downsides: |
//| * there exist some bias towards continuous and |
//| high-cardinality categorical variables |
//| * Gini rating allows us to order variables by importance,|
//| but it is hard to define importance of the variable by |
//| itself. |
//| NOTE: informally speaking, MDA (permutation importance) rating |
//| answers the question "what part of the model predictive |
//| power is ruined by permuting k-th variable?" while MDI |
//| tells us "what part of the model predictive power was |
//| achieved due to usage of k-th variable". |
//| Thus, MDA rates each variable independently at "0 to 1" scale |
//| while MDI (and OOB-MDI too) tends to divide "unit amount of |
//| importance" between several important variables. |
//| If all variables are equally important, they will have same |
//| MDI/OOB-MDI rating, equal (for OOB-MDI: roughly equal) to |
//| 1/NVars. However, roughly same picture will be produced for the |
//| "all variables provide information no one is critical" situation |
//| and for the "all variables are critical, drop any one, everything|
//| is ruined" situation. |
//| Contrary to that, MDA will rate critical variable as ~1.0 |
//| important, and important but non-critical variable will have less|
//| than unit rating. |
//| NOTE: quite often MDA and MDI return the same results. It       |
//| generally happens on problems with low test set error |
//| (a few percents at most) and large enough training set to |
//| avoid overfitting. |
//| The difference between MDA, MDI and OOB-MDI becomes important |
//| only on "hard" tasks with high test set error and/or small |
//| training set. |
//| INPUT PARAMETERS: |
//| S - decision forest builder object |
//| OUTPUT PARAMETERS: |
//| S - decision forest builder object. Next call to the|
//| forest construction function will produce: |
//| * importance estimates in rep.varimportances field |
//| * variable ranks in rep.topvars field |
//+------------------------------------------------------------------+
void CAlglib::DFBuilderSetImportanceOOBGini(CDecisionForestBuilder &s)
  {
//--- thin wrapper: select out-of-bag Gini (OOB-MDI) importance estimation
   CDForest::DFBuilderSetImportanceOOBGini(s);
  }
//+------------------------------------------------------------------+
//| This function tells decision forest construction algorithm to use|
//| permutation variable importance estimator (also known as MDA). |
//| This version of importance estimation algorithm analyzes mean |
//| increase in out-of-bag sum of squared residuals after random |
//| permutation of J-th variable. The result is divided by error |
//| computed with all variables being perturbed in order to produce |
//| R-squared-like estimate in [0,1] range. |
//| Such estimate is slower to calculate than Gini-based rating |
//| because it needs multiple inference runs for each of variables |
//| being studied. |
//| MDA rating has following benefits over Gini-based ones: |
//| * no bias towards specific variable types |
//| * ability to directly evaluate "absolute" importance of some|
//| variable at "0 to 1" scale (contrary to Gini-based rating,|
//| which returns comparative importances). |
//| NOTE: informally speaking, MDA (permutation importance) rating |
//| answers the question "what part of the model predictive |
//| power is ruined by permuting k-th variable?" while MDI |
//| tells us "what part of the model predictive power was |
//| achieved due to usage of k-th variable". |
//| Thus, MDA rates each variable independently at "0 to 1" scale |
//| while MDI (and OOB-MDI too) tends to divide "unit amount of |
//| importance" between several important variables. |
//| If all variables are equally important, they will have same |
//| MDI/OOB-MDI rating, equal (for OOB-MDI: roughly equal) to |
//| 1/NVars. However, roughly same picture will be produced for the  |
//| "all variables provide information no one is critical" situation |
//| and for the "all variables are critical, drop any one, everything|
//| is ruined" situation. |
//| Contrary to that, MDA will rate critical variable as ~1.0 |
//| important, and important but non-critical variable will have less|
//| than unit rating. |
//| NOTE: quite often MDA and MDI return the same results. It       |
//| generally happens on problems with low test set error |
//| (a few percents at most) and large enough training set |
//| to avoid overfitting. |
//| The difference between MDA, MDI and OOB-MDI becomes important |
//| only on "hard" tasks with high test set error and/or small |
//| training set. |
//| INPUT PARAMETERS: |
//| S - decision forest builder object |
//| OUTPUT PARAMETERS: |
//| S - decision forest builder object. Next call to |
//| the forest construction function will produce: |
//| * importance estimates in rep.varimportances field |
//| * variable ranks in rep.topvars field |
//+------------------------------------------------------------------+
void CAlglib::DFBuilderSetImportancePermutation(CDecisionForestBuilder &s)
  {
//--- thin wrapper: select permutation (MDA) importance estimation
   CDForest::DFBuilderSetImportancePermutation(s);
  }
//+------------------------------------------------------------------+
//| This function tells decision forest construction algorithm to |
//| skip variable importance estimation. |
//| INPUT PARAMETERS: |
//| S - decision forest builder object |
//| OUTPUT PARAMETERS: |
//| S - decision forest builder object. Next call to the |
//| forest construction function will result in forest |
//| being built without variable importance estimation.|
//+------------------------------------------------------------------+
void CAlglib::DFBuilderSetImportanceNone(CDecisionForestBuilder &s)
  {
//--- thin wrapper: disable variable importance estimation
   CDForest::DFBuilderSetImportanceNone(s);
  }
//+------------------------------------------------------------------+
//| This function is an alias for dfbuilderpeekprogress(), left in |
//| ALGLIB for backward compatibility reasons. |
//+------------------------------------------------------------------+
double CAlglib::DFBuilderGetProgress(CDecisionForestBuilder &s)
  {
//--- backward-compatibility alias: delegate and hand back the progress value
   double progress=CDForest::DFBuilderGetProgress(s);
   return(progress);
  }
//+------------------------------------------------------------------+
//| This function is used to peek into decision forest construction |
//| process from some other thread and get current progress indicator|
//| It returns value in [0,1]. |
//| INPUT PARAMETERS: |
//| S - decision forest builder object used to build forest|
//| in some other thread |
//| RESULT: |
//| progress value, in [0,1] |
//+------------------------------------------------------------------+
double CAlglib::DFBuilderPeekProgress(CDecisionForestBuilder &s)
  {
//--- delegate to the implementation; result lies in [0,1]
   double progress=CDForest::DFBuilderPeekProgress(s);
   return(progress);
  }
//+------------------------------------------------------------------+
//| This subroutine builds decision forest according to current |
//| settings using dataset internally stored in the builder object. |
//| Dense algorithm is used. |
//| NOTE: this function uses dense algorithm for forest construction |
//| independently from the dataset format (dense or sparse). |
//| NOTE: forest built with this function is stored in-memory using |
//| 64-bit data structures for offsets/indexes/split values. It|
//| is possible to convert forest into more memory-efficient |
//| compressed binary representation. Depending on the problem |
//| properties, 3.7x-5.7x compression factors are possible. |
//| The downsides of compression are (a) slight reduction in the |
//| model accuracy and (b) ~1.5x reduction in the inference speed |
//| (due to increased complexity of the storage format). |
//| See comments on DFBinaryCompression() for more info. |
//| Default settings are used by the algorithm; you can tweak them |
//| with the help of the following functions: |
//| * DFBuilderSetRFactor() - to control a fraction of the |
//| dataset used for subsampling |
//| * DFBuilderSetRandomVars() - to control number of variables |
//| randomly chosen for decision rule |
//| creation |
//| INPUT PARAMETERS: |
//| S - decision forest builder object |
//| NTrees - NTrees>=1, number of trees to train |
//| OUTPUT PARAMETERS: |
//| D - decision forest. You can compress this forest to |
//| more compact 16-bit representation with |
//| DFBinaryCompression() |
//| Rep - report, see below for information on its fields. |
//| == report information produced by forest construction function = |
//| Decision forest training report includes following information: |
//| * training set errors |
//| * out-of-bag estimates of errors |
//| * variable importance ratings |
//| Following fields are used to store information: |
//| * training set errors are stored in rep.RelCLSError, rep.AvgCE,|
//| rep.RMSError, rep.AvgError and rep.AvgRelError |
//| * out-of-bag estimates of errors are stored in |
//| rep.oobrelclserror, rep.oobavgce, rep.oobrmserror, |
//| rep.oobavgerror and rep.oobavgrelerror |
//| Variable importance reports, if requested by |
//| DFBuilderSetImportanceGini(), DFBuilderSetImportanceTrnGini() or |
//| DFBuilderSetImportancePermutation() call, are stored in: |
//| * rep.varimportances field stores importance ratings |
//| * rep.topvars stores variable indexes ordered from the most |
//| important to less important ones |
//| You can find more information about report fields in: |
//| * comments on CDFReport structure |
//| * comments on DFBuilderSetImportanceGini function |
//| * comments on DFBuilderSetImportanceTrnGini function |
//|   * comments on DFBuilderSetImportancePermutation function       |
//+------------------------------------------------------------------+
void CAlglib::DFBuilderBuildRandomForest(CDecisionForestBuilder &s,int ntrees,
                                         CDecisionForestShell &df,
                                         CDFReportShell &rep)
  {
//--- unwrap the shell objects and delegate forest construction to CDForest
   CDForest::DFBuilderBuildRandomForest(s,ntrees,df.GetInnerObj(),rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| This function performs binary compression of the decision forest.|
//| Original decision forest produced by the forest builder is stored|
//| using 64-bit representation for all numbers - offsets, variable |
//| indexes, split points. |
//| It is possible to significantly reduce model size by means of: |
//| * using compressed dynamic encoding for integers (offsets and |
//| variable indexes), which uses just 1 byte to store small ints|
//| (less than 128), just 2 bytes for larger values (less than |
//| 128^2) and so on |
//| * storing floating point numbers using 8-bit exponent and |
//| 16-bit mantissa |
//| As result, model needs significantly less memory (compression |
//| factor depends on variable and class counts). In particular: |
//| * NVars<128 and NClasses<128 result in 4.4x-5.7x model size |
//| reduction |
//| * NVars<16384 and NClasses<128 result in 3.7x-4.5x model size |
//| reduction |
//| Such storage format performs lossless compression of all integers|
//| but compression of floating point values (split values) is lossy,|
//| with roughly 0.01% relative error introduced during rounding. |
//| Thus, we recommend you to re-evaluate model accuracy after |
//| compression. |
//| Another downside of compression is ~1.5x reduction in the |
//| inference speed due to necessity of dynamic decompression of the |
//| compressed model. |
//| INPUT PARAMETERS: |
//| DF - decision forest built by forest builder |
//| OUTPUT PARAMETERS: |
//| DF - replaced by compressed forest |
//| RESULT: |
//| compression factor (in-RAM size of the compressed model vs that  |
//| of the uncompressed one), positive number larger than 1.0 |
//+------------------------------------------------------------------+
double CAlglib::DFBinaryCompression(CDecisionForestShell &df)
  {
//--- compress the wrapped forest in place and report the achieved ratio
   double factor=CDForest::DFBinaryCompression(df.GetInnerObj());
   return(factor);
  }
//+------------------------------------------------------------------+
//| Processing                                                       |
//| INPUT PARAMETERS: |
//| DF - decision forest model |
//| X - input vector, array[0..NVars-1]. |
//| OUTPUT PARAMETERS: |
//| Y - result. Regression estimate when solving |
//| regression task, vector of posterior |
//| probabilities for classification task. |
//| See also DFProcessI. |
//+------------------------------------------------------------------+
void CAlglib::DFProcess(CDecisionForestShell &df,double &x[],
                        double &y[])
  {
//--- thin wrapper: unwrap the shell and run inference in CDForest
   CDForest::DFProcess(df.GetInnerObj(),x,y);
  }
//+------------------------------------------------------------------+
//| 'interactive' variant of DFProcess for languages like Python |
//| which support constructs like "Y = DFProcessI(DF,X)" and |
//| interactive mode of interpreter |
//| This function allocates new array on each call, so it is |
//| significantly slower than its 'non-interactive' counterpart, but |
//| it is more convenient when you call it from command line. |
//+------------------------------------------------------------------+
void CAlglib::DFProcessI(CDecisionForestShell &df,
                         double &x[],double &y[])
  {
//--- thin wrapper: 'interactive' variant allocating the output array on each call
   CDForest::DFProcessI(df.GetInnerObj(),x,y);
  }
//+------------------------------------------------------------------+
//| This function returns first component of the inferred vector |
//| (i.e. one with index #0). |
//| It is a convenience wrapper for dfprocess() intended for either: |
//| * 1-dimensional regression problems |
//| * 2-class classification problems |
//| In the former case this function returns inference result as |
//| scalar, which is definitely more convenient than wrapping it as  |
//| vector. In the latter case it returns probability of object |
//| belonging to class #0. |
//| If you call it for anything different from two cases above, it |
//| will work as defined, i.e. return y[0], although it is of less |
//| use in such cases. |
//| INPUT PARAMETERS: |
//| Model - DF model |
//| X - input vector, array[0..NVars-1]. |
//| RESULT: |
//| Y[0] |
//+------------------------------------------------------------------+
double CAlglib::DFProcess0(CDecisionForestShell &model,double &X[])
  {
//--- wrap the raw array into a row vector, then delegate inference
   CRowDouble input=X;
   double y0=CDForest::DFProcess0(model.GetInnerObj(),input);
   return(y0);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
double CAlglib::DFProcess0(CDecisionForestShell &model,CRowDouble &x)
  {
//--- delegate directly; returns first component of the inferred vector
   double y0=CDForest::DFProcess0(model.GetInnerObj(),x);
   return(y0);
  }
//+------------------------------------------------------------------+
//| This function returns most probable class number for an input X. |
//| It is same as calling DFProcess(model,x,y), then determining |
//| i=ArgMax(y[i]) and returning i. |
//| A class number in [0,NOut) range is returned for classification  |
//| problems, -1 is returned when this function is called for |
//| regression problems. |
//| INPUT PARAMETERS: |
//| Model - decision forest model |
//| X - input vector, array[0..NVars-1]. |
//| RESULT: |
//| class number, -1 for regression tasks |
//+------------------------------------------------------------------+
int CAlglib::DFClassify(CDecisionForestShell &model,double &X[])
  {
//--- wrap the raw array into a row vector, then classify
   CRowDouble input=X;
   int label=CDForest::DFClassify(model.GetInnerObj(),input);
   return(label);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
int CAlglib::DFClassify(CDecisionForestShell &model,CRowDouble &x)
  {
//--- delegate directly; -1 is the regression-task result
   int label=CDForest::DFClassify(model.GetInnerObj(),x);
   return(label);
  }
//+------------------------------------------------------------------+
//| Relative classification error on the test set |
//| INPUT PARAMETERS: |
//| DF - decision forest model |
//| XY - test set |
//| NPoints - test set size |
//| RESULT: |
//| percent of incorrectly classified cases. |
//| Zero if model solves regression task. |
//+------------------------------------------------------------------+
double CAlglib::DFRelClsError(CDecisionForestShell &df,CMatrixDouble &xy,
                              const int npoints)
  {
//--- delegate to the implementation and hand back the error value
   double err=CDForest::DFRelClsError(df.GetInnerObj(),xy,npoints);
   return(err);
  }
//+------------------------------------------------------------------+
//| Average cross-entropy (in bits per element) on the test set |
//| INPUT PARAMETERS: |
//| DF - decision forest model |
//| XY - test set |
//| NPoints - test set size |
//| RESULT: |
//| CrossEntropy/(NPoints*LN(2)). |
//| Zero if model solves regression task. |
//+------------------------------------------------------------------+
double CAlglib::DFAvgCE(CDecisionForestShell &df,CMatrixDouble &xy,
                        const int npoints)
  {
//--- delegate to the implementation and hand back the cross-entropy value
   double err=CDForest::DFAvgCE(df.GetInnerObj(),xy,npoints);
   return(err);
  }
//+------------------------------------------------------------------+
//| RMS error on the test set |
//| INPUT PARAMETERS: |
//| DF - decision forest model |
//| XY - test set |
//| NPoints - test set size |
//| RESULT: |
//| root mean square error. |
//| Its meaning for regression task is obvious. As for |
//| classification task, RMS error means error when estimating       |
//| posterior probabilities. |
//+------------------------------------------------------------------+
double CAlglib::DFRMSError(CDecisionForestShell &df,CMatrixDouble &xy,
                           const int npoints)
  {
//--- delegate to the implementation and hand back the RMS error
   double err=CDForest::DFRMSError(df.GetInnerObj(),xy,npoints);
   return(err);
  }
//+------------------------------------------------------------------+
//| Average error on the test set |
//| INPUT PARAMETERS: |
//| DF - decision forest model |
//| XY - test set |
//| NPoints - test set size |
//| RESULT: |
//| Its meaning for regression task is obvious. As for |
//| classification task, it means average error when estimating |
//| posterior probabilities. |
//+------------------------------------------------------------------+
double CAlglib::DFAvgError(CDecisionForestShell &df,CMatrixDouble &xy,
                           const int npoints)
  {
//--- delegate to the implementation and hand back the average error
   double err=CDForest::DFAvgError(df.GetInnerObj(),xy,npoints);
   return(err);
  }
//+------------------------------------------------------------------+
//| Average relative error on the test set |
//| INPUT PARAMETERS: |
//| DF - decision forest model |
//| XY - test set |
//| NPoints - test set size |
//| RESULT: |
//| Its meaning for regression task is obvious. As for |
//| classification task, it means average relative error when |
//| estimating posterior probability of belonging to the correct |
//| class. |
//+------------------------------------------------------------------+
double CAlglib::DFAvgRelError(CDecisionForestShell &df,CMatrixDouble &xy,
                              const int npoints)
  {
//--- delegate to the implementation and hand back the average relative error
   double err=CDForest::DFAvgRelError(df.GetInnerObj(),xy,npoints);
   return(err);
  }
//+------------------------------------------------------------------+
//| This subroutine builds random decision forest. |
//| ---- DEPRECATED VERSION! USE DECISION FOREST BUILDER OBJECT ---- |
//+------------------------------------------------------------------+
void CAlglib::DFBuildRandomDecisionForest(CMatrixDouble &xy,const int npoints,
                                          const int nvars,const int nclasses,
                                          const int ntrees,const double r,
                                          int &info,CDecisionForestShell &df,
                                          CDFReportShell &rep)
  {
//--- initialization: reset the output status code before delegating
   info=0;
//--- function call: deprecated entry point, forwards to the CDForest implementation
   CDForest::DFBuildRandomDecisionForest(xy,npoints,nvars,nclasses,ntrees,r,info,df.GetInnerObj(),rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| This subroutine builds random decision forest. |
//| ---- DEPRECATED VERSION! USE DECISION FOREST BUILDER OBJECT ---- |
//+------------------------------------------------------------------+
void CAlglib::DFBuildRandomDecisionForestX1(CMatrixDouble &xy,
                                            const int npoints,
                                            const int nvars,
                                            const int nclasses,
                                            const int ntrees,
                                            int nrndvars,
                                            const double r,
                                            int &info,
                                            CDecisionForestShell &df,
                                            CDFReportShell &rep)
  {
//--- initialization: reset the output status code before delegating
   info=0;
//--- function call: deprecated entry point (with explicit NRndVars), forwards to CDForest
   CDForest::DFBuildRandomDecisionForestX1(xy,npoints,nvars,nclasses,ntrees,nrndvars,r,info,df.GetInnerObj(),rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| This function initializes clusterizer object. Newly initialized |
//| object is empty, i.e. it does not contain dataset. You should |
//| use it as follows: |
//| 1. creation |
//| 2. dataset is added with ClusterizerSetPoints() |
//| 3. additional parameters are set |
//|   4. clusterization is performed with one of the clustering      |
//| functions |
//+------------------------------------------------------------------+
void CAlglib::ClusterizerCreate(CClusterizerState &s)
  {
//--- thin wrapper: initialize an empty clusterizer object via CClustering
   CClustering::ClusterizerCreate(s);
  }
//+------------------------------------------------------------------+
//| This function adds dataset to the clusterizer structure. |
//| This function overrides all previous calls of |
//| ClusterizerSetPoints() or ClusterizerSetDistances(). |
//| INPUT PARAMETERS: |
//| S - clusterizer state, initialized by |
//| ClusterizerCreate() |
//| XY - array[NPoints,NFeatures], dataset |
//| NPoints - number of points, >=0 |
//| NFeatures- number of features, >=1 |
//| DistType - distance function: |
//| * 0 Chebyshev distance (L-inf norm) |
//| * 1 city block distance (L1 norm) |
//| * 2 Euclidean distance (L2 norm), non-squared |
//| * 10 Pearson correlation: |
//| dist(a,b) = 1-corr(a,b) |
//| * 11 Absolute Pearson correlation: |
//| dist(a,b) = 1-|corr(a,b)| |
//| * 12 Uncentered Pearson correlation (cosine of |
//| the angle): dist(a,b) = a'*b/(|a|*|b|) |
//| * 13 Absolute uncentered Pearson correlation |
//| dist(a,b) = |a'*b|/(|a|*|b|) |
//| * 20 Spearman rank correlation: |
//| dist(a,b) = 1-rankcorr(a,b) |
//| * 21 Absolute Spearman rank correlation |
//| dist(a,b) = 1-|rankcorr(a,b)| |
//| NOTE 1: different distance functions have different performance |
//| penalty: |
//| * Euclidean or Pearson correlation distances are |
//| the fastest ones |
//| * Spearman correlation distance function is a bit slower |
//| * city block and Chebyshev distances are order |
//| of magnitude slower |
//|        The reason behind the difference in performance is that   |
//| correlation-based distance functions are computed using |
//| optimized linear algebra kernels, while Chebyshev and |
//| city block distance functions are computed using simple |
//| nested loops with two branches at each iteration. |
//| NOTE 2: different clustering algorithms have different |
//| limitations: |
//| * agglomerative hierarchical clustering algorithms may |
//| be used with any kind of distance metric |
//| * k-means++ clustering algorithm may be used only with |
//| Euclidean distance function |
//| Thus, list of specific clustering algorithms you may use |
//| depends on distance function you specify when you set |
//| your dataset. |
//+------------------------------------------------------------------+
void CAlglib::ClusterizerSetPoints(CClusterizerState &s,CMatrixDouble &xy,
                                   int npoints,int nfeatures,int disttype)
  {
//--- thin wrapper: forward dataset and distance type to CClustering
   CClustering::ClusterizerSetPoints(s,xy,npoints,nfeatures,disttype);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::ClusterizerSetPoints(CClusterizerState &s,CMatrixDouble &xy,int disttype)
  {
//--- infer dataset dimensions from the matrix itself
   int rows=CAp::Rows(xy);
   int cols=CAp::Cols(xy);
//--- delegate to the full-signature implementation
   CClustering::ClusterizerSetPoints(s,xy,rows,cols,disttype);
  }
//+------------------------------------------------------------------+
//| This function adds dataset given by distance matrix to the |
//| clusterizer structure. It is important that dataset is not given |
//| explicitly - only distance matrix is given. |
//| This function overrides all previous calls of |
//| ClusterizerSetPoints() or ClusterizerSetDistances(). |
//| INPUT PARAMETERS: |
//| S - clusterizer state, initialized by |
//| ClusterizerCreate() |
//| D - array[NPoints,NPoints], distance matrix given by |
//| its upper or lower triangle (main diagonal is |
//| ignored because its entries are expected to |
//| be zero). |
//| NPoints - number of points |
//| IsUpper - whether upper or lower triangle of D is given. |
//| NOTE 1: different clustering algorithms have different |
//| limitations: |
//| * agglomerative hierarchical clustering algorithms may |
//| be used with any kind of distance metric, including |
//| one which is given by distance matrix |
//| * k-means++ clustering algorithm may be used only with |
//| Euclidean distance function and explicitly given |
//| points - it can not be used with dataset given by |
//| distance matrix. Thus, if you call this function, you |
//| will be unable to use k-means clustering algorithm |
//| to process your problem. |
//+------------------------------------------------------------------+
void CAlglib::ClusterizerSetDistances(CClusterizerState &s,CMatrixDouble &d,int npoints,bool IsUpper)
  {
//--- thin wrapper: forward the distance-matrix dataset to CClustering
   CClustering::ClusterizerSetDistances(s,d,npoints,IsUpper);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::ClusterizerSetDistances(CClusterizerState &s,CMatrixDouble &d,bool IsUpper)
  {
//--- a distance matrix must be square; bail out otherwise
   int size=CAp::Rows(d);
   if(!CAp::Assert(size==CAp::Cols(d),"Error while calling 'ClusterizerSetDistances': looks like one of arguments has wrong size"))
      return;
//--- delegate to the full-signature implementation
   CClustering::ClusterizerSetDistances(s,d,size,IsUpper);
  }
//+------------------------------------------------------------------+
//| This function sets agglomerative hierarchical clustering |
//| algorithm |
//| INPUT PARAMETERS: |
//| S - clusterizer state, initialized by ClusterizerCreate() |
//| Algo - algorithm type: |
//| * 0 complete linkage(default algorithm) |
//| * 1 single linkage |
//| * 2 unweighted average linkage |
//| * 3 weighted average linkage |
//| * 4 Ward's method |
//| NOTE: Ward's method works correctly only with Euclidean distance,|
//| that's why algorithm will return negative termination |
//| code(failure) for any other distance type. |
//| It is possible, however, to use this method with user - supplied |
//| distance matrix. It is your responsibility to pass one which was |
//| calculated with Euclidean distance function. |
//+------------------------------------------------------------------+
void CAlglib::ClusterizerSetAHCAlgo(CClusterizerState &s,int algo)
  {
//--- thin wrapper: forward the hierarchical-clustering linkage choice to CClustering
   CClustering::ClusterizerSetAHCAlgo(s,algo);
  }
//+------------------------------------------------------------------+
//| This function sets k-means properties: |
//| number of restarts and maximum |
//| number of iterations per one run. |
//| INPUT PARAMETERS: |
//| S - clusterizer state, initialized by |
//| ClusterizerCreate() |
//| Restarts - restarts count, >= 1. |
//| k-means++ algorithm performs several restarts |
//| and chooses best set of centers(one with minimum |
//| squared distance). |
//| MaxIts - maximum number of k-means iterations performed |
//| during one run. >= 0, zero value means that |
//| algorithm performs unlimited number of iterations. |
//+------------------------------------------------------------------+
void CAlglib::ClusterizerSetKMeansLimits(CClusterizerState &s,int restarts,int maxits)
  {
//--- thin wrapper: forward k-means restart/iteration limits to CClustering
   CClustering::ClusterizerSetKMeansLimits(s,restarts,maxits);
  }
//+------------------------------------------------------------------+
//| This function sets k-means initialization algorithm. Several |
//| different algorithms can be chosen, including k-means++. |
//| INPUT PARAMETERS: |
//| S - clusterizer state, initialized by |
//| ClusterizerCreate() |
//| InitAlgo - initialization algorithm: |
//| * 0 automatic selection(different versions of ALGLIB |
//| may select different algorithms) |
//| * 1 random initialization |
//| * 2 k-means++ initialization(best quality of initial |
//| centers, but long non-parallelizable initialization |
//| phase with bad cache locality) |
//| *3 "fast-greedy" algorithm with efficient, easy to |
//| parallelize initialization. Quality of initial centers |
//| is somewhat worse than that of k-means++. This |
//| algorithm is a default one in the current version of |
//| ALGLIB. |
//| *-1 "debug" algorithm which always selects first K rows |
//| of dataset; this algorithm is used for debug purposes |
//| only. Do not use it in the industrial code! |
//+------------------------------------------------------------------+
void CAlglib::ClusterizerSetKMeansInit(CClusterizerState &s,int initalgo)
  {
//--- thin wrapper: delegate to the clustering implementation
   CClustering::ClusterizerSetKMeansInit(s,initalgo);
  }
//+------------------------------------------------------------------+
//| This function sets seed which is used to initialize internal RNG.|
//| By default, deterministic seed is used - same for each run of |
//| clusterizer. If you specify non-deterministic seed value, then |
//| some algorithms which depend on random initialization(in current |
//| version : k-means) may return slightly different results after |
//| each run. |
//| INPUT PARAMETERS: |
//| S - clusterizer state, initialized by |
//| ClusterizerCreate() |
//| Seed - seed: |
//| * positive values = use deterministic seed for each|
//| run of algorithms which depend on random |
//| initialization |
//| * zero or negative values = use non-deterministic |
//| seed |
//+------------------------------------------------------------------+
void CAlglib::ClusterizerSetSeed(CClusterizerState &s,int seed)
  {
//--- thin wrapper: delegate to the clustering implementation
   CClustering::ClusterizerSetSeed(s,seed);
  }
//+------------------------------------------------------------------+
//| This function performs agglomerative hierarchical clustering |
//| NOTE: Agglomerative hierarchical clustering algorithm has two |
//| phases: distance matrix calculation and clustering itself. |
//| INPUT PARAMETERS: |
//| S - clusterizer state, initialized by |
//| ClusterizerCreate() |
//| OUTPUT PARAMETERS: |
//| Rep - clustering results; see description of AHCReport |
//| structure for more information. |
//| NOTE 1: hierarchical clustering algorithms require large amounts |
//| of memory. In particular, this implementation needs |
//| sizeof(double) *NPoints^2 bytes, which are used to store |
//| distance matrix. In case we work with user - supplied |
//| matrix, this amount is multiplied by 2 (we have to store |
//| original matrix and to work with its copy). |
//| For example, problem with 10000 points would require 800M|
//| of RAM, even when working in a 1-dimensional space. |
//+------------------------------------------------------------------+
void CAlglib::ClusterizerRunAHC(CClusterizerState &s,CAHCReport &rep)
  {
//--- thin wrapper: results are returned through the Rep report object
   CClustering::ClusterizerRunAHC(s,rep);
  }
//+------------------------------------------------------------------+
//| This function performs clustering by k-means++ algorithm. |
//| You may change algorithm properties by calling: |
//| * ClusterizerSetKMeansLimits() to change number of restarts |
//| or iterations |
//| * ClusterizerSetKMeansInit() to change initialization |
//| algorithm |
//| By default, one restart and unlimited number of iterations are |
//| used. Initialization algorithm is chosen automatically. |
//| NOTE: k-means clustering algorithm has two phases: selection of |
//| initial centers and clustering itself. |
//| INPUT PARAMETERS: |
//| S - clusterizer state, initialized by |
//| ClusterizerCreate() |
//| K - number of clusters, K >= 0. |
//| K can be zero only when algorithm is called for |
//| empty dataset, in this case completion code is set |
//| to success(+1). |
//| If K = 0 and dataset size is non-zero, we can not |
//| meaningfully assign points to some center(there are|
//| no centers because K = 0) and return -3 as |
//| completion code (failure). |
//| OUTPUT PARAMETERS: |
//| Rep - clustering results; see description of KMeansReport|
//| structure for more information. |
//| NOTE 1: k-means clustering can be performed only for datasets |
//| with Euclidean distance function. Algorithm will return |
//| negative completion code in Rep.TerminationType in case |
//| dataset was added to clusterizer with DistType other |
//| than Euclidean (or dataset was specified by distance |
//| matrix instead of explicitly given points). |
//| NOTE 2: by default, k-means uses non-deterministic seed to |
//| initialize RNG which is used to select initial centers. |
//| As result, each run of algorithm may return different |
//| values. If you need deterministic behavior, use |
//| ClusterizerSetSeed() function. |
//+------------------------------------------------------------------+
void CAlglib::ClusterizerRunKMeans(CClusterizerState &s,int k,CKmeansReport &rep)
  {
//--- thin wrapper: results (including termination code) are returned in Rep
   CClustering::ClusterizerRunKMeans(s,k,rep);
  }
//+------------------------------------------------------------------+
//| This function returns distance matrix for dataset |
//| INPUT PARAMETERS: |
//| XY - array[NPoints, NFeatures], dataset |
//| NPoints - number of points, >= 0 |
//| NFeatures- number of features, >= 1 |
//| DistType - distance function: |
//| * 0 Chebyshev distance(L - inf norm) |
//| * 1 city block distance(L1 norm) |
//| * 2 Euclidean distance(L2 norm, non - squared) |
//| * 10 Pearson correlation: |
//| dist(a, b) = 1 - corr(a, b) |
//| * 11 Absolute Pearson correlation: |
//| dist(a, b) = 1 - |corr(a, b)| |
//| * 12 Uncentered Pearson correlation(cosine of |
//| the angle): dist(a, b) = a'*b/(|a|*|b|) |
//| * 13 Absolute uncentered Pearson correlation |
//| dist(a, b) = |a'*b|/(|a|*|b|) |
//| * 20 Spearman rank correlation: |
//| dist(a, b) = 1 - rankcorr(a, b) |
//| * 21 Absolute Spearman rank correlation |
//| dist(a, b) = 1 - |rankcorr(a, b)| |
//| OUTPUT PARAMETERS: |
//| D - array[NPoints, NPoints], distance matrix (full |
//| matrix is returned, with lower and upper triangles)|
//| NOTE: different distance functions have different performance |
//| penalty: |
//| * Euclidean or Pearson correlation distances are the fastest|
//| ones |
//| * Spearman correlation distance function is a bit slower |
//| * city block and Chebyshev distances are order of magnitude |
//| slower |
//| The reason behind difference in performance is that correlation -|
//| based distance functions are computed using optimized linear |
//| algebra kernels, while Chebyshev and city block distance |
//| functions are computed using simple nested loops with two |
//| branches at each iteration. |
//+------------------------------------------------------------------+
void CAlglib::ClusterizerGetDistances(CMatrixDouble &xy,int npoints,
                                      int nfeatures,int disttype,
                                      CMatrixDouble &d)
  {
//--- reset output matrix before it is filled by the implementation
   d.Resize(0,0);
//--- function call
   CClustering::ClusterizerGetDistances(xy,npoints,nfeatures,disttype,d);
  }
//+------------------------------------------------------------------+
//| This function takes as input clusterization report Rep, desired |
//| clusters count K, and builds top K clusters from hierarchical |
//| clusterization tree. |
//| It returns assignment of points to clusters(array of cluster |
//| indexes). |
//| INPUT PARAMETERS: |
//| Rep - report from ClusterizerRunAHC() performed on XY |
//| K - desired number of clusters, 1 <= K <= NPoints. |
//| K can be zero only when NPoints = 0. |
//| OUTPUT PARAMETERS: |
//| CIdx - array[NPoints], I-th element contains cluster |
//| index(from 0 to K-1) for I-th point of the dataset.|
//| CZ - array[K]. This array allows to convert cluster |
//| indexes returned by this function to indexes used |
//| by Rep.Z. J-th cluster returned by this function |
//| corresponds to CZ[J]-th cluster stored in |
//| Rep.Z/PZ/PM. It is guaranteed that CZ[I] < CZ[I+1].|
//| NOTE: K clusters built by this subroutine are assumed to have no |
//| hierarchy. Although they were obtained by manipulation with|
//| top K nodes of dendrogram(i.e. hierarchical decomposition |
//| of dataset), this function does not return information |
//| about hierarchy. Each of the clusters stand on its own. |
//| NOTE: Cluster indexes returned by this function does not |
//| correspond to indexes returned in Rep.Z/PZ/PM. Either you |
//| work with hierarchical representation of the dataset |
//| (dendrogram), or you work with "flat" representation |
//| returned by this function. Each of representations has its |
//| own clusters indexing system(former uses [0,2*NPoints-2]), |
//| while latter uses [0..K-1]), although it is possible to |
//| perform conversion from one system to another by means of |
//| CZ array, returned by this function, which allows you to |
//| convert indexes stored in CIdx to the numeration system |
//| used by Rep.Z. |
//| NOTE: this subroutine is optimized for moderate values of K. |
//| Say, for K=5 it will perform many times faster than for |
//| K=100. Its worst - case performance is O(N*K), although in |
//| average case it perform better (up to O(N*log(K))). |
//+------------------------------------------------------------------+
void CAlglib::ClusterizerGetKClusters(CAHCReport &rep,int k,CRowInt &cidx,CRowInt &cz)
  {
//--- reset output arrays before they are filled by the implementation
   cidx.Resize(0);
   cz.Resize(0);
//--- function call
   CClustering::ClusterizerGetKClusters(rep,k,cidx,cz);
  }
//+------------------------------------------------------------------+
//| This function accepts AHC report Rep, desired minimum |
//| intercluster distance and returns top clusters from hierarchical |
//| clusterization tree which are separated by distance R or HIGHER. |
//| It returns assignment of points to clusters (array of cluster |
//| indexes). |
//| There is one more function with similar name - |
//| ClusterizerSeparatedByCorr, which returns clusters with |
//| intercluster correlation equal to R or LOWER (note: higher for |
//| distance, lower for correlation). |
//| INPUT PARAMETERS: |
//| Rep - report from ClusterizerRunAHC() performed on XY |
//| R - desired minimum intercluster distance, R >= 0 |
//| OUTPUT PARAMETERS: |
//| K - number of clusters, 1 <= K <= NPoints |
//| CIdx - array[NPoints], I-th element contains cluster |
//| index (from 0 to K-1) for I-th point of the dataset|
//| CZ - array[K]. This array allows to convert cluster |
//| indexes returned by this function to indexes used |
//| by Rep.Z. J-th cluster returned by this function |
//| corresponds to CZ[J]-th cluster stored in |
//| Rep.Z/PZ/PM. It is guaranteed that CZ[I] < CZ[I+1].|
//| NOTE: K clusters built by this subroutine are assumed to have no |
//| hierarchy. Although they were obtained by manipulation with|
//| top K nodes of dendrogram (i.e. hierarchical decomposition |
//| of dataset), this function does not return information |
//| about hierarchy. Each of the clusters stand on its own. |
//| NOTE: Cluster indexes returned by this function does not |
//| correspond to indexes returned in Rep.Z/PZ/PM. Either you |
//| work with hierarchical representation of the dataset |
//| (dendrogram), or you work with "flat" representation |
//| returned by this function. Each of representations has its |
//| own clusters indexing system (former uses [0,2*NPoints-2]),|
//| while latter uses [0..K-1]), although it is possible to |
//| perform conversion from one system to another by means of |
//| CZ array, returned by this function, which allows you to |
//| convert indexes stored in CIdx to the numeration system |
//| used by Rep.Z. |
//| NOTE: this subroutine is optimized for moderate values of K. Say,|
//| for K=5 it will perform many times faster than for K=100. |
//| Its worst - case performance is O(N*K), although in average|
//| case it perform better (up to O(N*log(K))). |
//+------------------------------------------------------------------+
void CAlglib::ClusterizerSeparatedByDist(CAHCReport &rep,double r,int &k,
                                         CRowInt &cidx,CRowInt &cz)
  {
//--- reset outputs before they are filled by the implementation
   k=0;
   cidx.Resize(0);
   cz.Resize(0);
//--- function call
   CClustering::ClusterizerSeparatedByDist(rep,r,k,cidx,cz);
  }
//+------------------------------------------------------------------+
//| This function accepts AHC report Rep, desired maximum |
//| intercluster correlation and returns top clusters from |
//| hierarchical clusterization tree which are separated by |
//| correlation R or LOWER. |
//| It returns assignment of points to clusters(array of cluster |
//| indexes). |
//| There is one more function with similar name - |
//| ClusterizerSeparatedByDist, which returns clusters with |
//| intercluster distance equal to R or HIGHER (note: higher for |
//| distance, lower for correlation). |
//| INPUT PARAMETERS: |
//| Rep - report from ClusterizerRunAHC() performed on XY |
//| R - desired maximum intercluster correlation, -1<=R<=+1|
//| OUTPUT PARAMETERS: |
//| K - number of clusters, 1 <= K <= NPoints |
//| CIdx - array[NPoints], I-th element contains cluster index|
//| (from 0 to K-1) for I-th point of the dataset. |
//| CZ - array[K]. This array allows to convert cluster |
//| indexes returned by this function to indexes used |
//| by Rep.Z. J-th cluster returned by this function |
//| corresponds to CZ[J]-th cluster stored in |
//| Rep.Z/PZ/PM. It is guaranteed that CZ[I] < CZ[I+1].|
//| NOTE: K clusters built by this subroutine are assumed to have no |
//| hierarchy. Although they were obtained by manipulation with|
//| top K nodes of dendrogram (i.e. hierarchical decomposition |
//| of dataset), this function does not return information |
//| about hierarchy. Each of the clusters stand on its own. |
//| NOTE: Cluster indexes returned by this function does not |
//| correspond to indexes returned in Rep.Z/PZ/PM. Either you |
//| work with hierarchical representation of the dataset |
//| (dendrogram), or you work with "flat" representation |
//| returned by this function. Each of representations has its |
//| own clusters indexing system (former uses [0,2*NPoints-2]),|
//| while latter uses [0..K-1]), although it is possible to |
//| perform conversion from one system to another by means of |
//| CZ array, returned by this function, which allows you to |
//| convert indexes stored in CIdx to the numeration system |
//| used by Rep.Z. |
//| NOTE: this subroutine is optimized for moderate values of K. Say,|
//| for K=5 it will perform many times faster than for K=100. |
//| Its worst - case performance is O(N*K), although in average|
//| case it perform better (up to O(N*log(K))). |
//+------------------------------------------------------------------+
void CAlglib::ClusterizerSeparatedByCorr(CAHCReport &rep,double r,
                                         int &k,CRowInt &cidx,CRowInt &cz)
  {
//--- reset outputs before they are filled by the implementation
   k=0;
   cidx.Resize(0);
   cz.Resize(0);
//--- function call
   CClustering::ClusterizerSeparatedByCorr(rep,r,k,cidx,cz);
  }
//+------------------------------------------------------------------+
//| k-means++ clusterization |
//| Backward compatibility function, we recommend to use CLUSTERING |
//| subpackage as better replacement. |
//| INPUT PARAMETERS: |
//| XY - dataset, array [0..NPoints-1,0..NVars-1]. |
//| NPoints - dataset size, NPoints>=K |
//| NVars - number of variables, NVars>=1 |
//| K - desired number of clusters, K>=1 |
//| Restarts - number of restarts, Restarts>=1 |
//| OUTPUT PARAMETERS: |
//| Info - return code: |
//| * -3, if task is degenerate (number of |
//| distinct points is less than K) |
//| * -1, if incorrect |
//| NPoints/NFeatures/K/Restarts was passed|
//| * 1, if subroutine finished successfully |
//| C - array[0..NVars-1,0..K-1].matrix whose columns|
//| store cluster's centers |
//| XYC - array[NPoints], which contains cluster |
//| indexes |
//+------------------------------------------------------------------+
void CAlglib::KMeansGenerate(CMatrixDouble &xy,const int npoints,
                             const int nvars,const int k,
                             const int restarts,int &info,
                             CMatrixDouble &c,int &xyc[])
  {
//--- initialization of the return code before the call
   info=0;
//--- function call (legacy interface; see CLUSTERING subpackage for replacement)
   CKMeans::KMeansGenerate(xy,npoints,nvars,k,restarts,info,c,xyc);
  }
//+------------------------------------------------------------------+
//| Multiclass Fisher LDA |
//| Subroutine finds coefficients of linear combination which |
//| optimally separates training set on classes. |
//| INPUT PARAMETERS: |
//| XY - training set, array[0..NPoints-1,0..NVars]. |
//| First NVars columns store values of |
//| independent variables, next column stores |
//| number of class (from 0 to NClasses-1) which |
//| dataset element belongs to. Fractional values|
//| are rounded to nearest integer. |
//| NPoints - training set size, NPoints>=0 |
//| NVars - number of independent variables, NVars>=1 |
//| NClasses - number of classes, NClasses>=2 |
//| OUTPUT PARAMETERS: |
//| Info - return code: |
//| * -4, if internal EVD subroutine hasn't |
//| converged |
//| * -2, if there is a point with class number |
//| outside of [0..NClasses-1]. |
//| * -1, if incorrect parameters was passed |
//| (NPoints<0, NVars<1, NClasses<2) |
//| * 1, if task has been solved |
//| * 2, if there was a multicollinearity in |
//| training set, but task has been solved.|
//| W - linear combination coefficients, |
//| array[0..NVars-1] |
//+------------------------------------------------------------------+
void CAlglib::FisherLDA(CMatrixDouble &xy,const int npoints,
                        const int nvars,const int nclasses,
                        int &info,double &w[])
  {
//--- initialization of the return code before the call
   info=0;
//--- function call
   CLDA::FisherLDA(xy,npoints,nvars,nclasses,info,w);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::FisherLDA(CMatrixDouble &xy,const int npoints,
                        const int nvars,const int nclasses,
                        int &info,CRowDouble &w)
  {
//--- overload of FisherLDA taking a CRowDouble output vector
//--- initialization of the return code before the call
   info=0;
//--- function call
   CLDA::FisherLDA(xy,npoints,nvars,nclasses,info,w);
  }
//+------------------------------------------------------------------+
//| N-dimensional multiclass Fisher LDA |
//| Subroutine finds coefficients of linear combinations which |
//| optimally separates |
//| training set on classes. It returns N-dimensional basis whose |
//| vector are sorted |
//| by quality of training set separation (in descending order). |
//| INPUT PARAMETERS: |
//| XY - training set, array[0..NPoints-1,0..NVars]. |
//| First NVars columns store values of |
//| independent variables, next column stores |
//| number of class (from 0 to NClasses-1) which |
//| dataset element belongs to. Fractional values|
//| are rounded to nearest integer. |
//| NPoints - training set size, NPoints>=0 |
//| NVars - number of independent variables, NVars>=1 |
//| NClasses - number of classes, NClasses>=2 |
//| OUTPUT PARAMETERS: |
//| Info - return code: |
//| * -4, if internal EVD subroutine hasn't |
//| converged |
//| * -2, if there is a point with class number |
//| outside of [0..NClasses-1]. |
//| * -1, if incorrect parameters was passed |
//| (NPoints<0, NVars<1, NClasses<2) |
//| * 1, if task has been solved |
//| * 2, if there was a multicollinearity in |
//| training set, but task has been solved.|
//| W - basis, array[0..NVars-1,0..NVars-1] |
//| columns of matrix stores basis vectors, |
//| sorted by quality of training set separation |
//| (in descending order) |
//+------------------------------------------------------------------+
void CAlglib::FisherLDAN(CMatrixDouble &xy,const int npoints,
                         const int nvars,const int nclasses,
                         int &info,CMatrixDouble &w)
  {
//--- initialization of the return code before the call
   info=0;
//--- function call
   CLDA::FisherLDAN(xy,npoints,nvars,nclasses,info,w);
  }
//+------------------------------------------------------------------+
//| Linear regression |
//| Subroutine builds model: |
//| Y = A(0)*X[0] + ... + A(N-1)*X[N-1] + A(N) |
//| and model found in ALGLIB format, covariation matrix, training |
//| set errors (rms, average, average relative) and leave-one-out |
//| cross-validation estimate of the generalization error. CV |
//| estimate calculated using fast algorithm with O(NPoints*NVars) |
//| complexity. |
//| When covariation matrix is calculated standard deviations of|
//| function values are assumed to be equal to RMS error on the |
//| training set. |
//| INPUT PARAMETERS: |
//| XY - training set, array [0..NPoints-1,0..NVars]: |
//| * NVars columns - independent variables |
//| * last column - dependent variable |
//| NPoints - training set size, NPoints>NVars+1 |
//| NVars - number of independent variables |
//| OUTPUT PARAMETERS: |
//| Info - return code: |
//| * -255, in case of unknown internal error |
//| * -4, if internal SVD subroutine haven't |
//| converged |
//| * -1, if incorrect parameters was passed |
//| (NPoints<NVars+2, NVars<1). |
//| * 1, if subroutine successfully finished |
//| LM - linear model in the ALGLIB format. Use |
//| subroutines of this unit to work with the |
//| model. |
//| AR - additional results |
//+------------------------------------------------------------------+
void CAlglib::LRBuild(CMatrixDouble &xy,const int npoints,const int nvars,
                      int &info,CLinearModelShell &lm,CLRReportShell &ar)
  {
//--- initialization of the return code before the call
   info=0;
//--- function call; shells expose the inner ALGLIB objects
   CLinReg::LRBuild(xy,npoints,nvars,info,lm.GetInnerObj(),ar.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Linear regression |
//| Variant of LRBuild which uses vector of standard deviations      |
//| (errors in function values). |
//| INPUT PARAMETERS: |
//| XY - training set, array [0..NPoints-1,0..NVars]: |
//| * NVars columns - independent variables |
//| * last column - dependent variable |
//| S - standard deviations (errors in function |
//| values) array[0..NPoints-1], S[i]>0. |
//| NPoints - training set size, NPoints>NVars+1 |
//| NVars - number of independent variables |
//| OUTPUT PARAMETERS: |
//| Info - return code: |
//| * -255, in case of unknown internal error |
//| * -4, if internal SVD subroutine haven't |
//| converged |
//| * -1, if incorrect parameters was passed |
//| (NPoints<NVars+2, NVars<1). |
//| * -2, if S[I]<=0 |
//| * 1, if subroutine successfully finished |
//| LM - linear model in the ALGLIB format. Use |
//| subroutines of this unit to work with the |
//| model. |
//| AR - additional results |
//+------------------------------------------------------------------+
void CAlglib::LRBuildS(CMatrixDouble &xy,double &s[],const int npoints,
                       const int nvars,int &info,CLinearModelShell &lm,
                       CLRReportShell &ar)
  {
//--- initialization of the return code before the call
   info=0;
//--- function call; shells expose the inner ALGLIB objects
   CLinReg::LRBuildS(xy,s,npoints,nvars,info,lm.GetInnerObj(),ar.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::LRBuildS(CMatrixDouble &xy,CRowDouble &s,const int npoints,
                       const int nvars,int &info,CLinearModelShell &lm,
                       CLRReportShell &ar)
  {
//--- overload of LRBuildS taking a CRowDouble vector of standard deviations
//--- initialization of the return code before the call
   info=0;
//--- function call
   CLinReg::LRBuildS(xy,s,npoints,nvars,info,lm.GetInnerObj(),ar.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Like LRBuildS, but builds model |
//| Y=A(0)*X[0] + ... + A(N-1)*X[N-1] |
//| i.e. with zero constant term.                                    |
//+------------------------------------------------------------------+
void CAlglib::LRBuildZS(CMatrixDouble &xy,double &s[],const int npoints,
                        const int nvars,int &info,CLinearModelShell &lm,
                        CLRReportShell &ar)
  {
//--- initialization of the return code before the call
   info=0;
//--- function call; builds model with zero constant term
   CLinReg::LRBuildZS(xy,s,npoints,nvars,info,lm.GetInnerObj(),ar.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::LRBuildZS(CMatrixDouble &xy,CRowDouble &s,const int npoints,
                        const int nvars,int &info,CLinearModelShell &lm,
                        CLRReportShell &ar)
  {
//--- overload of LRBuildZS taking a CRowDouble vector of standard deviations
//--- initialization of the return code before the call
   info=0;
//--- function call
   CLinReg::LRBuildZS(xy,s,npoints,nvars,info,lm.GetInnerObj(),ar.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Like LRBuild but builds model |
//| Y=A(0)*X[0] + ... + A(N-1)*X[N-1] |
//| i.e. with zero constant term.                                    |
//+------------------------------------------------------------------+
void CAlglib::LRBuildZ(CMatrixDouble &xy,const int npoints,
                       const int nvars,int &info,CLinearModelShell &lm,
                       CLRReportShell &ar)
  {
//--- initialization of the return code before the call
   info=0;
//--- function call; builds model with zero constant term
   CLinReg::LRBuildZ(xy,npoints,nvars,info,lm.GetInnerObj(),ar.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Unpacks coefficients of linear model. |
//| INPUT PARAMETERS: |
//| LM - linear model in ALGLIB format |
//| OUTPUT PARAMETERS: |
//| V - coefficients,array[0..NVars] |
//| constant term (intercept) is stored in the |
//| V[NVars]. |
//| NVars - number of independent variables (one less |
//| than number of coefficients) |
//+------------------------------------------------------------------+
void CAlglib::LRUnpack(CLinearModelShell &lm,double &v[],int &nvars)
  {
//--- initialization of the output variable before the call
   nvars=0;
//--- function call
   CLinReg::LRUnpack(lm.GetInnerObj(),v,nvars);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::LRUnpack(CLinearModelShell &lm,CRowDouble &v,int &nvars)
  {
//--- overload of LRUnpack with a CRowDouble output vector
//--- initialization of the output variable before the call
   nvars=0;
//--- function call
   CLinReg::LRUnpack(lm.GetInnerObj(),v,nvars);
  }
//+------------------------------------------------------------------+
//| "Packs" coefficients and creates linear model in ALGLIB format |
//| (LRUnpack reversed). |
//| INPUT PARAMETERS: |
//| V - coefficients, array[0..NVars] |
//| NVars - number of independent variables |
//| OUTPUT PARAMETERS:                                               |
//| LM - linear model. |
//+------------------------------------------------------------------+
void CAlglib::LRPack(double &v[],const int nvars,CLinearModelShell &lm)
  {
//--- thin wrapper: build a linear model from coefficients (LRUnpack reversed)
   CLinReg::LRPack(v,nvars,lm.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::LRPack(CRowDouble &v,const int nvars,CLinearModelShell &lm)
  {
//--- overload of LRPack taking a CRowDouble vector of coefficients
   CLinReg::LRPack(v,nvars,lm.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Processing                                                       |
//| INPUT PARAMETERS: |
//| LM - linear model |
//| X - input vector, array[0..NVars-1]. |
//| Result: |
//| value of linear model regression estimate |
//+------------------------------------------------------------------+
double CAlglib::LRProcess(CLinearModelShell &lm,double &x[])
  {
//--- thin wrapper: return regression estimate for input vector X
   return(CLinReg::LRProcess(lm.GetInnerObj(),x));
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
double CAlglib::LRProcess(CLinearModelShell &lm,CRowDouble &x)
  {
//--- overload of LRProcess taking a CRowDouble input vector
   return(CLinReg::LRProcess(lm.GetInnerObj(),x));
  }
//+------------------------------------------------------------------+
//| RMS error on the test set |
//| INPUT PARAMETERS: |
//| LM - linear model |
//| XY - test set |
//| NPoints - test set size |
//| RESULT: |
//| root mean square error. |
//+------------------------------------------------------------------+
double CAlglib::LRRMSError(CLinearModelShell &lm,CMatrixDouble &xy,
                           const int npoints)
  {
//--- thin wrapper: root mean square error of the model on test set XY
   return(CLinReg::LRRMSError(lm.GetInnerObj(),xy,npoints));
  }
//+------------------------------------------------------------------+
//| Average error on the test set |
//| INPUT PARAMETERS: |
//| LM - linear model |
//| XY - test set |
//| NPoints - test set size |
//| RESULT: |
//| average error. |
//+------------------------------------------------------------------+
double CAlglib::LRAvgError(CLinearModelShell &lm,CMatrixDouble &xy,
                           const int npoints)
  {
//--- thin wrapper: average error of the model on test set XY
   return(CLinReg::LRAvgError(lm.GetInnerObj(),xy,npoints));
  }
//+------------------------------------------------------------------+
//| Average relative error on the test set                           |
//| INPUT PARAMETERS: |
//| LM - linear model |
//| XY - test set |
//| NPoints - test set size |
//| RESULT: |
//| average relative error. |
//+------------------------------------------------------------------+
double CAlglib::LRAvgRelError(CLinearModelShell &lm,CMatrixDouble &xy,
                              const int npoints)
  {
//--- thin wrapper: average relative error of the model on test set XY
   return(CLinReg::LRAvgRelError(lm.GetInnerObj(),xy,npoints));
  }
//+------------------------------------------------------------------+
//| This function serializes data structure to string. |
//| Important properties of s_out: |
//| * it contains alphanumeric characters, dots, underscores, minus |
//| signs |
//| * these symbols are grouped into words, which are separated by |
//| spaces and Windows-style (CR+LF) newlines |
//| * although serializer uses spaces and CR+LF as separators, you|
//| can replace any separator character by arbitrary combination of|
//| spaces, tabs, Windows or Unix newlines. It allows flexible |
//| reformatting of the string in case you want to include it into |
//| text or XML file. But you should not insert separators into the|
//| middle of the "words" nor you should change case of letters. |
//| * s_out can be freely moved between 32-bit and 64-bit systems, |
//| little and big endian machines, and so on. You can reference |
//| structure on 32-bit machine and unserialize it on 64-bit one |
//| (or vice versa), or reference it on SPARC and unserialize on |
//| x86. You can also reference it in C# version of ALGLIB and |
//| unserialize in C++ one, and vice versa. |
//+------------------------------------------------------------------+
void CAlglib::MLPSerialize(CMultilayerPerceptronShell &obj,string &s_out)
  {
//--- create a serializer
   CSerializer s;
//--- first pass: compute required size (allocation phase)
   s.Alloc_Start();
//--- function call
   CMLPBase::MLPAlloc(s,obj.GetInnerObj());
//--- second pass: serialize to string
   s.SStart_Str();
//--- function call
   CMLPBase::MLPSerialize(s,obj.GetInnerObj());
//--- finalize the stream
   s.Stop();
//--- copy out the serialized representation
   s_out=s.Get_String();
  }
//+------------------------------------------------------------------+
//| This function unserializes data structure from string. |
//+------------------------------------------------------------------+
void CAlglib::MLPUnserialize(const string s_in,CMultilayerPerceptronShell &obj)
  {
//--- create a serializer
   CSerializer s;
//--- start unserialization from string
   s.UStart_Str(s_in);
//--- function call: restore network state into the shell's inner object
   CMLPBase::MLPUnserialize(s,obj.GetInnerObj());
//--- finalize the stream
   s.Stop();
  }
//+------------------------------------------------------------------+
//| Creates neural network with NIn inputs, NOut outputs, |
//| without hidden layers, with linear output layer. Network weights |
//| are filled with small random values. |
//+------------------------------------------------------------------+
void CAlglib::MLPCreate0(const int nin,const int nout,
                         CMultilayerPerceptronShell &network)
  {
//--- thin wrapper: network with no hidden layers, linear output
   CMLPBase::MLPCreate0(nin,nout,network.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Same as MLPCreate0, but with one hidden layer (NHid neurons) with|
//| non-linear activation function. Output layer is linear. |
//+------------------------------------------------------------------+
void CAlglib::MLPCreate1(const int nin,int nhid,const int nout,
                         CMultilayerPerceptronShell &network)
  {
//--- thin wrapper: one non-linear hidden layer, linear output
   CMLPBase::MLPCreate1(nin,nhid,nout,network.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Same as MLPCreate0,but with two hidden layers (NHid1 and NHid2 |
//| neurons) with non-linear activation function. Output layer is |
//| linear. |
//| $ALL |
//+------------------------------------------------------------------+
void CAlglib::MLPCreate2(const int nin,const int nhid1,const int nhid2,
                         const int nout,CMultilayerPerceptronShell &network)
  {
//--- thin wrapper: two non-linear hidden layers, linear output
   CMLPBase::MLPCreate2(nin,nhid1,nhid2,nout,network.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Creates neural network with NIn inputs, NOut outputs, without |
//| hidden layers with non-linear output layer. Network weights are |
//| filled with small random values. |
//| Activation function of the output layer takes values: |
//| (B, +INF), if D>=0 |
//| or |
//| (-INF, B), if D<0. |
//+------------------------------------------------------------------+
void CAlglib::MLPCreateB0(const int nin,const int nout,const double b,
                          const double d,CMultilayerPerceptronShell &network)
  {
//--- thin wrapper: bounded output layer, no hidden layers
   CMLPBase::MLPCreateB0(nin,nout,b,d,network.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Same as MLPCreateB0 but with non-linear hidden layer. |
//+------------------------------------------------------------------+
void CAlglib::MLPCreateB1(const int nin,int nhid,const int nout,
                          const double b,const double d,
                          CMultilayerPerceptronShell &network)
  {
//--- delegate to the computational core
   CMLPBase::MLPCreateB1(nin,nhid,nout,b,d,network.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Same as MLPCreateB0 but with two non-linear hidden layers. |
//+------------------------------------------------------------------+
void CAlglib::MLPCreateB2(const int nin,const int nhid1,const int nhid2,
                          const int nout,const double b,const double d,
                          CMultilayerPerceptronShell &network)
  {
//--- delegate to the computational core
   CMLPBase::MLPCreateB2(nin,nhid1,nhid2,nout,b,d,network.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Creates neural network with NIn inputs, NOut outputs, |
//| without hidden layers with non-linear output layer. Network |
//| weights are filled with small random values. Activation function |
//| of the output layer takes values [A,B]. |
//+------------------------------------------------------------------+
void CAlglib::MLPCreateR0(const int nin,const int nout,double a,
                          const double b,CMultilayerPerceptronShell &network)
  {
//--- delegate to the computational core (output activation bounded to [a,b])
   CMLPBase::MLPCreateR0(nin,nout,a,b,network.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Same as MLPCreateR0,but with non-linear hidden layer. |
//+------------------------------------------------------------------+
void CAlglib::MLPCreateR1(const int nin,int nhid,const int nout,
                          const double a,const double b,
                          CMultilayerPerceptronShell &network)
  {
//--- delegate to the computational core
   CMLPBase::MLPCreateR1(nin,nhid,nout,a,b,network.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Same as MLPCreateR0,but with two non-linear hidden layers. |
//+------------------------------------------------------------------+
void CAlglib::MLPCreateR2(const int nin,const int nhid1,const int nhid2,
                          const int nout,const double a,const double b,
                          CMultilayerPerceptronShell &network)
  {
//--- delegate to the computational core
   CMLPBase::MLPCreateR2(nin,nhid1,nhid2,nout,a,b,network.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Creates classifier network with NIn inputs and NOut possible |
//| classes. |
//| Network contains no hidden layers and linear output layer with |
//| SOFTMAX-normalization (so outputs sums up to 1.0 and converge to |
//| posterior probabilities). |
//+------------------------------------------------------------------+
void CAlglib::MLPCreateC0(const int nin,const int nout,
                          CMultilayerPerceptronShell &network)
  {
//--- delegate to the computational core (SOFTMAX classifier network, see header comment)
   CMLPBase::MLPCreateC0(nin,nout,network.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Same as MLPCreateC0,but with one non-linear hidden layer. |
//+------------------------------------------------------------------+
void CAlglib::MLPCreateC1(const int nin,int nhid,const int nout,
                          CMultilayerPerceptronShell &network)
  {
//--- delegate to the computational core
   CMLPBase::MLPCreateC1(nin,nhid,nout,network.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Same as MLPCreateC0, but with two non-linear hidden layers. |
//+------------------------------------------------------------------+
void CAlglib::MLPCreateC2(const int nin,const int nhid1,const int nhid2,
                          const int nout,CMultilayerPerceptronShell &network)
  {
//--- delegate to the computational core
   CMLPBase::MLPCreateC2(nin,nhid1,nhid2,nout,network.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Randomization of neural network weights |
//+------------------------------------------------------------------+
void CAlglib::MLPRandomize(CMultilayerPerceptronShell &network)
  {
//--- delegate to the computational core; modifies the network's weights in place
   CMLPBase::MLPRandomize(network.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Randomization of neural network weights and standardizer         |
//+------------------------------------------------------------------+
void CAlglib::MLPRandomizeFull(CMultilayerPerceptronShell &network)
  {
//--- delegate to the computational core; modifies weights and preprocessing terms in place
   CMLPBase::MLPRandomizeFull(network.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Internal subroutine. |
//+------------------------------------------------------------------+
void CAlglib::MLPInitPreprocessor(CMultilayerPerceptronShell &network,
                                  CMatrixDouble &xy,int ssize)
  {
//--- delegate to the computational core; initializes the network's input
//--- preprocessing from the first 'ssize' rows of dataset 'xy'
   CMLPBase::MLPInitPreprocessor(network.GetInnerObj(),xy,ssize);
  }
//+------------------------------------------------------------------+
//| Returns information about initialized network: number of inputs, |
//| outputs, weights. |
//+------------------------------------------------------------------+
void CAlglib::MLPProperties(CMultilayerPerceptronShell &network,
                            int &nin,int &nout,int &wcount)
  {
//--- initialize output parameters before delegating
   nin=0;
   nout=0;
   wcount=0;
//--- delegate to the computational core, which fills nin/nout/wcount
   CMLPBase::MLPProperties(network.GetInnerObj(),nin,nout,wcount);
  }
//+------------------------------------------------------------------+
//| Returns number of inputs. |
//+------------------------------------------------------------------+
int CAlglib::MLPGetInputsCount(CMultilayerPerceptronShell &network)
  {
//--- delegate to the computational core and return its result
   return(CMLPBase::MLPGetInputsCount(network.GetInnerObj()));
  }
//+------------------------------------------------------------------+
//| Returns number of outputs. |
//+------------------------------------------------------------------+
int CAlglib::MLPGetOutputsCount(CMultilayerPerceptronShell &network)
  {
//--- delegate to the computational core and return its result
   return(CMLPBase::MLPGetOutputsCount(network.GetInnerObj()));
  }
//+------------------------------------------------------------------+
//| Returns number of weights. |
//+------------------------------------------------------------------+
int CAlglib::MLPGetWeightsCount(CMultilayerPerceptronShell &network)
  {
//--- delegate to the computational core and return its result
   return(CMLPBase::MLPGetWeightsCount(network.GetInnerObj()));
  }
//+------------------------------------------------------------------+
//| Tells whether network is SOFTMAX-normalized (i.e. classifier)    |
//| or not. |
//+------------------------------------------------------------------+
bool CAlglib::MLPIsSoftMax(CMultilayerPerceptronShell &network)
  {
//--- delegate to the computational core and return its result
   return(CMLPBase::MLPIsSoftMax(network.GetInnerObj()));
  }
//+------------------------------------------------------------------+
//| This function returns total number of layers (including input, |
//| hidden and output layers). |
//+------------------------------------------------------------------+
int CAlglib::MLPGetLayersCount(CMultilayerPerceptronShell &network)
  {
//--- delegate to the computational core and return its result
   return(CMLPBase::MLPGetLayersCount(network.GetInnerObj()));
  }
//+------------------------------------------------------------------+
//| This function returns size of K-th layer. |
//| K=0 corresponds to input layer, K=CNT-1 corresponds to output |
//| layer. |
//| Size of the output layer is always equal to the number of |
//| outputs, although when we have softmax-normalized network, last |
//| neuron doesn't have any connections - it is just zero. |
//+------------------------------------------------------------------+
int CAlglib::MLPGetLayerSize(CMultilayerPerceptronShell &network,
                             const int k)
  {
//--- delegate to the computational core and return size of layer 'k'
   return(CMLPBase::MLPGetLayerSize(network.GetInnerObj(),k));
  }
//+------------------------------------------------------------------+
//| This function returns offset/scaling coefficients for I-th input |
//| of the network. |
//| INPUT PARAMETERS: |
//| Network - network |
//| I - input index |
//| OUTPUT PARAMETERS: |
//| Mean - mean term |
//| Sigma - sigma term,guaranteed to be nonzero. |
//| I-th input is passed through linear transformation |
//| IN[i]=(IN[i]-Mean)/Sigma |
//| before feeding to the network |
//+------------------------------------------------------------------+
void CAlglib::MLPGetInputScaling(CMultilayerPerceptronShell &network,
                                 const int i,double &mean,double &sigma)
  {
//--- initialize output parameters before delegating
   mean=0;
   sigma=0;
//--- delegate to the computational core, which fills mean/sigma for input 'i'
   CMLPBase::MLPGetInputScaling(network.GetInnerObj(),i,mean,sigma);
  }
//+------------------------------------------------------------------+
//| This function returns offset/scaling coefficients for I-th output|
//| of the network. |
//| INPUT PARAMETERS: |
//| Network - network |
//| I - input index |
//| OUTPUT PARAMETERS: |
//| Mean - mean term |
//| Sigma - sigma term, guaranteed to be nonzero. |
//| I-th output is passed through linear transformation |
//| OUT[i] = OUT[i]*Sigma+Mean |
//| before returning it to user. In case we have SOFTMAX-normalized |
//| network, we return (Mean,Sigma)=(0.0,1.0). |
//+------------------------------------------------------------------+
void CAlglib::MLPGetOutputScaling(CMultilayerPerceptronShell &network,
                                  const int i,double &mean,double &sigma)
  {
//--- initialize output parameters before delegating
   mean=0;
   sigma=0;
//--- delegate to the computational core, which fills mean/sigma for output 'i'
   CMLPBase::MLPGetOutputScaling(network.GetInnerObj(),i,mean,sigma);
  }
//+------------------------------------------------------------------+
//| This function returns information about Ith neuron of Kth layer |
//| INPUT PARAMETERS: |
//| Network - network |
//| K - layer index |
//| I - neuron index (within layer) |
//| OUTPUT PARAMETERS: |
//| FKind - activation function type (used by |
//| MLPActivationFunction()) this value is zero |
//| for input or linear neurons |
//| Threshold - also called offset, bias |
//| zero for input neurons |
//| NOTE: this function throws exception if layer or neuron with |
//| given index do not exists. |
//+------------------------------------------------------------------+
void CAlglib::MLPGetNeuronInfo(CMultilayerPerceptronShell &network,
                               const int k,const int i,int &fkind,
                               double &threshold)
  {
//--- initialize output parameters before delegating
   fkind=0;
   threshold=0;
//--- delegate to the computational core, which fills fkind/threshold
   CMLPBase::MLPGetNeuronInfo(network.GetInnerObj(),k,i,fkind,threshold);
  }
//+------------------------------------------------------------------+
//| This function returns information about connection from I0-th |
//| neuron of K0-th layer to I1-th neuron of K1-th layer. |
//| INPUT PARAMETERS: |
//| Network - network |
//| K0 - layer index |
//| I0 - neuron index (within layer) |
//| K1 - layer index |
//| I1 - neuron index (within layer) |
//| RESULT: |
//| connection weight (zero for non-existent connections) |
//| This function: |
//| 1. throws exception if layer or neuron with given index do not |
//| exists. |
//| 2. returns zero if neurons exist, but there is no connection |
//| between them |
//+------------------------------------------------------------------+
double CAlglib::MLPGetWeight(CMultilayerPerceptronShell &network,
                             const int k0,const int i0,const int k1,
                             const int i1)
  {
//--- delegate to the computational core and return the connection weight
   return(CMLPBase::MLPGetWeight(network.GetInnerObj(),k0,i0,k1,i1));
  }
//+------------------------------------------------------------------+
//| This function sets offset/scaling coefficients for I-th input of |
//| the network. |
//| INPUT PARAMETERS: |
//| Network - network |
//| I - input index |
//| Mean - mean term |
//| Sigma - sigma term (if zero,will be replaced by 1.0) |
//| NOTE: I-th input is passed through linear transformation         |
//| IN[i]=(IN[i]-Mean)/Sigma |
//| before feeding to the network. This function sets Mean and Sigma.|
//+------------------------------------------------------------------+
void CAlglib::MLPSetInputScaling(CMultilayerPerceptronShell &network,
                                 const int i,const double mean,
                                 const double sigma)
  {
//--- delegate to the computational core; modifies the network in place
   CMLPBase::MLPSetInputScaling(network.GetInnerObj(),i,mean,sigma);
  }
//+------------------------------------------------------------------+
//| This function sets offset/scaling coefficients for I-th output of|
//| the network. |
//| INPUT PARAMETERS: |
//| Network - network |
//| I - input index |
//| Mean - mean term |
//| Sigma - sigma term (if zero, will be replaced by 1.0)|
//| OUTPUT PARAMETERS: |
//| NOTE: I-th output is passed through linear transformation |
//| OUT[i] = OUT[i]*Sigma+Mean |
//| before returning it to user. This function sets Sigma/Mean. In |
//| case we have SOFTMAX-normalized network, you can not set (Sigma, |
//| Mean) to anything other than(0.0,1.0) - this function will throw |
//| exception. |
//+------------------------------------------------------------------+
void CAlglib::MLPSetOutputScaling(CMultilayerPerceptronShell &network,
                                  const int i,const double mean,
                                  const double sigma)
  {
//--- delegate to the computational core; modifies the network in place
   CMLPBase::MLPSetOutputScaling(network.GetInnerObj(),i,mean,sigma);
  }
//+------------------------------------------------------------------+
//| This function modifies information about Ith neuron of Kth layer |
//| INPUT PARAMETERS: |
//| Network - network |
//| K - layer index |
//| I - neuron index (within layer) |
//| FKind - activation function type (used by |
//| MLPActivationFunction()) this value must be |
//| zero for input neurons (you can not set |
//| activation function for input neurons) |
//| Threshold - also called offset, bias |
//| this value must be zero for input neurons |
//| (you can not set threshold for input neurons)|
//| NOTES: |
//| 1. this function throws exception if layer or neuron with given |
//| index do not exists. |
//| 2. this function also throws exception when you try to set |
//| non-linear activation function for input neurons (any kind |
//| of network) or for output neurons of classifier network. |
//| 3. this function throws exception when you try to set non-zero |
//| threshold for input neurons (any kind of network). |
//+------------------------------------------------------------------+
void CAlglib::MLPSetNeuronInfo(CMultilayerPerceptronShell &network,
                               const int k,const int i,int fkind,
                               double threshold)
  {
//--- delegate to the computational core; modifies the network in place
   CMLPBase::MLPSetNeuronInfo(network.GetInnerObj(),k,i,fkind,threshold);
  }
//+------------------------------------------------------------------+
//| This function modifies information about connection from I0-th |
//| neuron of K0-th layer to I1-th neuron of K1-th layer. |
//| INPUT PARAMETERS: |
//| Network - network |
//| K0 - layer index |
//| I0 - neuron index (within layer) |
//| K1 - layer index |
//| I1 - neuron index (within layer) |
//| W - connection weight (must be zero for |
//| non-existent connections) |
//| This function: |
//| 1. throws exception if layer or neuron with given index do not |
//| exists. |
//| 2. throws exception if you try to set non-zero weight for |
//| non-existent connection |
//+------------------------------------------------------------------+
void CAlglib::MLPSetWeight(CMultilayerPerceptronShell &network,
                           const int k0,const int i0,const int k1,
                           const int i1,const double w)
  {
//--- delegate to the computational core; modifies the network in place
   CMLPBase::MLPSetWeight(network.GetInnerObj(),k0,i0,k1,i1,w);
  }
//+------------------------------------------------------------------+
//| Neural network activation function |
//| INPUT PARAMETERS: |
//| NET - neuron input |
//| K - function index (zero for linear function) |
//| OUTPUT PARAMETERS: |
//| F - function |
//| DF - its derivative |
//| D2F - its second derivative |
//+------------------------------------------------------------------+
void CAlglib::MLPActivationFunction(const double net,const int k,
                                    double &f,double &df,double &d2f)
  {
//--- initialize output parameters before delegating
   f=0;
   df=0;
   d2f=0;
//--- delegate to the computational core, which fills f/df/d2f
   CMLPBase::MLPActivationFunction(net,k,f,df,d2f);
  }
//+------------------------------------------------------------------+
//| Processing                                                       |
//| INPUT PARAMETERS: |
//| Network - neural network |
//| X - input vector, array[0..NIn-1]. |
//| OUTPUT PARAMETERS: |
//| Y - result. Regression estimate when solving |
//| regression task, vector of posterior |
//| probabilities for classification task. |
//| See also MLPProcessI |
//+------------------------------------------------------------------+
void CAlglib::MLPProcess(CMultilayerPerceptronShell &network,
                         double &x[],double &y[])
  {
//--- delegate to the computational core; result is written into 'y'
   CMLPBase::MLPProcess(network.GetInnerObj(),x,y);
  }
//+------------------------------------------------------------------+
//| 'interactive' variant of MLPProcess for languages like Python |
//| which support constructs like "Y = MLPProcess(NN,X)" and |
//| interactive mode of the interpreter |
//| This function allocates new array on each call, so it is |
//| significantly slower than its 'non-interactive' counterpart, |
//| but it is more convenient when you call it from command line. |
//+------------------------------------------------------------------+
void CAlglib::MLPProcessI(CMultilayerPerceptronShell &network,
                          double &x[],double &y[])
  {
//--- delegate to the computational core; 'y' is (re)allocated by the callee
   CMLPBase::MLPProcessI(network.GetInnerObj(),x,y);
  }
//+------------------------------------------------------------------+
//| Error function for neural network,internal subroutine. |
//+------------------------------------------------------------------+
double CAlglib::MLPError(CMultilayerPerceptronShell &network,
                         CMatrixDouble &xy,const int ssize)
  {
//--- delegate to the computational core and return the error value
   return(CMLPBase::MLPError(network.GetInnerObj(),xy,ssize));
  }
//+------------------------------------------------------------------+
//| Error of the neural network on dataset given by sparse matrix. |
//| INPUT PARAMETERS: |
//| Network - neural network |
//| XY - training set, see below for information on the |
//| training set format. This function checks |
//| correctness of the dataset (no NANs/INFs, class |
//| numbers are correct) and throws exception when |
//| incorrect dataset is passed. Sparse matrix must |
//| use CRS format for storage. |
//| NPoints - points count, >=0 |
//| RESULT: |
//| sum-of-squares error, SUM(sqr(y[i]-desired_y[i])/2) |
//| DATASET FORMAT: |
//| This function uses two different dataset formats - one for |
//| regression networks, another one for classification networks. |
//| For regression networks with NIn inputs and NOut outputs |
//| following dataset format is used: |
//| * dataset is given by NPoints*(NIn+NOut) matrix |
//| * each row corresponds to one example |
//| * first NIn columns are inputs, next NOut columns are outputs |
//| For classification networks with NIn inputs and NClasses classes |
//| following dataset format is used: |
//| * dataset is given by NPoints*(NIn+1) matrix |
//| * each row corresponds to one example |
//| * first NIn columns are inputs, last column stores class number|
//| (from 0 to NClasses-1). |
//+------------------------------------------------------------------+
double CAlglib::MLPErrorSparse(CMultilayerPerceptronShell &network,
                               CSparseMatrix &xy,int npoints)
  {
//--- delegate to the computational core and return the error value
   return(CMLPBase::MLPErrorSparse(network.GetInnerObj(),xy,npoints));
  }
//+------------------------------------------------------------------+
//| Natural error function for neural network,internal subroutine. |
//+------------------------------------------------------------------+
double CAlglib::MLPErrorN(CMultilayerPerceptronShell &network,
                          CMatrixDouble &xy,const int ssize)
  {
//--- delegate to the computational core and return the error value
   return(CMLPBase::MLPErrorN(network.GetInnerObj(),xy,ssize));
  }
//+------------------------------------------------------------------+
//| Classification error |
//+------------------------------------------------------------------+
int CAlglib::MLPClsError(CMultilayerPerceptronShell &network,
                         CMatrixDouble &xy,const int ssize)
  {
//--- delegate to the computational core and return the error count
   return(CMLPBase::MLPClsError(network.GetInnerObj(),xy,ssize));
  }
//+------------------------------------------------------------------+
//| Relative classification error on the test set |
//| INPUT PARAMETERS: |
//| Network - network |
//| XY - test set |
//| NPoints - test set size |
//| RESULT: |
//| percent of incorrectly classified cases. Works both for |
//| classifier networks and general purpose networks used as |
//| classifiers. |
//+------------------------------------------------------------------+
double CAlglib::MLPRelClsError(CMultilayerPerceptronShell &network,
                               CMatrixDouble &xy,const int npoints)
  {
//--- delegate to the computational core and return the error value
   return(CMLPBase::MLPRelClsError(network.GetInnerObj(),xy,npoints));
  }
//+------------------------------------------------------------------+
//| Relative classification error on the test set given by sparse |
//| matrix. |
//| INPUT PARAMETERS: |
//| Network - neural network; |
//| XY - training set, see below for information on the |
//| training set format. Sparse matrix must use CRS |
//| format for storage. |
//| NPoints - points count, >=0. |
//| RESULT: |
//| Percent of incorrectly classified cases. Works both for |
//| classifier networks and general purpose networks used as |
//| classifiers. |
//| DATASET FORMAT: |
//| This function uses two different dataset formats - one for |
//| regression networks, another one for classification networks. |
//| For regression networks with NIn inputs and NOut outputs |
//| following dataset format is used: |
//| * dataset is given by NPoints*(NIn+NOut) matrix |
//| * each row corresponds to one example |
//| * first NIn columns are inputs, next NOut columns are |
//| outputs |
//|  For classification networks with NIn inputs and NClasses classes|
//| following dataset format is used: |
//| * dataset is given by NPoints*(NIn+1) matrix |
//| * each row corresponds to one example |
//| * first NIn columns are inputs, last column stores class |
//| number (from 0 to NClasses-1). |
//+------------------------------------------------------------------+
double CAlglib::MLPRelClsErrorSparse(CMultilayerPerceptronShell &network,
                                     CSparseMatrix &xy,int npoints)
  {
//--- delegate to the computational core and return the error value
   return(CMLPBase::MLPRelClsErrorSparse(network.GetInnerObj(),xy,npoints));
  }
//+------------------------------------------------------------------+
//| Average cross-entropy (in bits per element) on the test set |
//| INPUT PARAMETERS: |
//| Network - neural network |
//| XY - test set |
//| NPoints - test set size |
//| RESULT: |
//| CrossEntropy/(NPoints*LN(2)). |
//| Zero if network solves regression task. |
//+------------------------------------------------------------------+
double CAlglib::MLPAvgCE(CMultilayerPerceptronShell &network,
                         CMatrixDouble &xy,const int npoints)
  {
//--- delegate to the computational core and return the cross-entropy value
   return(CMLPBase::MLPAvgCE(network.GetInnerObj(),xy,npoints));
  }
//+------------------------------------------------------------------+
//| Average cross-entropy (in bits per element) on the test set given|
//| by sparse matrix. |
//| INPUT PARAMETERS: |
//| Network - neural network; |
//| XY - training set, see below for information on the |
//| training set format. This function checks |
//| correctness of the dataset (no NANs/INFs, class |
//| numbers are correct) and throws exception when |
//| incorrect dataset is passed. Sparse matrix must |
//| use CRS format for storage. |
//| NPoints - points count, >=0. |
//| RESULT: |
//| CrossEntropy/(NPoints*LN(2)). |
//| Zero if network solves regression task. |
//| DATASET FORMAT: |
//| This function uses two different dataset formats - one for |
//| regression networks, another one for classification networks. |
//| For regression networks with NIn inputs and NOut outputs |
//| following dataset format is used: |
//| * dataset is given by NPoints*(NIn+NOut) matrix |
//| * each row corresponds to one example |
//| * first NIn columns are inputs, next NOut columns are |
//| outputs |
//|  For classification networks with NIn inputs and NClasses classes|
//| following dataset format is used: |
//| * dataset is given by NPoints*(NIn+1) matrix |
//| * each row corresponds to one example |
//| * first NIn columns are inputs, last column stores class |
//| number (from 0 to NClasses-1). |
//+------------------------------------------------------------------+
double CAlglib::MLPAvgCESparse(CMultilayerPerceptronShell &network,
                               CSparseMatrix &xy,int npoints)
  {
//--- delegate to the computational core and return the cross-entropy value
   return(CMLPBase::MLPAvgCESparse(network.GetInnerObj(),xy,npoints));
  }
//+------------------------------------------------------------------+
//| RMS error on the test set |
//| INPUT PARAMETERS: |
//| Network - neural network |
//| XY - test set |
//| NPoints - test set size |
//| RESULT: |
//| root mean square error. |
//| Its meaning for regression task is obvious. As for |
//| classification task,RMS error means error when estimating |
//| posterior probabilities. |
//+------------------------------------------------------------------+
double CAlglib::MLPRMSError(CMultilayerPerceptronShell &network,
                            CMatrixDouble &xy,const int npoints)
  {
//--- delegate to the computational core and return the RMS error
   return(CMLPBase::MLPRMSError(network.GetInnerObj(),xy,npoints));
  }
//+------------------------------------------------------------------+
//| RMS error on the test set given by sparse matrix. |
//| INPUT PARAMETERS: |
//| Network - neural network; |
//| XY - training set, see below for information on the |
//| training set format. This function checks |
//| correctness of the dataset (no NANs/INFs, class |
//| numbers are correct) and throws exception when |
//| incorrect dataset is passed. Sparse matrix must |
//| use CRS format for storage. |
//| NPoints - points count, >=0. |
//| RESULT: |
//| Root mean square error. Its meaning for regression task is |
//| obvious. As for classification task, RMS error means error when|
//| estimating posterior probabilities. |
//| DATASET FORMAT: |
//| This function uses two different dataset formats - one for |
//| regression networks, another one for classification networks. |
//| For regression networks with NIn inputs and NOut outputs |
//| following dataset format is used: |
//| * dataset is given by NPoints*(NIn + NOut) matrix |
//| * each row corresponds to one example |
//| * first NIn columns are inputs, next NOut columns are |
//| outputs |
//| For classification networks with NIn inputs and NClasses |
//|   classes following dataset format is used:                      |
//| * dataset is given by NPoints*(NIn + 1) matrix |
//| * each row corresponds to one example |
//| * first NIn columns are inputs, last column stores class |
//| number(from 0 to NClasses - 1). |
//+------------------------------------------------------------------+
double CAlglib::MLPRMSErrorSparse(CMultilayerPerceptronShell &network,
                                  CSparseMatrix &xy,int npoints)
  {
//--- delegate to the computational core and return the RMS error
   return(CMLPBase::MLPRMSErrorSparse(network.GetInnerObj(),xy,npoints));
  }
//+------------------------------------------------------------------+
//| Average error on the test set |
//| INPUT PARAMETERS: |
//| Network - neural network |
//| XY - test set |
//| NPoints - test set size |
//| RESULT: |
//| Its meaning for regression task is obvious. As for |
//| classification task,it means average error when estimating |
//| posterior probabilities. |
//+------------------------------------------------------------------+
double CAlglib::MLPAvgError(CMultilayerPerceptronShell &network,
                            CMatrixDouble &xy,const int npoints)
  {
//--- delegate to the computational core and return the average error
   return(CMLPBase::MLPAvgError(network.GetInnerObj(),xy,npoints));
  }
//+------------------------------------------------------------------+
//| Average absolute error on the test set given by sparse matrix. |
//| INPUT PARAMETERS: |
//| Network - neural network; |
//| XY - training set, see below for information on the |
//| training set format. This function checks |
//| correctness of the dataset (no NANs/INFs, class |
//| numbers are correct) and throws exception when |
//| incorrect dataset is passed. Sparse matrix must |
//| use CRS format for storage. |
//| NPoints - points count, >=0. |
//| RESULT: |
//| Its meaning for regression task is obvious. As for |
//| classification task, it means average error when estimating |
//| posterior probabilities. |
//| DATASET FORMAT: |
//| This function uses two different dataset formats - one for |
//| regression networks, another one for classification networks. |
//| For regression networks with NIn inputs and NOut outputs |
//| following dataset format is used: |
//| * dataset is given by NPoints*(NIn+NOut) matrix |
//| * each row corresponds to one example |
//| * first NIn columns are inputs, next NOut columns are |
//| outputs |
//|  For classification networks with NIn inputs and NClasses classes|
//| following dataset format is used: |
//| * dataset is given by NPoints*(NIn+1) matrix |
//| * each row corresponds to one example |
//| * first NIn columns are inputs, last column stores class |
//| number (from 0 to NClasses-1). |
//+------------------------------------------------------------------+
double CAlglib::MLPAvgErrorSparse(CMultilayerPerceptronShell &network,
                                  CSparseMatrix &xy,int npoints)
  {
//--- delegate to the computational core and return the average error
   return(CMLPBase::MLPAvgErrorSparse(network.GetInnerObj(),xy,npoints));
  }
//+------------------------------------------------------------------+
//| Average relative error on the test set |
//| INPUT PARAMETERS: |
//| Network - neural network |
//| XY - test set |
//| NPoints - test set size |
//| RESULT: |
//| Its meaning for regression task is obvious. As for |
//| classification task, it means average relative error when |
//| estimating posterior probability of belonging to the correct |
//| class. |
//+------------------------------------------------------------------+
double CAlglib::MLPAvgRelError(CMultilayerPerceptronShell &network,
                               CMatrixDouble &xy,const int npoints)
  {
//--- delegate to the computational core and return the average relative error
   return(CMLPBase::MLPAvgRelError(network.GetInnerObj(),xy,npoints));
  }
//+------------------------------------------------------------------+
//| Average relative error on the test set given by sparse matrix. |
//| INPUT PARAMETERS: |
//| Network - neural network; |
//| XY - training set, see below for information on the |
//| training set format. This function checks |
//| correctness of the dataset (no NANs/INFs, class |
//| numbers are correct) and throws exception when |
//| incorrect dataset is passed. Sparse matrix must |
//| use CRS format for storage. |
//| NPoints - points count, >=0. |
//| RESULT: |
//| Its meaning for regression task is obvious. As for |
//| classification task, it means average relative error when |
//| estimating posterior probability of belonging to the correct |
//| class. |
//| DATASET FORMAT: |
//| This function uses two different dataset formats - one for |
//| regression networks, another one for classification networks. |
//| For regression networks with NIn inputs and NOut outputs |
//| following dataset format is used: |
//| * dataset is given by NPoints*(NIn+NOut) matrix |
//| * each row corresponds to one example |
//| * first NIn columns are inputs, next NOut columns are |
//| outputs |
//|  For classification networks with NIn inputs and NClasses classes|
//| following dataset format is used: |
//| * dataset is given by NPoints*(NIn+1) matrix |
//| * each row corresponds to one example |
//| * first NIn columns are inputs, last column stores class |
//| number (from 0 to NClasses-1). |
//+------------------------------------------------------------------+
double CAlglib::MLPAvgRelErrorSparse(CMultilayerPerceptronShell &network,
                                     CSparseMatrix &xy,int npoints)
  {
//--- delegate to the computational core and return the average relative error
   return(CMLPBase::MLPAvgRelErrorSparse(network.GetInnerObj(),xy,npoints));
  }
//+------------------------------------------------------------------+
//| Gradient calculation |
//| INPUT PARAMETERS: |
//| Network - network initialized with one of the network |
//| creation funcs |
//| X - input vector, length of array must be at least |
//| NIn |
//| DesiredY- desired outputs, length of array must be at least|
//| NOut |
//| Grad - possibly preallocated array. If size of array is |
//| smaller than WCount, it will be reallocated. It |
//| is recommended to reuse previously allocated |
//| array to reduce allocation overhead. |
//| OUTPUT PARAMETERS: |
//| E - error function, SUM(sqr(y[i]-desiredy[i])/2,i) |
//| Grad - gradient of E with respect to weights of network,|
//| array[WCount] |
//+------------------------------------------------------------------+
void CAlglib::MLPGrad(CMultilayerPerceptronShell &network,double &x[],
                      double &desiredy[],double &e,double &grad[])
  {
//--- initialize output error before delegating
   e=0;
//--- delegate to the computational core, which fills 'e' and 'grad'
   CMLPBase::MLPGrad(network.GetInnerObj(),x,desiredy,e,grad);
  }
//+------------------------------------------------------------------+
//| Gradient calculation (natural error function is used) |
//| INPUT PARAMETERS: |
//| Network - network initialized with one of the network |
//| creation funcs |
//| X - input vector, length of array must be at least |
//| NIn |
//| DesiredY- desired outputs, length of array must be at least|
//| NOut |
//| Grad - possibly preallocated array. If size of array is |
//| smaller than WCount, it will be reallocated. It |
//| is recommended to reuse previously allocated |
//| array to reduce allocation overhead. |
//| OUTPUT PARAMETERS: |
//| E - error function, sum-of-squares for regression |
//| networks, cross-entropy for classification |
//| networks. |
//| Grad - gradient of E with respect to weights of network,|
//| array[WCount] |
//+------------------------------------------------------------------+
void CAlglib::MLPGradN(CMultilayerPerceptronShell &network,double &x[],
                       double &desiredy[],double &e,double &grad[])
  {
//--- reset the output error before delegating
   e=0.0;
//--- forward the call to the computational core (natural error function)
   CMLPBase::MLPGradN(network.GetInnerObj(),x,desiredy,e,grad);
  }
//+------------------------------------------------------------------+
//| Batch gradient calculation for a set of inputs/outputs |
//| INPUT PARAMETERS: |
//| Network - network initialized with one of the network |
//| creation funcs |
//| XY - set of inputs/outputs; one sample = one row; |
//| first NIn columns contain inputs, |
//| next NOut columns - desired outputs. |
//| SSize - number of elements in XY |
//| Grad - possibly preallocated array. If size of array is |
//| smaller than WCount, it will be reallocated. It |
//| is recommended to reuse previously allocated |
//| array to reduce allocation overhead. |
//| OUTPUT PARAMETERS: |
//| E - error function, SUM(sqr(y[i]-desiredy[i])/2,i) |
//| Grad - gradient of E with respect to weights of network,|
//| array[WCount] |
//+------------------------------------------------------------------+
void CAlglib::MLPGradBatch(CMultilayerPerceptronShell &network,
                           CMatrixDouble &xy,const int ssize,
                           double &e,double &grad[])
  {
//--- reset the output error before delegating
   e=0.0;
//--- forward the call to the computational core
   CMLPBase::MLPGradBatch(network.GetInnerObj(),xy,ssize,e,grad);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::MLPGradBatch(CMultilayerPerceptronShell &network,
                           CMatrixDouble &xy,const int ssize,
                           double &e,CRowDouble &grad)
  {
//--- reset the output error before delegating
   e=0.0;
//--- forward the call to the computational core (CRowDouble overload)
   CMLPBase::MLPGradBatch(network.GetInnerObj(),xy,ssize,e,grad);
  }
//+------------------------------------------------------------------+
//| Batch gradient calculation for a set of inputs/outputs given by |
//| sparse matrices |
//| INPUT PARAMETERS: |
//| Network - network initialized with one of the network |
//| creation funcs |
//| XY - original dataset in sparse format; one sample = one|
//| row: |
//| * MATRIX MUST BE STORED IN CRS FORMAT |
//| * first NIn columns contain inputs. |
//| * for regression problem, next NOut columns store |
//| desired outputs. |
//| * for classification problem, next column (just |
//| one!) stores class number. |
//| SSize - number of elements in XY |
//| Grad - possibly preallocated array. If size of array is |
//| smaller than WCount, it will be reallocated. It is |
//| recommended to reuse previously allocated array to |
//| reduce allocation overhead. |
//| OUTPUT PARAMETERS: |
//| E - error function, SUM(sqr(y[i]-desiredy[i])/2,i) |
//| Grad - gradient of E with respect to weights of network, |
//| array[WCount] |
//+------------------------------------------------------------------+
void CAlglib::MLPGradBatchSparse(CMultilayerPerceptronShell &network,
                                 CSparseMatrix &xy,int ssize,double &e,
                                 CRowDouble &grad)
  {
//--- reset the output error before delegating
   e=0.0;
//--- forward the call to the computational core (sparse dataset)
   CMLPBase::MLPGradBatchSparse(network.GetInnerObj(),xy,ssize,e,grad);
  }
//+------------------------------------------------------------------+
//| Batch gradient calculation for a subset of dataset |
//| INPUT PARAMETERS: |
//| Network - network initialized with one of the network |
//| creation funcs |
//| XY - original dataset in dense format; one sample = one |
//| row: |
//| * first NIn columns contain inputs, |
//| * for regression problem, next NOut columns store |
//| desired outputs. |
//| * for classification problem, next column (just |
//| one!) stores class number. |
//| SetSize - real size of XY, SetSize>=0; |
//| Idx - subset of SubsetSize elements, array[SubsetSize]: |
//| * Idx[I] stores row index in the original dataset |
//| which is given by XY. Gradient is calculated with|
//| respect to rows whose indexes are stored in Idx[]|
//| * Idx[] must store correct indexes; this function |
//| throws an exception in case incorrect index (less|
//| than 0 or larger than rows(XY)) is given |
//| * Idx[] may store indexes in any order and even |
//| with repetitions. |
//| SubsetSize- number of elements in Idx[] array: |
//| * positive value means that subset given by Idx[] |
//| is processed |
//| * zero value results in zero gradient |
//| * negative value means that full dataset is |
//| processed |
//| Grad - possibly preallocated array. If size of array is |
//| smaller than WCount, it will be reallocated. It is |
//| recommended to reuse previously allocated array to |
//| reduce allocation overhead. |
//| OUTPUT PARAMETERS: |
//| E - error function, SUM(sqr(y[i]-desiredy[i])/2,i) |
//| Grad - gradient of E with respect to weights of network, |
//| array[WCount] |
//+------------------------------------------------------------------+
void CAlglib::MLPGradBatchSubset(CMultilayerPerceptronShell &network,
                                 CMatrixDouble &xy,int setsize,
                                 CRowInt &idx,int subsetsize,
                                 double &e,CRowDouble &grad)
  {
//--- reset the output error before delegating
   e=0.0;
//--- forward the call to the computational core (dense subset)
   CMLPBase::MLPGradBatchSubset(network.GetInnerObj(),xy,setsize,idx,subsetsize,e,grad);
  }
//+------------------------------------------------------------------+
//| Batch gradient calculation for a set of inputs/outputs for a |
//| subset of dataset given by set of indexes. |
//| INPUT PARAMETERS: |
//| Network - network initialized with one of the network |
//| creation funcs |
//| XY - original dataset in sparse format; one sample = one|
//| row: |
//| * MATRIX MUST BE STORED IN CRS FORMAT |
//| * first NIn columns contain inputs, |
//| * for regression problem, next NOut columns store |
//| desired outputs. |
//| * for classification problem, next column (just |
//| one!) stores class number. |
//| SetSize - real size of XY, SetSize>=0; |
//| Idx - subset of SubsetSize elements, array[SubsetSize]: |
//| * Idx[I] stores row index in the original dataset |
//| which is given by XY. Gradient is calculated with|
//| respect to rows whose indexes are stored in Idx[]|
//| * Idx[] must store correct indexes; this function |
//| throws an exception in case incorrect index (less|
//| than 0 or larger than rows(XY)) is given |
//| * Idx[] may store indexes in any order and even |
//| with repetitions. |
//| SubsetSize- number of elements in Idx[] array: |
//| * positive value means that subset given by Idx[] |
//| is processed |
//| * zero value results in zero gradient |
//| * negative value means that full dataset is |
//| processed |
//| Grad - possibly preallocated array. If size of array is |
//| smaller than WCount, it will be reallocated. It is |
//| recommended to reuse previously allocated array to |
//| reduce allocation overhead. |
//| OUTPUT PARAMETERS: |
//| E - error function, SUM(sqr(y[i]-desiredy[i])/2,i) |
//| Grad - gradient of E with respect to weights of network, |
//| array[WCount] |
//| NOTE: when SubsetSize<0, the full dataset is processed by calling  |
//|       the MLPGradBatchSparse function.                             |
//+------------------------------------------------------------------+
void CAlglib::MLPGradBatchSparseSubset(CMultilayerPerceptronShell &network,
                                       CSparseMatrix &xy,int setsize,
                                       CRowInt &idx,int subsetsize,
                                       double &e,CRowDouble &grad)
  {
//--- reset the output error before delegating
   e=0.0;
//--- forward the call to the computational core (sparse subset)
   CMLPBase::MLPGradBatchSparseSubset(network.GetInnerObj(),xy,setsize,idx,subsetsize,e,grad);
  }
//+------------------------------------------------------------------+
//| Batch gradient calculation for a set of inputs/outputs |
//| (natural error function is used) |
//| INPUT PARAMETERS: |
//| Network - network initialized with one of the network |
//| creation funcs |
//| XY - set of inputs/outputs; one sample=one row; |
//| first NIn columns contain inputs, |
//| next NOut columns - desired outputs. |
//| SSize - number of elements in XY |
//| Grad - possibly preallocated array. If size of array is |
//| smaller than WCount, it will be reallocated. It |
//| is recommended to reuse previously allocated |
//| array to reduce allocation overhead. |
//| OUTPUT PARAMETERS: |
//| E - error function, sum-of-squares for regression |
//| networks, cross-entropy for classification |
//| networks. |
//| Grad - gradient of E with respect to weights of network,|
//| array[WCount] |
//+------------------------------------------------------------------+
void CAlglib::MLPGradNBatch(CMultilayerPerceptronShell &network,
                            CMatrixDouble &xy,const int ssize,
                            double &e,double &grad[])
  {
//--- reset the output error before delegating
   e=0.0;
//--- forward the call to the computational core (natural error function)
   CMLPBase::MLPGradNBatch(network.GetInnerObj(),xy,ssize,e,grad);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::MLPGradNBatch(CMultilayerPerceptronShell &network,
                            CMatrixDouble &xy,const int ssize,
                            double &e,CRowDouble &grad)
  {
//--- reset the output error before delegating
   e=0.0;
//--- forward the call to the computational core (CRowDouble overload)
   CMLPBase::MLPGradNBatch(network.GetInnerObj(),xy,ssize,e,grad);
  }
//+------------------------------------------------------------------+
//| Batch Hessian calculation (natural error function) using |
//| R-algorithm. Internal subroutine. |
//| Hessian calculation based on R-algorithm described in |
//| "Fast Exact Multiplication by the Hessian", |
//| B. A. Pearlmutter, |
//| Neural Computation, 1994. |
//+------------------------------------------------------------------+
void CAlglib::MLPHessianNBatch(CMultilayerPerceptronShell &network,
                               CMatrixDouble &xy,const int ssize,
                               double &e,double &grad[],
                               CMatrixDouble &h)
  {
//--- reset the output error before delegating
   e=0.0;
//--- forward the call to the computational core (R-algorithm Hessian)
   CMLPBase::MLPHessianNBatch(network.GetInnerObj(),xy,ssize,e,grad,h);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::MLPHessianNBatch(CMultilayerPerceptronShell &network,
                               CMatrixDouble &xy,const int ssize,
                               double &e,CRowDouble &grad,
                               CMatrixDouble &h)
  {
//--- reset the output error before delegating
   e=0.0;
//--- forward the call to the computational core (CRowDouble overload)
   CMLPBase::MLPHessianNBatch(network.GetInnerObj(),xy,ssize,e,grad,h);
  }
//+------------------------------------------------------------------+
//| Batch Hessian calculation using R-algorithm. |
//| Internal subroutine. |
//| Hessian calculation based on R-algorithm described in |
//| "Fast Exact Multiplication by the Hessian", |
//| B. A. Pearlmutter, |
//| Neural Computation, 1994. |
//+------------------------------------------------------------------+
void CAlglib::MLPHessianBatch(CMultilayerPerceptronShell &network,
                              CMatrixDouble &xy,const int ssize,
                              double &e,double &grad[],CMatrixDouble &h)
  {
//--- reset the output error before delegating
   e=0.0;
//--- forward the call to the computational core (R-algorithm Hessian)
   CMLPBase::MLPHessianBatch(network.GetInnerObj(),xy,ssize,e,grad,h);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::MLPHessianBatch(CMultilayerPerceptronShell &network,
                              CMatrixDouble &xy,const int ssize,
                              double &e,CRowDouble &grad,CMatrixDouble &h)
  {
//--- reset the output error before delegating
   e=0.0;
//--- forward the call to the computational core (CRowDouble overload)
   CMLPBase::MLPHessianBatch(network.GetInnerObj(),xy,ssize,e,grad,h);
  }
//+------------------------------------------------------------------+
//| Calculation of all types of errors on subset of dataset. |
//| INPUT PARAMETERS: |
//| Network - network initialized with one of the network |
//| creation funcs |
//| XY - original dataset; one sample = one row; |
//| first NIn columns contain inputs, next NOut |
//| columns - desired outputs. |
//| SetSize - real size of XY, SetSize>=0; |
//| Subset - subset of SubsetSize elements, array[SubsetSize]; |
//| SubsetSize- number of elements in Subset[] array: |
//| * if SubsetSize>0, rows of XY with indices |
//| Subset[0]......Subset[SubsetSize-1] are processed|
//| * if SubsetSize=0, zeros are returned |
//| * if SubsetSize<0, entire dataset is processed; |
//| Subset[] array is ignored in this case. |
//| OUTPUT PARAMETERS: |
//| Rep - it contains all type of errors. |
//+------------------------------------------------------------------+
void CAlglib::MLPAllErrorsSubset(CMultilayerPerceptronShell &network,
                                 CMatrixDouble &xy,int setsize,
                                 CRowInt &subset,int subsetsize,
                                 CModelErrors &rep)
  {
//--- thin wrapper: forward the call to the computational core,
//--- which fills 'rep' with all error metrics for the chosen subset
   CMLPBase::MLPAllErrorsSubset(network.GetInnerObj(),xy,setsize,subset,subsetsize,rep);
  }
//+------------------------------------------------------------------+
//| Calculation of all types of errors on subset of dataset. |
//| INPUT PARAMETERS: |
//| Network - network initialized with one of the network |
//| creation funcs |
//| XY - original dataset given by sparse matrix; |
//| one sample = one row; first NIn columns contain |
//| inputs, next NOut columns - desired outputs. |
//| SetSize - real size of XY, SetSize>=0; |
//| Subset - subset of SubsetSize elements, array[SubsetSize]; |
//| SubsetSize- number of elements in Subset[] array: |
//| * if SubsetSize>0, rows of XY with indices |
//| Subset[0]......Subset[SubsetSize-1] are processed|
//| * if SubsetSize=0, zeros are returned |
//| * if SubsetSize<0, entire dataset is processed; |
//| Subset[] array is ignored in this case. |
//| OUTPUT PARAMETERS: |
//| Rep - it contains all type of errors. |
//+------------------------------------------------------------------+
void CAlglib::MLPAllErrorsSparseSubset(CMultilayerPerceptronShell &network,
                                       CSparseMatrix &xy,int setsize,
                                       CRowInt &subset,int subsetsize,
                                       CModelErrors &rep)
  {
//--- thin wrapper: forward the call to the computational core,
//--- which fills 'rep' with all error metrics for the sparse subset
   CMLPBase::MLPAllErrorsSparseSubset(network.GetInnerObj(),xy,setsize,subset,subsetsize,rep);
  }
//+------------------------------------------------------------------+
//| Error of the neural network on subset of dataset. |
//| INPUT PARAMETERS: |
//| Network - neural network; |
//| XY - training set, see below for information on the |
//| training set format; |
//| SetSize - real size of XY, SetSize>=0; |
//| Subset - subset of SubsetSize elements, array[SubsetSize]; |
//| SubsetSize- number of elements in Subset[] array: |
//| * if SubsetSize>0, rows of XY with indices |
//| Subset[0]......Subset[SubsetSize-1] are processed|
//| * if SubsetSize=0, zeros are returned |
//| * if SubsetSize<0, entire dataset is processed; |
//| Subset[] array is ignored in this case. |
//| RESULT: |
//| sum-of-squares error, SUM(sqr(y[i]-desired_y[i])/2) |
//| DATASET FORMAT: |
//| This function uses two different dataset formats - one for |
//| regression networks, another one for classification networks. |
//| For regression networks with NIn inputs and NOut outputs |
//| following dataset format is used: |
//| * dataset is given by NPoints*(NIn+NOut) matrix |
//| * each row corresponds to one example |
//| * first NIn columns are inputs, next NOut columns are |
//| outputs |
//| For classification networks with NIn inputs and NClasses classes|
//| following dataset format is used: |
//| * dataset is given by NPoints*(NIn+1) matrix |
//| * each row corresponds to one example |
//| * first NIn columns are inputs, last column stores class |
//| number (from 0 to NClasses-1). |
//+------------------------------------------------------------------+
double CAlglib::MLPErrorSubset(CMultilayerPerceptronShell &network,
                               CMatrixDouble &xy,int setsize,
                               CRowInt &subset,int subsetsize)
  {
//--- delegate to the computational core and hand back its result
   double result=CMLPBase::MLPErrorSubset(network.GetInnerObj(),xy,setsize,subset,subsetsize);
   return(result);
  }
//+------------------------------------------------------------------+
//| Error of the neural network on subset of sparse dataset. |
//| INPUT PARAMETERS: |
//| Network - neural network; |
//| XY - training set, see below for information on the |
//| training set format. This function checks |
//| correctness of the dataset (no NANs/INFs, class |
//| numbers are correct) and throws exception when |
//| incorrect dataset is passed. Sparse matrix must use|
//| CRS format for storage. |
//| SetSize - real size of XY, SetSize>=0; it is used when |
//| SubsetSize<0; |
//| Subset - subset of SubsetSize elements, array[SubsetSize]; |
//| SubsetSize- number of elements in Subset[] array: |
//| * if SubsetSize>0, rows of XY with indices |
//| Subset[0]......Subset[SubsetSize-1] are processed|
//| * if SubsetSize=0, zeros are returned |
//| * if SubsetSize<0, entire dataset is processed; |
//| Subset[] array is ignored in this case. |
//| RESULT: |
//| sum-of-squares error, SUM(sqr(y[i]-desired_y[i])/2) |
//| DATASET FORMAT: |
//| This function uses two different dataset formats - one for |
//| regression networks, another one for classification networks. |
//| For regression networks with NIn inputs and NOut outputs |
//| following dataset format is used: |
//| * dataset is given by NPoints*(NIn+NOut) matrix |
//| * each row corresponds to one example |
//| * first NIn columns are inputs, next NOut columns are |
//| outputs |
//| For classification networks with NIn inputs and NClasses classes|
//| following dataset format is used: |
//| * dataset is given by NPoints*(NIn+1) matrix |
//| * each row corresponds to one example |
//| * first NIn columns are inputs, last column stores class |
//| number (from 0 to NClasses-1). |
//+------------------------------------------------------------------+
double CAlglib::MLPErrorSparseSubset(CMultilayerPerceptronShell &network,
                                     CSparseMatrix &xy,int setsize,
                                     CRowInt &subset,int subsetsize)
  {
//--- delegate to the computational core and hand back its result
   double result=CMLPBase::MLPErrorSparseSubset(network.GetInnerObj(),xy,setsize,subset,subsetsize);
   return(result);
  }
//+------------------------------------------------------------------+
//| This subroutine trains logit model. |
//| INPUT PARAMETERS: |
//| XY - training set, array[0..NPoints-1,0..NVars] |
//| First NVars columns store values of |
//| independent variables, next column stores |
//| number of class (from 0 to NClasses-1) which |
//| dataset element belongs to. Fractional values|
//| are rounded to nearest integer. |
//| NPoints - training set size, NPoints>=1 |
//| NVars - number of independent variables, NVars>=1 |
//| NClasses - number of classes, NClasses>=2 |
//| OUTPUT PARAMETERS: |
//| Info - return code: |
//| * -2, if there is a point with class number |
//| outside of [0..NClasses-1]. |
//| * -1, if incorrect parameters was passed |
//| (NPoints<NVars+2, NVars<1, NClasses<2).|
//| * 1, if task has been solved |
//| LM - model built |
//| Rep - training report |
//+------------------------------------------------------------------+
void CAlglib::MNLTrainH(CMatrixDouble &xy,const int npoints,
                        const int nvars,const int nclasses,
                        int &info,CLogitModelShell &lm,
                        CMNLReportShell &rep)
  {
//--- initialization of the output return code (set by the core routine)
   info=0;
//--- function call: train the logit model via the computational core
   CLogit::MNLTrainH(xy,npoints,nvars,nclasses,info,lm.GetInnerObj(),rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Processing                                                       |
//| INPUT PARAMETERS: |
//| LM - logit model, passed by non-constant reference |
//| (some fields of structure are used as temporaries|
//| when calculating model output). |
//| X - input vector, array[0..NVars-1]. |
//| Y - (possibly) preallocated buffer; if size of Y is |
//| less than NClasses, it will be reallocated.If it |
//| is large enough, it is NOT reallocated, so we |
//| can save some time on reallocation. |
//| OUTPUT PARAMETERS: |
//| Y - result, array[0..NClasses-1] |
//| Vector of posterior probabilities for |
//| classification task. |
//+------------------------------------------------------------------+
void CAlglib::MNLProcess(CLogitModelShell &lm,double &x[],double &y[])
  {
//--- thin wrapper: forward the call to the computational core
   CLogit::MNLProcess(lm.GetInnerObj(),x,y);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::MNLProcess(CLogitModelShell &lm,CRowDouble &x,CRowDouble &y)
  {
//--- thin wrapper: forward the call to the computational core (CRowDouble overload)
   CLogit::MNLProcess(lm.GetInnerObj(),x,y);
  }
//+------------------------------------------------------------------+
//| 'interactive' variant of MNLProcess for languages like Python |
//| which support constructs like "Y=MNLProcess(LM,X)" and |
//| interactive mode of the interpreter |
//| This function allocates new array on each call, so it is |
//| significantly slower than its 'non-interactive' counterpart, |
//| but it is more convenient when you call it from command line. |
//+------------------------------------------------------------------+
void CAlglib::MNLProcessI(CLogitModelShell &lm,double &x[],double &y[])
  {
//--- thin wrapper: forward the call to the computational core
   CLogit::MNLProcessI(lm.GetInnerObj(),x,y);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::MNLProcessI(CLogitModelShell &lm,CRowDouble &x,CRowDouble &y)
  {
//--- thin wrapper: forward the call to the computational core (CRowDouble overload)
   CLogit::MNLProcessI(lm.GetInnerObj(),x,y);
  }
//+------------------------------------------------------------------+
//| Unpacks coefficients of logit model. Logit model have form: |
//| P(class=i) = S(i) / (S(0) + S(1) + ... +S(M-1)) |
//| S(i) = Exp(A[i,0]*X[0] + ... + A[i,N-1]*X[N-1] + A[i,N]), |
//| when i<M-1 |
//| S(M-1) = 1 |
//| INPUT PARAMETERS: |
//| LM - logit model in ALGLIB format |
//| OUTPUT PARAMETERS: |
//| V - coefficients, array[0..NClasses-2,0..NVars] |
//| NVars - number of independent variables |
//| NClasses - number of classes |
//+------------------------------------------------------------------+
void CAlglib::MNLUnpack(CLogitModelShell &lm,CMatrixDouble &a,
                        int &nvars,int &nclasses)
  {
//--- initialization of output parameters (overwritten by the core call)
   nvars=0;
   nclasses=0;
//--- function call: unpack logit-model coefficients into 'a'
   CLogit::MNLUnpack(lm.GetInnerObj(),a,nvars,nclasses);
  }
//+------------------------------------------------------------------+
//| "Packs" coefficients and creates logit model in ALGLIB format |
//| (MNLUnpack reversed). |
//| INPUT PARAMETERS: |
//| A - model (see MNLUnpack) |
//| NVars - number of independent variables |
//| NClasses - number of classes |
//| OUTPUT PARAMETERS: |
//| LM - logit model. |
//+------------------------------------------------------------------+
void CAlglib::MNLPack(CMatrixDouble &a,const int nvars,
                      const int nclasses,CLogitModelShell &lm)
  {
//--- thin wrapper: forward the call to the computational core
   CLogit::MNLPack(a,nvars,nclasses,lm.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Average cross-entropy (in bits per element) on the test set |
//| INPUT PARAMETERS: |
//| LM - logit model |
//| XY - test set |
//| NPoints - test set size |
//| RESULT: |
//| CrossEntropy/(NPoints*ln(2)). |
//+------------------------------------------------------------------+
double CAlglib::MNLAvgCE(CLogitModelShell &lm,CMatrixDouble &xy,
                         const int npoints)
  {
//--- delegate to the computational core and hand back its result
   double result=CLogit::MNLAvgCE(lm.GetInnerObj(),xy,npoints);
   return(result);
  }
//+------------------------------------------------------------------+
//| Relative classification error on the test set |
//| INPUT PARAMETERS: |
//| LM - logit model |
//| XY - test set |
//| NPoints - test set size |
//| RESULT: |
//| percent of incorrectly classified cases. |
//+------------------------------------------------------------------+
double CAlglib::MNLRelClsError(CLogitModelShell &lm,CMatrixDouble &xy,
                               const int npoints)
  {
//--- delegate to the computational core and hand back its result
   double result=CLogit::MNLRelClsError(lm.GetInnerObj(),xy,npoints);
   return(result);
  }
//+------------------------------------------------------------------+
//| RMS error on the test set |
//| INPUT PARAMETERS: |
//| LM - logit model |
//| XY - test set |
//| NPoints - test set size |
//| RESULT: |
//| root mean square error (error when estimating posterior |
//| probabilities). |
//+------------------------------------------------------------------+
double CAlglib::MNLRMSError(CLogitModelShell &lm,CMatrixDouble &xy,
                            const int npoints)
  {
//--- delegate to the computational core and hand back its result
   double result=CLogit::MNLRMSError(lm.GetInnerObj(),xy,npoints);
   return(result);
  }
//+------------------------------------------------------------------+
//| Average error on the test set |
//| INPUT PARAMETERS: |
//| LM - logit model |
//| XY - test set |
//| NPoints - test set size |
//| RESULT: |
//| average error (error when estimating posterior |
//| probabilities). |
//+------------------------------------------------------------------+
double CAlglib::MNLAvgError(CLogitModelShell &lm,CMatrixDouble &xy,
                            const int npoints)
  {
//--- delegate to the computational core and hand back its result
   double result=CLogit::MNLAvgError(lm.GetInnerObj(),xy,npoints);
   return(result);
  }
//+------------------------------------------------------------------+
//| Average relative error on the test set |
//| INPUT PARAMETERS: |
//| LM - logit model |
//| XY - test set |
//| NPoints - test set size |
//| RESULT: |
//| average relative error (error when estimating posterior |
//| probabilities). |
//+------------------------------------------------------------------+
double CAlglib::MNLAvgRelError(CLogitModelShell &lm,CMatrixDouble &xy,
                               const int ssize)
  {
//--- delegate to the computational core and hand back its result
   double result=CLogit::MNLAvgRelError(lm.GetInnerObj(),xy,ssize);
   return(result);
  }
//+------------------------------------------------------------------+
//| Classification error on test set = MNLRelClsError*NPoints |
//+------------------------------------------------------------------+
int CAlglib::MNLClsError(CLogitModelShell &lm,CMatrixDouble &xy,
                         const int npoints)
  {
//--- delegate to the computational core and hand back its result
   int result=CLogit::MNLClsError(lm.GetInnerObj(),xy,npoints);
   return(result);
  }
//+------------------------------------------------------------------+
//| DESCRIPTION: |
//| This function creates MCPD (Markov Chains for Population Data) |
//| solver. |
//| This solver can be used to find transition matrix P for |
//| N-dimensional prediction problem where transition from X[i] to |
//| X[i+1] is modelled as X[i+1] = P*X[i] |
//| where X[i] and X[i+1] are N-dimensional population vectors |
//| (components of each X are non-negative), and P is a N*N |
//| transition matrix (elements of are non-negative, each column |
//| sums to 1.0). |
//| Such models arise when:                                          |
//| * there is some population of individuals |
//| * individuals can have different states |
//| * individuals can transit from one state to another |
//| * population size is constant, i.e. there is no new individuals |
//| and no one leaves population |
//| * you want to model transitions of individuals from one state |
//| into another |
//| USAGE: |
//| Here we give very brief outline of the MCPD. We strongly |
//| recommend you to read examples in the ALGLIB Reference Manual |
//| and to read ALGLIB User Guide on data analysis which is |
//| available at http://www.alglib.net/dataanalysis/ |
//| 1. User initializes algorithm state with MCPDCreate() call |
//| 2. User adds one or more tracks - sequences of states which |
//| describe evolution of a system being modelled from different |
//| starting conditions |
//| 3. User may add optional boundary, equality and/or linear |
//| constraints on the coefficients of P by calling one of the |
//| following functions: |
//| * MCPDSetEC() to set equality constraints |
//| * MCPDSetBC() to set bound constraints |
//| * MCPDSetLC() to set linear constraints |
//| 4. Optionally, user may set custom weights for prediction errors |
//| (by default, algorithm assigns non-equal, automatically chosen|
//| weights for errors in the prediction of different components |
//| of X). It can be done with a call of |
//| MCPDSetPredictionWeights() function. |
//| 5. User calls MCPDSolve() function which takes algorithm state |
//| and pointer (delegate, etc.) to callback function which |
//| calculates F/G. |
//| 6. User calls MCPDResults() to get solution |
//| INPUT PARAMETERS: |
//| N - problem dimension, N>=1 |
//| OUTPUT PARAMETERS: |
//| State - structure stores algorithm state |
//+------------------------------------------------------------------+
void CAlglib::MCPDCreate(const int n,CMCPDStateShell &s)
  {
//--- thin wrapper: initialize the MCPD solver state via the computational core
   CMarkovCPD::MCPDCreate(n,s.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| DESCRIPTION: |
//| This function is a specialized version of MCPDCreate() function, |
//| and we recommend you to read comments for this function for |
//| general information about MCPD solver. |
//| This function creates MCPD (Markov Chains for Population Data) |
//| solver for "Entry-state" model, i.e. model where transition from |
//| X[i] to X[i+1] is modelled as |
//| X[i+1] = P*X[i] |
//| where |
//| X[i] and X[i+1] are N-dimensional state vectors |
//| P is a N*N transition matrix |
//| and one selected component of X[] is called "entry" state and |
//| is treated in a special way: |
//| system state always transits from "entry" state to some |
//| another state |
//| system state can not transit from any state into "entry" |
//| state |
//| Such conditions basically mean that row of P which corresponds to|
//| "entry" state is zero. |
//| Such models arise when: |
//| * there is some population of individuals |
//| * individuals can have different states |
//| * individuals can transit from one state to another |
//| * population size is NOT constant - at every moment of time |
//| there is some (unpredictable) amount of "new" individuals, |
//| which can transit into one of the states at the next turn, but |
//| still no one leaves population |
//| * you want to model transitions of individuals from one state |
//| into another |
//|*but you do NOT want to predict amount of "new" individuals |
//| because it does not depends on individuals already present |
//| (hence system can not transit INTO entry state - it can only |
//| transit FROM it). |
//| This model is discussed in more details in the ALGLIB User Guide |
//| (see http://www.alglib.net/dataanalysis/ for more data). |
//| INPUT PARAMETERS: |
//| N - problem dimension, N>=2 |
//| EntryState- index of entry state, in 0..N-1 |
//| OUTPUT PARAMETERS: |
//| State - structure stores algorithm state |
//+------------------------------------------------------------------+
void CAlglib::MCPDCreateEntry(const int n,const int entrystate,
                              CMCPDStateShell &s)
  {
//--- thin wrapper: create MCPD solver for the "Entry-state" model
   CMarkovCPD::MCPDCreateEntry(n,entrystate,s.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| DESCRIPTION: |
//| This function is a specialized version of MCPDCreate() function, |
//| and we recommend you to read comments for this function for |
//| general information about MCPD solver. |
//| This function creates MCPD (Markov Chains for Population Data) |
//| solver for "Exit-state" model, i.e. model where transition from |
//| X[i] to X[i+1] is modelled as |
//| X[i+1] = P*X[i] |
//| where |
//| X[i] and X[i+1] are N-dimensional state vectors |
//| P is a N*N transition matrix |
//| and one selected component of X[] is called "exit" state and |
//| is treated in a special way: |
//| system state can transit from any state into "exit" state |
//| system state can not transit from "exit" state into any other|
//| state transition operator discards "exit" state (makes it |
//| zero at each turn) |
//| Such conditions basically mean that column of P which |
//| corresponds to "exit" state is zero. Multiplication by such P |
//| may decrease sum of vector components. |
//| Such models arise when: |
//| * there is some population of individuals |
//| * individuals can have different states |
//| * individuals can transit from one state to another |
//| * population size is NOT constant - individuals can move into |
//| "exit" state and leave population at the next turn, but there |
//| are no new individuals |
//| * amount of individuals which leave population can be predicted |
//| * you want to model transitions of individuals from one state |
//| into another (including transitions into the "exit" state) |
//| This model is discussed in more details in the ALGLIB User Guide |
//| (see http://www.alglib.net/dataanalysis/ for more data). |
//| INPUT PARAMETERS: |
//| N - problem dimension, N>=2 |
//| ExitState- index of exit state, in 0..N-1 |
//| OUTPUT PARAMETERS: |
//| State - structure stores algorithm state |
//+------------------------------------------------------------------+
void CAlglib::MCPDCreateExit(const int n,const int exitstate,
                             CMCPDStateShell &s)
  {
//--- thin wrapper: create MCPD solver for the "Exit-state" model
   CMarkovCPD::MCPDCreateExit(n,exitstate,s.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| DESCRIPTION: |
//| This function is a specialized version of MCPDCreate() function, |
//| and we recommend you to read comments for this function for |
//| general information about MCPD solver. |
//| This function creates MCPD (Markov Chains for Population Data) |
//| solver for "Entry-Exit-states" model, i.e. model where transition|
//| from X[i] to X[i+1] is modelled as |
//| X[i+1] = P*X[i] |
//| where |
//| X[i] and X[i+1] are N-dimensional state vectors |
//| P is a N*N transition matrix |
//| one selected component of X[] is called "entry" state and is a |
//| treated in special way: |
//| system state always transits from "entry" state to some |
//| another state |
//| system state can not transit from any state into "entry" |
//| state |
//| and another one component of X[] is called "exit" state and is |
//| treated in a special way too: |
//| system state can transit from any state into "exit" state |
//| system state can not transit from "exit" state into any other|
//| state transition operator discards "exit" state (makes it |
//| zero at each turn) |
//| Such conditions basically mean that: |
//| row of P which corresponds to "entry" state is zero |
//| column of P which corresponds to "exit" state is zero |
//| Multiplication by such P may decrease sum of vector components. |
//| Such models arise when: |
//| * there is some population of individuals |
//| * individuals can have different states |
//| * individuals can transit from one state to another |
//| * population size is NOT constant |
//| * at every moment of time there is some (unpredictable) amount |
//| of "new" individuals, which can transit into one of the states |
//| at the next turn |
//| * some individuals can move (predictably) into "exit" state      |
//|   and leave population at the next turn                          |
//| * you want to model transitions of individuals from one state    |
//|   into another, including transitions from the "entry" state and |
//|   into the "exit" state.                                         |
//| * but you do NOT want to predict the amount of "new" individuals |
//|   because it does not depend on individuals already present      |
//|   (hence system can not transit INTO entry state - it can only   |
//|   transit FROM it).                                              |
//| This model is discussed in more details in the ALGLIB User |
//| Guide (see http://www.alglib.net/dataanalysis/ for more data). |
//| INPUT PARAMETERS: |
//| N - problem dimension, N>=2 |
//| EntryState- index of entry state, in 0..N-1 |
//| ExitState- index of exit state, in 0..N-1 |
//| OUTPUT PARAMETERS: |
//| State - structure stores algorithm state |
//+------------------------------------------------------------------+
void CAlglib::MCPDCreateEntryExit(const int n,const int entrystate,
                                  const int exitstate,CMCPDStateShell &s)
  {
//--- forward to the computational core; s receives the initialized solver state
   CMarkovCPD::MCPDCreateEntryExit(n,entrystate,exitstate,s.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| This function is used to add a track - sequence of system states |
//| at the different moments of its evolution. |
//| You may add one or several tracks to the MCPD solver. In case you|
//| have several tracks, they won't overwrite each other. For |
//| example, if you pass two tracks, A1-A2-A3 (system at t=A+1, t=A+2|
//| and t=A+3) and B1-B2-B3, then solver will try to model |
//| transitions from t=A+1 to t=A+2, t=A+2 to t=A+3, t=B+1 to t=B+2, |
//| t=B+2 to t=B+3. But it WON'T mix these two tracks - i.e. it      |
//| won't try to model transition from t=A+3 to t=B+1.               |
//| INPUT PARAMETERS: |
//| S - solver |
//| XY - track, array[K, N]: |
//| * I-th row is a state at t=I |
//| * elements of XY must be non-negative (exception |
//| will be thrown on negative elements) |
//| K - number of points in a track |
//| * if given, only leading K rows of XY are used |
//| * if not given, automatically determined from |
//| size of XY |
//| NOTES: |
//| 1. Track may contain either proportional or population data: |
//| * with proportional data all rows of XY must sum to 1.0, i.e. |
//| we have proportions instead of absolute population values |
//| * with population data rows of XY contain population counts |
//| and generally do not sum to 1.0 (although they still must be|
//| non-negative) |
//+------------------------------------------------------------------+
void CAlglib::MCPDAddTrack(CMCPDStateShell &s,CMatrixDouble &xy,
                           const int k)
  {
//--- only the leading K rows of XY are used by the solver
   CMarkovCPD::MCPDAddTrack(s.GetInnerObj(),xy,k);
  }
//+------------------------------------------------------------------+
//| This function is used to add a track - sequence of system states |
//| at the different moments of its evolution. |
//| You may add one or several tracks to the MCPD solver. In case you|
//| have several tracks, they won't overwrite each other. For |
//| example, if you pass two tracks, A1-A2-A3 (system at t=A+1, t=A+2|
//| and t=A+3) and B1-B2-B3, then solver will try to model |
//| transitions from t=A+1 to t=A+2, t=A+2 to t=A+3, t=B+1 to t=B+2, |
//| t=B+2 to t=B+3. But it WON'T mix these two tracks - i.e. it      |
//| won't try to model transition from t=A+3 to t=B+1.               |
//| INPUT PARAMETERS: |
//| S - solver |
//| XY - track, array[K,N]: |
//| * I-th row is a state at t=I |
//| * elements of XY must be non-negative (exception |
//| will be thrown on negative elements) |
//| K - number of points in a track |
//| * if given, only leading K rows of XY are used |
//| * if not given, automatically determined from |
//| size of XY |
//| NOTES: |
//| 1. Track may contain either proportional or population data: |
//| * with proportional data all rows of XY must sum to 1.0, i.e. |
//| we have proportions instead of absolute population values |
//| * with population data rows of XY contain population counts |
//| and generally do not sum to 1.0 (although they still must be|
//| non-negative) |
//+------------------------------------------------------------------+
void CAlglib::MCPDAddTrack(CMCPDStateShell &s,CMatrixDouble &xy)
  {
//--- track length K is taken from the matrix itself: every row of XY is one state
   CMarkovCPD::MCPDAddTrack(s.GetInnerObj(),xy,(int)CAp::Rows(xy));
  }
//+------------------------------------------------------------------+
//| This function is used to add equality constraints on the elements|
//| of the transition matrix P. |
//| MCPD solver has four types of constraints which can be placed |
//| on P: |
//| * user-specified equality constraints (optional) |
//| * user-specified bound constraints (optional) |
//| * user-specified general linear constraints (optional) |
//| * basic constraints (always present): |
//| * non-negativity: P[i,j]>=0 |
//| * consistency: every column of P sums to 1.0 |
//| Final constraints which are passed to the underlying optimizer |
//| are calculated as intersection of all present constraints. For |
//| example, you may specify boundary constraint on P[0,0] and |
//| equality one: |
//| 0.1<=P[0,0]<=0.9 |
//| P[0,0]=0.5 |
//| Such combination of constraints will be silently reduced to their|
//| intersection, which is P[0,0]=0.5. |
//| This function can be used to place equality constraints on |
//| arbitrary subset of elements of P. Set of constraints is |
//| specified by EC, which may contain either NAN's or finite numbers|
//| from [0,1]. NAN denotes absence of constraint, finite number |
//| denotes equality constraint on specific element of P. |
//| You can also use MCPDAddEC() function which allows to ADD |
//| equality constraint for one element of P without changing |
//| constraints for other elements. |
//| These functions (MCPDSetEC and MCPDAddEC) interact as follows: |
//| * there is internal matrix of equality constraints which is |
//| stored in the MCPD solver |
//| * MCPDSetEC() replaces this matrix by another one (SET) |
//| * MCPDAddEC() modifies one element of this matrix and leaves |
//| other ones unchanged (ADD) |
//| * thus MCPDAddEC() call preserves all modifications done by |
//| previous calls, while MCPDSetEC() completely discards all |
//| changes done to the equality constraints. |
//| INPUT PARAMETERS: |
//| S - solver |
//| EC - equality constraints, array[N,N]. Elements of EC |
//| can be either NAN's or finite numbers from [0,1].|
//| NAN denotes absence of constraints, while finite |
//| value denotes equality constraint on the |
//| corresponding element of P. |
//| NOTES: |
//| 1. infinite values of EC will lead to exception being thrown. |
//| Values less than 0.0 or greater than 1.0 will lead to error code |
//| being returned after call to MCPDSolve(). |
//+------------------------------------------------------------------+
void CAlglib::MCPDSetEC(CMCPDStateShell &s,CMatrixDouble &ec)
  {
//--- SET semantics: replaces the whole internal equality-constraint matrix
   CMarkovCPD::MCPDSetEC(s.GetInnerObj(),ec);
  }
//+------------------------------------------------------------------+
//| This function is used to add equality constraints on the elements|
//| of the transition matrix P. |
//| MCPD solver has four types of constraints which can be placed |
//| on P: |
//| * user-specified equality constraints (optional) |
//| * user-specified bound constraints (optional) |
//| * user-specified general linear constraints (optional) |
//| * basic constraints (always present): |
//| * non-negativity: P[i,j]>=0 |
//| * consistency: every column of P sums to 1.0 |
//| Final constraints which are passed to the underlying optimizer |
//| are calculated as intersection of all present constraints. For |
//| example, you may specify boundary constraint on P[0,0] and |
//| equality one: |
//| 0.1<=P[0,0]<=0.9 |
//| P[0,0]=0.5 |
//| Such combination of constraints will be silently reduced to their|
//| intersection, which is P[0,0]=0.5. |
//| This function can be used to ADD equality constraint for one |
//| element of P without changing constraints for other elements. |
//| You can also use MCPDSetEC() function which allows you to specify|
//| arbitrary set of equality constraints in one call. |
//| These functions (MCPDSetEC and MCPDAddEC) interact as follows: |
//| * there is internal matrix of equality constraints which is |
//| stored in the MCPD solver |
//| * MCPDSetEC() replaces this matrix by another one (SET) |
//| * MCPDAddEC() modifies one element of this matrix and leaves |
//| other ones unchanged (ADD) |
//| * thus MCPDAddEC() call preserves all modifications done by |
//| previous calls, while MCPDSetEC() completely discards all |
//| changes done to the equality constraints. |
//| INPUT PARAMETERS: |
//| S - solver |
//| I - row index of element being constrained |
//| J - column index of element being constrained |
//| C - value (constraint for P[I,J]). Can be either NAN |
//| (no constraint) or finite value from [0,1]. |
//| NOTES: |
//| 1. infinite values of C will lead to exception being thrown. |
//| Values less than 0.0 or greater than 1.0 will lead to error code |
//| being returned after call to MCPDSolve(). |
//+------------------------------------------------------------------+
void CAlglib::MCPDAddEC(CMCPDStateShell &s,const int i,const int j,
                        const double c)
  {
//--- ADD semantics: constrains P[I,J] only, other elements keep their constraints
   CMarkovCPD::MCPDAddEC(s.GetInnerObj(),i,j,c);
  }
//+------------------------------------------------------------------+
//| This function is used to add bound constraints on the elements |
//| of the transition matrix P. |
//| MCPD solver has four types of constraints which can be placed |
//| on P: |
//| * user-specified equality constraints (optional) |
//| * user-specified bound constraints (optional) |
//| * user-specified general linear constraints (optional) |
//| * basic constraints (always present): |
//| * non-negativity: P[i,j]>=0 |
//| * consistency: every column of P sums to 1.0 |
//| Final constraints which are passed to the underlying optimizer |
//| are calculated as intersection of all present constraints. For |
//| example, you may specify boundary constraint on P[0,0] and |
//| equality one: |
//| 0.1<=P[0,0]<=0.9 |
//| P[0,0]=0.5 |
//| Such combination of constraints will be silently reduced to their|
//| intersection, which is P[0,0]=0.5. |
//| This function can be used to place bound constraints on arbitrary|
//| subset of elements of P. Set of constraints is specified by |
//| BndL/BndU matrices, which may contain arbitrary combination of |
//| finite numbers or infinities (like -INF<x<=0.5 or 0.1<=x<+INF). |
//| You can also use MCPDAddBC() function which allows to ADD bound |
//| constraint for one element of P without changing constraints for |
//| other elements. |
//| These functions (MCPDSetBC and MCPDAddBC) interact as follows: |
//| * there is internal matrix of bound constraints which is stored |
//| in the MCPD solver |
//| * MCPDSetBC() replaces this matrix by another one (SET) |
//| * MCPDAddBC() modifies one element of this matrix and leaves |
//| other ones unchanged (ADD) |
//| * thus MCPDAddBC() call preserves all modifications done by |
//| previous calls, while MCPDSetBC() completely discards all |
//| changes done to the equality constraints. |
//| INPUT PARAMETERS: |
//| S - solver |
//| BndL - lower bounds constraints, array[N,N]. Elements of|
//| BndL can be finite numbers or -INF. |
//| BndU - upper bounds constraints, array[N,N]. Elements of|
//| BndU can be finite numbers or +INF. |
//+------------------------------------------------------------------+
void CAlglib::MCPDSetBC(CMCPDStateShell &s,CMatrixDouble &bndl,
                        CMatrixDouble &bndu)
  {
//--- SET semantics: replaces the whole internal bound-constraint matrices
   CMarkovCPD::MCPDSetBC(s.GetInnerObj(),bndl,bndu);
  }
//+------------------------------------------------------------------+
//| This function is used to add bound constraints on the elements |
//| of the transition matrix P. |
//| MCPD solver has four types of constraints which can be placed |
//| on P: |
//| * user-specified equality constraints (optional) |
//| * user-specified bound constraints (optional) |
//| * user-specified general linear constraints (optional) |
//| * basic constraints (always present): |
//| * non-negativity: P[i,j]>=0 |
//| * consistency: every column of P sums to 1.0 |
//| Final constraints which are passed to the underlying optimizer |
//| are calculated as intersection of all present constraints. For |
//| example, you may specify boundary constraint on P[0,0] and |
//| equality one: |
//| 0.1<=P[0,0]<=0.9 |
//| P[0,0]=0.5 |
//| Such combination of constraints will be silently reduced to their|
//| intersection, which is P[0,0]=0.5. |
//| This function can be used to ADD bound constraint for one element|
//| of P without changing constraints for other elements. |
//| You can also use MCPDSetBC() function which allows to place bound|
//| constraints on arbitrary subset of elements of P. Set of |
//| constraints is specified by BndL/BndU matrices, which may |
//| contain arbitrary combination of finite numbers or infinities |
//| (like -INF<x<=0.5 or 0.1<=x<+INF). |
//| These functions (MCPDSetBC and MCPDAddBC) interact as follows: |
//| * there is internal matrix of bound constraints which is stored |
//| in the MCPD solver |
//| * MCPDSetBC() replaces this matrix by another one (SET) |
//| * MCPDAddBC() modifies one element of this matrix and leaves |
//| other ones unchanged (ADD) |
//| * thus MCPDAddBC() call preserves all modifications done by |
//| previous calls, while MCPDSetBC() completely discards all |
//| changes done to the equality constraints. |
//| INPUT PARAMETERS: |
//| S - solver |
//| I - row index of element being constrained |
//| J - column index of element being constrained |
//| BndL - lower bound |
//| BndU - upper bound |
//+------------------------------------------------------------------+
void CAlglib::MCPDAddBC(CMCPDStateShell &s,const int i,const int j,
                        const double bndl,const double bndu)
  {
//--- ADD semantics: bounds P[I,J] only, other elements keep their constraints
   CMarkovCPD::MCPDAddBC(s.GetInnerObj(),i,j,bndl,bndu);
  }
//+------------------------------------------------------------------+
//| This function is used to set linear equality/inequality |
//| constraints on the elements of the transition matrix P. |
//| This function can be used to set one or several general linear |
//| constraints on the elements of P. Two types of constraints are |
//| supported: |
//| * equality constraints |
//| * inequality constraints (both less-or-equal and |
//| greater-or-equal) |
//| Coefficients of constraints are specified by matrix C (one of the|
//| parameters). One row of C corresponds to one constraint. |
//| Because transition matrix P has N*N elements, we need N*N columns|
//| to store all coefficients (they are stored row by row), and |
//| one more column to store right part - hence C has N*N+1 columns. |
//| Constraint kind is stored in the CT array. |
//| Thus, I-th linear constraint is |
//| P[0,0]*C[I,0] + P[0,1]*C[I,1] + .. + P[0,N-1]*C[I,N-1] + |
//| + P[1,0]*C[I,N] + P[1,1]*C[I,N+1] + ... + |
//| + P[N-1,N-1]*C[I,N*N-1] ?=? C[I,N*N] |
//| where ?=? can be either "=" (CT[i]=0), "<=" (CT[i]<0) or ">=" |
//| (CT[i]>0). |
//| Your constraint may involve only some subset of P (less than N*N |
//| elements). |
//| For example it can be something like |
//| P[0,0] + P[0,1] = 0.5 |
//| In this case you still should pass matrix with N*N+1 columns, |
//| but all its elements (except for C[0,0], C[0,1] and C[0,N*N])    |
//| will be zero. |
//| INPUT PARAMETERS: |
//| S - solver |
//| C - array[K,N*N+1] - coefficients of constraints |
//| (see above for complete description) |
//| CT - array[K] - constraint types |
//| (see above for complete description) |
//| K - number of equality/inequality constraints, K>=0: |
//| * if given, only leading K elements of C/CT are |
//| used |
//| * if not given, automatically determined from |
//| sizes of C/CT |
//+------------------------------------------------------------------+
void CAlglib::MCPDSetLC(CMCPDStateShell &s,CMatrixDouble &c,
                        int &ct[],const int k)
  {
//--- only the leading K rows of C and leading K elements of CT are used
   CMarkovCPD::MCPDSetLC(s.GetInnerObj(),c,ct,k);
  }
//+------------------------------------------------------------------+
//| Same as MCPDSetLC(), but constraint types are given as CRowInt.  |
//+------------------------------------------------------------------+
void CAlglib::MCPDSetLC(CMCPDStateShell &s,CMatrixDouble &c,
                        CRowInt &ct,const int k)
  {
//--- overload taking constraint types as CRowInt instead of int[]
   CMarkovCPD::MCPDSetLC(s.GetInnerObj(),c,ct,k);
  }
//+------------------------------------------------------------------+
//| This function is used to set linear equality/inequality |
//| constraints on the elements of the transition matrix P. |
//| This function can be used to set one or several general linear |
//| constraints on the elements of P. Two types of constraints are |
//| supported: |
//| * equality constraints |
//| * inequality constraints (both less-or-equal and |
//| greater-or-equal) |
//| Coefficients of constraints are specified by matrix C (one of the|
//| parameters). One row of C corresponds to one constraint. |
//| Because transition matrix P has N*N elements, we need N*N columns|
//| to store all coefficients (they are stored row by row), and |
//| one more column to store right part - hence C has N*N+1 columns. |
//| Constraint kind is stored in the CT array. |
//| Thus, I-th linear constraint is |
//| P[0,0]*C[I,0] + P[0,1]*C[I,1] + .. + P[0,N-1]*C[I,N-1] + |
//| + P[1,0]*C[I,N] + P[1,1]*C[I,N+1] + ... + |
//| + P[N-1,N-1]*C[I,N*N-1] ?=? C[I,N*N] |
//| where ?=? can be either "=" (CT[i]=0), "<=" (CT[i]<0) or ">=" |
//| (CT[i]>0). |
//| Your constraint may involve only some subset of P (less than N*N |
//| elements). |
//| For example it can be something like |
//| P[0,0] + P[0,1] = 0.5 |
//| In this case you still should pass matrix with N*N+1 columns, |
//| but all its elements (except for C[0,0], C[0,1] and C[0,N*N])    |
//| will be zero. |
//| INPUT PARAMETERS: |
//| S - solver |
//| C - array[K,N*N+1] - coefficients of constraints |
//| (see above for complete description) |
//| CT - array[K] - constraint types |
//| (see above for complete description) |
//| K - number of equality/inequality constraints, K>=0: |
//| * if given, only leading K elements of C/CT are |
//| used |
//| * if not given, automatically determined from |
//| sizes of C/CT |
//+------------------------------------------------------------------+
void CAlglib::MCPDSetLC(CMCPDStateShell &s,CMatrixDouble &c,int &ct[])
  {
//--- K is inferred from the arguments: one constraint per row of C,
//--- so C must have exactly as many rows as CT has elements
   int rows=(int)CAp::Rows(c);
   if(rows!=CAp::Len(ct))
     {
      Print("Error while calling " + __FUNCTION__ + ": looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return;
     }
//--- delegate with the inferred constraint count
   CMarkovCPD::MCPDSetLC(s.GetInnerObj(),c,ct,rows);
  }
//+------------------------------------------------------------------+
//| Same as MCPDSetLC() with CRowInt types; K derived from C/CT size |
//+------------------------------------------------------------------+
void CAlglib::MCPDSetLC(CMCPDStateShell &s,CMatrixDouble &c,CRowInt &ct)
  {
//--- K is inferred from the arguments: one constraint per row of C,
//--- so C must have exactly as many rows as CT has elements
   int rows=(int)CAp::Rows(c);
   if(rows!=CAp::Len(ct))
     {
      Print("Error while calling " + __FUNCTION__ + ": looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return;
     }
//--- delegate with the inferred constraint count
   CMarkovCPD::MCPDSetLC(s.GetInnerObj(),c,ct,rows);
  }
//+------------------------------------------------------------------+
//| This function allows to tune amount of Tikhonov regularization |
//| being applied to your problem. |
//| By default, regularizing term is equal to r*||P-prior_P||^2, |
//| where r is a small non-zero value, P is transition matrix, |
//| prior_P is identity matrix, ||X||^2 is a sum of squared elements |
//| of X. |
//| This function allows you to change coefficient r. You can also |
//| change prior values with MCPDSetPrior() function. |
//| INPUT PARAMETERS: |
//| S - solver |
//| V - regularization coefficient, finite non-negative |
//| value. It is not recommended to specify zero |
//| value unless you are pretty sure that you want it.|
//+------------------------------------------------------------------+
void CAlglib::MCPDSetTikhonovRegularizer(CMCPDStateShell &s,
                                         const double v)
  {
//--- sets coefficient r of the regularizing term r*||P-prior_P||^2
   CMarkovCPD::MCPDSetTikhonovRegularizer(s.GetInnerObj(),v);
  }
//+------------------------------------------------------------------+
//| This function allows to set prior values used for regularization |
//| of your problem. |
//| By default, regularizing term is equal to r*||P-prior_P||^2, |
//| where r is a small non-zero value, P is transition matrix, |
//| prior_P is identity matrix, ||X||^2 is a sum of squared elements |
//| of X. |
//| This function allows you to change prior values prior_P. You can |
//| also change r with MCPDSetTikhonovRegularizer() function. |
//| INPUT PARAMETERS: |
//| S - solver |
//| PP - array[N,N], matrix of prior values: |
//| 1. elements must be real numbers from [0,1] |
//| 2. columns must sum to 1.0. |
//| First property is checked (exception is thrown |
//| otherwise), while second one is not |
//| checked/enforced. |
//+------------------------------------------------------------------+
void CAlglib::MCPDSetPrior(CMCPDStateShell &s,CMatrixDouble &pp)
  {
//--- sets prior_P of the regularizing term r*||P-prior_P||^2
   CMarkovCPD::MCPDSetPrior(s.GetInnerObj(),pp);
  }
//+------------------------------------------------------------------+
//| This function is used to change prediction weights |
//| MCPD solver scales prediction errors as follows |
//| Error(P) = ||W*(y-P*x)||^2 |
//| where |
//| x is a system state at time t |
//| y is a system state at time t+1 |
//| P is a transition matrix |
//| W is a diagonal scaling matrix |
//| By default, weights are chosen in order to minimize relative |
//| prediction error instead of absolute one. For example, if one |
//| component of state is about 0.5 in magnitude and another one is |
//| about 0.05, then algorithm will make corresponding weights equal |
//| to 2.0 and 20.0. |
//| INPUT PARAMETERS: |
//| S - solver |
//| PW - array[N], weights: |
//| * must be non-negative values (exception will be |
//| thrown otherwise) |
//| * zero values will be replaced by automatically |
//| chosen values |
//+------------------------------------------------------------------+
void CAlglib::MCPDSetPredictionWeights(CMCPDStateShell &s,
                                       double &pw[])
  {
//--- zero entries of PW are replaced by automatically chosen weights
   CMarkovCPD::MCPDSetPredictionWeights(s.GetInnerObj(),pw);
  }
//+------------------------------------------------------------------+
//| Same as MCPDSetPredictionWeights(), weights given as CRowDouble. |
//+------------------------------------------------------------------+
void CAlglib::MCPDSetPredictionWeights(CMCPDStateShell &s,
                                       CRowDouble &pw)
  {
//--- overload taking weights as CRowDouble instead of double[]
   CMarkovCPD::MCPDSetPredictionWeights(s.GetInnerObj(),pw);
  }
//+------------------------------------------------------------------+
//| This function is used to start solution of the MCPD problem. |
//| After return from this function, you can use MCPDResults() to get|
//| solution and completion code. |
//+------------------------------------------------------------------+
void CAlglib::MCPDSolve(CMCPDStateShell &s)
  {
//--- runs the optimizer; retrieve solution afterwards with MCPDResults()
   CMarkovCPD::MCPDSolve(s.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| MCPD results |
//| INPUT PARAMETERS: |
//| State - algorithm state |
//| OUTPUT PARAMETERS: |
//| P - array[N,N], transition matrix |
//| Rep - optimization report. You should check Rep. |
//| TerminationType in order to distinguish successful|
//| termination from unsuccessful one. Speaking short,|
//| positive values denote success, negative ones are |
//| failures. More information about fields of this |
//| structure can be found in the comments on |
//| MCPDReport datatype. |
//+------------------------------------------------------------------+
void CAlglib::MCPDResults(CMCPDStateShell &s,CMatrixDouble &p,
                          CMCPDReportShell &rep)
  {
//--- fills P and rep from the solver state; check rep.TerminationType for success
   CMarkovCPD::MCPDResults(s.GetInnerObj(),p,rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Neural network training using modified Levenberg-Marquardt with |
//| exact Hessian calculation and regularization. Subroutine trains |
//| neural network with restarts from random positions. Algorithm is |
//| well suited for small |
//| and medium scale problems (hundreds of weights). |
//| INPUT PARAMETERS: |
//| Network - neural network with initialized geometry |
//| XY - training set |
//| NPoints - training set size |
//| Decay - weight decay constant, >=0.001 |
//| Decay term 'Decay*||Weights||^2' is added to |
//| error function. |
//| If you don't know what Decay to choose, use |
//| 0.001. |
//| Restarts - number of restarts from random position, >0. |
//| If you don't know what Restarts to choose, |
//| use 2. |
//| OUTPUT PARAMETERS: |
//| Network - trained neural network. |
//| Info - return code: |
//| * -9, if internal matrix inverse subroutine |
//| failed |
//| * -2, if there is a point with class number |
//| outside of [0..NOut-1]. |
//| * -1, if wrong parameters specified |
//| (NPoints<0, Restarts<1). |
//| * 2, if task has been solved. |
//| Rep - training report |
//+------------------------------------------------------------------+
void CAlglib::MLPTrainLM(CMultilayerPerceptronShell &network,
                         CMatrixDouble &xy,const int npoints,
                         const double decay,const int restarts,
                         int &info,CMLPReportShell &rep)
  {
//--- reset return code before the call (info is an output parameter)
   info=0;
//--- delegate Levenberg-Marquardt training to the computational core
   CMLPTrain::MLPTrainLM(network.GetInnerObj(),xy,npoints,decay,restarts,info,rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Neural network training using L-BFGS algorithm with |
//| regularization. Subroutine trains neural network with restarts |
//| from random positions. Algorithm is well suited for problems of |
//| any dimensionality (memory requirements and step complexity are |
//| linear by weights number). |
//| INPUT PARAMETERS: |
//| Network - neural network with initialized geometry |
//| XY - training set |
//| NPoints - training set size |
//| Decay - weight decay constant, >=0.001 |
//| Decay term 'Decay*||Weights||^2' is added to |
//| error function. |
//| If you don't know what Decay to choose, use |
//| 0.001. |
//| Restarts - number of restarts from random position, >0. |
//| If you don't know what Restarts to choose, |
//| use 2. |
//| WStep - stopping criterion. Algorithm stops if step |
//| size is less than WStep. Recommended |
//| value - 0.01. Zero step size means stopping |
//| after MaxIts iterations. |
//| MaxIts - stopping criterion. Algorithm stops after |
//| MaxIts iterations (NOT gradient calculations).|
//| Zero MaxIts means stopping when step is |
//| sufficiently small. |
//| OUTPUT PARAMETERS: |
//| Network - trained neural network. |
//| Info - return code: |
//| * -8, if both WStep=0 and MaxIts=0 |
//| * -2, if there is a point with class number |
//| outside of [0..NOut-1]. |
//| * -1, if wrong parameters specified |
//| (NPoints<0, Restarts<1). |
//| * 2, if task has been solved. |
//| Rep - training report |
//+------------------------------------------------------------------+
void CAlglib::MLPTrainLBFGS(CMultilayerPerceptronShell &network,
                            CMatrixDouble &xy,const int npoints,
                            const double decay,const int restarts,
                            const double wstep,int maxits,
                            int &info,CMLPReportShell &rep)
  {
//--- reset return code before the call (info is an output parameter)
   info=0;
//--- delegate L-BFGS training to the computational core
   CMLPTrain::MLPTrainLBFGS(network.GetInnerObj(),xy,npoints,decay,restarts,wstep,maxits,info,rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Neural network training using early stopping (base algorithm - |
//| L-BFGS with regularization). |
//| INPUT PARAMETERS: |
//| Network - neural network with initialized geometry |
//| TrnXY - training set |
//| TrnSize - training set size |
//| ValXY - validation set |
//| ValSize - validation set size |
//| Decay - weight decay constant, >=0.001 |
//| Decay term 'Decay*||Weights||^2' is added to |
//| error function. |
//| If you don't know what Decay to choose, use |
//| 0.001. |
//| Restarts - number of restarts from random position, >0. |
//| If you don't know what Restarts to choose, |
//| use 2. |
//| OUTPUT PARAMETERS: |
//| Network - trained neural network. |
//| Info - return code: |
//| * -2, if there is a point with class number |
//| outside of [0..NOut-1]. |
//| * -1, if wrong parameters specified |
//| (NPoints<0, Restarts<1, ...). |
//| * 2, task has been solved, stopping |
//| criterion met - sufficiently small |
//| step size. Not expected (we use EARLY |
//| stopping) but possible and not an error|
//| * 6, task has been solved, stopping |
//| criterion met - increasing of |
//| validation set error. |
//| Rep - training report |
//| NOTE: |
//| Algorithm stops if validation set error increases for long |
//| enough or step size is small enough (there are tasks where |
//| validation set error may decrease for eternity). In any case the |
//| returned solution corresponds to the minimum of validation set |
//| error. |
//+------------------------------------------------------------------+
void CAlglib::MLPTrainES(CMultilayerPerceptronShell &network,
                         CMatrixDouble &trnxy,const int trnsize,
                         CMatrixDouble &valxy,const int valsize,
                         const double decay,const int restarts,
                         int &info,CMLPReportShell &rep)
  {
//--- reset return code before the call (info is an output parameter)
   info=0;
//--- delegate early-stopping training (train + validation sets) to the core
   CMLPTrain::MLPTrainES(network.GetInnerObj(),trnxy,trnsize,valxy,valsize,decay,restarts,info,rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Cross-validation estimate of generalization error. |
//| Base algorithm - L-BFGS. |
//| INPUT PARAMETERS: |
//| Network - neural network with initialized geometry. |
//| Network is not changed during |
//| cross-validation - it is used only as a |
//| representative of its architecture. |
//| XY - training set. |
//| SSize - training set size |
//| Decay - weight decay, same as in MLPTrainLBFGS |
//| Restarts - number of restarts, >0. |
//| restarts are counted for each partition |
//| separately, so total number of restarts will |
//| be Restarts*FoldsCount. |
//| WStep - stopping criterion, same as in MLPTrainLBFGS |
//| MaxIts - stopping criterion, same as in MLPTrainLBFGS |
//| FoldsCount - number of folds in k-fold cross-validation, |
//| 2<=FoldsCount<=SSize. |
//| recommended value: 10. |
//| OUTPUT PARAMETERS: |
//| Info - return code, same as in MLPTrainLBFGS |
//| Rep - report, same as in MLPTrainLM/MLPTrainLBFGS |
//| CVRep - generalization error estimates |
//+------------------------------------------------------------------+
void CAlglib::MLPKFoldCVLBFGS(CMultilayerPerceptronShell &network,
                              CMatrixDouble &xy,const int npoints,
                              const double decay,const int restarts,
                              const double wstep,const int maxits,
                              const int foldscount,int &info,
                              CMLPReportShell &rep,CMLPCVReportShell &cvrep)
  {
//--- reset return code before the call (info is an output parameter)
   info=0;
//--- delegate k-fold cross-validation (L-BFGS base algorithm) to the core
   CMLPTrain::MLPKFoldCVLBFGS(network.GetInnerObj(),xy,npoints,decay,restarts,wstep,maxits,foldscount,info,rep.GetInnerObj(),cvrep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Cross-validation estimate of generalization error. |
//| Base algorithm - Levenberg-Marquardt. |
//| INPUT PARAMETERS: |
//| Network - neural network with initialized geometry. |
//| Network is not changed during |
//| cross-validation - it is used only as a |
//| representative of its architecture. |
//| XY - training set. |
//| SSize - training set size |
//| Decay - weight decay, same as in MLPTrainLBFGS |
//| Restarts - number of restarts, >0. |
//| restarts are counted for each partition |
//| separately, so total number of restarts will |
//| be Restarts*FoldsCount. |
//| FoldsCount - number of folds in k-fold cross-validation, |
//| 2<=FoldsCount<=SSize. |
//| recommended value: 10. |
//| OUTPUT PARAMETERS: |
//| Info - return code, same as in MLPTrainLBFGS |
//| Rep - report, same as in MLPTrainLM/MLPTrainLBFGS |
//| CVRep - generalization error estimates |
//+------------------------------------------------------------------+
void CAlglib::MLPKFoldCVLM(CMultilayerPerceptronShell &network,
CMatrixDouble &xy,const int npoints,
const double decay,const int restarts,
const int foldscount,int &info,
CMLPReportShell &rep,CMLPCVReportShell &cvrep)
{
//--- reset the output return code before delegating
info=0;
//--- delegate k-fold cross-validation (Levenberg-Marquardt base
//--- trainer) to the computational core
CMLPTrain::MLPKFoldCVLM(network.GetInnerObj(),xy,npoints,decay,restarts,foldscount,info,rep.GetInnerObj(),cvrep.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Creation of the network trainer object for regression networks |
//| INPUT PARAMETERS: |
//| NIn - number of inputs, NIn>=1 |
//| NOut - number of outputs, NOut>=1 |
//| OUTPUT PARAMETERS: |
//| S - neural network trainer object. |
//| This structure can be used to train any regression network with |
//| NIn inputs and NOut outputs. |
//+------------------------------------------------------------------+
void CAlglib::MLPCreateTrainer(int nin,int nout,CMLPTrainer &s)
{
//--- delegate trainer-object construction (regression networks) to the core
CMLPTrain::MLPCreateTrainer(nin,nout,s);
}
//+------------------------------------------------------------------+
//| Creation of the network trainer object for classification |
//| networks |
//| INPUT PARAMETERS: |
//| NIn - number of inputs, NIn>=1 |
//| NClasses - number of classes, NClasses>=2 |
//| OUTPUT PARAMETERS: |
//| S - neural network trainer object. |
//| This structure can be used to train any classification network |
//| with NIn inputs and NOut outputs. |
//+------------------------------------------------------------------+
void CAlglib::MLPCreateTrainerCls(int nin,int nclasses,CMLPTrainer &s)
{
//--- delegate trainer-object construction (classification networks) to the core
CMLPTrain::MLPCreateTrainerCls(nin,nclasses,s);
}
//+------------------------------------------------------------------+
//| This function sets "current dataset" of the trainer object to one|
//| passed by user. |
//| INPUT PARAMETERS: |
//| S - trainer object |
//| XY - training set, see below for information on the |
//| training set format. This function checks |
//| correctness of the dataset (no NANs/INFs, class |
//| numbers are correct) and throws exception when |
//| incorrect dataset is passed. |
//| NPoints - points count, >=0. |
//| DATASET FORMAT: |
//| This function uses two different dataset formats - one for |
//| regression networks, another one for classification networks. |
//| For regression networks with NIn inputs and NOut outputs |
//| following dataset format is used: |
//| * dataset is given by NPoints*(NIn+NOut) matrix |
//| * each row corresponds to one example |
//| * first NIn columns are inputs, next NOut columns are |
//| outputs |
//| For classification networks with NIn inputs and NClasses classes |
//| following dataset format is used: |
//| * dataset is given by NPoints*(NIn+1) matrix |
//| * each row corresponds to one example |
//| * first NIn columns are inputs, last column stores class |
//| number (from 0 to NClasses-1). |
//+------------------------------------------------------------------+
void CAlglib::MLPSetDataset(CMLPTrainer &s,CMatrixDouble &xy,int npoints)
{
//--- delegate dataset attachment (dense matrix) to the core;
//--- the core validates the dataset and throws on incorrect input
CMLPTrain::MLPSetDataset(s,xy,npoints);
}
//+------------------------------------------------------------------+
//| This function sets "current dataset" of the trainer object to one|
//| passed by user (sparse matrix is used to store dataset). |
//| INPUT PARAMETERS: |
//| S - trainer object |
//| XY - training set, see below for information on the |
//| training set format. This function checks |
//| correctness of the dataset (no NANs/INFs, class |
//| numbers are correct) and throws exception when |
//| incorrect dataset is passed. Any sparse storage |
//| format can be used: Hash-table, CRS... |
//| NPoints - points count, >=0 |
//| DATASET FORMAT: |
//| This function uses two different dataset formats - one for |
//| regression networks, another one for classification networks. |
//| For regression networks with NIn inputs and NOut outputs |
//| following dataset format is used: |
//| * dataset is given by NPoints*(NIn+NOut) matrix |
//| * each row corresponds to one example |
//| * first NIn columns are inputs, next NOut columns are outputs |
//| For classification networks with NIn inputs and NClasses classes |
//| following dataset format is used: |
//| * dataset is given by NPoints*(NIn+1) matrix |
//| * each row corresponds to one example |
//| * first NIn columns are inputs, last column stores class number|
//| (from 0 to NClasses-1). |
//+------------------------------------------------------------------+
void CAlglib::MLPSetSparseDataset(CMLPTrainer &s,CSparseMatrix &xy,int npoints)
{
//--- delegate dataset attachment (sparse matrix) to the core;
//--- the core validates the dataset and throws on incorrect input
CMLPTrain::MLPSetSparseDataset(s,xy,npoints);
}
//+------------------------------------------------------------------+
//| This function sets weight decay coefficient which is used for |
//| training. |
//| INPUT PARAMETERS: |
//| S - trainer object |
//| Decay - weight decay coefficient, >=0. Weight decay term |
//| 'Decay*||Weights||^2' is added to error function. |
//| If you don't know what Decay to choose, use 1.0E-3.|
//| Weight decay can be set to zero, in this case |
//| network is trained without weight decay. |
//| NOTE: by default network uses some small nonzero value for weight|
//| decay. |
//+------------------------------------------------------------------+
void CAlglib::MLPSetDecay(CMLPTrainer &s,double decay)
{
//--- delegate weight-decay configuration to the core
CMLPTrain::MLPSetDecay(s,decay);
}
//+------------------------------------------------------------------+
//| This function sets stopping criteria for the optimizer. |
//| INPUT PARAMETERS: |
//| S - trainer object |
//| WStep - stopping criterion. Algorithm stops if step size is|
//| less than WStep. Recommended value - 0.01. Zero |
//| step size means stopping after MaxIts iterations. |
//| WStep>=0. |
//| MaxIts - stopping criterion. Algorithm stops after MaxIts |
//| epochs (full passes over entire dataset). Zero |
//| MaxIts means stopping when step is sufficiently |
//| small. MaxIts>=0. |
//| NOTE: by default, WStep=0.005 and MaxIts=0 are used. These values|
//| are also used when MLPSetCond() is called with WStep=0 and |
//| MaxIts=0. |
//| NOTE: these stopping criteria are used for all kinds of neural |
//| training-from "conventional" networks to early stopping |
//| ensembles. When used for "conventional" networks, they are |
//| used as the only stopping criteria. When combined with |
//| early stopping, they used as ADDITIONAL stopping criteria |
//| which can terminate early stopping algorithm. |
//+------------------------------------------------------------------+
void CAlglib::MLPSetCond(CMLPTrainer &s,double wstep,int maxits)
{
//--- delegate stopping-criteria configuration to the core
CMLPTrain::MLPSetCond(s,wstep,maxits);
}
//+------------------------------------------------------------------+
//| This function sets training algorithm: batch training using |
//| L-BFGS will be used. |
//| This algorithm: |
//| * the most robust for small-scale problems, but may be too slow|
//| for large scale ones. |
//| * performs full pass through the dataset before performing step|
//| * uses conditions specified by MLPSetCond() for stopping |
//| * is default one used by trainer object |
//| INPUT PARAMETERS: |
//| S - trainer object |
//+------------------------------------------------------------------+
void CAlglib::MLPSetAlgoBatch(CMLPTrainer &s)
{
//--- delegate algorithm selection (batch L-BFGS) to the core
CMLPTrain::MLPSetAlgoBatch(s);
}
//+------------------------------------------------------------------+
//| This function trains neural network passed to this function, |
//| using current dataset (one which was passed to MLPSetDataset() |
//| or MLPSetSparseDataset()) and current training settings. Training|
//| from NRestarts random starting positions is performed, best |
//| network is chosen. |
//| Training is performed using current training algorithm. |
//| INPUT PARAMETERS: |
//| S - trainer object |
//| Network - neural network. It must have same number of inputs |
//| and output/classes as was specified during creation|
//| of the trainer object. |
//| NRestarts- number of restarts, >=0: |
//| * NRestarts>0 means that specified number of random|
//| restarts are performed, best network is chosen |
//| after training |
//| * NRestarts=0 means that current state of the |
//| network is used for training. |
//| OUTPUT PARAMETERS: |
//| Network - trained network |
//| NOTE: when no dataset was specified with MLPSetDataset / |
//| SetSparseDataset(), network is filled by zero values. Same |
//| behavior for functions MLPStartTraining and |
//| MLPContinueTraining. |
//| NOTE: this method uses sum-of-squares error function for training|
//+------------------------------------------------------------------+
void CAlglib::MLPTrainNetwork(CMLPTrainer &s,
CMultilayerPerceptronShell &network,
int nrestarts,CMLPReportShell &rep)
{
//--- unwrap the shell objects and delegate training to the core
CMLPTrain::MLPTrainNetwork(s,network.GetInnerObj(),nrestarts,rep.GetInnerObj())
;
}
//+------------------------------------------------------------------+
//| IMPORTANT: this is an "expert" version of the MLPTrain() function|
//| We do not recommend you to use it unless you are |
//| pretty sure that you need ability to monitor training |
//| progress. |
//| This function performs step-by-step training of the neural |
//| network. Here "step-by-step" means that training starts with |
//| MLPStartTraining() call, and then user subsequently calls |
//| MLPContinueTraining() to perform one more iteration of the |
//| training. |
//| After call to this function trainer object remembers network and |
//| is ready to train it. However, no training is performed until |
//| first call to MLPContinueTraining() function. Subsequent calls |
//| to MLPContinueTraining() will advance training progress one |
//| iteration further. |
//| EXAMPLE: |
//| > |
//| > ...initialize network and trainer object.... |
//| > |
//| > MLPStartTraining(Trainer, Network, True) |
//| > while MLPContinueTraining(Trainer, Network) do |
//| > ...visualize training progress... |
//| > |
//| INPUT PARAMETERS: |
//| S - trainer object |
//| Network - neural network. It must have same number of inputs |
//| and output/classes as was specified during creation|
//| of the trainer object. |
//| RandomStart - randomize network before training or not: |
//| * True means that network is randomized and its |
//| initial state (one which was passed to the |
//| trainer object) is lost. |
//| * False means that training is started from the |
//| current state of the network |
//| OUTPUT PARAMETERS: |
//| Network - neural network which is ready to training (weights |
//| are initialized, preprocessor is initialized using |
//| current training set) |
//| NOTE: this method uses sum-of-squares error function for training|
//| NOTE: it is expected that trainer object settings are NOT changed|
//| during step-by-step training, i.e. no one changes stopping |
//| criteria or training set during training. It is possible |
//| and there is no defense against such actions, but algorithm|
//| behavior in such cases is undefined and can be |
//| unpredictable. |
//+------------------------------------------------------------------+
void CAlglib::MLPStartTraining(CMLPTrainer &s,
CMultilayerPerceptronShell &network,
bool randomstart)
{
//--- unwrap the network shell and delegate step-by-step training setup
CMLPTrain::MLPStartTraining(s,network.GetInnerObj(),randomstart);
}
//+------------------------------------------------------------------+
//| IMPORTANT: this is an "expert" version of the MLPTrain() function|
//| We do not recommend you to use it unless you are |
//| pretty sure that you need ability to monitor training |
//| progress. |
//| This function performs step-by-step training of the neural |
//| network. Here "step-by-step" means that training starts with |
//| MLPStartTraining() call, and then user subsequently calls |
//| MLPContinueTraining() to perform one more iteration of the |
//| training. |
//| This function performs one more iteration of the training and |
//| returns either True (training continues) or False (training |
//| stopped). In case True was returned, Network weights are updated |
//| according to the current state of the optimization progress. |
//| In case False was returned, no additional updates is performed |
//| (previous update of the network weights moved us to the final |
//| point, and no additional updates is needed). |
//| EXAMPLE: |
//| > |
//| > [initialize network and trainer object] |
//| > |
//| > MLPStartTraining(Trainer, Network, True) |
//| > while MLPContinueTraining(Trainer, Network) do |
//| > [visualize training progress] |
//| > |
//| INPUT PARAMETERS: |
//| S - trainer object |
//| Network - neural network structure, which is used to store |
//| current state of the training process. |
//| OUTPUT PARAMETERS: |
//| Network - weights of the neural network are rewritten by the |
//| current approximation. |
//| NOTE: this method uses sum-of-squares error function for training|
//| NOTE: it is expected that trainer object settings are NOT changed|
//| during step-by-step training, i.e. no one changes stopping |
//| criteria or training set during training. It is possible |
//| and there is no defense against such actions, but algorithm|
//| behavior in such cases is undefined and can be |
//| unpredictable. |
//| NOTE: It is expected that Network is the same one which was |
//| passed to MLPStartTraining() function. However, THIS |
//| function checks only following: |
//| * that number of network inputs is consistent with |
//| trainer object settings |
//| * that number of network outputs / classes is consistent |
//| with trainer object settings |
//| * that number of network weights is the same as number of|
//| weights in the network passed to MLPStartTraining() |
//| function Exception is thrown when these conditions are |
//| violated. |
//| It is also expected that you do not change state of the network |
//| on your own - the only party who has right to change network |
//| during its training is a trainer object. Any attempt to interfere|
//| with trainer may lead to unpredictable results. |
//+------------------------------------------------------------------+
bool CAlglib::MLPContinueTraining(CMLPTrainer &s,
CMultilayerPerceptronShell &network)
{
//--- perform one more training iteration in the core;
//--- returns true while training should continue
return(CMLPTrain::MLPContinueTraining(s,network.GetInnerObj()));
}
//+------------------------------------------------------------------+
//| This function trains neural network ensemble passed to this |
//| function using current dataset and early stopping training |
//| algorithm. Each early stopping round performs NRestarts random |
//| restarts (thus, EnsembleSize*NRestarts training rounds is |
//| performed in total). |
//| INPUT PARAMETERS: |
//| S - trainer object; |
//| Ensemble - neural network ensemble. It must have same |
//| number of inputs and outputs/classes as was |
//| specified during creation of the trainer object.|
//| NRestarts - number of restarts, >=0: |
//| * NRestarts>0 means that specified number of |
//| random restarts are performed during each ES |
//| round; |
//| * NRestarts=0 is silently replaced by 1. |
//| OUTPUT PARAMETERS: |
//| Ensemble - trained ensemble; |
//| Rep - it contains all type of errors. |
//| NOTE: this training method uses BOTH early stopping and weight |
//| decay! So, you should select weight decay before starting |
//| training just as you select it before training |
//| "conventional" networks. |
//| NOTE: when no dataset was specified with MLPSetDataset / |
//| SetSparseDataset(), or single-point dataset was passed, |
//| ensemble is filled by zero values. |
//| NOTE: this method uses sum-of-squares error function for training|
//+------------------------------------------------------------------+
void CAlglib::MLPTrainEnsembleES(CMLPTrainer &s,
CMLPEnsembleShell &ensemble,
int nrestarts,
CMLPReportShell &rep)
{
//--- unwrap the shells and delegate early-stopping ensemble training
CMLPTrain::MLPTrainEnsembleES(s,ensemble.GetInnerObj(),nrestarts,rep.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Like MLPCreate0, but for ensembles. |
//+------------------------------------------------------------------+
void CAlglib::MLPECreate0(const int nin,const int nout,const int ensemblesize,
CMLPEnsembleShell &ensemble)
{
//--- delegate ensemble creation (no hidden layers) to the core
CMLPE::MLPECreate0(nin,nout,ensemblesize,ensemble.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Like MLPCreate1, but for ensembles. |
//+------------------------------------------------------------------+
void CAlglib::MLPECreate1(const int nin,const int nhid,const int nout,
const int ensemblesize,
CMLPEnsembleShell &ensemble)
{
//--- delegate ensemble creation (one hidden layer) to the core;
//--- 'nhid' is now const for consistency with the other
//--- MLPECreate* overloads (by-value const does not affect callers)
CMLPE::MLPECreate1(nin,nhid,nout,ensemblesize,ensemble.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Like MLPCreate2, but for ensembles. |
//+------------------------------------------------------------------+
void CAlglib::MLPECreate2(const int nin,const int nhid1,const int nhid2,
const int nout,const int ensemblesize,
CMLPEnsembleShell &ensemble)
{
//--- delegate ensemble creation (two hidden layers) to the core
CMLPE::MLPECreate2(nin,nhid1,nhid2,nout,ensemblesize,ensemble.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Like MLPCreateB0, but for ensembles. |
//+------------------------------------------------------------------+
void CAlglib::MLPECreateB0(const int nin,const int nout,const double b,
const double d,const int ensemblesize,
CMLPEnsembleShell &ensemble)
{
//--- delegate ensemble creation (bounded outputs, no hidden layers)
CMLPE::MLPECreateB0(nin,nout,b,d,ensemblesize,ensemble.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Like MLPCreateB1, but for ensembles. |
//+------------------------------------------------------------------+
void CAlglib::MLPECreateB1(const int nin,const int nhid,const int nout,
const double b,const double d,const int ensemblesize,
CMLPEnsembleShell &ensemble)
{
//--- delegate ensemble creation (bounded outputs, one hidden layer);
//--- 'nhid' is now const for consistency with the other
//--- MLPECreate* overloads (by-value const does not affect callers)
CMLPE::MLPECreateB1(nin,nhid,nout,b,d,ensemblesize,ensemble.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Like MLPCreateB2, but for ensembles. |
//+------------------------------------------------------------------+
void CAlglib::MLPECreateB2(const int nin,const int nhid1,const int nhid2,
const int nout,const double b,const double d,
const int ensemblesize,CMLPEnsembleShell &ensemble)
{
//--- delegate ensemble creation (bounded outputs, two hidden layers)
CMLPE::MLPECreateB2(nin,nhid1,nhid2,nout,b,d,ensemblesize,ensemble.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Like MLPCreateR0, but for ensembles. |
//+------------------------------------------------------------------+
void CAlglib::MLPECreateR0(const int nin,const int nout,const double a,
const double b,const int ensemblesize,
CMLPEnsembleShell &ensemble)
{
//--- delegate ensemble creation (range-constrained outputs, no hidden layers)
CMLPE::MLPECreateR0(nin,nout,a,b,ensemblesize,ensemble.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Like MLPCreateR1, but for ensembles. |
//+------------------------------------------------------------------+
void CAlglib::MLPECreateR1(const int nin,const int nhid,const int nout,
const double a,const double b,const int ensemblesize,
CMLPEnsembleShell &ensemble)
{
//--- delegate ensemble creation (range-constrained outputs, one hidden
//--- layer); 'nhid' is now const for consistency with the other
//--- MLPECreate* overloads (by-value const does not affect callers)
CMLPE::MLPECreateR1(nin,nhid,nout,a,b,ensemblesize,ensemble.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Like MLPCreateR2, but for ensembles. |
//+------------------------------------------------------------------+
void CAlglib::MLPECreateR2(const int nin,const int nhid1,const int nhid2,
const int nout,const double a,const double b,
const int ensemblesize,CMLPEnsembleShell &ensemble)
{
//--- delegate ensemble creation (range-constrained outputs, two hidden layers)
CMLPE::MLPECreateR2(nin,nhid1,nhid2,nout,a,b,ensemblesize,ensemble.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Like MLPCreateC0, but for ensembles. |
//+------------------------------------------------------------------+
void CAlglib::MLPECreateC0(const int nin,const int nout,
const int ensemblesize,CMLPEnsembleShell &ensemble)
{
//--- delegate classifier-ensemble creation (no hidden layers) to the core
CMLPE::MLPECreateC0(nin,nout,ensemblesize,ensemble.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Like MLPCreateC1, but for ensembles. |
//+------------------------------------------------------------------+
void CAlglib::MLPECreateC1(const int nin,const int nhid,const int nout,
const int ensemblesize,CMLPEnsembleShell &ensemble)
{
//--- delegate classifier-ensemble creation (one hidden layer);
//--- 'nhid' is now const for consistency with the other
//--- MLPECreate* overloads (by-value const does not affect callers)
CMLPE::MLPECreateC1(nin,nhid,nout,ensemblesize,ensemble.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Like MLPCreateC2, but for ensembles. |
//+------------------------------------------------------------------+
void CAlglib::MLPECreateC2(const int nin,const int nhid1,const int nhid2,
const int nout,const int ensemblesize,
CMLPEnsembleShell &ensemble)
{
//--- delegate classifier-ensemble creation (two hidden layers) to the core
CMLPE::MLPECreateC2(nin,nhid1,nhid2,nout,ensemblesize,ensemble.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Creates ensemble from network. Only network geometry is copied. |
//+------------------------------------------------------------------+
void CAlglib::MLPECreateFromNetwork(CMultilayerPerceptronShell &network,
const int ensemblesize,
CMLPEnsembleShell &ensemble)
{
//--- delegate to the core; only the network geometry is copied
CMLPE::MLPECreateFromNetwork(network.GetInnerObj(),ensemblesize,ensemble.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Randomization of MLP ensemble |
//+------------------------------------------------------------------+
void CAlglib::MLPERandomize(CMLPEnsembleShell &ensemble)
{
//--- delegate weight randomization of the whole ensemble to the core
CMLPE::MLPERandomize(ensemble.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Return ensemble properties (number of inputs and outputs). |
//+------------------------------------------------------------------+
void CAlglib::MLPEProperties(CMLPEnsembleShell &ensemble,
int &nin,int &nout)
{
//--- reset both output parameters before delegating
nin=0;
nout=0;
//--- query input/output counts from the core implementation
CMLPE::MLPEProperties(ensemble.GetInnerObj(),nin,nout);
}
//+------------------------------------------------------------------+
//| Return normalization type (whether ensemble is SOFTMAX-normalized|
//| or not). |
//+------------------------------------------------------------------+
bool CAlglib::MLPEIsSoftMax(CMLPEnsembleShell &ensemble)
{
//--- query normalization type (SOFTMAX or not) from the core
return(CMLPE::MLPEIsSoftMax(ensemble.GetInnerObj()));
}
//+------------------------------------------------------------------+
//| Processing |
//| INPUT PARAMETERS: |
//| Ensemble- neural networks ensemble |
//| X - input vector, array[0..NIn-1]. |
//| Y - (possibly) preallocated buffer; if size of Y is |
//| less than NOut, it will be reallocated. If it is |
//| large enough, it is NOT reallocated, so we can |
//| save some time on reallocation. |
//| OUTPUT PARAMETERS: |
//| Y - result. Regression estimate when solving |
//| regression task, vector of posterior |
//| probabilities for classification task. |
//+------------------------------------------------------------------+
void CAlglib::MLPEProcess(CMLPEnsembleShell &ensemble,
double &x[],double &y[])
{
//--- delegate forward processing of input vector x into output y
CMLPE::MLPEProcess(ensemble.GetInnerObj(),x,y);
}
//+------------------------------------------------------------------+
//| 'interactive' variant of MLPEProcess for languages like Python |
//| which support constructs like "Y = MLPEProcess(LM,X)" and |
//| interactive mode of the interpreter |
//| This function allocates new array on each call, so it is |
//| significantly slower than its 'non-interactive' counterpart, but |
//| it is more convenient when you call it from command line. |
//+------------------------------------------------------------------+
void CAlglib::MLPEProcessI(CMLPEnsembleShell &ensemble,
double &x[],double &y[])
{
//--- 'interactive' variant: the core reallocates y on each call
CMLPE::MLPEProcessI(ensemble.GetInnerObj(),x,y);
}
//+------------------------------------------------------------------+
//| Relative classification error on the test set |
//| INPUT PARAMETERS: |
//| Ensemble- ensemble |
//| XY - test set |
//| NPoints - test set size |
//| RESULT: |
//| percent of incorrectly classified cases. |
//| Works both for classifier network and for regression networks|
//| which are used as classifiers. |
//+------------------------------------------------------------------+
double CAlglib::MLPERelClsError(CMLPEnsembleShell &ensemble,
CMatrixDouble &xy,const int npoints)
{
//--- delegate relative classification error computation to the core
return(CMLPE::MLPERelClsError(ensemble.GetInnerObj(),xy,npoints));
}
//+------------------------------------------------------------------+
//| Average cross-entropy (in bits per element) on the test set |
//| INPUT PARAMETERS: |
//| Ensemble- ensemble |
//| XY - test set |
//| NPoints - test set size |
//| RESULT: |
//| CrossEntropy/(NPoints*LN(2)). |
//| Zero if ensemble solves regression task. |
//+------------------------------------------------------------------+
double CAlglib::MLPEAvgCE(CMLPEnsembleShell &ensemble,
CMatrixDouble &xy,const int npoints)
{
//--- delegate average cross-entropy computation to the core
return(CMLPE::MLPEAvgCE(ensemble.GetInnerObj(),xy,npoints));
}
//+------------------------------------------------------------------+
//| RMS error on the test set |
//| INPUT PARAMETERS: |
//| Ensemble- ensemble |
//| XY - test set |
//| NPoints - test set size |
//| RESULT: |
//| root mean square error. |
//| Its meaning for regression task is obvious. As for |
//| classification task RMS error means error when estimating |
//| posterior probabilities. |
//+------------------------------------------------------------------+
double CAlglib::MLPERMSError(CMLPEnsembleShell &ensemble,
CMatrixDouble &xy,const int npoints)
{
//--- delegate RMS error computation to the core
return(CMLPE::MLPERMSError(ensemble.GetInnerObj(),xy,npoints));
}
//+------------------------------------------------------------------+
//| Average error on the test set |
//| INPUT PARAMETERS: |
//| Ensemble- ensemble |
//| XY - test set |
//| NPoints - test set size |
//| RESULT: |
//| Its meaning for regression task is obvious. As for |
//| classification task it means average error when estimating |
//| posterior probabilities. |
//+------------------------------------------------------------------+
double CAlglib::MLPEAvgError(CMLPEnsembleShell &ensemble,
CMatrixDouble &xy,const int npoints)
{
//--- delegate average error computation to the core
return(CMLPE::MLPEAvgError(ensemble.GetInnerObj(),xy,npoints));
}
//+------------------------------------------------------------------+
//| Average relative error on the test set |
//| INPUT PARAMETERS: |
//| Ensemble- ensemble |
//| XY - test set |
//| NPoints - test set size |
//| RESULT: |
//| Its meaning for regression task is obvious. As for |
//| classification task it means average relative error when |
//| estimating posterior probabilities. |
//+------------------------------------------------------------------+
double CAlglib::MLPEAvgRelError(CMLPEnsembleShell &ensemble,
CMatrixDouble &xy,const int npoints)
{
//--- delegate average relative error computation to the core
return(CMLPE::MLPEAvgRelError(ensemble.GetInnerObj(),xy,npoints));
}
//+------------------------------------------------------------------+
//| Training neural networks ensemble using bootstrap aggregating |
//| (bagging). |
//| Modified Levenberg-Marquardt algorithm is used as base training |
//| method. |
//| INPUT PARAMETERS: |
//| Ensemble - model with initialized geometry |
//| XY - training set |
//| NPoints - training set size |
//| Decay - weight decay coefficient, >=0.001 |
//| Restarts - restarts, >0. |
//| OUTPUT PARAMETERS: |
//| Ensemble - trained model |
//| Info - return code: |
//| * -2, if there is a point with class number |
//| outside of [0..NClasses-1]. |
//| * -1, if incorrect parameters was passed |
//| (NPoints<0, Restarts<1). |
//| * 2, if task has been solved. |
//| Rep - training report. |
//| OOBErrors - out-of-bag generalization error estimate |
//+------------------------------------------------------------------+
void CAlglib::MLPEBaggingLM(CMLPEnsembleShell &ensemble,CMatrixDouble &xy,
const int npoints,const double decay,
const int restarts,int &info,
CMLPReportShell &rep,CMLPCVReportShell &ooberrors)
{
//--- reset the output return code before delegating
info=0;
//--- delegate bagging training (Levenberg-Marquardt base method) to the core
CMLPTrain::MLPEBaggingLM(ensemble.GetInnerObj(),xy,npoints,decay,restarts,info,rep.GetInnerObj(),ooberrors.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Training neural networks ensemble using bootstrap aggregating |
//| (bagging). L-BFGS algorithm is used as base training method. |
//| INPUT PARAMETERS: |
//| Ensemble - model with initialized geometry |
//| XY - training set |
//| NPoints - training set size |
//| Decay - weight decay coefficient, >=0.001 |
//| Restarts - restarts, >0. |
//| WStep - stopping criterion, same as in MLPTrainLBFGS |
//| MaxIts - stopping criterion, same as in MLPTrainLBFGS |
//| OUTPUT PARAMETERS: |
//| Ensemble - trained model |
//| Info - return code: |
//| * -8, if both WStep=0 and MaxIts=0 |
//| * -2, if there is a point with class number |
//| outside of [0..NClasses-1]. |
//| * -1, if incorrect parameters was passed |
//| (NPoints<0, Restarts<1). |
//| * 2, if task has been solved. |
//| Rep - training report. |
//| OOBErrors - out-of-bag generalization error estimate |
//+------------------------------------------------------------------+
void CAlglib::MLPEBaggingLBFGS(CMLPEnsembleShell &ensemble,CMatrixDouble &xy,
const int npoints,const double decay,
const int restarts,const double wstep,
const int maxits,int &info,
CMLPReportShell &rep,
CMLPCVReportShell &ooberrors)
{
//--- reset the output return code before delegating
info=0;
//--- delegate bagging training (L-BFGS base method) to the core
CMLPTrain::MLPEBaggingLBFGS(ensemble.GetInnerObj(),xy,npoints,decay,restarts,wstep,maxits,info,rep.GetInnerObj(),ooberrors.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Training neural networks ensemble using early stopping. |
//| INPUT PARAMETERS: |
//| Ensemble - model with initialized geometry |
//| XY - training set |
//| NPoints - training set size |
//| Decay - weight decay coefficient, >=0.001 |
//| Restarts - restarts, >0. |
//| OUTPUT PARAMETERS: |
//| Ensemble - trained model |
//| Info - return code: |
//| * -2, if there is a point with class number |
//| outside of [0..NClasses-1]. |
//| * -1, if incorrect parameters was passed |
//| (NPoints<0, Restarts<1). |
//| * 6, if task has been solved. |
//| Rep - training report. |
//| OOBErrors - out-of-bag generalization error estimate |
//+------------------------------------------------------------------+
void CAlglib::MLPETrainES(CMLPEnsembleShell &ensemble,CMatrixDouble &xy,
const int npoints,const double decay,
const int restarts,int &info,
CMLPReportShell &rep)
{
//--- reset the output return code before delegating
info=0;
//--- delegate early-stopping ensemble training to the core
CMLPTrain::MLPETrainES(ensemble.GetInnerObj(),xy,npoints,decay,restarts,info,rep.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Principal components analysis |
//| Subroutine builds orthogonal basis where first axis corresponds |
//| to direction with maximum variance, second axis maximizes |
//| variance in subspace orthogonal to first axis and so on. |
//| It should be noted that, unlike LDA, PCA does not use class |
//| labels. |
//| INPUT PARAMETERS: |
//| X - dataset, array[0..NPoints-1,0..NVars-1]. |
//| matrix contains ONLY INDEPENDENT VARIABLES. |
//| NPoints - dataset size, NPoints>=0 |
//| NVars - number of independent variables, NVars>=1 |
//| OUTPUT PARAMETERS: |
//| Info - return code: |
//| * -4, if SVD subroutine haven't converged |
//| * -1, if wrong parameters has been passed |
//| (NPoints<0, NVars<1) |
//| * 1, if task is solved |
//| S2 - array[0..NVars-1]. variance values |
//| corresponding to basis vectors. |
//| V - array[0..NVars-1,0..NVars-1] |
//| matrix, whose columns store basis vectors. |
//+------------------------------------------------------------------+
void CAlglib::PCABuildBasis(CMatrixDouble &x,const int npoints,
const int nvars,int &info,double &s2[],
CMatrixDouble &v)
{
//--- wrapper (dynamic-array overload for S2): full PCA basis via SVD
//--- reset the out-only status code before the call
info=0;
//--- function call (delegates to CPCAnalysis)
CPCAnalysis::PCABuildBasis(x,npoints,nvars,info,s2,v);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::PCABuildBasis(CMatrixDouble &x,const int npoints,
const int nvars,int &info,CRowDouble &s2,
CMatrixDouble &v)
{
//--- wrapper (CRowDouble overload for S2): full PCA basis via SVD
//--- reset the out-only status code before the call
info=0;
//--- function call (delegates to CPCAnalysis)
CPCAnalysis::PCABuildBasis(x,npoints,nvars,info,s2,v);
}
//+------------------------------------------------------------------+
//| Principal components analysis |
//| This function performs truncated PCA, i.e. returns just a few |
//| most important directions. |
//| Internally it uses iterative eigensolver which is very efficient |
//| when only a minor fraction of full basis is required. Thus, if |
//| you need full basis, it is better to use pcabuildbasis() function|
//| It should be noted that, unlike LDA, PCA does not use class |
//| labels. |
//| INPUT PARAMETERS: |
//| X - dataset, array[0..NPoints-1,0..NVars-1] matrix |
//| contains ONLY INDEPENDENT VARIABLES. |
//| NPoints - dataset size, NPoints>=0 |
//| NVars - number of independent variables, NVars>=1 |
//| NNeeded - number of requested components, in [1,NVars] range;|
//| this function is efficient only for NNeeded<<NVars.|
//| Eps - desired precision of vectors returned; underlying |
//| solver will stop iterations as soon as absolute |
//| error in corresponding singular values reduces to |
//| roughly eps*MAX(lambda[]), with lambda[] being |
//| array of eigen values. |
//| Zero value means that algorithm performs number of |
//| iterations specified by maxits parameter, without |
//| paying attention to precision. |
//| MaxIts - number of iterations performed by subspace |
//| iteration method. Zero value means that no limit on|
//| iteration count is placed (eps-based stopping |
//| condition is used). |
//| OUTPUT PARAMETERS: |
//| S2 - array[NNeeded]. Variance values corresponding to |
//| basis vectors. |
//| V - array[NVars,NNeeded] matrix, whose columns store |
//| basis vectors. |
//| NOTE: passing eps=0 and maxits=0 results in small eps being |
//| selected as stopping condition. Exact value of automatically|
//| selected eps is version-dependent. |
//+------------------------------------------------------------------+
void CAlglib::PCATruncatedSubspace(CMatrixDouble &x,int npoints,int nvars,
int nneeded,double eps,int maxits,
CRowDouble &s2,CMatrixDouble &v)
{
//--- wrapper: truncated PCA (only NNeeded leading components)
//--- clear the output containers so stale contents never leak through
s2.Resize(0);
v.Resize(0,0);
//--- function call (delegates to CPCAnalysis)
CPCAnalysis::PCATruncatedSubSpace(x,npoints,nvars,nneeded,eps,maxits,s2,v);
}
//+------------------------------------------------------------------+
//| Sparse truncated principal components analysis |
//| This function performs sparse truncated PCA, i.e. returns just a |
//| few most important principal components for a sparse input X. |
//| Internally it uses iterative eigensolver which is very efficient |
//| when only a minor fraction of full basis is required. |
//| It should be noted that, unlike LDA, PCA does not use class |
//| labels. |
//| INPUT PARAMETERS: |
//| X - sparse dataset, sparse npoints*nvars matrix. It is |
//| recommended to use CRS sparse storage format; |
//| non-CRS input will be internally converted to CRS. |
//| Matrix contains ONLY INDEPENDENT VARIABLES, and |
//| must be EXACTLY npoints*nvars. |
//| NPoints - dataset size, NPoints>=0 |
//| NVars - number of independent variables, NVars>=1 |
//| NNeeded - number of requested components, in [1,NVars] range;|
//| this function is efficient only for NNeeded<<NVars.|
//| Eps - desired precision of vectors returned; underlying |
//| solver will stop iterations as soon as absolute |
//| error in corresponding singular values reduces to |
//| roughly eps*MAX(lambda[]), with lambda[] being |
//| array of eigen values. |
//| Zero value means that algorithm performs number of |
//| iterations specified by maxits parameter, without |
//| paying attention to precision. |
//| MaxIts - number of iterations performed by subspace |
//| iteration method. Zero value means that no limit on|
//| iteration count is placed (eps-based stopping |
//| condition is used). |
//| OUTPUT PARAMETERS: |
//| S2 - array[NNeeded]. Variance values corresponding to |
//| basis vectors. |
//| V - array[NVars,NNeeded] matrix, whose columns store |
//| basis vectors. |
//| NOTE: passing eps=0 and maxits=0 results in small eps being |
//| selected as a stopping condition. Exact value of |
//| automatically selected eps is version-dependent. |
//| NOTE: zero MaxIts is silently replaced by some reasonable value |
//| which prevents eternal loops (possible when inputs are |
//| degenerate and too stringent stopping criteria are |
//| specified). In current version it is 50+2*NVars. |
//+------------------------------------------------------------------+
void CAlglib::PCATruncatedSubspaceSparse(CSparseMatrix &x,int npoints,
int nvars,int nneeded,double eps,
int maxits,CRowDouble &s2,
CMatrixDouble &v)
{
//--- wrapper: truncated PCA for sparse input X (non-CRS input is
//--- converted internally by the core routine)
//--- clear the output containers so stale contents never leak through
s2.Resize(0);
v.Resize(0,0);
//--- function call (delegates to CPCAnalysis)
CPCAnalysis::PCATruncatedSubSpaceSparse(x,npoints,nvars,nneeded,eps,maxits,s2,v);
}
//+------------------------------------------------------------------+
//| Cash-Karp adaptive ODE solver. |
//| This subroutine solves ODE Y'=f(Y,x) with initial conditions |
//| Y(xs)=Ys (here Y may be single variable or vector of N variables)|
//| INPUT PARAMETERS: |
//| Y - initial conditions, array[0..N-1]. |
//| contains values of Y[] at X[0] |
//| N - system size |
//| X - points at which Y should be tabulated, |
//| array[0..M-1] integrations starts at X[0], ends |
//| at X[M-1], intermediate values at X[i] are |
//| returned too. |
//| SHOULD BE ORDERED BY ASCENDING OR BY DESCENDING!!|
//| M - number of intermediate points + first point + |
//| last point: |
//| * M>2 means that you need both Y(X[M-1]) and M-2 |
//| values at intermediate points |
//| * M=2 means that you want just to integrate from |
//| X[0] to X[1] and don't interested in |
//| intermediate values. |
//| * M=1 means that you don't want to integrate :) |
//| it is degenerate case, but it will be handled |
//| correctly. |
//| * M<1 means error |
//| Eps - tolerance (absolute/relative error on each step |
//| will be less than Eps). When passing: |
//| * Eps>0, it means desired ABSOLUTE error |
//| * Eps<0, it means desired RELATIVE error. |
//| Relative errors are calculated with respect to |
//| maximum values of Y seen so far. Be careful to |
//| use this criterion when starting from Y[] that |
//| are close to zero. |
//| H - initial step lenth, it will be adjusted |
//| automatically after the first step. If H=0, step |
//| will be selected automatically (usualy it will |
//| be equal to 0.001 of min(x[i]-x[j])). |
//| OUTPUT PARAMETERS |
//| State - structure which stores algorithm state between |
//| subsequent calls of OdeSolverIteration. Used |
//| for reverse communication. This structure should |
//| be passed to the OdeSolverIteration subroutine. |
//| SEE ALSO |
//| AutoGKSmoothW, AutoGKSingular, AutoGKIteration, AutoGKResults|
//+------------------------------------------------------------------+
void CAlglib::ODESolverRKCK(double &y[],const int n,double &x[],
const int m,const double eps,const double h,
CODESolverStateShell &state)
{
//--- wrapper: initializes the Cash-Karp adaptive ODE solver state;
//--- N and M are supplied explicitly by the caller in this overload
CODESolver::ODESolverRKCK(y,n,x,m,eps,h,state.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Cash-Karp adaptive ODE solver. |
//| This subroutine solves ODE Y'=f(Y,x) with initial conditions |
//| Y(xs)=Ys (here Y may be single variable or vector of N variables)|
//| INPUT PARAMETERS: |
//| Y - initial conditions, array[0..N-1]. |
//| contains values of Y[] at X[0] |
//| N - system size |
//| X - points at which Y should be tabulated, |
//| array[0..M-1] integrations starts at X[0], ends |
//| at X[M-1], intermediate values at X[i] are |
//| returned too. |
//| SHOULD BE ORDERED BY ASCENDING OR BY DESCENDING!!|
//| M - number of intermediate points + first point + |
//| last point: |
//| * M>2 means that you need both Y(X[M-1]) and M-2 |
//| values at intermediate points |
//| * M=2 means that you want just to integrate from |
//| X[0] to X[1] and don't interested in |
//| intermediate values. |
//| * M=1 means that you don't want to integrate :) |
//| it is degenerate case, but it will be handled |
//| correctly. |
//| * M<1 means error |
//| Eps - tolerance (absolute/relative error on each step |
//| will be less than Eps). When passing: |
//| * Eps>0, it means desired ABSOLUTE error |
//| * Eps<0, it means desired RELATIVE error. |
//| Relative errors are calculated with respect to |
//| maximum values of Y seen so far. Be careful to |
//| use this criterion when starting from Y[] that |
//| are close to zero. |
//| H - initial step lenth, it will be adjusted |
//| automatically after the first step. If H=0, step |
//| will be selected automatically (usualy it will |
//| be equal to 0.001 of min(x[i]-x[j])). |
//| OUTPUT PARAMETERS |
//| State - structure which stores algorithm state between |
//| subsequent calls of OdeSolverIteration. Used |
//| for reverse communication. This structure should |
//| be passed to the OdeSolverIteration subroutine. |
//| SEE ALSO |
//| AutoGKSmoothW, AutoGKSingular, AutoGKIteration, AutoGKResults|
//+------------------------------------------------------------------+
void CAlglib::ODESolverRKCK(double &y[],double &x[],const double eps,
const double h,CODESolverStateShell &state)
{
//--- convenience overload: system size N and tabulation-grid size M
//--- are deduced from the lengths of Y[] and X[] respectively
CODESolver::ODESolverRKCK(y,CAp::Len(y),x,CAp::Len(x),eps,h,state.GetInnerObj());
}
//+------------------------------------------------------------------+
//| This function provides reverse communication interface |
//| Reverse communication interface is not documented or recommended |
//| to use. |
//| See below for functions which provide better documented API |
//+------------------------------------------------------------------+
bool CAlglib::ODESolverIteration(CODESolverStateShell &state)
{
//--- advances the reverse-communication solver one step; returns true
//--- while the solver still needs the caller to evaluate dy/dx
return(CODESolver::ODESolverIteration(state.GetInnerObj()));
}
//+------------------------------------------------------------------+
//| This function is used to launch iterations of ODE solver         |
//| It accepts following parameters: |
//| diff - callback which calculates dy/dx for given y and x|
//| obj - optional object which is passed to diff; can be |
//| NULL |
//+------------------------------------------------------------------+
void CAlglib::ODESolverSolve(CODESolverStateShell &state,
CNDimensional_ODE_RP &diff,
CObject &obj)
{
//--- drive the reverse-communication loop until the solver finishes
while(CAlglib::ODESolverIteration(state))
{
//--- guard: the only request this solver issues is a dy/dx evaluation;
//--- anything else indicates an internal error, so report and stop
if(!state.GetNeedDY())
{
Print("ALGLIB: unexpected error in 'odesolversolve'");
CAp::exception_happened=true;
break;
}
//--- evaluate the user-supplied right-hand side dy/dx at (y,x)
diff.ODE_RP(state.GetInnerObj().m_y,state.GetInnerObj().m_x,state.GetInnerObj().m_dy,obj);
}
}
//+------------------------------------------------------------------+
//| ODE solver results |
//| Called after OdeSolverIteration returned False. |
//| INPUT PARAMETERS: |
//| State - algorithm state (used by OdeSolverIteration). |
//| OUTPUT PARAMETERS: |
//| M - number of tabulated values, M>=1 |
//| XTbl - array[0..M-1], values of X |
//| YTbl - array[0..M-1,0..N-1], values of Y in X[i] |
//| Rep - solver report: |
//| * Rep.TerminationType completetion code: |
//| * -2 X is not ordered by |
//| ascending/descending or there are |
//| non-distinct X[], i.e. X[i]=X[i+1] |
//| * -1 incorrect parameters were specified |
//| * 1 task has been solved |
//| * Rep.NFEV contains number of function |
//| calculations |
//+------------------------------------------------------------------+
void CAlglib::ODESolverResults(CODESolverStateShell &state,int &m,
double &xtbl[],CMatrixDouble &ytbl,
CODESolverReportShell &rep)
{
//--- wrapper: extracts tabulated solution and report after the
//--- reverse-communication loop has finished
//--- reset the out-only tabulated-points count before the call
m=0;
//--- function call (delegates to CODESolver)
CODESolver::ODESolverResults(state.GetInnerObj(),m,xtbl,ytbl,rep.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Filters: simple moving averages (unsymmetric). |
//| This filter replaces array by results of SMA(K) filter. SMA(K) |
//| is defined as filter which averages at most K previous points |
//| (previous - not points AROUND central point) - or less, in case |
//| of the first K-1 points. |
//| INPUT PARAMETERS: |
//| X - array[N], array to process. It can be larger |
//| than N, in this case only first N points are |
//| processed. |
//| N - points count, N>=0 |
//| K - K>=1 (K can be larger than N, such cases will |
//| be correctly handled). Window width. K=1 |
//| corresponds to identity transformation (nothing |
//| changes). |
//| OUTPUT PARAMETERS: |
//| X - array, whose first N elements were processed |
//| with SMA(K) |
//| NOTE 1: this function uses efficient in-place algorithm which |
//| does not allocate temporary arrays. |
//| NOTE 2: this algorithm makes only one pass through array and |
//| uses running sum to speed-up calculation of the averages.|
//| Additional measures are taken to ensure that running sum |
//| on a long sequence of zero elements will be correctly |
//| reset to zero even in the presence of round-off error. |
//| NOTE 3: this is unsymmetric version of the algorithm, which does |
//| NOT averages points after the current one. Only |
//| X[i], X[i-1], ... are used when calculating new value |
//| of X[i]. We should also note that this algorithm uses |
//| BOTH previous points and current one, i.e. new value |
//| of X[i] depends on BOTH previous point and X[i] itself. |
//+------------------------------------------------------------------+
void CAlglib::FilterSMA(CRowDouble &x,int n,int k)
{
//--- wrapper: in-place simple moving average over the first N elements
//--- of X with window width K (delegates to CFilters)
CFilters::FilterSMA(x,n,k);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::FilterSMA(CRowDouble &x,int k)
{
//--- convenience overload: the point count N is deduced from the
//--- length of X, then the filter is applied in place
CFilters::FilterSMA(x,CAp::Len(x),k);
}
//+------------------------------------------------------------------+
//| Filters: exponential moving averages. |
//| This filter replaces array by results of EMA(alpha) filter. |
//| EMA(alpha) is defined as filter which replaces X[] by S[]: |
//| S[0] = X[0] |
//| S[t] = alpha * X[t] + (1 - alpha) * S[t - 1] |
//| INPUT PARAMETERS: |
//| X - array[N], array to process. It can be larger |
//| than N, in this case only first N points are |
//| processed. |
//| N - points count, N >= 0 |
//| alpha - 0 < alpha <= 1, smoothing parameter. |
//| OUTPUT PARAMETERS: |
//| X - array, whose first N elements were processed |
//| with EMA(alpha) |
//| NOTE 1: this function uses efficient in-place algorithm which |
//| does not allocate temporary arrays. |
//| NOTE 2: this algorithm uses BOTH previous points and current one,|
//| i.e. new value of X[i] depends on BOTH previous point and|
//| X[i] itself. |
//| NOTE 3: technical analysis users quite often work with EMA       |
//| coefficient expressed in DAYS instead of fractions. If |
//| you want to calculate EMA(N), where N is a number of |
//| days, you can use alpha = 2 / (N + 1). |
//+------------------------------------------------------------------+
void CAlglib::FilterEMA(CRowDouble &x,int n,double alpha)
{
//--- wrapper: in-place exponential moving average over the first N
//--- elements of X with smoothing parameter alpha (delegates to CFilters)
CFilters::FilterEMA(x,n,alpha);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::FilterEMA(CRowDouble &x,double alpha)
{
//--- convenience overload: the point count N is deduced from the
//--- length of X, then the filter is applied in place
CFilters::FilterEMA(x,CAp::Len(x),alpha);
}
//+------------------------------------------------------------------+
//| Filters: linear regression moving averages. |
//| This filter replaces array by results of LRMA(K) filter. |
//| LRMA(K) is defined as filter which, for each data point, builds |
//| linear regression model using K prevous points (point itself is |
//| included in these K points) and calculates value of this linear |
//| model at the point in question. |
//| INPUT PARAMETERS: |
//| X - array[N], array to process. It can be larger |
//| than N, in this case only first N points are |
//| processed. |
//| N - points count, N >= 0 |
//| K - K >= 1(K can be larger than N, such cases will |
//| be correctly handled). Window width. K = 1 |
//| corresponds to identity transformation(nothing |
//| changes). |
//| OUTPUT PARAMETERS: |
//| X - array, whose first N elements were processed |
//| with SMA(K) |
//| NOTE 1: this function uses efficient in-place algorithm which |
//| does not allocate temporary arrays. |
//| NOTE 2: this algorithm makes only one pass through array and |
//| uses running sum to speed-up calculation of the averages.|
//| Additional measures are taken to ensure that running sum |
//| on a long sequence of zero elements will be correctly |
//| reset to zero even in the presence of round - off error. |
//| NOTE 3: this is unsymmetric version of the algorithm, which does |
//| NOT averages points after the current one. Only |
//| X[i], X[i - 1], ... are used when calculating new value |
//| of X[i]. We should also note that this algorithm uses |
//| BOTH previous points and current one, i.e. new value of |
//| X[i] depends on BOTH previous point and X[i] itself. |
//+------------------------------------------------------------------+
void CAlglib::FilterLRMA(CRowDouble &x,int n,int k)
{
//--- wrapper: in-place linear-regression moving average over the first
//--- N elements of X with window width K (delegates to CFilters)
CFilters::FilterLRMA(x,n,k);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::FilterLRMA(CRowDouble &x,int k)
{
//--- convenience overload: the point count N is deduced from the
//--- length of X, then the filter is applied in place
CFilters::FilterLRMA(x,CAp::Len(x),k);
}
//+------------------------------------------------------------------+
//| This function creates SSA model object. Right after creation |
//| model is in "dummy" mode - you can add data, but analyzing / |
//| prediction will return just zeros (it assumes that basis is |
//| empty). |
//| HOW TO USE SSA MODEL: |
//| 1. create model with SSACreate() |
//| 2. add data with one/many SSAAddSequence() calls |
//| 3. choose SSA algorithm with one of SSASetAlgo...() functions: |
//| * SSASetAlgoTopKDirect() for direct one-run analysis |
//| * SSASetAlgoTopKRealtime() for algorithm optimized for many |
//| subsequent runs with warm-start capabilities |
//| * SSASetAlgoPrecomputed() for user-supplied basis |
//| 4. set window width with SSASetWindow() |
//| 5. perform one of the analysis-related activities: |
//| a) call SSAGetBasis() to get basis |
//| b) call SSAAnalyzeLast() SSAAnalyzeSequence() or |
//| SSAAnalyzeLastWindow() to perform analysis (trend/noise |
//| separation) |
//| c) call one of the forecasting functions (SSAForecastLast() |
//| or SSAForecastSequence()) to perform prediction; |
//| alternatively, you can extract linear recurrence |
//| coefficients with SSAGetLRR(). |
//| SSA analysis will be performed during first call to analysis - |
//| related function. SSA model is smart enough to track all changes |
//| in the dataset and model settings, to cache previously computed |
//| basis and to re-evaluate basis only when necessary. |
//| Additionally, if your setting involves constant stream of |
//| incoming data, you can perform quick update already calculated |
//| model with one of the incremental append-and-update functions: |
//| SSAAppendPointAndUpdate() or SSAAppendSequenceAndUpdate(). |
//| NOTE: steps (2), (3), (4) can be performed in arbitrary order. |
//| INPUT PARAMETERS: |
//| none |
//| OUTPUT PARAMETERS: |
//| S - structure which stores model state |
//+------------------------------------------------------------------+
void CAlglib::SSACreate(CSSAModel &s)
{
//--- wrapper: initializes an SSA model in "dummy" mode (no basis yet);
//--- see the boxed header above for the full usage workflow
CSSA::SSACreate(s);
}
//+------------------------------------------------------------------+
//| This function sets window width for SSA model. You should call it|
//| before analysis phase. Default window width is 1 (not for real |
//| use). |
//| Special notes: |
//| * this function call can be performed at any moment before |
//| first call to analysis-related functions |
//| * changing window width invalidates internally stored basis; |
//| if you change window width AFTER you call analysis-related |
//| function, next analysis phase will require re-calculation of |
//| the basis according to current algorithm. |
//| * calling this function with exactly same window width as |
//| current one has no effect |
//| * if you specify window width larger than any data sequence |
//| stored in the model, analysis will return zero basis. |
//| INPUT PARAMETERS: |
//| S - SSA model created with SSACreate() |
//| WindowWidth - >=1, new window width |
//| OUTPUT PARAMETERS: |
//| S - SSA model, updated |
//+------------------------------------------------------------------+
void CAlglib::SSASetWindow(CSSAModel &s,int windowwidth)
{
//--- wrapper: sets the SSA window width (>=1); changing it invalidates
//--- any previously computed basis (delegates to CSSA)
CSSA::SSASetWindow(s,windowwidth);
}
//+------------------------------------------------------------------+
//| This function sets seed which is used to initialize internal RNG |
//| when we make pseudorandom decisions on model updates. |
//| By default, deterministic seed is used - which results in same |
//| sequence of pseudorandom decisions every time you run SSA model. |
//| If you specify non-deterministic seed value, then SSA model may |
//| return slightly different results after each run. |
//| This function can be useful when you have several SSA models |
//| updated with SSAAppendPointAndUpdate() called with 0<UpdateIts<1 |
//| (fractional value) and due to performance limitations want them |
//| to perform updates at different moments. |
//| INPUT PARAMETERS: |
//| S - SSA model |
//| Seed - seed: |
//| * positive values = use deterministic seed for |
//| each run of algorithms which depend on random |
//| initialization |
//| * zero or negative values=use non-deterministic |
//| seed |
//+------------------------------------------------------------------+
void CAlglib::SSASetSeed(CSSAModel &s,int seed)
{
//--- wrapper: sets the RNG seed used for pseudorandom update decisions;
//--- positive = deterministic, zero/negative = non-deterministic
CSSA::SSASetSeed(s,seed);
}
//+------------------------------------------------------------------+
//| This function sets length of power-up cycle for real-time |
//| algorithm. |
//| By default, this algorithm performs costly O(N*WindowWidth^2) |
//| init phase followed by full run of truncated EVD. However, if you|
//| are ready to live with a bit lower-quality basis during first few|
//| iterations, you can split this O(N*WindowWidth^2) initialization |
//| between several subsequent append-and-update rounds. It results |
//| in better latency of the algorithm. |
//| This function invalidates basis/solver, next analysis call will |
//| result in full recalculation of everything. |
//| INPUT PARAMETERS: |
//| S - SSA model |
//| PWLen - length of the power-up stage: |
//| * 0 means that no power-up is requested |
//| * 1 is the same as 0 |
//| * >1 means that delayed power-up is performed |
//+------------------------------------------------------------------+
void CAlglib::SSASetPowerUpLength(CSSAModel &s,int pwlen)
{
//--- wrapper: sets length of the power-up stage for the real-time
//--- algorithm (0 or 1 = no power-up); invalidates the current basis
CSSA::SSASetPowerUpLength(s,pwlen);
}
//+------------------------------------------------------------------+
//| This function sets memory limit of SSA analysis. |
//| Straightforward SSA with sequence length T and window width W |
//| needs O(T*W) memory. It is possible to reduce memory consumption |
//| by splitting task into smaller chunks. |
//| Thus function allows you to specify approximate memory limit |
//| (measured in double precision numbers used for buffers). Actual |
//| memory consumption will be comparable to the number specified by |
//| you. |
//| Default memory limit is 50.000.000 (400Mbytes) in current |
//| version. |
//| INPUT PARAMETERS: |
//| S - SSA model |
//| MemLimit - memory limit, >=0. Zero value means no limit. |
//+------------------------------------------------------------------+
void CAlglib::SSASetMemoryLimit(CSSAModel &s,int memlimit)
{
//--- wrapper: sets approximate buffer memory limit (in doubles) for the
//--- SSA analysis; zero means no limit (delegates to CSSA)
CSSA::SSASetMemoryLimit(s,memlimit);
}
//+------------------------------------------------------------------+
//| This function adds data sequence to SSA model. Only single- |
//| dimensional sequences are supported. |
//| What is a sequences? Following definitions/requirements apply: |
//| * a sequence is an array of values measured in subsequent, |
//| equally separated time moments (ticks). |
//| * you may have many sequences in your dataset; say, one |
//| sequence may correspond to one trading session. |
//| * sequence length should be larger than current window length |
//| (shorter sequences will be ignored during analysis). |
//| * analysis is performed within a sequence; different sequences |
//| are NOT stacked together to produce one large contiguous |
//| stream of data. |
//| * analysis is performed for all sequences at once, i.e. same |
//| set of basis vectors is computed for all sequences |
//| INCREMENTAL ANALYSIS |
//| This function is not intended for incremental updates of         |
//| previously found SSA basis. Calling it invalidates all previous |
//| analysis results (basis is reset and will be recalculated from |
//| zero during next analysis). |
//| If you want to perform incremental/real-time SSA, consider using |
//| following functions: |
//| * SSAAppendPointAndUpdate() for appending one point |
//| * SSAAppendSequenceAndUpdate() for appending new sequence |
//| INPUT PARAMETERS: |
//| S - SSA model created with SSACreate() |
//| X - array[N], data, can be larger (additional values|
//| are ignored) |
//| N - data length, can be automatically determined |
//| from the array length. N>=0. |
//| OUTPUT PARAMETERS: |
//| S - SSA model, updated |
//| NOTE: you can clear dataset with SSAClearData() |
//+------------------------------------------------------------------+
void CAlglib::SSAAddSequence(CSSAModel &s,CRowDouble &x,int n)
{
//--- wrapper: appends a data sequence of N ticks to the SSA dataset;
//--- invalidates previously computed analysis results (delegates to CSSA)
CSSA::SSAAddSequence(s,x,n);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::SSAAddSequence(CSSAModel &s,CRowDouble &x)
{
//--- convenience overload: the sequence length N is deduced from the
//--- length of X before delegating to the core routine
CSSA::SSAAddSequence(s,x,CAp::Len(x));
}
//+------------------------------------------------------------------+
//| This function appends single point to last data sequence stored |
//| in the SSA model and tries to update model in the incremental |
//| manner (if possible with current algorithm). |
//| If you want to add more than one point at once: |
//| * if you want to add M points to the same sequence, perform |
//| M-1 calls with UpdateIts parameter set to 0.0, and last call |
//| with non-zero UpdateIts. |
//| * if you want to add new sequence, use |
//| SSAAppendSequenceAndUpdate() |
//| Running time of this function does NOT depend on dataset size, |
//| only on window width and number of singular vectors. Depending |
//| on algorithm being used, incremental update has complexity: |
//| * for top-K real time - O(UpdateIts*K*Width^2), with |
//| fractional UpdateIts |
//| * for top-K direct - O(Width^3) for any non-zero |
//| UpdateIts |
//| * for precomputed basis - O(1), no update is performed |
//| INPUT PARAMETERS: |
//| S - SSA model created with SSACreate() |
//| X - new point |
//| UpdateIts - >=0, floating point(!) value, desired update |
//| frequency: |
//| * zero value means that point is stored, but no update|
//| is performed |
//| * integer part of the value means that specified |
//| number of iterations is always performed |
//| * fractional part of the value means that one |
//| iteration is performed with this probability. |
//| Recommended value: 0<UpdateIts<=1. Values larger than 1 are VERY |
//| seldom needed. If your dataset changes slowly, you can set it to |
//| 0.1 and skip 90% of updates. |
//| In any case, no information is lost even with zero value of |
//| UpdateIts! It will be incorporated into model, sooner or later. |
//| OUTPUT PARAMETERS: |
//| S - SSA model, updated |
//| NOTE: this function uses internal RNG to handle fractional values|
//| of UpdateIts. By default it is initialized with fixed seed |
//| during initial calculation of basis. Thus subsequent calls |
//| to this function will result in the same sequence of |
//| pseudorandom decisions. |
//| However, if you have several SSA models which are calculated |
//| simultaneously, and if you want to reduce computational |
//| bottlenecks by performing random updates at random moments, then |
//| fixed seed is not an option - all updates will fire at same |
//| moments. |
//| You may change it with SSASetSeed() function. |
//| NOTE: this function throws an exception if called for empty |
//| dataset (there is no "last" sequence to modify). |
//+------------------------------------------------------------------+
void CAlglib::SSAAppendPointAndUpdate(CSSAModel &s,double x,double updateits)
{
//--- wrapper: appends a single point to the last stored sequence and
//--- performs an incremental model update with frequency UpdateIts
CSSA::SSAAppendPointAndUpdate(s,x,updateits);
}
//+------------------------------------------------------------------+
//| This function appends new sequence to dataset stored in the SSA |
//| model and tries to update model in the incremental manner (if |
//| possible with current algorithm). |
//| Notes: |
//| * if you want to add M sequences at once, perform M-1 calls |
//| with UpdateIts parameter set to 0.0, and last call with |
//| non-zero UpdateIts. |
//| * if you want to add just one point, use |
//| SSAAppendPointAndUpdate() |
//| Running time of this function does NOT depend on dataset size, |
//| only on sequence length, window width and number of singular |
//| vectors. Depending on algorithm being used, incremental update |
//| has complexity: |
//| * for top-K real time - O(UpdateIts*K*Width^2+ |
//| (NTicks-Width)*Width^2) |
//| * for top-K direct - O(Width^3+(NTicks-Width)*Width^2) |
//| * for precomputed basis - O(1), no update is performed |
//| INPUT PARAMETERS: |
//| S - SSA model created with SSACreate() |
//|     X           -   new sequence, array[NTicks] or larger        |
//| NTicks - >=1, number of ticks in the sequence |
//| UpdateIts - >=0, floating point(!) value, desired update |
//| frequency: |
//| * zero value means that point is stored, but no |
//| update is performed |
//| * integer part of the value means that specified |
//| number of iterations is always performed |
//| * fractional part of the value means that one |
//| iteration is performed with this probability. |
//| Recommended value: 0<UpdateIts<=1. Values larger than 1 are VERY |
//| seldom needed. If your dataset changes slowly, you can set it to |
//| 0.1 and skip 90% of updates. |
//| In any case, no information is lost even with zero value of |
//| UpdateIts! It will be incorporated into model, sooner or later. |
//| OUTPUT PARAMETERS: |
//| S - SSA model, updated |
//| NOTE: this function uses internal RNG to handle fractional values|
//| of UpdateIts. By default it is initialized with fixed seed |
//| during initial calculation of basis. Thus subsequent calls |
//| to this function will result in the same sequence of |
//| pseudorandom decisions. |
//| However, if you have several SSA models which are calculated |
//| simultaneously, and if you want to reduce computational |
//| bottlenecks by performing random updates at random moments, then |
//| fixed seed is not an option - all updates will fire at same |
//| moments. |
//| You may change it with SSASetSeed() function. |
//+------------------------------------------------------------------+
void CAlglib::SSAAppendSequenceAndUpdate(CSSAModel &s,CRowDouble &x,
                                         int nticks,double updateits)
  {
//--- thin wrapper: append the sequence and (probabilistically) update
//--- the basis; all the work is done by the computational core in CSSA
   CSSA::SSAAppendSequenceAndUpdate(s,x,nticks,updateits);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- overload which infers the tick count from the vector length
void CAlglib::SSAAppendSequenceAndUpdate(CSSAModel &s,CRowDouble &x,
                                         double updateits)
  {
//--- the whole input vector is treated as the new sequence
   int ticks_count=CAp::Len(x);
//--- delegate to the computational core
   CSSA::SSAAppendSequenceAndUpdate(s,x,ticks_count,updateits);
  }
//+------------------------------------------------------------------+
//| This function sets SSA algorithm to "precomputed vectors" |
//| algorithm. |
//| This algorithm uses precomputed set of orthonormal(orthogonal |
//| AND normalized) basis vectors supplied by user. Thus, basis |
//| calculation phase is not performed - we already have our basis - |
//| and only analysis/forecasting phase requires actual calculations.|
//| This algorithm may handle "append" requests which add just one/ |
//| few ticks to the end of the last sequence in O(1) time. |
//| NOTE: this algorithm accepts both basis and window width, because|
//| these two parameters are naturally aligned. Calling this |
//| function sets window width; if you call SSASetWindow() |
//| with other window width, then during analysis stage |
//| algorithm will detect conflict and reset to zero basis. |
//| INPUT PARAMETERS: |
//| S - SSA model |
//| A - array[WindowWidth, NBasis], orthonormalized |
//| basis; this function does NOT control |
//| orthogonality and does NOT perform any kind of |
//| renormalization. It is your responsibility to |
//| provide it with correct basis. |
//| WindowWidth - window width, >= 1 |
//| NBasis - number of basis vectors, |
//| 1 <= NBasis <= WindowWidth |
//| OUTPUT PARAMETERS: |
//| S - updated model |
//| NOTE: calling this function invalidates basis in all cases. |
//+------------------------------------------------------------------+
void CAlglib::SSASetAlgoPrecomputed(CSSAModel &s,CMatrixDouble &a,int windowwidth,int nbasis)
  {
//--- thin wrapper: switch the model to the "precomputed basis" algorithm;
//--- orthogonality of A is the caller's responsibility (see header above)
   CSSA::SSASetAlgoPrecomputed(s,a,windowwidth,nbasis);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- overload which derives both sizes from the basis matrix shape
void CAlglib::SSASetAlgoPrecomputed(CSSAModel &s,CMatrixDouble &a)
  {
//--- rows = window width, columns = number of basis vectors
   int win_width=CAp::Rows(a);
   int basis_size=CAp::Cols(a);
//--- delegate to the computational core
   CSSA::SSASetAlgoPrecomputed(s,a,win_width,basis_size);
  }
//+------------------------------------------------------------------+
//| This function sets SSA algorithm to "direct top-K" algorithm. |
//| "Direct top-K" algorithm performs full SVD of the N*WINDOW |
//| trajectory matrix (hence its name - direct solver is used), then |
//| extracts top K components. Overall running time is |
//| O(N * WINDOW ^ 2), where N is a number of ticks in the dataset, |
//| WINDOW is window width. |
//| This algorithm may handle "append" requests which add just one / |
//| few ticks to the end of the last sequence in O(WINDOW ^ 3) time, |
//| which is ~N/WINDOW times faster than re-computing everything from|
//| scratch. |
//| INPUT PARAMETERS: |
//| S - SSA model |
//| TopK - number of components to analyze; TopK >= 1. |
//| OUTPUT PARAMETERS: |
//| S - updated model |
//| NOTE: TopK>WindowWidth is silently decreased to WindowWidth |
//| during analysis phase |
//| NOTE: calling this function invalidates basis, except for the |
//| situation when this algorithm was already set with same |
//| parameters. |
//+------------------------------------------------------------------+
void CAlglib::SSASetAlgoTopKDirect(CSSAModel &s,int topk)
  {
//--- thin wrapper: select the "direct top-K" SVD-based algorithm
   CSSA::SSASetAlgoTopKDirect(s,topk);
  }
//+------------------------------------------------------------------+
//| This function sets SSA algorithm to "top-K real time algorithm". |
//| This algo extracts K components with largest singular values. |
//| It is real-time version of top-K algorithm which is optimized for|
//| incremental processing and fast start-up. Internally it uses |
//| subspace eigensolver for truncated SVD. It results in ability to |
//| perform quick updates of the basis when only a few points / |
//| sequences is added to dataset. |
//| Performance profile of the algorithm is given below: |
//| * O(K * WindowWidth ^ 2) running time for incremental update |
//| of the dataset with one of the "append-and-update" functions |
//| (SSAAppendPointAndUpdate() or SSAAppendSequenceAndUpdate()). |
//| * O(N * WindowWidth ^ 2) running time for initial basis |
//| evaluation(N = size of dataset) |
//| * ability to split costly initialization across several |
//| incremental updates of the basis(so called "Power-Up" |
//| functionality, activated by SSASetPowerUpLength() function) |
//| INPUT PARAMETERS: |
//| S - SSA model |
//| TopK - number of components to analyze; TopK >= 1. |
//| OUTPUT PARAMETERS: |
//| S - updated model |
//| NOTE: this algorithm is optimized for large-scale tasks with |
//| large datasets. On toy problems with just 5-10 points it |
//| can return basis which is slightly different from that |
//| returned by direct algorithm (SSASetAlgoTopKDirect() |
//| function). However, the difference becomes negligible as |
//| dataset grows. |
//| NOTE: TopK > WindowWidth is silently decreased to WindowWidth |
//| during analysis phase |
//| NOTE: calling this function invalidates basis, except for the |
//| situation when this algorithm was already set with same |
//| parameters. |
//+------------------------------------------------------------------+
void CAlglib::SSASetAlgoTopKRealtime(CSSAModel &s,int topk)
  {
//--- thin wrapper: select the incremental "top-K real time" algorithm
   CSSA::SSASetAlgoTopKRealtime(s,topk);
  }
//+------------------------------------------------------------------+
//| This function clears all data stored in the model and invalidates|
//| all basis components found so far. |
//| INPUT PARAMETERS: |
//| S - SSA model created with SSACreate() |
//| OUTPUT PARAMETERS: |
//| S - SSA model, updated |
//+------------------------------------------------------------------+
void CAlglib::SSAClearData(CSSAModel &s)
  {
//--- thin wrapper: drop stored dataset and invalidate the basis
   CSSA::SSAClearData(s);
  }
//+------------------------------------------------------------------+
//| This function executes SSA on internally stored dataset and |
//| returns basis found by current method. |
//| INPUT PARAMETERS: |
//| S - SSA model |
//| OUTPUT PARAMETERS: |
//| A - array[WindowWidth, NBasis], basis; vectors are |
//| stored in matrix columns, by decreasing variance  |
//| SV - array[NBasis]: |
//| * zeros - for model initialized with |
//| SSASetAlgoPrecomputed() |
//| * singular values - for other algorithms |
//| WindowWidth - current window |
//| NBasis - basis size |
//| CACHING / REUSE OF THE BASIS |
//| Caching / reuse of previous results is performed: |
//| * first call performs full run of SSA; basis is stored in the |
//| cache |
//| * subsequent calls reuse previously cached basis |
//| * if you call any function which changes model properties |
//| (window length, algorithm, dataset), internal basis will be |
//| invalidated. |
//| * the only calls which do NOT invalidate basis are listed |
//| below: |
//| a) SSASetWindow() with same window length |
//| b) SSAAppendPointAndUpdate() |
//| c) SSAAppendSequenceAndUpdate() |
//| d) SSASetAlgoTopK...() with exactly same K Calling these |
//| functions will result in reuse of previously found basis.|
//| HANDLING OF DEGENERATE CASES |
//| Calling this function in degenerate cases(no data or all data are|
//| shorter than window size; no algorithm is specified) returns |
//| basis with just one zero vector. |
//+------------------------------------------------------------------+
void CAlglib::SSAGetBasis(CSSAModel &s,CMatrixDouble &a,CRowDouble &sv,int &windowwidth,int &nbasis)
  {
//--- reset every output argument before delegating
   windowwidth=0;
   nbasis=0;
   sv.Resize(0);
   a.Resize(0,0);
//--- run the analysis; a previously cached basis is reused when valid
   CSSA::SSAGetBasis(s,a,sv,windowwidth,nbasis);
  }
//+------------------------------------------------------------------+
//| This function returns linear recurrence relation(LRR) |
//| coefficients found by current SSA algorithm. |
//| INPUT PARAMETERS: |
//| S - SSA model |
//| OUTPUT PARAMETERS: |
//| A - array[WindowWidth - 1]. Coefficients of the linear |
//| recurrence of the form: |
//| X[W - 1] = X[W - 2] * A[W - 2] + |
//| X[W - 3] * A[W - 3] + ... + X[0] * A[0].|
//| Empty array for WindowWidth = 1. |
//| WindowWidth - current window width |
//| CACHING / REUSE OF THE BASIS |
//| Caching / reuse of previous results is performed: |
//| * first call performs full run of SSA; basis is stored in the |
//| cache |
//| * subsequent calls reuse previously cached basis |
//| * if you call any function which changes model properties |
//| (window length, algorithm, dataset), internal basis will be |
//| invalidated. |
//| * the only calls which do NOT invalidate basis are listed |
//| below: |
//| a) SSASetWindow() with same window length |
//| b) SSAAppendPointAndUpdate() |
//| c) SSAAppendSequenceAndUpdate() |
//| d) SSASetAlgoTopK...() with exactly same K |
//| Calling these functions will result in reuse of previously found |
//| basis. |
//| HANDLING OF DEGENERATE CASES |
//| Calling this function in degenerate cases (no data or all data |
//| are shorter than window size; no algorithm is specified) returns |
//| zeros. |
//+------------------------------------------------------------------+
void CAlglib::SSAGetLRR(CSSAModel &s,CRowDouble &a,int &windowwidth)
  {
//--- reset the outputs before delegating
   windowwidth=0;
   a.Resize(0);
//--- fetch the LRR coefficients from the computational core
   CSSA::SSAGetLRR(s,a,windowwidth);
  }
//+------------------------------------------------------------------+
//| This function executes SSA on internally stored dataset and |
//| returns analysis for the last window of the last sequence. Such |
//| analysis is a lightweight alternative for full scale  |
//| reconstruction (see below). |
//| Typical use case for this function is real-time setting, when |
//| you are interested in quick-and-dirty (very quick and very dirty)|
//| processing of just a few last ticks of the trend. |
//| IMPORTANT: full scale SSA involves analysis of the ENTIRE |
//| dataset, with reconstruction being done for all |
//| positions of sliding window with subsequent |
//| hankelization (diagonal averaging) of the resulting |
//| matrix. |
//| Such analysis requires O((DataLen - Window)*Window*NBasis) FLOPs |
//| and can be quite costly. However, it has nice noise - canceling |
//| effects due to averaging. |
//| This function performs REDUCED analysis of the last window. It |
//| is much faster - just O(Window*NBasis), but its results are |
//| DIFFERENT from that of SSAAnalyzeLast(). In particular, first few|
//| points of the trend are much more prone to noise. |
//| INPUT PARAMETERS: |
//| S - SSA model |
//| OUTPUT PARAMETERS: |
//| Trend - array[WindowSize], reconstructed trend line |
//| Noise - array[WindowSize], the rest of the signal; it |
//| holds that ActualData = Trend + Noise. |
//| NTicks - current WindowSize |
//| CACHING / REUSE OF THE BASIS |
//| Caching / reuse of previous results is performed: |
//| * first call performs full run of SSA; basis is stored in the |
//| cache |
//| * subsequent calls reuse previously cached basis |
//| * if you call any function which changes model properties |
//| (window length, algorithm, dataset), internal basis will |
//| be invalidated. |
//| * the only calls which do NOT invalidate basis are listed |
//| below: |
//| a) SSASetWindow() with same window length |
//| b) SSAAppendPointAndUpdate() |
//| c) SSAAppendSequenceAndUpdate() |
//| d) SSASetAlgoTopK...() with exactly same K |
//| Calling these functions will result in reuse of previously found |
//| basis. |
//| In any case, only basis is reused. Reconstruction is performed |
//| from scratch every time you call this function. |
//| HANDLING OF DEGENERATE CASES |
//| Following degenerate cases may happen: |
//| * dataset is empty(no analysis can be done) |
//| * all sequences are shorter than the window length, no analysis|
//| can be done |
//| * no algorithm is specified(no analysis can be done) |
//| * last sequence is shorter than the window length (analysis |
//| can be done, but we can not perform reconstruction on the |
//| last sequence) |
//| Calling this function in degenerate cases returns following |
//| result: |
//| * in any case, WindowWidth ticks is returned |
//| * trend is assumed to be zero |
//| * noise is initialized by the last sequence; if last sequence |
//| is shorter than the window size, it is moved to the end of |
//| the array, and the beginning of the noise array is filled by |
//| zeros |
//| No analysis is performed in degenerate cases (we immediately |
//| return dummy values, no basis is constructed). |
//+------------------------------------------------------------------+
void CAlglib::SSAAnalyzeLastWindow(CSSAModel &s,CRowDouble &trend,CRowDouble &noise,int &nticks)
  {
//--- reset every output argument before delegating
   nticks=0;
   noise.Resize(0);
   trend.Resize(0);
//--- reduced (last-window-only) analysis is done by the computational core
   CSSA::SSAAnalyzeLastWindow(s,trend,noise,nticks);
  }
//+------------------------------------------------------------------+
//| This function: |
//| * builds SSA basis using internally stored(entire) dataset |
//| * returns reconstruction for the last NTicks of the last |
//| sequence |
//| If you want to analyze some other sequence, use |
//| SSAAnalyzeSequence(). |
//| Reconstruction phase involves generation of NTicks-WindowWidth |
//| sliding windows, their decomposition using empirical orthogonal |
//| functions found by SSA, followed by averaging of each data point |
//| across several overlapping windows. Thus, every point in the |
//| output trend is reconstructed using up to WindowWidth overlapping|
//| windows(WindowWidth windows exactly in the inner points, just one|
//| window at the extremal points). |
//| IMPORTANT: due to averaging this function returns different |
//| results for different values of NTicks. It is expected|
//| and not a bug. |
//| For example: |
//| * Trend[NTicks - 1] is always same because it is not averaged |
//| in any case(same applies to Trend[0]). |
//| * Trend[NTicks - 2] has different values for NTicks=WindowWidth|
//| and NTicks=WindowWidth+1 because former case means that no |
//| averaging is performed, and latter case means that averaging |
//| using two sliding windows is performed. Larger values of |
//| NTicks produce same results as NTicks = WindowWidth + 1. |
//| * ...and so on... |
//| PERFORMANCE: this function has |
//| O((NTicks - WindowWidth) * WindowWidth*NBasis) |
//| running time. If you work in time-constrained setting and have |
//| to analyze just a few last ticks, choosing NTicks equal to |
//| WindowWidth + SmoothingLen, with SmoothingLen = 1...WindowWidth |
//| will result in good compromise between noise cancellation and |
//| analysis speed. |
//| INPUT PARAMETERS: |
//| S - SSA model |
//| NTicks - number of ticks to analyze, Nticks >= 1. |
//| * special case of NTicks<=WindowWidth is handled |
//| by analyzing last window and returning NTicks |
//| last ticks. |
//| * special case NTicks>LastSequenceLen is handled by |
//| prepending result with NTicks-LastSequenceLen zeros.|
//| OUTPUT PARAMETERS: |
//| Trend - array[NTicks], reconstructed trend line |
//| Noise - array[NTicks], the rest of the signal; it holds |
//| that ActualData = Trend + Noise. |
//| CACHING / REUSE OF THE BASIS |
//| Caching / reuse of previous results is performed: |
//| * first call performs full run of SSA; basis is stored in |
//| the cache |
//| * subsequent calls reuse previously cached basis |
//| * if you call any function which changes model properties |
//| (window length, algorithm, dataset), internal basis will |
//| be invalidated. |
//| * the only calls which do NOT invalidate basis are listed |
//| below: |
//| a) SSASetWindow() with same window length |
//| b) SSAAppendPointAndUpdate() |
//| c) SSAAppendSequenceAndUpdate() |
//| d) SSASetAlgoTopK...() with exactly same K |
//| Calling these functions will result in reuse of previously found |
//| basis. |
//| In any case, only basis is reused. Reconstruction is performed |
//| from scratch every time you call this function. |
//| HANDLING OF DEGENERATE CASES |
//| Following degenerate cases may happen: |
//| * dataset is empty(no analysis can be done) |
//| * all sequences are shorter than the window length, no |
//| analysis can be done |
//| * no algorithm is specified(no analysis can be done) |
//| * last sequence is shorter than the window length(analysis |
//| can be done, but we can not perform reconstruction on the |
//| last sequence) |
//| Calling this function in degenerate cases returns following |
//| result: |
//| * in any case, NTicks ticks is returned |
//| * trend is assumed to be zero |
//| * noise is initialized by the last sequence; if last |
//| sequence is shorter than the window size, it is moved |
//| to the end of the array, and the beginning of the noise |
//| array is filled by zeros |
//| No analysis is performed in degenerate cases(we immediately |
//| return dummy values, no basis is constructed). |
//+------------------------------------------------------------------+
void CAlglib::SSAAnalyzeLast(CSSAModel &s,int nticks,CRowDouble &trend,CRowDouble &noise)
  {
//--- clear the output vectors before delegating
   noise.Resize(0);
   trend.Resize(0);
//--- reconstruction of the last NTicks is done by the computational core
   CSSA::SSAAnalyzeLast(s,nticks,trend,noise);
  }
//+------------------------------------------------------------------+
//| This function: |
//| * builds SSA basis using internally stored(entire) dataset |
//| * returns reconstruction for the sequence being passed to |
//| this function |
//| If you want to analyze last sequence stored in the model, use |
//| SSAAnalyzeLast(). |
//| Reconstruction phase involves generation of NTicks-WindowWidth |
//| sliding windows, their decomposition using empirical orthogonal |
//| functions found by SSA, followed by averaging of each data point |
//| across several overlapping windows. Thus, every point in the |
//| output trend is reconstructed using up to WindowWidth overlapping|
//| windows(WindowWidth windows exactly in the inner points, just one|
//| window at the extremal points). |
//| PERFORMANCE: this function has |
//| O((NTicks - WindowWidth)*WindowWidth*NBasis) |
//| running time. If you work in time-constrained setting and have |
//| to analyze just a few last ticks, choosing NTicks equal to |
//| WindowWidth + SmoothingLen, with SmoothingLen = 1...WindowWidth |
//| will result in good compromise between noise cancellation and |
//| analysis speed. |
//| INPUT PARAMETERS: |
//| S - SSA model |
//| Data - array[NTicks], can be larger(only NTicks leading|
//| elements will be used) |
//| NTicks - number of ticks to analyze, Nticks >= 1. |
//| * special case of NTicks<WindowWidth is handled by |
//| returning zeros as trend, and signal as noise |
//| OUTPUT PARAMETERS: |
//| Trend - array[NTicks], reconstructed trend line |
//| Noise - array[NTicks], the rest of the signal; it holds |
//| that ActualData = Trend + Noise. |
//| CACHING / REUSE OF THE BASIS |
//| Caching / reuse of previous results is performed: |
//| * first call performs full run of SSA; basis is stored in |
//| the cache |
//| * subsequent calls reuse previously cached basis |
//| * if you call any function which changes model properties |
//| (window length, algorithm, dataset), internal basis will |
//| be invalidated. |
//| * the only calls which do NOT invalidate basis are listed |
//| below: |
//| a) SSASetWindow() with same window length |
//| b) SSAAppendPointAndUpdate() |
//| c) SSAAppendSequenceAndUpdate() |
//| d) SSASetAlgoTopK...() with exactly same K |
//| Calling these functions will result in reuse of previously found |
//| basis. |
//| In any case, only basis is reused. Reconstruction is performed |
//| from scratch every time you call this function. |
//| HANDLING OF DEGENERATE CASES |
//| Following degenerate cases may happen: |
//| * dataset is empty(no analysis can be done) |
//| * all sequences are shorter than the window length, no |
//| analysis can be done |
//| * no algorithm is specified(no analysis can be done) |
//| * sequence being passed is shorter than the window length |
//| Calling this function in degenerate cases returns following |
//| result: |
//| * in any case, NTicks ticks is returned |
//| * trend is assumed to be zero |
//| * noise is initialized by the sequence. |
//| No analysis is performed in degenerate cases(we immediately |
//| return dummy values, no basis is constructed). |
//+------------------------------------------------------------------+
void CAlglib::SSAAnalyzeSequence(CSSAModel &s,CRowDouble &data,int nticks,CRowDouble &trend,CRowDouble &noise)
  {
//--- clear the output vectors before delegating
   noise.Resize(0);
   trend.Resize(0);
//--- analysis of the user-supplied sequence is done by the core
   CSSA::SSAAnalyzeSequence(s,data,nticks,trend,noise);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- overload which analyzes the whole input vector
void CAlglib::SSAAnalyzeSequence(CSSAModel &s,CRowDouble &data,CRowDouble &trend,CRowDouble &noise)
  {
//--- clear the output vectors before delegating
   noise.Resize(0);
   trend.Resize(0);
//--- the full vector length defines the number of ticks to analyze
   int seq_len=CAp::Len(data);
//--- delegate to the computational core
   CSSA::SSAAnalyzeSequence(s,data,seq_len,trend,noise);
  }
//+------------------------------------------------------------------+
//| This function builds SSA basis and performs forecasting for a |
//| specified number of ticks, returning value of trend. |
//| Forecast is performed as follows: |
//| * SSA trend extraction is applied to last WindowWidth |
//| elements of the internally stored dataset; this step is |
//| basically a noise reduction. |
//| * linear recurrence relation is applied to extracted trend |
//| This function has following running time: |
//| * O(NBasis*WindowWidth) for trend extraction phase (always |
//| performed) |
//| * O(WindowWidth*NTicks) for forecast phase |
//| NOTE: noise reduction is ALWAYS applied by this algorithm; if you|
//| want to apply recurrence relation to raw unprocessed data, |
//| use another function - SSAForecastSequence() which allows |
//| to turn on and off noise reduction phase. |
//| NOTE: this algorithm performs prediction using only one-last - |
//| sliding window. Predictions produced by such approach are |
//| smooth continuations of the reconstructed trend line, but |
//| they can be easily corrupted by noise. If you need noise - |
//| resistant prediction, use SSAForecastAvgLast() function, |
//| which averages predictions built using several sliding |
//| windows. |
//| INPUT PARAMETERS: |
//| S - SSA model |
//| NTicks - number of ticks to forecast, NTicks >= 1 |
//| OUTPUT PARAMETERS: |
//| Trend - array[NTicks], predicted trend line |
//| CACHING / REUSE OF THE BASIS |
//| Caching / reuse of previous results is performed: |
//| * first call performs full run of SSA; basis is stored in |
//| the cache |
//| * subsequent calls reuse previously cached basis |
//| * if you call any function which changes model properties |
//| (window length, algorithm, dataset), internal basis will |
//| be invalidated. |
//| * the only calls which do NOT invalidate basis are listed |
//| below: |
//| a) SSASetWindow() with same window length |
//| b) SSAAppendPointAndUpdate() |
//| c) SSAAppendSequenceAndUpdate() |
//| d) SSASetAlgoTopK...() with exactly same K |
//| Calling these functions will result in reuse of previously found |
//| basis. |
//| HANDLING OF DEGENERATE CASES |
//| Following degenerate cases may happen: |
//| * dataset is empty(no analysis can be done) |
//| * all sequences are shorter than the window length, no |
//| analysis can be done |
//| * no algorithm is specified(no analysis can be done) |
//| * last sequence is shorter than the WindowWidth(analysis can|
//| be done, but we can not perform forecasting on the last |
//| sequence) |
//| * window length is 1(impossible to use for forecasting) |
//| * SSA analysis algorithm is configured to extract basis |
//| whose size is equal to window length(impossible to use for|
//| forecasting; only basis whose size is less than window |
//| length can be used). |
//| Calling this function in degenerate cases returns following |
//| result: |
//| * NTicks copies of the last value is returned for non-empty |
//| task with large enough dataset, but with overcomplete |
//| basis (window width = 1 or basis size is equal to window |
//| width) |
//| * zero trend with length = NTicks is returned for empty task|
//| No analysis is performed in degenerate cases (we immediately |
//| return dummy values, no basis is ever constructed). |
//+------------------------------------------------------------------+
void CAlglib::SSAForecastLast(CSSAModel &s,int nticks,CRowDouble &trend)
  {
//--- clear the output vector before delegating
   trend.Resize(0);
//--- single-window forecast is performed by the computational core
   CSSA::SSAForecastLast(s,nticks,trend);
  }
//+------------------------------------------------------------------+
//| This function builds SSA basis and performs forecasting for a |
//| user - specified sequence, returning value of trend. |
//| Forecasting is done in two stages: |
//| * first, we extract trend from the WindowWidth last |
//| elements of the sequence. This stage is optional, you can |
//| turn it off if you pass data which are already processed |
//| with SSA. Of course, you can turn it off even for raw |
//| data, but it is not recommended - noise suppression is |
//| very important for correct prediction. |
//| * then, we apply LRR for last WindowWidth - 1 elements of |
//| the extracted trend. |
//| This function has following running time: |
//| * O(NBasis*WindowWidth) for trend extraction phase |
//| * O(WindowWidth*NTicks) for forecast phase |
//| NOTE: this algorithm performs prediction using only one-last - |
//| sliding window. Predictions produced by such approach are |
//| smooth continuations of the reconstructed trend line, but |
//| they can be easily corrupted by noise. If you need noise - |
//| resistant prediction, use SSAForecastAvgSequence() |
//| function, which averages predictions built using several |
//| sliding windows. |
//| INPUT PARAMETERS: |
//| S - SSA model |
//| Data - array[NTicks], data to forecast |
//| DataLen - number of ticks in the data, DataLen >= 1 |
//| ForecastLen - number of ticks to predict, ForecastLen >= 1 |
//| ApplySmoothing - whether to apply smoothing trend extraction or|
//| not; if you do not know what to specify, pass True.|
//| OUTPUT PARAMETERS: |
//| Trend - array[ForecastLen], forecasted trend |
//| CACHING / REUSE OF THE BASIS |
//| Caching / reuse of previous results is performed: |
//| * first call performs full run of SSA; basis is stored in |
//| the cache |
//| * subsequent calls reuse previously cached basis |
//| * if you call any function which changes model properties |
//| (window length, algorithm, dataset), internal basis will |
//| be invalidated. |
//| * the only calls which do NOT invalidate basis are listed |
//| below: |
//| a) SSASetWindow() with same window length |
//| b) SSAAppendPointAndUpdate() |
//| c) SSAAppendSequenceAndUpdate() |
//| d) SSASetAlgoTopK...() with exactly same K |
//| Calling these functions will result in reuse of previously found |
//| basis. |
//| HANDLING OF DEGENERATE CASES |
//| Following degenerate cases may happen: |
//| * dataset is empty(no analysis can be done) |
//| * all sequences are shorter than the window length, no |
//| analysis can be done |
//| * no algorithm is specified(no analysis can be done) |
//| * data sequence is shorter than the WindowWidth(analysis can|
//| be done, but we can not perform forecasting on the last |
//| sequence) |
//| * window length is 1(impossible to use for forecasting) |
//| * SSA analysis algorithm is configured to extract basis |
//| whose size is equal to window length (impossible to use |
//| for forecasting; only basis whose size is less than window|
//| length can be used). |
//| Calling this function in degenerate cases returns following |
//| result: |
//| * ForecastLen copies of the last value is returned for |
//| non-empty task with large enough dataset, but with |
//| overcomplete basis (window width = 1 or basis size is |
//| equal to window width) |
//| * zero trend with length = ForecastLen is returned for empty|
//| task |
//| No analysis is performed in degenerate cases (we immediately |
//| return dummy values, no basis is ever constructed). |
//+------------------------------------------------------------------+
void CAlglib::SSAForecastSequence(CSSAModel &s,CRowDouble &data,
                                  int datalen,int forecastlen,
                                  bool applysmoothing,CRowDouble &trend)
  {
//--- clear the output vector before delegating
   trend.Resize(0);
//--- forecast of the user-supplied sequence is done by the core;
//--- ApplySmoothing controls the optional trend-extraction stage
   CSSA::SSAForecastSequence(s,data,datalen,forecastlen,applysmoothing,trend);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- overload which forecasts from the whole input vector with smoothing on
void CAlglib::SSAForecastSequence(CSSAModel &s,CRowDouble &data,int forecastlen,CRowDouble &trend)
  {
//--- clear the output vector before delegating
   trend.Resize(0);
//--- the full vector length defines the data length
   int data_length=CAp::Len(data);
//--- smoothing trend extraction is enabled by default
   bool smooth=true;
//--- delegate to the computational core
   CSSA::SSAForecastSequence(s,data,data_length,forecastlen,smooth,trend);
  }
//+------------------------------------------------------------------+
//| This function builds SSA basis and performs forecasting for a |
//| specified number of ticks, returning value of trend. |
//| Forecast is performed as follows: |
//| * SSA trend extraction is applied to last M sliding windows|
//| of the internally stored dataset |
//| * for each of M sliding windows, M predictions are built |
//| * average value of M predictions is returned |
//| This function has following running time: |
//| * O(NBasis*WindowWidth*M) for trend extraction phase |
//| (always performed) |
//| * O(WindowWidth*NTicks*M) for forecast phase |
//| NOTE: noise reduction is ALWAYS applied by this algorithm; if you|
//| want to apply recurrence relation to raw unprocessed data, |
//| use another function - SSAForecastSequence() which allows |
//| to turn on and off noise reduction phase. |
//| NOTE: combination of several predictions results in lesser |
//| sensitivity to noise, but it may produce undesirable |
//| discontinuities between last point of the trend and first |
//| point of the prediction. The reason is that last point of |
//| the trend is usually corrupted by noise, but average value |
//| of several predictions is less sensitive to noise, thus |
//| discontinuity appears. It is not a bug. |
//| INPUT PARAMETERS: |
//| S - SSA model |
//| M - number of sliding windows to combine, M >= 1. If |
//| your dataset has less than M sliding windows, this |
//| parameter will be silently reduced. |
//| NTicks - number of ticks to forecast, NTicks >= 1 |
//| OUTPUT PARAMETERS: |
//| Trend - array[NTicks], predicted trend line |
//| CACHING / REUSE OF THE BASIS |
//| Caching / reuse of previous results is performed: |
//| * first call performs full run of SSA; basis is stored in |
//| the cache |
//| * subsequent calls reuse previously cached basis |
//| * if you call any function which changes model properties |
//| (window length, algorithm, dataset), internal basis will |
//| be invalidated. |
//| * the only calls which do NOT invalidate basis are listed |
//| below: |
//| a) SSASetWindow() with same window length |
//| b) SSAAppendPointAndUpdate() |
//| c) SSAAppendSequenceAndUpdate() |
//| d) SSASetAlgoTopK...() with exactly same K |
//| Calling these functions will result in reuse of previously found |
//| basis. |
//| HANDLING OF DEGENERATE CASES |
//| Following degenerate cases may happen: |
//| * dataset is empty(no analysis can be done) |
//| * all sequences are shorter than the window length, no |
//| analysis can be done |
//| * no algorithm is specified(no analysis can be done) |
//| * last sequence is shorter than the WindowWidth(analysis can|
//| be done, but we can not perform forecasting on the last |
//| sequence) |
//| * window length is 1(impossible to use for forecasting) |
//| * SSA analysis algorithm is configured to extract basis |
//| whose size is equal to window length(impossible to use for|
//| forecasting; only basis whose size is less than window |
//| length can be used). |
//| Calling this function in degenerate cases returns following |
//| result: |
//| * NTicks copies of the last value is returned for non-empty |
//| task with large enough dataset, but with overcomplete basis |
//| (window width = 1 or basis size is equal to window width) |
//| * zero trend with length = NTicks is returned for empty task |
//| No analysis is performed in degenerate cases (we immediately |
//| return dummy values, no basis is ever constructed). |
//+------------------------------------------------------------------+
void CAlglib::SSAForecastAvgLast(CSSAModel &s,int m,int nticks,CRowDouble &trend)
{
//--- reset the output vector before it is filled by the computational core
trend.Resize(0);
//--- function call: delegate forecasting to the CSSA implementation
CSSA::SSAForecastAvgLast(s,m,nticks,trend);
}
//+------------------------------------------------------------------+
//| This function builds SSA basis and performs forecasting for a |
//| user - specified sequence, returning value of trend. |
//| Forecasting is done in two stages: |
//| * first, we extract trend from M last sliding windows of the |
//| sequence. This stage is optional, you can turn it off if you|
//| pass data which are already processed with SSA. Of course, |
//| you can turn it off even for raw data, but it is not |
//| recommended - noise suppression is very important for correct|
//| prediction. |
//| * then, we apply LRR independently for M sliding windows |
//| * average of M predictions is returned |
//| This function has following running time: |
//| * O(NBasis*WindowWidth*M) for trend extraction phase |
//| * O(WindowWidth*NTicks*M) for forecast phase |
//| NOTE: combination of several predictions results in lesser |
//| sensitivity to noise, but it may produce undesirable |
//| discontinuities between last point of the trend and first |
//| point of the prediction. The reason is that last point of |
//| the trend is usually corrupted by noise, but average value |
//| of several predictions is less sensitive to noise, thus |
//| discontinuity appears. It is not a bug. |
//| INPUT PARAMETERS: |
//| S - SSA model |
//| Data - array[NTicks], data to forecast |
//| DataLen - number of ticks in the data, DataLen >= 1 |
//| M - number of sliding windows to combine, M >= 1. |
//| If your dataset has less than M sliding windows,|
//| this parameter will be silently reduced. |
//| ForecastLen - number of ticks to predict, ForecastLen >= 1 |
//| ApplySmoothing - whether to apply smoothing trend extraction |
//| or not. if you do not know what to specify, |
//| pass true. |
//| OUTPUT PARAMETERS: |
//| Trend - array[ForecastLen], forecasted trend |
//| CACHING / REUSE OF THE BASIS |
//| Caching / reuse of previous results is performed: |
//| * first call performs full run of SSA; basis is stored in |
//| the cache |
//| * subsequent calls reuse previously cached basis |
//| * if you call any function which changes model properties |
//| (window length, algorithm, dataset), internal basis will be |
//| invalidated. |
//| * the only calls which do NOT invalidate basis are listed |
//| below: |
//| a) SSASetWindow() with same window length |
//| b) SSAAppendPointAndUpdate() |
//| c) SSAAppendSequenceAndUpdate() |
//| d) SSASetAlgoTopK...() with exactly same K |
//| Calling these functions will result in reuse of previously found |
//| basis. |
//| HANDLING OF DEGENERATE CASES |
//| Following degenerate cases may happen: |
//| * dataset is empty(no analysis can be done) |
//| * all sequences are shorter than the window length, no analysis|
//| can be done |
//| * no algorithm is specified(no analysis can be done) |
//| * data sequence is shorter than the WindowWidth (analysis can |
//| be done, but we can not perform forecasting on the last |
//| sequence) |
//| * window length is 1 (impossible to use for forecasting) |
//| * SSA analysis algorithm is configured to extract basis whose |
//| size is equal to window length (impossible to use for |
//| forecasting; only basis whose size is less than window length|
//| can be used). |
//| Calling this function in degenerate cases returns following |
//| result: |
//| * ForecastLen copies of the last value is returned for |
//| non-empty task with large enough dataset, but with |
//| overcomplete basis (window width = 1 or basis size |
//| is equal to window width) |
//| * zero trend with length = ForecastLen is returned for |
//| empty task |
//| No analysis is performed in degenerate cases (we immediately |
//| return dummy values, no basis is ever constructed). |
//+------------------------------------------------------------------+
void CAlglib::SSAForecastAvgSequence(CSSAModel &s,CRowDouble &data,
int datalen,int m,int forecastlen,
bool applysmoothing,CRowDouble &trend)
{
//--- reset the output vector before it is filled by the computational core
trend.Resize(0);
//--- function call: delegate forecasting to the CSSA implementation
CSSA::SSAForecastAvgSequence(s,data,datalen,m,forecastlen,applysmoothing,trend);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//+------------------------------------------------------------------+
//| Convenience overload: sequence length is taken from the array |
//| size and smoothing trend extraction is enabled by default. |
//+------------------------------------------------------------------+
void CAlglib::SSAForecastAvgSequence(CSSAModel &s,CRowDouble &data,
int m,int forecastlen,
CRowDouble &trend)
{
//--- drop any previous contents of the output vector
trend.Resize(0);
//--- delegate to the computational core, deriving DataLen from the
//--- input array and requesting smoothing (ApplySmoothing=true)
CSSA::SSAForecastAvgSequence(s,data,CAp::Len(data),m,forecastlen,true,trend);
}
//+------------------------------------------------------------------+
//| This function serializes data structure to string. |
//| Important properties of s_out: |
//| * it contains alphanumeric characters, dots, underscores, minus|
//| signs |
//| * these symbols are grouped into words, which are separated by |
//| spaces and Windows-style (CR+LF) newlines |
//| * although serializer uses spaces and CR+LF as separators, |
//| you can replace any separator character by arbitrary |
//| combination of spaces, tabs, Windows or Unix newlines. It |
//| allows flexible reformatting of the string in case you want |
//| to include it into text or XML file. But you should not |
//| insert separators into the middle of the "words" nor you |
//| should change case of letters. |
//| * s_out can be freely moved between 32-bit and 64-bit systems, |
//| little and big endian machines, and so on. You can serialize |
//| structure on 32-bit machine and unserialize it on 64-bit one |
//| (or vice versa), or serialize it on SPARC and unserialize on |
//| x86. You can also serialize it in C# version of ALGLIB and|
//| unserialize in C++ one, and vice versa. |
//+------------------------------------------------------------------+
void CAlglib::KNNSerialize(CKNNModel &obj,string &s_out)
{
//--- serializer object used for the two-pass serialization protocol
CSerializer serializer;
//--- pass 1: size-allocation pass over the model
serializer.Alloc_Start();
CKNN::KNNAlloc(serializer,obj);
//--- pass 2: emit the string representation of the model
serializer.SStart_Str();
CKNN::KNNSerialize(serializer,obj);
serializer.Stop();
//--- hand the resulting string back to the caller
s_out=serializer.Get_String();
}
//+------------------------------------------------------------------+
//| This function unserializes data structure from string. |
//+------------------------------------------------------------------+
void CAlglib::KNNUnserialize(const string s_in,CKNNModel &obj)
{
//--- drive the deserialization protocol over the input string
CSerializer serializer;
serializer.UStart_Str(s_in);
CKNN::KNNUnserialize(serializer,obj);
serializer.Stop();
}
//+------------------------------------------------------------------+
//| This function creates buffer structure which can be used to |
//| perform parallel KNN requests. |
//| KNN subpackage provides two sets of computing functions - ones |
//| which use internal buffer of KNN model (these functions are |
//| single-threaded because they use same buffer, which cannot be |
//| shared between threads), and ones which use external buffer. |
//| This function is used to initialize external buffer. |
//| INPUT PARAMETERS |
//| Model - KNN model which is associated with newly created|
//| buffer |
//| OUTPUT PARAMETERS |
//| Buf - external buffer. |
//| IMPORTANT: buffer object should be used only with model which was|
//| used to initialize buffer. Any attempt to use buffer|
//| with different object is dangerous - you may get |
//| integrity check failure (exception) because sizes of |
//| internal arrays do not fit to dimensions of the model |
//| structure. |
//+------------------------------------------------------------------+
void CAlglib::KNNCreateBuffer(CKNNModel &model,CKNNBuffer &buf)
{
//--- delegate buffer initialization to the CKNN computational core
CKNN::KNNCreateBuffer(model,buf);
}
//+------------------------------------------------------------------+
//| This subroutine creates KNNBuilder object which is used to train |
//| KNN models. |
//| By default, new builder stores empty dataset and some reasonable |
//| default settings. At the very least, you should specify dataset |
//| prior to building KNN model. You can also tweak settings of the |
//| model construction algorithm (recommended, although default |
//| settings should work well). |
//| Following actions are mandatory: |
//| * calling knnbuildersetdataset() to specify dataset |
//| * calling KNNBuilderBuildKNNModel() to build KNN model using |
//| current dataset and default settings |
//| Additionally, you may call: |
//| * KNNBuilderSetNorm() to change norm being used |
//| INPUT PARAMETERS: |
//| none |
//| OUTPUT PARAMETERS: |
//| S - KNN builder |
//+------------------------------------------------------------------+
void CAlglib::KNNBuilderCreate(CKNNBuilder &s)
{
//--- delegate builder construction to the CKNN computational core
CKNN::KNNBuilderCreate(s);
}
//+------------------------------------------------------------------+
//| Specifies regression problem (one or more continuous output |
//| variables are predicted). There also exists "classification" |
//| version of this function. |
//| This subroutine adds dense dataset to the internal storage of the|
//| builder object. Specifying your dataset in the dense format means|
//| that the dense version of the KNN construction algorithm will be |
//| invoked. |
//| INPUT PARAMETERS: |
//| S - KNN builder object |
//| XY - array[NPoints,NVars+NOut] (note: actual size can|
//| be larger, only leading part is used anyway), |
//| dataset: |
//| * first NVars elements of each row store values |
//| of the independent variables |
//| * next NOut elements store values of the |
//| dependent variables |
//| NPoints - number of rows in the dataset, NPoints>=1 |
//| NVars - number of independent variables, NVars>=1 |
//| NOut - number of dependent variables, NOut>=1 |
//| OUTPUT PARAMETERS: |
//| S - KNN builder |
//+------------------------------------------------------------------+
void CAlglib::KNNBuilderSetDatasetReg(CKNNBuilder &s,CMatrixDouble &xy,
int npoints,int nvars,int nout)
{
//--- delegate dense regression dataset setup to the CKNN core
CKNN::KNNBuilderSetDatasetReg(s,xy,npoints,nvars,nout);
}
//+------------------------------------------------------------------+
//| Specifies classification problem (two or more classes are |
//| predicted). There also exists "regression" version of this |
//| function. |
//| This subroutine adds dense dataset to the internal storage of the|
//| builder object. Specifying your dataset in the dense format means|
//| that the dense version of the KNN construction algorithm will be |
//| invoked. |
//| INPUT PARAMETERS: |
//| S - KNN builder object |
//| XY - array[NPoints, NVars + 1] (note: actual size can be|
//| larger, only leading part is used anyway), dataset:|
//| * first NVars elements of each row store values of |
//| the independent variables |
//| * next element stores class index, in [0, NClasses)|
//| NPoints - number of rows in the dataset, NPoints >= 1 |
//| NVars - number of independent variables, NVars >= 1 |
//| NClasses - number of classes, NClasses >= 2 |
//| OUTPUT PARAMETERS: |
//| S - KNN builder |
//+------------------------------------------------------------------+
void CAlglib::KNNBuilderSetDatasetCLS(CKNNBuilder &s,CMatrixDouble &xy,
int npoints,int nvars,int nclasses)
{
//--- delegate dense classification dataset setup to the CKNN core
CKNN::KNNBuilderSetDatasetCLS(s,xy,npoints,nvars,nclasses);
}
//+------------------------------------------------------------------+
//| This function sets norm type used for neighbor search. |
//| INPUT PARAMETERS: |
//| S - decision forest builder object |
//| NormType - norm type: |
//| * 0 inf-norm |
//| * 1 1-norm |
//| * 2 Euclidean norm(default) |
//| OUTPUT PARAMETERS: |
//| S - decision forest builder |
//+------------------------------------------------------------------+
void CAlglib::KNNBuilderSetNorm(CKNNBuilder &s,int nrmtype)
{
//--- delegate norm-type selection (0=inf, 1=1-norm, 2=Euclidean) to CKNN
CKNN::KNNBuilderSetNorm(s,nrmtype);
}
//+------------------------------------------------------------------+
//| This subroutine builds KNN model according to current settings, |
//| using dataset internally stored in the builder object. |
//| The model being built performs inference using Eps-approximate |
//| K nearest neighbors search algorithm, with: |
//| *K=1, Eps=0 corresponding to the "nearest neighbor |
//| algorithm" |
//| *K>1, Eps=0 corresponding to the "K nearest neighbors |
//| algorithm" |
//| *K>=1, Eps>0 corresponding to "approximate nearest |
//| neighbors algorithm" |
//| An approximate KNN is a good option for high-dimensional datasets|
//| (exact KNN works slowly when dimensions count grows). |
//| An ALGLIB implementation of kd-trees is used to perform k-nn |
//| searches. |
//| INPUT PARAMETERS: |
//| S - KNN builder object |
//| K - number of neighbors to search for, K >= 1 |
//| Eps - approximation factor: |
//| * Eps = 0 means that exact kNN search is performed |
//| * Eps > 0 means that(1 + Eps) - approximate search |
//| is performed |
//| OUTPUT PARAMETERS: |
//| Model - KNN model |
//| Rep - report |
//+------------------------------------------------------------------+
void CAlglib::KNNBuilderBuildKNNModel(CKNNBuilder &s,int k,double eps,
CKNNModel &model,CKNNReport &rep)
{
//--- delegate model construction to the CKNN computational core
CKNN::KNNBuilderBuildKNNModel(s,k,eps,model,rep);
}
//+------------------------------------------------------------------+
//| Changing search settings of KNN model. |
//| K and EPS parameters of KNN(AKNN) search are specified during |
//| model construction. However, plain KNN algorithm with Euclidean |
//| distance allows you to change them at any moment. |
//| NOTE: future versions of KNN model may support advanced versions |
//| of KNN, such as NCA or LMNN. It is possible that such |
//| algorithms won't allow you to change search settings on the|
//| fly. If you call this function for an algorithm which does |
//| not support on-the-fly changes, it will throw an exception|
//| INPUT PARAMETERS: |
//| Model - KNN model |
//| K - K >= 1, neighbors count |
//| EPS - accuracy of the EPS-approximate NN search. Set |
//| to 0.0, if you want to perform "classic" KNN |
//| search. Specify larger values if you need to |
//| speed-up high-dimensional KNN queries. |
//| OUTPUT PARAMETERS: |
//| nothing on success, exception on failure |
//+------------------------------------------------------------------+
void CAlglib::KNNRewriteKEps(CKNNModel &model,int k,double eps)
{
//--- delegate in-place update of search settings to the CKNN core
CKNN::KNNRewriteKEps(model,k,eps);
}
//+------------------------------------------------------------------+
//| Inference using KNN model. |
//| See also KNNProcess0(), KNNProcessI() and KNNClassify() for |
//| options with a bit more convenient interface. |
//| INPUT PARAMETERS: |
//| Model - KNN model |
//| X - input vector, array[0..NVars - 1]. |
//| Y - possible preallocated buffer. Reused if long enough|
//| OUTPUT PARAMETERS: |
//| Y - result. Regression estimate when solving regression|
//| task, vector of posterior probabilities for |
//| classification task. |
//+------------------------------------------------------------------+
void CAlglib::KNNProcess(CKNNModel &model,CRowDouble &x,CRowDouble &y)
{
//--- delegate inference to the CKNN computational core
CKNN::KNNProcess(model,x,y);
}
//+------------------------------------------------------------------+
//| This function returns first component of the inferred vector |
//| (i.e.one with index #0). |
//| It is a convenience wrapper for KNNProcess() intended for either:|
//| * 1 - dimensional regression problems |
//| * 2 - class classification problems |
//| In the former case this function returns inference result as |
//| scalar, which is definitely more convenient that wrapping it as |
//| vector. In the latter case it returns probability of object |
//| belonging to class #0. |
//| If you call it for anything different from two cases above, it |
//| will work as defined, i.e. return y[0], although it is of less |
//| use in such cases. |
//| INPUT PARAMETERS: |
//| Model - KNN model |
//| X - input vector, array[0..NVars - 1]. |
//| RESULT: |
//| Y[0] |
//+------------------------------------------------------------------+
double CAlglib::KNNProcess0(CKNNModel &model,CRowDouble &x)
{
//--- delegate to the CKNN core; returns first component of inference result
return(CKNN::KNNProcess0(model,x));
}
//+------------------------------------------------------------------+
//| This function returns most probable class number for an input X.|
//| It is same as calling KNNProcess(model, x, y), then determining |
//| i = argmax(y[i]) and returning i. |
//| A class number in [0, NOut) range is returned for classification |
//| problems, -1 is returned when this function is called for |
//| regression problems. |
//| INPUT PARAMETERS: |
//| Model - KNN model |
//| X - input vector, array[0..NVars - 1]. |
//| RESULT: |
//| class number, -1 for regression tasks |
//+------------------------------------------------------------------+
int CAlglib::KNNClassify(CKNNModel &model,CRowDouble &x)
{
//--- delegate to the CKNN core; returns class index, -1 for regression
return(CKNN::KNNClassify(model,x));
}
//+------------------------------------------------------------------+
//| 'interactive' variant of KNNProcess() which support constructs |
//| like "y = KNNProcessI(model,x)" and interactive mode of the |
//| interpreter. |
//| This function allocates new array on each call, so it is |
//| significantly slower than its 'non-interactive' counterpart, but |
//| it is more convenient when you call it from command line. |
//+------------------------------------------------------------------+
void CAlglib::KNNProcessI(CKNNModel &model,CRowDouble &x,CRowDouble &y)
{
//--- reset the output vector before it is filled by the computational core
y.Resize(0);
//--- function call: delegate inference to the CKNN implementation
CKNN::KNNProcessI(model,x,y);
}
//+------------------------------------------------------------------+
//| Thread-safe processing using external buffer for temporaries. |
//| This function is thread-safe(i.e. you can use same KNN model from|
//| multiple threads) as long as you use different buffer objects for|
//| different threads. |
//| INPUT PARAMETERS: |
//| Model - KNN model |
//| Buf - buffer object, must be allocated specifically for |
//| this model with KNNCreateBuffer(). |
//| X - input vector, array[NVars] |
//| OUTPUT PARAMETERS: |
//| Y - result, array[NOut]. Regression estimate when |
//| solving regression task, vector of posterior |
//| probabilities for a classification task. |
//+------------------------------------------------------------------+
void CAlglib::KNNTsProcess(CKNNModel &model,CKNNBuffer &buf,
CRowDouble &x,CRowDouble &y)
{
//--- delegate buffered (thread-safe) inference to the CKNN core
CKNN::KNNTsProcess(model,buf,x,y);
}
//+------------------------------------------------------------------+
//| Relative classification error on the test set |
//| INPUT PARAMETERS: |
//| Model - KNN model |
//| XY - test set |
//| NPoints - test set size |
//| RESULT: |
//| percent of incorrectly classified cases. |
//| Zero if model solves regression task. |
//| NOTE: if you need several different kinds of error metrics, it is|
//| better to use KNNAllErrors() which computes all error |
//| metric with just one pass over dataset. |
//+------------------------------------------------------------------+
double CAlglib::KNNRelClsError(CKNNModel &model,CMatrixDouble &xy,
int npoints)
{
//--- delegate error computation to the CKNN computational core
return(CKNN::KNNRelClsError(model,xy,npoints));
}
//+------------------------------------------------------------------+
//| Average cross-entropy (in bits per element) on the test set |
//| INPUT PARAMETERS: |
//| Model - KNN model |
//| XY - test set |
//| NPoints - test set size |
//| RESULT: |
//| CrossEntropy / NPoints. |
//| Zero if model solves regression task. |
//| NOTE: the cross-entropy metric is too unstable when used |
//| to evaluate KNN models (such models can report exactly |
//| zero probabilities), so we do not recommend using it. |
//| NOTE: if you need several different kinds of error metrics, it |
//| is better to use KNNAllErrors() which computes all error |
//| metric with just one pass over dataset. |
//+------------------------------------------------------------------+
double CAlglib::KNNAvgCE(CKNNModel &model,CMatrixDouble &xy,int npoints)
{
//--- delegate cross-entropy computation to the CKNN computational core
return(CKNN::KNNAvgCE(model,xy,npoints));
}
//+------------------------------------------------------------------+
//| RMS error on the test set. |
//| Its meaning for regression task is obvious. As for classification|
//| problems, RMS error means error when estimating posterior |
//| probabilities. |
//| INPUT PARAMETERS: |
//| Model - KNN model |
//| XY - test set |
//| NPoints - test set size |
//| RESULT: |
//| root mean square error. |
//| NOTE: if you need several different kinds of error metrics, it |
//| is better to use KNNAllErrors() which computes all error |
//| metric with just one pass over dataset. |
//+------------------------------------------------------------------+
double CAlglib::KNNRMSError(CKNNModel &model,CMatrixDouble &xy,
int npoints)
{
//--- delegate RMS error computation to the CKNN computational core
return(CKNN::KNNRMSError(model,xy,npoints));
}
//+------------------------------------------------------------------+
//| Average error on the test set |
//| Its meaning for regression task is obvious. As for classification|
//| problems, average error means error when estimating posterior |
//| probabilities. |
//| INPUT PARAMETERS: |
//| Model - KNN model |
//| XY - test set |
//| NPoints - test set size |
//| RESULT: |
//| average error |
//| NOTE: if you need several different kinds of error metrics, it |
//| is better to use KNNAllErrors() which computes all error |
//| metric with just one pass over dataset. |
//+------------------------------------------------------------------+
double CAlglib::KNNAvgError(CKNNModel &model,CMatrixDouble &xy,int npoints)
{
//--- delegate average error computation to the CKNN computational core
return(CKNN::KNNAvgError(model,xy,npoints));
}
//+------------------------------------------------------------------+
//| Average relative error on the test set |
//| Its meaning for regression task is obvious. As for classification|
//| problems, average relative error means error when estimating |
//| posterior probabilities. |
//| INPUT PARAMETERS: |
//| Model - KNN model |
//| XY - test set |
//| NPoints - test set size |
//| RESULT: |
//| average relative error |
//| NOTE: if you need several different kinds of error metrics, it |
//| is better to use KNNAllErrors() which computes all error |
//| metric with just one pass over dataset. |
//+------------------------------------------------------------------+
double CAlglib::KNNAvgRelError(CKNNModel &model,CMatrixDouble &xy,int npoints)
{
//--- delegate average relative error computation to the CKNN core
return(CKNN::KNNAvgRelError(model,xy,npoints));
}
//+------------------------------------------------------------------+
//| Calculates all kinds of errors for the model in one call. |
//| INPUT PARAMETERS: |
//| Model - KNN model |
//| XY - test set: |
//| * one row per point |
//| * first NVars columns store independent variables |
//| * depending on problem type: |
//| * next column stores class number |
//| in [0, NClasses) - for classification |
//| problems |
//| * next NOut columns store dependent |
//| variables - for regression problems |
//| NPoints - test set size, NPoints >= 0 |
//| OUTPUT PARAMETERS: |
//| Rep - following fields are loaded with errors for both |
//| regression and classification models: |
//| * rep.RMSError - RMS error for the output |
//| * rep.AvgError - average error |
//| * rep.AvgRelError - average relative error |
//| following fields are set only for classification |
//| models, zero for regression ones: |
//| * relclserror - relative classification error, |
//| in [0, 1] |
//| * avgce - average cross-entropy in bits per |
//| dataset entry |
//| NOTE: the cross-entropy metric is too unstable when used to |
//| evaluate KNN models (such models can report exactly zero |
//| probabilities), so we do not recommend using it. |
//+------------------------------------------------------------------+
void CAlglib::KNNAllErrors(CKNNModel &model,CMatrixDouble &xy,
int npoints,CKNNReport &rep)
{
//--- delegate single-pass computation of all error metrics to CKNN
CKNN::KNNAllErrors(model,xy,npoints,rep);
}
//+------------------------------------------------------------------+
//| 1-dimensional complex FFT. |
//| Array size N may be arbitrary number (composite or prime). |
//| Composite N's are handled with cache-oblivious variation of a |
//| Cooley-Tukey algorithm. Small prime-factors are transformed using|
//| hard coded codelets (similar to FFTW codelets, but without |
//| low-level optimization), large prime-factors are handled with |
//| Bluestein's algorithm. |
//| Fastest transforms are for smooth N's (prime factors are 2, 3, |
//| 5 only), most fast for powers of 2. When N have prime factors |
//| larger than these, but orders of magnitude smaller than N, |
//| computations will be about 4 times slower than for nearby highly |
//| composite N's. When N itself is prime, speed will be 6 times |
//| lower. |
//| Algorithm has O(N*logN) complexity for any N (composite or |
//| prime). |
//| INPUT PARAMETERS |
//| A - array[0..N-1] - complex function to be transformed |
//| N - problem size |
//| OUTPUT PARAMETERS |
//| A - DFT of a input array, array[0..N-1] |
//| A_out[j] = SUM(A_in[k]*exp(-2*pi*sqrt(-1)*j*k/N), |
//| k = 0..N-1) |
//+------------------------------------------------------------------+
void CAlglib::FFTC1D(complex &a[],const int n)
{
//--- delegate the forward complex FFT to the computational core
CFastFourierTransform::FFTC1D(a,n);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::FFTC1D(CRowComplex &a,const int n)
{
//--- delegate the forward complex FFT to the computational core
CFastFourierTransform::FFTC1D(a,n);
}
//+------------------------------------------------------------------+
//| 1-dimensional complex FFT. |
//| Array size N may be arbitrary number (composite or prime). |
//| Composite N's are handled with cache-oblivious variation of a |
//| Cooley-Tukey algorithm. Small prime-factors are transformed using|
//| hard coded codelets (similar to FFTW codelets, but without |
//| low-level optimization), large prime-factors are handled with |
//| Bluestein's algorithm. |
//| Fastest transforms are for smooth N's (prime factors are 2, 3, |
//| 5 only), most fast for powers of 2. When N have prime factors |
//| larger than these, but orders of magnitude smaller than N, |
//| computations will be about 4 times slower than for nearby highly |
//| composite N's. When N itself is prime, speed will be 6 times |
//| lower. |
//| Algorithm has O(N*logN) complexity for any N (composite or |
//| prime). |
//| INPUT PARAMETERS |
//| A - array[0..N-1] - complex function to be transformed |
//| N - problem size |
//| OUTPUT PARAMETERS |
//| A - DFT of a input array, array[0..N-1] |
//| A_out[j] = SUM(A_in[k]*exp(-2*pi*sqrt(-1)*j*k/N), |
//| k = 0..N-1) |
//+------------------------------------------------------------------+
void CAlglib::FFTC1D(complex &a[])
{
//--- transform length is derived directly from the array size
CFastFourierTransform::FFTC1D(a,CAp::Len(a));
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::FFTC1D(CRowComplex &a)
{
//--- transform length is derived directly from the array size
CFastFourierTransform::FFTC1D(a,CAp::Len(a));
}
//+------------------------------------------------------------------+
//| 1-dimensional complex inverse FFT. |
//| Array size N may be arbitrary number (composite or prime). |
//| Algorithm has O(N*logN) complexity for any N (composite or prime)|
//| See FFTC1D() description for more information about algorithm |
//| performance. |
//| INPUT PARAMETERS |
//| A - array[0..N-1] - complex array to be transformed |
//| N - problem size |
//| OUTPUT PARAMETERS |
//| A - inverse DFT of a input array, array[0..N-1] |
//| A_out[j] = SUM(A_in[k]/N*exp(+2*pi*sqrt(-1)*j*k/N), |
//| k = 0..N-1) |
//+------------------------------------------------------------------+
void CAlglib::FFTC1DInv(complex &a[],const int n)
{
//--- delegate the inverse complex FFT to the computational core
CFastFourierTransform::FFTC1DInv(a,n);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::FFTC1DInv(CRowComplex &a,const int n)
{
//--- delegate the inverse complex FFT to the computational core
CFastFourierTransform::FFTC1DInv(a,n);
}
//+------------------------------------------------------------------+
//| 1-dimensional complex inverse FFT. |
//| Array size N may be arbitrary number (composite or prime). |
//| Algorithm has O(N*logN) complexity for any N (composite or prime)|
//| See FFTC1D() description for more information about algorithm |
//| performance. |
//| INPUT PARAMETERS |
//| A - array[0..N-1] - complex array to be transformed |
//| N - problem size |
//| OUTPUT PARAMETERS |
//| A - inverse DFT of a input array, array[0..N-1] |
//| A_out[j] = SUM(A_in[k]/N*exp(+2*pi*sqrt(-1)*j*k/N), |
//| k = 0..N-1) |
//+------------------------------------------------------------------+
void CAlglib::FFTC1DInv(complex &a[])
{
//--- transform length is derived directly from the array size
CFastFourierTransform::FFTC1DInv(a,CAp::Len(a));
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::FFTC1DInv(CRowComplex &a)
{
//--- transform length is derived directly from the array size
CFastFourierTransform::FFTC1DInv(a,CAp::Len(a));
}
//+------------------------------------------------------------------+
//| 1-dimensional real FFT. |
//| Algorithm has O(N*logN) complexity for any N (composite or |
//| prime). |
//| INPUT PARAMETERS |
//| A - array[0..N-1] - real function to be transformed |
//| N - problem size |
//| OUTPUT PARAMETERS |
//| F - DFT of a input array, array[0..N-1] |
//| F[j] = SUM(A[k]*exp(-2*pi*sqrt(-1)*j*k/N), |
//| k = 0..N-1) |
//| NOTE: |
//| F[] satisfies symmetry property F[k] = conj(F[N-k]), so just |
//| one half of array is usually needed. But for convenience |
//| subroutine returns full complex array (with frequencies above |
//| N/2), so its result may be used by other FFT-related subroutines.|
//+------------------------------------------------------------------+
void CAlglib::FFTR1D(double &a[],const int n,complex &f[])
{
//--- delegate the forward real FFT to the computational core
CFastFourierTransform::FFTR1D(a,n,f);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::FFTR1D(CRowDouble &a,const int n,CRowComplex &f)
{
//--- delegate the forward real FFT to the computational core
CFastFourierTransform::FFTR1D(a,n,f);
}
//+------------------------------------------------------------------+
//| 1-dimensional real FFT. |
//| Algorithm has O(N*logN) complexity for any N (composite or |
//| prime). |
//| INPUT PARAMETERS |
//| A - array[0..N-1] - real function to be transformed |
//| N - problem size |
//| OUTPUT PARAMETERS |
//| F - DFT of a input array, array[0..N-1] |
//| F[j] = SUM(A[k]*exp(-2*pi*sqrt(-1)*j*k/N), |
//| k = 0..N-1) |
//| NOTE: |
//| F[] satisfies symmetry property F[k] = conj(F[N-k]), so just |
//| one half of array is usually needed. But for convenience |
//| subroutine returns full complex array (with frequencies above |
//| N/2), so its result may be used by other FFT-related subroutines.|
//+------------------------------------------------------------------+
void CAlglib::FFTR1D(double &a[],complex &f[])
{
//--- transform length is derived directly from the array size
CFastFourierTransform::FFTR1D(a,CAp::Len(a),f);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::FFTR1D(CRowDouble &a,CRowComplex &f)
{
//--- transform length is derived directly from the array size
CFastFourierTransform::FFTR1D(a,CAp::Len(a),f);
}
//+------------------------------------------------------------------+
//| 1-dimensional real inverse FFT. |
//| Algorithm has O(N*logN) complexity for any N (composite or |
//| prime). |
//| INPUT PARAMETERS |
//| F - array[0..floor(N/2)] - frequencies from forward real |
//| FFT |
//| N - problem size |
//| OUTPUT PARAMETERS |
//| A - inverse DFT of a input array, array[0..N-1] |
//| NOTE: |
//| F[] should satisfy symmetry property F[k] = conj(F[N-k]), |
//| so just one half of frequencies array is needed - elements from 0|
//| to floor(N/2). F[0] is ALWAYS real. If N is even F[floor(N/2)] is|
//| real too. If N is odd, then F[floor(N/2)] has no special |
//| properties. |
//| Relying on properties noted above, FFTR1DInv subroutine uses only|
//| elements from 0th to floor(N/2)-th. It ignores imaginary part of |
//| F[0], and in case N is even it ignores imaginary part of |
//| F[floor(N/2)] too. |
//| When you call this function using full arguments list - |
//| "FFTR1DInv(F,N,A)" |
//| - you can pass either the frequencies array with N elements or   |
//| reduced array with roughly N/2 elements - subroutine will |
//| successfully transform both. |
//| If you call this function using reduced arguments list - |
//| "FFTR1DInv(F,A)" - you must pass FULL array with N elements |
//| (although higher N/2 are still not used) because array size is |
//| used to automatically determine FFT length |
//+------------------------------------------------------------------+
void CAlglib::FFTR1DInv(complex &f[],const int n,double &a[])
{
//--- inverse real FFT: delegate to the low-level transform implementation
CFastFourierTransform::FFTR1DInv(f,n,a);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::FFTR1DInv(CRowComplex &f,const int n,CRowDouble &a)
{
//--- inverse real FFT (row-container overload): delegate to the implementation
CFastFourierTransform::FFTR1DInv(f,n,a);
}
//+------------------------------------------------------------------+
//| 1-dimensional real inverse FFT. |
//| Algorithm has O(N*logN) complexity for any N (composite or |
//| prime). |
//| INPUT PARAMETERS |
//| F - array[0..floor(N/2)] - frequencies from forward real |
//| FFT |
//| N - problem size |
//| OUTPUT PARAMETERS |
//| A - inverse DFT of a input array, array[0..N-1] |
//| NOTE: |
//| F[] should satisfy symmetry property F[k] = conj(F[N-k]), |
//| so just one half of frequencies array is needed - elements from 0|
//| to floor(N/2). F[0] is ALWAYS real. If N is even F[floor(N/2)] is|
//| real too. If N is odd, then F[floor(N/2)] has no special |
//| properties. |
//| Relying on properties noted above, FFTR1DInv subroutine uses only|
//| elements from 0th to floor(N/2)-th. It ignores imaginary part of |
//| F[0], and in case N is even it ignores imaginary part of |
//| F[floor(N/2)] too. |
//| When you call this function using full arguments list - |
//| "FFTR1DInv(F,N,A)" |
//| - you can pass either the frequencies array with N elements or   |
//| reduced array with roughly N/2 elements - subroutine will |
//| successfully transform both. |
//| If you call this function using reduced arguments list - |
//| "FFTR1DInv(F,A)" - you must pass FULL array with N elements |
//| (although higher N/2 are still not used) because array size is |
//| used to automatically determine FFT length |
//+------------------------------------------------------------------+
void CAlglib::FFTR1DInv(complex &f[],double &a[])
{
//--- reduced-argument form: N is derived from the frequencies array size,
//--- so a FULL array with N elements must be passed (see function comment)
CFastFourierTransform::FFTR1DInv(f,CAp::Len(f),a);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::FFTR1DInv(CRowComplex &f,CRowDouble &a)
{
//--- reduced-argument form: N is derived from the frequencies array size,
//--- so a FULL array with N elements must be passed (see function comment)
CFastFourierTransform::FFTR1DInv(f,CAp::Len(f),a);
}
//+------------------------------------------------------------------+
//| 1-dimensional complex convolution. |
//| For given A/B returns conv(A,B) (non-circular). Subroutine can |
//| automatically choose between three implementations: |
//| straightforward O(M*N) formula for very small N (or M),         |
//| overlap-add algorithm for cases where max(M,N) is significantly |
//| larger than min(M,N), but O(M*N) algorithm is too slow, and     |
//| general FFT-based formula for cases where the two previous      |
//| algorithms are too slow.                                        |
//| Algorithm has max(M,N)*log(max(M,N)) complexity for any M/N. |
//| INPUT PARAMETERS |
//| A - array[0..M-1] - complex function to be transformed |
//| M - problem size |
//| B - array[0..N-1] - complex function to be transformed |
//| N - problem size |
//| OUTPUT PARAMETERS |
//| R - convolution: A*B. array[0..N+M-2]. |
//| NOTE: |
//| It is assumed that A is zero at T<0, B is zero too. If one or|
//| both functions have non-zero values at negative T's, you can |
//| still use this subroutine - just shift its result |
//| correspondingly. |
//+------------------------------------------------------------------+
void CAlglib::ConvC1D(complex &a[],const int m,complex &b[],
const int n,complex &r[])
{
//--- complex non-circular convolution: delegate to the implementation
CConv::ConvC1D(a,m,b,n,r);
}
//+------------------------------------------------------------------+
//| 1-dimensional complex non-circular deconvolution (inverse of |
//| ConvC1D()). |
//| Algorithm has M*log(M)) complexity for any M (composite or prime)|
//| INPUT PARAMETERS |
//| A - array[0..M-1] - convolved signal, A = conv(R, B) |
//| M - convolved signal length |
//| B - array[0..N-1] - response |
//| N - response length, N<=M |
//| OUTPUT PARAMETERS |
//| R - deconvolved signal. array[0..M-N]. |
//| NOTE: |
//| deconvolution is unstable process and may result in division |
//| by zero (if your response function is degenerate, i.e. has zero |
//| Fourier coefficient). |
//| NOTE: |
//| It is assumed that A is zero at T<0, B is zero too. If one |
//| or both functions have non-zero values at negative T's, you can |
//| still use this subroutine - just shift its result correspondingly|
//+------------------------------------------------------------------+
void CAlglib::ConvC1DInv(complex &a[],const int m,complex &b[],
const int n,complex &r[])
{
//--- complex non-circular deconvolution: delegate to the implementation
CConv::ConvC1DInv(a,m,b,n,r);
}
//+------------------------------------------------------------------+
//| 1-dimensional circular complex convolution. |
//| For given S/R returns conv(S,R) (circular). Algorithm has |
//| linearithmic complexity for any M/N. |
//| IMPORTANT: normal convolution is commutative, i.e. it is |
//| symmetric - conv(A,B)=conv(B,A). Cyclic convolution IS NOT. One |
//| function - S - is a signal, periodic function, and another - R - |
//| is a response, non-periodic function with limited length. |
//| INPUT PARAMETERS |
//| S - array[0..M-1] - complex periodic signal |
//| M - problem size |
//| B - array[0..N-1] - complex non-periodic response |
//| N - problem size |
//| OUTPUT PARAMETERS |
//| R - convolution: A*B. array[0..M-1]. |
//| NOTE: |
//| It is assumed that B is zero at T<0. If it has non-zero |
//| values at negative T's, you can still use this subroutine - just |
//| shift its result correspondingly. |
//+------------------------------------------------------------------+
void CAlglib::ConvC1DCircular(complex &s[],const int m,complex &r[],
const int n,complex &c[])
{
//--- complex circular convolution of signal s with response r
CConv::ConvC1DCircular(s,m,r,n,c);
}
//+------------------------------------------------------------------+
//| 1-dimensional circular complex deconvolution (inverse of |
//| ConvC1DCircular()). |
//| Algorithm has M*log(M)) complexity for any M (composite or prime)|
//| INPUT PARAMETERS |
//| A - array[0..M-1] - convolved periodic signal, |
//| A = conv(R, B) |
//| M - convolved signal length |
//| B - array[0..N-1] - non-periodic response |
//| N - response length |
//| OUTPUT PARAMETERS |
//| R - deconvolved signal. array[0..M-1]. |
//| NOTE: |
//| deconvolution is unstable process and may result in division |
//| by zero (if your response function is degenerate, i.e. has zero |
//| Fourier coefficient). |
//| NOTE: |
//| It is assumed that B is zero at T<0. If it has non-zero |
//| values at negative T's, you can still use this subroutine - just |
//| shift its result correspondingly. |
//+------------------------------------------------------------------+
void CAlglib::ConvC1DCircularInv(complex &a[],const int m,complex &b[],
const int n,complex &r[])
{
//--- complex circular deconvolution: delegate to the implementation
CConv::ConvC1DCircularInv(a,m,b,n,r);
}
//+------------------------------------------------------------------+
//| 1-dimensional real convolution. |
//| Analogous to ConvC1D(), see ConvC1D() comments for more details. |
//| INPUT PARAMETERS |
//| A - array[0..M-1] - real function to be transformed |
//| M - problem size |
//| B - array[0..N-1] - real function to be transformed |
//| N - problem size |
//| OUTPUT PARAMETERS |
//| R - convolution: A*B. array[0..N+M-2]. |
//| NOTE: |
//| It is assumed that A is zero at T<0, B is zero too. If one |
//| or both functions have non-zero values at negative T's, you can |
//| still use this subroutine - just shift its result correspondingly|
//+------------------------------------------------------------------+
void CAlglib::ConvR1D(double &a[],const int m,double &b[],
const int n,double &r[])
{
//--- real non-circular convolution: delegate to the implementation
CConv::ConvR1D(a,m,b,n,r);
}
//+------------------------------------------------------------------+
//| 1-dimensional real deconvolution (inverse of ConvC1D()). |
//| Algorithm has M*log(M)) complexity for any M (composite or prime)|
//| INPUT PARAMETERS |
//| A - array[0..M-1] - convolved signal, A = conv(R, B) |
//| M - convolved signal length |
//| B - array[0..N-1] - response |
//| N - response length, N<=M |
//| OUTPUT PARAMETERS |
//| R - deconvolved signal. array[0..M-N]. |
//| NOTE: |
//| deconvolution is unstable process and may result in division |
//| by zero (if your response function is degenerate, i.e. has zero |
//| Fourier coefficient). |
//| NOTE: |
//| It is assumed that A is zero at T<0, B is zero too. If one or|
//| both functions have non-zero values at negative T's, you can |
//| still use this subroutine - just shift its result correspondingly|
//+------------------------------------------------------------------+
void CAlglib::ConvR1DInv(double &a[],const int m,double &b[],
const int n,double &r[])
{
//--- real non-circular deconvolution: delegate to the implementation
CConv::ConvR1DInv(a,m,b,n,r);
}
//+------------------------------------------------------------------+
//| 1-dimensional circular real convolution. |
//| Analogous to ConvC1DCircular(), see ConvC1DCircular() comments |
//| for more details. |
//| INPUT PARAMETERS |
//| S - array[0..M-1] - real signal |
//| M - problem size |
//| B - array[0..N-1] - real response |
//| N - problem size |
//| OUTPUT PARAMETERS |
//| R - convolution: A*B. array[0..M-1]. |
//| NOTE: |
//| It is assumed that B is zero at T<0. If it has non-zero |
//| values at negative T's, you can still use this subroutine - just |
//| shift its result correspondingly. |
//+------------------------------------------------------------------+
void CAlglib::ConvR1DCircular(double &s[],const int m,double &r[],
const int n,double &c[])
{
//--- real circular convolution of signal s with response r
CConv::ConvR1DCircular(s,m,r,n,c);
}
//+------------------------------------------------------------------+
//| 1-dimensional complex deconvolution (inverse of ConvC1D()). |
//| Algorithm has M*log(M)) complexity for any M (composite or prime)|
//| INPUT PARAMETERS |
//| A - array[0..M-1] - convolved signal, A = conv(R, B) |
//| M - convolved signal length |
//| B - array[0..N-1] - response |
//| N - response length |
//| OUTPUT PARAMETERS |
//| R - deconvolved signal. array[0..M-N]. |
//| NOTE: |
//| deconvolution is unstable process and may result in division |
//| by zero (if your response function is degenerate, i.e. has zero |
//| Fourier coefficient). |
//| NOTE: |
//| It is assumed that B is zero at T<0. If it has non-zero |
//| values at negative T's, you can still use this subroutine - just |
//| shift its result correspondingly. |
//+------------------------------------------------------------------+
void CAlglib::ConvR1DCircularInv(double &a[],const int m,double &b[],
const int n,double &r[])
{
//--- real circular deconvolution: delegate to the implementation
CConv::ConvR1DCircularInv(a,m,b,n,r);
}
//+------------------------------------------------------------------+
//| 1-dimensional complex cross-correlation. |
//| For given Pattern/Signal returns corr(Pattern,Signal) |
//| (non-circular). |
//| Correlation is calculated using reduction to convolution. |
//| Algorithm with max(N,N)*log(max(N,N)) complexity is used (see |
//| ConvC1D() for more info about performance). |
//| IMPORTANT: |
//| for historical reasons subroutine accepts its parameters in |
//| reversed order: CorrC1D(Signal, Pattern) = Pattern x Signal |
//| (using traditional definition of cross-correlation, denoting |
//| cross-correlation as "x"). |
//| INPUT PARAMETERS |
//| Signal - array[0..N-1] - complex function to be |
//| transformed, signal containing pattern |
//| N - problem size |
//| Pattern - array[0..M-1] - complex function to be |
//|                transformed, pattern to search within signal     |
//| M - problem size |
//| OUTPUT PARAMETERS |
//| R - cross-correlation, array[0..N+M-2]: |
//| * positive lags are stored in R[0..N-1], |
//| R[i] = sum(conj(pattern[j])*signal[i+j] |
//| * negative lags are stored in R[N..N+M-2], |
//| R[N+M-1-i] = sum(conj(pattern[j])*signal[-i+j] |
//| NOTE: |
//| It is assumed that pattern domain is [0..M-1]. If Pattern is |
//| non-zero on [-K..M-1], you can still use this subroutine, just |
//| shift result by K. |
//+------------------------------------------------------------------+
void CAlglib::CorrC1D(complex &signal[],const int n,complex &pattern[],
const int m,complex &r[])
{
//--- complex cross-correlation; note the (signal, pattern) argument order
CCorr::CorrC1D(signal,n,pattern,m,r);
}
//+------------------------------------------------------------------+
//| 1-dimensional circular complex cross-correlation. |
//| For given Pattern/Signal returns corr(Pattern,Signal) (circular).|
//| Algorithm has linearithmic complexity for any M/N. |
//| IMPORTANT: |
//| for historical reasons subroutine accepts its parameters in |
//| reversed order: CorrC1DCircular(Signal, Pattern) = Pattern x |
//| Signal (using traditional definition of cross-correlation, |
//| denoting cross-correlation as "x"). |
//| INPUT PARAMETERS |
//| Signal - array[0..N-1] - complex function to be |
//| transformed, periodic signal containing pattern |
//| N - problem size |
//| Pattern - array[0..M-1] - complex function to be |
//| transformed, non-periodic pattern to search |
//|                within signal                                    |
//| M - problem size |
//| OUTPUT PARAMETERS |
//| R - convolution: A*B. array[0..M-1]. |
//+------------------------------------------------------------------+
void CAlglib::CorrC1DCircular(complex &signal[],const int m,
complex &pattern[],const int n,
complex &c[])
{
//--- circular complex cross-correlation; note the (signal, pattern) order
CCorr::CorrC1DCircular(signal,m,pattern,n,c);
}
//+------------------------------------------------------------------+
//| 1-dimensional real cross-correlation. |
//| For given Pattern/Signal returns corr(Pattern,Signal) |
//| (non-circular). |
//| Correlation is calculated using reduction to convolution. |
//| Algorithm with max(N,N)*log(max(N,N)) complexity is used (see |
//| ConvC1D() for more info about performance). |
//| IMPORTANT: |
//| for historical reasons subroutine accepts its parameters in |
//| reversed order: CorrR1D(Signal, Pattern) = Pattern x Signal |
//| (using traditional definition of cross-correlation, denoting|
//| cross-correlation as "x"). |
//| INPUT PARAMETERS |
//| Signal - array[0..N-1] - real function to be transformed, |
//| signal containing pattern |
//| N - problem size |
//| Pattern - array[0..M-1] - real function to be transformed, |
//|                pattern to search within signal                  |
//| M - problem size |
//| OUTPUT PARAMETERS |
//| R - cross-correlation, array[0..N+M-2]: |
//| * positive lags are stored in R[0..N-1], |
//| R[i] = sum(pattern[j]*signal[i+j] |
//| * negative lags are stored in R[N..N+M-2], |
//| R[N+M-1-i] = sum(pattern[j]*signal[-i+j] |
//| NOTE: |
//| It is assumed that pattern domain is [0..M-1]. If Pattern is |
//| non-zero on [-K..M-1], you can still use this subroutine, just |
//| shift result by K. |
//+------------------------------------------------------------------+
void CAlglib::CorrR1D(double &signal[],const int n,double &pattern[],
const int m,double &r[])
{
//--- real cross-correlation; note the (signal, pattern) argument order
CCorr::CorrR1D(signal,n,pattern,m,r);
}
//+------------------------------------------------------------------+
//| 1-dimensional circular real cross-correlation. |
//| For given Pattern/Signal returns corr(Pattern,Signal) (circular).|
//| Algorithm has linearithmic complexity for any M/N. |
//| IMPORTANT: |
//| for historical reasons subroutine accepts its parameters in |
//| reversed order: CorrR1DCircular(Signal, Pattern) = Pattern x |
//| Signal (using traditional definition of cross-correlation, |
//| denoting cross-correlation as "x"). |
//| INPUT PARAMETERS |
//| Signal - array[0..N-1] - real function to be transformed, |
//| periodic signal containing pattern |
//| N - problem size |
//| Pattern - array[0..M-1] - real function to be transformed, |
//|                non-periodic pattern to search within signal     |
//| M - problem size |
//| OUTPUT PARAMETERS |
//| R - convolution: A*B. array[0..M-1]. |
//+------------------------------------------------------------------+
void CAlglib::CorrR1DCircular(double &signal[],const int m,
double &pattern[],const int n,
double &c[])
{
//--- circular real cross-correlation; note the (signal, pattern) order
CCorr::CorrR1DCircular(signal,m,pattern,n,c);
}
//+------------------------------------------------------------------+
//| 1-dimensional Fast Hartley Transform. |
//| Algorithm has O(N*logN) complexity for any N (composite or prime)|
//| INPUT PARAMETERS |
//| A - array[0..N-1] - real function to be transformed |
//| N - problem size |
//| OUTPUT PARAMETERS |
//| A - FHT of a input array, array[0..N-1], |
//| A_out[k]=sum(A_in[j]*(cos(2*pi*j*k/N)+sin(2*pi*j*k/N)),|
//| j=0..N-1) |
//+------------------------------------------------------------------+
void CAlglib::FHTR1D(double &a[],const int n)
{
//--- forward fast Hartley transform, in-place on array a
CFastHartleyTransform::FHTR1D(a,n);
}
//+------------------------------------------------------------------+
//| 1-dimensional inverse FHT. |
//| Algorithm has O(N*logN) complexity for any N (composite or prime)|
//| INPUT PARAMETERS |
//|     A   -   array[0..N-1] - real array to be transformed        |
//| N - problem size |
//| OUTPUT PARAMETERS |
//| A - inverse FHT of a input array, array[0..N-1] |
//+------------------------------------------------------------------+
void CAlglib::FHTR1DInv(double &a[],const int n)
{
//--- inverse fast Hartley transform, in-place on array a
CFastHartleyTransform::FHTR1DInv(a,n);
}
//+------------------------------------------------------------------+
//| Computation of nodes and weights for a Gauss quadrature formula |
//| The algorithm generates the N-point Gauss quadrature formula |
//| with weight function given by coefficients alpha and beta of a |
//| recurrence relation which generates a system of orthogonal |
//| polynomials: |
//| P-1(x) = 0 |
//| P0(x) = 1 |
//| Pn+1(x) = (x-alpha(n))*Pn(x) - beta(n)*Pn-1(x) |
//| and zeroth moment Mu0 |
//| Mu0 = integral(W(x)dx,a,b) |
//| INPUT PARAMETERS: |
//|     Alpha   -   array[0..N-1], alpha coefficients               |
//|     Beta    -   array[0..N-1], beta coefficients                |
//|                 Zero-indexed element is not used and may be     |
//|                 arbitrary. Beta[I]>0.                           |
//|     Mu0     -   zeroth moment of the weight function.           |
//|     N       -   number of nodes of the quadrature formula, N>=1 |
//| OUTPUT PARAMETERS: |
//| Info - error code: |
//| * -3 internal eigenproblem solver hasn't |
//| converged |
//| * -2 Beta[i]<=0 |
//| * -1 incorrect N was passed |
//| * 1 OK |
//| X - array[0..N-1] - array of quadrature nodes, |
//| in ascending order. |
//| W - array[0..N-1] - array of quadrature weights. |
//+------------------------------------------------------------------+
void CAlglib::GQGenerateRec(double &alpha[],double &beta[],
const double mu0,const int n,
int &info,double &x[],double &w[])
{
//--- reset the output status code before delegating
info=0;
//--- generate Gauss quadrature nodes/weights from the recurrence
CGaussQ::GQGenerateRec(alpha,beta,mu0,n,info,x,w);
}
//+------------------------------------------------------------------+
//| Computation of nodes and weights for a Gauss-Lobatto quadrature |
//| formula |
//| The algorithm generates the N-point Gauss-Lobatto quadrature |
//| formula with weight function given by coefficients alpha and beta|
//| of a recurrence which generates a system of orthogonal |
//| polynomials. |
//| P-1(x) = 0 |
//| P0(x) = 1 |
//| Pn+1(x) = (x-alpha(n))*Pn(x) - beta(n)*Pn-1(x) |
//| and zeroth moment Mu0 |
//| Mu0 = integral(W(x)dx,a,b) |
//| INPUT PARAMETERS: |
//|     Alpha   -   array[0..N-2], alpha coefficients               |
//|     Beta    -   array[0..N-2], beta coefficients.               |
//|                 Zero-indexed element is not used, may be        |
//|                 arbitrary. Beta[I]>0                            |
//|     Mu0     -   zeroth moment of the weighting function.        |
//|     A       -   left boundary of the integration interval.      |
//|     B       -   right boundary of the integration interval.     |
//|     N       -   number of nodes of the quadrature formula, N>=3 |
//|                 (including the left and right boundary nodes).  |
//| OUTPUT PARAMETERS: |
//| Info - error code: |
//| * -3 internal eigenproblem solver hasn't |
//| converged |
//| * -2 Beta[i]<=0 |
//| * -1 incorrect N was passed |
//| * 1 OK |
//| X - array[0..N-1] - array of quadrature nodes, |
//| in ascending order. |
//| W - array[0..N-1] - array of quadrature weights. |
//+------------------------------------------------------------------+
void CAlglib::GQGenerateGaussLobattoRec(double &alpha[],double &beta[],
const double mu0,const double a,
const double b,const int n,
int &info,double &x[],double &w[])
{
//--- reset the output status code before delegating
info=0;
//--- generate Gauss-Lobatto quadrature nodes/weights from the recurrence
CGaussQ::GQGenerateGaussLobattoRec(alpha,beta,mu0,a,b,n,info,x,w);
}
//+------------------------------------------------------------------+
//| Computation of nodes and weights for a Gauss-Radau quadrature |
//| formula |
//| The algorithm generates the N-point Gauss-Radau quadrature |
//| formula with weight function given by the coefficients alpha and |
//| beta of a recurrence which generates a system of orthogonal |
//| polynomials. |
//| P-1(x) = 0 |
//| P0(x) = 1 |
//| Pn+1(x) = (x-alpha(n))*Pn(x) - beta(n)*Pn-1(x) |
//| and zeroth moment Mu0 |
//| Mu0 = integral(W(x)dx,a,b) |
//| INPUT PARAMETERS: |
//|     Alpha   -   array[0..N-2], alpha coefficients.              |
//|     Beta    -   array[0..N-1], beta coefficients                |
//|                 Zero-indexed element is not used.               |
//|                 Beta[I]>0                                       |
//|     Mu0     -   zeroth moment of the weighting function.        |
//|     A       -   left boundary of the integration interval.      |
//|     N       -   number of nodes of the quadrature formula, N>=2 |
//|                 (including the left boundary node).             |
//| OUTPUT PARAMETERS: |
//| Info - error code: |
//| * -3 internal eigenproblem solver hasn't |
//| converged |
//| * -2 Beta[i]<=0 |
//| * -1 incorrect N was passed |
//| * 1 OK |
//| X - array[0..N-1] - array of quadrature nodes, |
//| in ascending order. |
//| W - array[0..N-1] - array of quadrature weights. |
//+------------------------------------------------------------------+
void CAlglib::GQGenerateGaussRadauRec(double &alpha[],double &beta[],
const double mu0,const double a,
const int n,int &info,
double &x[],double &w[])
{
//--- reset the output status code before delegating
info=0;
//--- generate Gauss-Radau quadrature nodes/weights from the recurrence
CGaussQ::GQGenerateGaussRadauRec(alpha,beta,mu0,a,n,info,x,w);
}
//+------------------------------------------------------------------+
//| Returns nodes/weights for Gauss-Legendre quadrature on [-1,1] |
//| with N nodes. |
//| INPUT PARAMETERS: |
//| N - number of nodes, >=1 |
//| OUTPUT PARAMETERS: |
//| Info - error code: |
//| * -4 an error was detected when |
//| calculating weights/nodes. N is too |
//| large to obtain weights/nodes with |
//| high enough accuracy. Try to use |
//| multiple precision version. |
//| * -3 internal eigenproblem solver hasn't |
//| converged |
//| * -1 incorrect N was passed |
//| * +1 OK |
//| X - array[0..N-1] - array of quadrature nodes, |
//| in ascending order. |
//| W - array[0..N-1] - array of quadrature weights. |
//+------------------------------------------------------------------+
void CAlglib::GQGenerateGaussLegendre(const int n,int &info,
double &x[],double &w[])
{
//--- reset the output status code before delegating
info=0;
//--- generate Gauss-Legendre nodes/weights on [-1,1]
CGaussQ::GQGenerateGaussLegendre(n,info,x,w);
}
//+------------------------------------------------------------------+
//| Returns nodes/weights for Gauss-Jacobi quadrature on [-1,1] |
//| with weight function W(x)=Power(1-x,Alpha)*Power(1+x,Beta). |
//| INPUT PARAMETERS: |
//| N - number of nodes, >=1 |
//| Alpha - power-law coefficient, Alpha>-1 |
//| Beta - power-law coefficient, Beta>-1 |
//| OUTPUT PARAMETERS: |
//| Info - error code: |
//| * -4 an error was detected when |
//| calculating weights/nodes. Alpha or |
//| Beta are too close to -1 to obtain |
//| weights/nodes with high enough |
//| accuracy, or, may be, N is too large.|
//| Try to use multiple precision version|
//| * -3 internal eigenproblem solver hasn't |
//| converged |
//| * -1 incorrect N/Alpha/Beta was passed |
//| * +1 OK |
//| X - array[0..N-1] - array of quadrature nodes, |
//| in ascending order. |
//| W - array[0..N-1] - array of quadrature weights. |
//+------------------------------------------------------------------+
void CAlglib::GQGenerateGaussJacobi(const int n,const double alpha,
const double beta,int &info,
double &x[],double &w[])
{
//--- reset the output status code before delegating
info=0;
//--- generate Gauss-Jacobi nodes/weights for W(x)=(1-x)^Alpha*(1+x)^Beta
CGaussQ::GQGenerateGaussJacobi(n,alpha,beta,info,x,w);
}
//+------------------------------------------------------------------+
//| Returns nodes/weights for Gauss-Laguerre quadrature on [0,+inf) |
//| with weight function W(x)=Power(x,Alpha)*Exp(-x) |
//| INPUT PARAMETERS: |
//| N - number of nodes, >=1 |
//| Alpha - power-law coefficient, Alpha>-1 |
//| OUTPUT PARAMETERS: |
//| Info - error code: |
//| * -4 an error was detected when |
//| calculating weights/nodes. Alpha is |
//| too close to -1 to obtain |
//| weights/nodes with high enough |
//| accuracy or, may be, N is too large.|
//| Try to use multiple precision |
//| version. |
//| * -3 internal eigenproblem solver hasn't |
//| converged |
//| * -1 incorrect N/Alpha was passed |
//| * +1 OK |
//| X - array[0..N-1] - array of quadrature nodes, |
//| in ascending order. |
//| W - array[0..N-1] - array of quadrature weights. |
//+------------------------------------------------------------------+
void CAlglib::GQGenerateGaussLaguerre(const int n,const double alpha,
int &info,double &x[],double &w[])
{
//--- reset the output status code before delegating
info=0;
//--- generate Gauss-Laguerre nodes/weights for W(x)=x^Alpha*exp(-x)
CGaussQ::GQGenerateGaussLaguerre(n,alpha,info,x,w);
}
//+------------------------------------------------------------------+
//| Returns nodes/weights for Gauss-Hermite quadrature on |
//| (-inf,+inf) with weight function W(x)=Exp(-x*x) |
//| INPUT PARAMETERS: |
//| N - number of nodes, >=1 |
//| OUTPUT PARAMETERS: |
//| Info - error code: |
//| * -4 an error was detected when |
//| calculating weights/nodes. May be, N |
//| is too large. Try to use multiple |
//| precision version. |
//| * -3 internal eigenproblem solver hasn't |
//| converged |
//| * -1 incorrect N/Alpha was passed |
//| * +1 OK |
//| X - array[0..N-1] - array of quadrature nodes, |
//| in ascending order. |
//| W - array[0..N-1] - array of quadrature weights. |
//+------------------------------------------------------------------+
void CAlglib::GQGenerateGaussHermite(const int n,int &info,
double &x[],double &w[])
{
//--- reset the output status code before delegating
info=0;
//--- generate Gauss-Hermite nodes/weights for W(x)=exp(-x*x)
CGaussQ::GQGenerateGaussHermite(n,info,x,w);
}
//+------------------------------------------------------------------+
//| Computation of nodes and weights of a Gauss-Kronrod quadrature |
//| formula |
//| The algorithm generates the N-point Gauss-Kronrod quadrature |
//| formula with weight function given by coefficients alpha and beta|
//| of a recurrence relation which generates a system of orthogonal |
//| polynomials: |
//| P-1(x) = 0 |
//| P0(x) = 1 |
//| Pn+1(x) = (x-alpha(n))*Pn(x) - beta(n)*Pn-1(x) |
//| and zero moment Mu0 |
//| Mu0 = integral(W(x)dx,a,b) |
//| INPUT PARAMETERS: |
//|     Alpha   -   alpha coefficients, array[0..floor(3*K/2)].     |
//|     Beta    -   beta coefficients, array[0..ceil(3*K/2)].       |
//|                 Beta[0] is not used and may be arbitrary.       |
//|                 Beta[I]>0.                                      |
//|     Mu0     -   zeroth moment of the weight function.           |
//|     N       -   number of nodes of the Gauss-Kronrod            |
//| quadrature formula, |
//| N >= 3, |
//| N = 2*K+1. |
//| OUTPUT PARAMETERS: |
//| Info - error code: |
//| * -5 no real and positive Gauss-Kronrod |
//| formula can be created for such a |
//| weight function with a given number |
//| of nodes. |
//| * -4 N is too large, task may be ill |
//| conditioned - x[i]=x[i+1] found. |
//| * -3 internal eigenproblem solver hasn't |
//| converged |
//| * -2 Beta[i]<=0 |
//| * -1 incorrect N was passed |
//| * +1 OK |
//| X - array[0..N-1] - array of quadrature nodes, |
//| in ascending order. |
//| WKronrod - array[0..N-1] - Kronrod weights |
//| WGauss - array[0..N-1] - Gauss weights (interleaved |
//| with zeros corresponding to extended Kronrod |
//| nodes). |
//+------------------------------------------------------------------+
void CAlglib::GKQGenerateRec(double &alpha[],double &beta[],
const double mu0,const int n,
int &info,double &x[],
double &wkronrod[],double &wgauss[])
{
//--- reset the output status code before delegating
info=0;
//--- generate Gauss-Kronrod nodes/weights from the recurrence
CGaussKronrodQ::GKQGenerateRec(alpha,beta,mu0,n,info,x,wkronrod,wgauss);
}
//+------------------------------------------------------------------+
//| Returns Gauss and Gauss-Kronrod nodes/weights for Gauss-Legendre |
//| quadrature with N points. |
//| GKQLegendreCalc (calculation) or GKQLegendreTbl (precomputed |
//| table) is used depending on machine precision and number of |
//| nodes. |
//| INPUT PARAMETERS: |
//| N - number of Kronrod nodes, must be odd number, |
//| >=3. |
//| OUTPUT PARAMETERS: |
//| Info - error code: |
//| * -4 an error was detected when |
//| calculating weights/nodes. N is too |
//|                       large to obtain weights/nodes with        |
//| high enough accuracy. Try to use |
//| multiple precision version. |
//| * -3 internal eigenproblem solver hasn't |
//| converged |
//| * -1 incorrect N was passed |
//| * +1 OK |
//| X - array[0..N-1] - array of quadrature nodes, |
//| ordered in ascending order. |
//| WKronrod - array[0..N-1] - Kronrod weights |
//| WGauss - array[0..N-1] - Gauss weights (interleaved |
//| with zeros corresponding to extended Kronrod |
//| nodes). |
//+------------------------------------------------------------------+
void CAlglib::GKQGenerateGaussLegendre(const int n,int &info,
double &x[],double &wkronrod[],
double &wgauss[])
{
//--- reset the output status code before delegating
info=0;
//--- generate Gauss-Kronrod/Gauss-Legendre nodes and weights
CGaussKronrodQ::GKQGenerateGaussLegendre(n,info,x,wkronrod,wgauss);
}
//+------------------------------------------------------------------+
//| Returns Gauss and Gauss-Kronrod nodes/weights for Gauss-Jacobi |
//| quadrature on [-1,1] with weight function |
//| W(x)=Power(1-x,Alpha)*Power(1+x,Beta). |
//| INPUT PARAMETERS: |
//| N - number of Kronrod nodes, must be odd number, |
//| >=3. |
//| Alpha - power-law coefficient, Alpha>-1 |
//| Beta - power-law coefficient, Beta>-1 |
//| OUTPUT PARAMETERS: |
//| Info - error code: |
//| * -5 no real and positive Gauss-Kronrod |
//| formula can be created for such a |
//| weight function with a given number |
//| of nodes. |
//| * -4 an error was detected when |
//| calculating weights/nodes. Alpha or |
//| Beta are too close to -1 to obtain |
//| weights/nodes with high enough |
//| accuracy, or, may be, N is too large.|
//| Try to use multiple precision version|
//| * -3 internal eigenproblem solver hasn't |
//| converged |
//| * -1 incorrect N was passed |
//| * +1 OK |
//| * +2 OK, but quadrature rule have exterior|
//| nodes, x[0]<-1 or x[n-1]>+1 |
//| X - array[0..N-1] - array of quadrature nodes, |
//| ordered in ascending order. |
//| WKronrod - array[0..N-1] - Kronrod weights |
//| WGauss - array[0..N-1] - Gauss weights (interleaved |
//| with zeros corresponding to extended Kronrod |
//| nodes). |
//+------------------------------------------------------------------+
void CAlglib::GKQGenerateGaussJacobi(const int n,const double alpha,
const double beta,int &info,
double &x[],double &wkronrod[],
double &wgauss[])
{
//--- reset the output status code before delegating
info=0;
//--- generate Gauss-Kronrod/Gauss-Jacobi nodes and weights
CGaussKronrodQ::GKQGenerateGaussJacobi(n,alpha,beta,info,x,wkronrod,wgauss);
}
//+------------------------------------------------------------------+
//| Returns Gauss and Gauss-Kronrod nodes for quadrature with N |
//| points. |
//| Reduction to tridiagonal eigenproblem is used. |
//| INPUT PARAMETERS: |
//| N - number of Kronrod nodes, must be odd number, |
//| >=3. |
//| OUTPUT PARAMETERS: |
//| Info - error code: |
//| * -4 an error was detected when |
//| calculating weights/nodes. N is too |
//| large to obtain weights/nodes with |
//| high enough accuracy. |
//| Try to use multiple precision |
//| version. |
//| * -3 internal eigenproblem solver hasn't |
//| converged |
//| * -1 incorrect N was passed |
//| * +1 OK |
//| X - array[0..N-1] - array of quadrature nodes, |
//| ordered in ascending order. |
//| WKronrod - array[0..N-1] - Kronrod weights |
//| WGauss - array[0..N-1] - Gauss weights (interleaved |
//| with zeros corresponding to extended Kronrod |
//| nodes). |
//+------------------------------------------------------------------+
void CAlglib::GKQLegendreCalc(const int n,int &info,double &x[],
double &wkronrod[],double &wgauss[])
{
//--- reset output error code before delegating (+1 on success, see header)
info=0;
//--- delegate to the Gauss-Kronrod computational core
CGaussKronrodQ::GKQLegendreCalc(n,info,x,wkronrod,wgauss);
}
//+------------------------------------------------------------------+
//| Returns Gauss and Gauss-Kronrod nodes for quadrature with N |
//| points using pre-calculated table. Nodes/weights were computed |
//| with accuracy up to 1.0E-32 (if MPFR version of ALGLIB is used). |
//| In standard double precision accuracy reduces to something about |
//| 2.0E-16 (depending on your compiler's handling of long floating |
//| point constants). |
//| INPUT PARAMETERS: |
//| N - number of Kronrod nodes. |
//| N can be 15, 21, 31, 41, 51, 61. |
//| OUTPUT PARAMETERS: |
//| X - array[0..N-1] - array of quadrature nodes, |
//| ordered in ascending order. |
//| WKronrod - array[0..N-1] - Kronrod weights |
//| WGauss - array[0..N-1] - Gauss weights (interleaved |
//| with zeros corresponding to extended Kronrod |
//| nodes). |
//+------------------------------------------------------------------+
void CAlglib::GKQLegendreTbl(const int n,double &x[],double &wkronrod[],
double &wgauss[],double &eps)
{
//--- reset the output accuracy estimate before delegating
eps=0;
//--- delegate to the pre-calculated table lookup in the computational core
CGaussKronrodQ::GKQLegendreTbl(n,x,wkronrod,wgauss,eps);
}
//+------------------------------------------------------------------+
//| Integration of a smooth function F(x) on a finite interval [a,b].|
//| Fast-convergent algorithm based on a Gauss-Kronrod formula is |
//| used. Result is calculated with accuracy close to the machine |
//| precision. |
//| Algorithm works well only with smooth integrands. It may be used |
//| with continuous non-smooth integrands, but with less performance.|
//| It should never be used with integrands which have integrable |
//| singularities at lower or upper limits - algorithm may crash. |
//| Use AutoGKSingular in such cases. |
//| INPUT PARAMETERS: |
//| A, B - interval boundaries (A<B, A=B or A>B) |
//| OUTPUT PARAMETERS |
//| State - structure which stores algorithm state |
//| SEE ALSO |
//| AutoGKSmoothW, AutoGKSingular, AutoGKResults. |
//+------------------------------------------------------------------+
void CAlglib::AutoGKSmooth(const double a,const double b,
CAutoGKStateShell &state)
{
//--- initialize the solver state wrapped by the shell object
CAutoGK::AutoGKSmooth(a,b,state.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Integration of a smooth function F(x) on a finite interval [a,b].|
//| This subroutine is same as AutoGKSmooth(), but it guarantees that|
//| interval [a,b] is partitioned into subintervals which have width |
//| at most XWidth. |
//| Subroutine can be used when integrating nearly-constant function |
//| with narrow "bumps" (about XWidth wide). If "bumps" are too |
//| narrow, AutoGKSmooth subroutine can overlook them. |
//| INPUT PARAMETERS: |
//| A, B - interval boundaries (A<B, A=B or A>B) |
//| OUTPUT PARAMETERS |
//| State - structure which stores algorithm state |
//| SEE ALSO |
//| AutoGKSmooth, AutoGKSingular, AutoGKResults. |
//+------------------------------------------------------------------+
void CAlglib::AutoGKSmoothW(const double a,const double b,
double xwidth,CAutoGKStateShell &state)
{
//--- initialize the solver state wrapped by the shell object,
//--- limiting subinterval width to xwidth
CAutoGK::AutoGKSmoothW(a,b,xwidth,state.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Integration on a finite interval [A,B]. |
//| Integrand have integrable singularities at A/B. |
//| F(X) must diverge as "(x-A)^alpha" at A, as "(B-x)^beta" at B, |
//| with known alpha/beta (alpha>-1, beta>-1). If alpha/beta are not |
//| known, estimates from below can be used (but these estimates |
//| should be greater than -1 too). |
//| One of alpha/beta variables (or even both alpha/beta) may be |
//| equal to 0, which means that function F(x) is non-singular at    |
//| A/B. Anyway (singular at bounds or not), function F(x) is |
//| supposed to be continuous on (A,B). |
//| Fast-convergent algorithm based on a Gauss-Kronrod formula is |
//| used. Result is calculated with accuracy close to the machine |
//| precision. |
//| INPUT PARAMETERS: |
//| A, B - interval boundaries (A<B, A=B or A>B) |
//| Alpha - power-law coefficient of the F(x) at A, |
//| Alpha>-1 |
//| Beta - power-law coefficient of the F(x) at B, |
//| Beta>-1 |
//| OUTPUT PARAMETERS |
//| State - structure which stores algorithm state |
//| SEE ALSO |
//| AutoGKSmooth, AutoGKSmoothW, AutoGKResults. |
//+------------------------------------------------------------------+
void CAlglib::AutoGKSingular(const double a,const double b,const double alpha,
const double beta,CAutoGKStateShell &state)
{
//--- initialize the solver state for an integrand with power-law
//--- singularities at the endpoints (alpha at A, beta at B)
CAutoGK::AutoGKSingular(a,b,alpha,beta,state.GetInnerObj());
}
//+------------------------------------------------------------------+
//| This function provides reverse communication interface |
//| Reverse communication interface is not documented or recommended |
//| to use. |
//| See below for functions which provide better documented API |
//+------------------------------------------------------------------+
bool CAlglib::AutoGKIteration(CAutoGKStateShell &state)
{
//--- advance the reverse-communication integrator by one step;
//--- true means the caller must satisfy a request (e.g. supply f(x)) and call again
bool more=CAutoGK::AutoGKIteration(state.GetInnerObj());
return(more);
}
//+------------------------------------------------------------------+
//| This function is used to launch iterations of integrator         |
//| It accepts following parameters:                                 |
//|     func  -   callback which calculates f(x) for given x         |
//|     obj   -   optional object which is passed to func; can be    |
//|               NULL                                               |
//+------------------------------------------------------------------+
void CAlglib::AutoGKIntegrate(CAutoGKStateShell &state,
CIntegrator1_Func &func,
CObject &obj)
{
//--- reverse-communication loop: keep iterating while the solver requests work
while(CAlglib::AutoGKIteration(state))
{
//--- the only request this driver can satisfy is a function value;
//--- anything else indicates an internal error
if(!state.GetNeedF())
{
Print("ALGLIB: unexpected error in 'autogksolve'");
CAp::exception_happened=true;
break;
}
//--- evaluate the user integrand at the current point and store
//--- the value into the solver state
func.Int_Func(state.GetX(),state.GetXMinusA(),state.GetBMinusX(),state.GetInnerObj().m_f,obj);
}
}
//+------------------------------------------------------------------+
//| Adaptive integration results |
//| Called after AutoGKIteration returned False. |
//| Input parameters: |
//| State - algorithm state (used by AutoGKIteration). |
//| Output parameters: |
//| V - integral(f(x)dx,a,b) |
//| Rep - optimization report (see AutoGKReport |
//| description) |
//+------------------------------------------------------------------+
void CAlglib::AutoGKResults(CAutoGKStateShell &state,double &v,
CAutoGKReportShell &rep)
{
//--- reset the output integral value before delegating
v=0;
//--- extract integral value and report from the solver state
CAutoGK::AutoGKResults(state.GetInnerObj(),v,rep.GetInnerObj());
}
//+------------------------------------------------------------------+
//| This function serializes data structure to string. |
//| Important properties of s_out: |
//| * it contains alphanumeric characters, dots, underscores, minus|
//| signs |
//| * these symbols are grouped into words, which are separated by |
//| spaces and Windows-style (CR+LF) newlines |
//| * although serializer uses spaces and CR+LF as separators, |
//| you can replace any separator character by arbitrary |
//| combination of spaces, tabs, Windows or Unix newlines. It |
//| allows flexible reformatting of the string in case you |
//| want to include it into text or XML file. But you should not |
//| insert separators into the middle of the "words" nor you |
//| should change case of letters. |
//| * s_out can be freely moved between 32-bit and 64-bit systems, |
//| little and big endian machines, and so on. You can serialize |
//| structure on 32-bit machine and unserialize it on 64-bit one |
//| (or vice versa), or serialize it on SPARC and unserialize |
//| on x86. You can also serialize it in C# version of ALGLIB and|
//| unserialize in C++ one, and vice versa. |
//+------------------------------------------------------------------+
void CAlglib::IDWSerialize(CIDWModelShell &obj,string &s_out)
{
CSerializer serializer;
//--- first pass: size the serializer for the model
serializer.Alloc_Start();
CIDWInt::IDWAlloc(serializer,obj.GetInnerObj());
//--- second pass: write the model into the string stream
serializer.SStart_Str();
CIDWInt::IDWSerialize(serializer,obj.GetInnerObj());
serializer.Stop();
//--- hand the serialized text back to the caller
s_out=serializer.Get_String();
}
//+------------------------------------------------------------------+
//| This function unserializes data structure from string. |
//+------------------------------------------------------------------+
void CAlglib::IDWUnserialize(string s_in,CIDWModelShell &obj)
{
//--- read the model back from its string representation
CSerializer serializer;
serializer.UStart_Str(s_in);
CIDWInt::IDWUnserialize(serializer,obj.GetInnerObj());
serializer.Stop();
}
//+------------------------------------------------------------------+
//| This function creates buffer structure which can be used to |
//| perform parallel IDW model evaluations (with one IDW model |
//| instance being used from multiple threads, as long as different |
//| threads use different instances of buffer). |
//| This buffer object can be used with IDWTsCalcBuf() function (here|
//| "ts" stands for "thread-safe", "buf" is a suffix which denotes |
//| function which reuses previously allocated output space). |
//| How to use it: |
//| * create IDW model structure or load it from file |
//| * call IDWCreateCalcBuffer(), once per thread working with IDW |
//| model (you should call this function only AFTER model |
//| initialization, see below for more information) |
//| * call IDWTsCalcBuf() from different threads, with each thread |
//| working with its own copy of buffer object. |
//| INPUT PARAMETERS: |
//| S - IDW model |
//| OUTPUT PARAMETERS: |
//| Buf - external buffer. |
//| IMPORTANT: buffer object should be used only with IDW model |
//| object which was used to initialize buffer. Any |
//| attempt to use buffer with different object is |
//| dangerous - you may get memory violation error because|
//| sizes of internal arrays do not fit to dimensions of |
//| the IDW structure. |
//| IMPORTANT: you should call this function only for model which was|
//| built with model builder (or unserialized from file). |
//| Sizes of some internal structures are determined only |
//| after model is built, so buffer object created before |
//| model construction stage will be useless (and any |
//| attempt to use it will result in exception). |
//+------------------------------------------------------------------+
void CAlglib::IDWCreateCalcBuffer(CIDWModelShell &s,CIDWCalcBuffer &buf)
{
//--- allocate a per-thread evaluation buffer sized for this model
CIDWInt::IDWCreateCalcBuffer(s.GetInnerObj(),buf);
}
//+------------------------------------------------------------------+
//| This subroutine creates builder object used to generate IDW model|
//| from irregularly sampled (scattered) dataset. Multidimensional |
//| scalar/vector-valued are supported. |
//| Builder object is used to fit model to data as follows: |
//| * builder object is created with idwbuildercreate() function |
//| * dataset is added with IDWBuilderSetPoints() function |
//| * one of the modern IDW algorithms is chosen with either: |
//| * IDWBuilderSetAlgoMSTAB() - Multilayer STABilized algorithm|
//| (interpolation). |
//| Alternatively, one of the textbook algorithms can be chosen (not |
//| recommended): |
//| * IDWBuilderSetAlgoTextBookShepard() - textbook Shepard |
//| algorithm |
//| * IDWBuilderSetAlgoTextBookModShepard()- textbook modified |
//| Shepard algorithm |
//| * finally, model construction is performed with IDWFit() |
//| function. |
//| INPUT PARAMETERS: |
//| NX - dimensionality of the argument, NX>=1 |
//| NY - dimensionality of the function being modeled, |
//| NY>=1; NY=1 corresponds to classic scalar function,|
//| NY>=1 corresponds to vector-valued function. |
//| OUTPUT PARAMETERS: |
//| State - builder object |
//+------------------------------------------------------------------+
void CAlglib::IDWBuilderCreate(int nx,int ny,CIDWBuilder &state)
{
//--- initialize builder for NX-dimensional arguments, NY-dimensional values
CIDWInt::IDWBuilderCreate(nx,ny,state);
}
//+------------------------------------------------------------------+
//| This function changes number of layers used by IDW-MSTAB |
//| algorithm. |
//| The more layers you have, the finer details can be reproduced |
//| with IDW model. The less layers you have, the less memory and CPU|
//| time is consumed by the model. |
//| Memory consumption grows linearly with layers count, running time|
//| grows sub-linearly. |
//| The default number of layers is 16, which allows you to reproduce|
//| details at distance down to SRad/65536. You will rarely need to |
//| change it. |
//| INPUT PARAMETERS: |
//| State - builder object |
//| NLayers - NLayers>=1, the number of layers used by the model.|
//+------------------------------------------------------------------+
void CAlglib::IDWBuilderSetNLayers(CIDWBuilder &state,int nlayers)
{
//--- store the requested IDW-MSTAB layer count (default is 16, see header)
CIDWInt::IDWBuilderSetNLayers(state,nlayers);
}
//+------------------------------------------------------------------+
//| This function adds dataset to the builder object. |
//| This function overrides results of the previous calls, i.e. |
//| multiple calls of this function will result in only the last set |
//| being added. |
//| INPUT PARAMETERS: |
//| State - builder object |
//| XY - points, array[N, NX+NY]. One row corresponds to one |
//| point in the dataset. First NX elements are |
//| coordinates, next NY elements are function values. |
//| Array may be larger than specified, in this case |
//| only leading [N,NX+NY] elements will be used. |
//| N - number of points in the dataset, N>=0. |
//+------------------------------------------------------------------+
void CAlglib::IDWBuilderSetPoints(CIDWBuilder &state,CMatrixDouble &xy,int n)
{
//--- attach dataset (first n rows of xy); replaces any previously set points
CIDWInt::IDWBuilderSetPoints(state,xy,n);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::IDWBuilderSetPoints(CIDWBuilder &state,CMatrixDouble &xy)
{
//--- convenience overload: every row of the matrix is a dataset point
CIDWInt::IDWBuilderSetPoints(state,xy,CAp::Rows(xy));
}
//+------------------------------------------------------------------+
//| This function sets IDW model construction algorithm to the |
//| Multilayer Stabilized IDW method (IDW-MSTAB), a latest |
//| incarnation of the inverse distance weighting interpolation which|
//| fixes shortcomings of the original and modified Shepard's |
//| variants. |
//| The distinctive features of IDW-MSTAB are: |
//| 1) exact interpolation is pursued (as opposed to fitting and |
//| noise suppression) |
//| 2) improved robustness when compared with that of other |
//| algorithms: |
//| * MSTAB shows almost no strange fitting artifacts like |
//| ripples and sharp spikes (unlike N-dimensional splines |
//| and HRBFs) |
//| * MSTAB does not return function values far from the |
//| interval spanned by the dataset; say, if all your points |
//| have |f|<=1, you can be sure that model value won't |
//| deviate too much from [-1,+1] |
//| 3) good model construction time competing with that of HRBFs |
//| and bicubic splines |
//| 4) ability to work with any number of dimensions, starting |
//| from NX=1 |
//| The drawbacks of IDW-MSTAB (and all IDW algorithms in general) |
//| are: |
//| 1) dependence of the model evaluation time on the search radius|
//| 2) bad extrapolation properties, models built by this method |
//| are usually conservative in their predictions |
//| Thus, IDW-MSTAB is a good "default" option if you want to perform|
//| scattered multidimensional interpolation. Although it has its |
//| drawbacks, it is easy to use and robust, which makes it a good |
//| first step. |
//| INPUT PARAMETERS: |
//| State - builder object |
//| SRad - initial search radius, SRad>0 is required. A model |
//| value is obtained by "smart" averaging of the |
//| dataset points within search radius. |
//| NOTE 1: IDW interpolation can correctly handle ANY dataset, |
//| including datasets with non-distinct points. In case |
//| non-distinct points are found, an average value for this |
//| point will be calculated. |
//| NOTE 2: the memory requirements for model storage are |
//| O(NPoints*NLayers). The model construction needs twice |
//| as much memory as model storage. |
//| NOTE 3: by default 16 IDW layers are built which is enough for |
//| most cases. You can change this parameter with |
//| IDWBuilderSetNLayers() method. Larger values may be |
//| necessary if you need to reproduce extrafine details at |
//| distances smaller than SRad/65536. Smaller value may |
//| be necessary if you have to save memory and computing |
//| time, and ready to sacrifice some model quality. |
//| ALGORITHM DESCRIPTION: |
//| ALGLIB implementation of IDW is somewhat similar to the |
//| modified Shepard's method (one with search radius R) but |
//| overcomes several of its drawbacks, namely: |
//| 1) a tendency to show stepwise behavior for uniform datasets|
//| 2) a tendency to show terrible interpolation properties for |
//| highly nonuniform datasets which often arise in |
//| geospatial tasks (function values are densely sampled |
//| across multiple separated "tracks") |
//| IDW-MSTAB method performs several passes over dataset and builds |
//| a sequence of progressively refined IDW models (layers), which |
//| starts from one with largest search radius SRad and continues |
//| to smaller search radii until required number of layers is built.|
//| Highest layers reproduce global behavior of the target function |
//| at larger distances whilst lower layers reproduce fine details at|
//| smaller distances. |
//| Each layer is an IDW model built with following modifications: |
//| * weights go to zero when distance approach to the current |
//| search radius |
//| * an additional regularizing term is added to the distance: |
//| w=1/(d^2+lambda) |
//| * an additional fictional term with unit weight and zero |
//| function value is added in order to promote continuity |
//| properties at the isolated and boundary points |
//| By default, 16 layers is built, which is enough for most cases. |
//| You can change this parameter with IDWBuilderSetNLayers() method.|
//+------------------------------------------------------------------+
void CAlglib::IDWBuilderSetAlgoMSTAB(CIDWBuilder &state,double srad)
{
//--- select the Multilayer STABilized algorithm with initial search radius srad
CIDWInt::IDWBuilderSetAlgoMSTAB(state,srad);
}
//+------------------------------------------------------------------+
//| This function sets IDW model construction algorithm to the |
//| textbook Shepard's algorithm with custom (user-specified) power |
//| parameter. |
//| IMPORTANT: we do NOT recommend using textbook IDW algorithms |
//| because they have terrible interpolation properties. |
//| Use MSTAB in all cases. |
//| INPUT PARAMETERS: |
//| State - builder object |
//| P - power parameter, P>0; good value to start with is |
//| 2.0 |
//| NOTE 1: IDW interpolation can correctly handle ANY dataset, |
//| including datasets with non-distinct points. In case |
//| non-distinct points are found, an average value for this |
//| point will be calculated. |
//+------------------------------------------------------------------+
void CAlglib::IDWBuilderSetAlgoTextBookShepard(CIDWBuilder &state,double p)
{
//--- select textbook Shepard's algorithm with power parameter p (not recommended)
CIDWInt::IDWBuilderSetAlgoTextBookShepard(state,p);
}
//+------------------------------------------------------------------+
//| This function sets IDW model construction algorithm to the |
//| 'textbook' modified Shepard's algorithm with user-specified |
//| search radius. |
//| IMPORTANT: we do NOT recommend using textbook IDW algorithms |
//| because they have terrible interpolation properties. |
//| Use MSTAB in all cases. |
//| INPUT PARAMETERS: |
//| State - builder object |
//| R - search radius |
//| NOTE 1: IDW interpolation can correctly handle ANY dataset, |
//| including datasets with non-distinct points. In case |
//| non-distinct points are found, an average value for this |
//| point will be calculated. |
//+------------------------------------------------------------------+
void CAlglib::IDWBuilderSetAlgoTextBookModShepard(CIDWBuilder &state,double r)
{
//--- select textbook modified Shepard's algorithm with search radius r
//--- (not recommended, see header)
CIDWInt::IDWBuilderSetAlgoTextBookModShepard(state,r);
}
//+------------------------------------------------------------------+
//| This function sets prior term (model value at infinity) as |
//| user-specified value. |
//| INPUT PARAMETERS: |
//| S - spline builder |
//| V - value for user-defined prior |
//| NOTE: for vector-valued models all components of the prior are |
//| set to same user-specified value |
//+------------------------------------------------------------------+
void CAlglib::IDWBuilderSetUserTerm(CIDWBuilder &state,double v)
{
//--- set user-defined prior term v (model value at infinity)
CIDWInt::IDWBuilderSetUserTerm(state,v);
}
//+------------------------------------------------------------------+
//| This function sets constant prior term (model value at infinity).|
//| Constant prior term is determined as mean value over dataset. |
//| INPUT PARAMETERS: |
//| S - spline builder |
//+------------------------------------------------------------------+
void CAlglib::IDWBuilderSetConstTerm(CIDWBuilder &state)
{
//--- set constant prior term (dataset mean) as model value at infinity
CIDWInt::IDWBuilderSetConstTerm(state);
}
//+------------------------------------------------------------------+
//| This function sets zero prior term (model value at infinity). |
//| INPUT PARAMETERS: |
//| S - spline builder |
//+------------------------------------------------------------------+
void CAlglib::IDWBuilderSetZeroTerm(CIDWBuilder &state)
{
//--- set zero prior term as model value at infinity
CIDWInt::IDWBuilderSetZeroTerm(state);
}
//+------------------------------------------------------------------+
//| IDW interpolation: scalar target, 1-dimensional argument |
//| NOTE: this function modifies internal temporaries of the IDW |
//| model, thus IT IS NOT THREAD-SAFE! If you want to perform |
//| parallel model evaluation from the multiple threads, use |
//| IDWTsCalcBuf() with per-thread buffer object. |
//| INPUT PARAMETERS: |
//| S - IDW interpolant built with IDW builder |
//| X0 - argument value |
//| Result: |
//| IDW interpolant S(X0) |
//+------------------------------------------------------------------+
double CAlglib::IDWCalc1(CIDWModelShell &s,double x0)
{
//--- evaluate 1-dimensional scalar IDW model (not thread-safe, see header)
double value=CIDWInt::IDWCalc1(s.GetInnerObj(),x0);
return(value);
}
//+------------------------------------------------------------------+
//| IDW interpolation: scalar target, 2-dimensional argument |
//| NOTE: this function modifies internal temporaries of the IDW |
//| model, thus IT IS NOT THREAD-SAFE! If you want to perform |
//| parallel model evaluation from the multiple threads, use |
//| IDWTsCalcBuf() with per- thread buffer object. |
//| INPUT PARAMETERS: |
//| S - IDW interpolant built with IDW builder |
//| X0, X1 - argument value |
//| Result: |
//| IDW interpolant S(X0,X1) |
//+------------------------------------------------------------------+
double CAlglib::IDWCalc2(CIDWModelShell &s,double x0,double x1)
{
//--- evaluate 2-dimensional scalar IDW model (not thread-safe, see header)
double value=CIDWInt::IDWCalc2(s.GetInnerObj(),x0,x1);
return(value);
}
//+------------------------------------------------------------------+
//| IDW interpolation: scalar target, 3-dimensional argument |
//| NOTE: this function modifies internal temporaries of the IDW |
//| model, thus IT IS NOT THREAD-SAFE! If you want to perform |
//| parallel model evaluation from the multiple threads, use |
//|       IDWTsCalcBuf() with per-thread buffer object.              |
//| INPUT PARAMETERS: |
//| S - IDW interpolant built with IDW builder |
//| X0,X1,X2 - argument value |
//| Result: |
//| IDW interpolant S(X0,X1,X2) |
//+------------------------------------------------------------------+
double CAlglib::IDWCalc3(CIDWModelShell &s,double x0,double x1,double x2)
{
//--- evaluate 3-dimensional scalar IDW model (not thread-safe, see header)
double value=CIDWInt::IDWCalc3(s.GetInnerObj(),x0,x1,x2);
return(value);
}
//+------------------------------------------------------------------+
//| This function calculates values of the IDW model at the given |
//| point. |
//| This is general function which can be used for arbitrary NX |
//| (dimension of the space of arguments) and NY (dimension of the |
//| function itself). However when you have NY=1 you may find more |
//| convenient to use IDWCalc1(), IDWCalc2() or IDWCalc3(). |
//| NOTE: this function modifies internal temporaries of the IDW |
//| model, thus IT IS NOT THREAD-SAFE! If you want to perform |
//| parallel model evaluation from the multiple threads, use |
//| IDWTsCalcBuf() with per-thread buffer object. |
//| INPUT PARAMETERS: |
//| S - IDW model |
//| X - coordinates, array[NX]. X may have more than NX |
//| elements, in this case only leading NX will be used|
//| OUTPUT PARAMETERS: |
//| Y - function value, array[NY]. Y is out-parameter and |
//| will be reallocated after call to this function. In|
//| case you want to reuse previously allocated Y, you |
//| may use IDWCalcBuf(), which reallocates Y only when|
//| it is too small. |
//+------------------------------------------------------------------+
void CAlglib::IDWCalc(CIDWModelShell &s,CRowDouble &x,CRowDouble &y)
{
//--- general NX/NY evaluation; y is reallocated by the core (not thread-safe)
CIDWInt::IDWCalc(s.GetInnerObj(),x,y);
}
//+------------------------------------------------------------------+
//| This function calculates values of the IDW model at the given |
//| point. |
//| Same as IDWCalc(), but does not reallocate Y when in is large |
//| enough to store function values. |
//| NOTE: this function modifies internal temporaries of the IDW |
//| model, thus IT IS NOT THREAD-SAFE! If you want to perform |
//| parallel model evaluation from the multiple threads, use |
//| IDWTsCalcBuf() with per-thread buffer object. |
//| INPUT PARAMETERS: |
//| S - IDW model |
//| X - coordinates, array[NX]. X may have more than NX |
//| elements, in this case only leading NX will be used|
//| Y - possibly preallocated array |
//| OUTPUT PARAMETERS: |
//| Y - function value, array[NY]. Y is not reallocated |
//| when it is larger than NY. |
//+------------------------------------------------------------------+
void CAlglib::IDWCalcBuf(CIDWModelShell &s,CRowDouble &x,CRowDouble &y)
{
//--- same as IDWCalc() but reuses y when it is already large enough
CIDWInt::IDWCalcBuf(s.GetInnerObj(),x,y);
}
//+------------------------------------------------------------------+
//| This function calculates values of the IDW model at the given |
//| point, using external buffer object (internal temporaries of IDW |
//| model are not modified). |
//| This function allows to use same IDW model object in different |
//| threads, assuming that different threads use different instances|
//| of the buffer structure. |
//| INPUT PARAMETERS: |
//| S - IDW model, may be shared between different threads |
//| Buf - buffer object created for this particular instance |
//| of IDW model with IDWCreateCalcBuffer(). |
//| X - coordinates, array[NX]. X may have more than NX |
//| elements, in this case only leading NX will be used|
//| Y - possibly preallocated array |
//| OUTPUT PARAMETERS: |
//| Y - function value, array[NY]. Y is not reallocated |
//| when it is larger than NY. |
//+------------------------------------------------------------------+
void CAlglib::IDWTsCalcBuf(CIDWModelShell &s,CIDWCalcBuffer &buf,
CRowDouble &x,CRowDouble &y)
{
//--- thread-safe evaluation: temporaries live in the external buffer,
//--- so the shared model object is not modified
CIDWInt::IDWTsCalcBuf(s.GetInnerObj(),buf,x,y);
}
//+------------------------------------------------------------------+
//| This function fits IDW model to the dataset using current IDW |
//| construction algorithm. A model being built and fitting report |
//| are returned. |
//| INPUT PARAMETERS: |
//| State - builder object |
//| OUTPUT PARAMETERS: |
//| Model - an IDW model built with current algorithm |
//| Rep - model fitting report, fields of this structure |
//| contain information about average fitting errors. |
//| NOTE: although IDW-MSTAB algorithm is an interpolation method, |
//| i.e. it tries to fit the model exactly, it can handle |
//| datasets with non-distinct points which can not be fit |
//| exactly; in such cases least-squares fitting is performed. |
//+------------------------------------------------------------------+
void CAlglib::IDWFit(CIDWBuilder &state,CIDWModelShell &model,CIDWReport &rep)
{
//--- build the model with the algorithm currently selected on the builder
CIDWInt::IDWFit(state,model.GetInnerObj(),rep);
}
//+------------------------------------------------------------------+
//| Rational interpolation using barycentric formula |
//| F(t)=SUM(i=0,n-1,w[i]*f[i]/(t-x[i])) / SUM(i=0,n-1,w[i]/(t-x[i]))|
//| Input parameters: |
//| B - barycentric interpolant built with one of model |
//| building subroutines. |
//| T - interpolation point |
//| Result: |
//| barycentric interpolant F(t) |
//+------------------------------------------------------------------+
double CAlglib::BarycentricCalc(CBarycentricInterpolantShell &b,
const double t)
{
//--- evaluate the barycentric rational interpolant at point t
double value=CRatInt::BarycentricCalc(b.GetInnerObj(),t);
return(value);
}
//+------------------------------------------------------------------+
//| Differentiation of barycentric interpolant: first derivative. |
//| Algorithm used in this subroutine is very robust and should not |
//| fail until provided with values too close to MaxRealNumber |
//| (usually MaxRealNumber/N or greater will overflow). |
//| INPUT PARAMETERS: |
//| B - barycentric interpolant built with one of model |
//| building subroutines. |
//| T - interpolation point |
//| OUTPUT PARAMETERS: |
//| F - barycentric interpolant at T |
//| DF - first derivative |
//+------------------------------------------------------------------+
void CAlglib::BarycentricDiff1(CBarycentricInterpolantShell &b,
const double t,double &f,double &df)
{
//--- reset output values before delegating
f=0;
df=0;
//--- compute interpolant value and first derivative at t
CRatInt::BarycentricDiff1(b.GetInnerObj(),t,f,df);
}
//+------------------------------------------------------------------+
//| Differentiation of barycentric interpolant: first/second |
//| derivatives. |
//| INPUT PARAMETERS: |
//| B - barycentric interpolant built with one of model |
//| building subroutines. |
//| T - interpolation point |
//| OUTPUT PARAMETERS: |
//| F - barycentric interpolant at T |
//| DF - first derivative |
//| D2F - second derivative |
//| NOTE: this algorithm may fail due to overflow/underflow if used  |
//| on data whose values are close to MaxRealNumber or MinRealNumber.|
//| Use more robust BarycentricDiff1() subroutine in such cases. |
//+------------------------------------------------------------------+
void CAlglib::BarycentricDiff2(CBarycentricInterpolantShell &b,
const double t,double &f,double &df,
double &d2f)
{
//--- reset output values before delegating
f=0;
df=0;
d2f=0;
//--- compute interpolant value and first/second derivatives at t
CRatInt::BarycentricDiff2(b.GetInnerObj(),t,f,df,d2f);
}
//+------------------------------------------------------------------+
//| This subroutine performs linear transformation of the argument. |
//| INPUT PARAMETERS: |
//| B - rational interpolant in barycentric form |
//| CA, CB - transformation coefficients: x = CA*t + CB |
//| OUTPUT PARAMETERS: |
//| B - transformed interpolant with X replaced by T |
//+------------------------------------------------------------------+
void CAlglib::BarycentricLinTransX(CBarycentricInterpolantShell &b,
const double ca,const double cb)
{
//--- apply argument transformation x = ca*t + cb to the interpolant in place
CRatInt::BarycentricLinTransX(b.GetInnerObj(),ca,cb);
}
//+------------------------------------------------------------------+
//| This subroutine performs linear transformation of the barycentric|
//| interpolant. |
//| INPUT PARAMETERS: |
//| B - rational interpolant in barycentric form |
//| CA, CB - transformation coefficients: B2(x) = CA*B(x) + CB|
//| OUTPUT PARAMETERS: |
//| B - transformed interpolant |
//+------------------------------------------------------------------+
void CAlglib::BarycentricLinTransY(CBarycentricInterpolantShell &b,
const double ca,const double cb)
{
//--- apply value transformation B2(x) = ca*B(x) + cb to the interpolant in place
CRatInt::BarycentricLinTransY(b.GetInnerObj(),ca,cb);
}
//+------------------------------------------------------------------+
//| Extracts X/Y/W arrays from rational interpolant |
//| INPUT PARAMETERS: |
//| B - barycentric interpolant |
//| OUTPUT PARAMETERS: |
//| N - nodes count, N>0 |
//| X - interpolation nodes, array[0..N-1] |
//|     Y   -   function values, array[0..N-1]                      |
//| W - barycentric weights, array[0..N-1] |
//+------------------------------------------------------------------+
void CAlglib::BarycentricUnpack(CBarycentricInterpolantShell &b,
int &n,double &x[],double &y[],
double &w[])
{
//--- reset output node count before the call
n=0;
//--- extract nodes X, values Y and barycentric weights W from the interpolant
CRatInt::BarycentricUnpack(b.GetInnerObj(),n,x,y,w);
}
//+------------------------------------------------------------------+
//| Rational interpolant from X/Y/W arrays |
//| F(t)=SUM(i=0,n-1,w[i]*f[i]/(t-x[i])) / SUM(i=0,n-1,w[i]/(t-x[i]))|
//| INPUT PARAMETERS: |
//| X - interpolation nodes, array[0..N-1] |
//| F - function values, array[0..N-1] |
//| W - barycentric weights, array[0..N-1] |
//| N - nodes count, N>0 |
//| OUTPUT PARAMETERS: |
//| B - barycentric interpolant built from (X, Y, W) |
//+------------------------------------------------------------------+
void CAlglib::BarycentricBuildXYW(double &x[],double &y[],double &w[],
const int n,CBarycentricInterpolantShell &b)
{
//--- thin wrapper: build barycentric interpolant from nodes/values/weights
CRatInt::BarycentricBuildXYW(x,y,w,n,b.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Rational interpolant without poles |
//| The subroutine constructs the rational interpolating function |
//| without real poles (see 'Barycentric rational interpolation with |
//| no poles and high rates of approximation', Michael S. Floater   |
//| and Kai Hormann, for more information on this subject). |
//| Input parameters: |
//| X - interpolation nodes, array[0..N-1]. |
//| Y - function values, array[0..N-1]. |
//| N - number of nodes, N>0. |
//| D - order of the interpolation scheme, 0 <= D <= N-1. |
//| D<0 will cause an error. |
//| D>=N it will be replaced with D=N-1. |
//| if you don't know what D to choose, use small value |
//| about 3-5. |
//| Output parameters: |
//| B - barycentric interpolant. |
//| Note: |
//| this algorithm always succeeds and calculates the weights |
//| with close to machine precision. |
//+------------------------------------------------------------------+
void CAlglib::BarycentricBuildFloaterHormann(double &x[],double &y[],
const int n,const int d,
CBarycentricInterpolantShell &b)
{
//--- thin wrapper: Floater-Hormann pole-free rational interpolant of order D
CRatInt::BarycentricBuildFloaterHormann(x,y,n,d,b.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Conversion from barycentric representation to Chebyshev basis. |
//| This function has O(N^2) complexity. |
//| INPUT PARAMETERS: |
//| P - polynomial in barycentric form |
//| A,B - base interval for Chebyshev polynomials (see below) |
//| A<>B |
//| OUTPUT PARAMETERS |
//| T - coefficients of Chebyshev representation; |
//| P(x) = sum { T[i]*Ti(2*(x-A)/(B-A)-1), i=0..N-1 }, |
//| where Ti - I-th Chebyshev polynomial. |
//| NOTES: |
//| barycentric interpolant passed as P may be either polynomial |
//| obtained from polynomial interpolation/ fitting or rational |
//| function which is NOT polynomial. We can't distinguish |
//| between these two cases, and this algorithm just tries to |
//| work assuming that P IS a polynomial. If not, algorithm will |
//| return results, but they won't have any meaning. |
//+------------------------------------------------------------------+
void CAlglib::PolynomialBar2Cheb(CBarycentricInterpolantShell &p,
const double a,const double b,
double &t[])
{
//--- thin wrapper: convert barycentric form to Chebyshev coefficients on [A,B]
CPolInt::PolynomialBar2Cheb(p.GetInnerObj(),a,b,t);
}
//+------------------------------------------------------------------+
//| Conversion from Chebyshev basis to barycentric representation. |
//| This function has O(N^2) complexity. |
//| INPUT PARAMETERS: |
//| T - coefficients of Chebyshev representation; |
//| P(x) = sum { T[i]*Ti(2*(x-A)/(B-A)-1), i=0..N }, |
//| where Ti - I-th Chebyshev polynomial. |
//| N - number of coefficients: |
//| * if given, only leading N elements of T are used |
//| * if not given, automatically determined from size |
//| of T |
//| A,B - base interval for Chebyshev polynomials (see above) |
//| A<B |
//| OUTPUT PARAMETERS |
//| P - polynomial in barycentric form |
//+------------------------------------------------------------------+
void CAlglib::PolynomialCheb2Bar(double &t[],const int n,const double a,
const double b,
CBarycentricInterpolantShell &p)
{
//--- thin wrapper: convert leading N Chebyshev coefficients to barycentric form
CPolInt::PolynomialCheb2Bar(t,n,a,b,p.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Conversion from Chebyshev basis to barycentric representation. |
//| This function has O(N^2) complexity. |
//| INPUT PARAMETERS: |
//| T - coefficients of Chebyshev representation; |
//| P(x) = sum { T[i]*Ti(2*(x-A)/(B-A)-1), i=0..N }, |
//| where Ti - I-th Chebyshev polynomial. |
//| N - number of coefficients: |
//| * if given, only leading N elements of T are used |
//| * if not given, automatically determined from size |
//| of T |
//| A,B - base interval for Chebyshev polynomials (see above) |
//| A<B |
//| OUTPUT PARAMETERS |
//| P - polynomial in barycentric form |
//+------------------------------------------------------------------+
void CAlglib::PolynomialCheb2Bar(double &t[],const double a,
const double b,
CBarycentricInterpolantShell &p)
{
//--- convenience overload: the coefficient count is taken from the size of T
CPolInt::PolynomialCheb2Bar(t,CAp::Len(t),a,b,p.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Conversion from barycentric representation to power basis. |
//| This function has O(N^2) complexity. |
//| INPUT PARAMETERS: |
//| P - polynomial in barycentric form |
//| C - offset (see below); 0.0 is used as default value. |
//| S - scale (see below); 1.0 is used as default value. |
//| S<>0. |
//| OUTPUT PARAMETERS |
//| A - coefficients, |
//| P(x) = sum { A[i]*((X-C)/S)^i, i=0..N-1 } |
//| N - number of coefficients (polynomial degree plus 1) |
//| NOTES: |
//| 1. this function accepts offset and scale, which can be set to |
//| improve numerical properties of polynomial. For example, if |
//| P was obtained as result of interpolation on [-1,+1], you can|
//| set C=0 and S=1 and represent P as sum of 1, x, x^2, x^3 and |
//|      so on. In most cases it is exactly what you need.          |
//| However, if your interpolation model was built on [999,1001],|
//| you will see significant growth of numerical errors when |
//| using {1, x, x^2, x^3} as basis. Representing P as sum of 1, |
//| (x-1000), (x-1000)^2, (x-1000)^3 will be better option. Such |
//| representation can be obtained by using 1000.0 as offset |
//| C and 1.0 as scale S. |
//| 2. power basis is ill-conditioned and tricks described above |
//| can't solve this problem completely. This function will |
//| return coefficients in any case, but for N>8 they will become|
//| unreliable. However, N's less than 5 are pretty safe. |
//| 3. barycentric interpolant passed as P may be either polynomial |
//| obtained from polynomial interpolation/ fitting or rational |
//| function which is NOT polynomial. We can't distinguish |
//| between these two cases, and this algorithm just tries to |
//| work assuming that P IS a polynomial. If not, algorithm will |
//| return results, but they won't have any meaning. |
//+------------------------------------------------------------------+
void CAlglib::PolynomialBar2Pow(CBarycentricInterpolantShell &p,
const double c,const double s,
double &a[])
{
//--- thin wrapper: convert barycentric form to power basis ((X-C)/S)^i
CPolInt::PolynomialBar2Pow(p.GetInnerObj(),c,s,a);
}
//+------------------------------------------------------------------+
//| Conversion from barycentric representation to power basis. |
//| This function has O(N^2) complexity. |
//| INPUT PARAMETERS: |
//| P - polynomial in barycentric form |
//| C - offset (see below); 0.0 is used as default value. |
//| S - scale (see below); 1.0 is used as default value. |
//| S<>0. |
//| OUTPUT PARAMETERS |
//| A - coefficients, |
//| P(x) = sum { A[i]*((X-C)/S)^i, i=0..N-1 } |
//| N - number of coefficients (polynomial degree plus 1) |
//| NOTES: |
//| 1. this function accepts offset and scale, which can be set to |
//| improve numerical properties of polynomial. For example, if |
//| P was obtained as result of interpolation on [-1,+1], you can|
//| set C=0 and S=1 and represent P as sum of 1, x, x^2, x^3 and |
//|     so on. In most cases it is exactly what you need.           |
//| However, if your interpolation model was built on [999,1001],|
//| you will see significant growth of numerical errors when |
//| using {1, x, x^2, x^3} as basis. Representing P as sum of 1, |
//| (x-1000), (x-1000)^2, (x-1000)^3 will be better option. Such |
//| representation can be obtained by using 1000.0 as offset |
//| C and 1.0 as scale S. |
//| 2. power basis is ill-conditioned and tricks described above |
//| can't solve this problem completely. This function will |
//| return coefficients in any case, but for N>8 they will become|
//| unreliable. However, N's less than 5 are pretty safe. |
//| 3. barycentric interpolant passed as P may be either polynomial |
//| obtained from polynomial interpolation/ fitting or rational |
//| function which is NOT polynomial. We can't distinguish |
//| between these two cases, and this algorithm just tries to |
//| work assuming that P IS a polynomial. If not, algorithm will |
//| return results, but they won't have any meaning. |
//+------------------------------------------------------------------+
void CAlglib::PolynomialBar2Pow(CBarycentricInterpolantShell &p,
double &a[])
{
//--- convenience overload: use default offset C=0.0 and scale S=1.0
CPolInt::PolynomialBar2Pow(p.GetInnerObj(),0.0,1.0,a);
}
//+------------------------------------------------------------------+
//| Conversion from power basis to barycentric representation. |
//| This function has O(N^2) complexity. |
//| INPUT PARAMETERS: |
//| A - coefficients, P(x)=sum { A[i]*((X-C)/S)^i, i=0..N-1 }|
//| N - number of coefficients (polynomial degree plus 1) |
//| * if given, only leading N elements of A are used |
//| * if not given, automatically determined from size |
//| of A |
//| C - offset (see below); 0.0 is used as default value. |
//| S - scale (see below); 1.0 is used as default value. |
//| S<>0. |
//| OUTPUT PARAMETERS |
//| P - polynomial in barycentric form |
//| NOTES: |
//| 1. this function accepts offset and scale, which can be set to |
//| improve numerical properties of polynomial. For example, if |
//| you interpolate on [-1,+1], you can set C=0 and S=1 and |
//| convert from sum of 1, x, x^2, x^3 and so on. In most cases |
//|      it is exactly what you need.                               |
//| However, if your interpolation model was built on [999,1001],|
//| you will see significant growth of numerical errors when |
//| using {1, x, x^2, x^3} as input basis. Converting from sum |
//| of 1, (x-1000), (x-1000)^2, (x-1000)^3 will be better option |
//| (you have to specify 1000.0 as offset C and 1.0 as scale S). |
//| 2. power basis is ill-conditioned and tricks described above |
//| can't solve this problem completely. This function will |
//| return barycentric model in any case, but for N>8 accuracy |
//|     will degrade. However, N's less than 5 are pretty safe.     |
//+------------------------------------------------------------------+
void CAlglib::PolynomialPow2Bar(double &a[],const int n,const double c,
const double s,CBarycentricInterpolantShell &p)
{
//--- thin wrapper: convert power-basis coefficients to barycentric form
CPolInt::PolynomialPow2Bar(a,n,c,s,p.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Conversion from power basis to barycentric representation. |
//| This function has O(N^2) complexity. |
//| INPUT PARAMETERS: |
//| A - coefficients, P(x)=sum { A[i]*((X-C)/S)^i, i=0..N-1 }|
//| N - number of coefficients (polynomial degree plus 1) |
//| * if given, only leading N elements of A are used |
//| * if not given, automatically determined from size |
//| of A |
//| C - offset (see below); 0.0 is used as default value. |
//| S - scale (see below); 1.0 is used as default value. |
//| S<>0. |
//| OUTPUT PARAMETERS |
//| P - polynomial in barycentric form |
//| NOTES: |
//| 1. this function accepts offset and scale, which can be set to |
//| improve numerical properties of polynomial. For example, if |
//| you interpolate on [-1,+1], you can set C=0 and S=1 and |
//| convert from sum of 1, x, x^2, x^3 and so on. In most cases |
//|      it is exactly what you need.                               |
//| However, if your interpolation model was built on [999,1001],|
//| you will see significant growth of numerical errors when |
//| using {1, x, x^2, x^3} as input basis. Converting from sum |
//| of 1, (x-1000), (x-1000)^2, (x-1000)^3 will be better option |
//| (you have to specify 1000.0 as offset C and 1.0 as scale S). |
//| 2. power basis is ill-conditioned and tricks described above |
//| can't solve this problem completely. This function will |
//| return barycentric model in any case, but for N>8 accuracy |
//| well degrade. However, N's less than 5 are pretty safe. |
//+------------------------------------------------------------------+
void CAlglib::PolynomialPow2Bar(double &a[],CBarycentricInterpolantShell &p)
{
//--- convenience overload: N from the size of A, default offset C=0, scale S=1
CPolInt::PolynomialPow2Bar(a,CAp::Len(a),0.0,1.0,p.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Lagrange interpolant: generation of the model on a general grid. |
//| This function has O(N^2) complexity. |
//| INPUT PARAMETERS: |
//| X - abscissas, array[0..N-1] |
//| Y - function values, array[0..N-1] |
//| N - number of points, N>=1 |
//| OUTPUT PARAMETERS |
//| P - barycentric model which represents Lagrange |
//| interpolant (see ratint unit info and |
//| BarycentricCalc() description for more information). |
//+------------------------------------------------------------------+
void CAlglib::PolynomialBuild(double &x[],double &y[],const int n,
CBarycentricInterpolantShell &p)
{
//--- thin wrapper: build Lagrange interpolant on a general grid, O(N^2)
CPolInt::PolynomialBuild(x,y,n,p.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Lagrange interpolant: generation of the model on a general grid. |
//| This function has O(N^2) complexity. |
//| INPUT PARAMETERS: |
//| X - abscissas, array[0..N-1] |
//| Y - function values, array[0..N-1] |
//| N - number of points, N>=1 |
//| OUTPUT PARAMETERS |
//| P - barycentric model which represents Lagrange |
//| interpolant (see ratint unit info and |
//| BarycentricCalc() description for more information). |
//+------------------------------------------------------------------+
void CAlglib::PolynomialBuild(double &x[],double &y[],
CBarycentricInterpolantShell &p)
{
//--- auto-detect point count; X and Y must be of equal length
int n=CAp::Len(x);
if(n!=CAp::Len(y))
{
Print("Error while calling 'polynomialbuild': looks like one of arguments has wrong size");
CAp::exception_happened=true;
return;
}
//--- build the Lagrange interpolant
CPolInt::PolynomialBuild(x,y,n,p.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Lagrange interpolant: generation of a model on equidistant grid. |
//| This function has O(N) complexity. |
//| INPUT PARAMETERS: |
//| A - left boundary of [A,B] |
//| B - right boundary of [A,B] |
//| Y - function values at the nodes, array[0..N-1] |
//| N - number of points, N>=1 |
//| for N=1 a constant model is constructed. |
//| OUTPUT PARAMETERS |
//| P - barycentric model which represents Lagrange |
//| interpolant (see ratint unit info and |
//| BarycentricCalc() description for more information). |
//+------------------------------------------------------------------+
void CAlglib::PolynomialBuildEqDist(const double a,const double b,
double &y[],const int n,
CBarycentricInterpolantShell &p)
{
//--- thin wrapper: Lagrange interpolant on equidistant grid over [A,B], O(N)
CPolInt::PolynomialBuildEqDist(a,b,y,n,p.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Lagrange interpolant: generation of a model on equidistant grid. |
//| This function has O(N) complexity. |
//| INPUT PARAMETERS: |
//| A - left boundary of [A,B] |
//| B - right boundary of [A,B] |
//| Y - function values at the nodes, array[0..N-1] |
//| N - number of points, N>=1 |
//| for N=1 a constant model is constructed. |
//| OUTPUT PARAMETERS |
//| P - barycentric model which represents Lagrange |
//| interpolant (see ratint unit info and |
//| BarycentricCalc() description for more information). |
//+------------------------------------------------------------------+
void CAlglib::PolynomialBuildEqDist(const double a,const double b,
double &y[],
CBarycentricInterpolantShell &p)
{
//--- convenience overload: node count is deduced from the size of Y
CPolInt::PolynomialBuildEqDist(a,b,y,CAp::Len(y),p.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Lagrange interpolant on Chebyshev grid (first kind).            |
//| This function has O(N) complexity. |
//| INPUT PARAMETERS: |
//| A - left boundary of [A,B] |
//| B - right boundary of [A,B] |
//| Y - function values at the nodes, array[0..N-1], |
//| Y[I] = Y(0.5*(B+A) + 0.5*(B-A)*Cos(PI*(2*i+1)/(2*n)))|
//| N - number of points, N>=1 |
//| for N=1 a constant model is constructed. |
//| OUTPUT PARAMETERS |
//| P - barycentric model which represents Lagrange |
//| interpolant (see ratint unit info and |
//| BarycentricCalc() description for more information). |
//+------------------------------------------------------------------+
void CAlglib::PolynomialBuildCheb1(const double a,const double b,
double &y[],const int n,
CBarycentricInterpolantShell &p)
{
//--- thin wrapper: Lagrange interpolant on Chebyshev grid (first kind), O(N)
CPolInt::PolynomialBuildCheb1(a,b,y,n,p.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Lagrange interpolant on Chebyshev grid (first kind).            |
//| This function has O(N) complexity. |
//| INPUT PARAMETERS: |
//| A - left boundary of [A,B] |
//| B - right boundary of [A,B] |
//| Y - function values at the nodes, array[0..N-1], |
//| Y[I] = Y(0.5*(B+A) + 0.5*(B-A)*Cos(PI*(2*i+1)/(2*n)))|
//| N - number of points, N>=1 |
//| for N=1 a constant model is constructed. |
//| OUTPUT PARAMETERS |
//| P - barycentric model which represents Lagrange |
//| interpolant (see ratint unit info and |
//| BarycentricCalc() description for more information). |
//+------------------------------------------------------------------+
void CAlglib::PolynomialBuildCheb1(const double a,const double b,
double &y[],
CBarycentricInterpolantShell &p)
{
//--- convenience overload: node count is deduced from the size of Y
CPolInt::PolynomialBuildCheb1(a,b,y,CAp::Len(y),p.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Lagrange interpolant on Chebyshev grid (second kind).           |
//| This function has O(N) complexity. |
//| INPUT PARAMETERS: |
//| A - left boundary of [A,B] |
//| B - right boundary of [A,B] |
//| Y - function values at the nodes, array[0..N-1], |
//| Y[I] = Y(0.5*(B+A) + 0.5*(B-A)*Cos(PI*i/(n-1))) |
//| N - number of points, N>=1 |
//| for N=1 a constant model is constructed. |
//| OUTPUT PARAMETERS |
//| P - barycentric model which represents Lagrange |
//| interpolant (see ratint unit info and |
//| BarycentricCalc() description for more information). |
//+------------------------------------------------------------------+
void CAlglib::PolynomialBuildCheb2(const double a,const double b,
double &y[],const int n,
CBarycentricInterpolantShell &p)
{
//--- thin wrapper: Lagrange interpolant on Chebyshev grid (second kind), O(N)
CPolInt::PolynomialBuildCheb2(a,b,y,n,p.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Lagrange interpolant on Chebyshev grid (second kind).           |
//| This function has O(N) complexity. |
//| INPUT PARAMETERS: |
//| A - left boundary of [A,B] |
//| B - right boundary of [A,B] |
//| Y - function values at the nodes, array[0..N-1], |
//| Y[I] = Y(0.5*(B+A) + 0.5*(B-A)*Cos(PI*i/(n-1))) |
//| N - number of points, N>=1 |
//| for N=1 a constant model is constructed. |
//| OUTPUT PARAMETERS |
//| P - barycentric model which represents Lagrange |
//| interpolant (see ratint unit info and |
//| BarycentricCalc() description for more information). |
//+------------------------------------------------------------------+
void CAlglib::PolynomialBuildCheb2(const double a,const double b,
double &y[],
CBarycentricInterpolantShell &p)
{
//--- convenience overload: node count is deduced from the size of Y
CPolInt::PolynomialBuildCheb2(a,b,y,CAp::Len(y),p.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Fast equidistant polynomial interpolation function with O(N) |
//| complexity |
//| INPUT PARAMETERS: |
//| A - left boundary of [A,B] |
//| B - right boundary of [A,B] |
//| F - function values, array[0..N-1] |
//| N - number of points on equidistant grid, N>=1 |
//| for N=1 a constant model is constructed. |
//| T - position where P(x) is calculated |
//| RESULT |
//| value of the Lagrange interpolant at T |
//| IMPORTANT |
//| this function provides fast interface which is not |
//| overflow-safe nor it is very precise. |
//| the best option is to use PolynomialBuildEqDist() or |
//| BarycentricCalc() subroutines unless you are pretty sure that|
//| your data will not result in overflow. |
//+------------------------------------------------------------------+
double CAlglib::PolynomialCalcEqDist(const double a,const double b,
double &f[],const int n,
const double t)
{
//--- thin wrapper: fast O(N) evaluation on equidistant grid (not overflow-safe)
return(CPolInt::PolynomialCalcEqDist(a,b,f,n,t));
}
//+------------------------------------------------------------------+
//| Fast equidistant polynomial interpolation function with O(N) |
//| complexity |
//| INPUT PARAMETERS: |
//| A - left boundary of [A,B] |
//| B - right boundary of [A,B] |
//| F - function values, array[0..N-1] |
//| N - number of points on equidistant grid, N>=1 |
//| for N=1 a constant model is constructed. |
//| T - position where P(x) is calculated |
//| RESULT |
//| value of the Lagrange interpolant at T |
//| IMPORTANT |
//| this function provides fast interface which is not |
//| overflow-safe nor it is very precise. |
//| the best option is to use PolynomialBuildEqDist() or |
//| BarycentricCalc() subroutines unless you are pretty sure that|
//| your data will not result in overflow. |
//+------------------------------------------------------------------+
double CAlglib::PolynomialCalcEqDist(const double a,const double b,
double &f[],const double t)
{
//--- convenience overload: node count is deduced from the size of F
return(CPolInt::PolynomialCalcEqDist(a,b,f,CAp::Len(f),t));
}
//+------------------------------------------------------------------+
//| Fast polynomial interpolation function on Chebyshev points (first|
//| kind) with O(N) complexity. |
//| INPUT PARAMETERS: |
//| A - left boundary of [A,B] |
//| B - right boundary of [A,B] |
//| F - function values, array[0..N-1] |
//| N - number of points on Chebyshev grid (first kind), |
//| X[i] = 0.5*(B+A) + 0.5*(B-A)*Cos(PI*(2*i+1)/(2*n)) |
//| for N=1 a constant model is constructed. |
//| T - position where P(x) is calculated |
//| RESULT |
//| value of the Lagrange interpolant at T |
//| IMPORTANT |
//| this function provides fast interface which is not |
//| overflow-safe nor it is very precise |
//| the best option is to use PolIntBuildCheb1() or |
//| BarycentricCalc() subroutines unless you are pretty sure that|
//| your data will not result in overflow. |
//+------------------------------------------------------------------+
double CAlglib::PolynomialCalcCheb1(const double a,const double b,
double &f[],const int n,
const double t)
{
//--- thin wrapper: fast O(N) evaluation on Chebyshev grid of the first kind
return(CPolInt::PolynomialCalcCheb1(a,b,f,n,t));
}
//+------------------------------------------------------------------+
//| Fast polynomial interpolation function on Chebyshev points (first|
//| kind) with O(N) complexity. |
//| INPUT PARAMETERS: |
//| A - left boundary of [A,B] |
//| B - right boundary of [A,B] |
//| F - function values, array[0..N-1] |
//| N - number of points on Chebyshev grid (first kind), |
//| X[i] = 0.5*(B+A) + 0.5*(B-A)*Cos(PI*(2*i+1)/(2*n)) |
//| for N=1 a constant model is constructed. |
//| T - position where P(x) is calculated |
//| RESULT |
//| value of the Lagrange interpolant at T |
//| IMPORTANT |
//| this function provides fast interface which is not |
//| overflow-safe nor it is very precise |
//| the best option is to use PolIntBuildCheb1() or |
//| BarycentricCalc() subroutines unless you are pretty sure that|
//| your data will not result in overflow. |
//+------------------------------------------------------------------+
double CAlglib::PolynomialCalcCheb1(const double a,const double b,
double &f[],const double t)
{
//--- convenience overload: node count is deduced from the size of F
return(CPolInt::PolynomialCalcCheb1(a,b,f,CAp::Len(f),t));
}
//+------------------------------------------------------------------+
//| Fast polynomial interpolation function on Chebyshev points |
//| (second kind) with O(N) complexity. |
//| INPUT PARAMETERS: |
//| A - left boundary of [A,B] |
//| B - right boundary of [A,B] |
//| F - function values, array[0..N-1] |
//| N - number of points on Chebyshev grid (second kind), |
//| X[i] = 0.5*(B+A) + 0.5*(B-A)*Cos(PI*i/(n-1)) |
//| for N=1 a constant model is constructed. |
//| T - position where P(x) is calculated |
//| RESULT |
//| value of the Lagrange interpolant at T |
//| IMPORTANT |
//| this function provides fast interface which is not |
//| overflow-safe nor it is very precise. |
//| the best option is to use PolIntBuildCheb2() or |
//| BarycentricCalc() subroutines unless you are pretty sure that|
//| your data will not result in overflow. |
//+------------------------------------------------------------------+
double CAlglib::PolynomialCalcCheb2(const double a,const double b,
double &f[],const int n,
const double t)
{
//--- thin wrapper: fast O(N) evaluation on Chebyshev grid of the second kind
return(CPolInt::PolynomialCalcCheb2(a,b,f,n,t));
}
//+------------------------------------------------------------------+
//| Fast polynomial interpolation function on Chebyshev points |
//| (second kind) with O(N) complexity. |
//| INPUT PARAMETERS: |
//| A - left boundary of [A,B] |
//| B - right boundary of [A,B] |
//| F - function values, array[0..N-1] |
//| N - number of points on Chebyshev grid (second kind), |
//| X[i] = 0.5*(B+A) + 0.5*(B-A)*Cos(PI*i/(n-1)) |
//| for N=1 a constant model is constructed. |
//| T - position where P(x) is calculated |
//| RESULT |
//| value of the Lagrange interpolant at T |
//| IMPORTANT |
//| this function provides fast interface which is not |
//| overflow-safe nor it is very precise. |
//| the best option is to use PolIntBuildCheb2() or |
//| BarycentricCalc() subroutines unless you are pretty sure that|
//| your data will not result in overflow. |
//+------------------------------------------------------------------+
double CAlglib::PolynomialCalcCheb2(const double a,const double b,
double &f[],const double t)
{
//--- convenience overload: node count is deduced from the size of F
return(CPolInt::PolynomialCalcCheb2(a,b,f,CAp::Len(f),t));
}
//+------------------------------------------------------------------+
//| This subroutine builds linear spline interpolant |
//| INPUT PARAMETERS: |
//| X - spline nodes, array[0..N-1] |
//| Y - function values, array[0..N-1] |
//| N - points count (optional): |
//| * N>=2 |
//| * if given, only first N points are used to build |
//| spline |
//| * if not given, automatically detected from X/Y |
//| sizes (len(X) must be equal to len(Y)) |
//| OUTPUT PARAMETERS: |
//| C - spline interpolant |
//| ORDER OF POINTS |
//| Subroutine automatically sorts points, so caller may pass |
//| unsorted array. |
//+------------------------------------------------------------------+
void CAlglib::Spline1DBuildLinear(double &x[],double &y[],const int n,
CSpline1DInterpolantShell &c)
{
//--- thin wrapper: build linear spline from first N points (core sorts nodes)
CSpline1D::Spline1DBuildLinear(x,y,n,c.GetInnerObj());
}
//+------------------------------------------------------------------+
//| This subroutine builds linear spline interpolant |
//| INPUT PARAMETERS: |
//| X - spline nodes, array[0..N-1] |
//| Y - function values, array[0..N-1] |
//| N - points count (optional): |
//| * N>=2 |
//| * if given, only first N points are used to build |
//| spline |
//| * if not given, automatically detected from X/Y |
//| sizes (len(X) must be equal to len(Y)) |
//| OUTPUT PARAMETERS: |
//| C - spline interpolant |
//| ORDER OF POINTS |
//| Subroutine automatically sorts points, so caller may pass |
//| unsorted array. |
//+------------------------------------------------------------------+
void CAlglib::Spline1DBuildLinear(double &x[],double &y[],
CSpline1DInterpolantShell &c)
{
//--- auto-detect point count; X and Y must be of equal length
int n=CAp::Len(x);
if(n!=CAp::Len(y))
{
Print("Error while calling 'spline1dbuildlinear': looks like one of arguments has wrong size");
CAp::exception_happened=true;
return;
}
//--- build the linear spline interpolant
CSpline1D::Spline1DBuildLinear(x,y,n,c.GetInnerObj());
}
//+------------------------------------------------------------------+
//| This subroutine builds cubic spline interpolant. |
//| INPUT PARAMETERS: |
//| X - spline nodes, array[0..N-1]. |
//| Y - function values, array[0..N-1]. |
//| OPTIONAL PARAMETERS: |
//| N - points count: |
//| * N>=2 |
//| * if given, only first N points are used to |
//| build spline |
//| * if not given, automatically detected from |
//| X/Y sizes (len(X) must be equal to len(Y)) |
//| BoundLType - boundary condition type for the left boundary|
//| BoundL - left boundary condition (first or second |
//| derivative, depending on the BoundLType) |
//| BoundRType - boundary condition type for the right |
//| boundary |
//| BoundR - right boundary condition (first or second |
//| derivative, depending on the BoundRType) |
//| OUTPUT PARAMETERS: |
//| C - spline interpolant |
//| ORDER OF POINTS |
//| Subroutine automatically sorts points, so caller may pass |
//| unsorted array. |
//| SETTING BOUNDARY VALUES: |
//| The BoundLType/BoundRType parameters can have the following |
//| values: |
//|     * -1, which corresponds to the periodic (cyclic) boundary   |
//| conditions. In this case: |
//| * both BoundLType and BoundRType must be equal to -1. |
//| * BoundL/BoundR are ignored |
//| * Y[last] is ignored (it is assumed to be equal to |
//| Y[first]). |
//| * 0, which corresponds to the parabolically terminated |
//| spline (BoundL and/or BoundR are ignored). |
//| * 1, which corresponds to the first derivative boundary |
//| condition |
//| * 2, which corresponds to the second derivative boundary |
//| condition |
//| * by default, BoundType=0 is used |
//| PROBLEMS WITH PERIODIC BOUNDARY CONDITIONS: |
//| Problems with periodic boundary conditions have |
//| Y[first_point]=Y[last_point]. However, this subroutine doesn't |
//| require you to specify equal values for the first and last |
//| points - it automatically forces them to be equal by copying |
//| Y[first_point] (corresponds to the leftmost, minimal X[]) to |
//| Y[last_point]. However it is recommended to pass consistent |
//| values of Y[], i.e. to make Y[first_point]=Y[last_point]. |
//+------------------------------------------------------------------+
void CAlglib::Spline1DBuildCubic(double &x[],double &y[],const int n,
const int boundltype,const double boundl,
const int boundrtype,const double boundr,
CSpline1DInterpolantShell &c)
{
//--- thin wrapper: cubic spline with explicit boundary conditions on each side
CSpline1D::Spline1DBuildCubic(x,y,n,boundltype,boundl,boundrtype,boundr,c.GetInnerObj());
}
//+------------------------------------------------------------------+
//| This subroutine builds cubic spline interpolant. |
//| INPUT PARAMETERS: |
//| X - spline nodes, array[0..N-1]. |
//| Y - function values, array[0..N-1]. |
//| OPTIONAL PARAMETERS: |
//| N - points count: |
//| * N>=2 |
//| * if given, only first N points are used to |
//| build spline |
//| * if not given, automatically detected from |
//| X/Y sizes (len(X) must be equal to len(Y)) |
//| BoundLType - boundary condition type for the left boundary|
//| BoundL - left boundary condition (first or second |
//| derivative, depending on the BoundLType) |
//| BoundRType - boundary condition type for the right |
//| boundary |
//| BoundR - right boundary condition (first or second |
//| derivative, depending on the BoundRType) |
//| OUTPUT PARAMETERS: |
//| C - spline interpolant |
//| ORDER OF POINTS |
//| Subroutine automatically sorts points, so caller may pass |
//| unsorted array. |
//| SETTING BOUNDARY VALUES: |
//| The BoundLType/BoundRType parameters can have the following |
//| values: |
//| * -1, which corresponds to the periodic (cyclic) boundary |
//| conditions. In this case: |
//| * both BoundLType and BoundRType must be equal to -1. |
//| * BoundL/BoundR are ignored |
//| * Y[last] is ignored (it is assumed to be equal to |
//| Y[first]). |
//| * 0, which corresponds to the parabolically terminated |
//| spline (BoundL and/or BoundR are ignored). |
//| * 1, which corresponds to the first derivative boundary |
//| condition |
//| * 2, which corresponds to the second derivative boundary |
//| condition |
//| * by default, BoundType=0 is used |
//| PROBLEMS WITH PERIODIC BOUNDARY CONDITIONS: |
//| Problems with periodic boundary conditions have |
//| Y[first_point]=Y[last_point]. However, this subroutine doesn't |
//| require you to specify equal values for the first and last |
//| points - it automatically forces them to be equal by copying |
//| Y[first_point] (corresponds to the leftmost, minimal X[]) to |
//| Y[last_point]. However it is recommended to pass consistent |
//| values of Y[], i.e. to make Y[first_point]=Y[last_point]. |
//+------------------------------------------------------------------+
void CAlglib::Spline1DBuildCubic(double &x[],double &y[],
                                 CSpline1DInterpolantShell &c)
  {
//--- the point count is auto-detected, so X and Y must have equal length
   if(CAp::Len(x)!=CAp::Len(y))
     {
      Print("Error while calling 'spline1dbuildcubic': looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return;
     }
//--- defaults: boundary type 0 on both ends (parabolically terminated
//--- spline); the boundary values are ignored in this mode
   int    points=CAp::Len(x);
   int    ltype=0;
   int    rtype=0;
   double lbound=0.0;
   double rbound=0.0;
//--- build the interpolant via the computational core
   CSpline1D::Spline1DBuildCubic(x,y,points,ltype,lbound,rtype,rbound,c.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| This function solves following problem: given table y[] of |
//| function values at nodes x[], it calculates and returns table of |
//| function derivatives d[] (calculated at the same nodes x[]). |
//| This function yields same result as Spline1DBuildCubic() call |
//| followed by sequence of Spline1DDiff() calls, but it can be |
//| several times faster when called for ordered X[] and X2[]. |
//| INPUT PARAMETERS: |
//| X - spline nodes |
//| Y - function values |
//| OPTIONAL PARAMETERS: |
//| N - points count: |
//| * N>=2 |
//| * if given, only first N points are used |
//| * if not given, automatically detected from |
//| X/Y sizes (len(X) must be equal to len(Y)) |
//| BoundLType - boundary condition type for the left boundary|
//| BoundL - left boundary condition (first or second |
//| derivative, depending on the BoundLType) |
//| BoundRType - boundary condition type for the right |
//| boundary |
//| BoundR - right boundary condition (first or second |
//| derivative, depending on the BoundRType) |
//| OUTPUT PARAMETERS: |
//| D - derivative values at X[] |
//| ORDER OF POINTS |
//| Subroutine automatically sorts points, so caller may pass |
//| unsorted array. Derivative values are correctly reordered on |
//| return, so D[I] is always equal to S'(X[I]) independently of |
//| points order. |
//| SETTING BOUNDARY VALUES: |
//| The BoundLType/BoundRType parameters can have the following |
//| values: |
//| * -1, which corresponds to the periodic (cyclic) boundary |
//| conditions. In this case: |
//| * both BoundLType and BoundRType must be equal to -1. |
//| * BoundL/BoundR are ignored |
//| * Y[last] is ignored (it is assumed to be equal to |
//| Y[first]). |
//| * 0, which corresponds to the parabolically terminated |
//| spline (BoundL and/or BoundR are ignored). |
//| * 1, which corresponds to the first derivative boundary |
//| condition |
//| * 2, which corresponds to the second derivative boundary |
//| condition |
//| * by default, BoundType=0 is used |
//| PROBLEMS WITH PERIODIC BOUNDARY CONDITIONS: |
//| Problems with periodic boundary conditions have |
//| Y[first_point]=Y[last_point]. However, this subroutine doesn't |
//| require you to specify equal values for the first and last |
//| points - it automatically forces them to be equal by copying |
//| Y[first_point] (corresponds to the leftmost, minimal X[]) to |
//| Y[last_point]. However it is recommended to pass consistent |
//| values of Y[], i.e. to make Y[first_point]=Y[last_point]. |
//+------------------------------------------------------------------+
void CAlglib::Spline1DGridDiffCubic(double &x[],double &y[],
const int n,const int boundltype,
const double boundl,const int boundrtype,
const double boundr,double &d[])
{
//--- direct delegation to the computational core; N and the boundary
//--- conditions are passed as-is, derivatives are returned in d[]
CSpline1D::Spline1DGridDiffCubic(x,y,n,boundltype,boundl,boundrtype,boundr,d);
}
//+------------------------------------------------------------------+
//| This function solves following problem: given table y[] of |
//| function values at nodes x[], it calculates and returns table of |
//| function derivatives d[] (calculated at the same nodes x[]). |
//| This function yields same result as Spline1DBuildCubic() call |
//| followed by sequence of Spline1DDiff() calls, but it can be |
//| several times faster when called for ordered X[] and X2[]. |
//| INPUT PARAMETERS: |
//| X - spline nodes |
//| Y - function values |
//| OPTIONAL PARAMETERS: |
//| N - points count: |
//| * N>=2 |
//| * if given, only first N points are used |
//| * if not given, automatically detected from |
//| X/Y sizes (len(X) must be equal to len(Y)) |
//| BoundLType - boundary condition type for the left boundary|
//| BoundL - left boundary condition (first or second |
//| derivative, depending on the BoundLType) |
//| BoundRType - boundary condition type for the right |
//| boundary |
//| BoundR - right boundary condition (first or second |
//| derivative, depending on the BoundRType) |
//| OUTPUT PARAMETERS: |
//| D - derivative values at X[] |
//| ORDER OF POINTS |
//| Subroutine automatically sorts points, so caller may pass |
//| unsorted array. Derivative values are correctly reordered on |
//| return, so D[I] is always equal to S'(X[I]) independently of |
//| points order. |
//| SETTING BOUNDARY VALUES: |
//| The BoundLType/BoundRType parameters can have the following |
//| values: |
//| * -1, which corresponds to the periodic (cyclic) boundary |
//| conditions. In this case: |
//| * both BoundLType and BoundRType must be equal to -1. |
//| * BoundL/BoundR are ignored |
//| * Y[last] is ignored (it is assumed to be equal to |
//| Y[first]). |
//| * 0, which corresponds to the parabolically terminated |
//| spline (BoundL and/or BoundR are ignored). |
//| * 1, which corresponds to the first derivative boundary |
//| condition |
//| * 2, which corresponds to the second derivative boundary |
//| condition |
//| * by default, BoundType=0 is used |
//| PROBLEMS WITH PERIODIC BOUNDARY CONDITIONS: |
//| Problems with periodic boundary conditions have |
//| Y[first_point]=Y[last_point]. However, this subroutine doesn't |
//| require you to specify equal values for the first and last |
//| points - it automatically forces them to be equal by copying |
//| Y[first_point] (corresponds to the leftmost, minimal X[]) to |
//| Y[last_point]. However it is recommended to pass consistent |
//| values of Y[], i.e. to make Y[first_point]=Y[last_point]. |
//+------------------------------------------------------------------+
void CAlglib::Spline1DGridDiffCubic(double &x[],double &y[],
                                    double &d[])
  {
//--- N is taken from the arrays, so X and Y sizes must agree
   if(CAp::Len(x)!=CAp::Len(y))
     {
      Print("Error while calling 'spline1dgriddiffcubic': looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return;
     }
//--- defaults: boundary type 0 on both ends (parabolic termination);
//--- boundary values are unused in this mode
   int    points=CAp::Len(x);
   int    ltype=0;
   int    rtype=0;
   double lbound=0.0;
   double rbound=0.0;
//--- compute the derivative table at the nodes
   CSpline1D::Spline1DGridDiffCubic(x,y,points,ltype,lbound,rtype,rbound,d);
  }
//+------------------------------------------------------------------+
//| This function solves following problem: given table y[] of |
//| function values at nodes x[], it calculates and returns tables of|
//| first and second function derivatives d1[] and d2[] (calculated |
//| at the same nodes x[]). |
//| This function yields same result as Spline1DBuildCubic() call |
//| followed by sequence of Spline1DDiff() calls, but it can be |
//| several times faster when called for ordered X[] and X2[]. |
//| INPUT PARAMETERS: |
//| X - spline nodes |
//| Y - function values |
//| OPTIONAL PARAMETERS: |
//| N - points count: |
//| * N>=2 |
//| * if given, only first N points are used |
//| * if not given, automatically detected from |
//| X/Y sizes (len(X) must be equal to len(Y)) |
//| BoundLType - boundary condition type for the left boundary|
//| BoundL - left boundary condition (first or second |
//| derivative, depending on the BoundLType) |
//| BoundRType - boundary condition type for the right |
//| boundary |
//| BoundR - right boundary condition (first or second |
//| derivative, depending on the BoundRType) |
//| OUTPUT PARAMETERS: |
//| D1 - S' values at X[] |
//| D2 - S'' values at X[] |
//| ORDER OF POINTS |
//| Subroutine automatically sorts points, so caller may pass |
//| unsorted array. Derivative values are correctly reordered on |
//| return, so D[I] is always equal to S'(X[I]) independently of |
//| points order. |
//| SETTING BOUNDARY VALUES: |
//| The BoundLType/BoundRType parameters can have the following |
//| values: |
//| * -1, which corresponds to the periodic (cyclic) boundary |
//| conditions. In this case: |
//| * both BoundLType and BoundRType must be equal to -1. |
//| * BoundL/BoundR are ignored |
//| * Y[last] is ignored (it is assumed to be equal to |
//| Y[first]). |
//| * 0, which corresponds to the parabolically terminated |
//| spline (BoundL and/or BoundR are ignored). |
//| * 1, which corresponds to the first derivative boundary |
//| condition |
//| * 2, which corresponds to the second derivative boundary |
//| condition |
//| * by default, BoundType=0 is used |
//| PROBLEMS WITH PERIODIC BOUNDARY CONDITIONS: |
//| Problems with periodic boundary conditions have |
//| Y[first_point]=Y[last_point]. |
//| However, this subroutine doesn't require you to specify equal |
//| values for the first and last points - it automatically forces |
//| them to be equal by copying Y[first_point] (corresponds to the |
//| leftmost, minimal X[]) to Y[last_point]. However it is |
//| recommended to pass consistent values of Y[], i.e. to make |
//| Y[first_point]=Y[last_point]. |
//+------------------------------------------------------------------+
void CAlglib::Spline1DGridDiff2Cubic(double &x[],double &y[],
const int n,const int boundltype,
const double boundl,const int boundrtype,
const double boundr,double &d1[],
double &d2[])
{
//--- direct delegation to the computational core; first derivatives are
//--- returned in d1[], second derivatives in d2[]
CSpline1D::Spline1DGridDiff2Cubic(x,y,n,boundltype,boundl,boundrtype,boundr,d1,d2);
}
//+------------------------------------------------------------------+
//| This function solves following problem: given table y[] of |
//| function values at nodes x[], it calculates and returns tables of|
//| first and second function derivatives d1[] and d2[] (calculated |
//| at the same nodes x[]). |
//| This function yields same result as Spline1DBuildCubic() call |
//| followed by sequence of Spline1DDiff() calls, but it can be |
//| several times faster when called for ordered X[] and X2[]. |
//| INPUT PARAMETERS: |
//| X - spline nodes |
//| Y - function values |
//| OPTIONAL PARAMETERS: |
//| N - points count: |
//| * N>=2 |
//| * if given, only first N points are used |
//| * if not given, automatically detected from |
//| X/Y sizes (len(X) must be equal to len(Y)) |
//| BoundLType - boundary condition type for the left boundary|
//| BoundL - left boundary condition (first or second |
//| derivative, depending on the BoundLType) |
//| BoundRType - boundary condition type for the right |
//| boundary |
//| BoundR - right boundary condition (first or second |
//| derivative, depending on the BoundRType) |
//| OUTPUT PARAMETERS: |
//| D1 - S' values at X[] |
//| D2 - S'' values at X[] |
//| ORDER OF POINTS |
//| Subroutine automatically sorts points, so caller may pass |
//| unsorted array. Derivative values are correctly reordered on |
//| return, so D[I] is always equal to S'(X[I]) independently of |
//| points order. |
//| SETTING BOUNDARY VALUES: |
//| The BoundLType/BoundRType parameters can have the following |
//| values: |
//| * -1, which corresponds to the periodic (cyclic) boundary |
//| conditions. In this case: |
//| * both BoundLType and BoundRType must be equal to -1. |
//| * BoundL/BoundR are ignored |
//| * Y[last] is ignored (it is assumed to be equal to |
//| Y[first]). |
//| * 0, which corresponds to the parabolically terminated |
//| spline (BoundL and/or BoundR are ignored). |
//| * 1, which corresponds to the first derivative boundary |
//| condition |
//| * 2, which corresponds to the second derivative boundary |
//| condition |
//| * by default, BoundType=0 is used |
//| PROBLEMS WITH PERIODIC BOUNDARY CONDITIONS: |
//| Problems with periodic boundary conditions have |
//| Y[first_point]=Y[last_point]. |
//| However, this subroutine doesn't require you to specify equal |
//| values for the first and last points - it automatically forces |
//| them to be equal by copying Y[first_point] (corresponds to the |
//| leftmost, minimal X[]) to Y[last_point]. However it is |
//| recommended to pass consistent values of Y[], i.e. to make |
//| Y[first_point]=Y[last_point]. |
//+------------------------------------------------------------------+
void CAlglib::Spline1DGridDiff2Cubic(double &x[],double &y[],
                                     double &d1[],double &d2[])
  {
//--- N is auto-detected, which requires X and Y to be the same length
   if(CAp::Len(x)!=CAp::Len(y))
     {
      Print("Error while calling 'spline1dgriddiff2cubic': looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return;
     }
//--- defaults: boundary type 0 on both ends (parabolic termination);
//--- boundary values are unused in this mode
   int    points=CAp::Len(x);
   int    ltype=0;
   int    rtype=0;
   double lbound=0.0;
   double rbound=0.0;
//--- compute first (d1) and second (d2) derivative tables at the nodes
   CSpline1D::Spline1DGridDiff2Cubic(x,y,points,ltype,lbound,rtype,rbound,d1,d2);
  }
//+------------------------------------------------------------------+
//| This function solves following problem: given table y[] of |
//| function values at old nodes x[] and new nodes x2[], it |
//| calculates and returns table of function values y2[] (calculated |
//| at x2[]). |
//| This function yields same result as Spline1DBuildCubic() call |
//| followed by sequence of Spline1DDiff() calls, but it can be |
//| several times faster when called for ordered X[] and X2[]. |
//| INPUT PARAMETERS: |
//| X - old spline nodes |
//| Y - function values |
//| X2 - new spline nodes |
//| OPTIONAL PARAMETERS: |
//| N - points count: |
//| * N>=2 |
//| * if given, only first N points from X/Y are |
//| used |
//| * if not given, automatically detected from |
//| X/Y sizes (len(X) must be equal to len(Y)) |
//| BoundLType - boundary condition type for the left boundary|
//| BoundL - left boundary condition (first or second |
//| derivative, depending on the BoundLType) |
//| BoundRType - boundary condition type for the right |
//| boundary |
//| BoundR - right boundary condition (first or second |
//| derivative, depending on the BoundRType) |
//| N2 - new points count: |
//| * N2>=2 |
//| * if given, only first N2 points from X2 are |
//| used |
//| * if not given, automatically detected from |
//| X2 size |
//| OUTPUT PARAMETERS: |
//| F2 - function values at X2[] |
//| ORDER OF POINTS |
//| Subroutine automatically sorts points, so caller may pass |
//| unsorted array. Function values are correctly reordered on |
//| return, so F2[I] is always equal to S(X2[I]) independently of |
//| points order. |
//| SETTING BOUNDARY VALUES: |
//| The BoundLType/BoundRType parameters can have the following |
//| values: |
//| * -1, which corresponds to the periodic (cyclic) boundary |
//| conditions. In this case: |
//| * both BoundLType and BoundRType must be equal to -1. |
//| * BoundL/BoundR are ignored |
//| * Y[last] is ignored (it is assumed to be equal to |
//| Y[first]). |
//| * 0, which corresponds to the parabolically terminated |
//| spline (BoundL and/or BoundR are ignored). |
//| * 1, which corresponds to the first derivative boundary |
//| condition |
//| * 2, which corresponds to the second derivative boundary |
//| condition |
//| * by default, BoundType=0 is used |
//| PROBLEMS WITH PERIODIC BOUNDARY CONDITIONS: |
//| Problems with periodic boundary conditions have |
//| Y[first_point]=Y[last_point]. However, this subroutine doesn't |
//| require you to specify equal values for the first and last |
//| points - it automatically forces them to be equal by copying |
//| Y[first_point] (corresponds to the leftmost, minimal X[]) to |
//| Y[last_point]. However it is recommended to pass consistent |
//| values of Y[], i.e. to make Y[first_point]=Y[last_point]. |
//+------------------------------------------------------------------+
void CAlglib::Spline1DConvCubic(double &x[],double &y[],const int n,
const int boundltype,const double boundl,
const int boundrtype,const double boundr,
double &x2[],int n2,double &y2[])
{
//--- direct delegation to the computational core; interpolated function
//--- values at the new nodes x2[] are returned in y2[]
CSpline1D::Spline1DConvCubic(x,y,n,boundltype,boundl,boundrtype,boundr,x2,n2,y2);
}
//+------------------------------------------------------------------+
//| This function solves following problem: given table y[] of |
//| function values at old nodes x[] and new nodes x2[], it |
//| calculates and returns table of function values y2[] (calculated |
//| at x2[]). |
//| This function yields same result as Spline1DBuildCubic() call |
//| followed by sequence of Spline1DDiff() calls, but it can be |
//| several times faster when called for ordered X[] and X2[]. |
//| INPUT PARAMETERS: |
//| X - old spline nodes |
//| Y - function values |
//| X2 - new spline nodes |
//| OPTIONAL PARAMETERS: |
//| N - points count: |
//| * N>=2 |
//| * if given, only first N points from X/Y are |
//| used |
//| * if not given, automatically detected from |
//| X/Y sizes (len(X) must be equal to len(Y)) |
//| BoundLType - boundary condition type for the left boundary|
//| BoundL - left boundary condition (first or second |
//| derivative, depending on the BoundLType) |
//| BoundRType - boundary condition type for the right |
//| boundary |
//| BoundR - right boundary condition (first or second |
//| derivative, depending on the BoundRType) |
//| N2 - new points count: |
//| * N2>=2 |
//| * if given, only first N2 points from X2 are |
//| used |
//| * if not given, automatically detected from |
//| X2 size |
//| OUTPUT PARAMETERS: |
//| F2 - function values at X2[] |
//| ORDER OF POINTS |
//| Subroutine automatically sorts points, so caller may pass |
//| unsorted array. Function values are correctly reordered on |
//| return, so F2[I] is always equal to S(X2[I]) independently of |
//| points order. |
//| SETTING BOUNDARY VALUES: |
//| The BoundLType/BoundRType parameters can have the following |
//| values: |
//| * -1, which corresponds to the periodic (cyclic) boundary |
//| conditions. In this case: |
//| * both BoundLType and BoundRType must be equal to -1. |
//| * BoundL/BoundR are ignored |
//| * Y[last] is ignored (it is assumed to be equal to |
//| Y[first]). |
//| * 0, which corresponds to the parabolically terminated |
//| spline (BoundL and/or BoundR are ignored). |
//| * 1, which corresponds to the first derivative boundary |
//| condition |
//| * 2, which corresponds to the second derivative boundary |
//| condition |
//| * by default, BoundType=0 is used |
//| PROBLEMS WITH PERIODIC BOUNDARY CONDITIONS: |
//| Problems with periodic boundary conditions have |
//| Y[first_point]=Y[last_point]. However, this subroutine doesn't |
//| require you to specify equal values for the first and last |
//| points - it automatically forces them to be equal by copying |
//| Y[first_point] (corresponds to the leftmost, minimal X[]) to |
//| Y[last_point]. However it is recommended to pass consistent |
//| values of Y[], i.e. to make Y[first_point]=Y[last_point]. |
//+------------------------------------------------------------------+
void CAlglib::Spline1DConvCubic(double &x[],double &y[],
                                double &x2[],double &y2[])
  {
//--- N is taken from the arrays, so X and Y sizes must agree
   if(CAp::Len(x)!=CAp::Len(y))
     {
      Print("Error while calling 'spline1dconvcubic': looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return;
     }
//--- defaults: boundary type 0 on both ends (parabolic termination);
//--- boundary values are unused in this mode. N2 comes from X2 itself.
   int    points=CAp::Len(x);
   int    points2=CAp::Len(x2);
   int    ltype=0;
   int    rtype=0;
   double lbound=0.0;
   double rbound=0.0;
//--- evaluate the spline at the new nodes
   CSpline1D::Spline1DConvCubic(x,y,points,ltype,lbound,rtype,rbound,x2,points2,y2);
  }
//+------------------------------------------------------------------+
//| This function solves following problem: given table y[] of |
//| function values at old nodes x[] and new nodes x2[], it |
//| calculates and returns table of function values y2[] and |
//| derivatives d2[] (calculated at x2[]). |
//| This function yields same result as Spline1DBuildCubic() call |
//| followed by sequence of Spline1DDiff() calls, but it can be |
//| several times faster when called for ordered X[] and X2[]. |
//| INPUT PARAMETERS: |
//| X - old spline nodes |
//| Y - function values |
//| X2 - new spline nodes |
//| OPTIONAL PARAMETERS: |
//| N - points count: |
//| * N>=2 |
//| * if given, only first N points from X/Y are |
//| used |
//| * if not given, automatically detected from |
//| X/Y sizes (len(X) must be equal to len(Y)) |
//| BoundLType - boundary condition type for the left boundary|
//| BoundL - left boundary condition (first or second |
//| derivative, depending on the BoundLType) |
//| BoundRType - boundary condition type for the right |
//| boundary |
//| BoundR - right boundary condition (first or second |
//| derivative, depending on the BoundRType) |
//| N2 - new points count: |
//| * N2>=2 |
//| * if given, only first N2 points from X2 are |
//| used |
//| * if not given, automatically detected from |
//| X2 size |
//| OUTPUT PARAMETERS: |
//| F2 - function values at X2[] |
//| D2 - first derivatives at X2[] |
//| ORDER OF POINTS |
//| Subroutine automatically sorts points, so caller may pass |
//| unsorted array. Function values are correctly reordered on |
//| return, so F2[I] is always equal to S(X2[I]) independently of |
//| points order. |
//| SETTING BOUNDARY VALUES: |
//| The BoundLType/BoundRType parameters can have the following |
//| values: |
//| * -1, which corresponds to the periodic (cyclic) boundary |
//| conditions. In this case: |
//| * both BoundLType and BoundRType must be equal to -1. |
//| * BoundL/BoundR are ignored |
//| * Y[last] is ignored (it is assumed to be equal to |
//| Y[first]). |
//| * 0, which corresponds to the parabolically terminated |
//| spline (BoundL and/or BoundR are ignored). |
//| * 1, which corresponds to the first derivative boundary |
//| condition |
//| * 2, which corresponds to the second derivative boundary |
//| condition |
//| * by default, BoundType=0 is used |
//| PROBLEMS WITH PERIODIC BOUNDARY CONDITIONS: |
//| Problems with periodic boundary conditions have |
//| Y[first_point]=Y[last_point]. However, this subroutine doesn't |
//| require you to specify equal values for the first and last |
//| points - it automatically forces them to be equal by copying |
//| Y[first_point] (corresponds to the leftmost, minimal X[]) to |
//| Y[last_point]. However it is recommended to pass consistent |
//| values of Y[], i.e. to make Y[first_point]=Y[last_point]. |
//+------------------------------------------------------------------+
void CAlglib::Spline1DConvDiffCubic(double &x[],double &y[],
const int n,const int boundltype,
const double boundl,const int boundrtype,
const double boundr,double &x2[],
int n2,double &y2[],double &d2[])
{
//--- direct delegation to the computational core; function values at the
//--- new nodes x2[] are returned in y2[], first derivatives in d2[]
CSpline1D::Spline1DConvDiffCubic(x,y,n,boundltype,boundl,boundrtype,boundr,x2,n2,y2,d2);
}
//+------------------------------------------------------------------+
//| This function solves following problem: given table y[] of |
//| function values at old nodes x[] and new nodes x2[], it |
//| calculates and returns table of function values y2[] and |
//| derivatives d2[] (calculated at x2[]). |
//| This function yields same result as Spline1DBuildCubic() call |
//| followed by sequence of Spline1DDiff() calls, but it can be |
//| several times faster when called for ordered X[] and X2[]. |
//| INPUT PARAMETERS: |
//| X - old spline nodes |
//| Y - function values |
//| X2 - new spline nodes |
//| OPTIONAL PARAMETERS: |
//| N - points count: |
//| * N>=2 |
//| * if given, only first N points from X/Y are |
//| used |
//| * if not given, automatically detected from |
//| X/Y sizes (len(X) must be equal to len(Y)) |
//| BoundLType - boundary condition type for the left boundary|
//| BoundL - left boundary condition (first or second |
//| derivative, depending on the BoundLType) |
//| BoundRType - boundary condition type for the right |
//| boundary |
//| BoundR - right boundary condition (first or second |
//| derivative, depending on the BoundRType) |
//| N2 - new points count: |
//| * N2>=2 |
//| * if given, only first N2 points from X2 are |
//| used |
//| * if not given, automatically detected from |
//| X2 size |
//| OUTPUT PARAMETERS: |
//| F2 - function values at X2[] |
//| D2 - first derivatives at X2[] |
//| ORDER OF POINTS |
//| Subroutine automatically sorts points, so caller may pass |
//| unsorted array. Function values are correctly reordered on |
//| return, so F2[I] is always equal to S(X2[I]) independently of |
//| points order. |
//| SETTING BOUNDARY VALUES: |
//| The BoundLType/BoundRType parameters can have the following |
//| values: |
//| * -1, which corresponds to the periodic (cyclic) boundary |
//| conditions. In this case: |
//| * both BoundLType and BoundRType must be equal to -1. |
//| * BoundL/BoundR are ignored |
//| * Y[last] is ignored (it is assumed to be equal to |
//| Y[first]). |
//| * 0, which corresponds to the parabolically terminated |
//| spline (BoundL and/or BoundR are ignored). |
//| * 1, which corresponds to the first derivative boundary |
//| condition |
//| * 2, which corresponds to the second derivative boundary |
//| condition |
//| * by default, BoundType=0 is used |
//| PROBLEMS WITH PERIODIC BOUNDARY CONDITIONS: |
//| Problems with periodic boundary conditions have |
//| Y[first_point]=Y[last_point]. However, this subroutine doesn't |
//| require you to specify equal values for the first and last |
//| points - it automatically forces them to be equal by copying |
//| Y[first_point] (corresponds to the leftmost, minimal X[]) to |
//| Y[last_point]. However it is recommended to pass consistent |
//| values of Y[], i.e. to make Y[first_point]=Y[last_point]. |
//+------------------------------------------------------------------+
void CAlglib::Spline1DConvDiffCubic(double &x[],double &y[],
                                    double &x2[],double &y2[],
                                    double &d2[])
  {
//--- N is auto-detected, which requires X and Y to be the same length
   if(CAp::Len(x)!=CAp::Len(y))
     {
      Print("Error while calling 'spline1dconvdiffcubic': looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return;
     }
//--- defaults: boundary type 0 on both ends (parabolic termination);
//--- boundary values are unused in this mode. N2 comes from X2 itself.
   int    points=CAp::Len(x);
   int    points2=CAp::Len(x2);
   int    ltype=0;
   int    rtype=0;
   double lbound=0.0;
   double rbound=0.0;
//--- evaluate values (y2) and first derivatives (d2) at the new nodes
   CSpline1D::Spline1DConvDiffCubic(x,y,points,ltype,lbound,rtype,rbound,x2,points2,y2,d2);
  }
//+------------------------------------------------------------------+
//| This function solves following problem: given table y[] of |
//| function values at old nodes x[] and new nodes x2[], it |
//| calculates and returns table of function values y2[], first and |
//| second derivatives d2[] and dd2[] (calculated at x2[]). |
//| This function yields same result as Spline1DBuildCubic() call |
//| followed by sequence of Spline1DDiff() calls, but it can be |
//| several times faster when called for ordered X[] and X2[]. |
//| INPUT PARAMETERS: |
//| X - old spline nodes |
//| Y - function values |
//| X2 - new spline nodes |
//| OPTIONAL PARAMETERS: |
//| N - points count: |
//| * N>=2 |
//| * if given, only first N points from X/Y are |
//| used |
//| * if not given, automatically detected from |
//| X/Y sizes (len(X) must be equal to len(Y)) |
//| BoundLType - boundary condition type for the left boundary|
//| BoundL - left boundary condition (first or second |
//| derivative, depending on the BoundLType) |
//| BoundRType - boundary condition type for the right |
//| boundary |
//| BoundR - right boundary condition (first or second |
//| derivative, depending on the BoundRType) |
//| N2 - new points count: |
//| * N2>=2 |
//| * if given, only first N2 points from X2 are |
//| used |
//| * if not given, automatically detected from |
//| X2 size |
//| OUTPUT PARAMETERS: |
//| F2 - function values at X2[] |
//| D2 - first derivatives at X2[] |
//| DD2 - second derivatives at X2[] |
//| ORDER OF POINTS |
//| Subroutine automatically sorts points, so caller may pass |
//| unsorted array. Function values are correctly reordered on |
//| return, so F2[I] is always equal to S(X2[I]) independently of |
//| points order. |
//| SETTING BOUNDARY VALUES: |
//| The BoundLType/BoundRType parameters can have the following |
//| values: |
//| * -1, which corresponds to the periodic (cyclic) boundary |
//| conditions. In this case: |
//| * both BoundLType and BoundRType must be equal to -1. |
//| * BoundL/BoundR are ignored |
//| * Y[last] is ignored (it is assumed to be equal to |
//| Y[first]). |
//| * 0, which corresponds to the parabolically terminated |
//| spline (BoundL and/or BoundR are ignored). |
//| * 1, which corresponds to the first derivative boundary |
//| condition |
//| * 2, which corresponds to the second derivative boundary |
//| condition |
//| * by default, BoundType=0 is used |
//| PROBLEMS WITH PERIODIC BOUNDARY CONDITIONS: |
//| Problems with periodic boundary conditions have |
//| Y[first_point]=Y[last_point]. However, this subroutine doesn't |
//| require you to specify equal values for the first and last |
//| points - it automatically forces them to be equal by copying |
//| Y[first_point] (corresponds to the leftmost, minimal X[]) to |
//| Y[last_point]. However it is recommended to pass consistent |
//| values of Y[], i.e. to make Y[first_point]=Y[last_point]. |
//+------------------------------------------------------------------+
void CAlglib::Spline1DConvDiff2Cubic(double &x[],double &y[],
const int n,const int boundltype,
const double boundl,const int boundrtype,
const double boundr,double &x2[],
const int n2,double &y2[],
double &d2[],double &dd2[])
{
//--- direct delegation to the computational core; function values at the
//--- new nodes x2[] are returned in y2[], first derivatives in d2[],
//--- second derivatives in dd2[]
CSpline1D::Spline1DConvDiff2Cubic(x,y,n,boundltype,boundl,boundrtype,boundr,x2,n2,y2,d2,dd2);
}
//+------------------------------------------------------------------+
//| This function solves following problem: given table y[] of |
//| function values at old nodes x[] and new nodes x2[], it |
//| calculates and returns table of function values y2[], first and |
//| second derivatives d2[] and dd2[] (calculated at x2[]). |
//| This function yields same result as Spline1DBuildCubic() call |
//| followed by sequence of Spline1DDiff() calls, but it can be |
//| several times faster when called for ordered X[] and X2[]. |
//| INPUT PARAMETERS: |
//| X - old spline nodes |
//| Y - function values |
//| X2 - new spline nodes |
//| OPTIONAL PARAMETERS: |
//| N - points count: |
//| * N>=2 |
//| * if given, only first N points from X/Y are |
//| used |
//| * if not given, automatically detected from |
//| X/Y sizes (len(X) must be equal to len(Y)) |
//| BoundLType - boundary condition type for the left boundary|
//| BoundL - left boundary condition (first or second |
//| derivative, depending on the BoundLType) |
//| BoundRType - boundary condition type for the right |
//| boundary |
//| BoundR - right boundary condition (first or second |
//| derivative, depending on the BoundRType) |
//| N2 - new points count: |
//| * N2>=2 |
//| * if given, only first N2 points from X2 are |
//| used |
//| * if not given, automatically detected from |
//| X2 size |
//| OUTPUT PARAMETERS: |
//| F2 - function values at X2[] |
//| D2 - first derivatives at X2[] |
//| DD2 - second derivatives at X2[] |
//| ORDER OF POINTS |
//| Subroutine automatically sorts points, so caller may pass |
//| unsorted array. Function values are correctly reordered on |
//| return, so F2[I] is always equal to S(X2[I]) independently of |
//| points order. |
//| SETTING BOUNDARY VALUES: |
//| The BoundLType/BoundRType parameters can have the following |
//| values: |
//|   * -1, which corresponds to the periodic (cyclic) boundary      |
//| conditions. In this case: |
//| * both BoundLType and BoundRType must be equal to -1. |
//| * BoundL/BoundR are ignored |
//| * Y[last] is ignored (it is assumed to be equal to |
//| Y[first]). |
//| * 0, which corresponds to the parabolically terminated |
//| spline (BoundL and/or BoundR are ignored). |
//| * 1, which corresponds to the first derivative boundary |
//| condition |
//| * 2, which corresponds to the second derivative boundary |
//| condition |
//| * by default, BoundType=0 is used |
//| PROBLEMS WITH PERIODIC BOUNDARY CONDITIONS: |
//| Problems with periodic boundary conditions have |
//| Y[first_point]=Y[last_point]. However, this subroutine doesn't |
//| require you to specify equal values for the first and last |
//| points - it automatically forces them to be equal by copying |
//| Y[first_point] (corresponds to the leftmost, minimal X[]) to |
//| Y[last_point]. However it is recommended to pass consistent |
//| values of Y[], i.e. to make Y[first_point]=Y[last_point]. |
//+------------------------------------------------------------------+
void CAlglib::Spline1DConvDiff2Cubic(double &x[],double &y[],
                                     double &x2[],double &y2[],
                                     double &d2[],double &dd2[])
  {
//--- X and Y must have matching lengths
   int n=CAp::Len(x);
   if(n!=CAp::Len(y))
     {
      Print("Error while calling 'spline1dconvdiff2cubic': looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return;
     }
//--- defaults: parabolically terminated spline (boundary type 0, boundary values ignored)
   int    boundltype=0;
   int    boundrtype=0;
   double boundl=0;
   double boundr=0;
   int    n2=CAp::Len(x2);
//--- delegate to the computational core
   CSpline1D::Spline1DConvDiff2Cubic(x,y,n,boundltype,boundl,boundrtype,boundr,x2,n2,y2,d2,dd2);
  }
//+------------------------------------------------------------------+
//| This subroutine builds Catmull-Rom spline interpolant. |
//| INPUT PARAMETERS: |
//| X - spline nodes, array[0..N-1]. |
//| Y - function values, array[0..N-1]. |
//| OPTIONAL PARAMETERS: |
//| N - points count: |
//| * N>=2 |
//| * if given, only first N points are used to |
//| build spline |
//| * if not given, automatically detected from |
//| X/Y sizes (len(X) must be equal to len(Y)) |
//| BoundType - boundary condition type: |
//| * -1 for periodic boundary condition |
//| * 0 for parabolically terminated spline |
//| (default) |
//| Tension - tension parameter: |
//| * tension=0 corresponds to classic |
//| Catmull-Rom spline (default) |
//| * 0<tension<1 corresponds to more general |
//| form - cardinal spline |
//| OUTPUT PARAMETERS: |
//| C - spline interpolant |
//| ORDER OF POINTS |
//| Subroutine automatically sorts points, so caller may pass |
//| unsorted array. |
//| PROBLEMS WITH PERIODIC BOUNDARY CONDITIONS: |
//| Problems with periodic boundary conditions have |
//| Y[first_point]=Y[last_point]. However, this subroutine doesn't |
//| require you to specify equal values for the first and last |
//| points - it automatically forces them to be equal by copying |
//| Y[first_point] (corresponds to the leftmost, minimal X[]) to |
//| Y[last_point]. However it is recommended to pass consistent |
//| values of Y[], i.e. to make Y[first_point]=Y[last_point]. |
//+------------------------------------------------------------------+
void CAlglib::Spline1DBuildCatmullRom(double &x[],double &y[],
                                      const int n,const int boundtype,
                                      const double tension,
                                      CSpline1DInterpolantShell &c)
  {
//--- thin wrapper: build the Catmull-Rom interpolant directly into the
//--- shell's inner spline object using the caller-supplied N/boundtype/tension
   CSpline1D::Spline1DBuildCatmullRom(x,y,n,boundtype,tension,c.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| This subroutine builds Catmull-Rom spline interpolant. |
//| INPUT PARAMETERS: |
//| X - spline nodes, array[0..N-1]. |
//| Y - function values, array[0..N-1]. |
//| OPTIONAL PARAMETERS: |
//| N - points count: |
//| * N>=2 |
//| * if given, only first N points are used to |
//| build spline |
//| * if not given, automatically detected from |
//| X/Y sizes (len(X) must be equal to len(Y)) |
//| BoundType - boundary condition type: |
//| * -1 for periodic boundary condition |
//| * 0 for parabolically terminated spline |
//| (default) |
//| Tension - tension parameter: |
//| * tension=0 corresponds to classic |
//| Catmull-Rom spline (default) |
//| * 0<tension<1 corresponds to more general |
//| form - cardinal spline |
//| OUTPUT PARAMETERS: |
//| C - spline interpolant |
//| ORDER OF POINTS |
//| Subroutine automatically sorts points, so caller may pass |
//| unsorted array. |
//| PROBLEMS WITH PERIODIC BOUNDARY CONDITIONS: |
//| Problems with periodic boundary conditions have |
//| Y[first_point]=Y[last_point]. However, this subroutine doesn't |
//| require you to specify equal values for the first and last |
//| points - it automatically forces them to be equal by copying |
//| Y[first_point] (corresponds to the leftmost, minimal X[]) to |
//| Y[last_point]. However it is recommended to pass consistent |
//| values of Y[], i.e. to make Y[first_point]=Y[last_point]. |
//+------------------------------------------------------------------+
void CAlglib::Spline1DBuildCatmullRom(double &x[],double &y[],
                                      CSpline1DInterpolantShell &c)
  {
//--- X and Y must have matching lengths
   int n=CAp::Len(x);
   if(n!=CAp::Len(y))
     {
      Print("Error while calling 'spline1dbuildcatmullrom': looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return;
     }
//--- defaults: parabolic termination, classic Catmull-Rom (zero tension)
   int    boundtype=0;
   double tension=0;
//--- delegate to the computational core
   CSpline1D::Spline1DBuildCatmullRom(x,y,n,boundtype,tension,c.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| This subroutine builds Hermite spline interpolant. |
//| INPUT PARAMETERS: |
//| X - spline nodes, array[0..N-1] |
//| Y - function values, array[0..N-1] |
//| D - derivatives, array[0..N-1] |
//| N - points count (optional): |
//| * N>=2 |
//| * if given, only first N points are used to |
//| build spline |
//| * if not given, automatically detected from |
//| X/Y sizes (len(X) must be equal to len(Y)) |
//| OUTPUT PARAMETERS: |
//| C - spline interpolant. |
//| ORDER OF POINTS |
//| Subroutine automatically sorts points, so caller may pass |
//| unsorted array. |
//+------------------------------------------------------------------+
void CAlglib::Spline1DBuildHermite(double &x[],double &y[],double &d[],
                                   const int n,CSpline1DInterpolantShell &c)
  {
//--- thin wrapper: build Hermite interpolant from nodes X, values Y and
//--- user-supplied derivatives D; result is stored in the shell's inner object
   CSpline1D::Spline1DBuildHermite(x,y,d,n,c.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| This subroutine builds Hermite spline interpolant. |
//| INPUT PARAMETERS: |
//| X - spline nodes, array[0..N-1] |
//| Y - function values, array[0..N-1] |
//| D - derivatives, array[0..N-1] |
//| N - points count (optional): |
//| * N>=2 |
//| * if given, only first N points are used to |
//| build spline |
//| * if not given, automatically detected from |
//| X/Y sizes (len(X) must be equal to len(Y)) |
//| OUTPUT PARAMETERS: |
//| C - spline interpolant. |
//| ORDER OF POINTS |
//| Subroutine automatically sorts points, so caller may pass |
//| unsorted array. |
//+------------------------------------------------------------------+
void CAlglib::Spline1DBuildHermite(double &x[],double &y[],double &d[],
                                   CSpline1DInterpolantShell &c)
  {
//--- X, Y and D must all have the same length
   int n=CAp::Len(x);
   if(n!=CAp::Len(y) || n!=CAp::Len(d))
     {
      Print("Error while calling 'spline1dbuildhermite': looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return;
     }
//--- delegate to the computational core
   CSpline1D::Spline1DBuildHermite(x,y,d,n,c.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| This subroutine builds Akima spline interpolant |
//| INPUT PARAMETERS: |
//| X - spline nodes, array[0..N-1] |
//| Y - function values, array[0..N-1] |
//| N - points count (optional): |
//| * N>=5 |
//| * if given, only first N points are used to |
//| build spline |
//| * if not given, automatically detected from |
//| X/Y sizes (len(X) must be equal to len(Y)) |
//| OUTPUT PARAMETERS: |
//| C - spline interpolant |
//| ORDER OF POINTS |
//| Subroutine automatically sorts points, so caller may pass |
//| unsorted array. |
//+------------------------------------------------------------------+
void CAlglib::Spline1DBuildAkima(double &x[],double &y[],const int n,
                                 CSpline1DInterpolantShell &c)
  {
//--- thin wrapper: build Akima interpolant (core requires N>=5) into the
//--- shell's inner spline object
   CSpline1D::Spline1DBuildAkima(x,y,n,c.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| This subroutine builds Akima spline interpolant |
//| INPUT PARAMETERS: |
//| X - spline nodes, array[0..N-1] |
//| Y - function values, array[0..N-1] |
//| N - points count (optional): |
//| * N>=5 |
//| * if given, only first N points are used to |
//| build spline |
//| * if not given, automatically detected from |
//| X/Y sizes (len(X) must be equal to len(Y)) |
//| OUTPUT PARAMETERS: |
//| C - spline interpolant |
//| ORDER OF POINTS |
//| Subroutine automatically sorts points, so caller may pass |
//| unsorted array. |
//+------------------------------------------------------------------+
void CAlglib::Spline1DBuildAkima(double &x[],double &y[],
                                 CSpline1DInterpolantShell &c)
  {
//--- X and Y must have matching lengths
   int n=CAp::Len(x);
   if(n!=CAp::Len(y))
     {
      Print("Error while calling 'spline1dbuildakima': looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return;
     }
//--- delegate to the computational core
   CSpline1D::Spline1DBuildAkima(x,y,n,c.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| This subroutine calculates the value of the spline at the given |
//| point X. |
//| INPUT PARAMETERS: |
//| C - spline interpolant |
//| X - point |
//| Result: |
//| S(x) |
//+------------------------------------------------------------------+
double CAlglib::Spline1DCalc(CSpline1DInterpolantShell &c,const double x)
  {
//--- evaluate S(x) on the shell's inner spline object and return it
   return(CSpline1D::Spline1DCalc(c.GetInnerObj(),x));
  }
//+------------------------------------------------------------------+
//| This subroutine differentiates the spline. |
//| INPUT PARAMETERS: |
//| C - spline interpolant. |
//| X - point |
//| Result: |
//| S - S(x) |
//| DS - S'(x) |
//| D2S - S''(x) |
//+------------------------------------------------------------------+
void CAlglib::Spline1DDiff(CSpline1DInterpolantShell &c,const double x,
                           double &s,double &ds,double &d2s)
  {
//--- initialization: reset output references before the core call
   s=0;
   ds=0;
   d2s=0;
//--- function call: fills s=S(x), ds=S'(x), d2s=S''(x)
   CSpline1D::Spline1DDiff(c.GetInnerObj(),x,s,ds,d2s);
  }
//+------------------------------------------------------------------+
//| This subroutine unpacks the spline into the coefficients table. |
//| INPUT PARAMETERS: |
//| C - spline interpolant. |
//|     N - number of spline nodes (output)                          |
//| Result: |
//| Tbl - coefficients table, unpacked format, array[0..N-2, |
//| 0..5]. |
//| For I = 0...N-2: |
//| Tbl[I,0] = X[i] |
//| Tbl[I,1] = X[i+1] |
//| Tbl[I,2] = C0 |
//| Tbl[I,3] = C1 |
//| Tbl[I,4] = C2 |
//| Tbl[I,5] = C3 |
//| On [x[i], x[i+1]] spline is equals to: |
//| S(x) = C0 + C1*t + C2*t^2 + C3*t^3 |
//| t = x-x[i] |
//+------------------------------------------------------------------+
void CAlglib::Spline1DUnpack(CSpline1DInterpolantShell &c,int &n,
                             CMatrixDouble &tbl)
  {
//--- initialization: reset output reference before the core call
   n=0;
//--- function call: fills n with node count and tbl with per-interval
//--- coefficients [x[i], x[i+1], C0, C1, C2, C3]
   CSpline1D::Spline1DUnpack(c.GetInnerObj(),n,tbl);
  }
//+------------------------------------------------------------------+
//| This subroutine performs linear transformation of the spline |
//| argument. |
//| INPUT PARAMETERS: |
//| C - spline interpolant. |
//| A, B- transformation coefficients: x = A*t + B |
//| Result: |
//| C - transformed spline |
//+------------------------------------------------------------------+
void CAlglib::Spline1DLinTransX(CSpline1DInterpolantShell &c,
                                const double a,const double b)
  {
//--- transform the spline argument in place: x = A*t + B
   CSpline1D::Spline1DLinTransX(c.GetInnerObj(),a,b);
  }
//+------------------------------------------------------------------+
//| This subroutine performs linear transformation of the spline. |
//| INPUT PARAMETERS: |
//| C - spline interpolant. |
//| A,B- transformation coefficients: S2(x)=A*S(x) + B |
//| Result: |
//| C - transformed spline |
//+------------------------------------------------------------------+
void CAlglib::Spline1DLinTransY(CSpline1DInterpolantShell &c,
                                const double a,const double b)
  {
//--- transform the spline values in place: S2(x) = A*S(x) + B
   CSpline1D::Spline1DLinTransY(c.GetInnerObj(),a,b);
  }
//+------------------------------------------------------------------+
//| This subroutine integrates the spline. |
//| INPUT PARAMETERS: |
//| C - spline interpolant. |
//| X - right bound of the integration interval [a, x], |
//| here 'a' denotes min(x[]) |
//| Result: |
//| integral(S(t)dt,a,x) |
//+------------------------------------------------------------------+
double CAlglib::Spline1DIntegrate(CSpline1DInterpolantShell &c,
                                  const double x)
  {
//--- return integral(S(t)dt, a, x) where a=min(x[]) of the spline's nodes
   return(CSpline1D::Spline1DIntegrate(c.GetInnerObj(),x));
  }
//+------------------------------------------------------------------+
//| Fitting by smoothing (penalized) cubic spline. |
//| This function approximates N scattered points (some of X[] may |
//| be equal to each other) by cubic spline with M nodes at |
//| equidistant grid spanning interval [min(x,xc),max(x,xc)]. |
//| The problem is regularized by adding nonlinearity penalty to |
//| usual least squares penalty function: |
//| MERIT_FUNC = F_LS + F_NL |
//| where F_LS is a least squares error term, and F_NL is a |
//| nonlinearity penalty which is roughly proportional to |
//| LambdaNS*integral{ S''(x)^2*dx }. Algorithm applies automatic |
//| renormalization of F_NL which makes penalty term roughly |
//| invariant to scaling of X[] and changes in M. |
//| This function is a new edition of penalized regression spline|
//| fitting, a fast and compact one which needs much less resources |
//| that its previous version: just O(maxMN) memory and |
//| O(maxMN*log(maxMN)) time. |
//| NOTE: it is OK to run this function with both M<<N and M>>N; say,|
//| it is possible to process 100 points with 1000-node spline.|
//| INPUT PARAMETERS: |
//| X - points, array[0..N-1]. |
//| Y - function values, array[0..N-1]. |
//| N - number of points (optional): |
//| * N>0 |
//| * if given, only first N elements of X/Y are |
//| processed |
//| * if not given, automatically determined from |
//| lengths |
//| M - number of basis functions ( = number_of_nodes), |
//| M>=4. |
//| LambdaNS - LambdaNS>=0, regularization constant passed by |
//| user. It penalizes nonlinearity in the regression |
//| spline. Possible values to start from are 0.00001, |
//| 0.1, 1 |
//| OUTPUT PARAMETERS: |
//| S - spline interpolant. |
//| Rep - Following fields are set: |
//| * RMSError rms error on the (X,Y). |
//| * AvgError average error on the (X,Y). |
//| * AvgRelError average relative error on the |
//| non-zero Y |
//| * MaxError maximum error |
//+------------------------------------------------------------------+
void CAlglib::Spline1DFit(double &x[],double &y[],int n,int m,
                          double lambdans,CSpline1DInterpolantShell &s,
                          CSpline1DFitReportShell &rep)
  {
//--- thin wrapper: penalized cubic spline fit with explicit point count N,
//--- M equidistant nodes and nonlinearity penalty lambdans; fills the spline
//--- shell and the fit report shell
   CSpline1D::Spline1DFit(x,y,n,m,lambdans,s.GetInnerObj(),rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::Spline1DFit(double &x[],double &y[],int m,double lambdans,
                          CSpline1DInterpolantShell &s,CSpline1DFitReportShell &rep)
  {
//--- X and Y must have matching lengths; N is inferred from X
   int n=CAp::Len(x);
   if(!CAp::Assert(n==CAp::Len(y),"Error while calling 'Spline1DFit': looks like one of arguments has wrong size"))
      return;
//--- delegate to the computational core
   CSpline1D::Spline1DFit(x,y,n,m,lambdans,s.GetInnerObj(),rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| This function builds monotone cubic Hermite interpolant. This |
//| interpolant is monotonic in [x(0),x(n-1)] and is constant outside|
//| of this interval. |
//| In case y[] form non-monotonic sequence, interpolant is |
//| piecewise monotonic. Say, for x=(0,1,2,3,4) and y=(0,1,2,1,0) |
//| interpolant will monotonically grow at [0..2] and monotonically |
//| decrease at [2..4]. |
//| INPUT PARAMETERS: |
//| X - spline nodes, array[0..N-1]. Subroutine |
//| automatically sorts points, so caller may pass |
//| unsorted array. |
//| Y - function values, array[0..N-1] |
//| N - the number of points(N>=2). |
//| OUTPUT PARAMETERS: |
//| C - spline interpolant. |
//+------------------------------------------------------------------+
void CAlglib::Spline1DBuildMonotone(double &x[],double &y[],int n,CSpline1DInterpolantShell &c)
  {
//--- thin wrapper: build monotone cubic Hermite interpolant (piecewise
//--- monotonic, constant outside [x(0),x(n-1)]) into the shell's inner object
   CSpline1D::Spline1DBuildMonotone(x,y,n,c.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::Spline1DBuildMonotone(CRowDouble &x,CRowDouble &y,CSpline1DInterpolantShell &c)
  {
//--- copy row-vector inputs into plain arrays
   double x_arr[];
   double y_arr[];
   x.ToArray(x_arr);
   y.ToArray(y_arr);
//--- reuse the array-based overload (it validates sizes and infers N)
   Spline1DBuildMonotone(x_arr,y_arr,c);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::Spline1DBuildMonotone(double &x[],double &y[],CSpline1DInterpolantShell &c)
  {
//--- X and Y must have matching lengths; N is inferred from X
   int n=CAp::Len(x);
   if(!CAp::Assert(n==CAp::Len(y),"Error while calling 'Spline1DBuildMonotone': looks like one of arguments has wrong size"))
      return;
//--- delegate to the computational core
   CSpline1D::Spline1DBuildMonotone(x,y,n,c.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Fitting by polynomials in barycentric form. This function |
//| provides simple interface for unconstrained unweighted fitting.  |
//| See PolynomialFitWC() if you need constrained fitting. |
//| Task is linear, so linear least squares solver is used. |
//| Complexity of this computational scheme is O(N*M^2), mostly |
//| dominated by least squares solver |
//| SEE ALSO: |
//| PolynomialFitWC() |
//| INPUT PARAMETERS: |
//| X - points, array[0..N-1]. |
//| Y - function values, array[0..N-1]. |
//| N - number of points, N>0 |
//| * if given, only leading N elements of X/Y are used |
//| * if not given, automatically determined from sizes |
//| of X/Y |
//| M - number of basis functions (= polynomial_degree + 1), |
//| M>=1 |
//| OUTPUT PARAMETERS: |
//| Info- same format as in LSFitLinearW() subroutine: |
//| * Info>0 task is solved |
//| * Info<=0 an error occured: |
//| -4 means inconvergence of internal SVD |
//| P - interpolant in barycentric form. |
//| Rep - report, same format as in LSFitLinearW() subroutine. |
//| Following fields are set: |
//| * RMSError rms error on the (X,Y). |
//| * AvgError average error on the (X,Y). |
//| * AvgRelError average relative error on the |
//| non-zero Y |
//| * MaxError maximum error |
//| NON-WEIGHTED ERRORS ARE CALCULATED |
//| NOTES: |
//| you can convert P from barycentric form to the power or |
//| Chebyshev basis with PolynomialBar2Pow() or |
//| PolynomialBar2Cheb() functions from POLINT subpackage. |
//+------------------------------------------------------------------+
void CAlglib::PolynomialFit(double &x[],double &y[],const int n,
                            const int m,int &info,CBarycentricInterpolantShell &p,
                            CPolynomialFitReportShell &rep)
  {
//--- initialization: reset output status before the core call
   info=0;
//--- function call: unconstrained unweighted polynomial fit, degree m-1,
//--- result in barycentric form inside the shell; info>0 means success
   CLSFit::PolynomialFit(x,y,n,m,info,p.GetInnerObj(),rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Fitting by polynomials in barycentric form. This function |
//| provides simple interface for unconstrained unweighted fitting.  |
//| See PolynomialFitWC() if you need constrained fitting. |
//| Task is linear, so linear least squares solver is used. |
//| Complexity of this computational scheme is O(N*M^2), mostly |
//| dominated by least squares solver |
//| SEE ALSO: |
//| PolynomialFitWC() |
//| INPUT PARAMETERS: |
//| X - points, array[0..N-1]. |
//| Y - function values, array[0..N-1]. |
//| N - number of points, N>0 |
//| * if given, only leading N elements of X/Y are used |
//| * if not given, automatically determined from sizes |
//| of X/Y |
//| M - number of basis functions (= polynomial_degree + 1), |
//| M>=1 |
//| OUTPUT PARAMETERS: |
//| Info- same format as in LSFitLinearW() subroutine: |
//| * Info>0 task is solved |
//| * Info<=0 an error occured: |
//| -4 means inconvergence of internal SVD |
//| P - interpolant in barycentric form. |
//| Rep - report, same format as in LSFitLinearW() subroutine. |
//| Following fields are set: |
//| * RMSError rms error on the (X,Y). |
//| * AvgError average error on the (X,Y). |
//| * AvgRelError average relative error on the |
//| non-zero Y |
//| * MaxError maximum error |
//| NON-WEIGHTED ERRORS ARE CALCULATED |
//| NOTES: |
//| you can convert P from barycentric form to the power or |
//| Chebyshev basis with PolynomialBar2Pow() or |
//| PolynomialBar2Cheb() functions from POLINT subpackage. |
//+------------------------------------------------------------------+
void CAlglib::PolynomialFit(double &x[],double &y[],const int m,
                            int &info,CBarycentricInterpolantShell &p,
                            CPolynomialFitReportShell &rep)
  {
//--- X and Y must have matching lengths (info is left untouched on failure)
   if(CAp::Len(x)!=CAp::Len(y))
     {
      Print("Error while calling 'polynomialfit': looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return;
     }
//--- prepare outputs, infer N and delegate to the least-squares core
   info=0;
   int n=CAp::Len(x);
   CLSFit::PolynomialFit(x,y,n,m,info,p.GetInnerObj(),rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Weighted fitting by polynomials in barycentric form, with |
//| constraints on function values or first derivatives. |
//| Small regularizing term is used when solving constrained tasks |
//| (to improve stability). |
//| Task is linear, so linear least squares solver is used. |
//| Complexity of this computational scheme is O(N*M^2), mostly |
//| dominated by least squares solver |
//| SEE ALSO: |
//| PolynomialFit() |
//| INPUT PARAMETERS: |
//| X - points, array[0..N-1]. |
//| Y - function values, array[0..N-1]. |
//| W - weights, array[0..N-1] |
//| Each summand in square sum of approximation |
//| deviations from given values is multiplied by the |
//| square of corresponding weight. Fill it by 1's if you|
//| don't want to solve weighted task. |
//| N - number of points, N>0. |
//| * if given, only leading N elements of X/Y/W are used|
//| * if not given, automatically determined from sizes |
//| of X/Y/W |
//| XC - points where polynomial values/derivatives are |
//| constrained, array[0..K-1]. |
//| YC - values of constraints, array[0..K-1] |
//| DC - array[0..K-1], types of constraints: |
//| * DC[i]=0 means that P(XC[i])=YC[i] |
//| * DC[i]=1 means that P'(XC[i])=YC[i] |
//| SEE BELOW FOR IMPORTANT INFORMATION ON CONSTRAINTS |
//| K - number of constraints, 0<=K<M. |
//| K=0 means no constraints (XC/YC/DC are not used in |
//| such cases) |
//| M - number of basis functions (= polynomial_degree + 1), |
//| M>=1 |
//| OUTPUT PARAMETERS: |
//| Info- same format as in LSFitLinearW() subroutine: |
//| * Info>0 task is solved |
//| * Info<=0 an error occured: |
//| -4 means inconvergence of internal SVD |
//| -3 means inconsistent constraints |
//| P - interpolant in barycentric form. |
//| Rep - report, same format as in LSFitLinearW() subroutine. |
//| Following fields are set: |
//| * RMSError rms error on the (X,Y). |
//| * AvgError average error on the (X,Y). |
//| * AvgRelError average relative error on the |
//| non-zero Y |
//| * MaxError maximum error |
//| NON-WEIGHTED ERRORS ARE CALCULATED |
//| IMPORTANT: |
//|     this subroutine doesn't calculate task's condition number    |
//| for K<>0. |
//| NOTES: |
//| you can convert P from barycentric form to the power or |
//| Chebyshev basis with PolynomialBar2Pow() or |
//| PolynomialBar2Cheb() functions from POLINT subpackage. |
//| SETTING CONSTRAINTS - DANGERS AND OPPORTUNITIES: |
//| Setting constraints can lead to undesired results, like |
//| ill-conditioned behavior, or inconsistency being detected. |
//| From the other side, it allows us to improve quality of the fit. |
//| Here we summarize our experience with constrained regression |
//| splines: |
//| * even simple constraints can be inconsistent, see Wikipedia |
//| article on this subject: |
//| http://en.wikipedia.org/wiki/Birkhoff_interpolation |
//| * the greater is M (given fixed constraints), the more chances |
//| that constraints will be consistent |
//| * in the general case, consistency of constraints is NOT |
//| GUARANTEED. |
//| * in the one special cases, however, we can guarantee |
//| consistency. This case is: M>1 and constraints on the |
//| function values (NOT DERIVATIVES) |
//| Our final recommendation is to use constraints WHEN AND ONLY when|
//| you can't solve your task without them. Anything beyond special |
//| cases given above is not guaranteed and may result in |
//| inconsistency. |
//+------------------------------------------------------------------+
void CAlglib::PolynomialFitWC(double &x[],double &y[],double &w[],
                              const int n,double &xc[],double &yc[],
                              int &dc[],const int k,const int m,
                              int &info,CBarycentricInterpolantShell &p,
                              CPolynomialFitReportShell &rep)
  {
//--- initialization: reset output status before the core call
   info=0;
//--- function call: weighted constrained polynomial fit (K constraints in
//--- xc/yc/dc); info>0 means success, -3 inconsistent constraints, -4 SVD failure
   CLSFit::PolynomialFitWC(x,y,w,n,xc,yc,dc,k,m,info,p.GetInnerObj(),rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Weighted fitting by polynomials in barycentric form, with |
//| constraints on function values or first derivatives. |
//| Small regularizing term is used when solving constrained tasks |
//| (to improve stability). |
//| Task is linear, so linear least squares solver is used. |
//| Complexity of this computational scheme is O(N*M^2), mostly |
//| dominated by least squares solver |
//| SEE ALSO: |
//| PolynomialFit() |
//| INPUT PARAMETERS: |
//| X - points, array[0..N-1]. |
//| Y - function values, array[0..N-1]. |
//| W - weights, array[0..N-1] |
//| Each summand in square sum of approximation |
//| deviations from given values is multiplied by the |
//| square of corresponding weight. Fill it by 1's if you|
//| don't want to solve weighted task. |
//| N - number of points, N>0. |
//| * if given, only leading N elements of X/Y/W are used|
//| * if not given, automatically determined from sizes |
//| of X/Y/W |
//| XC - points where polynomial values/derivatives are |
//| constrained, array[0..K-1]. |
//| YC - values of constraints, array[0..K-1] |
//| DC - array[0..K-1], types of constraints: |
//| * DC[i]=0 means that P(XC[i])=YC[i] |
//| * DC[i]=1 means that P'(XC[i])=YC[i] |
//| SEE BELOW FOR IMPORTANT INFORMATION ON CONSTRAINTS |
//| K - number of constraints, 0<=K<M. |
//| K=0 means no constraints (XC/YC/DC are not used in |
//| such cases) |
//| M - number of basis functions (= polynomial_degree + 1), |
//| M>=1 |
//| OUTPUT PARAMETERS: |
//| Info- same format as in LSFitLinearW() subroutine: |
//| * Info>0 task is solved |
//| * Info<=0 an error occured: |
//| -4 means inconvergence of internal SVD |
//| -3 means inconsistent constraints |
//| P - interpolant in barycentric form. |
//| Rep - report, same format as in LSFitLinearW() subroutine. |
//| Following fields are set: |
//| * RMSError rms error on the (X,Y). |
//| * AvgError average error on the (X,Y). |
//| * AvgRelError average relative error on the |
//| non-zero Y |
//| * MaxError maximum error |
//| NON-WEIGHTED ERRORS ARE CALCULATED |
//| IMPORTANT: |
//|     this subroutine doesn't calculate task's condition number    |
//| for K<>0. |
//| NOTES: |
//| you can convert P from barycentric form to the power or |
//| Chebyshev basis with PolynomialBar2Pow() or |
//| PolynomialBar2Cheb() functions from POLINT subpackage. |
//| SETTING CONSTRAINTS - DANGERS AND OPPORTUNITIES: |
//| Setting constraints can lead to undesired results, like |
//| ill-conditioned behavior, or inconsistency being detected. |
//| From the other side, it allows us to improve quality of the fit. |
//| Here we summarize our experience with constrained regression |
//| splines: |
//| * even simple constraints can be inconsistent, see Wikipedia |
//| article on this subject: |
//| http://en.wikipedia.org/wiki/Birkhoff_interpolation |
//| * the greater is M (given fixed constraints), the more chances |
//| that constraints will be consistent |
//| * in the general case, consistency of constraints is NOT |
//| GUARANTEED. |
//| * in the one special cases, however, we can guarantee |
//| consistency. This case is: M>1 and constraints on the |
//| function values (NOT DERIVATIVES) |
//| Our final recommendation is to use constraints WHEN AND ONLY when|
//| you can't solve your task without them. Anything beyond special |
//| cases given above is not guaranteed and may result in |
//| inconsistency. |
//+------------------------------------------------------------------+
void CAlglib::PolynomialFitWC(double &x[],double &y[],double &w[],
                              double &xc[],double &yc[],int &dc[],
                              const int m,int &info,CBarycentricInterpolantShell &p,
                              CPolynomialFitReportShell &rep)
  {
//--- X/Y/W must agree in size, and so must XC/YC/DC
//--- (same diagnostic either way, so a single combined check suffices)
   int n=CAp::Len(x);
   int k=CAp::Len(xc);
   if(n!=CAp::Len(y) || n!=CAp::Len(w) ||
      k!=CAp::Len(yc) || k!=CAp::Len(dc))
     {
      Print("Error while calling 'polynomialfitwc': looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return;
     }
//--- prepare outputs and delegate to the least-squares core
   info=0;
   CLSFit::PolynomialFitWC(x,y,w,n,xc,yc,dc,k,m,info,p.GetInnerObj(),rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Weighted rational least squares fitting using Floater-Hormann    |
//| rational functions with optimal D chosen from [0,9], with |
//| constraints and individual weights. |
//| Equidistant grid with M node on [min(x),max(x)] is used to build |
//| basis functions. Different values of D are tried, optimal D |
//| (least WEIGHTED root mean square error) is chosen. Task is |
//| linear, so linear least squares solver is used. Complexity of |
//| this computational scheme is O(N*M^2) (mostly dominated by the |
//| least squares solver). |
//| SEE ALSO |
//|*BarycentricFitFloaterHormann(), "lightweight" fitting without |
//|  individual weights and constraints.                             |
//| INPUT PARAMETERS: |
//| X - points, array[0..N-1]. |
//| Y - function values, array[0..N-1]. |
//| W - weights, array[0..N-1] |
//| Each summand in square sum of approximation |
//| deviations from given values is multiplied by the |
//| square of corresponding weight. Fill it by 1's if |
//| you don't want to solve weighted task. |
//| N - number of points, N>0. |
//| XC - points where function values/derivatives are |
//| constrained, array[0..K-1]. |
//| YC - values of constraints, array[0..K-1] |
//| DC - array[0..K-1], types of constraints: |
//| * DC[i]=0 means that S(XC[i])=YC[i] |
//| * DC[i]=1 means that S'(XC[i])=YC[i] |
//| SEE BELOW FOR IMPORTANT INFORMATION ON CONSTRAINTS |
//| K - number of constraints, 0<=K<M. |
//| K=0 means no constraints (XC/YC/DC are not used in |
//| such cases) |
//| M - number of basis functions ( = number_of_nodes), |
//| M>=2. |
//| OUTPUT PARAMETERS: |
//| Info- same format as in LSFitLinearWC() subroutine. |
//| * Info>0 task is solved |
//| * Info<=0 an error occured: |
//| -4 means inconvergence of internal SVD |
//| -3 means inconsistent constraints |
//| -1 means another errors in parameters |
//| passed (N<=0, for example) |
//| B - barycentric interpolant. |
//| Rep - report, same format as in LSFitLinearWC() subroutine.|
//| Following fields are set: |
//| * DBest best value of the D parameter |
//| * RMSError rms error on the (X,Y). |
//| * AvgError average error on the (X,Y). |
//| * AvgRelError average relative error on the |
//| non-zero Y |
//| * MaxError maximum error |
//| NON-WEIGHTED ERRORS ARE CALCULATED |
//| IMPORTANT: |
//| this subroutine doesn't calculate task's condition number |
//| for K<>0. |
//| SETTING CONSTRAINTS - DANGERS AND OPPORTUNITIES: |
//| Setting constraints can lead to undesired results, like |
//| ill-conditioned behavior, or inconsistency being detected. From |
//| the other side, it allows us to improve quality of the fit. Here |
//| we summarize our experience with constrained barycentric |
//| interpolants: |
//| * excessive constraints can be inconsistent. Floater-Hormann |
//| basis functions aren't as flexible as splines (although they |
//| are very smooth). |
//| * the more evenly constraints are spread across [min(x),max(x)], |
//| the more chances that they will be consistent |
//| * the greater is M (given fixed constraints), the more chances |
//| that constraints will be consistent |
//| * in the general case, consistency of constraints IS NOT |
//| GUARANTEED. |
//| * in the several special cases, however, we CAN guarantee |
//| consistency. |
//| * one of this cases is constraints on the function VALUES at the |
//| interval boundaries. Note that consistency of the constraints    |
//| on the function DERIVATIVES is NOT guaranteed (you can use in |
//| such cases cubic splines which are more flexible). |
//| * another special case is ONE constraint on the function value |
//| (OR, but not AND, derivative) anywhere in the interval |
//| Our final recommendation is to use constraints WHEN AND ONLY |
//| WHEN you can't solve your task without them. Anything beyond |
//| special cases given above is not guaranteed and may result in |
//| inconsistency. |
//+------------------------------------------------------------------+
void CAlglib::BarycentricFitFloaterHormannWC(double &x[],double &y[],
double &w[],const int n,
double &xc[],double &yc[],
int &dc[],const int k,
const int m,int &info,
CBarycentricInterpolantShell &b,
CBarycentricFitReportShell &rep)
{
//--- initialization: reset the result code; the core routine reports
//--- success/failure through 'info' (see the boxed header above)
info=0;
//--- function call: forward all arguments to the computational core,
//--- unwrapping the shell objects to their inner ALGLIB representations
CLSFit::BarycentricFitFloaterHormannWC(x,y,w,n,xc,yc,dc,k,m,info,b.GetInnerObj(),rep.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Rational least squares fitting using Floater-Hormann rational |
//| functions with optimal D chosen from [0,9]. |
//| Equidistant grid with M node on [min(x),max(x)] is used to build |
//| basis functions. Different values of D are tried, optimal D |
//| (least root mean square error) is chosen. Task is linear, so |
//| linear least squares solver is used. Complexity of this |
//| computational scheme is O(N*M^2) (mostly dominated by the least |
//| squares solver). |
//| INPUT PARAMETERS: |
//| X - points, array[0..N-1]. |
//| Y - function values, array[0..N-1]. |
//| N - number of points, N>0. |
//| M - number of basis functions ( = number_of_nodes), M>=2.|
//| OUTPUT PARAMETERS: |
//| Info- same format as in LSFitLinearWC() subroutine. |
//| * Info>0 task is solved |
//| * Info<=0 an error occured: |
//| -4 means inconvergence of internal SVD |
//| -3 means inconsistent constraints |
//| B - barycentric interpolant. |
//| Rep - report, same format as in LSFitLinearWC() subroutine.|
//| Following fields are set: |
//| * DBest best value of the D parameter |
//| * RMSError rms error on the (X,Y). |
//| * AvgError average error on the (X,Y). |
//| * AvgRelError average relative error on the |
//| non-zero Y |
//| * MaxError maximum error |
//| NON-WEIGHTED ERRORS ARE CALCULATED |
//+------------------------------------------------------------------+
void CAlglib::BarycentricFitFloaterHormann(double &x[],double &y[],
const int n,const int m,
int &info,CBarycentricInterpolantShell &b,
CBarycentricFitReportShell &rep)
{
//--- initialization: reset the result code; the core routine reports
//--- success/failure through 'info' (see the boxed header above)
info=0;
//--- function call: forward to the computational core, unwrapping the
//--- shell objects to their inner ALGLIB representations
CLSFit::BarycentricFitFloaterHormann(x,y,n,m,info,b.GetInnerObj(),rep.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Fitting by penalized cubic spline.                               |
//| Equidistant grid with M nodes on [min(x),max(x)] is used to      |
//| build basis functions. Basis functions are cubic splines with    |
//| natural boundary conditions. Problem is regularized by adding    |
//| non-linearity penalty to the usual least squares penalty         |
//| function:                                                        |
//|     S(x) = arg min { LS + P }, where                             |
//|     LS   = SUM { (y[i] - S(x[i]))^2 } - least squares penalty    |
//|     P    = C*10^rho*integral{ S''(x)^2*dx } - non-linearity      |
//|            penalty                                               |
//|     rho  - tunable constant given by user                        |
//|     C    - automatically determined scale parameter, makes       |
//|            penalty invariant with respect to scaling of X, Y.    |
//| INPUT PARAMETERS:                                                |
//|     X   -   points, array[0..N-1].                               |
//|     Y   -   function values, array[0..N-1].                      |
//|     N   -   number of points, N>0.                               |
//|     M   -   number of basis functions (= number_of_nodes), M>=4. |
//|     Rho -   regularization constant passed by user. It penalizes |
//|             nonlinearity in the regression spline. It is         |
//|             logarithmically scaled, i.e. actual value of the     |
//|             regularization constant is calculated as 10^Rho.     |
//| OUTPUT PARAMETERS:                                               |
//|     Info-   same format as in LSFitLinearWC() subroutine.        |
//|             * Info>0    task is solved                           |
//|             * Info<=0   an error occurred                        |
//|     S   -   spline interpolant.                                  |
//|     Rep -   report, same format as in LSFitLinearWC() subroutine.|
//| NOTE: function automatically sorts points, so caller may pass    |
//| unsorted array.                                                  |
//+------------------------------------------------------------------+
void CAlglib::Spline1DFitPenalized(double &x[],double &y[],const int n,
const int m,const double rho,int &info,
CSpline1DInterpolantShell &s,
CSpline1DFitReportShell &rep)
{
//--- initialization: reset the result code; the core routine reports
//--- success/failure through 'info'
info=0;
//--- function call: fitting by penalized cubic spline — forward to the
//--- CIntComp core, unwrapping the shell objects
CIntComp::Spline1DFitPenalized(x,y,n,m,rho,info,s.GetInnerObj(),rep.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Fitting by penalized cubic spline.                               |
//| Equidistant grid with M nodes on [min(x),max(x)] is used to      |
//| build basis functions. Basis functions are cubic splines with    |
//| natural boundary conditions. Problem is regularized by adding    |
//| non-linearity penalty to the usual least squares penalty         |
//| function:                                                        |
//|     S(x) = arg min { LS + P }, where                             |
//|     LS   = SUM { (y[i] - S(x[i]))^2 } - least squares penalty    |
//|     P    = C*10^rho*integral{ S''(x)^2*dx } - non-linearity      |
//|            penalty                                               |
//|     rho  - tunable constant given by user                        |
//|     C    - automatically determined scale parameter, makes       |
//|            penalty invariant with respect to scaling of X, Y.    |
//| INPUT PARAMETERS:                                                |
//|     X   -   points, array[0..N-1].                               |
//|     Y   -   function values, array[0..N-1].                      |
//|             (N is determined automatically from sizes of X/Y)    |
//|     M   -   number of basis functions (= number_of_nodes), M>=4. |
//|     Rho -   regularization constant passed by user. It penalizes |
//|             nonlinearity in the regression spline. It is         |
//|             logarithmically scaled, i.e. actual value of the     |
//|             regularization constant is calculated as 10^Rho.     |
//| OUTPUT PARAMETERS:                                               |
//|     Info-   same format as in LSFitLinearWC() subroutine.        |
//|             * Info>0    task is solved                           |
//|             * Info<=0   an error occurred                        |
//|     S   -   spline interpolant.                                  |
//|     Rep -   report, same format as in LSFitLinearWC() subroutine.|
//| NOTE: function automatically sorts points, so caller may pass    |
//| unsorted array.                                                  |
//+------------------------------------------------------------------+
void CAlglib::Spline1DFitPenalized(double &x[],double &y[],const int m,
const double rho,int &info,
CSpline1DInterpolantShell &s,
CSpline1DFitReportShell &rep)
{
//--- guard: X and Y must have equal lengths, otherwise report the
//--- error through the global exception flag and bail out
int size=CAp::Len(x);
if(size!=CAp::Len(y))
{
Print("Error while calling 'spline1dfitpenalized': looks like one of arguments has wrong size");
CAp::exception_happened=true;
return;
}
//--- reset the result code and delegate to the computational core,
//--- deducing N from the input arrays
info=0;
CIntComp::Spline1DFitPenalized(x,y,size,m,rho,info,s.GetInnerObj(),rep.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Weighted fitting by penalized cubic spline. |
//| Equidistant grid with M nodes on [min(x,xc),max(x,xc)] is used to|
//| build basis functions. Basis functions are cubic splines with |
//| natural boundary conditions. Problem is regularized by adding |
//| non-linearity penalty to the usual least squares penalty |
//| function: |
//| S(x) = arg min { LS + P }, where |
//| LS = SUM { w[i]^2*(y[i] - S(x[i]))^2 } - least squares |
//| penalty |
//| P = C*10^rho*integral{ S''(x)^2*dx } - non-linearity |
//| penalty |
//| rho - tunable constant given by user |
//| C - automatically determined scale parameter, |
//| makes penalty invariant with respect to scaling of X, |
//| Y, W. |
//| INPUT PARAMETERS: |
//| X - points, array[0..N-1]. |
//| Y - function values, array[0..N-1]. |
//| W - weights, array[0..N-1] |
//| Each summand in square sum of approximation |
//| deviations from given values is multiplied by the |
//| square of corresponding weight. Fill it by 1's if |
//| you don't want to solve weighted problem. |
//| N - number of points (optional): |
//| * N>0 |
//| * if given, only first N elements of X/Y/W are |
//| processed |
//| * if not given, automatically determined from X/Y/W |
//| sizes |
//| M - number of basis functions ( = number_of_nodes), M>=4.|
//| Rho - regularization constant passed by user. It penalizes |
//| nonlinearity in the regression spline. It is |
//| logarithmically scaled, i.e. actual value of |
//| regularization constant is calculated as 10^Rho. It |
//| is automatically scaled so that: |
//| * Rho=2.0 corresponds to moderate amount of |
//| nonlinearity |
//| * generally, it should be somewhere in the |
//| [-8.0,+8.0] |
//| If you do not want to penalize nonlineary, |
//| pass small Rho. Values as low as -15 should work. |
//| OUTPUT PARAMETERS: |
//| Info- same format as in LSFitLinearWC() subroutine. |
//| * Info>0 task is solved |
//| * Info<=0 an error occured: |
//| -4 means inconvergence of internal SVD |
//| or Cholesky decomposition; problem |
//| may be too ill-conditioned (very |
//| rare) |
//| S - spline interpolant. |
//| Rep - Following fields are set: |
//| * RMSError rms error on the (X,Y). |
//| * AvgError average error on the (X,Y). |
//| * AvgRelError average relative error on the |
//| non-zero Y |
//| * MaxError maximum error |
//| NON-WEIGHTED ERRORS ARE CALCULATED |
//| IMPORTANT: |
//| this subroutine doesn't calculate task's condition number        |
//| for K<>0. |
//| NOTE 1: additional nodes are added to the spline outside of the |
//| fitting interval to force linearity when x<min(x,xc) or |
//| x>max(x,xc). It is done for consistency - we penalize |
//| non-linearity at [min(x,xc),max(x,xc)], so it is natural to |
//| force linearity outside of this interval. |
//| NOTE 2: function automatically sorts points, so caller may pass |
//| unsorted array. |
//+------------------------------------------------------------------+
void CAlglib::Spline1DFitPenalizedW(double &x[],double &y[],
double &w[],const int n,
const int m,const double rho,
int &info,CSpline1DInterpolantShell &s,
CSpline1DFitReportShell &rep)
{
//--- initialization: reset the result code; the core routine reports
//--- success/failure through 'info' (see the boxed header above)
info=0;
//--- function call: forward all arguments to the CIntComp core,
//--- unwrapping the shell objects to their inner ALGLIB representations
CIntComp::Spline1DFitPenalizedW(x,y,w,n,m,rho,info,s.GetInnerObj(),rep.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Weighted fitting by penalized cubic spline. |
//| Equidistant grid with M nodes on [min(x,xc),max(x,xc)] is used to|
//| build basis functions. Basis functions are cubic splines with |
//| natural boundary conditions. Problem is regularized by adding |
//| non-linearity penalty to the usual least squares penalty |
//| function: |
//| S(x) = arg min { LS + P }, where |
//| LS = SUM { w[i]^2*(y[i] - S(x[i]))^2 } - least squares |
//| penalty |
//| P = C*10^rho*integral{ S''(x)^2*dx } - non-linearity |
//| penalty |
//| rho - tunable constant given by user |
//| C - automatically determined scale parameter, |
//| makes penalty invariant with respect to scaling of X, |
//| Y, W. |
//| INPUT PARAMETERS: |
//| X - points, array[0..N-1]. |
//| Y - function values, array[0..N-1]. |
//| W - weights, array[0..N-1] |
//| Each summand in square sum of approximation |
//| deviations from given values is multiplied by the |
//| square of corresponding weight. Fill it by 1's if |
//| you don't want to solve weighted problem. |
//| N - number of points (optional): |
//| * N>0 |
//| * if given, only first N elements of X/Y/W are |
//| processed |
//| * if not given, automatically determined from X/Y/W |
//| sizes |
//| M - number of basis functions ( = number_of_nodes), M>=4.|
//| Rho - regularization constant passed by user. It penalizes |
//| nonlinearity in the regression spline. It is |
//| logarithmically scaled, i.e. actual value of |
//| regularization constant is calculated as 10^Rho. It |
//| is automatically scaled so that: |
//| * Rho=2.0 corresponds to moderate amount of |
//| nonlinearity |
//| * generally, it should be somewhere in the |
//| [-8.0,+8.0] |
//| If you do not want to penalize nonlineary, |
//| pass small Rho. Values as low as -15 should work. |
//| OUTPUT PARAMETERS: |
//| Info- same format as in LSFitLinearWC() subroutine. |
//| * Info>0 task is solved |
//| * Info<=0 an error occured: |
//| -4 means inconvergence of internal SVD |
//| or Cholesky decomposition; problem |
//| may be too ill-conditioned (very |
//| rare) |
//| S - spline interpolant. |
//| Rep - Following fields are set: |
//| * RMSError rms error on the (X,Y). |
//| * AvgError average error on the (X,Y). |
//| * AvgRelError average relative error on the |
//| non-zero Y |
//| * MaxError maximum error |
//| NON-WEIGHTED ERRORS ARE CALCULATED |
//| IMPORTANT: |
//| this subroutine doesn't calculate task's condition number        |
//| for K<>0. |
//| NOTE 1: additional nodes are added to the spline outside of the |
//| fitting interval to force linearity when x<min(x,xc) or |
//| x>max(x,xc). It is done for consistency - we penalize |
//| non-linearity at [min(x,xc),max(x,xc)], so it is natural to |
//| force linearity outside of this interval. |
//| NOTE 2: function automatically sorts points, so caller may pass |
//| unsorted array. |
//+------------------------------------------------------------------+
void CAlglib::Spline1DFitPenalizedW(double &x[],double &y[],
double &w[],const int m,
const double rho,int &info,
CSpline1DInterpolantShell &s,
CSpline1DFitReportShell &rep)
{
//--- guard: X, Y and W must all have equal lengths, otherwise report
//--- the error through the global exception flag and bail out
int size=CAp::Len(x);
if(size!=CAp::Len(y) || size!=CAp::Len(w))
{
Print("Error while calling 'spline1dfitpenalizedw': looks like one of arguments has wrong size");
CAp::exception_happened=true;
return;
}
//--- reset the result code and delegate to the computational core,
//--- deducing N from the input arrays
info=0;
CIntComp::Spline1DFitPenalizedW(x,y,w,size,m,rho,info,s.GetInnerObj(),rep.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Weighted fitting by cubic spline, with constraints on function |
//| values or derivatives. |
//| Equidistant grid with M-2 nodes on [min(x,xc),max(x,xc)] is used |
//| to build basis functions. Basis functions are cubic splines with |
//| continuous second derivatives and non-fixed first derivatives at |
//| interval ends. Small regularizing term is used when solving |
//| constrained tasks (to improve stability). |
//| Task is linear, so linear least squares solver is used. |
//| Complexity of this computational scheme is O(N*M^2), mostly |
//| dominated by least squares solver |
//| SEE ALSO |
//| Spline1DFitHermiteWC() - fitting by Hermite splines (more |
//| flexible, less smooth) |
//| Spline1DFitCubic() - "lightweight" fitting by cubic |
//| splines, without individual                                      |
//| weights and constraints |
//| INPUT PARAMETERS: |
//| X - points, array[0..N-1]. |
//| Y - function values, array[0..N-1]. |
//| W - weights, array[0..N-1] |
//| Each summand in square sum of approximation |
//| deviations from given values is multiplied by the |
//| square of corresponding weight. Fill it by 1's if you|
//| don't want to solve weighted task. |
//| N - number of points (optional): |
//| * N>0 |
//| * if given, only first N elements of X/Y/W are |
//| processed |
//| * if not given, automatically determined from X/Y/W |
//| sizes |
//| XC - points where spline values/derivatives are |
//| constrained, array[0..K-1]. |
//| YC - values of constraints, array[0..K-1] |
//| DC - array[0..K-1], types of constraints: |
//| * DC[i]=0 means that S(XC[i])=YC[i] |
//| * DC[i]=1 means that S'(XC[i])=YC[i] |
//| SEE BELOW FOR IMPORTANT INFORMATION ON CONSTRAINTS |
//| K - number of constraints (optional): |
//| * 0<=K<M. |
//| * K=0 means no constraints (XC/YC/DC are not used) |
//| * if given, only first K elements of XC/YC/DC are |
//| used |
//| * if not given, automatically determined from |
//| XC/YC/DC |
//| M - number of basis functions ( = number_of_nodes+2), |
//| M>=4. |
//| OUTPUT PARAMETERS: |
//| Info- same format as in LSFitLinearWC() subroutine. |
//| * Info>0 task is solved |
//| * Info<=0 an error occured: |
//| -4 means inconvergence of internal SVD |
//| -3 means inconsistent constraints |
//| S - spline interpolant. |
//| Rep - report, same format as in LSFitLinearWC() subroutine.|
//| Following fields are set: |
//| * RMSError rms error on the (X,Y). |
//| * AvgError average error on the (X,Y). |
//| * AvgRelError average relative error on the |
//| non-zero Y |
//| * MaxError maximum error |
//| NON-WEIGHTED ERRORS ARE CALCULATED |
//| IMPORTANT: |
//| this subroutine doesn't calculate task's condition number        |
//| for K<>0. |
//| ORDER OF POINTS |
//| Subroutine automatically sorts points, so caller may pass |
//| unsorted array. |
//| SETTING CONSTRAINTS - DANGERS AND OPPORTUNITIES: |
//| Setting constraints can lead to undesired results, like |
//| ill-conditioned behavior, or inconsistency being detected. From |
//| the other side, it allows us to improve quality of the fit. |
//| Here we summarize our experience with constrained regression |
//| splines: |
//| * excessive constraints can be inconsistent. Splines are |
//| piecewise cubic functions, and it is easy to create an |
//| example, where large number of constraints concentrated in |
//| small area will result in inconsistency. Just because spline |
//| is not flexible enough to satisfy all of them. And same |
//| constraints spread across the [min(x),max(x)] will be |
//| perfectly consistent. |
//| * the more evenly constraints are spread across [min(x),max(x)], |
//| the more chances that they will be consistent |
//| * the greater is M (given fixed constraints), the more chances |
//| that constraints will be consistent |
//| * in the general case, consistency of constraints IS NOT |
//| GUARANTEED. |
//| * in the several special cases, however, we CAN guarantee |
//| consistency. |
//| * one of this cases is constraints on the function values |
//| AND/OR its derivatives at the interval boundaries. |
//| * another special case is ONE constraint on the function value |
//| (OR, but not AND, derivative) anywhere in the interval |
//| Our final recommendation is to use constraints WHEN AND ONLY WHEN|
//| you can't solve your task without them. Anything beyond special |
//| cases given above is not guaranteed and may result in |
//| inconsistency. |
//+------------------------------------------------------------------+
void CAlglib::Spline1DFitCubicWC(double &x[],double &y[],double &w[],
const int n,double &xc[],double &yc[],
int &dc[],const int k,const int m,
int &info,CSpline1DInterpolantShell &s,
CSpline1DFitReportShell &rep)
{
//--- initialization: reset the result code; the core routine reports
//--- success/failure through 'info' (see the boxed header above)
info=0;
//--- function call: forward all arguments to the least squares fitting
//--- core, unwrapping the shell objects to their inner representations
CLSFit::Spline1DFitCubicWC(x,y,w,n,xc,yc,dc,k,m,info,s.GetInnerObj(),rep.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Weighted fitting by cubic spline, with constraints on function |
//| values or derivatives. |
//| Equidistant grid with M-2 nodes on [min(x,xc),max(x,xc)] is used |
//| to build basis functions. Basis functions are cubic splines with |
//| continuous second derivatives and non-fixed first derivatives at |
//| interval ends. Small regularizing term is used when solving |
//| constrained tasks (to improve stability). |
//| Task is linear, so linear least squares solver is used. |
//| Complexity of this computational scheme is O(N*M^2), mostly |
//| dominated by least squares solver |
//| SEE ALSO |
//| Spline1DFitHermiteWC() - fitting by Hermite splines (more |
//| flexible, less smooth) |
//| Spline1DFitCubic() - "lightweight" fitting by cubic |
//| splines, without individual                                      |
//| weights and constraints |
//| INPUT PARAMETERS: |
//| X - points, array[0..N-1]. |
//| Y - function values, array[0..N-1]. |
//| W - weights, array[0..N-1] |
//| Each summand in square sum of approximation |
//| deviations from given values is multiplied by the |
//| square of corresponding weight. Fill it by 1's if you|
//| don't want to solve weighted task. |
//| N - number of points (optional): |
//| * N>0 |
//| * if given, only first N elements of X/Y/W are |
//| processed |
//| * if not given, automatically determined from X/Y/W |
//| sizes |
//| XC - points where spline values/derivatives are |
//| constrained, array[0..K-1]. |
//| YC - values of constraints, array[0..K-1] |
//| DC - array[0..K-1], types of constraints: |
//| * DC[i]=0 means that S(XC[i])=YC[i] |
//| * DC[i]=1 means that S'(XC[i])=YC[i] |
//| SEE BELOW FOR IMPORTANT INFORMATION ON CONSTRAINTS |
//| K - number of constraints (optional): |
//| * 0<=K<M. |
//| * K=0 means no constraints (XC/YC/DC are not used) |
//| * if given, only first K elements of XC/YC/DC are |
//| used |
//| * if not given, automatically determined from |
//| XC/YC/DC |
//| M - number of basis functions ( = number_of_nodes+2), |
//| M>=4. |
//| OUTPUT PARAMETERS: |
//| Info- same format as in LSFitLinearWC() subroutine. |
//| * Info>0 task is solved |
//| * Info<=0 an error occured: |
//| -4 means inconvergence of internal SVD |
//| -3 means inconsistent constraints |
//| S - spline interpolant. |
//| Rep - report, same format as in LSFitLinearWC() subroutine.|
//| Following fields are set: |
//| * RMSError rms error on the (X,Y). |
//| * AvgError average error on the (X,Y). |
//| * AvgRelError average relative error on the |
//| non-zero Y |
//| * MaxError maximum error |
//| NON-WEIGHTED ERRORS ARE CALCULATED |
//| IMPORTANT: |
//| this subroutine doesn't calculate task's condition number        |
//| for K<>0. |
//| ORDER OF POINTS |
//| Subroutine automatically sorts points, so caller may pass |
//| unsorted array. |
//| SETTING CONSTRAINTS - DANGERS AND OPPORTUNITIES: |
//| Setting constraints can lead to undesired results, like |
//| ill-conditioned behavior, or inconsistency being detected. From |
//| the other side, it allows us to improve quality of the fit. |
//| Here we summarize our experience with constrained regression |
//| splines: |
//| * excessive constraints can be inconsistent. Splines are |
//| piecewise cubic functions, and it is easy to create an |
//| example, where large number of constraints concentrated in |
//| small area will result in inconsistency. Just because spline |
//| is not flexible enough to satisfy all of them. And same |
//| constraints spread across the [min(x),max(x)] will be |
//| perfectly consistent. |
//| * the more evenly constraints are spread across [min(x), max(x)], |
//| the more chances that they will be consistent |
//| * the greater is M (given fixed constraints), the more chances |
//| that constraints will be consistent |
//| * in the general case, consistency of constraints IS NOT |
//| GUARANTEED. |
//| * in the several special cases, however, we CAN guarantee |
//| consistency. |
//| * one of this cases is constraints on the function values |
//| AND/OR its derivatives at the interval boundaries. |
//| * another special case is ONE constraint on the function value |
//| (OR, but not AND, derivative) anywhere in the interval |
//| Our final recommendation is to use constraints WHEN AND ONLY WHEN|
//| you can't solve your task without them. Anything beyond special |
//| cases given above is not guaranteed and may result in |
//| inconsistency. |
//+------------------------------------------------------------------+
void CAlglib::Spline1DFitCubicWC(double &x[],double &y[],double &w[],
double &xc[],double &yc[],int &dc[],
const int m,int &info,CSpline1DInterpolantShell &s,
CSpline1DFitReportShell &rep)
{
//--- guard: X/Y/W must share one length and XC/YC/DC another;
//--- on mismatch report the error through the global exception flag
int npoints=CAp::Len(x);
int ncons  =CAp::Len(xc);
if(npoints!=CAp::Len(y) || npoints!=CAp::Len(w) ||
ncons!=CAp::Len(yc) || ncons!=CAp::Len(dc))
{
Print("Error while calling 'spline1dfitcubicwc': looks like one of arguments has wrong size");
CAp::exception_happened=true;
return;
}
//--- reset the result code and delegate to the least squares fitting
//--- core, deducing N and K from the input arrays
info=0;
CLSFit::Spline1DFitCubicWC(x,y,w,npoints,xc,yc,dc,ncons,m,info,s.GetInnerObj(),rep.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Weighted fitting by Hermite spline, with constraints on function |
//| values or first derivatives. |
//| Equidistant grid with M nodes on [min(x,xc),max(x,xc)] is used to|
//| build basis functions. Basis functions are Hermite splines. Small|
//| regularizing term is used when solving constrained tasks (to |
//| improve stability). |
//| Task is linear, so linear least squares solver is used. |
//| Complexity of this computational scheme is O(N*M^2), mostly |
//| dominated by least squares solver |
//| SEE ALSO |
//| Spline1DFitCubicWC() - fitting by Cubic splines (less |
//| flexible, more smooth) |
//| Spline1DFitHermite() - "lightweight" Hermite fitting, |
//| without individual weights and                                   |
//| constraints |
//| INPUT PARAMETERS: |
//| X - points, array[0..N-1]. |
//| Y - function values, array[0..N-1]. |
//| W - weights, array[0..N-1] |
//| Each summand in square sum of approximation |
//| deviations from given values is multiplied by the |
//| square of corresponding weight. Fill it by 1's if |
//| you don't want to solve weighted task. |
//| N - number of points (optional): |
//| * N>0 |
//| * if given, only first N elements of X/Y/W are |
//| processed |
//| * if not given, automatically determined from X/Y/W |
//| sizes |
//| XC - points where spline values/derivatives are |
//| constrained, array[0..K-1]. |
//| YC - values of constraints, array[0..K-1] |
//| DC - array[0..K-1], types of constraints: |
//| * DC[i]=0 means that S(XC[i])=YC[i] |
//| * DC[i]=1 means that S'(XC[i])=YC[i] |
//| SEE BELOW FOR IMPORTANT INFORMATION ON CONSTRAINTS |
//| K - number of constraints (optional): |
//| * 0<=K<M. |
//| * K=0 means no constraints (XC/YC/DC are not used) |
//| * if given, only first K elements of XC/YC/DC are |
//| used |
//| * if not given, automatically determined from |
//| XC/YC/DC |
//| M - number of basis functions (= 2 * number of nodes), |
//| M>=4, |
//| M IS EVEN! |
//| OUTPUT PARAMETERS: |
//| Info- same format as in LSFitLinearW() subroutine: |
//| * Info>0 task is solved |
//| * Info<=0 an error occured: |
//| -4 means inconvergence of internal SVD |
//| -3 means inconsistent constraints |
//| -2 means odd M was passed (which is not |
//| supported) |
//| -1 means another errors in parameters |
//| passed (N<=0, for example) |
//| S - spline interpolant. |
//| Rep - report, same format as in LSFitLinearW() subroutine. |
//| Following fields are set: |
//| * RMSError rms error on the (X,Y). |
//| * AvgError average error on the (X,Y). |
//| * AvgRelError average relative error on the |
//| non-zero Y |
//| * MaxError maximum error |
//| NON-WEIGHTED ERRORS ARE CALCULATED |
//| IMPORTANT: |
//| this subroutine doesn't calculate task's condition number        |
//| for K<>0.                                                        |
//| IMPORTANT:                                                       |
//| this subroutine supports only even M's                           |
//| ORDER OF POINTS                                                  |
//| Subroutine automatically sorts points, so caller may pass        |
//| unsorted array. |
//| SETTING CONSTRAINTS - DANGERS AND OPPORTUNITIES: |
//| Setting constraints can lead to undesired results, like |
//| ill-conditioned behavior, or inconsistency being detected. From |
//| the other side, it allows us to improve quality of the fit. Here |
//| we summarize our experience with constrained regression splines:|
//| * excessive constraints can be inconsistent. Splines are |
//| piecewise cubic functions, and it is easy to create an example,|
//| where large number of constraints concentrated in small area |
//| will result in inconsistency. Just because spline is not |
//| flexible enough to satisfy all of them. And same constraints |
//| spread across the [min(x),max(x)] will be perfectly consistent.|
//| * the more evenly constraints are spread across [min(x),max(x)], |
//| the more chances that they will be consistent |
//| * the greater is M (given fixed constraints), the more chances |
//| that constraints will be consistent |
//| * in the general case, consistency of constraints is NOT |
//| GUARANTEED. |
//| * in the several special cases, however, we can guarantee |
//| consistency. |
//| * one of this cases is M>=4 and constraints on the function |
//| value (AND/OR its derivative) at the interval boundaries. |
//| * another special case is M>=4 and ONE constraint on the |
//| function value (OR, BUT NOT AND, derivative) anywhere in |
//| [min(x),max(x)] |
//| Our final recommendation is to use constraints WHEN AND ONLY when|
//| you can't solve your task without them. Anything beyond special |
//| cases given above is not guaranteed and may result in |
//| inconsistency. |
//+------------------------------------------------------------------+
void CAlglib::Spline1DFitHermiteWC(double &x[],double &y[],double &w[],
const int n,double &xc[],double &yc[],
int &dc[],const int k,const int m,
int &info,CSpline1DInterpolantShell &s,
CSpline1DFitReportShell &rep)
{
//--- initialization: reset the result code; the core routine reports
//--- success/failure through 'info' (see the boxed header above)
info=0;
//--- function call: forward all arguments to the least squares fitting
//--- core, unwrapping the shell objects to their inner representations
CLSFit::Spline1DFitHermiteWC(x,y,w,n,xc,yc,dc,k,m,info,s.GetInnerObj(),rep.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Weighted fitting by Hermite spline, with constraints on function |
//| values or first derivatives. |
//| Equidistant grid with M nodes on [min(x,xc),max(x,xc)] is used to|
//| build basis functions. Basis functions are Hermite splines. Small|
//| regularizing term is used when solving constrained tasks (to |
//| improve stability). |
//| Task is linear, so linear least squares solver is used. |
//| Complexity of this computational scheme is O(N*M^2), mostly |
//| dominated by least squares solver |
//| SEE ALSO |
//| Spline1DFitCubicWC() - fitting by Cubic splines (less |
//| flexible, more smooth) |
//| Spline1DFitHermite() - "lightweight" Hermite fitting, |
//| without individual weights and                                   |
//| constraints |
//| INPUT PARAMETERS: |
//| X - points, array[0..N-1]. |
//| Y - function values, array[0..N-1]. |
//| W - weights, array[0..N-1] |
//| Each summand in square sum of approximation |
//| deviations from given values is multiplied by the |
//| square of corresponding weight. Fill it by 1's if |
//| you don't want to solve weighted task. |
//| N - number of points (optional): |
//| * N>0 |
//| * if given, only first N elements of X/Y/W are |
//| processed |
//| * if not given, automatically determined from X/Y/W |
//| sizes |
//| XC - points where spline values/derivatives are |
//| constrained, array[0..K-1]. |
//| YC - values of constraints, array[0..K-1] |
//| DC - array[0..K-1], types of constraints: |
//| * DC[i]=0 means that S(XC[i])=YC[i] |
//| * DC[i]=1 means that S'(XC[i])=YC[i] |
//| SEE BELOW FOR IMPORTANT INFORMATION ON CONSTRAINTS |
//| K - number of constraints (optional): |
//| * 0<=K<M. |
//| * K=0 means no constraints (XC/YC/DC are not used) |
//| * if given, only first K elements of XC/YC/DC are |
//| used |
//| * if not given, automatically determined from |
//| XC/YC/DC |
//| M - number of basis functions (= 2 * number of nodes), |
//| M>=4, |
//| M IS EVEN! |
//| OUTPUT PARAMETERS: |
//| Info- same format as in LSFitLinearW() subroutine: |
//| * Info>0 task is solved |
//|                    * Info<=0 an error occurred:                 |
//| -4 means inconvergence of internal SVD |
//| -3 means inconsistent constraints |
//| -2 means odd M was passed (which is not |
//| supported) |
//| -1 means another errors in parameters |
//| passed (N<=0, for example) |
//| S - spline interpolant. |
//| Rep - report, same format as in LSFitLinearW() subroutine. |
//| Following fields are set: |
//| * RMSError rms error on the (X,Y). |
//| * AvgError average error on the (X,Y). |
//| * AvgRelError average relative error on the |
//| non-zero Y |
//| * MaxError maximum error |
//| NON-WEIGHTED ERRORS ARE CALCULATED |
//| IMPORTANT: |
//|     this subroutine doesn't calculate task's condition number   |
//| for K<>0. |
//| IMPORTANT: |
//|     this subroutine supports only even M's                      |
//| ORDER OF POINTS |
//|     Subroutine automatically sorts points, so caller may pass   |
//| unsorted array. |
//| SETTING CONSTRAINTS - DANGERS AND OPPORTUNITIES: |
//| Setting constraints can lead to undesired results, like |
//| ill-conditioned behavior, or inconsistency being detected. From |
//| the other side, it allows us to improve quality of the fit. Here |
//| we summarize our experience with constrained regression splines:|
//| * excessive constraints can be inconsistent. Splines are |
//| piecewise cubic functions, and it is easy to create an example,|
//| where large number of constraints concentrated in small area |
//| will result in inconsistency. Just because spline is not |
//| flexible enough to satisfy all of them. And same constraints |
//| spread across the [min(x),max(x)] will be perfectly consistent.|
//| * the more evenly constraints are spread across [min(x),max(x)], |
//| the more chances that they will be consistent |
//| * the greater is M (given fixed constraints), the more chances |
//| that constraints will be consistent |
//| * in the general case, consistency of constraints is NOT |
//| GUARANTEED. |
//| * in the several special cases, however, we can guarantee |
//| consistency. |
//| * one of this cases is M>=4 and constraints on the function |
//| value (AND/OR its derivative) at the interval boundaries. |
//| * another special case is M>=4 and ONE constraint on the |
//| function value (OR, BUT NOT AND, derivative) anywhere in |
//| [min(x),max(x)] |
//| Our final recommendation is to use constraints WHEN AND ONLY when|
//| you can't solve your task without them. Anything beyond special |
//| cases given above is not guaranteed and may result in |
//| inconsistency. |
//+------------------------------------------------------------------+
void CAlglib::Spline1DFitHermiteWC(double &x[],double &y[],double &w[],
                                   double &xc[],double &yc[],int &dc[],
                                   const int m,int &info,
                                   CSpline1DInterpolantShell &s,
                                   CSpline1DFitReportShell &rep)
  {
//--- sizes are inferred from the input arrays
   int n=CAp::Len(x);
   int k=CAp::Len(xc);
//--- data arrays X/Y/W must agree in length, as must constraint arrays XC/YC/DC
   if(n!=CAp::Len(y) || n!=CAp::Len(w) || k!=CAp::Len(yc) || k!=CAp::Len(dc))
     {
      Print("Error while calling 'spline1dfithermitewc': looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return;
     }
//--- reset output status and delegate to computational core
   info=0;
   CLSFit::Spline1DFitHermiteWC(x,y,w,n,xc,yc,dc,k,m,info,s.GetInnerObj(),rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Least squares fitting by cubic spline. |
//| This subroutine is "lightweight" alternative for more complex |
//| and feature - rich Spline1DFitCubicWC(). See Spline1DFitCubicWC()|
//| for more information about subroutine parameters (we don't |
//| duplicate it here because of length) |
//+------------------------------------------------------------------+
void CAlglib::Spline1DFitCubic(double &x[],double &y[],const int n,
                               const int m,int &info,
                               CSpline1DInterpolantShell &s,
                               CSpline1DFitReportShell &rep)
  {
//--- reset output status before delegating
   info=0;
//--- delegate to computational core (see CLSFit::Spline1DFitCubic)
   CLSFit::Spline1DFitCubic(x,y,n,m,info,s.GetInnerObj(),rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Least squares fitting by cubic spline. |
//| This subroutine is "lightweight" alternative for more complex |
//| and feature - rich Spline1DFitCubicWC(). See Spline1DFitCubicWC()|
//| for more information about subroutine parameters (we don't |
//| duplicate it here because of length) |
//+------------------------------------------------------------------+
void CAlglib::Spline1DFitCubic(double &x[],double &y[],const int m,
                               int &info,CSpline1DInterpolantShell &s,
                               CSpline1DFitReportShell &rep)
  {
//--- number of points is taken from X; Y must have the same length
   int n=CAp::Len(x);
   if(n!=CAp::Len(y))
     {
      Print("Error while calling 'spline1dfitcubic': looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return;
     }
//--- reset output status and delegate to computational core
   info=0;
   CLSFit::Spline1DFitCubic(x,y,n,m,info,s.GetInnerObj(),rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Least squares fitting by Hermite spline. |
//| This subroutine is "lightweight" alternative for more complex |
//| and feature - rich Spline1DFitHermiteWC(). See |
//| Spline1DFitHermiteWC() description for more information about |
//| subroutine parameters (we don't duplicate it here because of |
//| length). |
//+------------------------------------------------------------------+
void CAlglib::Spline1DFitHermite(double &x[],double &y[],const int n,
                                 const int m,int &info,
                                 CSpline1DInterpolantShell &s,
                                 CSpline1DFitReportShell &rep)
  {
//--- reset output status before delegating
   info=0;
//--- delegate to computational core (see CLSFit::Spline1DFitHermite)
   CLSFit::Spline1DFitHermite(x,y,n,m,info,s.GetInnerObj(),rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Least squares fitting by Hermite spline. |
//| This subroutine is "lightweight" alternative for more complex |
//| and feature - rich Spline1DFitHermiteWC(). See |
//| Spline1DFitHermiteWC() description for more information about |
//| subroutine parameters (we don't duplicate it here because of |
//| length). |
//+------------------------------------------------------------------+
void CAlglib::Spline1DFitHermite(double &x[],double &y[],const int m,
                                 int &info,CSpline1DInterpolantShell &s,
                                 CSpline1DFitReportShell &rep)
  {
//--- number of points is taken from X; Y must have the same length
   int n=CAp::Len(x);
   if(n!=CAp::Len(y))
     {
      Print("Error while calling 'spline1dfithermite': looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return;
     }
//--- reset output status and delegate to computational core
   info=0;
   CLSFit::Spline1DFitHermite(x,y,n,m,info,s.GetInnerObj(),rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Weighted linear least squares fitting. |
//| QR decomposition is used to reduce task to MxM, then triangular |
//| solver or SVD-based solver is used depending on condition number |
//| of the system. It allows to maximize speed and retain decent |
//| accuracy. |
//| INPUT PARAMETERS: |
//| Y - array[0..N-1] Function values in N points. |
//| W - array[0..N-1] Weights corresponding to function |
//| values. Each summand in square sum of |
//| approximation deviations from given values is |
//| multiplied by the square of corresponding weight.|
//| FMatrix - a table of basis functions values, |
//| array[0..N-1, 0..M-1]. FMatrix[I, J] - value of |
//| J-th basis function in I-th point. |
//| N - number of points used. N>=1. |
//| M - number of basis functions, M>=1. |
//| OUTPUT PARAMETERS: |
//| Info - error code: |
//| * -4 internal SVD decomposition subroutine |
//| failed (very rare and for degenerate |
//| systems only) |
//| * -1 incorrect N/M were specified |
//| * 1 task is solved |
//| C - decomposition coefficients, array[0..M-1] |
//| Rep - fitting report. Following fields are set: |
//| * Rep.TaskRCond reciprocal of condition |
//| number |
//| * RMSError rms error on the (X,Y). |
//| * AvgError average error on the (X,Y). |
//| * AvgRelError average relative error on the|
//| non-zero Y |
//| * MaxError maximum error |
//| NON-WEIGHTED ERRORS ARE |
//| CALCULATED |
//+------------------------------------------------------------------+
void CAlglib::LSFitLinearW(double &y[],double &w[],CMatrixDouble &fmatrix,
                           const int n,const int m,int &info,
                           double &c[],CLSFitReportShell &rep)
  {
//--- reset output status before delegating
   info=0;
//--- delegate to computational core (see CLSFit::LSFitLinearW)
   CLSFit::LSFitLinearW(y,w,fmatrix,n,m,info,c,rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Weighted linear least squares fitting. |
//| QR decomposition is used to reduce task to MxM, then triangular |
//| solver or SVD-based solver is used depending on condition number |
//| of the system. It allows to maximize speed and retain decent |
//| accuracy. |
//| INPUT PARAMETERS: |
//| Y - array[0..N-1] Function values in N points. |
//| W - array[0..N-1] Weights corresponding to function |
//| values. Each summand in square sum of |
//| approximation deviations from given values is |
//| multiplied by the square of corresponding weight.|
//| FMatrix - a table of basis functions values, |
//| array[0..N-1, 0..M-1]. FMatrix[I, J] - value of |
//| J-th basis function in I-th point. |
//| N - number of points used. N>=1. |
//| M - number of basis functions, M>=1. |
//| OUTPUT PARAMETERS: |
//| Info - error code: |
//| * -4 internal SVD decomposition subroutine |
//| failed (very rare and for degenerate |
//| systems only) |
//| * -1 incorrect N/M were specified |
//| * 1 task is solved |
//| C - decomposition coefficients, array[0..M-1] |
//| Rep - fitting report. Following fields are set: |
//| * Rep.TaskRCond reciprocal of condition |
//| number |
//| * RMSError rms error on the (X,Y). |
//| * AvgError average error on the (X,Y). |
//| * AvgRelError average relative error on the|
//| non-zero Y |
//| * MaxError maximum error |
//| NON-WEIGHTED ERRORS ARE |
//| CALCULATED |
//+------------------------------------------------------------------+
void CAlglib::LSFitLinearW(double &y[],double &w[],CMatrixDouble &fmatrix,
                           int &info,double &c[],CLSFitReportShell &rep)
  {
//--- infer problem sizes from the inputs
   int n=CAp::Len(y);
   int m=(int)CAp::Cols(fmatrix);
//--- Y and W must contain one value per row of FMatrix
   if(n!=CAp::Len(w) || n!=CAp::Rows(fmatrix))
     {
      Print("Error while calling 'lsfitlinearw': looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return;
     }
//--- reset output status and delegate to computational core
   info=0;
   CLSFit::LSFitLinearW(y,w,fmatrix,n,m,info,c,rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Weighted constrained linear least squares fitting.               |
//| This is variation of LSFitLinearW(), which searches for          |
//| min|A*x=b| given that K additional constraints C*x=bc are        |
//| satisfied. It reduces original task to modified one: min|B*y-d| |
//| WITHOUT constraints, then LSFitLinearW() is called. |
//| INPUT PARAMETERS: |
//| Y - array[0..N-1] Function values in N points. |
//| W - array[0..N-1] Weights corresponding to function |
//| values. Each summand in square sum of |
//| approximation deviations from given values is |
//| multiplied by the square of corresponding |
//| weight. |
//| FMatrix - a table of basis functions values, |
//| array[0..N-1,0..M-1]. FMatrix[I,J] - value of |
//| J-th basis function in I-th point. |
//|     CMatrix -  a table of constraints, array[0..K-1,0..M].       |
//| I-th row of CMatrix corresponds to I-th linear |
//| constraint: CMatrix[I,0]*C[0] + ... + |
//| + CMatrix[I,M-1]*C[M-1] = CMatrix[I,M] |
//| N - number of points used. N>=1. |
//| M - number of basis functions, M>=1. |
//| K - number of constraints, 0 <= K < M |
//| K=0 corresponds to absence of constraints. |
//| OUTPUT PARAMETERS: |
//| Info - error code: |
//| * -4 internal SVD decomposition subroutine |
//| failed (very rare and for degenerate |
//| systems only) |
//| * -3 either too many constraints (M or more), |
//| degenerate constraints (some constraints |
//|                       are repeated twice) or inconsistent        |
//| constraints were specified. |
//| * 1 task is solved |
//| C - decomposition coefficients, array[0..M-1] |
//| Rep - fitting report. Following fields are set: |
//| * RMSError rms error on the (X,Y). |
//| * AvgError average error on the (X,Y). |
//| * AvgRelError average relative error on the|
//| non-zero Y |
//| * MaxError maximum error |
//| NON-WEIGHTED ERRORS ARE |
//| CALCULATED |
//| IMPORTANT: |
//|     this subroutine doesn't calculate task's condition number    |
//| for K<>0. |
//+------------------------------------------------------------------+
void CAlglib::LSFitLinearWC(double &y[],double &w[],CMatrixDouble &fmatrix,
                            CMatrixDouble &cmatrix,const int n,
                            const int m,const int k,int &info,
                            double &c[],CLSFitReportShell &rep)
  {
//--- reset output status before delegating
   info=0;
//--- delegate to computational core (see CLSFit::LSFitLinearWC)
   CLSFit::LSFitLinearWC(y,w,fmatrix,cmatrix,n,m,k,info,c,rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Weighted constrained linear least squares fitting.               |
//| This is variation of LSFitLinearW(), which searches for          |
//| min|A*x=b| given that K additional constraints C*x=bc are        |
//| satisfied. It reduces original task to modified one: min|B*y-d| |
//| WITHOUT constraints, then LSFitLinearW() is called. |
//| INPUT PARAMETERS: |
//| Y - array[0..N-1] Function values in N points. |
//| W - array[0..N-1] Weights corresponding to function |
//| values. Each summand in square sum of |
//| approximation deviations from given values is |
//| multiplied by the square of corresponding |
//| weight. |
//| FMatrix - a table of basis functions values, |
//| array[0..N-1,0..M-1]. FMatrix[I,J] - value of |
//| J-th basis function in I-th point. |
//|     CMatrix -  a table of constraints, array[0..K-1,0..M].       |
//| I-th row of CMatrix corresponds to I-th linear |
//| constraint: CMatrix[I,0]*C[0] + ... + |
//| + CMatrix[I,M-1]*C[M-1] = CMatrix[I,M] |
//| N - number of points used. N>=1. |
//| M - number of basis functions, M>=1. |
//| K - number of constraints, 0 <= K < M |
//| K=0 corresponds to absence of constraints. |
//| OUTPUT PARAMETERS: |
//| Info - error code: |
//| * -4 internal SVD decomposition subroutine |
//| failed (very rare and for degenerate |
//| systems only) |
//| * -3 either too many constraints (M or more), |
//| degenerate constraints (some constraints |
//|                       are repeated twice) or inconsistent        |
//| constraints were specified. |
//| * 1 task is solved |
//| C - decomposition coefficients, array[0..M-1] |
//| Rep - fitting report. Following fields are set: |
//| * RMSError rms error on the (X,Y). |
//| * AvgError average error on the (X,Y). |
//| * AvgRelError average relative error on the|
//| non-zero Y |
//| * MaxError maximum error |
//| NON-WEIGHTED ERRORS ARE |
//| CALCULATED |
//| IMPORTANT: |
//|     this subroutine doesn't calculate task's condition number    |
//| for K<>0. |
//+------------------------------------------------------------------+
void CAlglib::LSFitLinearWC(double &y[],double &w[],CMatrixDouble &fmatrix,
                            CMatrixDouble &cmatrix,int &info,
                            double &c[],CLSFitReportShell &rep)
  {
//--- infer problem sizes from the inputs
   int n=CAp::Len(y);
   int m=(int)CAp::Cols(fmatrix);
   int k=(int)CAp::Rows(cmatrix);
//--- Y/W must match FMatrix rows; CMatrix carries one extra column (right-hand side)
   if(n!=CAp::Len(w) || n!=CAp::Rows(fmatrix) || CAp::Cols(fmatrix)!=CAp::Cols(cmatrix)-1)
     {
      Print("Error while calling 'lsfitlinearwc': looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return;
     }
//--- reset output status and delegate to computational core
   info=0;
   CLSFit::LSFitLinearWC(y,w,fmatrix,cmatrix,n,m,k,info,c,rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Linear least squares fitting. |
//| QR decomposition is used to reduce task to MxM, then triangular |
//| solver or SVD-based solver is used depending on condition number |
//| of the system. It allows to maximize speed and retain decent |
//| accuracy. |
//| INPUT PARAMETERS: |
//| Y - array[0..N-1] Function values in N points. |
//| FMatrix - a table of basis functions values, |
//| array[0..N-1, 0..M-1]. |
//| FMatrix[I, J] - value of J-th basis function in |
//| I-th point. |
//| N - number of points used. N>=1. |
//| M - number of basis functions, M>=1. |
//| OUTPUT PARAMETERS: |
//| Info - error code: |
//| * -4 internal SVD decomposition subroutine |
//| failed (very rare and for degenerate |
//| systems only) |
//| * 1 task is solved |
//| C - decomposition coefficients, array[0..M-1] |
//| Rep - fitting report. Following fields are set: |
//| * Rep.TaskRCond reciprocal of condition |
//| number |
//| * RMSError rms error on the (X,Y). |
//| * AvgError average error on the (X,Y). |
//| * AvgRelError average relative error on the|
//| non-zero Y |
//| * MaxError maximum error |
//| NON-WEIGHTED ERRORS ARE |
//| CALCULATED |
//+------------------------------------------------------------------+
void CAlglib::LSFitLinear(double &y[],CMatrixDouble &fmatrix,
                          const int n,const int m,int &info,
                          double &c[],CLSFitReportShell &rep)
  {
//--- reset output status before delegating
   info=0;
//--- delegate to computational core (see CLSFit::LSFitLinear)
   CLSFit::LSFitLinear(y,fmatrix,n,m,info,c,rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Linear least squares fitting. |
//| QR decomposition is used to reduce task to MxM, then triangular |
//| solver or SVD-based solver is used depending on condition number |
//| of the system. It allows to maximize speed and retain decent |
//| accuracy. |
//| INPUT PARAMETERS: |
//| Y - array[0..N-1] Function values in N points. |
//| FMatrix - a table of basis functions values, |
//| array[0..N-1, 0..M-1]. |
//| FMatrix[I, J] - value of J-th basis function in |
//| I-th point. |
//| N - number of points used. N>=1. |
//| M - number of basis functions, M>=1. |
//| OUTPUT PARAMETERS: |
//| Info - error code: |
//| * -4 internal SVD decomposition subroutine |
//| failed (very rare and for degenerate |
//| systems only) |
//| * 1 task is solved |
//| C - decomposition coefficients, array[0..M-1] |
//| Rep - fitting report. Following fields are set: |
//| * Rep.TaskRCond reciprocal of condition |
//| number |
//| * RMSError rms error on the (X,Y). |
//| * AvgError average error on the (X,Y). |
//| * AvgRelError average relative error on the|
//| non-zero Y |
//| * MaxError maximum error |
//| NON-WEIGHTED ERRORS ARE |
//| CALCULATED |
//+------------------------------------------------------------------+
void CAlglib::LSFitLinear(double &y[],CMatrixDouble &fmatrix,
                          int &info,double &c[],CLSFitReportShell &rep)
  {
//--- infer problem sizes from the inputs
   int n=CAp::Len(y);
   int m=(int)CAp::Cols(fmatrix);
//--- Y must contain one value per row of FMatrix
   if(n!=CAp::Rows(fmatrix))
     {
      Print("Error while calling 'lsfitlinear': looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return;
     }
//--- reset output status and delegate to computational core
   info=0;
   CLSFit::LSFitLinear(y,fmatrix,n,m,info,c,rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Constrained linear least squares fitting.                        |
//| This is variation of LSFitLinear() which searches for min|A*x=b| |
//| given that K additional constraints C*x=bc are satisfied. It     |
//| reduces original task to modified one: min|B*y-d| WITHOUT |
//| constraints, then LSFitLinear() is called. |
//| INPUT PARAMETERS: |
//| Y - array[0..N-1] Function values in N points. |
//| FMatrix - a table of basis functions values, |
//| array[0..N-1,0..M-1]. FMatrix[I,J] - value of |
//| J-th basis function in I-th point. |
//|     CMatrix -  a table of constraints, array[0..K-1,0..M].       |
//| I-th row of CMatrix corresponds to I-th linear |
//| constraint: CMatrix[I,0]*C[0] + ... + |
//| + CMatrix[I,M-1]*C[M-1] = CMatrix[I,M] |
//| N - number of points used. N>=1. |
//| M - number of basis functions, M>=1. |
//| K - number of constraints, 0 <= K < M |
//| K=0 corresponds to absence of constraints. |
//| OUTPUT PARAMETERS: |
//| Info - error code: |
//| * -4 internal SVD decomposition subroutine |
//| failed (very rare and for degenerate |
//| systems only) |
//| * -3 either too many constraints (M or more), |
//| degenerate constraints (some constraints |
//|                       are repeated twice) or inconsistent        |
//| constraints were specified. |
//| * 1 task is solved |
//| C - decomposition coefficients, array[0..M-1] |
//| Rep - fitting report. Following fields are set: |
//| * RMSError rms error on the (X,Y). |
//| * AvgError average error on the (X,Y). |
//| * AvgRelError average relative error on the|
//| non-zero Y |
//| * MaxError maximum error |
//| NON-WEIGHTED ERRORS ARE |
//| CALCULATED |
//| IMPORTANT: |
//|     this subroutine doesn't calculate task's condition number    |
//| for K<>0. |
//+------------------------------------------------------------------+
void CAlglib::LSFitLinearC(double &y[],CMatrixDouble &fmatrix,
                           CMatrixDouble &cmatrix,const int n,
                           const int m,const int k,int &info,
                           double &c[],CLSFitReportShell &rep)
  {
//--- reset output status before delegating
   info=0;
//--- delegate to computational core (see CLSFit::LSFitLinearC)
   CLSFit::LSFitLinearC(y,fmatrix,cmatrix,n,m,k,info,c,rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Constrained linear least squares fitting.                        |
//| This is variation of LSFitLinear() which searches for min|A*x=b| |
//| given that K additional constraints C*x=bc are satisfied. It     |
//| reduces original task to modified one: min|B*y-d| WITHOUT |
//| constraints, then LSFitLinear() is called. |
//| INPUT PARAMETERS: |
//| Y - array[0..N-1] Function values in N points. |
//| FMatrix - a table of basis functions values, |
//| array[0..N-1,0..M-1]. FMatrix[I,J] - value of |
//| J-th basis function in I-th point. |
//|     CMatrix -  a table of constraints, array[0..K-1,0..M].       |
//| I-th row of CMatrix corresponds to I-th linear |
//| constraint: CMatrix[I,0]*C[0] + ... + |
//| + CMatrix[I,M-1]*C[M-1] = CMatrix[I,M] |
//| N - number of points used. N>=1. |
//| M - number of basis functions, M>=1. |
//| K - number of constraints, 0 <= K < M |
//| K=0 corresponds to absence of constraints. |
//| OUTPUT PARAMETERS: |
//| Info - error code: |
//| * -4 internal SVD decomposition subroutine |
//| failed (very rare and for degenerate |
//| systems only) |
//| * -3 either too many constraints (M or more), |
//| degenerate constraints (some constraints |
//|                       are repeated twice) or inconsistent        |
//| constraints were specified. |
//| * 1 task is solved |
//| C - decomposition coefficients, array[0..M-1] |
//| Rep - fitting report. Following fields are set: |
//| * RMSError rms error on the (X,Y). |
//| * AvgError average error on the (X,Y). |
//| * AvgRelError average relative error on the|
//| non-zero Y |
//| * MaxError maximum error |
//| NON-WEIGHTED ERRORS ARE |
//| CALCULATED |
//| IMPORTANT: |
//|     this subroutine doesn't calculate task's condition number    |
//| for K<>0. |
//+------------------------------------------------------------------+
void CAlglib::LSFitLinearC(double &y[],CMatrixDouble &fmatrix,
                           CMatrixDouble &cmatrix,int &info,
                           double &c[],CLSFitReportShell &rep)
  {
//--- infer problem sizes from the inputs
   int n=CAp::Len(y);
   int m=(int)CAp::Cols(fmatrix);
   int k=(int)CAp::Rows(cmatrix);
//--- Y must match FMatrix rows; CMatrix carries one extra column (right-hand side)
   if(n!=CAp::Rows(fmatrix) || CAp::Cols(fmatrix)!=CAp::Cols(cmatrix)-1)
     {
      Print("Error while calling 'lsfitlinearc': looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return;
     }
//--- reset output status and delegate to computational core
   info=0;
   CLSFit::LSFitLinearC(y,fmatrix,cmatrix,n,m,k,info,c,rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Weighted nonlinear least squares fitting using function values |
//| only. |
//| Combination of numerical differentiation and secant updates is |
//| used to obtain function Jacobian. |
//| Nonlinear task min(F(c)) is solved, where |
//| F(c) = (w[0]*(f(c,x[0])-y[0]))^2 + ... + |
//| + (w[n-1]*(f(c,x[n-1])-y[n-1]))^2, |
//| * N is a number of points, |
//| * M is a dimension of a space points belong to, |
//| * K is a dimension of a space of parameters being fitted, |
//| * w is an N-dimensional vector of weight coefficients, |
//| * x is a set of N points, each of them is an M-dimensional |
//| vector, |
//| * c is a K-dimensional vector of parameters being fitted |
//| This subroutine uses only f(c,x[i]). |
//| INPUT PARAMETERS: |
//| X - array[0..N-1,0..M-1], points (one row = one |
//| point) |
//| Y - array[0..N-1], function values. |
//| W - weights, array[0..N-1] |
//| C - array[0..K-1], initial approximation to the |
//| solution, |
//| N - number of points, N>1 |
//| M - dimension of space |
//| K - number of parameters being fitted |
//| DiffStep- numerical differentiation step; |
//| should not be very small or large; |
//| large = loss of accuracy |
//| small = growth of round-off errors |
//| OUTPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//+------------------------------------------------------------------+
void CAlglib::LSFitCreateWF(CMatrixDouble &x,double &y[],double &w[],
                            double &c[],const int n,const int m,
                            const int k,const double diffstep,
                            CLSFitStateShell &state)
  {
//--- delegate to computational core; State receives the algorithm state
   CLSFit::LSFitCreateWF(x,y,w,c,n,m,k,diffstep,state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Weighted nonlinear least squares fitting using function values |
//| only. |
//| Combination of numerical differentiation and secant updates is |
//| used to obtain function Jacobian. |
//| Nonlinear task min(F(c)) is solved, where |
//| F(c) = (w[0]*(f(c,x[0])-y[0]))^2 + ... + |
//| + (w[n-1]*(f(c,x[n-1])-y[n-1]))^2, |
//| * N is a number of points, |
//| * M is a dimension of a space points belong to, |
//| * K is a dimension of a space of parameters being fitted, |
//| * w is an N-dimensional vector of weight coefficients, |
//| * x is a set of N points, each of them is an M-dimensional |
//| vector, |
//| * c is a K-dimensional vector of parameters being fitted |
//| This subroutine uses only f(c,x[i]). |
//| INPUT PARAMETERS: |
//| X - array[0..N-1,0..M-1], points (one row = one |
//| point) |
//| Y - array[0..N-1], function values. |
//| W - weights, array[0..N-1] |
//| C - array[0..K-1], initial approximation to the |
//| solution, |
//| N - number of points, N>1 |
//| M - dimension of space |
//| K - number of parameters being fitted |
//| DiffStep- numerical differentiation step; |
//| should not be very small or large; |
//| large = loss of accuracy |
//| small = growth of round-off errors |
//| OUTPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//+------------------------------------------------------------------+
void CAlglib::LSFitCreateWF(CMatrixDouble &x,double &y[],double &w[],
                            double &c[],const double diffstep,
                            CLSFitStateShell &state)
  {
//--- infer problem sizes from the inputs
   int n=(int)CAp::Rows(x);
   int m=(int)CAp::Cols(x);
   int k=CAp::Len(c);
//--- Y and W must contain one value per row of X
   if(n!=CAp::Len(y) || n!=CAp::Len(w))
     {
      Print("Error while calling 'lsfitcreatewf': looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return;
     }
//--- delegate to computational core; State receives the algorithm state
   CLSFit::LSFitCreateWF(x,y,w,c,n,m,k,diffstep,state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Nonlinear least squares fitting using function values only. |
//| Combination of numerical differentiation and secant updates is |
//| used to obtain function Jacobian. |
//| Nonlinear task min(F(c)) is solved, where |
//| F(c) = (f(c,x[0])-y[0])^2 + ... + (f(c,x[n-1])-y[n-1])^2, |
//| * N is a number of points, |
//| * M is a dimension of a space points belong to, |
//| * K is a dimension of a space of parameters being fitted, |
//| * w is an N-dimensional vector of weight coefficients, |
//| * x is a set of N points, each of them is an M-dimensional |
//| vector, |
//| * c is a K-dimensional vector of parameters being fitted |
//| This subroutine uses only f(c,x[i]). |
//| INPUT PARAMETERS: |
//| X - array[0..N-1,0..M-1], points (one row = one |
//| point) |
//| Y - array[0..N-1], function values. |
//| C - array[0..K-1], initial approximation to the |
//| solution, |
//| N - number of points, N>1 |
//| M - dimension of space |
//| K - number of parameters being fitted |
//| DiffStep- numerical differentiation step; |
//| should not be very small or large; |
//| large = loss of accuracy |
//| small = growth of round-off errors |
//| OUTPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//+------------------------------------------------------------------+
void CAlglib::LSFitCreateF(CMatrixDouble &x,double &y[],double &c[],
                           const int n,const int m,const int k,
                           const double diffstep,CLSFitStateShell &state)
  {
//--- delegate to computational core; State receives the algorithm state
   CLSFit::LSFitCreateF(x,y,c,n,m,k,diffstep,state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Nonlinear least squares fitting using function values only. |
//| Combination of numerical differentiation and secant updates is |
//| used to obtain function Jacobian. |
//| Nonlinear task min(F(c)) is solved, where |
//| F(c) = (f(c,x[0])-y[0])^2 + ... + (f(c,x[n-1])-y[n-1])^2, |
//| * N is a number of points, |
//| * M is a dimension of a space points belong to, |
//| * K is a dimension of a space of parameters being fitted, |
//| * w is an N-dimensional vector of weight coefficients, |
//| * x is a set of N points, each of them is an M-dimensional |
//| vector, |
//| * c is a K-dimensional vector of parameters being fitted |
//| This subroutine uses only f(c,x[i]). |
//| INPUT PARAMETERS: |
//| X - array[0..N-1,0..M-1], points (one row = one |
//| point) |
//| Y - array[0..N-1], function values. |
//| C - array[0..K-1], initial approximation to the |
//| solution, |
//| N - number of points, N>1 |
//| M - dimension of space |
//| K - number of parameters being fitted |
//| DiffStep- numerical differentiation step; |
//| should not be very small or large; |
//| large = loss of accuracy |
//| small = growth of round-off errors |
//| OUTPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//+------------------------------------------------------------------+
void CAlglib::LSFitCreateF(CMatrixDouble &x,double &y[],double &c[],
                           const double diffstep,CLSFitStateShell &state)
  {
//--- infer problem sizes from the inputs
   int n=(int)CAp::Rows(x);
   int m=(int)CAp::Cols(x);
   int k=CAp::Len(c);
//--- Y must contain one value per row of X
   if(n!=CAp::Len(y))
     {
      Print("Error while calling 'lsfitcreatef': looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return;
     }
//--- delegate to computational core; State receives the algorithm state
   CLSFit::LSFitCreateF(x,y,c,n,m,k,diffstep,state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Weighted nonlinear least squares fitting using gradient only. |
//| Nonlinear task min(F(c)) is solved, where |
//| F(c) = (w[0]*(f(c,x[0])-y[0]))^2 + ... + |
//| + (w[n-1]*(f(c,x[n-1])-y[n-1]))^2, |
//| * N is a number of points, |
//| * M is a dimension of a space points belong to, |
//| * K is a dimension of a space of parameters being fitted, |
//| * w is an N-dimensional vector of weight coefficients, |
//| * x is a set of N points, each of them is an M-dimensional |
//| vector, |
//| * c is a K-dimensional vector of parameters being fitted |
//| This subroutine uses only f(c,x[i]) and its gradient. |
//| INPUT PARAMETERS: |
//| X - array[0..N-1,0..M-1], points (one row = one |
//| point) |
//| Y - array[0..N-1], function values. |
//| W - weights, array[0..N-1] |
//| C - array[0..K-1], initial approximation to the |
//| solution, |
//| N - number of points, N>1 |
//| M - dimension of space |
//| K - number of parameters being fitted |
//| CheapFG - boolean flag, which is: |
//| * True if both function and gradient calculation |
//| complexity are less than O(M^2). An |
//| improved algorithm can be used which |
//| corresponds to FGJ scheme from MINLM unit.|
//| * False otherwise. |
//|                            Standard Jacobian-based               |
//| Levenberg-Marquardt algo will be used (FJ |
//| scheme). |
//| OUTPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| See also: |
//| LSFitResults |
//| LSFitCreateFG (fitting without weights) |
//| LSFitCreateWFGH (fitting using Hessian) |
//| LSFitCreateFGH (fitting using Hessian, without weights) |
//+------------------------------------------------------------------+
void CAlglib::LSFitCreateWFG(CMatrixDouble &x,double &y[],double &w[],
                             double &c[],const int n,const int m,
                             const int k,const bool cheapfg,
                             CLSFitStateShell &state)
  {
//--- thin wrapper: sizes N/M/K are taken on trust from the caller and
//--- forwarded, together with the data, to the computational core
   CLSFit::LSFitCreateWFG(x,y,w,c,n,m,k,cheapfg,state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Weighted nonlinear least squares fitting using gradient only. |
//| Nonlinear task min(F(c)) is solved, where |
//| F(c) = (w[0]*(f(c,x[0])-y[0]))^2 + ... + |
//| + (w[n-1]*(f(c,x[n-1])-y[n-1]))^2, |
//| * N is a number of points, |
//| * M is a dimension of a space points belong to, |
//| * K is a dimension of a space of parameters being fitted, |
//| * w is an N-dimensional vector of weight coefficients, |
//| * x is a set of N points, each of them is an M-dimensional |
//| vector, |
//| * c is a K-dimensional vector of parameters being fitted |
//| This subroutine uses only f(c,x[i]) and its gradient. |
//| INPUT PARAMETERS: |
//| X - array[0..N-1,0..M-1], points (one row = one |
//| point) |
//| Y - array[0..N-1], function values. |
//| W - weights, array[0..N-1] |
//| C - array[0..K-1], initial approximation to the |
//| solution, |
//| N - number of points, N>1 |
//| M - dimension of space |
//| K - number of parameters being fitted |
//| CheapFG - boolean flag, which is: |
//| * True if both function and gradient calculation |
//| complexity are less than O(M^2). An |
//| improved algorithm can be used which |
//| corresponds to FGJ scheme from MINLM unit.|
//| * False otherwise. |
//|                            Standard Jacobian-based               |
//| Levenberg-Marquardt algo will be used (FJ |
//| scheme). |
//| OUTPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| See also: |
//| LSFitResults |
//| LSFitCreateFG (fitting without weights) |
//| LSFitCreateWFGH (fitting using Hessian) |
//| LSFitCreateFGH (fitting using Hessian, without weights) |
//+------------------------------------------------------------------+
void CAlglib::LSFitCreateWFG(CMatrixDouble &x,double &y[],double &w[],
                             double &c[],const bool cheapfg,
                             CLSFitStateShell &state)
  {
//--- derive task sizes from the input arrays:
//--- N = number of points, M = space dimension, K = parameter count
   int n=(int)CAp::Rows(x);
   int m=(int)CAp::Cols(x);
   int k=CAp::Len(c);
//--- Y and W must both contain exactly one entry per row of X
   if(n!=CAp::Len(y) || n!=CAp::Len(w))
     {
      Print("Error while calling 'lsfitcreatewfg': looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return;
     }
//--- delegate to the computational core
   CLSFit::LSFitCreateWFG(x,y,w,c,n,m,k,cheapfg,state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Nonlinear least squares fitting using gradient only, without |
//| individual weights. |
//| Nonlinear task min(F(c)) is solved, where |
//| F(c) = ((f(c,x[0])-y[0]))^2 + ... + ((f(c,x[n-1])-y[n-1]))^2,|
//| * N is a number of points, |
//| * M is a dimension of a space points belong to, |
//| * K is a dimension of a space of parameters being fitted, |
//| * x is a set of N points, each of them is an M-dimensional |
//| vector, |
//| * c is a K-dimensional vector of parameters being fitted |
//| This subroutine uses only f(c,x[i]) and its gradient. |
//| INPUT PARAMETERS: |
//| X - array[0..N-1,0..M-1], points (one row = one |
//| point) |
//| Y - array[0..N-1], function values. |
//| C - array[0..K-1], initial approximation to the |
//| solution, |
//| N - number of points, N>1 |
//| M - dimension of space |
//| K - number of parameters being fitted |
//| CheapFG - boolean flag, which is: |
//| * True if both function and gradient calculation|
//| complexity are less than O(M^2). An |
//| improved algorithm can be used which |
//| corresponds to FGJ scheme from MINLM |
//| unit. |
//| * False otherwise. |
//|                          Standard Jacobian-based                 |
//| Levenberg-Marquardt algo will be used |
//| (FJ scheme). |
//| OUTPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//+------------------------------------------------------------------+
void CAlglib::LSFitCreateFG(CMatrixDouble &x,double &y[],double &c[],
                            const int n,const int m,const int k,
                            const bool cheapfg,CLSFitStateShell &state)
  {
//--- thin wrapper: sizes N/M/K are taken on trust from the caller and
//--- forwarded, together with the data, to the computational core
   CLSFit::LSFitCreateFG(x,y,c,n,m,k,cheapfg,state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Nonlinear least squares fitting using gradient only, without |
//| individual weights. |
//| Nonlinear task min(F(c)) is solved, where |
//| F(c) = ((f(c,x[0])-y[0]))^2 + ... + ((f(c,x[n-1])-y[n-1]))^2,|
//| * N is a number of points, |
//| * M is a dimension of a space points belong to, |
//| * K is a dimension of a space of parameters being fitted, |
//| * x is a set of N points, each of them is an M-dimensional |
//| vector, |
//| * c is a K-dimensional vector of parameters being fitted |
//| This subroutine uses only f(c,x[i]) and its gradient. |
//| INPUT PARAMETERS: |
//| X - array[0..N-1,0..M-1], points (one row = one |
//| point) |
//| Y - array[0..N-1], function values. |
//| C - array[0..K-1], initial approximation to the |
//| solution, |
//| N - number of points, N>1 |
//| M - dimension of space |
//| K - number of parameters being fitted |
//| CheapFG - boolean flag, which is: |
//| * True if both function and gradient calculation|
//| complexity are less than O(M^2). An |
//| improved algorithm can be used which |
//| corresponds to FGJ scheme from MINLM |
//| unit. |
//| * False otherwise. |
//|                          Standard Jacobian-based                 |
//| Levenberg-Marquardt algo will be used |
//| (FJ scheme). |
//| OUTPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//+------------------------------------------------------------------+
void CAlglib::LSFitCreateFG(CMatrixDouble &x,double &y[],double &c[],
                            const bool cheapfg,CLSFitStateShell &state)
  {
//--- derive task sizes from the input arrays:
//--- N = number of points, M = space dimension, K = parameter count
   int n=(int)CAp::Rows(x);
   int m=(int)CAp::Cols(x);
   int k=CAp::Len(c);
//--- every row of X must have a matching function value in Y
   if(n!=CAp::Len(y))
     {
      Print("Error while calling 'lsfitcreatefg': looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return;
     }
//--- delegate to the computational core
   CLSFit::LSFitCreateFG(x,y,c,n,m,k,cheapfg,state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Weighted nonlinear least squares fitting using gradient/Hessian. |
//| Nonlinear task min(F(c)) is solved, where |
//| F(c) = (w[0]*(f(c,x[0])-y[0]))^2 + ... + |
//| (w[n-1]*(f(c,x[n-1])-y[n-1]))^2, |
//| * N is a number of points, |
//| * M is a dimension of a space points belong to, |
//| * K is a dimension of a space of parameters being fitted, |
//| * w is an N-dimensional vector of weight coefficients, |
//| * x is a set of N points, each of them is an M-dimensional |
//| vector, |
//| * c is a K-dimensional vector of parameters being fitted |
//| This subroutine uses f(c,x[i]), its gradient and its Hessian. |
//| INPUT PARAMETERS: |
//| X - array[0..N-1,0..M-1], points (one row = one |
//| point) |
//| Y - array[0..N-1], function values. |
//| W - weights, array[0..N-1] |
//| C - array[0..K-1], initial approximation to the |
//| solution, |
//| N - number of points, N>1 |
//| M - dimension of space |
//| K - number of parameters being fitted |
//| OUTPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//+------------------------------------------------------------------+
void CAlglib::LSFitCreateWFGH(CMatrixDouble &x,double &y[],double &w[],
                              double &c[],const int n,const int m,
                              const int k,CLSFitStateShell &state)
  {
//--- thin wrapper: sizes N/M/K are taken on trust from the caller and
//--- forwarded, together with the data, to the computational core
   CLSFit::LSFitCreateWFGH(x,y,w,c,n,m,k,state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Weighted nonlinear least squares fitting using gradient/Hessian. |
//| Nonlinear task min(F(c)) is solved, where |
//| F(c) = (w[0]*(f(c,x[0])-y[0]))^2 + ... + |
//| (w[n-1]*(f(c,x[n-1])-y[n-1]))^2, |
//| * N is a number of points, |
//| * M is a dimension of a space points belong to, |
//| * K is a dimension of a space of parameters being fitted, |
//| * w is an N-dimensional vector of weight coefficients, |
//| * x is a set of N points, each of them is an M-dimensional |
//| vector, |
//| * c is a K-dimensional vector of parameters being fitted |
//| This subroutine uses f(c,x[i]), its gradient and its Hessian. |
//| INPUT PARAMETERS: |
//| X - array[0..N-1,0..M-1], points (one row = one |
//| point) |
//| Y - array[0..N-1], function values. |
//| W - weights, array[0..N-1] |
//| C - array[0..K-1], initial approximation to the |
//| solution, |
//| N - number of points, N>1 |
//| M - dimension of space |
//| K - number of parameters being fitted |
//| OUTPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//+------------------------------------------------------------------+
void CAlglib::LSFitCreateWFGH(CMatrixDouble &x,double &y[],double &w[],
                              double &c[],CLSFitStateShell &state)
  {
//--- derive task sizes from the input arrays:
//--- N = number of points, M = space dimension, K = parameter count
   int n=(int)CAp::Rows(x);
   int m=(int)CAp::Cols(x);
   int k=CAp::Len(c);
//--- Y and W must both contain exactly one entry per row of X
   if(n!=CAp::Len(y) || n!=CAp::Len(w))
     {
      Print("Error while calling 'lsfitcreatewfgh': looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return;
     }
//--- delegate to the computational core
   CLSFit::LSFitCreateWFGH(x,y,w,c,n,m,k,state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Nonlinear least squares fitting using gradient/Hessian, without |
//| individual weights.                                              |
//| Nonlinear task min(F(c)) is solved, where |
//| F(c) = ((f(c,x[0])-y[0]))^2 + ... + |
//| ((f(c,x[n-1])-y[n-1]))^2, |
//| * N is a number of points, |
//| * M is a dimension of a space points belong to, |
//| * K is a dimension of a space of parameters being fitted, |
//| * x is a set of N points, each of them is an M-dimensional |
//| vector, |
//| * c is a K-dimensional vector of parameters being fitted |
//| This subroutine uses f(c,x[i]), its gradient and its Hessian. |
//| INPUT PARAMETERS: |
//| X - array[0..N-1,0..M-1], points (one row = one |
//| point) |
//| Y - array[0..N-1], function values. |
//| C - array[0..K-1], initial approximation to the |
//| solution, |
//| N - number of points, N>1 |
//| M - dimension of space |
//| K - number of parameters being fitted |
//| OUTPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//+------------------------------------------------------------------+
void CAlglib::LSFitCreateFGH(CMatrixDouble &x,double &y[],double &c[],
                             const int n,const int m,const int k,
                             CLSFitStateShell &state)
  {
//--- thin wrapper: sizes N/M/K are taken on trust from the caller and
//--- forwarded, together with the data, to the computational core
   CLSFit::LSFitCreateFGH(x,y,c,n,m,k,state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Nonlinear least squares fitting using gradient/Hessian, without |
//| individual weights.                                              |
//| Nonlinear task min(F(c)) is solved, where |
//| F(c) = ((f(c,x[0])-y[0]))^2 + ... + |
//| ((f(c,x[n-1])-y[n-1]))^2, |
//| * N is a number of points, |
//| * M is a dimension of a space points belong to, |
//| * K is a dimension of a space of parameters being fitted, |
//| * x is a set of N points, each of them is an M-dimensional |
//| vector, |
//| * c is a K-dimensional vector of parameters being fitted |
//| This subroutine uses f(c,x[i]), its gradient and its Hessian. |
//| INPUT PARAMETERS: |
//| X - array[0..N-1,0..M-1], points (one row = one |
//| point) |
//| Y - array[0..N-1], function values. |
//| C - array[0..K-1], initial approximation to the |
//| solution, |
//| N - number of points, N>1 |
//| M - dimension of space |
//| K - number of parameters being fitted |
//| OUTPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//+------------------------------------------------------------------+
void CAlglib::LSFitCreateFGH(CMatrixDouble &x,double &y[],double &c[],
                             CLSFitStateShell &state)
  {
//--- derive task sizes from the input arrays:
//--- N = number of points, M = space dimension, K = parameter count
   int n=(int)CAp::Rows(x);
   int m=(int)CAp::Cols(x);
   int k=CAp::Len(c);
//--- every row of X must have a matching function value in Y
   if(n!=CAp::Len(y))
     {
      Print("Error while calling 'lsfitcreatefgh': looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return;
     }
//--- delegate to the computational core
   CLSFit::LSFitCreateFGH(x,y,c,n,m,k,state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Stopping conditions for nonlinear least squares fitting. |
//| INPUT PARAMETERS: |
//|     State   -   structure which stores algorithm state           |
//|     EpsX    -   >=0                                              |
//| The subroutine finishes its work if on k+1-th |
//| iteration the condition |v|<=EpsX is fulfilled, |
//| where: |
//|                 * |.| means Euclidean norm                       |
//|                 * v - scaled step vector, v[i]=dx[i]/s[i]        |
//|                 * dx - step vector, dx=X(k+1)-X(k)               |
//| * s - scaling coefficients set by LSFitSetScale()|
//| MaxIts - maximum number of iterations. If MaxIts=0, the |
//| number of iterations is unlimited. Only |
//| Levenberg-Marquardt iterations are counted |
//| (L-BFGS/CG iterations are NOT counted because |
//| their cost is very low compared to that of LM). |
//| NOTE |
//| Passing EpsX=0 and MaxIts=0 (simultaneously) will lead to        |
//| automatic stopping criterion selection (according to the scheme |
//| used by MINLM unit). |
//+------------------------------------------------------------------+
void CAlglib::LSFitSetCond(CLSFitStateShell &state,
                           const double epsx,const int maxits)
  {
//--- forward the stopping conditions (EpsX, MaxIts) to the computational
//--- core; passing epsx=0 and maxits=0 selects automatic criteria there
   CLSFit::LSFitSetCond(state.GetInnerObj(),epsx,maxits);
  }
//+------------------------------------------------------------------+
//| This function sets maximum step length |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| StpMax - maximum step length, >=0. Set StpMax to 0.0, if |
//| you don't want to limit step length. |
//| Use this subroutine when you optimize target function which |
//| contains exp() or other fast growing functions, and optimization |
//| algorithm makes too large steps which leads to overflow. This |
//| function allows us to reject steps that are too large (and |
//| therefore expose us to the possible overflow) without actually |
//| calculating function value at the x+stp*d. |
//| NOTE: non-zero StpMax leads to moderate performance degradation |
//| because intermediate step of preconditioned L-BFGS optimization |
//| is incompatible with limits on step size. |
//+------------------------------------------------------------------+
void CAlglib::LSFitSetStpMax(CLSFitStateShell &state,const double stpmax)
  {
//--- forward the maximum step length to the computational core
//--- (stpmax=0.0 means "do not limit step length")
   CLSFit::LSFitSetStpMax(state.GetInnerObj(),stpmax);
  }
//+------------------------------------------------------------------+
//| This function turns on/off reporting. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| NeedXRep- whether iteration reports are needed or not |
//| When reports are needed, State.C (current parameters) and State. |
//| F (current value of fitting function) are reported. |
//+------------------------------------------------------------------+
void CAlglib::LSFitSetXRep(CLSFitStateShell &state,const bool needxrep)
  {
//--- forward the iteration-report flag to the computational core
   CLSFit::LSFitSetXRep(state.GetInnerObj(),needxrep);
  }
//+------------------------------------------------------------------+
//| This function sets scaling coefficients for underlying optimizer.|
//| ALGLIB optimizers use scaling matrices to test stopping |
//| conditions (step size and gradient are scaled before comparison |
//| with tolerances). Scale of the I-th variable is a translation |
//| invariant measure of: |
//| a) "how large" the variable is |
//| b) how large the step should be to make significant changes in |
//| the function |
//| Generally, scale is NOT considered to be a form of |
//| preconditioner. But LM optimizer is unique in that it uses |
//| scaling matrix both in the stopping condition tests and as |
//| Marquardt damping factor. |
//| Proper scaling is very important for the algorithm performance. |
//| It is less important for the quality of results, but still has |
//| some influence (it is easier to converge when variables are |
//| properly scaled, so premature stopping is possible when very |
//| badly scaled variables are combined with relaxed stopping        |
//| conditions). |
//| INPUT PARAMETERS: |
//| State - structure stores algorithm state |
//| S - array[N], non-zero scaling coefficients |
//| S[i] may be negative, sign doesn't matter. |
//+------------------------------------------------------------------+
void CAlglib::LSFitSetScale(CLSFitStateShell &state,double &s[])
  {
//--- forward the variable-scaling coefficients to the computational core
   CLSFit::LSFitSetScale(state.GetInnerObj(),s);
  }
//+------------------------------------------------------------------+
//| This function sets boundary constraints for underlying optimizer |
//| Boundary constraints are inactive by default (after initial |
//| creation). They are preserved until explicitly turned off with |
//| another SetBC() call. |
//| INPUT PARAMETERS: |
//| State - structure stores algorithm state |
//| BndL - lower bounds, array[K]. |
//| If some (all) variables are unbounded, you may |
//| specify very small number or -INF (latter is |
//| recommended because it will allow solver to use |
//| better algorithm). |
//| BndU - upper bounds, array[K]. |
//| If some (all) variables are unbounded, you may |
//| specify very large number or +INF (latter is |
//| recommended because it will allow solver to use |
//| better algorithm). |
//| NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case |
//| I-th variable will be "frozen" at X[i]=BndL[i]=BndU[i]. |
//| NOTE 2: unlike other constrained optimization algorithms, this |
//| solver has following useful properties: |
//| * bound constraints are always satisfied exactly |
//| * function is evaluated only INSIDE area specified by bound |
//| constraints |
//+------------------------------------------------------------------+
void CAlglib::LSFitSetBC(CLSFitStateShell &state,double &bndl[],
                         double &bndu[])
  {
//--- forward the lower/upper bound constraints to the computational core
   CLSFit::LSFitSetBC(state.GetInnerObj(),bndl,bndu);
  }
//+------------------------------------------------------------------+
//| This function provides reverse communication interface |
//| Reverse communication interface is not documented or recommended |
//| to use. |
//| See below for functions which provide better documented API |
//+------------------------------------------------------------------+
bool CAlglib::LSFitIteration(CLSFitStateShell &state)
  {
//--- advance the reverse-communication fitter by one step; returns true
//--- while the algorithm still requests callbacks from the caller
   return(CLSFit::LSFitIteration(state.GetInnerObj()));
  }
//+------------------------------------------------------------------+
//| This family of functions is used to launch iterations of         |
//| nonlinear fitter |
//| These functions accept following parameters: |
//| func - callback which calculates function (or merit |
//| function) value func at given point x |
//| grad - callback which calculates function (or merit |
//| function) value func and gradient grad at given |
//| point x |
//| hess - callback which calculates function (or merit |
//| function) value func, gradient grad and Hessian |
//| hess at given point x |
//| rep - optional callback which is called after each |
//| iteration can be null |
//| obj - optional object which is passed to |
//| func/grad/hess/jac/rep can be null |
//| NOTES: |
//| 1. this algorithm is somewhat unusual because it works with |
//| parameterized function f(C,X), where X is a function argument |
//| (we have many points which are characterized by different |
//| argument values), and C is a parameter to fit. |
//| For example, if we want to do linear fit by |
//| f(c0,c1,x)=c0*x+c1, then x will be argument, and {c0,c1} will |
//| be parameters. |
//| It is important to understand that this algorithm finds |
//| minimum in the space of function PARAMETERS (not arguments), |
//| so it needs derivatives of f() with respect to C, not X. |
//| In the example above it will need f=c0*x+c1 and |
//| {df/dc0,df/dc1}={x,1} instead of {df/dx}={c0}. |
//| 2. Callback functions accept C as the first parameter, and X as |
//| the second |
//| 3. If state was created with LSFitCreateFG(), algorithm needs |
//|    just function and its gradient, but if state was created with |
//|    LSFitCreateFGH(), algorithm will need function, gradient and  |
//|    Hessian.                                                      |
//|    According to the said above, there are several versions of    |
//| this function, which accept different sets of callbacks. |
//| This flexibility opens way to subtle errors - you may create |
//| state with LSFitCreateFGH() (optimization using Hessian), but |
//| call function which does not accept Hessian. So when algorithm|
//| will request Hessian, there will be no callback to call. In |
//| this case exception will be thrown. |
//| Be careful to avoid such errors because there is no way to |
//| find them at compile time - you can see them at runtime only. |
//+------------------------------------------------------------------+
void CAlglib::LSFitFit(CLSFitStateShell &state,CNDimensional_PFunc &func,
                       CNDimensional_Rep &rep,bool rep_status,CObject &obj)
  {
//--- reverse-communication loop: keep iterating while the optimizer
//--- requests something from the caller
   while(CAlglib::LSFitIteration(state))
     {
      if(state.GetNeedF())
        {
         //--- optimizer wants f(c,x) at the current point
         func.PFunc(state.GetInnerObj().m_c,state.GetInnerObj().m_x,state.GetInnerObj().m_f,obj);
        }
      else
         if(state.GetInnerObj().m_xupdated)
           {
            //--- progress report (only if the user asked for reports)
            if(rep_status)
               rep.Rep(state.GetInnerObj().m_c,state.GetInnerObj().m_f,obj);
           }
         else
           {
            //--- a request this overload cannot satisfy (e.g. derivatives)
            Print("ALGLIB: error in 'lsfitfit' (some derivatives were not provided?)");
            CAp::exception_happened=true;
            break;
           }
     }
  }
//+------------------------------------------------------------------+
//| This family of functions is used to launch iterations of         |
//| nonlinear fitter |
//| These functions accept following parameters: |
//| func - callback which calculates function (or merit |
//| function) value func at given point x |
//| grad - callback which calculates function (or merit |
//| function) value func and gradient grad at given |
//| point x |
//| hess - callback which calculates function (or merit |
//| function) value func, gradient grad and Hessian |
//| hess at given point x |
//| rep - optional callback which is called after each |
//| iteration can be null |
//| obj - optional object which is passed to |
//| func/grad/hess/jac/rep can be null |
//| NOTES: |
//| 1. this algorithm is somewhat unusual because it works with |
//| parameterized function f(C,X), where X is a function argument |
//| (we have many points which are characterized by different |
//| argument values), and C is a parameter to fit. |
//| For example, if we want to do linear fit by |
//| f(c0,c1,x)=c0*x+c1, then x will be argument, and {c0,c1} will |
//| be parameters. |
//| It is important to understand that this algorithm finds |
//| minimum in the space of function PARAMETERS (not arguments), |
//| so it needs derivatives of f() with respect to C, not X. |
//| In the example above it will need f=c0*x+c1 and |
//| {df/dc0,df/dc1}={x,1} instead of {df/dx}={c0}. |
//| 2. Callback functions accept C as the first parameter, and X as |
//| the second |
//| 3. If state was created with LSFitCreateFG(), algorithm needs |
//|    just function and its gradient, but if state was created with |
//|    LSFitCreateFGH(), algorithm will need function, gradient and  |
//|    Hessian.                                                      |
//|    According to the said above, there are several versions of    |
//| this function, which accept different sets of callbacks. |
//| This flexibility opens way to subtle errors - you may create |
//| state with LSFitCreateFGH() (optimization using Hessian), but |
//| call function which does not accept Hessian. So when algorithm|
//| will request Hessian, there will be no callback to call. In |
//| this case exception will be thrown. |
//| Be careful to avoid such errors because there is no way to |
//| find them at compile time - you can see them at runtime only. |
//+------------------------------------------------------------------+
void CAlglib::LSFitFit(CLSFitStateShell &state,CNDimensional_PFunc &func,
                       CNDimensional_PGrad &grad,CNDimensional_Rep &rep,
                       bool rep_status,CObject &obj)
  {
//--- reverse-communication loop: keep iterating while the optimizer
//--- requests something from the caller
   while(CAlglib::LSFitIteration(state))
     {
      if(state.GetNeedF())
        {
         //--- optimizer wants f(c,x) at the current point
         func.PFunc(state.GetInnerObj().m_c,state.GetInnerObj().m_x,state.GetInnerObj().m_f,obj);
        }
      else
         if(state.GetNeedFG())
           {
            //--- optimizer wants f(c,x) and its gradient w.r.t. C
            grad.PGrad(state.GetInnerObj().m_c,state.GetInnerObj().m_x,state.GetInnerObj().m_f,state.GetInnerObj().m_g,obj);
           }
         else
            if(state.GetInnerObj().m_xupdated)
              {
               //--- progress report (only if the user asked for reports)
               if(rep_status)
                  rep.Rep(state.GetInnerObj().m_c,state.GetInnerObj().m_f,obj);
              }
            else
              {
               //--- a request this overload cannot satisfy (e.g. Hessian)
               Print("ALGLIB: error in 'lsfitfit' (some derivatives were not provided?)");
               CAp::exception_happened=true;
               break;
              }
     }
  }
//+------------------------------------------------------------------+
//| This family of functions is used to launch iterations of         |
//| nonlinear fitter |
//| These functions accept following parameters: |
//| func - callback which calculates function (or merit |
//| function) value func at given point x |
//| grad - callback which calculates function (or merit |
//| function) value func and gradient grad at given |
//| point x |
//| hess - callback which calculates function (or merit |
//| function) value func, gradient grad and Hessian |
//| hess at given point x |
//| rep - optional callback which is called after each |
//| iteration can be null |
//| obj - optional object which is passed to |
//| func/grad/hess/jac/rep can be null |
//| NOTES: |
//| 1. this algorithm is somewhat unusual because it works with |
//| parameterized function f(C,X), where X is a function argument |
//| (we have many points which are characterized by different |
//| argument values), and C is a parameter to fit. |
//| For example, if we want to do linear fit by |
//| f(c0,c1,x)=c0*x+c1, then x will be argument, and {c0,c1} will |
//| be parameters. |
//| It is important to understand that this algorithm finds |
//| minimum in the space of function PARAMETERS (not arguments), |
//| so it needs derivatives of f() with respect to C, not X. |
//| In the example above it will need f=c0*x+c1 and |
//| {df/dc0,df/dc1}={x,1} instead of {df/dx}={c0}. |
//| 2. Callback functions accept C as the first parameter, and X as |
//| the second |
//| 3. If state was created with LSFitCreateFG(), algorithm needs |
//|    just function and its gradient, but if state was created with |
//|    LSFitCreateFGH(), algorithm will need function, gradient and  |
//|    Hessian.                                                      |
//|    According to the said above, there are several versions of    |
//| this function, which accept different sets of callbacks. |
//| This flexibility opens way to subtle errors - you may create |
//| state with LSFitCreateFGH() (optimization using Hessian), but |
//| call function which does not accept Hessian. So when algorithm|
//| will request Hessian, there will be no callback to call. In |
//| this case exception will be thrown. |
//| Be careful to avoid such errors because there is no way to |
//| find them at compile time - you can see them at runtime only. |
//+------------------------------------------------------------------+
void CAlglib::LSFitFit(CLSFitStateShell &state,CNDimensional_PFunc &func,
                       CNDimensional_PGrad &grad,CNDimensional_PHess &hess,
                       CNDimensional_Rep &rep,bool rep_status,CObject &obj)
  {
//--- reverse-communication loop: keep iterating while the optimizer
//--- requests something from the caller
   while(CAlglib::LSFitIteration(state))
     {
      if(state.GetNeedF())
        {
         //--- optimizer wants f(c,x) at the current point
         func.PFunc(state.GetInnerObj().m_c,state.GetInnerObj().m_x,state.GetInnerObj().m_f,obj);
        }
      else
         if(state.GetNeedFG())
           {
            //--- optimizer wants f(c,x) and its gradient w.r.t. C
            grad.PGrad(state.GetInnerObj().m_c,state.GetInnerObj().m_x,state.GetInnerObj().m_f,state.GetInnerObj().m_g,obj);
           }
         else
            if(state.GetNeedFGH())
              {
               //--- optimizer wants f(c,x), gradient and Hessian w.r.t. C
               hess.PHess(state.GetInnerObj().m_c,state.GetInnerObj().m_x,state.GetInnerObj().m_f,state.GetInnerObj().m_g,state.GetInnerObj().m_h,obj);
              }
            else
               if(state.GetInnerObj().m_xupdated)
                 {
                  //--- progress report (only if the user asked for reports)
                  if(rep_status)
                     rep.Rep(state.GetInnerObj().m_c,state.GetInnerObj().m_f,obj);
                 }
               else
                 {
                  //--- unexpected request: abort the fitting loop
                  Print("ALGLIB: error in 'lsfitfit' (some derivatives were not provided?)");
                  CAp::exception_happened=true;
                  break;
                 }
     }
  }
//+------------------------------------------------------------------+
//| This function calculates value of four-parameter logistic (4PL) |
//| model at specified point X. 4PL model has following form: |
//| F(x|A,B,C,D) = D+(A-D)/(1+Power(x/C,B)) |
//| INPUT PARAMETERS: |
//| X - current point, X>=0: |
//| * zero X is correctly handled even for B<=0 |
//| * negative X results in exception. |
//| A, B, C, D - parameters of 4PL model: |
//| * A is unconstrained |
//| * B is unconstrained; zero or negative values |
//| are handled correctly. |
//| * C>0, non-positive value results in exception |
//| * D is unconstrained |
//| RESULT: |
//| model value at X |
//| NOTE: if B=0, denominator is assumed to be equal to 2.0 even for|
//| zero X (strictly speaking, 0^0 is undefined). |
//| NOTE: this function also throws exception if all input parameters|
//| are correct, but overflow was detected during calculations.|
//| NOTE: this function performs a lot of checks; if you need really |
//| high performance, consider evaluating model yourself, |
//| without checking for degenerate cases. |
//+------------------------------------------------------------------+
double CAlglib::LogisticCalc4(double x,double a,double b,double c,
                              double d)
  {
//--- evaluate the 4PL model F(x|A,B,C,D)=D+(A-D)/(1+Power(x/C,B))
//--- in the computational core (which also validates the arguments)
   return(CLSFit::LogisticCalc4(x,a,b,c,d));
  }
//+------------------------------------------------------------------+
//| This function calculates value of five-parameter logistic (5PL) |
//| model at specified point X. 5PL model has following form: |
//| F(x|A,B,C,D,G) = D+(A-D)/Power(1+Power(x/C,B),G) |
//| INPUT PARAMETERS: |
//| X - current point, X>=0: |
//| * zero X is correctly handled even for B<=0 |
//| * negative X results in exception. |
//| A, B, C, D, G- parameters of 5PL model: |
//| * A is unconstrained |
//| * B is unconstrained; zero or negative values |
//| are handled correctly. |
//| * C>0, non-positive value results in exception |
//| * D is unconstrained |
//| * G>0, non-positive value results in exception |
//| RESULT: |
//| model value at X |
//| NOTE: if B=0, denominator is assumed to be equal to Power(2.0,G) |
//| even for zero X (strictly speaking, 0^0 is undefined). |
//| NOTE: this function also throws exception if all input parameters|
//| are correct, but overflow was detected during calculations.|
//| NOTE: this function performs a lot of checks; if you need really |
//| high performance, consider evaluating model yourself, |
//| without checking for degenerate cases. |
//+------------------------------------------------------------------+
double CAlglib::LogisticCalc5(double x,double a,double b,double c,
                              double d,double g)
  {
//--- evaluate the 5PL model F(x|A,B,C,D,G)=D+(A-D)/Power(1+Power(x/C,B),G)
//--- in the computational core (which also validates the arguments)
   return(CLSFit::LogisticCalc5(x,a,b,c,d,g));
  }
//+------------------------------------------------------------------+
//| This function fits four-parameter logistic (4PL) model to data |
//| provided by user. 4PL model has following form: |
//| F(x|A,B,C,D) = D+(A-D)/(1+Power(x/C,B)) |
//| Here: |
//| * A, D - unconstrained (see LogisticFit4EC() for |
//| constrained 4PL) |
//| * B>=0 |
//| * C>0 |
//| IMPORTANT: output of this function is constrained in such way |
//| that B>0. Because 4PL model is symmetric with respect|
//| to B, there is no need to explore B<0. Constraining |
//| B makes algorithm easier to stabilize and debug. |
//| Users who for some reason prefer to work with |
//| negative B's should transform output themselves (swap |
//| A and D, replace B by -B). |
//| 4PL fitting is implemented as follows: |
//| * we perform small number of restarts from random locations |
//| which helps to solve problem of bad local extrema. Locations |
//| are only partially random - we use input data to determine |
//| good initial guess, but we include controlled amount of |
//| randomness. |
//| * we perform Levenberg-Marquardt fitting with very tight |
//| constraints on parameters B and C - it allows us to find good|
//| initial guess for the second stage without risk of running|
//| into "flat spot". |
//| * second Levenberg-Marquardt round is performed without |
//| excessive constraints. Results from the previous round are |
//| used as initial guess. |
//| * after fitting is done, we compare results with best values |
//| found so far, rewrite "best solution" if needed, and move to |
//| next random location. |
//| Overall algorithm is very stable and is not prone to bad local |
//| extrema. Furthermore, it automatically scales when input data |
//| have very large or very small range. |
//| INPUT PARAMETERS: |
//| X - array[N], stores X-values. |
//| MUST include only non-negative numbers (but may |
//| include zero values). Can be unsorted. |
//| Y - array[N], values to fit. |
//| N - number of points. If N is less than length of |
//| X/Y, only leading N elements are used. |
//| OUTPUT PARAMETERS: |
//| A, B, C, D- parameters of 4PL model |
//| Rep - fitting report. This structure has many fields, |
//| but ONLY ONES LISTED BELOW ARE SET: |
//| * Rep.IterationsCount - number of iterations |
//| performed |
//| * Rep.RMSError - root-mean-square error |
//| * Rep.AvgError - average absolute error |
//| * Rep.AvgRelError - average relative error |
//| (calculated for non-zero |
//| Y-values) |
//| * Rep.MaxError - maximum absolute error |
//| * Rep.R2 - coefficient of determination,|
//| R-squared. This coefficient|
//| is calculated as |
//| R2=1-RSS/TSS (in case of |
//| nonlinear regression there|
//| are multiple ways to |
//| define R2, each of them |
//| giving different results). |
//| NOTE: for stability reasons the B parameter is restricted by |
//| [1/1000,1000] range. It prevents algorithm from making |
//| trial steps deep into the area of bad parameters. |
//| NOTE: after you obtained coefficients, you can evaluate model |
//| with LogisticCalc4() function. |
//| NOTE: if you need better control over fitting process than |
//| provided by this function, you may use LogisticFit45X(). |
//| NOTE: step is automatically scaled according to scale of |
//| parameters being fitted before we compare its length with |
//| EpsX. Thus, this function can be used to fit data with |
//| very small or very large values without changing EpsX. |
//+------------------------------------------------------------------+
void CAlglib::LogisticFit4(CRowDouble &x,CRowDouble &y,int n,
                           double &a,double &b,double &c,
                           double &d,CLSFitReportShell &rep)
  {
//--- thin wrapper: delegate 4PL fitting to the static CLSFit
//--- implementation, unwrapping the report shell for it
   CLSFit::LogisticFit4(x,y,n,a,b,c,d,rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| This function fits four-parameter logistic (4PL) model to data |
//| provided by user. 4PL model has following form: |
//| F(x|A,B,C,D) = D+(A-D)/(1+Power(x/C,B)) |
//| Here: |
//| * A, D - unconstrained (see LogisticFit4EC() for |
//| constrained 4PL) |
//| * B>=0 |
//| * C>0 |
//| IMPORTANT: output of this function is constrained in such way |
//| that B>0. Because 4PL model is symmetric with respect|
//| to B, there is no need to explore B<0. Constraining |
//| B makes algorithm easier to stabilize and debug. |
//| Users who for some reason prefer to work with |
//| negative B's should transform output themselves (swap |
//| A and D, replace B by -B). |
//| 4PL fitting is implemented as follows: |
//| * we perform small number of restarts from random locations |
//| which helps to solve problem of bad local extrema. Locations |
//| are only partially random - we use input data to determine |
//| good initial guess, but we include controlled amount of |
//| randomness. |
//| * we perform Levenberg-Marquardt fitting with very tight |
//| constraints on parameters B and C - it allows us to find good|
//| initial guess for the second stage without risk of running|
//| into "flat spot". |
//| * second Levenberg-Marquardt round is performed without |
//| excessive constraints. Results from the previous round are |
//| used as initial guess. |
//| * after fitting is done, we compare results with best values |
//| found so far, rewrite "best solution" if needed, and move to |
//| next random location. |
//| Overall algorithm is very stable and is not prone to bad local |
//| extrema. Furthermore, it automatically scales when input data |
//| have very large or very small range. |
//| INPUT PARAMETERS: |
//| X - array[N], stores X-values. |
//| MUST include only non-negative numbers (but may |
//| include zero values). Can be unsorted. |
//| Y - array[N], values to fit. |
//| N - number of points. If N is less than length of |
//| X/Y, only leading N elements are used. |
//| CnstrLeft- optional equality constraint for model value at the|
//| left boundary (at X=0). Specify NAN (Not-a-Number) |
//| if you do not need constraint on the model value |
//| at X=0. See below, section "EQUALITY CONSTRAINTS" |
//| for more information about constraints. |
//| CnstrRight- optional equality constraint for model value at |
//| X=infinity. Specify NAN (Not-a-Number) if you do |
//| not need constraint on the model value. See below,|
//| section "EQUALITY CONSTRAINTS" for more |
//| information about constraints. |
//| OUTPUT PARAMETERS:                                               |
//| A, B, C, D- parameters of 4PL model |
//| Rep - fitting report. This structure has many fields, |
//| but ONLY ONES LISTED BELOW ARE SET: |
//| * Rep.IterationsCount - number of iterations |
//| performed |
//| * Rep.RMSError - root-mean-square error |
//| * Rep.AvgError - average absolute error |
//| * Rep.AvgRelError - average relative error |
//| (calculated for non-zero |
//| Y-values) |
//| * Rep.MaxError - maximum absolute error |
//| * Rep.R2 - coefficient of determination,|
//| R-squared. This coefficient|
//| is calculated as |
//| R2=1-RSS/TSS (in case of |
//| nonlinear regression there|
//| are multiple ways to |
//| define R2, each of them |
//| giving different results). |
//| NOTE: for stability reasons the B parameter is restricted by |
//| [1/1000,1000] range. It prevents algorithm from making |
//| trial steps deep into the area of bad parameters. |
//| NOTE: after you obtained coefficients, you can evaluate model |
//| with LogisticCalc4() function. |
//| NOTE: if you need better control over fitting process than |
//| provided by this function, you may use LogisticFit45X(). |
//| NOTE: step is automatically scaled according to scale of |
//| parameters being fitted before we compare its length with |
//| EpsX. Thus, this function can be used to fit data with |
//| very small or very large values without changing EpsX. |
//| EQUALITY CONSTRAINTS ON PARAMETERS |
//| 4PL/5PL solver supports equality constraints on model values at |
//| the left boundary (X=0) and right boundary (X=infinity). These|
//| constraints are completely optional and you can specify both of |
//| them, only one - or no constraints at all. |
//| Parameter CnstrLeft contains left constraint (or NAN for |
//| unconstrained fitting), and CnstrRight contains right one. For |
//| 4PL, left constraint ALWAYS corresponds to parameter A, and |
//| right one is ALWAYS constraint on D. That's because 4PL model |
//| is normalized in such way that B>=0. |
//+------------------------------------------------------------------+
void CAlglib::LogisticFit4ec(CRowDouble &x,CRowDouble &y,int n,
                             double cnstrleft,double cnstrright,
                             double &a,double &b,double &c,
                             double &d,CLSFitReportShell &rep)
  {
//--- thin wrapper: delegate equality-constrained 4PL fitting to the
//--- static CLSFit implementation, unwrapping the report shell for it
   CLSFit::LogisticFit4ec(x,y,n,cnstrleft,cnstrright,a,b,c,d,rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| This function fits five-parameter logistic (5PL) model to data |
//| provided by user. 5PL model has following form: |
//| F(x|A,B,C,D,G) = D+(A-D)/Power(1+Power(x/C,B),G) |
//| Here: |
//| * A, D - unconstrained |
//| * B - unconstrained |
//| * C>0 |
//| * G>0 |
//| IMPORTANT: unlike in 4PL fitting, output of this function |
//| is NOT constrained in such way that B is guaranteed |
//| to be positive. Furthermore, unlike 4PL, 5PL model |
//| is NOT symmetric with respect to B, so you can NOT |
//| transform model to equivalent one, with B having |
//| desired sign (>0 or <0). |
//| 5PL fitting is implemented as follows: |
//| * we perform small number of restarts from random locations |
//| which helps to solve problem of bad local extrema. Locations |
//| are only partially random - we use input data to determine |
//| good initial guess, but we include controlled amount of |
//| randomness. |
//| * we perform Levenberg - Marquardt fitting with very tight |
//| constraints on parameters B and C - it allows us to find good|
//| initial guess for the second stage without risk of running|
//| into "flat spot". Parameter G is fixed at G = 1. |
//| * second Levenberg - Marquardt round is performed without |
//| excessive constraints on B and C, but with G still equal to 1|
//| Results from the previous round are used as initial guess. |
//| * third Levenberg - Marquardt round relaxes constraints on G |
//| and tries two different models - one with B > 0 and one |
//| with B < 0. |
//| * after fitting is done, we compare results with best values |
//| found so far, rewrite "best solution" if needed, and move to |
//| next random location. |
//| Overall algorithm is very stable and is not prone to bad local |
//| extrema. Furthermore, it automatically scales when input data |
//| have very large or very small range. |
//| INPUT PARAMETERS: |
//| X - array[N], stores X - values. |
//| MUST include only non - negative numbers(but may|
//| include zero values). Can be unsorted. |
//| Y - array[N], values to fit. |
//| N - number of points. If N is less than length of |
//| X / Y, only leading N elements are used. |
//| OUTPUT PARAMETERS: |
//| A, B, C, D, G - parameters of 5PL model |
//| Rep - fitting report. This structure has many fields, |
//| but ONLY ONES LISTED BELOW ARE SET: |
//| * Rep.IterationsCount - number of iterations performed|
//| * Rep.RMSError - root - mean - square error |
//| * Rep.AvgError - average absolute error |
//| * Rep.AvgRelError - average relative error |
//| (calculated for non - zero |
//| Y - values) |
//| * Rep.MaxError - maximum absolute error |
//| * Rep.R2 - coefficient of determination, |
//| R - squared. This coefficient|
//| is calculated as |
//| R2 = 1 - RSS / TSS (in case |
//| of nonlinear regression there|
//| are multiple ways to define|
//| R2, each of them giving |
//| different results). |
//| NOTE: for better stability B parameter is restricted by |
//| [+-1/1000, +-1000] range, and G is restricted by [1/10, 10]|
//| range. It prevents algorithm from making trial steps deep |
//| into the area of bad parameters. |
//| NOTE: after you obtained coefficients, you can evaluate |
//| model with LogisticCalc5() function. |
//| NOTE: if you need better control over fitting process than |
//| provided by this function, you may use LogisticFit45X(). |
//| NOTE: step is automatically scaled according to scale of |
//| parameters being fitted before we compare its length with |
//| EpsX. Thus, this function can be used to fit data with |
//| very small or very large values without changing EpsX. |
//+------------------------------------------------------------------+
void CAlglib::LogisticFit5(CRowDouble &x,CRowDouble &y,int n,
                           double &a,double &b,double &c,double &d,
                           double &g,CLSFitReportShell &rep)
  {
//--- thin wrapper: delegate 5PL fitting to the static CLSFit
//--- implementation, unwrapping the report shell for it
   CLSFit::LogisticFit5(x,y,n,a,b,c,d,g,rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| This function fits five-parameter logistic (5PL) model to data |
//| provided by user. 5PL model has following form: |
//| F(x|A,B,C,D,G) = D+(A-D)/Power(1+Power(x/C,B),G) |
//| Here: |
//| * A, D - unconstrained |
//| * B - unconstrained |
//| * C>0 |
//| * G>0 |
//| IMPORTANT: unlike in 4PL fitting, output of this function |
//| is NOT constrained in such way that B is guaranteed |
//| to be positive. Furthermore, unlike 4PL, 5PL model |
//| is NOT symmetric with respect to B, so you can NOT |
//| transform model to equivalent one, with B having |
//| desired sign (>0 or <0). |
//| 5PL fitting is implemented as follows: |
//| * we perform small number of restarts from random locations |
//| which helps to solve problem of bad local extrema. Locations |
//| are only partially random - we use input data to determine |
//| good initial guess, but we include controlled amount of |
//| randomness. |
//| * we perform Levenberg - Marquardt fitting with very tight |
//| constraints on parameters B and C - it allows us to find good|
//| initial guess for the second stage without risk of running|
//| into "flat spot". Parameter G is fixed at G = 1. |
//| * second Levenberg - Marquardt round is performed without |
//| excessive constraints on B and C, but with G still equal to 1|
//| Results from the previous round are used as initial guess. |
//| * third Levenberg - Marquardt round relaxes constraints on G |
//| and tries two different models - one with B > 0 and one |
//| with B < 0. |
//| * after fitting is done, we compare results with best values |
//| found so far, rewrite "best solution" if needed, and move to |
//| next random location. |
//| Overall algorithm is very stable and is not prone to bad local |
//| extrema. Furthermore, it automatically scales when input data |
//| have very large or very small range. |
//| INPUT PARAMETERS: |
//| X - array[N], stores X - values. |
//| MUST include only non - negative numbers(but may|
//| include zero values). Can be unsorted. |
//| Y - array[N], values to fit. |
//| N - number of points. If N is less than length of |
//| X / Y, only leading N elements are used. |
//| CnstrLeft - optional equality constraint for model value at |
//| the left boundary(at X = 0). |
//| Specify NAN(Not - a - Number) if you do not |
//| need constraint on the model value at X = 0. |
//| See below, section "EQUALITY CONSTRAINTS" |
//| for more information about constraints. |
//| CnstrRight - optional equality constraint for model value at |
//| X = infinity. |
//| Specify NAN(Not - a - Number) if you do not |
//|                  need constraint on the model value at X=infinity|
//| See below, section "EQUALITY CONSTRAINTS" |
//| for more information about constraints. |
//| OUTPUT PARAMETERS: |
//| A, B, C, D, G - parameters of 5PL model |
//| Rep - fitting report. This structure has many fields, |
//| but ONLY ONES LISTED BELOW ARE SET: |
//| * Rep.IterationsCount - number of iterations performed|
//| * Rep.RMSError - root - mean - square error |
//| * Rep.AvgError - average absolute error |
//| * Rep.AvgRelError - average relative error |
//| (calculated for non - zero |
//| Y - values) |
//| * Rep.MaxError - maximum absolute error |
//| * Rep.R2 - coefficient of determination, |
//| R - squared. This coefficient|
//| is calculated as |
//| R2 = 1 - RSS / TSS (in case |
//| of nonlinear regression there|
//| are multiple ways to define|
//| R2, each of them giving |
//| different results). |
//| NOTE: for better stability B parameter is restricted by |
//| [+-1/1000, +-1000] range, and G is restricted by [1/10, 10]|
//| range. It prevents algorithm from making trial steps deep |
//| into the area of bad parameters. |
//| NOTE: after you obtained coefficients, you can evaluate |
//| model with LogisticCalc5() function. |
//| NOTE: if you need better control over fitting process than |
//| provided by this function, you may use LogisticFit45X(). |
//| NOTE: step is automatically scaled according to scale of |
//| parameters being fitted before we compare its length with |
//| EpsX. Thus, this function can be used to fit data with |
//| very small or very large values without changing EpsX. |
//| EQUALITY CONSTRAINTS ON PARAMETERS |
//| 5PL solver supports equality constraints on model values at the|
//| left boundary(X = 0) and right boundary(X = infinity). These |
//| constraints are completely optional and you can specify both of |
//| them, only one - or no constraints at all. |
//| Parameter CnstrLeft contains left constraint (or NAN for |
//| unconstrained fitting), and CnstrRight contains right one. |
//| Unlike 4PL one, 5PL model is NOT symmetric with respect to change|
//| in sign of B. Thus, negative B's are possible, and left |
//| constraint may constrain parameter A(for positive B's) - or |
//| parameter D (for negative B's). Similarly changes meaning of |
//| right constraint. |
//| You do not have to decide what parameter to constrain - algorithm|
//| will automatically determine correct parameters as fitting |
//| progresses. However, question highlighted above is important when|
//| you interpret fitting results. |
//+------------------------------------------------------------------+
void CAlglib::LogisticFit5ec(CRowDouble &x,CRowDouble &y,int n,
                             double cnstrleft,double cnstrright,
                             double &a,double &b,double &c,double &d,
                             double &g,CLSFitReportShell &rep)
  {
//--- thin wrapper: delegate equality-constrained 5PL fitting to the
//--- static CLSFit implementation, unwrapping the report shell for it
   CLSFit::LogisticFit5ec(x,y,n,cnstrleft,cnstrright,a,b,c,d,g,rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| This is "expert" 4PL / 5PL fitting function, which can be used if|
//| you need better control over fitting process than provided by |
//| LogisticFit4() or LogisticFit5(). |
//| This function fits model of the form |
//| F(x|A,B,C,D) = D+(A-D)/(1+Power(x/C,B)) (4PL model) |
//| or |
//| F(x|A,B,C,D,G) = D+(A-D)/Power(1+Power(x/C,B),G)(5PL model) |
//| Here: |
//| * A, D - unconstrained |
//| * B >= 0 for 4PL, unconstrained for 5PL |
//| * C > 0 |
//| * G > 0(if present) |
//| INPUT PARAMETERS: |
//| X - array[N], stores X - values. |
//| MUST include only non-negative numbers(but may |
//| include zero values). Can be unsorted. |
//| Y - array[N], values to fit. |
//| N - number of points. If N is less than length of |
//| X / Y, only leading N elements are used. |
//| CnstrLeft - optional equality constraint for model value at |
//| the left boundary(at X = 0). |
//| Specify NAN (Not-a-Number) if you do not need |
//| constraint on the model value at X = 0. |
//| See below, section "EQUALITY CONSTRAINTS" |
//| for more information about constraints. |
//| CnstrRight - optional equality constraint for model value at |
//| X = infinity. |
//| Specify NAN (Not-a-Number) if you do not need |
//|                  constraint on the model value at X = infinity.  |
//| See below, section "EQUALITY CONSTRAINTS" |
//| for more information about constraints. |
//| Is4PL - whether 4PL or 5PL models are fitted |
//| LambdaV - regularization coefficient, LambdaV >= 0. Set it|
//| to zero unless you know what you are doing. |
//| EpsX - stopping condition(step size), EpsX >= 0. Zero |
//| value means that small step is automatically |
//| chosen. See notes below for more information. |
//| RsCnt - number of repeated restarts from random points.|
//| 4PL/5PL models are prone to problem of bad local|
//| extrema. Utilizing multiple random restarts |
//| allows us to improve algorithm convergence. |
//| RsCnt >= 0. Zero value means that function |
//| automatically choose small amount of restarts |
//| (recommended). |
//| OUTPUT PARAMETERS: |
//| A, B, C, D - parameters of 4PL model |
//| G - parameter of 5PL model; for Is4PL = True, G = 1 |
//| is returned. |
//| Rep - fitting report. This structure has many fields, |
//| but ONLY ONES LISTED BELOW ARE SET: |
//| * Rep.IterationsCount - number of iterations performed|
//| * Rep.RMSError - root - mean - square error |
//| * Rep.AvgError - average absolute error |
//| * Rep.AvgRelError - average relative error |
//| (calculated for non - zero |
//| Y - values) |
//| * Rep.MaxError - maximum absolute error |
//| * Rep.R2 - coefficient of determination, |
//| R - squared. This coefficient|
//| is calculated as |
//| R2 = 1 - RSS / TSS (in case |
//| of nonlinear regression there|
//| are multiple ways to define|
//| R2, each of them giving |
//| different results). |
//| NOTE: for better stability B parameter is restricted by |
//| [+-1/1000, +-1000] range, and G is restricted by [1/10, 10]|
//| range. It prevents algorithm from making trial steps deep |
//| into the area of bad parameters. |
//| NOTE: after you obtained coefficients, you can evaluate |
//| model with LogisticCalc5() function. |
//| NOTE: if you need better control over fitting process than |
//| provided by this function, you may use LogisticFit45X(). |
//| NOTE: step is automatically scaled according to scale of |
//| parameters being fitted before we compare its length with |
//| EpsX. Thus, this function can be used to fit data with |
//| very small or very large values without changing EpsX. |
//| EQUALITY CONSTRAINTS ON PARAMETERS |
//| 4PL/5PL solver supports equality constraints on model values at |
//| the left boundary(X = 0) and right boundary(X = infinity). These |
//| constraints are completely optional and you can specify both of |
//| them, only one - or no constraints at all. |
//| Parameter CnstrLeft contains left constraint (or NAN for |
//| unconstrained fitting), and CnstrRight contains right one. For |
//| 4PL, left constraint ALWAYS corresponds to parameter A, and right|
//| one is ALWAYS constraint on D. That's because 4PL model is |
//| normalized in such way that B>=0. |
//| For 5PL model things are different. Unlike 4PL one, 5PL model is |
//| NOT symmetric with respect to change in sign of B. Thus, |
//| negative B's are possible, and left constraint may constrain |
//| parameter A(for positive B's) - or parameter D(for negative B's).|
//| Similarly changes meaning of right constraint. |
//| You do not have to decide what parameter to constrain - algorithm|
//| will automatically determine correct parameters as fitting |
//| progresses. However, question highlighted above is important when|
//| you interpret fitting results. |
//+------------------------------------------------------------------+
void CAlglib::LogisticFit45x(CRowDouble &x,CRowDouble &y,int n,
                             double cnstrleft,double cnstrright,
                             bool is4pl,double lambdav,double epsx,
                             int rscnt,double &a,double &b,double &c,
                             double &d,double &g,CLSFitReportShell &rep)
  {
//--- thin wrapper: delegate "expert" 4PL/5PL fitting to the static
//--- CLSFit implementation, unwrapping the report shell for it
   CLSFit::LogisticFit45x(x,y,n,cnstrleft,cnstrright,is4pl,lambdav,epsx,rscnt,a,b,c,d,g,rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Nonlinear least squares fitting results. |
//| Called after return from LSFitFit(). |
//| INPUT PARAMETERS: |
//| State - algorithm state |
//| OUTPUT PARAMETERS: |
//|     Info    -   completion code:                                 |
//| * 1 relative function improvement is no |
//| more than EpsF. |
//| * 2 relative step is no more than EpsX. |
//| * 4 gradient norm is no more than EpsG |
//| * 5 MaxIts steps was taken |
//| * 7 stopping conditions are too |
//| stringent, further improvement is |
//| impossible |
//| C - array[0..K-1], solution |
//| Rep - optimization report. Following fields are set: |
//|                  * Rep.TerminationType completion code:          |
//| * RMSError rms error on the (X,Y). |
//| * AvgError average error on the (X,Y). |
//| * AvgRelError average relative error on the|
//| non-zero Y |
//| * MaxError maximum error |
//| NON-WEIGHTED ERRORS ARE |
//| CALCULATED |
//| * WRMSError weighted rms error on the |
//| (X,Y). |
//+------------------------------------------------------------------+
void CAlglib::LSFitResults(CLSFitStateShell &state,int &info,
                           double &c[],CLSFitReportShell &rep)
  {
//--- initialization of the output reference before the call
   info=0;
//--- function call: unwrap the state/report shells and delegate
//--- result extraction to the static CLSFit implementation
   CLSFit::LSFitResults(state.GetInnerObj(),info,c,rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Fits least squares (LS) circle (or NX-dimensional sphere) to data|
//| (a set of points in NX-dimensional space). |
//| Least squares circle minimizes sum of squared deviations between |
//| distances from points to the center and some "candidate" radius,|
//| which is also fitted to the data. |
//| INPUT PARAMETERS: |
//| XY - array[NPoints,NX] (or larger), contains dataset. |
//| One row = one point in NX-dimensional space. |
//| NPoints - dataset size, NPoints>0 |
//| NX - space dimensionality, NX>0(1, 2, 3, 4, 5 and so on)|
//| OUTPUT PARAMETERS: |
//| CX - central point for a sphere |
//| R - radius |
//+------------------------------------------------------------------+
void CAlglib::FitSphereLS(CMatrixDouble &xy,int npoints,int nx,
                          CRowDouble &cx,double &r)
  {
//--- initialization of the output radius before the call
   r=0;
//--- function call: delegate least-squares sphere fitting to CFitSphere
   CFitSphere::FitSphereLS(xy,npoints,nx,cx,r);
  }
//+------------------------------------------------------------------+
//| Fits minimum circumscribed (MC) circle (or NX-dimensional sphere)|
//| to data (a set of points in NX-dimensional space). |
//| INPUT PARAMETERS: |
//| XY - array[NPoints,NX] (or larger), contains dataset. |
//| One row = one point in NX-dimensional space. |
//| NPoints - dataset size, NPoints>0 |
//| NX - space dimensionality, NX>0(1, 2, 3, 4, 5 and so on)|
//| OUTPUT PARAMETERS: |
//| CX - central point for a sphere |
//| RHi - radius |
//| NOTE: this function is an easy-to-use wrapper around more |
//| powerful "expert" function FitSphereX(). |
//| This wrapper is optimized for ease of use and stability - at the |
//| cost of somewhat lower performance (we have to use very tight |
//| stopping criteria for inner optimizer because we want to make |
//| sure that it will converge on any dataset). |
//| If you are ready to experiment with settings of "expert" |
//| function, you can achieve ~2-4x speedup over standard |
//| "bulletproof" settings. |
//+------------------------------------------------------------------+
void CAlglib::FitSphereMC(CMatrixDouble &xy,int npoints,int nx,
                          CRowDouble &cx,double &rhi)
  {
//--- thin wrapper: delegate minimum-circumscribed sphere fitting
//--- to the static CFitSphere implementation
   CFitSphere::FitSphereMC(xy,npoints,nx,cx,rhi);
  }
//+------------------------------------------------------------------+
//| Fits maximum inscribed circle (or NX-dimensional sphere) to data |
//| (a set of points in NX-dimensional space). |
//| INPUT PARAMETERS: |
//| XY - array[NPoints,NX] (or larger), contains dataset. |
//| One row = one point in NX-dimensional space. |
//| NPoints - dataset size, NPoints>0 |
//| NX - space dimensionality, NX>0(1, 2, 3, 4, 5 and so on)|
//| OUTPUT PARAMETERS: |
//| CX - central point for a sphere |
//| RLo - radius |
//| NOTE: this function is an easy-to-use wrapper around more |
//| powerful "expert" function FitSphereX(). |
//| This wrapper is optimized for ease of use and stability - at |
//| the cost of somewhat lower performance (we have to use very|
//| tight stopping criteria for inner optimizer because we want to |
//| make sure that it will converge on any dataset). |
//| If you are ready to experiment with settings of "expert" |
//| function, you can achieve ~2-4x speedup over standard |
//| "bulletproof" settings. |
//+------------------------------------------------------------------+
void CAlglib::FitSphereMI(CMatrixDouble &xy,int npoints,int nx,
                          CRowDouble &cx,double &rlo)
  {
//--- thin wrapper: delegate maximum-inscribed sphere fitting
//--- to the static CFitSphere implementation
   CFitSphere::FitSphereMI(xy,npoints,nx,cx,rlo);
  }
//+------------------------------------------------------------------+
//| Fits minimum zone circle (or NX-dimensional sphere) to data (a |
//| set of points in NX-dimensional space). |
//| INPUT PARAMETERS: |
//| XY - array[NPoints,NX] (or larger), contains dataset. |
//| One row = one point in NX-dimensional space. |
//| NPoints - dataset size, NPoints>0 |
//| NX - space dimensionality, NX>0(1, 2, 3, 4, 5 and so on)|
//| OUTPUT PARAMETERS: |
//| CX - central point for a sphere |
//| RLo - radius of inscribed circle |
//|     RHi     - radius of circumscribed circle                     |
//| NOTE: this function is an easy-to-use wrapper around more |
//| powerful "expert" function FitSphereX(). |
//| This wrapper is optimized for ease of use and stability - at |
//| the cost of somewhat lower performance (we have to use very|
//| tight stopping criteria for inner optimizer because we want to |
//| make sure that it will converge on any dataset). |
//| If you are ready to experiment with settings of "expert" |
//| function, you can achieve ~2-4x speedup over standard |
//| "bulletproof" settings. |
//+------------------------------------------------------------------+
void CAlglib::FitSphereMZ(CMatrixDouble &xy,int npoints,int nx,
                          CRowDouble &cx,double &rlo,double &rhi)
  {
//--- thin wrapper: delegate minimum-zone sphere fitting (minimizes
//--- RHi-RLo) to the static CFitSphere implementation
   CFitSphere::FitSphereMZ(xy,npoints,nx,cx,rlo,rhi);
  }
//+------------------------------------------------------------------+
//| Fitting minimum circumscribed, maximum inscribed or minimum zone |
//| circles (or NX-dimensional spheres) to data (a set of points |
//| in NX-dimensional space). |
//| This is expert function which allows to tweak many parameters of |
//| underlying nonlinear solver: |
//| * stopping criteria for inner iterations |
//| * number of outer iterations |
//| * penalty coefficient used to handle nonlinear constraints (we |
//|   convert unconstrained nonsmooth optimization problem involving |
//| max() and/or min() operations to quadratically constrained |
//| smooth one). |
//| You may tweak all these parameters or only some of them, leaving |
//| other ones at their default state - just specify zero value, and |
//| solver will fill it with appropriate default one. |
//| These comments also include some discussion of approach used to |
//| handle such unusual fitting problem, its stability, drawbacks of |
//| alternative methods, and convergence properties. |
//| INPUT PARAMETERS: |
//| XY - array[NPoints,NX] (or larger), contains dataset. |
//| One row = one point in NX-dimensional space. |
//| NPoints - dataset size, NPoints>0 |
//| NX - space dimensionality, NX>0(1, 2, 3, 4, 5 and so on)|
//| ProblemType - used to encode problem type: |
//| * 0 for least squares circle |
//| * 1 for minimum circumscribed circle/sphere fitting|
//| (MC) |
//| * 2 for maximum inscribed circle/sphere fitting(MI)|
//| * 3 for minimum zone circle fitting (difference |
//| between Rhi and Rlo is minimized), denoted as |
//| MZ |
//| EpsX - stopping condition for NLC optimizer: |
//| * must be non-negative |
//| * use 0 to choose default value (1.0E-12 is used by|
//| default) |
//| * you may specify larger values, up to 1.0E-6, if |
//| you want to speed-up solver; NLC solver performs |
//| several preconditioned outer iterations, so final|
//| result typically has precision much better than |
//| EpsX. |
//| AULIts - number of outer iterations performed by NLC |
//| optimizer: |
//| * must be non-negative |
//| * use 0 to choose default value (20 is used by |
//| default) |
//| * you may specify values smaller than 20 if you |
//| want to speed up solver; 10 often results in good|
//| combination of precision and speed; sometimes you|
//| may get good results with just 6 outer iterations|
//| Ignored for ProblemType=0. |
//| Penalty - penalty coefficient for NLC optimizer: |
//| * must be non-negative |
//| * use 0 to choose default value (1.0E6 in current |
//| version) |
//| * it should be really large, 1.0E6...1.0E7 is a |
//| good value to start from; |
//| * generally, default value is good enough |
//| Ignored for ProblemType=0. |
//| OUTPUT PARAMETERS: |
//| CX - central point for a sphere |
//| RLo - radius: |
//| * for ProblemType=2,3, radius of the inscribed |
//| sphere |
//| * for ProblemType=0 - radius of the least squares |
//| sphere |
//| * for ProblemType=1 - zero |
//|     RHi     - radius:                                            |
//| * for ProblemType=1,3, radius of the circumscribed |
//| sphere |
//| * for ProblemType=0 - radius of the least squares |
//| sphere |
//| * for ProblemType=2 - zero |
//| NOTE: ON THE UNIQUENESS OF SOLUTIONS |
//| ALGLIB provides solution to several related circle fitting |
//| problems: MC (minimum circumscribed), MI (maximum inscribed) and |
//| MZ (minimum zone) fitting, LS (least squares) fitting. |
//| It is important to note that among these problems only MC and LS |
//| are convex and have unique solution independently from starting |
//| point. |
//| As for MI, it may (or may not, depending on dataset properties) |
//| have multiple solutions, and it always has one degenerate |
//| solution C=infinity which corresponds to infinitely large radius.|
//| Thus, there are no guarantees that solution to MI returned by |
//| this solver will be the best one (and no one can provide you with|
//| such guarantee because problem is NP-hard). The only guarantee |
//| you have is that this solution is locally optimal, i.e. it can |
//| not be improved by infinitesimally small tweaks in the parameters|
//| It is also possible to "run away" to infinity when started from |
//| bad initial point located outside of point cloud (or when point |
//| cloud does not span entire circumference/surface of the sphere). |
//| Finally, MZ (minimum zone circle) stands somewhere between MC and|
//| MI in stability. It is somewhat regularized by "circumscribed" |
//| term of the merit function; however, solutions to MZ may be |
//| non-unique, and in some unlucky cases it is also possible to "run|
//| away to infinity". |
//| NOTE: ON THE NONLINEARLY CONSTRAINED PROGRAMMING APPROACH |
//| The problem formulation for MC (minimum circumscribed circle; for|
//| the sake of simplicity we omit MZ and MI here) is: |
//| [ [ ]2 ] |
//| min [ max [ XY[i]-C ] ] |
//| C [ i [ ] ] |
//| i.e. it is unconstrained nonsmooth optimization problem of |
//| finding "best" central point, with radius R being unambiguously |
//| determined from C. In order to move away from non-smoothness we |
//| use following reformulation: |
//| [ ] [ ]2 |
//| min [ R ] subject to R>=0, [ XY[i]-C ] <= R^2 |
//| C,R [ ] [ ] |
//| i.e. it becomes smooth quadratically constrained optimization |
//| problem with linear target function. Such problem statement is |
//| 100% equivalent to the original nonsmooth one, but much easier |
//| to approach. We solve it with MinNLC solver provided by ALGLIB. |
//| NOTE: ON INSTABILITY OF SEQUENTIAL LINEARIZATION APPROACH |
//| ALGLIB has nonlinearly constrained solver which proved to be |
//| stable on such problems. However, some authors proposed to |
//| linearize constraints in the vicinity of current approximation |
//| (Ci,Ri) and to get next approximate solution (Ci+1,Ri+1) as |
//| solution to linear programming problem. Obviously, LP problems |
//| are easier than nonlinearly constrained ones. |
//| Indeed, such approach to MC/MI/MZ resulted in ~10-20x increase in|
//| performance (when compared with NLC solver). However, it turned |
//| out that in some cases linearized model fails to predict correct |
//| direction for next step and tells us that we converged to |
//| solution even when we are still 2-4 digits of precision away from|
//| it. |
//| It is important that it is not failure of LP solver - it is |
//| failure of the linear model; even when solved exactly, it fails |
//| to handle subtle nonlinearities which arise near the solution. |
//| We validated it by comparing results returned by ALGLIB linear |
//| solver with that of MATLAB. |
//| In our experiments with linearization: |
//| * MC failed most often, at both realistic and synthetic |
//| datasets |
//| * MI sometimes failed, but sometimes succeeded |
//| * MZ often succeeded; our guess is that presence of two |
//| independent sets of constraints (one set for Rlo and another |
//| one for Rhi) and two terms in the target function (Rlo and |
//| Rhi) regularizes task, so when linear model fails to handle |
//| nonlinearities from Rlo, it uses Rhi as a hint (and vice |
//| versa). |
//| Because linearization approach failed to achieve stable results, |
//| we do not include it in ALGLIB. |
//+------------------------------------------------------------------+
void CAlglib::FitSphereX(CMatrixDouble &xy,int npoints,int nx,
                         int problemtype,double epsx,int aulits,
                         double penalty,CRowDouble &cx,double &rlo,
                         double &rhi)
{
//--- function call: forward to computational core (parameter semantics are
//--- described in the header comment above)
CFitSphere::FitSphereX(xy,npoints,nx,problemtype,epsx,aulits,penalty,cx,rlo,rhi);
}
//+------------------------------------------------------------------+
//| This function builds non-periodic 2-dimensional parametric |
//| spline which starts at (X[0],Y[0]) and ends at (X[N-1],Y[N-1]). |
//| INPUT PARAMETERS: |
//| XY - points, array[0..N-1,0..1]. |
//| XY[I,0:1] corresponds to the Ith point. |
//| Order of points is important! |
//| N - points count, N>=5 for Akima splines, N>=2 for other |
//| types of splines. |
//| ST - spline type: |
//| * 0 Akima spline |
//| * 1 parabolically terminated Catmull-Rom spline |
//| (Tension=0) |
//| * 2 parabolically terminated cubic spline |
//| PT - parameterization type: |
//| * 0 uniform |
//| * 1 chord length |
//| * 2 centripetal |
//| OUTPUT PARAMETERS: |
//| P - parametric spline interpolant |
//| NOTES: |
//|   * this function assumes that all consequent points are         |
//| distinct. I.e. (x0,y0)<>(x1,y1), (x1,y1)<>(x2,y2), |
//| (x2,y2)<>(x3,y3) and so on. However, non-consequent points may |
//| coincide, i.e. we can have (x0,y0) = (x2,y2). |
//+------------------------------------------------------------------+
void CAlglib::PSpline2Build(CMatrixDouble &xy,const int n,const int st,
                            const int pt,CPSpline2InterpolantShell &p)
{
//--- function call: build non-periodic 2D parametric spline into the
//--- inner interpolant held by the shell P
CPSpline::PSpline2Build(xy,n,st,pt,p.GetInnerObj());
}
//+------------------------------------------------------------------+
//| This function builds non-periodic 3-dimensional parametric spline|
//| which starts at (X[0],Y[0],Z[0]) and ends at |
//| (X[N-1],Y[N-1],Z[N-1]). |
//| Same as PSpline2Build() function, but for 3D, so we won't |
//| duplicate its description here. |
//+------------------------------------------------------------------+
void CAlglib::PSpline3Build(CMatrixDouble &xy,const int n,const int st,
                            const int pt,CPSpline3InterpolantShell &p)
{
//--- function call: build non-periodic 3D parametric spline into the
//--- inner interpolant held by the shell P
CPSpline::PSpline3Build(xy,n,st,pt,p.GetInnerObj());
}
//+------------------------------------------------------------------+
//| This function builds periodic 2-dimensional parametric spline |
//| which starts at (X[0],Y[0]), goes through all points to |
//| (X[N-1],Y[N-1]) and then back to (X[0],Y[0]). |
//| INPUT PARAMETERS: |
//| XY - points, array[0..N-1,0..1]. |
//| XY[I,0:1] corresponds to the Ith point. |
//| XY[N-1,0:1] must be different from XY[0,0:1]. |
//| Order of points is important! |
//| N - points count, N>=3 for other types of splines. |
//| ST - spline type: |
//| * 1 Catmull-Rom spline (Tension=0) with cyclic |
//| boundary conditions |
//| * 2 cubic spline with cyclic boundary conditions |
//| PT - parameterization type: |
//| * 0 uniform |
//| * 1 chord length |
//| * 2 centripetal |
//| OUTPUT PARAMETERS: |
//| P - parametric spline interpolant |
//| NOTES: |
//|   * this function assumes that all consequent points are         |
//| distinct. I.e. (x0,y0)<>(x1,y1), (x1,y1)<>(x2,y2), |
//| (x2,y2)<>(x3,y3) and so on. However, non-consequent points may |
//| coincide, i.e. we can have (x0,y0) = (x2,y2). |
//| * last point of sequence is NOT equal to the first point. You |
//| shouldn't make curve "explicitly periodic" by making them |
//| equal. |
//+------------------------------------------------------------------+
void CAlglib::PSpline2BuildPeriodic(CMatrixDouble &xy,const int n,
                                    const int st,const int pt,
                                    CPSpline2InterpolantShell &p)
{
//--- function call: build periodic 2D parametric spline (curve closes
//--- back to the first point) into the shell's inner interpolant
CPSpline::PSpline2BuildPeriodic(xy,n,st,pt,p.GetInnerObj());
}
//+------------------------------------------------------------------+
//| This function builds periodic 3-dimensional parametric spline |
//| which starts at (X[0],Y[0],Z[0]), goes through all points to |
//| (X[N-1],Y[N-1],Z[N-1]) and then back to (X[0],Y[0],Z[0]). |
//| Same as PSpline2Build() function, but for 3D, so we won't |
//| duplicate its description here. |
//+------------------------------------------------------------------+
void CAlglib::PSpline3BuildPeriodic(CMatrixDouble &xy,const int n,
                                    const int st,const int pt,
                                    CPSpline3InterpolantShell &p)
{
//--- function call: build periodic 3D parametric spline (curve closes
//--- back to the first point) into the shell's inner interpolant
CPSpline::PSpline3BuildPeriodic(xy,n,st,pt,p.GetInnerObj());
}
//+------------------------------------------------------------------+
//| This function returns vector of parameter values corresponding to|
//| points. |
//| I.e. for P created from (X[0],Y[0])...(X[N-1],Y[N-1]) and |
//| U=TValues(P) we have |
//| (X[0],Y[0]) = PSpline2Calc(P,U[0]), |
//| (X[1],Y[1]) = PSpline2Calc(P,U[1]), |
//| (X[2],Y[2]) = PSpline2Calc(P,U[2]), |
//| ... |
//| INPUT PARAMETERS: |
//| P - parametric spline interpolant |
//| OUTPUT PARAMETERS: |
//| N - array size |
//| T - array[0..N-1] |
//| NOTES: |
//| * for non-periodic splines U[0]=0, U[0]<U[1]<...<U[N-1], U[N-1]=1|
//| * for periodic splines U[0]=0, U[0]<U[1]<...<U[N-1], U[N-1]<1|
//+------------------------------------------------------------------+
void CAlglib::PSpline2ParameterValues(CPSpline2InterpolantShell &p,
                                      int &n,double &t[])
{
//--- initialization of output size before delegating
n=0;
//--- function call: fills N and T[0..N-1] with parameter values
CPSpline::PSpline2ParameterValues(p.GetInnerObj(),n,t);
}
//+------------------------------------------------------------------+
//| This function returns vector of parameter values corresponding to|
//| points. |
//| Same as PSpline2ParameterValues(), but for 3D. |
//+------------------------------------------------------------------+
void CAlglib::PSpline3ParameterValues(CPSpline3InterpolantShell &p,
                                      int &n,double &t[])
{
//--- initialization of output size before delegating
n=0;
//--- function call: fills N and T[0..N-1] with parameter values
CPSpline::PSpline3ParameterValues(p.GetInnerObj(),n,t);
}
//+------------------------------------------------------------------+
//| This function calculates the value of the parametric spline for a|
//| given value of parameter T |
//| INPUT PARAMETERS: |
//| P - parametric spline interpolant |
//| T - point: |
//| * T in [0,1] corresponds to interval spanned by |
//| points |
//| * for non-periodic splines T<0 (or T>1) correspond to|
//| parts of the curve before the first (after the |
//| last) point |
//| * for periodic splines T<0 (or T>1) are projected |
//| into [0,1] by making T=T-floor(T). |
//| OUTPUT PARAMETERS: |
//| X - X-position |
//| Y - Y-position |
//+------------------------------------------------------------------+
void CAlglib::PSpline2Calc(CPSpline2InterpolantShell &p,const double t,
                           double &x,double &y)
{
//--- reset output references before evaluation
y=0;
x=0;
//--- delegate spline evaluation at parameter T to the inner interpolant
CPSpline::PSpline2Calc(p.GetInnerObj(),t,x,y);
}
//+------------------------------------------------------------------+
//| This function calculates the value of the parametric spline for a|
//| given value of parameter T. |
//| INPUT PARAMETERS: |
//| P - parametric spline interpolant |
//| T - point: |
//| * T in [0,1] corresponds to interval spanned by |
//| points |
//| * for non-periodic splines T<0 (or T>1) correspond |
//| to parts of the curve before the first (after the |
//| last) point |
//| * for periodic splines T<0 (or T>1) are projected |
//| into [0,1] by making T=T-floor(T). |
//| OUTPUT PARAMETERS: |
//| X - X-position |
//| Y - Y-position |
//| Z - Z-position |
//+------------------------------------------------------------------+
void CAlglib::PSpline3Calc(CPSpline3InterpolantShell &p,const double t,
                           double &x,double &y,double &z)
{
//--- reset output references before evaluation
z=0;
y=0;
x=0;
//--- delegate spline evaluation at parameter T to the inner interpolant
CPSpline::PSpline3Calc(p.GetInnerObj(),t,x,y,z);
}
//+------------------------------------------------------------------+
//| This function calculates tangent vector for a given value of |
//| parameter T |
//| INPUT PARAMETERS: |
//| P - parametric spline interpolant |
//| T - point: |
//| * T in [0,1] corresponds to interval spanned by |
//| points |
//| * for non-periodic splines T<0 (or T>1) correspond to|
//| parts of the curve before the first (after the |
//| last) point |
//| * for periodic splines T<0 (or T>1) are projected |
//| into [0,1] by making T=T-floor(T). |
//| OUTPUT PARAMETERS: |
//| X - X-component of tangent vector (normalized) |
//| Y - Y-component of tangent vector (normalized) |
//| NOTE: |
//| X^2+Y^2 is either 1 (for non-zero tangent vector) or 0. |
//+------------------------------------------------------------------+
void CAlglib::PSpline2Tangent(CPSpline2InterpolantShell &p,const double t,
                              double &x,double &y)
{
//--- reset tangent components before computation
y=0;
x=0;
//--- delegate normalized-tangent computation to the inner interpolant
CPSpline::PSpline2Tangent(p.GetInnerObj(),t,x,y);
}
//+------------------------------------------------------------------+
//| This function calculates tangent vector for a given value of |
//| parameter T |
//| INPUT PARAMETERS: |
//| P - parametric spline interpolant |
//| T - point: |
//| * T in [0,1] corresponds to interval spanned by |
//| points |
//| * for non-periodic splines T<0 (or T>1) correspond to|
//| parts of the curve before the first (after the |
//| last) point |
//| * for periodic splines T<0 (or T>1) are projected |
//| into [0,1] by making T=T-floor(T). |
//| OUTPUT PARAMETERS: |
//| X - X-component of tangent vector (normalized) |
//| Y - Y-component of tangent vector (normalized) |
//| Z - Z-component of tangent vector (normalized) |
//| NOTE: |
//| X^2+Y^2+Z^2 is either 1 (for non-zero tangent vector) or 0. |
//+------------------------------------------------------------------+
void CAlglib::PSpline3Tangent(CPSpline3InterpolantShell &p,const double t,
                              double &x,double &y,double &z)
{
//--- reset tangent components before computation
z=0;
y=0;
x=0;
//--- delegate normalized-tangent computation to the inner interpolant
CPSpline::PSpline3Tangent(p.GetInnerObj(),t,x,y,z);
}
//+------------------------------------------------------------------+
//| This function calculates derivative, i.e. it returns |
//| (dX/dT,dY/dT). |
//| INPUT PARAMETERS: |
//| P - parametric spline interpolant |
//| T - point: |
//| * T in [0,1] corresponds to interval spanned by |
//| points |
//| * for non-periodic splines T<0 (or T>1) correspond to|
//| parts of the curve before the first (after the |
//| last) point |
//| * for periodic splines T<0 (or T>1) are projected |
//| into [0,1] by making T=T-floor(T). |
//| OUTPUT PARAMETERS: |
//| X - X-value |
//| DX - X-derivative |
//| Y - Y-value |
//| DY - Y-derivative |
//+------------------------------------------------------------------+
void CAlglib::PSpline2Diff(CPSpline2InterpolantShell &p,const double t,
                           double &x,double &dx,double &y,double &dy)
{
//--- reset all output references (values and first derivatives)
dy=0;
y=0;
dx=0;
x=0;
//--- delegate differentiation at parameter T to the inner interpolant
CPSpline::PSpline2Diff(p.GetInnerObj(),t,x,dx,y,dy);
}
//+------------------------------------------------------------------+
//| This function calculates derivative, i.e. it returns |
//| (dX/dT,dY/dT,dZ/dT). |
//| INPUT PARAMETERS: |
//| P - parametric spline interpolant |
//| T - point: |
//| * T in [0,1] corresponds to interval spanned by |
//| points |
//| * for non-periodic splines T<0 (or T>1) correspond to|
//| parts of the curve before the first (after the |
//| last) point |
//| * for periodic splines T<0 (or T>1) are projected |
//| into [0,1] by making T=T-floor(T). |
//| OUTPUT PARAMETERS: |
//| X - X-value |
//| DX - X-derivative |
//| Y - Y-value |
//| DY - Y-derivative |
//| Z - Z-value |
//| DZ - Z-derivative |
//+------------------------------------------------------------------+
void CAlglib::PSpline3Diff(CPSpline3InterpolantShell &p,const double t,
                           double &x,double &dx,double &y,double &dy,
                           double &z,double &dz)
{
//--- reset all output references (values and first derivatives)
dz=0;
z=0;
dy=0;
y=0;
dx=0;
x=0;
//--- delegate differentiation at parameter T to the inner interpolant
CPSpline::PSpline3Diff(p.GetInnerObj(),t,x,dx,y,dy,z,dz);
}
//+------------------------------------------------------------------+
//| This function calculates first and second derivative with respect|
//| to T. |
//| INPUT PARAMETERS: |
//| P - parametric spline interpolant |
//| T - point: |
//| * T in [0,1] corresponds to interval spanned by |
//| points |
//| * for non-periodic splines T<0 (or T>1) correspond to|
//| parts of the curve before the first (after the |
//| last) point |
//| * for periodic splines T<0 (or T>1) are projected |
//| into [0,1] by making T=T-floor(T). |
//| OUTPUT PARAMETERS: |
//| X - X-value |
//| DX - derivative |
//| D2X - second derivative |
//| Y - Y-value |
//| DY - derivative |
//| D2Y - second derivative |
//+------------------------------------------------------------------+
void CAlglib::PSpline2Diff2(CPSpline2InterpolantShell &p,const double t,
                            double &x,double &dx,double &d2x,double &y,
                            double &dy,double &d2y)
{
//--- reset all output references (values, first and second derivatives)
d2y=0;
dy=0;
y=0;
d2x=0;
dx=0;
x=0;
//--- delegate second-order differentiation to the inner interpolant
CPSpline::PSpline2Diff2(p.GetInnerObj(),t,x,dx,d2x,y,dy,d2y);
}
//+------------------------------------------------------------------+
//| This function calculates first and second derivative with respect|
//| to T. |
//| INPUT PARAMETERS: |
//| P - parametric spline interpolant |
//| T - point: |
//| * T in [0,1] corresponds to interval spanned by |
//| points |
//| * for non-periodic splines T<0 (or T>1) correspond to|
//| parts of the curve before the first (after the |
//| last) point |
//| * for periodic splines T<0 (or T>1) are projected |
//| into [0,1] by making T=T-floor(T). |
//| OUTPUT PARAMETERS: |
//| X - X-value |
//| DX - derivative |
//| D2X - second derivative |
//| Y - Y-value |
//| DY - derivative |
//| D2Y - second derivative |
//| Z - Z-value |
//| DZ - derivative |
//| D2Z - second derivative |
//+------------------------------------------------------------------+
void CAlglib::PSpline3Diff2(CPSpline3InterpolantShell &p,const double t,
                            double &x,double &dx,double &d2x,double &y,
                            double &dy,double &d2y,double &z,
                            double &dz,double &d2z)
{
//--- reset all output references (values, first and second derivatives)
d2z=0;
dz=0;
z=0;
d2y=0;
dy=0;
y=0;
d2x=0;
dx=0;
x=0;
//--- delegate second-order differentiation to the inner interpolant
CPSpline::PSpline3Diff2(p.GetInnerObj(),t,x,dx,d2x,y,dy,d2y,z,dz,d2z);
}
//+------------------------------------------------------------------+
//| This function calculates arc length, i.e. length of curve between|
//| t=a and t=b. |
//| INPUT PARAMETERS: |
//| P - parametric spline interpolant |
//| A,B - parameter values corresponding to arc ends: |
//| * B>A will result in positive length returned |
//| * B<A will result in negative length returned |
//| RESULT: |
//| length of arc starting at T=A and ending at T=B. |
//+------------------------------------------------------------------+
double CAlglib::PSpline2ArcLength(CPSpline2InterpolantShell &p,
                                  const double a,const double b)
{
//--- function call: signed arc length of curve from T=A to T=B
//--- (negative when B<A, see header comment above)
return(CPSpline::PSpline2ArcLength(p.GetInnerObj(),a,b));
}
//+------------------------------------------------------------------+
//| This function calculates arc length, i.e. length of curve between|
//| t=a and t=b. |
//| INPUT PARAMETERS: |
//| P - parametric spline interpolant |
//| A,B - parameter values corresponding to arc ends: |
//| * B>A will result in positive length returned |
//| * B<A will result in negative length returned |
//| RESULT: |
//| length of arc starting at T=A and ending at T=B. |
//+------------------------------------------------------------------+
double CAlglib::PSpline3ArcLength(CPSpline3InterpolantShell &p,
                                  const double a,const double b)
{
//--- function call: signed arc length of curve from T=A to T=B
//--- (negative when B<A, see header comment above)
return(CPSpline::PSpline3ArcLength(p.GetInnerObj(),a,b));
}
//+------------------------------------------------------------------+
//| This subroutine fits piecewise linear curve to points with |
//| Ramer-Douglas-Peucker algorithm. This function performs |
//| PARAMETRIC fit, i.e. it can be used to fit curves like circles. |
//| On input it accepts dataset which describes parametric |
//| multidimensional curve X(t), with X being vector, and t taking |
//| values in [0,N), where N is a number of points in dataset. As |
//| result, it returns reduced dataset X2, which can be used to |
//| build parametric curve X2(t), which approximates X(t) with |
//| desired precision (or has specified number of sections). |
//| INPUT PARAMETERS: |
//| X - array of multidimensional points: |
//| * at least N elements, leading N elements are used |
//| if more than N elements were specified |
//| * order of points is IMPORTANT because it is |
//| parametric fit |
//| * each row of array is one point which has D |
//| coordinates |
//| N - number of elements in X |
//| D - number of dimensions (elements per row of X) |
//| StopM - stopping condition - desired number of sections: |
//| * at most M sections are generated by this function|
//| * less than M sections can be generated if we have |
//| N<M (or some X are non-distinct). |
//| * zero StopM means that algorithm does not stop |
//| after achieving some pre-specified section count |
//| StopEps - stopping condition - desired precision: |
//| * algorithm stops after error in each section is at|
//| most Eps |
//| * zero Eps means that algorithm does not stop after|
//| achieving some pre-specified precision |
//| OUTPUT PARAMETERS: |
//| X2 - array of corner points for piecewise approximation,|
//| has length NSections+1 or zero (for NSections=0). |
//| Idx2 - array of indexes (parameter values): |
//| * has length NSections+1 or zero (for NSections=0).|
//| * each element of Idx2 corresponds to same-numbered|
//| element of X2 |
//| * each element of Idx2 is index of corresponding |
//| element of X2 at original array X, i.e. I-th row |
//| of X2 is Idx2[I]-th row of X. |
//| * elements of Idx2 can be treated as parameter |
//| values which should be used when building new |
//| parametric curve |
//| * Idx2[0]=0, Idx2[NSections]=N-1 |
//| NSections - number of sections found by algorithm,NSections<=M,|
//| NSections can be zero for degenerate datasets (N<=1|
//| or all X[] are non-distinct). |
//| NOTE: algorithm stops after: |
//| a) dividing curve into StopM sections |
//| b) achieving required precision StopEps |
//| c) dividing curve into N-1 sections |
//| If both StopM and StopEps are non-zero, algorithm is stopped by |
//| the FIRST criterion which is satisfied. In case both StopM and |
//| StopEps are zero, algorithm stops because of (c). |
//+------------------------------------------------------------------+
void CAlglib::ParametricRDPFixed(CMatrixDouble &x,int n,int d,
                                 int stopm,double stopeps,
                                 CMatrixDouble &x2,int &idx2[],
                                 int &nsections)
{
//--- function call: parametric Ramer-Douglas-Peucker reduction;
//--- X2/Idx2/NSections are filled by the computational core
CPSpline::ParametricRDPFixed(x,n,d,stopm,stopeps,x2,idx2,nsections);
}
//+------------------------------------------------------------------+
//| This function serializes data structure to string. |
//| Important properties of s_out: |
//| * it contains alphanumeric characters, dots, underscores, minus|
//| signs |
//| * these symbols are grouped into words, which are separated by |
//| spaces and Windows-style (CR+LF) newlines |
//| * although serializer uses spaces and CR+LF as separators, |
//| you can replace any separator character by arbitrary |
//| combination of spaces, tabs, Windows or Unix newlines. It |
//| allows flexible reformatting of the string in case you |
//| want to include it into text or XML file. But you should not |
//| insert separators into the middle of the "words" nor you |
//| should change case of letters. |
//| * s_out can be freely moved between 32-bit and 64-bit systems, |
//| little and big endian machines, and so on. You can serialize |
//| structure on 32-bit machine and unserialize it on 64-bit one |
//| (or vice versa), or serialize it on SPARC and unserialize |
//| on x86. You can also serialize it in C# version of ALGLIB and|
//| unserialize in C++ one, and vice versa. |
//+------------------------------------------------------------------+
void CAlglib::Spline2DSerialize(CSpline2DInterpolantShell &obj,string &s_out)
{
//--- create serializer
CSerializer s;
//--- first pass: size-allocation mode, determines required storage
s.Alloc_Start();
//--- function call: count entries to be serialized
CSpline2D::Spline2DAlloc(s,obj.GetInnerObj());
//--- second pass: serialize spline into string representation
s.SStart_Str();
CSpline2D::Spline2DSerialize(s,obj.GetInnerObj());
s.Stop();
//--- return serialized data to caller
s_out=s.Get_String();
}
//+------------------------------------------------------------------+
//| This function unserializes data structure from string. |
//+------------------------------------------------------------------+
void CAlglib::Spline2DUnserialize(string s_in,CSpline2DInterpolantShell &obj)
{
//--- create serializer and attach it to the input string
CSerializer s;
s.UStart_Str(s_in);
//--- function call: restore spline state into the shell's inner object
CSpline2D::Spline2DUnserialize(s,obj.GetInnerObj());
s.Stop();
}
//+------------------------------------------------------------------+
//| This subroutine builds bilinear vector-valued spline. |
//| Input parameters: |
//| X - spline abscissas, array[0..N-1] |
//| Y - spline ordinates, array[0..M-1] |
//| F - function values, array[0..M*N*D-1]: |
//| * first D elements store D values at (X[0],Y[0]) |
//| * next D elements store D values at (X[1],Y[0]) |
//| * general form - D function values at (X[i],Y[j]) |
//| are stored at F[D*(J*N+I)...D*(J*N+I)+D-1]. |
//| M,N - grid size, M>=2, N>=2 |
//| D - vector dimension, D>=1 |
//| Output parameters: |
//| C - spline interpolant |
//+------------------------------------------------------------------+
void CAlglib::Spline2DBuildBilinearV(CRowDouble &x,int n,CRowDouble &y,
                                     int m,CRowDouble &f,int d,
                                     CSpline2DInterpolantShell &c)
{
//--- function call: build bilinear vector-valued 2D spline into the
//--- shell's inner interpolant (layout of F described in header above)
CSpline2D::Spline2DBuildBilinearV(x,n,y,m,f,d,c.GetInnerObj());
}
//+------------------------------------------------------------------+
//| This subroutine builds bicubic vector-valued spline. |
//| Input parameters: |
//| X - spline abscissas, array[0..N-1] |
//| Y - spline ordinates, array[0..M-1] |
//| F - function values, array[0..M*N*D-1]: |
//| * first D elements store D values at (X[0],Y[0]) |
//| * next D elements store D values at (X[1],Y[0]) |
//| * general form - D function values at (X[i],Y[j]) |
//| are stored at F[D*(J*N+I)...D*(J*N+I)+D-1]. |
//| M,N - grid size, M>=2, N>=2 |
//| D - vector dimension, D>=1 |
//| Output parameters: |
//| C - spline interpolant |
//+------------------------------------------------------------------+
void CAlglib::Spline2DBuildBicubicV(CRowDouble &x,int n,CRowDouble &y,
                                    int m,CRowDouble &f,int d,
                                    CSpline2DInterpolantShell &c)
{
//--- function call: build bicubic vector-valued 2D spline into the
//--- shell's inner interpolant (layout of F described in header above)
CSpline2D::Spline2DBuildBicubicV(x,n,y,m,f,d,c.GetInnerObj());
}
//+------------------------------------------------------------------+
//| This subroutine unpacks two-dimensional spline into the |
//| coefficients table |
//| Input parameters: |
//| C - spline interpolant. |
//| Result: |
//| M, N - grid size (x-axis and y-axis) |
//| D - number of components |
//| Tbl - coefficients table, unpacked format, |
//| D - components: [0..(N-1)*(M-1)*D-1, 0..19]. |
//| For T=0..D-1 (component index), I = 0...N-2 (x index), J=0..M-2 |
//| (y index): |
//| K := T + I*D + J*D*(N-1) |
//| K-th row stores decomposition for T-th component of the |
//| vector-valued function |
//| Tbl[K,0] = X[i] |
//| Tbl[K,1] = X[i+1] |
//| Tbl[K,2] = Y[j] |
//| Tbl[K,3] = Y[j+1] |
//| Tbl[K,4] = C00 |
//| Tbl[K,5] = C01 |
//| Tbl[K,6] = C02 |
//| Tbl[K,7] = C03 |
//| Tbl[K,8] = C10 |
//| Tbl[K,9] = C11 |
//| ... |
//| Tbl[K,19] = C33 |
//| On each grid square spline is equal to:                          |
//| S(x) = SUM(c[i,j]*(t^i)*(u^j), i=0..3, j=0..3) |
//| t = x-x[j] |
//| u = y-y[i] |
//+------------------------------------------------------------------+
void CAlglib::Spline2DUnpackV(CSpline2DInterpolantShell &c,int &m,
                              int &n,int &d,CMatrixDouble &tbl)
{
//--- initialization: reset scalar outputs before delegating, consistent
//--- with the other wrappers in this file (e.g. PSpline2ParameterValues)
m=0;
n=0;
d=0;
//--- function call: unpack spline into M, N, D and coefficients table
CSpline2D::Spline2DUnpackV(c.GetInnerObj(),m,n,d,tbl);
}
//+------------------------------------------------------------------+
//| This subroutine was deprecated in ALGLIB 3.6.0 |
//| We recommend you to switch to Spline2DBuildBilinearV(), |
//| which is more flexible and accepts its arguments in more |
//| convenient order. |
//| This subroutine builds bilinear spline coefficients table. |
//| Input parameters: |
//| X - spline abscissas, array[0..N-1] |
//| Y - spline ordinates, array[0..M-1] |
//| F - function values, array[0..M-1,0..N-1] |
//| M,N - grid size, M>=2, N>=2 |
//| Output parameters: |
//| C - spline interpolant |
//+------------------------------------------------------------------+
void CAlglib::Spline2DBuildBilinear(double &x[],double &y[],
                                    CMatrixDouble &f,const int m,
                                    const int n,CSpline2DInterpolantShell &c)
{
//--- function call: deprecated scalar bilinear builder (since ALGLIB
//--- 3.6.0, prefer Spline2DBuildBilinearV)
CSpline2D::Spline2DBuildBilinear(x,y,f,m,n,c.GetInnerObj());
}
//+------------------------------------------------------------------+
//| This subroutine was deprecated in ALGLIB 3.6.0 |
//| We recommend you to switch to Spline2DBuildBicubicV(), which |
//| is more flexible and accepts its arguments in more convenient |
//| order. |
//| This subroutine builds bicubic spline coefficients table. |
//| Input parameters: |
//| X - spline abscissas, array[0..N-1] |
//| Y - spline ordinates, array[0..M-1] |
//| F - function values, array[0..M-1,0..N-1] |
//| M,N - grid size, M>=2, N>=2 |
//| Output parameters: |
//| C - spline interpolant |
//+------------------------------------------------------------------+
void CAlglib::Spline2DBuildBicubic(double &x[],double &y[],CMatrixDouble &f,
                                   const int m,const int n,
                                   CSpline2DInterpolantShell &c)
{
//--- function call: deprecated scalar bicubic builder (since ALGLIB
//--- 3.6.0, prefer Spline2DBuildBicubicV)
CSpline2D::Spline2DBuildBicubic(x,y,f,m,n,c.GetInnerObj());
}
//+------------------------------------------------------------------+
//| This subroutine calculates the value of the bilinear or bicubic |
//| spline at the given point X. |
//| Input parameters: |
//| C - coefficients table. |
//| Built by BuildBilinearSpline or BuildBicubicSpline. |
//| X, Y- point |
//| Result: |
//| S(x,y) |
//+------------------------------------------------------------------+
double CAlglib::Spline2DCalc(CSpline2DInterpolantShell &c,
                             const double x,const double y)
{
//--- function call: evaluate S(x,y) on the shell's inner interpolant
return(CSpline2D::Spline2DCalc(c.GetInnerObj(),x,y));
}
//+------------------------------------------------------------------+
//| This subroutine calculates the value of the bilinear or bicubic |
//| spline at the given point X and its derivatives. |
//| Input parameters: |
//| C - spline interpolant. |
//| X, Y- point |
//| Output parameters: |
//| F - S(x,y) |
//| FX - dS(x,y)/dX |
//| FY - dS(x,y)/dY |
//| FXY - d2S(x,y)/dXdY |
//+------------------------------------------------------------------+
void CAlglib::Spline2DDiff(CSpline2DInterpolantShell &c,const double x,
                           const double y,double &f,double &fx,
                           double &fy,double &fxy)
{
//--- reset output references (value and partial derivatives)
fxy=0;
fy=0;
fx=0;
f=0;
//--- delegate evaluation of S, dS/dX, dS/dY, d2S/dXdY to the core
CSpline2D::Spline2DDiff(c.GetInnerObj(),x,y,f,fx,fy,fxy);
}
//+------------------------------------------------------------------+
//| This subroutine calculates bilinear or bicubic vector-valued |
//| spline at the given point (X,Y). |
//| If you need just some specific component of vector-valued spline,|
//| you can use Spline2DCalcVi() function. |
//| INPUT PARAMETERS: |
//| C - spline interpolant. |
//| X, Y - point |
//| F - output buffer, possibly preallocated array. In case|
//| array size is large enough to store result, it is |
//| not reallocated. Array which is too short will be |
//| reallocated |
//| OUTPUT PARAMETERS: |
//| F - array[D] (or larger) which stores function values |
//+------------------------------------------------------------------+
void CAlglib::Spline2DCalcVBuf(CSpline2DInterpolantShell &c,double x,
                               double y,CRowDouble &f)
{
//--- function call: vector-valued evaluation at (X,Y); F is reused as
//--- output buffer and only reallocated when too small
CSpline2D::Spline2DCalcVBuf(c.GetInnerObj(),x,y,f);
}
//+------------------------------------------------------------------+
//| This subroutine calculates specific component of vector-valued |
//| bilinear or bicubic spline at the given point (X,Y). |
//| INPUT PARAMETERS: |
//| C - spline interpolant. |
//| X, Y - point |
//| I - component index, in [0,D). An exception is |
//| generated for out of range values. |
//| RESULT: |
//| value of I-th component |
//+------------------------------------------------------------------+
double CAlglib::Spline2DCalcVi(CSpline2DInterpolantShell &c,double x,
                               double y,int i)
{
//--- function call: value of I-th component of the vector-valued spline
return(CSpline2D::Spline2DCalcVi(c.GetInnerObj(),x,y,i));
}
//+------------------------------------------------------------------+
//| This subroutine calculates bilinear or bicubic vector-valued |
//| spline at the given point (X,Y). |
//| INPUT PARAMETERS: |
//| C - spline interpolant. |
//| X, Y - point |
//| OUTPUT PARAMETERS: |
//| F - array[D] which stores function values. F is |
//| out-parameter and it is reallocated after call to |
//| this function. In case you want to reuse previously|
//| allocated F, you may use Spline2DCalcVBuf(), which |
//| reallocates F only when it is too small. |
//+------------------------------------------------------------------+
void CAlglib::Spline2DCalcV(CSpline2DInterpolantShell &c,double x,
                            double y,CRowDouble &f)
  {
//--- thin wrapper: unwrap the shell and delegate to the computational core
   CSpline2D::Spline2DCalcV(c.GetInnerObj(),x,y,f);
  }
//+------------------------------------------------------------------+
//| This subroutine calculates value of specific component of |
//| bilinear or bicubic vector-valued spline and its derivatives. |
//| Input parameters: |
//| C - spline interpolant. |
//| X, Y - point |
//| I - component index, in [0,D) |
//| Output parameters: |
//| F - S(x,y) |
//| FX - dS(x,y)/dX |
//| FY - dS(x,y)/dY |
//| FXY - d2S(x,y)/dXdY |
//+------------------------------------------------------------------+
void CAlglib::Spline2DDiffVi(CSpline2DInterpolantShell &c,double x,
                             double y,int i,double &f,double &fx,
                             double &fy,double &fxy)
  {
//--- initialization
//--- zero the output references before delegating, so callers never observe
//--- stale values; this matches the convention used by the other wrappers
//--- with output parameters in this file (e.g. Spline2DDiff, Spline2DUnpack)
   f=0;
   fx=0;
   fy=0;
   fxy=0;
//--- function call
   CSpline2D::Spline2DDiffVi(c.GetInnerObj(),x,y,i,f,fx,fy,fxy);
  }
//+------------------------------------------------------------------+
//| This subroutine was deprecated in ALGLIB 3.6.0 |
//| We recommend you to switch to Spline2DUnpackV(), which is |
//| more flexible and accepts its arguments in more convenient order.|
//| This subroutine unpacks two-dimensional spline into the |
//| coefficients table |
//| Input parameters: |
//| C - spline interpolant. |
//| Result: |
//| M, N- grid size (x-axis and y-axis) |
//| Tbl - coefficients table, unpacked format, |
//| [0..(N-1)*(M-1)-1, 0..19]. |
//| For I = 0...M-2, J=0..N-2: |
//| K = I*(N-1)+J |
//| Tbl[K,0] = X[j] |
//| Tbl[K,1] = X[j+1] |
//| Tbl[K,2] = Y[i] |
//| Tbl[K,3] = Y[i+1] |
//| Tbl[K,4] = C00 |
//| Tbl[K,5] = C01 |
//| Tbl[K,6] = C02 |
//| Tbl[K,7] = C03 |
//| Tbl[K,8] = C10 |
//| Tbl[K,9] = C11 |
//| ... |
//| Tbl[K,19] = C33 |
//| On each grid square spline is equals to: |
//| S(x) = SUM(c[i,j]*(x^i)*(y^j), i=0..3, j=0..3) |
//| t = x-x[j] |
//| u = y-y[i] |
//+------------------------------------------------------------------+
void CAlglib::Spline2DUnpack(CSpline2DInterpolantShell &c,int &m,
                             int &n,CMatrixDouble &tbl)
  {
//--- NOTE: deprecated since ALGLIB 3.6.0 (see header); prefer Spline2DUnpackV()
//--- initialization
   m=0;
   n=0;
//--- function call
   CSpline2D::Spline2DUnpack(c.GetInnerObj(),m,n,tbl);
  }
//+------------------------------------------------------------------+
//| This subroutine performs linear transformation of the spline |
//| argument. |
//| Input parameters: |
//| C - spline interpolant |
//| AX, BX - transformation coefficients: x = A*t + B |
//| AY, BY - transformation coefficients: y = A*u + B |
//| Result: |
//| C - transformed spline |
//+------------------------------------------------------------------+
void CAlglib::Spline2DLinTransXY(CSpline2DInterpolantShell &c,
                                 const double ax,const double bx,
                                 const double ay,const double by)
  {
//--- thin wrapper: transform spline argument (x=ax*t+bx, y=ay*u+by) in place
   CSpline2D::Spline2DLinTransXY(c.GetInnerObj(),ax,bx,ay,by);
  }
//+------------------------------------------------------------------+
//| This subroutine performs linear transformation of the spline. |
//| Input parameters: |
//| C - spline interpolant. |
//| A, B- transformation coefficients: S2(x,y) = A*S(x,y) + B |
//| Output parameters: |
//| C - transformed spline |
//+------------------------------------------------------------------+
void CAlglib::Spline2DLinTransF(CSpline2DInterpolantShell &c,
                                const double a,const double b)
  {
//--- thin wrapper: linear transform of spline values, S2(x,y)=a*S(x,y)+b
   CSpline2D::Spline2DLinTransF(c.GetInnerObj(),a,b);
  }
//+------------------------------------------------------------------+
//| This subroutine makes the copy of the spline model. |
//| Input parameters: |
//| C - spline interpolant |
//| Output parameters: |
//| CC - spline copy |
//+------------------------------------------------------------------+
void CAlglib::Spline2DCopy(CSpline2DInterpolantShell &c,
                           CSpline2DInterpolantShell &cc)
  {
//--- thin wrapper: copy inner interpolant of C into CC
   CSpline2D::Spline2DCopy(c.GetInnerObj(),cc.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Bicubic spline resampling |
//| Input parameters: |
//| A - function values at the old grid, |
//| array[0..OldHeight-1, 0..OldWidth-1] |
//| OldHeight - old grid height, OldHeight>1 |
//| OldWidth - old grid width, OldWidth>1 |
//| NewHeight - new grid height, NewHeight>1 |
//| NewWidth - new grid width, NewWidth>1 |
//| Output parameters: |
//| B - function values at the new grid, |
//| array[0..NewHeight-1, 0..NewWidth-1] |
//+------------------------------------------------------------------+
void CAlglib::Spline2DResampleBicubic(CMatrixDouble &a,const int oldheight,
                                      const int oldwidth,CMatrixDouble &b,
                                      const int newheight,const int newwidth)
  {
//--- thin wrapper: delegate bicubic grid resampling to the computational core
   CSpline2D::Spline2DResampleBicubic(a,oldheight,oldwidth,b,newheight,newwidth);
  }
//+------------------------------------------------------------------+
//| Bilinear spline resampling |
//| Input parameters: |
//| A - function values at the old grid, |
//| array[0..OldHeight-1, 0..OldWidth-1] |
//| OldHeight - old grid height, OldHeight>1 |
//| OldWidth - old grid width, OldWidth>1 |
//| NewHeight - new grid height, NewHeight>1 |
//| NewWidth - new grid width, NewWidth>1 |
//| Output parameters: |
//| B - function values at the new grid, |
//| array[0..NewHeight-1, 0..NewWidth-1] |
//+------------------------------------------------------------------+
void CAlglib::Spline2DResampleBilinear(CMatrixDouble &a,const int oldheight,
                                       const int oldwidth,CMatrixDouble &b,
                                       const int newheight,const int newwidth)
  {
//--- thin wrapper: delegate bilinear grid resampling to the computational core
   CSpline2D::Spline2DResampleBilinear(a,oldheight,oldwidth,b,newheight,newwidth);
  }
//+------------------------------------------------------------------+
//| This subroutine creates least squares solver used to fit 2D |
//| splines to irregularly sampled (scattered) data. |
//| Solver object is used to perform spline fits as follows: |
//| * solver object is created with Spline2DBuilderCreate() |
//| function |
//| * dataset is added with Spline2DBuilderSetPoints() function |
//| * fit area is chosen: |
//| * Spline2DBuilderSetArea() - for user-defined area |
//| * Spline2DBuilderSetAreaAuto()- for automatically chosen |
//| area |
//| * number of grid nodes is chosen with Spline2DBuilderSetGrid() |
//| * prior term is chosen with one of the following functions: |
//| * Spline2DBuilderSetLinTerm() to set linear prior |
//| * Spline2DBuilderSetConstTerm() to set constant prior |
//| * Spline2DBuilderSetZeroTerm() to set zero prior |
//| * Spline2DBuilderSetUserTerm() to set user-defined constant |
//| prior |
//| * solver algorithm is chosen with either: |
//| * Spline2DBuilderSetAlgoBlockLLS() - BlockLLS algorithm, |
//| medium-scale problems|
//| * Spline2DBuilderSetAlgoFastDDM() - FastDDM algorithm, |
//| large-scale problems |
//| * finally, fitting itself is performed with Spline2DFit() |
//| function. |
//| Most of the steps above can be omitted, solver is configured with|
//| good defaults. The minimum is to call: |
//| * Spline2DBuilderCreate() to create solver object |
//| * Spline2DBuilderSetPoints() to specify dataset |
//| * Spline2DBuilderSetGrid() to tell how many nodes you need |
//| * Spline2DFit() to perform fit |
//| INPUT PARAMETERS: |
//| D - positive number, number of Y-components: D=1 for |
//| simple scalar fit, D>1 for vector-valued spline |
//| fitting. |
//| OUTPUT PARAMETERS: |
//| S - solver object |
//+------------------------------------------------------------------+
void CAlglib::Spline2DBuilderCreate(int d,CSpline2DBuilder &state)
  {
//--- thin wrapper: create least-squares 2D spline fitting solver (D components)
   CSpline2D::Spline2DBuilderCreate(d,state);
  }
//+------------------------------------------------------------------+
//| This function sets constant prior term (model is a sum of bicubic|
//| spline and global prior, which can be linear, constant, |
//| user-defined constant or zero). |
//| Constant prior term is determined by least squares fitting. |
//| INPUT PARAMETERS: |
//| S - spline builder |
//| V - value for user-defined prior |
//+------------------------------------------------------------------+
void CAlglib::Spline2DBuilderSetUserTerm(CSpline2DBuilder &state,double v)
  {
//--- thin wrapper: set user-defined constant prior term V on the builder
   CSpline2D::Spline2DBuilderSetUserTerm(state,v);
  }
//+------------------------------------------------------------------+
//| This function sets linear prior term (model is a sum of bicubic |
//| spline and global prior, which can be linear, constant, |
//| user-defined constant or zero). |
//| Linear prior term is determined by least squares fitting. |
//| INPUT PARAMETERS: |
//| S - spline builder |
//+------------------------------------------------------------------+
void CAlglib::Spline2DBuilderSetLinTerm(CSpline2DBuilder &state)
  {
//--- thin wrapper: select linear prior term for the builder
   CSpline2D::Spline2DBuilderSetLinTerm(state);
  }
//+------------------------------------------------------------------+
//| This function sets constant prior term (model is a sum of bicubic|
//| spline and global prior, which can be linear, constant, |
//| user-defined constant or zero). |
//| Constant prior term is determined by least squares fitting. |
//| INPUT PARAMETERS: |
//| S - spline builder |
//+------------------------------------------------------------------+
void CAlglib::Spline2DBuilderSetConstTerm(CSpline2DBuilder &state)
  {
//--- thin wrapper: select constant prior term for the builder
   CSpline2D::Spline2DBuilderSetConstTerm(state);
  }
//+------------------------------------------------------------------+
//| This function sets zero prior term (model is a sum of bicubic |
//| spline and global prior, which can be linear, constant, |
//| user-defined constant or zero). |
//| INPUT PARAMETERS: |
//| S - spline builder |
//+------------------------------------------------------------------+
void CAlglib::Spline2DBuilderSetZeroTerm(CSpline2DBuilder &state)
  {
//--- thin wrapper: select zero prior term for the builder
   CSpline2D::Spline2DBuilderSetZeroTerm(state);
  }
//+------------------------------------------------------------------+
//| This function adds dataset to the builder object. |
//| This function overrides results of the previous calls, i.e. |
//| multiple calls of this function will result in only the last |
//| set being added. |
//| INPUT PARAMETERS: |
//| S - spline 2D builder object |
//| XY - points, array[N,2+D]. One row corresponds to one |
//| point in the dataset. First 2 elements are |
//| coordinates, next D elements are function values. |
//| Array may be larger than specified, in this case |
//| only leading [N,NX+NY] elements will be used. |
//| N - number of points in the dataset |
//+------------------------------------------------------------------+
void CAlglib::Spline2DBuilderSetPoints(CSpline2DBuilder &state,
                                       CMatrixDouble &xy,int n)
  {
//--- thin wrapper: attach dataset XY (N rows) to the builder;
//--- per the header above, this REPLACES any previously set dataset
   CSpline2D::Spline2DBuilderSetPoints(state,xy,n);
  }
//+------------------------------------------------------------------+
//| This function sets area where 2D spline interpolant is built. |
//| "Auto" means that area extent is determined automatically from |
//| dataset extent. |
//| INPUT PARAMETERS: |
//| S - spline 2D builder object |
//+------------------------------------------------------------------+
void CAlglib::Spline2DBuilderSetAreaAuto(CSpline2DBuilder &state)
  {
//--- thin wrapper: fit area will be determined automatically from dataset extent
   CSpline2D::Spline2DBuilderSetAreaAuto(state);
  }
//+------------------------------------------------------------------+
//| This function sets area where 2D spline interpolant is built to |
//| user-defined one: [XA,XB]*[YA,YB] |
//| INPUT PARAMETERS: |
//| S - spline 2D builder object |
//| XA,XB - spatial extent in the first (X) dimension, XA<XB |
//| YA,YB - spatial extent in the second (Y) dimension, YA<YB |
//+------------------------------------------------------------------+
void CAlglib::Spline2DBuilderSetArea(CSpline2DBuilder &state,
                                     double xa,double xb,
                                     double ya,double yb)
  {
//--- thin wrapper: set user-defined fit area [XA,XB]*[YA,YB]
   CSpline2D::Spline2DBuilderSetArea(state,xa,xb,ya,yb);
  }
//+------------------------------------------------------------------+
//| This function sets nodes count for 2D spline interpolant. Fitting|
//| is performed on area defined with one of the "setarea" functions;|
//| this one sets number of nodes placed upon the fitting area. |
//| INPUT PARAMETERS: |
//| S - spline 2D builder object |
//| KX - nodes count for the first (X) dimension; fitting |
//| interval [XA,XB] is separated into KX-1 |
//| subintervals, with KX nodes created at the |
//| boundaries. |
//|     KY    -   nodes count for the second (Y) dimension; fitting |
//| interval [YA,YB] is separated into KY-1 |
//| subintervals, with KY nodes created at the |
//| boundaries. |
//| NOTE: at least 4 nodes is created in each dimension, so KX and KY|
//| are silently increased if needed. |
//+------------------------------------------------------------------+
void CAlglib::Spline2DBuilderSetGrid(CSpline2DBuilder &state,
                                     int kx,int ky)
  {
//--- thin wrapper: set node counts KX,KY (core silently raises them to >=4)
   CSpline2D::Spline2DBuilderSetGrid(state,kx,ky);
  }
//+------------------------------------------------------------------+
//| This function allows you to choose least squares solver used to |
//| perform fitting. This function sets solver algorithm to "FastDDM"|
//| which performs fast parallel fitting by splitting problem into |
//| smaller chunks and merging results together. |
//| This solver is optimized for large-scale problems, starting from |
//| 256x256 grids, and up to 10000x10000 grids. Of course, it will |
//| work for smaller grids too. |
//| More detailed description of the algorithm is given below: |
//| * algorithm generates hierarchy of nested grids, ranging from |
//| ~16x16 (topmost "layer" of the model) to ~KX*KY one (final |
//| layer). Upper layers model global behavior of the function, |
//| lower layers are used to model fine details. Moving from |
//| layer to layer doubles grid density. |
//| * fitting is started from topmost layer, subsequent layers are |
//| fitted using residuals from previous ones. |
//| * user may choose to skip generation of upper layers and |
//| generate only a few bottom ones, which will result in much |
//| better performance and parallelization efficiency, at the |
//| cost of algorithm inability to "patch" large holes in the |
//| dataset. |
//| * every layer is regularized using progressively increasing |
//| regularization coefficient; thus, increasing LambdaV |
//| penalizes fine details first, leaving lower frequencies |
//| almost intact for a while. |
//| * after fitting is done, all layers are merged together into |
//| one bicubic spline |
//| IMPORTANT: regularization coefficient used by this solver is |
//| different from the one used by BlockLLS. Latter |
//| utilizes nonlinearity penalty, which is global in |
//| nature (large regularization results in global linear |
//| trend being extracted); this solver uses another, |
//| localized form of penalty, which is suitable for |
//| parallel processing. |
//| Notes on memory and performance: |
//| * memory requirements: most memory is consumed during modeling |
//| of the higher layers; ~[512*NPoints] bytes is required for a |
//| model with full hierarchy of grids being generated. However, |
//| if you skip a few topmost layers, you will get nearly |
//| constant (wrt. points count and grid size) memory consumption|
//| * serial running time: O(K*K)+O(NPoints) for a KxK grid |
//| * parallelism potential: good. You may get nearly linear |
//| speed-up when performing fitting with just a few layers. |
//| Adding more layers results in model becoming more global, |
//| which somewhat reduces efficiency of the parallel code. |
//| INPUT PARAMETERS: |
//| S - spline 2D builder object |
//| NLayers - number of layers in the model: |
//| * NLayers>=1 means that up to chosen number of |
//| bottom layers is fitted |
//| * NLayers=0 means that maximum number of layers is |
//| chosen (according to current grid size) |
//| * NLayers<=-1 means that up to |NLayers| topmost |
//| layers is skipped |
//| Recommendations: |
//| *good "default" value is 2 layers |
//| * you may need more layers, if your dataset is very|
//| irregular and you want to "patch" large holes. |
//| For a grid step H (equal to AreaWidth/GridSize) |
//| you may expect that last layer reproduces |
//| variations at distance H (and can patch holes |
//| that wide); that higher layers operate at |
//| distances 2*H, 4*H, 8*H and so on. |
//|                  *good value for "bulletproof" mode is NLayers=0,|
//| which results in complete hierarchy of layers |
//| being generated. |
//| LambdaV - regularization coefficient, chosen in such a way |
//| that it penalizes bottom layers (fine details) |
//| first. LambdaV>=0, zero value means that no penalty|
//| is applied. |
//+------------------------------------------------------------------+
void CAlglib::Spline2DBuilderSetAlgoFastDDM(CSpline2DBuilder &state,
                                            int nlayers,double lambdav)
  {
//--- thin wrapper: select FastDDM solver (large-scale grids; see header for
//--- NLayers/LambdaV semantics)
   CSpline2D::Spline2DBuilderSetAlgoFastDDM(state,nlayers,lambdav);
  }
//+------------------------------------------------------------------+
//| This function allows you to choose least squares solver used to |
//| perform fitting. This function sets solver algorithm to |
//| "BlockLLS", which performs least squares fitting with fast sparse|
//| direct solver, with optional nonsmoothness penalty being applied.|
//| Nonlinearity penalty has the following form: |
//| [ ] |
//| P()~Lambda*integral[(d2S/dx2)^2+2*(d2S/dxdy)^2+(d2S/dy2)^2]dxdy |
//| [ ] |
//| here integral is calculated over entire grid, and "~" means |
//| "proportional" because integral is normalized after calculation. |
//| Extremely large values of Lambda result in linear fit being |
//| performed. |
//| NOTE: this algorithm is the most robust and controllable one, but|
//| it is limited by 512x512 grids and (say) up to 1.000.000 |
//| points. However, ALGLIB has one more spline solver: FastDDM|
//| algorithm, which is intended for really large-scale |
//| problems (in 10M-100M range). FastDDM algorithm also has |
//| better parallelism properties. |
//| More information on BlockLLS solver: |
//| * memory requirements: ~[32*K^3+256*NPoints] bytes for KxK grid|
//| with NPoints-sized dataset |
//| * serial running time: O(K^4+NPoints) |
//| * parallelism potential: limited. You may get some sublinear |
//| gain when working with large grids (K's in 256..512 range) |
//| INPUT PARAMETERS: |
//| S - spline 2D builder object |
//| LambdaNS - non-negative value: |
//| * positive value means that some smoothing is |
//| applied |
//| * zero value means that no smoothing is applied, |
//| and corresponding entries of design matrix are |
//| numerically zero and dropped from consideration. |
//+------------------------------------------------------------------+
void CAlglib::Spline2DBuilderSetAlgoBlockLLS(CSpline2DBuilder &state,
                                             double lambdans)
  {
//--- thin wrapper: select BlockLLS solver with nonsmoothness penalty LambdaNS
   CSpline2D::Spline2DBuilderSetAlgoBlockLLS(state,lambdans);
  }
//+------------------------------------------------------------------+
//| This function allows you to choose least squares solver used to |
//| perform fitting. This function sets solver algorithm to |
//| "NaiveLLS". |
//| IMPORTANT: NaiveLLS is NOT intended to be used in real life code!|
//| This algorithm solves problem by generated dense |
//| (K^2)x(K^2+NPoints) matrix and solves linear least |
//| squares problem with dense solver. |
//| It is here just to test BlockLLS against reference |
//| solver (and maybe for someone trying to compare well |
//| optimized solver against straightforward approach to |
//| the LLS problem). |
//| More information on naive LLS solver: |
//| * memory requirements: ~[8*K^4+256*NPoints] bytes for KxK grid.|
//| * serial running time: O(K^6+NPoints) for KxK grid |
//| * when compared with BlockLLS, NaiveLLS has ~K larger memory |
//| demand and ~K^2 larger running time. |
//| INPUT PARAMETERS: |
//| S - spline 2D builder object |
//| LambdaNS - nonsmoothness penalty |
//+------------------------------------------------------------------+
void CAlglib::Spline2DBuilderSetAlgoNaiveLLS(CSpline2DBuilder &state,
                                             double lambdans)
  {
//--- thin wrapper: select NaiveLLS solver (testing/reference only, see header)
   CSpline2D::Spline2DBuilderSetAlgoNaiveLLS(state,lambdans);
  }
//+------------------------------------------------------------------+
//| This function fits bicubic spline to current dataset, using |
//| current area/grid and current LLS solver. |
//| INPUT PARAMETERS: |
//| State - spline 2D builder object |
//| OUTPUT PARAMETERS: |
//| S - 2D spline, fit result |
//| Rep - fitting report, which provides some additional info|
//| about errors, R2 coefficient and so on. |
//+------------------------------------------------------------------+
void CAlglib::Spline2DFit(CSpline2DBuilder &state,
                          CSpline2DInterpolantShell &s,
                          CSpline2DFitReport &rep)
  {
//--- thin wrapper: run the fit; result goes into the shell's inner interpolant
   CSpline2D::Spline2DFit(state,s.GetInnerObj(),rep);
  }
//+------------------------------------------------------------------+
//| This subroutine calculates the value of the trilinear or tricubic|
//| spline at the given point (X,Y,Z). |
//| INPUT PARAMETERS: |
//| C - coefficients table. Built by BuildBilinearSpline or|
//| BuildBicubicSpline. |
//| X, Y, |
//| Z - point |
//| Result: |
//| S(x,y,z) |
//+------------------------------------------------------------------+
double CAlglib::Spline3DCalc(CSpline3DInterpolant &c,double x,
                             double y,double z)
  {
//--- return result of the core computation, S(x,y,z)
   return(CSpline3D::Spline3DCalc(c,x,y,z));
  }
//+------------------------------------------------------------------+
//| This subroutine performs linear transformation of the spline |
//| argument. |
//| INPUT PARAMETERS: |
//| C - spline interpolant |
//| AX, BX - transformation coefficients: x = A*u + B |
//| AY, BY - transformation coefficients: y = A*v + B |
//| AZ, BZ - transformation coefficients: z = A*w + B |
//| OUTPUT PARAMETERS: |
//| C - transformed spline |
//+------------------------------------------------------------------+
void CAlglib::Spline3DLinTransXYZ(CSpline3DInterpolant &c,double ax,
                                  double bx,double ay,double by,
                                  double az,double bz)
  {
//--- thin wrapper: transform spline argument (x=ax*u+bx, y=ay*v+by, z=az*w+bz)
   CSpline3D::Spline3DLinTransXYZ(c,ax,bx,ay,by,az,bz);
  }
//+------------------------------------------------------------------+
//| This subroutine performs linear transformation of the spline. |
//| INPUT PARAMETERS: |
//| C - spline interpolant. |
//| A, B - transformation coefficients: S2(x,y)=A*S(x,y,z)+B |
//| OUTPUT PARAMETERS: |
//| C - transformed spline |
//+------------------------------------------------------------------+
void CAlglib::Spline3DLinTransF(CSpline3DInterpolant &c,double a,
                                double b)
  {
//--- thin wrapper: linear transform of spline values, S2(x,y,z)=a*S(x,y,z)+b
   CSpline3D::Spline3DLinTransF(c,a,b);
  }
//+------------------------------------------------------------------+
//| Trilinear spline resampling |
//| INPUT PARAMETERS: |
//| A - array[0..OldXCount*OldYCount*OldZCount-1], function|
//| values at the old grid: |
//| A[0] x=0,y=0,z=0 |
//| A[1] x=1,y=0,z=0 |
//| A[..] ... |
//| A[..] x=oldxcount-1,y=0,z=0 |
//| A[..] x=0,y=1,z=0 |
//| A[..] ... |
//| ... |
//| OldZCount - old Z-count, OldZCount>1 |
//| OldYCount - old Y-count, OldYCount>1 |
//| OldXCount - old X-count, OldXCount>1 |
//| NewZCount - new Z-count, NewZCount>1 |
//| NewYCount - new Y-count, NewYCount>1 |
//| NewXCount - new X-count, NewXCount>1 |
//| OUTPUT PARAMETERS: |
//| B - array[0..NewXCount*NewYCount*NewZCount-1], function|
//| values at the new grid: |
//| B[0] x=0,y=0,z=0 |
//| B[1] x=1,y=0,z=0 |
//| B[..] ... |
//| B[..] x=newxcount-1,y=0,z=0 |
//| B[..] x=0,y=1,z=0 |
//| B[..] ... |
//| ... |
//+------------------------------------------------------------------+
void CAlglib::Spline3DResampleTrilinear(CRowDouble &a,int oldzcount,
                                        int oldycount,int oldxcount,
                                        int newzcount,int newycount,
                                        int newxcount,CRowDouble &b)
  {
//--- thin wrapper: delegate trilinear resampling of the flattened 3D grid
   CSpline3D::Spline3DResampleTrilinear(a,oldzcount,oldycount,oldxcount,newzcount,newycount,newxcount,b);
  }
//+------------------------------------------------------------------+
//| This subroutine builds trilinear vector-valued spline. |
//| INPUT PARAMETERS: |
//| X - spline abscissas, array[0..N-1] |
//| Y - spline ordinates, array[0..M-1] |
//| Z - spline applicates, array[0..L-1] |
//| F - function values, array[0..M*N*L*D-1]: |
//| * first D elements store D values at (X[0],Y[0],Z[0]) |
//| * next D elements store D values at (X[1],Y[0],Z[0]) |
//| * next D elements store D values at (X[2],Y[0],Z[0]) |
//| * ... |
//| * next D elements store D values at (X[0],Y[1],Z[0]) |
//| * next D elements store D values at (X[1],Y[1],Z[0]) |
//| * next D elements store D values at (X[2],Y[1],Z[0]) |
//| * ... |
//| * next D elements store D values at (X[0],Y[0],Z[1]) |
//| * next D elements store D values at (X[1],Y[0],Z[1]) |
//| * next D elements store D values at (X[2],Y[0],Z[1]) |
//| * ... |
//| * general form - D function values at (X[i],Y[j]) are |
//| stored at F[D*(N*(M*K+J)+I)...D*(N*(M*K+J)+I)+D-1]. |
//| M,N, |
//| L - grid size, M>=2, N>=2, L>=2 |
//| D - vector dimension, D>=1 |
//| OUTPUT PARAMETERS: |
//| C - spline interpolant |
//+------------------------------------------------------------------+
void CAlglib::Spline3DBuildTrilinearV(CRowDouble &x,int n,
                                      CRowDouble &y,int m,
                                      CRowDouble &z,int l,
                                      CRowDouble &f,int d,
                                      CSpline3DInterpolant &c)
  {
//--- thin wrapper: build trilinear vector-valued spline (layout of F in header)
   CSpline3D::Spline3DBuildTrilinearV(x,n,y,m,z,l,f,d,c);
  }
//+------------------------------------------------------------------+
//| This subroutine calculates bilinear or bicubic vector-valued |
//| spline at the given point (X,Y,Z). |
//| INPUT PARAMETERS: |
//| C - spline interpolant. |
//| X, Y, |
//| Z - point |
//| F - output buffer, possibly preallocated array. In case|
//| array size is large enough to store result, it is |
//| not reallocated. Array which is too short will be |
//| reallocated |
//| OUTPUT PARAMETERS: |
//| F - array[D] (or larger) which stores function values |
//+------------------------------------------------------------------+
void CAlglib::Spline3DCalcVBuf(CSpline3DInterpolant &c,double x,
                               double y,double z,CRowDouble &f)
  {
//--- thin wrapper: buffered variant, F reallocated only if too small
   CSpline3D::Spline3DCalcVBuf(c,x,y,z,f);
  }
//+------------------------------------------------------------------+
//| This subroutine calculates trilinear or tricubic vector-valued |
//| spline at the given point (X,Y,Z). |
//| INPUT PARAMETERS: |
//| C - spline interpolant. |
//| X, Y, |
//| Z - point |
//| OUTPUT PARAMETERS: |
//| F - array[D] which stores function values. F is |
//| out-parameter and it is reallocated after call to |
//| this function. In case you want to reuse |
//| previously allocated F, you may use |
//|           Spline3DCalcVBuf(), which reallocates F only when     |
//| it is too small. |
//+------------------------------------------------------------------+
void CAlglib::Spline3DCalcV(CSpline3DInterpolant &c,double x,
                            double y,double z,CRowDouble &f)
  {
//--- thin wrapper: delegate vector-valued spline evaluation at (X,Y,Z)
   CSpline3D::Spline3DCalcV(c,x,y,z,f);
  }
//+------------------------------------------------------------------+
//| This subroutine unpacks tri-dimensional spline into the |
//| coefficients table |
//| INPUT PARAMETERS: |
//| C - spline interpolant. |
//| Result: |
//| N - grid size (X) |
//| M - grid size (Y) |
//| L - grid size (Z) |
//| D - number of components |
//| SType - spline type. Currently, only one spline type is |
//| supported: trilinear spline, as indicated |
//| by SType=1. |
//| Tbl - spline coefficients: |
//| [0..(N-1)*(M-1)*(L-1)*D-1, 0..13]. |
//| For T=0..D-1 (component index), I = 0...N-2 |
//| (x index), J=0..M-2 (y index), K=0..L-2 (z index): |
//| Q := T + I*D + J*D*(N-1) + K*D*(N-1)*(M-1), |
//| Q-th row stores decomposition for T-th component |
//| of the vector-valued function |
//| Tbl[Q,0] = X[i] |
//| Tbl[Q,1] = X[i+1] |
//| Tbl[Q,2] = Y[j] |
//| Tbl[Q,3] = Y[j+1] |
//| Tbl[Q,4] = Z[k] |
//| Tbl[Q,5] = Z[k+1] |
//| Tbl[Q,6] = C000 |
//| Tbl[Q,7] = C100 |
//| Tbl[Q,8] = C010 |
//| Tbl[Q,9] = C110 |
//| Tbl[Q,10]= C001 |
//| Tbl[Q,11]= C101 |
//| Tbl[Q,12]= C011 |
//| Tbl[Q,13]= C111 |
//| On each grid square spline is equals to: |
//| S(x) = SUM(c[i,j,k]*(x^i)*(y^j)*(z^k), i=0..1, j=0..1, k=0..1) |
//| t = x-x[j] |
//| u = y-y[i] |
//| v = z-z[k] |
//| NOTE: format of Tbl is given for SType=1. Future versions of |
//| ALGLIB can use different formats for different values of |
//| SType. |
//+------------------------------------------------------------------+
void CAlglib::Spline3DUnpackV(CSpline3DInterpolant &c,int &n,int &m,
                              int &l,int &d,int &stype,
                              CMatrixDouble &tbl)
  {
//--- initialization
//--- zero the scalar output references before delegating, so callers never
//--- observe stale values; this matches the convention used by the 2D
//--- counterpart Spline2DUnpack() in this file
   n=0;
   m=0;
   l=0;
   d=0;
   stype=0;
//--- function call
   CSpline3D::Spline3DUnpackV(c,n,m,l,d,stype,tbl);
  }
//+------------------------------------------------------------------+
//| This function serializes data structure to string. |
//| Important properties of s_out: |
//| * it contains alphanumeric characters, dots, underscores, minus |
//| signs |
//| * these symbols are grouped into words, which are separated by |
//| spaces and Windows-style (CR+LF) newlines |
//| * although serializer uses spaces and CR+LF as separators, you |
//| can replace any separator character by arbitrary combination |
//| of spaces, tabs, Windows or Unix newlines. It allows flexible |
//| reformatting of the string in case you want to include it into |
//| text or XML file. But you should not insert separators into the|
//| middle of the "words" nor you should change case of letters. |
//| * s_out can be freely moved between 32-bit and 64-bit systems, |
//| little and big endian machines, and so on. You can serialize |
//| structure on 32-bit machine and unserialize it on 64-bit one |
//| (or vice versa), or serialize it on SPARC and unserialize on |
//| x86. You can also serialize it in C# version of ALGLIB and |
//| unserialize in C++ one, and vice versa. |
//+------------------------------------------------------------------+
void CAlglib::RBFSerialize(CRBFModel &obj,string &s_out)
  {
//--- serializer object
   CSerializer s;
//--- pass 1: allocation pass over the model (Alloc_Start + RBFAlloc),
//--- presumably sizing the serializer's buffers - standard ALGLIB two-phase
//--- serialization scheme
   s.Alloc_Start();
//--- function call
   CRBF::RBFAlloc(s,obj);
//--- pass 2: actual serialization into string form
   s.SStart_Str();
   CRBF::RBFSerialize(s,obj);
   s.Stop();
//--- copy serialized text to the output parameter
   s_out=s.Get_String();
  }
//+------------------------------------------------------------------+
//| This function unserializes data structure from string. |
//+------------------------------------------------------------------+
void CAlglib::RBFUnserialize(string s_in,CRBFModel &obj)
  {
//--- serializer object
   CSerializer s;
//--- start reading from the input string
   s.UStart_Str(s_in);
//--- reconstruct the RBF model from the stream
   CRBF::RBFUnserialize(s,obj);
   s.Stop();
  }
//+------------------------------------------------------------------+
//| This function creates RBF model for a scalar(NY = 1) or vector |
//| (NY > 1) function in a NX - dimensional space(NX >= 1). |
//| Newly created model is empty. It can be used for interpolation |
//| right after creation, but it just returns zeros. You have to add |
//| points to the model, tune interpolation settings, and then call |
//| model construction function RBFBuildModel() which will update |
//| model according to your specification. |
//| USAGE: |
//| 1. User creates model with RBFCreate() |
//| 2. User adds dataset with RBFSetPoints() or |
//| RBFSetPointsAndScales() |
//| 3. User selects RBF solver by calling: |
//| * RBFSetAlgoHierarchical() - for a HRBF solver, a |
//| hierarchical large - scale Gaussian RBFs (works well for |
//| uniformly distributed point clouds, but may fail when the |
//| data are non-uniform; use other solvers below in such |
//| cases) |
//| * RBFSetAlgoThinPlateSpline() - for a large - scale DDM-RBF |
//| solver with thin plate spline basis function being used |
//| * RBFSetAlgoBiharmonic() - for a large-scale DDM-RBF solver |
//| with biharmonic basis function being used |
//| * RBFSetAlgoMultiQuadricAuto() - for a large-scale DDM-RBF |
//| solver with multiquadric basis function being used |
//| (automatic selection of the scale parameter Alpha) |
//| * RBFSetAlgoMultiQuadricManual() - for a large-scale DDM-RBF|
//| solver with multiquadric basis function being used (manual|
//| selection of the scale parameter Alpha) |
//| 4.(OPTIONAL) User chooses polynomial term by calling: |
//| * RBFLinTerm() to set linear term (default) |
//| * RBFConstTerm() to set constant term |
//| * RBFZeroTerm() to set zero term |
//| 5. User calls RBFBuildModel() function which rebuilds model |
//| according to the specification |
//| INPUT PARAMETERS: |
//| NX - dimension of the space, NX >= 1 |
//| NY - function dimension, NY >= 1 |
//| OUTPUT PARAMETERS: |
//| S - RBF model(initially equals to zero) |
//| NOTE 1: memory requirements. RBF models require amount of memory |
//| which is proportional to the number of data points. Some |
//| additional memory is allocated during model construction,|
//| but most of this memory is freed after the model |
//| coefficients are calculated. Amount of this additional |
//| memory depends on model construction algorithm being used|
//+------------------------------------------------------------------+
void CAlglib::RBFCreate(int nx,int ny,CRBFModel &s)
  {
//--- construct an empty NX-dimensional, NY-valued RBF model;
//--- all real work is delegated to the computational core
   CRBF::RBFCreate(nx,ny,s);
  }
//+------------------------------------------------------------------+
//| This function creates buffer structure which can be used to |
//| perform parallel RBF model evaluations (with one RBF model |
//| instance being used from multiple threads, as long as different |
//| threads use different instances of the buffer). |
//| This buffer object can be used with RBFTSCalcBuf() function (here|
//| "ts" stands for "thread-safe", "buf" is a suffix which denotes |
//| function which reuses previously allocated output space). |
//| A buffer creation function (this function) is also thread-safe. |
//| I.e. you may safely create multiple buffers for the same RBF |
//| model from multiple threads. |
//| NOTE: the buffer object is just a collection of several |
//| preallocated dynamic arrays and precomputed values. If you |
//| delete its "parent" RBF model when the buffer is still |
//| alive, nothing bad will happen (no dangling pointers or |
//| resource leaks). The buffer will simply become useless. |
//| How to use it: |
//| * create RBF model structure with RBFCreate() |
//| * load data, tune parameters |
//| * call RBFBuildModel() |
//| * call RBFCreateCalcBuffer(), once per thread working with RBF |
//| model (you should call this function only AFTER call to |
//| RBFBuildModel(), see below for more information) |
//| * call RBFTSCalcBuf() from different threads, with each thread |
//| working with its own copy of buffer object. |
//| * it is recommended to reuse buffer as much as possible because|
//| buffer creation involves allocation of several large dynamic |
//| arrays. It is a huge waste of resource to use it just once. |
//| INPUT PARAMETERS: |
//| S - RBF model |
//| OUTPUT PARAMETERS: |
//| Buf - external buffer. |
//| IMPORTANT: buffer object should be used only with RBF model |
//| object which was used to initialize buffer. Any |
//| attempt to use buffer with different object is |
//| dangerous - you may get memory violation error because|
//| sizes of internal arrays do not fit to dimensions of |
//| RBF structure. |
//| IMPORTANT: you should call this function only for model which was|
//| built with RBFBuildModel() function, after successful |
//| invocation of RBFBuildModel(). Sizes of some |
//| internal structures are determined only after model is|
//| built, so buffer object created before model |
//| construction stage will be useless (and any attempt to|
//| use it will result in exception). |
//+------------------------------------------------------------------+
void CAlglib::RBFCreateCalcBuffer(CRBFModel &s,CRBFCalcBuffer &buf)
  {
//--- prepare a per-thread evaluation buffer for model S;
//--- forwarded verbatim to the computational core
   CRBF::RBFCreateCalcBuffer(s,buf);
  }
//+------------------------------------------------------------------+
//| This function adds dataset. |
//| This function overrides results of the previous calls, i.e. |
//| multiple calls of this function will result in only the last set |
//| being added. |
//| IMPORTANT: ALGLIB version 3.11 and later allows you to specify a |
//| set of per-dimension scales. Interpolation radii are |
//| multiplied by the scale vector. It may be useful if |
//| you have mixed spatio - temporal data (say, a set of |
//| 3D slices recorded at different times). You should |
//| call RBFSetPointsAndScales() function to use this |
//| feature. |
//| INPUT PARAMETERS: |
//| S - RBF model, initialized by RBFCreate() call. |
//| XY - points, array[N, NX + NY]. One row corresponds to |
//| one point in the dataset. First NX elements are |
//| coordinates, next NY elements are function values. |
//| Array may be larger than specified, in this case |
//| only leading [N, NX+NY] elements will be used. |
//| N - number of points in the dataset |
//| After you've added dataset and (optionally) tuned algorithm |
//| settings you should call RBFBuildModel() in order to build a |
//| model for you. |
//| NOTE: dataset added by this function is not saved during model |
//| serialization. MODEL ITSELF is serialized, but data used |
//| to build it are not. |
//| So, if you 1) add dataset to empty RBF model, 2) serialize and |
//| unserialize it, then you will get an empty RBF model with no |
//| dataset being attached. |
//| From the other side, if you call RBFBuildModel() between(1) and |
//| (2), then after(2) you will get your fully constructed RBF model-|
//| but again with no dataset attached, so subsequent calls to |
//| RBFBuildModel() will produce empty model. |
//+------------------------------------------------------------------+
void CAlglib::RBFSetPoints(CRBFModel &s,CMatrixDouble &xy,int n)
  {
//--- attach the N-point dataset XY to model S (core call)
   CRBF::RBFSetPoints(s,xy,n);
  }
//+------------------------------------------------------------------+
//| Same as RBFSetPoints(), but the number of points N is deduced    |
//| automatically from the number of rows of the XY matrix.          |
//+------------------------------------------------------------------+
void CAlglib::RBFSetPoints(CRBFModel &s,CMatrixDouble &xy)
  {
//--- convenience overload: the dataset size is taken directly from
//--- the row count of XY and forwarded to the full version
   CRBF::RBFSetPoints(s,xy,CAp::Rows(xy));
  }
//+------------------------------------------------------------------+
//| This function adds dataset and a vector of per-dimension scales. |
//| It may be useful if you have mixed spatio - temporal data - say, |
//| a set of 3D slices recorded at different times. Such data |
//| typically require different RBF radii for spatial and temporal |
//| dimensions. ALGLIB solves this problem by specifying single RBF |
//| radius, which is (optionally) multiplied by the scale vector. |
//| This function overrides results of the previous calls, i.e. |
//| multiple calls of this function will result in only the last set |
//| being added. |
//| IMPORTANT: only modern RBF algorithms support variable scaling. |
//| Legacy algorithms like RBF-ML or QNN algorithms will |
//|                  result in -3 completion code being returned     |
//| algorithm). |
//| INPUT PARAMETERS: |
//| R - RBF model, initialized by RBFCreate() call. |
//| XY - points, array[N, NX + NY]. One row corresponds to |
//| one point in the dataset. First NX elements are |
//| coordinates, next NY elements are function values. |
//| Array may be larger than specified, in this case |
//| only leading [N, NX+NY] elements will be used. |
//| N - number of points in the dataset |
//| S - array[NX], scale vector, S[i] > 0. |
//| After you've added dataset and (optionally) tuned algorithm |
//| settings you should call RBFBuildModel() in order to build a |
//| model for you. |
//| NOTE: dataset added by this function is not saved during model |
//| serialization. MODEL ITSELF is serialized, but data used |
//| to build it are not. |
//| So, if you 1) add dataset to empty RBF model, 2) serialize and |
//| unserialize it, then you will get an empty RBF model with no |
//| dataset being attached. |
//| From the other side, if you call RBFBuildModel() between(1) and |
//| (2), then after(2) you will get your fully constructed RBF model-|
//| but again with no dataset attached, so subsequent calls to |
//| RBFBuildModel() will produce empty model. |
//+------------------------------------------------------------------+
void CAlglib::RBFSetPointsAndScales(CRBFModel &r,CMatrixDouble &xy,
                                    int n,CRowDouble &s)
  {
//--- attach dataset XY (N points) together with per-dimension
//--- scale vector S; forwarded to the computational core
   CRBF::RBFSetPointsAndScales(r,xy,n,s);
  }
//+------------------------------------------------------------------+
//| Same as RBFSetPointsAndScales(), but the number of points N is   |
//| deduced automatically from the number of rows of the XY matrix.  |
//+------------------------------------------------------------------+
void CAlglib::RBFSetPointsAndScales(CRBFModel &r,CMatrixDouble &xy,
                                    CRowDouble &s)
  {
//--- convenience overload: dataset size is the row count of XY
   CRBF::RBFSetPointsAndScales(r,xy,CAp::Rows(xy),s);
  }
//+------------------------------------------------------------------+
//| DEPRECATED: this function is deprecated. ALGLIB includes new RBF |
//| model algorithms: |
//| DDM - RBF (since version 3.19) and HRBF (since version 3.11). |
//+------------------------------------------------------------------+
void CAlglib::RBFSetAlgoQNN(CRBFModel &s,double q=1.0,double z=5.0)
  {
//--- select the legacy QNN solver (deprecated; kept for backward
//--- compatibility) with Q/Z tuning parameters
   CRBF::RBFSetAlgoQNN(s,q,z);
  }
//+------------------------------------------------------------------+
//| DEPRECATED: this function is deprecated. ALGLIB includes new RBF |
//| model algorithms: |
//| DDM - RBF(since version 3.19) and HRBF (since version 3.11). |
//+------------------------------------------------------------------+
void CAlglib::RBFSetAlgoMultilayer(CRBFModel &s,double rbase,int nlayers,double lambdav=0.01)
  {
//--- select the legacy RBF-ML solver (deprecated; kept for backward
//--- compatibility); parameters are passed through unchanged
   CRBF::RBFSetAlgoMultilayer(s,rbase,nlayers,lambdav);
  }
//+------------------------------------------------------------------+
//| This function chooses HRBF solver, a 2nd version of ALGLIB RBFs. |
//| This algorithm is called Hierarchical RBF. It similar to its |
//| previous incarnation, RBF-ML, i.e. it also builds a sequence of |
//| models with decreasing radii. However, it uses more economical |
//| way of building upper layers (ones with large radii), which |
//| results in faster model construction and evaluation, as well as |
//| smaller memory footprint during construction. |
//| This algorithm has following important features: |
//| * ability to handle millions of points |
//| * controllable smoothing via nonlinearity penalization |
//| * support for specification of per - dimensional radii via |
//| scale vector, which is set by means of RBFSetPointsAndScales |
//| function. This feature is useful if you solve spatio - |
//| temporal interpolation problems, where different radii are |
//| required for spatial and temporal dimensions. |
//| Running times are roughly proportional to: |
//| * N*log(N) |
//| * NLayers - for the model construction |
//| * N*NLayers - for the model evaluation |
//| You may see that running time does not depend on search radius or|
//| points density, just on the number of layers in the hierarchy. |
//| INPUT PARAMETERS: |
//| S - RBF model, initialized by RBFCreate() call |
//| RBase - RBase parameter, RBase > 0 |
//| NLayers - NLayers parameter, NLayers > 0, recommended value |
//| to start with - about 5. |
//| LambdaNS - >= 0, nonlinearity penalty coefficient, negative |
//| values are not allowed. This parameter adds |
//| controllable smoothing to the problem, which may |
//| reduce noise. Specification of non-zero lambda |
//| means that in addition to fitting error solver will|
//|                  also minimize LambdaNS*|S''(x)|^2 (appropriately|
//|                  generalized to multiple dimensions).            |
//| Specification of exactly zero value means that no penalty is |
//| added (we do not even evaluate matrix of second derivatives which|
//| is necessary for smoothing). |
//| Calculation of nonlinearity penalty is costly - it results in |
//| several - fold increase of model construction time. Evaluation |
//| time remains the same. |
//| Optimal lambda is problem - dependent and requires trial and |
//| error. Good value to start from is 1e-5...1e-6, which corresponds|
//| to slightly noticeable smoothing of the function. Value 1e-2 |
//| usually means that quite heavy smoothing is applied. |
//| TUNING ALGORITHM |
//| In order to use this algorithm you have to choose three |
//| parameters: |
//| * initial radius RBase |
//| * number of layers in the model NLayers |
//| * penalty coefficient LambdaNS |
//| Initial radius is easy to choose - you can pick any number |
//| several times larger than the average distance between points. |
//| Algorithm won't break down if you choose radius which is too |
//| large (model construction time will increase, but model will be |
//| built correctly). |
//| Choose such number of layers that RLast = RBase / 2^(NLayers - 1)|
//| (radius used by the last layer) will be smaller than the typical |
//| distance between points. In case model error is too large, you |
//| can increase number of layers. Having more layers will make model|
//| construction and evaluation proportionally slower, but it will |
//| allow you to have model which precisely fits your data. From the |
//| other side, if you want to suppress noise, you can DECREASE |
//| number of layers to make your model less flexible (or specify |
//| non-zero LambdaNS). |
//| TYPICAL ERRORS: |
//| 1. Using too small number of layers - RBF models with large |
//| radius are not flexible enough to reproduce small variations|
//| in the target function. You need many layers with different |
//| radii, from large to small, in order to have good model. |
//| 2. Using initial radius which is too small. You will get model |
//| with "holes" in the areas which are too far away from |
//| interpolation centers. However, algorithm will work |
//| correctly (and quickly) in this case. |
//+------------------------------------------------------------------+
void CAlglib::RBFSetAlgoHierarchical(CRBFModel &s,double rbase,
                                     int nlayers,double lambdans)
  {
//--- select the HRBF (hierarchical Gaussian RBF) solver with the
//--- given base radius, layer count and nonlinearity penalty
   CRBF::RBFSetAlgoHierarchical(s,rbase,nlayers,lambdans);
  }
//+------------------------------------------------------------------+
//| This function chooses a thin plate spline DDM-RBF solver, a fast |
//| RBF solver with f(r) = r ^ 2 * ln(r) basis function. |
//| This algorithm has following important features: |
//| * easy setup - no tunable parameters |
//| * C1 continuous RBF model (gradient is defined everywhere, but |
//| Hessian is undefined at nodes), high - quality interpolation |
//| * fast model construction algorithm with O(N) memory and O(N^2)|
//| running time requirements. Hundreds of thousands of points |
//| can be handled with this algorithm. |
//| * controllable smoothing via optional nonlinearity penalty |
//| INPUT PARAMETERS: |
//| S - RBF model, initialized by RBFCreate() call |
//| LambdaV - smoothing parameter, LambdaV >= 0, defaults to 0.0:|
//| * LambdaV = 0 means that no smoothing is applied, |
//| i.e. the spline tries to pass through|
//| all dataset points exactly |
//| * LambdaV > 0 means that a smoothing thin plate |
//| spline is built, with larger LambdaV |
//| corresponding to models with less |
//| nonlinearities. Smoothing spline |
//| reproduces target values at nodes |
//| with small error; from the other |
//| side, it is much more stable. |
//| Recommended values: |
//| * 1.0E-6 for minimal stability improving smoothing |
//| * 1.0E-3 a good value to start experiments; first results are |
//| visible |
//| * 1.0 for strong smoothing |
//| IMPORTANT: this model construction algorithm was introduced in |
//| ALGLIB 3.19 and produces models which are INCOMPATIBLE|
//| with previous versions of ALGLIB. You can not |
//| unserialize models produced with this function in |
//| ALGLIB 3.18 or earlier. |
//| NOTE: polyharmonic RBFs, including thin plate splines, are |
//| somewhat slower than compactly supported RBFs built with |
//| HRBF algorithm due to the fact that non-compact basis |
//| function does not vanish far away from the nodes. From the |
//| other side, polyharmonic RBFs often produce much better |
//| results than HRBFs. |
//| NOTE: this algorithm supports specification of per-dimensional |
//| radii via scale vector, which is set by means of |
//| RBFSetPointsAndScales() function. This feature is useful if|
//| you solve spatio-temporal interpolation problems where |
//| different radii are required for spatial and temporal |
//| dimensions. |
//+------------------------------------------------------------------+
void CAlglib::RBFSetAlgoThinPlateSpline(CRBFModel &s,double lambdav=0.0)
  {
//--- select the DDM-RBF solver with thin plate spline basis,
//--- optionally smoothed by LambdaV
   CRBF::RBFSetAlgoThinPlateSpline(s,lambdav);
  }
//+------------------------------------------------------------------+
//| This function chooses a multiquadric DDM - RBF solver, a fast RBF|
//| solver with f(r) = sqrt(r ^ 2 + Alpha ^ 2) as a basis function, |
//| with manual choice of the scale parameter Alpha. |
//| This algorithm has following important features: |
//| * C2 continuous RBF model(when Alpha > 0 is used; for Alpha = 0|
//| the model is merely C0 continuous) |
//| * fast model construction algorithm with O(N) memory and O(N^2)|
//| running time requirements. Hundreds of thousands of points |
//| can be handled with this algorithm. |
//| * controllable smoothing via optional nonlinearity penalty |
//| One important point is that this algorithm includes tunable |
//| parameter Alpha, which should be carefully chosen. Selecting too |
//| large value will result in extremely badly conditioned problems |
//| (interpolation accuracy may degrade up to complete breakdown) |
//| whilst selecting too small value may produce models that are |
//| precise but nearly nonsmooth at the nodes. |
//| Good value to start from is mean distance between nodes. |
//| Generally, choosing too small Alpha is better than choosing too |
//| large - in the former case you still have model that reproduces |
//| target values at the nodes. |
//| In most cases, better option is to choose good Alpha |
//| automatically - it is done by another version of the same |
//| algorithm that is activated by calling RBFSetAlgoMultiQuadricAuto|
//| method. |
//| INPUT PARAMETERS: |
//| S - RBF model, initialized by RBFCreate() call |
//| Alpha - basis function parameter, Alpha >= 0: |
//| * Alpha > 0 means that multiquadric algorithm is |
//| used which produces C2-continuous RBF |
//| model |
//| * Alpha = 0 means that the multiquadric kernel |
//| effectively becomes a biharmonic one: |
//| f = r. As a result, the model becomes |
//| nonsmooth at nodes, and hence is C0 |
//| continuous |
//| LambdaV - smoothing parameter, LambdaV >= 0, defaults to 0.0:|
//| * LambdaV = 0 means that no smoothing is applied, |
//| i.e. the spline tries to pass through|
//| all dataset points exactly |
//| * LambdaV > 0 means that a multiquadric spline is |
//| built with larger LambdaV |
//| corresponding to models with less |
//| nonlinearities. Smoothing spline |
//| reproduces target values at nodes |
//| with small error; from the other |
//| side, it is much more stable. |
//| Recommended values: |
//| * 1.0E-6 for minimal stability improving smoothing |
//| * 1.0E-3 a good value to start experiments; first results are |
//| visible |
//| * 1.0 for strong smoothing |
//| IMPORTANT: this model construction algorithm was introduced in |
//| ALGLIB 3.19 and produces models which are INCOMPATIBLE|
//| with previous versions of ALGLIB. You can not |
//| unserialize models produced with this function in |
//| ALGLIB 3.18 or earlier. |
//| NOTE: polyharmonic RBFs, including thin plate splines, are |
//| somewhat slower than compactly supported RBFs built with |
//| HRBF algorithm due to the fact that non-compact basis |
//| function does not vanish far away from the nodes. From the |
//| other side, polyharmonic RBFs often produce much better |
//| results than HRBFs. |
//| NOTE: this algorithm supports specification of per-dimensional |
//| radii via scale vector, which is set by means of |
//| RBFSetPointsAndScales() function. This feature is useful if|
//| you solve spatio-temporal interpolation problems where |
//| different radii are required for spatial and temporal |
//| dimensions. |
//+------------------------------------------------------------------+
void CAlglib::RBFSetAlgoMultiQuadricManual(CRBFModel &s,double alpha,
                                           double lambdav=0.0)
  {
//--- select the multiquadric DDM-RBF solver with user-supplied
//--- scale parameter Alpha and optional smoothing LambdaV
   CRBF::RBFSetAlgoMultiQuadricManual(s,alpha,lambdav);
  }
//+------------------------------------------------------------------+
//| This function chooses a multiquadric DDM-RBF solver, a fast RBF |
//| solver with f(r) = sqrt(r ^ 2 + Alpha ^ 2) as a basis function, |
//| with Alpha being automatically determined. |
//| This algorithm has following important features: |
//| * easy setup - no need to tune Alpha, good value is |
//| automatically assigned |
//| * C2 continuous RBF model |
//| * fast model construction algorithm with O(N) memory and O(N^2)|
//| running time requirements. Hundreds of thousands of points |
//| can be handled with this algorithm. |
//| * controllable smoothing via optional nonlinearity penalty |
//| This algorithm automatically selects Alpha as a mean distance to |
//| the nearest neighbor(ignoring neighbors that are too close). |
//| INPUT PARAMETERS: |
//| S - RBF model, initialized by RBFCreate() call |
//| LambdaV - smoothing parameter, LambdaV >= 0, defaults to 0.0:|
//| * LambdaV = 0 means that no smoothing is applied, |
//| i.e. the spline tries to pass through|
//| all dataset points exactly |
//| * LambdaV > 0 means that a multiquadric spline is |
//| built with larger LambdaV |
//| corresponding to models with less |
//| nonlinearities. Smoothing spline |
//| reproduces target values at nodes |
//| with small error; from the other |
//| side, it is much more stable. |
//| Recommended values: |
//| * 1.0E-6 for minimal stability improving smoothing |
//| * 1.0E-3 a good value to start experiments; first results are |
//| visible |
//| * 1.0 for strong smoothing |
//| IMPORTANT: this model construction algorithm was introduced in |
//| ALGLIB 3.19 and produces models which are INCOMPATIBLE|
//| with previous versions of ALGLIB. You can not |
//| unserialize models produced with this function in |
//| ALGLIB 3.18 or earlier. |
//| NOTE: polyharmonic RBFs, including thin plate splines, are |
//| somewhat slower than compactly supported RBFs built with |
//| HRBF algorithm due to the fact that non-compact basis |
//| function does not vanish far away from the nodes. From the |
//| other side, polyharmonic RBFs often produce much better |
//| results than HRBFs. |
//| NOTE: this algorithm supports specification of per-dimensional |
//| radii via scale vector, which is set by means of |
//| RBFSetPointsAndScales() function. This feature is useful if|
//| you solve spatio - temporal interpolation problems where |
//| different radii are required for spatial and temporal |
//| dimensions. |
//+------------------------------------------------------------------+
void CAlglib::RBFSetAlgoMultiQuadricAuto(CRBFModel &s,double lambdav=0.0)
  {
//--- select the multiquadric DDM-RBF solver with automatically
//--- chosen Alpha; LambdaV controls optional smoothing
   CRBF::RBFSetAlgoMultiQuadricAuto(s,lambdav);
  }
//+------------------------------------------------------------------+
//| This function chooses a biharmonic DDM-RBF solver, a fast RBF |
//| solver with f(r) = r as a basis function. |
//| This algorithm has following important features: |
//| * no tunable parameters |
//| * C0 continuous RBF model (the model has discontinuous |
//| derivatives at the interpolation nodes) |
//| * fast model construction algorithm with O(N) memory and O(N^2)|
//| running time requirements. Hundreds of thousands of points |
//| can be handled with this algorithm. |
//| * controllable smoothing via optional nonlinearity penalty |
//| INPUT PARAMETERS: |
//| S - RBF model, initialized by RBFCreate() call |
//| LambdaV - smoothing parameter, LambdaV >= 0, defaults to 0.0:|
//| * LambdaV = 0 means that no smoothing is applied, |
//| i.e. the spline tries to pass through|
//| all dataset points exactly |
//| * LambdaV > 0 means that a multiquadric spline is |
//| built with larger LambdaV |
//| corresponding to models with less |
//| nonlinearities. Smoothing spline |
//| reproduces target values at nodes |
//| with small error; from the other |
//| side, it is much more stable. |
//| Recommended values: |
//| * 1.0E-6 for minimal stability improving smoothing |
//| * 1.0E-3 a good value to start experiments; first results are |
//| visible |
//| * 1.0 for strong smoothing |
//| IMPORTANT: this model construction algorithm was introduced in |
//| ALGLIB 3.19 and produces models which are INCOMPATIBLE|
//| with previous versions of ALGLIB. You can not |
//| unserialize models produced with this function in |
//| ALGLIB 3.18 or earlier. |
//| NOTE: polyharmonic RBFs, including thin plate splines, are |
//| somewhat slower than compactly supported RBFs built with |
//| HRBF algorithm due to the fact that non-compact basis |
//| function does not vanish far away from the nodes. From the |
//| other side, polyharmonic RBFs often produce much better |
//| results than HRBFs. |
//| NOTE: this algorithm supports specification of per-dimensional |
//| radii via scale vector, which is set by means of |
//| RBFSetPointsAndScales() function. This feature is useful if|
//| you solve spatio - temporal interpolation problems where |
//| different radii are required for spatial and temporal |
//| dimensions. |
//+------------------------------------------------------------------+
void CAlglib::RBFSetAlgoBiharmonic(CRBFModel &s,double lambdav=0.0)
  {
//--- select the biharmonic (f(r)=r) DDM-RBF solver with optional
//--- smoothing parameter LambdaV
   CRBF::RBFSetAlgoBiharmonic(s,lambdav);
  }
//+------------------------------------------------------------------+
//| This function sets linear term (model is a sum of radial basis |
//| functions plus linear polynomial). This function won't have |
//| effect until next call to RBFBuildModel(). |
//| Using linear term is a default option and it is the best one-it |
//| provides best convergence guarantees for all RBF model types: |
//| legacy RBF-QNN and RBF-ML, Gaussian HRBFs and all types of |
//| DDM-RBF models. |
//| Other options, like constant or zero term, work for HRBFs, almost|
//| always work for DDM-RBFs but provide no stability guarantees in |
//| the latter case (e.g. the solver may fail on some carefully |
//| prepared problems). |
//| INPUT PARAMETERS: |
//| S - RBF model, initialized by RBFCreate() call |
//+------------------------------------------------------------------+
void CAlglib::RBFSetLinTerm(CRBFModel &s)
  {
//--- request a linear polynomial term (the default and most
//--- stable option); takes effect on the next RBFBuildModel()
   CRBF::RBFSetLinTerm(s);
  }
//+------------------------------------------------------------------+
//| This function sets constant term (model is a sum of radial basis |
//| functions plus constant). This function won't have effect until |
//| next call to RBFBuildModel(). |
//| IMPORTANT: thin plate splines require polynomial term to be |
//| linear, not constant, in order to provide |
//| interpolation guarantees. Although failures are |
//| exceptionally rare, some small toy problems may result|
//| in degenerate linear systems. Thus, it is advised to |
//| use linear term when one fits data with TPS. |
//| INPUT PARAMETERS: |
//| S - RBF model, initialized by RBFCreate() call |
//+------------------------------------------------------------------+
void CAlglib::RBFSetConstTerm(CRBFModel &s)
  {
//--- request a constant polynomial term; takes effect on the
//--- next RBFBuildModel() call
   CRBF::RBFSetConstTerm(s);
  }
//+------------------------------------------------------------------+
//| This function sets zero term (model is a sum of radial basis |
//| functions without polynomial term). This function won't have |
//| effect until next call to RBFBuildModel(). |
//| IMPORTANT: only Gaussian RBFs(HRBF algorithm) provide |
//| interpolation guarantees when no polynomial term is |
//| used. Most other RBFs, including biharmonic splines, |
//| thin plate splines and multiquadrics, require at least|
//| constant term(biharmonic and multiquadric) or linear |
//| one (thin plate splines) in order to guarantee |
//| non-degeneracy of linear systems being solved. |
//| Although failures are exceptionally rare, some small toy problems|
//| still may result in degenerate linear systems. Thus, it is |
//| advised to use constant / linear term, unless one is 100 % sure |
//| that he needs zero term. |
//| INPUT PARAMETERS: |
//| S - RBF model, initialized by RBFCreate() call |
//+------------------------------------------------------------------+
void CAlglib::RBFSetZeroTerm(CRBFModel &s)
  {
//--- request no polynomial term at all; takes effect on the
//--- next RBFBuildModel() call
   CRBF::RBFSetZeroTerm(s);
  }
//+------------------------------------------------------------------+
//| This function sets basis function type, which can be: |
//| * 0 for classic Gaussian |
//| * 1 for fast and compact bell - like basis function, which |
//| becomes exactly zero at distance equal to 3 * R (default |
//| option). |
//| INPUT PARAMETERS: |
//| S - RBF model, initialized by RBFCreate() call |
//| BF - basis function type: |
//| * 0 - classic Gaussian |
//| * 1 - fast and compact one |
//+------------------------------------------------------------------+
void CAlglib::RBFSetV2BF(CRBFModel &s,int bf)
  {
//--- choose the basis function type for the version-2 (HRBF)
//--- constructor: 0 = classic Gaussian, 1 = compact bell-like
   CRBF::RBFSetV2BF(s,bf);
  }
//+------------------------------------------------------------------+
//| This function sets stopping criteria of the underlying linear |
//| solver for hierarchical (version 2) RBF constructor. |
//| INPUT PARAMETERS: |
//| S - RBF model, initialized by RBFCreate() call |
//| MaxIts - this criterion will stop algorithm after MaxIts |
//| iterations. Typically a few hundreds iterations is |
//| required, with 400 being a good default value to |
//| start experimentation. Zero value means that |
//| default value will be selected. |
//+------------------------------------------------------------------+
void CAlglib::RBFSetV2Its(CRBFModel &s,int maxits)
  {
//--- set the iteration limit of the linear solver used by the
//--- version-2 (HRBF) constructor; 0 selects the default
   CRBF::RBFSetV2Its(s,maxits);
  }
//+------------------------------------------------------------------+
//| This function sets support radius parameter of hierarchical |
//| (version 2) RBF constructor. |
//| Hierarchical RBF model achieves great speed-up by removing from |
//| the model excessive (too dense) nodes. Say, if you have RBF |
//| radius equal to 1 meter, and two nodes are just 1 millimeter |
//| apart, you may remove one of them without reducing model quality.|
//| Support radius parameter is used to justify which points need |
//| removal, and which do not. If two points are less than |
//| SUPPORT_R*CUR_RADIUS units of distance apart, one of them is |
//| removed from the model. The larger support radius is, the faster |
//| model construction AND evaluation are. However, too large values |
//| result in "bumpy" models. |
//| INPUT PARAMETERS: |
//| S - RBF model, initialized by RBFCreate() call |
//| R - support radius coefficient, >= 0. |
//| Recommended values are [0.1, 0.4] range, with 0.1 being default |
//| value. |
//+------------------------------------------------------------------+
void CAlglib::RBFSetV2SupportR(CRBFModel &s,double r)
  {
//--- set the support radius coefficient (node-thinning threshold)
//--- of the version-2 (HRBF) constructor
   CRBF::RBFSetV2SupportR(s,r);
  }
//+------------------------------------------------------------------+
//| This function builds RBF model and returns report (contains some |
//| information which can be used for evaluation of the algorithm |
//| properties). |
//| Call to this function modifies RBF model by calculating its |
//| centers/radii/weights and saving them into RBFModel structure. |
//| Initially RBFModel contain zero coefficients, but after call to |
//| this function we will have coefficients which were calculated in |
//| order to fit our dataset. |
//| After you called this function you can call RBFCalc(), |
//| RBFGridCalc() and other model calculation functions. |
//| INPUT PARAMETERS: |
//| S - RBF model, initialized by RBFCreate() call |
//| Rep - report: |
//| * Rep.TerminationType: |
//| * -5 - non-distinct basis function centers were |
//| detected, interpolation aborted; only QNN|
//| returns this error code, other algorithms|
//| can handle non-distinct nodes. |
//| * -4 - nonconvergence of the internal SVD solver|
//| * -3 incorrect model construction algorithm |
//| was chosen: QNN or RBF-ML, combined with |
//| one of the incompatible features: |
//| * NX = 1 or NX > 3 |
//| * points with per - dimension scales. |
//| * 1 - successful termination |
//| * 8 - a termination request was submitted via |
//| RBFRequestTermination() function. |
//| Fields which are set only by modern RBF solvers (hierarchical or |
//| nonnegative; older solvers like QNN and ML initialize these |
//| fields by NANs): |
//| * rep.m_rmserror - root-mean-square error at nodes |
//| * rep.m_maxerror - maximum error at nodes |
//| Fields are used for debugging purposes: |
//| * Rep.IterationsCount - iterations count of the LSQR solver |
//| * Rep.NMV - number of matrix - vector products |
//| * Rep.ARows - rows count for the system matrix |
//| * Rep.ACols - columns count for the system matrix |
//| * Rep.ANNZ - number of significantly non - zero elements |
//| (elements above some algorithm - determined |
//| threshold) |
//| NOTE: failure to build model will leave current state of the |
//| structure unchanged. |
//+------------------------------------------------------------------+
void CAlglib::RBFBuildModel(CRBFModel &s,CRBFReport &rep)
{
//--- thin wrapper: delegate model construction to the CRBF computational core
CRBF::RBFBuildModel(s,rep);
}
//+------------------------------------------------------------------+
//| This function calculates values of the 1-dimensional RBF model |
//| with scalar output (NY = 1) at the given point. |
//| IMPORTANT: this function works only with modern (hierarchical) |
//| RBFs. It can not be used with legacy (version 1) RBFs |
//| because older RBF code does not support 1-dimensional |
//| models. |
//| IMPORTANT: THIS FUNCTION IS THREAD - UNSAFE. It uses fields of |
//| CRBFModel as temporary arrays, i.e. it is impossible |
//| to perform parallel evaluation on the same CRBFModel |
//| object (parallel calls of this function for |
//| independent CRBFModel objects are safe). If you want |
//| to perform parallel model evaluation from multiple |
//| threads, use RBFTSCalcBuf() with per-thread buffer |
//| object. |
//| This function returns 0.0 when: |
//| * the model is not initialized |
//| * NX<>1 |
//| * NY<>1 |
//| INPUT PARAMETERS: |
//| S - RBF model |
//| X0 - X - coordinate, finite number |
//| RESULT: |
//| value of the model or 0.0 (as defined above) |
//+------------------------------------------------------------------+
double CAlglib::RBFCalc1(CRBFModel &s,double x0)
{
//--- evaluate the 1-dimensional scalar model through the CRBF core;
//--- the core returns 0.0 for an uninitialized model or NX<>1/NY<>1
double value=CRBF::RBFCalc1(s,x0);
//--- hand the computed model value back to the caller
return(value);
}
//+------------------------------------------------------------------+
//| This function calculates values of the 2-dimensional RBF model |
//| with scalar output (NY = 1) at the given point. |
//| IMPORTANT: THIS FUNCTION IS THREAD - UNSAFE. It uses fields of |
//| CRBFModel as temporary arrays, i.e. it is impossible |
//| to perform parallel evaluation on the same CRBFModel |
//| object (parallel calls of this function for |
//| independent CRBFModel objects are safe). If you want |
//| to perform parallel model evaluation from multiple |
//| threads, use RBFTSCalcBuf() with per-thread buffer |
//| object. |
//| This function returns 0.0 when: |
//| * model is not initialized |
//| * NX<>2 |
//| * NY<>1 |
//| INPUT PARAMETERS: |
//| S - RBF model |
//| X0 - first coordinate, finite number |
//| X1 - second coordinate, finite number |
//| RESULT: |
//| value of the model or 0.0 (as defined above) |
//+------------------------------------------------------------------+
double CAlglib::RBFCalc2(CRBFModel &s,double x0,double x1)
{
//--- evaluate the 2-dimensional scalar model through the CRBF core;
//--- the core returns 0.0 for an uninitialized model or NX<>2/NY<>1
double value=CRBF::RBFCalc2(s,x0,x1);
//--- hand the computed model value back to the caller
return(value);
}
//+------------------------------------------------------------------+
//| This function calculates values of the 3-dimensional RBF model |
//| with scalar output (NY = 1) at the given point. |
//| IMPORTANT: THIS FUNCTION IS THREAD - UNSAFE. It uses fields of |
//| CRBFModel as temporary arrays, i.e. it is impossible |
//| to perform parallel evaluation on the same CRBFModel |
//| object (parallel calls of this function for |
//| independent CRBFModel objects are safe). If you want |
//| to perform parallel model evaluation from multiple |
//| threads, use RBFTSCalcBuf() with per-thread buffer |
//| object. |
//| This function returns 0.0 when: |
//| * model is not initialized |
//| * NX<>3 |
//| * NY<>1 |
//| INPUT PARAMETERS: |
//| S - RBF model |
//| X0 - first coordinate, finite number |
//| X1 - second coordinate, finite number |
//| X2 - third coordinate, finite number |
//| RESULT: |
//| value of the model or 0.0 (as defined above) |
//+------------------------------------------------------------------+
double CAlglib::RBFCalc3(CRBFModel &s,double x0,double x1,double x2)
{
//--- evaluate the 3-dimensional scalar model through the CRBF core;
//--- the core returns 0.0 for an uninitialized model or NX<>3/NY<>1
double value=CRBF::RBFCalc3(s,x0,x1,x2);
//--- hand the computed model value back to the caller
return(value);
}
//+------------------------------------------------------------------+
//| This function calculates value and derivatives of the |
//| 1-dimensional RBF model with scalar output (NY = 1) at the given |
//| point. |
//| IMPORTANT: THIS FUNCTION IS THREAD - UNSAFE. It uses fields of |
//| CRBFModel as temporary arrays, i.e. it is impossible |
//| to perform parallel evaluation on the same CRBFModel |
//| object (parallel calls of this function for |
//| independent CRBFModel objects are safe). If you want |
//| to perform parallel model evaluation from multiple |
//| threads, use RBFTSCalcBuf() with per-thread buffer |
//| object. |
//| This function returns 0.0 in Y and/or DY in the following cases: |
//| * the model is not initialized (Y = 0, DY = 0) |
//| * NX<>1 or NY<>1 (Y = 0, DY = 0) |
//| * the gradient is undefined at the trial point. Some basis |
//| functions have discontinuous derivatives at the interpolation|
//| nodes: |
//| * biharmonic splines f = r have no Hessian and no gradient |
//| at the nodes In these cases only DY is set to zero (Y is |
//| still returned) |
//| INPUT PARAMETERS: |
//| S - RBF model |
//| X0 - first coordinate, finite number |
//| OUTPUT PARAMETERS: |
//| Y - value of the model or 0.0 (as defined above) |
//| DY0 - derivative with respect to X0 |
//+------------------------------------------------------------------+
void CAlglib::RBFDiff1(CRBFModel &s,double x0,double &y,double &dy0)
{
//--- thin wrapper: value and X0-derivative are computed by the CRBF core
CRBF::RBFDiff1(s,x0,y,dy0);
}
//+------------------------------------------------------------------+
//| This function calculates value and derivatives of the |
//| 2-dimensional RBF model with scalar output (NY = 1) at the given |
//| point. |
//| IMPORTANT: THIS FUNCTION IS THREAD - UNSAFE. It uses fields of |
//| CRBFModel as temporary arrays, i.e. it is impossible |
//| to perform parallel evaluation on the same CRBFModel |
//| object (parallel calls of this function for |
//| independent CRBFModel objects are safe). If you want |
//| to perform parallel model evaluation from multiple |
//| threads, use RBFTSCalcBuf() with per-thread buffer |
//| object. |
//| This function returns 0.0 in Y and/or DY in the following cases: |
//| * the model is not initialized(Y = 0, DY = 0) |
//| * NX<>2 or NY<>1 (Y=0, DY=0) |
//| * the gradient is undefined at the trial point. Some basis |
//| functions have discontinuous derivatives at the interpolation|
//| nodes: |
//| * biharmonic splines f = r have no Hessian and no gradient at |
//| the nodes In these cases only DY is set to zero (Y is still |
//| returned) |
//| INPUT PARAMETERS: |
//| S - RBF model |
//| X0 - first coordinate, finite number |
//| X1 - second coordinate, finite number |
//| OUTPUT PARAMETERS: |
//| Y - value of the model or 0.0 (as defined above) |
//| DY0 - derivative with respect to X0 |
//| DY1 - derivative with respect to X1 |
//+------------------------------------------------------------------+
void CAlglib::RBFDiff2(CRBFModel &s,double x0,double x1,double &y,
double &dy0,double &dy1)
{
//--- thin wrapper: value and both partial derivatives are computed by the CRBF core
CRBF::RBFDiff2(s,x0,x1,y,dy0,dy1);
}
//+------------------------------------------------------------------+
//| This function calculates value and derivatives of the |
//| 3-dimensional RBF model with scalar output (NY = 1) at the given |
//| point. |
//| IMPORTANT: THIS FUNCTION IS THREAD - UNSAFE. It uses fields of |
//| CRBFModel as temporary arrays, i.e. it is impossible |
//| to perform parallel evaluation on the same CRBFModel |
//| object (parallel calls of this function for |
//| independent CRBFModel objects are safe). If you want |
//| to perform parallel model evaluation from multiple |
//| threads, use RBFTSCalcBuf() with per-thread buffer |
//| object. |
//| This function returns 0.0 in Y and/or DY in the following cases: |
//| * the model is not initialized (Y = 0, DY = 0) |
//| * NX<>3 or NY<>1 (Y = 0, DY = 0) |
//| * the gradient is undefined at the trial point. Some basis |
//| functions have discontinuous derivatives at the interpolation|
//| nodes: |
//| * biharmonic splines f = r have no Hessian and no gradient |
//| at the nodes In these cases only DY is set to zero (Y is |
//| still returned) |
//| INPUT PARAMETERS: |
//| S - RBF model |
//| X0 - first coordinate, finite number |
//| X1 - second coordinate, finite number |
//| X2 - third coordinate, finite number |
//| OUTPUT PARAMETERS: |
//| Y - value of the model or 0.0 (as defined above) |
//| DY0 - derivative with respect to X0 |
//| DY1 - derivative with respect to X1 |
//| DY2 - derivative with respect to X2 |
//+------------------------------------------------------------------+
void CAlglib::RBFDiff3(CRBFModel &s,double x0,double x1,double x2,
double &y,double &dy0,double &dy1,double &dy2)
{
//--- thin wrapper: value and the three partial derivatives are computed by the CRBF core
CRBF::RBFDiff3(s,x0,x1,x2,y,dy0,dy1,dy2);
}
//+------------------------------------------------------------------+
//| This function calculates values of the RBF model at the given |
//| point. |
//| This is general function which can be used for arbitrary NX |
//| (dimension of the space of arguments) and NY (dimension of the |
//| function itself). However when you have NY = 1 you may find more |
//| convenient to use RBFCalc2() or RBFCalc3(). |
//| IMPORTANT: THIS FUNCTION IS THREAD - UNSAFE. It uses fields of |
//| CRBFModel as temporary arrays, i.e. it is impossible |
//| to perform parallel evaluation on the same CRBFModel |
//| object (parallel calls of this function for |
//| independent CRBFModel objects are safe). If you want |
//| to perform parallel model evaluation from multiple |
//| threads, use RBFTSCalcBuf() with per-thread buffer |
//| object. |
//| This function returns 0.0 when model is not initialized. |
//| INPUT PARAMETERS: |
//| S - RBF model |
//| X - coordinates, array[NX]. X may have more than NX |
//| elements, in this case only leading NX will be used|
//| OUTPUT PARAMETERS: |
//| Y - function value, array[NY]. Y is out - parameter and|
//| reallocated after call to this function. In case |
//| you want to reuse previously allocated Y, you may |
//| use RBFCalcBuf(), which reallocates Y only when it |
//| is too small. |
//+------------------------------------------------------------------+
void CAlglib::RBFCalc(CRBFModel &s,CRowDouble &x,CRowDouble &y)
{
//--- thin wrapper: general NX/NY evaluation is delegated to the CRBF core,
//--- which reallocates Y to size NY (see RBFCalcBuf() for the buffered variant)
CRBF::RBFCalc(s,x,y);
}
//+------------------------------------------------------------------+
//| This function calculates values of the RBF model and its |
//| derivatives at the given point. |
//| This is general function which can be used for arbitrary NX |
//| (dimension of the space of arguments) and NY(dimension of the |
//| function itself). However if you have NX = 3 and NY = 1, you may |
//| find more convenient to use RBFDiff3(). |
//| IMPORTANT: THIS FUNCTION IS THREAD - UNSAFE. It uses fields of |
//| CRBFModel as temporary arrays, i.e. it is impossible |
//| to perform parallel evaluation on the same CRBFModel |
//| object (parallel calls of this function for |
//| independent CRBFModel objects are safe). |
//| If you want to perform parallel model evaluation from multiple |
//| threads, use RBFTSDiffBuf() with per-thread buffer object. |
//| This function returns 0.0 in Y and/or DY in the following cases: |
//| * the model is not initialized (Y = 0, DY = 0) |
//| * the gradient is undefined at the trial point. Some basis |
//| functions have discontinuous derivatives at the interpolation|
//| nodes: |
//| * biharmonic splines f = r have no Hessian and no gradient |
//| at the nodes In these cases only DY is set to zero (Y is |
//| still returned) |
//| INPUT PARAMETERS: |
//| S - RBF model |
//| X - coordinates, array[NX]. X may have more than NX |
//| elements, in this case only leading NX will be used|
//| OUTPUT PARAMETERS: |
//| Y - function value, array[NY]. Y is out-parameter and |
//| reallocated after call to this function. In case |
//| you want to reuse previously allocated Y, you may |
//| use RBFDiffBuf(), which reallocates Y only when it |
//| is too small. |
//| DY - derivatives, array[NX * NY]: |
//| * Y[I * NX + J] with 0 <= I < NY and 0 <= J < NX |
//| stores derivative of function component I with |
//| respect to input J. |
//| * for NY = 1 it is simply NX-dimensional gradient |
//| of the scalar NX-dimensional function DY is |
//| out-parameter and reallocated after call to this |
//| function. In case you want to reuse previously |
//| allocated DY, you may use RBFDiffBuf(), which |
//| reallocates DY only when it is too small to store|
//| the result. |
//+------------------------------------------------------------------+
void CAlglib::RBFDiff(CRBFModel &s,CRowDouble &x,CRowDouble &y,
CRowDouble &dy)
{
//--- thin wrapper: value and gradient computation is delegated to the CRBF core,
//--- which reallocates Y and DY (see RBFDiffBuf() for the buffered variant)
CRBF::RBFDiff(s,x,y,dy);
}
//+------------------------------------------------------------------+
//| This function calculates values of the RBF model and its first |
//| and second derivatives (Hessian matrix) at the given point. |
//| This function supports both scalar (NY = 1) and vector - valued |
//| (NY > 1) RBFs. |
//| IMPORTANT: THIS FUNCTION IS THREAD - UNSAFE. It uses fields of |
//| CRBFModel as temporary arrays, i.e. it is impossible |
//| to perform parallel evaluation on the same CRBFModel |
//| object (parallel calls of this function for |
//| independent CRBFModel objects are safe). |
//| If you want to perform parallel model evaluation from multiple |
//| threads, use RBFTsHessBuf() with per - thread buffer object. |
//| This function returns 0 in Y and/or DY and/or D2Y in the |
//| following cases: |
//| * the model is not initialized (Y = 0, DY = 0, D2Y = 0) |
//| * the gradient and/or Hessian is undefined at the trial point. |
//| Some basis functions have discontinuous derivatives at the |
//| interpolation nodes: |
//| * thin plate splines have no Hessian at the nodes |
//| * biharmonic splines f = r have no Hessian and no gradient |
//| at the nodes In these cases only corresponding derivative |
//| is set to zero, and the rest of the derivatives is still |
//| returned. |
//| INPUT PARAMETERS: |
//| S - RBF model |
//| X - coordinates, array[NX]. X may have more than NX |
//| elements, in this case only leading NX will be used|
//| OUTPUT PARAMETERS: |
//| Y - function value, array[NY]. Y is out-parameter and |
//| reallocated after call to this function. In case |
//| you want to reuse previously allocated Y, you may |
//| use RBFHessBuf(), which reallocates Y only when |
//| it is too small. |
//| DY - first derivatives, array[NY * NX]: |
//| * Y[I * NX + J] with 0 <= I < NY and 0 <= J < NX |
//| stores derivative of function component I with |
//| respect to input J. |
//| * for NY = 1 it is simply NX - dimensional gradient|
//| of the scalar NX-dimensional function DY is |
//| out-parameter and reallocated after call to this |
//| function. In case you want to reuse previously |
//| allocated DY, you may use RBFHessBuf(), which |
//| reallocates DY only when it is too small to store|
//| the result. |
//| D2Y - second derivatives, array[NY * NX * NX]: |
//| * for NY = 1 it is NX*NX array that stores Hessian |
//| matrix, with Y[I * NX + J] = Y[J * NX + I]. |
//| * for a vector - valued RBF with NY > 1 it contains|
//| NY subsequently stored Hessians: an element |
//| Y[K * NX * NX + I * NX + J] with 0 <= K < NY, |
//| 0 <= I < NX and 0 <= J < NX stores second |
//| derivative of the function #K with respect to |
//| inputs #I and #J. |
//| D2Y is out-parameter and reallocated after call to |
//| this function. In case you want to reuse previously|
//| allocated D2Y, you may use RBFHessBuf(), which |
//| reallocates D2Y only when it is too small to store |
//| the result. |
//+------------------------------------------------------------------+
void CAlglib::RBFHess(CRBFModel &s,CRowDouble &x,CRowDouble &y,
CRowDouble &dy,CRowDouble &d2y)
{
//--- thin wrapper: value, gradient and Hessian computation is delegated to
//--- the CRBF core (see RBFHessBuf() for the buffered variant)
CRBF::RBFHess(s,x,y,dy,d2y);
}
//+------------------------------------------------------------------+
//| This function calculates values of the RBF model at the given |
//| point. |
//| Same as RBFCalc(), but does not reallocate Y when it is large    |
//| enough to store function values. |
//| IMPORTANT: THIS FUNCTION IS THREAD - UNSAFE. It uses fields of |
//| CRBFModel as temporary arrays, i.e. it is impossible |
//| to perform parallel evaluation on the same CRBFModel |
//| object (parallel calls of this function for |
//| independent CRBFModel objects are safe). If you want |
//| to perform parallel model evaluation from multiple |
//| threads, use RBFTSCalcBuf() with per-thread buffer |
//| object. |
//| INPUT PARAMETERS: |
//| S - RBF model |
//| X - coordinates, array[NX]. X may have more than NX |
//| elements, in this case only leading NX will be used|
//| Y - possibly preallocated array |
//| OUTPUT PARAMETERS: |
//| Y - function value, array[NY]. Y is not reallocated |
//| when it is larger than NY. |
//+------------------------------------------------------------------+
void CAlglib::RBFCalcBuf(CRBFModel &s,CRowDouble &x,CRowDouble &y)
{
//--- thin wrapper: buffered evaluation delegated to the CRBF core
//--- (Y is reused when already large enough to hold NY values)
CRBF::RBFCalcBuf(s,x,y);
}
//+------------------------------------------------------------------+
//| This function calculates values of the RBF model and its |
//| derivatives at the given point. It is a buffered version of the |
//| RBFDiff() which tries to reuse possibly preallocated output |
//| arrays Y / DY as much as possible. |
//| This is general function which can be used for arbitrary NX |
//| (dimension of the space of arguments) and NY (dimension of the |
//| function itself). However if you have NX = 1, 2 or 3 and NY = 1, |
//| you may find more convenient to use RBFDiff1(), RBFDiff2() or |
//| RBFDiff3(). |
//| This function returns 0.0 in Y and/or DY in the following cases: |
//| * the model is not initialized (Y = 0, DY = 0) |
//| * the gradient is undefined at the trial point. Some basis |
//| functions have discontinuous derivatives at the interpolation|
//| nodes: |
//| * biharmonic splines f = r have no Hessian and no gradient |
//| at the nodes In these cases only DY is set to zero (Y is |
//| still returned) |
//| INPUT PARAMETERS: |
//| S - RBF model |
//| X - coordinates, array[NX]. X may have more than NX |
//| elements, in this case only leading NX will be used|
//| Y, DY - possibly preallocated arrays; if array size is |
//| large enough to store results, this function does |
//| not reallocate array to fit output size exactly. |
//| OUTPUT PARAMETERS: |
//| Y - function value, array[NY]. |
//| DY - derivatives, array[NX * NY]: |
//| * Y[I * NX + J] with 0 <= I < NY and 0 <= J < NX |
//| stores derivative of function component I with |
//| respect to input J. |
//| * for NY = 1 it is simply NX - dimensional gradient|
//| of the scalar NX - dimensional function |
//+------------------------------------------------------------------+
void CAlglib::RBFDiffBuf(CRBFModel &s,CRowDouble &x,CRowDouble &y,
CRowDouble &dy)
{
//--- thin wrapper: buffered value-and-gradient evaluation delegated to the
//--- CRBF core (Y/DY are reused when already large enough)
CRBF::RBFDiffBuf(s,x,y,dy);
}
//+------------------------------------------------------------------+
//| This function calculates values of the RBF model and its first |
//| and second derivatives (Hessian matrix) at the given point. It is|
//| a buffered version that reuses memory allocated in output buffers|
//| Y/DY/D2Y as much as possible. |
//| This function supports both scalar(NY = 1) and vector - valued |
//| (NY > 1) RBFs. |
//| This function returns 0 in Y and/or DY and/or D2Y in the |
//| following cases: |
//| * the model is not initialized (Y = 0, DY = 0, D2Y = 0) |
//| * the gradient and/or Hessian is undefined at the trial point |
//| Some basis functions have discontinuous derivatives at the |
//| interpolation nodes: |
//| * thin plate splines have no Hessian at the nodes |
//| * biharmonic splines f = r have no Hessian and no gradient |
//| at the nodes In these cases only corresponding derivative |
//| is set to zero, and the rest of the derivatives is still |
//| returned. |
//| INPUT PARAMETERS: |
//| S - RBF model |
//| X - coordinates, array[NX]. X may have more than NX |
//| elements, in this case only leading NX will be used|
//| Y, DY, D2Y - possible preallocated output arrays. If these |
//| arrays are smaller than required to store the |
//| result, they are automatically reallocated. If |
//| array is large enough, it is not resized. |
//| OUTPUT PARAMETERS: |
//| Y - function value, array[NY]. |
//| DY - first derivatives, array[NY * NX]: |
//| * Y[I * NX + J] with 0 <= I < NY and 0 <= J < NX |
//| stores derivative of function component I with |
//| respect to input J. |
//| * for NY = 1 it is simply NX - dimensional gradient|
//| of the scalar NX - dimensional function |
//| D2Y - second derivatives, array[NY * NX * NX]: |
//| * for NY = 1 it is NX*NX array that stores Hessian |
//| matrix, with Y[I * NX + J] = Y[J * NX + I]. |
//| * for a vector - valued RBF with NY > 1 it contains|
//| NY subsequently stored Hessians: an element |
//| Y[K * NX * NX + I * NX + J] with 0 <= K < NY, |
//| 0 <= I < NX and 0 <= J < NX stores second |
//| derivative of the function #K with respect to |
//| inputs #I and #J. |
//+------------------------------------------------------------------+
void CAlglib::RBFHessBuf(CRBFModel &s,CRowDouble &x,CRowDouble &y,
CRowDouble &dy,CRowDouble &d2y)
{
//--- thin wrapper: buffered value/gradient/Hessian evaluation delegated to
//--- the CRBF core (Y/DY/D2Y are reused when already large enough)
CRBF::RBFHessBuf(s,x,y,dy,d2y);
}
//+------------------------------------------------------------------+
//| This function calculates values of the RBF model at the given |
//| point, using external buffer object (internal temporaries of RBF |
//| model are not modified). |
//| This function allows to use same RBF model object in different |
//| threads, assuming that different threads use different instances|
//| of buffer structure. |
//| INPUT PARAMETERS: |
//| S - RBF model, may be shared between different threads |
//| Buf - buffer object created for this particular instance |
//| of RBF model with RBFCreateCalcBuffer(). |
//| X - coordinates, array[NX]. X may have more than NX |
//| elements, in this case only leading NX will be used|
//| Y - possibly preallocated array |
//| OUTPUT PARAMETERS: |
//| Y - function value, array[NY]. Y is not reallocated |
//| when it is larger than NY. |
//+------------------------------------------------------------------+
void CAlglib::RBFTSCalcBuf(CRBFModel &s,CRBFCalcBuffer &buf,
CRowDouble &x,CRowDouble &y)
{
//--- thin wrapper: thread-safe evaluation delegated to the CRBF core;
//--- all temporaries live in the caller-supplied buffer, not in the model
CRBF::RBFTSCalcBuf(s,buf,x,y);
}
//+------------------------------------------------------------------+
//| This function calculates values of the RBF model and its |
//| derivatives at the given point, using external buffer object |
//| (internal temporaries of the RBF model are not modified). |
//| This function allows to use same RBF model object in different |
//| threads, assuming that different threads use different instances |
//| of the buffer structure. |
//| This function returns 0.0 in Y and/or DY in the following |
//| cases: |
//| * the model is not initialized(Y = 0, DY = 0) |
//| * the gradient is undefined at the trial point. Some basis |
//| functions have discontinuous derivatives at the interpolation|
//| nodes: |
//| * biharmonic splines f = r have no Hessian and no gradient |
//| at the nodes In these cases only DY is set to zero (Y is |
//| still returned) |
//| INPUT PARAMETERS: |
//| S - RBF model, may be shared between different threads |
//| Buf - buffer object created for this particular instance |
//| of RBF model with RBFCreateCalcBuffer(). |
//| X - coordinates, array[NX]. X may have more than NX |
//| elements, in this case only leading NX will be used|
//| Y, DY - possibly preallocated arrays; if array size is |
//| large enough to store results, this function does |
//| not reallocate array to fit output size exactly. |
//| OUTPUT PARAMETERS: |
//| Y - function value, array[NY]. |
//| DY - derivatives, array[NX * NY]: |
//| * Y[I * NX + J] with 0 <= I < NY and 0 <= J < NX |
//| stores derivative of function component I with |
//| respect to input J. |
//| * for NY = 1 it is simply NX-dimensional gradient |
//| of the scalar NX-dimensional function |
//| Zero is returned when the first derivative is |
//| undefined. |
//+------------------------------------------------------------------+
void CAlglib::RBFTSDiffBuf(CRBFModel &s,CRBFCalcBuffer &buf,
CRowDouble &x,CRowDouble &y,CRowDouble &dy)
{
//--- thin wrapper: thread-safe value-and-gradient evaluation delegated to the
//--- CRBF core; all temporaries live in the caller-supplied buffer
CRBF::RBFTSDiffBuf(s,buf,x,y,dy);
}
//+------------------------------------------------------------------+
//| This function calculates values of the RBF model and its first |
//| and second derivatives (Hessian matrix) at the given point, using|
//| external buffer object (internal temporaries of the RBF model are|
//| not modified). |
//| This function allows to use same RBF model object in different |
//| threads, assuming that different threads use different instances|
//| of the buffer structure. |
//| This function returns 0 in Y and/or DY and/or D2Y in the |
//| following cases: |
//| * the model is not initialized (Y = 0, DY = 0, D2Y = 0) |
//| * the gradient and/or Hessian is undefined at the trial point. |
//| Some basis functions have discontinuous derivatives at the |
//| interpolation nodes: |
//| * thin plate splines have no Hessian at the nodes |
//| * biharmonic splines f = r have no Hessian and no gradient at |
//| the nodes In these cases only corresponding derivative is set|
//| to zero, and the rest of the derivatives is still returned. |
//| INPUT PARAMETERS: |
//| S - RBF model, may be shared between different threads |
//| Buf - buffer object created for this particular instance |
//| of RBF model with RBFCreateCalcBuffer(). |
//| X - coordinates, array[NX]. X may have more than NX |
//| elements, in this case only leading NX will be used|
//| Y, DY, D2Y - possible preallocated output arrays. If these |
//| arrays are smaller than required to store the |
//| result, they are automatically reallocated. If |
//| array is large enough, it is not resized. |
//| OUTPUT PARAMETERS: |
//| Y - function value, array[NY]. |
//| DY - first derivatives, array[NY * NX]: |
//| * Y[I * NX + J] with 0 <= I < NY and 0 <= J < NX |
//| stores derivative of function component I with |
//| respect to input J. |
//| * for NY = 1 it is simply NX - dimensional gradient|
//| of the scalar NX - dimensional function Zero is |
//| returned when the first derivative is undefined. |
//| D2Y - second derivatives, array[NY * NX * NX]: |
//| * for NY = 1 it is NX*NX array that stores Hessian |
//| matrix, with Y[I * NX + J] = Y[J * NX + I]. |
//| * for a vector - valued RBF with NY > 1 it contains|
//| NY subsequently stored Hessians: an element |
//| Y[K * NX * NX + I * NX + J] with 0 <= K < NY, |
//| 0 <= I < NX and 0 <= J < NX stores second |
//| derivative of the function #K with respect to |
//|                      inputs #I and #J.                           |
//| Zero is returned when the second derivative is |
//| undefined. |
//+------------------------------------------------------------------+
void CAlglib::RBFTSHessBuf(CRBFModel &s,CRBFCalcBuffer &buf,
CRowDouble &x,CRowDouble &y,
CRowDouble &dy,CRowDouble &d2y)
{
//--- thin wrapper: thread-safe value/gradient/Hessian evaluation delegated to
//--- the CRBF core; all temporaries live in the caller-supplied buffer
CRBF::RBFTSHessBuf(s,buf,x,y,dy,d2y);
}
//+------------------------------------------------------------------+
//| This is legacy function for gridded calculation of RBF model. |
//| It is superseded by RBFGridCalc2V() and RBFGridCalc2VSubset() |
//| functions. |
//+------------------------------------------------------------------+
void CAlglib::RBFGridCalc2(CRBFModel &s,CRowDouble &x0,int n0,
CRowDouble &x1,int n1,CMatrixDouble &y)
{
//--- thin wrapper: legacy 2D gridded evaluation delegated to the CRBF core
//--- (superseded by RBFGridCalc2V()/RBFGridCalc2VSubset())
CRBF::RBFGridCalc2(s,x0,n0,x1,n1,y);
}
//+------------------------------------------------------------------+
//| This function calculates values of the RBF model at the regular |
//| grid, which has N0*N1 points, with Point[I, J] = (X0[I], X1[J]). |
//| Vector - valued RBF models are supported. |
//| This function returns 0.0 when: |
//| * model is not initialized |
//| * NX<>2 |
//| INPUT PARAMETERS: |
//| S - RBF model, used in read - only mode, can be shared |
//| between multiple invocations of this function from|
//| multiple threads. |
//| X0 - array of grid nodes, first coordinates, array[N0]. |
//| Must be ordered by ascending. Exception is |
//| generated if the array is not correctly ordered. |
//| N0 - grid size(number of nodes) in the first dimension |
//| X1 - array of grid nodes, second coordinates, array[N1] |
//| Must be ordered by ascending. Exception is |
//| generated if the array is not correctly ordered. |
//| N1 - grid size(number of nodes) in the second dimension |
//| OUTPUT PARAMETERS: |
//| Y - function values, array[NY * N0 * N1], where NY is a|
//| number of "output" vector values(this function |
//| supports vector - valued RBF models). Y is |
//| out-variable and is reallocated by this function. |
//| Y[K + NY * (I0 + I1 * N0)] = F_k(X0[I0], X1[I1]), |
//| for: |
//| * K = 0...NY - 1 |
//| * I0 = 0...N0 - 1 |
//| * I1 = 0...N1 - 1 |
//| NOTE: this function supports weakly ordered grid nodes, i.e. you |
//| may have X[i] = X[i + 1] for some i. It does not provide |
//| you any performance benefits due to duplication of points,|
//| just convenience and flexibility. |
//| NOTE: this function is re-entrant, i.e. you may use same |
//| CRBFModel structure in multiple threads calling this |
//| function for different grids. |
//| NOTE: if you need function values on some subset of regular grid,|
//| which may be described as "several compact and dense |
//| islands", you may use RBFGridCalc2VSubset(). |
//+------------------------------------------------------------------+
void CAlglib::RBFGridCalc2V(CRBFModel &s,CRowDouble &x0,int n0,
CRowDouble &x1,int n1,CRowDouble &y)
{
//--- thin wrapper: vector-valued evaluation on the full N0*N1 regular grid
//--- delegated to the CRBF core (re-entrant; model used read-only)
CRBF::RBFGridCalc2V(s,x0,n0,x1,n1,y);
}
//+------------------------------------------------------------------+
//| This function calculates values of the RBF model at some subset |
//| of regular grid: |
//| * grid has N0*N1 points, with Point[I, J] = (X0[I], X1[J]) |
//| * only values at some subset of this grid are required |
//| Vector - valued RBF models are supported. |
//| This function returns 0.0 when: |
//| * model is not initialized |
//| * NX<>2 |
//| INPUT PARAMETERS: |
//| S - RBF model, used in read - only mode, can be shared |
//| between multiple invocations of this function from |
//| multiple threads. |
//| X0 - array of grid nodes, first coordinates, array[N0]. |
//| Must be ordered by ascending. Exception is |
//| generated if the array is not correctly ordered. |
//| N0 - grid size (number of nodes) in the first dimension |
//| X1 - array of grid nodes, second coordinates, array[N1] |
//| Must be ordered by ascending. Exception is |
//| generated if the array is not correctly ordered. |
//| N1 - grid size(number of nodes) in the second dimension |
//| FlagY - array[N0 * N1]: |
//| * Y[I0 + I1 * N0] corresponds to node (X0[I0], |
//| X1[I1]) |
//| *it is a "bitmap" array which contains False for |
//| nodes which are NOT calculated, and True for |
//| nodes which are required. |
//| OUTPUT PARAMETERS: |
//| Y - function values, array[NY * N0 * N1 * N2], where NY|
//| is a number of "output" vector values (this |
//| function supports vector - valued RBF models): |
//| * Y[K + NY * (I0 + I1 * N0)] = F_k(X0[I0],X1[I1]), |
//| for K = 0...NY-1, I0 = 0...N0-1, I1 = 0...N1-1. |
//| * elements of Y[] which correspond to FlagY[]=True |
//| are loaded by model values(which may be exactly |
//| zero for some nodes). |
//| * elements of Y[] which correspond to FlagY[]=False|
//| MAY be initialized by zeros OR may be calculated|
//| This function processes grid as a hierarchy of |
//| nested blocks and micro-rows. If just one |
//| element of micro-row is required, entire micro- |
//| row (up to 8 nodes in the current version, but |
//| no promises) is calculated. |
//| NOTE: this function supports weakly ordered grid nodes, i.e. you |
//| may have X[i] = X[i + 1] for some i. It does not provide |
//| you any performance benefits due to duplication of points, |
//| just convenience and flexibility. |
//| NOTE: this function is re - entrant, i.e. you may use same |
//| CRBFModel structure in multiple threads calling this |
//| function for different grids. |
//+------------------------------------------------------------------+
void CAlglib::RBFGridCalc2VSubset(CRBFModel &s,CRowDouble &x0,int n0,
CRowDouble &x1,int n1,bool &flagy[],
CRowDouble &y)
{
//--- thin wrapper: evaluation on the FlagY-selected subset of the N0*N1
//--- regular grid delegated to the CRBF core (re-entrant; model used read-only)
CRBF::RBFGridCalc2VSubset(s,x0,n0,x1,n1,flagy,y);
}
//+------------------------------------------------------------------+
//| This function calculates values of the RBF model at the regular |
//| grid, which has N0*N1*N2 points, with Point[I, J, K] = (X0[I], |
//| X1[J], X2[K]). Vector - valued RBF models are supported. |
//| This function returns 0.0 when: |
//| * model is not initialized |
//| * NX<>3 |
//| INPUT PARAMETERS: |
//| S - RBF model, used in read-only mode, can be shared |
//| between multiple invocations of this function from|
//| multiple threads. |
//| X0 - array of grid nodes, first coordinates, array[N0]. |
//| Must be ordered by ascending. Exception is |
//| generated if the array is not correctly ordered. |
//| N0 - grid size(number of nodes) in the first dimension |
//| X1 - array of grid nodes, second coordinates, array[N1] |
//| Must be ordered by ascending. Exception is |
//| generated if the array is not correctly ordered. |
//| N1 - grid size(number of nodes) in the second dimension |
//| X2 - array of grid nodes, third coordinates, array[N2] |
//| Must be ordered by ascending. Exception is |
//| generated if the array is not correctly ordered. |
//| N2 - grid size(number of nodes) in the third dimension |
//| OUTPUT PARAMETERS: |
//| Y - function values, array[NY * N0 * N1 * N2], where NY|
//| is a number of "output" vector values (this |
//| function supports vector-valued RBF models). Y is |
//| out-variable and is reallocated by this function. |
//| Y[K+NY*(I0+I1*N0+I2*N0*N1)] = F_k(X0[I0],X1[I1],X2[I2]),|
//| for: |
//| * K = 0...NY - 1 |
//| * I0 = 0...N0 - 1 |
//| * I1 = 0...N1 - 1 |
//| * I2 = 0...N2 - 1 |
//| NOTE: this function supports weakly ordered grid nodes, i.e. you |
//| may have X[i] = X[i + 1] for some i. It does not provide |
//| you any performance benefits due to duplication of points,|
//| just convenience and flexibility. |
//| NOTE: this function is re-entrant, i.e. you may use same |
//| CRBFModel structure in multiple threads calling this |
//| function for different grids. |
//| NOTE: if you need function values on some subset of regular grid,|
//| which may be described as "several compact and dense |
//| islands", you may use RBFGridCalc3VSubset(). |
//+------------------------------------------------------------------+
void CAlglib::RBFGridCalc3V(CRBFModel &s,CRowDouble &x0,int n0,
                            CRowDouble &x1,int n1,CRowDouble &x2,
                            int n2,CRowDouble &y)
  {
//--- thin wrapper: forward the full 3D regular-grid evaluation to the RBF computational core
   CRBF::RBFGridCalc3V(s,x0,n0,x1,n1,x2,n2,y);
  }
//+------------------------------------------------------------------+
//| This function calculates values of the RBF model at some subset |
//| of regular grid: |
//| * grid has N0*N1*N2 points, with Point[I, J, K] = (X0[I], |
//| X1[J], X2[K]) |
//| * only values at some subset of this grid are required |
//| Vector - valued RBF models are supported. |
//| This function returns 0.0 when: |
//| * model is not initialized |
//| * NX<>3 |
//| INPUT PARAMETERS: |
//| S - RBF model, used in read - only mode, can be shared |
//| between multiple invocations of this function from|
//| multiple threads. |
//| X0 - array of grid nodes, first coordinates, array[N0]. |
//| Must be ordered by ascending. Exception is |
//| generated if the array is not correctly ordered. |
//| N0 - grid size(number of nodes) in the first dimension |
//| X1 - array of grid nodes, second coordinates, array[N1] |
//| Must be ordered by ascending. Exception is |
//| generated if the array is not correctly ordered. |
//| N1 - grid size(number of nodes) in the second dimension |
//| X2 - array of grid nodes, third coordinates, array[N2] |
//| Must be ordered by ascending. Exception is |
//| generated if the array is not correctly ordered. |
//| N2 - grid size(number of nodes) in the third dimension |
//| FlagY - array[N0 * N1 * N2]: |
//| * Y[I0 + I1 * N0 + I2 * N0 * N1] corresponds to |
//| node (X0[I0], X1[I1], X2[I2]) |
//| *it is a "bitmap" array which contains False for |
//| nodes which are NOT calculated, and True for |
//| nodes which are required. |
//| OUTPUT PARAMETERS: |
//| Y - function values, array[NY * N0 * N1 * N2], where NY|
//| is a number of "output" vector values(this function|
//| supports vector- valued RBF models): |
//| * Y[K+NY*(I0+I1*N0+I2*N0*N1)] = F_k(X0[I0],X1[I1],X2[I2]),|
//| for K = 0...NY-1, I0 = 0...N0-1, I1 = 0...N1-1, |
//| I2 = 0...N2-1. |
//| * elements of Y[] which correspond to FlagY[]=True |
//| are loaded by model values(which may be exactly |
//| zero for some nodes). |
//| * elements of Y[] which correspond to FlagY[]=False|
//| MAY be initialized by zeros OR may be calculated.|
//| This function processes grid as a hierarchy of |
//| nested blocks and micro-rows. If just one element|
//| of micro-row is required, entire micro-row (up to|
//| 8 nodes in the current version, but no promises) |
//| is calculated. |
//| NOTE: this function supports weakly ordered grid nodes, i.e. you |
//| may have X[i] = X[i + 1] for some i. It does not provide |
//| you any performance benefits due to duplication of points,|
//| just convenience and flexibility. |
//| NOTE: this function is re-entrant, i.e. you may use same |
//| CRBFModel structure in multiple threads calling this |
//| function for different grids. |
//+------------------------------------------------------------------+
void CAlglib::RBFGridCalc3VSubset(CRBFModel &s,CRowDouble &x0,int n0,
                                  CRowDouble &x1,int n1,
                                  CRowDouble &x2,int n2,
                                  bool &flagy[],CRowDouble &y)
  {
//--- thin wrapper: forward the subset-of-3D-grid evaluation to the RBF computational core
   CRBF::RBFGridCalc3VSubset(s,x0,n0,x1,n1,x2,n2,flagy,y);
  }
//+------------------------------------------------------------------+
//| This function "unpacks" RBF model by extracting its coefficients.|
//| INPUT PARAMETERS: |
//| S - RBF model |
//| OUTPUT PARAMETERS: |
//| NX - dimensionality of argument |
//| NY - dimensionality of the target function |
//| XWR - model information, 2D array. One row of the array |
//| corresponds to one basis function. |
//| For ModelVersion = 1 we have NX + NY + 1 columns: |
//| * first NX columns - coordinates of the center |
//| * next NY columns - weights, one per dimension of the function|
//| being modeled |
//| * last column - radius, same for all dimensions of the |
//| function being modeled |
//| For ModelVersion = 2 we have NX + NY + NX columns: |
//| * first NX columns - coordinates of the center |
//| * next NY columns - weights, one per dimension of the function|
//| being modeled |
//| * last NX columns - radii, one per dimension |
//| For ModelVersion = 3 we have NX + NY + NX + 3 columns: |
//| * first NX columns - coordinates of the center |
//| * next NY columns - weights, one per dimension of the |
//| function being modeled |
//| * next NX columns - radii, one per dimension |
//| * next column - basis function type: |
//| * 1 for f = r |
//| * 2 for f = r ^ 2 * ln(r) |
//| * 10 for multiquadric f=sqrt(r^2+alpha^2) |
//| * next column - basis function parameter: |
//| * alpha, for basis function type 10 |
//| * ignored(zero) for other basis function |
//| types |
//| * next column - point index in the original dataset, or -1|
//| for an artificial node created by the |
//| solver. The algorithm may reorder the |
//| nodes, drop some nodes or add artificial |
//| nodes. Thus, one parsing this column |
//| should expect all these kinds of |
//| alterations in the dataset. |
//| NC - number of the centers |
//| V - polynomial term, array[NY, NX + 1]. One row per |
//| one dimension of the function being modelled. |
//| First NX elements are linear coefficients, V[NX]|
//| is equal to the constant part. |
//| ModelVersion - version of the RBF model: |
//| * 1 - for models created by QNN and RBF-ML |
//| algorithms, compatible with ALGLIB 3.10 or|
//| earlier. |
//| * 2 - for models created by HierarchicalRBF, |
//| requires ALGLIB 3.11 or later |
//| * 3 - for models created by DDM-RBF, requires |
//| ALGLIB 3.19 or later |
//+------------------------------------------------------------------+
void CAlglib::RBFUnpack(CRBFModel &s,int &nx,int &ny,
                        CMatrixDouble &xwr,int &nc,
                        CMatrixDouble &v,int &modelversion)
  {
//--- thin wrapper: extraction of model coefficients is implemented in CRBF
   CRBF::RBFUnpack(s,nx,ny,xwr,nc,v,modelversion);
  }
//+------------------------------------------------------------------+
//|This function returns model version. |
//| INPUT PARAMETERS: |
//| S - RBF model |
//| RESULT: |
//| * 1 - for models created by QNN and RBF-ML algorithms, |
//| compatible with ALGLIB 3.10 or earlier. |
//| * 2 - for models created by HierarchicalRBF, requires |
//| ALGLIB 3.11 or later |
//+------------------------------------------------------------------+
int CAlglib::RBFGetModelVersion(CRBFModel &s)
  {
//--- thin wrapper: query the model-version field via the RBF computational core
   return(CRBF::RBFGetModelVersion(s));
  }
//+------------------------------------------------------------------+
//| This function is used to peek into hierarchical RBF construction |
//| process from some other thread and get current progress indicator|
//| It returns value in [0, 1]. |
//| IMPORTANT: only HRBFs (hierarchical RBFs) support peeking into |
//| progress indicator. Legacy RBF-ML and RBF-QNN do not |
//| support it. You will always get 0 value. |
//| INPUT PARAMETERS: |
//| S - RBF model object |
//| RESULT: |
//| progress value, in [0, 1] |
//+------------------------------------------------------------------+
double CAlglib::RBFPeekProgress(CRBFModel &s)
  {
//--- thin wrapper: read the construction progress indicator from the RBF core
   return(CRBF::RBFPeekProgress(s));
  }
//+------------------------------------------------------------------+
//| This function is used to submit a request for termination of the |
//| hierarchical RBF construction process from some other thread. As |
//| result, RBF construction is terminated smoothly (with proper |
//| deallocation of all necessary resources) and resultant model is |
//| filled by zeros. |
//| A rep.m_terminationtype = 8 will be returned upon receiving such |
//| request. |
//| IMPORTANT: only HRBFs(hierarchical RBFs) support termination |
//| requests. Legacy RBF-ML and RBF-QNN do not support it.|
//| An attempt to terminate their construction will be |
//| ignored. |
//| IMPORTANT: termination request flag is cleared when the model |
//| construction starts. Thus, any pre-construction |
//| termination requests will be silently ignored - only |
//| ones submitted AFTER construction has actually began |
//| will be handled. |
//| INPUT PARAMETERS: |
//| S - RBF model object |
//+------------------------------------------------------------------+
void CAlglib::RBFRequestTermination(CRBFModel &s)
  {
//--- thin wrapper: raise the termination-request flag inside the RBF core
   CRBF::RBFRequestTermination(s);
  }
//+------------------------------------------------------------------+
//| Cache-oblivious complex "copy-and-transpose"                     |
//| Input parameters: |
//| M - number of rows |
//| N - number of columns |
//| A - source matrix, MxN submatrix is copied and transposed|
//| IA - submatrix offset (row index) |
//| JA - submatrix offset (column index) |
//| B - destination matrix |
//| IB - submatrix offset (row index) |
//| JB - submatrix offset (column index) |
//+------------------------------------------------------------------+
void CAlglib::CMatrixTranspose(const int m,const int n,CMatrixComplex &a,
                               const int ia,const int ja,CMatrixComplex &b,
                               const int ib,const int jb)
  {
//--- thin wrapper: complex copy-and-transpose is implemented in CAblas
   CAblas::CMatrixTranspose(m,n,a,ia,ja,b,ib,jb);
  }
//+------------------------------------------------------------------+
//| Cache-oblivious real "copy-and-transpose"                        |
//| Input parameters: |
//| M - number of rows |
//| N - number of columns |
//| A - source matrix, MxN submatrix is copied and transposed|
//| IA - submatrix offset (row index) |
//| JA - submatrix offset (column index) |
//| B - destination matrix |
//| IB - submatrix offset (row index) |
//| JB - submatrix offset (column index) |
//+------------------------------------------------------------------+
void CAlglib::RMatrixTranspose(const int m,const int n,CMatrixDouble &a,
                               const int ia,const int ja,CMatrixDouble &b,
                               const int ib,const int jb)
  {
//--- thin wrapper: real copy-and-transpose is implemented in CAblas
   CAblas::RMatrixTranspose(m,n,a,ia,ja,b,ib,jb);
  }
//+------------------------------------------------------------------+
//| This code enforces symmetry of the matrix by copying Upper part  |
//| to lower one (or vice versa). |
//| INPUT PARAMETERS: |
//| A - matrix |
//| N - number of rows/columns |
//| IsUpper - whether we want to copy upper triangle to lower |
//| one (True) or vice versa (False). |
//+------------------------------------------------------------------+
void CAlglib::RMatrixEnforceSymmetricity(CMatrixDouble &a,int n,bool IsUpper)
  {
//--- thin wrapper: triangle-mirroring (symmetry enforcement) is implemented in CAblas
   CAblas::RMatrixEnforceSymmetricity(a,n,IsUpper);
  }
//+------------------------------------------------------------------+
//| Copy |
//| Input parameters: |
//| M - number of rows |
//| N - number of columns |
//| A - source matrix, MxN submatrix is copied |
//| IA - submatrix offset (row index) |
//| JA - submatrix offset (column index) |
//| B - destination matrix |
//| IB - submatrix offset (row index) |
//| JB - submatrix offset (column index) |
//+------------------------------------------------------------------+
void CAlglib::CMatrixCopy(const int m,const int n,CMatrixComplex &a,
                          const int ia,const int ja,CMatrixComplex &b,
                          const int ib,const int jb)
  {
//--- thin wrapper: complex submatrix copy is implemented in CAblas
   CAblas::CMatrixCopy(m,n,a,ia,ja,b,ib,jb);
  }
//+------------------------------------------------------------------+
//| Copy |
//| Input parameters: |
//| N - subvector size |
//| A - source vector, N elements are copied |
//| IA - source offset (first element index) |
//| B - destination vector, must be large enough to store |
//| result |
//| IB - destination offset (first element index) |
//+------------------------------------------------------------------+
void CAlglib::RVectorCopy(int n,CRowDouble &a,int ia,CRowDouble &b,int ib)
  {
//--- thin wrapper: subvector copy is implemented in CAblas
   CAblas::RVectorCopy(n,a,ia,b,ib);
  }
//+------------------------------------------------------------------+
//| Copy |
//| Input parameters: |
//| M - number of rows |
//| N - number of columns |
//| A - source matrix, MxN submatrix is copied |
//| IA - submatrix offset (row index) |
//| JA - submatrix offset (column index) |
//| B - destination matrix |
//| IB - submatrix offset (row index) |
//| JB - submatrix offset (column index) |
//+------------------------------------------------------------------+
void CAlglib::RMatrixCopy(const int m,const int n,CMatrixDouble &a,
                          const int ia,const int ja,CMatrixDouble &b,
                          const int ib,const int jb)
  {
//--- thin wrapper: real submatrix copy is implemented in CAblas
   CAblas::RMatrixCopy(m,n,a,ia,ja,b,ib,jb);
  }
//+------------------------------------------------------------------+
//| Performs generalized copy: B := Beta*B + Alpha*A. |
//| If Beta=0, then previous contents of B is simply ignored. If |
//| Alpha=0, then A is ignored and not referenced. If both Alpha and |
//| Beta are zero, B is filled by zeros. |
//| Input parameters: |
//| M - number of rows |
//| N - number of columns |
//| Alpha - coefficient |
//| A - source matrix, MxN submatrix is copied |
//| IA - submatrix offset (row index) |
//| JA - submatrix offset (column index) |
//| Beta - coefficient |
//| B - destination matrix, must be large enough to store |
//| result |
//| IB - submatrix offset (row index) |
//| JB - submatrix offset (column index) |
//+------------------------------------------------------------------+
void CAlglib::RMatrixGenCopy(int m,int n,double alpha,
                             CMatrixDouble &a,int ia,int ja,
                             double beta,CMatrixDouble &b,
                             int ib,int jb)
  {
//--- thin wrapper: generalized copy B := Beta*B + Alpha*A is implemented in CAblas
   CAblas::RMatrixGenCopy(m,n,alpha,a,ia,ja,beta,b,ib,jb);
  }
//+------------------------------------------------------------------+
//| Rank-1 correction: A := A + alpha*u*v' |
//| NOTE: this function expects A to be large enough to store result.|
//| No automatic preallocation happens for smaller arrays. No |
//| integrity checks is performed for sizes of A, u, v. |
//| INPUT PARAMETERS: |
//| M - number of rows |
//| N - number of columns |
//| A - target matrix, MxN submatrix is updated |
//| IA - submatrix offset (row index) |
//| JA - submatrix offset (column index) |
//| Alpha - coefficient |
//| U - vector #1 |
//| IU - subvector offset |
//| V - vector #2 |
//| IV - subvector offset |
//+------------------------------------------------------------------+
void CAlglib::RMatrixGer(int m,int n,CMatrixDouble &a,int ia,
                         int ja,double alpha,CRowDouble &u,
                         int iu,CRowDouble &v,int iv)
  {
//--- thin wrapper: rank-1 update A := A + alpha*u*v' is implemented in CAblas
   CAblas::RMatrixGer(m,n,a,ia,ja,alpha,u,iu,v,iv);
  }
//+------------------------------------------------------------------+
//| Rank-1 correction: A := A + u*v' |
//| INPUT PARAMETERS: |
//| M - number of rows |
//| N - number of columns |
//| A - target matrix, MxN submatrix is updated |
//| IA - submatrix offset (row index) |
//| JA - submatrix offset (column index) |
//| U - vector #1 |
//| IU - subvector offset |
//| V - vector #2 |
//| IV - subvector offset |
//+------------------------------------------------------------------+
void CAlglib::CMatrixRank1(const int m,const int n,CMatrixComplex &a,
                           const int ia,const int ja,complex &u[],
                           const int iu,complex &v[],const int iv)
  {
//--- thin wrapper: complex rank-1 update A := A + u*v' is implemented in CAblas
   CAblas::CMatrixRank1(m,n,a,ia,ja,u,iu,v,iv);
  }
//+------------------------------------------------------------------+
//| Rank-1 correction: A := A + u*v' |
//| INPUT PARAMETERS: |
//| M - number of rows |
//| N - number of columns |
//| A - target matrix, MxN submatrix is updated |
//| IA - submatrix offset (row index) |
//| JA - submatrix offset (column index) |
//| U - vector #1 |
//| IU - subvector offset |
//| V - vector #2 |
//| IV - subvector offset |
//+------------------------------------------------------------------+
void CAlglib::RMatrixRank1(const int m,const int n,CMatrixDouble &a,
                           const int ia,const int ja,double &u[],
                           const int iu,double &v[],const int iv)
  {
//--- thin wrapper: real rank-1 update A := A + u*v' is implemented in CAblas
   CAblas::RMatrixRank1(m,n,a,ia,ja,u,iu,v,iv);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::RMatrixGemVect(int m,int n,double alpha,
                             CMatrixDouble &a,int ia,int ja,int opa,
                             CRowDouble &x,int ix,double beta,
                             CRowDouble &y,int iy)
  {
//--- thin wrapper: GEMV-style product (presumably y := alpha*op(A)*x + beta*y,
//--- judging by the BLAS-like parameter list - TODO confirm against CAblas) is
//--- implemented in CAblas
   CAblas::RMatrixGemVect(m,n,alpha,a,ia,ja,opa,x,ix,beta,y,iy);
  }
//+------------------------------------------------------------------+
//| Matrix-vector product: y := op(A)*x |
//| INPUT PARAMETERS: |
//| M - number of rows of op(A) |
//| M>=0 |
//| N - number of columns of op(A) |
//| N>=0 |
//| A - target matrix |
//| IA - submatrix offset (row index) |
//| JA - submatrix offset (column index) |
//| OpA - operation type: |
//| * OpA=0 => op(A) = A |
//| * OpA=1 => op(A) = A^T |
//| * OpA=2 => op(A) = A^H |
//| X - input vector |
//| IX - subvector offset |
//| IY - subvector offset |
//| OUTPUT PARAMETERS: |
//| Y - vector which stores result |
//| if M=0, then subroutine does nothing. |
//| if N=0, Y is filled by zeros. |
//+------------------------------------------------------------------+
void CAlglib::CMatrixMVect(const int m,const int n,CMatrixComplex &a,
                           const int ia,const int ja,const int opa,
                           complex &x[],const int ix,complex &y[],
                           const int iy)
  {
//--- thin wrapper: complex matrix-vector product y := op(A)*x is implemented in CAblas
   CAblas::CMatrixMVect(m,n,a,ia,ja,opa,x,ix,y,iy);
  }
//+------------------------------------------------------------------+
//| Matrix-vector product: y := op(A)*x |
//| INPUT PARAMETERS: |
//| M - number of rows of op(A) |
//| N - number of columns of op(A) |
//| A - target matrix |
//| IA - submatrix offset (row index) |
//| JA - submatrix offset (column index) |
//| OpA - operation type: |
//| * OpA=0 => op(A) = A |
//| * OpA=1 => op(A) = A^T |
//| X - input vector |
//| IX - subvector offset |
//| IY - subvector offset |
//| OUTPUT PARAMETERS: |
//| Y - vector which stores result |
//| if M=0, then subroutine does nothing. |
//| if N=0, Y is filled by zeros. |
//+------------------------------------------------------------------+
void CAlglib::RMatrixMVect(const int m,const int n,CMatrixDouble &a,
                           const int ia,const int ja,const int opa,
                           double &x[],const int ix,double &y[],
                           const int iy)
  {
//--- thin wrapper: real matrix-vector product y := op(A)*x is implemented in CAblas
   CAblas::RMatrixMVect(m,n,a,ia,ja,opa,x,ix,y,iy);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::RMatrixSymVect(int n,double alpha,CMatrixDouble &a,
                             int ia,int ja,bool IsUpper,CRowDouble &x,
                             int ix,double beta,CRowDouble &y,int iy)
  {
//--- thin wrapper: SYMV-style symmetric matrix-vector product (presumably
//--- y := alpha*A*x + beta*y with A given by one triangle - TODO confirm
//--- against CAblas) is implemented in CAblas
   CAblas::RMatrixSymVect(n,alpha,a,ia,ja,IsUpper,x,ix,beta,y,iy);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
double CAlglib::RMatrixSyvMVect(int n,CMatrixDouble &a,int ia,int ja,
                                bool IsUpper,CRowDouble &x,int ix,
                                CRowDouble &tmp)
  {
//--- thin wrapper: returns the scalar computed by CAblas (presumably the
//--- quadratic form x'*A*x for symmetric A, using Tmp as scratch - TODO
//--- confirm against CAblas)
   return(CAblas::RMatrixSyvMVect(n,a,ia,ja,IsUpper,x,ix,tmp));
  }
//+------------------------------------------------------------------+
//| This subroutine solves linear system op(A)*x=b where: |
//| * A is NxN upper/lower triangular/unitriangular matrix |
//| * X and B are Nx1 vectors |
//|*"op" may be identity transformation or transposition |
//| Solution replaces X. |
//| IMPORTANT: * no overflow/underflow/degeneracy tests is performed.|
//| * no integrity checks for operand sizes, out-of-bounds|
//| accesses and so on is performed |
//| INPUT PARAMETERS: |
//| N - matrix size, N>=0 |
//| A - matrix, actual matrix is stored in |
//| A[IA:IA+N-1,JA:JA+N-1] |
//| IA - submatrix offset |
//| JA - submatrix offset |
//| IsUpper - whether matrix is upper triangular |
//| IsUnit - whether matrix is unitriangular |
//| OpType - transformation type: |
//| * 0 - no transformation |
//| * 1 - transposition |
//| X - right part, actual vector is stored in X[IX:IX+N-1] |
//| IX - offset |
//| OUTPUT PARAMETERS: |
//| X - solution replaces elements X[IX:IX+N-1] |
//| (c) 2016 Reference BLAS level1 routine (LAPACK version 3.7.0) |
//| Reference BLAS is a software package provided by Univ. of |
//| Tennessee, Univ. of California Berkeley, Univ. of Colorado Denver|
//| and NAG Ltd. |
//+------------------------------------------------------------------+
void CAlglib::RMatrixTrsVect(int n,CMatrixDouble &a,int ia,int ja,
                             bool IsUpper,bool IsUnit,int OpType,
                             CRowDouble &x,int ix)
  {
//--- thin wrapper: triangular solve op(A)*x=b (solution replaces X) is implemented in CAblas
   CAblas::RMatrixTrsVect(n,a,ia,ja,IsUpper,IsUnit,OpType,x,ix);
  }
//+------------------------------------------------------------------+
//| This subroutine calculates X*op(A^-1) where: |
//| * X is MxN general matrix |
//| * A is NxN upper/lower triangular/unitriangular matrix |
//|*"op" may be identity transformation, transposition, conjugate |
//| transposition |
//| Multiplication result replaces X. |
//| Cache-oblivious algorithm is used. |
//| INPUT PARAMETERS |
//| N - matrix size, N>=0 |
//| M - matrix size, M>=0 |
//| A - matrix, actual matrix is stored in |
//| A[I1:I1+N-1,J1:J1+N-1] |
//| I1 - submatrix offset |
//| J1 - submatrix offset |
//| IsUpper - whether matrix is upper triangular |
//| IsUnit - whether matrix is unitriangular |
//| OpType - transformation type: |
//| * 0 - no transformation |
//| * 1 - transposition |
//| * 2 - conjugate transposition |
//| C - matrix, actual matrix is stored in |
//| C[I2:I2+M-1,J2:J2+N-1] |
//| I2 - submatrix offset |
//| J2 - submatrix offset |
//+------------------------------------------------------------------+
void CAlglib::CMatrixRightTrsM(const int m,const int n,CMatrixComplex &a,
                               const int i1,const int j1,const bool IsUpper,
                               const bool IsUnit,const int OpType,
                               CMatrixComplex &x,const int i2,const int j2)
  {
//--- thin wrapper: right triangular solve X := X*op(A^-1) is implemented in CAblas
   CAblas::CMatrixRightTrsM(m,n,a,i1,j1,IsUpper,IsUnit,OpType,x,i2,j2);
  }
//+------------------------------------------------------------------+
//| This subroutine calculates op(A^-1)*X where: |
//| * X is MxN general matrix |
//| * A is MxM upper/lower triangular/unitriangular matrix |
//|*"op" may be identity transformation, transposition, conjugate |
//| transposition |
//| Multiplication result replaces X. |
//| Cache-oblivious algorithm is used. |
//| INPUT PARAMETERS |
//| N - matrix size, N>=0 |
//| M - matrix size, M>=0 |
//| A - matrix, actual matrix is stored in |
//| A[I1:I1+M-1,J1:J1+M-1] |
//| I1 - submatrix offset |
//| J1 - submatrix offset |
//| IsUpper - whether matrix is upper triangular |
//| IsUnit - whether matrix is unitriangular |
//| OpType - transformation type: |
//| * 0 - no transformation |
//| * 1 - transposition |
//| * 2 - conjugate transposition |
//| C - matrix, actual matrix is stored in |
//| C[I2:I2+M-1,J2:J2+N-1] |
//| I2 - submatrix offset |
//| J2 - submatrix offset |
//+------------------------------------------------------------------+
void CAlglib::CMatrixLeftTrsM(const int m,const int n,CMatrixComplex &a,
                              const int i1,const int j1,const bool IsUpper,
                              const bool IsUnit,const int OpType,
                              CMatrixComplex &x,const int i2,const int j2)
  {
//--- thin wrapper: left triangular solve X := op(A^-1)*X is implemented in CAblas
   CAblas::CMatrixLeftTrsM(m,n,a,i1,j1,IsUpper,IsUnit,OpType,x,i2,j2);
  }
//+------------------------------------------------------------------+
//| Same as CMatrixRightTRSM, but for real matrices |
//| OpType may be only 0 or 1. |
//+------------------------------------------------------------------+
void CAlglib::RMatrixRightTrsM(const int m,const int n,CMatrixDouble &a,
                               const int i1,const int j1,const bool IsUpper,
                               const bool IsUnit,const int OpType,
                               CMatrixDouble &x,const int i2,const int j2)
  {
//--- thin wrapper: real variant of CMatrixRightTrsM (OpType 0 or 1 only), implemented in CAblas
   CAblas::RMatrixRightTrsM(m,n,a,i1,j1,IsUpper,IsUnit,OpType,x,i2,j2);
  }
//+------------------------------------------------------------------+
//| Same as CMatrixLeftTRSM, but for real matrices |
//| OpType may be only 0 or 1. |
//+------------------------------------------------------------------+
void CAlglib::RMatrixLeftTrsM(const int m,const int n,CMatrixDouble &a,
                              const int i1,const int j1,const bool IsUpper,
                              const bool IsUnit,const int OpType,
                              CMatrixDouble &x,const int i2,const int j2)
  {
//--- thin wrapper: real variant of CMatrixLeftTrsM (OpType 0 or 1 only), implemented in CAblas
   CAblas::RMatrixLeftTrsM(m,n,a,i1,j1,IsUpper,IsUnit,OpType,x,i2,j2);
  }
//+------------------------------------------------------------------+
//| This subroutine calculates C=alpha*A*A^H+beta*C or |
//| C=alpha*A^H*A+beta*C where: |
//| * C is NxN Hermitian matrix given by its upper/lower triangle |
//| * A is NxK matrix when A*A^H is calculated, KxN matrix otherwise |
//| Additional info: |
//| * cache-oblivious algorithm is used. |
//| * multiplication result replaces C. If Beta=0, C elements are not|
//| used in calculations (not multiplied by zero - just not |
//| referenced) |
//| * if Alpha=0, A is not used (not multiplied by zero - just not |
//| referenced) |
//| * if both Beta and Alpha are zero, C is filled by zeros. |
//| INPUT PARAMETERS |
//| N - matrix size, N>=0 |
//| K - matrix size, K>=0 |
//| Alpha - coefficient |
//| A - matrix |
//| IA - submatrix offset |
//| JA - submatrix offset |
//| OpTypeA - multiplication type: |
//| * 0 - A*A^H is calculated |
//| * 2 - A^H*A is calculated |
//| Beta - coefficient |
//| C - matrix |
//| IC - submatrix offset |
//| JC - submatrix offset |
//| IsUpper - whether C is upper triangular or lower triangular|
//+------------------------------------------------------------------+
void CAlglib::CMatrixSyrk(const int n,const int k,const double alpha,
                          CMatrixComplex &a,const int ia,const int ja,
                          const int optypea,const double beta,CMatrixComplex &c,
                          const int ic,const int jc,const bool IsUpper)
  {
//--- thin wrapper: Hermitian rank-K update C := alpha*A*A^H + beta*C (or A^H*A form)
//--- is implemented in CAblas
   CAblas::CMatrixSyrk(n,k,alpha,a,ia,ja,optypea,beta,c,ic,jc,IsUpper);
  }
//+------------------------------------------------------------------+
//| Same as CMatrixSYRK, but for real matrices |
//| OpType may be only 0 or 1. |
//+------------------------------------------------------------------+
void CAlglib::RMatrixSyrk(const int n,const int k,const double alpha,
                          CMatrixDouble &a,const int ia,const int ja,
                          const int optypea,const double beta,
                          CMatrixDouble &c,const int ic,
                          const int jc,const bool IsUpper)
  {
//--- thin wrapper: real variant of CMatrixSyrk (OpType 0 or 1 only), implemented in CAblas
   CAblas::RMatrixSyrk(n,k,alpha,a,ia,ja,optypea,beta,c,ic,jc,IsUpper);
  }
//+------------------------------------------------------------------+
//| This subroutine calculates C = alpha*op1(A)*op2(B) +beta*C where:|
//| * C is MxN general matrix |
//| * op1(A) is MxK matrix |
//| * op2(B) is KxN matrix |
//|*"op" may be identity transformation, transposition, conjugate |
//| transposition |
//| Additional info: |
//| * cache-oblivious algorithm is used. |
//| * multiplication result replaces C. If Beta=0, C elements are not|
//| used in calculations (not multiplied by zero - just not |
//| referenced) |
//| * if Alpha=0, A is not used (not multiplied by zero - just not |
//| referenced) |
//| * if both Beta and Alpha are zero, C is filled by zeros. |
//| INPUT PARAMETERS |
//| N - matrix size, N>0 |
//| M - matrix size, M>0 |
//| K - matrix size, K>0 |
//| Alpha - coefficient |
//| A - matrix |
//| IA - submatrix offset |
//| JA - submatrix offset |
//| OpTypeA - transformation type: |
//| * 0 - no transformation |
//| * 1 - transposition |
//| * 2 - conjugate transposition |
//| B - matrix |
//| IB - submatrix offset |
//| JB - submatrix offset |
//| OpTypeB - transformation type: |
//| * 0 - no transformation |
//| * 1 - transposition |
//| * 2 - conjugate transposition |
//| Beta - coefficient |
//| C - matrix |
//| IC - submatrix offset |
//| JC - submatrix offset |
//+------------------------------------------------------------------+
void CAlglib::CMatrixGemm(const int m,const int n,const int k,
                          complex alpha,CMatrixComplex &a,
                          const int ia,const int ja,const int optypea,
                          CMatrixComplex &b,const int ib,const int jb,
                          const int optypeb,complex beta,CMatrixComplex &c,
                          const int ic,const int jc)
  {
//--- thin wrapper: complex GEMM C := alpha*op1(A)*op2(B) + beta*C is implemented in CAblas
   CAblas::CMatrixGemm(m,n,k,alpha,a,ia,ja,optypea,b,ib,jb,optypeb,beta,c,ic,jc);
  }
//+------------------------------------------------------------------+
//| Same as CMatrixGEMM, but for real numbers. |
//| OpType may be only 0 or 1. |
//+------------------------------------------------------------------+
void CAlglib::RMatrixGemm(const int m,const int n,const int k,
                          const double alpha,CMatrixDouble &a,
                          const int ia,const int ja,const int optypea,
                          CMatrixDouble &b,const int ib,const int jb,
                          const int optypeb,const double beta,
                          CMatrixDouble &c,const int ic,const int jc)
  {
//--- thin wrapper: real GEMM (OpType 0 or 1 only), implemented in CAblas
   CAblas::RMatrixGemm(m,n,k,alpha,a,ia,ja,optypea,b,ib,jb,optypeb,beta,c,ic,jc);
  }
//+------------------------------------------------------------------+
//| QR decomposition of a rectangular matrix of size MxN |
//| Input parameters: |
//| A - matrix A whose indexes range within [0..M-1, 0..N-1].|
//| M - number of rows in matrix A. |
//| N - number of columns in matrix A. |
//| Output parameters: |
//| A - matrices Q and R in compact form (see below). |
//| Tau - array of scalar factors which are used to form |
//| matrix Q. Array whose index ranges within |
//| [0.. Min(M-1,N-1)]. |
//| Matrix A is represented as A = QR, where Q is an orthogonal |
//| matrix of size MxM, R - upper triangular (or upper trapezoid) |
//| matrix of size M x N. |
//| The elements of matrix R are located on and above the main |
//| diagonal of matrix A. The elements which are located in Tau |
//| array and below the main diagonal of matrix A are used to form |
//| matrix Q as follows: |
//| Matrix Q is represented as a product of elementary reflections |
//| Q = H(0)*H(1)*...*H(k-1), |
//| where k = min(m,n), and each H(i) is in the form |
//| H(i) = 1 - tau * v * (v^T) |
//| where tau is a scalar stored in Tau[I]; v - real vector, |
//| so that v(0:i-1) = 0, v(i) = 1, v(i+1:m-1) stored in |
//| A(i+1:m-1,i). |
//+------------------------------------------------------------------+
void CAlglib::RMatrixQR(CMatrixDouble &a,const int m,const int n,
                        double &tau[])
  {
//--- thin wrapper: real QR decomposition (compact Q/R form in A, reflector
//--- scalars in Tau) is implemented in COrtFac
   COrtFac::RMatrixQR(a,m,n,tau);
  }
//+------------------------------------------------------------------+
//| LQ decomposition of a rectangular matrix of size MxN |
//| Input parameters: |
//| A - matrix A whose indexes range within [0..M-1, 0..N-1].|
//| M - number of rows in matrix A. |
//| N - number of columns in matrix A. |
//| Output parameters: |
//| A - matrices L and Q in compact form (see below) |
//| Tau - array of scalar factors which are used to form |
//| matrix Q. Array whose index ranges within |
//| [0..Min(M,N)-1]. |
//| Matrix A is represented as A = LQ, where Q is an orthogonal |
//| matrix of size MxM, L - lower triangular (or lower trapezoid) |
//| matrix of size M x N. |
//| The elements of matrix L are located on and below the main |
//| diagonal of matrix A. The elements which are located in Tau |
//| array and above the main diagonal of matrix A are used to form |
//| matrix Q as follows: |
//| Matrix Q is represented as a product of elementary reflections |
//| Q = H(k-1)*H(k-2)*...*H(1)*H(0), |
//| where k = min(m,n), and each H(i) is of the form |
//| H(i) = 1 - tau * v * (v^T) |
//| where tau is a scalar stored in Tau[I]; v - real vector, so that |
//| v(0:i-1)=0, v(i) = 1, v(i+1:n-1) stored in A(i,i+1:n-1). |
//+------------------------------------------------------------------+
void CAlglib::RMatrixLQ(CMatrixDouble &a,const int m,const int n,
                        double &tau[])
  {
//--- thin wrapper: delegate to the orthogonal-factorization core
   COrtFac::RMatrixLQ(a,m,n,tau);
  }
//+------------------------------------------------------------------+
//| QR decomposition of a rectangular complex matrix of size MxN |
//| Input parameters: |
//| A - matrix A whose indexes range within [0..M-1, 0..N-1] |
//| M - number of rows in matrix A. |
//| N - number of columns in matrix A. |
//| Output parameters: |
//| A - matrices Q and R in compact form |
//| Tau - array of scalar factors which are used to form |
//| matrix Q. Array whose indexes range within |
//| [0.. Min(M,N)-1] |
//| Matrix A is represented as A = QR, where Q is an orthogonal |
//| matrix of size MxM, R - upper triangular (or upper trapezoid) |
//| matrix of size MxN. |
//| -- LAPACK routine (version 3.0) -- |
//| Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., |
//| Courant Institute, Argonne National Lab, and Rice University|
//| September 30, 1994 |
//+------------------------------------------------------------------+
void CAlglib::CMatrixQR(CMatrixComplex &a,const int m,const int n,
                        complex &tau[])
  {
//--- thin wrapper: delegate to the orthogonal-factorization core
   COrtFac::CMatrixQR(a,m,n,tau);
  }
//+------------------------------------------------------------------+
//| LQ decomposition of a rectangular complex matrix of size MxN |
//| Input parameters: |
//| A - matrix A whose indexes range within [0..M-1, 0..N-1] |
//| M - number of rows in matrix A. |
//| N - number of columns in matrix A. |
//| Output parameters: |
//| A - matrices Q and L in compact form |
//| Tau - array of scalar factors which are used to form |
//| matrix Q. Array whose indexes range within |
//| [0.. Min(M,N)-1] |
//| Matrix A is represented as A = LQ, where Q is an orthogonal |
//| matrix of size MxM, L - lower triangular (or lower trapezoid) |
//| matrix of size MxN. |
//| -- LAPACK routine (version 3.0) -- |
//| Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., |
//| Courant Institute, Argonne National Lab, and Rice University|
//| September 30, 1994 |
//+------------------------------------------------------------------+
void CAlglib::CMatrixLQ(CMatrixComplex &a,const int m,const int n,
                        complex &tau[])
  {
//--- thin wrapper: delegate to the orthogonal-factorization core
   COrtFac::CMatrixLQ(a,m,n,tau);
  }
//+------------------------------------------------------------------+
//| Partial unpacking of matrix Q from the QR decomposition of a |
//| matrix A |
//| Input parameters: |
//| A - matrices Q and R in compact form. |
//| Output of RMatrixQR subroutine. |
//| M - number of rows in given matrix A. M>=0. |
//| N - number of columns in given matrix A. N>=0. |
//| Tau - scalar factors which are used to form Q. |
//| Output of the RMatrixQR subroutine. |
//| QColumns - required number of columns of matrix Q. |
//| M>=QColumns>=0. |
//| Output parameters: |
//| Q - first QColumns columns of matrix Q. |
//| Array whose indexes range within |
//| [0..M-1, 0..QColumns-1]. |
//| If QColumns=0, the array remains unchanged. |
//+------------------------------------------------------------------+
void CAlglib::RMatrixQRUnpackQ(CMatrixDouble &a,const int m,const int n,
                               double &tau[],const int qcolumns,
                               CMatrixDouble &q)
  {
//--- thin wrapper: delegate to the orthogonal-factorization core
   COrtFac::RMatrixQRUnpackQ(a,m,n,tau,qcolumns,q);
  }
//+------------------------------------------------------------------+
//| Unpacking of matrix R from the QR decomposition of a matrix A |
//| Input parameters: |
//| A - matrices Q and R in compact form. |
//| Output of RMatrixQR subroutine. |
//| M - number of rows in given matrix A. M>=0. |
//| N - number of columns in given matrix A. N>=0. |
//| Output parameters: |
//| R - matrix R, array[0..M-1, 0..N-1]. |
//+------------------------------------------------------------------+
void CAlglib::RMatrixQRUnpackR(CMatrixDouble &a,const int m,
                               const int n,CMatrixDouble &r)
  {
//--- thin wrapper: delegate to the orthogonal-factorization core
   COrtFac::RMatrixQRUnpackR(a,m,n,r);
  }
//+------------------------------------------------------------------+
//| Partial unpacking of matrix Q from LQ decomposition of a real    |
//| matrix A.                                                        |
//| Input parameters:                                                |
//|   A           - matrices L and Q in compact form.                |
//|                 Output of RMatrixLQ subroutine.                  |
//|   M           - number of rows in matrix A. M>=0.                |
//|   N           - number of columns in matrix A. N>=0.             |
//|   Tau         - scalar factors which are used to form Q.         |
//|                 Output of RMatrixLQ subroutine.                  |
//|   QRows       - required number of rows in matrix Q.             |
//|                 N>=QRows>=0.                                     |
//| Output parameters: |
//| Q - first QRows rows of matrix Q. |
//| Array whose index ranges within [0..QRows-1, |
//| 0..N-1]. |
//| If QRows=0, array isn't changed. |
//+------------------------------------------------------------------+
void CAlglib::RMatrixLQUnpackQ(CMatrixDouble &a,const int m,const int n,
                               double &tau[],const int qrows,
                               CMatrixDouble &q)
  {
//--- thin wrapper: delegate to the orthogonal-factorization core
   COrtFac::RMatrixLQUnpackQ(a,m,n,tau,qrows,q);
  }
//+------------------------------------------------------------------+
//| Unpacking of matrix L from the LQ decomposition of a matrix A |
//| Input parameters: |
//| A -matrices Q and L in compact form. |
//| Output of RMatrixLQ subroutine. |
//| M -number of rows in given matrix A. M>=0. |
//| N -number of columns in given matrix A. N>=0. |
//| Output parameters: |
//| L -matrix L, array[0..M-1, 0..N-1]. |
//+------------------------------------------------------------------+
void CAlglib::RMatrixLQUnpackL(CMatrixDouble &a,const int m,
                               const int n,CMatrixDouble &l)
  {
//--- thin wrapper: delegate to the orthogonal-factorization core
   COrtFac::RMatrixLQUnpackL(a,m,n,l);
  }
//+------------------------------------------------------------------+
//| Partial unpacking of matrix Q from QR decomposition of a complex |
//| matrix A. |
//| Input parameters: |
//| A - matrices Q and R in compact form. |
//| Output of CMatrixQR subroutine . |
//| M - number of rows in matrix A. M>=0. |
//| N - number of columns in matrix A. N>=0. |
//| Tau - scalar factors which are used to form Q. |
//| Output of CMatrixQR subroutine . |
//| QColumns - required number of columns in matrix Q. |
//| M>=QColumns>=0. |
//| Output parameters: |
//| Q - first QColumns columns of matrix Q. |
//| Array whose index ranges within [0..M-1, |
//| 0..QColumns-1]. |
//| If QColumns=0, array isn't changed. |
//+------------------------------------------------------------------+
void CAlglib::CMatrixQRUnpackQ(CMatrixComplex &a,const int m,
                               const int n,complex &tau[],
                               const int qcolumns,CMatrixComplex &q)
  {
//--- thin wrapper: delegate to the orthogonal-factorization core
   COrtFac::CMatrixQRUnpackQ(a,m,n,tau,qcolumns,q);
  }
//+------------------------------------------------------------------+
//| Unpacking of matrix R from the QR decomposition of a matrix A |
//| Input parameters: |
//| A - matrices Q and R in compact form. |
//| Output of CMatrixQR subroutine. |
//| M - number of rows in given matrix A. M>=0. |
//| N - number of columns in given matrix A. N>=0. |
//| Output parameters: |
//| R - matrix R, array[0..M-1, 0..N-1]. |
//+------------------------------------------------------------------+
void CAlglib::CMatrixQRUnpackR(CMatrixComplex &a,const int m,
                               const int n,CMatrixComplex &r)
  {
//--- thin wrapper: delegate to the orthogonal-factorization core
   COrtFac::CMatrixQRUnpackR(a,m,n,r);
  }
//+------------------------------------------------------------------+
//| Partial unpacking of matrix Q from LQ decomposition of a complex |
//| matrix A. |
//| Input parameters: |
//| A - matrices Q and R in compact form. |
//| Output of CMatrixLQ subroutine. |
//| M - number of rows in matrix A. M>=0. |
//| N - number of columns in matrix A. N>=0. |
//| Tau - scalar factors which are used to form Q. |
//| Output of CMatrixLQ subroutine . |
//| QRows - required number of rows in matrix Q. |
//|                 N>=QRows>=0.                                     |
//| Output parameters: |
//| Q - first QRows rows of matrix Q. |
//| Array whose index ranges within [0..QRows-1, |
//| 0..N-1]. |
//| If QRows=0, array isn't changed. |
//+------------------------------------------------------------------+
void CAlglib::CMatrixLQUnpackQ(CMatrixComplex &a,const int m,
                               const int n,complex &tau[],
                               const int qrows,CMatrixComplex &q)
  {
//--- thin wrapper: delegate to the orthogonal-factorization core
   COrtFac::CMatrixLQUnpackQ(a,m,n,tau,qrows,q);
  }
//+------------------------------------------------------------------+
//| Unpacking of matrix L from the LQ decomposition of a matrix A |
//| Input parameters: |
//| A - matrices Q and L in compact form. |
//| Output of CMatrixLQ subroutine. |
//| M - number of rows in given matrix A. M>=0. |
//| N - number of columns in given matrix A. N>=0. |
//| Output parameters: |
//| L - matrix L, array[0..M-1, 0..N-1]. |
//+------------------------------------------------------------------+
void CAlglib::CMatrixLQUnpackL(CMatrixComplex &a,const int m,
                               const int n,CMatrixComplex &l)
  {
//--- thin wrapper: delegate to the orthogonal-factorization core
   COrtFac::CMatrixLQUnpackL(a,m,n,l);
  }
//+------------------------------------------------------------------+
//| Reduction of a rectangular matrix to bidiagonal form |
//| The algorithm reduces the rectangular matrix A to bidiagonal |
//| form by orthogonal transformations P and Q: A = Q*B*(P^T).       |
//| Input parameters: |
//| A - source matrix. array[0..M-1, 0..N-1] |
//| M - number of rows in matrix A. |
//| N - number of columns in matrix A. |
//| Output parameters: |
//| A - matrices Q, B, P in compact form (see below). |
//| TauQ - scalar factors which are used to form matrix Q. |
//| TauP - scalar factors which are used to form matrix P. |
//| The main diagonal and one of the secondary diagonals of matrix A |
//| are replaced with bidiagonal matrix B. Other elements contain |
//| elementary reflections which form MxM matrix Q and NxN matrix P, |
//| respectively. |
//| If M>=N, B is the upper bidiagonal MxN matrix and is stored in |
//| the corresponding elements of matrix A. Matrix Q is represented |
//| as a product of elementary reflections Q = H(0)*H(1)*...*H(n-1), |
//| where H(i) = 1-tau*v*v'. Here tau is a scalar which is stored in |
//| TauQ[i], and vector v has the following structure: v(0:i-1)=0, |
//| v(i)=1, v(i+1:m-1) is stored in elements A(i+1:m-1,i).Matrix P is|
//| as follows: P = G(0)*G(1)*...*G(n-2), where G(i) = 1 - tau*u*u'. |
//| Tau is stored in TauP[i], u(0:i)=0, u(i+1)=1, u(i+2:n-1) is |
//| stored in elements A(i,i+2:n-1). |
//| If M<N, B is the lower bidiagonal MxN matrix and is stored in the|
//| corresponding elements of matrix A. Q = H(0)*H(1)*...*H(m-2), |
//| where H(i) = 1 - tau*v*v', tau is stored in TauQ, v(0:i)=0, |
//| v(i+1)=1, v(i+2:m-1) is stored in elements A(i+2:m-1,i). |
//| P = G(0)*G(1)*...*G(m-1), G(i) = 1-tau*u*u', tau is stored in |
//| TauP, u(0:i-1)=0, u(i)=1, u(i+1:n-1) is stored in A(i,i+1:n-1). |
//| EXAMPLE: |
//| m=6, n=5 (m > n): m=5, n=6 (m < n): |
//| ( d e u1 u1 u1 ) ( d u1 u1 u1 u1 u1 ) |
//| ( v1 d e u2 u2 ) ( e d u2 u2 u2 u2 ) |
//| ( v1 v2 d e u3 ) ( v1 e d u3 u3 u3 ) |
//| ( v1 v2 v3 d e ) ( v1 v2 e d u4 u4 ) |
//| ( v1 v2 v3 v4 d ) ( v1 v2 v3 e d u5 ) |
//| ( v1 v2 v3 v4 v5 ) |
//| Here vi and ui are vectors which form H(i) and G(i), and d and |
//| e - are the diagonal and off-diagonal elements of matrix B. |
//+------------------------------------------------------------------+
void CAlglib::RMatrixBD(CMatrixDouble &a,const int m,const int n,
                        double &tauq[],double &taup[])
  {
//--- thin wrapper: delegate to the orthogonal-factorization core
   COrtFac::RMatrixBD(a,m,n,tauq,taup);
  }
//+------------------------------------------------------------------+
//| Unpacking matrix Q which reduces a matrix to bidiagonal form. |
//| Input parameters: |
//| QP - matrices Q and P in compact form. |
//| Output of ToBidiagonal subroutine. |
//| M - number of rows in matrix A. |
//| N - number of columns in matrix A. |
//| TAUQ - scalar factors which are used to form Q. |
//| Output of ToBidiagonal subroutine. |
//| QColumns - required number of columns in matrix Q. |
//| M>=QColumns>=0. |
//| Output parameters: |
//| Q - first QColumns columns of matrix Q. |
//| Array[0..M-1, 0..QColumns-1] |
//| If QColumns=0, the array is not modified. |
//+------------------------------------------------------------------+
void CAlglib::RMatrixBDUnpackQ(CMatrixDouble &qp,const int m,
                               const int n,double &tauq[],
                               const int qcolumns,CMatrixDouble &q)
  {
//--- thin wrapper: delegate to the orthogonal-factorization core
   COrtFac::RMatrixBDUnpackQ(qp,m,n,tauq,qcolumns,q);
  }
//+------------------------------------------------------------------+
//| Multiplication by matrix Q which reduces matrix A to bidiagonal |
//| form. |
//| The algorithm allows pre- or post-multiply by Q or Q'. |
//| Input parameters: |
//| QP - matrices Q and P in compact form. |
//| Output of ToBidiagonal subroutine. |
//| M - number of rows in matrix A. |
//| N - number of columns in matrix A. |
//| TAUQ - scalar factors which are used to form Q. |
//| Output of ToBidiagonal subroutine. |
//| Z - multiplied matrix. |
//| array[0..ZRows-1,0..ZColumns-1] |
//| ZRows - number of rows in matrix Z. If FromTheRight= |
//| =False, ZRows=M, otherwise ZRows can be |
//| arbitrary. |
//| ZColumns - number of columns in matrix Z. If |
//| FromTheRight=True, ZColumns=M, otherwise |
//| ZColumns can be arbitrary. |
//| FromTheRight - pre- or post-multiply. |
//| DoTranspose - multiply by Q or Q'. |
//| Output parameters: |
//| Z - product of Z and Q. |
//| Array[0..ZRows-1,0..ZColumns-1] |
//| If ZRows=0 or ZColumns=0, the array is not |
//| modified. |
//+------------------------------------------------------------------+
void CAlglib::RMatrixBDMultiplyByQ(CMatrixDouble &qp,const int m,
                                   const int n,double &tauq[],
                                   CMatrixDouble &z,const int zrows,
                                   const int zcolumns,const bool fromtheright,
                                   const bool dotranspose)
  {
//--- thin wrapper: delegate to the orthogonal-factorization core;
//--- Z is updated in place with the product
   COrtFac::RMatrixBDMultiplyByQ(qp,m,n,tauq,z,zrows,zcolumns,fromtheright,dotranspose);
  }
//+------------------------------------------------------------------+
//| Unpacking matrix P which reduces matrix A to bidiagonal form. |
//| The subroutine returns transposed matrix P. |
//| Input parameters: |
//| QP - matrices Q and P in compact form. |
//| Output of ToBidiagonal subroutine. |
//| M - number of rows in matrix A. |
//| N - number of columns in matrix A. |
//| TAUP - scalar factors which are used to form P. |
//| Output of ToBidiagonal subroutine. |
//| PTRows - required number of rows of matrix P^T. |
//| N >= PTRows >= 0. |
//| Output parameters: |
//|     PT      - first PTRows rows of matrix P^T                    |
//| Array[0..PTRows-1, 0..N-1] |
//| If PTRows=0, the array is not modified. |
//+------------------------------------------------------------------+
void CAlglib::RMatrixBDUnpackPT(CMatrixDouble &qp,const int m,
                                const int n,double &taup[],
                                const int ptrows,CMatrixDouble &pt)
  {
//--- thin wrapper: delegate to the orthogonal-factorization core
   COrtFac::RMatrixBDUnpackPT(qp,m,n,taup,ptrows,pt);
  }
//+------------------------------------------------------------------+
//| Multiplication by matrix P which reduces matrix A to bidiagonal |
//| form. |
//| The algorithm allows pre- or post-multiply by P or P'. |
//| Input parameters: |
//| QP - matrices Q and P in compact form. |
//| Output of RMatrixBD subroutine. |
//| M - number of rows in matrix A. |
//| N - number of columns in matrix A. |
//| TAUP - scalar factors which are used to form P. |
//| Output of RMatrixBD subroutine. |
//| Z - multiplied matrix. |
//| Array whose indexes range within |
//| [0..ZRows-1,0..ZColumns-1]. |
//| ZRows - number of rows in matrix Z. If |
//| FromTheRight=False, ZRows=N, otherwise ZRows |
//| can be arbitrary. |
//| ZColumns - number of columns in matrix Z. If |
//| FromTheRight=True, ZColumns=N, otherwise |
//| ZColumns can be arbitrary. |
//| FromTheRight - pre- or post-multiply. |
//| DoTranspose - multiply by P or P'. |
//| Output parameters: |
//| Z - product of Z and P. |
//| Array whose indexes range within |
//| [0..ZRows-1,0..ZColumns-1]. If ZRows=0 or |
//| ZColumns=0, the array is not modified. |
//+------------------------------------------------------------------+
void CAlglib::RMatrixBDMultiplyByP(CMatrixDouble &qp,const int m,
                                   const int n,double &taup[],
                                   CMatrixDouble &z,const int zrows,
                                   const int zcolumns,const bool fromtheright,
                                   const bool dotranspose)
  {
//--- thin wrapper: delegate to the orthogonal-factorization core;
//--- Z is updated in place with the product
   COrtFac::RMatrixBDMultiplyByP(qp,m,n,taup,z,zrows,zcolumns,fromtheright,dotranspose);
  }
//+------------------------------------------------------------------+
//| Unpacking of the main and secondary diagonals of bidiagonal |
//| decomposition of matrix A. |
//| Input parameters: |
//| B - output of RMatrixBD subroutine. |
//| M - number of rows in matrix B. |
//| N - number of columns in matrix B. |
//| Output parameters: |
//| IsUpper - True, if the matrix is upper bidiagonal. |
//| otherwise IsUpper is False. |
//| D - the main diagonal. |
//| Array whose index ranges within [0..Min(M,N)-1]. |
//| E - the secondary diagonal (upper or lower, depending|
//| on the value of IsUpper). |
//| Array index ranges within [0..Min(M, N)-1], the |
//| last element is not used. |
//+------------------------------------------------------------------+
void CAlglib::RMatrixBDUnpackDiagonals(CMatrixDouble &b,const int m,
                                       const int n,bool &IsUpper,
                                       double &d[],double &e[])
  {
//--- initialization: reset the output flag before it is filled by the core
   IsUpper=false;
//--- function call: the core sets IsUpper, D and E from the compact form B
   COrtFac::RMatrixBDUnpackDiagonals(b,m,n,IsUpper,d,e);
  }
//+------------------------------------------------------------------+
//| Reduction of a square matrix to upper Hessenberg form: |
//| Q'*A*Q = H, where Q is an orthogonal matrix, H - Hessenberg |
//| matrix. |
//| Input parameters: |
//| A - matrix A with elements [0..N-1, 0..N-1] |
//| N - size of matrix A. |
//| Output parameters: |
//|     A       - matrices Q and H in compact form (see below).      |
//| Tau - array of scalar factors which are used to form |
//| matrix Q. |
//| Array whose index ranges within [0..N-2] |
//| Matrix H is located on the main diagonal, on the lower secondary |
//| diagonal and above the main diagonal of matrix A. The elements |
//| which are used to form matrix Q are situated in array Tau and |
//| below the lower secondary diagonal of matrix A as follows: |
//| Matrix Q is represented as a product of elementary reflections |
//|     Q = H(0)*H(1)*...*H(n-2),                                    |
//| where each H(i) is given by |
//| H(i) = 1 - tau * v * (v^T) |
//| where tau is a scalar stored in Tau[I]; v - is a real vector, |
//| so that v(0:i) = 0, v(i+1) = 1, v(i+2:n-1) stored in |
//| A(i+2:n-1,i). |
//| -- LAPACK routine (version 3.0) -- |
//| Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., |
//| Courant Institute, Argonne National Lab, and Rice University|
//| October 31, 1992 |
//+------------------------------------------------------------------+
void CAlglib::RMatrixHessenberg(CMatrixDouble &a,const int n,
                                double &tau[])
  {
//--- thin wrapper: delegate to the orthogonal-factorization core
   COrtFac::RMatrixHessenberg(a,n,tau);
  }
//+------------------------------------------------------------------+
//| Unpacking matrix Q which reduces matrix A to upper Hessenberg |
//| form |
//| Input parameters: |
//| A - output of RMatrixHessenberg subroutine. |
//| N - size of matrix A. |
//| Tau - scalar factors which are used to form Q. |
//| Output of RMatrixHessenberg subroutine. |
//| Output parameters: |
//| Q - matrix Q. |
//| Array whose indexes range within [0..N-1, 0..N-1]. |
//+------------------------------------------------------------------+
void CAlglib::RMatrixHessenbergUnpackQ(CMatrixDouble &a,const int n,
                                       double &tau[],CMatrixDouble &q)
  {
//--- thin wrapper: delegate to the orthogonal-factorization core
   COrtFac::RMatrixHessenbergUnpackQ(a,n,tau,q);
  }
//+------------------------------------------------------------------+
//| Unpacking matrix H (the result of matrix A reduction to upper |
//| Hessenberg form) |
//| Input parameters: |
//| A - output of RMatrixHessenberg subroutine. |
//| N - size of matrix A. |
//| Output parameters: |
//| H - matrix H. Array whose indexes range within |
//| [0..N-1, 0..N-1]. |
//+------------------------------------------------------------------+
void CAlglib::RMatrixHessenbergUnpackH(CMatrixDouble &a,const int n,
                                       CMatrixDouble &h)
  {
//--- thin wrapper: delegate to the orthogonal-factorization core
   COrtFac::RMatrixHessenbergUnpackH(a,n,h);
  }
//+------------------------------------------------------------------+
//| Reduction of a symmetric matrix which is given by its higher or |
//| lower triangular part to a tridiagonal matrix using orthogonal |
//| similarity transformation: Q'*A*Q=T. |
//| Input parameters: |
//| A - matrix to be transformed |
//| array with elements [0..N-1, 0..N-1]. |
//| N - size of matrix A. |
//| IsUpper - storage format. If IsUpper = True, then matrix A |
//| is given by its upper triangle, and the lower |
//| triangle is not used and not modified by the |
//| algorithm, and vice versa if IsUpper = False. |
//| Output parameters: |
//| A - matrices T and Q in compact form (see lower) |
//| Tau - array of factors which are forming matrices H(i) |
//| array with elements [0..N-2]. |
//| D - main diagonal of symmetric matrix T. |
//| array with elements [0..N-1]. |
//| E - secondary diagonal of symmetric matrix T. |
//| array with elements [0..N-2]. |
//| If IsUpper=True, the matrix Q is represented as a product of |
//| elementary reflectors |
//|     Q = H(n-2) . . . H(1) H(0).                                  |
//| Each H(i) has the form |
//| H(i) = I - tau * v * v' |
//| where tau is a real scalar, and v is a real vector with |
//| v(i+1:n-1) = 0, v(i) = 1, v(0:i-1) is stored on exit in |
//| A(0:i-1,i+1), and tau in TAU(i). |
//| If IsUpper=False, the matrix Q is represented as a product of |
//| elementary reflectors |
//|     Q = H(0) H(1) . . . H(n-2).                                  |
//| Each H(i) has the form |
//| H(i) = I - tau * v * v' |
//| where tau is a real scalar, and v is a real vector with |
//| v(0:i) = 0, v(i+1) = 1, v(i+2:n-1) is stored on exit in |
//| A(i+2:n-1,i), and tau in TAU(i). |
//| The contents of A on exit are illustrated by the following |
//| examples with n = 5: |
//| if UPLO = 'U': if UPLO = 'L': |
//| ( d e v1 v2 v3 ) ( d ) |
//| ( d e v2 v3 ) ( e d ) |
//| ( d e v3 ) ( v0 e d ) |
//| ( d e ) ( v0 v1 e d ) |
//| ( d ) ( v0 v1 v2 e d ) |
//| where d and e denote diagonal and off-diagonal elements of T, |
//| and vi denotes an element of the vector defining H(i). |
//| -- LAPACK routine (version 3.0) -- |
//| Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., |
//| Courant Institute, Argonne National Lab, and Rice University|
//| October 31, 1992 |
//+------------------------------------------------------------------+
void CAlglib::SMatrixTD(CMatrixDouble &a,const int n,const bool IsUpper,
                        double &tau[],double &d[],double &e[])
  {
//--- thin wrapper: delegate to the orthogonal-factorization core
   COrtFac::SMatrixTD(a,n,IsUpper,tau,d,e);
  }
//+------------------------------------------------------------------+
//| Unpacking matrix Q which reduces symmetric matrix to a |
//| tridiagonal form. |
//| Input parameters: |
//| A - the result of a SMatrixTD subroutine |
//| N - size of matrix A. |
//| IsUpper - storage format (a parameter of SMatrixTD |
//| subroutine) |
//| Tau - the result of a SMatrixTD subroutine |
//| Output parameters: |
//| Q - transformation matrix. |
//| array with elements [0..N-1, 0..N-1]. |
//+------------------------------------------------------------------+
void CAlglib::SMatrixTDUnpackQ(CMatrixDouble &a,const int n,
                               const bool IsUpper,double &tau[],
                               CMatrixDouble &q)
  {
//--- thin wrapper: delegate to the orthogonal-factorization core
   COrtFac::SMatrixTDUnpackQ(a,n,IsUpper,tau,q);
  }
//+------------------------------------------------------------------+
//| Reduction of a Hermitian matrix which is given by its higher or |
//| lower triangular part to a real tridiagonal matrix using unitary |
//| similarity transformation: Q'*A*Q = T. |
//| Input parameters: |
//| A - matrix to be transformed |
//| array with elements [0..N-1, 0..N-1]. |
//| N - size of matrix A. |
//| IsUpper - storage format. If IsUpper = True, then matrix A |
//| is given by its upper triangle, and the lower |
//| triangle is not used and not modified by the |
//| algorithm, and vice versa if IsUpper = False. |
//| Output parameters: |
//| A - matrices T and Q in compact form (see lower) |
//| Tau - array of factors which are forming matrices H(i) |
//| array with elements [0..N-2]. |
//| D - main diagonal of real symmetric matrix T. |
//| array with elements [0..N-1]. |
//| E - secondary diagonal of real symmetric matrix T. |
//| array with elements [0..N-2]. |
//| If IsUpper=True, the matrix Q is represented as a product of |
//| elementary reflectors |
//|     Q = H(n-2) . . . H(1) H(0).                                  |
//| Each H(i) has the form |
//| H(i) = I - tau * v * v' |
//| where tau is a complex scalar, and v is a complex vector with |
//| v(i+1:n-1) = 0, v(i) = 1, v(0:i-1) is stored on exit in |
//| A(0:i-1,i+1), and tau in TAU(i). |
//| If IsUpper=False, the matrix Q is represented as a product of |
//| elementary reflectors |
//|     Q = H(0) H(1) . . . H(n-2).                                  |
//| Each H(i) has the form |
//| H(i) = I - tau * v * v' |
//| where tau is a complex scalar, and v is a complex vector with |
//| v(0:i) = 0, v(i+1) = 1, v(i+2:n-1) is stored on exit in |
//| A(i+2:n-1,i), and tau in TAU(i). |
//| The contents of A on exit are illustrated by the following |
//| examples with n = 5: |
//| if UPLO = 'U': if UPLO = 'L': |
//| ( d e v1 v2 v3 ) ( d ) |
//| ( d e v2 v3 ) ( e d ) |
//| ( d e v3 ) ( v0 e d ) |
//| ( d e ) ( v0 v1 e d ) |
//| ( d ) ( v0 v1 v2 e d ) |
//| where d and e denote diagonal and off-diagonal elements of T, and|
//| vi denotes an element of the vector defining H(i). |
//| -- LAPACK routine (version 3.0) -- |
//| Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., |
//| Courant Institute, Argonne National Lab, and Rice University|
//| October 31, 1992 |
//+------------------------------------------------------------------+
void CAlglib::HMatrixTD(CMatrixComplex &a,const int n,const bool IsUpper,
                        complex &tau[],double &d[],double &e[])
  {
//--- thin wrapper: delegate to the orthogonal-factorization core
   COrtFac::HMatrixTD(a,n,IsUpper,tau,d,e);
  }
//+------------------------------------------------------------------+
//| Unpacking matrix Q which reduces a Hermitian matrix to a real |
//| tridiagonal form. |
//| Input parameters: |
//| A - the result of a HMatrixTD subroutine |
//| N - size of matrix A. |
//| IsUpper - storage format (a parameter of HMatrixTD |
//| subroutine) |
//| Tau - the result of a HMatrixTD subroutine |
//| Output parameters: |
//| Q - transformation matrix. |
//| array with elements [0..N-1, 0..N-1]. |
//+------------------------------------------------------------------+
void CAlglib::HMatrixTDUnpackQ(CMatrixComplex &a,const int n,
                               const bool IsUpper,complex &tau[],
                               CMatrixComplex &q)
  {
//--- thin wrapper: delegate to the orthogonal-factorization core
   COrtFac::HMatrixTDUnpackQ(a,n,IsUpper,tau,q);
  }
//+------------------------------------------------------------------+
//| This function initializes subspace iteration solver. This solver |
//| is used to solve symmetric real eigenproblems where just a few |
//| (top K) eigenvalues and corresponding eigenvectors is required. |
//| This solver can be significantly faster than complete EVD |
//| decomposition in the following case: |
//| * when only just a small fraction of top eigenpairs of dense |
//| matrix is required. When K approaches N, this solver is |
//| slower than complete dense EVD |
//| * when problem matrix is sparse(and/or is not known explicitly,|
//| i.e. only matrix-matrix product can be performed) |
//| USAGE (explicit dense/sparse matrix): |
//| 1. User initializes algorithm state with EigSubSpaceCreate() |
//| call |
//| 2. [optional] User tunes solver parameters by calling |
//| eigsubspacesetcond() or other functions |
//| 3. User calls EigSubSpaceSolveDense() or |
//| EigSubSpaceSolveSparse() methods, which take algorithm state|
//| and 2D array or CSparseMatrix object. |
//| USAGE (out-of-core mode): |
//| 1. User initializes algorithm state with EigSubSpaceCreate() |
//| call |
//| 2. [optional] User tunes solver parameters by calling |
//| EigSubSpaceSetCond() or other functions |
//| 3. User activates out-of-core mode of the solver and repeatedly|
//| calls communication functions in a loop like below: |
//| > EigSubSpaceOOCStart(state) |
//| > while EigSubSpaceOOCContinue(state) do |
//| > EigSubSpaceOOCGetRequestInfo(state, RequestType, M) |
//| > EigSubSpaceOOCGetRequestData(state, X) |
//| > [calculate Y=A*X, with X=R^NxM] |
//| > EigSubSpaceOOCSendResult(state, Y) |
//| > EigSubSpaceOOCStop(state, W, Z, Report) |
//| INPUT PARAMETERS: |
//| N - problem dimensionality, N>0 |
//| K - number of top eigenvector to calculate, 0<K<=N. |
//| OUTPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| NOTE: if you solve many similar EVD problems you may find it |
//| useful to reuse previous subspace as warm-start point for |
//| new EVD problem. It can be done with |
//| EigSubSpaceSetWarmStart() function. |
//+------------------------------------------------------------------+
void CAlglib::EigSubSpaceCreate(int n,int k,CEigSubSpaceState &state)
  {
//--- thin wrapper: delegate to the eigensolver core
   CEigenVDetect::EigSubSpaceCreate(n,k,state);
  }
//+------------------------------------------------------------------+
//| Buffered version of constructor which aims to reuse previously |
//| allocated memory as much as possible. |
//+------------------------------------------------------------------+
void CAlglib::EigSubSpaceCreateBuf(int n,int k,CEigSubSpaceState &state)
  {
//--- thin wrapper: delegate to the eigensolver core (reuses State memory)
   CEigenVDetect::EigSubSpaceCreateBuf(n,k,state);
  }
//+------------------------------------------------------------------+
//| This function sets stopping criteria for the solver:             |
//| * error in eigenvector/value allowed by solver |
//| * maximum number of iterations to perform |
//| INPUT PARAMETERS: |
//| State - solver structure |
//| Eps - eps>=0, with non-zero value used to tell solver |
//| that it can stop after all eigenvalues converged |
//| with error roughly proportional to |
//| eps*MAX(LAMBDA_MAX), where LAMBDA_MAX is a maximum |
//| eigenvalue. Zero value means that no check for |
//| precision is performed. |
//| MaxIts - maxits>=0, with non-zero value used to tell solver |
//| that it can stop after maxits steps (no matter how |
//| precise current estimate is) |
//| NOTE: passing eps=0 and maxits=0 results in automatic selection |
//| of moderate eps as stopping criteria (1.0E-6 in current |
//| implementation, but it may change without notice). |
//| NOTE: very small values of eps are possible (say, 1.0E-12), |
//| although the larger problem you solve (N and/or K), the |
//| harder it is to find precise eigenvectors because rounding |
//| errors tend to accumulate. |
//| NOTE: passing non-zero eps results in some performance penalty, |
//| roughly equal to 2N*(2K)^2 FLOPs per iteration. These |
//| additional computations are required in order to estimate |
//| current error in eigenvalues via Rayleigh-Ritz process. |
//| Most of this additional time is spent in construction of |
//| ~2Kx2K symmetric subproblem whose eigenvalues are checked |
//| with exact eigensolver. |
//| This additional time is negligible if you search for |
//| eigenvalues of the large dense matrix, but may become |
//| noticeable on highly sparse EVD problems, where cost of |
//| matrix-matrix product is low. |
//| If you set eps to exactly zero, Rayleigh-Ritz phase is |
//| completely turned off. |
//+------------------------------------------------------------------+
void CAlglib::EigSubSpaceSetCond(CEigSubSpaceState &state,double eps,int maxits)
  {
//--- thin wrapper: delegate to the eigensolver core
   CEigenVDetect::EigSubSpaceSetCond(state,eps,maxits);
  }
//+------------------------------------------------------------------+
//| This function sets warm-start mode of the solver: next call to |
//| the solver will reuse previous subspace as warm-start point. It |
//| can significantly speed-up convergence when you solve many |
//| similar eigenproblems. |
//| INPUT PARAMETERS: |
//| State - solver structure |
//| UseWarmStart - either True or False |
//+------------------------------------------------------------------+
void CAlglib::EigSubSpaceSetWarmStart(CEigSubSpaceState &state,bool usewarmstart)
  {
//--- thin wrapper: delegate to the eigensolver core
   CEigenVDetect::EigSubSpaceSetWarmStart(state,usewarmstart);
  }
//+------------------------------------------------------------------+
//| This function initiates out-of-core mode of subspace eigensolver.|
//| It should be used in conjunction with other out-of-core-related |
//| functions of this subspackage in a loop like below: |
//| > EigSubSpaceOOCStart(state) |
//| > while EigSubSpaceOOCContinue(state) do |
//| > EigSubSpaceOOCGetRequestInfo(state, RequestType, M) |
//| > EigSubSpaceOOCGetRequestData(state, X) |
//| > [calculate Y=A*X, with X=R^NxM] |
//| > EigSubSpaceOOCSendResult(state, Y) |
//| > EigSubSpaceOOCStop(state, W, Z, Report) |
//| INPUT PARAMETERS: |
//| State - solver object |
//| MType - matrix type: |
//| * 0 for real symmetric matrix (solver assumes that matrix|
//| being processed is symmetric; symmetric direct |
//| eigensolver is used for smaller subproblems arising |
//| during solution of larger "full" task) |
//| Future versions of ALGLIB may introduce support for other|
//| matrix types; for now, only symmetric eigenproblems are |
//| supported. |
//+------------------------------------------------------------------+
void CAlglib::EigSubSpaceOOCStart(CEigSubSpaceState &state,int mtype)
  {
//--- thin wrapper: switch the solver into out-of-core (reverse
//--- communication) mode; mtype selects the matrix type (0 = symmetric)
   CEigenVDetect::EigSubSpaceOOCStart(state,mtype);
  }
//+------------------------------------------------------------------+
//| This function performs subspace iteration in the out-of-core mode|
//| It should be used in conjunction with other out-of-core-related |
//| functions of this subpackage in a loop like below:               |
//| > EigSubSpaceOOCStart(state)                                     |
//| > while EigSubSpaceOOCContinue(state) do                         |
//| > EigSubSpaceOOCGetRequestInfo(state, RequestType, M)            |
//| > EigSubSpaceOOCGetRequestData(state, X)                         |
//| > [calculate Y=A*X, with X=R^NxM] |
//| > EigSubSpaceOOCSendResult(state, Y) |
//| > EigSubSpaceOOCStop(state, W, Z, Report) |
//+------------------------------------------------------------------+
bool CAlglib::EigSubSpaceOOCContinue(CEigSubSpaceState &state)
  {
//--- ask the computational core whether another out-of-core iteration
//--- (i.e. another A*X product from the caller) is still required
   bool more_iterations=CEigenVDetect::EigSubSpaceOOCContinue(state);
//--- return result
   return(more_iterations);
  }
//+------------------------------------------------------------------+
//| This function is used to retrieve information about out-of-core |
//| request sent by solver to user code: request type (current |
//| version of the solver sends only requests for matrix-matrix |
//| products) and request size (size of the matrices being |
//| multiplied). |
//| This function returns just request metrics; in order to get |
//| contents of the matrices being multiplied, use |
//| EigSubSpaceOOCGetRequestData(). |
//| It should be used in conjunction with other out-of-core-related |
//| functions of this subpackage in a loop like below:               |
//| > EigSubSpaceOOCStart(state) |
//| > while EigSubSpaceOOCContinue(state) do |
//| > EigSubSpaceOOCGetRequestInfo(state, RequestType, M) |
//| > EigSubSpaceOOCGetRequestData(state, X) |
//| > [calculate Y=A*X, with X=R^NxM] |
//| > EigSubSpaceOOCSendResult(state, Y) |
//| > EigSubSpaceOOCStop(state, W, Z, Report) |
//| INPUT PARAMETERS: |
//| State - solver running in out-of-core mode |
//| OUTPUT PARAMETERS: |
//| RequestType - type of the request to process: |
//| * 0 - for matrix-matrix product A*X, with A being |
//| NxN matrix whose eigenvalues/vectors are needed,|
//| and X being NxREQUESTSIZE one which is returned|
//| by the eigsubspaceoocgetrequestdata(). |
//| RequestSize - size of the X matrix (number of columns), |
//| usually it is several times larger than number |
//| of vectors K requested by user. |
//+------------------------------------------------------------------+
void CAlglib::EigSubSpaceOOCGetRequestInfo(CEigSubSpaceState &state,
                                           int &requesttype,
                                           int &requestsize)
  {
//--- thin wrapper: fetch request metadata (type and matrix width) from
//--- the out-of-core solver; matrix contents are obtained separately
//--- via EigSubSpaceOOCGetRequestData()
   CEigenVDetect::EigSubSpaceOOCGetRequestInfo(state,requesttype,requestsize);
  }
//+------------------------------------------------------------------+
//| This function is used to retrieve information about out-of-core |
//| request sent by solver to user code: |
//| matrix X(array[N,RequestSize]) |
//| which have to be multiplied by out-of-core matrix A in a product |
//| A*X. |
//| This function returns just request data; in order to get size of |
//| the data prior to processing request, use                        |
//| EigSubSpaceOOCGetRequestInfo(). |
//| It should be used in conjunction with other out-of-core-related |
//| functions of this subpackage in a loop like below:               |
//| > EigSubSpaceOOCStart(state) |
//| > while EigSubSpaceOOCContinue(state) do |
//| > EigSubSpaceOOCGetRequestInfo(state, RequestType, M) |
//| > EigSubSpaceOOCGetRequestData(state, X) |
//| > [calculate Y=A*X, with X=R^NxM] |
//| > EigSubSpaceOOCSendResult(state, Y) |
//| > EigSubSpaceOOCStop(state, W, Z, Report) |
//| INPUT PARAMETERS: |
//| State - solver running in out-of-core mode |
//| X - possibly preallocated storage; reallocated if |
//| needed, left unchanged, if large enough to store|
//| request data. |
//| OUTPUT PARAMETERS: |
//| X - array[N,RequestSize] or larger, leading |
//| rectangle is filled with dense matrix X. |
//+------------------------------------------------------------------+
void CAlglib::EigSubSpaceOOCGetRequestData(CEigSubSpaceState &state,
                                           CMatrixDouble &x)
  {
//--- thin wrapper: copy the matrix X of the pending A*X request into
//--- caller-provided storage (reallocated by the core if too small)
   CEigenVDetect::EigSubSpaceOOCGetRequestData(state,x);
  }
//+------------------------------------------------------------------+
//| This function is used to send user reply to out-of-core request |
//| sent by solver. Usually it is product A*X for returned by solver |
//| matrix X. |
//| It should be used in conjunction with other out-of-core-related |
//| functions of this subpackage in a loop like below:               |
//| > EigSubSpaceOOCStart(state) |
//| > while EigSubSpaceOOCContinue(state) do |
//| > EigSubSpaceOOCGetRequestInfo(state, RequestType, M) |
//| > EigSubSpaceOOCGetRequestData(state, X) |
//| > [calculate Y=A*X, with X=R^NxM] |
//| > EigSubSpaceOOCSendResult(state, Y) |
//| > EigSubSpaceOOCStop(state, W, Z, Report) |
//| INPUT PARAMETERS: |
//| State - solver running in out-of-core mode |
//| AX - array[N,RequestSize] or larger, leading rectangle |
//| is filled with product A*X. |
//+------------------------------------------------------------------+
void CAlglib::EigSubSpaceOOCSendResult(CEigSubSpaceState &state,
                                       CMatrixDouble &ax)
  {
//--- thin wrapper: hand the user-computed product A*X back to the
//--- out-of-core solver
   CEigenVDetect::EigSubSpaceOOCSendResult(state,ax);
  }
//+------------------------------------------------------------------+
//| This function finalizes out-of-core mode of subspace eigensolver.|
//| It should be used in conjunction with other out-of-core-related |
//| functions of this subpackage in a loop like below:               |
//| > EigSubSpaceOOCStart(state) |
//| > while EigSubSpaceOOCContinue(state) do |
//| > EigSubSpaceOOCGetRequestInfo(state, RequestType, M) |
//| > EigSubSpaceOOCGetRequestData(state, X) |
//| > [calculate Y=A*X, with X=R^NxM] |
//| > EigSubSpaceOOCSendResult(state, Y) |
//| > EigSubSpaceOOCStop(state, W, Z, Report) |
//| INPUT PARAMETERS: |
//| State - solver state |
//| OUTPUT PARAMETERS: |
//| W - array[K], depending on solver settings: |
//| * top K eigenvalues ordered by descending - if |
//| EigenVectors are returned in Z |
//| * zeros - if invariant subspace is returned in Z |
//| Z - array[N,K], depending on solver settings either: |
//| * matrix of eigenvectors found |
//| * orthogonal basis of K-dimensional invariant subspace|
//| Rep - report with additional parameters |
//+------------------------------------------------------------------+
void CAlglib::EigSubSpaceOOCStop(CEigSubSpaceState &state,CRowDouble &w,
                                 CMatrixDouble &z,CEigSubSpaceReport &rep)
  {
//--- thin wrapper: finalize out-of-core mode and retrieve eigenvalues W,
//--- eigenvectors/basis Z and the solver report
   CEigenVDetect::EigSubSpaceOOCStop(state,w,z,rep);
  }
//+------------------------------------------------------------------+
//| This function runs subspace eigensolver for dense NxN symmetric |
//| matrix A, given by its upper or lower triangle. |
//| This function can not process nonsymmetric matrices. |
//| INPUT PARAMETERS: |
//| State - solver state |
//| A - array[N,N], symmetric NxN matrix given by one of |
//| its triangles |
//| IsUpper - whether upper or lower triangle of A is given (the |
//| other one is not referenced at all). |
//| OUTPUT PARAMETERS: |
//| W - array[K], top K EigenValues ordered by descending |
//| of their absolute values |
//| Z - array[N,K], matrix of eigenvectors found |
//| Rep - report with additional parameters |
//| NOTE: internally this function allocates a copy of NxN dense A. |
//| You should take it into account when working with very large|
//| matrices occupying almost all RAM. |
//+------------------------------------------------------------------+
void CAlglib::EigSubSpaceSolveDenses(CEigSubSpaceState &state,
                                     CMatrixDouble &a,bool IsUpper,
                                     CRowDouble &w,CMatrixDouble &z,
                                     CEigSubSpaceReport &rep)
  {
//--- thin wrapper: run the subspace eigensolver on a dense symmetric
//--- matrix given by one triangle (the core allocates a copy of A)
   CEigenVDetect::EigSubSpaceSolveDenses(state,a,IsUpper,w,z,rep);
  }
//+------------------------------------------------------------------+
//| This function runs EigenSolver for dense NxN symmetric matrix A, |
//| given by upper or lower triangle. |
//| This function can not process nonsymmetric matrices. |
//| INPUT PARAMETERS: |
//| State - solver state |
//| A - NxN symmetric matrix given by one of its triangles |
//| IsUpper - whether upper or lower triangle of A is given (the |
//| other one is not referenced at all). |
//| OUTPUT PARAMETERS: |
//| W - array[K], top K eigenvalues ordered by descending |
//| of their absolute values |
//| Z - array[N,K], matrix of eigenvectors found |
//| Rep - report with additional parameters |
//+------------------------------------------------------------------+
void CAlglib::EigSubSpaceSolveSparses(CEigSubSpaceState &state,
                                      CSparseMatrix &a,bool IsUpper,
                                      CRowDouble &w,CMatrixDouble &z,
                                      CEigSubSpaceReport &rep)
  {
//--- thin wrapper: run the subspace eigensolver on a sparse symmetric
//--- matrix given by one triangle
   CEigenVDetect::EigSubSpaceSolveSparses(state,a,IsUpper,w,z,rep);
  }
//+------------------------------------------------------------------+
//| Finding the eigenvalues and eigenvectors of a symmetric matrix |
//| The algorithm finds eigen pairs of a symmetric matrix by reducing|
//| it to tridiagonal form and using the QL/QR algorithm. |
//| Input parameters: |
//| A - symmetric matrix which is given by its upper or |
//| lower triangular part. |
//| Array whose indexes range within |
//| [0..N-1, 0..N-1]. |
//| N - size of matrix A. |
//| ZNeeded - flag controlling whether the eigenvectors are |
//| needed or not. |
//| If ZNeeded is equal to: |
//| * 0, the eigenvectors are not returned; |
//| * 1, the eigenvectors are returned. |
//| IsUpper - storage format. |
//| Output parameters: |
//| D - eigenvalues in ascending order. |
//| Array whose index ranges within [0..N-1]. |
//| Z - if ZNeeded is equal to: |
//| * 0, Z hasn't changed;                                           |
//| * 1, Z contains the eigenvectors. |
//| Array whose indexes range within |
//| [0..N-1, 0..N-1]. |
//| The eigenvectors are stored in the matrix |
//| columns. |
//| Result: |
//| True, if the algorithm has converged. |
//| False, if the algorithm hasn't converged (rare case). |
//+------------------------------------------------------------------+
bool CAlglib::SMatrixEVD(CMatrixDouble &a,const int n,int zneeded,
                         const bool IsUpper,double &d[],
                         CMatrixDouble &z)
  {
//--- hand the symmetric EVD problem over to the computational core and
//--- report whether the QL/QR iteration converged
   bool converged=CEigenVDetect::SMatrixEVD(a,n,zneeded,IsUpper,d,z);
//--- return result
   return(converged);
  }
//+------------------------------------------------------------------+
//| Subroutine for finding the eigenvalues (and eigenvectors) of a |
//| symmetric matrix in a given half open interval (A, B] by using a |
//| bisection and inverse iteration |
//| Input parameters: |
//| A - symmetric matrix which is given by its upper or |
//| lower triangular part. Array [0..N-1, 0..N-1]. |
//| N - size of matrix A. |
//| ZNeeded - flag controlling whether the eigenvectors are |
//| needed or not. |
//| If ZNeeded is equal to: |
//| * 0, the eigenvectors are not returned; |
//| * 1, the eigenvectors are returned. |
//| IsUpperA - storage format of matrix A. |
//| B1, B2 - half open interval (B1, B2] to search |
//| eigenvalues in. |
//| Output parameters: |
//| M - number of eigenvalues found in a given |
//| half-interval (M>=0). |
//| W - array of the eigenvalues found. |
//| Array whose index ranges within [0..M-1]. |
//| Z - if ZNeeded is equal to: |
//| * 0, Z hasn't changed;                                           |
//| * 1, Z contains eigenvectors. |
//| Array whose indexes range within |
//| [0..N-1, 0..M-1]. |
//| The eigenvectors are stored in the matrix |
//| columns. |
//| Result: |
//| True, if successful. M contains the number of eigenvalues in |
//| the given half-interval (could be equal to 0), W contains the|
//| eigenvalues, Z contains the eigenvectors (if needed). |
//| False, if the bisection method subroutine wasn't able to find|
//| the eigenvalues in the given interval or if the inverse |
//| iteration subroutine wasn't able to find all the |
//| corresponding eigenvectors. In that case, the eigenvalues |
//| and eigenvectors are not returned, M is equal to 0. |
//+------------------------------------------------------------------+
bool CAlglib::SMatrixEVDR(CMatrixDouble &a,const int n,int zneeded,
                          const bool IsUpper,double b1,double b2,
                          int &m,double &w[],CMatrixDouble &z)
  {
//--- reset the output eigenvalue counter before the search starts
   m=0;
//--- run bisection + inverse iteration in the computational core over
//--- the half-open interval (b1,b2]
   bool success=CEigenVDetect::SMatrixEVDR(a,n,zneeded,IsUpper,b1,b2,m,w,z);
//--- return result
   return(success);
  }
//+------------------------------------------------------------------+
//| Subroutine for finding the eigenvalues and eigenvectors of a |
//| symmetric matrix with given indexes by using bisection and |
//| inverse iteration methods. |
//| Input parameters: |
//| A - symmetric matrix which is given by its upper or |
//| lower triangular part. Array whose indexes range |
//| within [0..N-1, 0..N-1]. |
//| N - size of matrix A. |
//| ZNeeded - flag controlling whether the eigenvectors are |
//| needed or not. |
//| If ZNeeded is equal to: |
//| * 0, the eigenvectors are not returned; |
//| * 1, the eigenvectors are returned. |
//| IsUpperA - storage format of matrix A. |
//| I1, I2 - index interval for searching (from I1 to I2). |
//| 0 <= I1 <= I2 <= N-1. |
//| Output parameters: |
//| W - array of the eigenvalues found. |
//| Array whose index ranges within [0..I2-I1]. |
//| Z - if ZNeeded is equal to: |
//| * 0, Z hasn't changed;                                           |
//| * 1, Z contains eigenvectors. |
//| Array whose indexes range within |
//| [0..N-1, 0..I2-I1]. |
//| In that case, the eigenvectors are stored in the |
//| matrix columns. |
//| Result: |
//| True, if successful. W contains the eigenvalues, Z contains |
//| the eigenvectors (if needed). |
//| False, if the bisection method subroutine wasn't able to find|
//| the eigenvalues in the given interval or if the inverse |
//| iteration subroutine wasn't able to find all the |
//| corresponding eigenvectors. In that case, the eigenvalues |
//| and eigenvectors are not returned. |
//+------------------------------------------------------------------+
bool CAlglib::SMatrixEVDI(CMatrixDouble &a,const int n,int zneeded,
                          const bool IsUpper,const int i1,
                          const int i2,CMatrixDouble &z,double &w[])=delete;
bool CAlglib::SMatrixEVDI(CMatrixDouble &a,const int n,int zneeded,
                          const bool IsUpper,const int i1,
                          const int i2,double &w[],CMatrixDouble &z)
  {
//--- delegate the index-range eigenproblem (eigenvalues i1..i2) to the
//--- computational core and propagate its success flag
   bool success=CEigenVDetect::SMatrixEVDI(a,n,zneeded,IsUpper,i1,i2,w,z);
//--- return result
   return(success);
  }
//+------------------------------------------------------------------+
//| Finding the eigenvalues and eigenvectors of a Hermitian matrix |
//| The algorithm finds eigen pairs of a Hermitian matrix by reducing|
//| it to real tridiagonal form and using the QL/QR algorithm. |
//| Input parameters: |
//| A - Hermitian matrix which is given by its upper or |
//| lower triangular part. |
//| Array whose indexes range within |
//| [0..N-1, 0..N-1]. |
//| N - size of matrix A. |
//| IsUpper - storage format. |
//| ZNeeded - flag controlling whether the eigenvectors are |
//| needed or not. If ZNeeded is equal to: |
//| * 0, the eigenvectors are not returned; |
//| * 1, the eigenvectors are returned. |
//| Output parameters: |
//| D - eigenvalues in ascending order. |
//| Array whose index ranges within [0..N-1]. |
//| Z - if ZNeeded is equal to: |
//| * 0, Z hasn't changed;                                           |
//| * 1, Z contains the eigenvectors. |
//| Array whose indexes range within |
//| [0..N-1, 0..N-1]. |
//| The eigenvectors are stored in the matrix |
//| columns. |
//| Result: |
//| True, if the algorithm has converged. |
//| False, if the algorithm hasn't converged (rare case). |
//| Note: |
//| eigenvectors of Hermitian matrix are defined up to |
//| multiplication by a complex number L, such that |L|=1. |
//+------------------------------------------------------------------+
bool CAlglib::HMatrixEVD(CMatrixComplex &a,const int n,const int zneeded,
                         const bool IsUpper,double &d[],CMatrixComplex &z)
  {
//--- hand the Hermitian EVD problem over to the computational core and
//--- report whether the QL/QR iteration converged
   bool converged=CEigenVDetect::HMatrixEVD(a,n,zneeded,IsUpper,d,z);
//--- return result
   return(converged);
  }
//+------------------------------------------------------------------+
//| Subroutine for finding the eigenvalues (and eigenvectors) of a |
//| Hermitian matrix in a given half-interval (A, B] by using a |
//| bisection and inverse iteration |
//| Input parameters: |
//| A - Hermitian matrix which is given by its upper or |
//| lower triangular part. Array whose indexes range |
//| within [0..N-1, 0..N-1]. |
//| N - size of matrix A. |
//| ZNeeded - flag controlling whether the eigenvectors are |
//| needed or not. If ZNeeded is equal to: |
//| * 0, the eigenvectors are not returned; |
//| * 1, the eigenvectors are returned. |
//| IsUpperA - storage format of matrix A. |
//| B1, B2 - half-interval (B1, B2] to search eigenvalues in. |
//| Output parameters: |
//| M - number of eigenvalues found in a given |
//| half-interval, M>=0 |
//| W - array of the eigenvalues found. |
//| Array whose index ranges within [0..M-1]. |
//| Z - if ZNeeded is equal to: |
//| * 0, Z hasn't changed;                                           |
//| * 1, Z contains eigenvectors. |
//| Array whose indexes range within |
//| [0..N-1, 0..M-1]. |
//| The eigenvectors are stored in the matrix |
//| columns. |
//| Result: |
//| True, if successful. M contains the number of eigenvalues |
//| in the given half-interval (could be equal to 0), W contains |
//| the eigenvalues, Z contains the eigenvectors (if needed). |
//| False, if the bisection method subroutine wasn't able to find|
//| the eigenvalues in the given interval or if the inverse |
//| iteration subroutine wasn't able to find all the |
//| corresponding eigenvectors. In that case, the eigenvalues and|
//| eigenvectors are not returned, M is equal to 0. |
//| Note: |
//| eigen vectors of Hermitian matrix are defined up to |
//| multiplication by a complex number L, such as |L|=1. |
//+------------------------------------------------------------------+
bool CAlglib::HMatrixEVDR(CMatrixComplex &a,const int n,const int zneeded,
                          const bool IsUpper,double b1,double b2,
                          int &m,double &w[],CMatrixComplex &z)
  {
//--- reset the output eigenvalue counter before the search starts
   m=0;
//--- run bisection + inverse iteration in the computational core over
//--- the half-open interval (b1,b2]
   bool success=CEigenVDetect::HMatrixEVDR(a,n,zneeded,IsUpper,b1,b2,m,w,z);
//--- return result
   return(success);
  }
//+------------------------------------------------------------------+
//| Subroutine for finding the eigenvalues and eigenvectors of a |
//| Hermitian matrix with given indexes by using bisection and |
//| inverse iteration methods |
//| Input parameters: |
//| A - Hermitian matrix which is given by its upper or |
//| lower triangular part. |
//| Array whose indexes range within |
//| [0..N-1, 0..N-1]. |
//| N - size of matrix A. |
//| ZNeeded - flag controlling whether the eigenvectors are |
//| needed or not. If ZNeeded is equal to: |
//| * 0, the eigenvectors are not returned; |
//| * 1, the eigenvectors are returned. |
//| IsUpperA - storage format of matrix A. |
//| I1, I2 - index interval for searching (from I1 to I2). |
//| 0 <= I1 <= I2 <= N-1. |
//| Output parameters: |
//| W - array of the eigenvalues found. |
//| Array whose index ranges within [0..I2-I1]. |
//| Z - if ZNeeded is equal to: |
//| * 0, Z hasn't changed;                                           |
//| * 1, Z contains eigenvectors. |
//| Array whose indexes range within |
//| [0..N-1, 0..I2-I1]. |
//| In that case, the eigenvectors are stored in |
//| the matrix columns. |
//| Result: |
//| True, if successful. W contains the eigenvalues, Z contains |
//| the eigenvectors (if needed). |
//| False, if the bisection method subroutine wasn't able to find|
//| the eigenvalues in the given interval or if the inverse |
//| iteration subroutine wasn't able to find all the corresponding   |
//| eigenvectors. In that case,                                      |
//| the eigenvalues and eigenvectors are not returned. |
//| Note: |
//| eigen vectors of Hermitian matrix are defined up to |
//| multiplication by a complex number L, such as |L|=1. |
//+------------------------------------------------------------------+
bool CAlglib::HMatrixEVDI(CMatrixComplex &a,const int n,const int zneeded,
                          const bool IsUpper,const int i1,const int i2,
                          double &w[],CMatrixComplex &z)
  {
//--- delegate the index-range Hermitian eigenproblem (eigenvalues
//--- i1..i2) to the computational core and propagate its success flag
   bool success=CEigenVDetect::HMatrixEVDI(a,n,zneeded,IsUpper,i1,i2,w,z);
//--- return result
   return(success);
  }
//+------------------------------------------------------------------+
//| Finding the eigenvalues and eigenvectors of a tridiagonal |
//| symmetric matrix |
//| The algorithm finds the eigen pairs of a tridiagonal symmetric |
//| matrix by using an QL/QR algorithm with implicit shifts. |
//| Input parameters: |
//| D - the main diagonal of a tridiagonal matrix. |
//| Array whose index ranges within [0..N-1]. |
//| E - the secondary diagonal of a tridiagonal matrix. |
//| Array whose index ranges within [0..N-2]. |
//| N - size of matrix A. |
//| ZNeeded - flag controlling whether the eigenvectors are |
//| needed or not. |
//| If ZNeeded is equal to: |
//| * 0, the eigenvectors are not needed; |
//| * 1, the eigenvectors of a tridiagonal matrix |
//| are multiplied by the square matrix Z. It is |
//| used if the tridiagonal matrix is obtained by |
//| the similarity transformation of a symmetric |
//| matrix; |
//| * 2, the eigenvectors of a tridiagonal matrix |
//| replace the square matrix Z; |
//| * 3, matrix Z contains the first row of the |
//| eigenvectors matrix. |
//| Z - if ZNeeded=1, Z contains the square matrix by |
//| which the eigenvectors are multiplied. |
//| Array whose indexes range within |
//| [0..N-1, 0..N-1]. |
//| Output parameters: |
//| D - eigenvalues in ascending order. |
//| Array whose index ranges within [0..N-1]. |
//| Z - if ZNeeded is equal to: |
//| * 0, Z hasn't changed;                                           |
//| * 1, Z contains the product of a given matrix |
//| (from the left) and the eigenvectors matrix |
//| (from the right); |
//| * 2, Z contains the eigenvectors. |
//| * 3, Z contains the first row of the |
//| eigenvectors matrix. |
//| If ZNeeded<3, Z is the array whose indexes range |
//| within [0..N-1, 0..N-1]. |
//| In that case, the eigenvectors are stored in the |
//| matrix columns. |
//| If ZNeeded=3, Z is the array whose indexes range |
//| within [0..0, 0..N-1]. |
//| Result: |
//| True, if the algorithm has converged. |
//| False, if the algorithm hasn't converged. |
//| -- LAPACK routine (version 3.0) -- |
//| Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., |
//| Courant Institute, Argonne National Lab, and Rice University|
//| September 30, 1994 |
//+------------------------------------------------------------------+
bool CAlglib::SMatrixTdEVD(double &d[],double &e[],const int n,
                           const int zneeded,CMatrixDouble &z)
  {
//--- hand the tridiagonal EVD problem over to the computational core
//--- (QL/QR with implicit shifts) and report convergence
   bool converged=CEigenVDetect::SMatrixTdEVD(d,e,n,zneeded,z);
//--- return result
   return(converged);
  }
//+------------------------------------------------------------------+
//| Subroutine for finding the tridiagonal matrix eigenvalues/vectors|
//| in a given half-interval (A, B] by using bisection and inverse |
//| iteration. |
//| Input parameters: |
//| D - the main diagonal of a tridiagonal matrix. |
//| Array whose index ranges within [0..N-1]. |
//| E - the secondary diagonal of a tridiagonal matrix. |
//| Array whose index ranges within [0..N-2]. |
//| N - size of matrix, N>=0. |
//| ZNeeded - flag controlling whether the eigenvectors are |
//| needed or not. If ZNeeded is equal to: |
//| * 0, the eigenvectors are not needed; |
//| * 1, the eigenvectors of a tridiagonal matrix |
//| are multiplied by the square matrix Z. It is |
//| used if the tridiagonal matrix is obtained by |
//| the similarity transformation of a symmetric |
//| matrix. |
//| * 2, the eigenvectors of a tridiagonal matrix |
//| replace matrix Z. |
//| A, B - half-interval (A, B] to search eigenvalues in. |
//| Z - if ZNeeded is equal to: |
//| * 0, Z isn't used and remains unchanged; |
//| * 1, Z contains the square matrix (array whose |
//| indexes range within [0..N-1, 0..N-1]) which |
//| reduces the given symmetric matrix to |
//| tridiagonal form; |
//| * 2, Z isn't used (but changed on the exit). |
//| Output parameters: |
//| D - array of the eigenvalues found. |
//| Array whose index ranges within [0..M-1]. |
//| M - number of eigenvalues found in the given |
//| half-interval (M>=0). |
//| Z - if ZNeeded is equal to: |
//| * 0, doesn't contain any information; |
//| * 1, contains the product of a given NxN matrix |
//| Z (from the left) and NxM matrix of the |
//| eigenvectors found (from the right). Array |
//| whose indexes range within [0..N-1, 0..M-1]. |
//| * 2, contains the matrix of the eigenvectors |
//| found. Array whose indexes range within |
//| [0..N-1, 0..M-1]. |
//| Result: |
//| True, if successful. In that case, M contains the number of |
//| eigenvalues in the given half-interval (could be equal to 0),|
//| D contains the eigenvalues, Z contains the eigenvectors (if |
//| needed). It should be noted that the subroutine changes the |
//| size of arrays D and Z. |
//| False, if the bisection method subroutine wasn't able to find|
//| the eigenvalues in the given interval or if the inverse |
//| iteration subroutine wasn't able to find all the |
//| corresponding eigenvectors. In that case, the eigenvalues and|
//| eigenvectors are not returned, M is equal to 0. |
//+------------------------------------------------------------------+
bool CAlglib::SMatrixTdEVDR(double &d[],double &e[],const int n,
                            const int zneeded,const double a,
                            const double b,int &m,CMatrixDouble &z)
  {
//--- reset the output eigenvalue counter before the search starts
   m=0;
//--- run bisection + inverse iteration in the computational core over
//--- the half-open interval (a,b]
   bool success=CEigenVDetect::SMatrixTdEVDR(d,e,n,zneeded,a,b,m,z);
//--- return result
   return(success);
  }
//+------------------------------------------------------------------+
//| Subroutine for finding tridiagonal matrix eigenvalues/vectors |
//| with given indexes (in ascending order) by using the bisection |
//| and inverse iteration.                                           |
//| Input parameters: |
//| D - the main diagonal of a tridiagonal matrix. |
//| Array whose index ranges within [0..N-1]. |
//| E - the secondary diagonal of a tridiagonal matrix. |
//| Array whose index ranges within [0..N-2]. |
//| N - size of matrix. N>=0. |
//| ZNeeded - flag controlling whether the eigenvectors are |
//| needed or not. If ZNeeded is equal to: |
//| * 0, the eigenvectors are not needed; |
//| * 1, the eigenvectors of a tridiagonal matrix |
//| are multiplied by the square matrix Z. It is |
//| used if the tridiagonal matrix is obtained by |
//| the similarity transformation of a symmetric |
//| matrix. |
//| * 2, the eigenvectors of a tridiagonal matrix |
//| replace matrix Z. |
//| I1, I2 - index interval for searching (from I1 to I2). |
//| 0 <= I1 <= I2 <= N-1. |
//| Z - if ZNeeded is equal to: |
//| * 0, Z isn't used and remains unchanged; |
//| * 1, Z contains the square matrix (array whose |
//| indexes range within [0..N-1, 0..N-1]) which |
//| reduces the given symmetric matrix to |
//| tridiagonal form; |
//| * 2, Z isn't used (but changed on the exit). |
//| Output parameters: |
//| D - array of the eigenvalues found. |
//| Array whose index ranges within [0..I2-I1]. |
//| Z - if ZNeeded is equal to: |
//| * 0, doesn't contain any information; |
//| * 1, contains the product of a given NxN matrix |
//| Z (from the left) and Nx(I2-I1) matrix of the |
//| eigenvectors found (from the right). Array |
//| whose indexes range within [0..N-1, 0..I2-I1].|
//| * 2, contains the matrix of the eigenvectors                     |
//| found. Array whose indexes range within |
//| [0..N-1, 0..I2-I1]. |
//| Result: |
//| True, if successful. In that case, D contains the |
//| eigenvalues, Z contains the eigenvectors (if needed). |
//| It should be noted that the subroutine changes the size of |
//| arrays D and Z. |
//| False, if the bisection method subroutine wasn't able to find|
//| the eigenvalues in the given interval or if the inverse |
//| iteration subroutine wasn't able to find all the |
//| corresponding eigenvectors. In that case, the eigenvalues and|
//| eigenvectors are not returned. |
//+------------------------------------------------------------------+
bool CAlglib::SMatrixTdEVDI(double &d[],double &e[],const int n,
                            const int zneeded,const int i1,
                            const int i2,CMatrixDouble &z)
  {
//--- delegate the index-range tridiagonal eigenproblem (eigenvalues
//--- i1..i2) to the computational core and propagate its success flag
   bool success=CEigenVDetect::SMatrixTdEVDI(d,e,n,zneeded,i1,i2,z);
//--- return result
   return(success);
  }
//+------------------------------------------------------------------+
//| Finding eigenvalues and eigenvectors of a general matrix |
//| The algorithm finds eigenvalues and eigenvectors of a general |
//| matrix by using the QR algorithm with multiple shifts. The |
//| algorithm can find eigenvalues and both left and right |
//| eigenvectors. |
//| The right eigenvector is a vector x such that A*x = w*x, and the |
//| left eigenvector is a vector y such that y'*A = w*y' (here y' |
//| implies a complex conjugate transposition of vector y). |
//| Input parameters: |
//| A - matrix. Array whose indexes range within |
//| [0..N-1, 0..N-1]. |
//| N - size of matrix A. |
//| VNeeded - flag controlling whether eigenvectors are needed |
//| or not. If VNeeded is equal to: |
//| * 0, eigenvectors are not returned; |
//| * 1, right eigenvectors are returned; |
//| * 2, left eigenvectors are returned; |
//| * 3, both left and right eigenvectors are |
//| returned. |
//| Output parameters: |
//| WR - real parts of eigenvalues. |
//| Array whose index ranges within [0..N-1]. |
//| WI - imaginary parts of eigenvalues.                             |
//| Array whose index ranges within [0..N-1]. |
//| VL, VR - arrays of left and right eigenvectors (if they |
//| are needed). If WI[i]=0, the respective |
//| eigenvalue is a real number, and it corresponds |
//| to the column number I of matrices VL/VR. If |
//| WI[i]>0, we have a pair of complex conjugate |
//| numbers with positive and negative imaginary |
//| parts: the first eigenvalue WR[i] + |
//| + sqrt(-1)*WI[i]; the second eigenvalue |
//| WR[i+1] + sqrt(-1)*WI[i+1]; |
//| WI[i]>0 |
//| WI[i+1] = -WI[i] < 0 |
//| In that case, the eigenvector corresponding to |
//| the first eigenvalue is located in i and i+1 |
//| columns of matrices VL/VR (the column number i |
//| contains the real part, and the column number |
//| i+1 contains the imaginary part), and the vector |
//| corresponding to the second eigenvalue is a |
//| complex conjugate to the first vector. |
//| Arrays whose indexes range within |
//| [0..N-1, 0..N-1]. |
//| Result: |
//| True, if the algorithm has converged. |
//| False, if the algorithm has not converged. |
//| Note 1: |
//| Some users may ask the following question: what if WI[N-1]>0?|
//| WI[N] must contain an eigenvalue which is complex conjugate |
//| to the N-th eigenvalue, but the array has only size N? |
//| The answer is as follows: such a situation cannot occur |
//| because the algorithm finds pairs of eigenvalues,                |
//| therefore, if WI[i]>0, I is strictly less than N-1. |
//| Note 2: |
//| The algorithm performance depends on the value of the |
//| internal parameter NS of the InternalSchurDecomposition |
//| subroutine which defines the number of shifts in the QR |
//| algorithm (similarly to the block width in block-matrix |
//| algorithms of linear algebra). If you require maximum |
//| performance on your machine, it is recommended to adjust |
//| this parameter manually. |
//| See also the InternalTREVC subroutine. |
//| The algorithm is based on the LAPACK 3.0 library. |
//+------------------------------------------------------------------+
bool CAlglib::RMatrixEVD(CMatrixDouble &a,const int n,const int vneeded,
                         double &wr[],double &wi[],CMatrixDouble &vl,
                         CMatrixDouble &vr)
  {
//--- hand the general (nonsymmetric) eigenproblem over to the
//--- computational core (multishift QR) and report convergence
   bool converged=CEigenVDetect::RMatrixEVD(a,n,vneeded,wr,wi,vl,vr);
//--- return result
   return(converged);
  }
//+------------------------------------------------------------------+
//| Generation of a random uniformly distributed (Haar) orthogonal |
//| matrix |
//| INPUT PARAMETERS: |
//| N - matrix size, N>=1 |
//| OUTPUT PARAMETERS: |
//| A - orthogonal NxN matrix, array[0..N-1,0..N-1] |
//+------------------------------------------------------------------+
void CAlglib::RMatrixRndOrthogonal(const int n,CMatrixDouble &a)
  {
//--- thin wrapper: generate a random Haar-distributed real orthogonal
//--- NxN matrix via the matrix-generation core
   CMatGen::RMatrixRndOrthogonal(n,a);
  }
//+------------------------------------------------------------------+
//| Generation of random NxN matrix with given condition number and |
//| norm2(A)=1 |
//| INPUT PARAMETERS: |
//| N - matrix size |
//| C - condition number (in 2-norm) |
//| OUTPUT PARAMETERS: |
//| A - random matrix with norm2(A)=1 and cond(A)=C |
//+------------------------------------------------------------------+
void CAlglib::RMatrixRndCond(const int n,const double c,
                             CMatrixDouble &a)
  {
//--- delegate to the computational core
   CMatGen::RMatrixRndCond(n,c,a);
  }
//+------------------------------------------------------------------+
//| Generation of a random Haar distributed orthogonal complex matrix|
//| INPUT PARAMETERS: |
//| N - matrix size, N>=1 |
//| OUTPUT PARAMETERS: |
//| A - orthogonal NxN matrix, array[0..N-1,0..N-1] |
//+------------------------------------------------------------------+
void CAlglib::CMatrixRndOrthogonal(const int n,CMatrixComplex &a)
  {
//--- delegate generation of the random unitary (complex orthogonal) matrix to CMatGen
   CMatGen::CMatrixRndOrthogonal(n,a);
  }
//+------------------------------------------------------------------+
//| Generation of random NxN complex matrix with given condition |
//| number C and norm2(A)=1 |
//| INPUT PARAMETERS: |
//| N - matrix size |
//| C - condition number (in 2-norm) |
//| OUTPUT PARAMETERS: |
//| A - random matrix with norm2(A)=1 and cond(A)=C |
//+------------------------------------------------------------------+
void CAlglib::CMatrixRndCond(const int n,const double c,
                             CMatrixComplex &a)
  {
//--- delegate to the computational core
   CMatGen::CMatrixRndCond(n,c,a);
  }
//+------------------------------------------------------------------+
//| Generation of random NxN symmetric matrix with given condition |
//| number and norm2(A)=1 |
//| INPUT PARAMETERS: |
//| N - matrix size |
//| C - condition number (in 2-norm) |
//| OUTPUT PARAMETERS: |
//| A - random matrix with norm2(A)=1 and cond(A)=C |
//+------------------------------------------------------------------+
void CAlglib::SMatrixRndCond(const int n,const double c,
                             CMatrixDouble &a)
  {
//--- delegate to the computational core
   CMatGen::SMatrixRndCond(n,c,a);
  }
//+------------------------------------------------------------------+
//| Generation of random NxN symmetric positive definite matrix with |
//| given condition number and norm2(A)=1 |
//| INPUT PARAMETERS: |
//| N - matrix size |
//| C - condition number (in 2-norm) |
//| OUTPUT PARAMETERS: |
//| A - random SPD matrix with norm2(A)=1 and cond(A)=C |
//+------------------------------------------------------------------+
void CAlglib::SPDMatrixRndCond(const int n,const double c,
                               CMatrixDouble &a)
  {
//--- delegate to the computational core
   CMatGen::SPDMatrixRndCond(n,c,a);
  }
//+------------------------------------------------------------------+
//| Generation of random NxN Hermitian matrix with given condition |
//| number and norm2(A)=1 |
//| INPUT PARAMETERS: |
//| N - matrix size |
//| C - condition number (in 2-norm) |
//| OUTPUT PARAMETERS: |
//| A - random matrix with norm2(A)=1 and cond(A)=C |
//+------------------------------------------------------------------+
void CAlglib::HMatrixRndCond(const int n,const double c,
                             CMatrixComplex &a)
  {
//--- delegate to the computational core
   CMatGen::HMatrixRndCond(n,c,a);
  }
//+------------------------------------------------------------------+
//| Generation of random NxN Hermitian positive definite matrix with |
//| given condition number and norm2(A)=1 |
//| INPUT PARAMETERS: |
//| N - matrix size |
//| C - condition number (in 2-norm) |
//| OUTPUT PARAMETERS: |
//| A - random HPD matrix with norm2(A)=1 and cond(A)=C |
//+------------------------------------------------------------------+
void CAlglib::HPDMatrixRndCond(const int n,const double c,
                               CMatrixComplex &a)
  {
//--- delegate to the computational core
   CMatGen::HPDMatrixRndCond(n,c,a);
  }
//+------------------------------------------------------------------+
//| Multiplication of MxN matrix by NxN random Haar distributed |
//| orthogonal matrix |
//| INPUT PARAMETERS: |
//| A - matrix, array[0..M-1, 0..N-1] |
//| M, N- matrix size |
//| OUTPUT PARAMETERS: |
//| A - A*Q, where Q is random NxN orthogonal matrix |
//+------------------------------------------------------------------+
void CAlglib::RMatrixRndOrthogonalFromTheRight(CMatrixDouble &a,
                                               const int m,const int n)
  {
//--- in-place A := A*Q, delegated to the computational core
   CMatGen::RMatrixRndOrthogonalFromTheRight(a,m,n);
  }
//+------------------------------------------------------------------+
//| Multiplication of MxN matrix by MxM random Haar distributed |
//| orthogonal matrix |
//| INPUT PARAMETERS: |
//| A - matrix, array[0..M-1, 0..N-1] |
//| M, N- matrix size |
//| OUTPUT PARAMETERS: |
//| A - Q*A, where Q is random MxM orthogonal matrix |
//+------------------------------------------------------------------+
void CAlglib::RMatrixRndOrthogonalFromTheLeft(CMatrixDouble &a,
                                              const int m,const int n)
  {
//--- in-place A := Q*A, delegated to the computational core
   CMatGen::RMatrixRndOrthogonalFromTheLeft(a,m,n);
  }
//+------------------------------------------------------------------+
//| Multiplication of MxN complex matrix by NxN random Haar |
//| distributed complex orthogonal matrix |
//| INPUT PARAMETERS: |
//| A - matrix, array[0..M-1, 0..N-1] |
//| M, N- matrix size |
//| OUTPUT PARAMETERS: |
//| A - A*Q, where Q is random NxN orthogonal matrix |
//+------------------------------------------------------------------+
void CAlglib::CMatrixRndOrthogonalFromTheRight(CMatrixComplex &a,
                                               const int m,const int n)
  {
//--- in-place A := A*Q, delegated to the computational core
   CMatGen::CMatrixRndOrthogonalFromTheRight(a,m,n);
  }
//+------------------------------------------------------------------+
//| Multiplication of MxN complex matrix by MxM random Haar |
//| distributed complex orthogonal matrix |
//| INPUT PARAMETERS: |
//| A - matrix, array[0..M-1, 0..N-1] |
//| M, N- matrix size |
//| OUTPUT PARAMETERS: |
//| A - Q*A, where Q is random MxM orthogonal matrix |
//+------------------------------------------------------------------+
void CAlglib::CMatrixRndOrthogonalFromTheLeft(CMatrixComplex &a,
                                              const int m,const int n)
  {
//--- in-place A := Q*A, delegated to the computational core
   CMatGen::CMatrixRndOrthogonalFromTheLeft(a,m,n);
  }
//+------------------------------------------------------------------+
//| Symmetric multiplication of NxN matrix by random Haar |
//| distributed orthogonal matrix |
//| INPUT PARAMETERS: |
//| A - matrix, array[0..N-1, 0..N-1] |
//| N - matrix size |
//| OUTPUT PARAMETERS: |
//| A - Q'*A*Q, where Q is random NxN orthogonal matrix |
//+------------------------------------------------------------------+
void CAlglib::SMatrixRndMultiply(CMatrixDouble &a,const int n)
  {
//--- in-place similarity transform A := Q'*A*Q, delegated to the computational core
   CMatGen::SMatrixRndMultiply(a,n);
  }
//+------------------------------------------------------------------+
//| Hermitian multiplication of NxN matrix by random Haar distributed|
//| complex orthogonal matrix |
//| INPUT PARAMETERS: |
//| A - matrix, array[0..N-1, 0..N-1] |
//| N - matrix size |
//| OUTPUT PARAMETERS: |
//| A - Q^H*A*Q, where Q is random NxN orthogonal matrix |
//+------------------------------------------------------------------+
void CAlglib::HMatrixRndMultiply(CMatrixComplex &a,const int n)
  {
//--- in-place similarity transform A := Q^H*A*Q, delegated to the computational core
   CMatGen::HMatrixRndMultiply(a,n);
  }
//+------------------------------------------------------------------+
//| This function serializes data structure to string. |
//| Important properties of s_out: |
//| * it contains alphanumeric characters, dots, underscores, minus |
//| signs |
//| * these symbols are grouped into words, which are separated by |
//| spaces and Windows-style (CR+LF) newlines |
//| * although serializer uses spaces and CR+LF as separators, you |
//| can replace any separator character by arbitrary combination |
//| of spaces, tabs, Windows or Unix newlines. It allows flexible |
//| reformatting of the string in case you want to include it into |
//| text or XML file. But you should not insert separators into the|
//| middle of the "words" nor you should change case of letters. |
//| * s_out can be freely moved between 32-bit and 64-bit systems, |
//| little and big endian machines, and so on. You can serialize |
//| structure on 32-bit machine and unserialize it on 64-bit one |
//| (or vice versa), or serialize it on SPARC and unserialize on |
//| x86. You can also serialize it in C# version of ALGLIB and |
//| unserialize in C++ one, and vice versa. |
//+------------------------------------------------------------------+
void CAlglib::SparseSerialize(CSparseMatrix &obj,string &s_out)
  {
//--- serializer object
   CSerializer serializer;
//--- first pass: compute the storage required for the matrix
   serializer.Alloc_Start();
   CSparse::SparseAlloc(serializer,obj);
//--- second pass: emit the textual representation
   serializer.SStart_Str();
   CSparse::SparseSerialize(serializer,obj);
   serializer.Stop();
//--- hand the serialized string back to the caller
   s_out=serializer.Get_String();
  }
//+------------------------------------------------------------------+
//| This function unserializes data structure from string. |
//+------------------------------------------------------------------+
//--- NOTE(review): the doubled "Uu" in the name is a long-standing typo,
//--- kept as-is because it is part of the public interface
void CAlglib::SparseUunserialize(string s_in,CSparseMatrix &obj)
  {
//--- serializer object
   CSerializer serializer;
//--- attach the input string and restore the matrix from it
   serializer.UStart_Str(s_in);
   CSparse::SparseUnserialize(serializer,obj);
//--- finalize unserialization
   serializer.Stop();
  }
//+------------------------------------------------------------------+
//| This function creates sparse matrix in a Hash-Table format. |
//| This function creates Hash-Table matrix, which can be converted  |
//| to CRS format after its initialization is over. Typical usage |
//| scenario for a sparse matrix is: |
//| 1. creation in a Hash-Table format |
//| 2. insertion of the matrix elements |
//| 3. conversion to the CRS representation |
//| 4. matrix is passed to some linear algebra algorithm |
//| Some information about different matrix formats can be found |
//| below, in the "NOTES" section. |
//| INPUT PARAMETERS: |
//| M - number of rows in a matrix, M>=1 |
//| N - number of columns in a matrix, N>=1 |
//| K - K>=0, expected number of non-zero elements in a |
//| matrix. K can be inexact approximation, can be less|
//| than actual number of elements (table will grow |
//|                   when needed) or even zero.                     |
//| It is important to understand that although hash-table may grow |
//| automatically, it's better to provide good estimate of data size.|
//| OUTPUT PARAMETERS: |
//| S - sparse M*N matrix in Hash-Table representation. All|
//| elements of the matrix are zero. |
//| NOTE 1 |
//| Hash-tables use memory inefficiently, and they have to keep some |
//| amount of the "spare memory" in order to have good performance. |
//| Hash table for matrix with K non-zero elements will need |
//| C*K*(8+2*sizeof(int)) bytes, where C is a small constant, about |
//| 1.5-2 in magnitude. |
//| CRS storage, from the other side, is more memory-efficient, and |
//| needs just K*(8+sizeof(int))+M*sizeof(int) bytes, where M is a |
//| number of rows in a matrix. |
//| When you convert from the Hash-Table to CRS representation, all |
//| unneeded memory will be freed. |
//| |
//| NOTE 2 |
//| Comments of SparseMatrix structure outline information about |
//| different sparse storage formats. We recommend you to read them |
//| before starting to use ALGLIB sparse matrices. |
//| |
//| NOTE 3 |
//| This function completely overwrites S with new sparse matrix. |
//| Previously allocated storage is NOT reused. If you want to reuse |
//| already allocated memory, call SparseCreateBuf function. |
//+------------------------------------------------------------------+
void CAlglib::SparseCreate(int m,int n,int k,CSparseMatrix &s)
  {
//--- delegate creation of the Hash-Table matrix to the computational core
   CSparse::SparseCreate(m,n,k,s);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::SparseCreate(int m,int n,CSparseMatrix &s)
  {
//--- no estimate of the non-zero count is available, so use zero;
//--- the hash table grows automatically when elements are inserted
   int expected_nz=0;
//--- delegate to the full overload
   CSparse::SparseCreate(m,n,expected_nz,s);
  }
//+------------------------------------------------------------------+
//| This version of SparseCreate function creates sparse matrix in |
//| Hash-Table format, reusing previously allocated storage as much |
//| as possible.Read comments for SparseCreate() for more information|
//| INPUT PARAMETERS: |
//| M - number of rows in a matrix, M>=1 |
//| N - number of columns in a matrix, N>=1 |
//| K - K>=0, expected number of non-zero elements in a matrix|
//| K can be inexact approximation, can be less than |
//| actual number of elements (table will grow when needed|
//| or even zero). |
//| It is important to understand that although hash-table may grow |
//| automatically, it is better to provide good estimate of data size|
//| S - SparseMatrix structure which MAY contain some already |
//| allocated storage. |
//| OUTPUT PARAMETERS: |
//| S - sparse M*N matrix in Hash-Table representation. All |
//| elements of the matrix are zero. Previously allocated |
//| storage is reused, if its size is compatible with |
//| expected number of non-zeros K. |
//+------------------------------------------------------------------+
void CAlglib::SparseCreateBuf(int m,int n,int k,CSparseMatrix &s)
  {
//--- delegate buffered creation of the Hash-Table matrix to the computational core
   CSparse::SparseCreateBuf(m,n,k,s);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::SparseCreateBuf(int m,int n,CSparseMatrix &s)
  {
//--- no estimate of the non-zero count is available, so use zero;
//--- the hash table grows automatically when elements are inserted
   int expected_nz=0;
//--- delegate to the full overload
   CSparse::SparseCreateBuf(m,n,expected_nz,s);
  }
//+------------------------------------------------------------------+
//| This function creates sparse matrix in a CRS format (expert |
//| function for situations when you are running out of memory). |
//| This function creates CRS matrix. Typical usage scenario for a |
//| CRS matrix is: |
//| 1. creation (you have to tell number of non-zero elements at each|
//| row at this moment) |
//| 2. insertion of the matrix elements (row by row, from left to |
//| right) |
//| 3. matrix is passed to some linear algebra algorithm |
//| This function is a memory-efficient alternative to SparseCreate()|
//| but it is more complex because it requires you to know in advance|
//| how large your matrix is. Some information about different matrix|
//| formats can be found in comments on SparseMatrix structure. We |
//| recommend you to read them before starting to use ALGLIB sparse |
//| matrices.. |
//| INPUT PARAMETERS: |
//| M - number of rows in a matrix, M>=1 |
//| N - number of columns in a matrix, N>=1 |
//| NER - number of elements at each row, array[M], NER[I]>=0 |
//| OUTPUT PARAMETERS: |
//| S - sparse M*N matrix in CRS representation. |
//| You have to fill ALL non-zero elements by calling |
//| SparseSet() BEFORE you try to use this matrix. |
//| NOTE: this function completely overwrites S with new sparse |
//| matrix. Previously allocated storage is NOT reused. If you |
//| want to reuse already allocated memory, call |
//| SparseCreateCRSBuf function. |
//+------------------------------------------------------------------+
void CAlglib::SparseCreateCRS(int m,int n,CRowInt &ner,
                              CSparseMatrix &s)
  {
//--- delegate creation of the CRS matrix to the computational core
   CSparse::SparseCreateCRS(m,n,ner,s);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::SparseCreateCRS(int m,int n,int &ner[],
                              CSparseMatrix &s)
  {
//--- overload taking a plain dynamic array of per-row counts
   CSparse::SparseCreateCRS(m,n,ner,s);
  }
//+------------------------------------------------------------------+
//| This function creates sparse matrix in a CRS format (expert |
//| function for situations when you are running out of memory). This|
//| version of CRS matrix creation function may reuse memory already |
//| allocated in S. |
//| This function creates CRS matrix. Typical usage scenario for a |
//| CRS matrix is: |
//| 1. creation (you have to tell number of non-zero elements at each|
//| row at this moment) |
//| 2. insertion of the matrix elements (row by row, from left to |
//| right) |
//| 3. matrix is passed to some linear algebra algorithm |
//| This function is a memory-efficient alternative to SparseCreate()|
//| but it is more complex because it requires you to know in advance|
//| how large your matrix is. Some information about different matrix|
//| formats can be found in comments on SparseMatrix structure. We |
//| recommend you to read them before starting to use ALGLIB sparse |
//| matrices.. |
//| INPUT PARAMETERS: |
//| M - number of rows in a matrix, M>=1 |
//| N - number of columns in a matrix, N>=1 |
//| NER - number of elements at each row, array[M], NER[I]>=0 |
//| S - sparse matrix structure with possibly preallocated |
//| memory. |
//| OUTPUT PARAMETERS: |
//| S - sparse M*N matrix in CRS representation. You have to |
//| fill ALL non-zero elements by calling SparseSet() |
//| BEFORE you try to use this matrix. |
//+------------------------------------------------------------------+
void CAlglib::SparseCreateCRSBuf(int m,int n,CRowInt &ner,
                                 CSparseMatrix &s)
  {
//--- delegate buffered creation of the CRS matrix to the computational core
   CSparse::SparseCreateCRSBuf(m,n,ner,s);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::SparseCreateCRSBuf(int m,int n,int &ner[],
                                 CSparseMatrix &s)
  {
//--- overload taking a plain dynamic array of per-row counts
   CSparse::SparseCreateCRSBuf(m,n,ner,s);
  }
//+------------------------------------------------------------------+
//| This function creates sparse matrix in a SKS format (skyline |
//| storage format). In most cases you do not need this function -CRS|
//| format better suits most use cases. |
//| INPUT PARAMETERS: |
//| M, N - number of rows(M) and columns(N) in a matrix: |
//| * M=N (as for now, ALGLIB supports only square SKS)|
//| * N>=1 |
//| * M>=1 |
//| D - "bottom" bandwidths, array[M],D[I]>=0. I-th element|
//| stores number of non-zeros at I-th row, below the |
//| diagonal (diagonal itself is not included) |
//| U - "top" bandwidths, array[N], U[I]>=0. I-th element |
//| stores number of non-zeros at I-th row, above the |
//| diagonal (diagonal itself is not included) |
//| OUTPUT PARAMETERS: |
//| S - sparse M*N matrix in SKS representation. All |
//| elements are filled by zeros. You may use |
//| SparseSet() to change their values. |
//| NOTE: this function completely overwrites S with new sparse |
//| matrix. Previously allocated storage is NOT reused. If you |
//| want to reuse already allocated memory, call |
//| SparseCreateSKSBuf function. |
//+------------------------------------------------------------------+
void CAlglib::SparseCreateSKS(int m,int n,CRowInt &d,CRowInt &u,
                              CSparseMatrix &s)
  {
//--- delegate creation of the SKS (skyline) matrix to the computational core
   CSparse::SparseCreateSKS(m,n,d,u,s);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::SparseCreateSKS(int m,int n,int &d[],int &u[],
                              CSparseMatrix &s)
  {
//--- overload taking plain dynamic arrays of bottom/top bandwidths
   CSparse::SparseCreateSKS(m,n,d,u,s);
  }
//+------------------------------------------------------------------+
//| This is "buffered" version of SparseCreateSKS() which reuses |
//| memory previously allocated in S (of course, memory is |
//| reallocated if needed). |
//| This function creates sparse matrix in a SKS format (skyline |
//| storage format). In most cases you do not need this function - |
//| CRS format better suits most use cases. |
//| INPUT PARAMETERS: |
//| M, N - number of rows(M) and columns (N) in a matrix: |
//| * M=N (as for now, ALGLIB supports only square SKS)|
//| * N>=1 |
//| * M>=1 |
//| D - "bottom" bandwidths, array[M], 0<=D[I]<=I. |
//| I-th element stores number of non-zeros at I-th row,|
//| below the diagonal (diagonal itself is not included)|
//| U - "top" bandwidths, array[N], 0<=U[I]<=I. I-th |
//| element stores number of non-zeros at I-th row,above|
//| the diagonal (diagonal itself is not included) |
//| OUTPUT PARAMETERS: |
//| S - sparse M*N matrix in SKS representation. All |
//| elements are filled by zeros. You may use |
//| SparseSet() to change their values. |
//+------------------------------------------------------------------+
void CAlglib::SparseCreateSKSBuf(int m,int n,CRowInt &d,CRowInt &u,CSparseMatrix &s)
  {
//--- delegate buffered creation of the SKS (skyline) matrix to the computational core
   CSparse::SparseCreateSKSBuf(m,n,d,u,s);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::SparseCreateSKSBuf(int m,int n,int &d[],int &u[],CSparseMatrix &s)
  {
//--- overload taking plain dynamic arrays of bottom/top bandwidths
   CSparse::SparseCreateSKSBuf(m,n,d,u,s);
  }
//+------------------------------------------------------------------+
//| This function creates sparse matrix in a SKS format (skyline |
//| storage format). Unlike more general SparseCreateSKS(), this |
//| function creates sparse matrix with constant bandwidth. |
//| You may want to use this function instead of SparseCreateSKS() |
//| when your matrix has constant or nearly-constant bandwidth, and |
//| you want to simplify source code. |
//| INPUT PARAMETERS: |
//| M, N - number of rows(M) and columns (N) in a matrix: |
//| * M=N (as for now, ALGLIB supports only square SKS) |
//| * N>=1 |
//| * M>=1 |
//| BW - matrix bandwidth, BW>=0 |
//| OUTPUT PARAMETERS: |
//| S - sparse M*N matrix in SKS representation. All elements |
//| are filled by zeros. You may use SparseSet() to change|
//| their values. |
//| NOTE: this function completely overwrites S with new sparse |
//| matrix. Previously allocated storage is NOT reused. If you |
//| want to reuse already allocated memory, call |
//| SparseCreateSKSBandBuf function. |
//+------------------------------------------------------------------+
void CAlglib::SparseCreateSKSBand(int m,int n,int bw,
                                  CSparseMatrix &s)
  {
//--- delegate creation of the constant-bandwidth SKS matrix to the computational core
   CSparse::SparseCreateSKSBand(m,n,bw,s);
  }
//+------------------------------------------------------------------+
//| This is "buffered" version of SparseCreateSKSBand() which reuses |
//| memory previously allocated in S(of course, memory is reallocated|
//| if needed). |
//| You may want to use this function instead of SparseCreateSKSBuf()|
//| when your matrix has constant or nearly-constant bandwidth, and |
//| you want to simplify source code. |
//| INPUT PARAMETERS: |
//| M, N - number of rows(M) and columns (N) in a matrix: |
//| * M=N (as for now, ALGLIB supports only square SKS) |
//| * N>=1 |
//| * M>=1 |
//| BW - bandwidth, BW>=0 |
//| OUTPUT PARAMETERS: |
//| S - sparse M*N matrix in SKS representation. All elements |
//| are filled by zeros. You may use SparseSet() to change|
//| their values. |
//+------------------------------------------------------------------+
void CAlglib::SparseCreateSKSBandBuf(int m,int n,int bw,
                                     CSparseMatrix &s)
  {
//--- delegate buffered creation of the constant-bandwidth SKS matrix to the core
   CSparse::SparseCreateSKSBandBuf(m,n,bw,s);
  }
//+------------------------------------------------------------------+
//| This function copies S0 to S1. |
//| This function completely deallocates memory owned by S1 before |
//| creating a copy of S0. If you want to reuse memory, use |
//| SparseCopyBuf. |
//| NOTE: this function does not verify its arguments, it just copies|
//| all fields of the structure. |
//+------------------------------------------------------------------+
void CAlglib::SparseCopy(CSparseMatrix &s0,CSparseMatrix &s1)
  {
//--- delegate the deep copy S0 -> S1 to the computational core
   CSparse::SparseCopy(s0,s1);
  }
//+------------------------------------------------------------------+
//| This function copies S0 to S1. |
//| Memory already allocated in S1 is reused as much as possible. |
//| NOTE: this function does not verify its arguments, it just copies|
//| all fields of the structure. |
//+------------------------------------------------------------------+
void CAlglib::SparseCopyBuf(CSparseMatrix &s0,CSparseMatrix &s1)
  {
//--- delegate the buffer-reusing copy S0 -> S1 to the computational core
   CSparse::SparseCopyBuf(s0,s1);
  }
//+------------------------------------------------------------------+
//| This function efficiently swaps contents of S0 and S1. |
//+------------------------------------------------------------------+
void CAlglib::SparseSwap(CSparseMatrix &s0,CSparseMatrix &s1)
  {
//--- delegate the O(1) content swap to the computational core
   CSparse::SparseSwap(s0,s1);
  }
//+------------------------------------------------------------------+
//| This function adds value to S[i,j] - element of the sparse matrix|
//| Matrix must be in a Hash-Table mode. |
//| In case S[i,j] already exists in the table, V is added to its    |
//| value. In case S[i,j] is non-existent, it is inserted in the |
//| table. Table automatically grows when necessary. |
//| INPUT PARAMETERS: |
//| S - sparse M*N matrix in Hash-Table representation. |
//| Exception will be thrown for CRS matrix. |
//| I - row index of the element to modify, 0<=I<M |
//| J - column index of the element to modify, 0<=J<N |
//| V - value to add, must be finite number |
//| OUTPUT PARAMETERS: |
//| S - modified matrix |
//| NOTE 1: when S[i,j] is exactly zero after modification, it is |
//| deleted from the table. |
//+------------------------------------------------------------------+
void CAlglib::SparseAdd(CSparseMatrix &s,int i,int j,double v)
  {
//--- delegate S[i,j] += v (Hash-Table storage only) to the computational core
   CSparse::SparseAdd(s,i,j,v);
  }
//+------------------------------------------------------------------+
//| This function modifies S[i,j] - element of the sparse matrix. |
//| For Hash-based storage format: |
//| * this function can be called at any moment - during matrix |
//| initialization or later |
//| * new value can be zero or non-zero. In case new value of S[i,j] |
//| is zero, this element is deleted from the table. |
//| * this function has no effect when called with zero V for |
//| non-existent element. |
//| For CRS-bases storage format: |
//| * this function can be called ONLY DURING MATRIX INITIALIZATION |
//| * zero values are stored in the matrix similarly to non-zero ones|
//| * elements must be initialized in correct order - from top row |
//| to bottom, within row - from left to right. |
//| For SKS storage: |
//| * this function can be called at any moment - during matrix |
//| initialization or later |
//| * zero values are stored in the matrix similarly to non-zero ones|
//| * this function CAN NOT be called for non-existent (outside of |
//| the band specified during SKS matrix creation) elements. Say, |
//| if you created SKS matrix with bandwidth=2 and tried to call |
//| SparseSet(s,0,10,VAL), an exception will be generated. |
//| INPUT PARAMETERS: |
//| S - sparse M*N matrix in Hash-Table, SKS or CRS format. |
//| I - row index of the element to modify, 0<=I<M |
//| J - column index of the element to modify, 0<=J<N |
//| V - value to set, must be finite number, can be zero |
//| OUTPUT PARAMETERS: |
//| S - modified matrix |
//+------------------------------------------------------------------+
void CAlglib::SparseSet(CSparseMatrix &s,int i,int j,double v)
  {
//--- delegate S[i,j] = v to the computational core
   CSparse::SparseSet(s,i,j,v);
  }
//+------------------------------------------------------------------+
//| This function returns S[i,j] - element of the sparse matrix. |
//| Matrix can be in any mode (Hash-Table, CRS, SKS), but this |
//| function is less efficient for CRS matrices. Hash-Table and SKS |
//| matrices can find element in O(1) time, while CRS matrices need |
//| O(log(RS)) time, where RS is a number of non-zero elements in a  |
//| row. |
//| INPUT PARAMETERS: |
//| S - sparse M*N matrix |
//| I - row index of the element to modify, 0<=I<M |
//| J - column index of the element to modify, 0<=J<N |
//| RESULT |
//| value of S[I,J] or zero (in case no element with such index is |
//| found) |
//+------------------------------------------------------------------+
double CAlglib::SparseGet(CSparseMatrix &s,int i,int j)
  {
//--- query the element through the computational core;
//--- zero is returned when no element with such index is stored
   double value=CSparse::SparseGet(s,i,j);
   return(value);
  }
//+------------------------------------------------------------------+
//| This function checks whether S[i,j] is present in the sparse |
//| matrix. It returns True even for elements that are numerically |
//| zero (but still have place allocated for them). |
//| The matrix can be in any mode (Hash-Table, CRS, SKS), but this |
//| function is less efficient for CRS matrices. Hash-Table and SKS |
//| matrices can find element in O(1) time, while CRS matrices need |
//| O(log(RS)) time, where RS is a number of non-zero elements in a  |
//| row. |
//| INPUT PARAMETERS: |
//| S - sparse M*N matrix |
//| I - row index of the element to modify, 0<=I<M |
//| J - column index of the element to modify, 0<=J<N |
//| RESULT |
//| whether S[I,J] is present in the data structure or not |
//+------------------------------------------------------------------+
bool CAlglib::SparseExists(CSparseMatrix &s,int i,int j)
  {
//--- ask the computational core whether S[i,j] has allocated storage
   bool present=CSparse::SparseExists(s,i,j);
   return(present);
  }
//+------------------------------------------------------------------+
//| This function returns I-th diagonal element of the sparse matrix.|
//| Matrix can be in any mode (Hash-Table or CRS storage), but this |
//| function is most efficient for CRS matrices - it requires less |
//| than 50 CPU cycles to extract diagonal element. For Hash-Table |
//| matrices we still have O(1) query time, but function is many |
//| times slower. |
//| INPUT PARAMETERS: |
//|     S - sparse M*N matrix in Hash-Table or CRS                   |
//|         representation.                                          |
//| I - index of the element to modify, 0<=I<min(M,N) |
//| RESULT |
//| value of S[I,I] or zero (in case no element with such index is |
//| found) |
//+------------------------------------------------------------------+
double CAlglib::SparseGetDiagonal(CSparseMatrix &s,int i)
  {
//--- query S[i,i] through the computational core;
//--- zero is returned when no element with such index is stored
   double diag=CSparse::SparseGetDiagonal(s,i);
   return(diag);
  }
//+------------------------------------------------------------------+
//| This function calculates matrix-vector product S*x. Matrix S must|
//| be stored in CRS or SKS format (exception will be thrown |
//| otherwise). |
//| INPUT PARAMETERS: |
//| S - sparse M*N matrix in CRS or SKS format. |
//| X - array[N], input vector. For performance reasons we |
//| make only quick checks - we check that array size is |
//| at least N, but we do not check for NAN's or INF's. |
//| Y - output buffer, possibly preallocated. In case buffer |
//| size is too small to store result, this buffer is |
//| automatically resized. |
//| OUTPUT PARAMETERS: |
//| Y - array[M], S*x |
//| NOTE: this function throws exception when called for |
//| non-CRS/SKS matrix. You must convert your matrix with |
//| SparseConvertToCRS/SKS() before using this function. |
//+------------------------------------------------------------------+
void CAlglib::SparseMV(CSparseMatrix &s,CRowDouble &x,CRowDouble &y)
  {
//--- delegate the product y := S*x (CRS/SKS storage only) to the computational core
   CSparse::SparseMV(s,x,y);
  }
//+------------------------------------------------------------------+
//| This function calculates matrix-vector product S^T*x. Matrix S |
//| must be stored in CRS or SKS format (exception will be thrown |
//| otherwise). |
//| INPUT PARAMETERS: |
//| S - sparse M*N matrix in CRS or SKS format. |
//| X - array[M], input vector. For performance reasons we |
//| make only quick checks - we check that array size is |
//| at least M, but we do not check for NAN's or INF's. |
//| Y - output buffer, possibly preallocated. In case buffer |
//| size is too small to store result, this buffer is |
//| automatically resized. |
//| OUTPUT PARAMETERS: |
//| Y - array[N], S^T*x |
//| NOTE: this function throws exception when called for non-CRS/SKS |
//| matrix. |
//| You must convert your matrix with SparseConvertToCRS/SKS() before|
//| using this function. |
//+------------------------------------------------------------------+
void CAlglib::SparseMTV(CSparseMatrix &s,CRowDouble &x,CRowDouble &y)
  {
//--- delegate the product y := S^T*x (CRS/SKS storage only) to the computational core
   CSparse::SparseMTV(s,x,y);
  }
//+------------------------------------------------------------------+
//| This function calculates generalized sparse matrix-vector product|
//| y := alpha*op(S)*x + beta*y |
//| Matrix S must be stored in CRS or SKS format (exception will be |
//| thrown otherwise). op(S) can be either S or S^T. |
//| NOTE: this function expects Y to be large enough to store result.|
//| No automatic preallocation happens for smaller arrays. |
//| INPUT PARAMETERS: |
//| S - sparse matrix in CRS or SKS format. |
//| Alpha - source coefficient |
//| OpS - operation type: |
//| * OpS=0 => op(S) = S |
//| * OpS=1 => op(S) = S^T |
//| X - input vector, must have at least Cols(op(S))+IX |
//| elements |
//| IX - subvector offset |
//| Beta - destination coefficient |
//| Y - preallocated output array, must have at least |
//| Rows(op(S))+IY elements |
//| IY - subvector offset |
//| OUTPUT PARAMETERS: |
//| Y - elements [IY...IY+Rows(op(S))-1] are replaced by |
//| result, other elements are not modified |
//| HANDLING OF SPECIAL CASES: |
//| * below M=Rows(op(S)) and N=Cols(op(S)). Although current |
//| ALGLIB version does not allow you to create zero-sized sparse|
//| matrices, internally ALGLIB can deal with such matrices. So, |
//| comments for M or N equal to zero are for internal use only. |
//| * if M=0, then subroutine does nothing. It does not even touch |
//| arrays. |
//| * if N=0 or Alpha=0.0, then: |
//| * if Beta=0, then Y is filled by zeros. S and X are not |
//| referenced at all. Initial values of Y are ignored (we do not|
//| multiply Y by zero, we just rewrite it by zeros) |
//| * if Beta<>0, then Y is replaced by Beta*Y |
//|   * if M>0, N>0, Alpha<>0, but Beta=0, then Y is replaced by     |
//|     alpha*op(S)*x; initial state of Y is ignored (rewritten      |
//|     without initial multiplication by zeros).                    |
//| NOTE: this function throws exception when called for non-CRS/SKS |
//| matrix. |
//| You must convert your matrix with SparseConvertToCRS/SKS() before|
//| using this function. |
//+------------------------------------------------------------------+
void CAlglib::SparseGemV(CSparseMatrix &s,double alpha,int ops,
                         CRowDouble &x,int ix,double beta,
                         CRowDouble &y,int iy)
  {
//--- thin wrapper: forward y := alpha*op(S)*x + beta*y (with subvector offsets ix/iy)
//--- to the static CSparse implementation
   CSparse::SparseGemV(s,alpha,ops,x,ix,beta,y,iy);
  }
//+------------------------------------------------------------------+
//| This function simultaneously calculates two matrix-vector |
//| products: |
//| S*x and S^T*x. |
//| S must be square (non-rectangular) matrix stored in CRS or SKS |
//| format (exception will be thrown otherwise). |
//| INPUT PARAMETERS: |
//| S - sparse N*N matrix in CRS or SKS format. |
//| X - array[N], input vector. For performance reasons we |
//| make only quick checks - we check that array size is |
//| at least N, but we do not check for NAN's or INF's. |
//| Y0 - output buffer, possibly preallocated. In case buffer |
//| size is too small to store result, this buffer is |
//| automatically resized. |
//| Y1 - output buffer, possibly preallocated. In case buffer |
//| size is too small to store result, this buffer is |
//| automatically resized. |
//| OUTPUT PARAMETERS: |
//| Y0 - array[N], S*x |
//| Y1 - array[N], S^T*x |
//| NOTE: this function throws exception when called for non-CRS/SKS |
//| matrix. |
//| You must convert your matrix with SparseConvertToCRS/SKS() before|
//| using this function. |
//+------------------------------------------------------------------+
void CAlglib::SparseMV2(CSparseMatrix &s,CRowDouble &x,
                        CRowDouble &y0,CRowDouble &y1)
  {
//--- thin wrapper: compute y0 := S*x and y1 := S^T*x in one pass via the static CSparse implementation
   CSparse::SparseMV2(s,x,y0,y1);
  }
//+------------------------------------------------------------------+
//| This function calculates matrix-vector product S*x, when S is |
//| symmetric matrix. Matrix S must be stored in CRS or SKS format |
//| (exception will be thrown otherwise). |
//| INPUT PARAMETERS: |
//| S - sparse M*M matrix in CRS or SKS format. |
//| IsUpper - whether upper or lower triangle of S is given: |
//| * if upper triangle is given, only S[i,j] for j>=i |
//| are used, and lower triangle is ignored (it can be |
//| empty - these elements are not referenced at all). |
//| * if lower triangle is given, only S[i,j] for j<=i |
//| are used, and upper triangle is ignored. |
//| X - array[N], input vector. For performance reasons we |
//| make only quick checks - we check that array size is|
//| at least N, but we do not check for NAN's or INF's. |
//| Y - output buffer, possibly preallocated.In case buffer|
//| size is too small to store result, this buffer is|
//| automatically resized. |
//| OUTPUT PARAMETERS: |
//| Y - array[M], S*x |
//| NOTE: this function throws exception when called for non-CRS/SKS |
//| matrix. |
//| You must convert your matrix with SparseConvertToCRS/SKS() before|
//| using this function. |
//+------------------------------------------------------------------+
void CAlglib::SparseSMV(CSparseMatrix &s,bool IsUpper,
                        CRowDouble &x,CRowDouble &y)
  {
//--- thin wrapper: forward symmetric matrix-vector product y := S*x
//--- (S given by one triangle, selected by IsUpper) to the static CSparse implementation
   CSparse::SparseSMV(s,IsUpper,x,y);
  }
//+------------------------------------------------------------------+
//| This function calculates vector-matrix-vector product x'*S*x, |
//| where S is symmetric matrix. Matrix S must be stored in CRS or |
//| SKS format (exception will be thrown otherwise). |
//| INPUT PARAMETERS: |
//| S - sparse M*M matrix in CRS or SKS format. |
//| IsUpper - whether upper or lower triangle of S is given: |
//| * if upper triangle is given, only S[i,j] for j>=i |
//| are used, and lower triangle is ignored (it can be |
//| empty - these elements are not referenced at all). |
//| * if lower triangle is given, only S[i,j] for j<=i |
//| are used, and upper triangle is ignored. |
//| X - array[N], input vector. For performance reasons we |
//| make only quick checks - we check that array size is|
//| at least N, but we do not check for NAN's or INF's. |
//| RESULT |
//| x'*S*x |
//| NOTE: this function throws exception when called for non-CRS/SKS |
//| matrix. |
//| You must convert your matrix with SparseConvertToCRS/SKS() before|
//| using this function. |
//+------------------------------------------------------------------+
double CAlglib::SparseVSMV(CSparseMatrix &s,bool IsUpper,CRowDouble &x)
  {
//--- thin wrapper: return the quadratic form x'*S*x computed by the static CSparse implementation
   return(CSparse::SparseVSMV(s,IsUpper,x));
  }
//+------------------------------------------------------------------+
//| This function calculates matrix-matrix product S*A. Matrix S must|
//| be stored in CRS or SKS format (exception will be thrown |
//| otherwise). |
//| INPUT PARAMETERS: |
//| S - sparse M*N matrix in CRS or SKS format. |
//| A - array[N,K], input dense matrix. For performance |
//| reasons we make only quick checks - we check that |
//| array size is at least N, but we do not check for |
//| NAN's or INF's. |
//| K - number of columns of matrix (A). |
//| B - output buffer, possibly preallocated. In case buffer |
//| size is too small to store result, this buffer is |
//| automatically resized. |
//| OUTPUT PARAMETERS: |
//| B - array[M,K], S*A |
//| NOTE: this function throws exception when called for non-CRS/SKS |
//| matrix. |
//| You must convert your matrix with SparseConvertToCRS/SKS() before|
//| using this function. |
//+------------------------------------------------------------------+
void CAlglib::SparseMM(CSparseMatrix &s,CMatrixDouble &a,int k,
                       CMatrixDouble &b)
  {
//--- thin wrapper: forward matrix-matrix product B := S*A (A has k columns)
//--- to the static CSparse implementation
   CSparse::SparseMM(s,a,k,b);
  }
//+------------------------------------------------------------------+
//| This function calculates matrix-matrix product S^T*A. Matrix S |
//| must be stored in CRS or SKS format (exception will be thrown |
//| otherwise). |
//| INPUT PARAMETERS: |
//| S - sparse M*N matrix in CRS or SKS format. |
//| A - array[M,K], input dense matrix. For performance |
//| reasons we make only quick checks - we check that |
//| array size is at least M, but we do not check for |
//| NAN's or INF's. |
//| K - number of columns of matrix (A). |
//| B - output buffer, possibly preallocated. In case buffer |
//| size is too small to store result, this buffer is |
//| automatically resized. |
//| OUTPUT PARAMETERS: |
//| B - array[N,K], S^T*A |
//| NOTE: this function throws exception when called for non-CRS/SKS |
//| matrix. |
//| You must convert your matrix with SparseConvertToCRS/SKS() before|
//| using this function. |
//+------------------------------------------------------------------+
void CAlglib::SparseMTM(CSparseMatrix &s,CMatrixDouble &a,int k,
                        CMatrixDouble &b)
  {
//--- thin wrapper: forward transposed product B := S^T*A (A has k columns)
//--- to the static CSparse implementation
   CSparse::SparseMTM(s,a,k,b);
  }
//+------------------------------------------------------------------+
//| This function simultaneously calculates two matrix-matrix |
//| products: |
//| S*A and S^T*A. |
//| S must be square (non-rectangular) matrix stored in CRS or SKS |
//| format (exception will be thrown otherwise). |
//| INPUT PARAMETERS: |
//| S - sparse N*N matrix in CRS or SKS format. |
//| A - array[N,K], input dense matrix. For performance |
//| reasons we make only quick checks - we check that |
//| array size is at least N, but we do not check for |
//| NAN's or INF's. |
//| K - number of columns of matrix (A). |
//| B0 - output buffer, possibly preallocated. In case buffer |
//| size is too small to store result, this buffer is |
//| automatically resized. |
//| B1 - output buffer, possibly preallocated. In case buffer |
//| size is too small to store result, this buffer is |
//| automatically resized. |
//| OUTPUT PARAMETERS: |
//| B0 - array[N,K], S*A |
//| B1 - array[N,K], S^T*A |
//| NOTE: this function throws exception when called for non-CRS/SKS |
//| matrix. |
//| You must convert your matrix with SparseConvertToCRS/SKS() before|
//| using this function. |
//+------------------------------------------------------------------+
void CAlglib::SparseMM2(CSparseMatrix &s,CMatrixDouble &a,int k,
                        CMatrixDouble &b0,CMatrixDouble &b1)
  {
//--- thin wrapper: compute b0 := S*A and b1 := S^T*A in one pass via the static CSparse implementation
   CSparse::SparseMM2(s,a,k,b0,b1);
  }
//+------------------------------------------------------------------+
//| This function calculates matrix-matrix product S*A, when S is |
//| symmetric matrix. Matrix S must be stored in CRS or SKS format |
//| (exception will be thrown otherwise). |
//| INPUT PARAMETERS: |
//| S - sparse M*M matrix in CRS or SKS format. |
//| IsUpper - whether upper or lower triangle of S is given: |
//| * if upper triangle is given, only S[i,j] for j>=i |
//| are used, and lower triangle is ignored (it can be |
//| empty - these elements are not referenced at all). |
//| * if lower triangle is given, only S[i,j] for j<=i |
//| are used, and upper triangle is ignored. |
//| A - array[N,K], input dense matrix. For performance |
//| reasons we make only quick checks - we check that |
//| array size is at least N, but we do not check for |
//| NAN's or INF's. |
//| K - number of columns of matrix (A). |
//| B - output buffer, possibly preallocated. In case |
//| buffer size is too small to store result, this |
//| buffer is automatically resized. |
//| OUTPUT PARAMETERS: |
//| B - array[M,K], S*A |
//| NOTE: this function throws exception when called for non-CRS/SKS |
//| matrix. |
//| You must convert your matrix with SparseConvertToCRS/SKS() before|
//| using this function. |
//+------------------------------------------------------------------+
void CAlglib::SparseSMM(CSparseMatrix &s,bool IsUpper,
                        CMatrixDouble &a,int k,CMatrixDouble &b)
  {
//--- thin wrapper: forward symmetric matrix-matrix product B := S*A
//--- (S given by one triangle, selected by IsUpper) to the static CSparse implementation
   CSparse::SparseSMM(s,IsUpper,a,k,b);
  }
//+------------------------------------------------------------------+
//| This function calculates matrix-vector product op(S)*x, when x is|
//| vector, S is symmetric triangular matrix, op(S) is transposition |
//| or no operation. |
//| Matrix S must be stored in CRS or SKS format (exception will be |
//| thrown otherwise). |
//| INPUT PARAMETERS: |
//| S - sparse square matrix in CRS or SKS format. |
//| IsUpper - whether upper or lower triangle of S is used: |
//| * if upper triangle is given, only S[i,j] for j>=i|
//| are used, and lower triangle is ignored (it can be|
//| empty - these elements are not referenced at all). |
//| * if lower triangle is given, only S[i,j] for j<=i|
//| are used, and upper triangle is ignored. |
//| IsUnit - unit or non-unit diagonal: |
//| * if True, diagonal elements of triangular matrix are |
//| considered equal to 1.0. Actual elements stored in|
//| S are not referenced at all. |
//| * if False, diagonal stored in S is used |
//| OpType - operation type: |
//| * if 0, S*x is calculated |
//| * if 1, (S^T)*x is calculated (transposition) |
//| X - array[N] which stores input vector. For performance|
//| reasons we make only quick checks - we check that |
//| array size is at least N, but we do not check for |
//| NAN's or INF's. |
//| Y - possibly preallocated input buffer. Automatically |
//| resized if its size is too small. |
//| OUTPUT PARAMETERS: |
//| Y - array[N], op(S)*x |
//| NOTE: this function throws exception when called for non-CRS/SKS |
//| matrix. |
//| You must convert your matrix with SparseConvertToCRS/SKS() before|
//| using this function. |
//+------------------------------------------------------------------+
void CAlglib::SparseTRMV(CSparseMatrix &s,bool IsUpper,bool IsUnit,
                         int OpType,CRowDouble &x,CRowDouble &y)
  {
//--- thin wrapper: forward triangular matrix-vector product y := op(S)*x
//--- (OpType selects S or S^T, IsUnit selects unit diagonal) to the static CSparse implementation
   CSparse::SparseTRMV(s,IsUpper,IsUnit,OpType,x,y);
  }
//+------------------------------------------------------------------+
//| This function solves linear system op(S)*y=x where x is vector, S|
//| is symmetric triangular matrix, op(S) is transposition or no |
//| operation. |
//| Matrix S must be stored in CRS or SKS format (exception will be |
//| thrown otherwise). |
//| INPUT PARAMETERS: |
//| S - sparse square matrix in CRS or SKS format. |
//| IsUpper - whether upper or lower triangle of S is used: |
//| * if upper triangle is given, only S[i,j] for j>=i are|
//| used, and lower triangle is ignored (it can be |
//| empty - these elements are not referenced at all). |
//| * if lower triangle is given, only S[i,j] for j<=i are|
//| used, and upper triangle is ignored. |
//| IsUnit - unit or non-unit diagonal: |
//| * if True, diagonal elements of triangular matrix are |
//| considered equal to 1.0. Actual elements stored in S|
//| are not referenced at all. |
//| * if False, diagonal stored in S is used. It is your |
//| responsibility to make sure that diagonal is non-zero|
//| OpType - operation type: |
//| * if 0, S*x is calculated |
//| * if 1, (S^T)*x is calculated (transposition) |
//| X - array[N] which stores input vector. For performance|
//| reasons we make only quick checks - we check that |
//| array size is at least N, but we do not check for |
//| NAN's or INF's. |
//| OUTPUT PARAMETERS: |
//| X - array[N], inv(op(S))*x |
//| NOTE: this function throws exception when called for non-CRS/SKS |
//| matrix. |
//| You must convert your matrix with SparseConvertToCRS/SKS() before|
//| using this function. |
//| NOTE: no assertion or tests are done during algorithm operation. |
//| It is your responsibility to provide invertible matrix to |
//| algorithm. |
//+------------------------------------------------------------------+
void CAlglib::SparseTRSV(CSparseMatrix &s,bool IsUpper,bool IsUnit,
                         int OpType,CRowDouble &x)
  {
//--- thin wrapper: solve the triangular system op(S)*y=x in-place in x
//--- via the static CSparse implementation
   CSparse::SparseTRSV(s,IsUpper,IsUnit,OpType,x);
  }
//+------------------------------------------------------------------+
//| This function applies permutation given by permutation table P |
//| (as opposed to product form of permutation) to sparse symmetric |
//| matrix A, given by either upper or lower triangle: B := P*A*P'|
//| This function allocates completely new instance of B. Use |
//| buffered version SparseSymmPermTblBuf() if you want to reuse |
//| already allocated structure. |
//| INPUT PARAMETERS: |
//| A - sparse square matrix in CRS format. |
//| IsUpper - whether upper or lower triangle of A is used: |
//| * if upper triangle is given, only A[i,j] for j>=i|
//| are used, and lower triangle is ignored (it can be|
//| empty - these elements are not referenced at all). |
//| * if lower triangle is given, only A[i,j] for j<=i|
//| are used, and upper triangle is ignored. |
//| P - array[N] which stores permutation table; P[I]=J |
//| means that I-th row/column of matrix A is moved to |
//| J-th position. For performance reasons we do NOT |
//|               check that P[] is a correct permutation (that there|
//|               are no repetitions), just that all its elements are|
//|               in [0,N) range.                                    |
//| OUTPUT PARAMETERS: |
//| B - permuted matrix. Permutation is applied to A from |
//| the both sides, only upper or lower triangle |
//| (depending on IsUpper) is stored. |
//| NOTE: this function throws exception when called for non-CRS |
//| matrix. You must convert your matrix with SparseConvertToCRS() |
//| before using this function. |
//+------------------------------------------------------------------+
void CAlglib::SparseSymmPermTbl(CSparseMatrix &a,bool IsUpper,
                                CRowInt &p,CSparseMatrix &b)
  {
//--- thin wrapper: forward symmetric permutation B := P*A*P' (permutation given as table p)
//--- to the static CSparse implementation; B is allocated anew
   CSparse::SparseSymmPermTbl(a,IsUpper,p,b);
  }
//+------------------------------------------------------------------+
//| This function is a buffered version of SparseSymmPermTbl() that |
//| reuses previously allocated storage in B as much as possible. |
//| This function applies permutation given by permutation table P |
//| (as opposed to product form of permutation) to sparse symmetric |
//| matrix A, given by either upper or lower triangle: B := P*A*P'. |
//| INPUT PARAMETERS: |
//| A - sparse square matrix in CRS format. |
//| IsUpper - whether upper or lower triangle of A is used: |
//| * if upper triangle is given, only A[i,j] for j>=i|
//| are used, and lower triangle is ignored (it can be|
//| empty - these elements are not referenced at all). |
//| * if lower triangle is given, only A[i,j] for j<=i|
//| are used, and upper triangle is ignored. |
//| P - array[N] which stores permutation table; P[I]=J |
//| means that I-th row/column of matrix A is moved to |
//| J-th position. For performance reasons we do NOT |
//|               check that P[] is a correct permutation (that there|
//|               are no repetitions), just that all its elements are|
//|               in [0,N) range.                                    |
//| B - sparse matrix object that will hold output. |
//| Previously allocated memory will be reused as much |
//| as possible. |
//| OUTPUT PARAMETERS: |
//| B - permuted matrix. Permutation is applied to A from |
//| the both sides, only upper or lower triangle |
//| (depending on IsUpper) is stored. |
//| NOTE: this function throws exception when called for non-CRS |
//| matrix. You must convert your matrix with SparseConvertToCRS() |
//| before using this function. |
//+------------------------------------------------------------------+
void CAlglib::SparseSymmPermTblBuf(CSparseMatrix &a,bool IsUpper,
                                   CRowInt &p,CSparseMatrix &b)
  {
//--- thin wrapper: buffered variant of SparseSymmPermTbl() — forward B := P*A*P'
//--- to the static CSparse implementation, reusing storage already allocated in b
   CSparse::SparseSymmPermTblBuf(a,IsUpper,p,b);
  }
//+------------------------------------------------------------------+
//| This procedure resizes Hash-Table matrix. It can be called when |
//| you have deleted too many elements from the matrix, and you want |
//| to free unneeded memory. |
//+------------------------------------------------------------------+
void CAlglib::SparseResizeMatrix(CSparseMatrix &s)
  {
//--- thin wrapper: forward hash-table storage resize (frees unneeded memory)
//--- to the static CSparse implementation
   CSparse::SparseResizeMatrix(s);
  }
//+------------------------------------------------------------------+
//| This function is used to enumerate all elements of the sparse |
//| matrix. Before first call user initializes T0 and T1 counters by |
//| zero. These counters are used to remember current position in a |
//| matrix; after each call they are updated by the function. |
//| Subsequent calls to this function return non-zero elements of the|
//| sparse matrix, one by one. If you enumerate CRS matrix, matrix is|
//| traversed from left to right, from top to bottom. In case you |
//| enumerate matrix stored as Hash table, elements are returned in |
//| random order. |
//| EXAMPLE |
//| > T0=0 |
//| > T1=0 |
//| > while SparseEnumerate(S,T0,T1,I,J,V) do |
//| > ....do something with I,J,V |
//| INPUT PARAMETERS: |
//| S - sparse M*N matrix in Hash-Table or CRS representation.|
//| T0 - internal counter |
//| T1 - internal counter |
//| OUTPUT PARAMETERS: |
//| T0 - new value of the internal counter |
//| T1 - new value of the internal counter |
//| I - row index of non-zero element, 0<=I<M. |
//| J - column index of non-zero element, 0<=J<N |
//| V - value of the T-th element |
//| RESULT |
//| True in case of success (next non-zero element was retrieved) |
//| False in case all non-zero elements were enumerated |
//| NOTE: you may call SparseRewriteExisting() during enumeration, |
//| but it is THE ONLY matrix modification function you can |
//| call!!! Other matrix modification functions should not be |
//| called during enumeration! |
//+------------------------------------------------------------------+
bool CAlglib::SparseEnumerate(CSparseMatrix &s,int &t0,int &t1,
                              int &i,int &j,double &v)
  {
//--- thin wrapper: advance the (t0,t1) enumeration cursor and fetch the next
//--- non-zero element (i,j,v); returns false when enumeration is exhausted
   return(CSparse::SparseEnumerate(s,t0,t1,i,j,v));
  }
//+------------------------------------------------------------------+
//| This function rewrites existing (non-zero) element. It returns |
//| True if element exists or False, when it is called for |
//| non-existing (zero) element. |
//| This function works with any kind of the matrix. |
//| The purpose of this function is to provide convenient thread-safe|
//| way to modify sparse matrix. Such modification (already existing |
//| element is rewritten) is guaranteed to be thread-safe without any|
//| synchronization, as long as different threads modify different |
//| elements. |
//| INPUT PARAMETERS: |
//| S - sparse M*N matrix in any kind of representation (Hash,|
//| SKS, CRS). |
//| I - row index of non-zero element to modify, 0<=I<M |
//| J - column index of non-zero element to modify, 0<=J<N |
//| V - value to rewrite, must be finite number |
//| OUTPUT PARAMETERS: |
//| S - modified matrix |
//| RESULT |
//| True in case when element exists |
//| False in case when element doesn't exist or it is zero |
//+------------------------------------------------------------------+
bool CAlglib::SparseRewriteExisting(CSparseMatrix &s,int i,
                                    int j,double v)
  {
//--- thin wrapper: overwrite an already-existing element S[i,j] with v via the
//--- static CSparse implementation; returns false when the element does not exist
   return(CSparse::SparseRewriteExisting(s,i,j,v));
  }
//+------------------------------------------------------------------+
//| This function returns I-th row of the sparse matrix. Matrix must |
//| be stored in CRS or SKS format. |
//| INPUT PARAMETERS: |
//| S - sparse M*N matrix in CRS format |
//| I - row index, 0<=I<M |
//| IRow - output buffer, can be preallocated. In case buffer|
//| size is too small to store I-th row, it is|
//| automatically reallocated. |
//| OUTPUT PARAMETERS: |
//| IRow - array[M], I-th row. |
//| NOTE: this function has O(N) running time, where N is a column |
//| count. It allocates and fills N-element array, even although|
//|      most of its elements are zero.                              |
//| NOTE: If you have O(non-zeros-per-row) time and memory |
//| requirements, use SparseGetCompressedRow() function. It |
//| returns data in compressed format. |
//| NOTE: when incorrect I (outside of [0,M-1]) or matrix (non |
//| CRS/SKS) is passed, this function throws exception. |
//+------------------------------------------------------------------+
void CAlglib::SparseGetRow(CSparseMatrix &s,int i,CRowDouble &irow)
  {
//--- thin wrapper: extract the full (dense) i-th row of s into irow
//--- via the static CSparse implementation
   CSparse::SparseGetRow(s,i,irow);
  }
//+------------------------------------------------------------------+
//| This function returns I-th row of the sparse matrix IN COMPRESSED|
//| FORMAT - only non-zero elements are returned (with their indexes)|
//| Matrix must be stored in CRS or SKS format. |
//| INPUT PARAMETERS: |
//| S - sparse M*N matrix in CRS format |
//| I - row index, 0<=I<M |
//| ColIdx - output buffer for column indexes, can be |
//| preallocated. In case buffer size is too small to |
//| store I-th row, it is automatically reallocated. |
//| Vals - output buffer for values, can be preallocated. In |
//| case buffer size is too small to store I-th row, it|
//| is automatically reallocated. |
//| OUTPUT PARAMETERS: |
//| ColIdx - column indexes of non-zero elements, sorted by |
//| ascending. Symbolically non-zero elements are |
//| counted (i.e. if you allocated place for element, |
//| but it has zero numerical value - it is counted). |
//| Vals - values. Vals[K] stores value of matrix element with|
//| indexes (I,ColIdx[K]). Symbolically non-zero |
//| elements are counted (i.e. if you allocated place |
//| for element, but it has zero numerical value - it |
//| is counted). |
//| NZCnt - number of symbolically non-zero elements per row. |
//| NOTE: when incorrect I (outside of [0,M-1]) or matrix (non |
//| CRS/SKS) is passed, this function throws exception. |
//| NOTE: this function may allocate additional, unnecessary place |
//| for ColIdx and Vals arrays. It is dictated by performance |
//| reasons - on SKS matrices it is faster to allocate space at |
//| the beginning with some "extra"-space, than performing two |
//| passes over matrix - first time to calculate exact space |
//| required for data, second time - to store data itself. |
//+------------------------------------------------------------------+
void CAlglib::SparseGetCompressedRow(CSparseMatrix &s,int i,
                                     CRowInt &colidx,CRowDouble &vals,
                                     int &nzcnt)
  {
//--- thin wrapper: extract the i-th row in compressed form (column indexes, values,
//--- and the non-zero count) via the static CSparse implementation
   CSparse::SparseGetCompressedRow(s,i,colidx,vals,nzcnt);
  }
//+------------------------------------------------------------------+
//| This function performs efficient in-place transpose of SKS matrix|
//| No additional memory is allocated during transposition. |
//| This function supports only skyline storage format (SKS). |
//| INPUT PARAMETERS: |
//| S - sparse matrix in SKS format. |
//| OUTPUT PARAMETERS: |
//| S - sparse matrix, transposed. |
//+------------------------------------------------------------------+
void CAlglib::SparseTransposeSKS(CSparseMatrix &s)
  {
//--- thin wrapper: forward in-place transposition of an SKS-format matrix
//--- to the static CSparse implementation
   CSparse::SparseTransposeSKS(s);
  }
//+------------------------------------------------------------------+
//| This function performs transpose of CRS matrix. |
//| INPUT PARAMETERS: |
//| S - sparse matrix in CRS format. |
//| OUTPUT PARAMETERS: |
//| S - sparse matrix, transposed. |
//| NOTE: internal temporary copy is allocated for the purposes of |
//| transposition. It is deallocated after transposition. |
//+------------------------------------------------------------------+
void CAlglib::SparseTransposeCRS(CSparseMatrix &s)
  {
//--- thin wrapper: forward in-place transposition of a CRS-format matrix
//--- to the static CSparse implementation
   CSparse::SparseTransposeCRS(s);
  }
//+------------------------------------------------------------------+
//| This function performs copying with transposition of CRS matrix. |
//| INPUT PARAMETERS: |
//| S0 - sparse matrix in CRS format. |
//| OUTPUT PARAMETERS: |
//| S1 - sparse matrix, transposed |
//+------------------------------------------------------------------+
void CAlglib::SparseCopyTransposeCRS(CSparseMatrix &s0,CSparseMatrix &s1)
  {
//--- thin wrapper: copy s0 into s1 with transposition (CRS format)
//--- via the static CSparse implementation
   CSparse::SparseCopyTransposeCRS(s0,s1);
  }
//+------------------------------------------------------------------+
//| This function performs copying with transposition of CRS matrix |
//| (buffered version which reuses memory already allocated by the |
//| target as much as possible). |
//| INPUT PARAMETERS: |
//| S0 - sparse matrix in CRS format. |
//| OUTPUT PARAMETERS: |
//| S1 - sparse matrix, transposed; previously allocated memory|
//| is reused if possible. |
//+------------------------------------------------------------------+
void CAlglib::SparseCopyTransposeCRSBuf(CSparseMatrix &s0,CSparseMatrix &s1)
  {
//--- thin wrapper: buffered copy-with-transposition (CRS format) via the static
//--- CSparse implementation; memory already allocated in s1 is reused when possible
   CSparse::SparseCopyTransposeCRSBuf(s0,s1);
  }
//+------------------------------------------------------------------+
//| This function performs in-place conversion to desired sparse |
//| storage format. |
//| INPUT PARAMETERS: |
//| S0 - sparse matrix in any format. |
//| Fmt - desired storage format of the output, as returned by |
//| SparseGetMatrixType() function: |
//| * 0 for hash-based storage |
//| * 1 for CRS |
//| * 2 for SKS |
//| OUTPUT PARAMETERS: |
//| S0 - sparse matrix in requested format. |
//| NOTE: in-place conversion wastes a lot of memory which is used |
//| to store temporaries. If you perform a lot of repeated |
//| conversions, we recommend to use out-of-place buffered |
//| conversion functions, like SparseCopyToBuf(), which can |
//| reuse already allocated memory. |
//+------------------------------------------------------------------+
void CAlglib::SparseConvertTo(CSparseMatrix &s0,int fmt)
  {
//--- thin wrapper: in-place conversion of s0 to the requested storage format
//--- (fmt: 0=hash, 1=CRS, 2=SKS) via the static CSparse implementation
   CSparse::SparseConvertTo(s0,fmt);
  }
//+------------------------------------------------------------------+
//| This function performs out-of-place conversion to desired sparse |
//| storage format. S0 is copied to S1 and converted on-the-fly. |
//| Memory allocated in S1 is reused to maximum extent possible. |
//| INPUT PARAMETERS: |
//| S0 - sparse matrix in any format. |
//| Fmt - desired storage format of the output, as returned by |
//| SparseGetMatrixType() function: |
//| * 0 for hash-based storage |
//| * 1 for CRS |
//| * 2 for SKS |
//| OUTPUT PARAMETERS: |
//| S1 - sparse matrix in requested format. |
//+------------------------------------------------------------------+
void CAlglib::SparseCopyToBuf(CSparseMatrix &s0,int fmt,CSparseMatrix &s1)
  {
//--- thin wrapper: out-of-place conversion of s0 into s1 in the requested format
//--- (fmt: 0=hash, 1=CRS, 2=SKS), reusing memory already allocated in s1
   CSparse::SparseCopyToBuf(s0,fmt,s1);
  }
//+------------------------------------------------------------------+
//| This function performs in-place conversion to Hash table storage.|
//| INPUT PARAMETERS: |
//| S - sparse matrix in CRS format. |
//| OUTPUT PARAMETERS: |
//| S - sparse matrix in Hash table format. |
//| NOTE: this function has no effect when called with matrix which |
//| is already in Hash table mode. |
//| NOTE: in-place conversion involves allocation of temporary arrays|
//| If you perform a lot of repeated in-place conversions, it |
//| may lead to memory fragmentation. Consider using |
//| out-of-place SparseCopyToHashBuf() function in this case. |
//+------------------------------------------------------------------+
void CAlglib::SparseConvertToHash(CSparseMatrix &s)
  {
//--- thin wrapper: in-place conversion of s to hash-table storage
//--- via the static CSparse implementation
   CSparse::SparseConvertToHash(s);
  }
//+------------------------------------------------------------------+
//| This function performs out-of-place conversion to Hash table |
//| storage format. S0 is copied to S1 and converted on-the-fly. |
//| INPUT PARAMETERS: |
//| S0 - sparse matrix in any format. |
//| OUTPUT PARAMETERS: |
//| S1 - sparse matrix in Hash table format. |
//| NOTE: if S0 is stored as Hash-table, it is just copied without |
//| conversion. |
//| NOTE: this function de-allocates memory occupied by S1 before |
//| starting conversion. If you perform a lot of repeated |
//| conversions, it may lead to memory fragmentation. In this |
//| case we recommend you to use SparseCopyToHashBuf() function |
//| which re-uses memory in S1 as much as possible. |
//+------------------------------------------------------------------+
void CAlglib::SparseCopyToHash(CSparseMatrix &s0,CSparseMatrix &s1)
  {
//--- thin wrapper: out-of-place conversion of s0 into hash-table-format s1
//--- via the static CSparse implementation
   CSparse::SparseCopyToHash(s0,s1);
  }
//+------------------------------------------------------------------+
//| This function performs out-of-place conversion to Hash table |
//| storage format. S0 is copied to S1 and converted on-the-fly. |
//| Memory allocated in S1 is reused to maximum extent possible. |
//| INPUT PARAMETERS: |
//| S0 - sparse matrix in any format. |
//| OUTPUT PARAMETERS: |
//| S1 - sparse matrix in Hash table format. |
//| NOTE: if S0 is stored as Hash-table, it is just copied without |
//| conversion. |
//+------------------------------------------------------------------+
void CAlglib::SparseCopyToHashBuf(CSparseMatrix &s0,CSparseMatrix &s1)
  {
//--- thin wrapper: buffered out-of-place conversion of s0 into hash-table-format s1,
//--- reusing memory already allocated in s1 where possible
   CSparse::SparseCopyToHashBuf(s0,s1);
  }
//+------------------------------------------------------------------+
//| This function converts matrix to CRS format. |
//| Some algorithms (linear algebra ones, for example) require |
//| matrices in CRS format. This function allows to perform in-place |
//| conversion. |
//| INPUT PARAMETERS: |
//| S - sparse M*N matrix in any format |
//| OUTPUT PARAMETERS: |
//| S - matrix in CRS format |
//| NOTE: this function has no effect when called with matrix which |
//| is already in CRS mode. |
//| NOTE: this function allocates temporary memory to store a copy of|
//| the matrix. If you perform a lot of repeated conversions, we|
//| recommend you to use SparseCopyToCRSBuf() function, which |
//| can reuse previously allocated memory. |
//+------------------------------------------------------------------+
void CAlglib::SparseConvertToCRS(CSparseMatrix &s)
  {
//--- thin wrapper: in-place conversion of s to CRS storage
//--- via the static CSparse implementation
   CSparse::SparseConvertToCRS(s);
  }
//+------------------------------------------------------------------+
//| This function performs out-of-place conversion to CRS format. S0 |
//| is copied to S1 and converted on-the-fly. |
//| INPUT PARAMETERS: |
//| S0 - sparse matrix in any format. |
//| OUTPUT PARAMETERS: |
//| S1 - sparse matrix in CRS format. |
//| NOTE: if S0 is stored as CRS, it is just copied without |
//| conversion. |
//| NOTE: this function de-allocates memory occupied by S1 before |
//| starting CRS conversion. If you perform a lot of repeated |
//| CRS conversions, it may lead to memory fragmentation. In |
//| this case we recommend you to use SparseCopyToCRSBuf() |
//| function which re-uses memory in S1 as much as possible. |
//+------------------------------------------------------------------+
void CAlglib::SparseCopyToCRS(CSparseMatrix &s0,CSparseMatrix &s1)
  {
//--- thin wrapper: out-of-place conversion of s0 into CRS-format s1
//--- via the static CSparse implementation
   CSparse::SparseCopyToCRS(s0,s1);
  }
//+------------------------------------------------------------------+
//| Out-of-place conversion to the CRS format with buffer reuse:     |
//| S0 is copied into S1 and converted on-the-fly, while memory      |
//| already allocated in S1 is reused to the maximum extent possible.|
//| INPUT PARAMETERS:                                                |
//|   S0 - sparse matrix in any storage format                       |
//|   S1 - matrix which may contain some pre-allocated memory, or    |
//|        may be a completely uninitialized structure               |
//| OUTPUT PARAMETERS:                                               |
//|   S1 - sparse matrix in CRS format                               |
//| NOTE: when S0 is already stored as CRS it is simply copied,      |
//|       without conversion.                                        |
//+------------------------------------------------------------------+
void CAlglib::SparseCopyToCRSBuf(CSparseMatrix &src,CSparseMatrix &dst)
  {
   CSparse::SparseCopyToCRSBuf(src,dst);
  }
//+------------------------------------------------------------------+
//| In-place conversion of a sparse matrix to the SKS (skyline)      |
//| format.                                                          |
//| INPUT PARAMETERS:                                                |
//|   S - sparse matrix in any storage format                        |
//| OUTPUT PARAMETERS:                                               |
//|   S - the same matrix, stored in SKS format                      |
//| NOTE: calling this function on a matrix which already uses SKS   |
//|       storage has no effect.                                     |
//| NOTE: the in-place conversion allocates temporary arrays, so a   |
//|       large number of repeated in-place conversions may lead to  |
//|       memory fragmentation. Consider the out-of-place            |
//|       SparseCopyToSKSBuf() function in that case.                |
//+------------------------------------------------------------------+
void CAlglib::SparseConvertToSKS(CSparseMatrix &mat)
  {
   CSparse::SparseConvertToSKS(mat);
  }
//+------------------------------------------------------------------+
//| Out-of-place conversion to the SKS storage format: S0 is copied  |
//| into S1 and converted on-the-fly.                                |
//| INPUT PARAMETERS:                                                |
//|   S0 - sparse matrix in any storage format                       |
//| OUTPUT PARAMETERS:                                               |
//|   S1 - sparse matrix in SKS format                               |
//| NOTE: when S0 is already stored as SKS it is simply copied,      |
//|       without conversion.                                        |
//| NOTE: memory occupied by S1 is de-allocated before conversion    |
//|       starts. Many repeated conversions may therefore fragment   |
//|       memory; in that case prefer SparseCopyToSKSBuf(), which    |
//|       re-uses the memory held by S1 as much as possible.         |
//+------------------------------------------------------------------+
void CAlglib::SparseCopyToSKS(CSparseMatrix &src,CSparseMatrix &dst)
  {
   CSparse::SparseCopyToSKS(src,dst);
  }
//+------------------------------------------------------------------+
//| Out-of-place conversion to the SKS format with buffer reuse:     |
//| S0 is copied into S1 and converted on-the-fly, while memory      |
//| already allocated in S1 is reused to the maximum extent possible.|
//| INPUT PARAMETERS:                                                |
//|   S0 - sparse matrix in any storage format                       |
//| OUTPUT PARAMETERS:                                               |
//|   S1 - sparse matrix in SKS format                               |
//| NOTE: when S0 is already stored as SKS it is simply copied,      |
//|       without conversion.                                        |
//+------------------------------------------------------------------+
void CAlglib::SparseCopyToSKSBuf(CSparseMatrix &src,CSparseMatrix &dst)
  {
   CSparse::SparseCopyToSKSBuf(src,dst);
  }
//+------------------------------------------------------------------+
//| Returns the storage format used by a sparse matrix.              |
//| INPUT PARAMETERS:                                                |
//|   S - sparse matrix                                              |
//| RESULT: storage format code:                                     |
//|   0 - Hash-table                                                 |
//|   1 - CRS (compressed row storage)                               |
//|   2 - SKS (skyline)                                              |
//| NOTE: future versions of ALGLIB may introduce additional sparse  |
//|       storage formats.                                           |
//+------------------------------------------------------------------+
int CAlglib::SparseGetMatrixType(CSparseMatrix &mat)
  {
   return CSparse::SparseGetMatrixType(mat);
  }
//+------------------------------------------------------------------+
//| Checks the matrix storage format.                                |
//| INPUT PARAMETERS:                                                |
//|   S - sparse matrix                                              |
//| RESULT: True when the matrix uses the Hash-table representation, |
//|         False otherwise.                                         |
//+------------------------------------------------------------------+
bool CAlglib::SparseIsHash(CSparseMatrix &mat)
  {
   return CSparse::SparseIsHash(mat);
  }
//+------------------------------------------------------------------+
//| Checks the matrix storage format.                                |
//| INPUT PARAMETERS:                                                |
//|   S - sparse matrix                                              |
//| RESULT: True when the matrix uses the CRS representation,        |
//|         False otherwise.                                         |
//+------------------------------------------------------------------+
bool CAlglib::SparseIsCRS(CSparseMatrix &mat)
  {
   return CSparse::SparseIsCRS(mat);
  }
//+------------------------------------------------------------------+
//| Checks the matrix storage format.                                |
//| INPUT PARAMETERS:                                                |
//|   S - sparse matrix                                              |
//| RESULT: True when the matrix uses the SKS (skyline)              |
//|         representation, False otherwise.                         |
//+------------------------------------------------------------------+
bool CAlglib::SparseIsSKS(CSparseMatrix &mat)
  {
   return CSparse::SparseIsSKS(mat);
  }
//+------------------------------------------------------------------+
//| Releases all memory held by a sparse matrix. After this call the |
//| sparse matrix structure becomes unusable.                        |
//| OUTPUT PARAMETERS:                                               |
//|   S - sparse matrix to delete                                    |
//+------------------------------------------------------------------+
void CAlglib::SparseFree(CSparseMatrix &mat)
  {
   CSparse::SparseFree(mat);
  }
//+------------------------------------------------------------------+
//| Returns the number of rows of a sparse matrix.                   |
//| RESULT: row count of the matrix.                                 |
//+------------------------------------------------------------------+
int CAlglib::SparseGetNRows(CSparseMatrix &mat)
  {
   return CSparse::SparseGetNRows(mat);
  }
//+------------------------------------------------------------------+
//| Returns the number of columns of a sparse matrix.                |
//| RESULT: column count of the matrix.                              |
//+------------------------------------------------------------------+
int CAlglib::SparseGetNCols(CSparseMatrix &mat)
  {
   return CSparse::SparseGetNCols(mat);
  }
//+------------------------------------------------------------------+
//| Returns the number of strictly upper triangular non-zero         |
//| elements of the matrix. Elements are counted SYMBOLICALLY, i.e.  |
//| as entries of the sparse data structure - an entry whose         |
//| numerical value happens to be zero is still counted.             |
//| Cost depends on the storage format:                              |
//|   * hash-based matrices require a full pass over the hash table, |
//|     O(NNZ) where NNZ is the number of non-zero elements          |
//|   * CRS and SKS matrices are counted in O(N), N being the        |
//|     matrix size                                                  |
//| RESULT: number of non-zero entries strictly above the main       |
//|         diagonal.                                                |
//+------------------------------------------------------------------+
int CAlglib::SparseGetUpperCount(CSparseMatrix &mat)
  {
   return CSparse::SparseGetUpperCount(mat);
  }
//+------------------------------------------------------------------+
//| Returns the number of strictly lower triangular non-zero         |
//| elements of the matrix. Elements are counted SYMBOLICALLY, i.e.  |
//| as entries of the sparse data structure - an entry whose         |
//| numerical value happens to be zero is still counted.             |
//| Cost depends on the storage format:                              |
//|   * hash-based matrices require a full pass over the hash table, |
//|     O(NNZ) where NNZ is the number of non-zero elements          |
//|   * CRS and SKS matrices are counted in O(N), N being the        |
//|     matrix size                                                  |
//| RESULT: number of non-zero entries strictly below the main       |
//|         diagonal.                                                |
//+------------------------------------------------------------------+
int CAlglib::SparseGetLowerCount(CSparseMatrix &mat)
  {
   return CSparse::SparseGetLowerCount(mat);
  }
//+------------------------------------------------------------------+
//| LU decomposition of a general real matrix with row pivoting.     |
//| A is represented as A = P*L*U, where:                            |
//|   * L is a lower unitriangular matrix                            |
//|   * U is an upper triangular matrix                              |
//|   * P = P0*P1*...*PK, K=min(M,N)-1, with Pi being the            |
//|     permutation matrix for rows I and Pivots[I]                  |
//| This is a cache-oblivious implementation optimized for square    |
//| matrices. For rectangular inputs: M>>N is the best case, while   |
//| N>>M (small M, large N, matrix not fitting into the CPU cache)   |
//| is the worst one.                                                |
//| INPUT PARAMETERS:                                                |
//|   A - array[0..M-1, 0..N-1]                                      |
//|   M - number of rows in matrix A                                 |
//|   N - number of columns in matrix A                              |
//| OUTPUT PARAMETERS:                                               |
//|   A      - matrices L and U in compact form:                     |
//|            * L is stored strictly under the main diagonal        |
//|            * U is stored on and above the main diagonal          |
//|   Pivots - permutation matrix in compact form,                   |
//|            array[0..Min(M-1,N-1)]                                |
//+------------------------------------------------------------------+
void CAlglib::RMatrixLU(CMatrixDouble &a,const int rows,const int cols,
                        int &pivots[])
  {
   CTrFac::RMatrixLU(a,rows,cols,pivots);
  }
//+------------------------------------------------------------------+
//| LU decomposition of a general complex matrix with row pivoting.  |
//| A is represented as A = P*L*U, where:                            |
//|   * L is a lower unitriangular matrix                            |
//|   * U is an upper triangular matrix                              |
//|   * P = P0*P1*...*PK, K=min(M,N)-1, with Pi being the            |
//|     permutation matrix for rows I and Pivots[I]                  |
//| This is a cache-oblivious implementation optimized for square    |
//| matrices. For rectangular inputs: M>>N is the best case, while   |
//| N>>M (small M, large N, matrix not fitting into the CPU cache)   |
//| is the worst one.                                                |
//| INPUT PARAMETERS:                                                |
//|   A - array[0..M-1, 0..N-1]                                      |
//|   M - number of rows in matrix A                                 |
//|   N - number of columns in matrix A                              |
//| OUTPUT PARAMETERS:                                               |
//|   A      - matrices L and U in compact form:                     |
//|            * L is stored strictly under the main diagonal        |
//|            * U is stored on and above the main diagonal          |
//|   Pivots - permutation matrix in compact form,                   |
//|            array[0..Min(M-1,N-1)]                                |
//+------------------------------------------------------------------+
void CAlglib::CMatrixLU(CMatrixComplex &a,const int rows,const int cols,
                        int &pivots[])
  {
   CTrFac::CMatrixLU(a,rows,cols,pivots);
  }
//+------------------------------------------------------------------+
//| Cache-oblivious Cholesky decomposition.                          |
//| Computes the Cholesky decomposition of a Hermitian positive      |
//| definite matrix, representing A as A=U'*U or A=L*L', where X'    |
//| denotes conj(X^T).                                               |
//| INPUT PARAMETERS:                                                |
//|   A       - upper or lower triangle of the matrix to factorize,  |
//|             array with elements [0..N-1, 0..N-1]                 |
//|   N       - size of matrix A                                     |
//|   IsUpper - if IsUpper=True, A contains the upper triangle of a  |
//|             symmetric matrix, otherwise the lower one            |
//| OUTPUT PARAMETERS:                                               |
//|   A       - the factorization result. If IsUpper=True the upper  |
//|             triangle holds matrix U such that A = U'*U and the   |
//|             elements below the main diagonal are left untouched; |
//|             similarly when IsUpper=False.                        |
//| RESULT:                                                          |
//|   True when the matrix is positive-definite; False otherwise,    |
//|   in which case the contents of A are undefined.                 |
//+------------------------------------------------------------------+
bool CAlglib::HPDMatrixCholesky(CMatrixComplex &a,const int n,
                                const bool IsUpper)
  {
   return CTrFac::HPDMatrixCholesky(a,n,IsUpper);
  }
//+------------------------------------------------------------------+
//| Cache-oblivious Cholesky decomposition.                          |
//| Computes the Cholesky decomposition of a symmetric positive      |
//| definite matrix, representing A as A=U^T*U or A=L*L^T.           |
//| INPUT PARAMETERS:                                                |
//|   A       - upper or lower triangle of the matrix to factorize,  |
//|             array with elements [0..N-1, 0..N-1]                 |
//|   N       - size of matrix A                                     |
//|   IsUpper - if IsUpper=True, A contains the upper triangle of a  |
//|             symmetric matrix, otherwise the lower one            |
//| OUTPUT PARAMETERS:                                               |
//|   A       - the factorization result. If IsUpper=True the upper  |
//|             triangle holds matrix U such that A = U^T*U and the  |
//|             elements below the main diagonal are left untouched; |
//|             similarly when IsUpper=False.                        |
//| RESULT:                                                          |
//|   True when the matrix is positive-definite; False otherwise,    |
//|   in which case the contents of A are undefined.                 |
//+------------------------------------------------------------------+
bool CAlglib::SPDMatrixCholesky(CMatrixDouble &a,const int n,
                                const bool IsUpper)
  {
   return CTrFac::SPDMatrixCholesky(a,n,IsUpper);
  }
//+------------------------------------------------------------------+
//| Update of Cholesky decomposition: rank-1 update to original A.   |
//| This function uses internally allocated buffer which is not saved|
//| between subsequent calls. So, if you perform a lot of subsequent |
//| updates, we recommend you to use "buffered" version of this      |
//| function: SPDMatrixCholeskyUpdateAdd1Buf().                      |
//| INPUT PARAMETERS:                                                |
//|   A       - upper or lower Cholesky factor. array with elements  |
//|             [0..N-1, 0..N-1]. Exception is thrown if array size  |
//|             is too small.                                        |
//|   N       - size of matrix A, N>0                                |
//|   IsUpper - if IsUpper=True, then A contains upper Cholesky      |
//|             factor; otherwise A contains a lower one.            |
//|   U       - array[N], rank-1 update to A: A_mod = A + u*u'       |
//|             Exception is thrown if array size is too small.      |
//| OUTPUT PARAMETERS:                                               |
//|   A       - updated factorization. If IsUpper=True, then the     |
//|             upper triangle contains matrix U, and the elements   |
//|             below the main diagonal are not modified. Similarly  |
//|             if IsUpper=False.                                    |
//| NOTE: this function always succeeds, so it does not return       |
//|       completion code                                            |
//| NOTE: this function checks sizes of input arrays, but it does NOT|
//|       checks for presence of infinities or NAN's.                |
//+------------------------------------------------------------------+
void CAlglib::SPDMatrixCholeskyUpdateAdd1(CMatrixDouble &a,int n,
bool IsUpper,CRowDouble &u)
{
CTrFac::SPDMatrixCholeskyUpdateAdd1(a,n,IsUpper,u);
}
//+------------------------------------------------------------------+
//| Update of Cholesky decomposition: "fixing" some variables.       |
//| This function uses internally allocated buffer which is not saved|
//| between subsequent calls. So, if you perform a lot of subsequent |
//| updates, we recommend you to use "buffered" version of this      |
//| function: SPDMatrixCholeskyUpdateFixBuf().                       |
//| "FIXING" EXPLAINED:                                              |
//|   Suppose we have N*N positive definite matrix A. "Fixing" some  |
//| variable means filling corresponding row/column of A by zeros,   |
//| and setting diagonal element to 1.                               |
//|   For example, if we fix 2nd variable in 4 * 4 matrix A, it      |
//| becomes Af:                                                      |
//|   (A00 A01 A02 A03)      (Af00 0 Af02 Af03)                      |
//|   (A10 A11 A12 A13)      ( 0   1  0    0  )                      |
//|   (A20 A21 A22 A23)  =>  (Af20 0 Af22 Af23)                      |
//|   (A30 A31 A32 A33)      (Af30 0 Af32 Af33)                      |
//|   If we have Cholesky decomposition of A, it must be recalculated|
//| after variables were fixed. However, it is possible to use       |
//| efficient algorithm, which needs O(K*N^2) time to "fix" K        |
//| variables, given Cholesky decomposition of original, "unfixed"   |
//| A.                                                               |
//| INPUT PARAMETERS:                                                |
//|   A       - upper or lower Cholesky factor. Array with elements  |
//|             [0..N - 1, 0..N - 1]. Exception is thrown if array   |
//|             size is too small.                                   |
//|   N       - size of matrix A, N > 0                              |
//|   IsUpper - if IsUpper = True, then A contains upper Cholesky    |
//|             factor; otherwise A contains a lower one.            |
//|   Fix     - array[N], I-th element is True if I-th variable      |
//|             must be fixed. Exception is thrown if array size is  |
//|             too small.                                           |
//| OUTPUT PARAMETERS:                                               |
//|   A       - updated factorization. If IsUpper=True, then the     |
//|             upper triangle contains matrix U, and the elements   |
//|             below the main diagonal are not modified.            |
//|             Similarly, if IsUpper=False.                         |
//| NOTE: this function always succeeds, so it does not return       |
//|       completion code                                            |
//| NOTE: this function checks sizes of input arrays, but it does NOT|
//|       checks for presence of infinities or NAN's.                |
//| NOTE: this function is efficient only for moderate amount of     |
//|       updated variables - say, 0.1*N or 0.3*N. For larger amount |
//|       of variables it will still work, but you may get better    |
//|       performance with straightforward Cholesky.                 |
//+------------------------------------------------------------------+
void CAlglib::SPDMatrixCholeskyUpdateFix(CMatrixDouble &a,int n,
bool IsUpper,bool &fix[])
{
CTrFac::SPDMatrixCholeskyUpdateFix(a,n,IsUpper,fix);
}
//+------------------------------------------------------------------+
//| Update of Cholesky decomposition: rank-1 update to original A.   |
//| "Buffered" version which uses a preallocated buffer that is kept |
//| between subsequent function calls.                               |
//| See comments for SPDMatrixCholeskyUpdateAdd1() for more          |
//| information.                                                     |
//| INPUT PARAMETERS:                                                |
//|   A       - upper or lower Cholesky factor, array with elements  |
//|             [0..N-1, 0..N-1]. Exception is thrown if array size  |
//|             is too small.                                        |
//|   N       - size of matrix A, N > 0                              |
//|   IsUpper - if IsUpper=True, A contains the upper Cholesky       |
//|             factor; otherwise the lower one.                     |
//|   U       - array[N], rank-1 update to A: A_mod = A + u*u'.      |
//|             Exception is thrown if array size is too small.      |
//|   BufR    - possibly preallocated buffer; automatically resized  |
//|             when needed. Reusing it across many subsequent       |
//|             decompositions is recommended.                       |
//| OUTPUT PARAMETERS:                                               |
//|   A       - updated factorization. If IsUpper=True the upper     |
//|             triangle contains matrix U, and the elements below   |
//|             the main diagonal are not modified; similarly when   |
//|             IsUpper=False.                                       |
//+------------------------------------------------------------------+
void CAlglib::SPDMatrixCholeskyUpdateAdd1Buf(CMatrixDouble &a,int size,
                                             bool IsUpper,CRowDouble &update,
                                             CRowDouble &bufr)
  {
   CTrFac::SPDMatrixCholeskyUpdateAdd1Buf(a,size,IsUpper,update,bufr);
  }
//+------------------------------------------------------------------+
//| Update of Cholesky decomposition: "fixing" some variables.       |
//| "Buffered" version which uses a preallocated buffer that is kept |
//| between subsequent function calls. See comments for              |
//| SPDMatrixCholeskyUpdateFix() for more information.               |
//| INPUT PARAMETERS:                                                |
//|   A       - upper or lower Cholesky factor, array with elements  |
//|             [0..N-1, 0..N-1]. Exception is thrown if array size  |
//|             is too small.                                        |
//|   N       - size of matrix A, N > 0                              |
//|   IsUpper - if IsUpper=True, A contains the upper Cholesky       |
//|             factor; otherwise the lower one.                     |
//|   Fix     - array[N], the I-th element is True when the I-th     |
//|             variable must be fixed. Exception is thrown if the   |
//|             array size is too small.                             |
//|   BufR    - possibly preallocated buffer; automatically resized  |
//|             when needed. Reusing it across many subsequent       |
//|             decompositions is recommended.                       |
//| OUTPUT PARAMETERS:                                               |
//|   A       - updated factorization. If IsUpper=True the upper     |
//|             triangle contains matrix U, and the elements below   |
//|             the main diagonal are not modified; similarly when   |
//|             IsUpper=False.                                       |
//+------------------------------------------------------------------+
void CAlglib::SPDMatrixCholeskyUpdateFixBuf(CMatrixDouble &a,int size,
                                            bool IsUpper,bool &fix[],
                                            CRowDouble &bufr)
  {
   CTrFac::SPDMatrixCholeskyUpdateFixBuf(a,size,IsUpper,fix,bufr);
  }
//+------------------------------------------------------------------+
//| Sparse LU decomposition with column pivoting for sparsity and    |
//| row pivoting for stability. Input must be a square sparse matrix |
//| stored in CRS format.                                            |
//| The algorithm computes the LU decomposition of a general square  |
//| matrix (rectangular ones are not supported), representing A as   |
//| A = P*L*U*Q, where:                                              |
//|   * L is a lower unitriangular matrix                            |
//|   * U is an upper triangular matrix                              |
//|   * P = P0*P1*...*PK, K = N-1, Pi - permutation matrix for       |
//|     I and P[I]                                                   |
//|   * Q = QK*...*Q1*Q0, K = N-1, Qi - permutation matrix for       |
//|     I and Q[I]                                                   |
//| Columns are pivoted for higher sparsity, then rows are pivoted   |
//| for stability (larger element at the diagonal).                  |
//| INPUT PARAMETERS:                                                |
//|   A         - sparse NxN matrix in CRS format. An exception is   |
//|               generated for non-CRS or non-square input.         |
//|   PivotType - pivoting strategy:                                 |
//|               * 0 - best pivoting available (2 in the current    |
//|                     version)                                     |
//|               * 1 - row-only pivoting (NOT RECOMMENDED)          |
//|               * 2 - complete pivoting, produces the most sparse  |
//|                     outputs                                      |
//| OUTPUT PARAMETERS:                                               |
//|   A         - factorization result, matrices L and U stored in   |
//|               compact CRS form:                                  |
//|               * lower unitriangular L strictly under the main    |
//|                 diagonal                                         |
//|               * upper triangular U on and above the main         |
//|                 diagonal                                         |
//|   P         - row permutation matrix in compact form, array[N]   |
//|   Q         - column permutation matrix in compact form,         |
//|               array[N]                                           |
//| This function always succeeds, i.e. it ALWAYS returns a valid    |
//| factorization, but for convenience it also returns a boolean     |
//| value which helps to detect symbolically degenerate matrices:    |
//|   * TRUE  - matrix was factorized AND is symbolically            |
//|             non-degenerate                                       |
//|   * FALSE - matrix was factorized but U has strictly zero        |
//|             elements at the diagonal (the factorization is       |
//|             returned anyway)                                     |
//+------------------------------------------------------------------+
bool CAlglib::SparseLU(CSparseMatrix &a,int pivot_type,CRowInt &p,
                       CRowInt &q)
  {
   return CTrFac::SparseLU(a,pivot_type,p,q);
  }
//+------------------------------------------------------------------+
//| Sparse Cholesky decomposition for a skyline matrix, performed    |
//| in-place without allocating additional storage.                  |
//| Computes the Cholesky decomposition of a symmetric positive      |
//| definite sparse matrix, representing A as A = U^T*U or A = L*L^T.|
//| This function is very efficient for low-profile matrices         |
//| (average bandwidth ~5-10 elements). For larger matrices the      |
//| supernodal Cholesky decomposition is recommended:                |
//| SparseCholeskyP() or                                             |
//| SparseCholeskyAnalyze()/SparseCholeskyFactorize().               |
//| INPUT PARAMETERS:                                                |
//|   A       - sparse matrix in skyline storage (SKS) format        |
//|   N       - size of matrix A (can be smaller than the actual     |
//|             size of A)                                           |
//|   IsUpper - if IsUpper=True the factorization is performed on    |
//|             the upper triangle; the other triangle is ignored    |
//|             (it may contain some data, but is not changed)       |
//| OUTPUT PARAMETERS:                                               |
//|   A       - factorization result, stored in SKS. If IsUpper=True |
//|             the upper triangle contains matrix U such that       |
//|             A = U^T*U and the lower triangle is unchanged;       |
//|             similarly, when IsUpper=False matrix L is returned   |
//|             and A = L*(L^T).                                     |
//| Note that THIS function does not permute rows to reduce          |
//| bandwidth.                                                       |
//| RESULT:                                                          |
//|   True when the matrix is positive-definite; False otherwise,    |
//|   in which case the contents of A are undefined.                 |
//| NOTE: for performance reasons this function does NOT check that  |
//|       the input matrix contains only finite values. It is your   |
//|       responsibility to make sure there are no infinite or NAN   |
//|       values in the matrix.                                      |
//+------------------------------------------------------------------+
bool CAlglib::SparseCholeskySkyLine(CSparseMatrix &a,int size,bool IsUpper)
  {
   return CTrFac::SparseCholeskySkyLine(a,size,IsUpper);
  }
//+------------------------------------------------------------------+
//| Sparse Cholesky decomposition for a matrix stored in any sparse  |
//| storage format, without row/column permutation.                  |
//| This is the most convenient (fewest parameters), although the    |
//| least efficient, version of sparse Cholesky. Internally it:      |
//|   * calls SparseCholeskyAnalyze() to perform the symbolic        |
//|     analysis phase with no permutation configured                |
//|   * calls SparseCholeskyFactorize() to perform the numerical     |
//|     phase of the factorization                                   |
//| Alternatives that may give better performance:                   |
//|   * SparseCholeskyP(), which selects the best pivoting available |
//|     and almost always improves sparsity and cache locality       |
//|   * calling SparseCholeskyAnalyze() and                          |
//|     SparseCholeskyFactorize() directly, which may speed up       |
//|     repetitive factorizations with the same sparsity pattern.    |
//|     The latter also allows LDLT factorization of an indefinite   |
//|     matrix (with strictly diagonal D, known to be stable only in |
//|     a few special cases such as quasi-definite matrices).        |
//| INPUT PARAMETERS:                                                |
//|   A       - square NxN sparse matrix in any storage format       |
//|   IsUpper - if IsUpper=True the factorization is performed on    |
//|             the upper triangle; the other triangle is ignored on |
//|             input and dropped on output. Similarly, when         |
//|             IsUpper=False the lower triangle is processed.       |
//| OUTPUT PARAMETERS:                                               |
//|   A       - factorization result in CRS format:                  |
//|             * if IsUpper=True the upper triangle contains matrix |
//|               U such that A = U^T*U and the lower triangle is    |
//|               empty                                              |
//|             * similarly, if IsUpper=False the lower triangular L |
//|               is returned and A = L*(L^T)                        |
//| Note that THIS function does not permute rows to reduce fill-in. |
//| RESULT:                                                          |
//|   True when the matrix is positive-definite; False otherwise,    |
//|   in which case the contents of A are undefined.                 |
//| NOTE: for performance reasons this function does NOT check that  |
//|       the input matrix contains only finite values. It is your   |
//|       responsibility to make sure there are no infinite or NAN   |
//|       values in the matrix.                                      |
//+------------------------------------------------------------------+
bool CAlglib::SparseCholesky(CSparseMatrix &a,bool IsUpper)
  {
   return CTrFac::SparseCholesky(a,IsUpper);
  }
//+------------------------------------------------------------------+
//| Sparse Cholesky decomposition for a matrix stored in any sparse  |
//| storage format, with a performance-enhancing permutation of      |
//| rows/columns.                                                    |
//| The present version is configured to perform a supernodal        |
//| permutation with sparsity-reducing ordering.                     |
//| This function is a wrapper around the generic sparse             |
//| decomposition functions that internally:                         |
//|   * calls SparseCholeskyAnalyze() to perform the symbolic        |
//|     analysis phase with the best available permutation           |
//|     configured                                                   |
//|   * calls SparseCholeskyFactorize() to perform the numerical     |
//|     phase of the factorization                                   |
//| NOTE: using SparseCholeskyAnalyze() and                          |
//|       SparseCholeskyFactorize() directly may speed up repetitive |
//|       factorizations with the same sparsity pattern. It also     |
//|       allows LDLT factorization of an indefinite matrix - a      |
//|       factorization with strictly diagonal D, known to be stable |
//|       only in a few special cases, such as quasi-definite        |
//|       matrices.                                                  |
//| INPUT PARAMETERS:                                                |
//|   A       - square NxN sparse matrix in any storage format       |
//|   IsUpper - if IsUpper=True the factorization is performed on    |
//|             the upper triangle; the other triangle is ignored on |
//|             input and dropped on output. Similarly, when         |
//|             IsUpper=False the lower triangle is processed.       |
//| OUTPUT PARAMETERS:                                               |
//|   A       - factorization result in CRS format:                  |
//|             * if IsUpper=True the upper triangle contains matrix |
//|               U such that A = U^T*U and the lower triangle is    |
//|               empty                                              |
//|             * similarly, if IsUpper=False the lower triangular L |
//|               is returned and A = L*(L^T)                        |
//|   P       - row/column permutation, a product of                 |
//|             P0*P1*...*Pk, k = N-1, with Pi being the permutation |
//|             of rows/cols I and P[I]                              |
//| RESULT:                                                          |
//|   True when the matrix is positive-definite; False otherwise,    |
//|   in which case the contents of A are undefined.                 |
//| NOTE: for performance reasons this function does NOT check that  |
//|       the input matrix contains only finite values. It is your   |
//|       responsibility to make sure there are no infinite or NAN   |
//|       values in the matrix.                                      |
//+------------------------------------------------------------------+
bool CAlglib::SparseCholeskyP(CSparseMatrix &a,bool IsUpper,CRowInt &perm)
  {
   return CTrFac::SparseCholeskyP(a,IsUpper,perm);
  }
//+------------------------------------------------------------------+
//| Sparse Cholesky/LDLT decomposition: symbolic analysis phase. |
//| This function is a part of the 'expert' sparse Cholesky API: |
//| * SparseCholeskyAnalyze(), that performs symbolic analysis |
//| phase and loads matrix to be factorized into internal |
//| storage |
//| * SparseCholeskySetModType(), that allows to use modified |
//| Cholesky/LDLT with lower bounds on pivot magnitudes and |
//| additional overflow safeguards |
//| * SparseCholeskyFactorize(), that performs numeric |
//| factorization using precomputed symbolic analysis and |
//| internally stored matrix - and outputs result |
//| * SparseCholeskyReload(), that reloads one more matrix with |
//| same sparsity pattern into internal storage so one may |
//| reuse previously allocated temporaries and previously |
//| performed symbolic analysis |
//| This specific function performs preliminary analysis of the |
//| Cholesky/LDLT factorization. It allows to choose different |
//| permutation types and to choose between classic Cholesky and |
//| indefinite LDLT factorization(the latter is computed with |
//| strictly diagonal D, i.e. without Bunch-Kauffman pivoting). |
//| NOTE: L*D*LT family of factorization may be used to factorize |
//| indefinite matrices. However, numerical stability is |
//| guaranteed ONLY for a class of quasi - definite matrices. |
//| NOTE: all internal processing is performed with lower triangular |
//| matrices stored in CRS format. Any other storage formats |
//| and/or upper triangular storage means that one format |
//| conversion and/or one transposition will be performed |
//| internally for the analysis and factorization phases. Thus,|
//| highest performance is achieved when input is a lower |
//| triangular CRS matrix. |
//| INPUT PARAMETERS: |
//| A - sparse square matrix in any sparse storage format. |
//| IsUpper - whether upper or lower triangle is decomposed (the |
//| other one is ignored). |
//| FactType - factorization type: |
//| * 0 for traditional Cholesky of SPD matrix |
//| * 1 for LDLT decomposition with strictly diagonal D, which |
//| may have non - positive entries. |
//| PermType - permutation type: |
//| * -1 for absence of permutation |
//| * 0 for best fill - in reducing permutation available, |
//| which is 3 in the current version |
//| * 1 for supernodal ordering(improves locality and |
//| performance, does NOT change fill - in factor) |
//| * 2 for original AMD ordering |
//| * 3 for improved AMD(approximate minimum degree) |
//| ordering with better handling of matrices with dense |
//| rows/columns |
//| OUTPUT PARAMETERS: |
//| Analysis - contains: |
//| * symbolic analysis of the matrix structure which will be |
//| used later to guide numerical factorization. |
//| * specific numeric values loaded into internal memory |
//| waiting for the factorization to be performed |
//| This function fails if and only if the matrix A is symbolically |
//| degenerate i.e. has diagonal element which is exactly zero. In |
//| such case False is returned, contents of Analysis object is |
//| undefined. |
//+------------------------------------------------------------------+
bool CAlglib::sparsecholeskyanalyze(CSparseMatrix &a,bool IsUpper,
                                    int facttype,int permtype,
                                    CSparseDecompositionAnalysis &analysis)
  {
//--- symbolic analysis phase: forward to the computational core;
//--- false is returned only for a symbolically degenerate matrix
//--- (exact zero on the diagonal), see the banner comment above
   bool ok=CTrFac::SparseCholeskyAnalyze(a,IsUpper,facttype,permtype,analysis);
   return(ok);
  }
//+------------------------------------------------------------------+
//| Sparse Cholesky decomposition: numerical analysis phase. |
//| This function is a part of the 'expert' sparse Cholesky API: |
//| * SparseCholeskyAnalyze(), that performs symbolic analysis |
//| phase and loads matrix to be factorized into internal |
//| storage |
//| * SparseCholeskySetModType(), that allows to use modified |
//| Cholesky/LDLT with lower bounds on pivot magnitudes and |
//| additional overflow safeguards |
//| * SparseCholeskyFactorize(), that performs numeric |
//| factorization using precomputed symbolic analysis and |
//| internally stored matrix - and outputs result |
//| * SparseCholeskyReload(), that reloads one more matrix with |
//| same sparsity pattern into internal storage so one may |
//| reuse previously allocated temporaries and previously |
//| performed symbolic analysis |
//| Depending on settings specified during SparseCholeskyAnalyze() |
//| call it may produce classic Cholesky or L*D*LT decomposition |
//| (with strictly diagonal D), without permutation or with |
//| performance - enhancing permutation P. |
//| NOTE: all internal processing is performed with lower triangular |
//| matrices stored in CRS format. Any other storage formats |
//| and/or upper triangular storage means that one format |
//| conversion and/or one transposition will be performed |
//| internally for the analysis and factorization phases. Thus,|
//| highest performance is achieved when input is a lower |
//| triangular CRS matrix, and lower triangular output is |
//| requested. |
//| NOTE: L*D*LT family of factorization may be used to factorize |
//| indefinite matrices. However, numerical stability is |
//| guaranteed ONLY for a class of quasi - definite matrices. |
//| INPUT PARAMETERS: |
//| Analysis - prior analysis with internally stored matrix |
//| which will be factorized |
//| NeedUpper - whether upper triangular or lower triangular |
//| output is needed |
//| OUTPUT PARAMETERS: |
//| A - Cholesky decomposition of A stored in lower |
//| triangular CRS format, i.e. A = L * L' (or upper|
//| triangular CRS, with A = U'*U, depending on |
//| NeedUpper parameter). |
//| D - array[N], diagonal factor. If no diagonal factor|
//| was required during analysis phase, still |
//| returned but filled with 1's |
//| P - array[N], pivots. Permutation matrix P is a |
//| product of P(0) * P(1) * ...*P(N - 1), |
//| where P(i) is a permutation of row/col I and |
//| P[I] (with P[I] >= I). |
//| If no permutation was requested during analysis phase, still |
//| returned but filled with identity permutation. |
//| The function returns True when factorization resulted in |
//| nondegenerate matrix. False is returned when factorization fails |
//| (Cholesky factorization of indefinite matrix) or LDLT |
//| factorization has exactly zero elements at the diagonal. In the |
//| latter case contents of A, D and P is undefined. |
//| The analysis object is not changed during the factorization. |
//| Subsequent calls to SparseCholeskyFactorize() will result in same|
//| factorization being performed one more time. |
//+------------------------------------------------------------------+
bool CAlglib::SparseCholeskyFactorize(CSparseDecompositionAnalysis &analysis,
                                      bool needupper,CSparseMatrix &a,
                                      CRowDouble &d,CRowInt &p)
  {
//--- numeric phase: factorize the matrix held inside the analysis
//--- object; outputs factor A, diagonal D and permutation P
   bool ok=CTrFac::SparseCholeskyFactorize(analysis,needupper,a,d,p);
   return(ok);
  }
//+------------------------------------------------------------------+
//| Sparse Cholesky decomposition: update internally stored matrix |
//| with another one with exactly same sparsity pattern. |
//| This function is a part of the 'expert' sparse Cholesky API: |
//| * SparseCholeskyAnalyze(), that performs symbolic analysis |
//| phase and loads matrix to be factorized into internal |
//| storage |
//| * SparseCholeskySetModType(), that allows to use modified |
//| Cholesky/LDLT with lower bounds on pivot magnitudes and |
//| additional overflow safeguards |
//| * SparseCholeskyFactorize(), that performs numeric |
//| factorization using precomputed symbolic analysis and |
//| internally stored matrix - and outputs result |
//| * SparseCholeskyReload(), that reloads one more matrix with |
//| same sparsity pattern into internal storage so one may |
//| reuse previously allocated temporaries and previously |
//| performed symbolic analysis |
//| This specific function replaces internally stored numerical |
//| values with ones from another sparse matrix (but having exactly |
//| same sparsity pattern as one that was used for initial |
//| SparseCholeskyAnalyze() call). |
//| NOTE: all internal processing is performed with lower triangular |
//| matrices stored in CRS format. Any other storage formats |
//| and/or upper triangular storage means that one format |
//| conversion and/or one transposition will be performed |
//| internally for the analysis and factorization phases. Thus,|
//| highest performance is achieved when input is a lower |
//| triangular CRS matrix. |
//| INPUT PARAMETERS: |
//| Analysis - analysis object |
//| A - sparse square matrix in any sparse storage format. |
//| It MUST have exactly same sparsity pattern as that |
//| of the matrix that was passed to |
//| SparseCholeskyAnalyze(). Any difference (missing |
//| elements or additional elements) may result in |
//| unpredictable and undefined behavior - an algorithm|
//| may fail due to memory access violation. |
//| IsUpper - whether upper or lower triangle is decomposed (the |
//| other one is ignored). |
//| OUTPUT PARAMETERS: |
//| Analysis - contains: |
//| * symbolic analysis of the matrix structure which will be |
//| used later to guide numerical factorization. |
//| * specific numeric values loaded into internal memory |
//| waiting for the factorization to be performed |
//+------------------------------------------------------------------+
void CAlglib::SparseCholeskyReload(CSparseDecompositionAnalysis &analysis,
CSparseMatrix &a,bool IsUpper)
{
//--- thin wrapper: replace numeric values stored in the analysis object
//--- with ones from A (same sparsity pattern); see banner comment above
CTrFac::SparseCholeskyReload(analysis,a,IsUpper);
}
//+------------------------------------------------------------------+
//| Estimate of a matrix condition number (1-norm) |
//| The algorithm calculates a lower bound of the condition number. |
//| In this case, the algorithm does not return a lower bound of the |
//| condition number, but an inverse number (to avoid an overflow in |
//| case of a singular matrix). |
//| Input parameters: |
//| A - matrix. Array whose indexes range within |
//| [0..N-1, 0..N-1]. |
//| N - size of matrix A. |
//| Result: 1/LowerBound(cond(A)) |
//| NOTE: |
//| if k(A) is very large, then matrix is assumed degenerate, |
//| k(A)=INF, 0.0 is returned in such cases. |
//+------------------------------------------------------------------+
double CAlglib::RMatrixRCond1(CMatrixDouble &a,const int n)
  {
//--- delegate to the condition-number estimation core
   double res=CRCond::RMatrixRCond1(a,n);
//--- reciprocal of the 1-norm condition number lower bound
   return(res);
  }
//+------------------------------------------------------------------+
//| Estimate of a matrix condition number (infinity-norm). |
//| The algorithm calculates a lower bound of the condition number. |
//| In this case, the algorithm does not return a lower bound of the |
//| condition number, but an inverse number (to avoid an overflow in |
//| case of a singular matrix). |
//| Input parameters: |
//| A - matrix. Array whose indexes range within |
//| [0..N-1, 0..N-1]. |
//| N - size of matrix A. |
//| Result: 1/LowerBound(cond(A)) |
//| NOTE: |
//| if k(A) is very large, then matrix is assumed degenerate, |
//| k(A)=INF, 0.0 is returned in such cases. |
//+------------------------------------------------------------------+
double CAlglib::RMatrixRCondInf(CMatrixDouble &a,const int n)
  {
//--- delegate to the condition-number estimation core
   double res=CRCond::RMatrixRCondInf(a,n);
//--- reciprocal of the inf-norm condition number lower bound
   return(res);
  }
//+------------------------------------------------------------------+
//| Condition number estimate of a symmetric positive definite |
//| matrix. |
//| The algorithm calculates a lower bound of the condition number. |
//| In this case, the algorithm does not return a lower bound of the |
//| condition number, but an inverse number (to avoid an overflow in |
//| case of a singular matrix). |
//| It should be noted that 1-norm and inf-norm of condition numbers |
//| of symmetric matrices are equal, so the algorithm doesn't take |
//| into account the differences between these types of norms. |
//| Input parameters: |
//| A - symmetric positive definite matrix which is given|
//| by its upper or lower triangle depending on the |
//| value of IsUpper. Array with elements |
//| [0..N-1, 0..N-1]. |
//| N - size of matrix A. |
//| IsUpper - storage format. |
//| Result: |
//| 1/LowerBound(cond(A)), if matrix A is positive definite, |
//| -1, if matrix A is not positive definite, and its condition |
//| number could not be found by this algorithm. |
//| NOTE: |
//| if k(A) is very large, then matrix is assumed degenerate, |
//| k(A)=INF, 0.0 is returned in such cases. |
//+------------------------------------------------------------------+
double CAlglib::SPDMatrixRCond(CMatrixDouble &a,const int n,
                               const bool IsUpper)
  {
//--- delegate to the condition-number estimation core;
//--- -1 is returned when A is not positive definite (see banner above)
   double res=CRCond::SPDMatrixRCond(a,n,IsUpper);
   return(res);
  }
//+------------------------------------------------------------------+
//| Triangular matrix: estimate of a condition number (1-norm) |
//| The algorithm calculates a lower bound of the condition number. |
//| In this case, the algorithm does not return a lower bound of the |
//| condition number, but an inverse number (to avoid an overflow in |
//| case of a singular matrix). |
//| Input parameters: |
//| A - matrix. Array[0..N-1, 0..N-1]. |
//| N - size of A. |
//| IsUpper - True, if the matrix is upper triangular. |
//| IsUnit - True, if the matrix has a unit diagonal. |
//| Result: 1/LowerBound(cond(A)) |
//| NOTE: |
//| if k(A) is very large, then matrix is assumed degenerate, |
//| k(A)=INF, 0.0 is returned in such cases. |
//+------------------------------------------------------------------+
double CAlglib::RMatrixTrRCond1(CMatrixDouble &a,const int n,
                                const bool IsUpper,const bool IsUnit)
  {
//--- triangular matrix, 1-norm: delegate to the estimation core
   double res=CRCond::RMatrixTrRCond1(a,n,IsUpper,IsUnit);
   return(res);
  }
//+-------------------------------------------------------------------+
//| Triangular matrix: estimate of a matrix condition number |
//| (infinity-norm). |
//| The algorithm calculates a lower bound of the condition number. In|
//| this case, the algorithm does not return a lower bound of the |
//| condition number, but an inverse number (to avoid an overflow in |
//| case of a singular matrix). |
//| Input parameters: |
//| A - matrix. Array whose indexes range within |
//| [0..N-1,0..N-1]. |
//| N - size of matrix A. |
//| IsUpper - True, if the matrix is upper triangular. |
//| IsUnit - True, if the matrix has a unit diagonal. |
//| Result: 1/LowerBound(cond(A)) |
//| NOTE: |
//| if k(A) is very large,then matrix is assumed degenerate, |
//| k(A)=INF, 0.0 is returned in such cases. |
//+-------------------------------------------------------------------+
double CAlglib::RMatrixTrRCondInf(CMatrixDouble &a,const int n,
                                  const bool IsUpper,const bool IsUnit)
  {
//--- triangular matrix, inf-norm: delegate to the estimation core
   double res=CRCond::RMatrixTrRCondInf(a,n,IsUpper,IsUnit);
   return(res);
  }
//+------------------------------------------------------------------+
//| Condition number estimate of a Hermitian positive definite |
//| matrix. |
//| The algorithm calculates a lower bound of the condition number. |
//| In this case, the algorithm does not return a lower bound of the |
//| condition number, but an inverse number (to avoid an overflow in |
//| case of a singular matrix). |
//| It should be noted that 1-norm and inf-norm of condition numbers |
//| of symmetric matrices are equal, so the algorithm doesn't take |
//| into account the differences between these types of norms. |
//| Input parameters: |
//| A - Hermitian positive definite matrix which is given|
//| by its upper or lower triangle depending on the |
//| value of IsUpper. Array with elements |
//| [0..N-1, 0..N-1]. |
//| N - size of matrix A. |
//| IsUpper - storage format. |
//| Result: |
//| 1/LowerBound(cond(A)), if matrix A is positive definite, |
//| -1, if matrix A is not positive definite, and its condition |
//| number could not be found by this algorithm. |
//| NOTE: |
//| if k(A) is very large, then matrix is assumed degenerate, |
//| k(A)=INF, 0.0 is returned in such cases. |
//+------------------------------------------------------------------+
double CAlglib::HPDMatrixRCond(CMatrixComplex &a,const int n,
                               const bool IsUpper)
  {
//--- Hermitian positive definite case: delegate to the estimation
//--- core; -1 is returned when A is not positive definite
   double res=CRCond::HPDMatrixRCond(a,n,IsUpper);
   return(res);
  }
//+------------------------------------------------------------------+
//| Estimate of a matrix condition number (1-norm) |
//| The algorithm calculates a lower bound of the condition number. |
//| In this case, the algorithm does not return a lower bound of the |
//| condition number, but an inverse number (to avoid an overflow in |
//| case of a singular matrix). |
//| Input parameters: |
//| A - matrix. Array whose indexes range within |
//| [0..N-1, 0..N-1]. |
//| N - size of matrix A. |
//| Result: 1/LowerBound(cond(A)) |
//| NOTE: |
//| if k(A) is very large, then matrix is assumed degenerate, |
//| k(A)=INF, 0.0 is returned in such cases. |
//+------------------------------------------------------------------+
double CAlglib::CMatrixRCond1(CMatrixComplex &a,const int n)
  {
//--- complex general matrix, 1-norm: delegate to the estimation core
   double res=CRCond::CMatrixRCond1(a,n);
   return(res);
  }
//+------------------------------------------------------------------+
//| Estimate of a matrix condition number (infinity-norm). |
//| The algorithm calculates a lower bound of the condition number. |
//| In this case, the algorithm does not return a lower bound of the |
//| condition number, but an inverse number (to avoid an overflow in |
//| case of a singular matrix). |
//| Input parameters: |
//| A - matrix. Array whose indexes range within |
//| [0..N-1, 0..N-1]. |
//| N - size of matrix A. |
//| Result: 1/LowerBound(cond(A)) |
//| NOTE: |
//| if k(A) is very large, then matrix is assumed degenerate, |
//| k(A)=INF, 0.0 is returned in such cases. |
//+------------------------------------------------------------------+
double CAlglib::CMatrixRCondInf(CMatrixComplex &a,const int n)
  {
//--- complex general matrix, inf-norm: delegate to the estimation core
   double res=CRCond::CMatrixRCondInf(a,n);
   return(res);
  }
//+------------------------------------------------------------------+
//| Estimate of the condition number of a matrix given by its LU |
//| decomposition (1-norm) |
//| The algorithm calculates a lower bound of the condition number. |
//| In this case, the algorithm does not return a lower bound of the |
//| condition number, but an inverse number (to avoid an overflow in |
//| case of a singular matrix). |
//| Input parameters: |
//| LUA - LU decomposition of a matrix in compact form.|
//| Output of the RMatrixLU subroutine. |
//| N - size of matrix A. |
//| Result: 1/LowerBound(cond(A)) |
//| NOTE: |
//| if k(A) is very large, then matrix is assumed degenerate, |
//| k(A)=INF, 0.0 is returned in such cases. |
//+------------------------------------------------------------------+
double CAlglib::RMatrixLURCond1(CMatrixDouble &lua,const int n)
  {
//--- estimate from a precomputed LU factorization (1-norm)
   double res=CRCond::RMatrixLURCond1(lua,n);
   return(res);
  }
//+------------------------------------------------------------------+
//| Estimate of the condition number of a matrix given by its LU |
//| decomposition (infinity norm). |
//| The algorithm calculates a lower bound of the condition number. |
//| In this case, the algorithm does not return a lower bound of the |
//| condition number, but an inverse number (to avoid an overflow in |
//| case of a singular matrix). |
//| Input parameters: |
//| LUA - LU decomposition of a matrix in compact form. |
//| Output of the RMatrixLU subroutine. |
//| N - size of matrix A. |
//| Result: 1/LowerBound(cond(A)) |
//| NOTE: |
//| if k(A) is very large, then matrix is assumed degenerate, |
//| k(A)=INF, 0.0 is returned in such cases. |
//+------------------------------------------------------------------+
double CAlglib::RMatrixLURCondInf(CMatrixDouble &lua,const int n)
  {
//--- estimate from a precomputed LU factorization (inf-norm)
   double res=CRCond::RMatrixLURCondInf(lua,n);
   return(res);
  }
//+------------------------------------------------------------------+
//| Condition number estimate of a symmetric positive definite matrix|
//| given by Cholesky decomposition. |
//| The algorithm calculates a lower bound of the condition number. |
//| In this case, the algorithm does not return a lower bound of the |
//| condition number, but an inverse number (to avoid an overflow in |
//| case of a singular matrix). |
//| It should be noted that 1-norm and inf-norm condition numbers of |
//| symmetric matrices are equal, so the algorithm doesn't take into |
//| account the differences between these types of norms. |
//| Input parameters: |
//| CD - Cholesky decomposition of matrix A, |
//| output of SMatrixCholesky subroutine. |
//| N - size of matrix A. |
//| Result: 1/LowerBound(cond(A)) |
//| NOTE: |
//| if k(A) is very large, then matrix is assumed degenerate, |
//| k(A)=INF, 0.0 is returned in such cases. |
//+------------------------------------------------------------------+
double CAlglib::SPDMatrixCholeskyRCond(CMatrixDouble &a,const int n,
                                       const bool IsUpper)
  {
//--- estimate from a precomputed Cholesky factor (SPD case)
   double res=CRCond::SPDMatrixCholeskyRCond(a,n,IsUpper);
   return(res);
  }
//+------------------------------------------------------------------+
//| Condition number estimate of a Hermitian positive definite matrix|
//| given by Cholesky decomposition. |
//| The algorithm calculates a lower bound of the condition number. |
//| In this case, the algorithm does not return a lower bound of the |
//| condition number, but an inverse number (to avoid an overflow in |
//| case of a singular matrix). |
//| It should be noted that 1-norm and inf-norm condition numbers of |
//| symmetric matrices are equal, so the algorithm doesn't take into |
//| account the differences between these types of norms. |
//| Input parameters: |
//| CD - Cholesky decomposition of matrix A, |
//| output of SMatrixCholesky subroutine. |
//| N - size of matrix A. |
//| Result: 1/LowerBound(cond(A)) |
//| NOTE: |
//| if k(A) is very large, then matrix is assumed degenerate, |
//| k(A)=INF, 0.0 is returned in such cases. |
//+------------------------------------------------------------------+
double CAlglib::HPDMatrixCholeskyRCond(CMatrixComplex &a,const int n,
                                       const bool IsUpper)
  {
//--- estimate from a precomputed Cholesky factor (HPD case)
   double res=CRCond::HPDMatrixCholeskyRCond(a,n,IsUpper);
   return(res);
  }
//+------------------------------------------------------------------+
//| Estimate of the condition number of a matrix given by its LU |
//| decomposition (1-norm) |
//| The algorithm calculates a lower bound of the condition number. |
//| In this case, the algorithm does not return a lower bound of the |
//| condition number, but an inverse number (to avoid an overflow in |
//| case of a singular matrix). |
//| Input parameters: |
//| LUA - LU decomposition of a matrix in compact form.|
//| Output of the CMatrixLU subroutine. |
//| N - size of matrix A. |
//| Result: 1/LowerBound(cond(A)) |
//| NOTE: |
//| if k(A) is very large, then matrix is assumed degenerate, |
//| k(A)=INF, 0.0 is returned in such cases. |
//+------------------------------------------------------------------+
double CAlglib::CMatrixLURCond1(CMatrixComplex &lua,const int n)
  {
//--- estimate from a precomputed complex LU factorization (1-norm)
   double res=CRCond::CMatrixLURCond1(lua,n);
   return(res);
  }
//+------------------------------------------------------------------+
//| Estimate of the condition number of a matrix given by its LU |
//| decomposition (infinity norm). |
//| The algorithm calculates a lower bound of the condition number. |
//| In this case, the algorithm does not return a lower bound of the |
//| condition number, but an inverse number (to avoid an overflow in |
//| case of a singular matrix). |
//| Input parameters: |
//| LUA - LU decomposition of a matrix in compact form. |
//| Output of the CMatrixLU subroutine. |
//| N - size of matrix A. |
//| Result: 1/LowerBound(cond(A)) |
//| NOTE: |
//| if k(A) is very large, then matrix is assumed degenerate, |
//| k(A)=INF, 0.0 is returned in such cases. |
//+------------------------------------------------------------------+
double CAlglib::CMatrixLURCondInf(CMatrixComplex &lua,const int n)
  {
//--- estimate from a precomputed complex LU factorization (inf-norm)
   double res=CRCond::CMatrixLURCondInf(lua,n);
   return(res);
  }
//+------------------------------------------------------------------+
//| Triangular matrix: estimate of a condition number (1-norm) |
//| The algorithm calculates a lower bound of the condition number. |
//| In this case, the algorithm does not return a lower bound of the |
//| condition number, but an inverse number (to avoid an overflow in |
//| case of a singular matrix). |
//| Input parameters: |
//| A - matrix. Array[0..N-1, 0..N-1]. |
//| N - size of A. |
//| IsUpper - True, if the matrix is upper triangular. |
//| IsUnit - True, if the matrix has a unit diagonal. |
//| Result: 1/LowerBound(cond(A)) |
//| NOTE: |
//| if k(A) is very large, then matrix is assumed degenerate, |
//| k(A)=INF, 0.0 is returned in such cases. |
//+------------------------------------------------------------------+
double CAlglib::CMatrixTrRCond1(CMatrixComplex &a,const int n,
                                const bool IsUpper,const bool IsUnit)
  {
//--- complex triangular matrix, 1-norm: delegate to the estimation core
   double res=CRCond::CMatrixTrRCond1(a,n,IsUpper,IsUnit);
   return(res);
  }
//+------------------------------------------------------------------+
//| Triangular matrix: estimate of a matrix condition number |
//| (infinity-norm). |
//| The algorithm calculates a lower bound of the condition number. |
//| In this case, the algorithm does not return a lower bound of the |
//| condition number, but an inverse number (to avoid an overflow in |
//| case of a singular matrix). |
//| Input parameters: |
//| A - matrix. Array whose indexes range within |
//| [0..N-1, 0..N-1]. |
//| N - size of matrix A. |
//| IsUpper - True, if the matrix is upper triangular. |
//| IsUnit - True, if the matrix has a unit diagonal. |
//| Result: 1/LowerBound(cond(A)) |
//| NOTE: |
//| if k(A) is very large, then matrix is assumed degenerate, |
//| k(A)=INF, 0.0 is returned in such cases. |
//+------------------------------------------------------------------+
double CAlglib::CMatrixTrRCondInf(CMatrixComplex &a,const int n,
                                  const bool IsUpper,const bool IsUnit)
  {
//--- complex triangular matrix, inf-norm: delegate to the estimation core
   double res=CRCond::CMatrixTrRCondInf(a,n,IsUpper,IsUnit);
   return(res);
  }
//+------------------------------------------------------------------+
//| This procedure initializes matrix norm estimator. |
//| USAGE: |
//| 1. User initializes algorithm state with NormEstimatorCreate() |
//| call |
//| 2. User calls NormEstimatorEstimateSparse() (or |
//| NormEstimatorIteration()) |
//| 3. User calls NormEstimatorResults() to get solution. |
//| INPUT PARAMETERS: |
//| M - number of rows in the matrix being estimated, M>0 |
//| N - number of columns in the matrix being estimated, |
//| N>0 |
//| NStart - number of random starting vectors, recommended |
//| value - at least 5. |
//| NIts - number of iterations to do with best starting |
//| vector recommended value - at least 5. |
//| OUTPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| NOTE: this algorithm is effectively deterministic, i.e. it always|
//|       returns same result when repeatedly called for the same    |
//|       matrix. In fact, algorithm uses randomized starting        |
//|       vectors, but internal random numbers generator always      |
//|       generates same sequence of the random values (it is a      |
//|       feature, not bug).                                         |
//| Algorithm can be made non-deterministic with |
//| NormEstimatorSetSeed(0) call. |
//+------------------------------------------------------------------+
void CAlglib::NormEstimatorCreate(int m,int n,int nstart,int nits,
CNormEstimatorState &state)
{
//--- thin wrapper: initialize the norm estimator state for an MxN
//--- matrix; full parameter contract is in the banner comment above
CNormEstimator::NormEstimatorCreate(m,n,nstart,nits,state);
}
//+------------------------------------------------------------------+
//| This function changes seed value used by algorithm. In some cases|
//| we need deterministic processing, i.e. subsequent calls must |
//| return equal results, in other cases we need non-deterministic |
//| algorithm which returns different results for the same matrix on |
//| every pass. |
//| Setting zero seed will lead to non-deterministic algorithm, while|
//| non-zero value will make our algorithm deterministic. |
//| INPUT PARAMETERS: |
//| State - norm estimator state, must be initialized with a |
//| call to NormEstimatorCreate() |
//| SeedVal - seed value, >=0. Zero value=non-deterministic algo.|
//+------------------------------------------------------------------+
void CAlglib::NormEstimatorSetSeed(CNormEstimatorState &state,int seedval)
{
//--- thin wrapper: seedval=0 selects non-deterministic mode,
//--- any positive seed makes the algorithm deterministic
CNormEstimator::NormEstimatorSetSeed(state,seedval);
}
//+------------------------------------------------------------------+
//| This function estimates norm of the sparse M*N matrix A. |
//| INPUT PARAMETERS: |
//| State - norm estimator state, must be initialized with a |
//| call to NormEstimatorCreate() |
//| A - sparse M*N matrix, must be converted to CRS format |
//| prior to calling this function. |
//| After this function is over you can call NormEstimatorResults() |
//| to get estimate of the norm(A). |
//+------------------------------------------------------------------+
void CAlglib::NormEstimatorEstimateSparse(CNormEstimatorState &state,
CSparseMatrix &a)
{
//--- thin wrapper: run the estimation on a CRS-format sparse matrix;
//--- retrieve the estimate afterwards with NormEstimatorResults()
CNormEstimator::NormEstimatorEstimateSparse(state,a);
}
//+------------------------------------------------------------------+
//| Matrix norm estimation results |
//| INPUT PARAMETERS: |
//| State - algorithm state |
//| OUTPUT PARAMETERS: |
//| Nrm - estimate of the matrix norm, Nrm >= 0 |
//+------------------------------------------------------------------+
void CAlglib::NormEstimatorResults(CNormEstimatorState &state,double &nrm)
{
//--- thin wrapper: extract the norm estimate (nrm>=0) computed by a
//--- preceding NormEstimatorEstimateSparse() call
CNormEstimator::NormEstimatorResults(state,nrm);
}
//+------------------------------------------------------------------+
//| Inversion of a matrix given by its LU decomposition. |
//| INPUT PARAMETERS: |
//| A - LU decomposition of the matrix |
//| (output of RMatrixLU subroutine). |
//| Pivots - table of permutations |
//| (the output of RMatrixLU subroutine). |
//| N - size of matrix A (optional) : |
//| * if given, only principal NxN submatrix is |
//| processed and overwritten. other elements are |
//| unchanged. |
//| * if not given, size is automatically determined |
//| from matrix size (A must be square matrix) |
//| OUTPUT PARAMETERS: |
//| Info - return code: |
//| * -3 A is singular, or VERY close to singular.|
//| it is filled by zeros in such cases. |
//| * 1 task is solved (but matrix A may be |
//| ill-conditioned, check R1/RInf parameters|
//| for condition numbers). |
//| Rep - solver report, see below for more info |
//| A - inverse of matrix A. |
//| Array whose indexes range within |
//| [0..N-1, 0..N-1]. |
//| SOLVER REPORT |
//| Subroutine sets following fields of the Rep structure: |
//| * R1 reciprocal of condition number: 1/cond(A), 1-norm. |
//| * RInf reciprocal of condition number: 1/cond(A), inf-norm. |
//+------------------------------------------------------------------+
void CAlglib::RMatrixLUInverse(CMatrixDouble &a,int &pivots[],
const int n,int &info,
CMatInvReportShell &rep)
{
//--- initialization: reset return code before delegating
info=0;
//--- function call: invert the principal NxN submatrix in place;
//--- info and rep are filled by the computational core (see banner)
CMatInv::RMatrixLUInverse(a,pivots,n,info,rep.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Inversion of a matrix given by its LU decomposition. |
//| INPUT PARAMETERS: |
//| A - LU decomposition of the matrix |
//| (output of RMatrixLU subroutine). |
//| Pivots - table of permutations |
//| (the output of RMatrixLU subroutine). |
//| N - size of matrix A (optional) : |
//| * if given, only principal NxN submatrix is |
//| processed and overwritten. other elements are |
//| unchanged. |
//| * if not given, size is automatically determined |
//| from matrix size (A must be square matrix) |
//| OUTPUT PARAMETERS: |
//| Info - return code: |
//| * -3 A is singular, or VERY close to singular.|
//| it is filled by zeros in such cases. |
//| * 1 task is solved (but matrix A may be |
//| ill-conditioned, check R1/RInf parameters|
//| for condition numbers). |
//| Rep - solver report, see below for more info |
//| A - inverse of matrix A. |
//| Array whose indexes range within |
//| [0..N-1, 0..N-1]. |
//| SOLVER REPORT |
//| Subroutine sets following fields of the Rep structure: |
//| * R1 reciprocal of condition number: 1/cond(A), 1-norm. |
//| * RInf reciprocal of condition number: 1/cond(A), inf-norm. |
//+------------------------------------------------------------------+
void CAlglib::RMatrixLUInverse(CMatrixDouble &a,int &pivots[],
                               int &info,CMatInvReportShell &rep)
  {
//--- size deduction: A must be square and pivots must match its order
   int rows=(int)CAp::Rows(a);
   int cols=(int)CAp::Cols(a);
   if(rows!=cols || cols!=CAp::Len(pivots))
     {
      Print(__FUNCTION__+": looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return;
     }
//--- reset return code and delegate with the deduced size
   info=0;
   CMatInv::RMatrixLUInverse(a,pivots,cols,info,rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Inversion of a general matrix. |
//| Input parameters: |
//| A - matrix. |
//| N - size of matrix A (optional) : |
//| * if given, only principal NxN submatrix is |
//| processed and overwritten. other elements are |
//| unchanged. |
//| * if not given, size is automatically determined |
//| from matrix size (A must be square matrix) |
//| Output parameters: |
//| Info - return code, same as in RMatrixLUInverse |
//| Rep - solver report, same as in RMatrixLUInverse |
//| A - inverse of matrix A, same as in RMatrixLUInverse |
//| Result (reported through the Info output parameter):             |
//|     Info=1, if the matrix is not singular.                       |
//|     Info=-3, if the matrix is singular.                          |
//+------------------------------------------------------------------+
void CAlglib::RMatrixInverse(CMatrixDouble &a,const int n,int &info,
CMatInvReportShell &rep)
{
//--- initialization: reset return code before delegating
info=0;
//--- function call: invert the principal NxN submatrix of A in place;
//--- info and rep are filled by the computational core (see banner)
CMatInv::RMatrixInverse(a,n,info,rep.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Inversion of a general matrix. |
//| Input parameters: |
//| A - matrix. |
//| N - size of matrix A (optional) : |
//| * if given, only principal NxN submatrix is |
//| processed and overwritten. other elements are |
//| unchanged. |
//| * if not given, size is automatically determined |
//| from matrix size (A must be square matrix) |
//| Output parameters: |
//| Info - return code, same as in RMatrixLUInverse |
//| Rep - solver report, same as in RMatrixLUInverse |
//| A - inverse of matrix A, same as in RMatrixLUInverse |
//| Result (reported through the Info output parameter):             |
//|     Info=1, if the matrix is not singular.                       |
//|     Info=-3, if the matrix is singular.                          |
//+------------------------------------------------------------------+
void CAlglib::RMatrixInverse(CMatrixDouble &a,int &info,
                             CMatInvReportShell &rep)
  {
//--- size deduction: A must be a square matrix
   int rows=(int)CAp::Rows(a);
   int cols=(int)CAp::Cols(a);
   if(rows!=cols)
     {
      Print(__FUNCTION__+": looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return;
     }
//--- reset return code and delegate with the deduced size
   info=0;
   CMatInv::RMatrixInverse(a,cols,info,rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Inversion of a matrix given by its LU decomposition. |
//| INPUT PARAMETERS: |
//| A - LU decomposition of the matrix |
//| (output of CMatrixLU subroutine). |
//| Pivots - table of permutations |
//| (the output of CMatrixLU subroutine). |
//| N - size of matrix A (optional) : |
//| * if given, only principal NxN submatrix is |
//| processed and overwritten. other elements are |
//| unchanged. |
//| * if not given, size is automatically determined |
//| from matrix size (A must be square matrix) |
//| OUTPUT PARAMETERS: |
//| Info - return code, same as in RMatrixLUInverse |
//| Rep - solver report, same as in RMatrixLUInverse |
//| A - inverse of matrix A, same as in RMatrixLUInverse |
//+------------------------------------------------------------------+
void CAlglib::CMatrixLUInverse(CMatrixComplex &a,int &pivots[],
                               const int n,int &info,
                               CMatInvReportShell &rep)
  {
//--- reset the return code before delegating
   info=0;
//--- invert the principal NxN submatrix from its LU factorization
   CMatInv::CMatrixLUInverse(a,pivots,n,info,rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Inversion of a matrix given by its LU decomposition. |
//| INPUT PARAMETERS: |
//| A - LU decomposition of the matrix |
//| (output of CMatrixLU subroutine). |
//| Pivots - table of permutations |
//| (the output of CMatrixLU subroutine). |
//| N - size of matrix A (optional) : |
//| * if given, only principal NxN submatrix is |
//| processed and overwritten. other elements are |
//| unchanged. |
//| * if not given, size is automatically determined |
//| from matrix size (A must be square matrix) |
//| OUTPUT PARAMETERS: |
//| Info - return code, same as in RMatrixLUInverse |
//| Rep - solver report, same as in RMatrixLUInverse |
//| A - inverse of matrix A, same as in RMatrixLUInverse |
//+------------------------------------------------------------------+
void CAlglib::CMatrixLUInverse(CMatrixComplex &a,int &pivots[],
                               int &info,CMatInvReportShell &rep)
  {
//--- N is deduced automatically: the matrix must be square and the
//--- permutation table must match its size
   if(CAp::Cols(a)!=CAp::Rows(a) || CAp::Cols(a)!=CAp::Len(pivots))
     {
      Print(__FUNCTION__+": looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return;
     }
//--- reset the return code and deduce the problem size
   info=0;
   int dim=(int)CAp::Cols(a);
//--- delegate to the computational core
   CMatInv::CMatrixLUInverse(a,pivots,dim,info,rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Inversion of a general matrix. |
//| Input parameters: |
//| A - matrix |
//| N - size of matrix A (optional) : |
//| * if given, only principal NxN submatrix is |
//| processed and overwritten. other elements are |
//| unchanged. |
//| * if not given, size is automatically determined |
//| from matrix size (A must be square matrix) |
//| Output parameters: |
//| Info - return code, same as in RMatrixLUInverse |
//| Rep - solver report, same as in RMatrixLUInverse |
//| A - inverse of matrix A, same as in RMatrixLUInverse |
//+------------------------------------------------------------------+
void CAlglib::CMatrixInverse(CMatrixComplex &a,const int n,int &info,
                             CMatInvReportShell &rep)
  {
//--- reset the return code before delegating
   info=0;
//--- invert the principal NxN submatrix of the complex matrix
   CMatInv::CMatrixInverse(a,n,info,rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Inversion of a general matrix. |
//| Input parameters: |
//| A - matrix |
//| N - size of matrix A (optional) : |
//| * if given, only principal NxN submatrix is |
//| processed and overwritten. other elements are |
//| unchanged. |
//| * if not given, size is automatically determined |
//| from matrix size (A must be square matrix) |
//| Output parameters: |
//| Info - return code, same as in RMatrixLUInverse |
//| Rep - solver report, same as in RMatrixLUInverse |
//| A - inverse of matrix A, same as in RMatrixLUInverse |
//+------------------------------------------------------------------+
void CAlglib::CMatrixInverse(CMatrixComplex &a,int &info,
                             CMatInvReportShell &rep)
  {
//--- N is deduced from the matrix, so it must be square
   if(CAp::Cols(a)!=CAp::Rows(a))
     {
      Print(__FUNCTION__+": looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return;
     }
//--- reset the return code and deduce the problem size
   info=0;
   int dim=(int)CAp::Cols(a);
//--- delegate to the computational core
   CMatInv::CMatrixInverse(a,dim,info,rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Inversion of a symmetric positive definite matrix which is given |
//| by Cholesky decomposition. |
//| Input parameters: |
//| A - Cholesky decomposition of the matrix to be |
//|                 inverted: A=U'*U or A = L*L'.                    |
//| Output of SPDMatrixCholesky subroutine. |
//| N - size of matrix A (optional) : |
//| * if given, only principal NxN submatrix is |
//| processed and overwritten. other elements are |
//| unchanged. |
//| * if not given, size is automatically determined |
//| from matrix size (A must be square matrix) |
//| IsUpper - storage type (optional): |
//| * if True, symmetric matrix A is given by its |
//|                   upper triangle, and the lower triangle isn't   |
//|                   used/changed by function                       |
//|                 * if False, symmetric matrix A is given by its   |
//|                   lower triangle, and the upper triangle isn't   |
//| used/changed by function |
//| * if not given, lower half is used. |
//| Output parameters: |
//| Info - return code, same as in RMatrixLUInverse |
//| Rep - solver report, same as in RMatrixLUInverse |
//| A - inverse of matrix A, same as in RMatrixLUInverse |
//+------------------------------------------------------------------+
void CAlglib::SPDMatrixCholeskyInverse(CMatrixDouble &a,const int n,
                                       const bool IsUpper,int &info,
                                       CMatInvReportShell &rep)
  {
//--- reset the return code before delegating
   info=0;
//--- invert the SPD matrix given by its Cholesky factor; the working
//--- triangle is selected by IsUpper
   CMatInv::SPDMatrixCholeskyInverse(a,n,IsUpper,info,rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Inversion of a symmetric positive definite matrix which is given |
//| by Cholesky decomposition. |
//| Input parameters: |
//| A - Cholesky decomposition of the matrix to be |
//|                 inverted: A=U'*U or A = L*L'.                    |
//| Output of SPDMatrixCholesky subroutine. |
//| N - size of matrix A (optional) : |
//| * if given, only principal NxN submatrix is |
//| processed and overwritten. other elements are |
//| unchanged. |
//| * if not given, size is automatically determined |
//| from matrix size (A must be square matrix) |
//| IsUpper - storage type (optional): |
//| * if True, symmetric matrix A is given by its |
//|                   upper triangle, and the lower triangle isn't   |
//|                   used/changed by function                       |
//|                 * if False, symmetric matrix A is given by its   |
//|                   lower triangle, and the upper triangle isn't   |
//| used/changed by function |
//| * if not given, lower half is used. |
//| Output parameters: |
//| Info - return code, same as in RMatrixLUInverse |
//| Rep - solver report, same as in RMatrixLUInverse |
//| A - inverse of matrix A, same as in RMatrixLUInverse |
//+------------------------------------------------------------------+
void CAlglib::SPDMatrixCholeskyInverse(CMatrixDouble &a,int &info,
                                       CMatInvReportShell &rep)
  {
//--- N is deduced from the matrix, so it must be square
   if(CAp::Cols(a)!=CAp::Rows(a))
     {
      Print(__FUNCTION__+": looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return;
     }
//--- reset the return code; the lower triangle is the default storage
   info=0;
   int  dim=(int)CAp::Cols(a);
   bool upper=false;
//--- delegate to the computational core
   CMatInv::SPDMatrixCholeskyInverse(a,dim,upper,info,rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Inversion of a symmetric positive definite matrix. |
//| Given an upper or lower triangle of a symmetric positive definite|
//| matrix, the algorithm generates matrix A^-1 and saves the upper |
//| or lower triangle depending on the input. |
//| Input parameters: |
//| A - matrix to be inverted (upper or lower triangle). |
//| Array with elements [0..N-1,0..N-1]. |
//| N - size of matrix A (optional) : |
//| * if given, only principal NxN submatrix is |
//| processed and overwritten. other elements are |
//| unchanged. |
//| * if not given, size is automatically determined |
//| from matrix size (A must be square matrix) |
//| IsUpper - storage type (optional): |
//| * if True, symmetric matrix A is given by its |
//|                   upper triangle, and the lower triangle isn't   |
//|                   used/changed by function                       |
//|                 * if False, symmetric matrix A is given by its   |
//|                   lower triangle, and the upper triangle isn't   |
//| used/changed by function |
//| * if not given, both lower and upper triangles |
//| must be filled. |
//| Output parameters: |
//| Info - return code, same as in RMatrixLUInverse |
//| Rep - solver report, same as in RMatrixLUInverse |
//| A - inverse of matrix A, same as in RMatrixLUInverse |
//+------------------------------------------------------------------+
void CAlglib::SPDMatrixInverse(CMatrixDouble &a,const int n,
                               const bool IsUpper,int &info,
                               CMatInvReportShell &rep)
  {
//--- reset the return code before delegating
   info=0;
//--- invert the SPD matrix; the working triangle is selected by IsUpper
   CMatInv::SPDMatrixInverse(a,n,IsUpper,info,rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Inversion of a symmetric positive definite matrix. |
//| Given an upper or lower triangle of a symmetric positive definite|
//| matrix, the algorithm generates matrix A^-1 and saves the upper |
//| or lower triangle depending on the input. |
//| Input parameters: |
//| A - matrix to be inverted (upper or lower triangle). |
//| Array with elements [0..N-1,0..N-1]. |
//| N - size of matrix A (optional) : |
//| * if given, only principal NxN submatrix is |
//| processed and overwritten. other elements are |
//| unchanged. |
//| * if not given, size is automatically determined |
//| from matrix size (A must be square matrix) |
//| IsUpper - storage type (optional): |
//| * if True, symmetric matrix A is given by its |
//|                   upper triangle, and the lower triangle isn't   |
//|                   used/changed by function                       |
//|                 * if False, symmetric matrix A is given by its   |
//|                   lower triangle, and the upper triangle isn't   |
//| used/changed by function |
//| * if not given, both lower and upper triangles |
//| must be filled. |
//| Output parameters: |
//| Info - return code, same as in RMatrixLUInverse |
//| Rep - solver report, same as in RMatrixLUInverse |
//| A - inverse of matrix A, same as in RMatrixLUInverse |
//+------------------------------------------------------------------+
void CAlglib::SPDMatrixInverse(CMatrixDouble &a,int &info,
                               CMatInvReportShell &rep)
  {
//--- N is deduced from the matrix, so it must be square
   if(CAp::Cols(a)!=CAp::Rows(a))
     {
      Print(__FUNCTION__+": looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return;
     }
//--- both triangles were supplied, so they must agree with each other
   if(!CAp::IsSymmetric(a))
     {
      Print(__FUNCTION__+": 'a' parameter is not symmetric matrix");
      CAp::exception_happened=true;
      return;
     }
//--- reset the return code; the lower triangle is the default storage
   info=0;
   int  dim=(int)CAp::Cols(a);
   bool upper=false;
//--- delegate to the computational core
   CMatInv::SPDMatrixInverse(a,dim,upper,info,rep.GetInnerObj());
//--- only one triangle was overwritten - mirror it into the other one
   if(!CAp::ForceSymmetric(a))
     {
      Print(__FUNCTION__+": Internal error while forcing symmetricity of 'a' parameter");
      CAp::exception_happened=true;
     }
  }
//+------------------------------------------------------------------+
//| Inversion of a Hermitian positive definite matrix which is given |
//| by Cholesky decomposition. |
//| Input parameters: |
//| A - Cholesky decomposition of the matrix to be |
//|                 inverted: A=U'*U or A = L*L'.                    |
//| Output of HPDMatrixCholesky subroutine. |
//| N - size of matrix A (optional) : |
//| * if given, only principal NxN submatrix is |
//| processed and overwritten. other elements are |
//| unchanged. |
//| * if not given, size is automatically determined |
//| from matrix size (A must be square matrix) |
//| IsUpper - storage type (optional): |
//| * if True, symmetric matrix A is given by its |
//|                   upper triangle, and the lower triangle isn't   |
//|                   used/changed by function                       |
//|                 * if False, symmetric matrix A is given by its   |
//|                   lower triangle, and the upper triangle isn't   |
//| used/changed by function |
//| * if not given, lower half is used. |
//| Output parameters: |
//| Info - return code, same as in RMatrixLUInverse |
//| Rep - solver report, same as in RMatrixLUInverse |
//| A - inverse of matrix A, same as in RMatrixLUInverse |
//+------------------------------------------------------------------+
void CAlglib::HPDMatrixCholeskyInverse(CMatrixComplex &a,const int n,
                                       const bool IsUpper,int &info,
                                       CMatInvReportShell &rep)
  {
//--- reset the return code before delegating
   info=0;
//--- invert the HPD matrix given by its Cholesky factor; the working
//--- triangle is selected by IsUpper
   CMatInv::HPDMatrixCholeskyInverse(a,n,IsUpper,info,rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Inversion of a Hermitian positive definite matrix which is given |
//| by Cholesky decomposition. |
//| Input parameters: |
//| A - Cholesky decomposition of the matrix to be |
//|                 inverted: A=U'*U or A = L*L'.                    |
//| Output of HPDMatrixCholesky subroutine. |
//| N - size of matrix A (optional) : |
//| * if given, only principal NxN submatrix is |
//| processed and overwritten. other elements are |
//| unchanged. |
//| * if not given, size is automatically determined |
//| from matrix size (A must be square matrix) |
//| IsUpper - storage type (optional): |
//| * if True, symmetric matrix A is given by its |
//|                   upper triangle, and the lower triangle isn't   |
//|                   used/changed by function                       |
//|                 * if False, symmetric matrix A is given by its   |
//|                   lower triangle, and the upper triangle isn't   |
//| used/changed by function |
//| * if not given, lower half is used. |
//| Output parameters: |
//| Info - return code, same as in RMatrixLUInverse |
//| Rep - solver report, same as in RMatrixLUInverse |
//| A - inverse of matrix A, same as in RMatrixLUInverse |
//+------------------------------------------------------------------+
void CAlglib::HPDMatrixCholeskyInverse(CMatrixComplex &a,int &info,
                                       CMatInvReportShell &rep)
  {
//--- N is deduced from the matrix, so it must be square
   if(CAp::Cols(a)!=CAp::Rows(a))
     {
      Print(__FUNCTION__+": looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return;
     }
//--- reset the return code; the lower triangle is the default storage
   info=0;
   int  dim=(int)CAp::Cols(a);
   bool upper=false;
//--- delegate to the computational core
   CMatInv::HPDMatrixCholeskyInverse(a,dim,upper,info,rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Inversion of a Hermitian positive definite matrix. |
//| Given an upper or lower triangle of a Hermitian positive definite|
//| matrix, the algorithm generates matrix A^-1 and saves the upper |
//| or lower triangle depending on the input. |
//| Input parameters: |
//| A - matrix to be inverted (upper or lower triangle). |
//| Array with elements [0..N-1,0..N-1]. |
//| N - size of matrix A (optional) : |
//| * if given, only principal NxN submatrix is |
//| processed and overwritten. other elements are |
//| unchanged. |
//| * if not given, size is automatically determined |
//| from matrix size (A must be square matrix) |
//| IsUpper - storage type (optional): |
//| * if True, symmetric matrix A is given by its |
//|                   upper triangle, and the lower triangle isn't   |
//|                   used/changed by function                       |
//|                 * if False, symmetric matrix A is given by its   |
//|                   lower triangle, and the upper triangle isn't   |
//| used/changed by function |
//| * if not given, both lower and upper triangles |
//| must be filled. |
//| Output parameters: |
//| Info - return code, same as in RMatrixLUInverse |
//| Rep - solver report, same as in RMatrixLUInverse |
//| A - inverse of matrix A, same as in RMatrixLUInverse |
//+------------------------------------------------------------------+
void CAlglib::HPDMatrixInverse(CMatrixComplex &a,const int n,
                               const bool IsUpper,int &info,
                               CMatInvReportShell &rep)
  {
//--- reset the return code before delegating
   info=0;
//--- invert the HPD matrix; the working triangle is selected by IsUpper
   CMatInv::HPDMatrixInverse(a,n,IsUpper,info,rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Inversion of a Hermitian positive definite matrix. |
//| Given an upper or lower triangle of a Hermitian positive definite|
//| matrix, the algorithm generates matrix A^-1 and saves the upper |
//| or lower triangle depending on the input. |
//| Input parameters: |
//| A - matrix to be inverted (upper or lower triangle). |
//| Array with elements [0..N-1,0..N-1]. |
//| N - size of matrix A (optional) : |
//| * if given, only principal NxN submatrix is |
//| processed and overwritten. other elements are |
//| unchanged. |
//| * if not given, size is automatically determined |
//| from matrix size (A must be square matrix) |
//| IsUpper - storage type (optional): |
//| * if True, symmetric matrix A is given by its |
//|                   upper triangle, and the lower triangle isn't   |
//|                   used/changed by function                       |
//|                 * if False, symmetric matrix A is given by its   |
//|                   lower triangle, and the upper triangle isn't   |
//| used/changed by function |
//| * if not given, both lower and upper triangles |
//| must be filled. |
//| Output parameters: |
//| Info - return code, same as in RMatrixLUInverse |
//| Rep - solver report, same as in RMatrixLUInverse |
//| A - inverse of matrix A, same as in RMatrixLUInverse |
//+------------------------------------------------------------------+
void CAlglib::HPDMatrixInverse(CMatrixComplex &a,int &info,
                               CMatInvReportShell &rep)
  {
//--- N is deduced from the matrix, so it must be square
   if(CAp::Cols(a)!=CAp::Rows(a))
     {
      Print(__FUNCTION__+": looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return;
     }
//--- both triangles were supplied, so they must agree with each other
   if(!CAp::IsHermitian(a))
     {
      Print(__FUNCTION__+": 'a' parameter is not Hermitian matrix");
      CAp::exception_happened=true;
      return;
     }
//--- reset the return code; the lower triangle is the default storage
   info=0;
   int  dim=(int)CAp::Cols(a);
   bool upper=false;
//--- delegate to the computational core
   CMatInv::HPDMatrixInverse(a,dim,upper,info,rep.GetInnerObj());
//--- only one triangle was overwritten - mirror it into the other one
   if(!CAp::ForceHermitian(a))
     {
      Print(__FUNCTION__+": Internal error while forcing Hermitian properties of 'a' parameter");
      CAp::exception_happened=true;
     }
  }
//+------------------------------------------------------------------+
//| Triangular matrix inverse (real) |
//| The subroutine inverts the following types of matrices: |
//| * upper triangular |
//| * upper triangular with unit diagonal |
//| * lower triangular |
//| * lower triangular with unit diagonal |
//| In case of an upper (lower) triangular matrix, the inverse matrix|
//| will also be upper (lower) triangular, and after the end of the |
//| algorithm, the inverse matrix replaces the source matrix. The |
//| elements below (above) the main diagonal are not changed by the |
//| algorithm. |
//| If the matrix has a unit diagonal, the inverse matrix also has a |
//| unit diagonal, and the diagonal elements are not passed to the |
//| algorithm. |
//| Input parameters: |
//| A - matrix, array[0..N-1, 0..N-1]. |
//| N - size of matrix A (optional) : |
//| * if given, only principal NxN submatrix is |
//| processed and overwritten. other elements are |
//| unchanged. |
//| * if not given, size is automatically determined |
//| from matrix size (A must be square matrix) |
//| IsUpper - True, if the matrix is upper triangular. |
//| IsUnit - diagonal type (optional): |
//| * if True, matrix has unit diagonal (a[i,i] are |
//| NOT used) |
//| * if False, matrix diagonal is arbitrary |
//| * if not given, False is assumed |
//| Output parameters: |
//| Info - same as for RMatrixLUInverse |
//| Rep - same as for RMatrixLUInverse |
//| A - same as for RMatrixLUInverse. |
//+------------------------------------------------------------------+
void CAlglib::RMatrixTrInverse(CMatrixDouble &a,const int n,
                               const bool IsUpper,const bool IsUnit,
                               int &info,CMatInvReportShell &rep)
  {
//--- reset the return code before delegating
   info=0;
//--- invert the triangular matrix in-place; the triangle and the
//--- diagonal type are selected by IsUpper/IsUnit
   CMatInv::RMatrixTrInverse(a,n,IsUpper,IsUnit,info,rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Triangular matrix inverse (real) |
//| The subroutine inverts the following types of matrices: |
//| * upper triangular |
//| * upper triangular with unit diagonal |
//| * lower triangular |
//| * lower triangular with unit diagonal |
//| In case of an upper (lower) triangular matrix, the inverse matrix|
//| will also be upper (lower) triangular, and after the end of the |
//| algorithm, the inverse matrix replaces the source matrix. The |
//| elements below (above) the main diagonal are not changed by the |
//| algorithm. |
//| If the matrix has a unit diagonal, the inverse matrix also has a |
//| unit diagonal, and the diagonal elements are not passed to the |
//| algorithm. |
//| Input parameters: |
//| A - matrix, array[0..N-1, 0..N-1]. |
//| N - size of matrix A (optional) : |
//| * if given, only principal NxN submatrix is |
//| processed and overwritten. other elements are |
//| unchanged. |
//| * if not given, size is automatically determined |
//| from matrix size (A must be square matrix) |
//| IsUpper - True, if the matrix is upper triangular. |
//| IsUnit - diagonal type (optional): |
//| * if True, matrix has unit diagonal (a[i,i] are |
//| NOT used) |
//| * if False, matrix diagonal is arbitrary |
//| * if not given, False is assumed |
//| Output parameters: |
//| Info - same as for RMatrixLUInverse |
//| Rep - same as for RMatrixLUInverse |
//| A - same as for RMatrixLUInverse. |
//+------------------------------------------------------------------+
void CAlglib::RMatrixTrInverse(CMatrixDouble &a,const bool IsUpper,
                               int &info,CMatInvReportShell &rep)
  {
//--- N is deduced from the matrix, so it must be square
   if(CAp::Cols(a)!=CAp::Rows(a))
     {
      Print(__FUNCTION__+": looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return;
     }
//--- reset the return code; a non-unit diagonal is assumed by default
   info=0;
   int  dim=(int)CAp::Cols(a);
   bool unitdiag=false;
//--- delegate to the computational core
   CMatInv::RMatrixTrInverse(a,dim,IsUpper,unitdiag,info,rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Triangular matrix inverse (complex) |
//| The subroutine inverts the following types of matrices: |
//| * upper triangular |
//| * upper triangular with unit diagonal |
//| * lower triangular |
//| * lower triangular with unit diagonal |
//| In case of an upper (lower) triangular matrix, the inverse matrix|
//| will also be upper (lower) triangular, and after the end of the |
//| algorithm, the inverse matrix replaces the source matrix. The |
//| elements below (above) the main diagonal are not changed by the |
//| algorithm. |
//| If the matrix has a unit diagonal, the inverse matrix also has a |
//| unit diagonal, and the diagonal elements are not passed to the |
//| algorithm. |
//| Input parameters: |
//| A - matrix, array[0..N-1, 0..N-1]. |
//| N - size of matrix A (optional) : |
//| * if given, only principal NxN submatrix is |
//| processed and overwritten. other elements are |
//| unchanged. |
//| * if not given, size is automatically determined |
//| from matrix size (A must be square matrix) |
//| IsUpper - True, if the matrix is upper triangular. |
//| IsUnit - diagonal type (optional): |
//| * if True, matrix has unit diagonal (a[i,i] are |
//| NOT used) |
//| * if False, matrix diagonal is arbitrary |
//| * if not given, False is assumed |
//| Output parameters: |
//| Info - same as for RMatrixLUInverse |
//| Rep - same as for RMatrixLUInverse |
//| A - same as for RMatrixLUInverse. |
//+------------------------------------------------------------------+
void CAlglib::CMatrixTrInverse(CMatrixComplex &a,const int n,
                               const bool IsUpper,const bool IsUnit,
                               int &info,CMatInvReportShell &rep)
  {
//--- reset the return code before delegating
   info=0;
//--- invert the complex triangular matrix in-place; the triangle and
//--- the diagonal type are selected by IsUpper/IsUnit
   CMatInv::CMatrixTrInverse(a,n,IsUpper,IsUnit,info,rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Triangular matrix inverse (complex) |
//| The subroutine inverts the following types of matrices: |
//| * upper triangular |
//| * upper triangular with unit diagonal |
//| * lower triangular |
//| * lower triangular with unit diagonal |
//| In case of an upper (lower) triangular matrix, the inverse matrix|
//| will also be upper (lower) triangular, and after the end of the |
//| algorithm, the inverse matrix replaces the source matrix. The |
//| elements below (above) the main diagonal are not changed by the |
//| algorithm. |
//| If the matrix has a unit diagonal, the inverse matrix also has a |
//| unit diagonal, and the diagonal elements are not passed to the |
//| algorithm. |
//| Input parameters: |
//| A - matrix, array[0..N-1, 0..N-1]. |
//| N - size of matrix A (optional) : |
//| * if given, only principal NxN submatrix is |
//| processed and overwritten. other elements are |
//| unchanged. |
//| * if not given, size is automatically determined |
//| from matrix size (A must be square matrix) |
//| IsUpper - True, if the matrix is upper triangular. |
//| IsUnit - diagonal type (optional): |
//| * if True, matrix has unit diagonal (a[i,i] are |
//| NOT used) |
//| * if False, matrix diagonal is arbitrary |
//| * if not given, False is assumed |
//| Output parameters: |
//| Info - same as for RMatrixLUInverse |
//| Rep - same as for RMatrixLUInverse |
//| A - same as for RMatrixLUInverse. |
//+------------------------------------------------------------------+
void CAlglib::CMatrixTrInverse(CMatrixComplex &a,const bool IsUpper,
                               int &info,CMatInvReportShell &rep)
  {
//--- N is deduced from the matrix, so it must be square
   if(CAp::Cols(a)!=CAp::Rows(a))
     {
      Print(__FUNCTION__+": looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return;
     }
//--- reset the return code; a non-unit diagonal is assumed by default
   info=0;
   int  dim=(int)CAp::Cols(a);
   bool unitdiag=false;
//--- delegate to the computational core
   CMatInv::CMatrixTrInverse(a,dim,IsUpper,unitdiag,info,rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Singular value decomposition of a bidiagonal matrix (extended |
//| algorithm) |
//| The algorithm performs the singular value decomposition of a |
//| bidiagonal matrix B (upper or lower) representing it as |
//| B = Q*S*P^T, where Q and P - orthogonal matrices, S - diagonal |
//| matrix with non-negative elements on the main diagonal, in |
//| descending order. |
//| The algorithm finds singular values. In addition, the algorithm |
//| can calculate matrices Q and P (more precisely, not the matrices,|
//| but their product with given matrices U and VT - U*Q and |
//| (P^T)*VT)). Of course, matrices U and VT can be of any type, |
//| including identity. Furthermore, the algorithm can calculate Q'*C|
//| (this product is calculated more effectively than U*Q, because |
//| this calculation operates with rows instead of matrix columns). |
//| The feature of the algorithm is its ability to find all singular |
//| values including those which are arbitrarily close to 0 with |
//| relative accuracy close to machine precision. If the parameter |
//| IsFractionalAccuracyRequired is set to True, all singular values |
//| will have high relative accuracy close to machine precision. If |
//| the parameter is set to False, only the biggest singular value |
//| will have relative accuracy close to machine precision. The |
//| absolute error of other singular values is equal to the absolute |
//| error of the biggest singular value. |
//| Input parameters: |
//| D - main diagonal of matrix B. |
//| Array whose index ranges within [0..N-1]. |
//| E - superdiagonal (or subdiagonal) of matrix B. |
//| Array whose index ranges within [0..N-2]. |
//| N - size of matrix B. |
//| IsUpper - True, if the matrix is upper bidiagonal. |
//| IsFractionalAccuracyRequired - |
//| accuracy to search singular values with. |
//| U - matrix to be multiplied by Q. |
//| Array whose indexes range within |
//| [0..NRU-1, 0..N-1]. |
//| The matrix can be bigger, in that case only the |
//| submatrix [0..NRU-1, 0..N-1] will be multiplied |
//| by Q. |
//| NRU - number of rows in matrix U. |
//| C - matrix to be multiplied by Q'. |
//| Array whose indexes range within |
//| [0..N-1, 0..NCC-1]. |
//| The matrix can be bigger, in that case only the |
//| submatrix [0..N-1, 0..NCC-1] will be multiplied |
//| by Q'. |
//| NCC - number of columns in matrix C. |
//| VT - matrix to be multiplied by P^T. |
//| Array whose indexes range within |
//| [0..N-1, 0..NCVT-1]. |
//| The matrix can be bigger, in that case only the |
//| submatrix [0..N-1, 0..NCVT-1] will be multiplied |
//| by P^T. |
//| NCVT - number of columns in matrix VT. |
//| Output parameters: |
//| D - singular values of matrix B in descending order. |
//| U - if NRU>0, contains matrix U*Q. |
//| VT - if NCVT>0, contains matrix (P^T)*VT. |
//| C - if NCC>0, contains matrix Q'*C. |
//| Result: |
//| True, if the algorithm has converged. |
//| False, if the algorithm hasn't converged (rare case). |
//| Additional information: |
//| The type of convergence is controlled by the internal |
//| parameter TOL. If the parameter is greater than 0, the |
//| singular values will have relative accuracy TOL. If TOL<0, |
//| the singular values will have absolute accuracy |
//| ABS(TOL)*norm(B). By default, |TOL| falls within the range of|
//| 10*Epsilon and 100*Epsilon, where Epsilon is the machine |
//| precision. It is not recommended to use TOL less than |
//| 10*Epsilon since this will considerably slow down the |
//| algorithm and may not lead to error decreasing. |
//| History: |
//| * 31 March, 2007. |
//| changed MAXITR from 6 to 12. |
//| -- LAPACK routine (version 3.0) -- |
//| Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., |
//| Courant Institute, Argonne National Lab, and Rice University|
//| October 31, 1999. |
//+------------------------------------------------------------------+
bool CAlglib::RMatrixBdSVD(double &d[],double &e[],const int n,
                           const bool IsUpper,
                           bool isfractionalaccuracyrequired,
                           CMatrixDouble &u,const int nru,
                           CMatrixDouble &c,const int ncc,
                           CMatrixDouble &vt,const int ncvt)
  {
//--- run the bidiagonal SVD core; the flag reports convergence
   bool converged=CBdSingValueDecompose::RMatrixBdSVD(d,e,n,IsUpper,isfractionalaccuracyrequired,u,nru,c,ncc,vt,ncvt);
//--- true if the algorithm has converged, false otherwise (rare case)
   return(converged);
  }
//+------------------------------------------------------------------+
//| Singular value decomposition of a rectangular matrix. |
//| The algorithm calculates the singular value decomposition of a |
//| matrix of size MxN: A = U * S * V^T |
//| The algorithm finds the singular values and, optionally, matrices|
//| U and V^T. The algorithm can find both first min(M,N) columns of |
//| matrix U and rows of matrix V^T (singular vectors), and matrices |
//| U and V^T wholly (of sizes MxM and NxN respectively). |
//| Take into account that the subroutine does not return matrix V |
//| but V^T. |
//| Input parameters: |
//| A - matrix to be decomposed. |
//| Array whose indexes range within |
//| [0..M-1, 0..N-1]. |
//| M - number of rows in matrix A. |
//| N - number of columns in matrix A. |
//| UNeeded - 0, 1 or 2. See the description of the |
//| parameter U. |
//| VTNeeded - 0, 1 or 2. See the description of the |
//| parameter VT. |
//| AdditionalMemory - |
//| If the parameter: |
//|                    * equals 0, the algorithm doesn't use         |
//| additional memory (lower requirements, |
//| lower performance). |
//| * equals 1, the algorithm uses additional |
//| memory of size min(M,N)*min(M,N) of real |
//| numbers. It often speeds up the algorithm.|
//| * equals 2, the algorithm uses additional |
//| memory of size M*min(M,N) of real numbers.|
//| It allows to get a maximum performance. |
//| The recommended value of the parameter is 2. |
//| Output parameters: |
//| W - contains singular values in descending order.|
//| U - if UNeeded=0, U isn't changed, the left |
//| singular vectors are not calculated. |
//| if Uneeded=1, U contains left singular |
//| vectors (first min(M,N) columns of matrix U).|
//| Array whose indexes range within |
//| [0..M-1, 0..Min(M,N)-1]. if UNeeded=2, U |
//| contains matrix U wholly. Array whose indexes|
//| range within [0..M-1, 0..M-1]. |
//|     VT        - if VTNeeded=0, VT isn't changed, the right       |
//| singular vectors are not calculated. |
//| if VTNeeded=1, VT contains right singular |
//| vectors (first min(M,N) rows of matrix V^T). |
//| Array whose indexes range within |
//| [0..min(M, N)-1,0..N-1]. if VTNeeded=2, VT |
//| contains matrix V^T wholly. Array whose |
//| indexes range within [0..N-1, 0..N-1]. |
//+------------------------------------------------------------------+
bool CAlglib::RMatrixSVD(CMatrixDouble &a,const int m,const int n,
                         const int uneeded,const int vtneeded,
                         const int additionalmemory,double &w[],
                         CMatrixDouble &u,CMatrixDouble &vt)
  {
//--- run the dense SVD core (A = U*S*V^T); the flag reports success
   bool ok=CSingValueDecompose::RMatrixSVD(a,m,n,uneeded,vtneeded,additionalmemory,w,u,vt);
   return(ok);
  }
//+------------------------------------------------------------------+
//| Determinant calculation of the matrix given by its LU |
//| decomposition. |
//| Input parameters: |
//| A - LU decomposition of the matrix (output of |
//| RMatrixLU subroutine). |
//| Pivots - table of permutations which were made during |
//| the LU decomposition. |
//| Output of RMatrixLU subroutine. |
//| N - (optional) size of matrix A: |
//| * if given, only principal NxN submatrix is |
//| processed and overwritten. other elements are |
//| unchanged. |
//| * if not given, automatically determined from |
//| matrix size (A must be square matrix) |
//| Result: matrix determinant. |
//+------------------------------------------------------------------+
double CAlglib::RMatrixLUDet(CMatrixDouble &a,int &pivots[],const int n)
  {
//--- delegate to the computational core for LU-based determinants
   double det=CMatDet::RMatrixLUDet(a,pivots,n);
   return(det);
  }
//+------------------------------------------------------------------+
//| Determinant calculation of the matrix given by its LU |
//| decomposition. |
//| Input parameters: |
//| A - LU decomposition of the matrix (output of |
//| RMatrixLU subroutine). |
//| Pivots - table of permutations which were made during |
//| the LU decomposition. |
//| Output of RMatrixLU subroutine. |
//| N - (optional) size of matrix A: |
//| * if given, only principal NxN submatrix is |
//| processed and overwritten. other elements are |
//| unchanged. |
//| * if not given, automatically determined from |
//| matrix size (A must be square matrix) |
//| Result: matrix determinant. |
//+------------------------------------------------------------------+
double CAlglib::RMatrixLUDet(CMatrixDouble &a,int &pivots[])
  {
//--- the matrix must be square and the pivot table must match its size
   bool square=(CAp::Rows(a)==CAp::Cols(a));
   bool sized=(CAp::Rows(a)==CAp::Len(pivots));
   if(!square || !sized)
     {
      Print(__FUNCTION__+": looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return(EMPTY_VALUE);
     }
//--- size is deduced from the (square) matrix
   int n=(int)CAp::Rows(a);
//--- delegate to the computational core
   return(CMatDet::RMatrixLUDet(a,pivots,n));
  }
//+------------------------------------------------------------------+
//| Calculation of the determinant of a general matrix |
//| Input parameters: |
//| A - matrix, array[0..N-1, 0..N-1] |
//| N - (optional) size of matrix A: |
//| * if given, only principal NxN submatrix is |
//| processed and overwritten. other elements are |
//| unchanged. |
//| * if not given, automatically determined from |
//| matrix size (A must be square matrix) |
//| Result: determinant of matrix A. |
//+------------------------------------------------------------------+
double CAlglib::RMatrixDet(CMatrixDouble &a,const int n)
  {
//--- delegate general-matrix determinant to the computational core
   double det=CMatDet::RMatrixDet(a,n);
   return(det);
  }
//+------------------------------------------------------------------+
//| Calculation of the determinant of a general matrix |
//| Input parameters: |
//| A - matrix, array[0..N-1, 0..N-1] |
//| N - (optional) size of matrix A: |
//| * if given, only principal NxN submatrix is |
//| processed and overwritten. other elements are |
//| unchanged. |
//| * if not given, automatically determined from |
//| matrix size (A must be square matrix) |
//| Result: determinant of matrix A. |
//+------------------------------------------------------------------+
double CAlglib::RMatrixDet(CMatrixDouble &a)
  {
//--- only a square matrix has a determinant
   if(CAp::Rows(a)!=CAp::Cols(a))
     {
      Print(__FUNCTION__+": looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return(EMPTY_VALUE);
     }
//--- size is deduced from the matrix itself
   return(CMatDet::RMatrixDet(a,(int)CAp::Rows(a)));
  }
//+------------------------------------------------------------------+
//| Determinant calculation of the matrix given by its LU |
//| decomposition. |
//| Input parameters: |
//| A - LU decomposition of the matrix (output of |
//| RMatrixLU subroutine). |
//| Pivots - table of permutations which were made during |
//| the LU decomposition. |
//| Output of RMatrixLU subroutine. |
//| N - (optional) size of matrix A: |
//| * if given, only principal NxN submatrix is |
//| processed and overwritten. other elements are |
//| unchanged. |
//| * if not given, automatically determined from |
//| matrix size (A must be square matrix) |
//| Result: matrix determinant. |
//+------------------------------------------------------------------+
complex CAlglib::CMatrixLUDet(CMatrixComplex &a,int &pivots[],
                              const int n)
  {
//--- delegate complex LU-based determinant to the computational core
   complex det=CMatDet::CMatrixLUDet(a,pivots,n);
   return(det);
  }
//+------------------------------------------------------------------+
//| Determinant calculation of the matrix given by its LU |
//| decomposition. |
//| Input parameters: |
//| A - LU decomposition of the matrix (output of |
//| RMatrixLU subroutine). |
//| Pivots - table of permutations which were made during |
//| the LU decomposition. |
//| Output of RMatrixLU subroutine. |
//| N - (optional) size of matrix A: |
//| * if given, only principal NxN submatrix is |
//| processed and overwritten. other elements are |
//| unchanged. |
//| * if not given, automatically determined from |
//| matrix size (A must be square matrix) |
//| Result: matrix determinant. |
//+------------------------------------------------------------------+
complex CAlglib::CMatrixLUDet(CMatrixComplex &a,int &pivots[])
  {
//--- the matrix must be square and the pivot table must match its size
   bool square=(CAp::Rows(a)==CAp::Cols(a));
   bool sized=(CAp::Rows(a)==CAp::Len(pivots));
   if(!square || !sized)
     {
      Print(__FUNCTION__+": looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return(EMPTY_VALUE);
     }
//--- size is deduced from the (square) matrix
   int n=(int)CAp::Rows(a);
//--- delegate to the computational core
   return(CMatDet::CMatrixLUDet(a,pivots,n));
  }
//+------------------------------------------------------------------+
//| Calculation of the determinant of a general matrix |
//| Input parameters: |
//| A - matrix, array[0..N-1, 0..N-1] |
//| N - (optional) size of matrix A: |
//| * if given, only principal NxN submatrix is |
//| processed and overwritten. other elements are |
//| unchanged. |
//| * if not given, automatically determined from |
//| matrix size (A must be square matrix) |
//| Result: determinant of matrix A. |
//+------------------------------------------------------------------+
complex CAlglib::CMatrixDet(CMatrixComplex &a,const int n)
  {
//--- delegate complex determinant to the computational core
   complex det=CMatDet::CMatrixDet(a,n);
   return(det);
  }
//+------------------------------------------------------------------+
//| Calculation of the determinant of a general matrix |
//| Input parameters: |
//| A - matrix, array[0..N-1, 0..N-1] |
//| N - (optional) size of matrix A: |
//| * if given, only principal NxN submatrix is |
//| processed and overwritten. other elements are |
//| unchanged. |
//| * if not given, automatically determined from |
//| matrix size (A must be square matrix) |
//| Result: determinant of matrix A. |
//+------------------------------------------------------------------+
complex CAlglib::CMatrixDet(CMatrixComplex &a)
  {
//--- only a square matrix has a determinant
   if(CAp::Rows(a)!=CAp::Cols(a))
     {
      Print(__FUNCTION__+": looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return(EMPTY_VALUE);
     }
//--- size is deduced from the matrix itself
   return(CMatDet::CMatrixDet(a,(int)CAp::Rows(a)));
  }
//+------------------------------------------------------------------+
//| Determinant calculation of the matrix given by the Cholesky |
//| decomposition. |
//| Input parameters: |
//| A - Cholesky decomposition, |
//| output of SMatrixCholesky subroutine. |
//| N - (optional) size of matrix A: |
//| * if given, only principal NxN submatrix is |
//| processed and overwritten. other elements are |
//| unchanged. |
//| * if not given, automatically determined from |
//| matrix size (A must be square matrix) |
//| As the determinant is equal to the product of squares of diagonal|
//| elements, it's not necessary to specify which triangle - lower   |
//| or upper - the matrix is stored in. |
//| Result: |
//| matrix determinant. |
//+------------------------------------------------------------------+
double CAlglib::SPDMatrixCholeskyDet(CMatrixDouble &a,const int n)
  {
//--- delegate Cholesky-based determinant to the computational core
   double det=CMatDet::SPDMatrixCholeskyDet(a,n);
   return(det);
  }
//+------------------------------------------------------------------+
//| Determinant calculation of the matrix given by the Cholesky |
//| decomposition. |
//| Input parameters: |
//| A - Cholesky decomposition, |
//| output of SMatrixCholesky subroutine. |
//| N - (optional) size of matrix A: |
//| * if given, only principal NxN submatrix is |
//| processed and overwritten. other elements are |
//| unchanged. |
//| * if not given, automatically determined from |
//| matrix size (A must be square matrix) |
//| As the determinant is equal to the product of squares of diagonal|
//| elements, it's not necessary to specify which triangle - lower   |
//| or upper - the matrix is stored in. |
//| Result: |
//| matrix determinant. |
//+------------------------------------------------------------------+
double CAlglib::SPDMatrixCholeskyDet(CMatrixDouble &a)
  {
//--- only a square matrix is acceptable
   if(CAp::Rows(a)!=CAp::Cols(a))
     {
      Print(__FUNCTION__+": looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return(EMPTY_VALUE);
     }
//--- size is deduced from the matrix itself
   return(CMatDet::SPDMatrixCholeskyDet(a,(int)CAp::Rows(a)));
  }
//+------------------------------------------------------------------+
//| Determinant calculation of the symmetric positive definite |
//| matrix. |
//| Input parameters: |
//| A - matrix. Array with elements [0..N-1, 0..N-1]. |
//| N - (optional) size of matrix A: |
//| * if given, only principal NxN submatrix is |
//| processed and overwritten. other elements are |
//| unchanged. |
//| * if not given, automatically determined from |
//| matrix size (A must be square matrix) |
//| IsUpper - (optional) storage type: |
//| * if True, symmetric matrix A is given by its |
//|                 upper triangle, and the lower triangle isn't     |
//| used/changed by function |
//| * if False, symmetric matrix A is given by its |
//|                 lower triangle, and the upper triangle isn't     |
//| used/changed by function |
//| * if not given, both lower and upper triangles |
//| must be filled. |
//| Result: |
//| determinant of matrix A. |
//| If matrix A is not positive definite, exception is thrown. |
//+------------------------------------------------------------------+
double CAlglib::SPDMatrixDet(CMatrixDouble &a,const int n,
                             const bool IsUpper)
  {
//--- delegate SPD determinant to the computational core
   double det=CMatDet::SPDMatrixDet(a,n,IsUpper);
   return(det);
  }
//+------------------------------------------------------------------+
//| Determinant calculation of the symmetric positive definite |
//| matrix. |
//| Input parameters: |
//| A - matrix. Array with elements [0..N-1, 0..N-1]. |
//| N - (optional) size of matrix A: |
//| * if given, only principal NxN submatrix is |
//| processed and overwritten. other elements are |
//| unchanged. |
//| * if not given, automatically determined from |
//| matrix size (A must be square matrix) |
//| IsUpper - (optional) storage type: |
//| * if True, symmetric matrix A is given by its |
//|                 upper triangle, and the lower triangle isn't     |
//| used/changed by function |
//| * if False, symmetric matrix A is given by its |
//|                 lower triangle, and the upper triangle isn't     |
//| used/changed by function |
//| * if not given, both lower and upper triangles |
//| must be filled. |
//| Result: |
//| determinant of matrix A. |
//| If matrix A is not positive definite, exception is thrown. |
//+------------------------------------------------------------------+
double CAlglib::SPDMatrixDet(CMatrixDouble &a)
  {
//--- only a square matrix is acceptable
   if(CAp::Rows(a)!=CAp::Cols(a))
     {
      Print(__FUNCTION__+": looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return(EMPTY_VALUE);
     }
//--- this overload requires both triangles to be filled
   if(!CAp::IsSymmetric(a))
     {
      Print(__FUNCTION__+": 'a' parameter is not symmetric matrix");
      CAp::exception_happened=true;
      return(EMPTY_VALUE);
     }
//--- size is deduced from the matrix; the lower triangle is used
   return(CMatDet::SPDMatrixDet(a,(int)CAp::Rows(a),false));
  }
//+------------------------------------------------------------------+
//| Algorithm for solving the following generalized symmetric |
//| positive-definite eigenproblem: |
//| A*x = lambda*B*x (1) or |
//| A*B*x = lambda*x (2) or |
//| B*A*x = lambda*x (3). |
//| where A is a symmetric matrix, B - symmetric positive-definite |
//| matrix. The problem is solved by reducing it to an ordinary |
//| symmetric eigenvalue problem. |
//| Input parameters: |
//| A - symmetric matrix which is given by its upper |
//| or lower triangular part. |
//| Array whose indexes range within |
//| [0..N-1, 0..N-1]. |
//| N - size of matrices A and B. |
//| IsUpperA - storage format of matrix A. |
//| B - symmetric positive-definite matrix which is |
//| given by its upper or lower triangular part. |
//| Array whose indexes range within |
//| [0..N-1, 0..N-1]. |
//| IsUpperB - storage format of matrix B. |
//| ZNeeded - if ZNeeded is equal to: |
//| * 0, the eigenvectors are not returned; |
//| * 1, the eigenvectors are returned. |
//| ProblemType - if ProblemType is equal to: |
//| * 1, the following problem is solved: |
//| A*x = lambda*B*x; |
//| * 2, the following problem is solved: |
//| A*B*x = lambda*x; |
//| * 3, the following problem is solved: |
//| B*A*x = lambda*x. |
//| Output parameters: |
//| D - eigenvalues in ascending order. |
//| Array whose index ranges within [0..N-1]. |
//| Z - if ZNeeded is equal to: |
//|                     * 0, Z hasn't changed;                       |
//| * 1, Z contains eigenvectors. |
//| Array whose indexes range within |
//| [0..N-1, 0..N-1]. |
//| The eigenvectors are stored in matrix |
//| columns. It should be noted that the |
//| eigenvectors in such problems do not form an |
//| orthogonal system. |
//| Result: |
//| True, if the problem was solved successfully. |
//| False, if the error occurred during the Cholesky |
//|         decomposition of matrix B (the matrix isn't              |
//| positive-definite) or during the work of the iterative |
//| algorithm for solving the symmetric eigenproblem. |
//| See also the GeneralizedSymmetricDefiniteEVDReduce subroutine. |
//+------------------------------------------------------------------+
bool CAlglib::SMatrixGEVD(CMatrixDouble &a,const int n,const bool isuppera,
                          CMatrixDouble &b,const bool isupperb,
                          const int zneeded,const int problemtype,
                          double &d[],CMatrixDouble &z)
  {
//--- hand the generalized symmetric eigenproblem over to the core;
//--- true is returned when the problem was solved successfully
   bool ok=CSpdGEVD::SMatrixGEVD(a,n,isuppera,b,isupperb,zneeded,problemtype,d,z);
   return(ok);
  }
//+------------------------------------------------------------------+
//| Algorithm for reduction of the following generalized symmetric |
//| positive- definite eigenvalue problem: |
//| A*x = lambda*B*x (1) or |
//| A*B*x = lambda*x (2) or |
//| B*A*x = lambda*x (3) |
//| to the symmetric eigenvalues problem C*y = lambda*y (eigenvalues |
//| of this and the given problems are the same, and the eigenvectors|
//| of the given problem could be obtained by multiplying the |
//| obtained eigenvectors by the transformation matrix x = R*y). |
//| Here A is a symmetric matrix, B - symmetric positive-definite |
//| matrix. |
//| Input parameters: |
//| A - symmetric matrix which is given by its upper |
//| or lower triangular part. |
//| Array whose indexes range within |
//| [0..N-1, 0..N-1]. |
//| N - size of matrices A and B. |
//| IsUpperA - storage format of matrix A. |
//| B - symmetric positive-definite matrix which is |
//| given by its upper or lower triangular part. |
//| Array whose indexes range within |
//| [0..N-1, 0..N-1]. |
//| IsUpperB - storage format of matrix B. |
//| ProblemType - if ProblemType is equal to: |
//| * 1, the following problem is solved: |
//| A*x = lambda*B*x; |
//| * 2, the following problem is solved: |
//| A*B*x = lambda*x; |
//| * 3, the following problem is solved: |
//| B*A*x = lambda*x. |
//| Output parameters: |
//| A - symmetric matrix which is given by its upper |
//| or lower triangle depending on IsUpperA. |
//| Contains matrix C. Array whose indexes range |
//| within [0..N-1, 0..N-1]. |
//| R - upper triangular or low triangular |
//| transformation matrix which is used to obtain|
//| the eigenvectors of a given problem as the |
//| product of eigenvectors of C (from the right)|
//| and matrix R (from the left). If the matrix |
//| is upper triangular, the elements below the |
//| main diagonal are equal to 0 (and vice versa)|
//| Thus, we can perform the multiplication |
//| without taking into account the internal |
//| structure (which is an easier though less |
//| effective way). Array whose indexes range |
//| within [0..N-1, 0..N-1]. |
//| IsUpperR - type of matrix R (upper or lower triangular).|
//| Result: |
//| True, if the problem was reduced successfully. |
//| False, if the error occurred during the Cholesky |
//| decomposition of matrix B (the matrix is not |
//| positive-definite). |
//+------------------------------------------------------------------+
bool CAlglib::SMatrixGEVDReduce(CMatrixDouble &a,const int n,
                                const bool isuppera,CMatrixDouble &b,
                                const bool isupperb,const int problemtype,
                                CMatrixDouble &r,bool &isupperr)
  {
//--- start the output flag from a well-defined state
   isupperr=false;
//--- hand the reduction over to the computational core
   bool ok=CSpdGEVD::SMatrixGEVDReduce(a,n,isuppera,b,isupperb,problemtype,r,isupperr);
   return(ok);
  }
//+------------------------------------------------------------------+
//| Inverse matrix update by the Sherman-Morrison formula |
//| The algorithm updates matrix A^-1 when adding a number to an |
//| element of matrix A. |
//| Input parameters: |
//| InvA - inverse of matrix A. |
//| Array whose indexes range within |
//| [0..N-1, 0..N-1]. |
//| N - size of matrix A. |
//| UpdRow - row where the element to be updated is stored. |
//| UpdColumn - column where the element to be updated is stored.|
//| UpdVal - a number to be added to the element. |
//| Output parameters: |
//| InvA - inverse of modified matrix A. |
//+------------------------------------------------------------------+
void CAlglib::RMatrixInvUpdateSimple(CMatrixDouble &inva,const int n,
                                     const int updrow,const int updcolumn,
                                     const double updval)
  {
//--- forward the Sherman-Morrison single-element update to the core;
//--- inva is modified in place
   CInverseUpdate::RMatrixInvUpdateSimple(inva,n,updrow,updcolumn,updval);
  }
//+------------------------------------------------------------------+
//| Inverse matrix update by the Sherman-Morrison formula |
//| The algorithm updates matrix A^-1 when adding a vector to a row |
//| of matrix A. |
//| Input parameters: |
//| InvA - inverse of matrix A. |
//| Array whose indexes range within |
//| [0..N-1, 0..N-1]. |
//| N - size of matrix A. |
//| UpdRow - the row of A whose vector V was added. |
//| 0 <= Row <= N-1 |
//| V - the vector to be added to a row. |
//| Array whose index ranges within [0..N-1]. |
//| Output parameters: |
//| InvA - inverse of modified matrix A. |
//+------------------------------------------------------------------+
void CAlglib::RMatrixInvUpdateRow(CMatrixDouble &inva,const int n,
                                  const int updrow,double &v[])
  {
//--- forward the Sherman-Morrison row update to the core;
//--- inva is modified in place
   CInverseUpdate::RMatrixInvUpdateRow(inva,n,updrow,v);
  }
//+------------------------------------------------------------------+
//| Inverse matrix update by the Sherman-Morrison formula |
//| The algorithm updates matrix A^-1 when adding a vector to a |
//| column of matrix A. |
//| Input parameters: |
//| InvA - inverse of matrix A. |
//| Array whose indexes range within |
//| [0..N-1, 0..N-1]. |
//| N - size of matrix A. |
//| UpdColumn - the column of A whose vector U was added. |
//| 0 <= UpdColumn <= N-1 |
//| U - the vector to be added to a column. |
//| Array whose index ranges within [0..N-1]. |
//| Output parameters: |
//| InvA - inverse of modified matrix A. |
//+------------------------------------------------------------------+
void CAlglib::RMatrixInvUpdateColumn(CMatrixDouble &inva,const int n,
                                     const int updcolumn,double &u[])
  {
//--- forward the Sherman-Morrison column update to the core;
//--- inva is modified in place
   CInverseUpdate::RMatrixInvUpdateColumn(inva,n,updcolumn,u);
  }
//+------------------------------------------------------------------+
//| Inverse matrix update by the Sherman-Morrison formula |
//| The algorithm computes the inverse of matrix A+u*v' by using the |
//| given matrix A^-1 and the vectors u and v. |
//| Input parameters: |
//| InvA - inverse of matrix A. |
//| Array whose indexes range within |
//| [0..N-1, 0..N-1]. |
//| N - size of matrix A. |
//| U - the vector modifying the matrix. |
//| Array whose index ranges within [0..N-1]. |
//| V - the vector modifying the matrix. |
//| Array whose index ranges within [0..N-1]. |
//| Output parameters: |
//| InvA - inverse of matrix A + u*v'. |
//+------------------------------------------------------------------+
void CAlglib::RMatrixInvUpdateUV(CMatrixDouble &inva,const int n,
                                 double &u[],double &v[])
  {
//--- forward the rank-1 (u*v') Sherman-Morrison update to the core;
//--- inva is modified in place
   CInverseUpdate::RMatrixInvUpdateUV(inva,n,u,v);
  }
//+------------------------------------------------------------------+
//| Subroutine performing the Schur decomposition of a general matrix|
//| by using the QR algorithm with multiple shifts. |
//| The source matrix A is represented as S'*A*S = T, where S is an |
//| orthogonal matrix (Schur vectors), T - upper quasi-triangular |
//| matrix (with blocks of sizes 1x1 and 2x2 on the main diagonal). |
//| Input parameters: |
//| A - matrix to be decomposed. |
//| Array whose indexes range within [0..N-1, 0..N-1]. |
//| N - size of A, N>=0. |
//| Output parameters: |
//| A - contains matrix T. |
//| Array whose indexes range within [0..N-1, 0..N-1]. |
//| S - contains Schur vectors. |
//| Array whose indexes range within [0..N-1, 0..N-1]. |
//| Note 1: |
//| The block structure of matrix T can be easily recognized: |
//| since all the elements below the blocks are zeros, the |
//| elements a[i+1,i] which are equal to 0 show the block border.|
//| Note 2: |
//| The algorithm performance depends on the value of the |
//| internal parameter NS of the InternalSchurDecomposition |
//| subroutine which defines the number of shifts in the QR |
//| algorithm (similarly to the block width in block-matrix |
//| algorithms in linear algebra). If you require maximum |
//| performance on your machine, it is recommended to adjust |
//| this parameter manually. |
//| Result: |
//| True, |
//| if the algorithm has converged and parameters A and S |
//| contain the result. |
//| False, |
//| if the algorithm has not converged. |
//| Algorithm implemented on the basis of the DHSEQR subroutine |
//| (LAPACK 3.0 library). |
//+------------------------------------------------------------------+
bool CAlglib::RMatrixSchur(CMatrixDouble &a,const int n,CMatrixDouble &s)
  {
//--- hand the Schur decomposition over to the computational core;
//--- true is returned when the QR iterations converged
   bool converged=CSchur::RMatrixSchur(a,n,s);
   return(converged);
  }
//+------------------------------------------------------------------+
//| NONLINEAR CONJUGATE GRADIENT METHOD |
//| DESCRIPTION: |
//| The subroutine minimizes function F(x) of N arguments by using |
//| one of the nonlinear conjugate gradient methods. |
//| These CG methods are globally convergent (even on non-convex |
//| functions) as long as grad(f) is Lipschitz continuous in a some |
//| neighborhood of the L = { x : f(x)<=f(x0) }. |
//| REQUIREMENTS: |
//| Algorithm will request following information during its |
//| operation: |
//| * function value F and its gradient G (simultaneously) at given |
//| point X |
//| USAGE: |
//| 1. User initializes algorithm state with MinCGCreate() call |
//| 2. User tunes solver parameters with MinCGSetCond(), |
//| MinCGSetStpMax() and other functions |
//| 3. User calls MinCGOptimize() function which takes algorithm |
//| state and pointer (delegate, etc.) to callback function which |
//| calculates F/G. |
//| 4. User calls MinCGResults() to get solution |
//| 5. Optionally, user may call MinCGRestartFrom() to solve another |
//| problem with same N but another starting point and/or another |
//| function. MinCGRestartFrom() allows to reuse already |
//| initialized structure. |
//| INPUT PARAMETERS: |
//| N - problem dimension, N>0: |
//| * if given, only leading N elements of X are used|
//| * if not given, automatically determined from |
//| size of X |
//| X - starting point, array[0..N-1]. |
//| OUTPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//+------------------------------------------------------------------+
void CAlglib::MinCGCreate(const int n,double &x[],CMinCGStateShell &state)
  {
//--- initialize the wrapped optimizer state for an N-dimensional
//--- problem with starting point x
   CMinCG::MinCGCreate(n,x,state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| NONLINEAR CONJUGATE GRADIENT METHOD |
//| DESCRIPTION: |
//| The subroutine minimizes function F(x) of N arguments by using |
//| one of the nonlinear conjugate gradient methods. |
//| These CG methods are globally convergent (even on non-convex |
//| functions) as long as grad(f) is Lipschitz continuous in a some |
//| neighborhood of the L = { x : f(x)<=f(x0) }. |
//| REQUIREMENTS: |
//| Algorithm will request following information during its |
//| operation: |
//| * function value F and its gradient G (simultaneously) at given |
//| point X |
//| USAGE: |
//| 1. User initializes algorithm state with MinCGCreate() call |
//| 2. User tunes solver parameters with MinCGSetCond(), |
//| MinCGSetStpMax() and other functions |
//| 3. User calls MinCGOptimize() function which takes algorithm |
//| state and pointer (delegate, etc.) to callback function which |
//| calculates F/G. |
//| 4. User calls MinCGResults() to get solution |
//| 5. Optionally, user may call MinCGRestartFrom() to solve another |
//| problem with same N but another starting point and/or another |
//| function. MinCGRestartFrom() allows to reuse already |
//| initialized structure. |
//| INPUT PARAMETERS: |
//| N - problem dimension, N>0: |
//| * if given, only leading N elements of X are used|
//| * if not given, automatically determined from |
//| size of X |
//| X - starting point, array[0..N-1]. |
//| OUTPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//+------------------------------------------------------------------+
void CAlglib::MinCGCreate(double &x[],CMinCGStateShell &state)
  {
//--- the problem dimension is taken from the length of the
//--- starting point
   CMinCG::MinCGCreate(CAp::Len(x),x,state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| The subroutine is finite difference variant of MinCGCreate(). |
//| It uses finite differences in order to differentiate target |
//| function. |
//| Description below contains information which is specific to this |
//| function only. We recommend to read comments on MinCGCreate() in |
//| order to get more information about creation of CG optimizer. |
//| INPUT PARAMETERS: |
//| N - problem dimension, N>0: |
//| * if given, only leading N elements of X are |
//| used |
//| * if not given, automatically determined from |
//| size of X |
//| X - starting point, array[0..N-1]. |
//| DiffStep- differentiation step, >0 |
//| OUTPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| NOTES: |
//| 1. algorithm uses 4-point central formula for differentiation. |
//| 2. differentiation step along I-th axis is equal to |
//| DiffStep*S[I] where S[] is scaling vector which can be set by |
//| MinCGSetScale() call. |
//| 3. we recommend you to use moderate values of differentiation |
//| step. Too large step will result in too large truncation |
//| errors, while too small step will result in too large |
//| numerical errors. 1.0E-6 can be good value to start with. |
//| 4. Numerical differentiation is very inefficient - one gradient |
//| calculation needs 4*N function evaluations. This function will|
//| work for any N - either small (1...10), moderate (10...100) or|
//| large (100...). However, performance penalty will be too |
//| severe for any N's except for small ones. |
//| We should also say that code which relies on numerical |
//| differentiation is less robust and precise. L-BFGS needs |
//| exact gradient values. Imprecise gradient may slow down |
//| convergence, especially on highly nonlinear problems. |
//| Thus we recommend to use this function for fast prototyping |
//| on small- dimensional problems only, and to implement |
//| analytical gradient as soon as possible. |
//+------------------------------------------------------------------+
void CAlglib::MinCGCreateF(const int n,double &x[],double diffstep,
                           CMinCGStateShell &state)
  {
//--- initialize the finite-difference variant of the CG optimizer
//--- (gradient approximated with step diffstep)
   CMinCG::MinCGCreateF(n,x,diffstep,state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| The subroutine is finite difference variant of MinCGCreate(). |
//| It uses finite differences in order to differentiate target |
//| function. |
//| Description below contains information which is specific to this |
//| function only. We recommend to read comments on MinCGCreate() in |
//| order to get more information about creation of CG optimizer. |
//| INPUT PARAMETERS: |
//| N - problem dimension, N>0: |
//| * if given, only leading N elements of X are |
//| used |
//| * if not given, automatically determined from |
//| size of X |
//| X - starting point, array[0..N-1]. |
//| DiffStep- differentiation step, >0 |
//| OUTPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| NOTES: |
//| 1. algorithm uses 4-point central formula for differentiation. |
//| 2. differentiation step along I-th axis is equal to |
//| DiffStep*S[I] where S[] is scaling vector which can be set by |
//| MinCGSetScale() call. |
//| 3. we recommend you to use moderate values of differentiation |
//| step. Too large step will result in too large truncation |
//| errors, while too small step will result in too large |
//| numerical errors. 1.0E-6 can be good value to start with. |
//| 4. Numerical differentiation is very inefficient - one gradient |
//| calculation needs 4*N function evaluations. This function will|
//| work for any N - either small (1...10), moderate (10...100) or|
//| large (100...). However, performance penalty will be too |
//| severe for any N's except for small ones. |
//| We should also say that code which relies on numerical |
//| differentiation is less robust and precise. L-BFGS needs |
//| exact gradient values. Imprecise gradient may slow down |
//| convergence, especially on highly nonlinear problems. |
//| Thus we recommend to use this function for fast prototyping |
//| on small- dimensional problems only, and to implement |
//| analytical gradient as soon as possible. |
//+------------------------------------------------------------------+
void CAlglib::MinCGCreateF(double &x[],double diffstep,
                           CMinCGStateShell &state)
  {
//--- the problem dimension is taken from the length of the
//--- starting point
   CMinCG::MinCGCreateF(CAp::Len(x),x,diffstep,state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| This function sets stopping conditions for CG optimization |
//| algorithm. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| EpsG - >=0 |
//| The subroutine finishes its work if the condition|
//| |v|<EpsG is satisfied, where: |
//| * |.| means Euclidian norm |
//| * v - scaled gradient vector, v[i]=g[i]*s[i] |
//| * g - gradient |
//| * s - scaling coefficients set by MinCGSetScale()|
//| EpsF - >=0 |
//| The subroutine finishes its work if on k+1-th |
//| iteration the condition |F(k+1)-F(k)| <= |
//| <= EpsF*max{|F(k)|,|F(k+1)|,1} is satisfied. |
//| EpsX - >=0 |
//| The subroutine finishes its work if on k+1-th |
//| iteration the condition |v|<=EpsX is fulfilled, |
//| where: |
//| * |.| means Euclidian norm |
//| * v - scaled step vector, v[i]=dx[i]/s[i] |
//| * dx - ste pvector, dx=X(k+1)-X(k) |
//| * s - scaling coefficients set by MinCGSetScale()|
//| MaxIts - maximum number of iterations. If MaxIts=0, the |
//| number of iterations is unlimited. |
//| Passing EpsG=0, EpsF=0, EpsX=0 and MaxIts=0 (simultaneously) will|
//| lead to automatic stopping criterion selection (small EpsX). |
//+------------------------------------------------------------------+
void CAlglib::MinCGSetCond(CMinCGStateShell &state,double epsg,
                           double epsf,double epsx,int maxits)
  {
//--- forward stopping tolerances (gradient/function/step) and the
//--- iteration limit to the underlying optimizer state
   CMinCG::MinCGSetCond(state.GetInnerObj(),epsg,epsf,epsx,maxits);
  }
//+------------------------------------------------------------------+
//| This function sets scaling coefficients for CG optimizer. |
//| ALGLIB optimizers use scaling matrices to test stopping |
//| conditions (step size and gradient are scaled before comparison |
//| with tolerances). Scale of the I-th variable is a translation |
//| invariant measure of: |
//| a) "how large" the variable is |
//| b) how large the step should be to make significant changes in |
//| the function |
//| Scaling is also used by finite difference variant of CG |
//| optimizer - step along I-th axis is equal to DiffStep*S[I]. |
//| In most optimizers (and in the CG too) scaling is NOT a form of |
//| preconditioning. It just affects stopping conditions. You should |
//| set preconditioner by separate call to one of the |
//| MinCGSetPrec...() functions. |
//| There is special preconditioning mode, however, which uses |
//| scaling coefficients to form diagonal preconditioning matrix. |
//| You can turn this mode on, if you want. But you should understand|
//| that scaling is not the same thing as preconditioning - these are|
//| two different, although related forms of tuning solver. |
//| INPUT PARAMETERS: |
//| State - structure stores algorithm state |
//| S - array[N], non-zero scaling coefficients |
//| S[i] may be negative, sign doesn't matter. |
//+------------------------------------------------------------------+
void CAlglib::MinCGSetScale(CMinCGStateShell &state,double &s[])
  {
//--- thin wrapper: pass scaling coefficients through to the core optimizer
   CMinCG::MinCGSetScale(state.GetInnerObj(),s);
  }
//+------------------------------------------------------------------+
//| This function turns on/off reporting. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| NeedXRep- whether iteration reports are needed or not |
//| If NeedXRep is True, algorithm will call rep() callback function |
//| if it is provided to MinCGOptimize(). |
//+------------------------------------------------------------------+
void CAlglib::MinCGSetXRep(CMinCGStateShell &state,bool needxrep)
  {
//--- thin wrapper: enable/disable per-iteration reporting in the core optimizer
   CMinCG::MinCGSetXRep(state.GetInnerObj(),needxrep);
  }
//+------------------------------------------------------------------+
//| This function sets CG algorithm. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| CGType - algorithm type: |
//| * -1 automatic selection of the best |
//| algorithm |
//| * 0 DY (Dai and Yuan) algorithm |
//| * 1 Hybrid DY-HS algorithm |
//+------------------------------------------------------------------+
void CAlglib::MinCGSetCGType(CMinCGStateShell &state,int cgtype)
  {
//--- thin wrapper: select CG update formula (-1 auto, 0 DY, 1 hybrid DY-HS)
   CMinCG::MinCGSetCGType(state.GetInnerObj(),cgtype);
  }
//+------------------------------------------------------------------+
//| This function sets maximum step length |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| StpMax - maximum step length, >=0. Set StpMax to 0.0, if |
//| you don't want to limit step length. |
//| Use this subroutine when you optimize target function which |
//| contains exp() or other fast growing functions, and optimization |
//| algorithm makes too large steps which leads to overflow. This |
//| function allows us to reject steps that are too large (and |
//| therefore expose us to the possible overflow) without actually |
//| calculating function value at the x+stp*d. |
//+------------------------------------------------------------------+
void CAlglib::MinCGSetStpMax(CMinCGStateShell &state,double stpmax)
  {
//--- thin wrapper: set upper bound on step length (0 = unlimited)
   CMinCG::MinCGSetStpMax(state.GetInnerObj(),stpmax);
  }
//+------------------------------------------------------------------+
//| This function allows to suggest initial step length to the CG |
//| algorithm. |
//| Suggested step length is used as starting point for the line |
//| search. It can be useful when you have badly scaled problem, i.e.|
//| when ||grad|| (which is used as initial estimate for the first |
//| step) is many orders of magnitude different from the desired |
//| step. |
//| Line search may fail on such problems without good estimate of |
//| initial step length. Imagine, for example, problem with |
//| ||grad||=10^50 and desired step equal to 0.1 Line search |
//| function will use 10^50 as initial step, then it will decrease |
//| step length by 2 (up to 20 attempts) and will get 10^44, which is|
//| still too large. |
//| This function allows us to tell than line search should be |
//| started from some moderate step length, like 1.0, so algorithm |
//| will be able to detect desired step length in a several searches.|
//| Default behavior (when no step is suggested) is to use |
//| preconditioner, if it is available, to generate initial estimate |
//| of step length. |
//| This function influences only first iteration of algorithm. It |
//| should be called between MinCGCreate/MinCGRestartFrom() call and |
//| MinCGOptimize call. Suggested step is ignored if you have |
//| preconditioner. |
//| INPUT PARAMETERS: |
//| State - structure used to store algorithm state. |
//| Stp - initial estimate of the step length. |
//| Can be zero (no estimate). |
//+------------------------------------------------------------------+
void CAlglib::MinCGSuggestStep(CMinCGStateShell &state,double stp)
  {
//--- thin wrapper: suggest initial line-search step (affects first iteration only)
   CMinCG::MinCGSuggestStep(state.GetInnerObj(),stp);
  }
//+------------------------------------------------------------------+
//| Modification of the preconditioner: preconditioning is turned |
//| off. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| NOTE: you can change preconditioner "on the fly", during |
//| algorithm iterations. |
//+------------------------------------------------------------------+
void CAlglib::MinCGSetPrecDefault(CMinCGStateShell &state)
  {
//--- thin wrapper: switch core optimizer back to default (no) preconditioning
   CMinCG::MinCGSetPrecDefault(state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Modification of the preconditioner: diagonal of approximate |
//| Hessian is used. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| D - diagonal of the approximate Hessian, |
//| array[0..N-1], (if larger, only leading N |
//| elements are used). |
//| NOTE: you can change preconditioner "on the fly", during |
//| algorithm iterations. |
//| NOTE 2: D[i] should be positive. Exception will be thrown |
//| otherwise. |
//| NOTE 3: you should pass diagonal of approximate Hessian - NOT |
//| ITS INVERSE. |
//+------------------------------------------------------------------+
void CAlglib::MinCGSetPrecDiag(CMinCGStateShell &state,double &d[])
  {
//--- thin wrapper: install diagonal preconditioner (d = approximate Hessian diagonal)
   CMinCG::MinCGSetPrecDiag(state.GetInnerObj(),d);
  }
//+------------------------------------------------------------------+
//| Modification of the preconditioner: scale-based diagonal |
//| preconditioning. |
//| This preconditioning mode can be useful when you don't have |
//| approximate diagonal of Hessian, but you know that your variables|
//| are badly scaled (for example, one variable is in [1,10], and |
//| another in [1000,100000]), and most part of the ill-conditioning |
//| comes from different scales of vars. |
//| In this case simple scale-based preconditioner, |
//| with H[i] = 1/(s[i]^2), can greatly improve convergence. |
//| IMPORTANT: you should set scale of your variables with           |
//| MinCGSetScale() call (before or after MinCGSetPrecScale() call). |
//| Without knowledge of the scale of your variables scale-based |
//| preconditioner will be just unit matrix. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| NOTE: you can change preconditioner "on the fly", during |
//| algorithm iterations. |
//+------------------------------------------------------------------+
void CAlglib::MinCGSetPrecScale(CMinCGStateShell &state)
  {
//--- thin wrapper: enable scale-based diagonal preconditioning, H[i]=1/s[i]^2
   CMinCG::MinCGSetPrecScale(state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| This function provides reverse communication interface |
//| Reverse communication interface is not documented or recommended |
//| to use. |
//| See below for functions which provide better documented API |
//+------------------------------------------------------------------+
bool CAlglib::MinCGIteration(CMinCGStateShell &state)
  {
//--- reverse-communication step: returns true while the optimizer still
//--- requests callbacks (function value / gradient / report), false when done
   return(CMinCG::MinCGIteration(state.GetInnerObj()));
  }
//+------------------------------------------------------------------+
//| This family of functions is used to launch iterations of         |
//| nonlinear optimizer |
//| These functions accept following parameters: |
//| func - callback which calculates function (or merit |
//| function) value func at given point x |
//| grad - callback which calculates function (or merit |
//| function) value func and gradient grad at given |
//| point x |
//| rep - optional callback which is called after each |
//| iteration can be null |
//| obj - optional object which is passed to |
//| func/grad/hess/jac/rep can be null |
//| NOTES: |
//| 1. This function has two different implementations: one which |
//| uses exact (analytical) user-supplied gradient, and one which|
//| uses function value only and numerically differentiates |
//| function in order to obtain gradient. |
//| Depending on the specific function used to create optimizer |
//| object (either MinCGCreate() for analytical gradient or |
//| MinCGCreateF() for numerical differentiation) you should |
//| choose appropriate variant of MinCGOptimize() - one which |
//| accepts function AND gradient or one which accepts function |
//| ONLY. |
//| Be careful to choose variant of MinCGOptimize() which |
//| corresponds to your optimization scheme! Table below lists |
//| different combinations of callback (function/gradient) passed |
//| to MinCGOptimize() and specific function used to create |
//| optimizer. |
//| | USER PASSED TO MinCGOptimize() |
//| CREATED WITH | function only | function and gradient |
//| ------------------------------------------------------------ |
//| MinCGCreateF() | work FAIL |
//| MinCGCreate() | FAIL work |
//| Here "FAIL" denotes inappropriate combinations of optimizer |
//| creation function and MinCGOptimize() version. Attemps to use |
//| such combination (for example, to create optimizer with |
//| MinCGCreateF() and to pass gradient information to |
//| MinCGOptimize()) will lead to exception being thrown. Either |
//| you did not pass gradient when it WAS needed or you passed |
//| gradient when it was NOT needed. |
//+------------------------------------------------------------------+
void CAlglib::MinCGOptimize(CMinCGStateShell &state,CNDimensional_Func &func,
                            CNDimensional_Rep &rep,bool rep_status,CObject &obj)
  {
//--- drive the reverse-communication loop until the optimizer stops
//--- issuing requests; this variant serves function values only
//--- (optimizer must have been created with MinCGCreateF())
   while(CAlglib::MinCGIteration(state))
     {
      //--- request: function value at the current point
      if(state.GetNeedF())
         func.Func(state.GetInnerObj().m_x,state.GetInnerObj().m_f,obj);
      else
        {
         //--- request: a new iterate was accepted - report it if enabled
         if(state.GetInnerObj().m_xupdated)
           {
            if(rep_status)
               rep.Rep(state.GetInnerObj().m_x,state.GetInnerObj().m_f,obj);
           }
         else
           {
            //--- any other request cannot be served by a function-only callback
            Print("ALGLIB: error in 'mincgoptimize' (some derivatives were not provided?)");
            CAp::exception_happened=true;
            break;
           }
        }
     }
  }
//+------------------------------------------------------------------+
//| This family of functions is used to launch iterations of         |
//| nonlinear optimizer |
//| These functions accept following parameters: |
//| func - callback which calculates function (or merit |
//| function) value func at given point x |
//| grad - callback which calculates function (or merit |
//| function) value func and gradient grad at given |
//| point x |
//| rep - optional callback which is called after each |
//| iteration can be null |
//| obj - optional object which is passed to |
//| func/grad/hess/jac/rep can be null |
//| NOTES: |
//| 1. This function has two different implementations: one which |
//| uses exact (analytical) user-supplied gradient, and one which|
//| uses function value only and numerically differentiates |
//| function in order to obtain gradient. |
//| Depending on the specific function used to create optimizer |
//| object (either MinCGCreate() for analytical gradient or |
//| MinCGCreateF() for numerical differentiation) you should |
//| choose appropriate variant of MinCGOptimize() - one which |
//| accepts function AND gradient or one which accepts function |
//| ONLY. |
//| Be careful to choose variant of MinCGOptimize() which |
//| corresponds to your optimization scheme! Table below lists |
//| different combinations of callback (function/gradient) passed |
//| to MinCGOptimize() and specific function used to create |
//| optimizer. |
//| | USER PASSED TO MinCGOptimize() |
//| CREATED WITH | function only | function and gradient |
//| ------------------------------------------------------------ |
//| MinCGCreateF() | work FAIL |
//| MinCGCreate() | FAIL work |
//| Here "FAIL" denotes inappropriate combinations of optimizer |
//| creation function and MinCGOptimize() version. Attemps to use |
//| such combination (for example, to create optimizer with |
//| MinCGCreateF() and to pass gradient information to |
//| MinCGOptimize()) will lead to exception being thrown. Either |
//| you did not pass gradient when it WAS needed or you passed |
//| gradient when it was NOT needed. |
//+------------------------------------------------------------------+
void CAlglib::MinCGOptimize(CMinCGStateShell &state,CNDimensional_Grad &grad,
                            CNDimensional_Rep &rep,bool rep_status,CObject &obj)
  {
//--- drive the reverse-communication loop until the optimizer stops
//--- issuing requests; this variant serves analytical gradients
//--- (optimizer must have been created with MinCGCreate())
   while(CAlglib::MinCGIteration(state))
     {
      //--- request: function value and gradient at the current point
      if(state.GetNeedFG())
         grad.Grad(state.GetInnerObj().m_x,state.GetInnerObj().m_f,state.GetInnerObj().m_g,obj);
      else
        {
         //--- request: a new iterate was accepted - report it if enabled
         if(state.GetInnerObj().m_xupdated)
           {
            if(rep_status)
               rep.Rep(state.GetInnerObj().m_x,state.GetInnerObj().m_f,obj);
           }
         else
           {
            //--- any other request means creation/optimize variants are mismatched
            Print("ALGLIB: error in 'mincgoptimize' (some derivatives were not provided?)");
            CAp::exception_happened=true;
            break;
           }
        }
     }
  }
//+------------------------------------------------------------------+
//| Conjugate gradient results |
//| INPUT PARAMETERS: |
//| State - algorithm state |
//| OUTPUT PARAMETERS: |
//| X - array[0..N-1], solution |
//| Rep - optimization report: |
//|                * Rep.TerminationType completion code:            |
//| * 1 relative function improvement is no |
//| more than EpsF. |
//| * 2 relative step is no more than EpsX. |
//| * 4 gradient norm is no more than EpsG |
//| * 5 MaxIts steps was taken |
//| * 7 stopping conditions are too |
//| stringent, further improvement is |
//| impossible, we return best X found |
//| so far |
//| * 8 terminated by user |
//| * Rep.IterationsCount contains iterations count |
//|                * NFEV contains number of function calculations   |
//+------------------------------------------------------------------+
void CAlglib::MinCGResults(CMinCGStateShell &state,double &x[],
                           CMinCGReportShell &rep)
  {
//--- thin wrapper: extract solution vector and optimization report
   CMinCG::MinCGResults(state.GetInnerObj(),x,rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Conjugate gradient results |
//| Buffered implementation of MinCGResults(), which uses |
//| pre-allocated buffer to store X[]. If buffer size is too small, |
//| it resizes buffer.It is intended to be used in the inner cycles |
//| of performance critical algorithms where array reallocation |
//| penalty is too large to be ignored. |
//+------------------------------------------------------------------+
void CAlglib::MinCGResultsBuf(CMinCGStateShell &state,double &x[],
                              CMinCGReportShell &rep)
  {
//--- thin wrapper: buffered variant - reuses caller-allocated x[] when possible
   CMinCG::MinCGResultsBuf(state.GetInnerObj(),x,rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| This subroutine restarts CG algorithm from new point. All |
//| optimization parameters are left unchanged. |
//| This function allows to solve multiple optimization problems |
//| (which must have same number of dimensions) without object |
//| reallocation penalty. |
//| INPUT PARAMETERS: |
//| State - structure used to store algorithm state. |
//| X - new starting point. |
//+------------------------------------------------------------------+
void CAlglib::MinCGRestartFrom(CMinCGStateShell &state,double &x[])
  {
//--- thin wrapper: restart optimization from a new point, keeping all settings
   CMinCG::MinCGRestartFrom(state.GetInnerObj(),x);
  }
//+------------------------------------------------------------------+
//| This subroutine submits request for termination of running |
//| optimizer. It should be called from user-supplied callback when |
//| user decides that it is time to "smoothly" terminate optimization|
//| process. As result, optimizer stops at point which was "current |
//| accepted" when termination request was submitted and returns |
//| error code 8 (successful termination). |
//| INPUT PARAMETERS: |
//| State - optimizer structure |
//| NOTE: after request for termination optimizer may perform several|
//| additional calls to user-supplied callbacks. It does NOT |
//| guarantee to stop immediately - it just guarantees that |
//| these additional calls will be discarded later. |
//| NOTE: calling this function on optimizer which is NOT running |
//| will have no effect. |
//| NOTE: multiple calls to this function are possible. First call is|
//| counted, subsequent calls are silently ignored. |
//+------------------------------------------------------------------+
void CAlglib::MinLBFGSRequestTermination(CMinLBFGSStateShell &state)
  {
//--- thin wrapper: ask a running L-BFGS optimizer to stop smoothly
//--- (optimizer returns termination code 8 at the last accepted point)
   CMinLBFGS::MinLBFGSRequestTermination(state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| BOUND CONSTRAINED OPTIMIZATION |
//| WITH ADDITIONAL LINEAR EQUALITY AND INEQUALITY CONSTRAINTS|
//| DESCRIPTION: |
//| The subroutine minimizes function F(x) of N arguments subject to |
//| any combination of: |
//| * bound constraints |
//| * linear inequality constraints |
//| * linear equality constraints |
//| REQUIREMENTS: |
//| * user must provide function value and gradient |
//| * starting point X0 must be feasible or |
//| not too far away from the feasible set |
//| * grad(f) must be Lipschitz continuous on a level set: |
//| L = { x : f(x)<=f(x0) } |
//| * function must be defined everywhere on the feasible set F |
//| USAGE: |
//| Constrained optimization is far more complex than the            |
//| unconstrained one. Here we give very brief outline of the BLEIC |
//| optimizer. We strongly recommend you to read examples in the |
//| ALGLIB Reference Manual and to read ALGLIB User Guide on |
//| optimization, which is available at |
//| http://www.alglib.net/optimization/ |
//| 1. User initializes algorithm state with MinBLEICCreate() call |
//| 2. User adds boundary and/or linear constraints by calling       |
//| MinBLEICSetBC() and MinBLEICSetLC() functions. |
//| 3. User sets stopping conditions for underlying unconstrained |
//| solver with MinBLEICSetInnerCond() call. |
//| This function controls accuracy of underlying optimization |
//| algorithm. |
//| 4. User sets stopping conditions for outer iteration by calling |
//| MinBLEICSetOuterCond() function. |
//| This function controls handling of boundary and inequality |
//| constraints. |
//| 5. Additionally, user may set limit on number of internal |
//| iterations by MinBLEICSetMaxIts() call. |
//| This function allows to prevent algorithm from looping |
//| forever. |
//| 6. User calls MinBLEICOptimize() function which takes algorithm |
//| state and pointer (delegate, etc.) to callback function |
//| which calculates F/G. |
//| 7. User calls MinBLEICResults() to get solution |
//| 8. Optionally user may call MinBLEICRestartFrom() to solve |
//| another problem with same N but another starting point. |
//| MinBLEICRestartFrom() allows to reuse already initialized |
//| structure. |
//| INPUT PARAMETERS: |
//| N - problem dimension, N>0: |
//| * if given, only leading N elements of X are |
//| used |
//| * if not given, automatically determined from |
//|                     size of X                                    |
//| X - starting point, array[N]: |
//| * it is better to set X to a feasible point |
//| * but X can be infeasible, in which case |
//| algorithm will try to find feasible point |
//| first, using X as initial approximation. |
//| OUTPUT PARAMETERS: |
//| State - structure stores algorithm state |
//+------------------------------------------------------------------+
void CAlglib::MinBLEICCreate(const int n,double &x[],CMinBLEICStateShell &state)
  {
//--- thin wrapper: create BLEIC optimizer with explicit problem dimension n
   CMinBLEIC::MinBLEICCreate(n,x,state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| BOUND CONSTRAINED OPTIMIZATION |
//| WITH ADDITIONAL LINEAR EQUALITY AND INEQUALITY CONSTRAINTS|
//| DESCRIPTION: |
//| The subroutine minimizes function F(x) of N arguments subject to |
//| any combination of: |
//| * bound constraints |
//| * linear inequality constraints |
//| * linear equality constraints |
//| REQUIREMENTS: |
//| * user must provide function value and gradient |
//| * starting point X0 must be feasible or |
//| not too far away from the feasible set |
//| * grad(f) must be Lipschitz continuous on a level set: |
//| L = { x : f(x)<=f(x0) } |
//| * function must be defined everywhere on the feasible set F |
//| USAGE: |
//| Constrained optimization is far more complex than the            |
//| unconstrained one. Here we give very brief outline of the BLEIC |
//| optimizer. We strongly recommend you to read examples in the |
//| ALGLIB Reference Manual and to read ALGLIB User Guide on |
//| optimization, which is available at |
//| http://www.alglib.net/optimization/ |
//| 1. User initializes algorithm state with MinBLEICCreate() call |
//| 2. User adds boundary and/or linear constraints by calling       |
//| MinBLEICSetBC() and MinBLEICSetLC() functions. |
//| 3. User sets stopping conditions for underlying unconstrained |
//| solver with MinBLEICSetInnerCond() call. |
//| This function controls accuracy of underlying optimization |
//| algorithm. |
//| 4. User sets stopping conditions for outer iteration by calling |
//| MinBLEICSetOuterCond() function. |
//| This function controls handling of boundary and inequality |
//| constraints. |
//| 5. Additionally, user may set limit on number of internal |
//| iterations by MinBLEICSetMaxIts() call. |
//| This function allows to prevent algorithm from looping |
//| forever. |
//| 6. User calls MinBLEICOptimize() function which takes algorithm |
//| state and pointer (delegate, etc.) to callback function |
//| which calculates F/G. |
//| 7. User calls MinBLEICResults() to get solution |
//| 8. Optionally user may call MinBLEICRestartFrom() to solve |
//| another problem with same N but another starting point. |
//| MinBLEICRestartFrom() allows to reuse already initialized |
//| structure. |
//| INPUT PARAMETERS: |
//| N - problem dimension, N>0: |
//| * if given, only leading N elements of X are |
//| used |
//| * if not given, automatically determined from |
//|                     size of X                                    |
//| X - starting point, array[N]: |
//| * it is better to set X to a feasible point |
//| * but X can be infeasible, in which case |
//| algorithm will try to find feasible point |
//| first, using X as initial approximation. |
//| OUTPUT PARAMETERS: |
//| State - structure stores algorithm state |
//+------------------------------------------------------------------+
void CAlglib::MinBLEICCreate(double &x[],CMinBLEICStateShell &state)
  {
//--- convenience overload: problem dimension is deduced from the
//--- length of the starting point x
   int size=CAp::Len(x);
//--- delegate to the computational core
   CMinBLEIC::MinBLEICCreate(size,x,state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| The subroutine is finite difference variant of MinBLEICCreate(). |
//| It uses finite differences in order to differentiate target |
//| function. |
//| Description below contains information which is specific to this |
//| function only. We recommend to read comments on MinBLEICCreate() |
//| in order to get more information about creation of BLEIC |
//| optimizer. |
//| INPUT PARAMETERS: |
//| N - problem dimension, N>0: |
//| * if given, only leading N elements of X are used|
//| * if not given, automatically determined from |
//| size of X |
//| X - starting point, array[0..N-1]. |
//| DiffStep- differentiation step, >0 |
//| OUTPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| NOTES: |
//| 1. algorithm uses 4-point central formula for differentiation. |
//| 2. differentiation step along I-th axis is equal to DiffStep*S[I]|
//| where S[] is scaling vector which can be set by |
//| MinBLEICSetScale() call. |
//| 3. we recommend you to use moderate values of differentiation |
//| step. Too large step will result in too large truncation |
//| errors, while too small step will result in too large |
//| numerical errors. 1.0E-6 can be good value to start with. |
//| 4. Numerical differentiation is very inefficient - one gradient |
//| calculation needs 4*N function evaluations. This function will|
//| work for any N - either small (1...10), moderate (10...100) or|
//| large (100...). However, performance penalty will be too |
//| severe for any N's except for small ones. |
//| We should also say that code which relies on numerical |
//| differentiation is less robust and precise. CG needs exact |
//| gradient values. Imprecise gradient may slow down convergence,|
//| especially on highly nonlinear problems. |
//| Thus we recommend to use this function for fast prototyping on|
//| small - dimensional problems only, and to implement analytical|
//| gradient as soon as possible. |
//+------------------------------------------------------------------+
void CAlglib::MinBLEICCreateF(const int n,double &x[],double diffstep,
                              CMinBLEICStateShell &state)
  {
//--- thin wrapper: create finite-difference BLEIC optimizer (no user gradient)
   CMinBLEIC::MinBLEICCreateF(n,x,diffstep,state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| The subroutine is finite difference variant of MinBLEICCreate(). |
//| It uses finite differences in order to differentiate target |
//| function. |
//| Description below contains information which is specific to this |
//| function only. We recommend to read comments on MinBLEICCreate() |
//| in order to get more information about creation of BLEIC |
//| optimizer. |
//| INPUT PARAMETERS: |
//| N - problem dimension, N>0: |
//| * if given, only leading N elements of X are used|
//| * if not given, automatically determined from |
//| size of X |
//| X - starting point, array[0..N-1]. |
//| DiffStep- differentiation step, >0 |
//| OUTPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| NOTES: |
//| 1. algorithm uses 4-point central formula for differentiation. |
//| 2. differentiation step along I-th axis is equal to DiffStep*S[I]|
//| where S[] is scaling vector which can be set by |
//| MinBLEICSetScale() call. |
//| 3. we recommend you to use moderate values of differentiation |
//| step. Too large step will result in too large truncation |
//| errors, while too small step will result in too large |
//| numerical errors. 1.0E-6 can be good value to start with. |
//| 4. Numerical differentiation is very inefficient - one gradient |
//| calculation needs 4*N function evaluations. This function will|
//| work for any N - either small (1...10), moderate (10...100) or|
//| large (100...). However, performance penalty will be too |
//| severe for any N's except for small ones. |
//| We should also say that code which relies on numerical |
//| differentiation is less robust and precise. CG needs exact |
//| gradient values. Imprecise gradient may slow down convergence,|
//| especially on highly nonlinear problems. |
//| Thus we recommend to use this function for fast prototyping on|
//| small - dimensional problems only, and to implement analytical|
//| gradient as soon as possible. |
//+------------------------------------------------------------------+
void CAlglib::MinBLEICCreateF(double &x[],double diffstep,
                              CMinBLEICStateShell &state)
  {
//--- convenience overload of the finite-difference creator:
//--- problem dimension is deduced from the length of x
   int size=CAp::Len(x);
//--- delegate to the computational core
   CMinBLEIC::MinBLEICCreateF(size,x,diffstep,state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| This function sets boundary constraints for BLEIC optimizer. |
//| Boundary constraints are inactive by default (after initial |
//| creation). They are preserved after algorithm restart with |
//| MinBLEICRestartFrom(). |
//| INPUT PARAMETERS: |
//| State - structure stores algorithm state |
//| BndL - lower bounds, array[N]. |
//| If some (all) variables are unbounded, you may |
//| specify very small number or -INF. |
//| BndU - upper bounds, array[N]. |
//| If some (all) variables are unbounded, you may |
//| specify very large number or +INF. |
//| NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case |
//| I-th variable will be "frozen" at X[i]=BndL[i]=BndU[i]. |
//| NOTE 2: this solver has following useful properties: |
//| * bound constraints are always satisfied exactly |
//| * function is evaluated only INSIDE area specified by bound |
//| constraints, even when numerical differentiation is used |
//| (algorithm adjusts nodes according to boundary constraints) |
//+------------------------------------------------------------------+
void CAlglib::MinBLEICSetBC(CMinBLEICStateShell &state,double &bndl[],
                            double &bndu[])
  {
//--- thin wrapper: set lower/upper bound constraints on the variables
   CMinBLEIC::MinBLEICSetBC(state.GetInnerObj(),bndl,bndu);
  }
//+------------------------------------------------------------------+
//| This function sets linear constraints for BLEIC optimizer. |
//| Linear constraints are inactive by default (after initial |
//| creation). They are preserved after algorithm restart with |
//| MinBLEICRestartFrom(). |
//| INPUT PARAMETERS: |
//| State - structure previously allocated with |
//| MinBLEICCreate call. |
//| C - linear constraints, array[K,N+1]. |
//| Each row of C represents one constraint, either |
//| equality or inequality (see below): |
//| * first N elements correspond to coefficients, |
//| * last element corresponds to the right part. |
//| All elements of C (including right part) must be |
//| finite. |
//| CT - type of constraints, array[K]: |
//| * if CT[i]>0, then I-th constraint is |
//| C[i,*]*x >= C[i,n+1] |
//| * if CT[i]=0, then I-th constraint is |
//| C[i,*]*x = C[i,n+1] |
//| * if CT[i]<0, then I-th constraint is |
//| C[i,*]*x <= C[i,n+1] |
//| K - number of equality/inequality constraints, K>=0: |
//| * if given, only leading K elements of C/CT are |
//| used |
//| * if not given, automatically determined from |
//| sizes of C/CT |
//| NOTE 1: linear (non-bound) constraints are satisfied only |
//| approximately: |
//| * there always exists some minor violation (about Epsilon in |
//| magnitude) due to rounding errors |
//| * numerical differentiation, if used, may lead to function |
//| evaluations outside of the feasible area, because algorithm |
//| does NOT change numerical differentiation formula according to |
//| linear constraints. |
//| If you want constraints to be satisfied exactly, try to |
//| reformulate your problem in such manner that all constraints will|
//| become boundary ones (this kind of constraints is always |
//| satisfied exactly, both in the final solution and in all |
//| intermediate points). |
//+------------------------------------------------------------------+
void CAlglib::MinBLEICSetLC(CMinBLEICStateShell &state,CMatrixDouble &c,
                            int &ct[],const int k)
  {
//--- thin wrapper: set k linear constraints (c holds coefficients + right part,
//--- ct holds the constraint type for each row)
   CMinBLEIC::MinBLEICSetLC(state.GetInnerObj(),c,ct,k);
  }
//+------------------------------------------------------------------+
//| This function sets linear constraints for BLEIC optimizer. |
//| Linear constraints are inactive by default (after initial |
//| creation). They are preserved after algorithm restart with |
//| MinBLEICRestartFrom(). |
//| INPUT PARAMETERS: |
//| State - structure previously allocated with |
//| MinBLEICCreate call. |
//| C - linear constraints, array[K,N+1]. |
//| Each row of C represents one constraint, either |
//| equality or inequality (see below): |
//| * first N elements correspond to coefficients, |
//| * last element corresponds to the right part. |
//| All elements of C (including right part) must be |
//| finite. |
//| CT - type of constraints, array[K]: |
//| * if CT[i]>0, then I-th constraint is |
//| C[i,*]*x >= C[i,n+1] |
//| * if CT[i]=0, then I-th constraint is |
//| C[i,*]*x = C[i,n+1] |
//| * if CT[i]<0, then I-th constraint is |
//| C[i,*]*x <= C[i,n+1] |
//| K - number of equality/inequality constraints, K>=0: |
//| * if given, only leading K elements of C/CT are |
//| used |
//| * if not given, automatically determined from |
//| sizes of C/CT |
//| NOTE 1: linear (non-bound) constraints are satisfied only |
//| approximately: |
//| * there always exists some minor violation (about Epsilon in |
//| magnitude) due to rounding errors |
//| * numerical differentiation, if used, may lead to function |
//| evaluations outside of the feasible area, because algorithm |
//| does NOT change numerical differentiation formula according to |
//| linear constraints. |
//| If you want constraints to be satisfied exactly, try to |
//| reformulate your problem in such manner that all constraints will|
//| become boundary ones (this kind of constraints is always |
//| satisfied exactly, both in the final solution and in all |
//| intermediate points). |
//+------------------------------------------------------------------+
void CAlglib::MinBLEICSetLC(CMinBLEICStateShell &state,CMatrixDouble &c,
                            int &ct[])
  {
//--- infer the number of constraints from the height of matrix C
   int k=(int)CAp::Rows(c);
//--- the constraint-type array must have exactly one entry per row of C
   if(k!=CAp::Len(ct))
     {
      Print(__FUNCTION__+": looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return;
     }
//--- delegate to the variant with explicit constraint count
   CMinBLEIC::MinBLEICSetLC(state.GetInnerObj(),c,ct,k);
  }
//+------------------------------------------------------------------+
//| This function sets stopping conditions for the underlying |
//| nonlinear CG optimizer. It controls overall accuracy of solution.|
//| These conditions should be strict enough in order for algorithm |
//| to converge. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| EpsG - >=0 |
//| The subroutine finishes its work if the condition|
//| |v|<EpsG is satisfied, where: |
//| * |.| means Euclidian norm |
//| * v - scaled gradient vector, v[i]=g[i]*s[i] |
//| * g - gradient |
//| * s - scaling coefficients set by |
//| MinBLEICSetScale() |
//| EpsF - >=0 |
//| The subroutine finishes its work if on k+1-th |
//| iteration the condition |F(k+1)-F(k)| <= |
//| <= EpsF*max{|F(k)|,|F(k+1)|,1} is satisfied. |
//| EpsX - >=0 |
//| The subroutine finishes its work if on k+1-th |
//| iteration the condition |v|<=EpsX is fulfilled, |
//| where: |
//| * |.| means Euclidian norm |
//| * v - scaled step vector, v[i]=dx[i]/s[i] |
//| * dx - step vector, dx=X(k+1)-X(k)                               |
//| * s - scaling coefficients set by |
//| MinBLEICSetScale() |
//| Passing EpsG=0, EpsF=0 and EpsX=0 (simultaneously) will lead to |
//| automatic stopping criterion selection. |
//| These conditions are used to terminate inner iterations. However,|
//| you need to tune termination conditions for outer iterations too.|
//+------------------------------------------------------------------+
void CAlglib::MinBLEICSetInnerCond(CMinBLEICStateShell &state,
                                   const double epsg,
                                   const double epsf,
                                   const double epsx)
  {
//--- thin wrapper: pass inner-iteration stopping tolerances
//--- (gradient, function change, step length) to the core optimizer
   CMinBLEIC::MinBLEICSetInnerCond(state.GetInnerObj(),epsg,epsf,epsx);
  }
//+------------------------------------------------------------------+
//| This function sets stopping conditions for outer iteration of |
//| BLEIC algo. |
//| These conditions control accuracy of constraint handling and |
//| amount of infeasibility allowed in the solution. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| EpsX - >0, stopping condition on outer iteration step |
//| length |
//| EpsI - >0, stopping condition on infeasibility |
//| Both EpsX and EpsI must be non-zero. |
//| MEANING OF EpsX |
//| EpsX is a stopping condition for outer iterations. Algorithm will|
//| stop when solution of the current modified subproblem will be |
//| within EpsX (using 2-norm) of the previous solution. |
//| MEANING OF EpsI |
//| EpsI controls feasibility properties - algorithm won't stop until|
//| all inequality constraints will be satisfied with error (distance|
//| from current point to the feasible area) at most EpsI. |
//+------------------------------------------------------------------+
void CAlglib::MinBLEICSetOuterCond(CMinBLEICStateShell &state,
                                   const double epsx,const double epsi)
  {
//--- thin wrapper: pass outer-iteration stopping conditions
//--- (step length EpsX and infeasibility EpsI) to the core optimizer
   CMinBLEIC::MinBLEICSetOuterCond(state.GetInnerObj(),epsx,epsi);
  }
//+------------------------------------------------------------------+
//| This function sets scaling coefficients for BLEIC optimizer. |
//| ALGLIB optimizers use scaling matrices to test stopping |
//| conditions (step size and gradient are scaled before comparison |
//| with tolerances). Scale of the I-th variable is a translation |
//| invariant measure of: |
//| a) "how large" the variable is |
//| b) how large the step should be to make significant changes in |
//| the function |
//| Scaling is also used by finite difference variant of the |
//| optimizer - step along I-th axis is equal to DiffStep*S[I]. |
//| In most optimizers (and in the BLEIC too) scaling is NOT a form |
//| of preconditioning. It just affects stopping conditions. You |
//| should set preconditioner by separate call to one of the |
//| MinBLEICSetPrec...() functions. |
//| There is a special preconditioning mode, however, which uses |
//| scaling coefficients to form diagonal preconditioning matrix. |
//| You can turn this mode on, if you want. But you should understand|
//| that scaling is not the same thing as preconditioning - these are|
//| two different, although related forms of tuning solver. |
//| INPUT PARAMETERS: |
//| State - structure stores algorithm state |
//| S - array[N], non-zero scaling coefficients |
//| S[i] may be negative, sign doesn't matter. |
//+------------------------------------------------------------------+
void CAlglib::MinBLEICSetScale(CMinBLEICStateShell &state,double &s[])
  {
//--- thin wrapper: hand variable scales S over to the core optimizer
   CMinBLEIC::MinBLEICSetScale(state.GetInnerObj(),s);
  }
//+------------------------------------------------------------------+
//| Modification of the preconditioner: preconditioning is turned |
//| off. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//+------------------------------------------------------------------+
void CAlglib::MinBLEICSetPrecDefault(CMinBLEICStateShell &state)
  {
//--- thin wrapper: switch the core optimizer back to the default
//--- (no preconditioning) mode
   CMinBLEIC::MinBLEICSetPrecDefault(state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Modification of the preconditioner: diagonal of approximate |
//| Hessian is used. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| D - diagonal of the approximate Hessian, |
//| array[0..N-1], (if larger, only leading N |
//| elements are used). |
//| NOTE 1: D[i] should be positive. Exception will be thrown |
//| otherwise. |
//| NOTE 2: you should pass diagonal of approximate Hessian - NOT |
//| ITS INVERSE. |
//+------------------------------------------------------------------+
void CAlglib::MinBLEICSetPrecDiag(CMinBLEICStateShell &state,
                                  double &d[])
  {
//--- thin wrapper: pass diagonal-of-Hessian preconditioner D
//--- to the core optimizer (D[i] must be positive, see header comment)
   CMinBLEIC::MinBLEICSetPrecDiag(state.GetInnerObj(),d);
  }
//+------------------------------------------------------------------+
//| Modification of the preconditioner: scale-based diagonal |
//| preconditioning. |
//| This preconditioning mode can be useful when you don't have |
//| approximate diagonal of Hessian, but you know that your variables|
//| are badly scaled (for example, one variable is in [1,10], and |
//| another in [1000,100000]), and most part of the ill-conditioning |
//| comes from different scales of vars. |
//| In this case simple scale-based preconditioner, with H[i] = |
//| = 1/(s[i]^2), can greatly improve convergence. |
//| IMPORTANT: you should set scale of your variables with           |
//| MinBLEICSetScale() call (before or after MinBLEICSetPrecScale() |
//| call). Without knowledge of the scale of your variables |
//| scale-based preconditioner will be just unit matrix. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//+------------------------------------------------------------------+
void CAlglib::MinBLEICSetPrecScale(CMinBLEICStateShell &state)
  {
//--- thin wrapper: activate scale-based diagonal preconditioning
//--- in the core optimizer (scales come from MinBLEICSetScale())
   CMinBLEIC::MinBLEICSetPrecScale(state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| This function allows to stop algorithm after specified number of |
//| inner iterations. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| MaxIts - maximum number of inner iterations. |
//| If MaxIts=0, the number of iterations is |
//| unlimited. |
//+------------------------------------------------------------------+
void CAlglib::MinBLEICSetMaxIts(CMinBLEICStateShell &state,
                                const int maxits)
  {
//--- thin wrapper: set the inner-iteration limit (0 = unlimited)
   CMinBLEIC::MinBLEICSetMaxIts(state.GetInnerObj(),maxits);
  }
//+------------------------------------------------------------------+
//| This function turns on/off reporting. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| NeedXRep- whether iteration reports are needed or not |
//| If NeedXRep is True, algorithm will call rep() callback function |
//| if it is provided to MinBLEICOptimize(). |
//+------------------------------------------------------------------+
void CAlglib::MinBLEICSetXRep(CMinBLEICStateShell &state,bool needxrep)
  {
//--- thin wrapper: toggle per-iteration reporting in the core optimizer
   CMinBLEIC::MinBLEICSetXRep(state.GetInnerObj(),needxrep);
  }
//+------------------------------------------------------------------+
//| This function sets maximum step length |
//| IMPORTANT: this feature is hard to combine with preconditioning. |
//| You can't set upper limit on step length, when you solve |
//| optimization problem with linear (non-boundary) constraints AND |
//| preconditioner turned on. |
//| When non-boundary constraints are present, you have to either a) |
//| use preconditioner, or b) use upper limit on step length. YOU |
//| CAN'T USE BOTH! In this case algorithm will terminate with |
//| appropriate error code. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| StpMax - maximum step length, >=0. Set StpMax to 0.0, if |
//| you don't want to limit step length. |
//| Use this subroutine when you optimize target function which |
//| contains exp() or other fast growing functions, and optimization |
//| algorithm makes too large steps which lead to overflow. This |
//| function allows us to reject steps that are too large (and |
//| therefore expose us to the possible overflow) without actually |
//| calculating function value at the x+stp*d. |
//+------------------------------------------------------------------+
void CAlglib::MinBLEICSetStpMax(CMinBLEICStateShell &state,double stpmax)
  {
//--- thin wrapper: set the maximum step length (0.0 = no limit)
   CMinBLEIC::MinBLEICSetStpMax(state.GetInnerObj(),stpmax);
  }
//+------------------------------------------------------------------+
//| This function provides reverse communication interface |
//| Reverse communication interface is not documented or recommended |
//| to use. |
//| See below for functions which provide better documented API |
//+------------------------------------------------------------------+
bool CAlglib::MinBLEICIteration(CMinBLEICStateShell &state)
  {
//--- advance the reverse-communication state machine one step;
//--- the MinBLEICOptimize() loops below keep calling this while it
//--- returns true, serving callback requests between calls
   return(CMinBLEIC::MinBLEICIteration(state.GetInnerObj()));
  }
//+------------------------------------------------------------------+
//| This family of functions is used to launch iterations of         |
//| nonlinear optimizer |
//| These functions accept following parameters: |
//| func - callback which calculates function (or merit |
//| function) value func at given point x |
//| grad - callback which calculates function (or merit |
//| function) value func and gradient grad at given |
//| point x |
//| rep - optional callback which is called after each |
//| iteration can be null |
//| obj - optional object which is passed to |
//| func/grad/hess/jac/rep can be null |
//| NOTES: |
//| 1. This function has two different implementations: one which |
//| uses exact (analytical) user-supplied gradient, and one which |
//| uses function value only and numerically differentiates |
//| function in order to obtain gradient. |
//| Depending on the specific function used to create optimizer |
//| object (either MinBLEICCreate() for analytical gradient or |
//| MinBLEICCreateF() for numerical differentiation) you should |
//| choose appropriate variant of MinBLEICOptimize() - one which |
//| accepts function AND gradient or one which accepts function |
//| ONLY. |
//| Be careful to choose variant of MinBLEICOptimize() which |
//| corresponds to your optimization scheme! Table below lists |
//| different combinations of callback (function/gradient) passed |
//| to MinBLEICOptimize() and specific function used to create |
//| optimizer. |
//| | USER PASSED TO MinBLEICOptimize() |
//| CREATED WITH | function only | function and gradient |
//| ------------------------------------------------------------ |
//| MinBLEICCreateF() | work FAIL |
//| MinBLEICCreate() | FAIL work |
//| Here "FAIL" denotes inappropriate combinations of optimizer |
//| creation function and MinBLEICOptimize() version. Attempts to    |
//| use such combination (for example, to create optimizer with |
//| MinBLEICCreateF() and to pass gradient information to |
//| MinCGOptimize()) will lead to exception being thrown. Either |
//| you did not pass gradient when it WAS needed or you passed |
//| gradient when it was NOT needed. |
//+------------------------------------------------------------------+
void CAlglib::MinBLEICOptimize(CMinBLEICStateShell &state,CNDimensional_Func &func,
                               CNDimensional_Rep &rep,bool rep_status,CObject &obj)
  {
//--- reverse-communication loop: serve optimizer requests until it stops
   while(CAlglib::MinBLEICIteration(state))
     {
      //--- dispatch on the kind of request made by the optimizer
      if(state.GetNeedF())
        {
         //--- function value requested at the current point
         func.Func(state.GetInnerObj().m_x,state.GetInnerObj().m_f,obj);
        }
      else if(state.GetInnerObj().m_xupdated)
        {
         //--- new iterate reported; notify the user callback if enabled
         if(rep_status)
            rep.Rep(state.GetInnerObj().m_x,state.GetInnerObj().m_f,obj);
        }
      else
        {
         //--- unexpected request - wrong Optimize() variant for this state
         Print("ALGLIB: error in 'minbleicoptimize' (some derivatives were not provided?)");
         CAp::exception_happened=true;
         break;
        }
     }
  }
//+------------------------------------------------------------------+
//| This family of functions is used to launch iterations of         |
//| nonlinear optimizer |
//| These functions accept following parameters: |
//| func - callback which calculates function (or merit |
//| function) value func at given point x |
//| grad - callback which calculates function (or merit |
//| function) value func and gradient grad at given |
//| point x |
//| rep - optional callback which is called after each |
//| iteration can be null |
//| obj - optional object which is passed to |
//| func/grad/hess/jac/rep can be null |
//| NOTES: |
//| 1. This function has two different implementations: one which |
//| uses exact (analytical) user-supplied gradient, and one which |
//| uses function value only and numerically differentiates |
//| function in order to obtain gradient. |
//| Depending on the specific function used to create optimizer |
//| object (either MinBLEICCreate() for analytical gradient or |
//| MinBLEICCreateF() for numerical differentiation) you should |
//| choose appropriate variant of MinBLEICOptimize() - one which |
//| accepts function AND gradient or one which accepts function |
//| ONLY. |
//| Be careful to choose variant of MinBLEICOptimize() which |
//| corresponds to your optimization scheme! Table below lists |
//| different combinations of callback (function/gradient) passed |
//| to MinBLEICOptimize() and specific function used to create |
//| optimizer. |
//| | USER PASSED TO MinBLEICOptimize() |
//| CREATED WITH | function only | function and gradient |
//| ------------------------------------------------------------ |
//| MinBLEICCreateF() | work FAIL |
//| MinBLEICCreate() | FAIL work |
//| Here "FAIL" denotes inappropriate combinations of optimizer |
//| creation function and MinBLEICOptimize() version. Attempts to    |
//| use such combination (for example, to create optimizer with |
//| MinBLEICCreateF() and to pass gradient information to |
//| MinCGOptimize()) will lead to exception being thrown. Either |
//| you did not pass gradient when it WAS needed or you passed |
//| gradient when it was NOT needed. |
//+------------------------------------------------------------------+
void CAlglib::MinBLEICOptimize(CMinBLEICStateShell &state,CNDimensional_Grad &grad,
                               CNDimensional_Rep &rep,bool rep_status,CObject &obj)
  {
//--- reverse-communication loop: serve optimizer requests until it stops
   while(CAlglib::MinBLEICIteration(state))
     {
      //--- dispatch on the kind of request made by the optimizer
      if(state.GetNeedFG())
        {
         //--- function value and gradient requested at the current point
         grad.Grad(state.GetInnerObj().m_x,state.GetInnerObj().m_f,state.GetInnerObj().m_g,obj);
        }
      else if(state.GetInnerObj().m_xupdated)
        {
         //--- new iterate reported; notify the user callback if enabled
         if(rep_status)
            rep.Rep(state.GetInnerObj().m_x,state.GetInnerObj().m_f,obj);
        }
      else
        {
         //--- unexpected request - wrong Optimize() variant for this state
         Print("ALGLIB: error in 'minbleicoptimize' (some derivatives were not provided?)");
         CAp::exception_happened=true;
         break;
        }
     }
  }
//+------------------------------------------------------------------+
//| This function activates/deactivates verification of the user- |
//| supplied analytic gradient. |
//| Upon activation of this option OptGuard integrity checker |
//| performs numerical differentiation of your target function at the|
//| initial point (note: future versions may also perform check at |
//| the final point) and compares numerical gradient with analytic |
//| one provided by you. |
//| If difference is too large, an error flag is set and optimization|
//| session continues. After optimization session is over, you can |
//| retrieve the report which stores both gradients and specific |
//| components highlighted as suspicious by the OptGuard. |
//| The primary OptGuard report can be retrieved with |
//| MinBLEICOptGuardResults(). |
//| IMPORTANT: gradient check is a high-overhead option which will |
//| cost you about 3*N additional function evaluations. |
//| In many cases it may cost as much as the rest of the |
//| optimization session. |
//| YOU SHOULD NOT USE IT IN THE PRODUCTION CODE UNLESS YOU WANT TO |
//| CHECK DERIVATIVES PROVIDED BY SOME THIRD PARTY. |
//| NOTE: unlike previous incarnation of the gradient checking code, |
//| OptGuard does NOT interrupt optimization even if it |
//| discovers bad gradient. |
//| INPUT PARAMETERS: |
//| State - structure used to store algorithm State |
//| TestStep - verification step used for numerical |
//| differentiation: |
//| * TestStep=0 turns verification off |
//| * TestStep>0 activates verification |
//| You should carefully choose TestStep. Value |
//| which is too large (so large that function |
//| behavior is non-cubic at this scale) will lead |
//| to false alarms. Too short step will result in |
//| rounding errors dominating numerical derivative.|
//| You may use different step for different parameters by means of |
//| setting scale with MinBLEICSetScale(). |
//| === EXPLANATION ================================================ |
//| In order to verify gradient algorithm performs following steps: |
//| * two trial steps are made to X[i]-TestStep*S[i] and |
//| X[i]+TestStep*S[i], where X[i] is i-th component of the |
//| initial point and S[i] is a scale of i-th parameter |
//| * F(X) is evaluated at these trial points |
//| * we perform one more evaluation in the middle point of the |
//| interval |
//| * we build cubic model using function values and derivatives at|
//| trial points and we compare its prediction with actual value |
//| in the middle point |
//+------------------------------------------------------------------+
void CAlglib::MinBLEICOptGuardGradient(CMinBLEICStateShell &state,double teststep)
  {
//--- thin wrapper: enable/disable OptGuard gradient verification
//--- (TestStep=0 turns it off, TestStep>0 activates it)
   CMinBLEIC::MinBLEICOptGuardGradient(state.GetInnerObj(),teststep);
  }
//+------------------------------------------------------------------+
//| This function activates/deactivates nonsmoothness monitoring |
//| option of the OptGuard integrity checker. Smoothness monitor |
//| silently observes solution process and tries to detect ill-posed |
//| problems, i.e. ones with: |
//| a) discontinuous target function (non-C0) |
//| b) nonsmooth target function (non-C1) |
//| Smoothness monitoring does NOT interrupt optimization even if it |
//| suspects that your problem is nonsmooth. It just sets |
//| corresponding flags in the OptGuard report which can be retrieved|
//| after optimization is over. |
//| Smoothness monitoring is a moderate overhead option which often |
//| adds less than 1% to the optimizer running time. Thus, you can |
//| use it even for large scale problems. |
//| NOTE: OptGuard does NOT guarantee that it will always detect |
//| C0/C1 continuity violations. |
//| First, minor errors are hard to catch - say, a 0.0001 difference |
//| in the model values at two sides of the gap may be due |
//| to discontinuity of the model - or simply because the model has |
//| changed. |
//| Second, C1-violations are especially difficult to detect in a |
//| noninvasive way. The optimizer usually performs very short steps |
//| near the nonsmoothness, and differentiation usually introduces a |
//| lot of numerical noise. It is hard to tell whether some tiny |
//| discontinuity in the slope is due to real nonsmoothness or just |
//| due to numerical noise alone. |
//| Our top priority was to avoid false positives, so in some rare |
//| cases minor errors may go unnoticed (however, in most cases      |
//| they can be spotted with restart from different initial point). |
//| INPUT PARAMETERS: |
//| State - algorithm State |
//| Level - monitoring level: |
//| * 0 - monitoring is disabled |
//| * 1 - noninvasive low-overhead monitoring; function|
//| values and/or gradients are recorded, but |
//| OptGuard does not try to perform additional |
//| evaluations in order to get more information |
//| about suspicious locations. |
//| === EXPLANATION ================================================ |
//| One major source of headache during optimization is the |
//| possibility of the coding errors in the target function / |
//| constraints (or their gradients). Such errors most often manifest|
//| themselves as discontinuity or nonsmoothness of the target / |
//| constraints. |
//| Another frequent situation is when you try to optimize something |
//| involving lots of min() and max() operations, i.e. nonsmooth |
//| target. Although not a coding error, it is nonsmoothness anyway -|
//| and smooth optimizers usually stop right after encountering |
//| nonsmoothness, well before reaching solution. |
//| OptGuard integrity checker helps you to catch such situations: it|
//| monitors function values/gradients being passed to the optimizer |
//| and tries to detect them. Upon seeing suspicious pair of points  |
//| it raises appropriate flag (and allows you to continue |
//| optimization). When optimization is done, you can study OptGuard |
//| result. |
//+------------------------------------------------------------------+
void CAlglib::MinBLEICOptGuardSmoothness(CMinBLEICStateShell &state,
                                         int level)
  {
//--- thin wrapper: set OptGuard C0/C1 monitoring level
//--- (0 = disabled, 1 = noninvasive monitoring)
   CMinBLEIC::MinBLEICOptGuardSmoothness(state.GetInnerObj(),level);
  }
//+------------------------------------------------------------------+
//| Results of OptGuard integrity check, should be called after |
//| optimization session is over. |
//| === PRIMARY REPORT ============================================= |
//| OptGuard performs several checks which are intended to catch |
//| common errors in the implementation of nonlinear function / |
//| gradient: |
//| * incorrect analytic gradient |
//| * discontinuous (non-C0) target functions (constraints) |
//| * nonsmooth (non-C1) target functions (constraints) |
//| Each of these checks is activated with appropriate function: |
//| * MinBLEICOptGuardGradient() for gradient verification |
//| * MinBLEICOptGuardSmoothness() for C0/C1 checks |
//| Following flags are set when these errors are suspected: |
//| * rep.badgradsuspected, and additionally: |
//| * rep.badgradvidx for specific variable (gradient element) |
//| suspected |
//| * rep.badgradxbase, a point where gradient is tested |
//| * rep.badgraduser, user-provided gradient (stored as 2D |
//| matrix with single row in order to make report structure |
//| compatible with more complex optimizers like MinNLC or |
//| MinLM) |
//| * rep.badgradnum, reference gradient obtained via numerical |
//| differentiation (stored as 2D matrix with single row in |
//| order to make report structure compatible with more |
//| complex optimizers like MinNLC or MinLM) |
//| * rep.nonc0suspected |
//| * rep.nonc1suspected |
//| === ADDITIONAL REPORTS/LOGS ==================================== |
//| Several different tests are performed to catch C0/C1 errors, you |
//| can find out specific test signaled error by looking to: |
//| * rep.nonc0test0positive, for non-C0 test #0 |
//| * rep.nonc1test0positive, for non-C1 test #0 |
//| * rep.nonc1test1positive, for non-C1 test #1 |
//| Additional information (including line search logs) can be |
//| obtained by means of: |
//| * MinBLEICOptGuardNonC1Test0Results() |
//| * MinBLEICOptGuardNonC1Test1Results() |
//| which return detailed error reports, specific points where |
//| discontinuities were found, and so on. |
//| ================================================================ |
//| INPUT PARAMETERS: |
//| State - algorithm State |
//| OUTPUT PARAMETERS: |
//| Rep - generic OptGuard report; more detailed reports |
//| can be retrieved with other functions. |
//| NOTE: false negatives (nonsmooth problems are not identified as |
//| nonsmooth ones) are possible although unlikely. |
//| The reason is that you need to make several evaluations around |
//| nonsmoothness in order to accumulate enough information about |
//| function curvature. Say, if you start right from the nonsmooth |
//| point, optimizer simply won't get enough data to understand what |
//| is going wrong before it terminates due to abrupt changes in the |
//| derivative. It is also possible that "unlucky" step will move us |
//| to the termination too quickly. |
//| Our current approach is to have less than 0.1% false negatives in|
//| our test examples (measured with multiple restarts from random |
//| points), and to have exactly 0% false positives. |
//+------------------------------------------------------------------+
void CAlglib::MinBLEICOptGuardResults(CMinBLEICStateShell &state,
                                      COptGuardReport &rep)
  {
//--- thin wrapper: retrieve the primary OptGuard report accumulated
//--- during the finished optimization session
   CMinBLEIC::MinBLEICOptGuardResults(state.GetInnerObj(),rep);
  }
//+------------------------------------------------------------------+
//| Detailed results of the OptGuard integrity check for |
//| nonsmoothness test #0 |
//| Nonsmoothness (non-C1) test #0 studies function values (not |
//| gradient!) obtained during line searches and monitors behavior |
//| of the directional derivative estimate. |
//| This test is less powerful than test #1, but it does not depend |
//| on the gradient values and thus it is more robust against |
//| artifacts introduced by numerical differentiation. |
//| Two reports are returned: |
//| *a "strongest" one, corresponding to line search which had |
//| highest value of the nonsmoothness indicator |
//| *a "longest" one, corresponding to line search which had more |
//| function evaluations, and thus is more detailed |
//| In both cases following fields are returned: |
//| * positive - is TRUE when test flagged suspicious point; |
//| FALSE if test did not notice anything |
//| (in the latter cases fields below are empty). |
//| * x0[], d[] - arrays of length N which store initial point and |
//| direction for line search (d[] can be normalized,|
//| but does not have to) |
//| * stp[], f[]- arrays of length CNT which store step lengths and|
//| function values at these points; f[i] is |
//| evaluated in x0+stp[i]*d. |
//| * stpidxa, stpidxb - we suspect that function violates C1 |
//| continuity between steps #stpidxa and #stpidxb |
//| (usually we have stpidxb=stpidxa+3, with most |
//| likely position of the violation between |
//| stpidxa+1 and stpidxa+2. |
//| ================================================================ |
//| = SHORTLY SPEAKING: build a 2D plot of (stp,f) and look at it - |
//| = you will see where C1 continuity is violated.|
//| ================================================================ |
//| INPUT PARAMETERS: |
//| State - algorithm State |
//| OUTPUT PARAMETERS: |
//| StrRep - C1 test #0 "strong" report |
//| LngRep - C1 test #0 "long" report |
//+------------------------------------------------------------------+
void CAlglib::MinBLEICOptGuardNonC1Test0Results(CMinBLEICStateShell &state,
                                                COptGuardNonC1Test0Report &strrep,
                                                COptGuardNonC1Test0Report &lngrep)
  {
//--- thin wrapper: fetch "strongest" and "longest" reports of the
//--- function-value based non-C1 test #0 from the core optimizer
   CMinBLEIC::MinBLEICOptGuardNonC1Test0Results(state.GetInnerObj(),strrep,lngrep);
  }
//+------------------------------------------------------------------+
//| Detailed results of the OptGuard integrity check for |
//| nonsmoothness test #1 |
//| Nonsmoothness (non-C1) test #1 studies individual components of |
//| the gradient computed during line search. |
//| When precise analytic gradient is provided this test is more |
//| powerful than test #0 which works with function values and |
//| ignores user-provided gradient. However, test #0 becomes more |
//| powerful when numerical differentiation is employed (in such |
//| cases test #1 detects higher levels of numerical noise and |
//| becomes too conservative). |
//| This test also tells specific components of the gradient which |
//| violate C1 continuity, which makes it more informative than #0, |
//| which just tells that continuity is violated. |
//| Two reports are returned: |
//| *a "strongest" one, corresponding to line search which had |
//| highest value of the nonsmoothness indicator |
//| *a "longest" one, corresponding to line search which had more |
//| function evaluations, and thus is more detailed |
//| In both cases following fields are returned: |
//| * positive - is TRUE when test flagged suspicious point; |
//| FALSE if test did not notice anything |
//| (in the latter cases fields below are empty).|
//| * vidx - is an index of the variable in [0,N) with |
//| nonsmooth derivative |
//| * x0[], d[] - arrays of length N which store initial point and|
//| direction for line search (d[] can be normalized|
//| but does not have to) |
//| * stp[], g[]- arrays of length CNT which store step lengths |
//| and gradient values at these points; g[i] is |
//| evaluated in x0+stp[i]*d and contains vidx-th |
//| component of the gradient. |
//| * stpidxa, stpidxb - we suspect that function violates C1 |
//| continuity between steps #stpidxa and #stpidxb |
//| (usually we have stpidxb=stpidxa+3, with most |
//| likely position of the violation between |
//| stpidxa+1 and stpidxa+2. |
//| ================================================================ |
//| = SHORTLY SPEAKING: build a 2D plot of (stp,f) and look at it - |
//| = you will see where C1 continuity is violated.|
//| ================================================================ |
//| INPUT PARAMETERS: |
//| State - algorithm State |
//| OUTPUT PARAMETERS: |
//| StrRep - C1 test #1 "strong" report |
//| LngRep - C1 test #1 "long" report |
//+------------------------------------------------------------------+
void CAlglib::MinBLEICOptGuardNonC1Test1Results(CMinBLEICStateShell &state,
                                                COptGuardNonC1Test1Report &strrep,
                                                COptGuardNonC1Test1Report &lngrep)
  {
//--- thin wrapper: fetch "strongest" and "longest" reports of the
//--- gradient-component based non-C1 test #1 from the core optimizer
   CMinBLEIC::MinBLEICOptGuardNonC1Test1Results(state.GetInnerObj(),strrep,lngrep);
  }
//+------------------------------------------------------------------+
//| BLEIC results |
//| INPUT PARAMETERS: |
//| State - algorithm state |
//| OUTPUT PARAMETERS: |
//| X - array[0..N-1], solution |
//| Rep - optimization report. You should check Rep. |
//| TerminationType in order to distinguish |
//| successful termination from unsuccessful one. |
//| More information about fields of this structure |
//| can be found in the comments on MinBLEICReport |
//| datatype. |
//+------------------------------------------------------------------+
void CAlglib::MinBLEICResults(CMinBLEICStateShell &state,double &x[],
                              CMinBLEICReportShell &rep)
  {
//--- thin wrapper: extract solution X and optimization report
//--- from the finished optimizer state
   CMinBLEIC::MinBLEICResults(state.GetInnerObj(),x,rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| BLEIC results |
//| Buffered implementation of MinBLEICResults() which uses |
//| pre-allocated buffer to store X[]. If buffer size is too small, |
//| it resizes buffer. It is intended to be used in the inner cycles |
//| of performance critical algorithms where array reallocation |
//| penalty is too large to be ignored. |
//+------------------------------------------------------------------+
void CAlglib::MinBLEICResultsBuf(CMinBLEICStateShell &state,double &x[],
                                 CMinBLEICReportShell &rep)
  {
//--- thin wrapper: buffered variant of MinBLEICResults() which reuses
//--- the caller-supplied X[] buffer when it is large enough
   CMinBLEIC::MinBLEICResultsBuf(state.GetInnerObj(),x,rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| This subroutine restarts algorithm from new point. |
//| All optimization parameters (including constraints) are left |
//| unchanged. |
//| This function allows to solve multiple optimization problems |
//| (which must have same number of dimensions) without object |
//| reallocation penalty. |
//| INPUT PARAMETERS: |
//| State - structure previously allocated with |
//| MinBLEICCreate call. |
//| X - new starting point. |
//+------------------------------------------------------------------+
void CAlglib::MinBLEICRestartFrom(CMinBLEICStateShell &state,
                                  double &x[])
  {
//--- restart the already-configured optimizer from a new point x
   CMinBLEIC::MinBLEICRestartFrom(state.GetInnerObj(),x);
  }
//+------------------------------------------------------------------+
//| This subroutine submits request for termination of running |
//| optimizer. It should be called from user-supplied callback when |
//| user decides that it is time to "smoothly" terminate optimization|
//| process. As result, optimizer stops at point which was "current |
//| accepted" when termination request was submitted and returns |
//| error code 8 (successful termination). |
//| INPUT PARAMETERS: |
//| State - optimizer structure |
//| NOTE: after request for termination optimizer may perform |
//| several additional calls to user-supplied callbacks. It |
//| does NOT guarantee to stop immediately - it just guarantees|
//| that these additional calls will be discarded later. |
//| NOTE: calling this function on optimizer which is NOT running |
//| will have no effect. |
//| NOTE: multiple calls to this function are possible. First call is|
//| counted, subsequent calls are silently ignored. |
//+------------------------------------------------------------------+
void CAlglib::MinBLEICRequestTermination(CMinBLEICStateShell &state)
  {
//--- raise the "smooth stop" flag inside the running optimizer state
   CMinBLEIC::MinBLEICRequestTermination(state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| LIMITED MEMORY BFGS METHOD FOR LARGE SCALE OPTIMIZATION |
//| DESCRIPTION: |
//| The subroutine minimizes function F(x) of N arguments by using a |
//| quasi - Newton method (LBFGS scheme) which is optimized to use a |
//| minimum amount of memory. |
//| The subroutine generates the approximation of an inverse Hessian |
//| matrix by using information about the last M steps of the |
//| algorithm (instead of N). It lessens a required amount of memory |
//| from a value of order N^2 to a value of order 2*N*M. |
//| REQUIREMENTS: |
//| Algorithm will request following information during its |
//| operation: |
//| * function value F and its gradient G (simultaneously) at given |
//| point X |
//| USAGE: |
//| 1. User initializes algorithm state with MinLBFGSCreate() call |
//| 2. User tunes solver parameters with MinLBFGSSetCond() |
//| MinLBFGSSetStpMax() and other functions |
//| 3. User calls MinLBFGSOptimize() function which takes algorithm |
//| state and pointer (delegate, etc.) to callback function which |
//| calculates F/G. |
//| 4. User calls MinLBFGSResults() to get solution |
//| 5. Optionally user may call MinLBFGSRestartFrom() to solve |
//| another problem with same N/M but another starting point |
//| and/or another function. MinLBFGSRestartFrom() allows to reuse|
//| already initialized structure. |
//| INPUT PARAMETERS: |
//| N - problem dimension. N>0 |
//| M - number of corrections in the BFGS scheme of |
//| Hessian approximation update. Recommended value: |
//| 3<=M<=7. The smaller value causes worse |
//| convergence, the bigger will not cause a |
//| considerably better convergence, but will cause |
//| a fall in the performance. M<=N. |
//| X - initial solution approximation, array[0..N-1]. |
//| OUTPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| NOTES: |
//| 1. you may tune stopping conditions with MinLBFGSSetCond() |
//| function |
//| 2. if target function contains exp() or other fast growing |
//| functions, and optimization algorithm makes too large steps |
//| which leads to overflow, use MinLBFGSSetStpMax() function to |
//| bound algorithm's steps. However, L-BFGS rarely needs such a |
//| tuning. |
//+------------------------------------------------------------------+
void CAlglib::MinLBFGSCreate(const int n,const int m,double &x[],
                             CMinLBFGSStateShell &state)
  {
//--- explicit-N variant: only the leading n elements of x are used
   CMinLBFGS::MinLBFGSCreate(n,m,x,state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| LIMITED MEMORY BFGS METHOD FOR LARGE SCALE OPTIMIZATION |
//| DESCRIPTION: |
//| The subroutine minimizes function F(x) of N arguments by using a |
//| quasi - Newton method (LBFGS scheme) which is optimized to use a |
//| minimum amount of memory. |
//| The subroutine generates the approximation of an inverse Hessian |
//| matrix by using information about the last M steps of the |
//| algorithm (instead of N). It lessens a required amount of memory |
//| from a value of order N^2 to a value of order 2*N*M. |
//| REQUIREMENTS: |
//| Algorithm will request following information during its |
//| operation: |
//| * function value F and its gradient G (simultaneously) at given |
//| point X |
//| USAGE: |
//| 1. User initializes algorithm state with MinLBFGSCreate() call |
//| 2. User tunes solver parameters with MinLBFGSSetCond() |
//| MinLBFGSSetStpMax() and other functions |
//| 3. User calls MinLBFGSOptimize() function which takes algorithm |
//| state and pointer (delegate, etc.) to callback function which |
//| calculates F/G. |
//| 4. User calls MinLBFGSResults() to get solution |
//| 5. Optionally user may call MinLBFGSRestartFrom() to solve |
//| another problem with same N/M but another starting point |
//| and/or another function. MinLBFGSRestartFrom() allows to reuse|
//| already initialized structure. |
//| INPUT PARAMETERS: |
//| N - problem dimension. N>0 |
//| M - number of corrections in the BFGS scheme of |
//| Hessian approximation update. Recommended value: |
//| 3<=M<=7. The smaller value causes worse |
//| convergence, the bigger will not cause a |
//| considerably better convergence, but will cause |
//| a fall in the performance. M<=N. |
//| X - initial solution approximation, array[0..N-1]. |
//| OUTPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| NOTES: |
//| 1. you may tune stopping conditions with MinLBFGSSetCond() |
//| function |
//| 2. if target function contains exp() or other fast growing |
//| functions, and optimization algorithm makes too large steps |
//| which leads to overflow, use MinLBFGSSetStpMax() function to |
//| bound algorithm's steps. However, L-BFGS rarely needs such a |
//| tuning. |
//+------------------------------------------------------------------+
void CAlglib::MinLBFGSCreate(const int m,double &x[],CMinLBFGSStateShell &state)
  {
//--- convenience overload: problem dimension is inferred from Len(x)
   CMinLBFGS::MinLBFGSCreate(CAp::Len(x),m,x,state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| The subroutine is finite difference variant of MinLBFGSCreate(). |
//| It uses finite differences in order to differentiate target |
//| function. |
//| Description below contains information which is specific to this |
//| function only. We recommend to read comments on MinLBFGSCreate() |
//| in order to get more information about creation of LBFGS |
//| optimizer. |
//| INPUT PARAMETERS: |
//| N - problem dimension, N>0: |
//| * if given, only leading N elements of X are used|
//| * if not given, automatically determined from |
//| size of X |
//| M - number of corrections in the BFGS scheme of |
//| Hessian approximation update. Recommended value: |
//| 3<=M<=7. The smaller value causes worse |
//| convergence, the bigger will not cause a |
//| considerably better convergence, but will cause a|
//| fall in the performance. M<=N. |
//| X - starting point, array[0..N-1]. |
//| DiffStep- differentiation step, >0 |
//| OUTPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| NOTES: |
//| 1. algorithm uses 4-point central formula for differentiation. |
//| 2. differentiation step along I-th axis is equal to DiffStep*S[I]|
//| where S[] is scaling vector which can be set by |
//| MinLBFGSSetScale() call. |
//| 3. we recommend you to use moderate values of differentiation |
//| step. Too large step will result in too large truncation |
//| errors, while too small step will result in too large |
//| numerical errors. 1.0E-6 can be good value to start with. |
//| 4. Numerical differentiation is very inefficient - one gradient |
//| calculation needs 4*N function evaluations. This function will|
//| work for any N - either small (1...10), moderate (10...100) or|
//| large (100...). However, performance penalty will be too |
//| severe for any N's except for small ones. |
//| We should also say that code which relies on numerical |
//| differentiation is less robust and precise. LBFGS needs exact |
//| gradient values. Imprecise gradient may slow down convergence,|
//| especially on highly nonlinear problems. |
//| Thus we recommend to use this function for fast prototyping on|
//| small- dimensional problems only, and to implement analytical |
//| gradient as soon as possible. |
//+------------------------------------------------------------------+
void CAlglib::MinLBFGSCreateF(const int n,const int m,double &x[],
                              const double diffstep,CMinLBFGSStateShell &state)
  {
//--- finite-difference variant: gradient is approximated with step diffstep
   CMinLBFGS::MinLBFGSCreateF(n,m,x,diffstep,state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| The subroutine is finite difference variant of MinLBFGSCreate(). |
//| It uses finite differences in order to differentiate target |
//| function. |
//| Description below contains information which is specific to this |
//| function only. We recommend to read comments on MinLBFGSCreate() |
//| in order to get more information about creation of LBFGS |
//| optimizer. |
//| INPUT PARAMETERS: |
//| N - problem dimension, N>0: |
//| * if given, only leading N elements of X are used|
//| * if not given, automatically determined from |
//| size of X |
//| M - number of corrections in the BFGS scheme of |
//| Hessian approximation update. Recommended value: |
//| 3<=M<=7. The smaller value causes worse |
//| convergence, the bigger will not cause a |
//| considerably better convergence, but will cause a|
//| fall in the performance. M<=N. |
//| X - starting point, array[0..N-1]. |
//| DiffStep- differentiation step, >0 |
//| OUTPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| NOTES: |
//| 1. algorithm uses 4-point central formula for differentiation. |
//| 2. differentiation step along I-th axis is equal to DiffStep*S[I]|
//| where S[] is scaling vector which can be set by |
//| MinLBFGSSetScale() call. |
//| 3. we recommend you to use moderate values of differentiation |
//| step. Too large step will result in too large truncation |
//| errors, while too small step will result in too large |
//| numerical errors. 1.0E-6 can be good value to start with. |
//| 4. Numerical differentiation is very inefficient - one gradient |
//| calculation needs 4*N function evaluations. This function will|
//| work for any N - either small (1...10), moderate (10...100) or|
//| large (100...). However, performance penalty will be too |
//| severe for any N's except for small ones. |
//| We should also say that code which relies on numerical |
//| differentiation is less robust and precise. LBFGS needs exact |
//| gradient values. Imprecise gradient may slow down convergence,|
//| especially on highly nonlinear problems. |
//| Thus we recommend to use this function for fast prototyping on|
//| small- dimensional problems only, and to implement analytical |
//| gradient as soon as possible. |
//+------------------------------------------------------------------+
void CAlglib::MinLBFGSCreateF(const int m,double &x[],const double diffstep,
                              CMinLBFGSStateShell &state)
  {
//--- convenience overload: problem dimension is inferred from Len(x)
   CMinLBFGS::MinLBFGSCreateF(CAp::Len(x),m,x,diffstep,state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| This function sets stopping conditions for L-BFGS optimization |
//| algorithm. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| EpsG - >=0 |
//| The subroutine finishes its work if the condition|
//| |v|<EpsG is satisfied, where: |
//| * |.| means Euclidian norm |
//| * v - scaled gradient vector, v[i]=g[i]*s[i] |
//| * g - gradient |
//| * s - scaling coefficients set by |
//| MinLBFGSSetScale() |
//| EpsF - >=0 |
//| The subroutine finishes its work if on k+1-th |
//| iteration the condition |F(k+1)-F(k)| <= |
//| <= EpsF*max{|F(k)|,|F(k+1)|,1} is satisfied. |
//| EpsX - >=0 |
//| The subroutine finishes its work if on k+1-th |
//| iteration the condition |v|<=EpsX is fulfilled, |
//| where: |
//| * |.| means Euclidian norm |
//| * v - scaled step vector, v[i]=dx[i]/s[i] |
//|                * dx - step vector, dx=X(k+1)-X(k)                |
//| * s - scaling coefficients set by |
//| MinLBFGSSetScale() |
//| MaxIts - maximum number of iterations. If MaxIts=0, the |
//| number of iterations is unlimited. |
//| Passing EpsG=0, EpsF=0, EpsX=0 and MaxIts=0 (simultaneously) will|
//| lead to automatic stopping criterion selection (small EpsX). |
//+------------------------------------------------------------------+
void CAlglib::MinLBFGSSetCond(CMinLBFGSStateShell &state,const double epsg,
                              const double epsf,const double epsx,
                              const int maxits)
  {
//--- store stopping tolerances (all zero => automatic small-EpsX criterion)
   CMinLBFGS::MinLBFGSSetCond(state.GetInnerObj(),epsg,epsf,epsx,maxits);
  }
//+------------------------------------------------------------------+
//| This function turns on/off reporting. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| NeedXRep- whether iteration reports are needed or not |
//| If NeedXRep is True, algorithm will call rep() callback function |
//| if it is provided to MinLBFGSOptimize(). |
//+------------------------------------------------------------------+
void CAlglib::MinLBFGSSetXRep(CMinLBFGSStateShell &state,const bool needxrep)
  {
//--- enable/disable per-iteration rep() callbacks in MinLBFGSOptimize()
   CMinLBFGS::MinLBFGSSetXRep(state.GetInnerObj(),needxrep);
  }
//+------------------------------------------------------------------+
//| This function sets maximum step length |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| StpMax - maximum step length, >=0. Set StpMax to 0.0 |
//| (default), if you don't want to limit step |
//| length. |
//| Use this subroutine when you optimize target function which |
//| contains exp() or other fast growing functions, and optimization |
//| algorithm makes too large steps which leads to overflow. This |
//| function allows us to reject steps that are too large (and |
//| therefore expose us to the possible overflow) without actually |
//| calculating function value at the x+stp*d. |
//+------------------------------------------------------------------+
void CAlglib::MinLBFGSSetStpMax(CMinLBFGSStateShell &state,const double stpmax)
  {
//--- cap the line-search step length (0.0 = unlimited, the default)
   CMinLBFGS::MinLBFGSSetStpMax(state.GetInnerObj(),stpmax);
  }
//+------------------------------------------------------------------+
//| This function sets scaling coefficients for LBFGS optimizer. |
//| ALGLIB optimizers use scaling matrices to test stopping |
//| conditions (step size and gradient are scaled before comparison |
//| with tolerances). Scale of the I-th variable is a translation |
//| invariant measure of: |
//| a) "how large" the variable is |
//| b) how large the step should be to make significant changes in |
//| the function |
//| Scaling is also used by finite difference variant of the |
//| optimizer - step along I-th axis is equal to DiffStep*S[I]. |
//| In most optimizers (and in the LBFGS too) scaling is NOT a form |
//| of preconditioning. It just affects stopping conditions. You |
//| should set preconditioner by separate call to one of the |
//| MinLBFGSSetPrec...() functions. |
//| There is special preconditioning mode, however, which uses |
//| scaling coefficients to form diagonal preconditioning matrix. |
//| You can turn this mode on, if you want. But you should |
//| understand that scaling is not the same thing as |
//| preconditioning - these are two different, although related |
//| forms of tuning solver. |
//| INPUT PARAMETERS: |
//| State - structure stores algorithm state |
//| S - array[N], non-zero scaling coefficients |
//| S[i] may be negative, sign doesn't matter. |
//+------------------------------------------------------------------+
void CAlglib::MinLBFGSSetScale(CMinLBFGSStateShell &state,double &s[])
  {
//--- set per-variable scales used by stopping tests and FD differentiation
   CMinLBFGS::MinLBFGSSetScale(state.GetInnerObj(),s);
  }
//+------------------------------------------------------------------+
//| Modification of the preconditioner: default preconditioner |
//| (simple scaling, same for all elements of X) is used. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| NOTE: you can change preconditioner "on the fly", during |
//| algorithm iterations. |
//+------------------------------------------------------------------+
void CAlglib::MinLBFGSSetPrecDefault(CMinLBFGSStateShell &state)
  {
//--- revert to the default (simple scaling) preconditioner
   CMinLBFGS::MinLBFGSSetPrecDefault(state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Modification of the preconditioner: Cholesky factorization of |
//| approximate Hessian is used. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| P - triangular preconditioner, Cholesky factorization|
//| of the approximate Hessian. array[0..N-1,0..N-1],|
//| (if larger, only leading N elements are used). |
//| IsUpper - whether upper or lower triangle of P is given |
//| (other triangle is not referenced) |
//| After call to this function preconditioner is changed to P (P is |
//| copied into the internal buffer). |
//| NOTE: you can change preconditioner "on the fly", during |
//| algorithm iterations. |
//| NOTE 2: P should be nonsingular. Exception will be thrown |
//| otherwise. |
//+------------------------------------------------------------------+
void CAlglib::MinLBFGSSetPrecCholesky(CMinLBFGSStateShell &state,
                                      CMatrixDouble &p,const bool IsUpper)
  {
//--- install Cholesky-factor preconditioner; p is copied by the core
   CMinLBFGS::MinLBFGSSetPrecCholesky(state.GetInnerObj(),p,IsUpper);
  }
//+------------------------------------------------------------------+
//| Modification of the preconditioner: diagonal of approximate |
//| Hessian is used. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| D - diagonal of the approximate Hessian, |
//| array[0..N-1], (if larger, only leading N |
//| elements are used). |
//| NOTE: you can change preconditioner "on the fly", during |
//| algorithm iterations. |
//| NOTE 2: D[i] should be positive. Exception will be thrown |
//| otherwise. |
//| NOTE 3: you should pass diagonal of approximate Hessian - NOT |
//| ITS INVERSE. |
//+------------------------------------------------------------------+
void CAlglib::MinLBFGSSetPrecDiag(CMinLBFGSStateShell &state,double &d[])
  {
//--- install diagonal preconditioner; d holds the Hessian diagonal itself,
//--- not its inverse, and every d[i] must be positive
   CMinLBFGS::MinLBFGSSetPrecDiag(state.GetInnerObj(),d);
  }
//+------------------------------------------------------------------+
//| Modification of the preconditioner: scale-based diagonal |
//| preconditioning. |
//| This preconditioning mode can be useful when you don't have |
//| approximate diagonal of Hessian, but you know that your variables|
//| are badly scaled (for example, one variable is in [1,10], and |
//| another in [1000,100000]), and most part of the ill-conditioning |
//| comes from different scales of vars. |
//| In this case simple scale-based preconditioner, with H[i] = |
//| = 1/(s[i]^2), can greatly improve convergence. |
//| IMPORTANT: you should set scale of your variables with           |
//| MinLBFGSSetScale() call (before or after MinLBFGSSetPrecScale() |
//| call). Without knowledge of the scale of your variables |
//| scale-based preconditioner will be just unit matrix. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//+------------------------------------------------------------------+
void CAlglib::MinLBFGSSetPrecScale(CMinLBFGSStateShell &state)
  {
//--- use scale-based diagonal preconditioner H[i]=1/s[i]^2 (see SetScale)
   CMinLBFGS::MinLBFGSSetPrecScale(state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| This function provides reverse communication interface |
//| Reverse communication interface is not documented or recommended |
//| to use. |
//| See below for functions which provide better documented API |
//+------------------------------------------------------------------+
bool CAlglib::MinLBFGSIteration(CMinLBFGSStateShell &state)
  {
//--- advance the reverse-communication solver by one request;
//--- returns false once the optimizer has finished
   return(CMinLBFGS::MinLBFGSIteration(state.GetInnerObj()));
  }
//+------------------------------------------------------------------+
//| This family of functions is used to launch iterations of         |
//| nonlinear optimizer |
//| These functions accept following parameters: |
//| func - callback which calculates function (or merit |
//| function) value func at given point x |
//| grad - callback which calculates function (or merit |
//| function) value func and gradient grad at given |
//| point x |
//| rep - optional callback which is called after each |
//| iteration can be null |
//| obj - optional object which is passed to |
//| func/grad/hess/jac/rep can be null |
//| NOTES: |
//| 1. This function has two different implementations: one which |
//| uses exact (analytical) user-supplied gradient, and one which |
//| uses function value only and numerically differentiates |
//| function in order to obtain gradient. |
//| Depending on the specific function used to create optimizer |
//| object (either MinLBFGSCreate() for analytical gradient or |
//| MinLBFGSCreateF() for numerical differentiation) you should |
//| choose appropriate variant of MinLBFGSOptimize() - one which |
//| accepts function AND gradient or one which accepts function |
//| ONLY. |
//| Be careful to choose variant of MinLBFGSOptimize() which |
//| corresponds to your optimization scheme! Table below lists |
//| different combinations of callback (function/gradient) passed |
//| to MinLBFGSOptimize() and specific function used to create |
//| optimizer. |
//| | USER PASSED TO MinLBFGSOptimize() |
//| CREATED WITH | function only | function and gradient |
//| ------------------------------------------------------------ |
//| MinLBFGSCreateF() |     work      |         FAIL                 |
//| MinLBFGSCreate()  |     FAIL      |         work                 |
//| Here "FAIL" denotes inappropriate combinations of optimizer |
//|   creation function and MinLBFGSOptimize() version. Attempts to  |
//| use such combination (for example, to create optimizer with |
//| MinLBFGSCreateF() and to pass gradient information to |
//| MinCGOptimize()) will lead to exception being thrown. Either |
//| you did not pass gradient when it WAS needed or you passed |
//| gradient when it was NOT needed. |
//+------------------------------------------------------------------+
void CAlglib::MinLBFGSOptimize(CMinLBFGSStateShell &state,CNDimensional_Func &func,
                               CNDimensional_Rep &rep,bool rep_status,CObject &obj)
  {
//--- reverse-communication driver: serve solver requests until it stops
   while(CAlglib::MinLBFGSIteration(state))
     {
      //--- solver asks for the function value at the current point
      if(state.GetNeedF())
         func.Func(state.GetInnerObj().m_x,state.GetInnerObj().m_f,obj);
      else
         if(state.GetInnerObj().m_xupdated)
           {
            //--- new iterate accepted - optionally report progress
            if(rep_status)
               rep.Rep(state.GetInnerObj().m_x,state.GetInnerObj().m_f,obj);
           }
         else
           {
            //--- unexpected request kind: optimizer was created for a
            //--- different callback scheme (analytic gradient expected?)
            Print("ALGLIB: error in 'minlbfgsoptimize' (some derivatives were not provided?)");
            CAp::exception_happened=true;
            break;
           }
     }
  }
//+------------------------------------------------------------------+
//| This family of functions is used to launch iterations of         |
//| nonlinear optimizer |
//| These functions accept following parameters: |
//| func - callback which calculates function (or merit |
//| function) value func at given point x |
//| grad - callback which calculates function (or merit |
//| function) value func and gradient grad at given |
//| point x |
//| rep - optional callback which is called after each |
//| iteration can be null |
//| obj - optional object which is passed to |
//| func/grad/hess/jac/rep can be null |
//| NOTES: |
//| 1. This function has two different implementations: one which |
//| uses exact (analytical) user-supplied gradient, and one which |
//| uses function value only and numerically differentiates |
//| function in order to obtain gradient. |
//| Depending on the specific function used to create optimizer |
//| object (either MinLBFGSCreate() for analytical gradient or |
//| MinLBFGSCreateF() for numerical differentiation) you should |
//| choose appropriate variant of MinLBFGSOptimize() - one which |
//| accepts function AND gradient or one which accepts function |
//| ONLY. |
//| Be careful to choose variant of MinLBFGSOptimize() which |
//| corresponds to your optimization scheme! Table below lists |
//| different combinations of callback (function/gradient) passed |
//| to MinLBFGSOptimize() and specific function used to create |
//| optimizer. |
//| | USER PASSED TO MinLBFGSOptimize() |
//| CREATED WITH | function only | function and gradient |
//| ------------------------------------------------------------ |
//| MinLBFGSCreateF() |     work      |         FAIL                 |
//| MinLBFGSCreate()  |     FAIL      |         work                 |
//| Here "FAIL" denotes inappropriate combinations of optimizer |
//|   creation function and MinLBFGSOptimize() version. Attempts to  |
//| use such combination (for example, to create optimizer with |
//| MinLBFGSCreateF() and to pass gradient information to |
//| MinCGOptimize()) will lead to exception being thrown. Either |
//| you did not pass gradient when it WAS needed or you passed |
//| gradient when it was NOT needed. |
//+------------------------------------------------------------------+
void CAlglib::MinLBFGSOptimize(CMinLBFGSStateShell &state,CNDimensional_Grad &grad,
                               CNDimensional_Rep &rep,bool rep_status,CObject &obj)
  {
//--- reverse-communication driver: serve solver requests until it stops
   while(CAlglib::MinLBFGSIteration(state))
     {
      //--- solver asks for function value and gradient at the current point
      if(state.GetNeedFG())
         grad.Grad(state.GetInnerObj().m_x,state.GetInnerObj().m_f,state.GetInnerObj().m_g,obj);
      else
         if(state.GetInnerObj().m_xupdated)
           {
            //--- new iterate accepted - optionally report progress
            if(rep_status)
               rep.Rep(state.GetInnerObj().m_x,state.GetInnerObj().m_f,obj);
           }
         else
           {
            //--- unexpected request kind: optimizer was created for a
            //--- different callback scheme (function-only expected?)
            Print("ALGLIB: error in 'minlbfgsoptimize' (some derivatives were not provided?)");
            CAp::exception_happened=true;
            break;
           }
     }
  }
//+------------------------------------------------------------------+
//| L-BFGS algorithm results |
//| INPUT PARAMETERS: |
//| State - algorithm state |
//| OUTPUT PARAMETERS: |
//| X - array[0..N-1], solution |
//| Rep - optimization report: |
//|              * Rep.TerminationType completion code:              |
//| * -2 rounding errors prevent further |
//| improvement. X contains best point |
//| found. |
//| * -1 incorrect parameters were specified |
//| * 1 relative function improvement is no |
//| more than EpsF. |
//| * 2 relative step is no more than EpsX. |
//| * 4 gradient norm is no more than EpsG |
//| * 5 MaxIts steps was taken |
//| * 7 stopping conditions are too |
//| stringent, further improvement is |
//| impossible |
//| * Rep.IterationsCount contains iterations count |
//|              * NFEV contains number of function calculations     |
//+------------------------------------------------------------------+
void CAlglib::MinLBFGSResults(CMinLBFGSStateShell &state,double &x[],
                              CMinLBFGSReportShell &rep)
  {
//--- unwrap shells and delegate; x[] is (re)allocated by the core
   CMinLBFGS::MinLBFGSResults(state.GetInnerObj(),x,rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| L-BFGS algorithm results |
//| Buffered implementation of MinLBFGSResults which uses |
//| pre-allocated buffer to store X[]. If buffer size is too small, |
//| it resizes buffer. It is intended to be used in the inner cycles |
//| of performance critical algorithms where array reallocation |
//| penalty is too large to be ignored. |
//+------------------------------------------------------------------+
void CAlglib::MinLBFGSresultsbuf(CMinLBFGSStateShell &state,double &x[],
                                 CMinLBFGSReportShell &rep)
  {
//--- buffered variant: core reuses x[] if it is already large enough
//--- NOTE(review): capitalization is inconsistent with the sibling
//--- MinBLEICResultsBuf(); the name is public API, so it is kept as-is
   CMinLBFGS::MinLBFGSResultsBuf(state.GetInnerObj(),x,rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| This subroutine restarts LBFGS algorithm from new point. All |
//| optimization parameters are left unchanged. |
//| This function allows to solve multiple optimization problems |
//| (which must have same number of dimensions) without object |
//| reallocation penalty. |
//| INPUT PARAMETERS: |
//| State - structure used to store algorithm state |
//| X - new starting point. |
//+------------------------------------------------------------------+
void CAlglib::MinLBFGSRestartFrom(CMinLBFGSStateShell &state,double &x[])
  {
//--- restart the already-configured optimizer from a new point x
   CMinLBFGS::MinLBFGSRestartFrom(state.GetInnerObj(),x);
  }
//+------------------------------------------------------------------+
//| CONSTRAINED QUADRATIC PROGRAMMING |
//| The subroutine creates QP optimizer. After initial creation, it |
//| contains default optimization problem with zero quadratic and |
//| linear terms and no constraints. You should set quadratic/linear |
//| terms with calls to functions provided by MinQP subpackage. |
//| INPUT PARAMETERS: |
//| N - problem size |
//| OUTPUT PARAMETERS: |
//| State - optimizer with zero quadratic/linear terms |
//| and no constraints |
//+------------------------------------------------------------------+
void CAlglib::MinQPCreate(const int n,CMinQPStateShell &state)
  {
//--- create QP optimizer with zero quadratic/linear terms, no constraints
   CMinQP::MinQPCreate(n,state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| This function sets linear term for QP solver. |
//| By default, linear term is zero. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| B - linear term, array[N]. |
//+------------------------------------------------------------------+
void CAlglib::MinQPSetLinearTerm(CMinQPStateShell &state,double &b[])
  {
//--- set linear term b of f(x)=0.5*x'*A*x + b'*x (zero by default)
   CMinQP::MinQPSetLinearTerm(state.GetInnerObj(),b);
  }
//+------------------------------------------------------------------+
//| This function sets quadratic term for QP solver. |
//| By default quadratic term is zero. |
//| IMPORTANT: this solver minimizes following function: |
//| f(x) = 0.5*x'*A*x + b'*x. |
//| Note that quadratic term has 0.5 before it. So if you want to |
//| minimize |
//| f(x) = x^2 + x |
//| you should rewrite your problem as follows: |
//| f(x) = 0.5*(2*x^2) + x |
//| and your matrix A will be equal to [[2.0]], not to [[1.0]] |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| A - matrix, array[N,N] |
//| IsUpper - (optional) storage type: |
//| * if True, symmetric matrix A is given by its |
//|              upper triangle, and the lower triangle isn't        |
//| used |
//| * if False, symmetric matrix A is given by its |
//|              lower triangle, and the upper triangle isn't        |
//| used |
//| * if not given, both lower and upper triangles |
//| must be filled. |
//+------------------------------------------------------------------+
void CAlglib::MinQPSetQuadraticTerm(CMinQPStateShell &state,CMatrixDouble &a,
                                    const bool IsUpper)
  {
//--- set quadratic term A (note the 0.5 factor in f(x)=0.5*x'*A*x+b'*x);
//--- IsUpper selects which triangle of the symmetric matrix is referenced
   CMinQP::MinQPSetQuadraticTerm(state.GetInnerObj(),a,IsUpper);
  }
//+------------------------------------------------------------------+
//| This function sets quadratic term for QP solver. |
//| By default quadratic term is zero. |
//| IMPORTANT: this solver minimizes following function: |
//| f(x) = 0.5*x'*A*x + b'*x. |
//| Note that quadratic term has 0.5 before it. So if you want to |
//| minimize |
//| f(x) = x^2 + x |
//| you should rewrite your problem as follows: |
//| f(x) = 0.5*(2*x^2) + x |
//| and your matrix A will be equal to [[2.0]], not to [[1.0]] |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| A - matrix, array[N,N] |
//| IsUpper - (optional) storage type: |
//| * if True, symmetric matrix A is given by its |
//|              upper triangle, and the lower triangle isn't        |
//| used |
//| * if False, symmetric matrix A is given by its |
//|              lower triangle, and the upper triangle isn't        |
//| used |
//| * if not given, both lower and upper triangles |
//| must be filled. |
//+------------------------------------------------------------------+
void CAlglib::MinQPSetQuadraticTerm(CMinQPStateShell &state,CMatrixDouble &a)
  {
//--- overload without a storage flag: both triangles of 'a' must be
//--- filled, therefore the matrix is required to be symmetric
   if(CAp::IsSymmetric(a))
     {
      //--- symmetric input accepted: forward to the computational core,
      //--- lower-triangle storage is used by default
      CMinQP::MinQPSetQuadraticTerm(state.GetInnerObj(),a,false);
      return;
     }
//--- non-symmetric input: report the error and raise the exception flag
   Print(__FUNCTION__+": 'a' parameter is not symmetric matrix");
   CAp::exception_happened=true;
  }
//+------------------------------------------------------------------+
//| This function sets sparse quadratic term for QP solver. By |
//| default, quadratic term is zero. This function overrides previous|
//| calls to MinQPSetQuadraticTerm() or MinQPSetQuadraticTermSparse()|
//| NOTE: dense solvers like DENSE-AUL-QP or DENSE-IPM-QP will |
//| convert this matrix to dense storage anyway. |
//| IMPORTANT: This solver minimizes following function: |
//| f(x) = 0.5*x'*A*x + b'*x. |
//| Note that quadratic term has 0.5 before it. So if you |
//| want to minimize |
//| f(x) = x^2 + x |
//| you should rewrite your problem as follows: |
//| f(x) = 0.5*(2*x^2) + x |
//| and your matrix A will be equal to [[2.0]], not to |
//| [[1.0]] |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm State |
//| A - matrix, array[N,N] |
//| IsUpper - (optional) storage type: |
//| * if True, symmetric matrix A is given by its upper|
//| triangle, and the lower triangle isn't used |
//| * if False, symmetric matrix A is given by its |
//| lower triangle, and the upper triangle isn't used|
//| * if not given, both lower and upper triangles must|
//| be filled. |
//+------------------------------------------------------------------+
void CAlglib::MinQPSetQuadraticTermSparse(CMinQPStateShell &state,
                                          CSparseMatrix &a,bool IsUpper)
  {
//--- thin wrapper: forward the sparse quadratic term to the computational
//--- core; the triangle of 'a' selected by IsUpper is the one that is used
   CMinQP::MinQPSetQuadraticTermSparse(state.GetInnerObj(),a,IsUpper);
  }
//+------------------------------------------------------------------+
//| This function sets starting point for QP solver. It is useful to |
//| have good initial approximation to the solution, because it will |
//| increase speed of convergence and identification of active |
//| constraints. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| X - starting point, array[N]. |
//+------------------------------------------------------------------+
void CAlglib::MinQPSetStartingPoint(CMinQPStateShell &state,double &x[])
  {
//--- thin wrapper: pass the starting point x[N] to the computational core
   CMinQP::MinQPSetStartingPoint(state.GetInnerObj(),x);
  }
//+------------------------------------------------------------------+
//| This function sets origin for QP solver. By default, following |
//| QP program is solved: |
//| min(0.5*x'*A*x+b'*x) |
//| This function allows to solve different problem: |
//| min(0.5*(x-x_origin)'*A*(x-x_origin)+b'*(x-x_origin)) |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| XOrigin - origin, array[N]. |
//+------------------------------------------------------------------+
void CAlglib::MinQPSetOrigin(CMinQPStateShell &state,double &xorigin[])
  {
//--- thin wrapper: pass the origin xorigin[N] to the computational core
   CMinQP::MinQPSetOrigin(state.GetInnerObj(),xorigin);
  }
//+------------------------------------------------------------------+
//| This function sets scaling coefficients. |
//| ALGLIB optimizers use scaling matrices to test stopping |
//| conditions (step size and gradient are scaled before comparison |
//| with tolerances) and as preconditioner. |
//| Scale of the I-th variable is a translation invariant measure of:|
//| a) "how large" the variable is |
//| b) how large the step should be to make significant changes in |
//| the function |
//| If you do not know how to choose scales of your variables, you |
//| can: |
//| * read www.alglib.net/optimization/scaling.php article |
//| * use minqpsetscaleautodiag(), which calculates scale using |
//|     diagonal of the quadratic term: S is set to 1/sqrt(diag(A)), |
//| which works well sometimes. |
//| INPUT PARAMETERS: |
//| State - structure stores algorithm State |
//| S - array[N], non-zero scaling coefficients S[i] may |
//| be negative, sign doesn't matter. |
//+------------------------------------------------------------------+
void CAlglib::MinQPSetScale(CMinQPStateShell &state,CRowDouble &s)
  {
//--- thin wrapper: pass the scale vector s[N] to the computational core
   CMinQP::MinQPSetScale(state.GetInnerObj(),s);
  }
//+------------------------------------------------------------------+
//| This function sets automatic evaluation of variable scaling. |
//| IMPORTANT: this function works only for matrices with positive |
//| diagonal elements! Zero or negative elements will |
//| result in -9 error code being returned. Specify scale |
//| vector manually with MinQPSetScale() in such cases. |
//| ALGLIB optimizers use scaling matrices to test |
//| stopping conditions (step size and gradient are scaled|
//| before comparison with tolerances) and as |
//| preconditioner. |
//| The best way to set scaling is to manually specify variable |
//| scales. However, sometimes you just need quick-and-dirty |
//| solution - either when you perform fast prototyping, or when you |
//| know your problem well and you are 100 % sure that this quick |
//| solution is robust enough in your case. |
//| One such solution is to evaluate scale of I-th variable |
//| as 1 / Sqrt(A[i, i]), where A[i, i] is an I-th diagonal |
//| element of the quadratic term. |
//| Such approach works well sometimes, but you have to be careful |
//| here. |
//| INPUT PARAMETERS: |
//| State - structure stores algorithm State |
//+------------------------------------------------------------------+
void CAlglib::MinQPSetScaleAutoDiag(CMinQPStateShell &state)
  {
//--- thin wrapper: enable automatic scaling from the diagonal of the
//--- quadratic term (see header note: requires positive diagonal elements)
   CMinQP::MinQPSetScaleAutoDiag(state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| This function tells solver to use BLEIC-based algorithm and sets |
//| stopping criteria for the algorithm. |
//| This algorithm is intended for large-scale problems, possibly |
//| nonconvex, with small number of general linear constraints. |
//| Feasible initial point is essential for good performance. |
//| IMPORTANT: when DENSE-IPM (or DENSE-AUL for nonconvex problems) |
//| solvers are applicable, their performance is much |
//| better than that of BLEIC-QP. |
//| We recommend you to use BLEIC only when other solvers |
//| can not be used. |
//| ALGORITHM FEATURES: |
//| * supports dense and sparse QP problems |
//| * supports box and general linear equality / inequality |
//| constraints |
//| * can solve all types of problems (convex, semidefinite, |
//| nonconvex) as long as they are bounded from below under |
//| constraints. |
//| Say, it is possible to solve "min{-x^2} subject to -1<=x<=+1". |
//| Of course, global minimum is found only for positive definite and|
//| semidefinite problems. As for indefinite ones-only local minimum |
//| is found. |
//| ALGORITHM OUTLINE : |
//| * BLEIC-QP solver is just a driver function for MinBLEIC |
//| solver; it solves quadratic programming problem as general |
//| linearly constrained optimization problem, which is solved |
//| by means of BLEIC solver(part of ALGLIB, active set method). |
//| ALGORITHM LIMITATIONS: |
//| * This algorithm is inefficient on problems with hundreds and |
//| thousands of general inequality constraints and infeasible |
//| initial point. Initial feasibility detection stage may take |
//| too long on such constraint sets. Consider using DENSE-IPM |
//| or DENSE-AUL instead. |
//| * unlike QuickQP solver, this algorithm does not perform Newton|
//| steps and does not use Level 3 BLAS. Being general-purpose |
//| active set method, it can activate constraints only |
//| one-by-one. Thus, its performance is lower than that of |
//| QuickQP. |
//| * its precision is also a bit inferior to that of QuickQP. |
//| BLEIC-QP performs only LBFGS steps(no Newton steps), which |
//|     are good at detecting neighborhood of the solution, but needs|
//| many iterations to find solution with more than 6 digits of |
//| precision. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm State |
//| EpsG - >= 0, The subroutine finishes its work if the |
//| condition | v | < EpsG is satisfied, where: |
//| * | . | means Euclidian norm |
//| * v - scaled constrained gradient vector, |
//| v[i] = g[i] * s[i] |
//| * g - gradient |
//| * s - scaling coefficients set by |
//| MinQPSetScale() |
//| EpsF - >= 0, The subroutine finishes its work if |
//| exploratory steepest descent step on k+1-th |
//| iteration satisfies following condition: |
//| |F(k+1) - F(k)| <= EpsF * max{ |F(k)|, |F(k+1)|, 1}|
//| EpsX - >= 0, The subroutine finishes its work if |
//| exploratory steepest descent step on k+1-th |
//| iteration satisfies following condition: |
//| * | . | means Euclidian norm |
//| * v - scaled step vector, v[i] = dx[i] / s[i] |
//| * dx - step vector, dx = X(k + 1) - X(k) |
//| * s - scaling coefficients set by |
//| MinQPSetScale() |
//| MaxIts - maximum number of iterations. If MaxIts = 0, the |
//| number of iterations is unlimited. |
//| NOTE: this algorithm uses LBFGS iterations, which are relatively |
//| cheap, but improve function value only a bit. So you will |
//| need many iterations to converge - from 0.1 * N to 10 * N, |
//| depending on problem's condition number. |
//| IT IS VERY IMPORTANT TO CALL MinQPSetScale() WHEN YOU USE THIS |
//| ALGORITHM BECAUSE ITS STOPPING CRITERIA ARE SCALE - DEPENDENT! |
//| Passing EpsG = 0, EpsF = 0 and EpsX = 0 and MaxIts = 0 |
//| (simultaneously) will lead to automatic stopping criterion |
//| selection (presently it is small step length, but it may change |
//| in the future versions of ALGLIB). |
//+------------------------------------------------------------------+
void CAlglib::MinQPSetAlgoBLEIC(CMinQPStateShell &state,double epsg,
                                double epsf,double epsx,int maxits)
  {
//--- thin wrapper: select the BLEIC-based QP algorithm and forward its
//--- stopping criteria (epsg/epsf/epsx/maxits) to the computational core
   CMinQP::MinQPSetAlgoBLEIC(state.GetInnerObj(),epsg,epsf,epsx,maxits);
  }
//+------------------------------------------------------------------+
//| This function tells QP solver to use DENSE-AUL algorithm and sets|
//| stopping criteria for the algorithm. |
//| This algorithm is intended for non-convex problems with moderate |
//| (up to several thousands) variable count and arbitrary number of |
//| constraints which are either(a) effectively convexified under |
//| constraints or (b) have unique solution even with nonconvex |
//| target. |
//| IMPORTANT: when DENSE-IPM solver is applicable, its performance |
//| is usually much better than that of DENSE-AUL. We |
//| recommend you to use DENSE-AUL only when other solvers|
//| can not be used. |
//| ALGORITHM FEATURES: |
//| * supports box and dense / sparse general linear equality / |
//| inequality constraints |
//| * convergence is theoretically proved for positive - definite |
//| (convex) QP problems. Semidefinite and non-convex problems |
//| can be solved as long as they are bounded from below under |
//| constraints, although without theoretical guarantees. |
//| ALGORITHM OUTLINE: |
//| * this algorithm is an augmented Lagrangian method with dense |
//| preconditioner(hence its name). |
//| * it performs several outer iterations in order to refine |
//| values of the Lagrange multipliers. Single outer iteration is|
//| a solution of some optimization problem: first it performs |
//| dense Cholesky factorization of the Hessian in order to build|
//| preconditioner (adaptive regularization is applied to enforce|
//| positive definiteness), and then it uses L-BFGS optimizer to |
//| solve optimization problem. |
//| * typically you need about 5-10 outer iterations to converge |
//| to solution |
//| ALGORITHM LIMITATIONS: |
//| * because dense Cholesky driver is used, this algorithm has |
//| O(N^2) memory requirements and O(OuterIterations*N^3) minimum|
//| running time. From the practical point of view, it limits its|
//| applicability by several thousands of variables. |
//| From the other side, variables count is the most limiting factor,|
//| and dependence on constraint count is much weaker. Assuming      |
//| that constraint matrix is sparse, it may handle tens of thousands|
//| of general linear constraints. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm State |
//| EpsX - >= 0, stopping criteria for inner optimizer. Inner |
//| iterations are stopped when step length (with|
//| variable scaling being applied) is less than |
//| EpsX. See MinQPSetScale() for more |
//| information on variable scaling. |
//| Rho - penalty coefficient, Rho > 0: |
//| * large enough that algorithm converges with |
//| desired precision. |
//| * not TOO large to prevent ill-conditioning |
//| * recommended values are 100, 1000 or 10000 |
//| ItsCnt - number of outer iterations: |
//| * recommended values: 10 - 15 (although in most |
//| cases it converges within 5 iterations, you may |
//| need a few more to be sure). |
//| * ItsCnt = 0 means that small number of outer |
//| iterations is automatically chosen (10 iterations|
//| in current version). |
//| * ItsCnt = 1 means that AUL algorithm performs just|
//| as usual penalty method. |
//| * ItsCnt > 1 means that AUL algorithm performs |
//| specified number of outer iterations |
//| IT IS VERY IMPORTANT TO CALL MinQPSetScale() WHEN YOU USE THIS |
//| ALGORITHM BECAUSE ITS CONVERGENCE PROPERTIES AND STOPPING |
//| CRITERIA ARE SCALE - DEPENDENT! |
//| NOTE: Passing EpsX = 0 will lead to automatic step length |
//| selection (specific step length chosen may change in the |
//| future versions of ALGLIB, so it is better to specify step |
//| length explicitly). |
//+------------------------------------------------------------------+
void CAlglib::MinQPSetAlgoDenseAUL(CMinQPStateShell &state,double epsx,
                                   double rho,int itscnt)
  {
//--- thin wrapper: select the DENSE-AUL QP algorithm and forward the inner
//--- stopping criterion, penalty coefficient and outer iteration count
   CMinQP::MinQPSetAlgoDenseAUL(state.GetInnerObj(),epsx,rho,itscnt);
  }
//+------------------------------------------------------------------+
//| This function tells QP solver to use DENSE-IPM QP algorithm and |
//| sets stopping criteria for the algorithm. |
//| This algorithm is intended for convex and semidefinite problems |
//| with moderate (up to several thousands) variable count and |
//| arbitrary number of constraints. |
//| IMPORTANT: this algorithm won't work for nonconvex problems, use |
//| DENSE-AUL or BLEIC-QP instead. If you try to run |
//| DENSE-IPM on problem with indefinite matrix (matrix |
//| having at least one negative eigenvalue) then |
//| depending on circumstances it may either(a) stall at |
//| some arbitrary point, or (b) throw exception on |
//| failure of Cholesky decomposition. |
//| ALGORITHM FEATURES: |
//| * supports box and dense / sparse general linear equality / |
//| inequality constraints |
//| ALGORITHM OUTLINE: |
//| * this algorithm is our implementation of interior point method|
//| as formulated by R.J.Vanderbei, with minor modifications to |
//| the algorithm (damped Newton directions are extensively used)|
//| * like all interior point methods, this algorithm tends to |
//| converge in roughly same number of iterations (between 15 and|
//| 50) independently from the problem dimensionality |
//| ALGORITHM LIMITATIONS: |
//| * because dense Cholesky driver is used, for N-dimensional |
//|      problem with M dense constraints this algorithm has         |
//| O(N^2 + N*M) memory requirements and O(N^3 + N*M^2) running |
//| time. Having sparse constraints with Z nonzeros per row |
//| relaxes storage and running time down to O(N^2 + M*Z) and |
//| O(N^3 + N*Z^2) From the practical point of view, it limits |
//| its applicability by several thousands of variables. From the|
//| other side, variables count is the most limiting factor, and |
//|    dependence on constraint count is much weaker. Assuming       |
//| that constraint matrix is sparse, it may handle tens of |
//| thousands of general linear constraints. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm State |
//| Eps - >= 0, stopping criteria. The algorithm stops when |
//| primal and dual infeasiblities as well as |
//| complementarity gap are less than Eps. |
//| IT IS VERY IMPORTANT TO CALL minqpsetscale() WHEN YOU USE THIS |
//| ALGORITHM BECAUSE ITS CONVERGENCE PROPERTIES AND STOPPING |
//| CRITERIA ARE SCALE - DEPENDENT! |
//| NOTE: Passing EpsX = 0 will lead to automatic selection of small |
//| epsilon. |
//| ===== TRACING IPM SOLVER ======================================= |
//| IPM solver supports advanced tracing capabilities. You can trace |
//| algorithm output by specifying following trace symbols (case- |
//| insensitive) by means of trace_file() call: |
//| * 'IPM' - for basic trace of algorithm steps and decisions|
//| Only short scalars(function values and deltas) |
//| are printed. N-dimensional quantities like |
//| search directions are NOT printed. |
//| * 'IPM.DETAILED' - for output of points being visited and |
//| search directions. This symbol also implicitly |
//| defines 'IPM'. You can control output format by |
//| additionally specifying: |
//| * nothing to output in 6-digit exponential |
//| format |
//| * 'PREC.E15' to output in 15-digit exponential |
//| format |
//| * 'PREC.F6' to output in 6-digit fixed-point |
//| format |
//| By default trace is disabled and adds no overhead to the |
//| optimization process. However, specifying any of the symbols adds|
//| some formatting and output - related overhead. |
//| You may specify multiple symbols by separating them with commas: |
//| > |
//| >CAlglib::Trace_File("IPM,PREC.F6","path/to/trace.log") |
//| > |
//+------------------------------------------------------------------+
void CAlglib::MinQPSetAlgoDenseIPM(CMinQPStateShell &state,double eps)
  {
//--- thin wrapper: select the DENSE-IPM QP algorithm with stopping
//--- tolerance eps and forward the call to the computational core
   CMinQP::MinQPSetAlgoDenseIPM(state.GetInnerObj(),eps);
  }
//+------------------------------------------------------------------+
//| This function tells QP solver to use SPARSE-IPM QP algorithm |
//| and sets stopping criteria for the algorithm. |
//| This algorithm is intended for convex and semidefinite |
//| problems with large variable and constraint count and sparse |
//| quadratic term and constraints. It is possible to have some |
//| limited set of dense linear constraints - they will be handled |
//| separately by dense BLAS - but the more dense constraints you |
//| have, the more time solver needs. |
//| IMPORTANT: internally this solver performs large and sparse(N+M) |
//| x(N+M) triangular factorization. So it expects both |
//|            quadratic term and constraints to be highly sparse.   |
//| However, its running time is influenced by BOTH fill|
//| factor and sparsity pattern. |
//| Generally we expect that no more than few nonzero elements per |
//| row are present. However different sparsity patterns may result |
//| in completely different running times even given same fill |
//| factor. |
//| In many cases this algorithm outperforms DENSE-IPM by order of |
//| magnitude. However, in some cases you may get better results |
//| with DENSE-IPM even when solving sparse task. |
//| IMPORTANT: this algorithm won't work for nonconvex problems, use |
//| DENSE-AUL or BLEIC - QP instead. If you try to run |
//| DENSE-IPM on problem with indefinite matrix (matrix |
//| having at least one negative eigenvalue) then |
//| depending on circumstances it may either(a) stall at |
//| some arbitrary point, or (b) throw exception on |
//| failure of Cholesky decomposition. |
//| ALGORITHM FEATURES: |
//| * supports box and dense/sparse general linear equality/ |
//| inequality constraints |
//| * specializes on large-scale sparse problems |
//| ALGORITHM OUTLINE: |
//| * this algorithm is our implementation of interior point |
//| method as formulated by R.J.Vanderbei, with minor |
//| modifications to the algorithm (damped Newton directions are|
//| extensively used) |
//| * like all interior point methods, this algorithm tends to |
//| converge in roughly same number of iterations(between 15 and |
//| 50) independently from the problem dimensionality |
//| ALGORITHM LIMITATIONS: |
//| * this algorithm may handle moderate number of dense |
//| constraints, usually no more than a thousand of dense ones |
//| without losing its efficiency. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm State |
//| Eps - >= 0, stopping criteria. The algorithm stops when |
//| primal and dual infeasiblities as well as |
//| complementarity gap are less than Eps. |
//| IT IS VERY IMPORTANT TO CALL minqpsetscale() WHEN YOU USE THIS |
//| ALGORITHM BECAUSE ITS CONVERGENCE PROPERTIES AND STOPPING |
//| CRITERIA ARE SCALE - DEPENDENT! |
//| NOTE: Passing EpsX = 0 will lead to automatic selection of small |
//| epsilon. |
//| ===== TRACING IPM SOLVER ======================================= |
//| IPM solver supports advanced tracing capabilities. You can trace |
//| algorithm output by specifying following trace symbols |
//| (case-insensitive) by means of trace_file() call: |
//| * 'IPM' - for basic trace of algorithm steps and decisions. |
//| Only short scalars(function values and deltas) are |
//| printed. N-dimensional quantities like search |
//| directions are NOT printed. |
//| * 'IPM.DETAILED' - for output of points being visited and |
//| search directions. This symbol also implicitly |
//| defines 'IPM'. You can control output format by |
//| additionally specifying: |
//| * nothing to output in 6-digit exponential |
//| format |
//| * 'PREC.E15' to output in 15-digit exponential |
//| format |
//| * 'PREC.F6' to output in 6-digit fixed-point |
//| format |
//| By default trace is disabled and adds no overhead to the |
//| optimization process. However, specifying any of the symbols adds|
//| some formatting and output - related overhead. |
//| You may specify multiple symbols by separating them with commas: |
//| > |
//| >CAlglib::Trace_File("IPM,PREC.F6","path/to/trace.log") |
//| > |
//+------------------------------------------------------------------+
void CAlglib::MinQPSetAlgoSparseIPM(CMinQPStateShell &state,double eps)
  {
//--- thin wrapper: select the SPARSE-IPM QP algorithm with stopping
//--- tolerance eps and forward the call to the computational core
   CMinQP::MinQPSetAlgoSparseIPM(state.GetInnerObj(),eps);
  }
//+------------------------------------------------------------------+
//| This function tells solver to use QuickQP algorithm: special |
//| extra-fast algorithm for problems with box-only constrants. It |
//| may solve non-convex problems as long as they are bounded from |
//| below under constraints. |
//| ALGORITHM FEATURES: |
//| * several times faster than DENSE-IPM when running on box-only |
//| problem |
//| * utilizes accelerated methods for activation of constraints. |
//| * supports dense and sparse QP problems |
//| * supports ONLY box constraints; general linear constraints are|
//| NOT supported by this solver |
//| * can solve all types of problems (convex, semidefinite, |
//| nonconvex) as long as they are bounded from below under |
//| constraints. Say, it is possible to solve "min{-x^2} subject |
//| to -1<=x<=+1". In convex/semidefinite case global minimum is|
//| returned, in nonconvex case-algorithm returns one of the |
//| local minimums. |
//| ALGORITHM OUTLINE: |
//| * algorithm performs two kinds of iterations: constrained CG |
//| iterations and constrained Newton iterations |
//| * initially it performs small number of constrained CG |
//| iterations, which can efficiently activate/deactivate |
//| multiple constraints |
//| * after CG phase algorithm tries to calculate Cholesky |
//| decomposition and to perform several constrained Newton |
//| steps. If Cholesky decomposition failed(matrix is |
//| indefinite even under constraints), we perform more CG |
//| iterations until we converge to such set of constraints that |
//| system matrix becomes positive definite. Constrained Newton |
//| steps greatly increase convergence speed and precision. |
//| * algorithm interleaves CG and Newton iterations which allows |
//| to handle indefinite matrices (CG phase) and quickly converge|
//| after final set of constraints is found (Newton phase). |
//| Combination of CG and Newton phases is called "outer |
//| iteration". |
//| * it is possible to turn off Newton phase (beneficial for |
//| semidefinite problems - Cholesky decomposition will fail too |
//| often) |
//| ALGORITHM LIMITATIONS: |
//| * algorithm does not support general linear constraints; only|
//| box ones are supported |
//| * Cholesky decomposition for sparse problems is performed with |
//| Skyline Cholesky solver, which is intended for low-profile |
//| matrices. No profile-reducing reordering of variables is |
//| performed in this version of ALGLIB. |
//| * problems with near-zero negative eigenvalues (or exacty zero |
//| ones) may experience about 2-3x performance penalty. The |
//| reason is that Cholesky decomposition can not be performed |
//| until we identify directions of zero and negative curvature |
//| and activate corresponding boundary constraints- but we need |
//| a lot of trial and errors because these directions are hard |
//| to notice in the matrix spectrum. In this case you may turn |
//| off Newton phase of algorithm. Large negative eigenvalues |
//| are not an issue, so highly non-convex problems can be |
//| solved very efficiently. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm State |
//| EpsG - >= 0. The subroutine finishes its work if the |
//| condition |v| < EpsG is satisfied, where: |
//| * | . | means Euclidian norm |
//| * v - scaled constrained gradient vector, |
//| v[i] = g[i] * s[i] |
//| * g - gradient |
//| * s - scaling coefficients set by MinQPSetScale() |
//| EpsF - >= 0. The subroutine finishes its work if |
//| exploratory steepest descent step on k+1-th |
//| iteration satisfies following condition: |
//| |F(k+1) - F(k)| <= EpsF*max{|F(k)|, |F(k+1)|, 1} |
//| EpsX - >= 0. The subroutine finishes its work if |
//| exploratory steepest descent step on k+1-th |
//| iteration satisfies following condition: |
//| * | . | means Euclidian norm |
//| * v - scaled step vector, v[i] = dx[i] / s[i] |
//| * dx - step vector, dx = X(k + 1) - X(k) |
//| * s - scaling coefficients set by MinQPSetScale() |
//| MaxOuterIts - maximum number of OUTER iterations. One outer |
//| iteration includes some amount of CG iterations |
//| (from 5 to ~N) and one or several (usually small |
//| amount) Newton steps. Thus, one outer iteration has|
//|               high cost, but can greatly reduce function value.  |
//| Use 0 if you do not want to limit number of outer |
//| iterations. |
//| UseNewton - use Newton phase or not: |
//| * Newton phase improves performance of positive |
//| definite dense problems (about 2 times |
//| improvement can be observed) |
//| * can result in some performance penalty on |
//| semidefinite or slightly negative definite |
//| problems - each Newton phase will bring no |
//| improvement (Cholesky failure), but still will |
//| require computational time. |
//| * if you doubt, you can turn off this phase - |
//| optimizer will retain its most of its high speed.|
//| IT IS VERY IMPORTANT TO CALL MinQPSetScale() WHEN YOU USE THIS |
//| ALGORITHM BECAUSE ITS STOPPING CRITERIA ARE SCALE - DEPENDENT! |
//| Passing EpsG = 0, EpsF = 0 and EpsX = 0 and MaxIts = 0 |
//| (simultaneously) will lead to automatic stopping criterion |
//| selection (presently it is small step length, but it may change |
//| in the future versions of ALGLIB). |
//+------------------------------------------------------------------+
void CAlglib::MinQPSetAlgoQuickQP(CMinQPStateShell &state,double epsg,
                                  double epsf,double epsx,
                                  int maxouterits,bool usenewton)
  {
//--- thin wrapper: select the QuickQP algorithm (box constraints only) and
//--- forward stopping criteria, outer iteration limit and Newton-phase flag
   CMinQP::MinQPSetAlgoQuickQP(state.GetInnerObj(),epsg,epsf,epsx,maxouterits,usenewton);
  }
//+------------------------------------------------------------------+
//| This function tells solver to use Cholesky-based algorithm. |
//| Cholesky-based algorithm can be used when: |
//| * problem is convex |
//| * there is no constraints or only boundary constraints are |
//| present |
//| This algorithm has O(N^3) complexity for unconstrained problem |
//| and is up to several times slower on bound constrained problems |
//| (these additional iterations are needed to identify active |
//| constraints). |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//+------------------------------------------------------------------+
void CAlglib::MinQPSetAlgoCholesky(CMinQPStateShell &state)
  {
//--- thin wrapper: select the Cholesky-based QP algorithm (convex
//--- problems, box constraints only) in the computational core
   CMinQP::MinQPSetAlgoCholesky(state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| This function sets boundary constraints for QP solver |
//| Boundary constraints are inactive by default (after initial |
//| creation). After being set, they are preserved until explicitly |
//| turned off with another SetBC() call. |
//| INPUT PARAMETERS: |
//| State - structure stores algorithm state |
//| BndL - lower bounds, array[N]. |
//| If some (all) variables are unbounded, you may |
//| specify very small number or -INF (latter is |
//| recommended because it will allow solver to use |
//| better algorithm). |
//| BndU - upper bounds, array[N]. |
//| If some (all) variables are unbounded, you may |
//| specify very large number or +INF (latter is |
//| recommended because it will allow solver to use |
//| better algorithm). |
//| NOTE: it is possible to specify BndL[i]=BndU[i]. In this case |
//| I-th variable will be "frozen" at X[i]=BndL[i]=BndU[i]. |
//+------------------------------------------------------------------+
void CAlglib::MinQPSetBC(CMinQPStateShell &state,double &bndl[],
                         double &bndu[])
  {
//--- thin wrapper: forward per-variable lower/upper bounds (arrays[N]) to
//--- the computational core
   CMinQP::MinQPSetBC(state.GetInnerObj(),bndl,bndu);
  }
//+------------------------------------------------------------------+
//| This function sets box constraints for QP solver (all variables |
//| at once, same constraints for all variables) |
//| Box constraints are inactive by default (after initial creation)|
//| After being set, they are preserved until explicitly overwritten |
//| with another MinQPSetBC() or MinQPSetBCAll() call, or partially |
//| overwritten with MinQPSetBCI() call. |
//| Following types of constraints are supported: |
//| DESCRIPTION CONSTRAINT HOW TO SPECIFY |
//| fixed variable x[i]=Bnd BndL=BndU |
//| lower bound BndL<=x[i] BndU=+INF |
//| upper bound x[i]<=BndU BndL=-INF |
//| range BndL<=x[i]<=BndU ... |
//|   free variable            -               BndL=-INF, BndU=+INF  |
//| INPUT PARAMETERS: |
//| State - structure stores algorithm State |
//| BndL - lower bound, same for all variables |
//| BndU - upper bound, same for all variables |
//| NOTE: infinite values can be specified by means of AL_NEGINF and |
//| AL_POSINF. |
//| NOTE: you may replace infinities by very small/very large values,|
//| but it is not recommended because large numbers may |
//| introduce large numerical errors in the algorithm. |
//| NOTE: BndL>BndU will result in QP problem being recognized as |
//| infeasible. |
//+------------------------------------------------------------------+
void CAlglib::MinQPSetBCAll(CMinQPStateShell &state,double bndl,
                            double bndu)
  {
//--- thin wrapper: apply the same [bndl,bndu] box constraint to all
//--- variables via the computational core
   CMinQP::MinQPSetBCAll(state.GetInnerObj(),bndl,bndu);
  }
//+------------------------------------------------------------------+
//| This function sets box constraints for I-th variable (other |
//| variables are not modified). |
//| Following types of constraints are supported: |
//| DESCRIPTION CONSTRAINT HOW TO SPECIFY |
//| fixed variable x[i] = Bnd BndL = BndU |
//| lower bound BndL <= x[i] BndU = +INF |
//| upper bound x[i] <= BndU BndL = -INF |
//| range BndL <= x[i] <= BndU ... |
//|   free variable       -              BndL=-INF, BndU=+INF        |
//| INPUT PARAMETERS: |
//| State - structure stores algorithm State |
//| BndL - lower bound |
//| BndU - upper bound |
//| NOTE: infinite values can be specified by means of AL_NEGINF and |
//| AL_POSINF. |
//| NOTE: you may replace infinities by very small/very large values,|
//| but it is not recommended because large numbers may |
//| introduce large numerical errors in the algorithm. |
//| NOTE: BndL>BndU will result in QP problem being recognized as |
//| infeasible. |
//+------------------------------------------------------------------+
void CAlglib::MinQPSetBCI(CMinQPStateShell &state,int i,
                          double bndl,double bndu)
  {
//--- unwrap the shell object and forward to the computational core
   CMinQP::MinQPSetBCI(state.GetInnerObj(),i,bndl,bndu);
  }
//+------------------------------------------------------------------+
//| This function sets dense linear constraints for QP optimizer. |
//| This function overrides results of previous calls to MinQPSetLC()|
//| MinQPSetLCSparse() and MinQPSetLCMixed(). After call to this |
//| function all non-box constraints are dropped, and you have only |
//| those constraints which were specified in the present call. |
//| If you want to specify mixed(with dense and sparse terms) linear |
//| constraints, you should call MinQPSetLCMixed(). |
//| INPUT PARAMETERS: |
//| State - structure previously allocated with MinQPCreate |
//| call. |
//| C - linear constraints, array[K, N + 1]. Each row of C |
//| represents one constraint, either equality or |
//| inequality (see below): |
//| * first N elements correspond to coefficients, |
//| * last element corresponds to the right part. |
//| All elements of C(including right part) must be |
//| finite. |
//| CT - type of constraints, array[K]: |
//| * if CT[i] > 0, then I-th constraint is |
//| C[i, *] * x >= C[i, n + 1] |
//| * if CT[i] = 0, then I-th constraint is |
//| C[i, *] * x = C[i, n + 1] |
//| * if CT[i] < 0, then I-th constraint is |
//| C[i, *] * x <= C[i, n + 1] |
//| K - number of equality/inequality constraints, K >= 0: |
//| * if given, only leading K elements of C/CT are |
//| used |
//| * if not given, automatically determined from sizes|
//| of C/CT |
//| NOTE 1: linear (non-bound) constraints are satisfied only |
//| approximately - there always exists some violation due |
//| to numerical errors and algorithmic limitations |
//| (BLEIC-QP solver is most precise, AUL-QP solver is less |
//| precise). |
//+------------------------------------------------------------------+
void CAlglib::MinQPSetLC(CMinQPStateShell &state,CMatrixDouble &c,
                         CRowInt &ct,int k)
  {
//--- unwrap the shell object and forward to the computational core
   CMinQP::MinQPSetLC(state.GetInnerObj(),c,ct,k);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::MinQPSetLC(CMinQPStateShell &state,CMatrixDouble &c,
                         CRowInt &ct)
  {
//--- infer the constraint count K from the number of rows of C
   int k=CAp::Rows(c);
//--- C and CT must describe the same number of constraints
   if(!CAp::Assert(k==CAp::Len(ct),"Error while calling 'MinQPSetLC': looks like one of arguments has wrong size"))
      return;
//--- delegate to the computational core
   CMinQP::MinQPSetLC(state.GetInnerObj(),c,ct,k);
  }
//+------------------------------------------------------------------+
//| This function sets sparse linear constraints for QP optimizer. |
//| This function overrides results of previous calls to MinQPSetLC()|
//| MinQPSetLCSparse() and MinQPSetLCMixed(). After call to this |
//| function all non-box constraints are dropped, and you have only |
//| those constraints which were specified in the present call. |
//| If you want to specify mixed(with dense and sparse terms) linear |
//| constraints, you should call MinQPSetLCMixed(). |
//| INPUT PARAMETERS: |
//| State - structure previously allocated with MinQPCreate |
//| call. |
//| C - linear constraints, sparse matrix with dimensions |
//| at least [K, N + 1]. If matrix has larger size, |
//| only leading Kx(N + 1) rectangle is used. Each row |
//| of C represents one constraint, either equality or |
//| inequality(see below) : |
//| * first N elements correspond to coefficients, |
//| * last element corresponds to the right part. |
//| All elements of C(including right part) must be |
//| finite. |
//| CT - type of constraints, array[K]: |
//| * if CT[i] > 0, then I-th constraint is |
//| C[i, *] * x >= C[i, n + 1] |
//| * if CT[i] = 0, then I-th constraint is |
//| C[i, *] * x = C[i, n + 1] |
//| * if CT[i] < 0, then I-th constraint is |
//| C[i, *] * x <= C[i, n + 1] |
//| K - number of equality/inequality constraints, K >= 0: |
//| * if given, only leading K elements of C/CT are |
//| used |
//| * if not given, automatically determined from sizes|
//| of C/CT |
//| NOTE 1: linear (non-bound) constraints are satisfied only |
//| approximately - there always exists some violation due |
//| to numerical errors and algorithmic limitations |
//| (BLEIC-QP solver is most precise, AUL-QP solver is less |
//| precise). |
//+------------------------------------------------------------------+
void CAlglib::MinQPSetLCSparse(CMinQPStateShell &state,CSparseMatrix &c,
                               CRowInt &ct,int k)
  {
//--- unwrap the shell object and forward to the computational core
   CMinQP::MinQPSetLCSparse(state.GetInnerObj(),c,ct,k);
  }
//+------------------------------------------------------------------+
//| This function sets mixed linear constraints, which include a set |
//| of dense rows, and a set of sparse rows. |
//| This function overrides results of previous calls to MinQPSetLC()|
//| MinQPSetLCSparse() and MinQPSetLCMixed(). After call to this |
//| function all non-box constraints are dropped, and you have only |
//| those constraints which were specified in the present call. |
//| If you want to specify mixed(with dense and sparse terms) linear |
//| constraints, you should call MinQPSetLCMixed(). |
//| INPUT PARAMETERS: |
//| State - structure previously allocated with MinQPCreate |
//| call. |
//| SparseC - linear constraints, sparse matrix with dimensions |
//| EXACTLY EQUAL TO [SparseK, N + 1]. Each row of C |
//| represents one constraint, either equality or |
//| inequality (see below): |
//| * first N elements correspond to coefficients, |
//| * last element corresponds to the right part. |
//| All elements of C(including right part) must be |
//| finite. |
//| SparseCT - type of sparse constraints, array[K]: |
//| * if SparseCT[i] > 0, then I-th constraint is |
//| SparseC[i, *] * x >= SparseC[i, n + 1] |
//| * if SparseCT[i] = 0, then I-th constraint is |
//| SparseC[i, *] * x = SparseC[i, n + 1] |
//| * if SparseCT[i] < 0, then I-th constraint is |
//| SparseC[i, *] * x <= SparseC[i, n + 1] |
//| SparseK - number of sparse equality/inequality constraints, |
//| K >= 0 |
//| DenseC - dense linear constraints, array[K, N + 1]. Each row|
//| of DenseC represents one constraint, either |
//| equality or inequality(see below): |
//| * first N elements correspond to coefficients, |
//| * last element corresponds to the right part. |
//| All elements of DenseC (including right part) must |
//| be finite. |
//| DenseCT - type of constraints, array[K]: |
//| * if DenseCT[i] > 0, then I-th constraint is |
//| DenseC[i, *] * x >= DenseC[i, n + 1] |
//| * if DenseCT[i] = 0, then I-th constraint is |
//| DenseC[i, *] * x = DenseC[i, n + 1] |
//| * if DenseCT[i] < 0, then I-th constraint is |
//| DenseC[i, *] * x <= DenseC[i, n + 1] |
//| DenseK - number of equality/inequality constraints, |
//| DenseK >= 0 |
//| NOTE 1: linear(non-box) constraints are satisfied only |
//| approximately - there always exists some violation due |
//| to numerical errors and algorithmic limitations |
//| (BLEIC-QP solver is most precise, AUL-QP solver is less |
//| precise). |
//| NOTE 2: due to backward compatibility reasons SparseC can be |
//| larger than [SparseK, N + 1]. In this case only leading |
//| [SparseK, N+1] submatrix will be used. However, the rest |
//| of ALGLIB has more strict requirements on the input size,|
//| so we recommend you to pass sparse term whose size |
//| exactly matches algorithm expectations. |
//+------------------------------------------------------------------+
void CAlglib::MinQPSetLCMixed(CMinQPStateShell &state,
                              CSparseMatrix &sparsec,CRowInt &sparsect,
                              int sparsek,CMatrixDouble &densec,
                              CRowInt &densect,int densek)
  {
//--- unwrap the shell object and forward to the computational core
//--- (new-API order: sparse constraints first, then dense ones)
   CMinQP::MinQPSetLCMixed(state.GetInnerObj(),sparsec,sparsect,sparsek,densec,densect,densek);
  }
//+------------------------------------------------------------------+
//| This function provides legacy API for specification of mixed |
//| dense / sparse linear constraints. |
//| New conventions used by ALGLIB since release 3.16.0 state that |
//| set of sparse constraints comes first, followed by set of |
//| dense ones. This convention is essential when you talk about |
//| things like order of Lagrange multipliers. |
//| However, legacy API accepted mixed constraints in reverse order. |
//| This function is here to simplify situation with code relying on |
//| legacy API. It simply accepts constraints in one order (old) and |
//| passes them to new API, now in correct order. |
//+------------------------------------------------------------------+
void CAlglib::MinQPSetLCMixedLegacy(CMinQPStateShell &state,
                                    CMatrixDouble &densec,
                                    CRowInt &densect,int densek,
                                    CSparseMatrix &sparsec,
                                    CRowInt &sparsect,int sparsek)
  {
//--- accept constraints in legacy order (dense first) and pass them
//--- to the core, which re-routes them to the new-order API
   CMinQP::MinQPSetLCMixedLegacy(state.GetInnerObj(),densec,densect,densek,sparsec,sparsect,sparsek);
  }
//+------------------------------------------------------------------+
//| This function sets two-sided linear constraints AL <= A*x <= AU |
//| with dense constraint matrix A. |
//| NOTE: knowing that constraint matrix is dense helps some QP |
//| solvers (especially modern IPM method) to utilize efficient dense|
//| Level 3 BLAS for dense parts of the problem. If your problem has |
//| both dense and sparse constraints, you can use MinQPSetLC2Mixed()|
//| function, which will result in dense algebra being applied to |
//| dense terms, and sparse linear algebra applied to sparse |
//| terms. |
//| INPUT PARAMETERS: |
//| State - structure previously allocated with MinQPCreate() |
//| call. |
//| A - linear constraints, array[K, N]. Each row of A |
//| represents one constraint. One-sided inequality |
//| constraints, two-sided inequality constraints, |
//| equality constraints are supported (see below) |
//| AL, AU - lower and upper bounds, array[K]; |
//| * AL[i] = AU[i] => equality constraint Ai * x |
//| * AL[i]<AU[i] => two-sided constraint |
//| AL[i] <= Ai*x <= AU[i] |
//| * AL[i] = -INF => one-sided constraint |
//| Ai*x <= AU[i] |
//| * AU[i] = +INF => one-sided constraint |
//| AL[i] <= Ai*x |
//| * AL[i] = -INF, AU[i] = +INF => constraint is |
//| ignored |
//| K - number of equality/inequality constraints, K >= 0; |
//| if not given, inferred from sizes of A, AL, AU. |
//+------------------------------------------------------------------+
void CAlglib::MinQPSetLC2Dense(CMinQPStateShell &state,
                               CMatrixDouble &a,CRowDouble &al,
                               CRowDouble &au,int k)
  {
//--- unwrap the shell object and forward to the computational core
   CMinQP::MinQPSetLC2Dense(state.GetInnerObj(),a,al,au,k);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::MinQPSetLC2Dense(CMinQPStateShell &state,
                               CMatrixDouble &a,CRowDouble &al,
                               CRowDouble &au)
  {
//--- check: both bound vectors AL and AU must have exactly one entry
//--- per constraint row of A. The previous condition combined '||'
//--- with '!=' and therefore accepted a mismatched AU array.
   if(!CAp::Assert((CAp::Rows(a)==CAp::Len(al)) && (CAp::Rows(a)==CAp::Len(au)),
                   "Error while calling 'minqpsetlc2dense': looks like one of arguments has wrong size"))
      return;
//--- initialization: infer constraint count K from the rows of A
   int k=CAp::Rows(a);
//--- function call
   CMinQP::MinQPSetLC2Dense(state.GetInnerObj(),a,al,au,k);
  }
//+------------------------------------------------------------------+
//| This function sets two-sided linear constraints AL <= A*x <= AU |
//| with sparse constraining matrix A. Recommended for large-scale |
//| problems. |
//| This function overwrites linear (non-box) constraints set by |
//| previous calls(if such calls were made). |
//| INPUT PARAMETERS: |
//| State - structure previously allocated with MinQPCreate() |
//| call. |
//| A - sparse matrix with size [K, N](exactly!). Each row |
//| of A represents one general linear constraint. A |
//| can be stored in any sparse storage format. |
//| AL, AU - lower and upper bounds, array[K]; |
//| * AL[i] = AU[i] => equality constraint Ai*x |
//| * AL[i]<AU[i] => two-sided constraint |
//| AL[i] <= Ai*x <= AU[i] |
//| * AL[i] = -INF => one-sided constraint |
//| Ai*x <= AU[i] |
//| * AU[i] = +INF => one-sided constraint |
//| AL[i] <= Ai*x |
//| * AL[i] = -INF, AU[i] = +INF => constraint is |
//| ignored |
//| K - number of equality/inequality constraints, K >= 0. |
//| If K = 0 is specified, A, AL, AU are ignored. |
//+------------------------------------------------------------------+
void CAlglib::MinQPSetLC2(CMinQPStateShell &state,CSparseMatrix &a,
                          CRowDouble &al,CRowDouble &au,int k)
  {
//--- unwrap the shell object and forward to the computational core
   CMinQP::MinQPSetLC2(state.GetInnerObj(),a,al,au,k);
  }
//+------------------------------------------------------------------+
//| This function sets two-sided linear constraints AL <= A*x <= AU |
//| with mixed constraining matrix A including sparse part (first |
//| SparseK rows) and dense part(last DenseK rows). Recommended for |
//| large-scale problems. |
//| This function overwrites linear (non-box) constraints set by |
//| previous calls (if such calls were made). |
//| This function may be useful if constraint matrix includes large |
//| number of both types of rows - dense and CSparse If you have just|
//| a few sparse rows, you may represent them in dense format without|
//| losing performance. Similarly, if you have just a few dense rows,|
//| you may store them in sparse format with almost same performance.|
//| INPUT PARAMETERS: |
//| State - structure previously allocated with MinQPCreate() |
//| call. |
//| SparseA - sparse matrix with size [K, N](exactly!). Each row |
//| of A represents one general linear constraint. A |
//| can be stored in any sparse storage format. |
//| SparseK - number of sparse constraints, SparseK >= 0 |
//| DenseA - linear constraints, array[K, N], set of dense |
//| constraints. Each row of A represents one general |
//| linear constraint. |
//| DenseK - number of dense constraints, DenseK >= 0 |
//| AL, AU - lower and upper bounds, array[SparseK + DenseK], |
//| with former SparseK elements corresponding to |
//| sparse constraints, and latter DenseK elements |
//| corresponding to dense constraints; |
//| * AL[i] = AU[i] => equality constraint Ai*x |
//| * AL[i]<AU[i] => two-sided constraint |
//| AL[i] <= Ai*x <= AU[i] |
//| * AL[i] = -INF => one-sided constraint |
//| Ai*x <= AU[i] |
//| * AU[i] = +INF => one-sided constraint |
//| AL[i] <= Ai*x |
//| * AL[i] = -INF, AU[i] = +INF => constraint is |
//| ignored |
//| K - number of equality/inequality constraints, K >= 0.|
//| If K = 0 is specified, A, AL, AU are ignored. |
//+------------------------------------------------------------------+
void CAlglib::MinQPSetLC2Mixed(CMinQPStateShell &state,
                               CSparseMatrix &sparsea,int ksparse,
                               CMatrixDouble &densea,int kdense,
                               CRowDouble &al,CRowDouble &au)
  {
//--- unwrap the shell object and forward to the computational core
   CMinQP::MinQPSetLC2Mixed(state.GetInnerObj(),sparsea,ksparse,densea,kdense,al,au);
  }
//+------------------------------------------------------------------+
//| This function appends two-sided linear constraint AL <= A*x <= AU|
//| to the matrix of currently present dense constraints. |
//| INPUT PARAMETERS: |
//| State - structure previously allocated with MinQPCreate() |
//| call. |
//| A - linear constraint coefficient, array[N], right side|
//| is NOT included. |
//| AL, AU - lower and upper bounds; |
//| * AL = AU => equality constraint Ai*x |
//| * AL < AU => two-sided constraint |
//| AL <= Ai*x <= AU |
//| * AL = -INF => one-sided constraint |
//| Ai*x <= AU |
//| * AU = +INF => one-sided constraint |
//| AL <= Ai*x |
//| * AL = -INF, AU = +INF => constraint is |
//| ignored |
//+------------------------------------------------------------------+
void CAlglib::MinQPAddLC2Dense(CMinQPStateShell &state,CRowDouble &a,
                               double al,double au)
  {
//--- unwrap the shell object and forward to the computational core
   CMinQP::MinQPAddLC2Dense(state.GetInnerObj(),a,al,au);
  }
//+------------------------------------------------------------------+
//| This function appends two-sided linear constraint AL <= A*x <= AU|
//| to the list of currently present sparse constraints. |
//| Constraint is passed in compressed format: as list of non-zero |
//| entries of coefficient vector A. Such approach is more efficient |
//| than dense storage for highly sparse constraint vectors. |
//| INPUT PARAMETERS: |
//| State - structure previously allocated with MinQPCreate() |
//| call. |
//| IdxA - array[NNZ], indexes of non-zero elements of A: |
//| * can be unsorted |
//| * can include duplicate indexes (corresponding |
//| entries of ValA[] will be summed) |
//| ValA - array[NNZ], values of non-zero elements of A |
//| NNZ - number of non-zero coefficients in A |
//| AL, AU - lower and upper bounds; |
//| * AL = AU => equality constraint A*x |
//| * AL<AU => two-sided constraint AL <= A*x <= AU |
//| * AL = -INF => one-sided constraint A*x <= AU |
//| * AU = +INF => one-sided constraint AL <= A*x |
//| * AL = -INF, AU = +INF => constraint is ignored |
//+------------------------------------------------------------------+
void CAlglib::MinQPAddLC2(CMinQPStateShell &state,CRowInt &idxa,
                          CRowDouble &vala,int nnz,
                          double al,double au)
  {
//--- unwrap the shell object and forward to the computational core
   CMinQP::MinQPAddLC2(state.GetInnerObj(),idxa,vala,nnz,al,au);
  }
//+------------------------------------------------------------------+
//| This function appends two-sided linear constraint AL <= A*x <= AU|
//| to the list of currently present sparse constraints. |
//| Constraint vector A is passed as a dense array which is |
//| internally sparsified by this function. |
//| INPUT PARAMETERS: |
//| State - structure previously allocated with MinQPCreate() |
//| call. |
//| DA - array[N], constraint vector |
//| AL, AU - lower and upper bounds; |
//| * AL = AU => equality constraint A*x |
//| * AL < AU => two-sided constraint AL<=A*x<=AU |
//| * AL = -INF => one-sided constraint A*x <= AU |
//| * AU = +INF => one-sided constraint AL <= A*x |
//| * AL = -INF, AU = +INF => constraint is ignored |
//+------------------------------------------------------------------+
void CAlglib::MinQPAddLC2SparseFromDense(CMinQPStateShell &state,
                                         CRowDouble &da,
                                         double al,double au)
  {
//--- unwrap the shell object and forward to the computational core
   CMinQP::MinQPAddLC2SparseFromDense(state.GetInnerObj(),da,al,au);
  }
//+------------------------------------------------------------------+
//| This function solves quadratic programming problem. |
//| You should call it after setting solver options with |
//| MinQPSet...() calls. |
//| INPUT PARAMETERS: |
//| State - algorithm state |
//| You should use MinQPResults() function to access results after |
//| calls to this function. |
//+------------------------------------------------------------------+
void CAlglib::MinQPOptimize(CMinQPStateShell &state)
  {
//--- unwrap the shell object and run the solver on the inner state
   CMinQP::MinQPOptimize(state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| QP solver results |
//| INPUT PARAMETERS: |
//| State - algorithm state |
//| OUTPUT PARAMETERS: |
//| X - array[0..N-1], solution |
//| Rep - optimization report. You should check Rep. |
//| TerminationType, which contains completion code, |
//| and you may check another fields which contain |
//| another information about algorithm functioning. |
//+------------------------------------------------------------------+
void CAlglib::MinQPResults(CMinQPStateShell &state,double &x[],
                           CMinQPReportShell &rep)
  {
//--- unwrap both shells and forward to the computational core
   CMinQP::MinQPResults(state.GetInnerObj(),x,rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| QP results |
//| Buffered implementation of MinQPResults() which uses |
//| pre-allocated buffer to store X[]. If buffer size is too small, |
//| it resizes buffer. It is intended to be used in the inner cycles |
//| of performance critical algorithms where array reallocation |
//| penalty is too large to be ignored. |
//+------------------------------------------------------------------+
void CAlglib::MinQPResultsBuf(CMinQPStateShell &state,double &x[],
                              CMinQPReportShell &rep)
  {
//--- unwrap both shells and forward to the buffered core routine
   CMinQP::MinQPResultsBuf(state.GetInnerObj(),x,rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| IMPROVED LEVENBERG-MARQUARDT METHOD FOR |
//| NON-LINEAR LEAST SQUARES OPTIMIZATION |
//| DESCRIPTION: |
//| This function is used to find minimum of function which is |
//| represented as sum of squares: |
//| F(x) = f[0]^2(x[0],...,x[n-1]) + ... + |
//| + f[m-1]^2(x[0],...,x[n-1]) |
//| using value of function vector f[] and Jacobian of f[]. |
//| REQUIREMENTS: |
//| This algorithm will request following information during its |
//| operation: |
//| * function vector f[] at given point X |
//| * function vector f[] and Jacobian of f[] (simultaneously) at |
//| given point |
//| There are several overloaded versions of MinLMOptimize() |
//| function which correspond to different LM-like optimization |
//| algorithms provided by this unit. You should choose version which|
//| accepts fvec() and jac() callbacks. First one is used to |
//| calculate f[] at given point, second one calculates f[] and |
//| Jacobian df[i]/dx[j]. |
//| You can try to initialize MinLMState structure with VJ function |
//| and then use incorrect version of MinLMOptimize() (for example,|
//| version which works with general form function and does not |
//| provide Jacobian), but it will lead to exception being thrown |
//| after first attempt to calculate Jacobian. |
//| USAGE: |
//| 1. User initializes algorithm state with MinLMCreateVJ() call |
//| 2. User tunes solver parameters with MinLMSetCond(), |
//| MinLMSetStpMax() and other functions |
//| 3. User calls MinLMOptimize() function which takes algorithm |
//| state and callback functions. |
//| 4. User calls MinLMResults() to get solution |
//| 5. Optionally, user may call MinLMRestartFrom() to solve another |
//| problem with same N/M but another starting point and/or |
//| another function. MinLMRestartFrom() allows to reuse already |
//| initialized structure. |
//| INPUT PARAMETERS: |
//| N - dimension, N>1 |
//| * if given, only leading N elements of X are |
//| used |
//| * if not given, automatically determined from |
//| size of X |
//| M - number of functions f[i] |
//| X - initial solution, array[0..N-1] |
//| OUTPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| NOTES: |
//| 1. you may tune stopping conditions with MinLMSetCond() function |
//| 2. if target function contains exp() or other fast growing |
//| functions, and optimization algorithm makes too large steps |
//| which leads to overflow, use MinLMSetStpMax() function to |
//| bound algorithm's steps. |
//+------------------------------------------------------------------+
void CAlglib::MinLMCreateVJ(const int n,const int m,double &x[],
                            CMinLMStateShell &state)
  {
//--- unwrap the shell object and forward to the computational core
   CMinLM::MinLMCreateVJ(n,m,x,state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| IMPROVED LEVENBERG-MARQUARDT METHOD FOR |
//| NON-LINEAR LEAST SQUARES OPTIMIZATION |
//| DESCRIPTION: |
//| This function is used to find minimum of function which is |
//| represented as sum of squares: |
//| F(x) = f[0]^2(x[0],...,x[n-1]) + ... + |
//| + f[m-1]^2(x[0],...,x[n-1]) |
//| using value of function vector f[] and Jacobian of f[]. |
//| REQUIREMENTS: |
//| This algorithm will request following information during its |
//| operation: |
//| * function vector f[] at given point X |
//| * function vector f[] and Jacobian of f[] (simultaneously) at |
//| given point |
//| There are several overloaded versions of MinLMOptimize() |
//| function which correspond to different LM-like optimization |
//| algorithms provided by this unit. You should choose version which|
//| accepts fvec() and jac() callbacks. First one is used to |
//| calculate f[] at given point, second one calculates f[] and |
//| Jacobian df[i]/dx[j]. |
//| You can try to initialize MinLMState structure with VJ function |
//| and then use incorrect version of MinLMOptimize() (for example,|
//| version which works with general form function and does not |
//| provide Jacobian), but it will lead to exception being thrown |
//| after first attempt to calculate Jacobian. |
//| USAGE: |
//| 1. User initializes algorithm state with MinLMCreateVJ() call |
//| 2. User tunes solver parameters with MinLMSetCond(), |
//| MinLMSetStpMax() and other functions |
//| 3. User calls MinLMOptimize() function which takes algorithm |
//| state and callback functions. |
//| 4. User calls MinLMResults() to get solution |
//| 5. Optionally, user may call MinLMRestartFrom() to solve another |
//| problem with same N/M but another starting point and/or |
//| another function. MinLMRestartFrom() allows to reuse already |
//| initialized structure. |
//| INPUT PARAMETERS: |
//| N - dimension, N>1 |
//| * if given, only leading N elements of X are |
//| used |
//| * if not given, automatically determined from |
//| size of X |
//| M - number of functions f[i] |
//| X - initial solution, array[0..N-1] |
//| OUTPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| NOTES: |
//| 1. you may tune stopping conditions with MinLMSetCond() function |
//| 2. if target function contains exp() or other fast growing |
//| functions, and optimization algorithm makes too large steps |
//| which leads to overflow, use MinLMSetStpMax() function to |
//| bound algorithm's steps. |
//+------------------------------------------------------------------+
void CAlglib::MinLMCreateVJ(const int m,double &x[],CMinLMStateShell &state)
  {
//--- dimension N is taken from the length of the initial point X
   CMinLM::MinLMCreateVJ(CAp::Len(x),m,x,state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| IMPROVED LEVENBERG-MARQUARDT METHOD FOR |
//| NON-LINEAR LEAST SQUARES OPTIMIZATION |
//| DESCRIPTION: |
//| This function is used to find minimum of function which is |
//| represented as sum of squares: |
//| F(x) = f[0]^2(x[0],...,x[n-1]) + ... + |
//| + f[m-1]^2(x[0],...,x[n-1]) |
//| using value of function vector f[] only. Finite differences are |
//| used to calculate Jacobian. |
//| REQUIREMENTS: |
//| This algorithm will request following information during its |
//| operation: |
//| * function vector f[] at given point X |
//| There are several overloaded versions of MinLMOptimize() function|
//| which correspond to different LM-like optimization algorithms |
//| provided by this unit. You should choose version which accepts |
//| fvec() callback. |
//| You can try to initialize MinLMState structure with VJ function |
//| and then use incorrect version of MinLMOptimize() (for example, |
//| version which works with general form function and does not |
//| accept function vector), but it will lead to exception being |
//| thrown after first attempt to calculate Jacobian. |
//| USAGE: |
//| 1. User initializes algorithm state with MinLMCreateV() call |
//| 2. User tunes solver parameters with MinLMSetCond(), |
//| MinLMSetStpMax() and other functions |
//| 3. User calls MinLMOptimize() function which takes algorithm |
//| state and callback functions. |
//| 4. User calls MinLMResults() to get solution |
//| 5. Optionally, user may call MinLMRestartFrom() to solve another |
//| problem with same N/M but another starting point and/or |
//| another function. MinLMRestartFrom() allows to reuse already |
//| initialized structure. |
//| INPUT PARAMETERS: |
//| N - dimension, N>1 |
//| * if given, only leading N elements of X are |
//| used |
//| * if not given, automatically determined from |
//| size of X |
//| M - number of functions f[i] |
//| X - initial solution, array[0..N-1] |
//| DiffStep- differentiation step, >0 |
//| OUTPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| See also MinLMIteration, MinLMResults. |
//| NOTES: |
//| 1. you may tune stopping conditions with MinLMSetCond() function |
//| 2. if target function contains exp() or other fast growing |
//| functions, and optimization algorithm makes too large steps |
//| which leads to overflow, use MinLMSetStpMax() function to |
//| bound algorithm's steps. |
//+------------------------------------------------------------------+
void CAlglib::MinLMCreateV(const int n,const int m,double &x[],
                           double diffstep,CMinLMStateShell &state)
  {
//--- unwrap the shell object and forward to the computational core
   CMinLM::MinLMCreateV(n,m,x,diffstep,state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| IMPROVED LEVENBERG-MARQUARDT METHOD FOR |
//| NON-LINEAR LEAST SQUARES OPTIMIZATION |
//| DESCRIPTION: |
//| This function is used to find minimum of function which is |
//| represented as sum of squares: |
//| F(x) = f[0]^2(x[0],...,x[n-1]) + ... + |
//| + f[m-1]^2(x[0],...,x[n-1]) |
//| using value of function vector f[] only. Finite differences are |
//| used to calculate Jacobian. |
//| REQUIREMENTS: |
//| This algorithm will request following information during its |
//| operation: |
//| * function vector f[] at given point X |
//| There are several overloaded versions of MinLMOptimize() function|
//| which correspond to different LM-like optimization algorithms |
//| provided by this unit. You should choose version which accepts |
//| fvec() callback. |
//| You can try to initialize MinLMState structure with VJ function |
//| and then use incorrect version of MinLMOptimize() (for example, |
//| version which works with general form function and does not |
//| accept function vector), but it will lead to exception being |
//| thrown after first attempt to calculate Jacobian. |
//| USAGE: |
//| 1. User initializes algorithm state with MinLMCreateV() call |
//| 2. User tunes solver parameters with MinLMSetCond(), |
//| MinLMSetStpMax() and other functions |
//| 3. User calls MinLMOptimize() function which takes algorithm |
//| state and callback functions. |
//| 4. User calls MinLMResults() to get solution |
//| 5. Optionally, user may call MinLMRestartFrom() to solve another |
//| problem with same N/M but another starting point and/or |
//| another function. MinLMRestartFrom() allows to reuse already |
//| initialized structure. |
//| INPUT PARAMETERS: |
//| N - dimension, N>1 |
//| * if given, only leading N elements of X are |
//| used |
//| * if not given, automatically determined from |
//| size of X |
//| M - number of functions f[i] |
//| X - initial solution, array[0..N-1] |
//| DiffStep- differentiation step, >0 |
//| OUTPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| See also MinLMIteration, MinLMResults. |
//| NOTES: |
//| 1. you may tune stopping conditions with MinLMSetCond() function |
//| 2. if target function contains exp() or other fast growing |
//| functions, and optimization algorithm makes too large steps |
//| which leads to overflow, use MinLMSetStpMax() function to |
//| bound algorithm's steps. |
//+------------------------------------------------------------------+
void CAlglib::MinLMCreateV(const int m,double &x[],const double diffstep,
                           CMinLMStateShell &state)
  {
//--- dimension N is taken from the length of the initial point X
   CMinLM::MinLMCreateV(CAp::Len(x),m,x,diffstep,state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| LEVENBERG-MARQUARDT-LIKE METHOD FOR NON-LINEAR OPTIMIZATION |
//| DESCRIPTION: |
//| This function is used to find minimum of general form (not |
//| "sum-of-squares") function |
//| F = F(x[0], ..., x[n-1]) |
//| using its gradient and Hessian. Levenberg-Marquardt modification |
//| with L-BFGS pre-optimization and internal pre-conditioned L-BFGS |
//| optimization after each Levenberg-Marquardt step is used. |
//| REQUIREMENTS: |
//| This algorithm will request following information during its |
//| operation: |
//| * function value F at given point X |
//| * F and gradient G (simultaneously) at given point X |
//| * F, G and Hessian H (simultaneously) at given point X |
//| There are several overloaded versions of MinLMOptimize() |
//| function which correspond to different LM-like optimization |
//| algorithms provided by this unit. You should choose version which|
//| accepts func(), grad() and hess() function pointers. First |
//| pointer is used to calculate F at given point, second one |
//| calculates F(x) and grad F(x), third one calculates F(x), grad |
//| F(x), hess F(x). |
//| You can try to initialize MinLMState structure with FGH-function |
//| and then use incorrect version of MinLMOptimize() (for example, |
//| version which does not provide Hessian matrix), but it will lead |
//| to exception being thrown after first attempt to calculate |
//| Hessian. |
//| USAGE: |
//| 1. User initializes algorithm state with MinLMCreateFGH() call |
//| 2. User tunes solver parameters with MinLMSetCond(), |
//| MinLMSetStpMax() and other functions |
//| 3. User calls MinLMOptimize() function which takes algorithm |
//| state and pointers (delegates, etc.) to callback functions. |
//| 4. User calls MinLMResults() to get solution |
//| 5. Optionally, user may call MinLMRestartFrom() to solve another |
//| problem with same N but another starting point and/or another |
//| function. MinLMRestartFrom() allows to reuse already |
//| initialized structure. |
//| INPUT PARAMETERS: |
//| N - dimension, N>1 |
//| * if given, only leading N elements of X are |
//| used |
//| * if not given, automatically determined from |
//| size of X |
//| X - initial solution, array[0..N-1] |
//| OUTPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| NOTES: |
//| 1. you may tune stopping conditions with MinLMSetCond() function |
//| 2. if target function contains exp() or other fast growing |
//| functions, and optimization algorithm makes too large steps |
//| which leads to overflow, use MinLMSetStpMax() function to |
//| bound algorithm's steps. |
//+------------------------------------------------------------------+
void CAlglib::MinLMCreateFGH(const int n,double &x[],CMinLMStateShell &state)
  {
//--- create FGH-mode LM optimizer state (function + gradient + Hessian)
//--- with explicit dimension n and starting point x
   CMinLM::MinLMCreateFGH(n,x,state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| LEVENBERG-MARQUARDT-LIKE METHOD FOR NON-LINEAR OPTIMIZATION |
//| DESCRIPTION: |
//| This function is used to find minimum of general form (not |
//| "sum-of-squares") function |
//| F = F(x[0], ..., x[n-1]) |
//| using its gradient and Hessian. Levenberg-Marquardt modification |
//| with L-BFGS pre-optimization and internal pre-conditioned L-BFGS |
//| optimization after each Levenberg-Marquardt step is used. |
//| REQUIREMENTS: |
//| This algorithm will request following information during its |
//| operation: |
//| * function value F at given point X |
//| * F and gradient G (simultaneously) at given point X |
//| * F, G and Hessian H (simultaneously) at given point X |
//| There are several overloaded versions of MinLMOptimize() |
//| function which correspond to different LM-like optimization |
//| algorithms provided by this unit. You should choose version which|
//| accepts func(), grad() and hess() function pointers. First |
//| pointer is used to calculate F at given point, second one |
//| calculates F(x) and grad F(x), third one calculates F(x), grad |
//| F(x), hess F(x). |
//| You can try to initialize MinLMState structure with FGH-function |
//| and then use incorrect version of MinLMOptimize() (for example, |
//| version which does not provide Hessian matrix), but it will lead |
//| to exception being thrown after first attempt to calculate |
//| Hessian. |
//| USAGE: |
//| 1. User initializes algorithm state with MinLMCreateFGH() call |
//| 2. User tunes solver parameters with MinLMSetCond(), |
//| MinLMSetStpMax() and other functions |
//| 3. User calls MinLMOptimize() function which takes algorithm |
//| state and pointers (delegates, etc.) to callback functions. |
//| 4. User calls MinLMResults() to get solution |
//| 5. Optionally, user may call MinLMRestartFrom() to solve another |
//| problem with same N but another starting point and/or another |
//| function. MinLMRestartFrom() allows to reuse already |
//| initialized structure. |
//| INPUT PARAMETERS: |
//| N - dimension, N>1 |
//| * if given, only leading N elements of X are |
//| used |
//| * if not given, automatically determined from |
//| size of X |
//| X - initial solution, array[0..N-1] |
//| OUTPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| NOTES: |
//| 1. you may tune stopping conditions with MinLMSetCond() function |
//| 2. if target function contains exp() or other fast growing |
//| functions, and optimization algorithm makes too large steps |
//| which leads to overflow, use MinLMSetStpMax() function to |
//| bound algorithm's steps. |
//+------------------------------------------------------------------+
void CAlglib::MinLMCreateFGH(double &x[],CMinLMStateShell &state)
  {
//--- FGH-mode creator: the problem dimension is taken from the length
//--- of the starting point vector x
   CMinLM::MinLMCreateFGH(CAp::Len(x),x,state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| This function sets stopping conditions for Levenberg-Marquardt |
//| optimization algorithm. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| EpsX - >=0 |
//| The subroutine finishes its work if on k+1-th |
//| iteration the condition |v|<=EpsX is fulfilled, |
//| where: |
//| * |.| means Euclidian norm |
//| * v - scaled step vector, v[i]=dx[i]/s[i] |
//| * dx - step vector, dx=X(k+1)-X(k) |
//| * s - scaling coefficients set by MinLMSetScale()|
//| MaxIts - maximum number of iterations. If MaxIts=0, the |
//| number of iterations is unlimited. Only |
//| Levenberg-Marquardt iterations are counted |
//| (L-BFGS/CG iterations are NOT counted because |
//| their cost is very low compared to that of LM). |
//| Passing EpsG=0, EpsF=0, EpsX=0 and MaxIts=0 (simultaneously) will|
//| lead to automatic stopping criterion selection (small EpsX). |
//+------------------------------------------------------------------+
void CAlglib::MinLMSetCond(CMinLMStateShell &state,const double epsx,
const int maxits)
  {
//--- forward stopping conditions (scaled-step tolerance and iteration
//--- limit) to the wrapped optimizer state
   CMinLM::MinLMSetCond(state.GetInnerObj(),epsx,maxits);
  }
//+------------------------------------------------------------------+
//| This function turns on/off reporting. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| NeedXRep- whether iteration reports are needed or not |
//| If NeedXRep is True, algorithm will call rep() callback function |
//| if it is provided to MinLMOptimize(). Both Levenberg-Marquardt |
//| and internal L-BFGS iterations are reported. |
//+------------------------------------------------------------------+
void CAlglib::MinLMSetXRep(CMinLMStateShell &state,const bool needxrep)
  {
//--- toggle per-iteration rep() callbacks on the wrapped optimizer state
   CMinLM::MinLMSetXRep(state.GetInnerObj(),needxrep);
  }
//+------------------------------------------------------------------+
//| This function sets maximum step length |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| StpMax - maximum step length, >=0. Set StpMax to 0.0, if |
//| you don't want to limit step length. |
//| Use this subroutine when you optimize target function which |
//| contains exp() or other fast growing functions, and optimization |
//| algorithm makes too large steps which leads to overflow. This |
//| function allows us to reject steps that are too large (and |
//| therefore expose us to the possible overflow) without actually |
//| calculating function value at the x+stp*d. |
//| NOTE: non-zero StpMax leads to moderate performance degradation |
//| because intermediate step of preconditioned L-BFGS optimization |
//| is incompatible with limits on step size. |
//+------------------------------------------------------------------+
void CAlglib::MinLMSetStpMax(CMinLMStateShell &state,const double stpmax)
  {
//--- forward the maximum step length (0 = unlimited) to the wrapped
//--- optimizer state
   CMinLM::MinLMSetStpMax(state.GetInnerObj(),stpmax);
  }
//+------------------------------------------------------------------+
//| This function sets scaling coefficients for LM optimizer. |
//| ALGLIB optimizers use scaling matrices to test stopping |
//| conditions (step size and gradient are scaled before comparison |
//| with tolerances). Scale of the I-th variable is a translation |
//| invariant measure of: |
//| a) "how large" the variable is |
//| b) how large the step should be to make significant changes in |
//| the function |
//| Generally, scale is NOT considered to be a form of |
//| preconditioner. But LM optimizer is unique in that it uses |
//| scaling matrix both in the stopping condition tests and as |
//| Marquardt damping factor. |
//| Proper scaling is very important for the algorithm performance. |
//| It is less important for the quality of results, but still has |
//| some influence (it is easier to converge when variables are |
//| properly scaled, so premature stopping is possible when very |
//| badly scaled variables are combined with relaxed stopping |
//| conditions). |
//| INPUT PARAMETERS: |
//| State - structure stores algorithm state |
//| S - array[N], non-zero scaling coefficients |
//| S[i] may be negative, sign doesn't matter. |
//+------------------------------------------------------------------+
void CAlglib::MinLMSetScale(CMinLMStateShell &state,double &s[])
  {
//--- forward per-variable scaling coefficients to the wrapped optimizer
   CMinLM::MinLMSetScale(state.GetInnerObj(),s);
  }
//+------------------------------------------------------------------+
//| This function sets boundary constraints for LM optimizer |
//| Boundary constraints are inactive by default (after initial |
//| creation). They are preserved until explicitly turned off with |
//| another SetBC() call. |
//| INPUT PARAMETERS: |
//| State - structure stores algorithm state |
//| BndL - lower bounds, array[N]. |
//| If some (all) variables are unbounded, you may |
//| specify very small number or -INF (latter is |
//| recommended because it will allow solver to use |
//| better algorithm). |
//| BndU - upper bounds, array[N]. |
//| If some (all) variables are unbounded, you may |
//| specify very large number or +INF (latter is |
//| recommended because it will allow solver to use |
//| better algorithm). |
//| NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case |
//| I-th variable will be "frozen" at X[i]=BndL[i]=BndU[i]. |
//| NOTE 2: this solver has following useful properties: |
//| * bound constraints are always satisfied exactly |
//| * function is evaluated only INSIDE area specified by bound |
//| constraints or at its boundary |
//+------------------------------------------------------------------+
void CAlglib::MinLMSetBC(CMinLMStateShell &state,double &bndl[],
double &bndu[])
  {
//--- forward lower/upper bound constraints to the wrapped optimizer
   CMinLM::MinLMSetBC(state.GetInnerObj(),bndl,bndu);
  }
//+------------------------------------------------------------------+
//| This function is used to change acceleration settings |
//| You can choose between three acceleration strategies: |
//| * AccType=0, no acceleration. |
//| * AccType=1, secant updates are used to update quadratic model |
//| after each iteration. After fixed number of iterations (or |
//| after model breakdown) we recalculate quadratic model using |
//| analytic Jacobian or finite differences. Number of secant-based|
//| iterations depends on optimization settings: about 3 |
//| iterations - when we have analytic Jacobian, up to 2*N |
//| iterations - when we use finite differences to calculate |
//| Jacobian. |
//| AccType=1 is recommended when Jacobian calculation cost is |
//| prohibitively high (several Mx1 function vector calculations |
//| followed by several NxN Cholesky factorizations are faster than |
//| calculation of one M*N Jacobian). It should also be used when we|
//| have no Jacobian, because finite difference approximation takes |
//| too much time to compute. |
//| Table below list optimization protocols (XYZ protocol corresponds|
//| to MinLMCreateXYZ) and acceleration types they support (and use |
//| by default). |
//| ACCELERATION TYPES SUPPORTED BY OPTIMIZATION PROTOCOLS: |
//| protocol 0 1 comment |
//| V + + |
//| VJ + + |
//| FGH + |
//| DEFAULT VALUES: |
//| protocol 0 1 comment |
//| V x without acceleration it is so slooooooooow |
//| VJ x |
//| FGH x |
//| NOTE: this function should be called before optimization. |
//| Attempt to call it during algorithm iterations may result in |
//| unexpected behavior. |
//| NOTE: attempt to call this function with unsupported |
//| protocol/acceleration combination will result in exception being |
//| thrown. |
//+------------------------------------------------------------------+
void CAlglib::MinLMSetAccType(CMinLMStateShell &state,const int acctype)
  {
//--- forward the acceleration strategy selector (0 or 1, see header
//--- comment) to the wrapped optimizer state
   CMinLM::MinLMSetAccType(state.GetInnerObj(),acctype);
  }
//+------------------------------------------------------------------+
//| This function provides reverse communication interface |
//| Reverse communication interface is not documented or recommended |
//| to use. |
//| See below for functions which provide better documented API |
//+------------------------------------------------------------------+
bool CAlglib::MinLMIteration(CMinLMStateShell &state)
  {
//--- advance the reverse-communication optimizer one step; returns true
//--- while the caller must service a request (see MinLMOptimize overloads)
   return(CMinLM::MinLMIteration(state.GetInnerObj()));
  }
//+------------------------------------------------------------------+
//| This family of functions is used to launch iterations of |
//| nonlinear optimizer |
//| These functions accept following parameters: |
//| func - callback which calculates function (or merit |
//| function) value func at given point x |
//| grad - callback which calculates function (or merit |
//| function) value func and gradient grad at given |
//| point x |
//| hess - callback which calculates function (or merit |
//| function) value func, gradient grad and Hessian |
//| hess at given point x |
//| fvec - callback which calculates function vector fi[] |
//| at given point x |
//| jac - callback which calculates function vector fi[] |
//| and Jacobian jac at given point x |
//| rep - optional callback which is called after each |
//| iteration can be null |
//| obj - optional object which is passed to |
//| func/grad/hess/jac/rep can be null |
//| NOTES: |
//| 1. Depending on function used to create state structure, this |
//| algorithm may accept Jacobian and/or Hessian and/or gradient. |
//| According to the said above, there are several versions of |
//| this function, which accept different sets of callbacks. |
//| This flexibility opens way to subtle errors - you may create |
//| state with MinLMCreateFGH() (optimization using Hessian), but |
//| call function which does not accept Hessian. So when |
//| algorithm will request Hessian, there will be no callback to |
//| call. In this case exception will be thrown. |
//| Be careful to avoid such errors because there is no way to |
//| find them at compile time - you can see them at runtime only. |
//+------------------------------------------------------------------+
void CAlglib::MinLMOptimize(CMinLMStateShell &state,CNDimensional_FVec &fvec,
CNDimensional_Rep &rep,bool rep_status,CObject &obj)
  {
//--- reverse-communication driver (V protocol): keep iterating while the
//--- solver asks the caller to compute something
   while(CAlglib::MinLMIteration(state))
     {
      //--- request: function vector fi[] at the current point
      if(state.GetNeedFI())
         fvec.FVec(state.GetInnerObj().m_x,state.GetInnerObj().m_fi,obj);
      else
        {
         //--- notification: a new iterate is available for reporting
         if(state.GetInnerObj().m_xupdated)
           {
            if(rep_status)
               rep.Rep(state.GetInnerObj().m_x,state.GetInnerObj().m_f,obj);
           }
         else
           {
            //--- this overload cannot serve the request - report and abort
            Print("ALGLIB: error in 'minlmoptimize' (some derivatives were not provided?)");
            CAp::exception_happened=true;
            break;
           }
        }
     }
  }
//+------------------------------------------------------------------+
//| This family of functions is used to launch iterations of |
//| nonlinear optimizer |
//| These functions accept following parameters: |
//| func - callback which calculates function (or merit |
//| function) value func at given point x |
//| grad - callback which calculates function (or merit |
//| function) value func and gradient grad at given |
//| point x |
//| hess - callback which calculates function (or merit |
//| function) value func, gradient grad and Hessian |
//| hess at given point x |
//| fvec - callback which calculates function vector fi[] |
//| at given point x |
//| jac - callback which calculates function vector fi[] |
//| and Jacobian jac at given point x |
//| rep - optional callback which is called after each |
//| iteration can be null |
//| obj - optional object which is passed to |
//| func/grad/hess/jac/rep can be null |
//| NOTES: |
//| 1. Depending on function used to create state structure, this |
//| algorithm may accept Jacobian and/or Hessian and/or gradient. |
//| According to the said above, there are several versions of |
//| this function, which accept different sets of callbacks. |
//| This flexibility opens way to subtle errors - you may create |
//| state with MinLMCreateFGH() (optimization using Hessian), but |
//| call function which does not accept Hessian. So when |
//| algorithm will request Hessian, there will be no callback to |
//| call. In this case exception will be thrown. |
//| Be careful to avoid such errors because there is no way to |
//| find them at compile time - you can see them at runtime only. |
//+------------------------------------------------------------------+
void CAlglib::MinLMOptimize(CMinLMStateShell &state,CNDimensional_FVec &fvec,
CNDimensional_Jac &jac,CNDimensional_Rep &rep,
bool rep_status,CObject &obj)
  {
//--- reverse-communication driver (VJ protocol): serve solver requests
//--- until optimization terminates
   while(CAlglib::MinLMIteration(state))
     {
      //--- request: function vector fi[] at the current point
      if(state.GetNeedFI())
         fvec.FVec(state.GetInnerObj().m_x,state.GetInnerObj().m_fi,obj);
      //--- request: function vector and Jacobian at the current point
      else if(state.GetNeedFIJ())
         jac.Jac(state.GetInnerObj().m_x,state.GetInnerObj().m_fi,state.GetInnerObj().m_j,obj);
      //--- notification: a new iterate is available for reporting
      else if(state.GetInnerObj().m_xupdated)
        {
         if(rep_status)
            rep.Rep(state.GetInnerObj().m_x,state.GetInnerObj().m_f,obj);
        }
      else
        {
         //--- this overload cannot serve the request - report and abort
         Print("ALGLIB: error in 'minlmoptimize' (some derivatives were not provided?)");
         CAp::exception_happened=true;
         break;
        }
     }
  }
//+------------------------------------------------------------------+
//| This family of functions is used to launch iterations of |
//| nonlinear optimizer |
//| These functions accept following parameters: |
//| func - callback which calculates function (or merit |
//| function) value func at given point x |
//| grad - callback which calculates function (or merit |
//| function) value func and gradient grad at given |
//| point x |
//| hess - callback which calculates function (or merit |
//| function) value func, gradient grad and Hessian |
//| hess at given point x |
//| fvec - callback which calculates function vector fi[] |
//| at given point x |
//| jac - callback which calculates function vector fi[] |
//| and Jacobian jac at given point x |
//| rep - optional callback which is called after each |
//| iteration can be null |
//| obj - optional object which is passed to |
//| func/grad/hess/jac/rep can be null |
//| NOTES: |
//| 1. Depending on function used to create state structure, this |
//| algorithm may accept Jacobian and/or Hessian and/or gradient. |
//| According to the said above, there are several versions of |
//| this function, which accept different sets of callbacks. |
//| This flexibility opens way to subtle errors - you may create |
//| state with MinLMCreateFGH() (optimization using Hessian), but |
//| call function which does not accept Hessian. So when |
//| algorithm will request Hessian, there will be no callback to |
//| call. In this case exception will be thrown. |
//| Be careful to avoid such errors because there is no way to |
//| find them at compile time - you can see them at runtime only. |
//+------------------------------------------------------------------+
void CAlglib::MinLMOptimize(CMinLMStateShell &state,CNDimensional_Func &func,
CNDimensional_Grad &grad,CNDimensional_Hess &hess,
CNDimensional_Rep &rep,bool rep_status,CObject &obj)
  {
//--- reverse-communication driver (FGH protocol): serve solver requests
//--- until optimization terminates
   while(CAlglib::MinLMIteration(state))
     {
      //--- request: function value at the current point
      if(state.GetNeedF())
         func.Func(state.GetInnerObj().m_x,state.GetInnerObj().m_f,obj);
      //--- request: function value and gradient
      else if(state.GetNeedFG())
         grad.Grad(state.GetInnerObj().m_x,state.GetInnerObj().m_f,state.GetInnerObj().m_g,obj);
      //--- request: function value, gradient and Hessian
      else if(state.GetNeedFGH())
         hess.Hess(state.GetInnerObj().m_x,state.GetInnerObj().m_f,state.GetInnerObj().m_g,state.GetInnerObj().m_h,obj);
      //--- notification: a new iterate is available for reporting
      else if(state.GetInnerObj().m_xupdated)
        {
         if(rep_status)
            rep.Rep(state.GetInnerObj().m_x,state.GetInnerObj().m_f,obj);
        }
      else
        {
         //--- this overload cannot serve the request - report and abort
         Print("ALGLIB: error in 'minlmoptimize' (some derivatives were not provided?)");
         CAp::exception_happened=true;
         break;
        }
     }
  }
//+------------------------------------------------------------------+
//| This family of functions is used to launch iterations of |
//| nonlinear optimizer |
//| These functions accept following parameters: |
//| func - callback which calculates function (or merit |
//| function) value func at given point x |
//| grad - callback which calculates function (or merit |
//| function) value func and gradient grad at given |
//| point x |
//| hess - callback which calculates function (or merit |
//| function) value func, gradient grad and Hessian |
//| hess at given point x |
//| fvec - callback which calculates function vector fi[] |
//| at given point x |
//| jac - callback which calculates function vector fi[] |
//| and Jacobian jac at given point x |
//| rep - optional callback which is called after each |
//| iteration can be null |
//| obj - optional object which is passed to |
//| func/grad/hess/jac/rep can be null |
//| NOTES: |
//| 1. Depending on function used to create state structure, this |
//| algorithm may accept Jacobian and/or Hessian and/or gradient. |
//| According to the said above, there are several versions of |
//| this function, which accept different sets of callbacks. |
//| This flexibility opens way to subtle errors - you may create |
//| state with MinLMCreateFGH() (optimization using Hessian), but |
//| call function which does not accept Hessian. So when |
//| algorithm will request Hessian, there will be no callback to |
//| call. In this case exception will be thrown. |
//| Be careful to avoid such errors because there is no way to |
//| find them at compile time - you can see them at runtime only. |
//+------------------------------------------------------------------+
void CAlglib::MinLMOptimize(CMinLMStateShell &state,CNDimensional_Func &func,
CNDimensional_Jac &jac,CNDimensional_Rep &rep,
bool rep_status,CObject &obj)
  {
//--- reverse-communication driver (FJ protocol): serve solver requests
//--- until optimization terminates
   while(CAlglib::MinLMIteration(state))
     {
      //--- request: function value at the current point
      if(state.GetNeedF())
         func.Func(state.GetInnerObj().m_x,state.GetInnerObj().m_f,obj);
      //--- request: function vector and Jacobian at the current point
      else if(state.GetNeedFIJ())
         jac.Jac(state.GetInnerObj().m_x,state.GetInnerObj().m_fi,state.GetInnerObj().m_j,obj);
      //--- notification: a new iterate is available for reporting
      else if(state.GetInnerObj().m_xupdated)
        {
         if(rep_status)
            rep.Rep(state.GetInnerObj().m_x,state.GetInnerObj().m_f,obj);
        }
      else
        {
         //--- this overload cannot serve the request - report and abort
         Print("ALGLIB: error in 'minlmoptimize' (some derivatives were not provided?)");
         CAp::exception_happened=true;
         break;
        }
     }
  }
//+------------------------------------------------------------------+
//| This family of functions is used to launch iterations of |
//| nonlinear optimizer |
//| These functions accept following parameters: |
//| func - callback which calculates function (or merit |
//| function) value func at given point x |
//| grad - callback which calculates function (or merit |
//| function) value func and gradient grad at given |
//| point x |
//| hess - callback which calculates function (or merit |
//| function) value func, gradient grad and Hessian |
//| hess at given point x |
//| fvec - callback which calculates function vector fi[] |
//| at given point x |
//| jac - callback which calculates function vector fi[] |
//| and Jacobian jac at given point x |
//| rep - optional callback which is called after each |
//| iteration can be null |
//| obj - optional object which is passed to |
//| func/grad/hess/jac/rep can be null |
//| NOTES: |
//| 1. Depending on function used to create state structure, this |
//| algorithm may accept Jacobian and/or Hessian and/or gradient. |
//| According to the said above, there are several versions of |
//| this function, which accept different sets of callbacks. |
//| This flexibility opens way to subtle errors - you may create |
//| state with MinLMCreateFGH() (optimization using Hessian), but |
//| call function which does not accept Hessian. So when |
//| algorithm will request Hessian, there will be no callback to |
//| call. In this case exception will be thrown. |
//| Be careful to avoid such errors because there is no way to |
//| find them at compile time - you can see them at runtime only. |
//+------------------------------------------------------------------+
void CAlglib::MinLMOptimize(CMinLMStateShell &state,CNDimensional_Func &func,
CNDimensional_Grad &grad,CNDimensional_Jac &jac,
CNDimensional_Rep &rep,bool rep_status,CObject &obj)
  {
//--- reverse-communication driver (FGJ protocol): serve solver requests
//--- until optimization terminates
   while(CAlglib::MinLMIteration(state))
     {
      //--- request: function value at the current point
      if(state.GetNeedF())
         func.Func(state.GetInnerObj().m_x,state.GetInnerObj().m_f,obj);
      //--- request: function value and gradient
      else if(state.GetNeedFG())
         grad.Grad(state.GetInnerObj().m_x,state.GetInnerObj().m_f,state.GetInnerObj().m_g,obj);
      //--- request: function vector and Jacobian at the current point
      else if(state.GetNeedFIJ())
         jac.Jac(state.GetInnerObj().m_x,state.GetInnerObj().m_fi,state.GetInnerObj().m_j,obj);
      //--- notification: a new iterate is available for reporting
      else if(state.GetInnerObj().m_xupdated)
        {
         if(rep_status)
            rep.Rep(state.GetInnerObj().m_x,state.GetInnerObj().m_f,obj);
        }
      else
        {
         //--- this overload cannot serve the request - report and abort
         Print("ALGLIB: error in 'minlmoptimize' (some derivatives were not provided?)");
         CAp::exception_happened=true;
         break;
        }
     }
  }
//+------------------------------------------------------------------+
//| Levenberg-Marquardt algorithm results |
//| INPUT PARAMETERS: |
//| State - algorithm state |
//| OUTPUT PARAMETERS: |
//| X - array[0..N-1], solution |
//| Rep - optimization report; |
//| see comments for this structure for more info. |
//+------------------------------------------------------------------+
void CAlglib::MinLMResults(CMinLMStateShell &state,double &x[],
CMinLMReportShell &rep)
  {
//--- extract the solution vector x and optimization report from the
//--- wrapped optimizer state
   CMinLM::MinLMResults(state.GetInnerObj(),x,rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Levenberg-Marquardt algorithm results |
//| Buffered implementation of MinLMResults(), which uses |
//| pre-allocated buffer to store X[]. If buffer size is too small, |
//| it resizes buffer. It is intended to be used in the inner cycles |
//| of performance critical algorithms where array reallocation |
//| penalty is too large to be ignored. |
//+------------------------------------------------------------------+
void CAlglib::MinLMResultsBuf(CMinLMStateShell &state,double &x[],
CMinLMReportShell &rep)
  {
//--- buffered variant of MinLMResults(): reuses caller-supplied x[] buffer,
//--- resizing only when it is too small
   CMinLM::MinLMResultsBuf(state.GetInnerObj(),x,rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| This subroutine restarts LM algorithm from new point. All |
//| optimization parameters are left unchanged. |
//| This function allows to solve multiple optimization problems |
//| (which must have same number of dimensions) without object |
//| reallocation penalty. |
//| INPUT PARAMETERS: |
//| State - structure used for reverse communication |
//| previously allocated with MinLMCreateXXX call. |
//| X - new starting point. |
//+------------------------------------------------------------------+
void CAlglib::MinLMRestartFrom(CMinLMStateShell &state,double &x[])
  {
//--- restart the optimizer from a new point x, keeping all previously
//--- set parameters (dimension must match the original problem)
   CMinLM::MinLMRestartFrom(state.GetInnerObj(),x);
  }
//+------------------------------------------------------------------+
//| This is obsolete function. |
//| Since ALGLIB 3.3 it is equivalent to MinLMCreateVJ(). |
//+------------------------------------------------------------------+
void CAlglib::MinLMCreateVGJ(const int n,const int m,double &x[],
CMinLMStateShell &state)
  {
//--- obsolete creator (equivalent to MinLMCreateVJ since ALGLIB 3.3);
//--- kept for backward compatibility
   CMinLM::MinLMCreateVGJ(n,m,x,state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| This is obsolete function. |
//| Since ALGLIB 3.3 it is equivalent to MinLMCreateVJ(). |
//+------------------------------------------------------------------+
void CAlglib::MinLMCreateVGJ(const int m,double &x[],CMinLMStateShell &state)
  {
//--- obsolete creator (equivalent to MinLMCreateVJ since ALGLIB 3.3);
//--- the dimension is inferred from the length of x
   CMinLM::MinLMCreateVGJ(CAp::Len(x),m,x,state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| This is obsolete function. |
//| Since ALGLIB 3.3 it is equivalent to MinLMCreateFJ(). |
//+------------------------------------------------------------------+
void CAlglib::MinLMCreateFGJ(const int n,const int m,double &x[],
CMinLMStateShell &state)
  {
//--- obsolete creator (equivalent to MinLMCreateFJ since ALGLIB 3.3);
//--- kept for backward compatibility
   CMinLM::MinLMCreateFGJ(n,m,x,state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| This is obsolete function. |
//| Since ALGLIB 3.3 it is equivalent to MinLMCreateFJ(). |
//+------------------------------------------------------------------+
void CAlglib::MinLMCreateFGJ(const int m,double &x[],CMinLMStateShell &state)
  {
//--- obsolete creator (equivalent to MinLMCreateFJ since ALGLIB 3.3);
//--- the dimension is inferred from the length of x
   CMinLM::MinLMCreateFGJ(CAp::Len(x),m,x,state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| This function is considered obsolete since ALGLIB 3.1.0 and is |
//| present for backward compatibility only. We recommend to use |
//| MinLMCreateVJ, which provides similar, but more consistent and |
//| feature-rich interface. |
//+------------------------------------------------------------------+
void CAlglib::MinLMCreateFJ(const int n,const int m,double &x[],
CMinLMStateShell &state)
  {
//--- obsolete creator (since ALGLIB 3.1.0); prefer MinLMCreateVJ
   CMinLM::MinLMCreateFJ(n,m,x,state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| This function is considered obsolete since ALGLIB 3.1.0 and is |
//| present for backward compatibility only. We recommend to use |
//| MinLMCreateVJ, which provides similar, but more consistent and |
//| feature-rich interface. |
//+------------------------------------------------------------------+
void CAlglib::MinLMCreateFJ(const int m,double &x[],CMinLMStateShell &state)
  {
//--- obsolete creator (since ALGLIB 3.1.0); prefer MinLMCreateVJ.
//--- The dimension is inferred from the length of x
   CMinLM::MinLMCreateFJ(CAp::Len(x),m,x,state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| LINEAR PROGRAMMING |
//| The subroutine creates LP solver. After initial creation it |
//| contains default optimization problem with zero cost vector and |
//| all variables being fixed to zero values and no constraints. |
//| In order to actually solve something you should: |
//| * set cost vector with MinLPSetCost() |
//| * set variable bounds with MinLPSetBC() or MinLPSetBCAll() |
//| * specify constraint matrix with one of the following |
//| functions: |
//| [*] MinLPSetLC() for dense one-sided constraints |
//| [*] MinLPSetLC2Dense() for dense two-sided constraints |
//| [*] MinLPSetLC2() for sparse two-sided constraints |
//| [*] MinLPAddLC2Dense() to add one dense row to constraint |
//| matrix |
//| [*] MinLPAddLC2() to add one row to constraint matrix |
//| (compressed format) |
//| * call MinLPOptimize() to run the solver and MinLPResults() to |
//| get the solution vector and additional information. |
//| By default, LP solver uses best algorithm available. As of ALGLIB|
//| 3.17, sparse interior point (barrier) solver is used. Future |
//| releases of ALGLIB may introduce other solvers. |
//| User may choose specific LP algorithm by calling: |
//| * MinLPSetAlgoDSS() for revised dual simplex method with DSE |
//| pricing and bounds flipping ratio test (aka long dual step). |
//| Large - scale sparse LU solverwith Forest - Tomlin update is |
//| used internally as linear algebra driver. |
//| * MinLPSetAlgoIPM() for sparse interior point method |
//| INPUT PARAMETERS: |
//| N - problem size |
//| OUTPUT PARAMETERS: |
//| State - optimizer in the default State |
//+------------------------------------------------------------------+
void CAlglib::MinLPCreate(int n,CMinLPState &state)
  {
//--- create an LP solver with n variables in default state (zero cost
//--- vector, all variables fixed at zero, no constraints)
   CMinLP::MinLPCreate(n,state);
  }
//+------------------------------------------------------------------+
//| This function sets LP algorithm to revised dual simplex method. |
//| ALGLIB implementation of dual simplex method supports advanced |
//| performance and stability improvements like DSE pricing, bounds |
//| flipping ratio test (aka long dual step), Forest - Tomlin update,|
//| Shifting. |
//| INPUT PARAMETERS: |
//| State - optimizer |
//| Eps - stopping condition, Eps >= 0: |
//| * should be small number about 1E-6 or 1E-7. |
//| * zero value means that solver automatically |
//| selects good value (can be different in |
//| different ALGLIB versions) |
//| * default value is zero |
//| Algorithm stops when relative error is less than Eps. |
//| ===== TRACING DSS SOLVER ======================================= |
//| DSS solver supports advanced tracing capabilities. You can trace |
//| algorithm output by specifying following trace symbols |
//| (case-insensitive) by means of trace_file() call: |
//| * 'DSS' - for basic trace of algorithm steps and decisions|
//| Only short scalars (function values and deltas) |
//| are printed. N-dimensional quantities like |
//| search directions are NOT printed. |
//| * 'DSS.DETAILED' - for output of points being visited and |
//| search directions. |
//| This symbol also implicitly defines 'DSS'. You can control output|
//| format by additionally specifying: |
//| * nothing to output in 6-digit exponential format |
//| * 'PREC.E15' to output in 15-digit exponential format |
//| * 'PREC.F6' to output in 6-digit fixed - point format |
//| By default trace is disabled and adds no overhead to the |
//| optimization process. However, specifying any of the symbols |
//| adds some formatting and output - related overhead. |
//| You may specify multiple symbols by separating them with commas: |
//| > |
//| >CAlglib::Trace_File("DSS,PREC.F6","path/to/trace.log") |
//+------------------------------------------------------------------+
void CAlglib::MinLPSetAlgoDSS(CMinLPState &state,double eps)
{
//--- select the revised dual simplex algorithm; eps is the relative
//--- stopping tolerance (0 lets the solver choose a default)
CMinLP::MinLPSetAlgoDSS(state,eps);
}
//+------------------------------------------------------------------+
//| This function sets LP algorithm to sparse interior point method. |
//| ALGORITHM INFORMATION: |
//| * this algorithm is our implementation of interior point |
//| method as formulated by R.J.Vanderbei, with minor |
//| modifications to the algorithm (damped Newton directions are |
//| extensively used) |
//| * like all interior point methods, this algorithm tends to |
//| converge in roughly same number of iterations (between 15 |
//| and 50) independently from the problem dimensionality |
//| INPUT PARAMETERS: |
//| State - optimizer |
//| Eps - stopping condition, Eps >= 0: |
//| * should be small number about 1E-7 or 1E-8. |
//| * zero value means that solver automatically |
//| selects good value (can be different in different|
//| ALGLIB versions) |
//| * default value is zero |
//| Algorithm stops when primal error AND dual error AND |
//| duality gap are less than Eps. |
//| ===== TRACING IPM SOLVER ======================================= |
//| IPM solver supports advanced tracing capabilities. You can trace |
//| algorithm output by specifying following trace symbols |
//| (case-insensitive) by means of trace_file() call: |
//| * 'IPM' - for basic trace of algorithm steps and |
//| decisions. Only short scalars (function |
//| values and deltas) are printed. N-dimensional|
//| quantities like search directions are NOT |
//| printed. |
//| * 'IPM.DETAILED' - for output of points being visited and |
//| search directions |
//| This symbol also implicitly defines 'IPM'. You can control output|
//| by additionally specifying: |
//| * nothing to output in 6-digit exponential format |
//| * 'PREC.E15' to output in 15 - digit exponential format |
//| * 'PREC.F6' to output in 6-digit fixed-point format |
//| By default trace is disabled and adds no overhead to the |
//| optimization process. However, specifying any of the symbols |
//| adds some formatting and output - related overhead. |
//| You may specify multiple symbols by separating them with commas: |
//| > |
//| >CAlglib::Trace_File("IPM,PREC.F6","path/to/trace.log") |
//+------------------------------------------------------------------+
void CAlglib::MinLPSetAlgoIPM(CMinLPState &state,double eps=0)
{
//--- select the sparse interior point algorithm; eps=0 (default)
//--- lets the solver pick its own stopping tolerance
CMinLP::MinLPSetAlgoIPM(state,eps);
}
//+------------------------------------------------------------------+
//| This function sets cost term for LP solver. |
//| By default, cost term is zero. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm State |
//| C - cost term, array[N]. |
//+------------------------------------------------------------------+
void CAlglib::MinLPSetCost(CMinLPState &state,CRowDouble &c)
{
//--- forward the cost vector c (array[N]) to the CMinLP implementation
CMinLP::MinLPSetCost(state,c);
}
//+------------------------------------------------------------------+
//| This function sets scaling coefficients. |
//| ALGLIB optimizers use scaling matrices to test stopping |
//| conditions and as preconditioner. |
//| Scale of the I-th variable is a translation invariant measure of:|
//| a) "how large" the variable is |
//| b) how large the step should be to make significant changes in |
//| the function |
//| INPUT PARAMETERS: |
//| State - structure stores algorithm State |
//| S - array[N], non-zero scaling coefficients S[i] may |
//| be negative, sign doesn't matter. |
//+------------------------------------------------------------------+
void CAlglib::MinLPSetScale(CMinLPState &state,CRowDouble &s)
{
//--- forward the per-variable scaling vector s (array[N]) to CMinLP
CMinLP::MinLPSetScale(state,s);
}
//+------------------------------------------------------------------+
//| This function sets box constraints for LP solver (all variables |
//| at once, different constraints for different variables). |
//| The default State of constraints is to have all variables fixed |
//| at zero. You have to overwrite it by your own constraint vector. |
//| Constraint status is preserved until constraints are explicitly |
//| overwritten with another MinLPSetBC() call, overwritten with |
//| MinLPSetBCAll(), or partially overwritten with MinLPSetBCi() call|
//| Following types of constraints are supported: |
//| DESCRIPTION CONSTRAINT HOW TO SPECIFY |
//| fixed variable x[i] = Bnd[i] BndL[i] = BndU[i]|
//| lower bound BndL[i] <= x[i] BndU[i] = +INF |
//| upper bound x[i] <= BndU[i] BndL[i] = -INF |
//| range BndL[i] <= x[i] <= BndU[i] ... |
//|   free variable     -                 BndL[I] = -INF, BndU[I] = +INF|
//| INPUT PARAMETERS: |
//| State - structure stores algorithm State |
//| BndL - lower bounds, array[N]. |
//| BndU - upper bounds, array[N]. |
//| NOTE: infinite values can be specified by means of AL_POSINF and |
//| AL_NEGINF |
//| NOTE: you may replace infinities by very small/very large values,|
//| but it is not recommended because large numbers may |
//| introduce large numerical errors in the algorithm. |
//| NOTE: if constraints for all variables are same you may use |
//| MinLPSetBCAll() which allows to specify constraints without|
//| using arrays. |
//| NOTE: BndL > BndU will result in LP problem being recognized as |
//| infeasible. |
//+------------------------------------------------------------------+
void CAlglib::MinLPSetBC(CMinLPState &state,CRowDouble &bndl,
                         CRowDouble &bndu)
{
//--- forward per-variable lower/upper bound arrays to CMinLP;
//--- see header comment above for the bound-encoding conventions
CMinLP::MinLPSetBC(state,bndl,bndu);
}
//+------------------------------------------------------------------+
//| This function sets box constraints for LP solver(all variables at|
//| once, same constraints for all variables) |
//| The default State of constraints is to have all variables fixed |
//| at zero. You have to overwrite it by your own constraint vector. |
//| Constraint status is preserved until constraints are explicitly |
//| overwritten with another MinLPSetBC() call or partially |
//| overwritten with MinLPSetBCAll(). |
//| Following types of constraints are supported: |
//| DESCRIPTION CONSTRAINT HOW TO SPECIFY |
//| fixed variable x[i] = Bnd[i] BndL[i] = BndU[i]|
//| lower bound BndL[i] <= x[i] BndU[i] = +INF |
//| upper bound x[i] <= BndU[i] BndL[i] = -INF |
//| range BndL[i] <= x[i] <= BndU[i] ... |
//|   free variable     -                 BndL[I] = -INF, BndU[I] = +INF|
//| INPUT PARAMETERS: |
//| State - structure stores algorithm State |
//| BndL - lower bound, same for all variables |
//| BndU - upper bound, same for all variables |
//| NOTE: infinite values can be specified by means of AL_POSINF and |
//| AL_NEGINF |
//| NOTE: you may replace infinities by very small/very large values,|
//| but it is not recommended because large numbers may |
//| introduce large numerical errors in the algorithm. |
//| NOTE: MinLPSetBC() can be used to specify different constraints |
//| for different variables. |
//| NOTE: BndL > BndU will result in LP problem being recognized as |
//| infeasible. |
//+------------------------------------------------------------------+
void CAlglib::MinLPSetBCAll(CMinLPState &state,double bndl,double bndu)
{
//--- apply the same scalar lower/upper bounds to all variables
CMinLP::MinLPSetBCAll(state,bndl,bndu);
}
//+------------------------------------------------------------------+
//| This function sets box constraints for I-th variable (other |
//| variables are not modified). |
//| The default State of constraints is to have all variables fixed |
//| at zero. You have to overwrite it by your own constraint vector. |
//| Following types of constraints are supported: |
//| DESCRIPTION CONSTRAINT HOW TO SPECIFY |
//| fixed variable x[i] = Bnd[i] BndL[i] = BndU[i]|
//| lower bound BndL[i] <= x[i] BndU[i] = +INF |
//| upper bound x[i] <= BndU[i] BndL[i] = -INF |
//| range BndL[i] <= x[i] <= BndU[i] ... |
//|   free variable     -                 BndL[I] = -INF, BndU[I] = +INF|
//| INPUT PARAMETERS: |
//| State - structure stores algorithm State |
//| I - variable index, in [0, N) |
//| BndL - lower bound for I-th variable |
//| BndU - upper bound for I-th variable |
//| NOTE: infinite values can be specified by means of AL_POSINF and |
//| AL_NEGINF |
//| NOTE: you may replace infinities by very small/very large values,|
//| but it is not recommended because large numbers may |
//| introduce large numerical errors in the algorithm. |
//| NOTE: MinLPSetBC() can be used to specify different constraints |
//| for different variables. |
//| NOTE: BndL > BndU will result in LP problem being recognized as |
//| infeasible. |
//+------------------------------------------------------------------+
void CAlglib::MinLPSetBCi(CMinLPState &state,int i,double bndl,double bndu)
{
//--- set bounds for the single variable with index i (0-based),
//--- leaving all other variables' bounds untouched
CMinLP::MinLPSetBCi(state,i,bndl,bndu);
}
//+------------------------------------------------------------------+
//| This function sets one-sided linear constraints A*x ~ AU, where |
//| "~" can be a mix of "<=", "=" and ">=". |
//| IMPORTANT: this function is provided here for compatibility with |
//| the rest of ALGLIB optimizers which accept constraints|
//| in format like this one. Many real-life problems |
//| feature two-sided constraints like a0 <= a*x <= a1. It|
//| is really inefficient to add them as a pair of |
//| one-sided constraints. |
//| Use MinLPSetLC2Dense(), MinLPSetLC2(), MinLPAddLC2() (or its |
//| sparse version) wherever possible. |
//| INPUT PARAMETERS: |
//| State - structure previously allocated with MinLPCreate() |
//| call. |
//| A - linear constraints, array[K, N + 1]. Each row of A |
//| represents one constraint, with first N elements |
//| being linear coefficients, and last element being |
//| right side. |
//| CT - constraint types, array[K]: |
//| * if CT[i] > 0, then I-th constraint is |
//| A[i, *]*x >= A[i, n] |
//| * if CT[i] = 0, then I-th constraint is |
//| A[i, *] * x = A[i, n] |
//| * if CT[i] < 0, then I-th constraint is |
//| A[i, *] * x <= A[i, n] |
//| K - number of equality/inequality constraints, K >= 0; |
//| if not given, inferred from sizes of A and CT. |
//+------------------------------------------------------------------+
void CAlglib::MinLPSetLC(CMinLPState &state,CMatrixDouble &a,
                         CRowInt &ct,int k)
{
//--- forward one-sided constraint matrix a (array[K,N+1]), constraint
//--- type vector ct and explicit count k to the CMinLP implementation
CMinLP::MinLPSetLC(state,a,ct,k);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//+------------------------------------------------------------------+
//| Overload of MinLPSetLC() that infers the number of constraints K |
//| from the height of matrix A. The constraint-type vector CT must  |
//| have exactly one entry per row of A.                             |
//+------------------------------------------------------------------+
void CAlglib::MinLPSetLC(CMinLPState &state,CMatrixDouble &a,CRowInt &ct)
{
//--- number of constraints equals the matrix height
int rows=CAp::Rows(a);
//--- verify that CT supplies one type code per constraint row
if(!CAp::Assert(rows==CAp::Len(ct),"Error while calling 'MinLPSetLC': looks like one of arguments has wrong size"))
return;
//--- delegate to the full-signature implementation
CMinLP::MinLPSetLC(state,a,ct,rows);
}
//+------------------------------------------------------------------+
//| This function sets two-sided linear constraints AL <= A*x <= AU. |
//| This version accepts dense matrix as input; internally LP solver|
//| uses sparse storage anyway (most LP problems are sparse), but for|
//| your convenience it may accept dense inputs. This function |
//| overwrites linear constraints set by previous calls (if such |
//| calls were made). |
//| We recommend you to use sparse version of this function unless |
//| you solve small-scale LP problem (less than few hundreds of |
//| variables). |
//| NOTE: there also exist several versions of this function: |
//| * one-sided dense version which accepts constraints in the same|
//| format as one used by QP and NLP solvers |
//| * two-sided sparse version which accepts sparse matrix |
//| * two-sided dense version which allows you to add constraints |
//| row by row |
//| * two-sided sparse version which allows you to add constraints |
//| row by row |
//| INPUT PARAMETERS: |
//| State - structure previously allocated with MinLPCreate() |
//| call. |
//| A - linear constraints, array[K, N]. Each row of A |
//| represents one constraint. One-sided inequality |
//| constraints, two-sided inequality constraints, |
//| equality constraints are supported (see below) |
//| AL, AU - lower and upper bounds, array[K]; |
//| * AL[i] = AU[i] => equality constraint Ai * x |
//| * AL[i]<AU[i] => two-sided constraint |
//| AL[i] <= Ai*x <= AU[i] |
//| * AL[i] = -INF => one-sided constraint |
//| Ai*x <= AU[i] |
//| * AU[i] = +INF => one-sided constraint |
//| AL[i] <= Ai*x |
//| * AL[i] = -INF, AU[i] = +INF => constraint is |
//| ignored |
//| K - number of equality/inequality constraints, K >= 0; |
//| if not given, inferred from sizes of A, AL, AU. |
//+------------------------------------------------------------------+
void CAlglib::MinLPSetLC2Dense(CMinLPState &state,CMatrixDouble &a,
                               CRowDouble &al,CRowDouble &au,int k)
{
//--- forward dense two-sided constraints AL <= A*x <= AU with an
//--- explicit constraint count k to the CMinLP implementation
CMinLP::MinLPSetLC2Dense(state,a,al,au,k);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//+------------------------------------------------------------------+
//| Overload of MinLPSetLC2Dense() that infers the constraint count K|
//| from the height of matrix A. Both bound vectors AL and AU must   |
//| contain exactly one entry per row of A.                          |
//+------------------------------------------------------------------+
void CAlglib::MinLPSetLC2Dense(CMinLPState &state,CMatrixDouble &a,
                               CRowDouble &al,CRowDouble &au)
{
//--- number of constraints equals the matrix height
int rows=CAp::Rows(a);
//--- each constraint row needs a matching lower and upper bound
bool sizes_ok=(rows==CAp::Len(al) && rows==CAp::Len(au));
if(!CAp::Assert(sizes_ok,
                "Error while calling 'minlpsetlc2dense': looks like one of arguments has wrong size"))
return;
//--- delegate to the full-signature implementation
CMinLP::MinLPSetLC2Dense(state,a,al,au,rows);
}
//+------------------------------------------------------------------+
//| This function sets two-sided linear constraints AL <= A*x <= AU |
//| with sparse constraining matrix A. Recommended for large-scale |
//| problems. |
//| This function overwrites linear (non-box) constraints set by |
//| previous calls (if such calls were made). |
//| INPUT PARAMETERS: |
//| State - structure previously allocated with MinLPCreate() |
//| call. |
//| A - sparse matrix with size [K, N] (exactly!). Each row|
//| of A represents one general linear constraint. A |
//| can be stored in any sparse storage format. |
//| AL, AU - lower and upper bounds, array[K]; |
//|               * AL[i] = AU[i] => equality constraint Ai*x        |
//|               * AL[i]<AU[i] => two-sided constraint              |
//|                 AL[i] <= Ai*x <= AU[i]                           |
//|               * AL[i] = -INF => one-sided constraint             |
//|                 Ai*x <= AU[i]                                    |
//|               * AU[i] = +INF => one-sided constraint             |
//|                 AL[i] <= Ai*x                                    |
//|               * AL[i] = -INF, AU[i] = +INF => constraint is      |
//|                 ignored                                          |
//| K - number of equality/inequality constraints, K >= 0. |
//| If K = 0 is specified, A, AL, AU are ignored. |
//+------------------------------------------------------------------+
void CAlglib::MinLPSetLC2(CMinLPState &state,CSparseMatrix &a,
                          CRowDouble &al,CRowDouble &au,int k)
{
//--- forward sparse two-sided constraints AL <= A*x <= AU with an
//--- explicit constraint count k to the CMinLP implementation
CMinLP::MinLPSetLC2(state,a,al,au,k);
}
//+------------------------------------------------------------------+
//| This function appends two-sided linear constraint AL <= A*x <= AU|
//| to the list of currently present constraints. |
//| This version accepts dense constraint vector as input, but |
//| sparsifies it for internal storage and processing. Thus, time to |
//| add one constraint in is O(N) - we have to scan entire array of |
//| length N. Sparse version of this function is order of magnitude |
//| faster for constraints with just a few nonzeros per row. |
//| INPUT PARAMETERS: |
//| State - structure previously allocated with MinLPCreate() |
//| call. |
//| A - linear constraint coefficient, array[N], right side|
//| is NOT included. |
//| AL, AU - lower and upper bounds; |
//| * AL = AU => equality constraint Ai*x |
//| * AL<AU => two-sided constraint |
//| AL <= A*x <= AU |
//| * AL = -INF => one-sided constraint Ai*x <= AU |
//| * AU = +INF => one-sided constraint AL <= Ai*x |
//| * AL = -INF, AU = +INF => constraint is ignored |
//+------------------------------------------------------------------+
void CAlglib::MinLPAddLC2Dense(CMinLPState &state,CRowDouble &a,
                               double al,double au)
{
//--- append one dense constraint row al <= a*x <= au to the problem
CMinLP::MinLPAddLC2Dense(state,a,al,au);
}
//+------------------------------------------------------------------+
//| This function appends two-sided linear constraint AL <= A*x <= AU|
//| to the list of currently present constraints. |
//| Constraint is passed in compressed format: as list of non-zero |
//| entries of coefficient vector A. Such approach is more efficient |
//| than dense storage for highly sparse constraint vectors. |
//| INPUT PARAMETERS: |
//| State - structure previously allocated with MinLPCreate() |
//| call. |
//| IdxA - array[NNZ], indexes of non-zero elements of A: |
//| * can be unsorted |
//| * can include duplicate indexes (corresponding |
//| entries of ValA[] will be summed) |
//| ValA - array[NNZ], values of non-zero elements of A |
//| NNZ - number of non-zero coefficients in A |
//| AL, AU - lower and upper bounds; |
//| * AL = AU => equality constraint A*x |
//| * AL<AU => two-sided constraint AL <= A*x <= AU |
//| * AL = -INF => one-sided constraint A*x <= AU |
//| * AU = +INF => one-sided constraint AL <= A*x |
//| * AL = -INF, AU = +INF => constraint is ignored |
//+------------------------------------------------------------------+
void CAlglib::MinLPAddLC2(CMinLPState &state,CRowInt &idxa,
                          CRowDouble &vala,int nnz,double al,
                          double au)
{
//--- append one compressed (index/value) constraint row with nnz
//--- non-zero coefficients and bounds al <= A*x <= au
CMinLP::MinLPAddLC2(state,idxa,vala,nnz,al,au);
}
//+------------------------------------------------------------------+
//| This function solves LP problem. |
//| INPUT PARAMETERS: |
//| State - algorithm State |
//| You should use MinLPResults() function to access results after |
//| calls to this function. |
//+------------------------------------------------------------------+
void CAlglib::MinLPOptimize(CMinLPState &state)
{
//--- run the configured LP solver; retrieve the solution afterwards
//--- with MinLPResults()/MinLPResultsBuf()
CMinLP::MinLPOptimize(state);
}
//+------------------------------------------------------------------+
//| LP solver results |
//| INPUT PARAMETERS: |
//| State - algorithm State |
//| OUTPUT PARAMETERS: |
//| X - array[N], solution (on failure: last trial point) |
//| Rep - optimization report. You should check |
//| Rep.TerminationType, which contains completion |
//| code, and you may check another fields which |
//| contain another information about algorithm |
//| functioning. |
//| Failure codes returned by algorithm are: |
//| * -4 LP problem is primal unbounded(dual infeasible) |
//| * -3 LP problem is primal infeasible(dual unbounded) |
//| * -2 IPM solver detected that problem is either infeasible |
//| or unbounded |
//| Success codes: |
//| * 1..4 successful completion |
//| * 5 MaxIts steps was taken |
//+------------------------------------------------------------------+
void CAlglib::MinLPResults(CMinLPState &state,CRowDouble &x,
                           CMinLPReport &rep)
{
//--- fetch the solution vector x and the optimization report rep;
//--- check rep.TerminationType for the completion code
CMinLP::MinLPResults(state,x,rep);
}
//+------------------------------------------------------------------+
//| LP results |
//| Buffered implementation of MinLPResults() which uses |
//| pre-allocated buffer to store X[]. If buffer size is too small, |
//| it resizes buffer. It is intended to be used in the inner cycles |
//| of performance critical algorithms where array reallocation |
//| penalty is too large to be ignored. |
//+------------------------------------------------------------------+
void CAlglib::MinLPResultsBuf(CMinLPState &state,CRowDouble &x,
                              CMinLPReport &rep)
{
//--- buffered variant of MinLPResults(): reuses the caller-supplied
//--- x buffer, resizing only when it is too small
CMinLP::MinLPResultsBuf(state,x,rep);
}
//+------------------------------------------------------------------+
//| NONLINEARLY CONSTRAINED OPTIMIZATION WITH PRECONDITIONED |
//| AUGMENTED LAGRANGIAN ALGORITHM |
//| DESCRIPTION: |
//| The subroutine minimizes function F(x) of N arguments subject to |
//| any combination of: |
//| * bound constraints |
//| * linear inequality constraints |
//| * linear equality constraints |
//| * nonlinear equality constraints Gi(x) = 0 |
//| * nonlinear inequality constraints Hi(x) <= 0 |
//| REQUIREMENTS: |
//| * user must provide function value and gradient for F(), H(), |
//| G() |
//| * starting point X0 must be feasible or not too far away from |
//| the feasible set |
//| * F(), G(), H() are continuously differentiable on the |
//| feasible set and its neighborhood |
//| * nonlinear constraints G() and H() must have non-zero |
//| gradient at G(x) = 0 and at H(x) = 0. Say, constraint like |
//| x^2 >= 1 is supported, but x^2 >= 0 is NOT supported. |
//| USAGE: |
//|   Constrained optimization is far more complex than the          |
//| unconstrained one. Nonlinearly constrained optimization is one |
//| of the most esoteric numerical procedures. |
//| Here we give very brief outline of the MinNLC optimizer. We |
//| strongly recommend you to study examples in the ALGLIB Reference |
//| Manual and to read ALGLIB User Guide on optimization, which is |
//| available at http://www.alglib.net/optimization/ |
//| 1. User initializes algorithm State with MinNLCCreate() call |
//| and chooses what NLC solver to use. There is some solver |
//| which is used by default, with default Settings, but you |
//| should NOT rely on default choice. It may change in future |
//| releases of ALGLIB without notice, and no one can guarantee |
//| that new solver will be able to solve your problem with |
//| default Settings. |
//| From the other side, if you choose solver explicitly, you can be |
//| pretty sure that it will work with new ALGLIB releases. |
//| In the current release following solvers can be used: |
//| * SQP solver, recommended for medium-scale problems (less than|
//| thousand of variables) with hard-to-evaluate target |
//| functions. Requires less function evaluations than other |
//| solvers but each step involves solution of QP subproblem, |
//| so running time may be higher than that of AUL (another |
//| recommended option). Activated with MinNLCSetAlgoSQP() |
//| function. |
//| * AUL solver with dense preconditioner, recommended for |
//| large-scale problems or for problems with cheap target |
//| function. Needs more function evaluations that SQP (about |
//| 5x - 10x times more), but its iterations are much |
//| cheaper that that of SQP. Activated with MinNLCSetAlgoAUL() |
//| function. |
//| * SLP solver, successive linear programming. The slowest one, |
//|     requires more target function evaluations than SQP and AUL.  |
//| However, it is somewhat more robust in tricky cases, so |
//| it can be used as a backup plan. Activated with |
//| MinNLCSetAlgoSLP() function. |
//| 2. [optional] user activates OptGuard integrity checker which |
//| tries to detect possible errors in the user - supplied |
//| callbacks: |
//| * discontinuity/nonsmoothness of the target/nonlinear |
//| constraints |
//| * errors in the analytic gradient provided by user. |
//| This feature is essential for early prototyping stages because it|
//| helps to catch common coding and problem statement errors. |
//| OptGuard can be activated with following functions (one per each |
//| check performed): |
//| * MinNLCOptGuardSmoothness() |
//| * MinNLCOptGuardGradient() |
//| 3. User adds boundary and/or linear and/or nonlinear |
//| constraints by means of calling one of the following |
//| functions: |
//| a) MinNLCSetBC() for boundary constraints |
//| b) MinNLCSetLC() for linear constraints |
//| c) MinNLCSetNLC() for nonlinear constraints |
//| You may combine(a), (b) and (c) in one optimization problem. |
//| 4. User sets scale of the variables with MinNLCSetScale() |
//| function. It is VERY important to set scale of the |
//| variables, because nonlinearly constrained problems are |
//| hard to solve when variables are badly scaled. |
//| 5. User sets stopping conditions with MinNLCSetCond(). If |
//| NLC solver uses inner/outer iteration layout, this |
//| function sets stopping conditions for INNER iterations. |
//| 6. Finally, user calls MinNLCOptimize() function which takes |
//| algorithm State and pointer (delegate, etc.) to callback |
//| function which calculates F / G / H. |
//| 7. User calls MinNLCResults() to get solution; additionally you|
//| can retrieve OptGuard report with MinNLCOptGuardResults(), |
//| and get detailed report about purported errors in the target|
//| function with: |
//| * MinNLCOptGuardNonC1Test0Results() |
//| * MinNLCOptGuardNonC1Test1Results() |
//| 8. Optionally user may call MinNLCRestartFrom() to solve |
//| another problem with same N but another starting point. |
//| MinNLCRestartFrom() allows to reuse already initialized |
//| structure. |
//| INPUT PARAMETERS: |
//| N - problem dimension, N > 0 : |
//| * if given, only leading N elements of X are used |
//| * if not given, automatically determined from size |
//| of X |
//| X - starting point, array[N]: |
//| * it is better to set X to a feasible point |
//| * but X can be infeasible, in which case algorithm |
//| will try to find feasible point first, using X as|
//| initial approximation. |
//| OUTPUT PARAMETERS: |
//| State - structure stores algorithm State |
//+------------------------------------------------------------------+
void CAlglib::MinNLCCreate(int n,CRowDouble &x,CMinNLCState &state)
{
//--- create NLC optimizer state for an n-dimensional problem with
//--- starting point x; see header comment above for the full workflow
CMinNLC::MinNLCCreate(n,x,state);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//+------------------------------------------------------------------+
//| Overload of MinNLCCreate() that infers the problem dimension N   |
//| from the length of the starting point X.                         |
//+------------------------------------------------------------------+
void CAlglib::MinNLCCreate(CRowDouble &x,CMinNLCState &state)
{
//--- problem dimensionality is taken from the start-point length
int size=CAp::Len(x);
//--- delegate to the explicit-size constructor
CMinNLC::MinNLCCreate(size,x,state);
}
//+------------------------------------------------------------------+
//| This subroutine is a finite difference variant of MinNLCCreate().|
//| It uses finite differences in order to differentiate target |
//| function. |
//| Description below contains information which is specific to this |
//| function only. We recommend to read comments on MinNLCCreate() |
//| in order to get more information about creation of NLC optimizer.|
//| INPUT PARAMETERS: |
//| N - problem dimension, N > 0: |
//| * if given, only leading N elements of X are used |
//| * if not given, automatically determined from size |
//| of X |
//| X - starting point, array[N]: |
//| * it is better to set X to a feasible point |
//| * but X can be infeasible, in which case algorithm |
//| will try to find feasible point first, using X as|
//| initial approximation. |
//| DiffStep - differentiation step, > 0 |
//| OUTPUT PARAMETERS: |
//| State - structure stores algorithm State |
//| NOTES: |
//| 1. algorithm uses 4-point central formula for differentiation. |
//| 2. differentiation step along I-th axis is equal to |
//| DiffStep*S[I] where S[] is scaling vector which can be set |
//| by MinNLCSetScale() call. |
//| 3. we recommend you to use moderate values of differentiation |
//| step. Too large step will result in too large TRUNCATION |
//| errors, while too small step will result in too large |
//| NUMERICAL errors. 1.0E-4 can be good value to start from. |
//| 4. Numerical differentiation is very inefficient - one gradient|
//| calculation needs 4 * N function evaluations. This function |
//| will work for any N - either small(1...10), moderate |
//| (10...100) or large(100...). However, performance penalty |
//| will be too severe for any N's except for small ones. We |
//| should also say that code which relies on numerical |
//| differentiation is less robust and precise. Imprecise |
//| gradient may slow down convergence, especially on highly |
//| nonlinear problems. Thus we recommend to use this function |
//| for fast prototyping on small-dimensional problems only, |
//| and to implement analytical gradient as soon as possible. |
//+------------------------------------------------------------------+
void CAlglib::MinNLCCreateF(int n,CRowDouble &x,double diffstep,
                            CMinNLCState &state)
{
//--- finite-difference variant of MinNLCCreate(): the gradient is
//--- approximated numerically with step diffstep (must be > 0)
CMinNLC::MinNLCCreateF(n,x,diffstep,state);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//+------------------------------------------------------------------+
//| Overload of MinNLCCreateF() that infers the problem dimension N  |
//| from the length of the starting point X.                         |
//+------------------------------------------------------------------+
void CAlglib::MinNLCCreateF(CRowDouble &x,double diffstep,CMinNLCState &state)
{
//--- problem dimensionality is taken from the start-point length
int vars=CAp::Len(x);
//--- delegate to the explicit-size, finite-difference constructor
CMinNLC::MinNLCCreateF(vars,x,diffstep,state);
}
//+------------------------------------------------------------------+
//| This function sets boundary constraints for NLC optimizer. |
//| Boundary constraints are inactive by default (after initial |
//| creation). They are preserved after algorithm restart with |
//| MinNLCRestartFrom(). |
//| You may combine boundary constraints with general linear ones -|
//| and with nonlinear ones! Boundary constraints are handled more |
//| efficiently than other types. Thus, if your problem has mixed |
//| constraints, you may explicitly specify some of them as boundary |
//| and save some time / space. |
//| INPUT PARAMETERS: |
//| State - structure stores algorithm State |
//| BndL - lower bounds, array[N]. If some (all) variables |
//| are unbounded, you may specify very small number|
//| or -INF. |
//| BndU - upper bounds, array[N]. If some (all) variables |
//| are unbounded, you may specify very large number|
//| or +INF. |
//| NOTE 1: it is possible to specify BndL[i] = BndU[i]. In this     |
//|         case I-th variable will be "frozen" at                   |
//|         X[i] = BndL[i] = BndU[i].                                |
//| NOTE 2: when you solve your problem with augmented Lagrangian |
//| solver, boundary constraints are satisfied only |
//| approximately! It is possible that algorithm will |
//| evaluate function outside of feasible area! |
//+------------------------------------------------------------------+
void CAlglib::MinNLCSetBC(CMinNLCState &state,CRowDouble &bndl,
                          CRowDouble &bndu)
{
//--- forward per-variable lower/upper bound arrays to CMinNLC;
//--- see header comment above for the bound-encoding conventions
CMinNLC::MinNLCSetBC(state,bndl,bndu);
}
//+------------------------------------------------------------------+
//| This function sets linear constraints for MinNLC optimizer. |
//| Linear constraints are inactive by default (after initial |
//| creation). They are preserved after algorithm restart with |
//| MinNLCRestartFrom(). |
//| You may combine linear constraints with boundary ones - and with |
//| nonlinear ones! If your problem has mixed constraints, you may |
//| explicitly specify some of them as linear. It may help optimizer |
//| to handle them more efficiently. |
//| INPUT PARAMETERS: |
//| State - structure previously allocated with MinNLCCreate |
//| call. |
//| C - linear constraints, array[K, N + 1]. Each row of C |
//| represents one constraint, either equality or |
//| inequality (see below): |
//| * first N elements correspond to coefficients, |
//| * last element corresponds to the right part. |
//| All elements of C (including right part) must be |
//| finite. |
//| CT - type of constraints, array[K]: |
//| * if CT[i] > 0, then I-th constraint is |
//| C[i, *] * x >= C[i, n + 1] |
//| * if CT[i] = 0, then I-th constraint is |
//| C[i, *] * x = C[i, n + 1] |
//| * if CT[i] < 0, then I-th constraint is |
//| C[i, *] * x <= C[i, n + 1] |
//| K - number of equality/inequality constraints, K >= 0: |
//| * if given, only leading K elements of C/CT are |
//| used |
//| * if not given, automatically determined from sizes|
//| of C/CT |
//| NOTE 1: when you solve your problem with augmented Lagrangian |
//| solver, linear constraints are satisfied only |
//| approximately! It is possible that algorithm will |
//| evaluate function outside of feasible area! |
//+------------------------------------------------------------------+
void CAlglib::MinNLCSetLC(CMinNLCState &state,CMatrixDouble &c,
                          CRowInt &ct,int k)
  {
//--- thin wrapper: delegate linear-constraint setup (explicit K) to the core
   CMinNLC::MinNLCSetLC(state,c,ct,k);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::MinNLCSetLC(CMinNLCState &state,CMatrixDouble &c,CRowInt &ct)
  {
//--- deduce constraint count K from the number of rows in C
   int rows=CAp::Rows(c);
//--- C and CT must describe the same number of constraints
   if(!CAp::Assert(rows==CAp::Len(ct),
                   "Error while calling 'MinNLCSetLC': looks like one of arguments has wrong size"))
      return;
//--- forward to the full version with the auto-detected K
   CMinNLC::MinNLCSetLC(state,c,ct,rows);
  }
//+------------------------------------------------------------------+
//| This function sets nonlinear constraints for MinNLC optimizer. |
//| In fact, this function sets NUMBER of nonlinear constraints. |
//| Constraints itself (constraint functions) are passed to |
//| MinNLCOptimize() method. This method requires user-defined vector|
//| function F[] and its Jacobian J[], where: |
//| * first component of F[] and first row of Jacobian J[] |
//| corresponds to function being minimized |
//| * next NLEC components of F[] (and rows of J) correspond to |
//| nonlinear equality constraints G_i(x) = 0 |
//| * next NLIC components of F[] (and rows of J) correspond to |
//| nonlinear inequality constraints H_i(x) <= 0 |
//| NOTE: you may combine nonlinear constraints with linear/boundary |
//| ones. If your problem has mixed constraints, you may |
//| explicitly specify some of them as linear ones. It may help|
//| optimizer to handle them more efficiently. |
//| INPUT PARAMETERS: |
//| State - structure previously allocated with MinNLCCreate|
//| call. |
//| NLEC - number of Non-Linear Equality Constraints(NLEC),|
//| >= 0 |
//| NLIC - number of Non-Linear Inequality Constraints(NLIC)|
//| >= 0 |
//| NOTE 1: when you solve your problem with augmented Lagrangian |
//| solver, nonlinear constraints are satisfied only |
//| approximately! It is possible that algorithm will |
//| evaluate function outside of feasible area! |
//| NOTE 2: algorithm scales variables according to scale specified |
//| by MinNLCSetScale() function, so it can handle problems |
//| with badly scaled variables (as long as we KNOW their |
//| scales). |
//| However, there is no way to automatically scale nonlinear |
//| constraints Gi(x) and Hi(x). Inappropriate scaling of Gi/Hi may |
//| ruin convergence. Solving problem with constraint "1000*G0(x)=0" |
//| is NOT same as solving it with constraint "0.001*G0(x)=0". |
//| It means that YOU are the one who is responsible for correct |
//| scaling of nonlinear constraints Gi(x) and Hi(x). We recommend |
//| you to scale nonlinear constraints in such way that I-th |
//| component of dG/dX (or dH/dx) has approximately unit magnitude |
//| (for problems with unit scale) or has magnitude approximately |
//| equal to 1/S[i] (where S is a scale set by MinNLCSetScale() |
//| function). |
//+------------------------------------------------------------------+
void CAlglib::MinNLCSetNLC(CMinNLCState &state,int nlec,int nlic)
  {
//--- thin wrapper: pass equality/inequality constraint counts to the core
   CMinNLC::MinNLCSetNLC(state,nlec,nlic);
  }
//+------------------------------------------------------------------+
//| This function sets stopping conditions for inner iterations of |
//| optimizer. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm State |
//| EpsX - >= 0, The subroutine finishes its work if on k+1-th|
//| iteration the condition |v| <= EpsX is fulfilled, |
//| where: |
//| * | . | means Euclidian norm |
//| * v - scaled step vector, v[i] = dx[i] / s[i] |
//| * dx - step vector, dx = X(k + 1) - X(k) |
//| * s - scaling coefficients set by MinNLCSetScale() |
//| MaxIts - maximum number of iterations. If MaxIts = 0, the |
//| number of iterations is unlimited. |
//| Passing EpsX = 0 and MaxIts = 0(simultaneously) will lead to |
//| automatic selection of the stopping condition. |
//+------------------------------------------------------------------+
void CAlglib::MinNLCSetCond(CMinNLCState &state,double epsx,int maxits)
  {
//--- thin wrapper: delegate stopping-condition setup to the computational core
   CMinNLC::MinNLCSetCond(state,epsx,maxits);
  }
//+------------------------------------------------------------------+
//| This function sets scaling coefficients for NLC optimizer. |
//| ALGLIB optimizers use scaling matrices to test stopping |
//| conditions (step size and gradient are scaled before comparison |
//| with tolerances). Scale of the I-th variable is a translation |
//| invariant measure of: |
//| a) "how large" the variable is |
//| b) how large the step should be to make significant changes in |
//| the function |
//| Scaling is also used by finite difference variant of the |
//| optimizer - step along I-th axis is equal to DiffStep*S[I]. |
//| INPUT PARAMETERS: |
//| State - structure stores algorithm State |
//| S - array[N], non-zero scaling coefficients S[i] may |
//| be negative, sign doesn't matter. |
//+------------------------------------------------------------------+
void CAlglib::MinNLCSetScale(CMinNLCState &state,CRowDouble &s)
  {
//--- thin wrapper: delegate variable-scale setup to the computational core
   CMinNLC::MinNLCSetScale(state,s);
  }
//+------------------------------------------------------------------+
//| This function sets preconditioner to "inexact LBFGS-based" mode. |
//| Preconditioning is very important for convergence of Augmented |
//| Lagrangian algorithm because presence of penalty term makes |
//| problem ill-conditioned. Difference between performance of |
//| preconditioned and unpreconditioned methods can be as large |
//| as 100x! |
//| MinNLC optimizer may use following preconditioners, each with its|
//| own benefits and drawbacks: |
//| a) inexact LBFGS-based, with O(N * K) evaluation time |
//| b) exact low rank one, with O(N * K ^ 2) evaluation time |
//| c) exact robust one, with O(N ^ 3 + K * N ^ 2) evaluation |
//| time where K is a total number of general linear and |
//| nonlinear constraints (box ones are not counted). |
//| Inexact LBFGS-based preconditioner uses L-BFGS formula combined |
//| with orthogonality assumption to perform very fast updates. For a|
//| N-dimensional problem with K general linear or nonlinear |
//| constraints (boundary ones are not counted) it has O(N * K) cost|
//| per iteration. This preconditioner has best quality (less |
//| iterations) when general linear and nonlinear constraints |
//| are orthogonal to each other (orthogonality with respect to |
//| boundary constraints is not required). Number of iterations |
//| increases when constraints are non - orthogonal, because |
//| algorithm assumes orthogonality, but still it is better than no |
//| preconditioner at all. |
//| INPUT PARAMETERS: |
//| State - structure stores algorithm State |
//+------------------------------------------------------------------+
void CAlglib::MinNLCSetPrecInexact(CMinNLCState &state)
  {
//--- thin wrapper: select the inexact LBFGS-based preconditioner in the core
   CMinNLC::MinNLCSetPrecInexact(state);
  }
//+------------------------------------------------------------------+
//| This function sets preconditioner to "exact low rank" mode. |
//| Preconditioning is very important for convergence of Augmented |
//| Lagrangian algorithm because presence of penalty term makes |
//| problem ill-conditioned. Difference between performance of |
//| preconditioned and unpreconditioned methods can be as large |
//| as 100x! |
//| MinNLC optimizer may use following preconditioners, each with its|
//| own benefits and drawbacks: |
//| a) inexact LBFGS-based, with O(N * K) evaluation time |
//| b) exact low rank one, with O(N * K ^ 2) evaluation time |
//| c) exact robust one, with O(N ^ 3 + K * N ^ 2) evaluation |
//| time where K is a total number of general linear and |
//| nonlinear constraints (box ones are not counted). |
//| It also provides special unpreconditioned mode of operation which|
//| can be used for test purposes. Comments below discuss low rank |
//| preconditioner. |
//| Exact low-rank preconditioner uses Woodbury matrix identity to |
//| build quadratic model of the penalized function. It has following|
//| features: |
//| * no special assumptions about orthogonality of constraints |
//| * preconditioner evaluation is optimized for K << N. Its cost |
//| is O(N * K ^ 2), so it may become prohibitively slow for |
//| K >= N. |
//| * finally, stability of the process is guaranteed only for |
//| K << N. Woodbury update often fail for K >= N due to |
//| degeneracy of intermediate matrices. |
//| That's why we recommend to use "exact robust" preconditioner for |
//| such cases. |
//| RECOMMENDATIONS: |
//| We recommend to choose between "exact low rank" and "exact |
//| robust" preconditioners, with "low rank" version being chosen |
//| when you know in advance that total count of non-box constraints |
//| won't exceed N, and "robust" version being chosen when you need |
//| bulletproof solution. |
//| INPUT PARAMETERS: |
//| State - structure stores algorithm State |
//| UpdateFreq - update frequency. Preconditioner is rebuilt |
//| after every UpdateFreq iterations. Recommended |
//| value : 10 or higher. Zero value means that good|
//| default value will be used. |
//+------------------------------------------------------------------+
void CAlglib::MinNLCSetPrecExactLowRank(CMinNLCState &state,int updatefreq)
  {
//--- thin wrapper: select the exact low-rank preconditioner in the core
   CMinNLC::MinNLCSetPrecExactLowRank(state,updatefreq);
  }
//+------------------------------------------------------------------+
//| This function sets preconditioner to "exact robust" mode. |
//| Preconditioning is very important for convergence of Augmented |
//| Lagrangian algorithm because presence of penalty term makes |
//| problem ill-conditioned. Difference between performance of |
//| preconditioned and unpreconditioned methods can be as large |
//| as 100x! |
//| MinNLC optimizer may use following preconditioners, each with its|
//| own benefits and drawbacks: |
//| a) inexact LBFGS-based, with O(N * K) evaluation time |
//| b) exact low rank one, with O(N * K ^ 2) evaluation time |
//| c) exact robust one, with O(N ^ 3 + K * N ^ 2) evaluation |
//| time where K is a total number of general linear and |
//| nonlinear constraints (box ones are not counted). |
//| It also provides special unpreconditioned mode of operation which|
//| can be used for test purposes. Comments below discuss robust |
//| preconditioner. |
//| Exact robust preconditioner uses Cholesky decomposition to invert|
//| approximate Hessian matrix H = D + W'*C*W (where D stands for |
//| diagonal terms of Hessian, combined result of initial scaling |
//| matrix and penalty from box constraints; W stands for general |
//| linear constraints and linearization of nonlinear ones; C stands |
//| for diagonal matrix of penalty coefficients). |
//| This preconditioner has following features: |
//| * no special assumptions about constraint structure |
//| *preconditioner is optimized for stability; unlike "exact |
//| low rank" version which fails for K >= N, this one works well|
//| for any value of K. |
//| * the only drawback is that is takes O(N ^ 3 + K * N ^ 2) time |
//| to build it. No economical Woodbury update is applied even |
//| when it makes sense, thus there are exist situations (K << N)|
//| when "exact low rank" preconditioner outperforms this one. |
//| RECOMMENDATIONS: |
//| We recommend to choose between "exact low rank" and "exact |
//| robust" preconditioners, with "low rank" version being chosen |
//| when you know in advance that total count of non-box constraints |
//| won't exceed N, and "robust" version being chosen when you need |
//| bulletproof solution. |
//| INPUT PARAMETERS: |
//| State - structure stores algorithm State |
//| UpdateFreq - update frequency. Preconditioner is rebuilt |
//| after every UpdateFreq iterations. Recommended |
//| value: 10 or higher. Zero value means that good |
//| default value will be used. |
//+------------------------------------------------------------------+
void CAlglib::MinNLCSetPrecExactRobust(CMinNLCState &state,int updatefreq)
  {
//--- thin wrapper: select the exact robust preconditioner in the core
   CMinNLC::MinNLCSetPrecExactRobust(state,updatefreq);
  }
//+------------------------------------------------------------------+
//| This function sets preconditioner to "turned off" mode. |
//| Preconditioning is very important for convergence of Augmented |
//| Lagrangian algorithm because presence of penalty term makes |
//| problem ill-conditioned. Difference between performance of |
//| preconditioned and unpreconditioned methods can be as large |
//| as 100x! |
//| MinNLC optimizer may utilize two preconditioners, each with its |
//| own benefits and drawbacks: |
//| a) inexact LBFGS-based, and b) exact low rank one. |
//| It also provides special unpreconditioned mode of operation which|
//| can be used for test purposes. |
//| This function activates this test mode. Do not use it in |
//| production code to solve real-life problems. |
//| INPUT PARAMETERS: |
//| State - structure stores algorithm State |
//+------------------------------------------------------------------+
void CAlglib::MinNLCSetPrecNone(CMinNLCState &state)
  {
//--- thin wrapper: disable preconditioning (test mode) in the core
   CMinNLC::MinNLCSetPrecNone(state);
  }
//+------------------------------------------------------------------+
//| This function sets maximum step length (after scaling of step |
//| vector with respect to variable scales specified by |
//| MinNLCSetScale() call). |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm State |
//| StpMax - maximum step length, >= 0. Set StpMax to 0.0 |
//| (default), if you don't want to limit step |
//| length. |
//| Use this subroutine when you optimize target function which |
//| contains exp() or other fast growing functions, and optimization |
//| algorithm makes too large steps which leads to overflow. This |
//| function allows us to reject steps that are too large (and |
//| therefore expose us to the possible overflow) without actually |
//| calculating function value at the x + stp*d. |
//| NOTE: different solvers employed by MinNLC optimizer use |
//| different norms for step; AUL solver uses 2-norm, whilst |
//| SLP solver uses INF-norm. |
//+------------------------------------------------------------------+
void CAlglib::MinNLCSetSTPMax(CMinNLCState &state,double stpmax)
  {
//--- thin wrapper: delegate maximum-step-length setup to the core
   CMinNLC::MinNLCSetSTPMax(state,stpmax);
  }
//+------------------------------------------------------------------+
//| This function tells MinNLC unit to use Augmented Lagrangian |
//| algorithm for nonlinearly constrained optimization. This |
//| algorithm is a slight modification of one described in |
//| "A Modified Barrier-Augmented Lagrangian Method for Constrained |
//| Minimization(1999)" by D.GOLDFARB, R.POLYAK, K. SCHEINBERG, |
//| I.YUZEFOVICH. |
//| AUL solver can be significantly faster than SQP on easy problems |
//| due to cheaper iterations, although it needs more function |
//| evaluations. |
//| Augmented Lagrangian algorithm works by converting problem of |
//| minimizing F(x) subject to equality/inequality constraints to |
//| unconstrained problem of the form |
//| min[ f(x) + |
//| + Rho * PENALTY_EQ(x) + SHIFT_EQ(x, Nu1) + |
//| + Rho * PENALTY_INEQ(x) + SHIFT_INEQ(x, Nu2) ] |
//| where: |
//| * Rho is a fixed penalization coefficient |
//| * PENALTY_EQ(x) is a penalty term, which is used to |
//| APPROXIMATELY enforce equality constraints |
//| *SHIFT_EQ(x) is a special "shift" term which is used to |
//| "fine-tune" equality constraints, greatly increasing |
//| precision |
//| * PENALTY_INEQ(x) is a penalty term which is used to |
//| approximately enforce inequality constraints |
//| *SHIFT_INEQ(x) is a special "shift" term which is used to |
//| "fine-tune" inequality constraints, greatly increasing |
//| precision |
//| * Nu1/Nu2 are vectors of Lagrange coefficients which are fine- |
//| tuned during outer iterations of algorithm |
//| This version of AUL algorithm uses preconditioner, which |
//| greatly accelerates convergence. Because this algorithm is |
//| similar to penalty methods, it may perform steps into infeasible |
//| area. All kinds of constraints (boundary, linear and nonlinear |
//| ones) may be violated in intermediate points - and in the |
//| solution. However, properly configured AUL method is |
//| significantly better at handling constraints than barrier and/or |
//| penalty methods. |
//| The very basic outline of algorithm is given below: |
//| 1) first outer iteration is performed with "default" values of |
//| Lagrange multipliers Nu1/Nu2. Solution quality is low |
//| (candidate point can be too far away from true solution; |
//| large violation of constraints is possible) and is |
//| comparable with that of penalty methods. |
//| 2) subsequent outer iterations refine Lagrange multipliers and |
//| improve quality of the solution. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm State |
//| Rho - penalty coefficient, Rho > 0: |
//| * large enough that algorithm converges with |
//| desired precision. Minimum value is |
//| 10 * max(S'*diag(H)*S), where S is a scale matrix|
//| (set by MinNLCSetScale) and H is a Hessian of the|
//| function being minimized. If you can not easily |
//| estimate Hessian norm, see our recommendations |
//| below. |
//| * not TOO large to prevent ill - conditioning |
//| * for unit - scale problems(variables and Hessian |
//| have unit magnitude), Rho = 100 or Rho = 1000 can|
//| be used. |
//| * it is important to note that Rho is internally |
//| multiplied by scaling matrix, i.e. optimum value |
//| of Rho depends on scale of variables specified |
//| by MinNLCSetScale(). |
//| ItsCnt - number of outer iterations: |
//| * ItsCnt = 0 means that small number of outer |
//| iterations is automatically chosen (10 iterations|
//| in current version). |
//| * ItsCnt = 1 means that AUL algorithm performs just|
//| as usual barrier method. |
//| * ItsCnt > 1 means that AUL algorithm performs |
//| specified number of outer iterations |
//| HOW TO CHOOSE PARAMETERS |
//| Nonlinear optimization is a tricky area and Augmented Lagrangian |
//| algorithm is sometimes hard to tune. Good values of Rho and |
//| ItsCnt are problem - specific. In order to help you we prepared |
//| following set of recommendations: |
//| * for unit-scale problems (variables and Hessian have unit |
//| magnitude), Rho = 100 or Rho = 1000 can be used. |
//| * start from some small value of Rho and solve problem with |
//| just one outer iteration (ItcCnt = 1). In this case algorithm|
//| behaves like penalty method. Increase Rho in 2x or 10x steps |
//| until you see that one outer iteration returns point which is|
//| "rough approximation to solution". |
//| It is very important to have Rho so large that penalty term |
//| becomes constraining i.e. modified function becomes highly convex|
//| in constrained directions. |
//| From the other side, too large Rho may prevent you from |
//| converging to the solution. You can diagnose it by studying |
//| number of inner iterations performed by algorithm: too few (5-10 |
//| on 1000-dimensional problem) or too many (orders of magnitude |
//| more than dimensionality) usually means that Rho is too large. |
//| * with just one outer iteration you usually have low-quality |
//| solution. Some constraints can be violated with very large |
//| margin, while other ones (which are NOT violated in the true |
//| solution) can push final point too far in the inner area of |
//| the feasible set. |
//| For example, if you have constraint x0 >= 0 and true solution |
//| x0=1, then merely a presence of "x0>=0" will introduce a bias |
//| towards larger values of x0. Say, algorithm may stop at x0 = 1.5 |
//| instead of 1.0. |
//| * after you found good Rho, you may increase number of outer |
//| iterations. ItsCnt = 10 is a good value. Subsequent outer |
//| iteration will refine values of Lagrange multipliers. |
//| Constraints which were violated will be enforced, inactive |
//| constraints will be dropped(corresponding multipliers will be|
//| decreased). Ideally, you should see 10-1000x improvement in |
//| constraint handling(constraint violation is reduced). |
//| * if you see that algorithm converges to vicinity of solution, |
//| but additional outer iterations do not refine solution, it |
//| may mean that algorithm is unstable - it wanders around true |
//| solution, but can not approach it. Sometimes algorithm may be|
//| stabilized by increasing Rho one more time, making it 5x or |
//| 10x larger. |
//| SCALING OF CONSTRAINTS [IMPORTANT] |
//| AUL optimizer scales variables according to scale specified by |
//| MinNLCSetScale() function, so it can handle problems with badly |
//| scaled variables (as long as we KNOW their scales). However, |
//| because function being optimized is a mix of original function |
//| and constraint - dependent penalty functions, it is important to |
//| rescale both variables AND constraints. |
//| Say, if you minimize f(x) = x^2 subject to 1000000*x >= 0, then |
//| you have constraint whose scale is different from that of target |
//| function (another example is 0.000001*x >= 0). It is also |
//| possible to have constraints whose scales are misaligned: |
//| 1000000*x0 >= 0, 0.000001*x1 <= 0. Inappropriate scaling may ruin|
//| convergence because minimizing x^2 subject to x >= 0 is NOT same |
//| as minimizing it subject to 1000000*x >= 0. |
//| Because we know coefficients of boundary/linear constraints, we |
//| can automatically rescale and normalize them. However, there is |
//| no way to automatically rescale nonlinear constraints Gi(x) and |
//| Hi(x) - they are black boxes. |
//| It means that YOU are the one who is responsible for correct |
//| scaling of nonlinear constraints Gi(x) and Hi(x). We recommend |
//| you to rescale nonlinear constraints in such way that I-th |
//| component of dG/dX (or dH/dx) has magnitude approximately equal |
//| to 1/S[i] (where S is a scale set by MinNLCSetScale() function). |
//| WHAT IF IT DOES NOT CONVERGE? |
//| It is possible that AUL algorithm fails to converge to precise |
//| values of Lagrange multipliers. It stops somewhere around true |
//| solution, but candidate point is still too far from solution, |
//| and some constraints are violated. Such kind of failure is |
//| specific for Lagrangian algorithms - technically, they stop at |
//| some point, but this point is not constrained solution. |
//| There are exist several reasons why algorithm may fail to |
//| converge: |
//| a) too loose stopping criteria for inner iteration |
//| b) degenerate, redundant constraints |
//| c) target function has unconstrained extremum exactly at the |
//| boundary of some constraint |
//| d) numerical noise in the target function |
//| In all these cases algorithm is unstable - each outer iteration |
//| results in large and almost random step which improves handling |
//| of some constraints, but violates other ones (ideally outer |
//| iterations should form a sequence of progressively decreasing |
//| steps towards solution). |
//| First reason possible is that too loose stopping criteria for |
//| inner iteration were specified. Augmented Lagrangian algorithm   |
//| solves a sequence of intermediate problems, and requires each of |
//| them to be solved with high precision. Insufficient precision |
//| results in incorrect update of Lagrange multipliers. |
//| Another reason is that you may have specified degenerate |
//| constraints: say, some constraint was repeated twice. In most |
//| cases AUL algorithm gracefully handles such situations, but |
//| sometimes it may spend too much time figuring out subtle |
//| degeneracies in constraint matrix. |
//| Third reason is tricky and hard to diagnose. Consider situation |
//| when you minimize f = x^2 subject to constraint x >= 0. |
//| Unconstrained extremum is located exactly at the boundary of |
//| constrained area. In this case algorithm will tend to oscillate |
//| between negative and positive x. Each time it stops at x<0 it |
//| "reinforces" constraint x >= 0, and each time it is bounced to |
//| x>0 it "relaxes" constraint( and is attracted to x < 0). |
//| Such situation sometimes happens in problems with hidden         |
//| symmetries. Algorithm gets caught in a loop with Lagrange        |
//| multipliers being continuously increased / decreased. Luckily, |
//| such loop forms after at least three iterations, so this problem|
//| can be solved by DECREASING number of outer iterations down |
//| to 1-2 and increasing penalty coefficient Rho as much as possible|
//| Final reason is numerical noise. AUL algorithm is robust against |
//| moderate noise (more robust than, say, active set methods), but |
//| large noise may destabilize algorithm. |
//+------------------------------------------------------------------+
void CAlglib::MinNLCSetAlgoAUL(CMinNLCState &state,double rho,int itscnt)
  {
//--- thin wrapper: select the Augmented Lagrangian solver in the core
   CMinNLC::MinNLCSetAlgoAUL(state,rho,itscnt);
  }
//+------------------------------------------------------------------+
//| This function tells MinNLC optimizer to use SLP (Successive |
//| Linear Programming) algorithm for nonlinearly constrained |
//| optimization. This algorithm is a slight modification of one |
//| described in "A Linear programming - based optimization algorithm|
//| for solving nonlinear programming problems" (2010) by Claus Still|
//| and Tapio Westerlund. |
//| This solver is the slowest one in ALGLIB, it requires more target|
//| function evaluations than SQP and AUL. However it is somewhat    |
//| more robust in tricky cases, so it can be used as a backup plan. |
//| We recommend to use this algo when SQP/AUL do not work (does not |
//| return the solution you expect). If trying different approach |
//| gives same results, then MAYBE something is wrong with your |
//| optimization problem. |
//| Despite its name ("linear" = "first order method") this algorithm|
//| performs steps similar to that of conjugate gradients method; |
//| internally it uses orthogonality/conjugacy requirement for |
//| subsequent steps which makes it closer to second order methods in|
//| terms of convergence speed. |
//| Convergence is proved for the following case: |
//| * function and constraints are continuously differentiable (C1|
//| class) |
//| * extended Mangasarian-Fromovitz constraint qualification |
//| (EMFCQ) holds; in the context of this algorithm EMFCQ means |
//| that one can, for any infeasible point, find a search |
//| direction such that the constraint infeasibilities are |
//| reduced. |
//| This algorithm has following nice properties: |
//| * no parameters to tune |
//| * no convexity requirements for target function or constraints|
//| * initial point can be infeasible |
//| * algorithm respects box constraints in all intermediate |
//| points (it does not even evaluate function outside of box |
//| constrained area) |
//| * once linear constraints are enforced, algorithm will not |
//| violate them |
//| * no such guarantees can be provided for nonlinear |
//| constraints, but once nonlinear constraints are enforced, |
//| algorithm will try to respect them as much as possible |
//| * numerical differentiation does not violate box constraints |
//| (although general linear and nonlinear ones can be violated |
//| during differentiation) |
//| * from our experience, this algorithm is somewhat more robust |
//| in really difficult cases |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm State |
//| ===== TRACING SLP SOLVER ======================================= |
//| SLP solver supports advanced tracing capabilities. You can trace |
//| algorithm output by specifying following trace symbols (case- |
//| insensitive) by means of trace_file() call: |
//| * 'SLP' - for basic trace of algorithm steps and decisions. |
//| Only short scalars(function values and deltas) are |
//| printed. |
//| N-dimensional quantities like search directions are|
//| NOT printed. |
//| It also prints OptGuard integrity checker report |
//| when nonsmoothness of target / constraints is |
//| suspected. |
//| * 'SLP.DETAILED' - for output of points being visited and |
//| search directions. |
//| This symbol also implicitly defines 'SLP'. You can |
//| control output format by additionally specifying: |
//| * nothing to output in 6-digit exponential format |
//| * 'PREC.E15' to output in 15-digit exponential |
//| format |
//| * 'PREC.F6' to output in 6-digit fixed-point format|
//| * 'SLP.PROBING' - to let algorithm insert additional function |
//| evaluations before line search in order to build |
//| human-readable chart of the raw Lagrangian |
//| (~40 additional function evaluations is performed |
//| for each line search). This symbol also implicitly |
//| defines 'SLP'. Definition of this symbol also |
//| automatically activates OptGuard smoothness monitor|
//| * 'OPTGUARD' - for report of smoothness/continuity violations |
//| in target and/or constraints. This kind of |
//| reporting is included in 'SLP', but it comes with |
//| lots of additional Info. If you need just |
//| smoothness monitoring, specify this setting. |
//| NOTE: this tag merely directs OptGuard output to log file. Even |
//| if you specify it, you still have to configure OptGuard by |
//| calling MinNLCOptGuard...() family of functions. |
//| By default trace is disabled and adds no overhead to the |
//| optimization process. However, specifying any of the symbols adds|
//| some formatting and output - related overhead. Specifying |
//| 'SLP.PROBING' adds even larger overhead due to additional |
//| function evaluations being performed. |
//| You may specify multiple symbols by separating them with commas: |
//| > |
//| >CAlglib::Trace_File("SLP,SLP.PROBING,PREC.F6", |
//| "path/to/trace.log") |
//| > |
//+------------------------------------------------------------------+
void CAlglib::MinNLCSetAlgoSLP(CMinNLCState &state)
  {
//--- thin wrapper: select the Successive Linear Programming solver in the core
   CMinNLC::MinNLCSetAlgoSLP(state);
  }
//+------------------------------------------------------------------+
//| This function tells MinNLC optimizer to use SQP (Successive |
//| Quadratic Programming) algorithm for nonlinearly constrained |
//| optimization. |
//| This algorithm needs order of magnitude (5x-10x) less function |
//| evaluations than AUL solver, but has higher overhead because each|
//| iteration involves solution of quadratic programming problem. |
//| Convergence is proved for the following case: |
//| * function and constraints are continuously differentiable |
//| (C1 class) |
//| This algorithm has following nice properties: |
//| * no parameters to tune |
//| * no convexity requirements for target function or constraints|
//| * initial point can be infeasible |
//| * algorithm respects box constraints in all intermediate |
//| points (it does not even evaluate function outside of box |
//| constrained area) |
//| * once linear constraints are enforced, algorithm will not |
//| violate them |
//| * no such guarantees can be provided for nonlinear |
//| constraints, but once nonlinear constraints are enforced, |
//| algorithm will try to respect them as much as possible |
//| * numerical differentiation does not violate box constraints |
//| (although general linear and nonlinear ones can be violated |
//| during differentiation) |
//| We recommend this algorithm as a default option for medium scale |
//| problems (less than thousand of variables) or problems with |
//| target function being hard to evaluate. |
//| For large-scale problems or ones with very cheap target function|
//| AUL solver can be better option. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm State |
//| ===== INTERACTION WITH OPTGUARD ================================ |
//| OptGuard integrity checker allows us to catch problems like |
//| errors in gradients and discontinuity/nonsmoothness of the |
//| target/constraints. The latter kind of problems can be detected |
//| by looking upon line searches performed during optimization and |
//| searching for signs of nonsmoothness. |
//| The problem with SQP is that it is too good for OptGuard to work-|
//| it does not perform line searches. It typically needs 1-2 |
//| function evaluations per step, and it is not enough for OptGuard |
//| to detect nonsmoothness. |
//| So, if you suspect that your problem is nonsmooth and if you want|
//| to confirm or deny it, we recommend you to either: |
//| * use AUL or SLP solvers, which can detect nonsmoothness of |
//| the problem |
//| * or, alternatively, activate 'SQP.PROBING' trace tag that |
//| will insert additional function evaluations (~40 per line |
//| step) that will help OptGuard integrity checker to study |
//| properties of your problem |
//| ===== TRACING SQP SOLVER ======================================= |
//| SQP solver supports advanced tracing capabilities. You can trace |
//| algorithm output by specifying following trace symbols (case- |
//| insensitive) by means of trace_file() call: |
//| * 'SQP' - for basic trace of algorithm steps and |
//| decisions. Only short scalars (function values |
//| and deltas) are printed. |
//| N-dimensional quantities like search directions |
//| are NOT printed. |
//| It also prints OptGuard integrity checker report|
//| when nonsmoothness of target/constraints is |
//| suspected. |
//| * 'SQP.DETAILED' - for output of points being visited and |
//| search directions. This symbol also implicitly |
//| defines 'SQP'. You can control output format by |
//| additionally specifying: |
//| * nothing to output in 6-digit exponential |
//| format |
//| * 'PREC.E15' to output in 15-digit exponential |
//| format |
//| * 'PREC.F6' to output in 6-digit fixed-point |
//| format |
//| * 'SQP.PROBING' - to let algorithm insert additional function |
//| evaluations before line search in order to build|
//| human-readable chart of the raw Lagrangian (~40 |
//| additional function evaluations is performed for|
//| each line search). This symbol also implicitly |
//| defines 'SQP' and activates OptGuard integrity |
//| checker which detects continuity and smoothness |
//| violations. An OptGuard log is printed at the |
//| end of the file. |
//| By default trace is disabled and adds no overhead to the |
//| optimization process. However, specifying any of the symbols adds|
//| some formatting and output-related overhead. Specifying |
//| 'SQP.PROBING' adds even larger overhead due to additional |
//| function evaluations being performed. |
//| You may specify multiple symbols by separating them with commas: |
//| > |
//| >CAlglib::Trace_File("SQP, SQP.PROBING, PREC.F6", |
//| "path/to/trace.log") |
//| > |
//+------------------------------------------------------------------+
void CAlglib::MinNLCSetAlgoSQP(CMinNLCState &state)
  {
//--- thin wrapper: select the SQP (Successive Quadratic Programming)
//--- solver on the given optimizer state; delegates to the static
//--- implementation in CMinNLC
   CMinNLC::MinNLCSetAlgoSQP(state);
  }
//+------------------------------------------------------------------+
//| This function turns on / off reporting. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm State |
//| NeedXRep - whether iteration reports are needed or not |
//| If NeedXRep is True, algorithm will call rep() callback function |
//| if it is provided to MinNLCOptimize(). |
//| NOTE: algorithm passes two parameters to rep() callback - |
//| current point and penalized function value at current |
//| point. Important - function value which is returned is |
//| NOT function being minimized. It is sum of the value of |
//| the function being minimized - and penalty term. |
//+------------------------------------------------------------------+
void CAlglib::MinNLCSetXRep(CMinNLCState &state,bool needxrep)
  {
//--- thin wrapper: enable/disable per-iteration rep() callbacks on the
//--- optimizer state; delegates to the static implementation in CMinNLC
   CMinNLC::MinNLCSetXRep(state,needxrep);
  }
//+------------------------------------------------------------------+
//| NOTES: |
//| 1. This function has two different implementations: one which |
//| uses exact (analytical) user-supplied Jacobian, and one |
//| which uses only function vector and numerically |
//| differentiates function in order to obtain gradient. |
//| Depending on the specific function used to create optimizer |
//| object you should choose appropriate variant of MinNLCOptimize()-|
//| one which accepts function AND Jacobian or one which accepts ONLY|
//| function. |
//| Be careful to choose variant of MinNLCOptimize() which |
//| corresponds to your optimization scheme! Table below lists |
//| different combinations of callback (function/gradient) passed to |
//| MinNLCOptimize() and specific function used to create optimizer. |
//| | USER PASSED TO MinNLCOptimize() |
//| CREATED WITH | function only | function and gradient |
//| ------------------------------------------------------------ |
//| MinNLCCreateF() | works FAILS |
//| MinNLCCreate() | FAILS works |
//| Here "FAILS" denotes inappropriate combinations of optimizer |
//| creation function and MinNLCOptimize() version. Attemps to use |
//| such combination will lead to exception. Either you did not pass |
//| gradient when it WAS needed or you passed gradient when it was |
//| NOT needed. |
//+------------------------------------------------------------------+
bool CAlglib::MinNLCIteration(CMinNLCState &state)
  {
//--- thin wrapper: advance the reverse-communication optimizer by one
//--- step; returns false when optimization is complete (see
//--- MinNLCOptimize() for the driver loop that serves the requests)
   return(CMinNLC::MinNLCIteration(state));
  }
//+------------------------------------------------------------------+
//| This family of functions is used to launch iterations of         |
//| nonlinear optimizer |
//| These functions accept following parameters: |
//| fvec - callback which calculates function vector fi[] at |
//| given point x |
//| jac - callback which calculates function vector fi[] and |
//| Jacobian jac at given point x |
//| rep - optional callback which is called after each |
//| iteration can be null |
//| obj - optional object which is passed to func/grad/hess/ |
//| jac/rep can be null |
//| NOTES: |
//| 1. This function has two different implementations: one which |
//| uses exact (analytical) user-supplied Jacobian, and one which|
//| uses only function vector and numerically differentiates |
//| function in order to obtain gradient. |
//| Depending on the specific function used to create |
//| optimizer object you should choose appropriate variant of |
//| MinNLCOptimize() - one which accepts function AND Jacobian |
//| or one which accepts ONLY function. |
//| Be careful to choose variant of MinNLCOptimize() which |
//| corresponds to your optimization scheme! Table below lists |
//| different combinations of callback (function/gradient) |
//| passed to MinNLCOptimize() and specific function used to |
//| create optimizer. |
//| | USER PASSED TO MinNLCOptimize() |
//| CREATED WITH | function only | function and gradient |
//| ------------------------------------------------------------ |
//| MinNLCCreateF() | works FAILS |
//| MinNLCCreate() | FAILS works |
//| Here "FAILS" denotes inappropriate combinations of optimizer |
//| creation function and MinNLCOptimize() version. Attempts to     |
//| use such combination will lead to exception. Either you did     |
//| not pass gradient when it WAS needed or you passed gradient when |
//| it was NOT needed. |
//+------------------------------------------------------------------+
void CAlglib::MinNLCOptimize(CMinNLCState &state,
                             CNDimensional_FVec &fvec,
                             CNDimensional_Rep &rep,CObject &obj)
  {
//--- the function-vector callback is mandatory for this overload
   if(!CAp::Assert(GetPointer(fvec)!=NULL,"ALGLIB: error in 'minnlcoptimize()' (fvec is null)"))
      return;
//--- reverse-communication driver: serve solver requests until done
   while(MinNLCIteration(state))
     {
      //--- solver requests function vector fi[] at current point x
      if(state.m_needfi)
         fvec.FVec(state.m_x,state.m_fi,obj);
      else
         if(state.m_xupdated)
           {
            //--- new accepted point - forward to optional report callback
            if(GetPointer(rep)!=NULL)
               rep.Rep(state.m_x,state.m_f,obj);
           }
         else
           {
            //--- unexpected request: optimizer was created for a different
            //--- callback scheme (see table in the comment above)
            CAp::Assert(false,"ALGLIB: error in 'minnlcoptimize' (some derivatives were not provided?)");
            break;
           }
     }
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::MinNLCOptimize(CMinNLCState &state,CNDimensional_Jac &jac,CNDimensional_Rep &rep,CObject &obj)
  {
//--- the Jacobian callback is mandatory for this overload
   if(!CAp::Assert(GetPointer(jac)!=NULL,"ALGLIB: error in 'minnlcoptimize()' (jac is null)"))
      return;
//--- reverse-communication driver: serve solver requests until done
   while(MinNLCIteration(state))
     {
      //--- solver requests function vector fi[] and Jacobian at point x
      if(state.m_needfij)
         jac.Jac(state.m_x,state.m_fi,state.m_j,obj);
      else
         if(state.m_xupdated)
           {
            //--- new accepted point - forward to optional report callback
            if(GetPointer(rep)!=NULL)
               rep.Rep(state.m_x,state.m_f,obj);
           }
         else
           {
            //--- unexpected request: optimizer was created for a different
            //--- callback scheme (see table in the comment above)
            CAp::Assert(false,"ALGLIB: error in 'minnlcoptimize' (some derivatives were not provided?)");
            break;
           }
     }
  }
//+------------------------------------------------------------------+
//| This function activates/deactivates verification of the user- |
//| supplied analytic gradient/Jacobian. |
//| Upon activation of this option OptGuard integrity checker |
//| performs numerical differentiation of your target function |
//| (constraints) at the initial point (note: future versions may |
//| also perform check at the final point) and compares numerical |
//| gradient/Jacobian with analytic one provided by you. |
//| If difference is too large, an error flag is set and optimization|
//| session continues. After optimization session is over, you can |
//| retrieve the report which stores both gradients/Jacobians, and |
//| specific components highlighted as suspicious by the OptGuard. |
//| The primary OptGuard report can be retrieved with |
//| MinNLCOptGuardResults(). |
//| IMPORTANT: gradient check is a high-overhead option which will |
//| cost you about 3*N additional function evaluations. |
//| In many cases it may cost as much as the rest of the |
//| optimization session. |
//| YOU SHOULD NOT USE IT IN THE PRODUCTION CODE UNLESS YOU WANT TO |
//| CHECK DERIVATIVES PROVIDED BY SOME THIRD PARTY. |
//| NOTE: unlike previous incarnation of the gradient checking code, |
//| OptGuard does NOT interrupt optimization even if it |
//| discovers bad gradient. |
//| INPUT PARAMETERS: |
//| State - structure used to store algorithm State |
//| TestStep - verification step used for numerical |
//| differentiation: |
//| * TestStep = 0 turns verification off |
//| * TestStep > 0 activates verification |
//| You should carefully choose TestStep. Value |
//| which is too large (so large that function |
//| behavior is non- cubic at this scale) will |
//| lead to false alarms. Too short step will |
//| result in rounding errors dominating numerical |
//| derivative. |
//| You may use different step for different |
//| parameters by means of setting scale with |
//| MinNLCSetScale(). |
//| === EXPLANATION ================================================ |
//| In order to verify gradient algorithm performs following steps: |
//| * two trial steps are made to |
//| X[i] - TestStep * S[i] and X[i] + TestStep * S[i], |
//| where X[i] is i-th component of the initial point and S[i] |
//| is a scale of i-th parameter |
//| * F(X) is evaluated at these trial points |
//| * we perform one more evaluation in the middle point of the |
//| interval |
//| * we build cubic model using function values and derivatives |
//| at trial points and we compare its prediction with actual |
//| value in the middle point |
//+------------------------------------------------------------------+
void CAlglib::MinNLCOptGuardGradient(CMinNLCState &state,double teststep)
  {
//--- thin wrapper: enable (teststep>0) or disable (teststep=0) OptGuard
//--- numerical verification of the user-supplied gradient/Jacobian;
//--- delegates to the static implementation in CMinNLC
   CMinNLC::MinNLCOptGuardGradient(state,teststep);
  }
//+------------------------------------------------------------------+
//| This function activates/deactivates nonsmoothness monitoring |
//| option of the OptGuard integrity checker. Smoothness monitor |
//| silently observes solution process and tries to detect ill-posed |
//| problems, i.e. ones with: |
//| a) discontinuous target function(non-C0) and/or constraints |
//| b) nonsmooth target function(non-C1) and/or constraints |
//| Smoothness monitoring does NOT interrupt optimization even if it |
//| suspects that your problem is nonsmooth. It just sets |
//| corresponding flags in the OptGuard report which can be retrieved|
//| after optimization is over. |
//| Smoothness monitoring is a moderate overhead option which often |
//| adds less than 1% to the optimizer running time. Thus, you can |
//| use it even for large scale problems. |
//| NOTE: OptGuard does NOT guarantee that it will always detect |
//| C0/C1 continuity violations. |
//| First, minor errors are hard to catch-say, a 0.0001 difference |
//| in the model values at two sides of the gap may be due to |
//| discontinuity of the model - or simply because the model has |
//| changed. |
//| Second, C1-violations are especially difficult to detect in a |
//| noninvasive way. The optimizer usually performs very short steps |
//| near the nonsmoothness, and differentiation usually introduces |
//| a lot of numerical noise. It is hard to tell whether some tiny |
//| discontinuity in the slope is due to real nonsmoothness or just |
//| due to numerical noise alone. |
//| Our top priority was to avoid false positives, so in some rare |
//| cases minor errors may go unnoticed (however, in most cases      |
//| they can be spotted with restart from different initial point). |
//| INPUT PARAMETERS: |
//| State - algorithm State |
//| level - monitoring level: |
//| * 0 - monitoring is disabled |
//| * 1 - noninvasive low - overhead monitoring; |
//| function values and/or gradients are |
//| recorded, but OptGuard does not try to |
//| perform additional evaluations in order |
//| to get more information about suspicious |
//| locations. |
//| This kind of monitoring does not work well with SQP because SQP |
//| solver needs just 1-2 function evaluations per step, which is not|
//| enough for OptGuard to make any conclusions. |
//| === EXPLANATION ================================================ |
//| One major source of headache during optimization is the |
//| possibility of the coding errors in the target function/ |
//| constraints (or their gradients). Such errors most often |
//| manifest themselves as discontinuity or nonsmoothness of the |
//| target/constraints. |
//| Another frequent situation is when you try to optimize something |
//| involving lots of min() and max() operations, i.e. nonsmooth |
//| target. Although not a coding error, it is nonsmoothness anyway -|
//| and smooth optimizers usually stop right after encountering |
//| nonsmoothness, well before reaching solution. |
//| OptGuard integrity checker helps you to catch such situations: |
//| it monitors function values/gradients being passed to the |
//| optimizer and tries to detect errors. Upon discovering a        |
//| of points it raises appropriate flag (and allows you to continue |
//| optimization). When optimization is done, you can study OptGuard |
//| result. |
//+------------------------------------------------------------------+
void CAlglib::MinNLCOptGuardSmoothness(CMinNLCState &state,int level)
  {
//--- thin wrapper: set OptGuard C0/C1 nonsmoothness monitoring level
//--- (0 = disabled, 1 = noninvasive monitoring); delegates to the
//--- static implementation in CMinNLC
   CMinNLC::MinNLCOptGuardSmoothness(state,level);
  }
//+------------------------------------------------------------------+
//| Results of OptGuard integrity check, should be called after |
//| optimization session is over. |
//| === PRIMARY REPORT ============================================= |
//| OptGuard performs several checks which are intended to catch |
//| common errors in the implementation of nonlinear function/ |
//| gradient: |
//| * incorrect analytic gradient |
//| * discontinuous (non-C0) target functions (constraints) |
//| * nonsmooth (non-C1) target functions (constraints) |
//| Each of these checks is activated with appropriate function: |
//| * MinNLCOptGuardGradient() for gradient verification |
//| * MinNLCOptGuardSmoothness() for C0/C1 checks |
//| Following flags are set when these errors are suspected: |
//| * rep.badgradsuspected, and additionally: |
//| * rep.badgradfidx for specific function (Jacobian row) |
//| suspected |
//| * rep.badgradvidx for specific variable (Jacobian column)|
//| suspected |
//| * rep.badgradxbase, a point where gradient/Jacobian is |
//| tested |
//| * rep.badgraduser, user-provided gradient/Jacobian |
//| * rep.badgradnum, reference gradient/Jacobian obtained |
//| via numerical differentiation |
//| * rep.nonc0suspected, and additionally: |
//| * rep.nonc0fidx - an index of specific function violating|
//| C0 continuity |
//| * rep.nonc1suspected, and additionally |
//| * rep.nonc1fidx - an index of specific function violating|
//| C1 continuity |
//| Here function index 0 means target function, index 1 or higher |
//| denotes nonlinear constraints. |
//| === ADDITIONAL REPORTS / LOGS ================================== |
//| Several different tests are performed to catch C0/C1 errors, you |
//| can find out specific test signaled error by looking to: |
//| * rep.nonc0test0positive, for non-C0 test #0 |
//| * rep.nonc1test0positive, for non-C1 test #0 |
//| * rep.nonc1test1positive, for non-C1 test #1 |
//| Additional information (including line search logs) can be |
//| obtained by means of: |
//| * MinNLCOptGuardNonC1Test0Results() |
//| * MinNLCOptGuardNonC1Test1Results() |
//| which return detailed error reports, specific points where |
//| discontinuities were found, and so on. |
//| ================================================================ |
//| INPUT PARAMETERS: |
//| State - algorithm State |
//| OUTPUT PARAMETERS: |
//| rep - generic OptGuard report; more detailed reports can|
//| be retrieved with other functions. |
//| NOTE: false negatives (nonsmooth problems are not identified |
//| as nonsmooth ones) are possible although unlikely. |
//| The reason is that you need to make several evaluations around |
//| nonsmoothness in order to accumulate enough information about |
//| function curvature. Say, if you start right from the nonsmooth |
//| point, optimizer simply won't get enough data to understand what |
//| is going wrong before it terminates due to abrupt changes in the |
//| derivative. It is also possible that "unlucky" step will move us |
//| to the termination too quickly. |
//| Our current approach is to have less than 0.1 % false negatives |
//| in our test examples (measured with multiple restarts from random|
//| points), and to have exactly 0 % false positives. |
//+------------------------------------------------------------------+
void CAlglib::MinNLCOptGuardResults(CMinNLCState &state,
                                    COptGuardReport &rep)
  {
//--- thin wrapper: retrieve the primary OptGuard integrity report
//--- after the optimization session; delegates to the static
//--- implementation in CMinNLC
   CMinNLC::MinNLCOptGuardResults(state,rep);
  }
//+------------------------------------------------------------------+
//| Detailed results of the OptGuard integrity check for |
//| nonsmoothness test #0 |
//| Nonsmoothness(non-C1) test #0 studies function values (not |
//| gradient!) obtained during line searches and monitors behavior |
//| of the directional derivative estimate. |
//| This test is less powerful than test #1, but it does not depend |
//| on the gradient values and thus it is more robust against |
//| artifacts introduced by numerical differentiation. |
//| Two reports are returned: |
//| *a "strongest" one, corresponding to line search which had |
//| highest value of the nonsmoothness indicator |
//| *a "longest" one, corresponding to line search which had more |
//| function evaluations, and thus is more detailed |
//| In both cases following fields are returned: |
//| * positive - is TRUE when test flagged suspicious point; |
//| FALSE if test did not notice anything (in the |
//| latter cases fields below are empty). |
//| * fidx-is an index of the function (0 for target function, 1 or|
//| higher for nonlinear constraints) which is |
//| suspected of being "non-C1" |
//| * x0[], d[] - arrays of length N which store initial point and |
//| direction for line search (d[] can be normalized, |
//| but does not have to) |
//| * stp[], f[] - arrays of length CNT which store step lengths |
//| and function values at these points; f[i] is |
//| evaluated in x0 + stp[i]*d. |
//| * stpidxa, stpidxb - we suspect that function violates C1 |
//| continuity between steps #stpidxa and #stpidxb |
//| (usually we have stpidxb = stpidxa + 3, with most |
//| likely position of the violation between |
//| stpidxa + 1 and stpidxa + 2. |
//| ================================================================ |
//| = SHORTLY SPEAKING: build a 2D plot of(stp, f) and look at it - |
//| = you will see where C1 continuity is violated.|
//| ================================================================ |
//| INPUT PARAMETERS: |
//| State - algorithm State |
//| OUTPUT PARAMETERS: |
//| strrep - C1 test #0 "strong" report |
//| lngrep - C1 test #0 "long" report |
//+------------------------------------------------------------------+
void CAlglib::MinNLCOptGuardNonC1Test0Results(CMinNLCState &state,
                                              COptGuardNonC1Test0Report &strrep,
                                              COptGuardNonC1Test0Report &lngrep)
  {
//--- thin wrapper: fetch "strong" and "long" reports of non-C1 test #0
//--- (function-value based test); delegates to the static
//--- implementation in CMinNLC
   CMinNLC::MinNLCOptGuardNonC1Test0Results(state,strrep,lngrep);
  }
//+------------------------------------------------------------------+
//| Detailed results of the OptGuard integrity check for |
//| nonsmoothness test #1 |
//| Nonsmoothness(non-C1) test #1 studies individual components of |
//| the gradient computed during line search. |
//| When precise analytic gradient is provided this test is more |
//| powerful than test #0 which works with function values and |
//| ignores user-provided gradient. However, test #0 becomes more |
//| powerful when numerical differentiation is employed (in such |
//| cases test #1 detects higher levels of numerical noise and |
//| becomes too conservative). |
//| This test also tells specific components of the gradient which |
//| violate C1 continuity, which makes it more informative than #0, |
//| which just tells that continuity is violated. |
//| Two reports are returned: |
//| *a "strongest" one, corresponding to line search which had |
//| highest value of the nonsmoothness indicator |
//| *a "longest" one, corresponding to line search which had more |
//| function evaluations, and thus is more detailed |
//| In both cases following fields are returned: |
//| * positive - is TRUE when test flagged suspicious point; FALSE |
//| if test did not notice anything(in the latter cases fields |
//| below are empty). |
//| * fidx-is an index of the function(0 for target function, 1 or |
//| higher for nonlinear constraints) which is suspected of |
//| being "non-C1" |
//| * vidx - is an index of the variable in [0, N) with nonsmooth |
//| derivative |
//| * x0[], d[] - arrays of length N which store initial point and |
//| direction for line search(d[] can be normalized, but does not|
//| have to) |
//| * stp[], g[]-arrays of length CNT which store step lengths and |
//| gradient values at these points; g[i] is evaluated in |
//| x0 + stp[i]*d and contains vidx-th component of the gradient.|
//| * stpidxa, stpidxb - we suspect that function violates C1 |
//| continuity between steps #stpidxa and #stpidxb (usually we |
//| have stpidxb = stpidxa + 3, with most likely position of the |
//| violation between stpidxa + 1 and stpidxa + 2. |
//| ================================================================ |
//| = SHORTLY SPEAKING: build a 2D plot of (stp, f) and look at it - |
//| = you will see where C1 continuity is violated.|
//| ================================================================ |
//| INPUT PARAMETERS: |
//| State - algorithm State |
//| OUTPUT PARAMETERS: |
//| strrep - C1 test #1 "strong" report |
//| lngrep - C1 test #1 "long" report |
//+------------------------------------------------------------------+
void CAlglib::MinNLCOptGuardNonC1Test1Results(CMinNLCState &state,
                                              COptGuardNonC1Test1Report &strrep,
                                              COptGuardNonC1Test1Report &lngrep)
  {
//--- thin wrapper: fetch "strong" and "long" reports of non-C1 test #1
//--- (gradient-component based test); delegates to the static
//--- implementation in CMinNLC
   CMinNLC::MinNLCOptGuardNonC1Test1Results(state,strrep,lngrep);
  }
//+------------------------------------------------------------------+
//| MinNLC results: the solution found, completion codes and |
//| additional information. |
//| If you activated OptGuard integrity checking functionality and |
//| want to get OptGuard report, it can be retrieved with: |
//| * MinNLCOptGuardResults() - for a primary report about(a) |
//| suspected C0/C1 continuity |
//| violations and (b) errors in the |
//| analytic gradient. |
//| * MinNLCOptGuardNonC1Test0Results() - for C1 continuity |
//| violation test #0, detailed line |
//| search log |
//| * MinNLCOptGuardNonC1Test1Results() - for C1 continuity |
//| violation test #1, detailed line |
//| search log |
//| INPUT PARAMETERS: |
//| State - algorithm State |
//| OUTPUT PARAMETERS: |
//| X - array[0..N - 1], solution |
//| Rep - optimization report, contains information about |
//| completion code, constraint violation at the |
//| solution and so on. |
//| You should check rep.m_terminationtype in order to distinguish|
//| successful termination from unsuccessful one: |
//| === FAILURE CODES === |
//| * -8 internal integrity control detected infinite or NAN |
//| values in function/gradient. Abnormal termination |
//| signalled. |
//| * -3 box constraints are infeasible. |
//| Note: infeasibility of non-box constraints does NOT trigger |
//| emergency completion; you have to examine rep.m_bcerr/ |
//| rep.m_lcerr/rep.m_nlcerr to detect possibly inconsistent |
//| constraints. |
//| === SUCCESS CODES === |
//| * 2 scaled step is no more than EpsX. |
//| * 5 MaxIts steps were taken. |
//| * 8 user requested algorithm termination via |
//| MinNLCRequestTermination(), last accepted point is |
//| returned. |
//| More information about fields of this structure can be found in |
//| the comments on CMinNLCReport datatype. |
//+------------------------------------------------------------------+
void CAlglib::MinNLCResults(CMinNLCState &state,CRowDouble &x,
                            CMinNLCReport &rep)
  {
//--- thin wrapper: copy the solution vector and the optimization
//--- report out of the optimizer state; delegates to the static
//--- implementation in CMinNLC
   CMinNLC::MinNLCResults(state,x,rep);
  }
//+------------------------------------------------------------------+
//| NLC results |
//| Buffered implementation of MinNLCResults() which uses pre- |
//| allocated buffer to store X[]. If buffer size is too small, it |
//| resizes buffer. It is intended to be used in the inner cycles of |
//| performance critical algorithms where array reallocation penalty |
//| is too large to be ignored. |
//+------------------------------------------------------------------+
void CAlglib::MinNLCResultsBuf(CMinNLCState &state,
                               CRowDouble &x,
                               CMinNLCReport &rep)
  {
//--- thin wrapper: buffered variant of MinNLCResults() which reuses a
//--- preallocated x[] to avoid reallocation in performance-critical
//--- loops; delegates to the static implementation in CMinNLC
   CMinNLC::MinNLCResultsBuf(state,x,rep);
  }
//+------------------------------------------------------------------+
//| This subroutine submits request for termination of running |
//| optimizer. It should be called from user - supplied callback |
//| when user decides that it is time to "smoothly" terminate |
//| optimization process. As result, optimizer stops at point which |
//| was "current accepted" when termination request was submitted |
//| and returns error code 8(successful termination). |
//| INPUT PARAMETERS: |
//| State - optimizer structure |
//| NOTE: after request for termination optimizer may perform |
//| several additional calls to user-supplied callbacks. |
//| It does NOT guarantee to stop immediately - it just |
//| guarantees that these additional calls will be discarded |
//| later. |
//| NOTE: calling this function on optimizer which is NOT running |
//| will have no effect. |
//| NOTE: multiple calls to this function are possible. First call |
//| is counted, subsequent calls are silently ignored. |
//+------------------------------------------------------------------+
void CAlglib::MinNLCRequestTermination(CMinNLCState &state)
  {
//--- thin wrapper: submit a "smooth" termination request from within a
//--- user callback (solver returns code 8 at last accepted point);
//--- delegates to the static implementation in CMinNLC
   CMinNLC::MinNLCRequestTermination(state);
  }
//+------------------------------------------------------------------+
//| This subroutine restarts algorithm from new point. |
//| All optimization parameters (including constraints) are left |
//| unchanged. |
//| This function allows to solve multiple optimization problems |
//| (which must have same number of dimensions) without object |
//| reallocation penalty. |
//| INPUT PARAMETERS: |
//| State - structure previously allocated with MinNLCCreate |
//| call. |
//| X - new starting point. |
//+------------------------------------------------------------------+
void CAlglib::MinNLCRestartFrom(CMinNLCState &state,CRowDouble &x)
  {
//--- thin wrapper: restart an already-configured optimizer from a new
//--- starting point of the same dimension (constraints and settings
//--- are kept); delegates to the static implementation in CMinNLC
   CMinNLC::MinNLCRestartFrom(state,x);
  }
//+------------------------------------------------------------------+
//| NONSMOOTH NONCONVEX OPTIMIZATION |
//| SUBJECT TO BOX / LINEAR / NONLINEAR - NONSMOOTH CONSTRAINTS |
//| DESCRIPTION: |
//| The subroutine minimizes function F(x) of N arguments subject to|
//| any combination of: |
//| * bound constraints |
//| * linear inequality constraints |
//| * linear equality constraints |
//| * nonlinear equality constraints Gi(x) = 0 |
//| * nonlinear inequality constraints Hi(x) <= 0 |
//| IMPORTANT: see MinNSSetAlgoAGS for important information on |
//| performance restrictions of AGS solver. |
//| REQUIREMENTS: |
//| * starting point X0 must be feasible or not too far away from |
//| the feasible set |
//| * F(), G(), H() are continuous, locally Lipschitz and |
//| continuously (but not necessarily twice) differentiable in an|
//| open dense subset of R^N. |
//| Functions F(), G() and H() may be nonsmooth and non-convex. |
//| Informally speaking, it means that functions are composed of |
//| large differentiable "patches" with nonsmoothness having place |
//| only at the boundaries between these "patches". Most real-life |
//| nonsmooth functions satisfy these requirements. Say, anything |
//| which involves finite number of abs(), min() and max() is very |
//| likely to pass the test. Say, it is possible to optimize anything|
//| of the following: |
//| * f = abs(x0) + 2 * abs(x1) |
//| * f = max(x0, x1) |
//| * f = sin(max(x0, x1) + abs(x2)) |
//| * for nonlinearly constrained problems: F() must be bounded |
//| from below without nonlinear constraints (this requirement |
//| is due to the fact that, contrary to box and linear |
//| constraints, nonlinear ones require special handling). |
//| * user must provide function value and gradient for F(), H(), |
//| G() at all points where function / gradient can be calculated|
//| If optimizer requires value exactly at the boundary between |
//| "patches"(say, at x = 0 for f = abs(x)), where gradient is |
//| not defined, user may resolve tie arbitrarily (in our case - |
//| return +1 or -1 at its discretion). |
//| * NS solver supports numerical differentiation, i.e. it may |
//| differentiate your function for you, but it results in 2N |
//| increase of function evaluations. Not recommended unless you |
//| solve really small problems. See MinNSCreateF() for more |
//| information on this functionality. |
//| USAGE: |
//| 1. User initializes algorithm State with MinNSCreate() call and |
//| chooses what NLC solver to use. There is some solver which is |
//| used by default, with default Settings, but you should NOT |
//| rely on default choice. It may change in future releases of |
//| ALGLIB without notice, and no one can guarantee that new |
//| solver will be able to solve your problem with default |
//| Settings. |
//| From the other side, if you choose solver explicitly, you can be |
//| pretty sure that it will work with new ALGLIB releases. |
//| In the current release following solvers can be used: |
//| * AGS solver (activated with MinNSSetAlgoAGS() function) |
//| 2. User adds boundary and/or linear and/or nonlinear constraints |
//| by means of calling one of the following functions: |
//| a) MinNSSetBC() for boundary constraints |
//| b) MinNSSetLC() for linear constraints |
//| c) MinNSSetNLC() for nonlinear constraints |
//| You may combine(a), (b) and (c) in one optimization problem. |
//| 3. User sets scale of the variables with MinNSSetScale() function|
//| It is VERY important to set scale of the variables, because |
//| nonlinearly constrained problems are hard to solve when |
//| variables are badly scaled. |
//| 4. User sets stopping conditions with MinNSSetCond(). |
//| 5. Finally, user calls MinNSOptimize() function which takes |
//| algorithm State and pointer (delegate, etc) to callback |
//| function which calculates F / G / H. |
//| 6. User calls MinNSResults() to get solution |
//| 7. Optionally user may call MinNSRestartFrom() to solve another |
//| problem with same N but another starting point. |
//| MinNSRestartFrom() allows to reuse already initialized |
//| structure. |
//| INPUT PARAMETERS: |
//| N - problem dimension, N > 0: |
//| * if given, only leading N elements of X are used |
//| * if not given, automatically determined from size |
//| of X |
//| X - starting point, array[N]: |
//| * it is better to set X to a feasible point |
//| * but X can be infeasible, in which case algorithm |
//| will try to find feasible point first, using X as|
//| initial approximation. |
//| OUTPUT PARAMETERS: |
//| State - structure stores algorithm State |
//| NOTE: MinNSCreateF() function may be used if you do not have |
//| analytic gradient. This function creates solver which |
//| uses numerical differentiation with user-specified step. |
//+------------------------------------------------------------------+
void CAlglib::MinNSCreate(int n,CRowDouble &x,CMinNSState &state)
  {
//--- thin wrapper: forward problem dimension, starting point and state to the CMinNS computational core
   CMinNS::MinNSCreate(n,x,state);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::MinNSCreate(CRowDouble &x,CMinNSState &state)
  {
//--- overload with automatic dimension detection: N is taken
//--- directly from the length of the starting point X
   CMinNS::MinNSCreate(CAp::Len(x),x,state);
  }
//+------------------------------------------------------------------+
//| Version of MinNSCreate() which uses numerical differentiation.   |
//| I.e., you do not have to calculate derivatives yourself. However,|
//| this version needs 2N times more function evaluations. |
//| 2-point differentiation formula is used, because more precise |
//| 4-point formula is unstable when used on non - smooth functions. |
//| INPUT PARAMETERS: |
//| N - problem dimension, N > 0: |
//| * if given, only leading N elements of X are used |
//| * if not given, automatically determined from size |
//| of X |
//| X - starting point, array[N]: |
//| * it is better to set X to a feasible point |
//| * but X can be infeasible, in which case algorithm |
//| will try to find feasible point first, using X as|
//| initial approximation. |
//| DiffStep - differentiation step, DiffStep > 0. Algorithm |
//| performs numerical differentiation with step for|
//| I-th variable being equal to DiffStep*S[I] (here |
//| S[] is a scale vector, set by MinNSSetScale() |
//| function). Do not use too small steps, because |
//| it may lead to catastrophic cancellation during |
//| intermediate calculations. |
//| OUTPUT PARAMETERS: |
//| State - structure stores algorithm State |
//+------------------------------------------------------------------+
void CAlglib::MinNSCreateF(int n,CRowDouble &x,double diffstep,
                           CMinNSState &state)
  {
//--- thin wrapper: create numerical-differentiation solver (step = diffstep*S[i]) via the CMinNS core
   CMinNS::MinNSCreateF(n,x,diffstep,state);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::MinNSCreateF(CRowDouble &x,double diffstep,CMinNSState &state)
  {
//--- overload with automatic dimension detection: N is inferred
//--- from the length of the starting point X
   CMinNS::MinNSCreateF(CAp::Len(x),x,diffstep,state);
  }
//+------------------------------------------------------------------+
//| This function sets boundary constraints. |
//| Boundary constraints are inactive by default (after initial |
//| creation). They are preserved after algorithm restart with |
//| MinNSRestartFrom(). |
//| INPUT PARAMETERS: |
//| State - structure stores algorithm State |
//| BndL - lower bounds, array[N]. If some (all) variables |
//| are unbounded, you may specify very small number |
//| or -INF. |
//| BndU - upper bounds, array[N]. If some (all) variables are|
//| unbounded, you may specify very large number |
//| or +INF. |
//| NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case |
//| I-th variable will be "frozen" at X[i]=BndL[i]=BndU[i]. |
//| NOTE 2: AGS solver has following useful properties: |
//| * bound constraints are always satisfied exactly |
//| * function is evaluated only INSIDE area specified by |
//| bound constraints, even when numerical |
//| differentiation is used(algorithm adjusts nodes |
//| according to boundary constraints) |
//+------------------------------------------------------------------+
void CAlglib::MinNSSetBC(CMinNSState &state,CRowDouble &bndl,CRowDouble &bndu)
  {
//--- thin wrapper: pass lower/upper bound arrays to the CMinNS core
   CMinNS::MinNSSetBC(state,bndl,bndu);
  }
//+------------------------------------------------------------------+
//| This function sets linear constraints. |
//| Linear constraints are inactive by default (after initial |
//| creation). They are preserved after algorithm restart with |
//| MinNSRestartFrom(). |
//| INPUT PARAMETERS: |
//| State - structure previously allocated with MinNSCreate() |
//| call. |
//| C - linear constraints, array[K, N + 1]. Each row of C |
//| represents one constraint, either equality or |
//| inequality(see below): |
//| * first N elements correspond to coefficients, |
//| * last element corresponds to the right part. |
//| All elements of C (including right part) must be |
//| finite. |
//| CT - type of constraints, array[K]: |
//| * if CT[i] > 0, then I-th constraint is |
//| C[i, *] * x >= C[i, n + 1] |
//| * if CT[i] = 0, then I-th constraint is |
//| C[i, *] * x = C[i, n + 1] |
//| * if CT[i] < 0, then I-th constraint is |
//| C[i, *] * x <= C[i, n + 1] |
//| K - number of equality / inequality constraints, K>=0: |
//| * if given, only leading K elements of C/CT are |
//| used |
//| * if not given, automatically determined from sizes|
//| of C/CT |
//| NOTE: linear (non-bound) constraints are satisfied only |
//| approximately: |
//| * there always exists some minor violation(about current |
//| sampling radius in magnitude during optimization, about|
//| EpsX in the solution) due to use of penalty method to |
//| handle constraints. |
//| * numerical differentiation, if used, may lead to |
//| function evaluations outside of the feasible area, |
//| because algorithm does NOT change numerical |
//| differentiation formula according to linear constraints|
//| If you want constraints to be satisfied exactly, try to |
//| reformulate your problem in such manner that all constraints will|
//| become boundary ones (this kind of constraints is always |
//| satisfied exactly, both in the final solution and in all |
//| intermediate points). |
//+------------------------------------------------------------------+
void CAlglib::MinNSSetLC(CMinNSState &state,CMatrixDouble &c,CRowInt &ct,int k)
  {
//--- thin wrapper: register K linear constraints (matrix C, types CT) with the CMinNS core
   CMinNS::MinNSSetLC(state,c,ct,k);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::MinNSSetLC(CMinNSState &state,CMatrixDouble &c,CRowInt &ct)
  {
//--- number of constraints is determined from the row count of C
   int rows=CAp::Rows(c);
//--- consistency check: CT must supply one type flag per constraint row
   if(!CAp::Assert(rows==CAp::Len(ct),
                   "Error while calling 'MinNSSetLC': looks like one of arguments has wrong size"))
      return;
//--- delegate with automatically determined K
   CMinNS::MinNSSetLC(state,c,ct,rows);
  }
//+------------------------------------------------------------------+
//| This function sets nonlinear constraints. |
//| In fact, this function sets NUMBER of nonlinear constraints. |
//| Constraints itself (constraint functions) are passed to |
//| MinNSOptimize() method. This method requires user-defined vector |
//| function F[] and its Jacobian J[], where: |
//| * first component of F[] and first row of Jacobian J[] |
//| correspond to function being minimized |
//| * next NLEC components of F[] (and rows of J) correspond to |
//| nonlinear equality constraints G_i(x) = 0 |
//| * next NLIC components of F[] (and rows of J) correspond to |
//| nonlinear inequality constraints H_i(x) <= 0 |
//| NOTE: you may combine nonlinear constraints with linear/boundary |
//| ones. If your problem has mixed constraints, you may |
//| explicitly specify some of them as linear ones. It may help|
//| optimizer to handle them more efficiently. |
//| INPUT PARAMETERS: |
//| State - structure previously allocated with |
//| MinNSCreate() call. |
//| NLEC - number of Non-Linear Equality Constraints(NLEC),|
//| >= 0 |
//|     NLIC    - number of Non-Linear Inequality Constraints       |
//|               (NLIC) >= 0                                        |
//| NOTE 1: nonlinear constraints are satisfied only approximately! |
//| It is possible that algorithm will evaluate function |
//| outside of the feasible area! |
//| NOTE 2: algorithm scales variables according to scale specified |
//| by MinNSSetScale() function, so it can handle problems |
//| with badly scaled variables (as long as we KNOW their |
//| scales). |
//| However, there is no way to automatically scale nonlinear |
//| constraints Gi(x) and Hi(x). Inappropriate scaling of Gi/Hi may |
//| ruin convergence. Solving problem with constraint "1000*G0(x)=0" |
//| is NOT same as solving it with constraint "0.001*G0(x)=0". |
//| It means that YOU are the one who is responsible for correct |
//| scaling of nonlinear constraints Gi(x) and Hi(x). We recommend |
//| you to scale nonlinear constraints in such way that I-th |
//| component of dG/dX (or dH/dx) has approximately unit magnitude |
//| (for problems with unit scale) or has magnitude approximately |
//| equal to 1/S[i] (where S is a scale set by MinNSSetScale() |
//| function). |
//| NOTE 3: nonlinear constraints are always hard to handle, no |
//| matter what algorithm you try to use. Even basic box/ |
//| linear constraints modify function curvature by adding |
//| valleys and ridges. However, nonlinear constraints add |
//| valleys which are very hard to follow due to their |
//| "curved" nature. |
//| It means that optimization with single nonlinear constraint may |
//| be significantly slower than optimization with multiple linear |
//| ones. It is normal situation, and we recommend you to carefully |
//| choose Rho parameter of MinNSSetAlgoAGS(), because too large |
//| value may slow down convergence. |
//+------------------------------------------------------------------+
void CAlglib::MinNSSetNLC(CMinNSState &state,int nlec,int nlic)
  {
//--- thin wrapper: set counts of nonlinear equality (nlec) and inequality (nlic) constraints
   CMinNS::MinNSSetNLC(state,nlec,nlic);
  }
//+------------------------------------------------------------------+
//| This function sets stopping conditions for iterations of |
//| optimizer. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm State |
//| EpsX - >= 0, The AGS solver finishes its work if on |
//| k+1-th iteration sampling radius decreases |
//| below EpsX. |
//| MaxIts - maximum number of iterations. If MaxIts = 0, |
//| the number of iterations is unlimited. |
//| Passing EpsX = 0 and MaxIts = 0 (simultaneously) will lead to |
//| automatic stopping criterion selection. We do not recommend you |
//| to rely on default choice in production code. |
//+------------------------------------------------------------------+
void CAlglib::MinNSSetCond(CMinNSState &state,double epsx,int maxits)
  {
//--- thin wrapper: set stopping criteria (sampling-radius tolerance and iteration limit)
   CMinNS::MinNSSetCond(state,epsx,maxits);
  }
//+------------------------------------------------------------------+
//| This function sets scaling coefficients for NLC optimizer. |
//| ALGLIB optimizers use scaling matrices to test stopping |
//| conditions (step size and gradient are scaled before comparison |
//| with tolerances). Scale of the I-th variable is a translation |
//| invariant measure of: |
//| a) "how large" the variable is |
//| b) how large the step should be to make significant changes in |
//| the function |
//| Scaling is also used by finite difference variant of the |
//| optimizer - step along I-th axis is equal to DiffStep*S[I]. |
//| INPUT PARAMETERS: |
//| State - structure stores algorithm State |
//| S - array[N], non-zero scaling coefficients S[i] |
//| may be negative, sign doesn't matter. |
//+------------------------------------------------------------------+
void CAlglib::MinNSSetScale(CMinNSState &state,CRowDouble &s)
  {
//--- thin wrapper: pass per-variable scaling coefficients to the CMinNS core
   CMinNS::MinNSSetScale(state,s);
  }
//+------------------------------------------------------------------+
//| This function tells MinNS unit to use AGS (adaptive gradient |
//| sampling) algorithm for nonsmooth constrained optimization. This |
//| algorithm is a slight modification of one described in "An |
//| Adaptive Gradient Sampling Algorithm for Nonsmooth Optimization" |
//| by Frank E. Curtisy and Xiaocun Quez. |
//| This optimizer has following benefits and drawbacks: |
//| + robustness; it can be used with nonsmooth and nonconvex |
//| functions. |
//| + relatively easy tuning; most of the metaparameters are easy |
//| to select. |
//| - it has convergence of steepest descent, slower than CG/LBFGS.|
//| - each iteration involves evaluation of ~2N gradient values and|
//| solution of 2Nx2N quadratic programming problem, which limits|
//| applicability of algorithm by small-scale problems (up |
//| to 50 - 100). |
//| IMPORTANT: this algorithm has convergence guarantees, i.e. it |
//| will steadily move towards some stationary point of |
//| the function. |
//| However, "stationary point" does not always mean "solution". |
//| Nonsmooth problems often have "flat spots", i.e. areas where |
//| function do not change at all. Such "flat spots" are stationary |
//| points by definition, and algorithm may be caught here. |
//| Nonsmooth CONVEX tasks are not prone to this problem. Say, if |
//| your function has form f() = MAX(f0, f1, ...), and f_i are convex|
//| then f() is convex too and you have guaranteed convergence to |
//| solution. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm State |
//| Radius - initial sampling radius, >= 0. Internally |
//| multiplied by vector of per-variable scales |
//| specified by MinNSSetScale(). |
//| You should select relatively large sampling |
//| radius, roughly proportional to scaled length |
//| of the first steps of the algorithm. Something |
//| close to 0.1 in magnitude should be good for |
//| most problems. |
//| AGS solver can automatically decrease radius, |
//| so too large radius is not a problem (assuming |
//| that you won't choose so large radius that |
//| algorithm will sample function in too far away |
//| points, where gradient value is irrelevant). |
//| Too small radius won't cause algorithm to fail, |
//| but it may slow down algorithm (it may have to |
//| perform too short steps). |
//| Penalty - penalty coefficient for nonlinear constraints: |
//| * for problem with nonlinear constraints should |
//| be some problem - specific positive value, |
//| large enough that penalty term changes shape |
//| of the function. Starting from some problem - |
//| specific value penalty coefficient becomes |
//| large enough to exactly enforce nonlinear |
//| constraints; larger values do not improve |
//| precision. Increasing it too much may slow |
//| down convergence, so you should choose it |
//| carefully. |
//| * can be zero for problems WITHOUT nonlinear |
//| constraints (i.e. for unconstrained ones or |
//| ones with just box or linear constraints) |
//| * if you specify zero value for problem with at |
//| least one nonlinear constraint, algorithm will|
//| terminate with error code - 1. |
//| ALGORITHM OUTLINE |
//| The very basic outline of unconstrained AGS algorithm is given |
//| below: |
//| 0. If sampling radius is below EpsX or we performed more then |
//| MaxIts iterations - STOP. |
//| 1. sample O(N) gradient values at random locations around current|
//| point; informally speaking, this sample is an implicit |
//| piecewise linear model of the function, although algorithm |
//| formulation does not mention that explicitly |
//| 2. solve quadratic programming problem in order to find descent |
//| direction |
//| 3. if QP solver tells us that we are near solution, decrease |
//| sampling radius and move to(0) |
//| 4. perform backtracking line search |
//| 5. after moving to new point, goto(0) |
//| Constraint handling details: |
//| * box constraints are handled exactly by algorithm |
//| * linear/nonlinear constraints are handled by adding L1 |
//| penalty. Because our solver can handle nonsmoothness, we can |
//| use L1 penalty function, which is an exact one (i.e. exact |
//| solution is returned under such penalty). |
//| * penalty coefficient for linear constraints is chosen |
//| automatically; however, penalty coefficient for nonlinear |
//| constraints must be specified by user. |
//| ===== TRACING AGS SOLVER ======================================= |
//| AGS solver supports advanced tracing capabilities. You can trace |
//| algorithm output by specifying following trace symbols (case- |
//| insensitive) by means of trace_file() call: |
//| * 'AGS' - for basic trace of algorithm steps and |
//| decisions. Only short scalars (function |
//| values and deltas) are printed. N-dimensional|
//| quantities like search directions are NOT |
//| printed. |
//| * 'AGS.DETAILED' - for output of points being visited and |
//| search directions. This symbol also |
//| implicitly defines 'AGS'. You can control |
//| output format by additionally specifying: |
//| * nothing to output in 6-digit exponential |
//| format |
//| * 'PREC.E15' to output in 15-digit |
//| exponential format |
//| * 'PREC.F6' to output in 6-digit fixed-point |
//| format |
//| * 'AGS.DETAILED.SAMPLE' - for output of points being visited, |
//| search directions and gradient sample. May |
//| take a LOT of space, do not use it on |
//| problems with more that several tens of vars.|
//| This symbol also implicitly defines 'AGS' and|
//| 'AGS.DETAILED'. |
//| By default trace is disabled and adds no overhead to the |
//| optimization process. However, specifying any of the symbols adds|
//| some formatting and output-related overhead. |
//| You may specify multiple symbols by separating them with commas: |
//| > |
//| >CAlglib::Trace_File("AGS,PREC.F6","path/to/trace.log") |
//| > |
//+------------------------------------------------------------------+
void CAlglib::MinNSSetAlgoAGS(CMinNSState &state,double radius,double penalty)
  {
//--- thin wrapper: select the AGS solver with given sampling radius and nonlinear-constraint penalty
   CMinNS::MinNSSetAlgoAGS(state,radius,penalty);
  }
//+------------------------------------------------------------------+
//| This function turns on / off reporting. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm State |
//| NeedXRep - whether iteration reports are needed or not |
//| If NeedXRep is True, algorithm will call rep() callback function |
//| if it is provided to MinNSOptimize(). |
//+------------------------------------------------------------------+
void CAlglib::MinNSSetXRep(CMinNSState &state,bool needxrep)
  {
//--- thin wrapper: enable/disable per-iteration reporting in the CMinNS core
   CMinNS::MinNSSetXRep(state,needxrep);
  }
//+------------------------------------------------------------------+
//| This subroutine submits request for termination of running |
//| optimizer. It should be called from user-supplied callback when |
//| user decides that it is time to "smoothly" terminate optimization|
//| process. As result, optimizer stops at point which was "current |
//| accepted" when termination request was submitted and returns |
//| error code 8 (successful termination). |
//| INPUT PARAMETERS: |
//| State - optimizer structure |
//| NOTE: after request for termination optimizer may perform several|
//| additional calls to user-supplied callbacks. It does NOT |
//| guarantee to stop immediately - it just guarantees that |
//| these additional calls will be discarded later. |
//| NOTE: calling this function on optimizer which is NOT running |
//| will have no effect. |
//| NOTE: multiple calls to this function are possible. First call is|
//| counted, subsequent calls are silently ignored. |
//+------------------------------------------------------------------+
void CAlglib::MinNSRequestTermination(CMinNSState &state)
  {
//--- thin wrapper: forward graceful-stop request to the CMinNS core
   CMinNS::MinNSRequestTermination(state);
  }
//+------------------------------------------------------------------+
//| This function provides reverse communication interface |
//| Reverse communication interface is not documented or recommended |
//| to use. See below for functions which provide better documented |
//| API |
//+------------------------------------------------------------------+
bool CAlglib::MinNSIteration(CMinNSState &state)
  {
//--- thin wrapper: advance reverse-communication state machine one step;
//--- returns false when the solver has finished
   return(CMinNS::MinNSIteration(state));
  }
//+------------------------------------------------------------------+
//| This family of functions is used to launch iterations of         |
//| nonlinear optimizer                                              |
//| These functions accept following parameters: |
//| fvec - callback which calculates function vector fi[] |
//| at given point x |
//| jac - callback which calculates function vector fi[] |
//| and Jacobian jac at given point x |
//| rep - optional callback which is called after each |
//| iteration can be null |
//| obj - optional object which is passed to func/grad/ |
//| hess/jac/rep can be null |
//| NOTES: |
//| 1. This function has two different implementations: one which |
//| uses exact (analytical) user-supplied Jacobian, and one which|
//| uses only function vector and numerically differentiates |
//| function in order to obtain gradient. |
//| Depending on the specific function used to create |
//| optimizer object you should choose appropriate variant of |
//| MinNSOptimize() - one which accepts function AND Jacobian or|
//| one which accepts ONLY function. |
//| Be careful to choose variant of MinNSOptimize() which |
//| corresponds to your optimization scheme! Table below lists |
//| different combinations of callback (function/gradient) passed |
//| to MinNSOptimize() and specific function used to create |
//| optimizer. |
//|              | USER PASSED TO MinNSOptimize()                    |
//| CREATED WITH | function only | function and gradient             |
//| ------------------------------------------------------------     |
//| MinNSCreateF() |     works                 FAILS                 |
//| MinNSCreate()  |     FAILS                 works                 |
//| Here "FAILS" denotes inappropriate combinations of optimizer     |
//| creation function and MinNSOptimize() version. Attempts to       |
//| use such combination will lead to exception. Either you did      |
//| not pass gradient when it WAS needed or you passed gradient when |
//| it was NOT needed.                                               |
//+------------------------------------------------------------------+
void CAlglib::MinNSOptimize(CMinNSState &state,
                            CNDimensional_FVec &fvec,
                            CNDimensional_Rep &rep,CObject &obj)
  {
//--- the function-vector callback is mandatory
   if(!CAp::Assert(GetPointer(fvec)!=NULL,"ALGLIB: error in 'MinNSOptimize()' (fvec is null)"))
      return;
//--- reverse-communication loop: serve solver requests until it stops
   while(MinNSIteration(state))
     {
      if(state.m_needfi)
        {
         //--- solver asks for function vector at current point
         fvec.FVec(state.m_x,state.m_fi,obj);
        }
      else
         if(state.m_xupdated)
           {
            //--- iteration report, delivered only when a callback was supplied
            if(GetPointer(rep)!=NULL)
               rep.Rep(state.m_x,state.m_f,obj);
           }
         else
           {
            //--- unexpected request: wrong optimizer/callback combination
            CAp::Assert(false,"ALGLIB: error in 'MinNSOptimize' (some derivatives were not provided?)");
            break;
           }
     }
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::MinNSOptimize(CMinNSState &state,
                            CNDimensional_Jac &jac,
                            CNDimensional_Rep &rep,
                            CObject &obj)
  {
//--- the Jacobian callback is mandatory
   if(!CAp::Assert(GetPointer(jac)!=NULL,"ALGLIB: error in 'MinNSOptimize()' (jac is null)"))
      return;
//--- reverse-communication loop: serve solver requests until it stops
   while(MinNSIteration(state))
     {
      if(state.m_needfij)
        {
         //--- solver asks for function vector and Jacobian at current point
         jac.Jac(state.m_x,state.m_fi,state.m_j,obj);
        }
      else
         if(state.m_xupdated)
           {
            //--- iteration report, delivered only when a callback was supplied
            if(GetPointer(rep)!=NULL)
               rep.Rep(state.m_x,state.m_f,obj);
           }
         else
           {
            //--- unexpected request: wrong optimizer/callback combination
            CAp::Assert(false,"ALGLIB: error in 'MinNSOptimize' (some derivatives were not provided?)");
            break;
           }
     }
  }
//+------------------------------------------------------------------+
//| MinNS results |
//| INPUT PARAMETERS: |
//| State - algorithm State |
//| OUTPUT PARAMETERS: |
//| X - array[0..N - 1], solution |
//| Rep - optimization report. You should check |
//| Rep.TerminationType in order to distinguish |
//| successful termination from unsuccessful one: |
//| * -8 internal integrity control detected infinite or NAN |
//| values in function/gradient. Abnormal termination |
//| signalled. |
//| * -3 box constraints are inconsistent |
//| * -1 inconsistent parameters were passed: |
//| * penalty parameter for MinNSSetAlgoAGS() is zero, but|
//| we have nonlinear constraints set by MinNSSetNLC() |
//| * 2 sampling radius decreased below epsx |
//| * 7 stopping conditions are too stringent, further |
//| improvement is impossible, X contains best point found|
//| so far. |
//| * 8 User requested termination via |
//| MinNSRequestTermination() |
//+------------------------------------------------------------------+
void CAlglib::MinNSResults(CMinNSState &state,
                           CRowDouble &x,CMinNSReport &rep)
  {
//--- thin wrapper: fetch solution vector and optimization report from the CMinNS core
   CMinNS::MinNSResults(state,x,rep);
  }
//+------------------------------------------------------------------+
//| Buffered implementation of MinNSResults() which uses pre- |
//| allocated buffer to store X[]. If buffer size is too small, it |
//| resizes buffer. It is intended to be used in the inner cycles of |
//| performance critical algorithms where array reallocation penalty |
//| is too large to be ignored. |
//+------------------------------------------------------------------+
void CAlglib::MinNSResultsBuf(CMinNSState &state,
                              CRowDouble &x,
                              CMinNSReport &rep)
  {
//--- thin wrapper: buffered variant of MinNSResults(), reuses caller-allocated X
   CMinNS::MinNSResultsBuf(state,x,rep);
  }
//+------------------------------------------------------------------+
//| This subroutine restarts algorithm from new point. |
//| All optimization parameters (including constraints) are left |
//| unchanged. |
//| This function allows to solve multiple optimization problems |
//| (which must have same number of dimensions) without object |
//| reallocation penalty. |
//| INPUT PARAMETERS: |
//| State - structure previously allocated with |
//| MinNSCreate() call. |
//| X - new starting point. |
//+------------------------------------------------------------------+
void CAlglib::MinNSRestartFrom(CMinNSState &state,CRowDouble &x)
  {
//--- thin wrapper: restart initialized solver from a new starting point X
   CMinNS::MinNSRestartFrom(state,x);
  }
//+------------------------------------------------------------------+
//| BOX CONSTRAINED OPTIMIZATION WITH FAST ACTIVATION OF MULTIPLE BOX|
//| CONSTRAINTS |
//| DESCRIPTION: |
//| The subroutine minimizes function F(x) of N arguments subject to |
//| box constraints (with some of box constraints actually being |
//| equality ones). |
//| This optimizer uses algorithm similar to that of MinBLEIC |
//| (optimizer with general linear constraints), but presence of |
//| box-only constraints allows us to use faster constraint |
//| activation strategies. On large-scale problems, with multiple |
//| constraints active at the solution, this optimizer can be several|
//| times faster than BLEIC. |
//| REQUIREMENTS: |
//| * user must provide function value and gradient |
//| * starting point X0 must be feasible or not too far away from |
//| the feasible set |
//| * grad(f) must be Lipschitz continuous on a level set: |
//| L = { x : f(x) <= f(x0) } |
//| * function must be defined everywhere on the feasible set F |
//| USAGE: |
//| Constrained optimization if far more complex than the |
//| unconstrained one. Here we give very brief outline of the BC |
//| optimizer. We strongly recommend you to read examples in the |
//| ALGLIB Reference Manual and to read ALGLIB User Guide on |
//| optimization, which is available at |
//| http://www.alglib.net/optimization/ |
//| 1. User initializes algorithm state with MinBCCreate() call |
//| 2. User adds box constraints by calling MinBCSetBC() function.   |
//| 3. User sets stopping conditions with MinBCSetCond(). |
//| 4. User calls MinBCOptimize() function which takes algorithm |
//| state and pointer (delegate, etc.) to callback function which |
//| calculates F / G. |
//| 5. User calls MinBCResults() to get solution |
//| 6. Optionally user may call MinBCRestartFrom() to solve another |
//| problem with same N but another starting point. |
//| MinBCRestartFrom() allows to reuse already initialized |
//| structure. |
//| INPUT PARAMETERS: |
//| N - problem dimension, N > 0: |
//| * if given, only leading N elements of X are used |
//| * if not given, automatically determined from size |
//|                  of X                                            |
//| X - starting point, array[N]: |
//| * it is better to set X to a feasible point |
//| * but X can be infeasible, in which case algorithm |
//| will try to find feasible point first, using X as|
//| initial approximation. |
//| OUTPUT PARAMETERS: |
//| State - structure stores algorithm state |
//+------------------------------------------------------------------+
void CAlglib::MinBCCreate(int n,CRowDouble &x,CMinBCState &state)
  {
//--- thin wrapper: forward problem dimension, starting point and state to the CMinBC computational core
   CMinBC::MinBCCreate(n,x,state);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::MinBCCreate(CRowDouble &x,CMinBCState &state)
  {
//--- overload with automatic dimension detection: N is taken
//--- directly from the length of the starting point X
   CMinBC::MinBCCreate(CAp::Len(x),x,state);
  }
//+------------------------------------------------------------------+
//| The subroutine is finite difference variant of MinBCCreate(). It |
//| uses finite differences in order to differentiate target function|
//| Description below contains information which is specific to this|
//| function only. We recommend to read comments on MinBCCreate() in |
//| order to get more information about creation of BC optimizer. |
//| INPUT PARAMETERS: |
//| N - problem dimension, N > 0: |
//| * if given, only leading N elements of X are used |
//| * if not given, automatically determined from size |
//| of X |
//| X - starting point, array[0..N - 1]. |
//| DiffStep - differentiation step, > 0 |
//| OUTPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| NOTES: |
//| 1. algorithm uses 4-point central formula for differentiation. |
//| 2. differentiation step along I-th axis is equal to |
//| DiffStep*S[I] where S[] is scaling vector which can be set |
//| by MinBCSetScale() call. |
//| 3. we recommend you to use moderate values of differentiation |
//| step. Too large step will result in too large truncation |
//| errors, while too small step will result in too large |
//| numerical errors. 1.0E-6 can be good value to start with. |
//| 4. Numerical differentiation is very inefficient - one gradient |
//| calculation needs 4*N function evaluations. This function will|
//| work for any N - either small(1...10), moderate(10...100) or |
//| large(100...). However, performance penalty will be too severe|
//| for any N's except for small ones. |
//| We should also say that code which relies on numerical |
//| differentiation is less robust and precise. CG needs exact |
//| gradient values. Imprecise gradient may slow down convergence,|
//| especially on highly nonlinear problems. |
//| Thus we recommend to use this function for fast prototyping on|
//| small-dimensional problems only, and to implement analytical |
//| gradient as soon as possible. |
//+------------------------------------------------------------------+
void CAlglib::MinBCCreateF(int n,CRowDouble &x,
                           double diffstep,
                           CMinBCState &state)
  {
//--- thin wrapper: create finite-difference BC solver (step = diffstep*S[i]) via the CMinBC core
   CMinBC::MinBCCreateF(n,x,diffstep,state);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CAlglib::MinBCCreateF(CRowDouble &x,
                           double diffstep,
                           CMinBCState &state)
  {
//--- overload with automatic dimension detection: N is inferred
//--- from the length of the starting point X
   CMinBC::MinBCCreateF(CAp::Len(x),x,diffstep,state);
  }
//+------------------------------------------------------------------+
//| This function sets boundary constraints for BC optimizer. |
//| Boundary constraints are inactive by default (after initial |
//| creation). They are preserved after algorithm restart with |
//| MinBCRestartFrom(). |
//| INPUT PARAMETERS: |
//| State - structure stores algorithm state |
//| BndL - lower bounds, array[N]. If some (all) variables |
//| are unbounded, you may specify very small number|
//| or -INF. |
//| BndU - upper bounds, array[N]. If some (all) variables |
//| are unbounded, you may specify very large number|
//| or +INF. |
//| NOTE 1: it is possible to specify BndL[i] = BndU[i]. In this case|
//| I-th variable will be "frozen" at X[i] = BndL[i]=BndU[i].|
//| NOTE 2: this solver has following useful properties: |
//| * bound constraints are always satisfied exactly |
//| * function is evaluated only INSIDE area specified by |
//| bound constraints, even when numerical differentiation |
//| is used (algorithm adjusts nodes according to boundary |
//| constraints) |
//+------------------------------------------------------------------+
void CAlglib::MinBCSetBC(CMinBCState &state,CRowDouble &bndl,
                         CRowDouble &bndu)
  {
//--- thin wrapper: delegate bound-constraint setup to the core
   CMinBC::MinBCSetBC(state,bndl,bndu);
  }
//+------------------------------------------------------------------+
//| This function sets stopping conditions for the optimizer. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| EpsG - >= 0. The subroutine finishes its work if the |
//| condition | v | < EpsG is satisfied, where: |
//| * | . | means Euclidian norm |
//| * v - scaled gradient vector, v[i] = g[i] * s[i]|
//| * g - gradient |
//| * s - scaling coefficients set by MinBCSetScale |
//| EpsF - >= 0. The subroutine finishes its work if on |
//| k+1-th iteration the condition |
//| | F(k + 1) - F(k) | <= EpsF * max{ | F(k) |, | F(k + 1) |, 1} |
//| is satisfied. |
//| EpsX - >= 0. The subroutine finishes its work if on |
//| k+1-th iteration the condition | v | <= EpsX is |
//| fulfilled, where: |
//| * | . | means Euclidian norm |
//| * v - scaled step vector, v[i] = dx[i] / s[i] |
//| * dx - step vector, dx = X(k + 1) - X(k) |
//| * s - scaling coefficients set by MinBCSetScale |
//| MaxIts - maximum number of iterations. If MaxIts = 0, the|
//| number of iterations is unlimited. |
//| Passing EpsG = 0, EpsF = 0 and EpsX = 0 and MaxIts = 0 |
//| (simultaneously) will lead to automatic stopping criterion |
//| selection. |
//| NOTE: when SetCond() called with non-zero MaxIts, BC solver may |
//| perform slightly more than MaxIts iterations. I.e., MaxIts |
//| sets non-strict limit on iterations count. |
//+------------------------------------------------------------------+
void CAlglib::MinBCSetCond(CMinBCState &state,double epsg,
                           double epsf,double epsx,int maxits)
  {
//--- thin wrapper: delegate stopping-condition setup to the core
   CMinBC::MinBCSetCond(state,epsg,epsf,epsx,maxits);
  }
//+------------------------------------------------------------------+
//| This function sets scaling coefficients for BC optimizer. |
//| ALGLIB optimizers use scaling matrices to test stopping |
//| conditions (step size and gradient are scaled before comparison |
//| with tolerances). Scale of the I-th variable is a translation |
//| invariant measure of: |
//| a) "how large" the variable is |
//| b) how large the step should be to make significant changes in |
//| the function |
//| Scaling is also used by finite difference variant of the |
//| optimizer - step along I-th axis is equal to DiffStep*S[I]. |
//| In most optimizers (and in the BC too) scaling is NOT a form of |
//| preconditioning. It just affects stopping conditions. You should |
//| set preconditioner by separate call to one of the MinBCSetPrec...|
//| functions. |
//| There is a special preconditioning mode, however, which uses |
//| scaling coefficients to form diagonal preconditioning matrix. You|
//| can turn this mode on, if you want. But you should understand |
//| that scaling is not the same thing as preconditioning - these are|
//| two different, although related forms of tuning solver. |
//| INPUT PARAMETERS: |
//| State - structure stores algorithm state |
//| S - array[N], non-zero scaling coefficients S[i] may|
//| be negative, sign doesn't matter. |
//+------------------------------------------------------------------+
void CAlglib::MinBCSetScale(CMinBCState &state,CRowDouble &s)
  {
//--- thin wrapper: delegate variable-scale setup to the core
   CMinBC::MinBCSetScale(state,s);
  }
//+------------------------------------------------------------------+
//| Modification of the preconditioner: preconditioning is turned off|
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//+------------------------------------------------------------------+
void CAlglib::MinBCSetPrecDefault(CMinBCState &state)
  {
//--- thin wrapper: switch preconditioning off in the core state
   CMinBC::MinBCSetPrecDefault(state);
  }
//+------------------------------------------------------------------+
//| Modification of the preconditioner: diagonal of approximate |
//| Hessian is used. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| D - diagonal of the approximate Hessian, |
//| array[0..N - 1], (if larger, only leading N |
//| elements are used). |
//| NOTE 1: D[i] should be positive. Exception will be thrown |
//| otherwise. |
//| NOTE 2: you should pass diagonal of approximate Hessian - NOT ITS|
//| INVERSE. |
//+------------------------------------------------------------------+
void CAlglib::MinBCSetPrecDiag(CMinBCState &state,CRowDouble &d)
  {
//--- thin wrapper: install diagonal preconditioner in the core state
   CMinBC::MinBCSetPrecDiag(state,d);
  }
//+------------------------------------------------------------------+
//| Modification of the preconditioner: scale - based diagonal |
//| preconditioning. |
//| This preconditioning mode can be useful when you don't have |
//| approximate diagonal of Hessian, but you know that your variables|
//| are badly scaled (for example, one variable is in [1, 10], and |
//| another in [1000, 100000]), and most part of the ill-conditioning|
//| comes from different scales of vars. |
//| In this case simple scale-based preconditioner, with |
//| H[i] = 1/(s[i]^2), can greatly improve convergence. |
//| IMPORTANT: you should set scale of your variables with |
//| MinBCSetScale() call (before or after |
//| MinBCSetPrecScale() call). Without knowledge of the |
//| scale of your variables scale-based preconditioner |
//| will be just unit matrix. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//+------------------------------------------------------------------+
void CAlglib::MinBCSetPrecScale(CMinBCState &state)
  {
//--- thin wrapper: enable scale-based preconditioning in the core
   CMinBC::MinBCSetPrecScale(state);
  }
//+------------------------------------------------------------------+
//| This function turns on / off reporting. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| NeedXRep - whether iteration reports are needed or not |
//| If NeedXRep is True, algorithm will call rep() callback function |
//| if it is provided to MinBCOptimize(). |
//+------------------------------------------------------------------+
void CAlglib::MinBCSetXRep(CMinBCState &state,bool needxrep)
  {
//--- thin wrapper: toggle iteration reporting in the core state
   CMinBC::MinBCSetXRep(state,needxrep);
  }
//+------------------------------------------------------------------+
//| This function sets maximum step length |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| StpMax - maximum step length, >= 0. Set StpMax to 0.0, |
//| if you don't want to limit step length. |
//| Use this subroutine when you optimize target function which |
//| contains exp() or other fast growing functions, and optimization |
//| algorithm makes too large steps which lead to overflow. This |
//| function allows us to reject steps that are too large (and |
//| therefore expose us to the possible overflow) without actually |
//| calculating function value at the x + stp*d. |
//+------------------------------------------------------------------+
void CAlglib::MinBCSetStpMax(CMinBCState &state,double stpmax)
  {
//--- thin wrapper: set maximum step length in the core state
   CMinBC::MinBCSetStpMax(state,stpmax);
  }
//+------------------------------------------------------------------+
//| This function provides reverse communication interface |
//| Reverse communication interface is not documented or recommended |
//| to use. See below for functions which provide better documented |
//| API |
//+------------------------------------------------------------------+
bool CAlglib::MinBCIteration(CMinBCState &state)
  {
//--- thin wrapper: advance reverse-communication solver by one step;
//--- returns false when the optimizer has finished
   return(CMinBC::MinBCIteration(state));
  }
//+------------------------------------------------------------------+
//| This family of functions is used to launch iterations of |
//| nonlinear optimizer |
//| These functions accept following parameters: |
//| func - callback which calculates function value func |
//| at given point x |
//| grad - callback which calculates function value func |
//| and gradient grad at given point x |
//| rep - optional callback which is called after each |
//| iteration can be null |
//| obj - optional object which is passed to func/grad/ |
//| hess/jac/rep can be null |
//| NOTES: |
//| 1. This function has two different implementations: one which |
//| uses exact (analytical) user-supplied Jacobian, and one which|
//| uses only function vector and numerically differentiates |
//| function in order to obtain gradient. |
//| Depending on the specific function used to create |
//| optimizer object you should choose appropriate variant of |
//| MinBCOptimize() - one which accepts function AND Jacobian or|
//| one which accepts ONLY function. |
//| Be careful to choose variant of MinBCOptimize() which |
//| corresponds to your optimization scheme! Table below lists |
//| different combinations of callback (function/gradient) passed |
//| to MinBCOptimize() and specific function used to create |
//| optimizer. |
//| | USER PASSED TO MinBCOptimize() |
//| CREATED WITH | function only | function and gradient |
//| ------------------------------------------------------------ |
//| MinBCCreateF() | works FAILS |
//| MinBCCreate() | FAILS works |
//| Here "FAILS" denotes inappropriate combinations of optimizer |
//| creation function and MinBCOptimize() version. Attempts to |
//| use such combination will lead to exception. Either you did |
//| not pass gradient when it WAS needed or you passed gradient when |
//| it was NOT needed. |
//+------------------------------------------------------------------+
//+------------------------------------------------------------------+
//| Driver for the function-only (numerical differentiation) variant |
//| of the BC optimizer: pumps the reverse-communication loop and    |
//| dispatches solver requests to the user-supplied callbacks.       |
//+------------------------------------------------------------------+
void CAlglib::MinBCOptimize(CMinBCState &state,CNDimensional_Func &func,
                            CNDimensional_Rep &rep,CObject &obj)
  {
//--- the function-value callback is mandatory for this variant
   if(!CAp::Assert(GetPointer(func)!=NULL,"ALGLIB: error in 'MinBCOptimize()' (func is null)"))
      return;
//--- reverse-communication loop: serve requests until solver stops
   while(MinBCIteration(state))
     {
      if(state.m_needf)
        {
         //--- solver asks for a function value at state.m_x
         func.Func(state.m_x,state.m_f,obj);
        }
      else
         if(state.m_xupdated)
           {
            //--- solver reports a new accepted iterate (optional callback)
            if(GetPointer(rep)!=NULL)
               rep.Rep(state.m_x,state.m_f,obj);
           }
         else
           {
            //--- unexpected request: optimizer was created for a
            //--- different callback scheme (see table in comments above)
            CAp::Assert(false,"ALGLIB: error in 'MinBCOptimize' (some derivatives were not provided?)");
            break;
           }
     }
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//+------------------------------------------------------------------+
//| Driver for the analytic-gradient variant of the BC optimizer:    |
//| pumps the reverse-communication loop and dispatches solver       |
//| requests to the user-supplied callbacks.                         |
//+------------------------------------------------------------------+
void CAlglib::MinBCOptimize(CMinBCState &state,CNDimensional_Grad &grad,
                            CNDimensional_Rep &rep,CObject &obj)
  {
//--- the gradient callback is mandatory for this variant
   if(!CAp::Assert(GetPointer(grad)!=NULL,"ALGLIB: error in 'MinBCOptimize()' (grad is null)"))
      return;
//--- reverse-communication loop: serve requests until solver stops
   while(MinBCIteration(state))
     {
      if(state.m_needfg)
        {
         //--- solver asks for function value and gradient at state.m_x
         grad.Grad(state.m_x,state.m_f,state.m_g,obj);
        }
      else
         if(state.m_xupdated)
           {
            //--- solver reports a new accepted iterate (optional callback)
            if(GetPointer(rep)!=NULL)
               rep.Rep(state.m_x,state.m_f,obj);
           }
         else
           {
            //--- unexpected request: optimizer was created for a
            //--- different callback scheme (see table in comments above)
            CAp::Assert(false,"ALGLIB: error in 'MinBCOptimize' (some derivatives were not provided?)");
            break;
           }
     }
  }
//+------------------------------------------------------------------+
//| This function activates / deactivates verification of the user - |
//| supplied analytic gradient. |
//| Upon activation of this option OptGuard integrity checker |
//| performs numerical differentiation of your target function at |
//| the initial point (note: future versions may also perform check |
//| at the final point) and compares numerical gradient with analytic|
//| one provided by you. |
//| If difference is too large, an error flag is set and optimization|
//| session continues. After optimization session is over, you can |
//| retrieve the report which stores both gradients and specific |
//| components highlighted as suspicious by the OptGuard. |
//| The primary OptGuard report can be retrieved with |
//| MinBCOptGuardResults(). |
//| IMPORTANT: gradient check is a high - overhead option which will |
//| cost you about 3*N additional function evaluations. In many cases|
//| it may cost as much as the rest of the optimization session. |
//| YOU SHOULD NOT USE IT IN THE PRODUCTION CODE UNLESS YOU WANT TO |
//| CHECK DERIVATIVES PROVIDED BY SOME THIRD PARTY. |
//| NOTE: unlike previous incarnation of the gradient checking code, |
//| OptGuard does NOT interrupt optimization even if it |
//| discovers bad gradient. |
//| INPUT PARAMETERS: |
//| State - structure used to store algorithm state |
//| TestStep - verification step used for numerical |
//| differentiation: |
//| * TestStep = 0 turns verification off |
//| * TestStep > 0 activates verification |
//| You should carefully choose TestStep. Value |
//| which is too large (so large that function |
//| behavior is non-cubic at this scale) will lead |
//| to false alarms. Too short step will result in |
//| rounding errors dominating numerical derivative.|
//| You may use different step for different parameters by means of |
//| setting scale with MinBCSetScale(). |
//| === EXPLANATION ================================================ |
//| In order to verify gradient algorithm performs following steps: |
//| * two trial steps are made to X[i] - TestStep * S[i] and |
//| X[i] + TestStep * S[i], where X[i] is i-th component of the |
//| initial point and S[i] is a scale of i-th parameter |
//| * F(X) is evaluated at these trial points |
//| * we perform one more evaluation in the middle point of the |
//| interval |
//| * we build cubic model using function values and derivatives |
//| at trial points and we compare its prediction with actual |
//| value in the middle point |
//+------------------------------------------------------------------+
void CAlglib::MinBCOptGuardGradient(CMinBCState &state,double teststep)
  {
//--- thin wrapper: enable/disable OptGuard gradient verification
   CMinBC::MinBCOptGuardGradient(state,teststep);
  }
//+------------------------------------------------------------------+
//| This function activates / deactivates nonsmoothness monitoring |
//| option of the OptGuard integrity checker. Smoothness monitor |
//| silently observes solution process and tries to detect ill-posed |
//| problems, i.e. ones with: |
//| a) discontinuous target function(non - C0) |
//| b) nonsmooth target function(non - C1) |
//| Smoothness monitoring does NOT interrupt optimization even if it|
//| suspects that your problem is nonsmooth. It just sets |
//| corresponding flags in the OptGuard report which can be retrieved|
//| after optimization is over. |
//| Smoothness monitoring is a moderate overhead option which often |
//| adds less than 1 % to the optimizer running time. Thus, you can |
//| use it even for large scale problems. |
//| NOTE: OptGuard does NOT guarantee that it will always detect |
//| C0 / C1 continuity violations. |
//| First, minor errors are hard to catch - say, a 0.0001 difference|
//| in the model values at two sides of the gap may be due to |
//| discontinuity of the model - or simply because the model has |
//| changed. |
//| Second, C1 - violations are especially difficult to detect |
//| in a noninvasive way. The optimizer usually performs very |
//| short steps near the nonsmoothness, and differentiation usually|
//| introduces a lot of numerical noise. It is hard to tell |
//| whether some tiny discontinuity in the slope is due to real |
//| nonsmoothness or just due to numerical noise alone. |
//| Our top priority was to avoid false positives, so in some rare |
//| cases minor errors may go unnoticed (however, in most cases they |
//| can be spotted with restart from different initial point). |
//| INPUT PARAMETERS: |
//| State - algorithm state |
//| Level - monitoring level: |
//| * 0 - monitoring is disabled |
//| * 1 - noninvasive low - overhead monitoring; |
//| function values and / or gradients are recorded,|
//| but OptGuard does not try to perform additional |
//| evaluations in order to get more information |
//| about suspicious locations. |
//| === EXPLANATION ================================================ |
//| One major source of headache during optimization is the |
//| possibility of the coding errors in the target function / |
//| constraints (or their gradients). Such errors most often |
//| manifest themselves as discontinuity or nonsmoothness of the|
//| target / constraints. |
//| Another frequent situation is when you try to optimize something |
//| involving lots of min() and max() operations, i.e. nonsmooth |
//| target. Although not a coding error, it is nonsmoothness anyway-|
//| and smooth optimizers usually stop right after encountering |
//| nonsmoothness, well before reaching solution. |
//| OptGuard integrity checker helps you to catch such situations: |
//| it monitors function values / gradients being passed to the |
//| optimizer and tries to detect errors. Upon discovering a |
//| of points it raises appropriate flag (and allows you to continue|
//| optimization). When optimization is done, you can study OptGuard |
//| result. |
//+------------------------------------------------------------------+
void CAlglib::MinBCOptGuardSmoothness(CMinBCState &state,int level=1)
  {
//--- thin wrapper: set OptGuard nonsmoothness-monitoring level
   CMinBC::MinBCOptGuardSmoothness(state,level);
  }
//+------------------------------------------------------------------+
//| Results of OptGuard integrity check, should be called after |
//| optimization session is over. |
//| === PRIMARY REPORT ============================================= |
//| OptGuard performs several checks which are intended to catch |
//| common errors in the implementation of nonlinear function / |
//| gradient: |
//| * incorrect analytic gradient |
//| * discontinuous(non - C0) target functions(constraints) |
//| * nonsmooth(non - C1) target functions(constraints) |
//| Each of these checks is activated with appropriate function: |
//| * MinBCOptGuardGradient() for gradient verification |
//| * MinBCOptGuardSmoothness() for C0 / C1 checks |
//| Following flags are set when these errors are suspected: |
//| * rep.badgradsuspected, and additionally: |
//| * rep.badgradvidx for specific variable (gradient element) |
//| suspected |
//| * rep.badgradxbase, a point where gradient is tested |
//| * rep.badgraduser, user - provided gradient(stored as 2D |
//| matrix with single row in order to make report structure |
//| compatible with more complex optimizers like MinNLC or |
//| MinLM) |
//| * rep.badgradnum, reference gradient obtained via |
//| numerical differentiation (stored as 2D matrix with single|
//| row in order to make report structure compatible with more|
//| complex optimizers like MinNLC or MinLM) |
//| * rep.nonc0suspected |
//| * rep.nonc1suspected |
//| === ADDITIONAL REPORTS / LOGS ================================== |
//| Several different tests are performed to catch C0 / C1 errors, |
//| you can find out specific test signaled error by looking to: |
//| * rep.nonc0test0positive, for non - C0 test #0 |
//| * rep.nonc1test0positive, for non - C1 test #0 |
//| * rep.nonc1test1positive, for non - C1 test #1 |
//| Additional information (including line search logs) can be |
//| obtained by means of: |
//| * MinBCOptGuardNonC1Test0Results() |
//| * MinBCOptGuardNonC1Test1Results() |
//| which return detailed error reports, specific points where |
//| discontinuities were found, and so on. |
//| ================================================================ |
//| INPUT PARAMETERS: |
//| State - algorithm state |
//| OUTPUT PARAMETERS: |
//| Rep - generic OptGuard report; more detailed reports |
//| can be retrieved with other functions. |
//| NOTE: false negatives (nonsmooth problems are not identified as |
//| nonsmooth ones) are possible although unlikely. |
//| The reason is that you need to make several evaluations |
//| around nonsmoothness in order to accumulate enough information |
//| about function curvature. Say, if you start right from the |
//| nonsmooth point, optimizer simply won't get enough data to |
//| understand what is going wrong before it terminates due to abrupt|
//| changes in the derivative. It is also possible that "unlucky"|
//| step will move us to the termination too quickly. |
//| Our current approach is to have less than 0.1 % false negatives|
//| in our test examples(measured with multiple restarts from |
//| random points), and to have exactly 0 % false positives. |
//+------------------------------------------------------------------+
void CAlglib::MinBCOptGuardResults(CMinBCState &state,COptGuardReport &rep)
  {
//--- thin wrapper: fetch primary OptGuard report from the core
   CMinBC::MinBCOptGuardResults(state,rep);
  }
//+------------------------------------------------------------------+
//|Detailed results of the OptGuard integrity check for nonsmoothness|
//| test #0 |
//| Nonsmoothness (non - C1) test #0 studies function values (not |
//| gradient!) obtained during line searches and monitors behavior |
//| of the directional derivative estimate. |
//| This test is less powerful than test #1, but it does not depend|
//| on the gradient values and thus it is more robust against |
//| artifacts introduced by numerical differentiation. |
//| Two reports are returned: |
//| *a "strongest" one, corresponding to line search which |
//| had highest value of the nonsmoothness indicator |
//| *a "longest" one, corresponding to line search which had more |
//| function evaluations, and thus is more detailed |
//| In both cases following fields are returned: |
//| * positive - is TRUE when test flagged suspicious point; |
//| FALSE if test did not notice anything (in the|
//| latter cases fields below are empty). |
//| * x0[], d[] - arrays of length N which store initial point and |
//| direction for line search (d[] can be normalized,|
//| but does not have to) |
//| * stp[], f[] - arrays of length CNT which store step lengths |
//| and function values at these points; f[i] is |
//| evaluated in x0 + stp[i]*d. |
//| * stpidxa, stpidxb - we suspect that function violates C1 |
//| continuity between steps #stpidxa and #stpidxb |
//| (usually we have stpidxb = stpidxa + 3, with |
//| most likely position of the violation between |
//| stpidxa + 1 and stpidxa + 2. |
//| ================================================================ |
//| = SHORTLY SPEAKING: build a 2D plot of (stp, f) and look at it - |
//| = you will see where C1 continuity is violated.|
//| ================================================================ |
//| INPUT PARAMETERS: |
//| State - algorithm state |
//| OUTPUT PARAMETERS: |
//| strrep - C1 test #0 "strong" report |
//| lngrep - C1 test #0 "long" report |
//+------------------------------------------------------------------+
void CAlglib::MinBCOptGuardNonC1Test0Results(CMinBCState &state,
                                             COptGuardNonC1Test0Report &strrep,
                                             COptGuardNonC1Test0Report &lngrep)
  {
//--- thin wrapper: fetch detailed non-C1 test #0 reports from the core
   CMinBC::MinBCOptGuardNonC1Test0Results(state,strrep,lngrep);
  }
//+------------------------------------------------------------------+
//| Detailed results of the OptGuard integrity check for |
//| nonsmoothness test #1 |
//| Nonsmoothness (non-C1) test #1 studies individual components of |
//| the gradient computed during line search. |
//| When precise analytic gradient is provided this test is more |
//| powerful than test #0 which works with function values and |
//| ignores user-provided gradient. However, test #0 becomes more |
//| powerful when numerical differentiation is employed (in such |
//| cases test #1 detects higher levels of numerical noise and |
//| becomes too conservative). |
//| This test also tells specific components of the gradient which |
//| violate C1 continuity, which makes it more informative than #0, |
//| which just tells that continuity is violated. |
//| Two reports are returned: |
//| *a "strongest" one, corresponding to line search which had |
//| highest value of the nonsmoothness indicator |
//| *a "longest" one, corresponding to line search which had more |
//| function evaluations, and thus is more detailed |
//| In both cases following fields are returned: |
//| * positive - is TRUE when test flagged suspicious point; |
//| FALSE if test did not notice anything (in the |
//| latter cases fields below are empty). |
//| * vidx - is an index of the variable in [0, N) with nonsmooth |
//| derivative |
//| * x0[], d[] - arrays of length N which store initial point and|
//| direction for line search(d[] can be normalized,|
//| but does not have to) |
//| * stp[], g[] - arrays of length CNT which store step lengths |
//| and gradient values at these points; g[i] is |
//| evaluated in x0 + stp[i]*d and contains vidx-th |
//| component of the gradient. |
//| * stpidxa, stpidxb - we suspect that function violates C1 |
//| continuity between steps #stpidxa and #stpidxb |
//| (usually we have stpidxb = stpidxa + 3, with |
//| most likely position of the violation between |
//| stpidxa + 1 and stpidxa + 2. |
//| ================================================================ |
//| = SHORTLY SPEAKING: build a 2D plot of (stp, f) and look at it - |
//| = you will see where C1 continuity is violated.|
//| ================================================================ |
//| INPUT PARAMETERS: |
//| State - algorithm state |
//| OUTPUT PARAMETERS: |
//| strrep - C1 test #1 "strong" report |
//| lngrep - C1 test #1 "long" report |
//+------------------------------------------------------------------+
void CAlglib::MinBCOptGuardNonC1Test1Results(CMinBCState &state,
                                             COptGuardNonC1Test1Report &strrep,
                                             COptGuardNonC1Test1Report &lngrep)
  {
//--- thin wrapper: fetch detailed non-C1 test #1 reports from the core
   CMinBC::MinBCOptGuardNonC1Test1Results(state,strrep,lngrep);
  }
//+------------------------------------------------------------------+
//| BC results |
//| INPUT PARAMETERS: |
//| State - algorithm state |
//| OUTPUT PARAMETERS: |
//| X - array[0..N - 1], solution |
//| Rep - optimization report. You should check |
//| Rep.TerminationType in order to distinguish |
//| successful termination from unsuccessful one: |
//| * -8 internal integrity control detected infinite or |
//| NAN values in function / gradient. Abnormal |
//| termination signalled. |
//| * -3 inconsistent constraints. |
//| * 1 relative function improvement is no more than EpsF.|
//| * 2 scaled step is no more than EpsX. |
//| * 4 scaled gradient norm is no more than EpsG. |
//| * 5 MaxIts steps was taken |
//| * 8 terminated by user who called |
//| MinBCRequestTermination(). |
//| X contains point which was "current accepted" when termination |
//| request was submitted. More information about fields of this |
//| structure can be found in the comments on MinBCReport datatype. |
//+------------------------------------------------------------------+
void CAlglib::MinBCResults(CMinBCState &state,CRowDouble &x,
                           CMinBCReport &rep)
  {
//--- thin wrapper: retrieve solution and report from the core state
   CMinBC::MinBCResults(state,x,rep);
  }
//+------------------------------------------------------------------+
//| BC results |
//| Buffered implementation of MinBCResults() which uses pre - |
//| allocated buffer to store X[]. If buffer size is too small, it |
//| resizes buffer. It is intended to be used in the inner cycles of |
//| performance critical algorithms where array reallocation penalty |
//| is too large to be ignored. |
//+------------------------------------------------------------------+
void CAlglib::MinBCResultsBuf(CMinBCState &state,CRowDouble &x,
                              CMinBCReport &rep)
  {
//--- thin wrapper: buffered variant reusing caller-allocated X[]
   CMinBC::MinBCResultsBuf(state,x,rep);
  }
//+------------------------------------------------------------------+
//| This subroutine restarts algorithm from new point. |
//| All optimization parameters (including constraints) are left |
//| unchanged. |
//| This function allows to solve multiple optimization problems |
//| (which must have same number of dimensions) without object |
//| reallocation penalty. |
//| INPUT PARAMETERS: |
//| State - structure previously allocated with MinBCCreate |
//| call. |
//| X - new starting point. |
//+------------------------------------------------------------------+
void CAlglib::MinBCRestartFrom(CMinBCState &state,CRowDouble &x)
  {
//--- thin wrapper: restart solver from a new point, settings kept
   CMinBC::MinBCRestartFrom(state,x);
  }
//+------------------------------------------------------------------+
//| This subroutine submits request for termination of running |
//| optimizer. It should be called from user-supplied callback when |
//| user decides that it is time to "smoothly" terminate optimization|
//| process. As result, optimizer stops at point which was "current |
//| accepted" when termination request was submitted and returns |
//| error code 8 (successful termination). |
//| INPUT PARAMETERS: |
//| State - optimizer structure |
//| NOTE: after request for termination optimizer may perform several|
//| additional calls to user-supplied callbacks. It does NOT |
//| guarantee to stop immediately - it just guarantees that |
//| these additional calls will be discarded later. |
//| NOTE: calling this function on optimizer which is NOT running |
//| will have no effect. |
//| NOTE: multiple calls to this function are possible. First call is|
//| counted, subsequent calls are silently ignored. |
//+------------------------------------------------------------------+
void CAlglib::MinBCRequestTermination(CMinBCState &state)
  {
//--- thin wrapper: submit "smooth" termination request to the core
   CMinBC::MinBCRequestTermination(state);
  }
//+------------------------------------------------------------------+
//| Obsolete function, use MinLBFGSSetPrecDefault() instead. |
//+------------------------------------------------------------------+
void CAlglib::MinLBFGSSetDefaultPreconditioner(CMinLBFGSStateShell &state)
  {
//--- obsolete API kept for compatibility; unwraps shell and delegates
   CMinComp::MinLBFGSSetDefaultPreconditioner(state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Obsolete function, use MinLBFGSSetCholeskyPreconditioner() |
//| instead. |
//+------------------------------------------------------------------+
void CAlglib::MinLBFGSSetCholeskyPreconditioner(CMinLBFGSStateShell &state,
                                                CMatrixDouble &p,bool IsUpper)
  {
//--- obsolete API kept for compatibility; unwraps shell and delegates
   CMinComp::MinLBFGSSetCholeskyPreconditioner(state.GetInnerObj(),p,IsUpper);
  }
//+------------------------------------------------------------------+
//| This is obsolete function which was used by previous version of |
//| the BLEIC optimizer. It does nothing in the current version of |
//| BLEIC. |
//+------------------------------------------------------------------+
void CAlglib::MinBLEICSetBarrierWidth(CMinBLEICStateShell &state,
                                      const double mu)
  {
//--- obsolete no-op kept for source compatibility (see header comment)
   CMinComp::MinBLEICSetBarrierWidth(state.GetInnerObj(),mu);
  }
//+------------------------------------------------------------------+
//| This is obsolete function which was used by previous version of |
//| the BLEIC optimizer. It does nothing in the current version of |
//| BLEIC. |
//+------------------------------------------------------------------+
void CAlglib::MinBLEICSetBarrierDecay(CMinBLEICStateShell &state,
                                      const double mudecay)
  {
//--- obsolete no-op kept for source compatibility (see header comment)
   CMinComp::MinBLEICSetBarrierDecay(state.GetInnerObj(),mudecay);
  }
//+------------------------------------------------------------------+
//| Obsolete optimization algorithm. |
//| Was replaced by MinBLEIC subpackage. |
//+------------------------------------------------------------------+
void CAlglib::MinASACreate(const int n,double &x[],double &bndl[],
                           double &bndu[],CMinASAStateShell &state)
  {
//--- obsolete optimizer (superseded by MinBLEIC); unwraps shell and delegates
   CMinComp::MinASACreate(n,x,bndl,bndu,state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Obsolete optimization algorithm. |
//| Was replaced by MinBLEIC subpackage. |
//+------------------------------------------------------------------+
//+------------------------------------------------------------------+
//| Obsolete optimization algorithm (superseded by MinBLEIC).        |
//| Convenience overload: N is inferred from the length of X after   |
//| verifying that X, BndL and BndU all have the same length.        |
//+------------------------------------------------------------------+
void CAlglib::MinASACreate(double &x[],double &bndl[],double &bndu[],
                           CMinASAStateShell &state)
  {
//--- infer problem size and validate bound-array lengths against it
   int n=CAp::Len(x);
   if(n!=CAp::Len(bndl) || n!=CAp::Len(bndu))
     {
      //--- report mismatch through the library's error channel and bail out
      Print("Error while calling 'minasacreate': looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return;
     }
//--- delegate to the full-signature implementation
   CMinComp::MinASACreate(n,x,bndl,bndu,state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Obsolete optimization algorithm. |
//| Was replaced by MinBLEIC subpackage. |
//+------------------------------------------------------------------+
void CAlglib::MinASASetCond(CMinASAStateShell &state,const double epsg,
                            const double epsf,const double epsx,const int maxits)
  {
//--- obsolete API kept for compatibility; unwraps shell and delegates
   CMinComp::MinASASetCond(state.GetInnerObj(),epsg,epsf,epsx,maxits);
  }
//+------------------------------------------------------------------+
//| Obsolete optimization algorithm. |
//| Was replaced by MinBLEIC subpackage. |
//+------------------------------------------------------------------+
void CAlglib::MinASASetXRep(CMinASAStateShell &state,const bool needxrep)
{
//--- forward the iteration-reporting flag to the computational core
CMinComp::MinASASetXRep(state.GetInnerObj(),needxrep);
}
//+------------------------------------------------------------------+
//| Obsolete optimization algorithm. |
//| Was replaced by MinBLEIC subpackage. |
//+------------------------------------------------------------------+
void CAlglib::MinASASetAlgorithm(CMinASAStateShell &state,const int algotype)
{
//--- forward algorithm-type selector to the computational core
CMinComp::MinASASetAlgorithm(state.GetInnerObj(),algotype);
}
//+------------------------------------------------------------------+
//| Obsolete optimization algorithm. |
//| Was replaced by MinBLEIC subpackage. |
//+------------------------------------------------------------------+
void CAlglib::MinASASetStpMax(CMinASAStateShell &state,const double stpmax)
{
//--- forward the maximum step length to the computational core
CMinComp::MinASASetStpMax(state.GetInnerObj(),stpmax);
}
//+------------------------------------------------------------------+
//| This function provides reverse communication interface |
//| Reverse communication interface is not documented or recommended |
//| to use. |
//| See below for functions which provide better documented API |
//+------------------------------------------------------------------+
bool CAlglib::MinASAIteration(CMinASAStateShell &state)
{
//--- one reverse-communication step; the MinASAOptimize loop below calls
//--- this until it returns false
return(CMinComp::MinASAIteration(state.GetInnerObj()));
}
//+------------------------------------------------------------------+
//| This family of functions is used to launch iterations of        |
//| nonlinear optimizer |
//| These functions accept following parameters: |
//| grad - callback which calculates function (or merit |
//| function) value func and gradient grad at given |
//| point x |
//| rep - optional callback which is called after each |
//| iteration can be null |
//| obj - optional object which is passed to |
//| func/grad/hess/jac/rep can be null |
//+------------------------------------------------------------------+
void CAlglib::MinASAOptimize(CMinASAStateShell &state,CNDimensional_Grad &grad,
CNDimensional_Rep &rep,bool rep_status,CObject &obj)
{
//--- pump the reverse-communication loop until the optimizer stops
while(CAlglib::MinASAIteration(state))
{
if(state.GetNeedFG())
{
//--- optimizer asks for function value and gradient at the current point
grad.Grad(state.GetInnerObj().m_x,state.GetInnerObj().m_f,state.GetInnerObj().m_g,obj);
}
else
if(state.GetInnerObj().m_xupdated)
{
//--- new iterate available - fire the progress callback if enabled
if(rep_status)
rep.Rep(state.GetInnerObj().m_x,state.GetInnerObj().m_f,obj);
}
else
{
//--- unrecognized request - report the error and abort the loop
Print("ALGLIB: error in 'minasaoptimize' (some derivatives were not provided?)");
CAp::exception_happened=true;
break;
}
}
}
//+------------------------------------------------------------------+
//| Obsolete optimization algorithm. |
//| Was replaced by MinBLEIC subpackage. |
//+------------------------------------------------------------------+
void CAlglib::MinASAResults(CMinASAStateShell &state,double &x[],
CMinASAReportShell &rep)
{
//--- unwrap both shells and fetch results from the computational core
CMinComp::MinASAResults(state.GetInnerObj(),x,rep.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Obsolete optimization algorithm. |
//| Was replaced by MinBLEIC subpackage. |
//+------------------------------------------------------------------+
void CAlglib::MinASAResultsBuf(CMinASAStateShell &state,double &x[],
CMinASAReportShell &rep)
{
//--- buffered variant: unwrap shells and delegate to the computational core
CMinComp::MinASAResultsBuf(state.GetInnerObj(),x,rep.GetInnerObj());
}
//+------------------------------------------------------------------+
//| Obsolete optimization algorithm. |
//| Was replaced by MinBLEIC subpackage. |
//+------------------------------------------------------------------+
void CAlglib::MinASARestartFrom(CMinASAStateShell &state,double &x[],
double &bndl[],double &bndu[])
{
//--- restart the optimizer from a new point/bounds via the computational core
CMinComp::MinASARestartFrom(state.GetInnerObj(),x,bndl,bndu);
}
//+------------------------------------------------------------------+
//| Polynomial root finding. |
//| This function returns all roots of the polynomial |
//| P(x) = a0 + a1*x + a2*x^2 + ... + an*x^n |
//| Both real and complex roots are returned (see below). |
//| INPUT PARAMETERS: |
//| A - array[N+1], polynomial coefficients: |
//| * A[0] is constant term |
//| * A[N] is a coefficient of X^N |
//| N - polynomial degree |
//| OUTPUT PARAMETERS: |
//| X - array of complex roots: |
//| * for isolated real root, X[I] is strictly real: |
//| IMAGE(X[I])=0 |
//| * complex roots are always returned in pairs-roots |
//| occupy positions I and I+1, with: |
//| * X[I+1]=Conj(X[I]) |
//| * IMAGE(X[I]) > 0 |
//| * IMAGE(X[I+1]) = -IMAGE(X[I]) < 0 |
//| * multiple real roots may have non-zero imaginary |
//| part due to roundoff errors. There is no reliable|
//| way to distinguish real root of multiplicity 2 |
//| from two complex roots in the presence of |
//| roundoff errors. |
//| Rep - report, additional information, following fields |
//| are set: |
//| * Rep.MaxErr - max( |P(xi)| ) for i=0..N-1. This |
//| field allows to quickly estimate "quality" of the|
//| roots being returned. |
//| NOTE: this function uses companion matrix method to find roots. |
//| In case internal EVD solver fails to find eigenvalues,          |
//| exception is generated. |
//| NOTE: roots are not "polished" and no matrix balancing is |
//| performed for them. |
//+------------------------------------------------------------------+
void CAlglib::PolynomialSolve(CRowDouble &a,int n,CRowComplex &x,
CPolynomialSolverReport &rep)
{
//--- delegate root finding to the companion-matrix based solver (see header)
CPolynomialSolver::PolynomialSolve(a,n,x,rep);
}
//+------------------------------------------------------------------+
//| Dense solver. |
//| This subroutine solves a system A*x=b, where A is NxN |
//| non-degenerate real matrix, x and b are vectors.                |
//| Algorithm features: |
//| * automatic detection of degenerate cases |
//| * condition number estimation |
//| * iterative refinement |
//| * O(N^3) complexity |
//| INPUT PARAMETERS |
//| A - array[0..N-1,0..N-1], system matrix |
//| N - size of A |
//| B - array[0..N-1], right part |
//| OUTPUT PARAMETERS |
//| Info - return code: |
//| * -3 A is singular, or VERY close to singular.|
//| X is filled by zeros in such cases. |
//| * -1 N<=0 was passed |
//| * 1 task is solved (but matrix A may be |
//| ill-conditioned, check R1/RInf parameters|
//| for condition numbers). |
//| Rep - solver report, see below for more info |
//| X - array[0..N-1], it contains: |
//| * solution of A*x=b if A is non-singular |
//| (well-conditioned or ill-conditioned, but not |
//| very close to singular) |
//| * zeros, if A is singular or VERY close to |
//| singular (in this case Info=-3). |
//| SOLVER REPORT |
//| Subroutine sets following fields of the Rep structure: |
//| * R1 reciprocal of condition number: 1/cond(A), 1-norm. |
//| * RInf reciprocal of condition number: 1/cond(A), inf-norm. |
//+------------------------------------------------------------------+
void CAlglib::RMatrixSolve(CMatrixDouble &a,const int n,double &b[],
int &info,CDenseSolverReportShell &rep,
double &x[])
{
//--- info is an output parameter, reset it before the call
info=0;
//--- unwrap the report shell and delegate to the dense solver core
CDenseSolver::RMatrixSolve(a,n,b,info,rep.GetInnerObj(),x);
}
//+------------------------------------------------------------------+
//| Dense solver. |
//| Similar to RMatrixSolve() but solves task with multiple right |
//| parts (where b and x are NxM matrices). |
//| Algorithm features: |
//| * automatic detection of degenerate cases |
//| * condition number estimation |
//| * optional iterative refinement |
//| * O(N^3+M*N^2) complexity |
//| INPUT PARAMETERS |
//| A - array[0..N-1,0..N-1], system matrix |
//| N - size of A |
//| B - array[0..N-1,0..M-1], right part |
//| M - right part size |
//| RFS - iterative refinement switch: |
//| * True - refinement is used. |
//| Less performance, more precision. |
//| * False - refinement is not used. |
//| More performance, less precision. |
//| OUTPUT PARAMETERS |
//| Info - same as in RMatrixSolve |
//| Rep - same as in RMatrixSolve |
//| X - same as in RMatrixSolve |
//+------------------------------------------------------------------+
void CAlglib::RMatrixSolveM(CMatrixDouble &a,const int n,CMatrixDouble &b,
const int m,const bool rfs,int &info,
CDenseSolverReportShell &rep,CMatrixDouble &x)
{
//--- info is an output parameter, reset it before the call
info=0;
//--- multiple-right-part variant; rfs toggles iterative refinement
CDenseSolver::RMatrixSolveM(a,n,b,m,rfs,info,rep.GetInnerObj(),x);
}
//+------------------------------------------------------------------+
//| Dense solver. |
//| This subroutine solves a system A*X=B, where A is NxN |
//| non-degenerate real matrix given by its LU decomposition, X and |
//| B are NxM real matrices. |
//| Algorithm features: |
//| * automatic detection of degenerate cases |
//| * O(N^2) complexity |
//| * condition number estimation |
//| No iterative refinement is provided because exact form of |
//| original matrix is not known to subroutine. Use RMatrixSolve or |
//| RMatrixMixedSolve if you need iterative refinement. |
//| INPUT PARAMETERS |
//| LUA - array[0..N-1, ..N-1], LU decomposition, RMatrixLU|
//| result |
//| P - array[0..N-1], pivots array, RMatrixLU result |
//| N - size of A |
//| B - array[0..N-1], right part |
//| OUTPUT PARAMETERS |
//| Info - same as in RMatrixSolve |
//| Rep - same as in RMatrixSolve |
//| X - same as in RMatrixSolve |
//+------------------------------------------------------------------+
void CAlglib::RMatrixLUSolve(CMatrixDouble &lua,int &p[],const int n,
double &b[],int &info,
CDenseSolverReportShell &rep,double &x[])
{
//--- info is an output parameter, reset it before the call
info=0;
//--- solve using a precomputed LU factorization (lua,p from RMatrixLU)
CDenseSolver::RMatrixLUSolve(lua,p,n,b,info,rep.GetInnerObj(),x);
}
//+------------------------------------------------------------------+
//| Dense solver. |
//| Similar to RMatrixLUSolve() but solves task with multiple right |
//| parts (where b and x are NxM matrices). |
//| Algorithm features: |
//| * automatic detection of degenerate cases |
//| * O(M*N^2) complexity |
//| * condition number estimation |
//| No iterative refinement is provided because exact form of |
//| original matrix is not known to subroutine. Use RMatrixSolve or |
//| RMatrixMixedSolve if you need iterative refinement. |
//| INPUT PARAMETERS |
//| LUA - array[0..N-1,0..N-1], LU decomposition, RMatrixLU|
//| result |
//| P - array[0..N-1], pivots array, RMatrixLU result |
//| N - size of A |
//| B - array[0..N-1,0..M-1], right part |
//| M - right part size |
//| OUTPUT PARAMETERS |
//| Info - same as in RMatrixSolve |
//| Rep - same as in RMatrixSolve |
//| X - same as in RMatrixSolve |
//+------------------------------------------------------------------+
void CAlglib::RMatrixLUSolveM(CMatrixDouble &lua,int &p[],const int n,
CMatrixDouble &b,const int m,int &info,
CDenseSolverReportShell &rep,CMatrixDouble &x)
{
//--- info is an output parameter, reset it before the call
info=0;
//--- LU-based solve with M right parts
CDenseSolver::RMatrixLUSolveM(lua,p,n,b,m,info,rep.GetInnerObj(),x);
}
//+------------------------------------------------------------------+
//| Dense solver. |
//| This subroutine solves a system A*x=b, where BOTH ORIGINAL A AND |
//| ITS LU DECOMPOSITION ARE KNOWN. You can use it if for some |
//| reasons you have both A and its LU decomposition. |
//| Algorithm features: |
//| * automatic detection of degenerate cases |
//| * condition number estimation |
//| * iterative refinement |
//| * O(N^2) complexity |
//| INPUT PARAMETERS |
//| A - array[0..N-1,0..N-1], system matrix |
//| LUA - array[0..N-1,0..N-1], LU decomposition, RMatrixLU|
//| result |
//| P - array[0..N-1], pivots array, RMatrixLU result |
//| N - size of A |
//| B - array[0..N-1], right part |
//| OUTPUT PARAMETERS |
//| Info - same as in RMatrixSolveM |
//| Rep - same as in RMatrixSolveM |
//| X - same as in RMatrixSolveM |
//+------------------------------------------------------------------+
void CAlglib::RMatrixMixedSolve(CMatrixDouble &a,CMatrixDouble &lua,
int &p[],const int n,double &b[],
int &info,CDenseSolverReportShell &rep,
double &x[])
{
//--- info is an output parameter, reset it before the call
info=0;
//--- uses both A and its LU decomposition (enables iterative refinement)
CDenseSolver::RMatrixMixedSolve(a,lua,p,n,b,info,rep.GetInnerObj(),x);
}
//+------------------------------------------------------------------+
//| Dense solver. |
//| Similar to RMatrixMixedSolve() but solves task with multiple |
//| right parts (where b and x are NxM matrices). |
//| Algorithm features: |
//| * automatic detection of degenerate cases |
//| * condition number estimation |
//| * iterative refinement |
//| * O(M*N^2) complexity |
//| INPUT PARAMETERS |
//| A - array[0..N-1,0..N-1], system matrix |
//| LUA - array[0..N-1,0..N-1], LU decomposition, RMatrixLU|
//| result |
//| P - array[0..N-1], pivots array, RMatrixLU result |
//| N - size of A |
//| B - array[0..N-1,0..M-1], right part |
//| M - right part size |
//| OUTPUT PARAMETERS |
//| Info - same as in RMatrixSolveM |
//| Rep - same as in RMatrixSolveM |
//| X - same as in RMatrixSolveM |
//+------------------------------------------------------------------+
void CAlglib::RMatrixMixedSolveM(CMatrixDouble &a,CMatrixDouble &lua,
int &p[],const int n,CMatrixDouble &b,
const int m,int &info,
CDenseSolverReportShell &rep,
CMatrixDouble &x)
{
//--- info is an output parameter, reset it before the call
info=0;
//--- mixed (A + LU) solve with M right parts
CDenseSolver::RMatrixMixedSolveM(a,lua,p,n,b,m,info,rep.GetInnerObj(),x);
}
//+------------------------------------------------------------------+
//| Dense solver. Same as RMatrixSolveM(), but for complex matrices. |
//| Algorithm features: |
//| * automatic detection of degenerate cases |
//| * condition number estimation |
//| * iterative refinement |
//| * O(N^3+M*N^2) complexity |
//| INPUT PARAMETERS |
//| A - array[0..N-1,0..N-1], system matrix |
//| N - size of A |
//| B - array[0..N-1,0..M-1], right part |
//| M - right part size |
//| RFS - iterative refinement switch: |
//| * True - refinement is used. |
//| Less performance, more precision. |
//| * False - refinement is not used. |
//| More performance, less precision. |
//| OUTPUT PARAMETERS |
//| Info - same as in RMatrixSolve |
//| Rep - same as in RMatrixSolve |
//| X - same as in RMatrixSolve |
//+------------------------------------------------------------------+
void CAlglib::CMatrixSolveM(CMatrixComplex &a,const int n,CMatrixComplex &b,
const int m,const bool rfs,int &info,
CDenseSolverReportShell &rep,
CMatrixComplex &x)
{
//--- info is an output parameter, reset it before the call
info=0;
//--- complex analogue of RMatrixSolveM
CDenseSolver::CMatrixSolveM(a,n,b,m,rfs,info,rep.GetInnerObj(),x);
}
//+------------------------------------------------------------------+
//| Dense solver. Same as RMatrixSolve(), but for complex matrices. |
//| Algorithm features: |
//| * automatic detection of degenerate cases |
//| * condition number estimation |
//| * iterative refinement |
//| * O(N^3) complexity |
//| INPUT PARAMETERS |
//| A - array[0..N-1,0..N-1], system matrix |
//| N - size of A |
//| B - array[0..N-1], right part |
//| OUTPUT PARAMETERS |
//| Info - same as in RMatrixSolve |
//| Rep - same as in RMatrixSolve |
//| X - same as in RMatrixSolve |
//+------------------------------------------------------------------+
void CAlglib::CMatrixSolve(CMatrixComplex &a,const int n,complex &b[],
int &info,CDenseSolverReportShell &rep,
complex &x[])
{
//--- info is an output parameter, reset it before the call
info=0;
//--- complex analogue of RMatrixSolve
CDenseSolver::CMatrixSolve(a,n,b,info,rep.GetInnerObj(),x);
}
//+------------------------------------------------------------------+
//| Dense solver. Same as RMatrixLUSolveM(), but for complex |
//| matrices. |
//| Algorithm features: |
//| * automatic detection of degenerate cases |
//| * O(M*N^2) complexity |
//| * condition number estimation |
//| No iterative refinement is provided because exact form of |
//| original matrix is not known to subroutine. Use CMatrixSolve or |
//| CMatrixMixedSolve if you need iterative refinement. |
//| INPUT PARAMETERS |
//| LUA - array[0..N-1,0..N-1], LU decomposition, RMatrixLU|
//| result |
//| P - array[0..N-1], pivots array, RMatrixLU result |
//| N - size of A |
//| B - array[0..N-1,0..M-1], right part |
//| M - right part size |
//| OUTPUT PARAMETERS |
//| Info - same as in RMatrixSolve |
//| Rep - same as in RMatrixSolve |
//| X - same as in RMatrixSolve |
//+------------------------------------------------------------------+
void CAlglib::CMatrixLUSolveM(CMatrixComplex &lua,int &p[],const int n,
CMatrixComplex &b,const int m,int &info,
CDenseSolverReportShell &rep,CMatrixComplex &x)
{
//--- info is an output parameter, reset it before the call
info=0;
//--- complex LU-based solve with M right parts
CDenseSolver::CMatrixLUSolveM(lua,p,n,b,m,info,rep.GetInnerObj(),x);
}
//+------------------------------------------------------------------+
//| Dense solver. Same as RMatrixLUSolve(), but for complex matrices.|
//| Algorithm features: |
//| * automatic detection of degenerate cases |
//| * O(N^2) complexity |
//| * condition number estimation |
//| No iterative refinement is provided because exact form of |
//| original matrix is not known to subroutine. Use CMatrixSolve or |
//| CMatrixMixedSolve if you need iterative refinement. |
//| INPUT PARAMETERS |
//| LUA - array[0..N-1,0..N-1], LU decomposition, CMatrixLU|
//| result |
//| P - array[0..N-1], pivots array, CMatrixLU result |
//| N - size of A |
//| B - array[0..N-1], right part |
//| OUTPUT PARAMETERS |
//| Info - same as in RMatrixSolve |
//| Rep - same as in RMatrixSolve |
//| X - same as in RMatrixSolve |
//+------------------------------------------------------------------+
void CAlglib::CMatrixLUSolve(CMatrixComplex &lua,int &p[],const int n,
complex &b[],int &info,CDenseSolverReportShell &rep,
complex &x[])
{
//--- info is an output parameter, reset it before the call
info=0;
//--- complex LU-based solve, single right part
CDenseSolver::CMatrixLUSolve(lua,p,n,b,info,rep.GetInnerObj(),x);
}
//+------------------------------------------------------------------+
//| Dense solver. Same as RMatrixMixedSolveM(), but for complex |
//| matrices. |
//| Algorithm features: |
//| * automatic detection of degenerate cases |
//| * condition number estimation |
//| * iterative refinement |
//| * O(M*N^2) complexity |
//| INPUT PARAMETERS |
//| A - array[0..N-1,0..N-1], system matrix |
//| LUA - array[0..N-1,0..N-1], LU decomposition, CMatrixLU|
//| result |
//| P - array[0..N-1], pivots array, CMatrixLU result |
//| N - size of A |
//| B - array[0..N-1,0..M-1], right part |
//| M - right part size |
//| OUTPUT PARAMETERS |
//| Info - same as in RMatrixSolveM |
//| Rep - same as in RMatrixSolveM |
//| X - same as in RMatrixSolveM |
//+------------------------------------------------------------------+
void CAlglib::CMatrixMixedSolveM(CMatrixComplex &a,CMatrixComplex &lua,
int &p[],const int n,CMatrixComplex &b,
const int m,int &info,
CDenseSolverReportShell &rep,
CMatrixComplex &x)
{
//--- info is an output parameter, reset it before the call
info=0;
//--- complex mixed (A + LU) solve with M right parts
CDenseSolver::CMatrixMixedSolveM(a,lua,p,n,b,m,info,rep.GetInnerObj(),x);
}
//+------------------------------------------------------------------+
//| Dense solver. Same as RMatrixMixedSolve(), but for complex |
//| matrices. |
//| Algorithm features: |
//| * automatic detection of degenerate cases |
//| * condition number estimation |
//| * iterative refinement |
//| * O(N^2) complexity |
//| INPUT PARAMETERS |
//| A - array[0..N-1,0..N-1], system matrix |
//| LUA - array[0..N-1,0..N-1], LU decomposition, CMatrixLU|
//| result |
//| P - array[0..N-1], pivots array, CMatrixLU result |
//| N - size of A |
//| B - array[0..N-1], right part |
//| OUTPUT PARAMETERS |
//| Info - same as in RMatrixSolveM |
//| Rep - same as in RMatrixSolveM |
//| X - same as in RMatrixSolveM |
//+------------------------------------------------------------------+
void CAlglib::CMatrixMixedSolve(CMatrixComplex &a,CMatrixComplex &lua,
int &p[],const int n,complex &b[],
int &info,CDenseSolverReportShell &rep,
complex &x[])
{
//--- info is an output parameter, reset it before the call
info=0;
//--- complex mixed (A + LU) solve, single right part
CDenseSolver::CMatrixMixedSolve(a,lua,p,n,b,info,rep.GetInnerObj(),x);
}
//+------------------------------------------------------------------+
//| Dense solver. Same as RMatrixSolveM(), but for symmetric positive|
//| definite matrices. |
//| Algorithm features: |
//| * automatic detection of degenerate cases |
//| * condition number estimation |
//| * O(N^3+M*N^2) complexity |
//| * matrix is represented by its upper or lower triangle |
//| No iterative refinement is provided because such partial |
//| representation of matrix does not allow efficient calculation of |
//| extra-precise matrix-vector products for large matrices. Use |
//| RMatrixSolve or RMatrixMixedSolve if you need iterative |
//| refinement. |
//| INPUT PARAMETERS |
//| A - array[0..N-1,0..N-1], system matrix |
//| N - size of A |
//| IsUpper - what half of A is provided |
//| B - array[0..N-1,0..M-1], right part |
//| M - right part size |
//| OUTPUT PARAMETERS |
//| Info - same as in RMatrixSolve. |
//| Returns -3 for non-SPD matrices. |
//| Rep - same as in RMatrixSolve |
//| X - same as in RMatrixSolve |
//+------------------------------------------------------------------+
void CAlglib::SPDMatrixSolveM(CMatrixDouble &a,const int n,const bool IsUpper,
CMatrixDouble &b,const int m,int &info,
CDenseSolverReportShell &rep,CMatrixDouble &x)
{
//--- info is an output parameter, reset it before the call
info=0;
//--- SPD solve with M right parts; IsUpper selects the stored triangle
CDenseSolver::SPDMatrixSolveM(a,n,IsUpper,b,m,info,rep.GetInnerObj(),x);
}
//+------------------------------------------------------------------+
//| Dense solver. Same as RMatrixSolve(), but for SPD matrices. |
//| Algorithm features: |
//| * automatic detection of degenerate cases |
//| * condition number estimation |
//| * O(N^3) complexity |
//| * matrix is represented by its upper or lower triangle |
//| No iterative refinement is provided because such partial |
//| representation of matrix does not allow efficient calculation of |
//| extra-precise matrix-vector products for large matrices. Use |
//| RMatrixSolve or RMatrixMixedSolve if you need iterative |
//| refinement. |
//| INPUT PARAMETERS |
//| A - array[0..N-1,0..N-1], system matrix |
//| N - size of A |
//| IsUpper - what half of A is provided |
//| B - array[0..N-1], right part |
//| OUTPUT PARAMETERS |
//| Info - same as in RMatrixSolve |
//| Returns -3 for non-SPD matrices. |
//| Rep - same as in RMatrixSolve |
//| X - same as in RMatrixSolve |
//+------------------------------------------------------------------+
void CAlglib::SPDMatrixSolve(CMatrixDouble &a,const int n,const bool IsUpper,
double &b[],int &info,CDenseSolverReportShell &rep,
double &x[])
{
//--- info is an output parameter, reset it before the call
info=0;
//--- SPD solve, single right part; IsUpper selects the stored triangle
CDenseSolver::SPDMatrixSolve(a,n,IsUpper,b,info,rep.GetInnerObj(),x);
}
//+------------------------------------------------------------------+
//| Dense solver. Same as RMatrixLUSolveM(), but for SPD matrices |
//| represented by their Cholesky decomposition. |
//| Algorithm features: |
//| * automatic detection of degenerate cases |
//| * O(M*N^2) complexity |
//| * condition number estimation |
//| * matrix is represented by its upper or lower triangle |
//| No iterative refinement is provided because such partial |
//| representation of matrix does not allow efficient calculation of |
//| extra-precise matrix-vector products for large matrices. Use |
//| RMatrixSolve or RMatrixMixedSolve if you need iterative |
//| refinement. |
//| INPUT PARAMETERS |
//| CHA - array[0..N-1,0..N-1], Cholesky decomposition, |
//| SPDMatrixCholesky result |
//| N - size of CHA |
//| IsUpper - what half of CHA is provided |
//| B - array[0..N-1,0..M-1], right part |
//| M - right part size |
//| OUTPUT PARAMETERS |
//| Info - same as in RMatrixSolve |
//| Rep - same as in RMatrixSolve |
//| X - same as in RMatrixSolve |
//+------------------------------------------------------------------+
void CAlglib::SPDMatrixCholeskySolveM(CMatrixDouble &cha,const int n,
const bool IsUpper,CMatrixDouble &b,
const int m,int &info,
CDenseSolverReportShell &rep,
CMatrixDouble &x)
{
//--- info is an output parameter, reset it before the call
info=0;
//--- solve using a precomputed Cholesky factor (cha from SPDMatrixCholesky)
CDenseSolver::SPDMatrixCholeskySolveM(cha,n,IsUpper,b,m,info,rep.GetInnerObj(),x);
}
//+------------------------------------------------------------------+
//| Dense solver. Same as RMatrixLUSolve(), but for SPD matrices |
//| represented by their Cholesky decomposition. |
//| Algorithm features: |
//| * automatic detection of degenerate cases |
//| * O(N^2) complexity |
//| * condition number estimation |
//| * matrix is represented by its upper or lower triangle |
//| No iterative refinement is provided because such partial |
//| representation of matrix does not allow efficient calculation of |
//| extra-precise matrix-vector products for large matrices. Use |
//| RMatrixSolve or RMatrixMixedSolve if you need iterative |
//| refinement. |
//| INPUT PARAMETERS |
//| CHA - array[0..N-1,0..N-1], Cholesky decomposition, |
//| SPDMatrixCholesky result |
//| N - size of A |
//| IsUpper - what half of CHA is provided |
//| B - array[0..N-1], right part |
//| OUTPUT PARAMETERS |
//| Info - same as in RMatrixSolve |
//| Rep - same as in RMatrixSolve |
//| X - same as in RMatrixSolve |
//+------------------------------------------------------------------+
void CAlglib::SPDMatrixCholeskySolve(CMatrixDouble &cha,const int n,
const bool IsUpper,double &b[],
int &info,CDenseSolverReportShell &rep,
double &x[])
{
//--- info is an output parameter, reset it before the call
info=0;
//--- Cholesky-based SPD solve, single right part
CDenseSolver::SPDMatrixCholeskySolve(cha,n,IsUpper,b,info,rep.GetInnerObj(),x);
}
//+------------------------------------------------------------------+
//| Dense solver. Same as RMatrixSolveM(), but for Hermitian positive|
//| definite matrices. |
//| Algorithm features: |
//| * automatic detection of degenerate cases |
//| * condition number estimation |
//| * O(N^3+M*N^2) complexity |
//| * matrix is represented by its upper or lower triangle |
//| No iterative refinement is provided because such partial |
//| representation of matrix does not allow efficient calculation of |
//| extra-precise matrix-vector products for large matrices. Use |
//| RMatrixSolve or RMatrixMixedSolve if you need iterative |
//| refinement. |
//| INPUT PARAMETERS |
//| A - array[0..N-1,0..N-1], system matrix |
//| N - size of A |
//| IsUpper - what half of A is provided |
//| B - array[0..N-1,0..M-1], right part |
//| M - right part size |
//| OUTPUT PARAMETERS |
//| Info - same as in RMatrixSolve. |
//| Returns -3 for non-HPD matrices. |
//| Rep - same as in RMatrixSolve |
//| X - same as in RMatrixSolve |
//+------------------------------------------------------------------+
void CAlglib::HPDMatrixSolveM(CMatrixComplex &a,const int n,const bool IsUpper,
CMatrixComplex &b,const int m,int &info,
CDenseSolverReportShell &rep,CMatrixComplex &x)
{
//--- info is an output parameter, reset it before the call
info=0;
//--- Hermitian positive definite solve with M right parts
CDenseSolver::HPDMatrixSolveM(a,n,IsUpper,b,m,info,rep.GetInnerObj(),x);
}
//+------------------------------------------------------------------+
//| Dense solver. Same as RMatrixSolve(), but for Hermitian positive |
//| definite matrices. |
//| Algorithm features: |
//| * automatic detection of degenerate cases |
//| * condition number estimation |
//| * O(N^3) complexity |
//| * matrix is represented by its upper or lower triangle |
//| No iterative refinement is provided because such partial |
//| representation of matrix does not allow efficient calculation of |
//| extra-precise matrix-vector products for large matrices. Use |
//| RMatrixSolve or RMatrixMixedSolve if you need iterative |
//| refinement. |
//| INPUT PARAMETERS |
//| A - array[0..N-1,0..N-1], system matrix |
//| N - size of A |
//| IsUpper - what half of A is provided |
//| B - array[0..N-1], right part |
//| OUTPUT PARAMETERS |
//| Info - same as in RMatrixSolve |
//| Returns -3 for non-HPD matrices. |
//| Rep - same as in RMatrixSolve |
//| X - same as in RMatrixSolve |
//+------------------------------------------------------------------+
void CAlglib::HPDMatrixSolve(CMatrixComplex &a,const int n,
const bool IsUpper,complex &b[],
int &info,CDenseSolverReportShell &rep,
complex &x[])
{
//--- info is an output parameter, reset it before the call
info=0;
//--- Hermitian positive definite solve, single right part
CDenseSolver::HPDMatrixSolve(a,n,IsUpper,b,info,rep.GetInnerObj(),x);
}
//+------------------------------------------------------------------+
//| Dense solver. Same as RMatrixLUSolveM(), but for HPD matrices |
//| represented by their Cholesky decomposition. |
//| Algorithm features: |
//| * automatic detection of degenerate cases |
//| * O(M*N^2) complexity |
//| * condition number estimation |
//| * matrix is represented by its upper or lower triangle |
//| No iterative refinement is provided because such partial |
//| representation of matrix does not allow efficient calculation of |
//| extra-precise matrix-vector products for large matrices. Use |
//| RMatrixSolve or RMatrixMixedSolve if you need iterative |
//| refinement. |
//| INPUT PARAMETERS |
//| CHA - array[0..N-1,0..N-1], Cholesky decomposition, |
//| HPDMatrixCholesky result |
//| N - size of CHA |
//| IsUpper - what half of CHA is provided |
//| B - array[0..N-1,0..M-1], right part |
//| M - right part size |
//| OUTPUT PARAMETERS |
//| Info - same as in RMatrixSolve |
//| Rep - same as in RMatrixSolve |
//| X - same as in RMatrixSolve |
//+------------------------------------------------------------------+
void CAlglib::HPDMatrixCholeskySolveM(CMatrixComplex &cha,const int n,
const bool IsUpper,CMatrixComplex &b,
const int m,int &info,
CDenseSolverReportShell &rep,
CMatrixComplex &x)
{
//--- info is an output parameter, reset it before the call
info=0;
//--- solve using a precomputed Cholesky factor (cha from HPDMatrixCholesky)
CDenseSolver::HPDMatrixCholeskySolveM(cha,n,IsUpper,b,m,info,rep.GetInnerObj(),x);
}
//+------------------------------------------------------------------+
//| Dense solver. Same as RMatrixLUSolve(), but for HPD matrices |
//| represented by their Cholesky decomposition. |
//| Algorithm features: |
//| * automatic detection of degenerate cases |
//| * O(N^2) complexity |
//| * condition number estimation |
//| * matrix is represented by its upper or lower triangle |
//| No iterative refinement is provided because such partial |
//| representation of matrix does not allow efficient calculation of |
//| extra-precise matrix-vector products for large matrices. Use |
//| RMatrixSolve or RMatrixMixedSolve if you need iterative |
//| refinement. |
//| INPUT PARAMETERS |
//| CHA - array[0..N-1,0..N-1], Cholesky decomposition, |
//| SPDMatrixCholesky result |
//| N - size of A |
//| IsUpper - what half of CHA is provided |
//| B - array[0..N-1], right part |
//| OUTPUT PARAMETERS |
//| Info - same as in RMatrixSolve |
//| Rep - same as in RMatrixSolve |
//| X - same as in RMatrixSolve |
//+------------------------------------------------------------------+
void CAlglib::HPDMatrixCholeskySolve(CMatrixComplex &cha,const int n,
const bool IsUpper,complex &b[],
int &info,CDenseSolverReportShell &rep,
complex &x[])
{
//--- info is an output parameter, reset it before the call
info=0;
//--- Cholesky-based HPD solve, single right part
CDenseSolver::HPDMatrixCholeskySolve(cha,n,IsUpper,b,info,rep.GetInnerObj(),x);
}
//+------------------------------------------------------------------+
//| Dense solver. |
//| This subroutine finds solution of the linear system A*X=B with |
//| non-square, possibly degenerate A. System is solved in the least |
//| squares sense, and general least squares solution X = X0 + CX*y |
//| which minimizes |A*X-B| is returned. If A is non-degenerate, |
//| solution in the usual sense is returned |
//| Algorithm features: |
//| * automatic detection of degenerate cases |
//| * iterative refinement |
//| * O(N^3) complexity |
//| INPUT PARAMETERS |
//| A - array[0..NRows-1,0..NCols-1], system matrix |
//| NRows - vertical size of A |
//| NCols - horizontal size of A |
//| B - array[0..NCols-1], right part |
//| Threshold- a number in [0,1]. Singular values beyond |
//| Threshold are considered zero. Set it to 0.0, |
//| if you don't understand what it means, so the |
//| solver will choose good value on its own. |
//| OUTPUT PARAMETERS |
//| Info - return code: |
//| * -4 SVD subroutine failed |
//| * -1 if NRows<=0 or NCols<=0 or Threshold<0 |
//| was passed |
//| * 1 if task is solved |
//| Rep - solver report, see below for more info |
//| X - array[0..N-1,0..M-1], it contains: |
//| * solution of A*X=B if A is non-singular |
//| (well-conditioned or ill-conditioned, but not |
//| very close to singular) |
//| * zeros, if A is singular or VERY close to |
//| singular (in this case Info=-3). |
//| SOLVER REPORT |
//| Subroutine sets following fields of the Rep structure: |
//| * R2 reciprocal of condition number: 1/cond(A), 2-norm. |
//| * N = NCols |
//| * K dim(Null(A)) |
//| * CX array[0..N-1,0..K-1], kernel of A. |
//| Columns of CX store such vectors that A*CX[i]=0. |
//+------------------------------------------------------------------+
void CAlglib::RMatrixSolveLS(CMatrixDouble &a,const int nrows,
                             const int ncols,double &b[],
                             const double threshold,int &info,
                             CDenseSolverLSReportShell &rep,
                             double &x[])
  {
//--- reset return code before delegating
   info=0;
//--- forward to the computational core; rep is passed as its inner object
   CDenseSolver::RMatrixSolveLS(a,nrows,ncols,b,threshold,info,rep.GetInnerObj(),x);
  }
//+------------------------------------------------------------------+
//| Sparse linear solver for A*x = b with N*N sparse real symmetric |
//| positive definite matrix A, N * 1 vectors x and b. |
//| This solver converts input matrix to SKS format, performs |
//| Cholesky factorization using SKS Cholesky subroutine (works well |
//| for limited bandwidth matrices) and uses sparse triangular |
//| solvers to get solution of the original system. |
//| INPUT PARAMETERS: |
//| A - sparse matrix, must be NxN exactly |
//| IsUpper - which half of A is provided (another half is |
//| ignored) |
//| B - array[0..N - 1], right part |
//| OUTPUT PARAMETERS: |
//| X - array[N], it contains: |
//| * rep.m_terminationtype > 0 => solution |
//| * rep.m_terminationtype = -3 => filled by zeros |
//| Rep - solver report, following fields are set: |
//| * rep.m_terminationtype - solver status; > 0 for |
//| success, set to - 3 on |
//| failure (degenerate or |
//| non-SPD system). |
//+------------------------------------------------------------------+
void CAlglib::SparseSPDSolveSKS(CSparseMatrix &a,bool IsUpper,
                                CRowDouble &b,CRowDouble &x,
                                CSparseSolverReport &rep)
  {
//--- thin wrapper: delegate to the static implementation
   CDirectSparseSolvers::SparseSPDSolveSKS(a,IsUpper,b,x,rep);
  }
//+------------------------------------------------------------------+
//| Sparse linear solver for A*x = b with N*N sparse real symmetric |
//| positive definite matrix A, N * 1 vectors x and b. |
//| This solver converts input matrix to CRS format, performs |
//| Cholesky factorization using supernodal Cholesky decomposition |
//| with permutation-reducing ordering and uses sparse triangular |
//| solver to get solution of the original system. |
//| INPUT PARAMETERS: |
//| A - sparse matrix, must be NxN exactly |
//| IsUpper - which half of A is provided (another half is |
//| ignored) |
//| B - array[N], right part |
//| OUTPUT PARAMETERS: |
//| X - array[N], it contains: |
//| * rep.m_terminationtype > 0 => solution |
//| * rep.m_terminationtype = -3 => filled by zeros |
//| Rep - solver report, following fields are set: |
//| * rep.m_terminationtype - solver status; > 0 for |
//| success, set to - 3 on |
//| failure (degenerate or |
//| non-SPD system). |
//+------------------------------------------------------------------+
void CAlglib::SparseSPDSolve(CSparseMatrix &a,bool IsUpper,
                             CRowDouble &b,CRowDouble &x,
                             CSparseSolverReport &rep)
  {
//--- thin wrapper: delegate to the static implementation
   CDirectSparseSolvers::SparseSPDSolve(a,IsUpper,b,x,rep);
  }
//+------------------------------------------------------------------+
//| Sparse linear solver for A*x = b with N*N real symmetric positive|
//| definite matrix A given by its Cholesky decomposition, and N * 1 |
//| vectors x and b. |
//| IMPORTANT: this solver requires input matrix to be in the SKS |
//| (Skyline) or CRS (compressed row storage) format. An |
//| exception will be generated if you pass matrix in some|
//| other format. |
//| INPUT PARAMETERS: |
//|     A        - sparse NxN matrix stored in CRS or SKS format, must|
//| be NxN exactly |
//| IsUpper - which half of A is provided (another half is |
//| ignored) |
//| B - array[N], right part |
//| OUTPUT PARAMETERS: |
//| X - array[N], it contains: |
//| * rep.m_terminationtype > 0 => solution |
//| * rep.m_terminationtype = -3 => filled by zeros |
//| Rep - solver report, following fields are set: |
//| * rep.m_terminationtype - solver status; > 0 for |
//| success, set to - 3 on |
//| failure (degenerate or |
//| non-SPD system). |
//+------------------------------------------------------------------+
void CAlglib::SparseSPDCholeskySolve(CSparseMatrix &a,bool IsUpper,
                                     CRowDouble &b,CRowDouble &x,
                                     CSparseSolverReport &rep)
  {
//--- thin wrapper: delegate to the static implementation
   CDirectSparseSolvers::SparseSPDCholeskySolve(a,IsUpper,b,x,rep);
  }
//+------------------------------------------------------------------+
//| Sparse linear solver for A*x = b with general (nonsymmetric) N*N |
//| sparse real matrix A, N * 1 vectors x and b. |
//| This solver converts input matrix to CRS format, performs LU |
//| factorization and uses sparse triangular solvers to get solution |
//| of the original system. |
//| INPUT PARAMETERS: |
//| A - sparse matrix, must be NxN exactly, any storage |
//| format |
//| N - size of A, N > 0 |
//| B - array[0..N - 1], right part |
//| OUTPUT PARAMETERS: |
//| X - array[N], it contains: |
//| * rep.m_terminationtype > 0 => solution |
//| * rep.m_terminationtype = -3 => filled by zeros |
//| Rep - solver report, following fields are set: |
//| * rep.m_terminationtype - solver status; > 0 for |
//| success, set to - 3 on failure (degenerate |
//| system). |
//+------------------------------------------------------------------+
void CAlglib::SparseSolve(CSparseMatrix &a,CRowDouble &b,
                          CRowDouble &x,CSparseSolverReport &rep)
  {
//--- thin wrapper: delegate to the static implementation
   CDirectSparseSolvers::SparseSolve(a,b,x,rep);
  }
//+------------------------------------------------------------------+
//| Sparse linear solver for A*x = b with general (nonsymmetric) N*N |
//| sparse real matrix A given by its LU factorization, N*1 vectors x|
//| and b. |
//| IMPORTANT: this solver requires input matrix to be in the CRS |
//| sparse storage format. An exception will be generated |
//| if you pass matrix in some other format (HASH or SKS).|
//| INPUT PARAMETERS: |
//| A - LU factorization of the sparse matrix, must be NxN |
//| exactly in CRS storage format |
//| P, Q - pivot indexes from LU factorization |
//| N - size of A, N > 0 |
//| B - array[0..N - 1], right part |
//| OUTPUT PARAMETERS: |
//| X - array[N], it contains: |
//| * rep.m_terminationtype > 0 => solution |
//| * rep.m_terminationtype = -3 => filled by zeros |
//| Rep - solver report, following fields are set: |
//| * rep.m_terminationtype - solver status; > 0 for |
//| success, set to - 3 on |
//| failure (degenerate |
//| system). |
//+------------------------------------------------------------------+
void CAlglib::SparseLUSolve(CSparseMatrix &a,CRowInt &p,CRowInt &q,
                            CRowDouble &b,CRowDouble &x,
                            CSparseSolverReport &rep)
  {
//--- thin wrapper: delegate to the static implementation
//--- p,q are the pivot index vectors produced by the LU factorization
   CDirectSparseSolvers::SparseLUSolve(a,p,q,b,x,rep);
  }
//+------------------------------------------------------------------+
//| Solving sparse symmetric linear system A*x = b using GMRES(k) |
//| method. Sparse symmetric A is given by its lower or upper |
//| triangle. |
//| NOTE: use SparseSolveGMRES() to solve system with nonsymmetric A.|
//| This function provides convenience API for an 'expert' interface |
//| provided by SparseSolverState class. Use SparseSolver API if you |
//| need advanced functions like providing initial point, using |
//| out-of-core API and so on. |
//| INPUT PARAMETERS: |
//| A - sparse symmetric NxN matrix in any sparse storage |
//| format. Using CRS format is recommended because it |
//| avoids internal conversion. An exception will be |
//| generated if A is not NxN matrix (where N is a size|
//| specified during solver object creation). |
//| IsUpper - whether upper or lower triangle of A is used: |
//| * IsUpper = True => only upper triangle is used and|
//| lower triangle is not referenced at all |
//| * IsUpper = False => only lower triangle is used |
//| and upper triangle is not referenced at all |
//| B - right part, array[N] |
//| K - k parameter for GMRES(k), k >= 0. Zero value means |
//| that algorithm will choose it automatically. |
//| EpsF - stopping condition, EpsF >= 0. The algorithm will |
//| stop when residual will decrease below EpsF* | B |.|
//| Having EpsF = 0 means that this stopping condition |
//| is ignored. |
//| MaxIts - stopping condition, MaxIts >= 0. The algorithm will|
//| stop after performing MaxIts iterations. Zero value|
//| means no limit. |
//| NOTE: having both EpsF = 0 and MaxIts = 0 means that stopping |
//| criteria will be chosen automatically. |
//| OUTPUT PARAMETERS: |
//| X - array[N], the solution |
//| Rep - solution report: |
//| * Rep.TerminationType completion code: |
//| * -5 CG method was used for a matrix which is |
//| not positive definite |
//| * -4 overflow / underflow during solution (ill |
//| conditioned problem) |
//| * 1 || residual || <= EpsF* || b || |
//| * 5 MaxIts steps was taken |
//| * 7 rounding errors prevent further progress, |
//| best point found is returned |
//| * 8 the algorithm was terminated early with |
//| SparseSolverRequestTermination() being |
//| called from other thread. |
//| * Rep.IterationsCount contains iterations count |
//| * Rep.NMV contains number of matrix - vector |
//| calculations |
//| * Rep.R2 contains squared residual |
//+------------------------------------------------------------------+
void CAlglib::SparseSolveSymmetricGMRES(CSparseMatrix &a,bool IsUpper,
                                        CRowDouble &b,int k,double epsf,
                                        int maxits,CRowDouble &x,
                                        CSparseSolverReport &rep)
  {
//--- thin wrapper: delegate to the iterative sparse solver core
   CIterativeSparse::SparseSolveSymmetricGMRES(a,IsUpper,b,k,epsf,maxits,x,rep);
  }
//+------------------------------------------------------------------+
//| Solving sparse linear system A*x = b using GMRES(k) method. |
//| This function provides convenience API for an 'expert' interface |
//| provided by SparseSolverState class. Use SparseSolver API if you |
//| need advanced functions like providing initial point, using |
//| out-of-core API and so on. |
//| INPUT PARAMETERS: |
//| A - sparse NxN matrix in any sparse storage format. |
//| Using CRS format is recommended because it avoids |
//| internal conversion. An exception will be generated|
//| if A is not NxN matrix (where N is a size specified|
//| during solver object creation). |
//| B - right part, array[N] |
//| K - k parameter for GMRES(k), k >= 0. Zero value means |
//| that algorithm will choose it automatically. |
//| EpsF - stopping condition, EpsF >= 0. The algorithm will |
//| stop when residual will decrease below EpsF*| B |. |
//| Having EpsF = 0 means that this stopping condition |
//| is ignored. |
//| MaxIts - stopping condition, MaxIts >= 0. The algorithm will|
//| stop after performing MaxIts iterations. Zero value|
//| means no limit. |
//| NOTE: having both EpsF = 0 and MaxIts = 0 means that stopping |
//| criteria will be chosen automatically. |
//| OUTPUT PARAMETERS: |
//| X - array[N], the solution |
//| Rep - solution report: |
//| * Rep.TerminationType completion code: |
//| * -5 CG method was used for a matrix which is |
//| not positive definite |
//| * -4 overflow / underflow during solution (ill |
//| conditioned problem) |
//| * 1 || residual || <= EpsF* || b || |
//| * 5 MaxIts steps was taken |
//| * 7 rounding errors prevent further progress, |
//| best point found is returned |
//| * 8 the algorithm was terminated early with |
//| SparseSolverRequestTermination() being |
//| called from other thread. |
//| * Rep.IterationsCount contains iterations count |
//| * Rep.NMV contains number of matrix - vector |
//| calculations |
//| * Rep.R2 contains squared residual |
//+------------------------------------------------------------------+
void CAlglib::SparseSolveGMRES(CSparseMatrix &a,CRowDouble &b,int k,
                               double epsf,int maxits,CRowDouble &x,
                               CSparseSolverReport &rep)
  {
//--- thin wrapper: delegate to the iterative sparse solver core
   CIterativeSparse::SparseSolveGMRES(a,b,k,epsf,maxits,x,rep);
  }
//+------------------------------------------------------------------+
//| This function initializes sparse linear iterative solver object. |
//| This solver can be used to solve nonsymmetric and symmetric |
//| positive definite NxN(square) linear systems. |
//| The solver provides 'expert' API which allows advanced control |
//| over algorithms being used, including ability to get progress |
//| report, terminate long-running solver from other thread, |
//| out-of-core solution and so on. |
//| NOTE: there are also convenience functions that allow quick      |
//|       one-line access to the solvers:                            |
//| * SparseSolveCG() to solve SPD linear systems |
//| * SparseSolveGMRES() to solve unsymmetric linear systems. |
//| NOTE: if you want to solve MxN(rectangular) linear problem you |
//| may use LinLSQR solver provided by ALGLIB. |
//| USAGE (A is given by the SparseMatrix structure): |
//| 1. User initializes algorithm state with SparseSolverCreate() |
//| call |
//| 2. User selects algorithm with one of the SparseSolverSetAlgo??|
//| functions. By default, GMRES(k) is used with automatically |
//| chosen k |
//| 3. Optionally, user tunes solver parameters, sets starting |
//| point, etc. |
//| 4. Depending on whether system is symmetric or not, user calls:|
//| * SparseSolverSolveSymmetric() for a symmetric system given |
//| by its lower or upper triangle |
//| * SparseSolverSolve() for a nonsymmetric system or a |
//| symmetric one given by the full matrix |
//| 5. User calls SparseSolverResults() to get the solution |
//| It is possible to call SparseSolverSolve???() again to solve |
//| another task with same dimensionality but different matrix and/or|
//| right part without reinitializing SparseSolverState structure. |
//| USAGE(out-of-core mode): |
//| 1. User initializes algorithm state with SparseSolverCreate() |
//| call |
//| 2. User selects algorithm with one of the SparseSolverSetAlgo??|
//| functions. By default, GMRES(k) is used with automatically |
//| chosen k |
//| 3. Optionally, user tunes solver parameters, sets starting |
//| point, etc. |
//| 4. After that user should work with out-of-core interface in a |
//| loop like one given below: |
//| > CAlgLib::SparseSolverOOCStart(state) |
//| > while CAlgLib::SparseSolverOOCContinue(state) do |
//| >    CAlgLib::SparseSolverOOCGetRequestInfo(state,              |
//| RequestType)                                                    |
//| >    CAlgLib::SparseSolverOOCGetRequestData(state, X)           |
//| > if RequestType = 0 then |
//| > [calculate Y = A * X, with X = R ^ N] |
//| >    CAlgLib::SparseSolverOOCSendResult(state, Y)                |
//| > CAlgLib::SparseSolverOOCStop(state, X, Report) |
//| INPUT PARAMETERS: |
//| N - problem dimensionality (fixed at start-up) |
//| OUTPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//+------------------------------------------------------------------+
void CAlglib::SparseSolverCreate(int n,CSparseSolverState &state)
  {
//--- thin wrapper: initialize solver state for an NxN problem
   CIterativeSparse::SparseSolverCreate(n,state);
  }
//+------------------------------------------------------------------+
//| This function sets the solver algorithm to GMRES(k). |
//| NOTE: if you do not need advanced functionality of the |
//| SparseSolver API, you may use convenience functions |
//| SparseSolveGMRES() and SparseSolveSymmetricGMRES(). |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| K - GMRES parameter, K >= 0: |
//| * recommended values are in 10..100 range |
//| * larger values up to N are possible but have |
//| little sense - the algorithm will be slower than |
//| any dense solver. |
//| * values above N are truncated down to N |
//| * zero value means that default value is chosen. |
//| This value is 50 in the current version, but it |
//| may change in future ALGLIB releases. |
//+------------------------------------------------------------------+
void CAlglib::SparseSolverSetAlgoGMRES(CSparseSolverState &state,int k)
  {
//--- thin wrapper: select GMRES(k) as the solver algorithm
   CIterativeSparse::SparseSolverSetAlgoGMRES(state,k);
  }
//+------------------------------------------------------------------+
//| This function sets starting point. |
//| By default, zero starting point is used. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| X - starting point, array[N] |
//| OUTPUT PARAMETERS: |
//| State - new starting point was set |
//+------------------------------------------------------------------+
void CAlglib::SparseSolverSetStartingPoint(CSparseSolverState &state,
                                           CRowDouble &x)
  {
//--- thin wrapper: store user-supplied starting point in the state
   CIterativeSparse::SparseSolverSetStartingPoint(state,x);
  }
//+------------------------------------------------------------------+
//| This function sets stopping criteria. |
//| INPUT PARAMETERS: |
//| EpsF - algorithm will be stopped if norm of residual is |
//| less than EpsF* || b ||. |
//| MaxIts - algorithm will be stopped if number of iterations |
//| is more than MaxIts. |
//| OUTPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| NOTES: If both EpsF and MaxIts are zero then small EpsF will be |
//| set to small value. |
//+------------------------------------------------------------------+
void CAlglib::SparseSolverSetCond(CSparseSolverState &state,
                                  double epsf,int maxits)
  {
//--- thin wrapper: set residual-norm and iteration-count stopping criteria
   CIterativeSparse::SparseSolverSetCond(state,epsf,maxits);
  }
//+------------------------------------------------------------------+
//| Procedure for the solution of A*x = b with sparse symmetric A |
//| given by its lower or upper triangle. |
//| This function will work with any solver algorithm being used, |
//| SPD one (like CG) or not(like GMRES). Using unsymmetric solvers |
//| (like GMRES) on SPD problems is suboptimal, but still possible. |
//| NOTE: the solver behavior is ill-defined for a situation when a |
//| SPD solver is used on indefinite matrix. It may solve the |
//| problem up to desired precision (sometimes, rarely) or |
//| return with error code signalling violation of underlying |
//| assumptions. |
//| INPUT PARAMETERS: |
//| State - algorithm state |
//| A - sparse symmetric NxN matrix in any sparse storage |
//| format. Using CRS format is recommended because it |
//| avoids internal conversion. An exception will be |
//| generated if A is not NxN matrix (where N is a size|
//| specified during solver object creation). |
//| IsUpper - whether upper or lower triangle of A is used: |
//| * IsUpper = True => only upper triangle is used and|
//| lower triangle is not referenced at all |
//| * IsUpper = False => only lower triangle is used |
//| and upper triangle is not referenced at all |
//| B - right part, array[N] |
//| RESULT: |
//| This function returns no result. You can get the solution by |
//| calling SparseSolverResults() |
//+------------------------------------------------------------------+
void CAlglib::SparseSolverSolveSymmetric(CSparseSolverState &state,
                                         CSparseMatrix &a,
                                         bool IsUpper,CRowDouble &b)
  {
//--- thin wrapper: solve with symmetric A given by one triangle;
//--- retrieve the solution afterwards via SparseSolverResults()
   CIterativeSparse::SparseSolverSolveSymmetric(state,a,IsUpper,b);
  }
//+------------------------------------------------------------------+
//| Procedure for the solution of A*x = b with sparse nonsymmetric A |
//| IMPORTANT: this function will work with any solver algorithm |
//| being used, symmetric solver like CG, or not. However,|
//| using symmetric solvers on nonsymmetric problems is |
//| dangerous. It may solve the problem up to desired |
//| precision (sometimes, rarely) or terminate with error |
//| code signalling violation of underlying assumptions. |
//| INPUT PARAMETERS: |
//| State - algorithm state |
//| A - sparse NxN matrix in any sparse storage format. |
//| Using CRS format is recommended because it avoids |
//| internal conversion. An exception will be generated|
//| if A is not NxN matrix (where N is a size specified|
//| during solver object creation). |
//| B - right part, array[N] |
//| RESULT: |
//| This function returns no result. |
//| You can get the solution by calling SparseSolverResults() |
//+------------------------------------------------------------------+
void CAlglib::SparseSolverSolve(CSparseSolverState &state,
                                CSparseMatrix &a,CRowDouble &b)
  {
//--- thin wrapper: solve with general (nonsymmetric) A;
//--- retrieve the solution afterwards via SparseSolverResults()
   CIterativeSparse::SparseSolverSolve(state,a,b);
  }
//+------------------------------------------------------------------+
//| Sparse solver results. |
//| This function must be called after calling one of the |
//| SparseSolverSolve() functions. |
//| INPUT PARAMETERS: |
//| State - algorithm state |
//| OUTPUT PARAMETERS: |
//| X - array[N], solution |
//| Rep - solution report: |
//| * Rep.TerminationType completion code: |
//| * -5 CG method was used for a matrix which is |
//| not positive definite |
//| * -4 overflow/underflow during solution (ill |
//| conditioned problem) |
//| * 1 || residual || <= EpsF* || b || |
//| * 5 MaxIts steps was taken |
//| * 7 rounding errors prevent further progress, |
//| best point found is returned |
//| * 8 the algorithm was terminated early with |
//| SparseSolverRequestTermination() being |
//| called from other thread. |
//| * Rep.IterationsCount contains iterations count |
//| * Rep.NMV contains number of matrix - vector |
//| calculations |
//| * Rep.R2 contains squared residual |
//+------------------------------------------------------------------+
void CAlglib::SparseSolverResults(CSparseSolverState &state,
                                  CRowDouble &x,
                                  CSparseSolverReport &rep)
  {
//--- thin wrapper: fetch solution vector and report from solver state
   CIterativeSparse::SparseSolverResults(state,x,rep);
  }
//+------------------------------------------------------------------+
//|This function turns on/off reporting during out-of-core processing|
//| When the solver works in the out-of-core mode, it can be |
//| configured to report its progress by returning current location. |
//| These location reports implemented as a special kind of the |
//| out-of-core request: |
//| * SparseSolverOOCGetRequestInfo() returns - 1 |
//| * SparseSolverOOCGetRequestData() returns current location |
//| * SparseSolverOOCGetRequestData1() returns squared norm of |
//| the residual |
//| * SparseSolverOOCSendResult() shall NOT be called |
//| This function has no effect when SparseSolverSolve() is used |
//| because this function has no method of reporting its progress. |
//| NOTE: when used with GMRES(k), this function reports progress |
//| every k-th iteration. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| NeedXRep - whether iteration reports are needed or not |
//+------------------------------------------------------------------+
void CAlglib::SparseSolverSetXRep(CSparseSolverState &state,
                                  bool needxrep)
  {
//--- thin wrapper: toggle progress reporting for out-of-core processing
   CIterativeSparse::SparseSolverSetXRep(state,needxrep);
  }
//+------------------------------------------------------------------+
//| This function initiates out-of-core mode of the sparse solver. It|
//| should be used in conjunction with other out-of-core - related |
//| functions of this subspackage in a loop like one given below: |
//| > CAlgLib::SparseSolverOOCStart(state) |
//| > while CAlgLib::SparseSolverOOCContinue(state) do |
//| > CAlgLib::SparseSolverOOCGetRequestInfo(state, RequestType) |
//| > CAlgLib::SparseSolverOOCGetRequestData(state, out X) |
//| > if RequestType = 0 then |
//| > [calculate Y = A * X, with X = R ^ N] |
//| > CAlgLib::SparseSolverOOCSendResult(state, in Y) |
//| > CAlgLib::SparseSolverOOCStop(state, out X, out Report) |
//| INPUT PARAMETERS: |
//| State - solver object |
//+------------------------------------------------------------------+
void CAlglib::SparseSolverOOCStart(CSparseSolverState &state,
                                   CRowDouble &b)
  {
//--- thin wrapper: begin out-of-core session with right part b
   CIterativeSparse::SparseSolverOOCStart(state,b);
  }
//+------------------------------------------------------------------+
//| This function performs iterative solution of the linear system in|
//| the out-of-core mode. It should be used in conjunction with other|
//| out-of-core - related functions of this subspackage in a loop |
//| like one given below: |
//| > CAlgLib::SparseSolverOOCStart(state) |
//| > while CAlgLib::SparseSolverOOCContinue(state) do |
//| > CAlgLib::SparseSolverOOCGetRequestInfo(state, RequestType) |
//| > CAlgLib::SparseSolverOOCGetRequestData(state, out X) |
//| > if RequestType = 0 then |
//| > [calculate Y = A * X, with X = R ^ N] |
//| > CAlgLib::SparseSolverOOCSendResult(state, in Y) |
//| > CAlgLib::SparseSolverOOCStop(state, out X, out Report) |
//| INPUT PARAMETERS: |
//| State - solver object |
//+------------------------------------------------------------------+
bool CAlglib::SparseSolverOOCContinue(CSparseSolverState &state)
  {
//--- thin wrapper: returns true while the out-of-core loop must continue
   return(CIterativeSparse::SparseSolverOOCContinue(state));
  }
//+------------------------------------------------------------------+
//| This function is used to retrieve information about out-of-core |
//| request sent by the solver: |
//| * RequestType = 0 means that matrix - vector products A * x is |
//| requested |
//| * RequestType = -1 means that solver reports its progress; this|
//| request is returned only when reports are |
//|                  activated with SparseSolverSetXRep().         |
//| This function returns just request type; in order to get contents|
//| of the trial vector, use SparseSolverOOCGetRequestData(). |
//| It should be used in conjunction with other out-of-core - related|
//| functions of this subspackage in a loop like one given below: |
//| > CAlgLib::SparseSolverOOCStart(state) |
//| > while CAlgLib::SparseSolverOOCContinue(state) do |
//| > CAlgLib::SparseSolverOOCGetRequestInfo(state, RequestType) |
//| > CAlgLib::SparseSolverOOCGetRequestData(state, out X) |
//| > if RequestType = 0 then |
//| > [calculate Y = A * X, with X = R ^ N] |
//| > CAlgLib::SparseSolverOOCSendResult(state, in Y) |
//| > CAlgLib::SparseSolverOOCStop(state, out X, out Report) |
//| INPUT PARAMETERS: |
//| State - solver running in out-of-core mode |
//| OUTPUT PARAMETERS: |
//| RequestType - type of the request to process: |
//| * 0 for matrix - vector product A * x, with A |
//| being NxN system matrix and X being |
//| N-dimensional vector |
//| *-1 for location and residual report |
//+------------------------------------------------------------------+
void CAlglib::SparseSolverOOCGetRequestInfo(CSparseSolverState &state,
                                            int &requesttype)
  {
//--- thin wrapper: query type of the pending out-of-core request
   CIterativeSparse::SparseSolverOOCGetRequestInfo(state,requesttype);
  }
//+------------------------------------------------------------------+
//| This function is used to retrieve vector associated with |
//| out-of-core request sent by the solver to user code. Depending on|
//| the request type(returned by the SparseSolverOOCGetRequestInfo())|
//| this vector should be multiplied by A or subjected to another |
//| processing. |
//| It should be used in conjunction with other out-of-core - related|
//| functions of this subspackage in a loop like one given below: |
//| > CAlgLib::SparseSolverOOCStart(state) |
//| > while CAlgLib::SparseSolverOOCContinue(state) do |
//| > CAlgLib::SparseSolverOOCGetRequestInfo(state, RequestType) |
//| > CAlgLib::SparseSolverOOCGetRequestData(state, out X) |
//| > if RequestType = 0 then |
//| > [calculate Y = A * X, with X = R ^ N] |
//| > CAlgLib::SparseSolverOOCSendResult(state, in Y) |
//| > CAlgLib::SparseSolverOOCStop(state, out X, out Report) |
//| INPUT PARAMETERS: |
//| State - solver running in out-of-core mode |
//| X - possibly preallocated storage; reallocated if |
//| needed, left unchanged, if large enough to store|
//| request data. |
//| OUTPUT PARAMETERS: |
//| X - array[N] or larger, leading N elements are |
//| filled with vector X. |
//+------------------------------------------------------------------+
void CAlglib::SparseSolverOOCGetRequestData(CSparseSolverState &state,CRowDouble &x)
  {
//--- thin wrapper: fetch the vector attached to the pending request
   CIterativeSparse::SparseSolverOOCGetRequestData(state,x);
  }
//+------------------------------------------------------------------+
//| This function is used to retrieve scalar value associated with |
//| out-of-core request sent by the solver to user code. In the |
//| current ALGLIB version this function is used to retrieve squared |
//| residual norm during progress reports. |
//| INPUT PARAMETERS: |
//| State - solver running in out-of-core mode |
//| OUTPUT PARAMETERS: |
//| V - scalar value associated with the current request|
//+------------------------------------------------------------------+
void CAlglib::SparseSolverOOCGetRequestData1(CSparseSolverState &state,
                                             double &v)
  {
//--- thin wrapper: fetch the scalar attached to the pending request
   CIterativeSparse::SparseSolverOOCGetRequestData1(state,v);
  }
//+------------------------------------------------------------------+
//| This function is used to send user reply to out-of-core request |
//| sent by the solver. Usually it is product A*x for vector X |
//| returned by the solver. |
//| It should be used in conjunction with other out-of-core - related|
//| functions of this subspackage in a loop like one given below: |
//| > CAlgLib::SparseSolverOOCStart(state) |
//| > while CAlgLib::SparseSolverOOCContinue(state) do |
//| > CAlgLib::SparseSolverOOCGetRequestInfo(state, RequestType) |
//| > CAlgLib::SparseSolverOOCGetRequestData(state, out X) |
//| > if RequestType = 0 then |
//| > [calculate Y = A * X, with X = R ^ N] |
//| > CAlgLib::SparseSolverOOCSendResult(state, in Y) |
//| > CAlgLib::SparseSolverOOCStop(state, out X, out Report) |
//| INPUT PARAMETERS: |
//| State - solver running in out-of-core mode |
//| AX - array[N] or larger, leading N elements contain A*x |
//+------------------------------------------------------------------+
void CAlglib::SparseSolverOOCSendResult(CSparseSolverState &state,
                                        CRowDouble &ax)
  {
//--- thin wrapper: pass user-computed A*x back to the solver
   CIterativeSparse::SparseSolverOOCSendResult(state,ax);
  }
//+------------------------------------------------------------------+
//| This function finalizes out-of-core mode of the linear solver. It|
//| should be used in conjunction with other out-of-core - related |
//| functions of this subspackage in a loop like one given below: |
//| > CAlgLib::SparseSolverOOCStart(state) |
//| > while CAlgLib::SparseSolverOOCContinue(state) do |
//| > CAlgLib::SparseSolverOOCGetRequestInfo(state, RequestType) |
//| > CAlgLib::SparseSolverOOCGetRequestData(state, out X) |
//| > if RequestType = 0 then |
//| > [calculate Y = A * X, with X = R ^ N] |
//| > CAlgLib::SparseSolverOOCSendResult(state, in Y) |
//| > CAlgLib::SparseSolverOOCStop(state, out X, out Report) |
//| INPUT PARAMETERS: |
//| State - solver state |
//| OUTPUT PARAMETERS: |
//| X - array[N], the solution. |
//| Zero - filled on the failure(Rep.TerminationType < 0). |
//| Rep - report with additional info: |
//| * Rep.TerminationType completion code: |
//| * -5 CG method was used for a matrix which is |
//| not positive definite |
//| * -4 overflow/underflow during solution (ill |
//| conditioned problem) |
//| * 1 || residual || <= EpsF* || b || |
//| * 5 MaxIts steps was taken |
//| * 7 rounding errors prevent further progress, |
//| best point found is returned |
//| * 8 the algorithm was terminated early with |
//| SparseSolverRequestTermination() being |
//| called from other thread. |
//| * Rep.IterationsCount contains iterations count |
//| * Rep.NMV contains number of matrix - vector |
//| calculations |
//| * Rep.R2 contains squared residual |
//+------------------------------------------------------------------+
void CAlglib::SparseSolverOOCStop(CSparseSolverState &state,
                                  CRowDouble &x,CSparseSolverReport &rep)
  {
//--- thin wrapper: finish out-of-core session, collect solution and report
   CIterativeSparse::SparseSolverOOCStop(state,x,rep);
  }
//+------------------------------------------------------------------+
//| This subroutine submits request for termination of the running |
//| solver. It can be called from some other thread which wants the |
//| solver to terminate or when processing an out-of-core request. |
//| As result, solver stops at point which was "current accepted" |
//| when the termination request was submitted and returns error code|
//| 8 (successful termination). Such termination is a smooth process |
//| which properly deallocates all temporaries. |
//| INPUT PARAMETERS: |
//| State - solver structure |
//| NOTE: calling this function on solver which is NOT running will |
//| have no effect. |
//| NOTE: multiple calls to this function are possible. First call is|
//| counted, subsequent calls are silently ignored. |
//| NOTE: solver clears termination flag on its start, it means that |
//| if some other thread will request termination too soon, its|
//|       request will go unnoticed.                                |
//+------------------------------------------------------------------+
void CAlglib::SparseSolverRequestTermination(CSparseSolverState &state)
  {
//--- delegate to the iterative sparse solver implementation
   CIterativeSparse::SparseSolverRequestTermination(state);
  }
//+------------------------------------------------------------------+
//| This function initializes linear CG Solver. This solver is used |
//| to solve symmetric positive definite problems. If you want to |
//| solve nonsymmetric (or non-positive definite) problem you may use|
//| LinLSQR solver provided by ALGLIB. |
//| USAGE: |
//| 1. User initializes algorithm state with LinCGCreate() call |
//| 2. User tunes solver parameters with LinCGSetCond() and other |
//| functions |
//| 3. Optionally, user sets starting point with |
//| LinCGSetStartingPoint() |
//| 4. User calls LinCGSolveSparse() function which takes algorithm|
//| state and SparseMatrix object. |
//| 5. User calls LinCGResults() to get solution |
//| 6. Optionally, user may call LinCGSolveSparse() again to solve |
//| another problem with different matrix and/or right part |
//| without reinitializing LinCGState structure. |
//| INPUT PARAMETERS: |
//| N - problem dimension, N > 0 |
//| OUTPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//+------------------------------------------------------------------+
void CAlglib::LinCGCreate(int n,CLinCGState &state)
  {
//--- delegate to the linear CG solver implementation
   CLinCG::LinCGCreate(n,state);
  }
//+------------------------------------------------------------------+
//| This function sets starting point. |
//| By default, zero starting point is used. |
//| INPUT PARAMETERS: |
//| X - starting point, array[N] |
//| OUTPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//+------------------------------------------------------------------+
void CAlglib::LinCGSetStartingPoint(CLinCGState &state,CRowDouble &x)
  {
//--- delegate to the linear CG solver implementation
   CLinCG::LinCGSetStartingPoint(state,x);
  }
//+------------------------------------------------------------------+
//| This function changes preconditioning settings of |
//| LinCGSolveSparse() function. By default, SolveSparse() uses |
//| diagonal preconditioner, but if you want to use solver without |
//| preconditioning, you can call this function which forces solver |
//| to use unit matrix for preconditioning. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//+------------------------------------------------------------------+
void CAlglib::LinCGSetPrecUnit(CLinCGState &state)
  {
//--- delegate to the linear CG solver implementation
   CLinCG::LinCGSetPrecUnit(state);
  }
//+------------------------------------------------------------------+
//| This function changes preconditioning settings of |
//| LinCGSolveSparse() function. LinCGSolveSparse() will use diagonal|
//| of the system matrix as preconditioner. This preconditioning mode|
//| is active by default. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//+------------------------------------------------------------------+
void CAlglib::LinCGSetPrecDiag(CLinCGState &state)
  {
//--- delegate to the linear CG solver implementation
   CLinCG::LinCGSetPrecDiag(state);
  }
//+------------------------------------------------------------------+
//| This function sets stopping criteria. |
//| INPUT PARAMETERS: |
//| EpsF - algorithm will be stopped if norm of residual is |
//| less than EpsF* || b ||. |
//| MaxIts - algorithm will be stopped if number of iterations |
//| is more than MaxIts. |
//| OUTPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| NOTES: If both EpsF and MaxIts are zero then small EpsF will be |
//| set to small value. |
//+------------------------------------------------------------------+
void CAlglib::LinCGSetCond(CLinCGState &state,double epsf,int maxits)
  {
//--- delegate to the linear CG solver implementation
   CLinCG::LinCGSetCond(state,epsf,maxits);
  }
//+------------------------------------------------------------------+
//| Procedure for solution of A*x = b with sparse A. |
//| INPUT PARAMETERS: |
//| State - algorithm state |
//| A - sparse matrix in the CRS format (you MUST |
//|                   convert it to CRS format by calling            |
//| SparseConvertToCRS() function). |
//| IsUpper - whether upper or lower triangle of A is used: |
//| * IsUpper = True => only upper triangle is used |
//| and lower triangle is not |
//| referenced at all |
//| * IsUpper = False => only lower triangle is used|
//| and upper triangle is not |
//| referenced at all |
//| B - right part, array[N] |
//| RESULT: |
//| This function returns no result. |
//| You can get solution by calling LinCGResults() |
//| NOTE: this function uses lightweight preconditioning - |
//| multiplication by inverse of diag(A). If you want, you can |
//| turn preconditioning off by calling LinCGSetPrecUnit(). |
//| However, preconditioning cost is low and preconditioner is |
//| very important for solution of badly scaled problems. |
//+------------------------------------------------------------------+
void CAlglib::LinCGSolveSparse(CLinCGState &state,CSparseMatrix &a,
                               bool IsUpper,CRowDouble &b)
  {
//--- delegate to the linear CG solver implementation
   CLinCG::LinCGSolveSparse(state,a,IsUpper,b);
  }
//+------------------------------------------------------------------+
//| CG - solver: results. |
//| This function must be called after LinCGSolve |
//| INPUT PARAMETERS: |
//| State - algorithm state |
//| OUTPUT PARAMETERS: |
//| X - array[N], solution |
//| Rep - optimization report: |
//|               * Rep.TerminationType completion code:             |
//| * -5 input matrix is either not positive |
//| definite, too large or too small |
//| * -4 overflow / underflow during solution |
//| (ill conditioned problem) |
//| * 1 || residual || <= EpsF* || b || |
//| * 5 MaxIts steps was taken |
//| * 7 rounding errors prevent further |
//| progress, best point found is returned |
//| * Rep.IterationsCount contains iterations count |
//|               * NMV contains number of matrix - vector           |
//| calculations |
//+------------------------------------------------------------------+
void CAlglib::LinCGResult(CLinCGState &state,CRowDouble &x,
                          CLinCGReport &rep)
  {
//--- delegate to the linear CG solver implementation
   CLinCG::LinCGResult(state,x,rep);
  }
//+------------------------------------------------------------------+
//| This function sets restart frequency. By default, algorithm is |
//| restarted after N subsequent iterations. |
//+------------------------------------------------------------------+
void CAlglib::LinCGSetRestartFreq(CLinCGState &state,int srf)
  {
//--- delegate to the linear CG solver implementation
   CLinCG::LinCGSetRestartFreq(state,srf);
  }
//+------------------------------------------------------------------+
//| This function sets frequency of residual recalculations. |
//| Algorithm updates residual r_k using iterative formula, but |
//| recalculates it from scratch after each 10 iterations. It is done|
//| to avoid accumulation of numerical errors and to stop algorithm |
//| when r_k starts to grow. |
//| Such low update frequency (1/10) gives very little overhead,     |
//| but makes algorithm a bit more robust against numerical errors. |
//| However, you may change it |
//| INPUT PARAMETERS: |
//| Freq - desired update frequency, Freq >= 0. |
//| Zero value means that no updates will be done. |
//+------------------------------------------------------------------+
void CAlglib::LinCGSetRUpdateFreq(CLinCGState &state,int freq)
  {
//--- delegate to the linear CG solver implementation
   CLinCG::LinCGSetRUpdateFreq(state,freq);
  }
//+------------------------------------------------------------------+
//| This function turns on / off reporting. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| NeedXRep - whether iteration reports are needed or not |
//| If NeedXRep is True, algorithm will call rep() callback function |
//| if it is provided to MinCGOptimize(). |
//+------------------------------------------------------------------+
void CAlglib::LinCGSetXRep(CLinCGState &state,bool needxrep)
  {
//--- delegate to the linear CG solver implementation
   CLinCG::LinCGSetXRep(state,needxrep);
  }
//+------------------------------------------------------------------+
//| This function initializes linear LSQR Solver. This solver is used|
//| to solve non-symmetric (and, possibly, non-square) problems. |
//| Least squares solution is returned for non - compatible systems. |
//| USAGE: |
//| 1. User initializes algorithm state with LinLSQRCreate() call |
//| 2. User tunes solver parameters with LinLSQRSetCond() and other|
//| functions |
//| 3. User calls LinLSQRSolveSparse() function which takes |
//| algorithm state and SparseMatrix object. |
//| 4. User calls LinLSQRResults() to get solution |
//| 5. Optionally, user may call LinLSQRSolveSparse() again to |
//| solve another problem with different matrix and/or right |
//| part without reinitializing LinLSQRState structure. |
//| INPUT PARAMETERS: |
//| M - number of rows in A |
//| N - number of variables, N > 0 |
//| OUTPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| NOTE: see also LinLSQRCreateBuf() for version which reuses |
//| previously allocated place as much as possible. |
//+------------------------------------------------------------------+
void CAlglib::LinLSQRCreate(int m,int n,CLinLSQRState &state)
  {
//--- delegate to the LSQR solver implementation
   CLinLSQR::LinLSQRCreate(m,n,state);
  }
//+------------------------------------------------------------------+
//| This function initializes linear LSQR Solver. It provides exactly|
//| same functionality as LinLSQRCreate(), but reuses previously |
//| allocated space as much as possible. |
//| INPUT PARAMETERS: |
//| M - number of rows in A |
//| N - number of variables, N > 0 |
//| OUTPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//+------------------------------------------------------------------+
void CAlglib::LinLSQRCreateBuf(int m,int n,CLinLSQRState &state)
  {
//--- delegate to the LSQR solver implementation
   CLinLSQR::LinLSQRCreateBuf(m,n,state);
  }
//+------------------------------------------------------------------+
//| This function changes preconditioning settings of |
//| LinLSQRSolveSparse() function. By default, SolveSparse() uses    |
//| diagonal preconditioner, but if you want to use solver without |
//| preconditioning, you can call this function which forces solver |
//| to use unit matrix for preconditioning. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//+------------------------------------------------------------------+
void CAlglib::LinLSQRSetPrecUnit(CLinLSQRState &state)
  {
//--- delegate to the LSQR solver implementation
   CLinLSQR::LinLSQRSetPrecUnit(state);
  }
//+------------------------------------------------------------------+
//| This function changes preconditioning settings of                |
//| LinLSQRSolveSparse() function. LinLSQRSolveSparse() will use     |
//| diagonal of the system matrix as preconditioner. This            |
//| preconditioning mode is active by default.                       |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//+------------------------------------------------------------------+
void CAlglib::LinLSQRSetPrecDiag(CLinLSQRState &state)
  {
//--- delegate to the LSQR solver implementation
   CLinLSQR::LinLSQRSetPrecDiag(state);
  }
//+------------------------------------------------------------------+
//| This function sets optional Tikhonov regularization coefficient. |
//| It is zero by default. |
//| INPUT PARAMETERS: |
//| LambdaI - regularization factor, LambdaI >= 0 |
//| OUTPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//+------------------------------------------------------------------+
void CAlglib::LinLSQRSetLambdaI(CLinLSQRState &state,double lambdai)
  {
//--- delegate to the LSQR solver implementation
   CLinLSQR::LinLSQRSetLambdaI(state,lambdai);
  }
//+------------------------------------------------------------------+
//| Procedure for solution of A*x = b with sparse A. |
//| INPUT PARAMETERS: |
//| State - algorithm state |
//| A - sparse M*N matrix in the CRS format (you MUST |
//|                   convert it to CRS format by calling            |
//| SparseConvertToCRS() function BEFORE you pass it|
//| to this function). |
//| B - right part, array[M] |
//| RESULT: |
//| This function returns no result. |
//| You can get solution by calling LinLSQRResults()                 |
//| NOTE: this function uses lightweight preconditioning - |
//| multiplication by inverse of diag(A). If you want, you can |
//| turn preconditioning off by calling LinLSQRSetPrecUnit(). |
//| However, preconditioning cost is low and preconditioner is |
//| very important for solution of badly scaled problems. |
//+------------------------------------------------------------------+
void CAlglib::LinLSQRSolveSparse(CLinLSQRState &state,CSparseMatrix &a,
                                 CRowDouble &b)
  {
//--- delegate to the LSQR solver implementation
   CLinLSQR::LinLSQRSolveSparse(state,a,b);
  }
//+------------------------------------------------------------------+
//| This function sets stopping criteria. |
//| INPUT PARAMETERS: |
//| EpsA - algorithm will be stopped if |
//| || A^T * Rk || / ( || A || * || Rk ||) <= EpsA. |
//| EpsB - algorithm will be stopped if |
//| || Rk || <= EpsB * || B || |
//| MaxIts - algorithm will be stopped if number of |
//| iterations more than MaxIts. |
//| OUTPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| NOTE: if EpsA, EpsB, EpsC and MaxIts are zero then these |
//|       variables will be set to default values.                   |
//+------------------------------------------------------------------+
void CAlglib::LinLSQRSetCond(CLinLSQRState &state,double epsa,
                             double epsb,int maxits)
  {
//--- delegate to the LSQR solver implementation
   CLinLSQR::LinLSQRSetCond(state,epsa,epsb,maxits);
  }
//+------------------------------------------------------------------+
//| LSQR solver: results. |
//| This function must be called after LinLSQRSolve |
//| INPUT PARAMETERS: |
//| State - algorithm state |
//| OUTPUT PARAMETERS: |
//| X - array[N], solution |
//| Rep - optimization report: |
//|             * Rep.TerminationType completion code:               |
//| * 1 || Rk || <= EpsB* || B || |
//| * 4 ||A^T * Rk|| / (||A|| * ||Rk||) <= EpsA |
//| * 5 MaxIts steps was taken |
//| * 7 rounding errors prevent further progress, |
//| X contains best point found so far. |
//| (sometimes returned on singular systems) |
//| * 8 user requested termination via calling |
//| LinLSQRRequestTermination() |
//| * Rep.IterationsCount contains iterations count |
//|             * NMV contains number of matrix - vector             |
//| calculations |
//+------------------------------------------------------------------+
void CAlglib::LinLSQRResults(CLinLSQRState &state,CRowDouble &x,
                             CLinLSQRReport &rep)
  {
//--- delegate to the LSQR solver implementation
   CLinLSQR::LinLSQRResults(state,x,rep);
  }
//+------------------------------------------------------------------+
//| This function turns on / off reporting. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| NeedXRep - whether iteration reports are needed or not |
//| If NeedXRep is True, algorithm will call rep() |
//| callback function if it is provided to |
//| MinCGOptimize(). |
//+------------------------------------------------------------------+
void CAlglib::LinLSQRSetXRep(CLinLSQRState &state,bool needxrep)
  {
//--- delegate to the LSQR solver implementation
   CLinLSQR::LinLSQRSetXRep(state,needxrep);
  }
//+------------------------------------------------------------------+
//| This function is used to peek into LSQR solver and get current |
//| iteration counter. You can safely "peek" into the solver from |
//| another thread. |
//| INPUT PARAMETERS: |
//| S - solver object |
//| RESULT: |
//| iteration counter, in [0, INF) |
//+------------------------------------------------------------------+
int CAlglib::LinLSQRPeekIterationsCount(CLinLSQRState &s)
  {
//--- thread-safe peek at the LSQR solver's iteration counter
   int iterations=CLinLSQR::LinLSQRPeekIterationsCount(s);
//--- return result
   return(iterations);
  }
//+------------------------------------------------------------------+
//| This subroutine submits request for termination of the running |
//| solver. It can be called from some other thread which wants LSQR |
//| solver to terminate (obviously, the thread running LSQR solver |
//| can not request termination because it is already busy working on|
//| LSQR). |
//| As result, solver stops at point which was "current accepted" |
//| when termination request was submitted and returns error code 8 |
//| (successful termination). Such termination is a smooth process |
//| which properly deallocates all temporaries. |
//| INPUT PARAMETERS: |
//| State - solver structure |
//| NOTE: calling this function on solver which is NOT running will |
//| have no effect. |
//| NOTE: multiple calls to this function are possible. First call is|
//| counted, subsequent calls are silently ignored. |
//| NOTE: solver clears termination flag on its start, it means that |
//| if some other thread will request termination too soon, its|
//|       request will go unnoticed.                                 |
//+------------------------------------------------------------------+
void CAlglib::LinLSQRRequestTermination(CLinLSQRState &state)
  {
//--- delegate to the LSQR solver implementation
   CLinLSQR::LinLSQRRequestTermination(state);
  }
//+------------------------------------------------------------------+
//| LEVENBERG-MARQUARDT-LIKE NONLINEAR SOLVER |
//| DESCRIPTION: |
//| This algorithm solves system of nonlinear equations |
//| F[0](x[0], ..., x[n-1]) = 0 |
//| F[1](x[0], ..., x[n-1]) = 0 |
//| ... |
//| F[M-1](x[0], ..., x[n-1]) = 0 |
//| with M/N do not necessarily coincide. Algorithm converges |
//| quadratically under following conditions: |
//| * the solution set XS is nonempty |
//| * for some xs in XS there exist such neighbourhood N(xs) |
//| that: |
//| * vector function F(x) and its Jacobian J(x) are |
//| continuously differentiable on N |
//| * ||F(x)|| provides local error bound on N, i.e. there |
//| exists such c1, that ||F(x)||>c1*distance(x,XS) |
//| Note that these conditions are much weaker than usual            |
//| non-singularity conditions. For example, algorithm will converge |
//| for any affine function F (whether its Jacobian singular or not).|
//| REQUIREMENTS: |
//| Algorithm will request following information during its |
//| operation: |
//| * function vector F[] and Jacobian matrix at given point X |
//| * value of merit function f(x)=F[0]^2(x)+...+F[M-1]^2(x) at given|
//| point X |
//| USAGE: |
//| 1. User initializes algorithm state with NLEQCreateLM() call |
//| 2. User tunes solver parameters with NLEQSetCond(), |
//| NLEQSetStpMax() and other functions |
//| 3. User calls NLEQSolve() function which takes algorithm state |
//| and pointers (delegates, etc.) to callback functions which |
//| calculate merit function value and Jacobian. |
//| 4. User calls NLEQResults() to get solution |
//| 5. Optionally, user may call NLEQRestartFrom() to solve another |
//| problem with same parameters (N/M) but another starting point |
//| and/or another function vector. NLEQRestartFrom() allows to |
//| reuse already initialized structure. |
//| INPUT PARAMETERS: |
//| N - space dimension, N>1: |
//| * if provided, only leading N elements of X are |
//| used |
//| * if not provided, determined automatically from |
//| size of X |
//| M - system size |
//| X - starting point |
//| OUTPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| NOTES: |
//| 1. you may tune stopping conditions with NLEQSetCond() function |
//| 2. if target function contains exp() or other fast growing |
//| functions, and optimization algorithm makes too large steps |
//| which leads to overflow, use NLEQSetStpMax() function to bound|
//| algorithm's steps. |
//| 3. this algorithm is a slightly modified implementation of the |
//| method described in 'Levenberg-Marquardt method for |
//| constrained nonlinear equations with strong local convergence |
//| properties' by Christian Kanzow Nobuo Yamashita and Masao |
//| Fukushima and further developed in 'On the convergence of a |
//| New Levenberg-Marquardt Method' by Jin-yan Fan and Ya-Xiang |
//| Yuan. |
//+------------------------------------------------------------------+
void CAlglib::NlEqCreateLM(const int n,const int m,double &x[],
                           CNlEqStateShell &state)
  {
//--- create Levenberg-Marquardt-like nonlinear solver state (explicit N)
   CNlEq::NlEqCreateLM(n,m,x,state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| LEVENBERG-MARQUARDT-LIKE NONLINEAR SOLVER |
//| DESCRIPTION: |
//| This algorithm solves system of nonlinear equations |
//| F[0](x[0], ..., x[n-1]) = 0 |
//| F[1](x[0], ..., x[n-1]) = 0 |
//| ... |
//| F[M-1](x[0], ..., x[n-1]) = 0 |
//| with M/N do not necessarily coincide. Algorithm converges |
//| quadratically under following conditions: |
//| * the solution set XS is nonempty |
//| * for some xs in XS there exist such neighbourhood N(xs) |
//| that: |
//| * vector function F(x) and its Jacobian J(x) are |
//| continuously differentiable on N |
//| * ||F(x)|| provides local error bound on N, i.e. there |
//| exists such c1, that ||F(x)||>c1*distance(x,XS) |
//| Note that these conditions are much weaker than usual            |
//| non-singularity conditions. For example, algorithm will converge |
//| for any affine function F (whether its Jacobian singular or not).|
//| REQUIREMENTS: |
//| Algorithm will request following information during its |
//| operation: |
//| * function vector F[] and Jacobian matrix at given point X |
//| * value of merit function f(x)=F[0]^2(x)+...+F[M-1]^2(x) at given|
//| point X |
//| USAGE: |
//| 1. User initializes algorithm state with NLEQCreateLM() call |
//| 2. User tunes solver parameters with NLEQSetCond(), |
//| NLEQSetStpMax() and other functions |
//| 3. User calls NLEQSolve() function which takes algorithm state |
//| and pointers (delegates, etc.) to callback functions which |
//| calculate merit function value and Jacobian. |
//| 4. User calls NLEQResults() to get solution |
//| 5. Optionally, user may call NLEQRestartFrom() to solve another |
//| problem with same parameters (N/M) but another starting point |
//| and/or another function vector. NLEQRestartFrom() allows to |
//| reuse already initialized structure. |
//| INPUT PARAMETERS: |
//| N - space dimension, N>1: |
//| * if provided, only leading N elements of X are |
//| used |
//| * if not provided, determined automatically from |
//| size of X |
//| M - system size |
//| X - starting point |
//| OUTPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| NOTES: |
//| 1. you may tune stopping conditions with NLEQSetCond() function |
//| 2. if target function contains exp() or other fast growing |
//| functions, and optimization algorithm makes too large steps |
//| which leads to overflow, use NLEQSetStpMax() function to bound|
//| algorithm's steps. |
//| 3. this algorithm is a slightly modified implementation of the |
//| method described in 'Levenberg-Marquardt method for |
//| constrained nonlinear equations with strong local convergence |
//| properties' by Christian Kanzow Nobuo Yamashita and Masao |
//| Fukushima and further developed in 'On the convergence of a |
//| New Levenberg-Marquardt Method' by Jin-yan Fan and Ya-Xiang |
//| Yuan. |
//+------------------------------------------------------------------+
void CAlglib::NlEqCreateLM(const int m,double &x[],CNlEqStateShell &state)
  {
//--- infer the problem dimension N from the length of the starting point
   CNlEq::NlEqCreateLM(CAp::Len(x),m,x,state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| This function sets stopping conditions for the nonlinear solver |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| EpsF - >=0 |
//| The subroutine finishes its work if on k+1-th |
//| iteration the condition ||F||<=EpsF is satisfied |
//| MaxIts - maximum number of iterations. If MaxIts=0, the |
//| number of iterations is unlimited. |
//| Passing EpsF=0 and MaxIts=0 simultaneously will lead to |
//| automatic stopping criterion selection (small EpsF). |
//| NOTES: |
//+------------------------------------------------------------------+
void CAlglib::NlEqSetCond(CNlEqStateShell &state,const double epsf,
                          const int maxits)
  {
//--- delegate to the nonlinear solver implementation
   CNlEq::NlEqSetCond(state.GetInnerObj(),epsf,maxits);
  }
//+------------------------------------------------------------------+
//| This function turns on/off reporting. |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| NeedXRep- whether iteration reports are needed or not |
//| If NeedXRep is True, algorithm will call rep() callback function |
//| if it is provided to NLEQSolve(). |
//+------------------------------------------------------------------+
void CAlglib::NlEqSetXRep(CNlEqStateShell &state,const bool needxrep)
  {
//--- delegate to the nonlinear solver implementation
   CNlEq::NlEqSetXRep(state.GetInnerObj(),needxrep);
  }
//+------------------------------------------------------------------+
//| This function sets maximum step length |
//| INPUT PARAMETERS: |
//| State - structure which stores algorithm state |
//| StpMax - maximum step length, >=0. Set StpMax to 0.0, if |
//| you don't want to limit step length. |
//| Use this subroutine when target function contains exp() or other |
//| fast growing functions, and algorithm makes too large steps which|
//| lead to overflow. This function allows us to reject steps that |
//| are too large (and therefore expose us to the possible overflow) |
//| without actually calculating function value at the x+stp*d. |
//+------------------------------------------------------------------+
void CAlglib::NlEqSetStpMax(CNlEqStateShell &state,const double stpmax)
  {
//--- delegate to the nonlinear solver implementation
   CNlEq::NlEqSetStpMax(state.GetInnerObj(),stpmax);
  }
//+------------------------------------------------------------------+
//| This function provides reverse communication interface |
//| Reverse communication interface is not documented or recommended |
//| to use. |
//| See below for functions which provide better documented API |
//+------------------------------------------------------------------+
bool CAlglib::NlEqIteration(CNlEqStateShell &state)
  {
//--- advance the reverse-communication state machine by one step
   bool more=CNlEq::NlEqIteration(state.GetInnerObj());
//--- return result
   return(more);
  }
//+------------------------------------------------------------------+
//| This family of functions is used to launch iterations of         |
//| nonlinear solver |
//| These functions accept following parameters: |
//| func - callback which calculates function (or merit |
//| function) value func at given point x |
//| jac - callback which calculates function vector fi[] |
//| and Jacobian jac at given point x |
//| rep - optional callback which is called after each |
//| iteration can be null |
//| obj - optional object which is passed to |
//| func/grad/hess/jac/rep can be null |
//+------------------------------------------------------------------+
void CAlglib::NlEqSolve(CNlEqStateShell &state,CNDimensional_Func &func,
                        CNDimensional_Jac &jac,CNDimensional_Rep &rep,
                        bool rep_status,CObject &obj)
  {
//--- reverse-communication loop: keep serving solver requests
//--- until NlEqIteration() reports that the solver has finished
   while(CAlglib::NlEqIteration(state))
     {
      if(state.GetNeedF())
        {
         //--- merit function value requested
         func.Func(state.GetInnerObj().m_x,state.GetInnerObj().m_f,obj);
        }
      else
         if(state.GetNeedFIJ())
           {
            //--- function vector and Jacobian requested
            jac.Jac(state.GetInnerObj().m_x,state.GetInnerObj().m_fi,state.GetInnerObj().m_j,obj);
           }
         else
            if(state.GetInnerObj().m_xupdated)
              {
               //--- iteration report; forwarded only when a callback was supplied
               if(rep_status)
                  rep.Rep(state.GetInnerObj().m_x,state.GetInnerObj().m_f,obj);
              }
            else
              {
               //--- unknown request flag - caller did not provide required callbacks
               Print("ALGLIB: error in 'nleqsolve' (some derivatives were not provided?)");
               CAp::exception_happened=true;
               break;
              }
     }
  }
//+------------------------------------------------------------------+
//| NLEQ solver results |
//| INPUT PARAMETERS: |
//| State - algorithm state. |
//| OUTPUT PARAMETERS: |
//| X - array[0..N-1], solution |
//| Rep - optimization report: |
//|               * Rep.TerminationType completion code:             |
//| * -4 ERROR: algorithm has converged to the|
//| stationary point Xf which is local |
//| minimum of f=F[0]^2+...+F[m-1]^2, |
//| but is not solution of nonlinear |
//| system. |
//| * 1 sqrt(f)<=EpsF. |
//| * 5 MaxIts steps was taken |
//| * 7 stopping conditions are too |
//| stringent, further improvement is |
//| impossible |
//| * Rep.IterationsCount contains iterations count |
//|               * NFEV contains number of function calculations    |
//| * ActiveConstraints contains number of active |
//| constraints |
//+------------------------------------------------------------------+
void CAlglib::NlEqResults(CNlEqStateShell &state,double &x[],
                          CNlEqReportShell &rep)
  {
//--- delegate to the nonlinear solver implementation
   CNlEq::NlEqResults(state.GetInnerObj(),x,rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| NLEQ solver results |
//| Buffered implementation of NLEQResults(), which uses |
//| pre-allocated buffer to store X[]. If buffer size is too small, |
//| it resizes buffer. It is intended to be used in the inner cycles |
//| of performance critical algorithms where array reallocation |
//| penalty is too large to be ignored. |
//+------------------------------------------------------------------+
void CAlglib::NlEqResultsBuf(CNlEqStateShell &state,double &x[],
                             CNlEqReportShell &rep)
  {
//--- delegate to the nonlinear solver implementation (buffered variant)
   CNlEq::NlEqResultsBuf(state.GetInnerObj(),x,rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| This subroutine restarts CG algorithm from new point. All |
//| optimization parameters are left unchanged. |
//| This function allows to solve multiple optimization problems |
//| (which must have same number of dimensions) without object |
//| reallocation penalty. |
//| INPUT PARAMETERS: |
//| State - structure used for reverse communication |
//| previously allocated with MinCGCreate call. |
//| X - new starting point. |
//| BndL - new lower bounds |
//| BndU - new upper bounds |
//+------------------------------------------------------------------+
void CAlglib::NlEqRestartFrom(CNlEqStateShell &state,double &x[])
  {
//--- delegate to the nonlinear solver implementation
   CNlEq::NlEqRestartFrom(state.GetInnerObj(),x);
  }
//+------------------------------------------------------------------+
//| Gamma function |
//| Input parameters: |
//| X - argument |
//| Domain: |
//| 0 < X < 171.6 |
//| -170 < X < 0, X is not an integer. |
//| Relative error: |
//| arithmetic domain # trials peak rms |
//| IEEE -170,-33 20000 2.3e-15 3.3e-16 |
//| IEEE -33, 33 20000 9.4e-16 2.2e-16 |
//| IEEE 33, 171.6 20000 2.3e-15 3.2e-16 |
//+------------------------------------------------------------------+
double CAlglib::GammaFunction(const double x)
  {
//--- evaluate Gamma(x) via the special-functions implementation
   double value=CGammaFunc::GammaFunc(x);
//--- return result
   return(value);
  }
//+------------------------------------------------------------------+
//| Natural logarithm of gamma function |
//| Input parameters: |
//| X - argument |
//| Result: |
//| logarithm of the absolute value of the Gamma(X). |
//| Output parameters: |
//| SgnGam - sign(Gamma(X)) |
//| Domain: |
//| 0 < X < 2.55e305 |
//| -2.55e305 < X < 0, X is not an integer. |
//| ACCURACY: |
//| arithmetic domain # trials peak rms |
//| IEEE 0, 3 28000 5.4e-16 1.1e-16 |
//| IEEE 2.718, 2.556e305 40000 3.5e-16 8.3e-17 |
//| The error criterion was relative when the function magnitude |
//| was greater than one but absolute when it was less than one. |
//| The following test used the relative error criterion, though |
//| at certain points the relative error could be much higher than |
//| indicated. |
//| IEEE -200, -4 10000 4.8e-16 1.3e-16 |
//+------------------------------------------------------------------+
double CAlglib::LnGamma(const double x,double &sgngam)
  {
//--- reset output sign before the call
   sgngam=0;
//--- compute ln|Gamma(x)|; sgngam receives sign(Gamma(x))
   double value=CGammaFunc::LnGamma(x,sgngam);
//--- return result
   return(value);
  }
//+------------------------------------------------------------------+
//| Error function |
//| The integral is |
//| x |
//| - |
//| 2 | | 2 |
//| erf(x) = -------- | exp( - t ) dt. |
//| sqrt(pi) | | |
//| - |
//| 0 |
//| For 0 <= |x| < 1, erf(x) = x * P4(x**2)/Q5(x**2); otherwise |
//| erf(x) = 1 - erfc(x). |
//| ACCURACY: |
//| Relative error: |
//| arithmetic domain # trials peak rms |
//| IEEE 0,1 30000 3.7e-16 1.0e-16 |
//+------------------------------------------------------------------+
double CAlglib::ErrorFunction(const double x)
  {
//--- evaluate erf(x) via the normal-distribution implementation
   double value=CNormalDistr::ErrorFunction(x);
//--- return result
   return(value);
  }
//+------------------------------------------------------------------+
//| Complementary error function |
//| 1 - erf(x) = |
//| inf. |
//| - |
//| 2 | | 2 |
//| erfc(x) = -------- | exp( - t ) dt |
//| sqrt(pi) | | |
//| - |
//| x |
//| For small x, erfc(x) = 1 - erf(x); otherwise rational |
//| approximations are computed. |
//| ACCURACY: |
//| Relative error: |
//| arithmetic domain # trials peak rms |
//| IEEE 0,26.6417 30000 5.7e-14 1.5e-14 |
//+------------------------------------------------------------------+
double CAlglib::ErrorFunctionC(const double x)
  {
//--- evaluate erfc(x) via the normal-distribution implementation
   double value=CNormalDistr::ErrorFunctionC(x);
//--- return result
   return(value);
  }
//+------------------------------------------------------------------+
//| Normal distribution function |
//| Returns the area under the Gaussian probability density |
//| function, integrated from minus infinity to x: |
//| x |
//| - |
//| 1 | | 2 |
//| ndtr(x) = --------- | exp( - t /2 ) dt |
//| sqrt(2pi) | | |
//| - |
//| -inf. |
//| = ( 1 + erf(z) ) / 2 |
//| = erfc(z) / 2 |
//| where z = x/sqrt(2). Computation is via the functions |
//| erf and erfc. |
//| ACCURACY: |
//| Relative error: |
//| arithmetic domain # trials peak rms |
//| IEEE -13,0 30000 3.4e-14 6.7e-15 |
//+------------------------------------------------------------------+
double CAlglib::NormalDistribution(const double x)
  {
//--- standard normal CDF, computed by CNormalDistr
   double cdf=CNormalDistr::NormalDistribution(x);
//--- return result
   return(cdf);
  }
//+------------------------------------------------------------------+
//| Normal distribution PDF |
//| Returns Gaussian probability density function: |
//| 1 |
//| f(x) = --------- * exp(-x^2/2) |
//| sqrt(2pi) |
//| Cephes Math Library Release 2.8: June, 2000 |
//| Copyright 1984, 1987, 1988, 1992, 2000 by Stephen L. Moshier |
//+------------------------------------------------------------------+
double CAlglib::NormalPDF(const double x)
  {
//--- standard normal density, computed by CNormalDistr
   double pdf=CNormalDistr::NormalPDF(x);
//--- return result
   return(pdf);
  }
//+------------------------------------------------------------------+
//| Normal distribution CDF |
//| Returns the area under the Gaussian probability density |
//| function, integrated from minus infinity to x: |
//| x |
//| - |
//| 1 | | 2 |
//| ndtr(x) = --------- | exp( - t /2 ) dt |
//| sqrt(2pi) | | |
//| - |
//| -inf. |
//| |
//| = ( 1 + erf(z) ) / 2 |
//| = erfc(z) / 2 |
//| where z = x/sqrt(2). Computation is via the functions erf and |
//| erfc. |
//| ACCURACY: |
//| Relative error: |
//| arithmetic domain # trials peak rms |
//| IEEE -13,0 30000 3.4e-14 6.7e-15 |
//| Cephes Math Library Release 2.8: June, 2000 |
//| Copyright 1984, 1987, 1988, 1992, 2000 by Stephen L. Moshier |
//+------------------------------------------------------------------+
double CAlglib::NormalCDF(const double x)
  {
//--- cumulative probability up to x for the standard normal law
   double area=CNormalDistr::NormalCDF(x);
//--- return result
   return(area);
  }
//+------------------------------------------------------------------+
//| Inverse of the error function |
//+------------------------------------------------------------------+
double CAlglib::InvErF(double e)
  {
//--- inverse error function, delegated to CNormalDistr
   double inv=CNormalDistr::InvErF(e);
//--- return result
   return(inv);
  }
//+------------------------------------------------------------------+
//| Inverse of Normal distribution function |
//| Returns the argument, x, for which the area under the |
//| Gaussian probability density function (integrated from |
//| minus infinity to x) is equal to y. |
//| For small arguments 0 < y < exp(-2), the program computes |
//| z = sqrt( -2.0 * log(y) ); then the approximation is |
//| x = z - log(z)/z - (1/z) P(1/z) / Q(1/z). |
//| There are two rational functions P/Q, one for 0 < y < exp(-32) |
//| and the other for y up to exp(-2). For larger arguments, |
//| w = y - 0.5, and x/sqrt(2pi) = w + w**3 R(w**2)/S(w**2)). |
//| ACCURACY: |
//| Relative error: |
//| arithmetic domain # trials peak rms |
//| IEEE 0.125, 1 20000 7.2e-16 1.3e-16 |
//| IEEE 3e-308, 0.135 50000 4.6e-16 9.8e-17 |
//+------------------------------------------------------------------+
double CAlglib::InvNormalDistribution(const double y0)
  {
//--- quantile of the standard normal distribution at probability y0
   double quantile=CNormalDistr::InvNormalDistribution(y0);
//--- return result
   return(quantile);
  }
//+------------------------------------------------------------------+
//| Inverse of Normal CDF |
//| Returns the argument, x, for which the area under the Gaussian |
//| probability density function (integrated from minus infinity to |
//| x) is equal to y. |
//| For small arguments 0 < y < exp(-2), the program computes |
//| z = sqrt( -2.0 * log(y) ); then the approximation is |
//| x = z - log(z)/z - (1/z) P(1/z) / Q(1/z). |
//| There are two rational functions P/Q, one for 0 < y < exp(-32) |
//| and the other for y up to exp(-2). For larger arguments, |
//| w = y - 0.5, and x/sqrt(2pi) = w + w**3 R(w**2)/S(w**2)). |
//| ACCURACY: |
//| Relative error: |
//| arithmetic domain # trials peak rms |
//| IEEE 0.125, 1 20000 7.2e-16 1.3e-16 |
//| IEEE 3e-308, 0.135 50000 4.6e-16 9.8e-17 |
//| Cephes Math Library Release 2.8: June, 2000 |
//| Copyright 1984, 1987, 1988, 1992, 2000 by Stephen L. Moshier |
//+------------------------------------------------------------------+
double CAlglib::InvNormalCDF(const double y0)
  {
//--- inverse of the normal CDF, delegated to CNormalDistr
   double quantile=CNormalDistr::InvNormalCDF(y0);
//--- return result
   return(quantile);
  }
//+------------------------------------------------------------------+
//| Bivariate normal PDF |
//| Returns probability density function of the bivariate Gaussian |
//| with correlation parameter equal to Rho: |
//| 1 ( x^2 - 2*rho*x*y + y^2 )|
//| f(x,y,rho) = ----------------- * exp( - ----------------------- )|
//| 2pi*sqrt(1-rho^2) ( 2*(1-rho^2) )|
//| with -1<rho<+1 and arbitrary x, y. |
//| This function won't fail as long as Rho is in (-1,+1) range. |
//+------------------------------------------------------------------+
double CAlglib::BivariateNormalPDF(const double x,const double y,const double rho)
  {
//--- bivariate Gaussian density with correlation rho, via CNormalDistr
   double density=CNormalDistr::BivariateNormalPDF(x,y,rho);
//--- return result
   return(density);
  }
//+------------------------------------------------------------------+
//| Bivariate normal CDF |
//| Returns the area under the bivariate Gaussian PDF with |
//| correlation parameter equal to Rho, integrated from minus |
//| infinity to (x,y): |
//| x y |
//| - - |
//| 1 | | | | |
//| bvn(x,y,rho) = ------------------- | | f(u,v,rho)*du*dv |
//| 2pi*sqrt(1-rho^2) | | | | |
//| - - |
//| -INF -INF |
//| where |
//| ( u^2 - 2*rho*u*v + v^2 ) |
//| f(u,v,rho) = exp( - ----------------------- ) |
//| ( 2*(1-rho^2) ) |
//| with -1<rho<+1 and arbitrary x, y. |
//| This subroutine uses high-precision approximation scheme proposed|
//| by Alan Genz in "Numerical Computation of Rectangular Bivariate |
//| and Trivariate Normal and t probabilities", which computes CDF |
//| with absolute error roughly equal to 1e-14. |
//| This function won't fail as long as Rho is in (-1,+1) range. |
//+------------------------------------------------------------------+
double CAlglib::BivariateNormalCDF(double x,double y,const double rho)
  {
//--- bivariate Gaussian CDF with correlation rho, via CNormalDistr
   double area=CNormalDistr::BivariateNormalCDF(x,y,rho);
//--- return result
   return(area);
  }
//+------------------------------------------------------------------+
//| Incomplete gamma integral |
//| The function is defined by |
//| x |
//| - |
//| 1 | | -t a-1 |
//| igam(a,x) = ----- | e t dt. |
//| - | | |
//| | (a) - |
//| 0 |
//| In this implementation both arguments must be positive. |
//| The integral is evaluated by either a power series or |
//| continued fraction expansion, depending on the relative |
//| values of a and x. |
//| ACCURACY: |
//| Relative error: |
//| arithmetic domain # trials peak rms |
//| IEEE 0,30 200000 3.6e-14 2.9e-15 |
//| IEEE 0,100 300000 9.9e-14 1.5e-14 |
//+------------------------------------------------------------------+
double CAlglib::IncompleteGamma(const double a,const double x)
  {
//--- regularized lower incomplete gamma integral, via CIncGammaF
   double igam=CIncGammaF::IncompleteGamma(a,x);
//--- return result
   return(igam);
  }
//+------------------------------------------------------------------+
//| Complemented incomplete gamma integral |
//| The function is defined by |
//| igamc(a,x) = 1 - igam(a,x) |
//| inf. |
//| - |
//| 1 | | -t a-1 |
//| = ----- | e t dt. |
//| - | | |
//| | (a) - |
//| x |
//| In this implementation both arguments must be positive. |
//| The integral is evaluated by either a power series or |
//| continued fraction expansion, depending on the relative |
//| values of a and x. |
//| ACCURACY: |
//| Tested at random a, x. |
//| a x Relative error: |
//| arithmetic domain domain # trials peak rms |
//| IEEE 0.5,100 0,100 200000 1.9e-14 1.7e-15|
//| IEEE 0.01,0.5 0,100 200000 1.4e-13 1.6e-15|
//+------------------------------------------------------------------+
double CAlglib::IncompleteGammaC(const double a,const double x)
  {
//--- complemented incomplete gamma integral, via CIncGammaF
   double igamc=CIncGammaF::IncompleteGammaC(a,x);
//--- return result
   return(igamc);
  }
//+------------------------------------------------------------------+
//| Inverse of complemented incomplete gamma integral                |
//| Given p, the function finds x such that |
//| igamc( a, x ) = p. |
//| Starting with the approximate value |
//| 3 |
//| x = a t |
//| where |
//| t = 1 - d - ndtri(p) sqrt(d) |
//| and |
//| d = 1/9a, |
//| the routine performs up to 10 Newton iterations to find the |
//| root of igamc(a,x) - p = 0. |
//| ACCURACY: |
//| Tested at random a, p in the intervals indicated. |
//| a p Relative error: |
//| arithmetic domain domain # trials peak rms |
//| IEEE 0.5,100 0,0.5 100000 1.0e-14 1.7e-15|
//| IEEE 0.01,0.5 0,0.5 100000 9.0e-14 3.4e-15|
//| IEEE 0.5,10000 0,0.5 20000 2.3e-13 3.8e-14|
//+------------------------------------------------------------------+
double CAlglib::InvIncompleteGammaC(const double a,const double y0)
  {
//--- solve igamc(a,x)=y0 for x, delegated to CIncGammaF
   double root=CIncGammaF::InvIncompleteGammaC(a,y0);
//--- return result
   return(root);
  }
//+------------------------------------------------------------------+
//| Airy function |
//| Solution of the differential equation |
//| y"(x) = xy. |
//| The function returns the two independent solutions Ai, Bi |
//| and their first derivatives Ai'(x), Bi'(x). |
//| Evaluation is by power series summation for small x, |
//| by rational minimax approximations for large x. |
//| ACCURACY: |
//| Error criterion is absolute when function <= 1, relative |
//| when function > 1, except * denotes relative error criterion. |
//| For large negative x, the absolute error increases as x^1.5. |
//| For large positive x, the relative error increases as x^1.5. |
//| Arithmetic domain function # trials peak rms |
//| IEEE -10, 0 Ai 10000 1.6e-15 2.7e-16 |
//| IEEE 0, 10 Ai 10000 2.3e-14* 1.8e-15*|
//| IEEE -10, 0 Ai' 10000 4.6e-15 7.6e-16 |
//| IEEE 0, 10 Ai' 10000 1.8e-14* 1.5e-15*|
//| IEEE -10, 10 Bi 30000 4.2e-15 5.3e-16 |
//| IEEE -10, 10 Bi' 30000 4.9e-15 7.3e-16 |
//+------------------------------------------------------------------+
void CAlglib::Airy(const double x,double &ai,double &aip,double &bi,double &bip)
  {
//--- reset all four output arguments before delegating
   bip=0;
   bi=0;
   aip=0;
   ai=0;
//--- CAiryF fills Ai(x), Ai'(x), Bi(x), Bi'(x)
   CAiryF::Airy(x,ai,aip,bi,bip);
  }
//+------------------------------------------------------------------+
//| Bessel function of order zero |
//| Returns Bessel function of order zero of the argument. |
//| The domain is divided into the intervals [0, 5] and |
//| (5, infinity). In the first interval the following rational |
//| approximation is used: |
//| 2 2 |
//| (w - r ) (w - r ) P (w) / Q (w) |
//| 1 2 3 8 |
//| 2 |
//| where w = x and the two r's are zeros of the function. |
//| In the second interval, the Hankel asymptotic expansion |
//| is employed with two rational functions of degree 6/6 |
//| and 7/7. |
//| ACCURACY: |
//| Absolute error: |
//| arithmetic domain # trials peak rms |
//| IEEE 0, 30 60000 4.2e-16 1.1e-16 |
//+------------------------------------------------------------------+
double CAlglib::BesselJ0(const double x)
  {
//--- Bessel function J0, delegated to CBessel
   double j0=CBessel::BesselJ0(x);
//--- return result
   return(j0);
  }
//+------------------------------------------------------------------+
//| Bessel function of order one |
//| Returns Bessel function of order one of the argument. |
//| The domain is divided into the intervals [0, 8] and |
//| (8, infinity). In the first interval a 24 term Chebyshev |
//| expansion is used. In the second, the asymptotic |
//| trigonometric representation is employed using two |
//| rational functions of degree 5/5. |
//| ACCURACY: |
//| Absolute error: |
//| arithmetic domain # trials peak rms |
//| IEEE 0, 30 30000 2.6e-16 1.1e-16 |
//+------------------------------------------------------------------+
double CAlglib::BesselJ1(const double x)
  {
//--- Bessel function J1, delegated to CBessel
   double j1=CBessel::BesselJ1(x);
//--- return result
   return(j1);
  }
//+------------------------------------------------------------------+
//| Bessel function of integer order |
//| Returns Bessel function of order n, where n is a |
//| (possibly negative) integer. |
//| The ratio of jn(x) to j0(x) is computed by backward |
//| recurrence. First the ratio jn/jn-1 is found by a |
//| continued fraction expansion. Then the recurrence |
//| relating successive orders is applied until j0 or j1 is |
//| reached. |
//| If n = 0 or 1 the routine for j0 or j1 is called |
//| directly. |
//| ACCURACY: |
//| Absolute error: |
//| arithmetic range # trials peak rms |
//| IEEE 0, 30 5000 4.4e-16 7.9e-17 |
//| Not suitable for large n or x. Use jv() (fractional order) |
//| instead. |
//+------------------------------------------------------------------+
double CAlglib::BesselJN(const int n,const double x)
  {
//--- integer-order Bessel function Jn, delegated to CBessel
   double jn=CBessel::BesselJN(n,x);
//--- return result
   return(jn);
  }
//+------------------------------------------------------------------+
//| Bessel function of the second kind, order zero |
//| Returns Bessel function of the second kind, of order |
//| zero, of the argument. |
//| The domain is divided into the intervals [0, 5] and |
//| (5, infinity). In the first interval a rational approximation |
//| R(x) is employed to compute |
//| y0(x) = R(x) + 2 * log(x) * j0(x) / PI. |
//| Thus a call to j0() is required. |
//| In the second interval, the Hankel asymptotic expansion |
//| is employed with two rational functions of degree 6/6 |
//| and 7/7. |
//| ACCURACY: |
//| Absolute error, when y0(x) < 1; else relative error: |
//| arithmetic domain # trials peak rms |
//| IEEE 0, 30 30000 1.3e-15 1.6e-16 |
//+------------------------------------------------------------------+
double CAlglib::BesselY0(const double x)
  {
//--- second-kind Bessel function Y0, delegated to CBessel
   double y0=CBessel::BesselY0(x);
//--- return result
   return(y0);
  }
//+------------------------------------------------------------------+
//| Bessel function of second kind of order one |
//| Returns Bessel function of the second kind of order one |
//| of the argument. |
//| The domain is divided into the intervals [0, 8] and |
//| (8, infinity). In the first interval a 25 term Chebyshev |
//| expansion is used, and a call to j1() is required. |
//| In the second, the asymptotic trigonometric representation |
//| is employed using two rational functions of degree 5/5. |
//| ACCURACY: |
//| Absolute error: |
//| arithmetic domain # trials peak rms |
//| IEEE 0, 30 30000 1.0e-15 1.3e-16 |
//+------------------------------------------------------------------+
double CAlglib::BesselY1(const double x)
  {
//--- second-kind Bessel function Y1, delegated to CBessel
   double y1=CBessel::BesselY1(x);
//--- return result
   return(y1);
  }
//+------------------------------------------------------------------+
//| Bessel function of second kind of integer order |
//| Returns Bessel function of order n, where n is a |
//| (possibly negative) integer. |
//| The function is evaluated by forward recurrence on |
//| n, starting with values computed by the routines |
//| y0() and y1(). |
//| If n = 0 or 1 the routine for y0 or y1 is called |
//| directly. |
//| ACCURACY: |
//| Absolute error, except relative |
//| when y > 1: |
//| arithmetic domain # trials peak rms |
//| IEEE 0, 30 30000 3.4e-15 4.3e-16 |
//+------------------------------------------------------------------+
double CAlglib::BesselYN(const int n,const double x)
  {
//--- second-kind integer-order Bessel function Yn, delegated to CBessel
   double yn=CBessel::BesselYN(n,x);
//--- return result
   return(yn);
  }
//+------------------------------------------------------------------+
//| Modified Bessel function of order zero |
//| Returns modified Bessel function of order zero of the |
//| argument. |
//| The function is defined as i0(x) = j0( ix ). |
//| The range is partitioned into the two intervals [0,8] and |
//| (8, infinity). Chebyshev polynomial expansions are employed |
//| in each interval. |
//| ACCURACY: |
//| Relative error: |
//| arithmetic domain # trials peak rms |
//| IEEE 0,30 30000 5.8e-16 1.4e-16 |
//+------------------------------------------------------------------+
double CAlglib::BesselI0(const double x)
  {
//--- modified Bessel function I0, delegated to CBessel
   double i0=CBessel::BesselI0(x);
//--- return result
   return(i0);
  }
//+------------------------------------------------------------------+
//| Modified Bessel function of order one |
//| Returns modified Bessel function of order one of the |
//| argument. |
//| The function is defined as i1(x) = -i j1( ix ). |
//| The range is partitioned into the two intervals [0,8] and |
//| (8, infinity). Chebyshev polynomial expansions are employed |
//| in each interval. |
//| ACCURACY: |
//| Relative error: |
//| arithmetic domain # trials peak rms |
//| IEEE 0, 30 30000 1.9e-15 2.1e-16 |
//+------------------------------------------------------------------+
double CAlglib::BesselI1(const double x)
  {
//--- modified Bessel function I1, delegated to CBessel
   double i1=CBessel::BesselI1(x);
//--- return result
   return(i1);
  }
//+------------------------------------------------------------------+
//| Modified Bessel function, second kind, order zero |
//| Returns modified Bessel function of the second kind |
//| of order zero of the argument. |
//| The range is partitioned into the two intervals [0,8] and |
//| (8, infinity). Chebyshev polynomial expansions are employed |
//| in each interval. |
//| ACCURACY: |
//| Tested at 2000 random points between 0 and 8. Peak absolute |
//| error (relative when K0 > 1) was 1.46e-14; rms, 4.26e-15. |
//| Relative error: |
//| arithmetic domain # trials peak rms |
//| IEEE 0, 30 30000 1.2e-15 1.6e-16 |
//+------------------------------------------------------------------+
double CAlglib::BesselK0(const double x)
  {
//--- modified second-kind Bessel function K0, delegated to CBessel
   double k0=CBessel::BesselK0(x);
//--- return result
   return(k0);
  }
//+------------------------------------------------------------------+
//| Modified Bessel function, second kind, order one |
//| Computes the modified Bessel function of the second kind |
//| of order one of the argument. |
//| The range is partitioned into the two intervals [0,2] and |
//| (2, infinity). Chebyshev polynomial expansions are employed |
//| in each interval. |
//| ACCURACY: |
//| Relative error: |
//| arithmetic domain # trials peak rms |
//| IEEE 0, 30 30000 1.2e-15 1.6e-16 |
//+------------------------------------------------------------------+
double CAlglib::BesselK1(const double x)
  {
//--- modified second-kind Bessel function K1, delegated to CBessel
   double k1=CBessel::BesselK1(x);
//--- return result
   return(k1);
  }
//+------------------------------------------------------------------+
//| Modified Bessel function, second kind, integer order |
//| Returns modified Bessel function of the second kind |
//| of order n of the argument. |
//| The range is partitioned into the two intervals [0,9.55] and |
//| (9.55, infinity). An ascending power series is used in the |
//| low range, and an asymptotic expansion in the high range. |
//| ACCURACY: |
//| Relative error: |
//| arithmetic domain # trials peak rms |
//| IEEE 0,30 90000 1.8e-8 3.0e-10 |
//| Error is high only near the crossover point x = 9.55 |
//| between the two expansions used. |
//+------------------------------------------------------------------+
double CAlglib::BesselKN(const int nn,const double x)
  {
//--- modified second-kind Bessel function Kn of order nn, via CBessel
   double kn=CBessel::BesselKN(nn,x);
//--- return result
   return(kn);
  }
//+------------------------------------------------------------------+
//| Beta function |
//| - - |
//| | (a) | (b) |
//| beta( a, b ) = -----------. |
//| - |
//| | (a+b) |
//| For large arguments the logarithm of the function is |
//| evaluated using lgam(), then exponentiated. |
//| ACCURACY: |
//| Relative error: |
//| arithmetic domain # trials peak rms |
//| IEEE 0,30 30000 8.1e-14 1.1e-14 |
//+------------------------------------------------------------------+
double CAlglib::Beta(const double a,const double b)
  {
//--- beta function B(a,b)=Gamma(a)Gamma(b)/Gamma(a+b), via CBetaF
   double value=CBetaF::Beta(a,b);
//--- return result
   return(value);
  }
//+------------------------------------------------------------------+
//| Incomplete beta integral |
//| Returns incomplete beta integral of the arguments, evaluated |
//| from zero to x. The function is defined as |
//| x |
//| - - |
//| | (a+b) | | a-1 b-1 |
//| ----------- | t (1-t) dt. |
//| - - | | |
//| | (a) | (b) - |
//| 0 |
//| The domain of definition is 0 <= x <= 1. In this |
//| implementation a and b are restricted to positive values. |
//| The integral from x to 1 may be obtained by the symmetry |
//| relation |
//| 1 - incbet( a, b, x ) = incbet( b, a, 1-x ). |
//| The integral is evaluated by a continued fraction expansion |
//| or, when b*x is small, by a power series. |
//| ACCURACY: |
//| Tested at uniformly distributed random points (a,b,x) with a and |
//| b in "domain" and x between 0 and 1. |
//| Relative error |
//| arithmetic domain # trials peak rms |
//| IEEE 0,5 10000 6.9e-15 4.5e-16 |
//| IEEE 0,85 250000 2.2e-13 1.7e-14 |
//| IEEE 0,1000 30000 5.3e-12 6.3e-13 |
//| IEEE 0,10000 250000 9.3e-11 7.1e-12 |
//| IEEE 0,100000 10000 8.7e-10 4.8e-11 |
//| Outputs smaller than the IEEE gradual underflow threshold |
//| were excluded from these statistics. |
//+------------------------------------------------------------------+
double CAlglib::IncompleteBeta(const double a,const double b,const double x)
  {
//--- regularized incomplete beta integral on [0,x], via CIncBetaF
   double incbet=CIncBetaF::IncompleteBeta(a,b,x);
//--- return result
   return(incbet);
  }
//+------------------------------------------------------------------+
//| Inverse of incomplete beta integral                              |
//| Given y, the function finds x such that |
//| incbet( a, b, x ) = y . |
//| The routine performs interval halving or Newton iterations to |
//| find the root of incbet(a,b,x) - y = 0. |
//| ACCURACY: |
//| Relative error: |
//| x a,b |
//| arithmetic domain domain # trials peak rms |
//| IEEE 0,1 .5,10000 50000 5.8e-12 1.3e-13 |
//| IEEE 0,1 .25,100 100000 1.8e-13 3.9e-15 |
//| IEEE 0,1 0,5 50000 1.1e-12 5.5e-15 |
//| With a and b constrained to half-integer or integer values: |
//| IEEE 0,1 .5,10000 50000 5.8e-12 1.1e-13 |
//| IEEE 0,1 .5,100 100000 1.7e-14 7.9e-16 |
//| With a = .5, b constrained to half-integer or integer values: |
//| IEEE 0,1 .5,10000 10000 8.3e-11 1.0e-11 |
//+------------------------------------------------------------------+
double CAlglib::InvIncompleteBeta(const double a,const double b,double y)
  {
//--- solve incbet(a,b,x)=y for x, delegated to CIncBetaF
   double root=CIncBetaF::InvIncompleteBeta(a,b,y);
//--- return result
   return(root);
  }
//+------------------------------------------------------------------+
//| Binomial distribution |
//| Returns the sum of the terms 0 through k of the Binomial |
//| probability density: |
//| k |
//| -- ( n ) j n-j |
//| > ( ) p (1-p) |
//| -- ( j ) |
//| j=0 |
//| The terms are not summed directly; instead the incomplete |
//| beta integral is employed, according to the formula |
//| y = bdtr( k, n, p ) = incbet( n-k, k+1, 1-p ). |
//| The arguments must be positive, with p ranging from 0 to 1. |
//| ACCURACY: |
//| Tested at random points (a,b,p), with p between 0 and 1. |
//| a,b Relative error: |
//| arithmetic domain # trials peak rms |
//| For p between 0.001 and 1: |
//| IEEE 0,100 100000 4.3e-15 2.6e-16 |
//+------------------------------------------------------------------+
double CAlglib::BinomialDistribution(const int k,const int n,const double p)
  {
//--- binomial CDF: terms 0..k of the density, via CBinomialDistr
   double cdf=CBinomialDistr::BinomialDistribution(k,n,p);
//--- return result
   return(cdf);
  }
//+------------------------------------------------------------------+
//| Complemented binomial distribution |
//| Returns the sum of the terms k+1 through n of the Binomial |
//| probability density: |
//| n |
//| -- ( n ) j n-j |
//| > ( ) p (1-p) |
//| -- ( j ) |
//| j=k+1 |
//| The terms are not summed directly; instead the incomplete |
//| beta integral is employed, according to the formula |
//| y = bdtrc( k, n, p ) = incbet( k+1, n-k, p ). |
//| The arguments must be positive, with p ranging from 0 to 1. |
//| ACCURACY: |
//| Tested at random points (a,b,p). |
//| a,b Relative error: |
//| arithmetic domain # trials peak rms |
//| For p between 0.001 and 1: |
//| IEEE 0,100 100000 6.7e-15 8.2e-16 |
//| For p between 0 and .001: |
//| IEEE 0,100 100000 1.5e-13 2.7e-15 |
//+------------------------------------------------------------------+
double CAlglib::BinomialComplDistribution(const int k,const int n,const double p)
  {
//--- complemented binomial CDF: terms k+1..n, via CBinomialDistr
   double tail=CBinomialDistr::BinomialComplDistribution(k,n,p);
//--- return result
   return(tail);
  }
//+------------------------------------------------------------------+
//| Inverse binomial distribution |
//| Finds the event probability p such that the sum of the |
//| terms 0 through k of the Binomial probability density |
//| is equal to the given cumulative probability y. |
//| This is accomplished using the inverse beta integral |
//| function and the relation |
//| 1 - p = incbi( n-k, k+1, y ). |
//| ACCURACY: |
//| Tested at random points (a,b,p). |
//| a,b Relative error: |
//| arithmetic domain # trials peak rms |
//| For p between 0.001 and 1: |
//| IEEE 0,100 100000 2.3e-14 6.4e-16 |
//| IEEE 0,10000 100000 6.6e-12 1.2e-13 |
//| For p between 10^-6 and 0.001: |
//| IEEE 0,100 100000 2.0e-12 1.3e-14 |
//| IEEE 0,10000 100000 1.5e-12 3.2e-14 |
//+------------------------------------------------------------------+
double CAlglib::InvBinomialDistribution(const int k,const int n,const double y)
  {
//--- event probability p matching cumulative probability y, via CBinomialDistr
   double p=CBinomialDistr::InvBinomialDistribution(k,n,y);
//--- return result
   return(p);
  }
//+------------------------------------------------------------------+
//| Calculation of the value of the Chebyshev polynomials of the |
//| first and second kinds. |
//| Parameters: |
//| r - polynomial kind, either 1 or 2. |
//| n - degree, n>=0 |
//| x - argument, -1 <= x <= 1 |
//| Result: |
//| the value of the Chebyshev polynomial at x |
//+------------------------------------------------------------------+
double CAlglib::ChebyshevCalculate(int r,const int n,const double x)
  {
//--- Chebyshev polynomial of kind r (1 or 2), degree n, at x
   double value=CChebyshev::ChebyshevCalculate(r,n,x);
//--- return result
   return(value);
  }
//+------------------------------------------------------------------+
//| Summation of Chebyshev polynomials using Clenshaw's recurrence   |
//| formula. |
//| This routine calculates |
//| c[0]*T0(x) + c[1]*T1(x) + ... + c[N]*TN(x) |
//| or |
//| c[0]*U0(x) + c[1]*U1(x) + ... + c[N]*UN(x) |
//| depending on the R. |
//| Parameters: |
//| r - polynomial kind, either 1 or 2. |
//| n - degree, n>=0 |
//| x - argument |
//| Result: |
//| the value of the Chebyshev polynomial at x |
//+------------------------------------------------------------------+
double CAlglib::ChebyshevSum(double &c[],const int r,const int n,const double x)
  {
//--- Clenshaw summation of the Chebyshev series c[0..n] at x
   double sum=CChebyshev::ChebyshevSum(c,r,n,x);
//--- return result
   return(sum);
  }
//+------------------------------------------------------------------+
//| Representation of Tn as C[0] + C[1]*X + ... + C[N]*X^N |
//| Input parameters: |
//| N - polynomial degree, n>=0 |
//| Output parameters: |
//| C - coefficients |
//+------------------------------------------------------------------+
void CAlglib::ChebyshevCoefficients(const int n,double &c[])
  {
//--- fill C with the power-basis coefficients of Tn (delegated to CChebyshev)
   CChebyshev::ChebyshevCoefficients(n,c);
  }
//+------------------------------------------------------------------+
//| Conversion of a series of Chebyshev polynomials to a power |
//| series. |
//| Represents A[0]*T0(x) + A[1]*T1(x) + ... + A[N]*Tn(x) as |
//| B[0] + B[1]*X + ... + B[N]*X^N. |
//| Input parameters: |
//| A - Chebyshev series coefficients |
//| N - degree, N>=0 |
//| Output parameters |
//| B - power series coefficients |
//+------------------------------------------------------------------+
void CAlglib::FromChebyshev(double &a[],const int n,double &b[])
  {
//--- convert Chebyshev-series coefficients A into power-series coefficients B
   CChebyshev::FromChebyshev(a,n,b);
  }
//+------------------------------------------------------------------+
//| Chi-square distribution |
//| Returns the area under the left hand tail (from 0 to x) |
//| of the Chi square probability density function with |
//| v degrees of freedom. |
//| x |
//| - |
//| 1 | | v/2-1 -t/2 |
//| P( x | v ) = ----------- | t e dt |
//| v/2 - | | |
//| 2 | (v/2) - |
//| 0 |
//| where x is the Chi-square variable. |
//| The incomplete gamma integral is used, according to the |
//| formula |
//| y = chdtr( v, x ) = igam( v/2.0, x/2.0 ). |
//| The arguments must both be positive. |
//| ACCURACY: |
//| See incomplete gamma function |
//+------------------------------------------------------------------+
double CAlglib::ChiSquareDistribution(const double v,const double x)
  {
//--- chi-square CDF with v degrees of freedom, via CChiSquareDistr
   double cdf=CChiSquareDistr::ChiSquareDistribution(v,x);
//--- return result
   return(cdf);
  }
//+------------------------------------------------------------------+
//| Complemented Chi-square distribution |
//| Returns the area under the right hand tail (from x to |
//| infinity) of the Chi square probability density function |
//| with v degrees of freedom: |
//| inf. |
//| - |
//| 1 | | v/2-1 -t/2 |
//| P( x | v ) = ----------- | t e dt |
//| v/2 - | | |
//| 2 | (v/2) - |
//| x |
//| where x is the Chi-square variable. |
//| The incomplete gamma integral is used, according to the |
//| formula |
//| y = chdtr( v, x ) = igamc( v/2.0, x/2.0 ). |
//| The arguments must both be positive. |
//| ACCURACY: |
//| See incomplete gamma function |
//+------------------------------------------------------------------+
double CAlglib::ChiSquareComplDistribution(const double v,const double x)
  {
//--- complemented chi-square distribution (right tail), via CChiSquareDistr
   double tail=CChiSquareDistr::ChiSquareComplDistribution(v,x);
//--- return result
   return(tail);
  }
//+------------------------------------------------------------------+
//| Inverse of complemented Chi-square distribution |
//| Finds the Chi-square argument x such that the integral |
//| from x to infinity of the Chi-square density is equal |
//| to the given cumulative probability y. |
//| This is accomplished using the inverse gamma integral |
//| function and the relation |
//| x/2 = igami( df/2, y ); |
//| ACCURACY: |
//| See inverse incomplete gamma function |
//+------------------------------------------------------------------+
double CAlglib::InvChiSquareDistribution(const double v,const double y)
  {
//--- chi-square argument whose right tail equals y, via CChiSquareDistr
   double x=CChiSquareDistr::InvChiSquareDistribution(v,y);
//--- return result
   return(x);
  }
//+------------------------------------------------------------------+
//| Dawson's Integral |
//| Approximates the integral |
//| x |
//| - |
//| 2 | | 2 |
//| dawsn(x) = exp( -x ) | exp( t ) dt |
//| | | |
//| - |
//| 0 |
//| Three different rational approximations are employed, for |
//| the intervals 0 to 3.25; 3.25 to 6.25; and 6.25 up. |
//| ACCURACY: |
//| Relative error: |
//| arithmetic domain # trials peak rms |
//| IEEE 0,10 10000 6.9e-16 1.0e-16 |
//+------------------------------------------------------------------+
double CAlglib::DawsonIntegral(const double x)
  {
//--- Dawson's integral, delegated to CDawson
   double value=CDawson::DawsonIntegral(x);
//--- return result
   return(value);
  }
//+------------------------------------------------------------------+
//| Complete elliptic integral of the first kind |
//| Approximates the integral |
//| pi/2 |
//| - |
//| | | |
//| | dt |
//| K(m) = | ------------------ |
//| | 2 |
//| | | sqrt( 1 - m sin t ) |
//| - |
//| 0 |
//| using the approximation |
//| P(x) - log x Q(x). |
//| ACCURACY: |
//| Relative error: |
//| arithmetic domain # trials peak rms |
//| IEEE 0,1 30000 2.5e-16 6.8e-17 |
//+------------------------------------------------------------------+
double CAlglib::EllipticIntegralK(const double m)
  {
//--- complete elliptic integral of the first kind K(m), via CElliptic
   double k=CElliptic::EllipticIntegralK(m);
//--- return result
   return(k);
  }
//+------------------------------------------------------------------+
//| Complete elliptic integral of the first kind |
//| Approximates the integral |
//| pi/2 |
//| - |
//| | | |
//| | dt |
//| K(m) = | ------------------ |
//| | 2 |
//| | | sqrt( 1 - m sin t ) |
//| - |
//| 0 |
//| where m = 1 - m1, using the approximation |
//| P(x) - log x Q(x). |
//| The argument m1 is used rather than m so that the logarithmic |
//| singularity at m = 1 will be shifted to the origin; this |
//| preserves maximum accuracy. |
//| K(0) = pi/2. |
//| ACCURACY: |
//| Relative error: |
//| arithmetic domain # trials peak rms |
//| IEEE 0,1 30000 2.5e-16 6.8e-17 |
//+------------------------------------------------------------------+
double CAlglib::EllipticIntegralKhighPrecision(const double m1)
  {
//--- K evaluated from the complementary parameter m1=1-m, via CElliptic
   double k=CElliptic::EllipticIntegralKhighPrecision(m1);
//--- return result
   return(k);
  }
//+------------------------------------------------------------------+
//| Incomplete elliptic integral of the first kind F(phi|m) |
//| Approximates the integral |
//| phi |
//| - |
//| | | |
//| | dt |
//| F(phi_\m) = | ------------------ |
//| | 2 |
//| | | sqrt( 1 - m sin t ) |
//| - |
//| 0 |
//| of amplitude phi and modulus m, using the arithmetic - |
//| geometric mean algorithm. |
//| ACCURACY: |
//| Tested at random points with m in [0, 1] and phi as indicated. |
//| Relative error: |
//| arithmetic domain # trials peak rms |
//| IEEE -10,10 200000 7.4e-16 1.0e-16 |
//+------------------------------------------------------------------+
double CAlglib::IncompleteEllipticIntegralK(const double phi,const double m)
  {
//--- incomplete first-kind elliptic integral F(phi|m), via CElliptic
   double f=CElliptic::IncompleteEllipticIntegralK(phi,m);
//--- return result
   return(f);
  }
//+------------------------------------------------------------------+
//| Complete elliptic integral of the second kind |
//| Approximates the integral |
//| pi/2 |
//| - |
//| | | 2 |
//| E(m) = | sqrt( 1 - m sin t ) dt |
//| | | |
//| - |
//| 0 |
//| using the approximation |
//| P(x) - x log x Q(x). |
//| ACCURACY: |
//| Relative error: |
//| arithmetic domain # trials peak rms |
//| IEEE 0, 1 10000 2.1e-16 7.3e-17 |
//+------------------------------------------------------------------+
double CAlglib::EllipticIntegralE(const double m)
  {
//--- forward to the special-functions implementation
   double res=CElliptic::EllipticIntegralE(m);
//--- return E(m)
   return(res);
  }
//+------------------------------------------------------------------+
//| Incomplete elliptic integral of the second kind |
//| Approximates the integral |
//| phi |
//| - |
//| | | |
//| | 2 |
//| E(phi_\m) = | sqrt( 1 - m sin t ) dt |
//| | |
//| | | |
//| - |
//| 0 |
//| of amplitude phi and modulus m, using the arithmetic - |
//| geometric mean algorithm. |
//| ACCURACY: |
//| Tested at random arguments with phi in [-10, 10] and m in |
//| [0, 1]. |
//| Relative error: |
//| arithmetic domain # trials peak rms |
//| IEEE -10,10 150000 3.3e-15 1.4e-16 |
//+------------------------------------------------------------------+
double CAlglib::IncompleteEllipticIntegralE(const double phi,const double m)
  {
//--- forward to the special-functions implementation
   double res=CElliptic::IncompleteEllipticIntegralE(phi,m);
//--- return E(phi|m)
   return(res);
  }
//+------------------------------------------------------------------+
//| Exponential integral Ei(x) |
//| x |
//| - t |
//| | | e |
//| Ei(x) = -|- --- dt . |
//| | | t |
//| - |
//| -inf |
//| Not defined for x <= 0. |
//| See also expn.c. |
//| ACCURACY: |
//| Relative error: |
//| arithmetic domain # trials peak rms |
//| IEEE 0,100 50000 8.6e-16 1.3e-16 |
//+------------------------------------------------------------------+
double CAlglib::ExponentialIntegralEi(const double x)
  {
//--- forward to the exponential-integrals implementation (x must be > 0)
   double res=CExpIntegrals::ExponentialIntegralEi(x);
//--- return Ei(x)
   return(res);
  }
//+------------------------------------------------------------------+
//| Exponential integral En(x) |
//| Evaluates the exponential integral |
//| inf. |
//| - |
//| | | -xt |
//| | e |
//| E (x) = | ---- dt. |
//| n | n |
//| | | t |
//| - |
//| 1 |
//| Both n and x must be nonnegative. |
//| The routine employs either a power series, a continued |
//| fraction, or an asymptotic formula depending on the |
//| relative values of n and x. |
//| ACCURACY: |
//| Relative error: |
//| arithmetic domain # trials peak rms |
//| IEEE 0, 30 10000 1.7e-15 3.6e-16 |
//+------------------------------------------------------------------+
double CAlglib::ExponentialIntegralEn(const double x,const int n)
  {
//--- forward to the exponential-integrals implementation (n,x nonnegative)
   double res=CExpIntegrals::ExponentialIntegralEn(x,n);
//--- return En(x)
   return(res);
  }
//+------------------------------------------------------------------+
//| F distribution |
//| Returns the area from zero to x under the F density |
//| function (also known as Snedcor's density or the |
//| variance ratio density). This is the density |
//| of x = (u1/df1)/(u2/df2), where u1 and u2 are random |
//| variables having Chi square distributions with df1 |
//| and df2 degrees of freedom, respectively. |
//| The incomplete beta integral is used, according to the |
//| formula |
//| P(x) = incbet( df1/2, df2/2, (df1*x/(df2 + df1*x) ). |
//| The arguments a and b are greater than zero, and x is |
//| nonnegative. |
//| ACCURACY: |
//| Tested at random points (a,b,x). |
//| x a,b Relative error: |
//| arithmetic domain domain # trials peak rms |
//| IEEE 0,1 0,100 100000 9.8e-15 1.7e-15 |
//| IEEE 1,5 0,100 100000 6.5e-15 3.5e-16 |
//| IEEE 0,1 1,10000 100000 2.2e-11 3.3e-12 |
//| IEEE 1,5 1,10000 100000 1.1e-11 1.7e-13 |
//+------------------------------------------------------------------+
double CAlglib::FDistribution(const int a,const int b,const double x)
  {
//--- forward to the F-distribution implementation (a,b are degrees of freedom)
   double res=CFDistr::FDistribution(a,b,x);
//--- return the CDF value P(x)
   return(res);
  }
//+------------------------------------------------------------------+
//| Complemented F distribution |
//| Returns the area from x to infinity under the F density |
//| function (also known as Snedcor's density or the |
//| variance ratio density). |
//| inf. |
//| - |
//| 1 | | a-1 b-1 |
//| 1-P(x) = ------ | t (1-t) dt |
//| B(a,b) | | |
//| - |
//| x |
//| The incomplete beta integral is used, according to the |
//| formula |
//| P(x) = incbet( df2/2, df1/2, (df2/(df2 + df1*x) ). |
//| ACCURACY: |
//| Tested at random points (a,b,x) in the indicated intervals. |
//| x a,b Relative error: |
//| arithmetic domain domain # trials peak rms |
//| IEEE 0,1 1,100 100000 3.7e-14 5.9e-16 |
//| IEEE 1,5 1,100 100000 8.0e-15 1.6e-15 |
//| IEEE 0,1 1,10000 100000 1.8e-11 3.5e-13 |
//| IEEE 1,5 1,10000 100000 2.0e-11 3.0e-12 |
//+------------------------------------------------------------------+
double CAlglib::FComplDistribution(const int a,const int b,const double x)
  {
//--- forward to the F-distribution implementation (a,b are degrees of freedom)
   double res=CFDistr::FComplDistribution(a,b,x);
//--- return the complemented CDF value 1-P(x)
   return(res);
  }
//+------------------------------------------------------------------+
//| Inverse of complemented F distribution |
//| Finds the F density argument x such that the integral |
//| from x to infinity of the F density is equal to the |
//| given probability p. |
//| This is accomplished using the inverse beta integral |
//| function and the relations |
//| z = incbi( df2/2, df1/2, p ) |
//| x = df2 (1-z) / (df1 z). |
//| Note: the following relations hold for the inverse of |
//| the uncomplemented F distribution: |
//| z = incbi( df1/2, df2/2, p ) |
//| x = df2 z / (df1 (1-z)). |
//| ACCURACY: |
//| Tested at random points (a,b,p). |
//| a,b Relative error: |
//| arithmetic domain # trials peak rms |
//| For p between .001 and 1: |
//| IEEE 1,100 100000 8.3e-15 4.7e-16 |
//| IEEE 1,10000 100000 2.1e-11 1.4e-13 |
//| For p between 10^-6 and 10^-3: |
//| IEEE 1,100 50000 1.3e-12 8.4e-15 |
//| IEEE 1,10000 50000 3.0e-12 4.8e-14 |
//+------------------------------------------------------------------+
double CAlglib::InvFDistribution(const int a,const int b,const double y)
  {
//--- forward to the F-distribution implementation (y is the tail probability)
   double res=CFDistr::InvFDistribution(a,b,y);
//--- return the argument x with upper-tail area equal to y
   return(res);
  }
//+------------------------------------------------------------------+
//| Fresnel integral |
//| Evaluates the Fresnel integrals |
//| x |
//| - |
//| | | |
//| C(x) = | cos(pi/2 t**2) dt, |
//| | | |
//| - |
//| 0 |
//| x |
//| - |
//| | | |
//| S(x) = | sin(pi/2 t**2) dt. |
//| | | |
//| - |
//| 0 |
//| The integrals are evaluated by a power series for x < 1. |
//| For x >= 1 auxiliary functions f(x) and g(x) are employed |
//| such that |
//| C(x) = 0.5 + f(x) sin( pi/2 x**2 ) - g(x) cos( pi/2 x**2 ) |
//| S(x) = 0.5 - f(x) cos( pi/2 x**2 ) - g(x) sin( pi/2 x**2 ) |
//| ACCURACY: |
//| Relative error. |
//| Arithmetic function domain # trials peak rms |
//| IEEE S(x) 0, 10 10000 2.0e-15 3.2e-16|
//| IEEE C(x) 0, 10 10000 1.8e-15 3.3e-16|
//+------------------------------------------------------------------+
void CAlglib::FresnelIntegral(const double x,double &c,double &s)
  {
//--- delegate: C(x) is written into c, S(x) into s by the implementation
   CFresnel::FresnelIntegral(x,c,s);
  }
//+------------------------------------------------------------------+
//| Calculation of the value of the Hermite polynomial. |
//| Parameters: |
//| n - degree, n>=0 |
//| x - argument |
//| Result: |
//| the value of the Hermite polynomial Hn at x |
//+------------------------------------------------------------------+
double CAlglib::HermiteCalculate(const int n,const double x)
  {
//--- forward to the Hermite-polynomial implementation (degree n>=0)
   double value=CHermite::HermiteCalculate(n,x);
//--- return Hn(x)
   return(value);
  }
//+------------------------------------------------------------------+
//| Summation of Hermite polynomials using Clenshaw's recurrence |
//| formula. |
//| This routine calculates |
//| c[0]*H0(x) + c[1]*H1(x) + ... + c[N]*HN(x) |
//| Parameters: |
//| n - degree, n>=0 |
//| x - argument |
//| Result: |
//| the value of the Hermite polynomial at x |
//+------------------------------------------------------------------+
double CAlglib::HermiteSum(double &c[],const int n,const double x)
  {
//--- forward to the Clenshaw-summation implementation
   double value=CHermite::HermiteSum(c,n,x);
//--- return c[0]*H0(x)+...+c[n]*Hn(x)
   return(value);
  }
//+------------------------------------------------------------------+
//| Representation of Hn as C[0] + C[1]*X + ... + C[N]*X^N |
//| Input parameters: |
//| N - polynomial degree, n>=0 |
//| Output parameters: |
//| C - coefficients |
//+------------------------------------------------------------------+
void CAlglib::HermiteCoefficients(const int n,double &c[])
  {
//--- fill C with the power-basis coefficients of the Hermite polynomial Hn
   CHermite::HermiteCoefficients(n,c);
  }
//+------------------------------------------------------------------+
//| Jacobian Elliptic Functions |
//| Evaluates the Jacobian elliptic functions sn(u|m), cn(u|m), |
//| and dn(u|m) of parameter m between 0 and 1, and real |
//| argument u. |
//| These functions are periodic, with quarter-period on the |
//| real axis equal to the complete elliptic integral |
//| ellpk(1.0-m). |
//| Relation to incomplete elliptic integral: |
//| If u = ellik(phi,m), then sn(u|m) = sin(phi), |
//| and cn(u|m) = cos(phi). Phi is called the amplitude of u. |
//| Computation is by means of the arithmetic-geometric mean |
//| algorithm, except when m is within 1e-9 of 0 or 1. In the |
//| latter case with m close to 1, the approximation applies |
//| only for phi < pi/2. |
//| ACCURACY: |
//| Tested at random points with u between 0 and 10, m between |
//| 0 and 1. |
//| Absolute error (* = relative error): |
//| arithmetic function # trials peak rms |
//| IEEE phi 10000 9.2e-16* 1.4e-16* |
//| IEEE sn 50000 4.1e-15 4.6e-16 |
//| IEEE cn 40000 3.6e-15 4.4e-16 |
//| IEEE dn 10000 1.3e-12 1.8e-14 |
//| Peak error observed in consistency check using addition |
//| theorem for sn(u+v) was 4e-16 (absolute). Also tested by |
//| the above relation to the incomplete elliptic integral. |
//| Accuracy deteriorates when u is large. |
//+------------------------------------------------------------------+
void CAlglib::JacobianEllipticFunctions(const double u,const double m,
                                        double &sn,double &cn,
                                        double &dn,double &ph)
  {
//--- reset all output references before delegating
   ph=0;
   dn=0;
   cn=0;
   sn=0;
//--- compute sn(u|m), cn(u|m), dn(u|m) and the amplitude phi
   CJacobianElliptic::JacobianEllipticFunctions(u,m,sn,cn,dn,ph);
  }
//+------------------------------------------------------------------+
//| Calculation of the value of the Laguerre polynomial. |
//| Parameters: |
//| n - degree, n>=0 |
//| x - argument |
//| Result: |
//| the value of the Laguerre polynomial Ln at x |
//+------------------------------------------------------------------+
double CAlglib::LaguerreCalculate(const int n,const double x)
  {
//--- forward to the Laguerre-polynomial implementation (degree n>=0)
   double value=CLaguerre::LaguerreCalculate(n,x);
//--- return Ln(x)
   return(value);
  }
//+------------------------------------------------------------------+
//| Summation of Laguerre polynomials using Clenshaw's recurrence |
//| formula. |
//| This routine calculates c[0]*L0(x) + c[1]*L1(x) + ... + |
//| + c[N]*LN(x) |
//| Parameters: |
//| n - degree, n>=0 |
//| x - argument |
//| Result: |
//| the value of the Laguerre polynomial at x |
//+------------------------------------------------------------------+
double CAlglib::LaguerreSum(double &c[],const int n,const double x)
  {
//--- forward to the Clenshaw-summation implementation
   double value=CLaguerre::LaguerreSum(c,n,x);
//--- return c[0]*L0(x)+...+c[n]*Ln(x)
   return(value);
  }
//+------------------------------------------------------------------+
//| Representation of Ln as C[0] + C[1]*X + ... + C[N]*X^N |
//| Input parameters: |
//| N - polynomial degree, n>=0 |
//| Output parameters: |
//| C - coefficients |
//+------------------------------------------------------------------+
void CAlglib::LaguerreCoefficients(const int n,double &c[])
  {
//--- fill C with the power-basis coefficients of the Laguerre polynomial Ln
   CLaguerre::LaguerreCoefficients(n,c);
  }
//+------------------------------------------------------------------+
//| Calculation of the value of the Legendre polynomial Pn. |
//| Parameters: |
//| n - degree, n>=0 |
//| x - argument |
//| Result: |
//| the value of the Legendre polynomial Pn at x |
//+------------------------------------------------------------------+
double CAlglib::LegendreCalculate(const int n,const double x)
  {
//--- forward to the Legendre-polynomial implementation (degree n>=0)
   double value=CLegendre::LegendreCalculate(n,x);
//--- return Pn(x)
   return(value);
  }
//+------------------------------------------------------------------+
//| Summation of Legendre polynomials using Clenshaw's recurrence |
//| formula. |
//| This routine calculates |
//| c[0]*P0(x) + c[1]*P1(x) + ... + c[N]*PN(x) |
//| Parameters: |
//| n - degree, n>=0 |
//| x - argument |
//| Result: |
//| the value of the Legendre polynomial at x |
//+------------------------------------------------------------------+
double CAlglib::LegendreSum(double &c[],const int n,const double x)
  {
//--- forward to the Clenshaw-summation implementation
   double value=CLegendre::LegendreSum(c,n,x);
//--- return c[0]*P0(x)+...+c[n]*Pn(x)
   return(value);
  }
//+------------------------------------------------------------------+
//| Representation of Pn as C[0] + C[1]*X + ... + C[N]*X^N |
//| Input parameters: |
//| N - polynomial degree, n>=0 |
//| Output parameters: |
//| C - coefficients |
//+------------------------------------------------------------------+
void CAlglib::LegendreCoefficients(const int n,double &c[])
  {
//--- fill C with the power-basis coefficients of the Legendre polynomial Pn
   CLegendre::LegendreCoefficients(n,c);
  }
//+------------------------------------------------------------------+
//| Poisson distribution |
//| Returns the sum of the first k+1 terms of the Poisson |
//| distribution: |
//| k j |
//| -- -m m |
//| > e -- |
//| -- j! |
//| j=0 |
//| The terms are not summed directly; instead the incomplete |
//| gamma integral is employed, according to the relation |
//| y = pdtr( k, m ) = igamc( k+1, m ). |
//| The arguments must both be positive. |
//| ACCURACY: |
//| See incomplete gamma function |
//+------------------------------------------------------------------+
double CAlglib::PoissonDistribution(const int k,const double m)
  {
//--- forward to the Poisson-distribution implementation
   double res=CPoissonDistr::PoissonDistribution(k,m);
//--- return the sum of the first k+1 Poisson terms
   return(res);
  }
//+------------------------------------------------------------------+
//| Complemented Poisson distribution |
//| Returns the sum of the terms k+1 to infinity of the Poisson |
//| distribution: |
//| inf. j |
//| -- -m m |
//| > e -- |
//| -- j! |
//| j=k+1 |
//| The terms are not summed directly; instead the incomplete |
//| gamma integral is employed, according to the formula |
//| y = pdtrc( k, m ) = igam( k+1, m ). |
//| The arguments must both be positive. |
//| ACCURACY: |
//| See incomplete gamma function |
//+------------------------------------------------------------------+
double CAlglib::PoissonComplDistribution(const int k,const double m)
  {
//--- forward to the Poisson-distribution implementation
   double res=CPoissonDistr::PoissonComplDistribution(k,m);
//--- return the sum of the Poisson terms from k+1 to infinity
   return(res);
  }
//+------------------------------------------------------------------+
//| Inverse Poisson distribution |
//| Finds the Poisson variable x such that the integral |
//| from 0 to x of the Poisson density is equal to the |
//| given probability y. |
//| This is accomplished using the inverse gamma integral |
//| function and the relation |
//| m = igami( k+1, y ). |
//| ACCURACY: |
//| See inverse incomplete gamma function |
//+------------------------------------------------------------------+
double CAlglib::InvPoissonDistribution(const int k,const double y)
  {
//--- forward to the Poisson-distribution implementation
   double res=CPoissonDistr::InvPoissonDistribution(k,y);
//--- return the Poisson parameter m matching probability y
   return(res);
  }
//+------------------------------------------------------------------+
//| Psi (digamma) function |
//| d - |
//| psi(x) = -- ln | (x) |
//| dx |
//| is the logarithmic derivative of the gamma function. |
//| For integer x, |
//| n-1 |
//| - |
//| psi(n) = -EUL + > 1/k. |
//| - |
//| k=1 |
//| This formula is used for 0 < n <= 10. If x is negative, it |
//| is transformed to a positive argument by the reflection |
//| formula psi(1-x) = psi(x) + pi cot(pi x). |
//| For general positive x, the argument is made greater than 10 |
//| using the recurrence psi(x+1) = psi(x) + 1/x. |
//| Then the following asymptotic expansion is applied: |
//| inf. B |
//| - 2k |
//| psi(x) = log(x) - 1/2x - > ------- |
//| - 2k |
//| k=1 2k x |
//| where the B2k are Bernoulli numbers. |
//| ACCURACY: |
//| Relative error (except absolute when |psi| < 1): |
//| arithmetic domain # trials peak rms |
//| IEEE 0,30 30000 1.3e-15 1.4e-16 |
//| IEEE -30,0 40000 1.5e-15 2.2e-16 |
//+------------------------------------------------------------------+
double CAlglib::Psi(const double x)
  {
//--- forward to the digamma implementation
   double res=CPsiF::Psi(x);
//--- return psi(x), the logarithmic derivative of the gamma function
   return(res);
  }
//+------------------------------------------------------------------+
//| Student's t distribution |
//| Computes the integral from minus infinity to t of the Student |
//| t distribution with integer k > 0 degrees of freedom: |
//| t |
//| - |
//| | | |
//| - | 2 -(k+1)/2 |
//| | ( (k+1)/2 ) | ( x ) |
//| ---------------------- | ( 1 + --- ) dx |
//| - | ( k ) |
//| sqrt( k pi ) | ( k/2 ) | |
//| | | |
//| - |
//| -inf. |
//| Relation to incomplete beta integral: |
//| 1 - stdtr(k,t) = 0.5 * incbet( k/2, 1/2, z ) |
//| where |
//| z = k/(k + t**2). |
//| For t < -2, this is the method of computation. For higher t, |
//| a direct method is derived from integration by parts. |
//| Since the function is symmetric about t=0, the area under the |
//| right tail of the density is found by calling the function |
//| with -t instead of t. |
//| ACCURACY: |
//| Tested at random 1<=k<=25. The "domain" refers to t. |
//| Relative error: |
//| arithmetic domain # trials peak rms |
//| IEEE -100,-2 50000 5.9e-15 1.4e-15 |
//| IEEE -2,100 500000 2.7e-15 4.9e-17 |
//+------------------------------------------------------------------+
double CAlglib::StudenttDistribution(const int k,const double t)
  {
//--- forward to the Student's t implementation (k>0 degrees of freedom)
   double res=CStudenttDistr::StudenttDistribution(k,t);
//--- return the CDF value stdtr(k,t)
   return(res);
  }
//+------------------------------------------------------------------+
//| Functional inverse of Student's t distribution |
//| Given probability p, finds the argument t such that stdtr(k,t) |
//| is equal to p. |
//| ACCURACY: |
//| Tested at random 1<=k<=100. The "domain" refers to p: |
//| Relative error: |
//| arithmetic domain # trials peak rms |
//| IEEE .001,.999 25000 5.7e-15 8.0e-16 |
//| IEEE 10^-6,.001 25000 2.0e-12 2.9e-14 |
//+------------------------------------------------------------------+
double CAlglib::InvStudenttDistribution(const int k,const double p)
  {
//--- forward to the Student's t implementation
   double res=CStudenttDistr::InvStudenttDistribution(k,p);
//--- return the argument t such that stdtr(k,t)=p
   return(res);
  }
//+------------------------------------------------------------------+
//| Sine and cosine integrals |
//| Evaluates the integrals |
//| x |
//| - |
//| | cos t - 1 |
//| Ci(x) = eul + ln x + | --------- dt, |
//| | t |
//| - |
//| 0 |
//| x |
//| - |
//| | sin t |
//| Si(x) = | ----- dt |
//| | t |
//| - |
//| 0 |
//| where eul = 0.57721566490153286061 is Euler's constant. |
//| The integrals are approximated by rational functions. |
//| For x > 8 auxiliary functions f(x) and g(x) are employed |
//| such that |
//| Ci(x) = f(x) sin(x) - g(x) cos(x) |
//| Si(x) = pi/2 - f(x) cos(x) - g(x) sin(x) |
//| ACCURACY: |
//| Test interval = [0,50]. |
//| Absolute error, except relative when > 1: |
//| arithmetic function # trials peak rms |
//| IEEE Si 30000 4.4e-16 7.3e-17 |
//| IEEE Ci 30000 6.9e-16 5.1e-17 |
//+------------------------------------------------------------------+
void CAlglib::SineCosineIntegrals(const double x,double &si,double &ci)
  {
//--- reset output references before delegating
   ci=0;
   si=0;
//--- compute Si(x) and Ci(x)
   CTrigIntegrals::SineCosineIntegrals(x,si,ci);
  }
//+------------------------------------------------------------------+
//| Hyperbolic sine and cosine integrals |
//| Approximates the integrals |
//| x |
//| - |
//| | | cosh t - 1 |
//| Chi(x) = eul + ln x + | ----------- dt, |
//| | | t |
//| - |
//| 0 |
//| x |
//| - |
//| | | sinh t |
//| Shi(x) = | ------ dt |
//| | | t |
//| - |
//| 0 |
//| where eul = 0.57721566490153286061 is Euler's constant. |
//| The integrals are evaluated by power series for x < 8 |
//| and by Chebyshev expansions for x between 8 and 88. |
//| For large x, both functions approach exp(x)/2x. |
//| Arguments greater than 88 in magnitude return MAXNUM. |
//| ACCURACY: |
//| Test interval 0 to 88. |
//| Relative error: |
//| arithmetic function # trials peak rms |
//| IEEE Shi 30000 6.9e-16 1.6e-16 |
//| Absolute error, except relative when |Chi| > 1: |
//| IEEE Chi 30000 8.4e-16 1.4e-16 |
//+------------------------------------------------------------------+
void CAlglib::HyperbolicSineCosineIntegrals(const double x,double &shi,double &chi)
  {
//--- reset output references before delegating
   chi=0;
   shi=0;
//--- compute Shi(x) and Chi(x)
   CTrigIntegrals::HyperbolicSineCosineIntegrals(x,shi,chi);
  }
//+------------------------------------------------------------------+
//| Calculation of the distribution moments: mean, variance, |
//| skewness, kurtosis. |
//| INPUT PARAMETERS: |
//| X - sample |
//| N - N>=0, sample size: |
//| * if given, only leading N elements of X are |
//| processed |
//| * if not given, automatically determined from |
//| size of X |
//| OUTPUT PARAMETERS |
//| Mean - mean. |
//| Variance- variance. |
//| Skewness- skewness (if variance<>0; zero otherwise). |
//| Kurtosis- kurtosis (if variance<>0; zero otherwise). |
//+------------------------------------------------------------------+
void CAlglib::SampleMoments(const double &x[],const int n,double &mean,
                            double &variance,double &skewness,
                            double &kurtosis)
  {
//--- reset all output references before delegating
   kurtosis=0;
   skewness=0;
   variance=0;
   mean=0;
//--- compute the four distribution moments over the leading N elements
   CBaseStat::SampleMoments(x,n,mean,variance,skewness,kurtosis);
  }
//+------------------------------------------------------------------+
//| Calculation of the distribution moments: mean, variance, |
//| skewness, kurtosis. |
//| INPUT PARAMETERS: |
//| X - sample |
//| N - N>=0, sample size: |
//| * if given, only leading N elements of X are |
//| processed |
//| * if not given, automatically determined from |
//| size of X |
//| OUTPUT PARAMETERS |
//| Mean - mean. |
//| Variance- variance. |
//| Skewness- skewness (if variance<>0; zero otherwise). |
//| Kurtosis- kurtosis (if variance<>0; zero otherwise). |
//+------------------------------------------------------------------+
void CAlglib::SampleMoments(const double &x[],double &mean,
                            double &variance,double &skewness,
                            double &kurtosis)
  {
//--- sample size is taken from the array length
   int n=CAp::Len(x);
//--- reset all output references before delegating
   kurtosis=0;
   skewness=0;
   variance=0;
   mean=0;
//--- compute the four distribution moments over the whole sample
   CBaseStat::SampleMoments(x,n,mean,variance,skewness,kurtosis);
  }
//+------------------------------------------------------------------+
//| Calculation of the mean. |
//| INPUT PARAMETERS: |
//| X - sample |
//| N - N>=0, sample size: |
//| * if given, only leading N elements of X are processed |
//| * if not given, automatically determined from size of X |
//| NOTE: This function return result which calculated by |
//| 'SampleMoments' function and stored at 'Mean' variable. |
//+------------------------------------------------------------------+
double CAlglib::SampleMean(CRowDouble &x,int n)
  {
//--- forward to the base-statistics implementation
   double res=CBaseStat::SampleMean(x,n);
//--- return the mean of the leading N elements
   return(res);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
double CAlglib::SampleMean(CRowDouble &x)
  {
//--- process the whole sample: size is taken from the array length
   return(CBaseStat::SampleMean(x,CAp::Len(x)));
  }
//+------------------------------------------------------------------+
//| Calculation of the variance. |
//| INPUT PARAMETERS: |
//| X - sample |
//| N - N>=0, sample size: |
//| * if given, only leading N elements of X are processed |
//| * if not given, automatically determined from size of X |
//| NOTE: This function return result which calculated by |
//| 'SampleMoments' function and stored at 'Variance' variable.|
//+------------------------------------------------------------------+
double CAlglib::SampleVariance(CRowDouble &x,int n)
  {
//--- forward to the base-statistics implementation
   double res=CBaseStat::SampleVariance(x,n);
//--- return the variance of the leading N elements
   return(res);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
double CAlglib::SampleVariance(CRowDouble &x)
  {
//--- process the whole sample: size is taken from the array length
   return(CBaseStat::SampleVariance(x,CAp::Len(x)));
  }
//+------------------------------------------------------------------+
//| Calculation of the skewness. |
//| INPUT PARAMETERS: |
//| X - sample |
//| N - N>=0, sample size: |
//| * if given, only leading N elements of X are processed |
//| * if not given, automatically determined from size of X |
//| NOTE: This function return result which calculated by |
//| 'SampleMoments' function and stored at 'Skewness' variable.|
//+------------------------------------------------------------------+
double CAlglib::SampleSkewness(CRowDouble &x,int n)
  {
//--- forward to the base-statistics implementation
   double res=CBaseStat::SampleSkewness(x,n);
//--- return the skewness of the leading N elements
   return(res);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
double CAlglib::SampleSkewness(CRowDouble &x)
  {
//--- process the whole sample: size is taken from the array length
   return(CBaseStat::SampleSkewness(x,CAp::Len(x)));
  }
//+------------------------------------------------------------------+
//| Calculation of the kurtosis. |
//| INPUT PARAMETERS: |
//| X - sample |
//| N - N>=0, sample size: |
//| * if given, only leading N elements of X are processed |
//| * if not given, automatically determined from size of X |
//| NOTE: This function return result which calculated by |
//| 'SampleMoments' function and stored at 'Kurtosis' variable.|
//+------------------------------------------------------------------+
double CAlglib::SampleKurtosis(CRowDouble &x,int n)
  {
//--- forward to the base-statistics implementation
   double res=CBaseStat::SampleKurtosis(x,n);
//--- return the kurtosis of the leading N elements
   return(res);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
double CAlglib::SampleKurtosis(CRowDouble &x)
  {
//--- process the whole sample: size is taken from the array length
   return(CBaseStat::SampleKurtosis(x,CAp::Len(x)));
  }
//+------------------------------------------------------------------+
//| ADev |
//| Input parameters: |
//| X - sample |
//| N - N>=0, sample size: |
//| * if given, only leading N elements of X are |
//| processed |
//| * if not given, automatically determined from size |
//| of X |
//| Output parameters: |
//| ADev- ADev |
//+------------------------------------------------------------------+
void CAlglib::SampleAdev(const double &x[],const int n,double &adev)
  {
//--- reset output reference
   adev=0;
//--- compute the average deviation over the leading N elements
   CBaseStat::SampleAdev(x,n,adev);
  }
//+------------------------------------------------------------------+
//| ADev |
//| Input parameters: |
//| X - sample |
//| N - N>=0, sample size: |
//| * if given, only leading N elements of X are |
//| processed |
//| * if not given, automatically determined from size |
//| of X |
//| Output parameters: |
//| ADev- ADev |
//+------------------------------------------------------------------+
void CAlglib::SampleAdev(const double &x[],double &adev)
  {
//--- reset output reference
   adev=0;
//--- sample size is taken from the array length
   int n=CAp::Len(x);
//--- compute the average deviation over the whole sample
   CBaseStat::SampleAdev(x,n,adev);
  }
//+------------------------------------------------------------------+
//| Median calculation. |
//| Input parameters: |
//| X - sample (array indexes: [0..N-1]) |
//| N - N>=0, sample size: |
//| * if given, only leading N elements of X are |
//| processed |
//| * if not given, automatically determined from size |
//| of X |
//| Output parameters: |
//| Median |
//+------------------------------------------------------------------+
void CAlglib::SampleMedian(const double &x[],const int n,double &median)
  {
//--- reset output reference
   median=0;
//--- compute the median of the leading N elements
   CBaseStat::SampleMedian(x,n,median);
  }
//+------------------------------------------------------------------+
//| Median calculation. |
//| Input parameters: |
//| X - sample (array indexes: [0..N-1]) |
//| N - N>=0, sample size: |
//| * if given, only leading N elements of X are |
//| processed |
//| * if not given, automatically determined from size |
//| of X |
//| Output parameters: |
//| Median |
//+------------------------------------------------------------------+
void CAlglib::SampleMedian(const double &x[],double &median)
  {
//--- reset output reference
   median=0;
//--- sample size is taken from the array length
   int n=CAp::Len(x);
//--- compute the median of the whole sample
   CBaseStat::SampleMedian(x,n,median);
  }
//+------------------------------------------------------------------+
//| Percentile calculation. |
//| Input parameters: |
//| X - sample (array indexes: [0..N-1]) |
//| N - N>=0, sample size: |
//| * if given, only leading N elements of X are |
//| processed |
//| * if not given, automatically determined from size |
//| of X |
//| P - percentile (0<=P<=1) |
//| Output parameters: |
//| V - percentile |
//+------------------------------------------------------------------+
void CAlglib::SamplePercentile(const double &x[],const int n,
                               const double p,double &v)
  {
//--- reset output reference
   v=0;
//--- compute the P-th percentile (0<=P<=1) of the leading N elements
   CBaseStat::SamplePercentile(x,n,p,v);
  }
//+------------------------------------------------------------------+
//| Percentile calculation. |
//| Input parameters: |
//| X - sample (array indexes: [0..N-1]) |
//| N - N>=0, sample size: |
//| * if given, only leading N elements of X are |
//| processed |
//| * if not given, automatically determined from size |
//| of X |
//| P - percentile (0<=P<=1) |
//| Output parameters: |
//| V - percentile |
//+------------------------------------------------------------------+
void CAlglib::SamplePercentile(const double &x[],const double p,
                               double &v)
  {
//--- reset output reference
   v=0;
//--- sample size is taken from the array length
   int n=CAp::Len(x);
//--- compute the P-th percentile (0<=P<=1) of the whole sample
   CBaseStat::SamplePercentile(x,n,p,v);
  }
//+------------------------------------------------------------------+
//| 2-sample covariance |
//| Input parameters: |
//| X - sample 1 (array indexes: [0..N-1]) |
//| Y - sample 2 (array indexes: [0..N-1]) |
//| N - N>=0, sample size: |
//| * if given, only N leading elements of X/Y are |
//| processed |
//| * if not given, automatically determined from |
//| input sizes |
//| Result: |
//| covariance (zero for N=0 or N=1) |
//+------------------------------------------------------------------+
double CAlglib::Cov2(const double &x[],const double &y[],const int n)
  {
//--- forward to the base-statistics implementation
   double res=CBaseStat::Cov2(x,y,n);
//--- return the covariance (zero for N=0 or N=1)
   return(res);
  }
//+------------------------------------------------------------------+
//| 2-sample covariance |
//| Input parameters: |
//| X - sample 1 (array indexes: [0..N-1]) |
//| Y - sample 2 (array indexes: [0..N-1]) |
//| N - N>=0, sample size: |
//| * if given, only N leading elements of X/Y are |
//| processed |
//| * if not given, automatically determined from |
//| input sizes |
//| Result: |
//| covariance (zero for N=0 or N=1) |
//+------------------------------------------------------------------+
double CAlglib::Cov2(const double &x[],const double &y[])
  {
//--- sample size is taken from the first array
   int n=CAp::Len(x);
//--- both samples must be of equal length
   if(n!=CAp::Len(y))
     {
      Print(__FUNCTION__+": arrays size are not equal");
      CAp::exception_happened=true;
      return(EMPTY_VALUE);
     }
//--- compute the covariance of the two samples
   return(CBaseStat::Cov2(x,y,n));
  }
//+------------------------------------------------------------------+
//| Pearson product-moment correlation coefficient |
//| Input parameters: |
//| X - sample 1 (array indexes: [0..N-1]) |
//| Y - sample 2 (array indexes: [0..N-1]) |
//| N - N>=0, sample size: |
//| * if given, only N leading elements of X/Y are |
//| processed |
//| * if not given, automatically determined from |
//| input sizes |
//| Result: |
//| Pearson product-moment correlation coefficient |
//| (zero for N=0 or N=1) |
//+------------------------------------------------------------------+
double CAlglib::PearsonCorr2(const double &x[],const double &y[],
                             const int n)
  {
//--- forward to the base-statistics implementation
   double res=CBaseStat::PearsonCorr2(x,y,n);
//--- return the Pearson correlation (zero for N=0 or N=1)
   return(res);
  }
//+------------------------------------------------------------------+
//| Pearson product-moment correlation coefficient |
//| Input parameters: |
//| X - sample 1 (array indexes: [0..N-1]) |
//| Y - sample 2 (array indexes: [0..N-1]) |
//| N - N>=0, sample size: |
//| * if given, only N leading elements of X/Y are |
//| processed |
//| * if not given, automatically determined from |
//| input sizes |
//| Result: |
//| Pearson product-moment correlation coefficient |
//| (zero for N=0 or N=1) |
//+------------------------------------------------------------------+
double CAlglib::PearsonCorr2(const double &x[],const double &y[])
  {
//--- sample size is taken from the first array
   int n=CAp::Len(x);
//--- both samples must be of equal length
   if(n!=CAp::Len(y))
     {
      Print(__FUNCTION__+": arrays size are not equal");
      CAp::exception_happened=true;
      return(EMPTY_VALUE);
     }
//--- compute the Pearson product-moment correlation coefficient
   return(CBaseStat::PearsonCorr2(x,y,n));
  }
//+------------------------------------------------------------------+
//| Spearman's rank correlation coefficient |
//| Input parameters: |
//| X - sample 1 (array indexes: [0..N-1]) |
//| Y - sample 2 (array indexes: [0..N-1]) |
//| N - N>=0, sample size: |
//| * if given, only N leading elements of X/Y are |
//| processed |
//| * if not given, automatically determined from |
//| input sizes |
//| Result: |
//| Spearman's rank correlation coefficient |
//| (zero for N=0 or N=1) |
//+------------------------------------------------------------------+
double CAlglib::SpearmanCorr2(const double &x[],const double &y[],
                              const int n)
  {
//--- delegate computation to the statistics core over the N leading elements
   double result=CBaseStat::SpearmanCorr2(x,y,n);
//--- return result
   return(result);
  }
//+------------------------------------------------------------------+
//| Spearman's rank correlation coefficient |
//| Input parameters: |
//| X - sample 1 (array indexes: [0..N-1]) |
//| Y - sample 2 (array indexes: [0..N-1]) |
//| N - N>=0, sample size: |
//| * if given, only N leading elements of X/Y are |
//| processed |
//| * if not given, automatically determined from |
//| input sizes |
//| Result: |
//| Spearman's rank correlation coefficient |
//| (zero for N=0 or N=1) |
//+------------------------------------------------------------------+
double CAlglib::SpearmanCorr2(const double &x[],const double &y[])
  {
//--- both samples must have the same length
   int size=CAp::Len(x);
   if(size!=CAp::Len(y))
     {
      Print(__FUNCTION__+": arrays size are not equal");
      CAp::exception_happened=true;
      return(EMPTY_VALUE);
     }
//--- compute the coefficient over all elements
   return(CBaseStat::SpearmanCorr2(x,y,size));
  }
//+------------------------------------------------------------------+
//| Covariance matrix |
//| INPUT PARAMETERS: |
//| X - array[N,M], sample matrix: |
//| * J-th column corresponds to J-th variable |
//| * I-th row corresponds to I-th observation |
//| N - N>=0, number of observations: |
//| * if given, only leading N rows of X are used |
//| * if not given, automatically determined from input |
//| size |
//| M - M>0, number of variables: |
//| * if given, only leading M columns of X are used |
//| * if not given, automatically determined from input |
//| size |
//| OUTPUT PARAMETERS: |
//| C - array[M,M], covariance matrix (zero if N=0 or N=1) |
//+------------------------------------------------------------------+
void CAlglib::CovM(const CMatrixDouble &x,const int n,const int m,
                   CMatrixDouble &c)
  {
//--- thin wrapper: forward directly to the computational core in CBaseStat
   CBaseStat::CovM(x,n,m,c);
  }
//+------------------------------------------------------------------+
//| Covariance matrix |
//| INPUT PARAMETERS: |
//| X - array[N,M], sample matrix: |
//| * J-th column corresponds to J-th variable |
//| * I-th row corresponds to I-th observation |
//| N - N>=0, number of observations: |
//| * if given, only leading N rows of X are used |
//| * if not given, automatically determined from input |
//| size |
//| M - M>0, number of variables: |
//| * if given, only leading M columns of X are used |
//| * if not given, automatically determined from input |
//| size |
//| OUTPUT PARAMETERS: |
//| C - array[M,M], covariance matrix (zero if N=0 or N=1) |
//+------------------------------------------------------------------+
void CAlglib::CovM(const CMatrixDouble &x,CMatrixDouble &c)
  {
//--- dimensions are taken from the input matrix itself:
//--- rows = observations, columns = variables
   int rows=(int)CAp::Rows(x);
   int cols=(int)CAp::Cols(x);
//--- compute the covariance matrix
   CBaseStat::CovM(x,rows,cols,c);
  }
//+------------------------------------------------------------------+
//| Pearson product-moment correlation matrix |
//| INPUT PARAMETERS: |
//| X - array[N,M], sample matrix: |
//| * J-th column corresponds to J-th variable |
//| * I-th row corresponds to I-th observation |
//| N - N>=0, number of observations: |
//| * if given, only leading N rows of X are used |
//| * if not given, automatically determined from input |
//| size |
//| M - M>0, number of variables: |
//| * if given, only leading M columns of X are used |
//| * if not given, automatically determined from input |
//| size |
//| OUTPUT PARAMETERS: |
//| C - array[M,M], correlation matrix (zero if N=0 or N=1) |
//+------------------------------------------------------------------+
void CAlglib::PearsonCorrM(const CMatrixDouble &x,const int n,
                           const int m,CMatrixDouble &c)
  {
//--- thin wrapper: forward directly to the computational core in CBaseStat
   CBaseStat::PearsonCorrM(x,n,m,c);
  }
//+------------------------------------------------------------------+
//| Pearson product-moment correlation matrix |
//| INPUT PARAMETERS: |
//| X - array[N,M], sample matrix: |
//| * J-th column corresponds to J-th variable |
//| * I-th row corresponds to I-th observation |
//| N - N>=0, number of observations: |
//| * if given, only leading N rows of X are used |
//| * if not given, automatically determined from input |
//| size |
//| M - M>0, number of variables: |
//| * if given, only leading M columns of X are used |
//| * if not given, automatically determined from input |
//| size |
//| OUTPUT PARAMETERS: |
//| C - array[M,M], correlation matrix (zero if N=0 or N=1) |
//+------------------------------------------------------------------+
//--- NOTE(review): unlike the sibling auto-size overloads (CovM, SpearmanCorrM),
//--- parameter X is not declared const here; making it const would also require
//--- updating the class declaration, so the inconsistency is only flagged.
void CAlglib::PearsonCorrM(CMatrixDouble &x,CMatrixDouble &c)
  {
//--- initialization: N/M are derived from the matrix dimensions
   int n=(int)CAp::Rows(x);
   int m=(int)CAp::Cols(x);
//--- function call
   CBaseStat::PearsonCorrM(x,n,m,c);
  }
//+------------------------------------------------------------------+
//| Spearman's rank correlation matrix |
//| INPUT PARAMETERS: |
//| X - array[N,M], sample matrix: |
//| * J-th column corresponds to J-th variable |
//| * I-th row corresponds to I-th observation |
//| N - N>=0, number of observations: |
//| * if given, only leading N rows of X are used |
//| * if not given, automatically determined from input |
//| size |
//| M - M>0, number of variables: |
//| * if given, only leading M columns of X are used |
//| * if not given, automatically determined from input |
//| size |
//| OUTPUT PARAMETERS: |
//| C - array[M,M], correlation matrix (zero if N=0 or N=1) |
//+------------------------------------------------------------------+
void CAlglib::SpearmanCorrM(const CMatrixDouble &x,const int n,
                            const int m,CMatrixDouble &c)
  {
//--- thin wrapper: forward directly to the computational core in CBaseStat
   CBaseStat::SpearmanCorrM(x,n,m,c);
  }
//+------------------------------------------------------------------+
//| Spearman's rank correlation matrix |
//| INPUT PARAMETERS: |
//| X - array[N,M], sample matrix: |
//| * J-th column corresponds to J-th variable |
//| * I-th row corresponds to I-th observation |
//| N - N>=0, number of observations: |
//| * if given, only leading N rows of X are used |
//| * if not given, automatically determined from input |
//| size |
//| M - M>0, number of variables: |
//| * if given, only leading M columns of X are used |
//| * if not given, automatically determined from input |
//| size |
//| OUTPUT PARAMETERS: |
//| C - array[M,M], correlation matrix (zero if N=0 or N=1) |
//+------------------------------------------------------------------+
void CAlglib::SpearmanCorrM(const CMatrixDouble &x,CMatrixDouble &c)
  {
//--- dimensions are taken from the input matrix itself:
//--- rows = observations, columns = variables
   int rows=(int)CAp::Rows(x);
   int cols=(int)CAp::Cols(x);
//--- compute the rank correlation matrix
   CBaseStat::SpearmanCorrM(x,rows,cols,c);
  }
//+------------------------------------------------------------------+
//| Cross-covariance matrix |
//| INPUT PARAMETERS: |
//| X - array[N,M1], sample matrix: |
//| * J-th column corresponds to J-th variable |
//| * I-th row corresponds to I-th observation |
//| Y - array[N,M2], sample matrix: |
//| * J-th column corresponds to J-th variable |
//| * I-th row corresponds to I-th observation |
//| N - N>=0, number of observations: |
//| * if given, only leading N rows of X/Y are used |
//| * if not given, automatically determined from input |
//| sizes |
//| M1 - M1>0, number of variables in X: |
//| * if given, only leading M1 columns of X are used |
//| * if not given, automatically determined from input |
//| size |
//| M2 - M2>0, number of variables in Y: |
//|               * if given, only leading M2 columns of Y are used  |
//| * if not given, automatically determined from input |
//| size |
//| OUTPUT PARAMETERS: |
//| C - array[M1,M2], cross-covariance matrix (zero if N=0 or|
//| N=1) |
//+------------------------------------------------------------------+
void CAlglib::CovM2(const CMatrixDouble &x,const CMatrixDouble &y,
                    const int n,const int m1,const int m2,
                    CMatrixDouble &c)
  {
//--- thin wrapper: forward directly to the computational core in CBaseStat
   CBaseStat::CovM2(x,y,n,m1,m2,c);
  }
//+------------------------------------------------------------------+
//| Cross-covariance matrix |
//| INPUT PARAMETERS: |
//| X - array[N,M1], sample matrix: |
//| * J-th column corresponds to J-th variable |
//| * I-th row corresponds to I-th observation |
//| Y - array[N,M2], sample matrix: |
//| * J-th column corresponds to J-th variable |
//| * I-th row corresponds to I-th observation |
//| N - N>=0, number of observations: |
//| * if given, only leading N rows of X/Y are used |
//| * if not given, automatically determined from input |
//| sizes |
//| M1 - M1>0, number of variables in X: |
//| * if given, only leading M1 columns of X are used |
//| * if not given, automatically determined from input |
//| size |
//| M2 - M2>0, number of variables in Y: |
//|               * if given, only leading M2 columns of Y are used  |
//| * if not given, automatically determined from input |
//| size |
//| OUTPUT PARAMETERS: |
//| C - array[M1,M2], cross-covariance matrix (zero if N=0 or|
//| N=1) |
//+------------------------------------------------------------------+
void CAlglib::CovM2(const CMatrixDouble &x,const CMatrixDouble &y,
                    CMatrixDouble &c)
  {
//--- both matrices must contain the same number of observations (rows)
   if(CAp::Rows(x)!=CAp::Rows(y))
     {
      Print(__FUNCTION__+": rows size are not equal");
      CAp::exception_happened=true;
      return;
     }
//--- sizes are derived from the inputs: rows = observations,
//--- columns = variables of each sample
   int rows =(int)CAp::Rows(x);
   int xvars=(int)CAp::Cols(x);
   int yvars=(int)CAp::Cols(y);
//--- compute the cross-covariance matrix
   CBaseStat::CovM2(x,y,rows,xvars,yvars,c);
  }
//+------------------------------------------------------------------+
//| Pearson product-moment cross-correlation matrix |
//| INPUT PARAMETERS: |
//| X - array[N,M1], sample matrix: |
//| * J-th column corresponds to J-th variable |
//| * I-th row corresponds to I-th observation |
//| Y - array[N,M2], sample matrix: |
//| * J-th column corresponds to J-th variable |
//| * I-th row corresponds to I-th observation |
//| N - N>=0, number of observations: |
//| * if given, only leading N rows of X/Y are used |
//| * if not given, automatically determined from input |
//| sizes |
//| M1 - M1>0, number of variables in X: |
//| * if given, only leading M1 columns of X are used |
//| * if not given, automatically determined from input |
//| size |
//| M2 - M2>0, number of variables in Y: |
//|               * if given, only leading M2 columns of Y are used  |
//| * if not given, automatically determined from input |
//| size |
//| OUTPUT PARAMETERS: |
//| C - array[M1,M2], cross-correlation matrix (zero if N=0 |
//| or N=1) |
//+------------------------------------------------------------------+
void CAlglib::PearsonCorrM2(const CMatrixDouble &x,const CMatrixDouble &y,
                            const int n,const int m1,const int m2,
                            CMatrixDouble &c)
  {
//--- thin wrapper: forward directly to the computational core in CBaseStat
   CBaseStat::PearsonCorrM2(x,y,n,m1,m2,c);
  }
//+------------------------------------------------------------------+
//| Pearson product-moment cross-correlation matrix |
//| INPUT PARAMETERS: |
//| X - array[N,M1], sample matrix: |
//| * J-th column corresponds to J-th variable |
//| * I-th row corresponds to I-th observation |
//| Y - array[N,M2], sample matrix: |
//| * J-th column corresponds to J-th variable |
//| * I-th row corresponds to I-th observation |
//| N - N>=0, number of observations: |
//| * if given, only leading N rows of X/Y are used |
//| * if not given, automatically determined from input |
//| sizes |
//| M1 - M1>0, number of variables in X: |
//| * if given, only leading M1 columns of X are used |
//| * if not given, automatically determined from input |
//| size |
//| M2 - M2>0, number of variables in Y: |
//|               * if given, only leading M2 columns of Y are used  |
//| * if not given, automatically determined from input |
//| size |
//| OUTPUT PARAMETERS: |
//| C - array[M1,M2], cross-correlation matrix (zero if N=0 |
//| or N=1) |
//+------------------------------------------------------------------+
void CAlglib::PearsonCorrM2(const CMatrixDouble &x,const CMatrixDouble &y,
                            CMatrixDouble &c)
  {
//--- both matrices must contain the same number of observations (rows)
   if(CAp::Rows(x)!=CAp::Rows(y))
     {
      Print(__FUNCTION__+": rows size are not equal");
      CAp::exception_happened=true;
      return;
     }
//--- sizes are derived from the inputs: rows = observations,
//--- columns = variables of each sample
   int rows =(int)CAp::Rows(x);
   int xvars=(int)CAp::Cols(x);
   int yvars=(int)CAp::Cols(y);
//--- compute the cross-correlation matrix
   CBaseStat::PearsonCorrM2(x,y,rows,xvars,yvars,c);
  }
//+------------------------------------------------------------------+
//| Spearman's rank cross-correlation matrix |
//| INPUT PARAMETERS: |
//| X - array[N,M1], sample matrix: |
//| * J-th column corresponds to J-th variable |
//| * I-th row corresponds to I-th observation |
//| Y - array[N,M2], sample matrix: |
//| * J-th column corresponds to J-th variable |
//| * I-th row corresponds to I-th observation |
//| N - N>=0, number of observations: |
//| * if given, only leading N rows of X/Y are used |
//| * if not given, automatically determined from input |
//| sizes |
//| M1 - M1>0, number of variables in X: |
//| * if given, only leading M1 columns of X are used |
//| * if not given, automatically determined from input |
//| size |
//| M2 - M2>0, number of variables in Y: |
//|               * if given, only leading M2 columns of Y are used  |
//| * if not given, automatically determined from input |
//| size |
//| OUTPUT PARAMETERS: |
//| C - array[M1,M2], cross-correlation matrix (zero if N=0 |
//| or N=1) |
//+------------------------------------------------------------------+
void CAlglib::SpearmanCorrM2(const CMatrixDouble &x,const CMatrixDouble &y,
                             const int n,const int m1,const int m2,
                             CMatrixDouble &c)
  {
//--- thin wrapper: forward directly to the computational core in CBaseStat
   CBaseStat::SpearmanCorrM2(x,y,n,m1,m2,c);
  }
//+------------------------------------------------------------------+
//| Spearman's rank cross-correlation matrix |
//| INPUT PARAMETERS: |
//| X - array[N,M1], sample matrix: |
//| * J-th column corresponds to J-th variable |
//| * I-th row corresponds to I-th observation |
//| Y - array[N,M2], sample matrix: |
//| * J-th column corresponds to J-th variable |
//| * I-th row corresponds to I-th observation |
//| N - N>=0, number of observations: |
//| * if given, only leading N rows of X/Y are used |
//| * if not given, automatically determined from input |
//| sizes |
//| M1 - M1>0, number of variables in X: |
//| * if given, only leading M1 columns of X are used |
//| * if not given, automatically determined from input |
//| size |
//| M2 - M2>0, number of variables in Y: |
//|               * if given, only leading M2 columns of Y are used  |
//| * if not given, automatically determined from input |
//| size |
//| OUTPUT PARAMETERS: |
//| C - array[M1,M2], cross-correlation matrix (zero if N=0 |
//| or N=1) |
//+------------------------------------------------------------------+
void CAlglib::SpearmanCorrM2(const CMatrixDouble &x,const CMatrixDouble &y,
                             CMatrixDouble &c)
  {
//--- both matrices must contain the same number of observations (rows)
   if(CAp::Rows(x)!=CAp::Rows(y))
     {
      Print(__FUNCTION__+": rows size are not equal");
      CAp::exception_happened=true;
      return;
     }
//--- sizes are derived from the inputs: rows = observations,
//--- columns = variables of each sample
   int rows =(int)CAp::Rows(x);
   int xvars=(int)CAp::Cols(x);
   int yvars=(int)CAp::Cols(y);
//--- compute the rank cross-correlation matrix
   CBaseStat::SpearmanCorrM2(x,y,rows,xvars,yvars,c);
  }
//+------------------------------------------------------------------+
//| This function replaces data in XY by their ranks: |
//| * XY is processed row-by-row |
//| * rows are processed separately |
//| * tied data are correctly handled (tied ranks |
//| are calculated) |
//| * ranking starts from 0, ends at NFeatures-1 |
//| * sum of within-row values is equal |
//| to (NFeatures-1)*NFeatures/2 |
//| INPUT PARAMETERS: |
//| XY - array[NPoints,NFeatures], dataset |
//| NPoints - number of points |
//| NFeatures- number of features |
//| OUTPUT PARAMETERS: |
//| XY - data are replaced by their within-row ranks; |
//| ranking starts from 0, ends at NFeatures-1 |
//+------------------------------------------------------------------+
void CAlglib::RankData(CMatrixDouble &xy,int npoints,int nfeatures)
  {
//--- thin wrapper: in-place row-wise ranking is done by CBaseStat
   CBaseStat::RankData(xy,npoints,nfeatures);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//+------------------------------------------------------------------+
//| Auto-size overload: replaces data in XY by their within-row ranks|
//| NPoints/NFeatures are derived from the matrix dimensions.        |
//+------------------------------------------------------------------+
void CAlglib::RankData(CMatrixDouble &xy)
  {
//--- initialization; explicit (int) casts match the convention used by
//--- every other auto-size overload in this file (e.g. CovM, CovM2)
   int npoints=(int)CAp::Rows(xy);
   int nfeatures=(int)CAp::Cols(xy);
//--- function call
   CBaseStat::RankData(xy,npoints,nfeatures);
  }
//+------------------------------------------------------------------+
//| This function replaces data in XY by their CENTERED ranks: |
//| * XY is processed row-by-row |
//| * rows are processed separately |
//| * tied data are correctly handled (tied ranks are calculated) |
//| * centered ranks are just usual ranks, but centered in such way|
//| that sum of within-row values is equal to 0.0. |
//| * centering is performed by subtracting mean from each row, |
//| i.e it changes mean value, but does NOT change higher moments|
//| INPUT PARAMETERS: |
//| XY - array[NPoints,NFeatures], dataset |
//| NPoints - number of points |
//| NFeatures- number of features |
//| OUTPUT PARAMETERS: |
//| XY - data are replaced by their within-row ranks; |
//| ranking starts from 0, ends at NFeatures-1 |
//+------------------------------------------------------------------+
void CAlglib::RankDataCentered(CMatrixDouble &xy,int npoints,int nfeatures)
  {
//--- thin wrapper: in-place row-wise centered ranking is done by CBaseStat
   CBaseStat::RankDataCentered(xy,npoints,nfeatures);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//+------------------------------------------------------------------+
//| Auto-size overload: replaces data in XY by their CENTERED        |
//| within-row ranks. NPoints/NFeatures are derived from the matrix  |
//| dimensions.                                                      |
//+------------------------------------------------------------------+
void CAlglib::RankDataCentered(CMatrixDouble &xy)
  {
//--- initialization; explicit (int) casts match the convention used by
//--- every other auto-size overload in this file (e.g. CovM, CovM2)
   int npoints=(int)CAp::Rows(xy);
   int nfeatures=(int)CAp::Cols(xy);
//--- function call
   CBaseStat::RankDataCentered(xy,npoints,nfeatures);
  }
//+------------------------------------------------------------------+
//| Pearson's correlation coefficient significance test |
//| This test checks hypotheses about whether X and Y are samples of|
//| two continuous distributions having zero correlation or whether |
//| their correlation is non-zero. |
//| The following tests are performed: |
//| * two-tailed test (null hypothesis - X and Y have zero |
//| correlation) |
//| * left-tailed test (null hypothesis - the correlation |
//| coefficient is greater than or equal to 0) |
//| * right-tailed test (null hypothesis - the correlation |
//| coefficient is less than or equal to 0). |
//| Requirements: |
//| * the number of elements in each sample is not less than 5 |
//| * normality of distributions of X and Y. |
//| Input parameters: |
//| R - Pearson's correlation coefficient for X and Y |
//| N - number of elements in samples, N>=5. |
//| Output parameters: |
//| BothTails - p-value for two-tailed test. |
//| If BothTails is less than the given |
//| significance level the null hypothesis is |
//| rejected. |
//| LeftTail - p-value for left-tailed test. |
//| If LeftTail is less than the given |
//| significance level, the null hypothesis is |
//| rejected. |
//| RightTail - p-value for right-tailed test. |
//| If RightTail is less than the given |
//| significance level the null hypothesis is |
//| rejected. |
//+------------------------------------------------------------------+
void CAlglib::PearsonCorrelationSignificance(const double r,const int n,
                                             double &bothTails,
                                             double &leftTail,
                                             double &rightTail)
  {
//--- thin wrapper: p-values are computed by the correlation-tests core
   CCorrTests::PearsonCorrSignific(r,n,bothTails,leftTail,rightTail);
  }
//+------------------------------------------------------------------+
//| Spearman's rank correlation coefficient significance test |
//| This test checks hypotheses about whether X and Y are samples of |
//| two continuous distributions having zero correlation or whether |
//| their correlation is non-zero. |
//| The following tests are performed: |
//| * two-tailed test (null hypothesis - X and Y have zero |
//| correlation) |
//| * left-tailed test (null hypothesis - the correlation |
//| coefficient is greater than or equal to 0) |
//| * right-tailed test (null hypothesis - the correlation |
//| coefficient is less than or equal to 0). |
//| Requirements: |
//| * the number of elements in each sample is not less than 5. |
//| The test is non-parametric and doesn't require distributions X |
//| and Y to be normal. |
//| Input parameters: |
//| R - Spearman's rank correlation coefficient for X and Y |
//| N - number of elements in samples, N>=5. |
//| Output parameters: |
//| BothTails - p-value for two-tailed test. |
//| If BothTails is less than the given |
//| significance level the null hypothesis is |
//| rejected. |
//| LeftTail - p-value for left-tailed test. |
//| If LeftTail is less than the given |
//| significance level, the null hypothesis is |
//| rejected. |
//| RightTail - p-value for right-tailed test. |
//| If RightTail is less than the given |
//| significance level the null hypothesis is |
//| rejected. |
//+------------------------------------------------------------------+
void CAlglib::SpearmanRankCorrelationSignificance(const double r,
                                                  const int n,
                                                  double &bothTails,
                                                  double &leftTail,
                                                  double &rightTail)
  {
//--- thin wrapper: p-values are computed by the correlation-tests core
   CCorrTests::SpearmanRankCorrSignific(r,n,bothTails,leftTail,rightTail);
  }
//+------------------------------------------------------------------+
//| Jarque-Bera test |
//| This test checks hypotheses about the fact that a given sample X |
//| is a sample of normal random variable. |
//| Requirements: |
//| * the number of elements in the sample is not less than 5. |
//| Input parameters: |
//| X - sample. Array whose index goes from 0 to N-1. |
//| N - size of the sample. N>=5 |
//| Output parameters: |
//| BothTails - p-value for two-tailed test. |
//| If BothTails is less than the given |
//| significance level the null hypothesis is |
//| rejected. |
//| LeftTail - p-value for left-tailed test. |
//| If LeftTail is less than the given |
//| significance level, the null hypothesis is |
//| rejected. |
//| RightTail - p-value for right-tailed test. |
//| If RightTail is less than the given |
//| significance level the null hypothesis is |
//| rejected. |
//| Accuracy of the approximation used (5<=N<=1951): |
//| p-value relative error (5<=N<=1951) |
//| [1, 0.1] < 1% |
//| [0.1, 0.01] < 2% |
//| [0.01, 0.001] < 6% |
//| [0.001, 0] wasn't measured |
//| For N>1951 accuracy wasn't measured but it shouldn't be sharply |
//| different from table values. |
//+------------------------------------------------------------------+
void CAlglib::JarqueBeraTest(const double &x[],const int n,double &p)
  {
//--- initialization: reset the output parameter before delegating
   p=0;
//--- function call
   CJarqueBera::JarqueBeraTest(x,n,p);
  }
//+------------------------------------------------------------------+
//| Mann-Whitney U-test |
//| This test checks hypotheses about whether X and Y are samples of |
//| two continuous distributions of the same shape and same median or|
//| whether their medians are different. |
//| The following tests are performed: |
//| * two-tailed test (null hypothesis - the medians are equal) |
//| * left-tailed test (null hypothesis - the median of the first|
//| sample is greater than or equal to the median of the second|
//| sample) |
//| * right-tailed test (null hypothesis - the median of the |
//| first sample is less than or equal to the median of the |
//| second sample). |
//| Requirements: |
//| * the samples are independent |
//| * X and Y are continuous distributions (or discrete |
//| distributions well- approximating continuous distributions)|
//| * distributions of X and Y have the same shape. The only |
//| possible difference is their position (i.e. the value of |
//| the median) |
//| * the number of elements in each sample is not less than 5 |
//| * the scale of measurement should be ordinal, interval or |
//| ratio (i.e. the test could not be applied to nominal |
//| variables). |
//| The test is non-parametric and doesn't require distributions to |
//| be normal. |
//| Input parameters: |
//| X - sample 1. Array whose index goes from 0 to N-1. |
//| N - size of the sample. N>=5 |
//| Y - sample 2. Array whose index goes from 0 to M-1. |
//| M - size of the sample. M>=5 |
//| Output parameters: |
//| BothTails - p-value for two-tailed test. |
//| If BothTails is less than the given |
//| significance level the null hypothesis is |
//| rejected. |
//| LeftTail - p-value for left-tailed test. |
//| If LeftTail is less than the given |
//| significance level, the null hypothesis is |
//| rejected. |
//| RightTail - p-value for right-tailed test. |
//| If RightTail is less than the given |
//| significance level the null hypothesis is |
//| rejected. |
//| To calculate p-values, special approximation is used. This |
//| method lets us calculate p-values with satisfactory accuracy in |
//| interval [0.0001, 1]. There is no approximation outside the |
//| [0.0001, 1] interval. Therefore, if the significance level |
//| outlies this interval, the test returns 0.0001. |
//| Relative precision of approximation of p-value: |
//| N M Max.err. Rms.err. |
//| 5..10 N..10 1.4e-02 6.0e-04 |
//| 5..10 N..100 2.2e-02 5.3e-06 |
//| 10..15 N..15 1.0e-02 3.2e-04 |
//| 10..15 N..100 1.0e-02 2.2e-05 |
//| 15..100 N..100 6.1e-03 2.7e-06 |
//| For N,M>100 accuracy checks weren't put into practice, but taking|
//| into account characteristics of asymptotic approximation used, |
//| precision should not be sharply different from the values for |
//| interval [5, 100]. |
//+------------------------------------------------------------------+
void CAlglib::MannWhitneyUTest(const double &x[],const int n,
                               const double &y[],const int m,
                               double &bothTails,double &leftTail,
                               double &rightTail)
  {
//--- thin wrapper: p-values are computed by the Mann-Whitney core
   CMannWhitneyU::CMannWhitneyUTest(x,n,y,m,bothTails,leftTail,rightTail);
  }
//+------------------------------------------------------------------+
//| Sign test |
//| This test checks three hypotheses about the median of the given |
//| sample. |
//| The following tests are performed: |
//| * two-tailed test (null hypothesis - the median is equal to |
//| the given value) |
//| * left-tailed test (null hypothesis - the median is greater |
//| than or equal to the given value) |
//| * right-tailed test (null hypothesis - the median is less |
//| than or equal to the given value) |
//| Requirements: |
//| * the scale of measurement should be ordinal, interval or |
//| ratio (i.e. the test could not be applied to nominal |
//| variables). |
//| The test is non-parametric and doesn't require distribution X to |
//| be normal |
//| Input parameters: |
//| X - sample. Array whose index goes from 0 to N-1. |
//| N - size of the sample. |
//| Median - assumed median value. |
//| Output parameters: |
//| BothTails - p-value for two-tailed test. |
//| If BothTails is less than the given |
//| significance level the null hypothesis is |
//| rejected. |
//| LeftTail - p-value for left-tailed test. |
//| If LeftTail is less than the given |
//| significance level, the null hypothesis is |
//| rejected. |
//| RightTail - p-value for right-tailed test. |
//| If RightTail is less than the given |
//|                     significance level the null hypothesis is   |
//| rejected. |
//| While calculating p-values high-precision binomial distribution |
//| approximation is used, so significance levels have about 15 exact|
//| digits. |
//+------------------------------------------------------------------+
void CAlglib::OneSampleSignTest(const double &x[],const int n,
                                const double median,double &bothTails,
                                double &leftTail,double &rightTail)
  {
//--- thin wrapper: p-values are computed by the sign-test core
   CSignTest::OneSampleSignTest(x,n,median,bothTails,leftTail,rightTail);
  }
//+------------------------------------------------------------------+
//| One-sample t-test |
//| This test checks three hypotheses about the mean of the given |
//| sample. The following tests are performed: |
//| * two-tailed test (null hypothesis - the mean is equal to the|
//| given value) |
//| * left-tailed test (null hypothesis - the mean is greater |
//| than or equal to the given value) |
//| * right-tailed test (null hypothesis - the mean is less than |
//| or equal to the given value). |
//| The test is based on the assumption that a given sample has a |
//| normal distribution and an unknown dispersion. If the |
//| distribution sharply differs from normal, the test will work |
//| incorrectly. |
//| Input parameters: |
//| X - sample. Array whose index goes from 0 to N-1. |
//| N - size of sample. |
//| Mean - assumed value of the mean. |
//| Output parameters: |
//| BothTails - p-value for two-tailed test. |
//| If BothTails is less than the given |
//| significance level the null hypothesis is |
//| rejected. |
//| LeftTail - p-value for left-tailed test. |
//| If LeftTail is less than the given |
//| significance level, the null hypothesis is |
//| rejected. |
//| RightTail - p-value for right-tailed test. |
//| If RightTail is less than the given |
//| significance level the null hypothesis is |
//| rejected. |
//+------------------------------------------------------------------+
void CAlglib::StudentTest1(const double &x[],const int n,const double mean,
                           double &bothTails,double &leftTail,
                           double &rightTail)
  {
//--- thin wrapper: p-values are computed by the Student-tests core
   CStudentTests::StudentTest1(x,n,mean,bothTails,leftTail,rightTail);
  }
//+------------------------------------------------------------------+
//| Two-sample pooled test |
//| This test checks three hypotheses about the mean of the given |
//| samples. The following tests are performed: |
//| * two-tailed test (null hypothesis - the means are equal) |
//| * left-tailed test (null hypothesis - the mean of the first |
//| sample is greater than or equal to the mean of the second |
//| sample) |
//| * right-tailed test (null hypothesis - the mean of the first |
//| sample is less than or equal to the mean of the second |
//| sample). |
//| Test is based on the following assumptions: |
//| * given samples have normal distributions |
//| * dispersions are equal |
//| * samples are independent. |
//| Input parameters: |
//| X - sample 1. Array whose index goes from 0 to N-1. |
//| N - size of sample. |
//| Y - sample 2. Array whose index goes from 0 to M-1. |
//| M - size of sample. |
//| Output parameters: |
//| BothTails - p-value for two-tailed test. |
//| If BothTails is less than the given |
//| significance level the null hypothesis is |
//| rejected. |
//| LeftTail - p-value for left-tailed test. |
//| If LeftTail is less than the given |
//| significance level, the null hypothesis is |
//| rejected. |
//| RightTail - p-value for right-tailed test. |
//| If RightTail is less than the given |
//| significance level the null hypothesis is |
//| rejected. |
//+------------------------------------------------------------------+
void CAlglib::StudentTest2(const double &x[],const int n,const double &y[],
                           const int m,double &bothTails,
                           double &leftTail,double &rightTail)
  {
//--- thin wrapper: p-values are computed by the Student-tests core
   CStudentTests::StudentTest2(x,n,y,m,bothTails,leftTail,rightTail);
  }
//+------------------------------------------------------------------+
//| Two-sample unpooled test |
//| This test checks three hypotheses about the mean of the given |
//| samples. The following tests are performed: |
//| * two-tailed test (null hypothesis - the means are equal) |
//| * left-tailed test (null hypothesis - the mean of the first |
//| sample is greater than or equal to the mean of the second |
//| sample) |
//| * right-tailed test (null hypothesis - the mean of the first |
//| sample is less than or equal to the mean of the second |
//| sample). |
//| Test is based on the following assumptions: |
//| * given samples have normal distributions |
//| * samples are independent. |
//| Dispersion equality is not required |
//| Input parameters: |
//| X - sample 1. Array whose index goes from 0 to N-1. |
//| N - size of the sample. |
//| Y - sample 2. Array whose index goes from 0 to M-1. |
//| M - size of the sample. |
//| Output parameters: |
//| BothTails - p-value for two-tailed test. |
//| If BothTails is less than the given |
//| significance level the null hypothesis is |
//| rejected. |
//| LeftTail - p-value for left-tailed test. |
//| If LeftTail is less than the given |
//| significance level, the null hypothesis is |
//| rejected. |
//| RightTail - p-value for right-tailed test. |
//| If RightTail is less than the given |
//| significance level the null hypothesis is |
//| rejected. |
//+------------------------------------------------------------------+
void CAlglib::UnequalVarianceTest(const double &x[],const int n,
                                  const double &y[],const int m,
                                  double &bothTails,double &leftTail,
                                  double &rightTail)
  {
//--- thin wrapper: p-values are computed by the Student-tests core
   CStudentTests::UnequalVarianceTest(x,n,y,m,bothTails,leftTail,rightTail);
  }
//+------------------------------------------------------------------+
//| Two-sample F-test |
//| This test checks three hypotheses about dispersions of the given |
//| samples. The following tests are performed: |
//| * two-tailed test (null hypothesis - the dispersions are |
//| equal) |
//| * left-tailed test (null hypothesis - the dispersion of the |
//| first sample is greater than or equal to the dispersion of |
//| the second sample). |
//| * right-tailed test (null hypothesis - the dispersion of the |
//| first sample is less than or equal to the dispersion of |
//| the second sample) |
//| The test is based on the following assumptions: |
//| * the given samples have normal distributions |
//| * the samples are independent. |
//| Input parameters: |
//| X - sample 1. Array whose index goes from 0 to N-1. |
//| N - sample size. |
//| Y - sample 2. Array whose index goes from 0 to M-1. |
//| M - sample size. |
//| Output parameters: |
//| BothTails - p-value for two-tailed test. |
//| If BothTails is less than the given |
//| significance level the null hypothesis is |
//| rejected. |
//| LeftTail - p-value for left-tailed test. |
//| If LeftTail is less than the given |
//| significance level, the null hypothesis is |
//| rejected. |
//| RightTail - p-value for right-tailed test. |
//| If RightTail is less than the given |
//| significance level the null hypothesis is |
//| rejected. |
//+------------------------------------------------------------------+
void CAlglib::FTest(const double &x[],const int n,const double &y[],
const int m,double &bothTails,double &leftTail,
double &rightTail)
{
//--- Thin wrapper: delegates the two-sample F-test (comparison of the
//--- dispersions of samples X[0..N-1] and Y[0..M-1]) to the
//--- CVarianceTests engine; p-values are returned via the three
//--- reference output parameters.
CVarianceTests::FTest(x,n,y,m,bothTails,leftTail,rightTail);
}
//+------------------------------------------------------------------+
//| One-sample chi-square test |
//| This test checks three hypotheses about the dispersion of the |
//| given sample. The following tests are performed:                 |
//| * two-tailed test (null hypothesis - the dispersion equals |
//| the given number) |
//| * left-tailed test (null hypothesis - the dispersion is |
//| greater than or equal to the given number) |
//| * right-tailed test (null hypothesis - dispersion is less |
//| than or equal to the given number). |
//| Test is based on the following assumptions: |
//| * the given sample has a normal distribution. |
//| Input parameters: |
//| X - sample 1. Array whose index goes from 0 to |
//| N-1. |
//| N - size of the sample. |
//| Variance - dispersion value to compare with. |
//| Output parameters: |
//| BothTails - p-value for two-tailed test. |
//| If BothTails is less than the given |
//| significance level the null hypothesis is |
//| rejected. |
//| LeftTail - p-value for left-tailed test. |
//| If LeftTail is less than the given |
//| significance level, the null hypothesis is |
//| rejected. |
//| RightTail - p-value for right-tailed test. |
//| If RightTail is less than the given |
//| significance level the null hypothesis is |
//| rejected. |
//+------------------------------------------------------------------+
//--- NOTE(review): unlike the sibling wrappers above, this signature lacks
//--- const qualifiers on x[] and n; it must match the in-class declaration,
//--- so it is left as-is — confirm against the CAlglib class declaration
//--- before normalizing.
void CAlglib::OneSampleVarianceTest(double &x[],int n,double variance,
double &bothTails,double &leftTail,
double &rightTail)
{
//--- Thin wrapper: delegates the one-sample chi-square dispersion test
//--- (compare the dispersion of X[0..N-1] with the given Variance value)
//--- to the CVarianceTests engine; p-values are returned via the three
//--- reference output parameters.
CVarianceTests::OneSampleVarianceTest(x,n,variance,bothTails,leftTail,rightTail);
}
//+------------------------------------------------------------------+
//| Wilcoxon signed-rank test |
//| This test checks three hypotheses about the median of the given |
//| sample. The following tests are performed: |
//| * two-tailed test (null hypothesis - the median is equal to |
//| the given value) |
//| * left-tailed test (null hypothesis - the median is greater |
//| than or equal to the given value) |
//| * right-tailed test (null hypothesis - the median is less |
//| than or equal to the given value) |
//| Requirements: |
//| * the scale of measurement should be ordinal, interval or |
//| ratio (i.e. the test could not be applied to nominal |
//| variables). |
//| * the distribution should be continuous and symmetric |
//| relative to its median. |
//| * number of distinct values in the X array should be greater |
//| than 4 |
//| The test is non-parametric and doesn't require distribution X to |
//| be normal |
//| Input parameters: |
//| X - sample. Array whose index goes from 0 to N-1. |
//| N - size of the sample. |
//| Median - assumed median value. |
//| Output parameters: |
//| BothTails - p-value for two-tailed test. |
//| If BothTails is less than the given |
//| significance level the null hypothesis is |
//| rejected. |
//| LeftTail - p-value for left-tailed test. |
//| If LeftTail is less than the given |
//| significance level, the null hypothesis is |
//| rejected. |
//| RightTail - p-value for right-tailed test. |
//| If RightTail is less than the given |
//| significance level the null hypothesis is |
//| rejected. |
//| To calculate p-values, special approximation is used. This method|
//| lets us calculate p-values with two decimal places in interval |
//| [0.0001, 1]. |
//| "Two decimal places" does not sound very impressive, but in |
//| practice the relative error of less than 1% is enough to make a |
//| decision. |
//| There is no approximation outside the [0.0001, 1] interval. |
//| Therefore, if the significance level lies outside this interval, |
//| test returns 0.0001. |
//+------------------------------------------------------------------+
void CAlglib::WilcoxonSignedRankTest(const double &x[],const int n,
const double e,double &bothTails,
double &leftTail,double &rightTail)
{
//--- Thin wrapper: delegates the Wilcoxon signed-rank test to the
//--- CWilcoxonSignedRank engine. Parameter 'e' is the assumed median
//--- value ("Median" in the header documentation); p-values of the
//--- two-, left- and right-tailed tests are written to the reference
//--- output parameters.
CWilcoxonSignedRank::WilcoxonSignedRankTest(x,n,e,bothTails,leftTail,rightTail);
}
//+------------------------------------------------------------------+