//+------------------------------------------------------------------+ //| alglib.mqh | //| Copyright 2003-2022 Sergey Bochkanov (ALGLIB project) | //| Copyright 2012-2023, MetaQuotes Ltd. | //| https://www.mql5.com | //+------------------------------------------------------------------+ //| Implementation of ALGLIB library in MetaQuotes Language 5 | //| | //| The features of the library include: | //| - Linear algebra (direct algorithms, EVD, SVD) | //| - Solving systems of linear and non-linear equations | //| - Interpolation | //| - Optimization | //| - FFT (Fast Fourier Transform) | //| - Numerical integration | //| - Linear and nonlinear least-squares fitting | //| - Ordinary differential equations | //| - Computation of special functions | //| - Descriptive statistics and hypothesis testing | //| - Data analysis - classification, regression | //| - Implementing linear algebra algorithms, interpolation, etc. | //| in high-precision arithmetic (using MPFR) | //| | //| This file is free software; you can redistribute it and/or | //| modify it under the terms of the GNU General Public License as | //| published by the Free Software Foundation (www.fsf.org); either | //| version 2 of the License, or (at your option) any later version. | //| | //| This program is distributed in the hope that it will be useful, | //| but WITHOUT ANY WARRANTY; without even the implied warranty of | //| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | //| GNU General Public License for more details. 
| //+------------------------------------------------------------------+ #include #include "alglibmisc.mqh" #include "dataanalysis.mqh" #include "diffequations.mqh" #include "delegatefunctions.mqh" #include "fasttransforms.mqh" #include "integration.mqh" #include "interpolation.mqh" //+------------------------------------------------------------------+ //| The main class, which includes functions for users | //+------------------------------------------------------------------+ class CAlglib { public: //--- function of package alglibmisc //--- high quality random number generator static void HQRndRandomize(CHighQualityRandStateShell &state); static void HQRndSeed(const int s1,const int s2,CHighQualityRandStateShell &state); static double HQRndUniformR(CHighQualityRandStateShell &state); static int HQRndUniformI(CHighQualityRandStateShell &state,const int n); static double HQRndNormal(CHighQualityRandStateShell &state); static void HQRndNormalV(CHighQualityRandStateShell &state,int n,CRowDouble &x); static void HQRndNormalV(CHighQualityRandStateShell &state,int n,vector &x); static void HQRndNormalM(CHighQualityRandStateShell &state,int m,int n,CMatrixDouble &x); static void HQRndNormalM(CHighQualityRandStateShell &state,int m,int n,matrix &x); static void HQRndUnit2(CHighQualityRandStateShell &state,double &x,double &y); static void HQRndNormal2(CHighQualityRandStateShell &state,double &x1,double &x2); static double HQRndExponential(CHighQualityRandStateShell &state,const double lambdav); static double HQRndDiscrete(CHighQualityRandStateShell &state,int n,CRowDouble &x); static double HQRndDiscrete(CHighQualityRandStateShell &state,int n,vector &x); static double HQRndContinuous(CHighQualityRandStateShell &state,int n,CRowDouble &x); static double HQRndContinuous(CHighQualityRandStateShell &state,int n,vector &x); //--- build KD-trees static void KDTreeSerialize(CKDTreeShell &obj,string &s_out); static void KDTreeUnserialize(string s_in,CKDTreeShell &obj); static 
void KDTreeBuild(CMatrixDouble &xy,const int n,const int nx,const int ny,const int normtype,CKDTreeShell &kdt); static void KDTreeBuild(CMatrixDouble &xy,const int nx,const int ny,const int normtype,CKDTreeShell &kdt); static void KDTreeBuildTagged(CMatrixDouble &xy,int &tags[],const int n,const int nx,const int ny,const int normtype,CKDTreeShell &kdt); static void KDTreeBuildTagged(CMatrixDouble &xy,CRowInt &tags,const int n,const int nx,const int ny,const int normtype,CKDTreeShell &kdt); static void KDTreeBuildTagged(CMatrixDouble &xy,int &tags[],const int nx,const int ny,const int normtype,CKDTreeShell &kdt); static void KDTreeBuildTagged(CMatrixDouble &xy,CRowInt &tags,const int nx,const int ny,const int normtype,CKDTreeShell &kdt); static void KDTreeCreateRequestBuffer(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf); static int KDTreeQueryKNN(CKDTreeShell &kdt,double &x[],const int k,const bool selfmatch=true); static int KDTreeQueryKNN(CKDTreeShell &kdt,CRowDouble &x,const int k,const bool selfmatch=true); static int KDTreeQueryKNN(CKDTreeShell &kdt,vector &x,const int k,const bool selfmatch=true); static int KDTreeTsQueryKNN(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,double &x[],const int k,const bool selfmatch=true); static int KDTreeTsQueryKNN(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,CRowDouble &x,const int k,const bool selfmatch=true); static int KDTreeTsQueryKNN(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,vector &x,const int k,const bool selfmatch=true); static int KDTreeQueryRNN(CKDTreeShell &kdt,double &x[],const double r,const bool selfmatch=true); static int KDTreeQueryRNN(CKDTreeShell &kdt,CRowDouble &x,const double r,const bool selfmatch=true); static int KDTreeQueryRNN(CKDTreeShell &kdt,vector &x,const double r,const bool selfmatch=true); static int KDTreeQueryRNNU(CKDTreeShell &kdt,double &x[],const double r,const bool selfmatch=true); static int KDTreeQueryRNNU(CKDTreeShell &kdt,CRowDouble &x,const double r,const bool 
selfmatch=true); static int KDTreeQueryRNNU(CKDTreeShell &kdt,vector &x,const double r,const bool selfmatch=true); static int KDTreeTsQueryRNN(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,double &x[],const double r,const bool selfmatch=true); static int KDTreeTsQueryRNN(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,CRowDouble &x,const double r,const bool selfmatch=true); static int KDTreeTsQueryRNN(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,vector &x,const double r,const bool selfmatch=true); static int KDTreeTsQueryRNNU(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,double &x[],const double r,const bool selfmatch=true); static int KDTreeTsQueryRNNU(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,CRowDouble &x,const double r,const bool selfmatch=true); static int KDTreeTsQueryRNNU(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,vector &x,const double r,const bool selfmatch=true); static int KDTreeQueryAKNN(CKDTreeShell &kdt,double &x[],const int k,const bool selfmatch=true,const double eps=0); static int KDTreeQueryAKNN(CKDTreeShell &kdt,vector &x,const int k,const bool selfmatch=true,const double eps=0); static int KDTreeQueryAKNN(CKDTreeShell &kdt,CRowDouble &x,const int k,const bool selfmatch=true,const double eps=0); static int KDTreeQueryBox(CKDTreeShell &kdt,double &boxmin[],double &boxmax[]); static int KDTreeQueryBox(CKDTreeShell &kdt,vector &boxmin,vector &boxmax); static int KDTreeQueryBox(CKDTreeShell &kdt,CRowDouble &boxmin,CRowDouble &boxmax); static int KDTreeTsQueryBox(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,double &boxmin[],double &boxmax[]); static int KDTreeTsQueryBox(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,vector &boxmin,vector &boxmax); static int KDTreeTsQueryBox(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,CRowDouble &boxmin,CRowDouble &boxmax); static void KDTreeQueryResultsX(CKDTreeShell &kdt,CMatrixDouble &x); static void KDTreeQueryResultsXY(CKDTreeShell &kdt,CMatrixDouble &xy); static void 
KDTreeQueryResultsTags(CKDTreeShell &kdt,int &tags[]); static void KDTreeQueryResultsTags(CKDTreeShell &kdt,CRowInt &tags); static void KDTreeQueryResultsDistances(CKDTreeShell &kdt,double &r[]); static void KDTreeQueryResultsDistances(CKDTreeShell &kdt,vector &r); static void KDTreeQueryResultsDistances(CKDTreeShell &kdt,CRowDouble &r); static void KDTreeQueryResultsXI(CKDTreeShell &kdt,CMatrixDouble &x); static void KDTreeQueryResultsXYI(CKDTreeShell &kdt,CMatrixDouble &xy); static void KDTreeQueryResultsTagsI(CKDTreeShell &kdt,int &tags[]); static void KDTreeQueryResultsTagsI(CKDTreeShell &kdt,CRowInt &tags); static void KDTreeQueryResultsDistancesI(CKDTreeShell &kdt,double &r[]); static void KDTreeQueryResultsDistancesI(CKDTreeShell &kdt,vector &r); static void KDTreeQueryResultsDistancesI(CKDTreeShell &kdt,CRowDouble &r); //--- functions of package dataanalysis //--- data analysis static void DSOptimalSplit2(double &a[],int &c[],const int n,int &info,double &threshold,double &pal,double &pbl,double &par,double &pbr,double &cve); static void DSOptimalSplit2(CRowDouble &a,CRowInt &c,const int n,int &info,double &threshold,double &pal,double &pbl,double &par,double &pbr,double &cve); static void DSOptimalSplit2Fast(double &a[],int &c[],int &tiesbuf[],int &cntbuf[],double &bufr[],int &bufi[],const int n,const int nc,const double alpha,int &info,double &threshold,double &rms,double &cvrms); static void DSOptimalSplit2Fast(CRowDouble &a,CRowInt &c,CRowInt &tiesbuf,CRowInt &cntbuf,CRowDouble &bufr,CRowInt &bufi,const int n,const int nc,const double alpha,int &info,double &threshold,double &rms,double &cvrms); //--- decision forest static void DFSerialize(CDecisionForestShell &obj,string &s_out); static void DFUnserialize(const string s_in,CDecisionForestShell &obj); static void DFCreateBuffer(CDecisionForestShell &model,CDecisionForestBuffer &buf); static void DFBuilderCreate(CDecisionForestBuilder &s); static void DFBuilderSetDataset(CDecisionForestBuilder 
&s,CMatrixDouble &xy,int npoints,int nvars,int nclasses); static void DFBuilderSetRndVars(CDecisionForestBuilder &s,int rndvars); static void DFBuilderSetRndVarsRatio(CDecisionForestBuilder &s,double f); static void DFBuilderSetRndVarsAuto(CDecisionForestBuilder &s); static void DFBuilderSetSubsampleRatio(CDecisionForestBuilder &s,double f); static void DFBuilderSetSeed(CDecisionForestBuilder &s,int seedval); static void DFBuilderSetRDFAlgo(CDecisionForestBuilder &s,int algotype); static void DFBuilderSetRDFSplitStrength(CDecisionForestBuilder &s,int splitstrength); static void DFBuilderSetImportanceTrnGini(CDecisionForestBuilder &s); static void DFBuilderSetImportanceOOBGini(CDecisionForestBuilder &s); static void DFBuilderSetImportancePermutation(CDecisionForestBuilder &s); static void DFBuilderSetImportanceNone(CDecisionForestBuilder &s); static double DFBuilderGetProgress(CDecisionForestBuilder &s); static double DFBuilderPeekProgress(CDecisionForestBuilder &s); static void DFBuilderBuildRandomForest(CDecisionForestBuilder &s,int ntrees,CDecisionForestShell &df,CDFReportShell &rep); static double DFBinaryCompression(CDecisionForestShell &df); static void DFProcess(CDecisionForestShell &df,double &x[],double &y[]); static void DFProcessI(CDecisionForestShell &df,double &x[],double &y[]); static double DFProcess0(CDecisionForestShell &model,double &x[]); static double DFProcess0(CDecisionForestShell &model,CRowDouble &x); static int DFClassify(CDecisionForestShell &model,double &x[]); static int DFClassify(CDecisionForestShell &model,CRowDouble &x); static double DFRelClsError(CDecisionForestShell &df,CMatrixDouble &xy,const int npoints); static double DFAvgCE(CDecisionForestShell &df,CMatrixDouble &xy,const int npoints); static double DFRMSError(CDecisionForestShell &df,CMatrixDouble &xy,const int npoints); static double DFAvgError(CDecisionForestShell &df,CMatrixDouble &xy,const int npoints); static double DFAvgRelError(CDecisionForestShell &df,CMatrixDouble 
&xy,const int npoints); static void DFBuildRandomDecisionForest(CMatrixDouble &xy,const int npoints,const int nvars,const int nclasses,const int ntrees,const double r,int &info,CDecisionForestShell &df,CDFReportShell &rep); static void DFBuildRandomDecisionForestX1(CMatrixDouble &xy,const int npoints,const int nvars,const int nclasses,const int ntrees,int nrndvars,const double r,int &info,CDecisionForestShell &df,CDFReportShell &rep); //--- middle and clusterization static void ClusterizerCreate(CClusterizerState &s); static void ClusterizerSetPoints(CClusterizerState &s,CMatrixDouble &xy,int npoints,int nfeatures,int disttype); static void ClusterizerSetPoints(CClusterizerState &s,CMatrixDouble &xy,int disttype); static void ClusterizerSetDistances(CClusterizerState &s,CMatrixDouble &d,int npoints,bool IsUpper); static void ClusterizerSetDistances(CClusterizerState &s,CMatrixDouble &d,bool IsUpper); static void ClusterizerSetAHCAlgo(CClusterizerState &s,int algo); static void ClusterizerSetKMeansLimits(CClusterizerState &s,int restarts,int maxits); static void ClusterizerSetKMeansInit(CClusterizerState &s,int initalgo); static void ClusterizerSetSeed(CClusterizerState &s,int seed); static void ClusterizerRunAHC(CClusterizerState &s,CAHCReport &rep); static void ClusterizerRunKMeans(CClusterizerState &s,int k,CKmeansReport &rep); static void ClusterizerGetDistances(CMatrixDouble &xy,int npoints,int nfeatures,int disttype,CMatrixDouble &d); static void ClusterizerGetKClusters(CAHCReport &rep,int k,CRowInt &cidx,CRowInt &cz); static void ClusterizerSeparatedByDist(CAHCReport &rep,double r,int &k,CRowInt &cidx,CRowInt &cz); static void ClusterizerSeparatedByCorr(CAHCReport &rep,double r,int &k,CRowInt &cidx,CRowInt &cz); static void KMeansGenerate(CMatrixDouble &xy,const int npoints,const int nvars,const int k,const int restarts,int &info,CMatrixDouble &c,int &xyc[]); //--- Fisher LDA functions static void FisherLDA(CMatrixDouble &xy,const int npoints,const int 
nvars,const int nclasses,int &info,double &w[]); static void FisherLDA(CMatrixDouble &xy,const int npoints,const int nvars,const int nclasses,int &info,CRowDouble &w); static void FisherLDAN(CMatrixDouble &xy,const int npoints,const int nvars,const int nclasses,int &info,CMatrixDouble &w); //--- linear regression static void LRBuild(CMatrixDouble &xy,const int npoints,const int nvars,int &info,CLinearModelShell &lm,CLRReportShell &ar); static void LRBuildS(CMatrixDouble &xy,double &s[],const int npoints,const int nvars,int &info,CLinearModelShell &lm,CLRReportShell &ar); static void LRBuildS(CMatrixDouble &xy,CRowDouble &s,const int npoints,const int nvars,int &info,CLinearModelShell &lm,CLRReportShell &ar); static void LRBuildZS(CMatrixDouble &xy,double &s[],const int npoints,const int nvars,int &info,CLinearModelShell &lm,CLRReportShell &ar); static void LRBuildZS(CMatrixDouble &xy,CRowDouble &s,const int npoints,const int nvars,int &info,CLinearModelShell &lm,CLRReportShell &ar); static void LRBuildZ(CMatrixDouble &xy,const int npoints,const int nvars,int &info,CLinearModelShell &lm,CLRReportShell &ar); static void LRUnpack(CLinearModelShell &lm,double &v[],int &nvars); static void LRUnpack(CLinearModelShell &lm,CRowDouble &v,int &nvars); static void LRPack(double &v[],const int nvars,CLinearModelShell &lm); static void LRPack(CRowDouble &v,const int nvars,CLinearModelShell &lm); static double LRProcess(CLinearModelShell &lm,double &x[]); static double LRProcess(CLinearModelShell &lm,CRowDouble &x); static double LRRMSError(CLinearModelShell &lm,CMatrixDouble &xy,const int npoints); static double LRAvgError(CLinearModelShell &lm,CMatrixDouble &xy,const int npoints); static double LRAvgRelError(CLinearModelShell &lm,CMatrixDouble &xy,const int npoints); //--- multilayer perceptron static void MLPSerialize(CMultilayerPerceptronShell &obj,string &s_out); static void MLPUnserialize(const string s_in,CMultilayerPerceptronShell &obj); static void MLPCreate0(const int 
nin,const int nout,CMultilayerPerceptronShell &network); static void MLPCreate1(const int nin,int nhid,const int nout,CMultilayerPerceptronShell &network); static void MLPCreate2(const int nin,const int nhid1,const int nhid2,const int nout,CMultilayerPerceptronShell &network); static void MLPCreateB0(const int nin,const int nout,const double b,const double d,CMultilayerPerceptronShell &network); static void MLPCreateB1(const int nin,int nhid,const int nout,const double b,const double d,CMultilayerPerceptronShell &network); static void MLPCreateB2(const int nin,const int nhid1,const int nhid2,const int nout,const double b,const double d,CMultilayerPerceptronShell &network); static void MLPCreateR0(const int nin,const int nout,double a,const double b,CMultilayerPerceptronShell &network); static void MLPCreateR1(const int nin,int nhid,const int nout,const double a,const double b,CMultilayerPerceptronShell &network); static void MLPCreateR2(const int nin,const int nhid1,const int nhid2,const int nout,const double a,const double b,CMultilayerPerceptronShell &network); static void MLPCreateC0(const int nin,const int nout,CMultilayerPerceptronShell &network); static void MLPCreateC1(const int nin,int nhid,const int nout,CMultilayerPerceptronShell &network); static void MLPCreateC2(const int nin,const int nhid1,const int nhid2,const int nout,CMultilayerPerceptronShell &network); static void MLPRandomize(CMultilayerPerceptronShell &network); static void MLPRandomizeFull(CMultilayerPerceptronShell &network); static void MLPInitPreprocessor(CMultilayerPerceptronShell &network,CMatrixDouble &xy,int ssize); static void MLPProperties(CMultilayerPerceptronShell &network,int &nin,int &nout,int &wcount); static int MLPGetInputsCount(CMultilayerPerceptronShell &network); static int MLPGetOutputsCount(CMultilayerPerceptronShell &network); static int MLPGetWeightsCount(CMultilayerPerceptronShell &network); static bool MLPIsSoftMax(CMultilayerPerceptronShell &network); static int 
MLPGetLayersCount(CMultilayerPerceptronShell &network); static int MLPGetLayerSize(CMultilayerPerceptronShell &network,const int k); static void MLPGetInputScaling(CMultilayerPerceptronShell &network,const int i,double &mean,double &sigma); static void MLPGetOutputScaling(CMultilayerPerceptronShell &network,const int i,double &mean,double &sigma); static void MLPGetNeuronInfo(CMultilayerPerceptronShell &network,const int k,const int i,int &fkind,double &threshold); static double MLPGetWeight(CMultilayerPerceptronShell &network,const int k0,const int i0,const int k1,const int i1); static void MLPSetInputScaling(CMultilayerPerceptronShell &network,const int i,const double mean,const double sigma); static void MLPSetOutputScaling(CMultilayerPerceptronShell &network,const int i,const double mean,const double sigma); static void MLPSetNeuronInfo(CMultilayerPerceptronShell &network,const int k,const int i,int fkind,double threshold); static void MLPSetWeight(CMultilayerPerceptronShell &network,const int k0,const int i0,const int k1,const int i1,const double w); static void MLPActivationFunction(const double net,const int k,double &f,double &df,double &d2f); static void MLPProcess(CMultilayerPerceptronShell &network,double &x[],double &y[]); static void MLPProcessI(CMultilayerPerceptronShell &network,double &x[],double &y[]); static double MLPError(CMultilayerPerceptronShell &network,CMatrixDouble &xy,const int ssize); static double MLPErrorSparse(CMultilayerPerceptronShell &network,CSparseMatrix &xy,int npoints); static double MLPErrorN(CMultilayerPerceptronShell &network,CMatrixDouble &xy,const int ssize); static int MLPClsError(CMultilayerPerceptronShell &network,CMatrixDouble &xy,const int ssize); static double MLPRelClsError(CMultilayerPerceptronShell &network,CMatrixDouble &xy,const int npoints); static double MLPRelClsErrorSparse(CMultilayerPerceptronShell &network,CSparseMatrix &xy,int npoints); static double MLPAvgCE(CMultilayerPerceptronShell 
&network,CMatrixDouble &xy,const int npoints); static double MLPAvgCESparse(CMultilayerPerceptronShell &network,CSparseMatrix &xy,int npoints); static double MLPRMSError(CMultilayerPerceptronShell &network,CMatrixDouble &xy,const int npoints); static double MLPRMSErrorSparse(CMultilayerPerceptronShell &network,CSparseMatrix &xy,int npoints); static double MLPAvgError(CMultilayerPerceptronShell &network,CMatrixDouble &xy,const int npoints); static double MLPAvgErrorSparse(CMultilayerPerceptronShell &network,CSparseMatrix &xy,int npoints); static double MLPAvgRelError(CMultilayerPerceptronShell &network,CMatrixDouble &xy,const int npoints); static double MLPAvgRelErrorSparse(CMultilayerPerceptronShell &network,CSparseMatrix &xy,int npoints); static void MLPGrad(CMultilayerPerceptronShell &network,double &x[],double &desiredy[],double &e,double &grad[]); static void MLPGradN(CMultilayerPerceptronShell &network,double &x[],double &desiredy[],double &e,double &grad[]); static void MLPGradBatch(CMultilayerPerceptronShell &network,CMatrixDouble &xy,const int ssize,double &e,double &grad[]); static void MLPGradBatch(CMultilayerPerceptronShell &network,CMatrixDouble &xy,const int ssize,double &e,CRowDouble &grad); static void MLPGradBatchSparse(CMultilayerPerceptronShell &network,CSparseMatrix &xy,int ssize,double &e,CRowDouble &grad); static void MLPGradBatchSubset(CMultilayerPerceptronShell &network,CMatrixDouble &xy,int setsize,CRowInt &idx,int subsetsize,double &e,CRowDouble &grad); static void MLPGradBatchSparseSubset(CMultilayerPerceptronShell &network,CSparseMatrix &xy,int setsize,CRowInt &idx,int subsetsize,double &e,CRowDouble &grad); static void MLPGradNBatch(CMultilayerPerceptronShell &network,CMatrixDouble &xy,const int ssize,double &e,double &grad[]); static void MLPGradNBatch(CMultilayerPerceptronShell &network,CMatrixDouble &xy,const int ssize,double &e,CRowDouble &grad); static void MLPHessianNBatch(CMultilayerPerceptronShell &network,CMatrixDouble &xy,const 
int ssize,double &e,double &grad[],CMatrixDouble &h); static void MLPHessianNBatch(CMultilayerPerceptronShell &network,CMatrixDouble &xy,const int ssize,double &e,CRowDouble &grad,CMatrixDouble &h); static void MLPHessianBatch(CMultilayerPerceptronShell &network,CMatrixDouble &xy,const int ssize,double &e,double &grad[],CMatrixDouble &h); static void MLPHessianBatch(CMultilayerPerceptronShell &network,CMatrixDouble &xy,const int ssize,double &e,CRowDouble &grad,CMatrixDouble &h); static void MLPAllErrorsSubset(CMultilayerPerceptronShell &network,CMatrixDouble &xy,int setsize,CRowInt &subset,int subsetsize,CModelErrors &rep); static void MLPAllErrorsSparseSubset(CMultilayerPerceptronShell &network,CSparseMatrix &xy,int setsize,CRowInt &subset,int subsetsize,CModelErrors &rep); static double MLPErrorSubset(CMultilayerPerceptronShell &network,CMatrixDouble &xy,int setsize,CRowInt &subset,int subsetsize); static double MLPErrorSparseSubset(CMultilayerPerceptronShell &network,CSparseMatrix &xy,int setsize,CRowInt &subset,int subsetsize); //--- logit model functions static void MNLTrainH(CMatrixDouble &xy,const int npoints,const int nvars,const int nclasses,int &info,CLogitModelShell &lm,CMNLReportShell &rep); static void MNLProcess(CLogitModelShell &lm,double &x[],double &y[]); static void MNLProcess(CLogitModelShell &lm,CRowDouble &x,CRowDouble &y); static void MNLProcessI(CLogitModelShell &lm,double &x[],double &y[]); static void MNLProcessI(CLogitModelShell &lm,CRowDouble &x,CRowDouble &y); static void MNLUnpack(CLogitModelShell &lm,CMatrixDouble &a,int &nvars,int &nclasses); static void MNLPack(CMatrixDouble &a,const int nvars,const int nclasses,CLogitModelShell &lm); static double MNLAvgCE(CLogitModelShell &lm,CMatrixDouble &xy,const int npoints); static double MNLRelClsError(CLogitModelShell &lm,CMatrixDouble &xy,const int npoints); static double MNLRMSError(CLogitModelShell &lm,CMatrixDouble &xy,const int npoints); static double MNLAvgError(CLogitModelShell 
&lm,CMatrixDouble &xy,const int npoints); static double MNLAvgRelError(CLogitModelShell &lm,CMatrixDouble &xy,const int ssize); static int MNLClsError(CLogitModelShell &lm,CMatrixDouble &xy,const int npoints); //--- Markov chains static void MCPDCreate(const int n,CMCPDStateShell &s); static void MCPDCreateEntry(const int n,const int entrystate,CMCPDStateShell &s); static void MCPDCreateExit(const int n,const int exitstate,CMCPDStateShell &s); static void MCPDCreateEntryExit(const int n,const int entrystate,const int exitstate,CMCPDStateShell &s); static void MCPDAddTrack(CMCPDStateShell &s,CMatrixDouble &xy,const int k); static void MCPDAddTrack(CMCPDStateShell &s,CMatrixDouble &xy); static void MCPDSetEC(CMCPDStateShell &s,CMatrixDouble &ec); static void MCPDAddEC(CMCPDStateShell &s,const int i,const int j,const double c); static void MCPDSetBC(CMCPDStateShell &s,CMatrixDouble &bndl,CMatrixDouble &bndu); static void MCPDAddBC(CMCPDStateShell &s,const int i,const int j,const double bndl,const double bndu); static void MCPDSetLC(CMCPDStateShell &s,CMatrixDouble &c,int &ct[],const int k); static void MCPDSetLC(CMCPDStateShell &s,CMatrixDouble &c,CRowInt &ct,const int k); static void MCPDSetLC(CMCPDStateShell &s,CMatrixDouble &c,int &ct[]); static void MCPDSetLC(CMCPDStateShell &s,CMatrixDouble &c,CRowInt &ct); static void MCPDSetTikhonovRegularizer(CMCPDStateShell &s,const double v); static void MCPDSetPrior(CMCPDStateShell &s,CMatrixDouble &pp); static void MCPDSetPredictionWeights(CMCPDStateShell &s,double &pw[]); static void MCPDSetPredictionWeights(CMCPDStateShell &s,CRowDouble &pw); static void MCPDSolve(CMCPDStateShell &s); static void MCPDResults(CMCPDStateShell &s,CMatrixDouble &p,CMCPDReportShell &rep); //--- training neural networks static void MLPTrainLM(CMultilayerPerceptronShell &network,CMatrixDouble &xy,const int npoints,const double decay,const int restarts,int &info,CMLPReportShell &rep); static void MLPTrainLBFGS(CMultilayerPerceptronShell 
&network,CMatrixDouble &xy,const int npoints,const double decay,const int restarts,const double wstep,int maxits,int &info,CMLPReportShell &rep); static void MLPTrainES(CMultilayerPerceptronShell &network,CMatrixDouble &trnxy,const int trnsize,CMatrixDouble &valxy,const int valsize,const double decay,const int restarts,int &info,CMLPReportShell &rep); static void MLPKFoldCVLBFGS(CMultilayerPerceptronShell &network,CMatrixDouble &xy,const int npoints,const double decay,const int restarts,const double wstep,const int maxits,const int foldscount,int &info,CMLPReportShell &rep,CMLPCVReportShell &cvrep); static void MLPKFoldCVLM(CMultilayerPerceptronShell &network,CMatrixDouble &xy,const int npoints,const double decay,const int restarts,const int foldscount,int &info,CMLPReportShell &rep,CMLPCVReportShell &cvrep); static void MLPCreateTrainer(int nin,int nout,CMLPTrainer &s); static void MLPCreateTrainerCls(int nin,int nclasses,CMLPTrainer &s); static void MLPSetDataset(CMLPTrainer &s,CMatrixDouble &xy,int npoints); static void MLPSetSparseDataset(CMLPTrainer &s,CSparseMatrix &xy,int npoints); static void MLPSetDecay(CMLPTrainer &s,double decay); static void MLPSetCond(CMLPTrainer &s,double wstep,int maxits); static void MLPSetAlgoBatch(CMLPTrainer &s); static void MLPTrainNetwork(CMLPTrainer &s,CMultilayerPerceptronShell &network,int nrestarts,CMLPReportShell &rep); static void MLPStartTraining(CMLPTrainer &s,CMultilayerPerceptronShell &network,bool randomstart); static bool MLPContinueTraining(CMLPTrainer &s,CMultilayerPerceptronShell &network); //--- neural networks ensemble functions static void MLPECreate0(const int nin,const int nout,const int ensemblesize,CMLPEnsembleShell &ensemble); static void MLPECreate1(const int nin,int nhid,const int nout,const int ensemblesize,CMLPEnsembleShell &ensemble); static void MLPECreate2(const int nin,const int nhid1,const int nhid2,const int nout,const int ensemblesize,CMLPEnsembleShell &ensemble); static void MLPECreateB0(const 
int nin,const int nout,const double b,const double d,const int ensemblesize,CMLPEnsembleShell &ensemble); static void MLPECreateB1(const int nin,int nhid,const int nout,const double b,const double d,const int ensemblesize,CMLPEnsembleShell &ensemble); static void MLPECreateB2(const int nin,const int nhid1,const int nhid2,const int nout,const double b,const double d,const int ensemblesize,CMLPEnsembleShell &ensemble); static void MLPECreateR0(const int nin,const int nout,const double a,const double b,const int ensemblesize,CMLPEnsembleShell &ensemble); static void MLPECreateR1(const int nin,int nhid,const int nout,const double a,const double b,const int ensemblesize,CMLPEnsembleShell &ensemble); static void MLPECreateR2(const int nin,const int nhid1,const int nhid2,const int nout,const double a,const double b,const int ensemblesize,CMLPEnsembleShell &ensemble); static void MLPECreateC0(const int nin,const int nout,const int ensemblesize,CMLPEnsembleShell &ensemble); static void MLPECreateC1(const int nin,int nhid,const int nout,const int ensemblesize,CMLPEnsembleShell &ensemble); static void MLPECreateC2(const int nin,const int nhid1,const int nhid2,const int nout,const int ensemblesize,CMLPEnsembleShell &ensemble); static void MLPECreateFromNetwork(CMultilayerPerceptronShell &network,const int ensemblesize,CMLPEnsembleShell &ensemble); static void MLPERandomize(CMLPEnsembleShell &ensemble); static void MLPEProperties(CMLPEnsembleShell &ensemble,int &nin,int &nout); static bool MLPEIsSoftMax(CMLPEnsembleShell &ensemble); static void MLPEProcess(CMLPEnsembleShell &ensemble,double &x[],double &y[]); static void MLPEProcessI(CMLPEnsembleShell &ensemble,double &x[],double &y[]); static double MLPERelClsError(CMLPEnsembleShell &ensemble,CMatrixDouble &xy,const int npoints); static double MLPEAvgCE(CMLPEnsembleShell &ensemble,CMatrixDouble &xy,const int npoints); static double MLPERMSError(CMLPEnsembleShell &ensemble,CMatrixDouble &xy,const int npoints); static double 
MLPEAvgError(CMLPEnsembleShell &ensemble,CMatrixDouble &xy,const int npoints); static double MLPEAvgRelError(CMLPEnsembleShell &ensemble,CMatrixDouble &xy,const int npoints); static void MLPEBaggingLM(CMLPEnsembleShell &ensemble,CMatrixDouble &xy,const int npoints,const double decay,const int restarts,int &info,CMLPReportShell &rep,CMLPCVReportShell &ooberrors); static void MLPEBaggingLBFGS(CMLPEnsembleShell &ensemble,CMatrixDouble &xy,const int npoints,const double decay,const int restarts,const double wstep,const int maxits,int &info,CMLPReportShell &rep,CMLPCVReportShell &ooberrors); static void MLPETrainES(CMLPEnsembleShell &ensemble,CMatrixDouble &xy,const int npoints,const double decay,const int restarts,int &info,CMLPReportShell &rep); static void MLPTrainEnsembleES(CMLPTrainer &s,CMLPEnsembleShell &ensemble,int nrestarts,CMLPReportShell &rep); //--- principal components analysis static void PCABuildBasis(CMatrixDouble &x,const int npoints,const int nvars,int &info,double &s2[],CMatrixDouble &v); static void PCABuildBasis(CMatrixDouble &x,const int npoints,const int nvars,int &info,CRowDouble &s2,CMatrixDouble &v); static void PCATruncatedSubspace(CMatrixDouble &x,int npoints,int nvars,int nneeded,double eps,int maxits,CRowDouble &s2,CMatrixDouble &v); static void PCATruncatedSubspaceSparse(CSparseMatrix &x,int npoints,int nvars,int nneeded,double eps,int maxits,CRowDouble &s2,CMatrixDouble &v); //--- functions of package diffequations static void ODESolverRKCK(double &y[],const int n,double &x[],const int m,const double eps,const double h,CODESolverStateShell &state); static void ODESolverRKCK(double &y[],double &x[],const double eps,const double h,CODESolverStateShell &state); static bool ODESolverIteration(CODESolverStateShell &state); static void ODESolverSolve(CODESolverStateShell &state,CNDimensional_ODE_RP &diff,CObject &obj); static void ODESolverResults(CODESolverStateShell &state,int &m,double &xtbl[],CMatrixDouble &ytbl,CODESolverReportShell 
&rep); //--- filters static void FilterSMA(CRowDouble &x,int n,int k); static void FilterSMA(CRowDouble &x,int k); static void FilterEMA(CRowDouble &x,int n,double alpha); static void FilterEMA(CRowDouble &x,double alpha); static void FilterLRMA(CRowDouble &x,int n,int k); static void FilterLRMA(CRowDouble &x,int k); //--- SSA models static void SSACreate(CSSAModel &s); static void SSASetWindow(CSSAModel &s,int windowwidth); static void SSASetSeed(CSSAModel &s,int seed); static void SSASetPowerUpLength(CSSAModel &s,int pwlen); static void SSASetMemoryLimit(CSSAModel &s,int memlimit); static void SSAAddSequence(CSSAModel &s,CRowDouble &x,int n); static void SSAAddSequence(CSSAModel &s,CRowDouble &x); static void SSAAppendPointAndUpdate(CSSAModel &s,double x,double updateits); static void SSAAppendSequenceAndUpdate(CSSAModel &s,CRowDouble &x,int nticks,double updateits); static void SSAAppendSequenceAndUpdate(CSSAModel &s,CRowDouble &x,double updateits); static void SSASetAlgoPrecomputed(CSSAModel &s,CMatrixDouble &a,int windowwidth,int nbasis); static void SSASetAlgoPrecomputed(CSSAModel &s,CMatrixDouble &a); static void SSASetAlgoTopKDirect(CSSAModel &s,int topk); static void SSASetAlgoTopKRealtime(CSSAModel &s,int topk); static void SSAClearData(CSSAModel &s); static void SSAGetBasis(CSSAModel &s,CMatrixDouble &a,CRowDouble &sv,int &windowwidth,int &nbasis); static void SSAGetLRR(CSSAModel &s,CRowDouble &a,int &windowwidth); static void SSAAnalyzeLastWindow(CSSAModel &s,CRowDouble &trend,CRowDouble &noise,int &nticks); static void SSAAnalyzeLast(CSSAModel &s,int nticks,CRowDouble &trend,CRowDouble &noise); static void SSAAnalyzeSequence(CSSAModel &s,CRowDouble &data,int nticks,CRowDouble &trend,CRowDouble &noise); static void SSAAnalyzeSequence(CSSAModel &s,CRowDouble &data,CRowDouble &trend,CRowDouble &noise); static void SSAForecastLast(CSSAModel &s,int nticks,CRowDouble &trend); static void SSAForecastSequence(CSSAModel &s,CRowDouble &data,int datalen,int 
forecastlen,bool applysmoothing,CRowDouble &trend); static void SSAForecastSequence(CSSAModel &s,CRowDouble &data,int forecastlen,CRowDouble &trend); static void SSAForecastAvgLast(CSSAModel &s,int m,int nticks,CRowDouble &trend); static void SSAForecastAvgSequence(CSSAModel &s,CRowDouble &data,int datalen,int m,int forecastlen,bool applysmoothing,CRowDouble &trend); static void SSAForecastAvgSequence(CSSAModel &s,CRowDouble &data,int m,int forecastlen,CRowDouble &trend); //--- KNN models static void KNNSerialize(CKNNModel &obj,string &s_out); static void KNNUnserialize(const string s_in,CKNNModel &obj); static void KNNCreateBuffer(CKNNModel &model,CKNNBuffer &buf); static void KNNBuilderCreate(CKNNBuilder &s); static void KNNBuilderSetDatasetReg(CKNNBuilder &s,CMatrixDouble &xy,int npoints,int nvars,int nout); static void KNNBuilderSetDatasetCLS(CKNNBuilder &s,CMatrixDouble &xy,int npoints,int nvars,int nclasses); static void KNNBuilderSetNorm(CKNNBuilder &s,int nrmtype); static void KNNBuilderBuildKNNModel(CKNNBuilder &s,int k,double eps,CKNNModel &model,CKNNReport &rep); static void KNNRewriteKEps(CKNNModel &model,int k,double eps); static void KNNProcess(CKNNModel &model,CRowDouble &x,CRowDouble &y); static double KNNProcess0(CKNNModel &model,CRowDouble &x); static int KNNClassify(CKNNModel &model,CRowDouble &x); static void KNNProcessI(CKNNModel &model,CRowDouble &x,CRowDouble &y); static void KNNTsProcess(CKNNModel &model,CKNNBuffer &buf,CRowDouble &x,CRowDouble &y); static double KNNRelClsError(CKNNModel &model,CMatrixDouble &xy,int npoints); static double KNNAvgCE(CKNNModel &model,CMatrixDouble &xy,int npoints); static double KNNRMSError(CKNNModel &model,CMatrixDouble &xy,int npoints); static double KNNAvgError(CKNNModel &model,CMatrixDouble &xy,int npoints); static double KNNAvgRelError(CKNNModel &model,CMatrixDouble &xy,int npoints); static void KNNAllErrors(CKNNModel &model,CMatrixDouble &xy,int npoints,CKNNReport &rep); //--- functions of package 
fasttransforms //--- fast Fourier transform static void FFTC1D(complex &a[],const int n); static void FFTC1D(complex &a[]); static void FFTC1DInv(complex &a[],const int n); static void FFTC1DInv(complex &a[]); static void FFTR1D(double &a[],const int n,complex &f[]); static void FFTR1D(double &a[],complex &f[]); static void FFTR1DInv(complex &f[],const int n,double &a[]); static void FFTR1DInv(complex &f[],double &a[]); static void FFTC1D(CRowComplex &a,const int n); static void FFTC1D(CRowComplex &a); static void FFTC1DInv(CRowComplex &a,const int n); static void FFTC1DInv(CRowComplex &a); static void FFTR1D(CRowDouble &a,const int n,CRowComplex &f); static void FFTR1D(CRowDouble &a,CRowComplex &f); static void FFTR1DInv(CRowComplex &f,const int n,CRowDouble &a); static void FFTR1DInv(CRowComplex &f,CRowDouble &a); //--- convolution static void ConvC1D(complex &a[],const int m,complex &b[],const int n,complex &r[]); static void ConvC1DInv(complex &a[],const int m,complex &b[],const int n,complex &r[]); static void ConvC1DCircular(complex &s[],const int m,complex &r[],const int n,complex &c[]); static void ConvC1DCircularInv(complex &a[],const int m,complex &b[],const int n,complex &r[]); static void ConvR1D(double &a[],const int m,double &b[],const int n,double &r[]); static void ConvR1DInv(double &a[],const int m,double &b[],const int n,double &r[]); static void ConvR1DCircular(double &s[],const int m,double &r[],const int n,double &c[]); static void ConvR1DCircularInv(double &a[],const int m,double &b[],const int n,double &r[]); static void CorrC1D(complex &signal[],const int n,complex &pattern[],const int m,complex &r[]); static void CorrC1DCircular(complex &signal[],const int m,complex &pattern[],const int n,complex &c[]); static void CorrR1D(double &signal[],const int n,double &pattern[],const int m,double &r[]); static void CorrR1DCircular(double &signal[],const int m,double &pattern[],const int n,double &c[]); //--- fast Hartley transform static void 
FHTR1D(double &a[],const int n); static void FHTR1DInv(double &a[],const int n); //--- functions of package integration //--- Gauss quadrature formula static void GQGenerateRec(double &alpha[],double &beta[],const double mu0,const int n,int &info,double &x[],double &w[]); static void GQGenerateGaussLobattoRec(double &alpha[],double &beta[],const double mu0,const double a,const double b,const int n,int &info,double &x[],double &w[]); static void GQGenerateGaussRadauRec(double &alpha[],double &beta[],const double mu0,const double a,const int n,int &info,double &x[],double &w[]); static void GQGenerateGaussLegendre(const int n,int &info,double &x[],double &w[]); static void GQGenerateGaussJacobi(const int n,const double alpha,const double beta,int &info,double &x[],double &w[]); static void GQGenerateGaussLaguerre(const int n,const double alpha,int &info,double &x[],double &w[]); static void GQGenerateGaussHermite(const int n,int &info,double &x[],double &w[]); //--- Gauss-Kronrod quadrature formula static void GKQGenerateRec(double &alpha[],double &beta[],const double mu0,const int n,int &info,double &x[],double &wkronrod[],double &wgauss[]); static void GKQGenerateGaussLegendre(const int n,int &info,double &x[],double &wkronrod[],double &wgauss[]); static void GKQGenerateGaussJacobi(const int n,const double alpha,const double beta,int &info,double &x[],double &wkronrod[],double &wgauss[]); static void GKQLegendreCalc(const int n,int &info,double &x[],double &wkronrod[],double &wgauss[]); static void GKQLegendreTbl(const int n,double &x[],double &wkronrod[],double &wgauss[],double &eps); //--- auto Gauss-Kronrod static void AutoGKSmooth(const double a,const double b,CAutoGKStateShell &state); static void AutoGKSmoothW(const double a,const double b,double xwidth,CAutoGKStateShell &state); static void AutoGKSingular(const double a,const double b,const double alpha,const double beta,CAutoGKStateShell &state); static bool AutoGKIteration(CAutoGKStateShell &state); static 
void AutoGKIntegrate(CAutoGKStateShell &state,CIntegrator1_Func &func,CObject &obj); static void AutoGKResults(CAutoGKStateShell &state,double &v,CAutoGKReportShell &rep); //--- functions of package interpolation //--- inverse distance weighting interpolation static void IDWSerialize(CIDWModelShell &obj,string &s_out); static void IDWUnserialize(string s_in,CIDWModelShell &obj); static void IDWCreateCalcBuffer(CIDWModelShell &s,CIDWCalcBuffer &buf); static void IDWBuilderCreate(int nx,int ny,CIDWBuilder &state); static void IDWBuilderSetNLayers(CIDWBuilder &state,int nlayers); static void IDWBuilderSetPoints(CIDWBuilder &state,CMatrixDouble &xy,int n); static void IDWBuilderSetPoints(CIDWBuilder &state,CMatrixDouble &xy); static void IDWBuilderSetAlgoMSTAB(CIDWBuilder &state,double srad); static void IDWBuilderSetAlgoTextBookShepard(CIDWBuilder &state,double p); static void IDWBuilderSetAlgoTextBookModShepard(CIDWBuilder &state,double r); static void IDWBuilderSetUserTerm(CIDWBuilder &state,double v); static void IDWBuilderSetConstTerm(CIDWBuilder &state); static void IDWBuilderSetZeroTerm(CIDWBuilder &state); static double IDWCalc1(CIDWModelShell &s,double x0); static double IDWCalc2(CIDWModelShell &s,double x0,double x1); static double IDWCalc3(CIDWModelShell &s,double x0,double x1,double x2); static void IDWCalc(CIDWModelShell &s,CRowDouble &x,CRowDouble &y); static void IDWCalcBuf(CIDWModelShell &s,CRowDouble &x,CRowDouble &y); static void IDWTsCalcBuf(CIDWModelShell &s,CIDWCalcBuffer &buf,CRowDouble &x,CRowDouble &y); static void IDWFit(CIDWBuilder &state,CIDWModelShell &model,CIDWReport &rep); //--- rational interpolation static double BarycentricCalc(CBarycentricInterpolantShell &b,const double t); static void BarycentricDiff1(CBarycentricInterpolantShell &b,const double t,double &f,double &df); static void BarycentricDiff2(CBarycentricInterpolantShell &b,const double t,double &f,double &df,double &d2f); static void 
BarycentricLinTransX(CBarycentricInterpolantShell &b,const double ca,const double cb); static void BarycentricLinTransY(CBarycentricInterpolantShell &b,const double ca,const double cb); static void BarycentricUnpack(CBarycentricInterpolantShell &b,int &n,double &x[],double &y[],double &w[]); static void BarycentricBuildXYW(double &x[],double &y[],double &w[],const int n,CBarycentricInterpolantShell &b); static void BarycentricBuildFloaterHormann(double &x[],double &y[],const int n,const int d,CBarycentricInterpolantShell &b); //--- polynomial interpolant static void PolynomialBar2Cheb(CBarycentricInterpolantShell &p,const double a,const double b,double &t[]); static void PolynomialCheb2Bar(double &t[],const int n,const double a,const double b,CBarycentricInterpolantShell &p); static void PolynomialCheb2Bar(double &t[],const double a,const double b,CBarycentricInterpolantShell &p); static void PolynomialBar2Pow(CBarycentricInterpolantShell &p,const double c,const double s,double &a[]); static void PolynomialBar2Pow(CBarycentricInterpolantShell &p,double &a[]); static void PolynomialPow2Bar(double &a[],const int n,const double c,const double s,CBarycentricInterpolantShell &p); static void PolynomialPow2Bar(double &a[],CBarycentricInterpolantShell &p); static void PolynomialBuild(double &x[],double &y[],const int n,CBarycentricInterpolantShell &p); static void PolynomialBuild(double &x[],double &y[],CBarycentricInterpolantShell &p); static void PolynomialBuildEqDist(const double a,const double b,double &y[],const int n,CBarycentricInterpolantShell &p); static void PolynomialBuildEqDist(const double a,const double b,double &y[],CBarycentricInterpolantShell &p); static void PolynomialBuildCheb1(const double a,const double b,double &y[],const int n,CBarycentricInterpolantShell &p); static void PolynomialBuildCheb1(const double a,const double b,double &y[],CBarycentricInterpolantShell &p); static void PolynomialBuildCheb2(const double a,const double b,double &y[],const 
int n,CBarycentricInterpolantShell &p); static void PolynomialBuildCheb2(const double a,const double b,double &y[],CBarycentricInterpolantShell &p); static double PolynomialCalcEqDist(const double a,const double b,double &f[],const int n,const double t); static double PolynomialCalcEqDist(const double a,const double b,double &f[],const double t); static double PolynomialCalcCheb1(const double a,const double b,double &f[],const int n,const double t); static double PolynomialCalcCheb1(const double a,const double b,double &f[],const double t); static double PolynomialCalcCheb2(const double a,const double b,double &f[],const int n,const double t); static double PolynomialCalcCheb2(const double a,const double b,double &f[],const double t); //--- 1-dimensional spline interpolation static void Spline1DBuildLinear(double &x[],double &y[],const int n,CSpline1DInterpolantShell &c); static void Spline1DBuildLinear(double &x[],double &y[],CSpline1DInterpolantShell &c); static void Spline1DBuildCubic(double &x[],double &y[],const int n,const int boundltype,const double boundl,const int boundrtype,const double boundr,CSpline1DInterpolantShell &c); static void Spline1DBuildCubic(double &x[],double &y[],CSpline1DInterpolantShell &c); static void Spline1DGridDiffCubic(double &x[],double &y[],const int n,const int boundltype,const double boundl,const int boundrtype,const double boundr,double &d[]); static void Spline1DGridDiffCubic(double &x[],double &y[],double &d[]); static void Spline1DGridDiff2Cubic(double &x[],double &y[],const int n,const int boundltype,const double boundl,const int boundrtype,const double boundr,double &d1[],double &d2[]); static void Spline1DGridDiff2Cubic(double &x[],double &y[],double &d1[],double &d2[]); static void Spline1DConvCubic(double &x[],double &y[],const int n,const int boundltype,const double boundl,const int boundrtype,const double boundr,double &x2[],int n2,double &y2[]); static void Spline1DConvCubic(double &x[],double &y[],double 
&x2[],double &y2[]); static void Spline1DConvDiffCubic(double &x[],double &y[],const int n,const int boundltype,const double boundl,const int boundrtype,const double boundr,double &x2[],int n2,double &y2[],double &d2[]); static void Spline1DConvDiffCubic(double &x[],double &y[],double &x2[],double &y2[],double &d2[]); static void Spline1DConvDiff2Cubic(double &x[],double &y[],const int n,const int boundltype,const double boundl,const int boundrtype,const double boundr,double &x2[],const int n2,double &y2[],double &d2[],double &dd2[]); static void Spline1DConvDiff2Cubic(double &x[],double &y[],double &x2[],double &y2[],double &d2[],double &dd2[]); static void Spline1DBuildCatmullRom(double &x[],double &y[],const int n,const int boundtype,const double tension,CSpline1DInterpolantShell &c); static void Spline1DBuildCatmullRom(double &x[],double &y[],CSpline1DInterpolantShell &c); static void Spline1DBuildHermite(double &x[],double &y[],double &d[],const int n,CSpline1DInterpolantShell &c); static void Spline1DBuildHermite(double &x[],double &y[],double &d[],CSpline1DInterpolantShell &c); static void Spline1DBuildAkima(double &x[],double &y[],const int n,CSpline1DInterpolantShell &c); static void Spline1DBuildAkima(double &x[],double &y[],CSpline1DInterpolantShell &c); static double Spline1DCalc(CSpline1DInterpolantShell &c,const double x); static void Spline1DDiff(CSpline1DInterpolantShell &c,const double x,double &s,double &ds,double &d2s); static void Spline1DUnpack(CSpline1DInterpolantShell &c,int &n,CMatrixDouble &tbl); static void Spline1DLinTransX(CSpline1DInterpolantShell &c,const double a,const double b); static void Spline1DLinTransY(CSpline1DInterpolantShell &c,const double a,const double b); static double Spline1DIntegrate(CSpline1DInterpolantShell &c,const double x); static void Spline1DFit(double &x[],double &y[],int n,int m,double lambdans,CSpline1DInterpolantShell &s,CSpline1DFitReportShell &rep); static void Spline1DFit(double &x[],double &y[],int 
m,double lambdans,CSpline1DInterpolantShell &s,CSpline1DFitReportShell &rep); static void Spline1DBuildMonotone(double &x[],double &y[],int n,CSpline1DInterpolantShell &c); static void Spline1DBuildMonotone(double &x[],double &y[],CSpline1DInterpolantShell &c); static void Spline1DBuildMonotone(CRowDouble &x,CRowDouble &y,CSpline1DInterpolantShell &c); //--- least squares fitting static void PolynomialFit(double &x[],double &y[],const int n,const int m,int &info,CBarycentricInterpolantShell &p,CPolynomialFitReportShell &rep); static void PolynomialFit(double &x[],double &y[],const int m,int &info,CBarycentricInterpolantShell &p,CPolynomialFitReportShell &rep); static void PolynomialFitWC(double &x[],double &y[],double &w[],const int n,double &xc[],double &yc[],int &dc[],const int k,const int m,int &info,CBarycentricInterpolantShell &p,CPolynomialFitReportShell &rep); static void PolynomialFitWC(double &x[],double &y[],double &w[],double &xc[],double &yc[],int &dc[],const int m,int &info,CBarycentricInterpolantShell &p,CPolynomialFitReportShell &rep); static void BarycentricFitFloaterHormannWC(double &x[],double &y[],double &w[],const int n,double &xc[],double &yc[],int &dc[],const int k,const int m,int &info,CBarycentricInterpolantShell &b,CBarycentricFitReportShell &rep); static void BarycentricFitFloaterHormann(double &x[],double &y[],const int n,const int m,int &info,CBarycentricInterpolantShell &b,CBarycentricFitReportShell &rep); static void Spline1DFitPenalized(double &x[],double &y[],const int n,const int m,const double rho,int &info,CSpline1DInterpolantShell &s,CSpline1DFitReportShell &rep); static void Spline1DFitPenalized(double &x[],double &y[],const int m,const double rho,int &info,CSpline1DInterpolantShell &s,CSpline1DFitReportShell &rep); static void Spline1DFitPenalizedW(double &x[],double &y[],double &w[],const int n,const int m,const double rho,int &info,CSpline1DInterpolantShell &s,CSpline1DFitReportShell &rep); static void 
Spline1DFitPenalizedW(double &x[],double &y[],double &w[],const int m,const double rho,int &info,CSpline1DInterpolantShell &s,CSpline1DFitReportShell &rep); static void Spline1DFitCubicWC(double &x[],double &y[],double &w[],const int n,double &xc[],double &yc[],int &dc[],const int k,const int m,int &info,CSpline1DInterpolantShell &s,CSpline1DFitReportShell &rep); static void Spline1DFitCubicWC(double &x[],double &y[],double &w[],double &xc[],double &yc[],int &dc[],const int m,int &info,CSpline1DInterpolantShell &s,CSpline1DFitReportShell &rep); static void Spline1DFitHermiteWC(double &x[],double &y[],double &w[],const int n,double &xc[],double &yc[],int &dc[],const int k,const int m,int &info,CSpline1DInterpolantShell &s,CSpline1DFitReportShell &rep); static void Spline1DFitHermiteWC(double &x[],double &y[],double &w[],double &xc[],double &yc[],int &dc[],const int m,int &info,CSpline1DInterpolantShell &s,CSpline1DFitReportShell &rep); static void Spline1DFitCubic(double &x[],double &y[],const int n,const int m,int &info,CSpline1DInterpolantShell &s,CSpline1DFitReportShell &rep); static void Spline1DFitCubic(double &x[],double &y[],const int m,int &info,CSpline1DInterpolantShell &s,CSpline1DFitReportShell &rep); static void Spline1DFitHermite(double &x[],double &y[],const int n,const int m,int &info,CSpline1DInterpolantShell &s,CSpline1DFitReportShell &rep); static void Spline1DFitHermite(double &x[],double &y[],const int m,int &info,CSpline1DInterpolantShell &s,CSpline1DFitReportShell &rep); static void LSFitLinearW(double &y[],double &w[],CMatrixDouble &fmatrix,const int n,const int m,int &info,double &c[],CLSFitReportShell &rep); static void LSFitLinearW(double &y[],double &w[],CMatrixDouble &fmatrix,int &info,double &c[],CLSFitReportShell &rep); static void LSFitLinearWC(double &y[],double &w[],CMatrixDouble &fmatrix,CMatrixDouble &cmatrix,const int n,const int m,const int k,int &info,double &c[],CLSFitReportShell &rep); static void LSFitLinearWC(double 
&y[],double &w[],CMatrixDouble &fmatrix,CMatrixDouble &cmatrix,int &info,double &c[],CLSFitReportShell &rep); static void LSFitLinear(double &y[],CMatrixDouble &fmatrix,const int n,const int m,int &info,double &c[],CLSFitReportShell &rep); static void LSFitLinear(double &y[],CMatrixDouble &fmatrix,int &info,double &c[],CLSFitReportShell &rep); static void LSFitLinearC(double &y[],CMatrixDouble &fmatrix,CMatrixDouble &cmatrix,const int n,const int m,const int k,int &info,double &c[],CLSFitReportShell &rep); static void LSFitLinearC(double &y[],CMatrixDouble &fmatrix,CMatrixDouble &cmatrix,int &info,double &c[],CLSFitReportShell &rep); static void LSFitCreateWF(CMatrixDouble &x,double &y[],double &w[],double &c[],const int n,const int m,const int k,const double diffstep,CLSFitStateShell &state); static void LSFitCreateWF(CMatrixDouble &x,double &y[],double &w[],double &c[],const double diffstep,CLSFitStateShell &state); static void LSFitCreateF(CMatrixDouble &x,double &y[],double &c[],const int n,const int m,const int k,const double diffstep,CLSFitStateShell &state); static void LSFitCreateF(CMatrixDouble &x,double &y[],double &c[],const double diffstep,CLSFitStateShell &state); static void LSFitCreateWFG(CMatrixDouble &x,double &y[],double &w[],double &c[],const int n,const int m,const int k,const bool cheapfg,CLSFitStateShell &state); static void LSFitCreateWFG(CMatrixDouble &x,double &y[],double &w[],double &c[],const bool cheapfg,CLSFitStateShell &state); static void LSFitCreateFG(CMatrixDouble &x,double &y[],double &c[],const int n,const int m,const int k,const bool cheapfg,CLSFitStateShell &state); static void LSFitCreateFG(CMatrixDouble &x,double &y[],double &c[],const bool cheapfg,CLSFitStateShell &state); static void LSFitCreateWFGH(CMatrixDouble &x,double &y[],double &w[],double &c[],const int n,const int m,const int k,CLSFitStateShell &state); static void LSFitCreateWFGH(CMatrixDouble &x,double &y[],double &w[],double &c[],CLSFitStateShell &state); static 
void LSFitCreateFGH(CMatrixDouble &x,double &y[],double &c[],const int n,const int m,const int k,CLSFitStateShell &state); static void LSFitCreateFGH(CMatrixDouble &x,double &y[],double &c[],CLSFitStateShell &state); static void LSFitSetCond(CLSFitStateShell &state,const double epsx,const int maxits); static void LSFitSetStpMax(CLSFitStateShell &state,const double stpmax); static void LSFitSetXRep(CLSFitStateShell &state,const bool needxrep); static void LSFitSetScale(CLSFitStateShell &state,double &s[]); static void LSFitSetBC(CLSFitStateShell &state,double &bndl[],double &bndu[]); static bool LSFitIteration(CLSFitStateShell &state); static void LSFitFit(CLSFitStateShell &state,CNDimensional_PFunc &func,CNDimensional_Rep &rep,bool rep_status,CObject &obj); static void LSFitFit(CLSFitStateShell &state,CNDimensional_PFunc &func,CNDimensional_PGrad &grad,CNDimensional_Rep &rep,bool rep_status,CObject &obj); static void LSFitFit(CLSFitStateShell &state,CNDimensional_PFunc &func,CNDimensional_PGrad &grad,CNDimensional_PHess &hess,CNDimensional_Rep &rep,bool rep_status,CObject &obj); static void LSFitResults(CLSFitStateShell &state,int &info,double &c[],CLSFitReportShell &rep); static double LogisticCalc4(double x,double a,double b,double c,double d); static double LogisticCalc5(double x,double a,double b,double c,double d,double g); static void LogisticFit4(CRowDouble &x,CRowDouble &y,int n,double &a,double &b,double &c,double &d,CLSFitReportShell &rep); static void LogisticFit4ec(CRowDouble &x,CRowDouble &y,int n,double cnstrleft,double cnstrright,double &a,double &b,double &c,double &d,CLSFitReportShell &rep); static void LogisticFit5(CRowDouble &x,CRowDouble &y,int n,double &a,double &b,double &c,double &d,double &g,CLSFitReportShell &rep); static void LogisticFit5ec(CRowDouble &x,CRowDouble &y,int n,double cnstrleft,double cnstrright,double &a,double &b,double &c,double &d,double &g,CLSFitReportShell &rep); static void LogisticFit45x(CRowDouble &x,CRowDouble &y,int 
n,double cnstrleft,double cnstrright,bool is4pl,double lambdav,double epsx,int rscnt,double &a,double &b,double &c,double &d,double &g,CLSFitReportShell &rep); //--- least squares (LS) circle static void FitSphereLS(CMatrixDouble &xy,int npoints,int nx,CRowDouble &cx,double &r); static void FitSphereMC(CMatrixDouble &xy,int npoints,int nx,CRowDouble &cx,double &rhi); static void FitSphereMI(CMatrixDouble &xy,int npoints,int nx,CRowDouble &cx,double &rlo); static void FitSphereMZ(CMatrixDouble &xy,int npoints,int nx,CRowDouble &cx,double &rlo,double &rhi); static void FitSphereX(CMatrixDouble &xy,int npoints,int nx,int problemtype,double epsx,int aulits,double penalty,CRowDouble &cx,double &rlo,double &rhi); //--- parametric spline static void PSpline2Build(CMatrixDouble &xy,const int n,const int st,const int pt,CPSpline2InterpolantShell &p); static void PSpline3Build(CMatrixDouble &xy,const int n,const int st,const int pt,CPSpline3InterpolantShell &p); static void PSpline2BuildPeriodic(CMatrixDouble &xy,const int n,const int st,const int pt,CPSpline2InterpolantShell &p); static void PSpline3BuildPeriodic(CMatrixDouble &xy,const int n,const int st,const int pt,CPSpline3InterpolantShell &p); static void PSpline2ParameterValues(CPSpline2InterpolantShell &p,int &n,double &t[]); static void PSpline3ParameterValues(CPSpline3InterpolantShell &p,int &n,double &t[]); static void PSpline2Calc(CPSpline2InterpolantShell &p,const double t,double &x,double &y); static void PSpline3Calc(CPSpline3InterpolantShell &p,const double t,double &x,double &y,double &z); static void PSpline2Tangent(CPSpline2InterpolantShell &p,const double t,double &x,double &y); static void PSpline3Tangent(CPSpline3InterpolantShell &p,const double t,double &x,double &y,double &z); static void PSpline2Diff(CPSpline2InterpolantShell &p,const double t,double &x,double &dx,double &y,double &dy); static void PSpline3Diff(CPSpline3InterpolantShell &p,const double t,double &x,double &dx,double &y,double 
&dy,double &z,double &dz); static void PSpline2Diff2(CPSpline2InterpolantShell &p,const double t,double &x,double &dx,double &d2x,double &y,double &dy,double &d2y); static void PSpline3Diff2(CPSpline3InterpolantShell &p,const double t,double &x,double &dx,double &d2x,double &y,double &dy,double &d2y,double &z,double &dz,double &d2z); static double PSpline2ArcLength(CPSpline2InterpolantShell &p,const double a,const double b); static double PSpline3ArcLength(CPSpline3InterpolantShell &p,const double a,const double b); static void ParametricRDPFixed(CMatrixDouble &x,int n,int d,int stopm,double stopeps,CMatrixDouble &x2,int &idx2[],int &nsections); //--- 2-dimensional spline interpolation static void Spline2DSerialize(CSpline2DInterpolantShell &obj,string &s_out); static void Spline2DUnserialize(string s_in,CSpline2DInterpolantShell &obj); static double Spline2DCalc(CSpline2DInterpolantShell &c,const double x,const double y); static void Spline2DDiff(CSpline2DInterpolantShell &c,const double x,const double y,double &f,double &fx,double &fy,double &fxy); static void Spline2DCalcVBuf(CSpline2DInterpolantShell &c,double x,double y,CRowDouble &f); static double Spline2DCalcVi(CSpline2DInterpolantShell &c,double x,double y,int i); static void Spline2DCalcV(CSpline2DInterpolantShell &c,double x,double y,CRowDouble &f); static void Spline2DDiffVi(CSpline2DInterpolantShell &c,double x,double y,int i,double &f,double &fx,double &fy,double &fxy); static void Spline2DLinTransXY(CSpline2DInterpolantShell &c,const double ax,const double bx,const double ay,const double by); static void Spline2DLinTransF(CSpline2DInterpolantShell &c,const double a,const double b); static void Spline2DCopy(CSpline2DInterpolantShell &c,CSpline2DInterpolantShell &cc); static void Spline2DResampleBicubic(CMatrixDouble &a,const int oldheight,const int oldwidth,CMatrixDouble &b,const int newheight,const int newwidth); static void Spline2DResampleBilinear(CMatrixDouble &a,const int oldheight,const int 
oldwidth,CMatrixDouble &b,const int newheight,const int newwidth); static void Spline2DBuildBilinearV(CRowDouble &x,int n,CRowDouble &y,int m,CRowDouble &f,int d,CSpline2DInterpolantShell &c); static void Spline2DBuildBicubicV(CRowDouble &x,int n,CRowDouble &y,int m,CRowDouble &f,int d,CSpline2DInterpolantShell &c); static void Spline2DUnpackV(CSpline2DInterpolantShell &c,int &m,int &n,int &d,CMatrixDouble &tbl); static void Spline2DBuildBilinear(double &x[],double &y[],CMatrixDouble &f,const int m,const int n,CSpline2DInterpolantShell &c); static void Spline2DBuildBicubic(double &x[],double &y[],CMatrixDouble &f,const int m,const int n,CSpline2DInterpolantShell &c); static void Spline2DUnpack(CSpline2DInterpolantShell &c,int &m,int &n,CMatrixDouble &tbl); static void Spline2DBuilderCreate(int d,CSpline2DBuilder &state); static void Spline2DBuilderSetUserTerm(CSpline2DBuilder &state,double v); static void Spline2DBuilderSetLinTerm(CSpline2DBuilder &state); static void Spline2DBuilderSetConstTerm(CSpline2DBuilder &state); static void Spline2DBuilderSetZeroTerm(CSpline2DBuilder &state); static void Spline2DBuilderSetPoints(CSpline2DBuilder &state,CMatrixDouble &xy,int n); static void Spline2DBuilderSetAreaAuto(CSpline2DBuilder &state); static void Spline2DBuilderSetArea(CSpline2DBuilder &state,double xa,double xb,double ya,double yb); static void Spline2DBuilderSetGrid(CSpline2DBuilder &state,int kx,int ky); static void Spline2DBuilderSetAlgoFastDDM(CSpline2DBuilder &state,int nlayers,double lambdav); static void Spline2DBuilderSetAlgoBlockLLS(CSpline2DBuilder &state,double lambdans); static void Spline2DBuilderSetAlgoNaiveLLS(CSpline2DBuilder &state,double lambdans); static void Spline2DFit(CSpline2DBuilder &state,CSpline2DInterpolantShell &s,CSpline2DFitReport &rep); //--- 3-dimensional spline interpolation static double Spline3DCalc(CSpline3DInterpolant &c,double x,double y,double z); static void Spline3DLinTransXYZ(CSpline3DInterpolant &c,double ax,double 
bx,double ay,double by,double az,double bz); static void Spline3DLinTransF(CSpline3DInterpolant &c,double a,double b); static void Spline3DResampleTrilinear(CRowDouble &a,int oldzcount,int oldycount,int oldxcount,int newzcount,int newycount,int newxcount,CRowDouble &b); static void Spline3DBuildTrilinearV(CRowDouble &x,int n,CRowDouble &y,int m,CRowDouble &z,int l,CRowDouble &f,int d,CSpline3DInterpolant &c); static void Spline3DCalcVBuf(CSpline3DInterpolant &c,double x,double y,double z,CRowDouble &f); static void Spline3DCalcV(CSpline3DInterpolant &c,double x,double y,double z,CRowDouble &f); static void Spline3DUnpackV(CSpline3DInterpolant &c,int &n,int &m,int &l,int &d,int &stype,CMatrixDouble &tbl); //--- RBF model static void RBFSerialize(CRBFModel &obj,string &s_out); static void RBFUnserialize(string s_in,CRBFModel &obj); static void RBFCreate(int nx,int ny,CRBFModel &s); static void RBFCreateCalcBuffer(CRBFModel &s,CRBFCalcBuffer &buf); static void RBFSetPoints(CRBFModel &s,CMatrixDouble &xy,int n); static void RBFSetPoints(CRBFModel &s,CMatrixDouble &xy); static void RBFSetPointsAndScales(CRBFModel &r,CMatrixDouble &xy,int n,CRowDouble &s); static void RBFSetPointsAndScales(CRBFModel &r,CMatrixDouble &xy,CRowDouble &s); static void RBFSetAlgoQNN(CRBFModel &s,double q=1.0,double z=5.0); static void RBFSetAlgoMultilayer(CRBFModel &s,double rbase,int nlayers,double lambdav=0.01); static void RBFSetAlgoHierarchical(CRBFModel &s,double rbase,int nlayers,double lambdans); static void RBFSetAlgoThinPlateSpline(CRBFModel &s,double lambdav=0.0); static void RBFSetAlgoMultiQuadricManual(CRBFModel &s,double alpha,double lambdav=0.0); static void RBFSetAlgoMultiQuadricAuto(CRBFModel &s,double lambdav=0.0); static void RBFSetAlgoBiharmonic(CRBFModel &s,double lambdav=0.0); static void RBFSetLinTerm(CRBFModel &s); static void RBFSetConstTerm(CRBFModel &s); static void RBFSetZeroTerm(CRBFModel &s); static void RBFSetV2BF(CRBFModel &s,int bf); static void 
RBFSetV2Its(CRBFModel &s,int maxits); static void RBFSetV2SupportR(CRBFModel &s,double r); static void RBFBuildModel(CRBFModel &s,CRBFReport &rep); static double RBFCalc1(CRBFModel &s,double x0); static double RBFCalc2(CRBFModel &s,double x0,double x1); static double RBFCalc3(CRBFModel &s,double x0,double x1,double x2); static void RBFDiff1(CRBFModel &s,double x0,double &y,double &dy0); static void RBFDiff2(CRBFModel &s,double x0,double x1,double &y,double &dy0,double &dy1); static void RBFDiff3(CRBFModel &s,double x0,double x1,double x2,double &y,double &dy0,double &dy1,double &dy2); static void RBFCalc(CRBFModel &s,CRowDouble &x,CRowDouble &y); static void RBFDiff(CRBFModel &s,CRowDouble &x,CRowDouble &y,CRowDouble &dy); static void RBFHess(CRBFModel &s,CRowDouble &x,CRowDouble &y,CRowDouble &dy,CRowDouble &d2y); static void RBFCalcBuf(CRBFModel &s,CRowDouble &x,CRowDouble &y); static void RBFDiffBuf(CRBFModel &s,CRowDouble &x,CRowDouble &y,CRowDouble &dy); static void RBFHessBuf(CRBFModel &s,CRowDouble &x,CRowDouble &y,CRowDouble &dy,CRowDouble &d2y); static void RBFTSCalcBuf(CRBFModel &s,CRBFCalcBuffer &buf,CRowDouble &x,CRowDouble &y); static void RBFTSDiffBuf(CRBFModel &s,CRBFCalcBuffer &buf,CRowDouble &x,CRowDouble &y,CRowDouble &dy); static void RBFTSHessBuf(CRBFModel &s,CRBFCalcBuffer &buf,CRowDouble &x,CRowDouble &y,CRowDouble &dy,CRowDouble &d2y); static void RBFGridCalc2(CRBFModel &s,CRowDouble &x0,int n0,CRowDouble &x1,int n1,CMatrixDouble &y); static void RBFGridCalc2V(CRBFModel &s,CRowDouble &x0,int n0,CRowDouble &x1,int n1,CRowDouble &y); static void RBFGridCalc2VSubset(CRBFModel &s,CRowDouble &x0,int n0,CRowDouble &x1,int n1,bool &flagy[],CRowDouble &y); static void RBFGridCalc3V(CRBFModel &s,CRowDouble &x0,int n0,CRowDouble &x1,int n1,CRowDouble &x2,int n2,CRowDouble &y); static void RBFGridCalc3VSubset(CRBFModel &s,CRowDouble &x0,int n0,CRowDouble &x1,int n1,CRowDouble &x2,int n2,bool &flagy[],CRowDouble &y); static void RBFUnpack(CRBFModel 
&s,int &nx,int &ny,CMatrixDouble &xwr,int &nc,CMatrixDouble &v,int &modelversion); static int RBFGetModelVersion(CRBFModel &s); static double RBFPeekProgress(CRBFModel &s); static void RBFRequestTermination(CRBFModel &s); //--- functions of package linalg //--- working with matrix forms static void CMatrixTranspose(const int m,const int n,CMatrixComplex &a,const int ia,const int ja,CMatrixComplex &b,const int ib,const int jb); static void RMatrixTranspose(const int m,const int n,CMatrixDouble &a,const int ia,const int ja,CMatrixDouble &b,const int ib,const int jb); static void RMatrixEnforceSymmetricity(CMatrixDouble &a,int n,bool IsUpper); static void CMatrixCopy(const int m,const int n,CMatrixComplex &a,const int ia,const int ja,CMatrixComplex &b,const int ib,const int jb); static void RVectorCopy(int n,CRowDouble &a,int ia,CRowDouble &b,int ib); static void RMatrixCopy(const int m,const int n,CMatrixDouble &a,const int ia,const int ja,CMatrixDouble &b,const int ib,const int jb); static void RMatrixGenCopy(int m,int n,double alpha,CMatrixDouble &a,int ia,int ja,double beta,CMatrixDouble &b,int ib,int jb); static void RMatrixGer(int m,int n,CMatrixDouble &a,int ia,int ja,double alpha,CRowDouble &u,int iu,CRowDouble &v,int iv); static void CMatrixRank1(const int m,const int n,CMatrixComplex &a,const int ia,const int ja,complex &u[],const int iu,complex &v[],const int iv); static void RMatrixRank1(const int m,const int n,CMatrixDouble &a,const int ia,const int ja,double &u[],const int iu,double &v[],const int iv); static void RMatrixGemVect(int m,int n,double alpha,CMatrixDouble &a,int ia,int ja,int opa,CRowDouble &x,int ix,double beta,CRowDouble &y,int iy); static void CMatrixMVect(const int m,const int n,CMatrixComplex &a,const int ia,const int ja,const int opa,complex &x[],const int ix,complex &y[],const int iy); static void RMatrixMVect(const int m,const int n,CMatrixDouble &a,const int ia,const int ja,const int opa,double &x[],const int ix,double &y[],const int 
iy); static void RMatrixSymVect(int n,double alpha,CMatrixDouble &a,int ia,int ja,bool IsUpper,CRowDouble &x,int ix,double beta,CRowDouble &y,int iy); static double RMatrixSyvMVect(int n,CMatrixDouble &a,int ia,int ja,bool IsUpper,CRowDouble &x,int ix,CRowDouble &tmp); static void RMatrixTrsVect(int n,CMatrixDouble &a,int ia,int ja,bool IsUpper,bool IsUnit,int OpType,CRowDouble &x,int ix); static void CMatrixRightTrsM(const int m,const int n,CMatrixComplex &a,const int i1,const int j1,const bool IsUpper,const bool IsUnit,const int OpType,CMatrixComplex &x,const int i2,const int j2); static void CMatrixLeftTrsM(const int m,const int n,CMatrixComplex &a,const int i1,const int j1,const bool IsUpper,const bool IsUnit,const int OpType,CMatrixComplex &x,const int i2,const int j2); static void RMatrixRightTrsM(const int m,const int n,CMatrixDouble &a,const int i1,const int j1,const bool IsUpper,const bool IsUnit,const int OpType,CMatrixDouble &x,const int i2,const int j2); static void RMatrixLeftTrsM(const int m,const int n,CMatrixDouble &a,const int i1,const int j1,const bool IsUpper,const bool IsUnit,const int OpType,CMatrixDouble &x,const int i2,const int j2); static void CMatrixSyrk(const int n,const int k,const double alpha,CMatrixComplex &a,const int ia,const int ja,const int optypea,const double beta,CMatrixComplex &c,const int ic,const int jc,const bool IsUpper); static void RMatrixSyrk(const int n,const int k,const double alpha,CMatrixDouble &a,const int ia,const int ja,const int optypea,const double beta,CMatrixDouble &c,const int ic,const int jc,const bool IsUpper); static void CMatrixGemm(const int m,const int n,const int k,complex alpha,CMatrixComplex &a,const int ia,const int ja,const int optypea,CMatrixComplex &b,const int ib,const int jb,const int optypeb,complex beta,CMatrixComplex &c,const int ic,const int jc); static void RMatrixGemm(const int m,const int n,const int k,const double alpha,CMatrixDouble &a,const int ia,const int ja,const int 
optypea,CMatrixDouble &b,const int ib,const int jb,const int optypeb,const double beta,CMatrixDouble &c,const int ic,const int jc); //--- orthogonal factorizations static void RMatrixQR(CMatrixDouble &a,const int m,const int n,double &tau[]); static void RMatrixLQ(CMatrixDouble &a,const int m,const int n,double &tau[]); static void CMatrixQR(CMatrixComplex &a,const int m,const int n,complex &tau[]); static void CMatrixLQ(CMatrixComplex &a,const int m,const int n,complex &tau[]); static void RMatrixQRUnpackQ(CMatrixDouble &a,const int m,const int n,double &tau[],const int qcolumns,CMatrixDouble &q); static void RMatrixQRUnpackR(CMatrixDouble &a,const int m,const int n,CMatrixDouble &r); static void RMatrixLQUnpackQ(CMatrixDouble &a,const int m,const int n,double &tau[],const int qrows,CMatrixDouble &q); static void RMatrixLQUnpackL(CMatrixDouble &a,const int m,const int n,CMatrixDouble &l); static void CMatrixQRUnpackQ(CMatrixComplex &a,const int m,const int n,complex &tau[],const int qcolumns,CMatrixComplex &q); static void CMatrixQRUnpackR(CMatrixComplex &a,const int m,const int n,CMatrixComplex &r); static void CMatrixLQUnpackQ(CMatrixComplex &a,const int m,const int n,complex &tau[],const int qrows,CMatrixComplex &q); static void CMatrixLQUnpackL(CMatrixComplex &a,const int m,const int n,CMatrixComplex &l); static void RMatrixBD(CMatrixDouble &a,const int m,const int n,double &tauq[],double &taup[]); static void RMatrixBDUnpackQ(CMatrixDouble &qp,const int m,const int n,double &tauq[],const int qcolumns,CMatrixDouble &q); static void RMatrixBDMultiplyByQ(CMatrixDouble &qp,const int m,const int n,double &tauq[],CMatrixDouble &z,const int zrows,const int zcolumns,const bool fromtheright,const bool dotranspose); static void RMatrixBDUnpackPT(CMatrixDouble &qp,const int m,const int n,double &taup[],const int ptrows,CMatrixDouble &pt); static void RMatrixBDMultiplyByP(CMatrixDouble &qp,const int m,const int n,double &taup[],CMatrixDouble &z,const int zrows,const int 
zcolumns,const bool fromtheright,const bool dotranspose); static void RMatrixBDUnpackDiagonals(CMatrixDouble &b,const int m,const int n,bool &IsUpper,double &d[],double &e[]); static void RMatrixHessenberg(CMatrixDouble &a,const int n,double &tau[]); static void RMatrixHessenbergUnpackQ(CMatrixDouble &a,const int n,double &tau[],CMatrixDouble &q); static void RMatrixHessenbergUnpackH(CMatrixDouble &a,const int n,CMatrixDouble &h); static void SMatrixTD(CMatrixDouble &a,const int n,const bool IsUpper,double &tau[],double &d[],double &e[]); static void SMatrixTDUnpackQ(CMatrixDouble &a,const int n,const bool IsUpper,double &tau[],CMatrixDouble &q); static void HMatrixTD(CMatrixComplex &a,const int n,const bool IsUpper,complex &tau[],double &d[],double &e[]); static void HMatrixTDUnpackQ(CMatrixComplex &a,const int n,const bool IsUpper,complex &tau[],CMatrixComplex &q); //--- eigenvalues and eigenvectors static void EigSubSpaceCreate(int n,int k,CEigSubSpaceState &state); static void EigSubSpaceCreateBuf(int n,int k,CEigSubSpaceState &state); static void EigSubSpaceSetCond(CEigSubSpaceState &state,double eps,int maxits); static void EigSubSpaceSetWarmStart(CEigSubSpaceState &state,bool usewarmstart); static void EigSubSpaceOOCStart(CEigSubSpaceState &state,int mtype); static bool EigSubSpaceOOCContinue(CEigSubSpaceState &state); static void EigSubSpaceOOCGetRequestInfo(CEigSubSpaceState &state,int &requesttype,int &requestsize); static void EigSubSpaceOOCGetRequestData(CEigSubSpaceState &state,CMatrixDouble &x); static void EigSubSpaceOOCSendResult(CEigSubSpaceState &state,CMatrixDouble &ax); static void EigSubSpaceOOCStop(CEigSubSpaceState &state,CRowDouble &w,CMatrixDouble &z,CEigSubSpaceReport &rep); static void EigSubSpaceSolveDenses(CEigSubSpaceState &state,CMatrixDouble &a,bool IsUpper,CRowDouble &w,CMatrixDouble &z,CEigSubSpaceReport &rep); static void EigSubSpaceSolveSparses(CEigSubSpaceState &state,CSparseMatrix &a,bool IsUpper,CRowDouble &w,CMatrixDouble 
&z,CEigSubSpaceReport &rep); static bool SMatrixEVD(CMatrixDouble &a,const int n,int zneeded,const bool IsUpper,double &d[],CMatrixDouble &z); static bool SMatrixEVDR(CMatrixDouble &a,const int n,int zneeded,const bool IsUpper,double b1,double b2,int &m,double &w[],CMatrixDouble &z); static bool SMatrixEVDI(CMatrixDouble &a,const int n,int zneeded,const bool IsUpper,const int i1,const int i2,double &w[],CMatrixDouble &z); static bool HMatrixEVD(CMatrixComplex &a,const int n,const int zneeded,const bool IsUpper,double &d[],CMatrixComplex &z); static bool HMatrixEVDR(CMatrixComplex &a,const int n,const int zneeded,const bool IsUpper,double b1,double b2,int &m,double &w[],CMatrixComplex &z); static bool HMatrixEVDI(CMatrixComplex &a,const int n,const int zneeded,const bool IsUpper,const int i1,const int i2,double &w[],CMatrixComplex &z); static bool SMatrixTdEVD(double &d[],double &e[],const int n,const int zneeded,CMatrixDouble &z); static bool SMatrixTdEVDR(double &d[],double &e[],const int n,const int zneeded,const double a,const double b,int &m,CMatrixDouble &z); static bool SMatrixTdEVDI(double &d[],double &e[],const int n,const int zneeded,const int i1,const int i2,CMatrixDouble &z); static bool RMatrixEVD(CMatrixDouble &a,const int n,const int vneeded,double &wr[],double &wi[],CMatrixDouble &vl,CMatrixDouble &vr); //--- random matrix generation static void RMatrixRndOrthogonal(const int n,CMatrixDouble &a); static void RMatrixRndCond(const int n,const double c,CMatrixDouble &a); static void CMatrixRndOrthogonal(const int n,CMatrixComplex &a); static void CMatrixRndCond(const int n,const double c,CMatrixComplex &a); static void SMatrixRndCond(const int n,const double c,CMatrixDouble &a); static void SPDMatrixRndCond(const int n,const double c,CMatrixDouble &a); static void HMatrixRndCond(const int n,const double c,CMatrixComplex &a); static void HPDMatrixRndCond(const int n,const double c,CMatrixComplex &a); static void 
RMatrixRndOrthogonalFromTheRight(CMatrixDouble &a,const int m,const int n); static void RMatrixRndOrthogonalFromTheLeft(CMatrixDouble &a,const int m,const int n); static void CMatrixRndOrthogonalFromTheRight(CMatrixComplex &a,const int m,const int n); static void CMatrixRndOrthogonalFromTheLeft(CMatrixComplex &a,const int m,const int n); static void SMatrixRndMultiply(CMatrixDouble &a,const int n); static void HMatrixRndMultiply(CMatrixComplex &a,const int n); //--- sparse matrix static void SparseSerialize(CSparseMatrix &obj,string &s_out); static void SparseUunserialize(string s_in,CSparseMatrix &obj); static void SparseCreate(int m,int n,int k,CSparseMatrix &s); static void SparseCreate(int m,int n,CSparseMatrix &s); static void SparseCreateBuf(int m,int n,int k,CSparseMatrix &s); static void SparseCreateBuf(int m,int n,CSparseMatrix &s); static void SparseCreateCRS(int m,int n,CRowInt &ner,CSparseMatrix &s); static void SparseCreateCRS(int m,int n,int &ner[],CSparseMatrix &s); static void SparseCreateCRSBuf(int m,int n,CRowInt &ner,CSparseMatrix &s); static void SparseCreateCRSBuf(int m,int n,int &ner[],CSparseMatrix &s); static void SparseCreateSKS(int m,int n,CRowInt &d,CRowInt &u,CSparseMatrix &s); static void SparseCreateSKS(int m,int n,int &d[],int &u[],CSparseMatrix &s); static void SparseCreateSKSBuf(int m,int n,CRowInt &d,CRowInt &u,CSparseMatrix &s); static void SparseCreateSKSBuf(int m,int n,int &d[],int &u[],CSparseMatrix &s); static void SparseCreateSKSBand(int m,int n,int bw,CSparseMatrix &s); static void SparseCreateSKSBandBuf(int m,int n,int bw,CSparseMatrix &s); static void SparseCopy(CSparseMatrix &s0,CSparseMatrix &s1); static void SparseCopyBuf(CSparseMatrix &s0,CSparseMatrix &s1); static void SparseSwap(CSparseMatrix &s0,CSparseMatrix &s1); static void SparseAdd(CSparseMatrix &s,int i,int j,double v); static void SparseSet(CSparseMatrix &s,int i,int j,double v); static double SparseGet(CSparseMatrix &s,int i,int j); static bool 
SparseExists(CSparseMatrix &s,int i,int j); static double SparseGetDiagonal(CSparseMatrix &s,int i); static void SparseMV(CSparseMatrix &s,CRowDouble &x,CRowDouble &y); static void SparseMTV(CSparseMatrix &s,CRowDouble &x,CRowDouble &y); static void SparseGemV(CSparseMatrix &s,double alpha,int ops,CRowDouble &x,int ix,double beta,CRowDouble &y,int iy); static void SparseMV2(CSparseMatrix &s,CRowDouble &x,CRowDouble &y0,CRowDouble &y1); static void SparseSMV(CSparseMatrix &s,bool IsUpper,CRowDouble &x,CRowDouble &y); static double SparseVSMV(CSparseMatrix &s,bool IsUpper,CRowDouble &x); static void SparseMM(CSparseMatrix &s,CMatrixDouble &a,int k,CMatrixDouble &b); static void SparseMTM(CSparseMatrix &s,CMatrixDouble &a,int k,CMatrixDouble &b); static void SparseMM2(CSparseMatrix &s,CMatrixDouble &a,int k,CMatrixDouble &b0,CMatrixDouble &b1); static void SparseSMM(CSparseMatrix &s,bool IsUpper,CMatrixDouble &a,int k,CMatrixDouble &b); static void SparseTRMV(CSparseMatrix &s,bool IsUpper,bool IsUnit,int OpType,CRowDouble &x,CRowDouble &y); static void SparseTRSV(CSparseMatrix &s,bool IsUpper,bool IsUnit,int OpType,CRowDouble &x); static void SparseSymmPermTbl(CSparseMatrix &a,bool IsUpper,CRowInt &p,CSparseMatrix &b); static void SparseSymmPermTblBuf(CSparseMatrix &a,bool IsUpper,CRowInt &p,CSparseMatrix &b); static void SparseResizeMatrix(CSparseMatrix &s); static bool SparseEnumerate(CSparseMatrix &s,int &t0,int &t1,int &i,int &j,double &v); static bool SparseRewriteExisting(CSparseMatrix &s,int i,int j,double v); static void SparseGetRow(CSparseMatrix &s,int i,CRowDouble &irow); static void SparseGetCompressedRow(CSparseMatrix &s,int i,CRowInt &colidx,CRowDouble &vals,int &nzcnt); static void SparseTransposeSKS(CSparseMatrix &s); static void SparseTransposeCRS(CSparseMatrix &s); static void SparseCopyTransposeCRS(CSparseMatrix &s0,CSparseMatrix &s1); static void SparseCopyTransposeCRSBuf(CSparseMatrix &s0,CSparseMatrix &s1); static void 
SparseConvertTo(CSparseMatrix &s0,int fmt); static void SparseCopyToBuf(CSparseMatrix &s0,int fmt,CSparseMatrix &s1); static void SparseConvertToHash(CSparseMatrix &s); static void SparseCopyToHash(CSparseMatrix &s0,CSparseMatrix &s1); static void SparseCopyToHashBuf(CSparseMatrix &s0,CSparseMatrix &s1); static void SparseConvertToCRS(CSparseMatrix &s); static void SparseCopyToCRS(CSparseMatrix &s0,CSparseMatrix &s1); static void SparseCopyToCRSBuf(CSparseMatrix &s0,CSparseMatrix &s1); static void SparseConvertToSKS(CSparseMatrix &s); static void SparseCopyToSKS(CSparseMatrix &s0,CSparseMatrix &s1); static void SparseCopyToSKSBuf(CSparseMatrix &s0,CSparseMatrix &s1); static int SparseGetMatrixType(CSparseMatrix &s); static bool SparseIsHash(CSparseMatrix &s); static bool SparseIsCRS(CSparseMatrix &s); static bool SparseIsSKS(CSparseMatrix &s); static void SparseFree(CSparseMatrix &s); static int SparseGetNRows(CSparseMatrix &s); static int SparseGetNCols(CSparseMatrix &s); static int SparseGetUpperCount(CSparseMatrix &s); static int SparseGetLowerCount(CSparseMatrix &s); //--- triangular factorizations static void RMatrixLU(CMatrixDouble &a,const int m,const int n,int &pivots[]); static void CMatrixLU(CMatrixComplex &a,const int m,const int n,int &pivots[]); static bool HPDMatrixCholesky(CMatrixComplex &a,const int n,const bool IsUpper); static bool SPDMatrixCholesky(CMatrixDouble &a,const int n,const bool IsUpper); static void SPDMatrixCholeskyUpdateAdd1(CMatrixDouble &a,int n,bool IsUpper,CRowDouble &u); static void SPDMatrixCholeskyUpdateFix(CMatrixDouble &a,int n,bool IsUpper,bool &fix[]); static void SPDMatrixCholeskyUpdateAdd1Buf(CMatrixDouble &a,int n,bool IsUpper,CRowDouble &u,CRowDouble &bufr); static void SPDMatrixCholeskyUpdateFixBuf(CMatrixDouble &a,int n,bool IsUpper,bool &fix[],CRowDouble &bufr); static bool SparseLU(CSparseMatrix &a,int pivottype,CRowInt &p,CRowInt &q); static bool SparseCholeskySkyLine(CSparseMatrix &a,int n,bool IsUpper); static 
bool SparseCholesky(CSparseMatrix &a,bool IsUpper); static bool SparseCholeskyP(CSparseMatrix &a,bool IsUpper,CRowInt &p); static bool sparsecholeskyanalyze(CSparseMatrix &a,bool IsUpper,int facttype,int permtype,CSparseDecompositionAnalysis &analysis); static bool SparseCholeskyFactorize(CSparseDecompositionAnalysis &analysis,bool needupper,CSparseMatrix &a,CRowDouble &d,CRowInt &p); static void SparseCholeskyReload(CSparseDecompositionAnalysis &analysis,CSparseMatrix &a,bool IsUpper); //--- estimate of the condition numbers static double RMatrixRCond1(CMatrixDouble &a,const int n); static double RMatrixRCondInf(CMatrixDouble &a,const int n); static double SPDMatrixRCond(CMatrixDouble &a,const int n,const bool IsUpper); static double RMatrixTrRCond1(CMatrixDouble &a,const int n,const bool IsUpper,const bool IsUnit); static double RMatrixTrRCondInf(CMatrixDouble &a,const int n,const bool IsUpper,const bool IsUnit); static double HPDMatrixRCond(CMatrixComplex &a,const int n,const bool IsUpper); static double CMatrixRCond1(CMatrixComplex &a,const int n); static double CMatrixRCondInf(CMatrixComplex &a,const int n); static double RMatrixLURCond1(CMatrixDouble &lua,const int n); static double RMatrixLURCondInf(CMatrixDouble &lua,const int n); static double SPDMatrixCholeskyRCond(CMatrixDouble &a,const int n,const bool IsUpper); static double HPDMatrixCholeskyRCond(CMatrixComplex &a,const int n,const bool IsUpper); static double CMatrixLURCond1(CMatrixComplex &lua,const int n); static double CMatrixLURCondInf(CMatrixComplex &lua,const int n); static double CMatrixTrRCond1(CMatrixComplex &a,const int n,const bool IsUpper,const bool IsUnit); static double CMatrixTrRCondInf(CMatrixComplex &a,const int n,const bool IsUpper,const bool IsUnit); //--- norm estimator static void NormEstimatorCreate(int m,int n,int nstart,int nits,CNormEstimatorState &state); static void NormEstimatorSetSeed(CNormEstimatorState &state,int seedval); static void 
NormEstimatorEstimateSparse(CNormEstimatorState &state,CSparseMatrix &a); static void NormEstimatorResults(CNormEstimatorState &state,double &nrm); //--- matrix inversion static void RMatrixLUInverse(CMatrixDouble &a,int &pivots[],const int n,int &info,CMatInvReportShell &rep); static void RMatrixLUInverse(CMatrixDouble &a,int &pivots[],int &info,CMatInvReportShell &rep); static void RMatrixInverse(CMatrixDouble &a,const int n,int &info,CMatInvReportShell &rep); static void RMatrixInverse(CMatrixDouble &a,int &info,CMatInvReportShell &rep); static void CMatrixLUInverse(CMatrixComplex &a,int &pivots[],const int n,int &info,CMatInvReportShell &rep); static void CMatrixLUInverse(CMatrixComplex &a,int &pivots[],int &info,CMatInvReportShell &rep); static void CMatrixInverse(CMatrixComplex &a,const int n,int &info,CMatInvReportShell &rep); static void CMatrixInverse(CMatrixComplex &a,int &info,CMatInvReportShell &rep); static void SPDMatrixCholeskyInverse(CMatrixDouble &a,const int n,const bool IsUpper,int &info,CMatInvReportShell &rep); static void SPDMatrixCholeskyInverse(CMatrixDouble &a,int &info,CMatInvReportShell &rep); static void SPDMatrixInverse(CMatrixDouble &a,const int n,const bool IsUpper,int &info,CMatInvReportShell &rep); static void SPDMatrixInverse(CMatrixDouble &a,int &info,CMatInvReportShell &rep); static void HPDMatrixCholeskyInverse(CMatrixComplex &a,const int n,const bool IsUpper,int &info,CMatInvReportShell &rep); static void HPDMatrixCholeskyInverse(CMatrixComplex &a,int &info,CMatInvReportShell &rep); static void HPDMatrixInverse(CMatrixComplex &a,const int n,const bool IsUpper,int &info,CMatInvReportShell &rep); static void HPDMatrixInverse(CMatrixComplex &a,int &info,CMatInvReportShell &rep); static void RMatrixTrInverse(CMatrixDouble &a,const int n,const bool IsUpper,const bool IsUnit,int &info,CMatInvReportShell &rep); static void RMatrixTrInverse(CMatrixDouble &a,const bool IsUpper,int &info,CMatInvReportShell &rep); static void 
CMatrixTrInverse(CMatrixComplex &a,const int n,const bool IsUpper,const bool IsUnit,int &info,CMatInvReportShell &rep); static void CMatrixTrInverse(CMatrixComplex &a,const bool IsUpper,int &info,CMatInvReportShell &rep); //--- singular value decomposition of a bidiagonal matrix static bool RMatrixBdSVD(double &d[],double &e[],const int n,const bool IsUpper,bool isfractionalaccuracyrequired,CMatrixDouble &u,const int nru,CMatrixDouble &c,const int ncc,CMatrixDouble &vt,const int ncvt); //--- singular value decomposition static bool RMatrixSVD(CMatrixDouble &a,const int m,const int n,const int uneeded,const int vtneeded,const int additionalmemory,double &w[],CMatrixDouble &u,CMatrixDouble &vt); //--- calculation determinant of the matrix static double RMatrixLUDet(CMatrixDouble &a,int &pivots[],const int n); static double RMatrixLUDet(CMatrixDouble &a,int &pivots[]); static double RMatrixDet(CMatrixDouble &a,const int n); static double RMatrixDet(CMatrixDouble &a); static complex CMatrixLUDet(CMatrixComplex &a,int &pivots[],const int n); static complex CMatrixLUDet(CMatrixComplex &a,int &pivots[]); static complex CMatrixDet(CMatrixComplex &a,const int n); static complex CMatrixDet(CMatrixComplex &a); static double SPDMatrixCholeskyDet(CMatrixDouble &a,const int n); static double SPDMatrixCholeskyDet(CMatrixDouble &a); static double SPDMatrixDet(CMatrixDouble &a,const int n,const bool IsUpper); static double SPDMatrixDet(CMatrixDouble &a); //--- generalized symmetric positive definite eigenproblem static bool SMatrixGEVD(CMatrixDouble &a,const int n,const bool isuppera,CMatrixDouble &b,const bool isupperb,const int zneeded,const int problemtype,double &d[],CMatrixDouble &z); static bool SMatrixGEVDReduce(CMatrixDouble &a,const int n,const bool isuppera,CMatrixDouble &b,const bool isupperb,const int problemtype,CMatrixDouble &r,bool &isupperr); //--- update of the inverse matrix by the Sherman-Morrison formula static void RMatrixInvUpdateSimple(CMatrixDouble 
&inva,const int n,const int updrow,const int updcolumn,const double updval); static void RMatrixInvUpdateRow(CMatrixDouble &inva,const int n,const int updrow,double &v[]); static void RMatrixInvUpdateColumn(CMatrixDouble &inva,const int n,const int updcolumn,double &u[]); static void RMatrixInvUpdateUV(CMatrixDouble &inva,const int n,double &u[],double &v[]); //--- Schur decomposition static bool RMatrixSchur(CMatrixDouble &a,const int n,CMatrixDouble &s); //--- functions of package optimization //--- conjugate gradient method static void MinCGCreate(const int n,double &x[],CMinCGStateShell &state); static void MinCGCreate(double &x[],CMinCGStateShell &state); static void MinCGCreateF(const int n,double &x[],double diffstep,CMinCGStateShell &state); static void MinCGCreateF(double &x[],double diffstep,CMinCGStateShell &state); static void MinCGSetCond(CMinCGStateShell &state,double epsg,double epsf,double epsx,int maxits); static void MinCGSetScale(CMinCGStateShell &state,double &s[]); static void MinCGSetXRep(CMinCGStateShell &state,bool needxrep); static void MinCGSetCGType(CMinCGStateShell &state,int cgtype); static void MinCGSetStpMax(CMinCGStateShell &state,double stpmax); static void MinCGSuggestStep(CMinCGStateShell &state,double stp); static void MinCGSetPrecDefault(CMinCGStateShell &state); static void MinCGSetPrecDiag(CMinCGStateShell &state,double &d[]); static void MinCGSetPrecScale(CMinCGStateShell &state); static bool MinCGIteration(CMinCGStateShell &state); static void MinCGOptimize(CMinCGStateShell &state,CNDimensional_Func &func,CNDimensional_Rep &rep,bool rep_status,CObject &obj); static void MinCGOptimize(CMinCGStateShell &state,CNDimensional_Grad &grad,CNDimensional_Rep &rep,bool rep_status,CObject &obj); static void MinCGResults(CMinCGStateShell &state,double &x[],CMinCGReportShell &rep); static void MinCGResultsBuf(CMinCGStateShell &state,double &x[],CMinCGReportShell &rep); static void MinCGRestartFrom(CMinCGStateShell &state,double &x[]); 
static void MinLBFGSRequestTermination(CMinLBFGSStateShell &state); //--- bound constrained optimization with additional linear equality and inequality constraints static void MinBLEICCreate(const int n,double &x[],CMinBLEICStateShell &state); static void MinBLEICCreate(double &x[],CMinBLEICStateShell &state); static void MinBLEICCreateF(const int n,double &x[],double diffstep,CMinBLEICStateShell &state); static void MinBLEICCreateF(double &x[],double diffstep,CMinBLEICStateShell &state); static void MinBLEICSetBC(CMinBLEICStateShell &state,double &bndl[],double &bndu[]); static void MinBLEICSetLC(CMinBLEICStateShell &state,CMatrixDouble &c,int &ct[],const int k); static void MinBLEICSetLC(CMinBLEICStateShell &state,CMatrixDouble &c,int &ct[]); static void MinBLEICSetInnerCond(CMinBLEICStateShell &state,const double epsg,const double epsf,const double epsx); static void MinBLEICSetOuterCond(CMinBLEICStateShell &state,const double epsx,const double epsi); static void MinBLEICSetScale(CMinBLEICStateShell &state,double &s[]); static void MinBLEICSetPrecDefault(CMinBLEICStateShell &state); static void MinBLEICSetPrecDiag(CMinBLEICStateShell &state,double &d[]); static void MinBLEICSetPrecScale(CMinBLEICStateShell &state); static void MinBLEICSetMaxIts(CMinBLEICStateShell &state,const int maxits); static void MinBLEICSetXRep(CMinBLEICStateShell &state,bool needxrep); static void MinBLEICSetStpMax(CMinBLEICStateShell &state,double stpmax); static bool MinBLEICIteration(CMinBLEICStateShell &state); static void MinBLEICOptimize(CMinBLEICStateShell &state,CNDimensional_Func &func,CNDimensional_Rep &rep,bool rep_status,CObject &obj); static void MinBLEICOptimize(CMinBLEICStateShell &state,CNDimensional_Grad &grad,CNDimensional_Rep &rep,bool rep_status,CObject &obj); static void MinBLEICOptGuardGradient(CMinBLEICStateShell &state,double teststep); static void MinBLEICOptGuardSmoothness(CMinBLEICStateShell &state,int level=1); static void 
MinBLEICOptGuardResults(CMinBLEICStateShell &state,COptGuardReport &rep); static void MinBLEICOptGuardNonC1Test0Results(CMinBLEICStateShell &state,COptGuardNonC1Test0Report &strrep,COptGuardNonC1Test0Report &lngrep); static void MinBLEICOptGuardNonC1Test1Results(CMinBLEICStateShell &state,COptGuardNonC1Test1Report &strrep,COptGuardNonC1Test1Report &lngrep); static void MinBLEICResults(CMinBLEICStateShell &state,double &x[],CMinBLEICReportShell &rep); static void MinBLEICResultsBuf(CMinBLEICStateShell &state,double &x[],CMinBLEICReportShell &rep); static void MinBLEICRestartFrom(CMinBLEICStateShell &state,double &x[]); static void MinBLEICRequestTermination(CMinBLEICStateShell &state); //--- limited memory BFGS method for large scale optimization static void MinLBFGSCreate(const int n,const int m,double &x[],CMinLBFGSStateShell &state); static void MinLBFGSCreate(const int m,double &x[],CMinLBFGSStateShell &state); static void MinLBFGSCreateF(const int n,const int m,double &x[],const double diffstep,CMinLBFGSStateShell &state); static void MinLBFGSCreateF(const int m,double &x[],const double diffstep,CMinLBFGSStateShell &state); static void MinLBFGSSetCond(CMinLBFGSStateShell &state,const double epsg,const double epsf,const double epsx,const int maxits); static void MinLBFGSSetXRep(CMinLBFGSStateShell &state,const bool needxrep); static void MinLBFGSSetStpMax(CMinLBFGSStateShell &state,const double stpmax); static void MinLBFGSSetScale(CMinLBFGSStateShell &state,double &s[]); static void MinLBFGSSetPrecDefault(CMinLBFGSStateShell &state); static void MinLBFGSSetPrecCholesky(CMinLBFGSStateShell &state,CMatrixDouble &p,const bool IsUpper); static void MinLBFGSSetPrecDiag(CMinLBFGSStateShell &state,double &d[]); static void MinLBFGSSetPrecScale(CMinLBFGSStateShell &state); static bool MinLBFGSIteration(CMinLBFGSStateShell &state); static void MinLBFGSOptimize(CMinLBFGSStateShell &state,CNDimensional_Func &func,CNDimensional_Rep &rep,bool rep_status,CObject &obj); 
static void MinLBFGSOptimize(CMinLBFGSStateShell &state,CNDimensional_Grad &grad,CNDimensional_Rep &rep,bool rep_status,CObject &obj); static void MinLBFGSResults(CMinLBFGSStateShell &state,double &x[],CMinLBFGSReportShell &rep); static void MinLBFGSresultsbuf(CMinLBFGSStateShell &state,double &x[],CMinLBFGSReportShell &rep); static void MinLBFGSRestartFrom(CMinLBFGSStateShell &state,double &x[]); //--- constrained quadratic programming static void MinQPCreate(const int n,CMinQPStateShell &state); static void MinQPSetLinearTerm(CMinQPStateShell &state,double &b[]); static void MinQPSetQuadraticTerm(CMinQPStateShell &state,CMatrixDouble &a,const bool IsUpper); static void MinQPSetQuadraticTerm(CMinQPStateShell &state,CMatrixDouble &a); static void MinQPSetQuadraticTermSparse(CMinQPStateShell &state,CSparseMatrix &a,bool IsUpper); static void MinQPSetStartingPoint(CMinQPStateShell &state,double &x[]); static void MinQPSetOrigin(CMinQPStateShell &state,double &xorigin[]); static void MinQPSetScale(CMinQPStateShell &state,CRowDouble &s); static void MinQPSetScaleAutoDiag(CMinQPStateShell &state); static void MinQPSetAlgoBLEIC(CMinQPStateShell &state,double epsg,double epsf,double epsx,int maxits); static void MinQPSetAlgoDenseAUL(CMinQPStateShell &state,double epsx,double rho,int itscnt); static void MinQPSetAlgoDenseIPM(CMinQPStateShell &state,double eps); static void MinQPSetAlgoSparseIPM(CMinQPStateShell &state,double eps); static void MinQPSetAlgoQuickQP(CMinQPStateShell &state,double epsg,double epsf,double epsx,int maxouterits,bool usenewton); static void MinQPSetBCAll(CMinQPStateShell &state,double bndl,double bndu); static void MinQPSetAlgoCholesky(CMinQPStateShell &state); static void MinQPSetBC(CMinQPStateShell &state,double &bndl[],double &bndu[]); static void MinQPSetBCI(CMinQPStateShell &state,int i,double bndl,double bndu); static void MinQPSetLC(CMinQPStateShell &state,CMatrixDouble &c,CRowInt &ct,int k); static void MinQPSetLC(CMinQPStateShell 
&state,CMatrixDouble &c,CRowInt &ct); static void MinQPSetLCSparse(CMinQPStateShell &state,CSparseMatrix &c,CRowInt &ct,int k); static void MinQPSetLCMixed(CMinQPStateShell &state,CSparseMatrix &sparsec,CRowInt &sparsect,int sparsek,CMatrixDouble &densec,CRowInt &densect,int densek); static void MinQPSetLCMixedLegacy(CMinQPStateShell &state,CMatrixDouble &densec,CRowInt &densect,int densek,CSparseMatrix &sparsec,CRowInt &sparsect,int sparsek); static void MinQPSetLC2Dense(CMinQPStateShell &state,CMatrixDouble &a,CRowDouble &al,CRowDouble &au,int k); static void MinQPSetLC2Dense(CMinQPStateShell &state,CMatrixDouble &a,CRowDouble &al,CRowDouble &au); static void MinQPSetLC2(CMinQPStateShell &state,CSparseMatrix &a,CRowDouble &al,CRowDouble &au,int k); static void MinQPSetLC2Mixed(CMinQPStateShell &state,CSparseMatrix &sparsea,int ksparse,CMatrixDouble &densea,int kdense,CRowDouble &al,CRowDouble &au); static void MinQPAddLC2Dense(CMinQPStateShell &state,CRowDouble &a,double al,double au); static void MinQPAddLC2(CMinQPStateShell &state,CRowInt &idxa,CRowDouble &vala,int nnz,double al,double au); static void MinQPAddLC2SparseFromDense(CMinQPStateShell &state,CRowDouble &da,double al,double au); static void MinQPOptimize(CMinQPStateShell &state); static void MinQPResults(CMinQPStateShell &state,double &x[],CMinQPReportShell &rep); static void MinQPResultsBuf(CMinQPStateShell &state,double &x[],CMinQPReportShell &rep); //--- Levenberg-Marquardt method static void MinLMCreateVJ(const int n,const int m,double &x[],CMinLMStateShell &state); static void MinLMCreateVJ(const int m,double &x[],CMinLMStateShell &state); static void MinLMCreateV(const int n,const int m,double &x[],double diffstep,CMinLMStateShell &state); static void MinLMCreateV(const int m,double &x[],const double diffstep,CMinLMStateShell &state); static void MinLMCreateFGH(const int n,double &x[],CMinLMStateShell &state); static void MinLMCreateFGH(double &x[],CMinLMStateShell &state); static void 
MinLMSetCond(CMinLMStateShell &state,const double epsx,const int maxits); static void MinLMSetXRep(CMinLMStateShell &state,const bool needxrep); static void MinLMSetStpMax(CMinLMStateShell &state,const double stpmax); static void MinLMSetScale(CMinLMStateShell &state,double &s[]); static void MinLMSetBC(CMinLMStateShell &state,double &bndl[],double &bndu[]); static void MinLMSetAccType(CMinLMStateShell &state,const int acctype); static bool MinLMIteration(CMinLMStateShell &state); static void MinLMOptimize(CMinLMStateShell &state,CNDimensional_FVec &fvec,CNDimensional_Rep &rep,bool rep_status,CObject &obj); static void MinLMOptimize(CMinLMStateShell &state,CNDimensional_FVec &fvec,CNDimensional_Jac &jac,CNDimensional_Rep &rep,bool rep_status,CObject &obj); static void MinLMOptimize(CMinLMStateShell &state,CNDimensional_Func &func,CNDimensional_Grad &grad,CNDimensional_Hess &hess,CNDimensional_Rep &rep,bool rep_status,CObject &obj); static void MinLMOptimize(CMinLMStateShell &state,CNDimensional_Func &func,CNDimensional_Jac &jac,CNDimensional_Rep &rep,bool rep_status,CObject &obj); static void MinLMOptimize(CMinLMStateShell &state,CNDimensional_Func &func,CNDimensional_Grad &grad,CNDimensional_Jac &jac,CNDimensional_Rep &rep,bool rep_status,CObject &obj); static void MinLMResults(CMinLMStateShell &state,double &x[],CMinLMReportShell &rep); static void MinLMResultsBuf(CMinLMStateShell &state,double &x[],CMinLMReportShell &rep); static void MinLMRestartFrom(CMinLMStateShell &state,double &x[]); static void MinLMCreateVGJ(const int n,const int m,double &x[],CMinLMStateShell &state); static void MinLMCreateVGJ(const int m,double &x[],CMinLMStateShell &state); static void MinLMCreateFGJ(const int n,const int m,double &x[],CMinLMStateShell &state); static void MinLMCreateFGJ(const int m,double &x[],CMinLMStateShell &state); static void MinLMCreateFJ(const int n,const int m,double &x[],CMinLMStateShell &state); static void MinLMCreateFJ(const int m,double 
&x[],CMinLMStateShell &state); //--- linear programming static void MinLPCreate(int n,CMinLPState &state); static void MinLPSetAlgoDSS(CMinLPState &state,double eps); static void MinLPSetAlgoIPM(CMinLPState &state,double eps=0); static void MinLPSetCost(CMinLPState &state,CRowDouble &c); static void MinLPSetScale(CMinLPState &state,CRowDouble &s); static void MinLPSetBC(CMinLPState &state,CRowDouble &bndl,CRowDouble &bndu); static void MinLPSetBCAll(CMinLPState &state,double bndl,double bndu); static void MinLPSetBCi(CMinLPState &state,int i,double bndl,double bndu); static void MinLPSetLC(CMinLPState &state,CMatrixDouble &a,CRowInt &ct,int k); static void MinLPSetLC(CMinLPState &state,CMatrixDouble &a,CRowInt &ct); static void MinLPSetLC2Dense(CMinLPState &state,CMatrixDouble &a,CRowDouble &al,CRowDouble &au,int k); static void MinLPSetLC2Dense(CMinLPState &state,CMatrixDouble &a,CRowDouble &al,CRowDouble &au); static void MinLPSetLC2(CMinLPState &state,CSparseMatrix &a,CRowDouble &al,CRowDouble &au,int k); static void MinLPAddLC2Dense(CMinLPState &state,CRowDouble &a,double al,double au); static void MinLPAddLC2(CMinLPState &state,CRowInt &idxa,CRowDouble &vala,int nnz,double al,double au); static void MinLPOptimize(CMinLPState &state); static void MinLPResults(CMinLPState &state,CRowDouble &x,CMinLPReport &rep); static void MinLPResultsBuf(CMinLPState &state,CRowDouble &x,CMinLPReport &rep); //--- non-linear constrained optimization static void MinNLCCreate(int n,CRowDouble &x,CMinNLCState &state); static void MinNLCCreate(CRowDouble &x,CMinNLCState &state); static void MinNLCCreateF(int n,CRowDouble &x,double diffstep,CMinNLCState &state); static void MinNLCCreateF(CRowDouble &x,double diffstep,CMinNLCState &state); static void MinNLCSetBC(CMinNLCState &state,CRowDouble &bndl,CRowDouble &bndu); static void MinNLCSetLC(CMinNLCState &state,CMatrixDouble &c,CRowInt &ct,int k); static void MinNLCSetLC(CMinNLCState &state,CMatrixDouble &c,CRowInt &ct); static void 
MinNLCSetNLC(CMinNLCState &state,int nlec,int nlic); static void MinNLCSetCond(CMinNLCState &state,double epsx,int maxits); static void MinNLCSetScale(CMinNLCState &state,CRowDouble &s); static void MinNLCSetPrecInexact(CMinNLCState &state); static void MinNLCSetPrecExactLowRank(CMinNLCState &state,int updatefreq); static void MinNLCSetPrecExactRobust(CMinNLCState &state,int updatefreq); static void MinNLCSetPrecNone(CMinNLCState &state); static void MinNLCSetSTPMax(CMinNLCState &state,double stpmax); static void MinNLCSetAlgoAUL(CMinNLCState &state,double rho,int itscnt); static void MinNLCSetAlgoSLP(CMinNLCState &state); static void MinNLCSetAlgoSQP(CMinNLCState &state); static void MinNLCSetXRep(CMinNLCState &state,bool needxrep); static bool MinNLCIteration(CMinNLCState &state); static void MinNLCOptimize(CMinNLCState &state,CNDimensional_FVec &fvec,CNDimensional_Rep &rep,CObject &obj); static void MinNLCOptimize(CMinNLCState &state,CNDimensional_Jac &jac,CNDimensional_Rep &rep,CObject &obj); static void MinNLCOptGuardGradient(CMinNLCState &state,double teststep); static void MinNLCOptGuardSmoothness(CMinNLCState &state,int level=1); static void MinNLCOptGuardResults(CMinNLCState &state,COptGuardReport &rep); static void MinNLCOptGuardNonC1Test0Results(CMinNLCState &state,COptGuardNonC1Test0Report &strrep,COptGuardNonC1Test0Report &lngrep); static void MinNLCOptGuardNonC1Test1Results(CMinNLCState &state,COptGuardNonC1Test1Report &strrep,COptGuardNonC1Test1Report &lngrep); static void MinNLCResults(CMinNLCState &state,CRowDouble &x,CMinNLCReport &rep); static void MinNLCResultsBuf(CMinNLCState &state,CRowDouble &x,CMinNLCReport &rep); static void MinNLCRequestTermination(CMinNLCState &state); static void MinNLCRestartFrom(CMinNLCState &state,CRowDouble &x); //--- non-smooth non-convex optimization static void MinNSCreate(int n,CRowDouble &x,CMinNSState &state); static void MinNSCreate(CRowDouble &x,CMinNSState &state); static void MinNSCreateF(int n,CRowDouble 
&x,double diffstep,CMinNSState &state); static void MinNSCreateF(CRowDouble &x,double diffstep,CMinNSState &state); static void MinNSSetBC(CMinNSState &state,CRowDouble &bndl,CRowDouble &bndu); static void MinNSSetLC(CMinNSState &state,CMatrixDouble &c,CRowInt &ct,int k); static void MinNSSetLC(CMinNSState &state,CMatrixDouble &c,CRowInt &ct); static void MinNSSetNLC(CMinNSState &state,int nlec,int nlic); static void MinNSSetCond(CMinNSState &state,double epsx,int maxits); static void MinNSSetScale(CMinNSState &state,CRowDouble &s); static void MinNSSetAlgoAGS(CMinNSState &state,double radius,double penalty); static void MinNSSetXRep(CMinNSState &state,bool needxrep); static void MinNSRequestTermination(CMinNSState &state); static bool MinNSIteration(CMinNSState &state); static void MinNSOptimize(CMinNSState &state,CNDimensional_FVec &fvec,CNDimensional_Rep &rep,CObject &obj); static void MinNSOptimize(CMinNSState &state,CNDimensional_Jac &jac,CNDimensional_Rep &rep,CObject &obj); static void MinNSResults(CMinNSState &state,CRowDouble &x,CMinNSReport &rep); static void MinNSResultsBuf(CMinNSState &state,CRowDouble &x,CMinNSReport &rep); static void MinNSRestartFrom(CMinNSState &state,CRowDouble &x); //---box constrained optimization static void MinBCCreate(int n,CRowDouble &x,CMinBCState &state); static void MinBCCreate(CRowDouble &x,CMinBCState &state); static void MinBCCreateF(int n,CRowDouble &x,double diffstep,CMinBCState &state); static void MinBCCreateF(CRowDouble &x,double diffstep,CMinBCState &state); static void MinBCSetBC(CMinBCState &state,CRowDouble &bndl,CRowDouble &bndu); static void MinBCSetCond(CMinBCState &state,double epsg,double epsf,double epsx,int maxits); static void MinBCSetScale(CMinBCState &state,CRowDouble &s); static void MinBCSetPrecDefault(CMinBCState &state); static void MinBCSetPrecDiag(CMinBCState &state,CRowDouble &d); static void MinBCSetPrecScale(CMinBCState &state); static void MinBCSetXRep(CMinBCState &state,bool needxrep); 
static void MinBCSetStpMax(CMinBCState &state,double stpmax); static bool MinBCIteration(CMinBCState &state); static void MinBCOptimize(CMinBCState &state,CNDimensional_Func &func,CNDimensional_Rep &rep,CObject &obj); static void MinBCOptimize(CMinBCState &state,CNDimensional_Grad &grad,CNDimensional_Rep &rep,CObject &obj); static void MinBCOptGuardGradient(CMinBCState &state,double teststep); static void MinBCOptGuardSmoothness(CMinBCState &state,int level=1); static void MinBCOptGuardResults(CMinBCState &state,COptGuardReport &rep); static void MinBCOptGuardNonC1Test0Results(CMinBCState &state,COptGuardNonC1Test0Report &strrep,COptGuardNonC1Test0Report &lngrep); static void MinBCOptGuardNonC1Test1Results(CMinBCState &state,COptGuardNonC1Test1Report &strrep,COptGuardNonC1Test1Report &lngrep); static void MinBCResults(CMinBCState &state,CRowDouble &x,CMinBCReport &rep); static void MinBCResultsBuf(CMinBCState &state,CRowDouble &x,CMinBCReport &rep); static void MinBCRestartFrom(CMinBCState &state,CRowDouble &x); static void MinBCRequestTermination(CMinBCState &state); //--- optimization static void MinLBFGSSetDefaultPreconditioner(CMinLBFGSStateShell &state); static void MinLBFGSSetCholeskyPreconditioner(CMinLBFGSStateShell &state,CMatrixDouble &p,bool IsUpper); static void MinBLEICSetBarrierWidth(CMinBLEICStateShell &state,const double mu); static void MinBLEICSetBarrierDecay(CMinBLEICStateShell &state,const double mudecay); static void MinASACreate(const int n,double &x[],double &bndl[],double &bndu[],CMinASAStateShell &state); static void MinASACreate(double &x[],double &bndl[],double &bndu[],CMinASAStateShell &state); static void MinASASetCond(CMinASAStateShell &state,const double epsg,const double epsf,const double epsx,const int maxits); static void MinASASetXRep(CMinASAStateShell &state,const bool needxrep); static void MinASASetAlgorithm(CMinASAStateShell &state,const int algotype); static void MinASASetStpMax(CMinASAStateShell &state,const double stpmax); 
static bool MinASAIteration(CMinASAStateShell &state); static void MinASAOptimize(CMinASAStateShell &state,CNDimensional_Grad &grad,CNDimensional_Rep &rep,bool rep_status,CObject &obj); static void MinASAResults(CMinASAStateShell &state,double &x[],CMinASAReportShell &rep); static void MinASAResultsBuf(CMinASAStateShell &state,double &x[],CMinASAReportShell &rep); static void MinASARestartFrom(CMinASAStateShell &state,double &x[],double &bndl[],double &bndu[]); //--- functions of package solvers //--- polynomial root finding static void PolynomialSolve(CRowDouble &a,int n,CRowComplex &x,CPolynomialSolverReport &rep); //--- dense solver static void RMatrixSolve(CMatrixDouble &a,const int n,double &b[],int &info,CDenseSolverReportShell &rep,double &x[]); static void RMatrixSolveM(CMatrixDouble &a,const int n,CMatrixDouble &b,const int m,const bool rfs,int &info,CDenseSolverReportShell &rep,CMatrixDouble &x); static void RMatrixLUSolve(CMatrixDouble &lua,int &p[],const int n,double &b[],int &info,CDenseSolverReportShell &rep,double &x[]); static void RMatrixLUSolveM(CMatrixDouble &lua,int &p[],const int n,CMatrixDouble &b,const int m,int &info,CDenseSolverReportShell &rep,CMatrixDouble &x); static void RMatrixMixedSolve(CMatrixDouble &a,CMatrixDouble &lua,int &p[],const int n,double &b[],int &info,CDenseSolverReportShell &rep,double &x[]); static void RMatrixMixedSolveM(CMatrixDouble &a,CMatrixDouble &lua,int &p[],const int n,CMatrixDouble &b,const int m,int &info,CDenseSolverReportShell &rep,CMatrixDouble &x); static void CMatrixSolveM(CMatrixComplex &a,const int n,CMatrixComplex &b,const int m,const bool rfs,int &info,CDenseSolverReportShell &rep,CMatrixComplex &x); static void CMatrixSolve(CMatrixComplex &a,const int n,complex &b[],int &info,CDenseSolverReportShell &rep,complex &x[]); static void CMatrixLUSolveM(CMatrixComplex &lua,int &p[],const int n,CMatrixComplex &b,const int m,int &info,CDenseSolverReportShell &rep,CMatrixComplex &x); static void 
CMatrixLUSolve(CMatrixComplex &lua,int &p[],const int n,complex &b[],int &info,CDenseSolverReportShell &rep,complex &x[]); static void CMatrixMixedSolveM(CMatrixComplex &a,CMatrixComplex &lua,int &p[],const int n,CMatrixComplex &b,const int m,int &info,CDenseSolverReportShell &rep,CMatrixComplex &x); static void CMatrixMixedSolve(CMatrixComplex &a,CMatrixComplex &lua,int &p[],const int n,complex &b[],int &info,CDenseSolverReportShell &rep,complex &x[]); static void SPDMatrixSolveM(CMatrixDouble &a,const int n,const bool IsUpper,CMatrixDouble &b,const int m,int &info,CDenseSolverReportShell &rep,CMatrixDouble &x); static void SPDMatrixSolve(CMatrixDouble &a,const int n,const bool IsUpper,double &b[],int &info,CDenseSolverReportShell &rep,double &x[]); static void SPDMatrixCholeskySolveM(CMatrixDouble &cha,const int n,const bool IsUpper,CMatrixDouble &b,const int m,int &info,CDenseSolverReportShell &rep,CMatrixDouble &x); static void SPDMatrixCholeskySolve(CMatrixDouble &cha,const int n,const bool IsUpper,double &b[],int &info,CDenseSolverReportShell &rep,double &x[]); static void HPDMatrixSolveM(CMatrixComplex &a,const int n,const bool IsUpper,CMatrixComplex &b,const int m,int &info,CDenseSolverReportShell &rep,CMatrixComplex &x); static void HPDMatrixSolve(CMatrixComplex &a,const int n,const bool IsUpper,complex &b[],int &info,CDenseSolverReportShell &rep,complex &x[]); static void HPDMatrixCholeskySolveM(CMatrixComplex &cha,const int n,const bool IsUpper,CMatrixComplex &b,const int m,int &info,CDenseSolverReportShell &rep,CMatrixComplex &x); static void HPDMatrixCholeskySolve(CMatrixComplex &cha,const int n,const bool IsUpper,complex &b[],int &info,CDenseSolverReportShell &rep,complex &x[]); static void RMatrixSolveLS(CMatrixDouble &a,const int nrows,const int ncols,double &b[],const double threshold,int &info,CDenseSolverLSReportShell &rep,double &x[]); //--- sparse linear solver static void SparseSPDSolveSKS(CSparseMatrix &a,bool IsUpper,CRowDouble &b,CRowDouble 
&x,CSparseSolverReport &rep); static void SparseSPDSolve(CSparseMatrix &a,bool IsUpper,CRowDouble &b,CRowDouble &x,CSparseSolverReport &rep); static void SparseSPDCholeskySolve(CSparseMatrix &a,bool IsUpper,CRowDouble &b,CRowDouble &x,CSparseSolverReport &rep); static void SparseSolve(CSparseMatrix &a,CRowDouble &b,CRowDouble &x,CSparseSolverReport &rep); static void SparseLUSolve(CSparseMatrix &a,CRowInt &p,CRowInt &q,CRowDouble &b,CRowDouble &x,CSparseSolverReport &rep); //--- sparse symmetric linear solver static void SparseSolveSymmetricGMRES(CSparseMatrix &a,bool IsUpper,CRowDouble &b,int k,double epsf,int maxits,CRowDouble &x,CSparseSolverReport &rep); static void SparseSolveGMRES(CSparseMatrix &a,CRowDouble &b,int k,double epsf,int maxits,CRowDouble &x,CSparseSolverReport &rep); static void SparseSolverCreate(int n,CSparseSolverState &state); static void SparseSolverSetAlgoGMRES(CSparseSolverState &state,int k); static void SparseSolverSetStartingPoint(CSparseSolverState &state,CRowDouble &x); static void SparseSolverSetCond(CSparseSolverState &state,double epsf,int maxits); static void SparseSolverSolveSymmetric(CSparseSolverState &state,CSparseMatrix &a,bool IsUpper,CRowDouble &b); static void SparseSolverSolve(CSparseSolverState &state,CSparseMatrix &a,CRowDouble &b); static void SparseSolverResults(CSparseSolverState &state,CRowDouble &x,CSparseSolverReport &rep); static void SparseSolverSetXRep(CSparseSolverState &state,bool needxrep); static void SparseSolverOOCStart(CSparseSolverState &state,CRowDouble &b); static bool SparseSolverOOCContinue(CSparseSolverState &state); static void SparseSolverOOCGetRequestInfo(CSparseSolverState &state,int &requesttype); static void SparseSolverOOCGetRequestData(CSparseSolverState &state,CRowDouble &x); static void SparseSolverOOCGetRequestData1(CSparseSolverState &state,double &v); static void SparseSolverOOCSendResult(CSparseSolverState &state,CRowDouble &ax); static void SparseSolverOOCStop(CSparseSolverState 
&state,CRowDouble &x,CSparseSolverReport &rep); static void SparseSolverRequestTermination(CSparseSolverState &state); //--- linear CG Solver static void LinCGCreate(int n,CLinCGState &state); static void LinCGSetStartingPoint(CLinCGState &state,CRowDouble &x); static void LinCGSetPrecUnit(CLinCGState &state); static void LinCGSetPrecDiag(CLinCGState &state); static void LinCGSetCond(CLinCGState &state,double epsf,int maxits); static void LinCGSolveSparse(CLinCGState &state,CSparseMatrix &a,bool IsUpper,CRowDouble &b); static void LinCGResult(CLinCGState &state,CRowDouble &x,CLinCGReport &rep); static void LinCGSetRestartFreq(CLinCGState &state,int srf); static void LinCGSetRUpdateFreq(CLinCGState &state,int freq); static void LinCGSetXRep(CLinCGState &state,bool needxrep); //--- linear LSQR Solver static void LinLSQRCreate(int m,int n,CLinLSQRState &state); static void LinLSQRCreateBuf(int m,int n,CLinLSQRState &state); static void LinLSQRSetPrecUnit(CLinLSQRState &state); static void LinLSQRSetPrecDiag(CLinLSQRState &state); static void LinLSQRSetLambdaI(CLinLSQRState &state,double lambdai); static void LinLSQRSolveSparse(CLinLSQRState &state,CSparseMatrix &a,CRowDouble &b); static void LinLSQRSetCond(CLinLSQRState &state,double epsa,double epsb,int maxits); static void LinLSQRResults(CLinLSQRState &state,CRowDouble &x,CLinLSQRReport &rep); static void LinLSQRSetXRep(CLinLSQRState &state,bool needxrep); static int LinLSQRPeekIterationsCount(CLinLSQRState &s); static void LinLSQRRequestTermination(CLinLSQRState &state); //--- solving systems of nonlinear equations static void NlEqCreateLM(const int n,const int m,double &x[],CNlEqStateShell &state); static void NlEqCreateLM(const int m,double &x[],CNlEqStateShell &state); static void NlEqSetCond(CNlEqStateShell &state,const double epsf,const int maxits); static void NlEqSetXRep(CNlEqStateShell &state,const bool needxrep); static void NlEqSetStpMax(CNlEqStateShell &state,const double stpmax); static bool 
NlEqIteration(CNlEqStateShell &state); static void NlEqSolve(CNlEqStateShell &state,CNDimensional_Func &func,CNDimensional_Jac &jac,CNDimensional_Rep &rep,bool rep_status,CObject &obj); static void NlEqResults(CNlEqStateShell &state,double &x[],CNlEqReportShell &rep); static void NlEqResultsBuf(CNlEqStateShell &state,double &x[],CNlEqReportShell &rep); static void NlEqRestartFrom(CNlEqStateShell &state,double &x[]); //--- functions of package specialfunctions //--- gamma function static double GammaFunction(const double x); static double LnGamma(const double x,double &sgngam); //--- normal distribution static double ErrorFunction(const double x); static double ErrorFunctionC(const double x); static double NormalDistribution(const double x); static double NormalPDF(const double x); static double NormalCDF(const double x); static double InvErF(const double e); static double InvNormalDistribution(double y0); static double InvNormalCDF(const double y0); static double BivariateNormalPDF(const double x,const double y,const double rho); static double BivariateNormalCDF(double x,double y,const double rho); //--- incomplete gamma function static double IncompleteGamma(const double a,const double x); static double IncompleteGammaC(const double a,const double x); static double InvIncompleteGammaC(const double a,const double y0); //--- airy function static void Airy(const double x,double &ai,double &aip,double &bi,double &bip); //--- Bessel function static double BesselJ0(const double x); static double BesselJ1(const double x); static double BesselJN(const int n,const double x); static double BesselY0(const double x); static double BesselY1(const double x); static double BesselYN(const int n,const double x); static double BesselI0(const double x); static double BesselI1(const double x); static double BesselK0(const double x); static double BesselK1(const double x); static double BesselKN(const int nn,const double x); //--- beta function static double Beta(const double a,const 
double b); static double IncompleteBeta(const double a,const double b,const double x); static double InvIncompleteBeta(const double a,const double b,double y); //--- binomial distribution static double BinomialDistribution(const int k,const int n,const double p); static double BinomialComplDistribution(const int k,const int n,const double p); static double InvBinomialDistribution(const int k,const int n,const double y); //--- Chebyshev polynom static double ChebyshevCalculate(int r,const int n,const double x); static double ChebyshevSum(double &c[],const int r,const int n,const double x); static void ChebyshevCoefficients(const int n,double &c[]); static void FromChebyshev(double &a[],const int n,double &b[]); //--- chi-square distribution static double ChiSquareDistribution(const double v,const double x); static double ChiSquareComplDistribution(const double v,const double x); static double InvChiSquareDistribution(const double v,const double y); //--- Dawson's Integral static double DawsonIntegral(const double x); //--- elliptic integral static double EllipticIntegralK(const double m); static double EllipticIntegralKhighPrecision(const double m1); static double IncompleteEllipticIntegralK(const double phi,const double m); static double EllipticIntegralE(const double m); static double IncompleteEllipticIntegralE(const double phi,const double m); //--- exponential integral static double ExponentialIntegralEi(const double x); static double ExponentialIntegralEn(const double x,const int n); //--- F distribution functions static double FDistribution(const int a,const int b,const double x); static double FComplDistribution(const int a,const int b,const double x); static double InvFDistribution(const int a,const int b,const double y); //--- Fresnel integral static void FresnelIntegral(const double x,double &c,double &s); //--- Hermite polynomial static double HermiteCalculate(const int n,const double x); static double HermiteSum(double &c[],const int n,const double x); 
static void HermiteCoefficients(const int n,double &c[]); //--- Jacobian elliptic functions static void JacobianEllipticFunctions(const double u,const double m,double &sn,double &cn,double &dn,double &ph); //--- Laguerre polynomial static double LaguerreCalculate(const int n,const double x); static double LaguerreSum(double &c[],const int n,const double x); static void LaguerreCoefficients(const int n,double &c[]); //--- Legendre polynomial static double LegendreCalculate(const int n,const double x); static double LegendreSum(double &c[],const int n,const double x); static void LegendreCoefficients(const int n,double &c[]); //--- Poisson distribution static double PoissonDistribution(const int k,const double m); static double PoissonComplDistribution(const int k,const double m); static double InvPoissonDistribution(const int k,const double y); //--- psi function static double Psi(const double x); //--- Student's t distribution static double StudenttDistribution(const int k,const double t); static double InvStudenttDistribution(const int k,const double p); //--- trigonometric integrals static void SineCosineIntegrals(const double x,double &si,double &ci); static void HyperbolicSineCosineIntegrals(const double x,double &shi,double &chi); //--- functions of package statistics //--- basic statistics methods static void SampleMoments(const double &x[],const int n,double &mean,double &variance,double &skewness,double &kurtosis); static void SampleMoments(const double &x[],double &mean,double &variance,double &skewness,double &kurtosis); static double SampleMean(CRowDouble &x,int n); static double SampleMean(CRowDouble &x); static double SampleVariance(CRowDouble &x,int n); static double SampleVariance(CRowDouble &x); static double SampleSkewness(CRowDouble &x,int n); static double SampleSkewness(CRowDouble &x); static double SampleKurtosis(CRowDouble &x,int n); static double SampleKurtosis(CRowDouble &x); static void SampleAdev(const double &x[],const int n,double 
&adev); static void SampleAdev(const double &x[],double &adev); static void SampleMedian(const double &x[],const int n,double &median); static void SampleMedian(const double &x[],double &median); static void SamplePercentile(const double &x[],const int n,const double p,double &v); static void SamplePercentile(const double &x[],const double p,double &v); static double Cov2(const double &x[],const double &y[],const int n); static double Cov2(const double &x[],const double &y[]); static double PearsonCorr2(const double &x[],const double &y[],const int n); static double PearsonCorr2(const double &x[],const double &y[]); static double SpearmanCorr2(const double &x[],const double &y[],const int n); static double SpearmanCorr2(const double &x[],const double &y[]); static void CovM(const CMatrixDouble &x,const int n,const int m,CMatrixDouble &c); static void CovM(const CMatrixDouble &x,CMatrixDouble &c); static void PearsonCorrM(const CMatrixDouble &x,const int n,const int m,CMatrixDouble &c); static void PearsonCorrM(CMatrixDouble &x,CMatrixDouble &c); static void SpearmanCorrM(const CMatrixDouble &x,const int n,const int m,CMatrixDouble &c); static void SpearmanCorrM(const CMatrixDouble &x,CMatrixDouble &c); static void CovM2(const CMatrixDouble &x,const CMatrixDouble &y,const int n,const int m1,const int m2,CMatrixDouble &c); static void CovM2(const CMatrixDouble &x,const CMatrixDouble &y,CMatrixDouble &c); static void PearsonCorrM2(const CMatrixDouble &x,const CMatrixDouble &y,const int n,const int m1,const int m2,CMatrixDouble &c); static void PearsonCorrM2(const CMatrixDouble &x,const CMatrixDouble &y,CMatrixDouble &c); static void SpearmanCorrM2(const CMatrixDouble &x,const CMatrixDouble &y,const int n,const int m1,const int m2,CMatrixDouble &c); static void SpearmanCorrM2(const CMatrixDouble &x,const CMatrixDouble &y,CMatrixDouble &c); static void RankData(CMatrixDouble &xy,int npoints,int nfeatures); static void RankData(CMatrixDouble &xy); static void 
RankDataCentered(CMatrixDouble &xy,int npoints,int nfeatures); static void RankDataCentered(CMatrixDouble &xy); //--- correlation tests static void PearsonCorrelationSignificance(const double r,const int n,double &bothTails,double &leftTail,double &rightTail); static void SpearmanRankCorrelationSignificance(const double r,const int n,double &bothTails,double &leftTail,double &rightTail); //--- Jarque-Bera test static void JarqueBeraTest(const double &x[],const int n,double &p); //--- Mann-Whitney U-test static void MannWhitneyUTest(const double &x[],const int n,const double &y[],const int m,double &bothTails,double &leftTail,double &rightTail); //--- sign test static void OneSampleSignTest(const double &x[],const int n,const double median,double &bothTails,double &leftTail,double &rightTail); //--- Student Tests static void StudentTest1(const double &x[],const int n,const double mean,double &bothTails,double &leftTail,double &rightTail); static void StudentTest2(const double &x[],const int n,const double &y[],const int m,double &bothTails,double &leftTail,double &rightTail); static void UnequalVarianceTest(const double &x[],const int n,const double &y[],const int m,double &bothTails,double &leftTail,double &rightTail); //--- variance tests static void FTest(const double &x[],const int n,const double &y[],const int m,double &bothTails,double &leftTail,double &rightTail); static void OneSampleVarianceTest(double &x[],int n,double variance,double &bothTails,double &leftTail,double &rightTail); //--- Wilcoxon signed-rank test static void WilcoxonSignedRankTest(const double &x[],const int n,const double e,double &bothTails,double &leftTail,double &rightTail); }; //+------------------------------------------------------------------+ //| HQRNDState initialization with random values which come from | //| standard RNG. 
|
//+------------------------------------------------------------------+
void CAlglib::HQRndRandomize(CHighQualityRandStateShell &state)
  {
//--- forward to the computational core
   CHighQualityRand::HQRndRandomize(state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| HQRNDState initialization with seed values                       |
//+------------------------------------------------------------------+
void CAlglib::HQRndSeed(const int s1,const int s2,
                        CHighQualityRandStateShell &state)
  {
//--- forward to the computational core
   CHighQualityRand::HQRndSeed(s1,s2,state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| This function generates random real number in (0,1),             |
//| not including interval boundaries                                |
//| State structure must be initialized with HQRNDRandomize() or     |
//| HQRNDSeed().                                                     |
//+------------------------------------------------------------------+
double CAlglib::HQRndUniformR(CHighQualityRandStateShell &state)
  {
   return CHighQualityRand::HQRndUniformR(state.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| This function generates random integer number in [0, N)          |
//| 1. N must be less than HQRNDMax-1.                               |
//| 2. State structure must be initialized with HQRNDRandomize() or  |
//|    HQRNDSeed()                                                   |
//+------------------------------------------------------------------+
int CAlglib::HQRndUniformI(CHighQualityRandStateShell &state,const int n)
  {
   return CHighQualityRand::HQRndUniformI(state.GetInnerObj(),n);
  }
//+------------------------------------------------------------------+
//| Random number generator: normal numbers                          |
//| This function generates one random number from normal            |
//| distribution.                                                    |
//| Its performance is equal to that of HQRNDNormal2()               |
//| State structure must be initialized with HQRNDRandomize() or     |
//| HQRNDSeed().
| //+------------------------------------------------------------------+ double CAlglib::HQRndNormal(CHighQualityRandStateShell &state) { return(CHighQualityRand::HQRndNormal(state.GetInnerObj())); } //+------------------------------------------------------------------+ //| Random number generator: vector with random entries (normal | //| distribution) | //| This function generates N random numbers from normal | //| distribution. | //| State structure must be initialized with HQRNDRandomize() or | //| HQRNDSeed(). | //+------------------------------------------------------------------+ void CAlglib::HQRndNormalV(CHighQualityRandStateShell &state, int n,CRowDouble &x) { CHighQualityRand::HQRndNormalV(state.GetInnerObj(),n,x); } //+------------------------------------------------------------------+ //| Random number generator: vector with random entries (normal | //| distribution) | //| This function generates N random numbers from normal | //| distribution. | //| State structure must be initialized with HQRNDRandomize() or | //| HQRNDSeed(). | //+------------------------------------------------------------------+ void CAlglib::HQRndNormalV(CHighQualityRandStateShell &state, int n,vector &x) { CRowDouble X=x; HQRndNormalV(state,n,X); x=X.ToVector(); } //+------------------------------------------------------------------+ //| Random number generator: matrix with random entries (normal | //| distribution) | //| This function generates MxN random matrix. | //| State structure must be initialized with HQRNDRandomize() or | //| HQRNDSeed(). | //+------------------------------------------------------------------+ void CAlglib::HQRndNormalM(CHighQualityRandStateShell &state, int m,int n,CMatrixDouble &x) { CHighQualityRand::HQRndNormalM(state.GetInnerObj(),m,n,x); } //+------------------------------------------------------------------+ //| Random number generator: matrix with random entries (normal | //| distribution) | //| This function generates MxN random matrix. 
| //| State structure must be initialized with HQRNDRandomize() or | //| HQRNDSeed(). | //+------------------------------------------------------------------+ void CAlglib::HQRndNormalM(CHighQualityRandStateShell &state, int m,int n,matrix &x) { CMatrixDouble X=x; HQRndNormalM(state,m,n,X); x=X.ToMatrix(); } //+------------------------------------------------------------------+ //| Random number generator: random X and Y such that X^2+Y^2=1 | //| State structure must be initialized with HQRNDRandomize() or | //| HQRNDSeed(). | //+------------------------------------------------------------------+ void CAlglib::HQRndUnit2(CHighQualityRandStateShell &state, double &x,double &y) { //--- initialization x=0; y=0; //--- function call CHighQualityRand::HQRndUnit2(state.GetInnerObj(),x,y); } //+------------------------------------------------------------------+ //| Random number generator: normal numbers | //| This function generates two independent random numbers from | //| normal distribution. Its performance is equal to that of | //| HQRNDNormal() | //| State structure must be initialized with HQRNDRandomize() or | //| HQRNDSeed(). | //+------------------------------------------------------------------+ void CAlglib::HQRndNormal2(CHighQualityRandStateShell &state, double &x1,double &x2) { //--- initialization x1=0; x2=0; //--- function call CHighQualityRand::HQRndNormal2(state.GetInnerObj(),x1,x2); } //+------------------------------------------------------------------+ //| Random number generator: exponential distribution | //| State structure must be initialized with HQRNDRandomize() or | //| HQRNDSeed(). 
| //+------------------------------------------------------------------+ double CAlglib::HQRndExponential(CHighQualityRandStateShell &state, const double lambdav) { return(CHighQualityRand::HQRndExponential(state.GetInnerObj(),lambdav)); } //+------------------------------------------------------------------+ //| This function generates random number from discrete distribution| //| given by finite sample X. | //| INPUT PARAMETERS | //| State - high quality random number generator, must be | //| initialized with HQRNDRandomize() or HQRNDSeed(). | //| X - finite sample | //| N - number of elements to use, N>=1 | //| RESULT | //| this function returns one of the X[i] for random i=0..N-1 | //+------------------------------------------------------------------+ double CAlglib::HQRndDiscrete(CHighQualityRandStateShell &state, int n,CRowDouble &x) { return(CHighQualityRand::HQRndDiscrete(state.GetInnerObj(),n,x)); } //+------------------------------------------------------------------+ //| This function generates random number from discrete distribution| //| given by finite sample X. | //| INPUT PARAMETERS | //| State - high quality random number generator, must be | //| initialized with HQRNDRandomize() or HQRNDSeed(). | //| X - finite sample | //| N - number of elements to use, N>=1 | //| RESULT | //| this function returns one of the X[i] for random i=0..N-1 | //+------------------------------------------------------------------+ double CAlglib::HQRndDiscrete(CHighQualityRandStateShell &state, int n,vector &x) { return(CHighQualityRand::HQRndDiscrete(state.GetInnerObj(),n,x)); } //+------------------------------------------------------------------+ //| This function generates random number from continuous | //| distribution given by finite sample X. | //| INPUT PARAMETERS | //| State - high quality random number generator, must be | //| initialized with HQRNDRandomize() or HQRNDSeed(). 
| //| X - finite sample, array[N] (can be larger, in this | //| case only leading N elements are used). THIS ARRAY | //| MUST BE SORTED BY ASCENDING. | //| N - number of elements to use, N>=1 | //| RESULT | //| this function returns random number from continuous | //| distribution which tries to approximate X as mush as possible. | //| min(X)<=Result<=max(X). | //+------------------------------------------------------------------+ double CAlglib::HQRndContinuous(CHighQualityRandStateShell &state, int n,CRowDouble &x) { return(CHighQualityRand::HQRndContinuous(state.GetInnerObj(),n,x)); } //+------------------------------------------------------------------+ //| This function generates random number from continuous | //| distribution given by finite sample X. | //| INPUT PARAMETERS | //| State - high quality random number generator, must be | //| initialized with HQRNDRandomize() or HQRNDSeed(). | //| X - finite sample, array[N] (can be larger, in this | //| case only leading N elements are used). THIS ARRAY | //| MUST BE SORTED BY ASCENDING. | //| N - number of elements to use, N>=1 | //| RESULT | //| this function returns random number from continuous | //| distribution which tries to approximate X as mush as possible. | //| min(X)<=Result<=max(X). | //+------------------------------------------------------------------+ double CAlglib::HQRndContinuous(CHighQualityRandStateShell &state, int n,vector &x) { return(CHighQualityRand::HQRndContinuous(state.GetInnerObj(),n,x)); } //+------------------------------------------------------------------+ //| This function serializes data structure to string. 
|
//| Important properties of s_out:                                   |
//| * it contains alphanumeric characters, dots, underscores, minus  |
//|   signs                                                          |
//| * these symbols are grouped into words, which are separated by   |
//|   spaces and Windows-style (CR+LF) newlines                      |
//| * although serializer uses spaces and CR+LF as separators, you   |
//|   can replace any separator character by arbitrary combination   |
//|   of spaces, tabs, Windows or Unix newlines. It allows flexible  |
//|   reformatting of the string in case you want to include it into |
//|   a text or XML file. But you should not insert separators into  |
//|   the middle of the "words" nor should you change the case of    |
//|   letters.                                                       |
//| * s_out can be freely moved between 32-bit and 64-bit systems,   |
//|   little and big endian machines, and so on. You can serialize   |
//|   the structure on a 32-bit machine and unserialize it on a      |
//|   64-bit one (or vice versa), or serialize it on SPARC and       |
//|   unserialize on x86. You can also serialize it in the C# version|
//|   of ALGLIB and unserialize in the C++ one, and vice versa.      |
//+------------------------------------------------------------------+
void CAlglib::KDTreeSerialize(CKDTreeShell &obj,string &s_out)
  {
   //--- serializer object
   CSerializer s;
   //--- first pass: size allocation
   s.Alloc_Start();
   CNearestNeighbor::KDTreeAlloc(s,obj.GetInnerObj());
   //--- second pass: serialization to string
   s.SStart_Str();
   CNearestNeighbor::KDTreeSerialize(s,obj.GetInnerObj());
   s.Stop();
   //--- get result
   s_out=s.Get_String();
  }
//+------------------------------------------------------------------+
//| This function unserializes data structure from string.           |
//+------------------------------------------------------------------+
void CAlglib::KDTreeUnserialize(string s_in,CKDTreeShell &obj)
  {
   //--- serializer object reading from the string
   CSerializer s;
   s.UStart_Str(s_in);
   //--- function call
   CNearestNeighbor::KDTreeUnserialize(s,obj.GetInnerObj());
   s.Stop();
  }
//+------------------------------------------------------------------+
//| KD-tree creation                                                 |
//| This subroutine creates KD-tree from set of X-values and optional|
//| Y-values                                                         |
//| INPUT PARAMETERS                                                 |
//|     XY      -   dataset, array[0..N-1, 0..NX+NY-1]. One row      |
//|                 corresponds to one point. First NX columns       |
//|                 contain X-values, next NY (NY may be zero)       |
//|                 columns may contain associated Y-values          |
//|     N       -   number of points, N>=1                           |
//|     NX      -   space dimension, NX>=1.                          |
//|     NY      -   number of optional Y-values, NY>=0.              |
//|     NormType-   norm type:                                       |
//|                 * 0 denotes infinity-norm                        |
//|                 * 1 denotes 1-norm                               |
//|                 * 2 denotes 2-norm (Euclidean norm)              |
//| OUTPUT PARAMETERS                                                |
//|     KDT     -   KD-tree                                          |
//| NOTES                                                            |
//| 1. KD-tree creation has O(N*logN) complexity and                 |
//|    O(N*(2*NX+NY)) memory requirements.                           |
//| 2. Although KD-trees may be used with any combination of N and   |
//|    NX, they are more efficient than brute-force search only when |
//|    N >> 4^NX. So they are most useful in low-dimensional tasks   |
//|    (NX=2, NX=3). NX=1 is another inefficient case, because       |
//|    simple binary search (without additional structures) is       |
//|    much more efficient in such tasks than KD-trees.              |
//+------------------------------------------------------------------+
void CAlglib::KDTreeBuild(CMatrixDouble &xy,const int n,const int nx,
                          const int ny,const int normtype,CKDTreeShell &kdt)
  {
   CNearestNeighbor::KDTreeBuild(xy,n,nx,ny,normtype,kdt.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| KD-tree creation                                                 |
//| Same as KDTreeBuild() above, except that the number of points N  |
//| is taken from the number of rows of XY.                          |
//| INPUT PARAMETERS                                                 |
//|     XY      -   dataset, array[0..N-1, 0..NX+NY-1]. 
|
//|                 One row corresponds to one point. First NX       |
//|                 columns contain X-values, next NY (NY may be     |
//|                 zero) columns may contain associated Y-values.   |
//|     NX      -   space dimension, NX>=1.                          |
//|     NY      -   number of optional Y-values, NY>=0.              |
//|     NormType-   norm type:                                       |
//|                 * 0 denotes infinity-norm                        |
//|                 * 1 denotes 1-norm                               |
//|                 * 2 denotes 2-norm (Euclidean norm)              |
//| OUTPUT PARAMETERS                                                |
//|     KDT     -   KD-tree                                          |
//| NOTES: see KDTreeBuild() above for complexity and usage notes.   |
//+------------------------------------------------------------------+
void CAlglib::KDTreeBuild(CMatrixDouble &xy,const int nx,const int ny,
                          const int normtype,CKDTreeShell &kdt)
  {
   //--- number of points is taken from the matrix size
   int n=(int)CAp::Rows(xy);
   //--- function call
   CNearestNeighbor::KDTreeBuild(xy,n,nx,ny,normtype,kdt.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| KD-tree creation                                                 |
//| This subroutine creates KD-tree from set of X-values, integer    |
//| tags and optional Y-values                                       |
//| INPUT PARAMETERS                                                 |
//|     XY      -   dataset, array[0..N-1, 0..NX+NY-1]. One row      |
//|                 corresponds to one point. First NX columns       |
//|                 contain X-values, next NY (NY may be zero)       |
//|                 columns may contain associated Y-values          |
//|     Tags    -   tags, array[0..N-1], contains integer tags       |
//|                 associated with points.                          |
//|     N       -   number of points, N>=1                           |
//|     NX      -   space dimension, NX>=1.                          |
//|     NY      -   number of optional Y-values, NY>=0.              |
//|     NormType-   norm type:                                       |
//|                 * 0 denotes infinity-norm                        |
//|                 * 1 denotes 1-norm                               |
//|                 * 2 denotes 2-norm (Euclidean norm)              |
//| OUTPUT PARAMETERS                                                |
//|     KDT     -   KD-tree                                          |
//| NOTES                                                            |
//| 1. KD-tree creation has O(N*logN) complexity and                 |
//|    O(N*(2*NX+NY)) memory requirements.                           |
//| 2. Although KD-trees may be used with any combination of N and   |
//|    NX, they are more efficient than brute-force search only when |
//|    N >> 4^NX. So they are most useful in low-dimensional tasks   |
//|    (NX=2, NX=3). NX=1 is another inefficient case, because simple|
//|    binary search (without additional structures) is much more    |
//|    efficient in such tasks than KD-trees.                        |
//+------------------------------------------------------------------+
void CAlglib::KDTreeBuildTagged(CMatrixDouble &xy,int &tags[],
                                const int n,const int nx,
                                const int ny,const int normtype,
                                CKDTreeShell &kdt)
  {
   CNearestNeighbor::KDTreeBuildTagged(xy,tags,n,nx,ny,normtype,kdt.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Same as the overload above, accepting tags as CRowInt.           |
//+------------------------------------------------------------------+
void CAlglib::KDTreeBuildTagged(CMatrixDouble &xy,CRowInt &tags,
                                const int n,const int nx,
                                const int ny,const int normtype,
                                CKDTreeShell &kdt)
  {
   CNearestNeighbor::KDTreeBuildTagged(xy,tags,n,nx,ny,normtype,kdt.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| KD-tree creation                                                 |
//| Same as KDTreeBuildTagged() above, except that the number of     |
//| points N is taken from the number of rows of XY, which must      |
//| match the length of Tags.                                        |
//+------------------------------------------------------------------+
void CAlglib::KDTreeBuildTagged(CMatrixDouble &xy,int &tags[],
                                const int nx,const int ny,
                                const int normtype,CKDTreeShell &kdt)
  {
   //--- check that XY and Tags agree in size
   if((CAp::Rows(xy)!=CAp::Len(tags)))
     {
      Print(__FUNCTION__+": looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return;
     }
   //--- number of points is taken from the matrix size
   int n=(int)CAp::Rows(xy);
   //--- function call
   CNearestNeighbor::KDTreeBuildTagged(xy,tags,n,nx,ny,normtype,kdt.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| Same as the overload above, accepting tags as CRowInt.           |
//+------------------------------------------------------------------+
void CAlglib::KDTreeBuildTagged(CMatrixDouble &xy,CRowInt &tags,
                                const int nx,const int ny,
                                const int normtype,CKDTreeShell &kdt)
  {
   //--- check that XY and Tags agree in size
   if((CAp::Rows(xy)!=CAp::Len(tags)))
     {
      Print(__FUNCTION__+": looks like one of arguments has wrong size");
      CAp::exception_happened=true;
      return;
     }
   //--- number of points is taken from the matrix size
   int n=(int)CAp::Rows(xy);
   //--- function call
   CNearestNeighbor::KDTreeBuildTagged(xy,tags,n,nx,ny,normtype,kdt.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| K-NN query: K nearest neighbors                                  |
//| INPUT PARAMETERS                                                 |
//|     KDT         -   KD-tree                                      |
//|     X           -   point, array[0..NX-1]. 
|
//|     K           -   number of neighbors to return, K>=1          |
//|     SelfMatch   -   whether self-matches are allowed:            |
//|                     * if True, nearest neighbor may be the point |
//|                       itself (if it exists in original dataset)  |
//|                     * if False, then only points with non-zero   |
//|                       distance are returned                      |
//|                     * if not given, considered True              |
//| RESULT                                                           |
//|     number of actual neighbors found (either K or N, if K>N).    |
//| This subroutine performs query and stores its result in the      |
//| internal structures of the KD-tree. You can use the following    |
//| subroutines to obtain these results:                             |
//| * KDTreeQueryResultsX() to get X-values                          |
//| * KDTreeQueryResultsXY() to get X- and Y-values                  |
//| * KDTreeQueryResultsTags() to get tag values                     |
//| * KDTreeQueryResultsDistances() to get distances                 |
//+------------------------------------------------------------------+
int CAlglib::KDTreeQueryKNN(CKDTreeShell &kdt,double &x[],
                            const int k,const bool selfmatch=true)
  {
   return(CNearestNeighbor::KDTreeQueryKNN(kdt.GetInnerObj(),x,k,selfmatch));
  }
//+------------------------------------------------------------------+
//| K-NN query: K nearest neighbors                                  |
//| Same as the overload above, accepting the point as 'vector'.     |
//+------------------------------------------------------------------+
int CAlglib::KDTreeQueryKNN(CKDTreeShell &kdt,vector &x,
                            const int k,const bool selfmatch)
  {
   CRowDouble X=x;
   return(CNearestNeighbor::KDTreeQueryKNN(kdt.GetInnerObj(),X,k,selfmatch));
  }
//+------------------------------------------------------------------+
//| Same as the overload above, accepting the point as CRowDouble.   |
//+------------------------------------------------------------------+
int CAlglib::KDTreeQueryKNN(CKDTreeShell &kdt,CRowDouble &x,
                            const int k,const bool selfmatch=true)
  {
   return(CNearestNeighbor::KDTreeQueryKNN(kdt.GetInnerObj(),x,k,selfmatch));
  }
//+------------------------------------------------------------------+
//| K-NN query: K nearest neighbors, using external thread-local     |
//| buffer.                                                          |
//| You can call this function from multiple threads for the same    |
//| kd-tree instance, assuming that different instances of the       |
//| buffer object are passed to different threads.                   |
//| INPUT PARAMETERS                                                 |
//|     KDT         -   kd-tree                                      |
//|     Buf         -   request buffer object created for this       |
//|                     particular instance of kd-tree structure     |
//|                     with KDTreeCreateRequestBuffer() function.   |
//|     X           -   point, array[0..NX-1].                       |
//|     K           -   number of neighbors to return, K>=1          |
//|     SelfMatch   -   whether self-matches are allowed:            |
//|                     * if True, nearest neighbor may be the point |
//|                       itself (if it exists in original dataset)  |
//|                     * if False, then only points with non-zero   |
//|                       distance are returned                      |
//|                     * if not given, considered True              |
//| RESULT                                                           |
//|     number of actual neighbors found (either K or N, if K>N).    |
//| This subroutine performs query and stores its result in the      |
//| internal structures of the buffer object. You can use the        |
//| following subroutines to obtain these results (pay attention to  |
//| "Ts" in their names):                                            |
//| * KDTreeTsQueryResultsX() to get X-values                        |
//| * KDTreeTsQueryResultsXY() to get X- and Y-values                |
//| * KDTreeTsQueryResultsTags() to get tag values                   |
//| * KDTreeTsQueryResultsDistances() to get distances               |
//| IMPORTANT: kd-tree buffer should be used only with the KD-tree   |
//| object which was used to initialize the buffer. Any attempt to   |
//| use the buffer with a different object is dangerous - you may    |
//| get an integrity check failure (exception) because sizes of      |
//| internal arrays do not fit to dimensions of the KD-tree          |
//| structure.                                                       |
//+------------------------------------------------------------------+
int CAlglib::KDTreeTsQueryKNN(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,
                              double &x[],const int k,const bool selfmatch)
  {
   CRowDouble X=x;
   return(CNearestNeighbor::KDTreeTsQueryKNN(kdt.GetInnerObj(),buf.GetInnerObj(),X,k,selfmatch));
  }
//+------------------------------------------------------------------+
//| Same as the overload above, accepting the point as 'vector'.     |
//+------------------------------------------------------------------+
int CAlglib::KDTreeTsQueryKNN(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,
                              vector &x,const int k,const bool selfmatch)
  {
   CRowDouble X=x;
   return(CNearestNeighbor::KDTreeTsQueryKNN(kdt.GetInnerObj(),buf.GetInnerObj(),X,k,selfmatch));
  }
//+------------------------------------------------------------------+
//| Same as the overload above, accepting the point as CRowDouble.   |
//+------------------------------------------------------------------+
int CAlglib::KDTreeTsQueryKNN(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,
                              CRowDouble &x,const int k,const bool selfmatch)
  {
   return(CNearestNeighbor::KDTreeTsQueryKNN(kdt.GetInnerObj(),buf.GetInnerObj(),x,k,selfmatch));
  }
//+------------------------------------------------------------------+
//| R-NN query: all points within R-sphere centered at X             |
//| INPUT PARAMETERS                                                 |
//|     KDT         -   KD-tree                                      |
//|     X           -   point, array[0..NX-1]. 
|
//|     R           -   radius of sphere (in corresponding norm), R>0|
//|     SelfMatch   -   whether self-matches are allowed:            |
//|                     * if True, nearest neighbor may be the point |
//|                       itself (if it exists in original dataset)  |
//|                     * if False, then only points with non-zero   |
//|                       distance are returned                      |
//|                     * if not given, considered True              |
//| RESULT                                                           |
//|     number of neighbors found, >=0                               |
//| This subroutine performs query and stores its result in the      |
//| internal structures of the KD-tree. You can use the following    |
//| subroutines to obtain actual results:                            |
//| * KDTreeQueryResultsX() to get X-values                          |
//| * KDTreeQueryResultsXY() to get X- and Y-values                  |
//| * KDTreeQueryResultsTags() to get tag values                     |
//| * KDTreeQueryResultsDistances() to get distances                 |
//+------------------------------------------------------------------+
int CAlglib::KDTreeQueryRNN(CKDTreeShell &kdt,double &x[],
                            const double r,const bool selfmatch)
  {
   return(CNearestNeighbor::KDTreeQueryRNN(kdt.GetInnerObj(),x,r,selfmatch));
  }
//+------------------------------------------------------------------+
//| R-NN query: all points within R-sphere centered at X             |
//| Same as the overload above, accepting the point as 'vector'.     |
//+------------------------------------------------------------------+
int CAlglib::KDTreeQueryRNN(CKDTreeShell &kdt,vector &x,
                            const double r,const bool selfmatch)
  {
   CRowDouble X=x;
   return(CNearestNeighbor::KDTreeQueryRNN(kdt.GetInnerObj(),X,r,selfmatch));
  }
//+------------------------------------------------------------------+
//| Same as the overload above, accepting the point as CRowDouble.   |
//+------------------------------------------------------------------+
int CAlglib::KDTreeQueryRNN(CKDTreeShell &kdt,CRowDouble &x,
                            const double r,const bool selfmatch)
  {
   return(CNearestNeighbor::KDTreeQueryRNN(kdt.GetInnerObj(),x,r,selfmatch));
  }
//+------------------------------------------------------------------+
//| R-NN query: all points within R-sphere centered at X, no ordering|
//| by distance as indicated by "U" suffix (faster than ordered      |
//| query, for large queries - significantly faster).                |
//| IMPORTANT: this function can not be used in multithreaded code   |
//|            because it uses internal temporary buffer of kd-tree  |
//|            object, which can not be shared between multiple      |
//|            threads. If you want to perform parallel requests,    |
//|            use function which uses external request buffer:      |
//|            KDTreeTsQueryRNN() ("Ts" stands for "thread-safe").   |
//| INPUT PARAMETERS and RESULT: same as KDTreeQueryRNN() above.     |
//| As indicated by "U" suffix, this function returns unordered      |
//| results.                                                         |
//+------------------------------------------------------------------+
int CAlglib::KDTreeQueryRNNU(CKDTreeShell &kdt,double &x[],const double r,bool selfmatch)
  {
   return(CNearestNeighbor::KDTreeQueryRNNU(kdt.GetInnerObj(),x,r,selfmatch));
  }
//+------------------------------------------------------------------+
//| Same as the overload above, accepting the point as 'vector'.     |
//+------------------------------------------------------------------+
int CAlglib::KDTreeQueryRNNU(CKDTreeShell &kdt,vector &x,const double r,bool selfmatch)
  {
   CRowDouble X=x;
   return(CNearestNeighbor::KDTreeQueryRNNU(kdt.GetInnerObj(),X,r,selfmatch));
  }
//+------------------------------------------------------------------+
//| Same as the overload above, accepting the point as CRowDouble.   |
//+------------------------------------------------------------------+
int CAlglib::KDTreeQueryRNNU(CKDTreeShell &kdt,CRowDouble &x,const double r,bool selfmatch)
  {
   return(CNearestNeighbor::KDTreeQueryRNNU(kdt.GetInnerObj(),x,r,selfmatch));
  }
//+------------------------------------------------------------------+
//| R-NN query: all points within R-sphere centered at X, using      |
//| external thread-local buffer, sorted by distance between point   |
//| and X (by ascending)                                             |
//| You can call this function from 
multiple threads for the same
//| kd-tree instance, assuming that different instances of the       |
//| buffer object are passed to different threads.                   |
//| NOTE: it is also possible to perform unordered queries by means  |
//| of KDTreeQueryRNNU() and KDTreeTsQueryRNNU() functions. Such     |
//| queries are faster because we do not have to use heap structure  |
//| for sorting.                                                     |
//| INPUT PARAMETERS                                                 |
//|     KDT         -   KD-tree                                      |
//|     Buf         -   request buffer object created for this       |
//|                     particular instance of kd-tree structure     |
//|                     with KDTreeCreateRequestBuffer() function.   |
//|     X           -   point, array[0..NX-1].                       |
//|     R           -   radius of sphere (in corresponding norm), R>0|
//|     SelfMatch   -   whether self-matches are allowed:            |
//|                     * if True, nearest neighbor may be the point |
//|                       itself (if it exists in original dataset)  |
//|                     * if False, then only points with non-zero   |
//|                       distance are returned                      |
//|                     * if not given, considered True              |
//| RESULT                                                           |
//|     number of neighbors found, >=0                               |
//| This subroutine performs query and stores its result in the      |
//| internal structures of the buffer object. You can use the        |
//| following subroutines to obtain these results:                   |
//| * KDTreeTsQueryResultsX() to get X-values                        |
//| * KDTreeTsQueryResultsXY() to get X- and Y-values                |
//| * KDTreeTsQueryResultsTags() to get tag values                   |
//| * KDTreeTsQueryResultsDistances() to get distances               |
//| IMPORTANT: kd-tree buffer should be used only with the KD-tree   |
//|            object which was used to initialize the buffer. Any   |
//|            attempt to use the buffer with a different object is  |
//|            dangerous - you may get an integrity check failure    |
//|            (exception) because sizes of internal arrays do not   |
//|            fit to dimensions of the KD-tree structure.           |
//+------------------------------------------------------------------+
int CAlglib::KDTreeTsQueryRNN(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,
                              double &x[],const double r,bool selfmatch)
  {
   CRowDouble X=x;
   return(CNearestNeighbor::KDTreeTsQueryRNN(kdt.GetInnerObj(),buf.GetInnerObj(),X,r,selfmatch));
  }
//+------------------------------------------------------------------+
//| Same as the overload above, accepting the point as 'vector'.     |
//+------------------------------------------------------------------+
int CAlglib::KDTreeTsQueryRNN(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,
                              vector &x,const double r,bool selfmatch)
  {
   CRowDouble X=x;
   return(CNearestNeighbor::KDTreeTsQueryRNN(kdt.GetInnerObj(),buf.GetInnerObj(),X,r,selfmatch));
  }
//+------------------------------------------------------------------+
//| Same as the overload above, accepting the point as CRowDouble.   |
//+------------------------------------------------------------------+
int CAlglib::KDTreeTsQueryRNN(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,
                              CRowDouble &x,const double r,bool selfmatch)
  {
   return(CNearestNeighbor::KDTreeTsQueryRNN(kdt.GetInnerObj(),buf.GetInnerObj(),x,r,selfmatch));
  }
//+------------------------------------------------------------------+
//| R-NN query: all points within R-sphere centered at X, using      |
//| external thread-local buffer, no ordering by distance as         |
//| indicated by "U" suffix (faster than ordered query, for large    |
//| queries - significantly faster).                                 |
//| You can call this function from multiple threads for the same    |
//| kd-tree instance, assuming that different instances of the       |
//| buffer object are passed to different threads.                   |
//| INPUT PARAMETERS and RESULT: same as KDTreeTsQueryRNN() above.   |
//| As indicated by "U" suffix, this function returns unordered      |
//| results.                                                         |
//| IMPORTANT: kd-tree buffer should be used only with the KD-tree   |
//|            object which was used to initialize the buffer. Any   |
//|            attempt to use the buffer with a different object is  |
//|            dangerous - you may get an integrity check failure    |
//|            (exception) because sizes of internal arrays do not   |
//|            fit to dimensions of the KD-tree structure.           |
//+------------------------------------------------------------------+
int CAlglib::KDTreeTsQueryRNNU(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,
                               double &x[],const double r,const bool selfmatch)
  {
   CRowDouble X=x;
   return(CNearestNeighbor::KDTreeTsQueryRNNU(kdt.GetInnerObj(),buf.GetInnerObj(),X,r,selfmatch));
  }
//+------------------------------------------------------------------+
//| Same as the overload above, accepting the point as 'vector'.     |
//+------------------------------------------------------------------+
int CAlglib::KDTreeTsQueryRNNU(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,
                               vector &x,const double r,const bool selfmatch)
  {
   CRowDouble X=x;
   return(CNearestNeighbor::KDTreeTsQueryRNNU(kdt.GetInnerObj(),buf.GetInnerObj(),X,r,selfmatch));
  }
//+------------------------------------------------------------------+
//| Same as the overload above, accepting the point as CRowDouble.   |
//+------------------------------------------------------------------+
int CAlglib::KDTreeTsQueryRNNU(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,
                               CRowDouble &x,const double r,const bool selfmatch)
  {
   return(CNearestNeighbor::KDTreeTsQueryRNNU(kdt.GetInnerObj(),buf.GetInnerObj(),x,r,selfmatch));
  }
//+------------------------------------------------------------------+
//| K-NN query: approximate K nearest neighbors                      |
//| INPUT PARAMETERS                                                 |
//|     KDT         -   KD-tree                                      |
//|     X           -   point, array[0..NX-1].                       |
//|     K           -   number of neighbors to return, K>=1          |
//|     SelfMatch   -   whether self-matches are allowed:            |
//|                     * if True, nearest neighbor may be the point |
//|                       itself (if it exists in original dataset)  |
//|                     * if False, then only points with non-zero   |
//|                       distance are returned                      |
//|                     * if not given, considered True              |
//|     Eps         -   approximation factor, Eps>=0. eps-approximate|
//|                     nearest neighbor is a neighbor whose distance|
//|                     from X is at most (1+eps) times distance of  |
//|                     true nearest neighbor.                       |
//| RESULT                                                           |
//|     number of actual neighbors found (either K or N, if K>N).    |
//| NOTES                                                            |
//|     significant performance gain may be achieved only when Eps   |
//|     is on the order of magnitude of 1 or larger. 
//| This subroutine performs query and stores its result in the      |
//| internal structures of the KD-tree. You can use the following    |
//| subroutines to obtain results:                                   |
//|   * KDTreeQueryResultsX() to get X-values                        |
//|   * KDTreeQueryResultsXY() to get X- and Y-values                |
//|   * KDTreeQueryResultsTags() to get tag values                   |
//|   * KDTreeQueryResultsDistances() to get distances               |
//+------------------------------------------------------------------+
int CAlglib::KDTreeQueryAKNN(CKDTreeShell &kdt,double &x[],
                             const int k,const bool selfmatch,
                             const double eps)
  {
//--- raw array overload: the point is forwarded directly to the core
   return(CNearestNeighbor::KDTreeQueryAKNN(kdt.GetInnerObj(),x,k,selfmatch,eps));
  }
//+------------------------------------------------------------------+
//| K-NN query: approximate K nearest neighbors (vector overload).   |
//| Same contract as the dynamic-array overload above: finds up to K |
//| eps-approximate nearest neighbors of X and stores the result in  |
//| the internal structures of the KD-tree; retrieve the result with |
//| KDTreeQueryResultsX/XY/Tags/Distances().                         |
//+------------------------------------------------------------------+
int CAlglib::KDTreeQueryAKNN(CKDTreeShell &kdt,vector &x,
                             const int k,const bool selfmatch,
                             const double eps=0)
  {
//--- NOTE(review): default eps=0 appears in this out-of-class definition
//--- only for this overload - confirm the in-class declaration does not
//--- also specify it, and that the asymmetry with the other overloads is
//--- intentional
   CRowDouble X=x;
   return(CNearestNeighbor::KDTreeQueryAKNN(kdt.GetInnerObj(),X,k,selfmatch,eps));
  }
//+------------------------------------------------------------------+
//| K-NN query: approximate K nearest neighbors, CRowDouble overload |
//| (input is forwarded without copying).                            |
//+------------------------------------------------------------------+
int CAlglib::KDTreeQueryAKNN(CKDTreeShell &kdt,CRowDouble &x,
                             const int k,const bool selfmatch,
                             const double eps)
  {
   return(CNearestNeighbor::KDTreeQueryAKNN(kdt.GetInnerObj(),x,k,selfmatch,eps));
  }
//+------------------------------------------------------------------+
//| Box query: all points within user-specified box.                 |
//| IMPORTANT: this function can not be used in multithreaded code   |
//|            because it uses internal temporary buffer of kd-tree  |
//|            object, which can not be shared between multiple      |
//|            threads. If you want to perform parallel requests,    |
//|            use function which uses external request buffer:      |
//|            KDTreeTsQueryBox() ("Ts" stands for "thread-safe").   |
//| INPUT PARAMETERS                                                 |
//|     KDT       - KD-tree                                          |
//|     BoxMin    - lower bounds, array[0..NX-1].                    |
//|     BoxMax    - upper bounds, array[0..NX-1].                    |
//| RESULT                                                           |
//|     number of actual neighbors found (in [0,N]).                 |
//| This subroutine performs query and stores its result in the      |
//| internal structures of the KD-tree.                              |
//| You can use the following subroutines to obtain these results:   |
//|   * KDTreeQueryResultsX() to get X-values                        |
//|   * KDTreeQueryResultsXY() to get X- and Y-values                |
//|   * KDTreeQueryResultsTags() to get tag values                   |
//|   * KDTreeQueryResultsDistances() returns zeros for this request |
//| NOTE: this particular query returns unordered results, because   |
//|       there is no meaningful way of ordering points. Furthermore,|
//|       no 'distance' is associated with points - it is either     |
//|       INSIDE or OUTSIDE (so request for distances will return    |
//|       zeros).                                                    |
//+------------------------------------------------------------------+
int CAlglib::KDTreeQueryBox(CKDTreeShell &kdt,double &boxmin[],double &boxmax[])
  {
//--- raw array overload: bounds are forwarded directly to the core
   return(CNearestNeighbor::KDTreeQueryBox(kdt.GetInnerObj(),boxmin,boxmax));
  }
//+------------------------------------------------------------------+
//| Box query, overload: bounds given as vectors.                    |
//+------------------------------------------------------------------+
int CAlglib::KDTreeQueryBox(CKDTreeShell &kdt,vector &boxmin,vector &boxmax)
  {
//--- copy the vectors into CRowDouble objects before delegating
   CRowDouble Min=boxmin;
   CRowDouble Max=boxmax;
   return(CNearestNeighbor::KDTreeQueryBox(kdt.GetInnerObj(),Min,Max));
  }
//+------------------------------------------------------------------+
//| Box query, overload: CRowDouble bounds (no copy).                |
//+------------------------------------------------------------------+
int CAlglib::KDTreeQueryBox(CKDTreeShell &kdt,CRowDouble &boxmin,CRowDouble &boxmax)
  {
   return(CNearestNeighbor::KDTreeQueryBox(kdt.GetInnerObj(),boxmin,boxmax));
  }
//+------------------------------------------------------------------+
//| Box query: all points within user-specified box, using           |
//| thread-local buffer.                                             |
//| You can call this function from multiple threads for same kd-tree|
//| instance, assuming that different instances of buffer object are |
//| passed to different threads.                                     |
//| INPUT PARAMETERS                                                 |
//|     KDT       - KD-tree                                          |
//|     Buf       - request buffer object created for this particular|
//|                 instance of kd-tree structure with               |
//|                 KDTreeCreateRequestBuffer() function.            |
//|     BoxMin    - lower bounds, array[0..NX-1].                    |
//|     BoxMax    - upper bounds, array[0..NX-1].                    |
//| RESULT                                                           |
//|     number of actual neighbors found (in [0,N]).                 |
//| This subroutine performs query and stores its result in the      |
//| internal structures of the buffer object. You can use following  |
//| subroutines to obtain these results (pay attention to "Ts" in    |
//| their names):                                                    |
//|   * KDTreeTsQueryResultsX() to get X-values                      |
//|   * KDTreeTsQueryResultsXY() to get X- and Y-values              |
//|   * KDTreeTsQueryResultsTags() to get tag values                 |
//|   * KDTreeTsQueryResultsDistances() returns zeros for this query |
//| NOTE: this particular query returns unordered results, because   |
//|       there is no meaningful way of ordering points. Furthermore,|
//|       no 'distance' is associated with points - it is either     |
//|       INSIDE or OUTSIDE (so request for distances will return    |
//|       zeros).                                                    |
//| IMPORTANT: kd-tree buffer should be used only with KD-tree object|
//|            which was used to initialize buffer. Any attempt to   |
//|            use buffer with different object is dangerous - you   |
//|            may get integrity check failure (exception) because   |
//|            sizes of internal arrays do not fit to dimensions of  |
//|            KD-tree structure.                                    |
//+------------------------------------------------------------------+
int CAlglib::KDTreeTsQueryBox(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,double &boxmin[],double &boxmax[])
  {
//--- raw array overload: bounds are forwarded directly to the core
   return(CNearestNeighbor::KDTreeTsQueryBox(kdt.GetInnerObj(),buf.GetInnerObj(),boxmin,boxmax));
  }
//+------------------------------------------------------------------+
//| Thread-safe box query, overload: CRowDouble bounds (no copy).    |
//+------------------------------------------------------------------+
int CAlglib::KDTreeTsQueryBox(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,CRowDouble &boxmin,CRowDouble &boxmax)
  {
   return(CNearestNeighbor::KDTreeTsQueryBox(kdt.GetInnerObj(),buf.GetInnerObj(),boxmin,boxmax));
  }
//+------------------------------------------------------------------+
//| Thread-safe box query, overload: bounds given as vectors.        |
//+------------------------------------------------------------------+
int CAlglib::KDTreeTsQueryBox(CKDTreeShell &kdt,CKDTreeRequestBufferShell &buf,vector &boxmin,vector &boxmax)
  {
//--- copy the vectors into CRowDouble objects before delegating
   CRowDouble Min=boxmin;
   CRowDouble Max=boxmax;
   return(CNearestNeighbor::KDTreeTsQueryBox(kdt.GetInnerObj(),buf.GetInnerObj(),Min,Max));
  }
//+------------------------------------------------------------------+
//| X-values from last query                                         |
//| INPUT PARAMETERS                                                 |
//|     KDT       - KD-tree                                          |
//|     X         - possibly pre-allocated buffer. If X is too small |
//|                 to store result, it is resized. If size(X) is    |
//|                 enough to store result, it is left unchanged.    |
//| OUTPUT PARAMETERS                                                |
//|     X         - rows are filled with X-values                    |
//| NOTES                                                            |
//| 1. points are ordered by distance from the query point (first =  |
//|    closest)                                                      |
//| 2. if XY is larger than required to store result, only leading   |
//|    part will be overwritten; trailing part will be left          |
//|    unchanged. So if on input XY = [[A,B],[C,D]], and result is   |
//|    [1,2], then on exit we will get XY = [[1,2],[C,D]]. This is   |
//|    done purposely to increase performance; if you want function  |
//|    to resize array according to result size, use function with   |
//|    same name and suffix 'I'.                                     |
//| SEE ALSO                                                         |
//|   * KDTreeQueryResultsXY()        X- and Y-values                |
//|   * KDTreeQueryResultsTags()      tag values                     |
//|   * KDTreeQueryResultsDistances() distances                      |
//+------------------------------------------------------------------+
void CAlglib::KDTreeQueryResultsX(CKDTreeShell &kdt,CMatrixDouble &x)
  {
   CNearestNeighbor::KDTreeQueryResultsX(kdt.GetInnerObj(),x);
  }
//+------------------------------------------------------------------+
//| X- and Y-values from last query                                  |
//| INPUT PARAMETERS                                                 |
//|     KDT       - KD-tree                                          |
//|     XY        - possibly pre-allocated buffer. If XY is too small|
//|                 to store result, it is resized. If size(XY) is   |
//|                 enough to store result, it is left unchanged.    |
//| OUTPUT PARAMETERS                                                |
//|     XY        - rows are filled with points: first NX columns    |
//|                 with X-values, next NY columns - with Y-values.  |
//| NOTES                                                            |
//| 1. points are ordered by distance from the query point (first =  |
//|    closest)                                                      |
//| 2. if XY is larger than required to store result, only leading   |
//|    part will be overwritten; trailing part will be left          |
//|    unchanged. So if on input XY = [[A,B],[C,D]], and result is   |
//|    [1,2], then on exit we will get XY = [[1,2],[C,D]]. This is   |
//|    done purposely to increase performance; if you want function  |
//|    to resize array according to result size, use function with   |
//|    same name and suffix 'I'.                                     |
//| SEE ALSO                                                         |
//|   * KDTreeQueryResultsX()         X-values                       |
//|   * KDTreeQueryResultsTags()      tag values                     |
//|   * KDTreeQueryResultsDistances() distances                      |
//+------------------------------------------------------------------+
void CAlglib::KDTreeQueryResultsXY(CKDTreeShell &kdt,CMatrixDouble &xy)
  {
   CNearestNeighbor::KDTreeQueryResultsXY(kdt.GetInnerObj(),xy);
  }
//+------------------------------------------------------------------+
//| Tags from last query                                             |
//| INPUT PARAMETERS                                                 |
//|     KDT       - KD-tree                                          |
//|     Tags      - possibly pre-allocated buffer. If it is too small|
//|                 to store result, it is resized. If its size is   |
//|                 enough to store result, it is left unchanged.    |
//| OUTPUT PARAMETERS                                                |
//|     Tags      - filled with tags associated with points,         |
//|                 or, when no tags were supplied, with zeros       |
//| NOTES                                                            |
//| 1. points are ordered by distance from the query point (first    |
//|    = closest)                                                    |
//| 2. if the buffer is larger than required to store result, only   |
//|    leading part will be overwritten; trailing part will be left  |
//|    unchanged. This is done purposely to increase performance; if |
//|    you want function to resize array according to result size,   |
//|    use function with same name and suffix 'I'.                   |
//| SEE ALSO                                                         |
//|   * KDTreeQueryResultsX()         X-values                       |
//|   * KDTreeQueryResultsXY()        X- and Y-values                |
//|   * KDTreeQueryResultsDistances() distances                      |
//+------------------------------------------------------------------+
void CAlglib::KDTreeQueryResultsTags(CKDTreeShell &kdt,int &tags[])
  {
   CNearestNeighbor::KDTreeQueryResultsTags(kdt.GetInnerObj(),tags);
  }
//+------------------------------------------------------------------+
//| Tags from last query, overload: CRowInt output buffer.           |
//+------------------------------------------------------------------+
void CAlglib::KDTreeQueryResultsTags(CKDTreeShell &kdt,CRowInt &tags)
  {
   CNearestNeighbor::KDTreeQueryResultsTags(kdt.GetInnerObj(),tags);
  }
//+------------------------------------------------------------------+
//| Distances from last query                                        |
//| INPUT PARAMETERS                                                 |
//|     KDT       - KD-tree                                          |
//|     R         - possibly pre-allocated buffer. If it is too small|
//|                 to store result, it is resized. If its size is   |
//|                 enough to store result, it is left unchanged.    |
//| OUTPUT PARAMETERS                                                |
//|     R         - filled with distances (in corresponding norm)    |
//| NOTES                                                            |
//| 1. points are ordered by distance from the query point (first    |
//|    = closest)                                                    |
//| 2. if the buffer is larger than required to store result, only   |
//|    leading part will be overwritten; trailing part will be left  |
//|    unchanged. So if on input XY = [[A,B],[C,D]], and result is   |
//|    [1,2], then on exit we will get XY = [[1,2],[C,D]]. This is   |
//|    done purposely to increase performance; if you want function  |
//|    to resize array according to result size, use function with   |
//|    same name and suffix 'I'.                                     |
//| SEE ALSO                                                         |
//|   * KDTreeQueryResultsX()    X-values                            |
//|   * KDTreeQueryResultsXY()   X- and Y-values                     |
//|   * KDTreeQueryResultsTags() tag values                          |
//+------------------------------------------------------------------+
void CAlglib::KDTreeQueryResultsDistances(CKDTreeShell &kdt,double &r[])
  {
   CNearestNeighbor::KDTreeQueryResultsDistances(kdt.GetInnerObj(),r);
  }
//+------------------------------------------------------------------+
//| Distances from last query, overload: CRowDouble output buffer.   |
//+------------------------------------------------------------------+
void CAlglib::KDTreeQueryResultsDistances(CKDTreeShell &kdt,CRowDouble &r)
  {
   CNearestNeighbor::KDTreeQueryResultsDistances(kdt.GetInnerObj(),r);
  }
//+------------------------------------------------------------------+
//| Distances from last query, overload: vector output buffer.       |
//+------------------------------------------------------------------+
void CAlglib::KDTreeQueryResultsDistances(CKDTreeShell &kdt,vector &r)
  {
//--- create variables
   CRowDouble R;
//--- function call
   CNearestNeighbor::KDTreeQueryResultsDistances(kdt.GetInnerObj(),R);
//--- copy result into the caller-supplied vector
   r=R.ToVector();
  }
//+------------------------------------------------------------------+
//| X-values from last query; 'interactive' variant for languages    |
//| like Python which support constructs like "X =                   |
//| KDTreeQueryResultsXI(KDT)" and interactive mode of interpreter.  |
//| This function allocates new array on each call, so it is         |
//| significantly slower than its 'non-interactive' counterpart, but |
//| it is more convenient when you call it from command line.        |
//+------------------------------------------------------------------+
void CAlglib::KDTreeQueryResultsXI(CKDTreeShell &kdt,CMatrixDouble &x)
  {
   CNearestNeighbor::KDTreeQueryResultsXI(kdt.GetInnerObj(),x);
  }
//+------------------------------------------------------------------+
//| XY-values from last query; 'interactive' variant for languages   |
//| like Python which support constructs like "XY =                  |
//| KDTreeQueryResultsXYI(KDT)" and interactive mode of interpreter. |
//| This function allocates new array on each call, so it is         |
//| significantly slower than its 'non-interactive' counterpart, but |
//| it is more convenient when you call it from command line.        |
//+------------------------------------------------------------------+
void CAlglib::KDTreeQueryResultsXYI(CKDTreeShell &kdt,CMatrixDouble &xy)
  {
   CNearestNeighbor::KDTreeQueryResultsXYI(kdt.GetInnerObj(),xy);
  }
//+------------------------------------------------------------------+
//| Tags from last query; 'interactive' variant for languages like   |
//| Python which support constructs like "Tags =                     |
//| KDTreeQueryResultsTagsI(KDT)" and interactive mode of            |
//| interpreter.                                                     |
//| This function allocates new array on each call, so it is         |
//| significantly slower than its 'non-interactive' counterpart, but |
//| it is more convenient when you call it from command line.        |
//+------------------------------------------------------------------+
void CAlglib::KDTreeQueryResultsTagsI(CKDTreeShell &kdt,int &tags[])
  {
   CNearestNeighbor::KDTreeQueryResultsTagsI(kdt.GetInnerObj(),tags);
  }
//+------------------------------------------------------------------+
//| Interactive tags variant, overload: CRowInt output buffer.       |
//+------------------------------------------------------------------+
void CAlglib::KDTreeQueryResultsTagsI(CKDTreeShell &kdt,CRowInt &tags)
  {
   CNearestNeighbor::KDTreeQueryResultsTagsI(kdt.GetInnerObj(),tags);
  }
//+------------------------------------------------------------------+
//| Distances from last query; 'interactive' variant for languages   |
//| like Python which support constructs like "R =                   |
//| KDTreeQueryResultsDistancesI(KDT)" and interactive mode of       |
//| interpreter.                                                     |
//| This function allocates new array on each call, so it is         |
//| significantly slower than its 'non-interactive' counterpart, but |
//| it is more convenient when you call it from command line.        |
//+------------------------------------------------------------------+
void CAlglib::KDTreeQueryResultsDistancesI(CKDTreeShell &kdt,double &r[])
  {
   CNearestNeighbor::KDTreeQueryResultsDistancesI(kdt.GetInnerObj(),r);
  }
//+------------------------------------------------------------------+
//| Interactive distances variant, overload: vector output buffer.   |
//+------------------------------------------------------------------+
void CAlglib::KDTreeQueryResultsDistancesI(CKDTreeShell &kdt,vector &r)
  {
//--- fetch into a CRowDouble, then copy into the caller's vector
   CRowDouble R;
   CNearestNeighbor::KDTreeQueryResultsDistancesI(kdt.GetInnerObj(),R);
   r=R.ToVector();
  }
//+------------------------------------------------------------------+
//| Interactive distances variant, overload: CRowDouble buffer.      |
//+------------------------------------------------------------------+
void CAlglib::KDTreeQueryResultsDistancesI(CKDTreeShell &kdt,CRowDouble &r)
  {
   CNearestNeighbor::KDTreeQueryResultsDistancesI(kdt.GetInnerObj(),r);
  }
//+------------------------------------------------------------------+
//| Optimal binary classification                                    |
//| Algorithm finds optimal (=with minimal cross-entropy) binary     |
//| partition.                                                       |
//| Internal subroutine.                                             |
//| INPUT PARAMETERS:                                                |
//|     A         - array[0..N-1], variable                          |
//|     C         - array[0..N-1], class numbers (0 or 1).           |
//|     N         - array size                                       |
//| OUTPUT PARAMETERS:                                               |
//|     Info      - completion code:                                 |
//|                 * -3, all values of A[] are same (partition is   |
//|                       impossible)                                |
//|                 * -2, one of C[] is incorrect (<0, >1)           |
//|                 * -1, incorrect parameters were passed (N<=0).   |
//|                 *  1, OK                                         |
//|     Threshold - partition boundary. Left part contains values    |
//|                 which are strictly less than Threshold. Right    |
//|                 part contains values which are greater than or   |
//|                 equal to Threshold.                              |
//|     PAL, PBL  - probabilities P(0|v<Threshold) and               |
//|                 P(1|v<Threshold)                                 |
//|     PAR, PBR  - probabilities P(0|v>=Threshold) and              |
//|                 P(1|v>=Threshold)                                |
//|     CVE       - cross-validation estimate of cross-entropy       |
//+------------------------------------------------------------------+
void CAlglib::DSOptimalSplit2(double &a[],int &c[],const int n,
                              int &info,double &threshold,
                              double &pal,double &pbl,double &par,
                              double &pbr,double &cve)
  {
//--- initialization of all output parameters before the core call
   info=0;
   threshold=0;
   pal=0;
   pbl=0;
   par=0;
   pbr=0;
   cve=0;
//--- function call
   CBdSS::DSOptimalSplit2(a,c,n,info,threshold,pal,pbl,par,pbr,cve);
  }
//+------------------------------------------------------------------+
//| Optimal binary classification, overload: CRowDouble/CRowInt      |
//| inputs. Same contract as the dynamic-array overload above.       |
//+------------------------------------------------------------------+
void CAlglib::DSOptimalSplit2(CRowDouble &a,CRowInt &c,const int n,
                              int &info,double &threshold,
                              double &pal,double &pbl,double &par,
                              double &pbr,double &cve)
  {
//--- initialization of all output parameters before the core call
   info=0;
   threshold=0;
   pal=0;
   pbl=0;
   par=0;
   pbr=0;
   cve=0;
//--- function call
   CBdSS::DSOptimalSplit2(a,c,n,info,threshold,pal,pbl,par,pbr,cve);
  }
//+------------------------------------------------------------------+
//| Optimal partition, internal subroutine. Fast version.            |
//| Accepts:                                                         |
//|     A        array[0..N-1]       array of attributes             |
//|     C        array[0..N-1]       array of class labels           |
//|     TiesBuf  array[0..N]         temporaries (ties)              |
//|     CntBuf   array[0..2*NC-1]    temporaries (counts)            |
//|     Alpha                        centering factor (0<=alpha<=1,  |
//|                                  recommended value - 0.05)       |
//|     BufR     array[0..N-1]       temporaries                     |
//|     BufI     array[0..N-1]       temporaries                     |
//| Output:                                                          |
//|     Info     error code (">0"=OK, "<0"=bad)                      |
//|     RMS      training set RMS error                              |
//|     CVRMS    leave-one-out RMS error                             |
//| Note:                                                            |
//|     content of all arrays is changed by subroutine;              |
//|     it doesn't allocate temporaries.                             |
//+------------------------------------------------------------------+
void CAlglib::DSOptimalSplit2Fast(double &a[],int &c[],int &tiesbuf[],
                                  int &cntbuf[],double &bufr[],
                                  int &bufi[],const int n,
                                  const int nc,const double alpha,
                                  int &info,double &threshold,
                                  double &rms,double &cvrms)
  {
//--- initialization of all output parameters before the core call
   info=0;
   threshold=0;
   rms=0;
   cvrms=0;
//--- function call
   CBdSS::DSOptimalSplit2Fast(a,c,tiesbuf,cntbuf,bufr,bufi,n,nc,alpha,info,threshold,rms,cvrms);
  }
//+------------------------------------------------------------------+
//| Fast optimal partition, overload: CRowDouble/CRowInt buffers.    |
//| Same contract as the dynamic-array overload above.               |
//+------------------------------------------------------------------+
void CAlglib::DSOptimalSplit2Fast(CRowDouble &a,CRowInt &c,CRowInt &tiesbuf,
                                  CRowInt &cntbuf,CRowDouble &bufr,
                                  CRowInt &bufi,const int n,
                                  const int nc,const double alpha,
                                  int &info,double &threshold,
                                  double &rms,double &cvrms)
  {
//--- initialization of all output parameters before the core call
   info=0;
   threshold=0;
   rms=0;
   cvrms=0;
//--- function call
   CBdSS::DSOptimalSplit2Fast(a,c,tiesbuf,cntbuf,bufr,bufi,n,nc,alpha,info,threshold,rms,cvrms);
  }
//+------------------------------------------------------------------+
//| This function serializes data structure to string.               |
//| Important properties of s_out:                                   |
//| * it contains alphanumeric characters, dots, underscores, minus  |
//|   signs                                                          |
//| * these symbols are grouped into words, which are separated by   |
//|   spaces and Windows-style (CR+LF) newlines                      |
//| * although serializer uses spaces and CR+LF as separators, you   |
//|   can replace any separator character by arbitrary combination of|
//|   spaces, tabs, Windows or Unix newlines. It allows flexible     |
//|   reformatting of the string in case you want to include it into |
//|   text or XML file. But you should not insert separators into the|
//|   middle of the "words" nor you should change case of letters.   |
//| * s_out can be freely moved between 32-bit and 64-bit systems,   |
//|   little and big endian machines, and so on. You can serialize   |
//|   structure on 32-bit machine and unserialize it on 64-bit one   |
//|   (or vice versa), or serialize it on SPARC and unserialize on   |
//|   x86. You can also serialize it in C# version of ALGLIB and     |
//|   unserialize in C++ one, and vice versa.                        |
//+------------------------------------------------------------------+
void CAlglib::DFSerialize(CDecisionForestShell &obj,string &s_out)
  {
   CSerializer s;
//--- first pass: compute the required buffer size
   s.Alloc_Start();
//--- function call
   CDForest::DFAlloc(s,obj.GetInnerObj());
//--- second pass: serialize to string form
   s.SStart_Str();
//--- function call
   CDForest::DFSerialize(s,obj.GetInnerObj());
//--- stop
   s.Stop();
//--- change value
   s_out=s.Get_String();
  }
//+------------------------------------------------------------------+
//| This function unserializes data structure from string.           |
//+------------------------------------------------------------------+
void CAlglib::DFUnserialize(const string s_in,CDecisionForestShell &obj)
  {
   CSerializer s;
//--- unserialization
   s.UStart_Str(s_in);
//--- function call
   CDForest::DFUnserialize(s,obj.GetInnerObj());
//--- stop
   s.Stop();
  }
//+------------------------------------------------------------------+
//| This function creates buffer structure which can be used to      |
//| perform parallel inference requests.                             |
//| DF subpackage provides two sets of computing functions - ones    |
//| which use internal buffer of DF model (these functions are       |
//| single-threaded because they use same buffer, which can not be   |
//| shared between threads), and ones which use external buffer.     |
//| This function is used to initialize external buffer.             |
//| INPUT PARAMETERS:                                                |
//|     Model     - DF model which is associated with newly created  |
//|                 buffer                                           |
//| OUTPUT PARAMETERS:                                               |
//|     Buf       - external buffer.                                 |
//| IMPORTANT: buffer object should be used only with model which was|
//|            used to initialize buffer. Any attempt to use buffer  |
//|            with different object is dangerous - you may get      |
//|            integrity check failure (exception) because sizes of  |
//|            internal arrays do not fit to dimensions of the model |
//|            structure.                                            |
//+------------------------------------------------------------------+
void CAlglib::DFCreateBuffer(CDecisionForestShell &model,
                             CDecisionForestBuffer &buf)
  {
   CDForest::DFCreateBuffer(model.GetInnerObj(),buf);
  }
//+------------------------------------------------------------------+
//| This subroutine creates CDecisionForestBuilder object which is   |
//| used to train decision forests.                                  |
//| By default, new builder stores empty dataset and some reasonable |
//| default settings. At the very least, you should specify dataset  |
//| prior to building decision forest. You can also tweak settings of|
//| the forest construction algorithm (recommended, although default |
//| setting should work well).                                       |
//| Following actions are mandatory:                                 |
//| * calling DFBuilderSetDataset() to specify dataset               |
//| * calling DFBuilderBuildRandomForest() to build decision forest  |
//|   using current dataset and default settings                     |
//| Additionally, you may call:                                      |
//| * DFBuilderSetRndVars() or DFBuilderSetRndVarsRatio() to         |
//|   specify number of variables randomly chosen for each split     |
//| * DFBuilderSetSubsampleRatio() to specify fraction of the        |
//|   dataset randomly subsampled to build each tree                 |
//| * DFBuilderSetSeed() to control random seed chosen for tree      |
//|   construction                                                   |
//| INPUT PARAMETERS:                                                |
//|     none                                                         |
//| OUTPUT PARAMETERS:                                               |
//|     S         - decision forest builder                          |
//+------------------------------------------------------------------+
void CAlglib::DFBuilderCreate(CDecisionForestBuilder &s)
  {
   CDForest::DFBuilderCreate(s);
  }
//+------------------------------------------------------------------+
//| This subroutine adds dense dataset to the internal storage of the|
//| builder object. Specifying your dataset in the dense format means|
//| that the dense version of the forest construction algorithm will |
//| be invoked.                                                      |
//| INPUT PARAMETERS:                                                |
//|     S         - decision forest builder object                   |
//|     XY        - array[NPoints,NVars+1] (minimum size; actual size|
//|                 can be larger, only leading part is used anyway),|
//|                 dataset:                                         |
//|                 * first NVars elements of each row store values  |
//|                   of the independent variables                   |
//|                 * last column stores class number                |
//|                   (in 0...NClasses-1) or real value of the       |
//|                   dependent variable                             |
//|     NPoints   - number of rows in the dataset, NPoints>=1        |
//|     NVars     - number of independent variables, NVars>=1        |
//|     NClasses  - indicates type of the problem being solved:      |
//|                 * NClasses>=2 means that classification problem  |
//|                   is solved (last column of the dataset stores   |
//|                   class number)                                  |
//|                 * NClasses=1 means that regression problem is    |
//|                   solved (last column of the dataset stores      |
//|                   variable value)                                |
//| OUTPUT PARAMETERS:                                               |
//|     S         - decision forest builder                          |
//+------------------------------------------------------------------+
void CAlglib::DFBuilderSetDataset(CDecisionForestBuilder &s,CMatrixDouble &xy,
                                  int npoints,int nvars,int nclasses)
  {
   CDForest::DFBuilderSetDataset(s,xy,npoints,nvars,nclasses);
  }
//+------------------------------------------------------------------+
//| This function sets number of variables (in [1,NVars] range) used |
//| by decision forest construction algorithm.                       |
//| The default option is to use roughly sqrt(NVars) variables.      |
//| INPUT PARAMETERS:                                                |
//|     S         - decision forest builder object                   |
//|     RndVars   - number of randomly selected variables; values    |
//|                 outside of [1,NVars] range are silently clipped. |
//| OUTPUT PARAMETERS:                                               |
//|     S         - decision forest builder                          |
//+------------------------------------------------------------------+
void CAlglib::DFBuilderSetRndVars(CDecisionForestBuilder &s,int rndvars)
  {
   CDForest::DFBuilderSetRndVars(s,rndvars);
  }
//+------------------------------------------------------------------+
//| This function sets number of variables used by decision forest   |
//| construction algorithm as a fraction of total variable count in  |
//| (0,1) range.                                                     |
//| The default option is to use roughly sqrt(NVars) variables.      |
//| INPUT PARAMETERS:                                                |
//|     S         - decision forest builder object                   |
//|     F         - round(NVars*F) variables are selected            |
//| OUTPUT PARAMETERS:                                               |
//|     S         - decision forest builder                          |
//+------------------------------------------------------------------+
void CAlglib::DFBuilderSetRndVarsRatio(CDecisionForestBuilder &s,double f)
  {
   CDForest::DFBuilderSetRndVarsRatio(s,f);
  }
//+------------------------------------------------------------------+
//| This function tells decision forest builder to automatically     |
//| choose number of variables used by decision forest construction  |
//| algorithm. Roughly sqrt(NVars) variables will be used.           |
//| INPUT PARAMETERS:                                                |
//|     S         - decision forest builder object                   |
//| OUTPUT PARAMETERS:                                               |
//|     S         - decision forest builder                          |
//+------------------------------------------------------------------+
void CAlglib::DFBuilderSetRndVarsAuto(CDecisionForestBuilder &s)
  {
   CDForest::DFBuilderSetRndVarsAuto(s);
  }
//+------------------------------------------------------------------+
//| This function sets size of dataset subsample generated by the    |
//| decision forest construction algorithm. Size is specified as a   |
//| fraction of total dataset size.                                  |
//| The default option is to use 50% of the dataset for training,    |
//| 50% for the OOB estimates. You can decrease fraction F down to   |
//| 10%, 1% or even below in order to reduce overfitting.            |
//| INPUT PARAMETERS:                                                |
//|     S         - decision forest builder object                   |
//|     F         - fraction of the dataset to use, in (0,1] range.  |
//|                 Values outside of this range will be silently    |
//|                 clipped. At least one element is always selected |
//|                 for the training set.                            |
//| OUTPUT PARAMETERS:                                               |
//|     S         - decision forest builder                          |
//+------------------------------------------------------------------+
void CAlglib::DFBuilderSetSubsampleRatio(CDecisionForestBuilder &s,double f)
  {
   CDForest::DFBuilderSetSubsampleRatio(s,f);
  }
//+------------------------------------------------------------------+
//| This function sets seed used by internal RNG for random          |
//| subsampling and random selection of variable subsets.            |
//| By default random seed is used, i.e. every time you build        |
//| decision forest, we seed generator with new value obtained from  |
//| system-wide RNG. Thus, decision forest builder returns           |
//| non-deterministic results. You can change such behavior by       |
//| specifying fixed positive seed value.                            |
//| INPUT PARAMETERS:                                                |
//|     S         - decision forest builder object                   |
//|     SeedVal   - seed value:                                      |
//|                 * positive values are used for seeding RNG with  |
//|                   fixed seed, i.e. subsequent runs on same data  |
//|                   will return same decision forests              |
//|                 * non-positive seed means that random seed is    |
//|                   used for every run of builder, i.e. subsequent |
//|                   runs on same datasets will return slightly     |
//|                   different decision forests                     |
//| OUTPUT PARAMETERS:                                               |
//|     S         - decision forest builder                          |
//+------------------------------------------------------------------+
void CAlglib::DFBuilderSetSeed(CDecisionForestBuilder &s,int seedval)
  {
   CDForest::DFBuilderSetSeed(s,seedval);
  }
//+------------------------------------------------------------------+
//| This function sets random decision forest construction algorithm.|
//| As for now, only one decision forest construction algorithm is   |
//| supported - a dense "baseline" RDF algorithm.                    |
| //| INPUT PARAMETERS: | //| S - decision forest builder object | //| AlgoType - algorithm type: | //| * 0 = baseline dense RDF | //| OUTPUT PARAMETERS: | //| S - decision forest builder, see | //+------------------------------------------------------------------+ void CAlglib::DFBuilderSetRDFAlgo(CDecisionForestBuilder &s,int algotype) { CDForest::DFBuilderSetRDFAlgo(s,algotype); } //+------------------------------------------------------------------+ //| This function sets split selection algorithm used by decision | //| forest classifier. You may choose several algorithms, with | //| different speed and quality of the results. | //| INPUT PARAMETERS: | //| S - decision forest builder object | //| SplitStrength - split type: | //| * 0 = split at the random position, fastest one | //| * 1 = split at the middle of the range | //| * 2 = strong split at the best point of the range | //| (default) | //| OUTPUT PARAMETERS: | //| S - decision forest builder, see | //+------------------------------------------------------------------+ void CAlglib::DFBuilderSetRDFSplitStrength(CDecisionForestBuilder &s, int splitstrength) { CDForest::DFBuilderSetRDFSplitStrength(s,splitstrength); } //+------------------------------------------------------------------+ //| This function tells decision forest construction algorithm to use| //| Gini impurity based variable importance estimation (also known as| //| MDI). | //| This version of importance estimation algorithm analyzes mean | //| decrease in impurity (MDI) on training sample during splits. The | //| result is divided by impurity at the root node in order to | //| produce estimate in [0,1] range. | //| Such estimates are fast to calculate and beautifully normalized | //| (sum to one) but have following downsides: | //| * They ALWAYS sum to 1.0, even if output is completely | //| unpredictable. I.e. 
//+------------------------------------------------------------------+
//| This function tells decision forest construction algorithm to use
//| Gini impurity based variable importance estimation (also known as
//| MDI).
//| This version of importance estimation algorithm analyzes mean
//| decrease in impurity (MDI) on training sample during splits. The
//| result is divided by impurity at the root node in order to
//| produce estimate in [0,1] range.
//| Such estimates are fast to calculate and beautifully normalized
//| (sum to one) but have following downsides:
//|   * they ALWAYS sum to 1.0, even if output is completely
//|     unpredictable, i.e. MDI allows to order variables by
//|     importance, but does not tell us about "absolute" importances
//|     of variables
//|   * there exist some bias towards continuous and high-cardinality
//|     categorical variables
//| NOTE: informally speaking, MDA (permutation importance) rating
//|       answers the question "what part of the model predictive
//|       power is ruined by permuting k-th variable?" while MDI
//|       tells us "what part of the model predictive power was
//|       achieved due to usage of k-th variable".
//| Thus, MDA rates each variable independently at "0 to 1" scale
//| while MDI (and OOB-MDI too) tends to divide "unit amount of
//| importance" between several important variables.
//| If all variables are equally important, they will have same
//| MDI/OOB-MDI rating, equal (for OOB-MDI: roughly equal) to
//| 1/NVars. However, roughly same picture will be produced for the
//| "all variables provide information, no one is critical" situation
//| and for the "all variables are critical, drop any one, everything
//| is ruined" situation.
//| Contrary to that, MDA will rate critical variable as ~1.0
//| important, and important but non-critical variable will have less
//| than unit rating.
//| NOTE: quite often MDA and MDI return same results. It generally
//|       happens on problems with low test set error (a few percents
//|       at most) and large enough training set to avoid
//|       overfitting.
//| The difference between MDA, MDI and OOB-MDI becomes important
//| only on "hard" tasks with high test set error and/or small
//| training set.
//| INPUT PARAMETERS:
//|   S           -  decision forest builder object
//| OUTPUT PARAMETERS:
//|   S           -  decision forest builder object. Next call to the
//|                  forest construction function will produce:
//|                  * importance estimates in rep.varimportances
//|                  * variable ranks in rep.topvars
//+------------------------------------------------------------------+
void CAlglib::DFBuilderSetImportanceTrnGini(CDecisionForestBuilder &s)
  {
   CDForest::DFBuilderSetImportanceTrnGini(s);
  }
//+------------------------------------------------------------------+
//| This function tells decision forest construction algorithm to use
//| out-of-bag version of Gini variable importance estimation (also
//| known as OOB-MDI).
//| This version of importance estimation algorithm analyzes mean
//| decrease in impurity (MDI) on out-of-bag sample during splits.
//| The result is divided by impurity at the root node in order to
//| produce estimate in [0,1] range.
//| Such estimates are fast to calculate and resistant to overfitting
//| issues (thanks to the out-of-bag estimates used). However, OOB
//| Gini rating has following downsides:
//|   * there exist some bias towards continuous and high-cardinality
//|     categorical variables
//|   * Gini rating allows us to order variables by importance, but
//|     it is hard to define importance of the variable by itself.
//| See comments on DFBuilderSetImportanceTrnGini() for a detailed
//| discussion of MDA vs MDI/OOB-MDI ratings and when the difference
//| between them matters.
//| INPUT PARAMETERS:
//|   S           -  decision forest builder object
//| OUTPUT PARAMETERS:
//|   S           -  decision forest builder object. Next call to the
//|                  forest construction function will produce:
//|                  * importance estimates in rep.varimportances
//|                  * variable ranks in rep.topvars
//+------------------------------------------------------------------+
void CAlglib::DFBuilderSetImportanceOOBGini(CDecisionForestBuilder &s)
  {
   CDForest::DFBuilderSetImportanceOOBGini(s);
  }
//+------------------------------------------------------------------+
//| This function tells decision forest construction algorithm to use
//| permutation variable importance estimator (also known as MDA).
//| This version of importance estimation algorithm analyzes mean
//| increase in out-of-bag sum of squared residuals after random
//| permutation of J-th variable. The result is divided by error
//| computed with all variables being perturbed in order to produce
//| R-squared-like estimate in [0,1] range.
//| Such estimate is slower to calculate than Gini-based rating
//| because it needs multiple inference runs for each of variables
//| being studied.
//| MDA rating has following benefits over Gini-based ones:
//|   * no bias towards specific variable types
//|   * ability to directly evaluate "absolute" importance of some
//|     variable at "0 to 1" scale (contrary to Gini-based rating,
//|     which returns comparative importances)
//| NOTE: informally speaking, MDA (permutation importance) rating
//|       answers the question "what part of the model predictive
//|       power is ruined by permuting k-th variable?" while MDI
//|       tells us "what part of the model predictive power was
//|       achieved due to usage of k-th variable".
//| Thus, MDA rates each variable independently at "0 to 1" scale
//| while MDI (and OOB-MDI too) tends to divide "unit amount of
//| importance" between several important variables.
//| If all variables are equally important, they will have same
//| MDI/OOB-MDI rating, equal (for OOB-MDI: roughly equal) to
//| 1/NVars. However, roughly same picture will be produced for the
//| "all variables provide information, no one is critical" situation
//| and for the "all variables are critical, drop any one, everything
//| is ruined" situation.
//| Contrary to that, MDA will rate critical variable as ~1.0
//| important, and important but non-critical variable will have less
//| than unit rating.
//| NOTE: quite often MDA and MDI return same results. It generally
//|       happens on problems with low test set error (a few percents
//|       at most) and large enough training set to avoid
//|       overfitting.
//| The difference between MDA, MDI and OOB-MDI becomes important
//| only on "hard" tasks with high test set error and/or small
//| training set.
//| INPUT PARAMETERS:
//|   S           -  decision forest builder object
//| OUTPUT PARAMETERS:
//|   S           -  decision forest builder object. Next call to the
//|                  forest construction function will produce:
//|                  * importance estimates in rep.varimportances
//|                  * variable ranks in rep.topvars
//+------------------------------------------------------------------+
void CAlglib::DFBuilderSetImportancePermutation(CDecisionForestBuilder &s)
  {
   CDForest::DFBuilderSetImportancePermutation(s);
  }
//+------------------------------------------------------------------+
//| This function tells decision forest construction algorithm to
//| skip variable importance estimation.
//| INPUT PARAMETERS:
//|   S           -  decision forest builder object
//| OUTPUT PARAMETERS:
//|   S           -  decision forest builder object. Next call to the
//|                  forest construction function will result in the
//|                  forest being built without variable importance
//|                  estimation.
//+------------------------------------------------------------------+
void CAlglib::DFBuilderSetImportanceNone(CDecisionForestBuilder &s)
  {
   CDForest::DFBuilderSetImportanceNone(s);
  }
//+------------------------------------------------------------------+
//| This function is an alias for DFBuilderPeekProgress(), left in
//| ALGLIB for backward compatibility reasons.
//+------------------------------------------------------------------+
double CAlglib::DFBuilderGetProgress(CDecisionForestBuilder &s)
  {
   return(CDForest::DFBuilderGetProgress(s));
  }
//+------------------------------------------------------------------+
//| This function is used to peek into decision forest construction
//| process from some other thread and get current progress
//| indicator. It returns value in [0,1].
//| INPUT PARAMETERS:
//|   S           -  decision forest builder object used to build the
//|                  forest in some other thread
//| RESULT:
//|   progress value, in [0,1]
//+------------------------------------------------------------------+
double CAlglib::DFBuilderPeekProgress(CDecisionForestBuilder &s)
  {
   return(CDForest::DFBuilderPeekProgress(s));
  }
//+------------------------------------------------------------------+
//| This subroutine builds decision forest according to current
//| settings using dataset internally stored in the builder object.
//| Dense algorithm is used.
//| NOTE: this function uses dense algorithm for forest construction
//|       independently from the dataset format (dense or sparse).
//| NOTE: forest built with this function is stored in-memory using
//|       64-bit data structures for offsets/indexes/split values. It
//|       is possible to convert forest into more memory-efficient
//|       compressed binary representation. Depending on the problem
//|       properties, 3.7x-5.7x compression factors are possible.
//| The downsides of compression are (a) slight reduction in the
//| model accuracy and (b) ~1.5x reduction in the inference speed
//| (due to increased complexity of the storage format).
//| See comments on DFBinaryCompression() for more info.
//| Default settings are used by the algorithm; you can tweak them
//| with the help of the following functions:
//|   * DFBuilderSetSubsampleRatio() - to control the fraction of the
//|     dataset used for subsampling
//|   * DFBuilderSetRndVars() - to control the number of variables
//|     randomly chosen for decision rule creation
//| INPUT PARAMETERS:
//|   S           -  decision forest builder object
//|   NTrees      -  NTrees>=1, number of trees to train
//| OUTPUT PARAMETERS:
//|   D           -  decision forest. You can compress this forest to
//|                  more compact 16-bit representation with
//|                  DFBinaryCompression()
//|   Rep         -  report, see below for information on its fields.
//| == report information produced by forest construction function ==
//| Decision forest training report includes following information:
//|   * training set errors
//|   * out-of-bag estimates of errors
//|   * variable importance ratings
//| Following fields are used to store information:
//|   * training set errors are stored in rep.RelCLSError, rep.AvgCE,
//|     rep.RMSError, rep.AvgError and rep.AvgRelError
//|   * out-of-bag estimates of errors are stored in
//|     rep.oobrelclserror, rep.oobavgce, rep.oobrmserror,
//|     rep.oobavgerror and rep.oobavgrelerror
//| Variable importance reports, if requested by
//| DFBuilderSetImportanceTrnGini(), DFBuilderSetImportanceOOBGini()
//| or DFBuilderSetImportancePermutation() call, are stored in:
//|   * rep.varimportances field stores importance ratings
//|   * rep.topvars stores variable indexes ordered from the most
//|     important to less important ones
//| You can find more information about report fields in:
//|   * comments on CDFReport structure
//|   * comments on DFBuilderSetImportanceTrnGini function
//|   * comments on DFBuilderSetImportancePermutation function
//+------------------------------------------------------------------+
void CAlglib::DFBuilderBuildRandomForest(CDecisionForestBuilder &s,int ntrees,
                                         CDecisionForestShell &df,
                                         CDFReportShell &rep)
  {
   CDForest::DFBuilderBuildRandomForest(s,ntrees,df.GetInnerObj(),rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| This function performs binary compression of the decision forest.
//| Original decision forest produced by the forest builder is stored
//| using 64-bit representation for all numbers - offsets, variable
//| indexes, split points.
//| It is possible to significantly reduce model size by means of:
//|   * using compressed dynamic encoding for integers (offsets and
//|     variable indexes), which uses just 1 byte to store small ints
//|     (less than 128), just 2 bytes for larger values (less than
//|     128^2) and so on
//|   * storing floating point numbers using 8-bit exponent and
//|     16-bit mantissa
//| As result, model needs significantly less memory (compression
//| factor depends on variable and class counts). In particular:
//|   * NVars<128 and NClasses<128 result in 4.4x-5.7x model size
//|     reduction
//|   * NVars<16384 and NClasses<128 result in 3.7x-4.5x model size
//|     reduction
//| Such storage format performs lossless compression of all
//| integers, but compression of floating point values (split values)
//| is lossy, with roughly 0.01% relative error introduced during
//| rounding. Thus, we recommend you to re-evaluate model accuracy
//| after compression.
//| Another downside of compression is ~1.5x reduction in the
//| inference speed due to necessity of dynamic decompression of the
//| compressed model.
//| INPUT PARAMETERS:
//|   DF          -  decision forest built by forest builder
//| OUTPUT PARAMETERS:
//|   DF          -  replaced by compressed forest
//| RESULT:
//|   compression factor (in-RAM size of the compressed model vs that
//|   of the uncompressed one), positive number larger than 1.0
//+------------------------------------------------------------------+
double CAlglib::DFBinaryCompression(CDecisionForestShell &df)
  {
   return(CDForest::DFBinaryCompression(df.GetInnerObj()));
  }
//+------------------------------------------------------------------+
//| Processing
//| INPUT PARAMETERS:
//|   DF          -  decision forest model
//|   X           -  input vector, array[0..NVars-1].
//| OUTPUT PARAMETERS:
//|   Y           -  result. Regression estimate when solving
//|                  regression task, vector of posterior
//|                  probabilities for classification task.
//| See also DFProcessI.
//+------------------------------------------------------------------+
void CAlglib::DFProcess(CDecisionForestShell &df,double &x[],
                        double &y[])
  {
   CDForest::DFProcess(df.GetInnerObj(),x,y);
  }
//+------------------------------------------------------------------+
//| 'interactive' variant of DFProcess for languages like Python
//| which support constructs like "Y = DFProcessI(DF,X)" and
//| interactive mode of interpreter.
//| This function allocates new array on each call, so it is
//| significantly slower than its 'non-interactive' counterpart, but
//| it is more convenient when you call it from command line.
//+------------------------------------------------------------------+
void CAlglib::DFProcessI(CDecisionForestShell &df,
                         double &x[],double &y[])
  {
   CDForest::DFProcessI(df.GetInnerObj(),x,y);
  }
//+------------------------------------------------------------------+
//| This function returns first component of the inferred vector
//| (i.e. one with index #0).
//| It is a convenience wrapper for DFProcess() intended for either:
//|   * 1-dimensional regression problems
//|   * 2-class classification problems
//| In the former case this function returns inference result as a
//| scalar, which is definitely more convenient than wrapping it as a
//| vector. In the latter case it returns probability of object
//| belonging to class #0.
//| If you call it for anything different from two cases above, it
//| will work as defined, i.e. return y[0], although it is of less
//| use in such cases.
//| INPUT PARAMETERS:
//|   Model       -  DF model
//|   X           -  input vector, array[0..NVars-1]
//| RESULT:
//|   Y[0]
//+------------------------------------------------------------------+
double CAlglib::DFProcess0(CDecisionForestShell &model,double &X[])
  {
//--- wrap the raw array into a row vector and delegate
   CRowDouble row=X;
   return CDForest::DFProcess0(model.GetInnerObj(),row);
  }
//+------------------------------------------------------------------+
//| Overload of DFProcess0() taking the input as a CRowDouble.
//+------------------------------------------------------------------+
double CAlglib::DFProcess0(CDecisionForestShell &model,CRowDouble &x)
  {
   return CDForest::DFProcess0(model.GetInnerObj(),x);
  }
//+------------------------------------------------------------------+
//| This function returns most probable class number for an input X.
//| It is same as calling DFProcess(model,x,y), then determining
//| i=ArgMax(y[i]) and returning i.
//| A class number in [0,NOut) range is returned for classification
//| problems, -1 is returned when this function is called for
//| regression problems.
//| INPUT PARAMETERS:
//|   Model       -  decision forest model
//|   X           -  input vector, array[0..NVars-1]
//| RESULT:
//|   class number, -1 for regression tasks
//+------------------------------------------------------------------+
int CAlglib::DFClassify(CDecisionForestShell &model,double &X[])
  {
//--- wrap the raw array into a row vector and delegate
   CRowDouble row=X;
   return CDForest::DFClassify(model.GetInnerObj(),row);
  }
//+------------------------------------------------------------------+
//| Overload of DFClassify() taking the input as a CRowDouble.
//+------------------------------------------------------------------+
int CAlglib::DFClassify(CDecisionForestShell &model,CRowDouble &x)
  {
   return CDForest::DFClassify(model.GetInnerObj(),x);
  }
//+------------------------------------------------------------------+
//| Relative classification error on the test set
//| INPUT PARAMETERS:
//|   DF          -  decision forest model
//|   XY          -  test set
//|   NPoints     -  test set size
//| RESULT:
//|   percent of incorrectly classified cases.
//|   Zero if model solves regression task.
//+------------------------------------------------------------------+
double CAlglib::DFRelClsError(CDecisionForestShell &df,CMatrixDouble &xy,
                              const int npoints)
  {
   return(CDForest::DFRelClsError(df.GetInnerObj(),xy,npoints));
  }
//+------------------------------------------------------------------+
//| Average cross-entropy (in bits per element) on the test set
//| INPUT PARAMETERS:
//|   DF          -  decision forest model
//|   XY          -  test set
//|   NPoints     -  test set size
//| RESULT:
//|   CrossEntropy/(NPoints*LN(2)).
//|   Zero if model solves regression task.
//+------------------------------------------------------------------+
double CAlglib::DFAvgCE(CDecisionForestShell &df,CMatrixDouble &xy,
                        const int npoints)
  {
   return(CDForest::DFAvgCE(df.GetInnerObj(),xy,npoints));
  }
//+------------------------------------------------------------------+
//| RMS error on the test set
//| INPUT PARAMETERS:
//|   DF          -  decision forest model
//|   XY          -  test set
//|   NPoints     -  test set size
//| RESULT:
//|   root mean square error.
//|   Its meaning for regression task is obvious. As for
//|   classification task, RMS error means error when estimating
//|   posterior probabilities.
//+------------------------------------------------------------------+
double CAlglib::DFRMSError(CDecisionForestShell &df,CMatrixDouble &xy,
                           const int npoints)
  {
   return(CDForest::DFRMSError(df.GetInnerObj(),xy,npoints));
  }
//+------------------------------------------------------------------+
//| Average error on the test set
//| INPUT PARAMETERS:
//|   DF          -  decision forest model
//|   XY          -  test set
//|   NPoints     -  test set size
//| RESULT:
//|   Its meaning for regression task is obvious. As for
//|   classification task, it means average error when estimating
//|   posterior probabilities.
//+------------------------------------------------------------------+
double CAlglib::DFAvgError(CDecisionForestShell &df,CMatrixDouble &xy,
                           const int npoints)
  {
   return(CDForest::DFAvgError(df.GetInnerObj(),xy,npoints));
  }
//+------------------------------------------------------------------+
//| Average relative error on the test set
//| INPUT PARAMETERS:
//|   DF          -  decision forest model
//|   XY          -  test set
//|   NPoints     -  test set size
//| RESULT:
//|   Its meaning for regression task is obvious. As for
//|   classification task, it means average relative error when
//|   estimating posterior probability of belonging to the correct
//|   class.
//+------------------------------------------------------------------+
double CAlglib::DFAvgRelError(CDecisionForestShell &df,CMatrixDouble &xy,
                              const int npoints)
  {
   return(CDForest::DFAvgRelError(df.GetInnerObj(),xy,npoints));
  }
//+------------------------------------------------------------------+
//| This subroutine builds random decision forest.
//| ---- DEPRECATED VERSION! USE DECISION FOREST BUILDER OBJECT ----
//+------------------------------------------------------------------+
void CAlglib::DFBuildRandomDecisionForest(CMatrixDouble &xy,const int npoints,
                                          const int nvars,const int nclasses,
                                          const int ntrees,const double r,
                                          int &info,CDecisionForestShell &df,
                                          CDFReportShell &rep)
  {
//--- initialization
   info=0;
//--- function call
   CDForest::DFBuildRandomDecisionForest(xy,npoints,nvars,nclasses,ntrees,r,info,df.GetInnerObj(),rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| This subroutine builds random decision forest.
//| ---- DEPRECATED VERSION! USE DECISION FOREST BUILDER OBJECT ----
//+------------------------------------------------------------------+
void CAlglib::DFBuildRandomDecisionForestX1(CMatrixDouble &xy,
                                            const int npoints,
                                            const int nvars,
                                            const int nclasses,
                                            const int ntrees,
                                            int nrndvars,
                                            const double r,
                                            int &info,
                                            CDecisionForestShell &df,
                                            CDFReportShell &rep)
  {
//--- initialization
   info=0;
//--- function call
   CDForest::DFBuildRandomDecisionForestX1(xy,npoints,nvars,nclasses,ntrees,nrndvars,r,info,df.GetInnerObj(),rep.GetInnerObj());
  }
//+------------------------------------------------------------------+
//| This function initializes clusterizer object. Newly initialized
//| object is empty, i.e. it does not contain dataset. You should
//| use it as follows:
//|   1. creation
//|   2. dataset is added with ClusterizerSetPoints()
//|   3. additional parameters are set
//|   4. clusterization is performed with one of the clustering
//|      functions
//+------------------------------------------------------------------+
void CAlglib::ClusterizerCreate(CClusterizerState &s)
  {
   CClustering::ClusterizerCreate(s);
  }
//+------------------------------------------------------------------+
//| This function adds dataset to the clusterizer structure.
//| This function overrides all previous calls of
//| ClusterizerSetPoints() or ClusterizerSetDistances().
//| INPUT PARAMETERS:
//|   S           -  clusterizer state, initialized by
//|                  ClusterizerCreate()
//|   XY          -  array[NPoints,NFeatures], dataset
//|   NPoints     -  number of points, >=0
//|   NFeatures   -  number of features, >=1
//|   DistType    -  distance function:
//|                  *  0 Chebyshev distance (L-inf norm)
//|                  *  1 city block distance (L1 norm)
//|                  *  2 Euclidean distance (L2 norm), non-squared
//|                  * 10 Pearson correlation:
//|                       dist(a,b) = 1-corr(a,b)
//|                  * 11 Absolute Pearson correlation:
//|                       dist(a,b) = 1-|corr(a,b)|
//|                  * 12 Uncentered Pearson correlation (cosine of
//|                       the angle): dist(a,b) = a'*b/(|a|*|b|)
//|                  * 13 Absolute uncentered Pearson correlation
//|                       dist(a,b) = |a'*b|/(|a|*|b|)
//|                  * 20 Spearman rank correlation:
//|                       dist(a,b) = 1-rankcorr(a,b)
//|                  * 21 Absolute Spearman rank correlation
//|                       dist(a,b) = 1-|rankcorr(a,b)|
//| NOTE 1: different distance functions have different performance
//|         penalty:
//|         * Euclidean or Pearson correlation distances are the
//|           fastest ones
//|         * Spearman correlation distance function is a bit slower
//|         * city block and Chebyshev distances are order of
//|           magnitude slower
//|         The reason behind the difference in performance is that
//|         correlation-based distance functions are computed using
//|         optimized linear algebra kernels, while Chebyshev and
//|         city block distance functions are computed using simple
//|         nested loops with two branches at each iteration.
//| NOTE 2: different clustering algorithms have different
//|         limitations:
//|         * agglomerative hierarchical clustering algorithms may
//|           be used with any kind of distance metric
//|         * k-means++ clustering algorithm may be used only with
//|           Euclidean distance function
//|         Thus, list of specific clustering algorithms you may use
//|         depends on distance function you specify when you set
//|         your dataset.
//+------------------------------------------------------------------+
void CAlglib::ClusterizerSetPoints(CClusterizerState &s,CMatrixDouble &xy,
                                   int npoints,int nfeatures,int disttype)
  {
   CClustering::ClusterizerSetPoints(s,xy,npoints,nfeatures,disttype);
  }
//+------------------------------------------------------------------+
//| Overload of ClusterizerSetPoints() which derives the dataset
//| dimensions from the matrix itself: NPoints=rows(XY) and
//| NFeatures=cols(XY).
//+------------------------------------------------------------------+
void CAlglib::ClusterizerSetPoints(CClusterizerState &s,CMatrixDouble &xy,int disttype)
  {
//--- dataset dimensions are taken directly from the matrix
   CClustering::ClusterizerSetPoints(s,xy,CAp::Rows(xy),CAp::Cols(xy),disttype);
  }
//+------------------------------------------------------------------+
//| This function adds dataset given by distance matrix to the
//| clusterizer structure. It is important that dataset is not given
//| explicitly - only distance matrix is given.
//| This function overrides all previous calls of
//| ClusterizerSetPoints() or ClusterizerSetDistances().
//| INPUT PARAMETERS:
//|   S           -  clusterizer state, initialized by
//|                  ClusterizerCreate()
//|   D           -  array[NPoints,NPoints], distance matrix given by
//|                  its upper or lower triangle (main diagonal is
//|                  ignored because its entries are expected to be
//|                  zero)
//|   NPoints     -  number of points
//|   IsUpper     -  whether upper or lower triangle of D is given.
//| NOTE 1: different clustering algorithms have different
//|         limitations:
//|         * agglomerative hierarchical clustering algorithms may
//|           be used with any kind of distance metric, including
//|           one which is given by distance matrix
//|         * k-means++ clustering algorithm may be used only with
//|           Euclidean distance function and explicitly given
//|           points - it can not be used with dataset given by
//|           distance matrix. Thus, if you call this function, you
//|           will be unable to use k-means clustering algorithm to
//|           process your problem.
//+------------------------------------------------------------------+
void CAlglib::ClusterizerSetDistances(CClusterizerState &s,CMatrixDouble &d,int npoints,bool IsUpper)
  {
   CClustering::ClusterizerSetDistances(s,d,npoints,IsUpper);
  }
//+------------------------------------------------------------------+
//| Overload of ClusterizerSetDistances() which takes NPoints from
//| the (square) distance matrix itself.
//+------------------------------------------------------------------+
void CAlglib::ClusterizerSetDistances(CClusterizerState &s,CMatrixDouble &d,bool IsUpper)
  {
//--- check: the distance matrix must be square
   if(!CAp::Assert(CAp::Rows(d)==CAp::Cols(d),"Error while calling 'ClusterizerSetDistances': looks like one of arguments has wrong size"))
      return;
//--- initialization
   int npoints=CAp::Rows(d);
//--- function call
   CClustering::ClusterizerSetDistances(s,d,npoints,IsUpper);
  }
//+------------------------------------------------------------------+
//| This function sets agglomerative hierarchical clustering
//| algorithm
//| INPUT PARAMETERS:
//|   S           -  clusterizer state, initialized by
//|                  ClusterizerCreate()
//|   Algo        -  algorithm type:
//|                  * 0 complete linkage (default algorithm)
//|                  * 1 single linkage
//|                  * 2 unweighted average linkage
//|                  * 3 weighted average linkage
//|                  * 4 Ward's method
//| NOTE: Ward's method works correctly only with Euclidean distance,
//|       that's why algorithm will return negative termination
//|       code (failure) for any other distance type.
//| It is possible, however, to use this method with user-supplied
//| distance matrix. It is your responsibility to pass one which was
//| calculated with Euclidean distance function.
//+------------------------------------------------------------------+
void CAlglib::ClusterizerSetAHCAlgo(CClusterizerState &s,int algo)
  {
   CClustering::ClusterizerSetAHCAlgo(s,algo);
  }
| //| INPUT PARAMETERS: | //| S - clusterizer state, initialized by | //| ClusterizerCreate() | //| Restarts - restarts count, >= 1. | //| k-means++ algorithm performs several restarts | //| and chooses best set of centers(one with minimum | //| squared distance). | //| MaxIts - maximum number of k-means iterations performed | //| during one run. >= 0, zero value means that | //| algorithm performs unlimited number of iterations. | //+------------------------------------------------------------------+ void CAlglib::ClusterizerSetKMeansLimits(CClusterizerState &s,int restarts,int maxits) { CClustering::ClusterizerSetKMeansLimits(s,restarts,maxits); } //+------------------------------------------------------------------+ //| This function sets k-means initialization algorithm. Several | //| different algorithms can be chosen, including k-means++. | //| INPUT PARAMETERS: | //| S - clusterizer state, initialized by | //| ClusterizerCreate() | //| InitAlgo - initialization algorithm: | //| * 0 automatic selection(different versions of ALGLIB | //| may select different algorithms) | //| * 1 random initialization | //| * 2 k-means++ initialization(best quality of initial | //| centers, but long non-parallelizable initialization | //| phase with bad cache locality) | //| *3 "fast-greedy" algorithm with efficient, easy to | //| parallelize initialization. Quality of initial centers | //| is somewhat worse than that of k-means++. This | //| algorithm is a default one in the current version of | //| ALGLIB. | //| *-1 "debug" algorithm which always selects first K rows | //| of dataset; this algorithm is used for debug purposes | //| only. Do not use it in the industrial code! 
//+------------------------------------------------------------------+
void CAlglib::ClusterizerSetKMeansInit(CClusterizerState &s,int initalgo)
  {
   CClustering::ClusterizerSetKMeansInit(s,initalgo);
  }
//+------------------------------------------------------------------+
//| This function sets seed which is used to initialize internal RNG.|
//| By default, deterministic seed is used - same for each run of    |
//| clusterizer. If you specify non-deterministic seed value, then   |
//| some algorithms which depend on random initialization (in        |
//| current version: k-means) may return slightly different results  |
//| after each run.                                                  |
//| INPUT PARAMETERS:                                                |
//|   S    - clusterizer state, initialized by ClusterizerCreate()   |
//|   Seed - seed:                                                   |
//|          * positive values = use deterministic seed for each run |
//|            of algorithms which depend on random initialization   |
//|          * zero or negative values = use non-deterministic seed  |
//+------------------------------------------------------------------+
void CAlglib::ClusterizerSetSeed(CClusterizerState &s,int seed)
  {
   CClustering::ClusterizerSetSeed(s,seed);
  }
//+------------------------------------------------------------------+
//| This function performs agglomerative hierarchical clustering     |
//| NOTE: Agglomerative hierarchical clustering algorithm has two    |
//|       phases: distance matrix calculation and clustering itself. |
//| INPUT PARAMETERS:                                                |
//|   S   - clusterizer state, initialized by ClusterizerCreate()    |
//| OUTPUT PARAMETERS:                                               |
//|   Rep - clustering results; see description of AHCReport         |
//|         structure for more information.                          |
//| NOTE 1: hierarchical clustering algorithms require large amounts |
//|         of memory. In particular, this implementation needs      |
//|         sizeof(double)*NPoints^2 bytes, which are used to store  |
//|         distance matrix. In case we work with user-supplied      |
//|         matrix, this amount is multiplied by 2 (we have to store |
//|         original matrix and to work with its copy).              |
//|         For example, problem with 10000 points would require     |
//|         800M of RAM, even when working in a 1-dimensional space. |
//+------------------------------------------------------------------+
void CAlglib::ClusterizerRunAHC(CClusterizerState &s,CAHCReport &rep)
  {
   CClustering::ClusterizerRunAHC(s,rep);
  }
//+------------------------------------------------------------------+
//| This function performs clustering by k-means++ algorithm.        |
//| You may change algorithm properties by calling:                  |
//|   * ClusterizerSetKMeansLimits() to change number of restarts    |
//|     or iterations                                                |
//|   * ClusterizerSetKMeansInit() to change initialization          |
//|     algorithm                                                    |
//| By default, one restart and unlimited number of iterations are   |
//| used. Initialization algorithm is chosen automatically.          |
//| NOTE: k-means clustering algorithm has two phases: selection of  |
//|       initial centers and clustering itself.                     |
//| INPUT PARAMETERS:                                                |
//|   S   - clusterizer state, initialized by ClusterizerCreate()    |
//|   K   - number of clusters, K>=0.                                |
//|         K can be zero only when algorithm is called for empty    |
//|         dataset, in this case completion code is set to          |
//|         success (+1).                                            |
//|         If K=0 and dataset size is non-zero, we can not          |
//|         meaningfully assign points to some center (there are no  |
//|         centers because K=0) and return -3 as completion code    |
//|         (failure).                                               |
//| OUTPUT PARAMETERS:                                               |
//|   Rep - clustering results; see description of KMeansReport      |
//|         structure for more information.                          |
//| NOTE 1: k-means clustering can be performed only for datasets    |
//|         with Euclidean distance function. Algorithm will return  |
//|         negative completion code in Rep.TerminationType in case  |
//|         dataset was added to clusterizer with DistType other     |
//|         than Euclidean (or dataset was specified by distance     |
//|         matrix instead of explicitly given points).              |
//| NOTE 2: by default, k-means uses non-deterministic seed to       |
//|         initialize RNG which is used to select initial centers.  |
//|         As result, each run of algorithm may return different    |
//|         values. If you need deterministic behavior, use          |
//|         ClusterizerSetSeed() function.                           |
//+------------------------------------------------------------------+
void CAlglib::ClusterizerRunKMeans(CClusterizerState &s,int k,CKmeansReport &rep)
  {
   CClustering::ClusterizerRunKMeans(s,k,rep);
  }
//+------------------------------------------------------------------+
//| This function returns distance matrix for dataset                |
//| INPUT PARAMETERS:                                                |
//|   XY        - array[NPoints,NFeatures], dataset                  |
//|   NPoints   - number of points, >=0                              |
//|   NFeatures - number of features, >=1                            |
//|   DistType  - distance function:                                 |
//|               *  0 Chebyshev distance (L-inf norm)               |
//|               *  1 city block distance (L1 norm)                 |
//|               *  2 Euclidean distance (L2 norm, non-squared)     |
//|               * 10 Pearson correlation:                          |
//|                    dist(a,b) = 1-corr(a,b)                       |
//|               * 11 Absolute Pearson correlation:                 |
//|                    dist(a,b) = 1-|corr(a,b)|                     |
//|               * 12 Uncentered Pearson correlation (cosine of the |
//|                    angle): dist(a,b) = a'*b/(|a|*|b|)            |
//|               * 13 Absolute uncentered Pearson correlation       |
//|                    dist(a,b) = |a'*b|/(|a|*|b|)                  |
//|               * 20 Spearman rank correlation:                    |
//|                    dist(a,b) = 1-rankcorr(a,b)                   |
//|               * 21 Absolute Spearman rank correlation            |
//|                    dist(a,b) = 1-|rankcorr(a,b)|                 |
//| OUTPUT PARAMETERS:                                               |
//|   D         - array[NPoints,NPoints], distance matrix (full      |
//|               matrix is returned, with lower and upper triangles)|
//| NOTE: different distance functions have different performance    |
//|       penalty:                                                   |
//|       * Euclidean or Pearson correlation distances are the       |
//|         fastest ones                                             |
//|       * Spearman correlation distance function is a bit slower   |
//|       * city block and Chebyshev distances are order of          |
//|         magnitude slower                                         |
//|       The reason behind difference in performance is that        |
//|       correlation-based distance functions are computed using    |
//|       optimized linear algebra kernels, while Chebyshev and city |
//|       block distance functions are computed using simple nested  |
//|       loops with two                                             |
//| branches at each iteration. | //+------------------------------------------------------------------+ void CAlglib::ClusterizerGetDistances(CMatrixDouble &xy,int npoints, int nfeatures,int disttype, CMatrixDouble &d) { d.Resize(0,0); CClustering::ClusterizerGetDistances(xy,npoints,nfeatures,disttype,d); } //+------------------------------------------------------------------+ //| This function takes as input clusterization report Rep, desired | //| clusters count K, and builds top K clusters from hierarchical | //| clusterization tree. | //| It returns assignment of points to clusters(array of cluster | //| indexes). | //| INPUT PARAMETERS: | //| Rep - report from ClusterizerRunAHC() performed on XY | //| K - desired number of clusters, 1 <= K <= NPoints. | //| K can be zero only when NPoints = 0. | //| OUTPUT PARAMETERS: | //| CIdx - array[NPoints], I-th element contains cluster | //| index(from 0 to K-1) for I-th point of the dataset.| //| CZ - array[K]. This array allows to convert cluster | //| indexes returned by this function to indexes used | //| by Rep.Z. J-th cluster returned by this function | //| corresponds to CZ[J]-th cluster stored in | //| Rep.Z/PZ/PM. It is guaranteed that CZ[I] < CZ[I+1].| //| NOTE: K clusters built by this subroutine are assumed to have no | //| hierarchy. Although they were obtained by manipulation with| //| top K nodes of dendrogram(i.e. hierarchical decomposition | //| of dataset), this function does not return information | //| about hierarchy. Each of the clusters stand on its own. | //| NOTE: Cluster indexes returned by this function does not | //| correspond to indexes returned in Rep.Z/PZ/PM. Either you | //| work with hierarchical representation of the dataset | //| (dendrogram), or you work with "flat" representation | //| returned by this function. 
Each of representations has its | //| own clusters indexing system(former uses [0,2*NPoints-2]), | //| while latter uses [0..K-1]), although it is possible to | //| perform conversion from one system to another by means of | //| CZ array, returned by this function, which allows you to | //| convert indexes stored in CIdx to the numeration system | //| used by Rep.Z. | //| NOTE: this subroutine is optimized for moderate values of K. | //| Say, for K=5 it will perform many times faster than for | //| K=100. Its worst - case performance is O(N*K), although in | //| average case it perform better (up to O(N*log(K))). | //+------------------------------------------------------------------+ void CAlglib::ClusterizerGetKClusters(CAHCReport &rep,int k,CRowInt &cidx,CRowInt &cz) { cidx.Resize(0); cz.Resize(0); CClustering::ClusterizerGetKClusters(rep,k,cidx,cz); } //+------------------------------------------------------------------+ //| This function accepts AHC report Rep, desired minimum | //| intercluster distance and returns top clusters from hierarchical | //| clusterization tree which are separated by distance R or HIGHER. | //| It returns assignment of points to clusters (array of cluster | //| indexes). | //| There is one more function with similar name - | //| ClusterizerSeparatedByCorr, which returns clusters with | //| intercluster correlation equal to R or LOWER (note: higher for | //| distance, lower for correlation). | //| INPUT PARAMETERS: | //| Rep - report from ClusterizerRunAHC() performed on XY | //| R - desired minimum intercluster distance, R >= 0 | //| OUTPUT PARAMETERS: | //| K - number of clusters, 1 <= K <= NPoints | //| CIdx - array[NPoints], I-th element contains cluster | //| index (from 0 to K-1) for I-th point of the dataset| //| CZ - array[K]. This array allows to convert cluster | //| indexes returned by this function to indexes used | //| by Rep.Z. 
J-th cluster returned by this function | //| corresponds to CZ[J]-th cluster stored in | //| Rep.Z/PZ/PM. It is guaranteed that CZ[I] < CZ[I+1].| //| NOTE: K clusters built by this subroutine are assumed to have no | //| hierarchy. Although they were obtained by manipulation with| //| top K nodes of dendrogram (i.e. hierarchical decomposition | //| of dataset), this function does not return information | //| about hierarchy. Each of the clusters stand on its own. | //| NOTE: Cluster indexes returned by this function does not | //| correspond to indexes returned in Rep.Z/PZ/PM. Either you | //| work with hierarchical representation of the dataset | //| (dendrogram), or you work with "flat" representation | //| returned by this function. Each of representations has its | //| own clusters indexing system (former uses [0,2*NPoints-2]),| //| while latter uses [0..K-1]), although it is possible to | //| perform conversion from one system to another by means of | //| CZ array, returned by this function, which allows you to | //| convert indexes stored in CIdx to the numeration system | //| used by Rep.Z. | //| NOTE: this subroutine is optimized for moderate values of K. Say,| //| for K=5 it will perform many times faster than for K=100. | //| Its worst - case performance is O(N*K), although in average| //| case it perform better (up to O(N*log(K))). | //+------------------------------------------------------------------+ void CAlglib::ClusterizerSeparatedByDist(CAHCReport &rep,double r,int &k, CRowInt &cidx,CRowInt &cz) { k=0; cidx.Resize(0); cz.Resize(0); CClustering::ClusterizerSeparatedByDist(rep,r,k,cidx,cz); } //+------------------------------------------------------------------+ //| This function accepts AHC report Rep, desired maximum | //| intercluster correlation and returns top clusters from | //| hierarchical clusterization tree which are separated by | //| correlation R or LOWER. 
| //| It returns assignment of points to clusters(array of cluster | //| indexes). | //| There is one more function with similar name - | //| ClusterizerSeparatedByDist, which returns clusters with | //| intercluster distance equal to R or HIGHER (note: higher for | //| distance, lower for correlation). | //| INPUT PARAMETERS: | //| Rep - report from ClusterizerRunAHC() performed on XY | //| R - desired maximum intercluster correlation, -1<=R<=+1| //| OUTPUT PARAMETERS: | //| K - number of clusters, 1 <= K <= NPoints | //| CIdx - array[NPoints], I-th element contains cluster index| //| (from 0 to K-1) for I-th point of the dataset. | //| CZ - array[K]. This array allows to convert cluster | //| indexes returned by this function to indexes used | //| by Rep.Z. J-th cluster returned by this function | //| corresponds to CZ[J]-th cluster stored in | //| Rep.Z/PZ/PM. It is guaranteed that CZ[I] < CZ[I+1].| //| NOTE: K clusters built by this subroutine are assumed to have no | //| hierarchy. Although they were obtained by manipulation with| //| top K nodes of dendrogram (i.e. hierarchical decomposition | //| of dataset), this function does not return information | //| about hierarchy. Each of the clusters stand on its own. | //| NOTE: Cluster indexes returned by this function does not | //| correspond to indexes returned in Rep.Z/PZ/PM. Either you | //| work with hierarchical representation of the dataset | //| (dendrogram), or you work with "flat" representation | //| returned by this function. Each of representations has its | //| own clusters indexing system (former uses [0,2*NPoints-2]),| //| while latter uses [0..K-1]), although it is possible to | //| perform conversion from one system to another by means of | //| CZ array, returned by this function, which allows you to | //| convert indexes stored in CIdx to the numeration system | //| used by Rep.Z. | //| NOTE: this subroutine is optimized for moderate values of K. 
Say,| //| for K=5 it will perform many times faster than for K=100. | //| Its worst - case performance is O(N*K), although in average| //| case it perform better (up to O(N*log(K))). | //+------------------------------------------------------------------+ void CAlglib::ClusterizerSeparatedByCorr(CAHCReport &rep,double r, int &k,CRowInt &cidx,CRowInt &cz) { k=0; cidx.Resize(0); cz.Resize(0); CClustering::ClusterizerSeparatedByCorr(rep,r,k,cidx,cz); } //+------------------------------------------------------------------+ //| k-means++ clusterization | //| Backward compatibility function, we recommend to use CLUSTERING | //| subpackage as better replacement. | //| INPUT PARAMETERS: | //| XY - dataset, array [0..NPoints-1,0..NVars-1]. | //| NPoints - dataset size, NPoints>=K | //| NVars - number of variables, NVars>=1 | //| K - desired number of clusters, K>=1 | //| Restarts - number of restarts, Restarts>=1 | //| OUTPUT PARAMETERS: | //| Info - return code: | //| * -3, if task is degenerate (number of | //| distinct points is less than K) | //| * -1, if incorrect | //| NPoints/NFeatures/K/Restarts was passed| //| * 1, if subroutine finished successfully | //| C - array[0..NVars-1,0..K-1].matrix whose columns| //| store cluster's centers | //| XYC - array[NPoints], which contains cluster | //| indexes | //+------------------------------------------------------------------+ void CAlglib::KMeansGenerate(CMatrixDouble &xy,const int npoints, const int nvars,const int k, const int restarts,int &info, CMatrixDouble &c,int &xyc[]) { //--- initialization info=0; //--- function call CKMeans::KMeansGenerate(xy,npoints,nvars,k,restarts,info,c,xyc); } //+------------------------------------------------------------------+ //| Multiclass Fisher LDA | //| Subroutine finds coefficients of linear combination which | //| optimally separates training set on classes. | //| INPUT PARAMETERS: | //| XY - training set, array[0..NPoints-1,0..NVars]. 
//|        First NVars columns store values of independent           |
//|        variables, next column stores number of class (from 0 to  |
//|        NClasses-1) which dataset element belongs to. Fractional  |
//|        values are rounded to nearest integer.                    |
//|   NPoints  - training set size, NPoints>=0                       |
//|   NVars    - number of independent variables, NVars>=1           |
//|   NClasses - number of classes, NClasses>=2                      |
//| OUTPUT PARAMETERS:                                               |
//|   Info - return code:                                            |
//|          * -4, if internal EVD subroutine hasn't converged       |
//|          * -2, if there is a point with class number outside of  |
//|                [0..NClasses-1].                                  |
//|          * -1, if incorrect parameters was passed                |
//|                (NPoints<0, NVars<1, NClasses<2)                  |
//|          *  1, if task has been solved                           |
//|          *  2, if there was a multicollinearity in training set, |
//|                but task has been solved.                         |
//|   W    - linear combination coefficients, array[0..NVars-1]      |
//+------------------------------------------------------------------+
void CAlglib::FisherLDA(CMatrixDouble &xy,const int npoints,
                        const int nvars,const int nclasses,
                        int &info,double &w[])
  {
//--- reset completion code before delegating
   info=0;
//--- function call
   CLDA::FisherLDA(xy,npoints,nvars,nclasses,info,w);
  }
//+------------------------------------------------------------------+
//| Overload writing the coefficients into a CRowDouble vector.      |
//+------------------------------------------------------------------+
void CAlglib::FisherLDA(CMatrixDouble &xy,const int npoints,
                        const int nvars,const int nclasses,
                        int &info,CRowDouble &w)
  {
//--- reset completion code before delegating
   info=0;
//--- function call
   CLDA::FisherLDA(xy,npoints,nvars,nclasses,info,w);
  }
//+------------------------------------------------------------------+
//| N-dimensional multiclass Fisher LDA                              |
//| Subroutine finds coefficients of linear combinations which       |
//| optimally separates training set on classes. It returns          |
//| N-dimensional basis whose vector are sorted by quality of        |
//| training set separation (in descending order).                   |
//| INPUT PARAMETERS:                                                |
//|   XY - training set, array[0..NPoints-1,0..NVars].               |
//|        First NVars columns store values of independent           |
//|        variables, next column stores number of class (from 0 to  |
//|        NClasses-1) which dataset element belongs to. Fractional  |
//|        values are rounded to nearest integer.                    |
//|   NPoints  - training set size, NPoints>=0                       |
//|   NVars    - number of independent variables, NVars>=1           |
//|   NClasses - number of classes, NClasses>=2                      |
//| OUTPUT PARAMETERS:                                               |
//|   Info - return code:                                            |
//|          * -4, if internal EVD subroutine hasn't converged       |
//|          * -2, if there is a point with class number outside of  |
//|                [0..NClasses-1].                                  |
//|          * -1, if incorrect parameters was passed                |
//|                (NPoints<0, NVars<1, NClasses<2)                  |
//|          *  1, if task has been solved                           |
//|          *  2, if there was a multicollinearity in training set, |
//|                but task has been solved.                         |
//|   W    - basis, array[0..NVars-1,0..NVars-1]                     |
//|          columns of matrix stores basis vectors, sorted by       |
//|          quality of training set separation (in descending order)|
//+------------------------------------------------------------------+
void CAlglib::FisherLDAN(CMatrixDouble &xy,const int npoints,
                         const int nvars,const int nclasses,
                         int &info,CMatrixDouble &w)
  {
//--- reset completion code before delegating
   info=0;
//--- function call
   CLDA::FisherLDAN(xy,npoints,nvars,nclasses,info,w);
  }
//+------------------------------------------------------------------+
//| Linear regression                                                |
//| Subroutine builds model:                                         |
//|   Y = A(0)*X[0] + ... + A(N-1)*X[N-1] + A(N)                     |
//| and model found in ALGLIB format, covariation matrix, training   |
//| set errors (rms, average, average relative) and leave-one-out    |
//| cross-validation estimate of the generalization error. CV        |
//| estimate calculated using fast algorithm with O(NPoints*NVars)   |
//| complexity.                                                      |
//| When covariation matrix is calculated standard deviations of     |
//| function values are assumed to be equal to RMS error on the      |
//| training set.                                                    |
| //| INPUT PARAMETERS: | //| XY - training set, array [0..NPoints-1,0..NVars]: | //| * NVars columns - independent variables | //| * last column - dependent variable | //| NPoints - training set size, NPoints>NVars+1 | //| NVars - number of independent variables | //| OUTPUT PARAMETERS: | //| Info - return code: | //| * -255, in case of unknown internal error | //| * -4, if internal SVD subroutine haven't | //| converged | //| * -1, if incorrect parameters was passed | //| (NPoints0. | //| NPoints - training set size, NPoints>NVars+1 | //| NVars - number of independent variables | //| OUTPUT PARAMETERS: | //| Info - return code: | //| * -255, in case of unknown internal error | //| * -4, if internal SVD subroutine haven't | //| converged | //| * -1, if incorrect parameters was passed | //| (NPoints