Mirror of https://github.com/jkriege2/JKQtPlotter.git, synced 2024-12-25 10:01:38 +08:00
bugfixed doxygen docu
This commit is contained in: parent 456fd644bf, commit 09237a3d55
@@ -1800,7 +1800,7 @@ inline void jkqtpstatKDE1D(InputIt first, InputIt last, double binXLeft, double


 /*! \brief calculate the linear regression coefficients for a given data range \a firstX / \a firstY ... \a lastX / \a lastY where the model is \f$ f(x)=a+b\cdot x \f$
-    So this function solves the least-squares optimization problem: \f[ (a^\ast, b^\ast)=\argmin\limits_{a,b}\sum\limits_i\left(y_i-(a+b\cdot x_i)\right)^2 \f]
+    So this function solves the least-squares optimization problem: \f[ (a^\ast, b^\ast)=\mathop{arg\;min}\limits_{a,b}\sum\limits_i\left(y_i-(a+b\cdot x_i)\right)^2 \f]
     \ingroup jkqtptools_math_statistics_regression

     \tparam InputItX standard iterator type of \a firstX and \a lastX.
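The least-squares problem shown in this hunk has a well-known closed-form solution via the normal equations of a straight-line fit. Below is a minimal C++ sketch of that math, purely illustrative and not the library's implementation; the helper name fitLine and its plain std::vector interface are assumptions of this sketch (the documented function jkqtpstatLinearRegression works on iterator ranges instead).

#include <vector>
#include <cstddef>

// Illustrative sketch: solve (a*,b*) = argmin_{a,b} sum_i (y_i - (a + b*x_i))^2
// with the closed-form normal equations; assumes at least two distinct x values.
static void fitLine(const std::vector<double>& x, const std::vector<double>& y,
                    double& a, double& b)
{
    const size_t N = x.size();
    double sx = 0, sy = 0, sxx = 0, sxy = 0;
    for (size_t i = 0; i < N; ++i) {
        sx  += x[i];
        sy  += y[i];
        sxx += x[i] * x[i];
        sxy += x[i] * y[i];
    }
    b = (double(N) * sxy - sx * sy) / (double(N) * sxx - sx * sx); // slope
    a = (sy - b * sx) / double(N);                                 // intercept
}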
@@ -1857,7 +1857,7 @@ inline void jkqtpstatLinearRegression(InputItX firstX, InputItX lastX, InputItY


 /*! \brief calculate the weighted linear regression coefficients for a given data range \a firstX / \a firstY / \a firstW ... \a lastX / \a lastY / \a lastW where the model is \f$ f(x)=a+b\cdot x \f$
-    So this function solves the least-squares optimization problem: \f[ (a^\ast, b^\ast)=\argmin\limits_{a,b}\sum\limits_iw_i^2\cdot\left(y_i-(a+b\cdot x_i)\right)^2 \f]
+    So this function solves the least-squares optimization problem: \f[ (a^\ast, b^\ast)=\mathop{arg\;min}\limits_{a,b}\sum\limits_iw_i^2\cdot\left(y_i-(a+b\cdot x_i)\right)^2 \f]
     \ingroup jkqtptools_math_statistics_regression

     \tparam InputItX standard iterator type of \a firstX and \a lastX.
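The weighted variant in this hunk differs only in that every residual enters the sum with the factor w_i^2. Again a hedged C++ sketch; the helper name fitLineWeighted and its vector interface are assumptions standing in for the iterator-based jkqtpstatLinearWeightedRegression.

#include <vector>
#include <cstddef>

// Illustrative sketch: weighted least squares for f(x)=a+b*x, minimizing
// sum_i w_i^2 * (y_i - (a + b*x_i))^2 (each datapoint weighted by w_i^2).
static void fitLineWeighted(const std::vector<double>& x, const std::vector<double>& y,
                            const std::vector<double>& w, double& a, double& b)
{
    double Sv = 0, Svx = 0, Svy = 0, Svxx = 0, Svxy = 0;
    for (size_t i = 0; i < x.size(); ++i) {
        const double v = w[i] * w[i];   // effective weight w_i^2
        Sv   += v;
        Svx  += v * x[i];
        Svy  += v * y[i];
        Svxx += v * x[i] * x[i];
        Svxy += v * x[i] * y[i];
    }
    b = (Sv * Svxy - Svx * Svy) / (Sv * Svxx - Svx * Svx);
    a = (Svy - b * Svx) / Sv;
}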
@@ -1937,7 +1937,7 @@ inline void jkqtpstatLinearWeightedRegression(InputItX firstX, InputItX lastX, I
 /*! \brief calculate the (robust) iteratively reweighted least-squares (IRLS) estimate for the parameters of the model \f$ f(x)=a+b\cdot x \f$
     for a given data range \a firstX / \a firstY ... \a lastX / \a lastY
     So this function finds an outlier-robust solution to the optimization problem:
-    \f[ (a^\ast,b^\ast)=\argmin\limits_{a,b}\sum\limits_i|a+b\cdot x_i-y_i|^p \f]
+    \f[ (a^\ast,b^\ast)=\mathop{arg\;min}\limits_{a,b}\sum\limits_i|a+b\cdot x_i-y_i|^p \f]
     \ingroup jkqtptools_math_statistics_regression

     \ingroup jkqtptools_math_statistics_regression
@@ -1957,16 +1957,16 @@ inline void jkqtpstatLinearWeightedRegression(InputItX firstX, InputItX lastX, I

     This is a simple form of the IRLS algorithm to estimate the parameters a and b in a linear model \f$ f(x)=a+b\cdot x \f$.
     This algorithm solves the optimization problem for a \f$ L_p\f$-norm:
-    \f[ (a^\ast,b^\ast)=\argmin\limits_{a,b}\sum\limits_i|a+b\cdot x_i-y_i|^p \f]
+    \f[ (a^\ast,b^\ast)=\mathop{arg\;min}\limits_{a,b}\sum\limits_i|a+b\cdot x_i-y_i|^p \f]
     by iteratively optimizing the weights \f$ \vec{w} \f$ and solving a weighted least squares problem in each iteration:
-    \f[ (a_n,b_n)=\argmin\limits_{a,b}\sum\limits_i|a+b\cdot x_i-y_i|^{(p-2)}\cdot|a+b\cdot x_i-y_i|^2 \f]
+    \f[ (a_n,b_n)=\mathop{arg\;min}\limits_{a,b}\sum\limits_i|a+b\cdot x_i-y_i|^{(p-2)}\cdot|a+b\cdot x_i-y_i|^2 \f]


     The IRLS-algorithm works as follows:
     - calculate initial \f$ a_0\f$ and \f$ b_0\f$ with unweighted regression from x and y
     - perform a number of iterations (parameter \a iterations ). In each iteration \f$ n\f$:
       - calculate the error vector \f$\vec{e}\f$: \f[ e_i = a+b\cdot x_i -y_i \f]
-      - estimate new weights \f$\vec{w}\f$: \[ w_i=|e_i|^{(p-2)/2} \]
+      - estimate new weights \f$\vec{w}\f$: \[ w_i=|e_i|^{(p-2)/2} \f]
       - calculate new estimates \f$ a_n\f$ and \f$ b_n\f$ with weighted regression from \f$ \vec{x}\f$ and \f$ \vec{y}\f$ and \f$ \vec{w}\f$
     .
     - return the last estimates \f$ a_n\f$ and \f$ b_n\f$
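The algorithm listed in this hunk maps almost line by line onto a short loop. The following C++ sketch is an illustration only: it reuses the hypothetical fitLine()/fitLineWeighted() helpers from the sketches above, and it adds a tiny offset to each residual before exponentiation (not part of the documented algorithm) so that the weight stays finite when a residual is exactly zero.

#include <vector>
#include <cmath>
#include <cstddef>

// Illustrative IRLS loop for the L_p problem described above.
static void fitLineIRLS(const std::vector<double>& x, const std::vector<double>& y,
                        double& a, double& b, double p = 1.1, int iterations = 100)
{
    fitLine(x, y, a, b);                    // initial unweighted estimate a_0, b_0
    std::vector<double> w(x.size(), 1.0);
    for (int n = 0; n < iterations; ++n) {
        for (size_t i = 0; i < x.size(); ++i) {
            const double e = a + b * x[i] - y[i];                     // error e_i
            w[i] = std::pow(std::fabs(e) + 1e-10, (p - 2.0) / 2.0);   // w_i = |e_i|^{(p-2)/2}
        }
        fitLineWeighted(x, y, w, a, b);     // weighted re-estimate a_n, b_n
    }
}

The default values chosen here for p and iterations are placeholders of this sketch, not the library's defaults.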
@@ -2064,7 +2064,7 @@ JKQTP_LIB_EXPORT std::pair<std::function<double(double)>,std::function<double(do


 /*! \brief calculate the linear regression coefficients for a given data range \a firstX / \a firstY ... \a lastX / \a lastY where the model is defined by \a type
-    So this function solves the least-squares optimization problem: \f[ (a^\ast, b^\ast)=\argmin\limits_{a,b}\sum\limits_i\left(y_i-f_\mbox{type}(x_i,a,b)\right)^2 \f]
+    So this function solves the least-squares optimization problem: \f[ (a^\ast, b^\ast)=\mathop{arg\;min}\limits_{a,b}\sum\limits_i\left(y_i-f_{\text{type}}(x_i,a,b)\right)^2 \f]
     by reducing it to a linear fit by transforming x- and/or y-data
     \ingroup jkqtptools_math_statistics_regression

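The phrase "by reducing it to a linear fit by transforming x- and/or y-data" refers to the classic linearization trick. As a hedged example (the exponential model and the helper name fitExponential are assumptions of this sketch, not the library's handling of JKQTPStatRegressionModelType): the model f(x)=A*exp(B*x) becomes a straight line after taking the logarithm of y, since ln(f(x))=ln(A)+B*x.

#include <vector>
#include <cmath>
#include <cstddef>

// Illustrative sketch: fit f(x)=A*exp(B*x) by a straight-line fit of (x_i, ln(y_i)),
// reusing the hypothetical fitLine() helper sketched above; requires y_i > 0.
static void fitExponential(const std::vector<double>& x, const std::vector<double>& y,
                           double& A, double& B)
{
    std::vector<double> ly(y.size());
    for (size_t i = 0; i < y.size(); ++i) ly[i] = std::log(y[i]); // transform the y-data
    double a = 0, b = 0;
    fitLine(x, ly, a, b);   // linear fit in the transformed coordinates
    A = std::exp(a);        // back-transform the intercept: A = exp(a)
    B = b;                  // the slope is unchanged: B = b
}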
@@ -2108,7 +2108,7 @@ inline void jkqtpstatRegression(JKQTPStatRegressionModelType type, InputItX firs


 /*! \brief calculate the robust linear regression coefficients for a given data range \a firstX / \a firstY ... \a lastX / \a lastY where the model is defined by \a type
-    So this function solves the Lp-norm optimization problem: \f[ (a^\ast, b^\ast)=\argmin\limits_{a,b}\sum\limits_i\left(y_i-f_\mbox{type}(x_i,a,b)\right)^p \f]
+    So this function solves the Lp-norm optimization problem: \f[ (a^\ast, b^\ast)=\mathop{arg\;min}\limits_{a,b}\sum\limits_i\left(y_i-f_{\text{type}}(x_i,a,b)\right)^p \f]
     by reducing it to a linear fit by transforming x- and/or y-data
     \ingroup jkqtptools_math_statistics_regression

@@ -2155,7 +2155,7 @@ inline void jkqtpstatRobustIRLSRegression(JKQTPStatRegressionModelType type, Inp


 /*! \brief calculate the robust linear regression coefficients for a given data range \a firstX / \a firstY ... \a lastX / \a lastY where the model is defined by \a type
-    So this function solves the Lp-norm optimization problem: \f[ (a^\ast, b^\ast)=\argmin\limits_{a,b}\sum\limits_i\left(y_i-f_\mbox{type}(x_i,a,b)\right)^p \f]
+    So this function solves the Lp-norm optimization problem: \f[ (a^\ast, b^\ast)=\mathop{arg\;min}\limits_{a,b}\sum\limits_i\left(y_i-f_{\text{type}}(x_i,a,b)\right)^p \f]
     by reducing it to a linear fit by transforming x- and/or y-data
     \ingroup jkqtptools_math_statistics_regression

@@ -2222,7 +2222,7 @@ inline void jkqtpstatWeightedRegression(JKQTPStatRegressionModelType type, Input

     This function uses jkqtpstatLinSolve() to solve the system of equations
     \f[ \begin{bmatrix} y_1\\ y_2\\ y_3 \\ \vdots \\ y_n \end{bmatrix}= \begin{bmatrix} 1 & x_1 & x_1^2 & \dots & x_1^P \\ 1 & x_2 & x_2^2 & \dots & x_2^P\\ 1 & x_3 & x_3^2 & \dots & x_3^P \\ \vdots & \vdots & \vdots & & \vdots \\ 1 & x_n & x_n^2 & \dots & x_n^P \end{bmatrix} \begin{bmatrix} p_0\\ p_1\\ p_2\\ \vdots \\ p_P \end{bmatrix} \f]
-    \f[ \vec{y}=V\vec{p}\ \ \ \ \ \Rightarrow\ \ \ \ \ \vec{p}=(V^TV)^{-1}V^T\vec{y} \]
+    \f[ \vec{y}=V\vec{p}\ \ \ \ \ \Rightarrow\ \ \ \ \ \vec{p}=(V^TV)^{-1}V^T\vec{y} \f]

     \image html jkqtplotter_simpletest_datastore_regression_polynom.png

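The two formulas in this hunk describe the Vandermonde system and its normal-equation solution \vec{p}=(V^TV)^{-1}V^T\vec{y}. The C++ sketch below illustrates that computation; the function name fitPolynomial and the naive Gaussian elimination are assumptions of the sketch, whereas the library delegates the solve to jkqtpstatLinSolve() as stated above.

#include <vector>
#include <cmath>
#include <cstddef>
#include <utility>

// Illustrative sketch: polynomial least squares of degree P via the normal equations.
// The entries of V^T*V and V^T*y are accumulated as power sums instead of forming V,
// and the small (P+1)x(P+1) system is solved by Gaussian elimination with pivoting.
static std::vector<double> fitPolynomial(const std::vector<double>& x,
                                         const std::vector<double>& y, size_t P)
{
    const size_t M = P + 1;
    std::vector<std::vector<double>> A(M, std::vector<double>(M, 0.0)); // V^T*V
    std::vector<double> c(M, 0.0);                                      // V^T*y
    for (size_t i = 0; i < x.size(); ++i) {
        for (size_t j = 0; j < M; ++j) {
            c[j] += std::pow(x[i], double(j)) * y[i];
            for (size_t k = 0; k < M; ++k) A[j][k] += std::pow(x[i], double(j + k));
        }
    }
    for (size_t col = 0; col < M; ++col) {          // forward elimination
        size_t piv = col;
        for (size_t r = col + 1; r < M; ++r)
            if (std::fabs(A[r][col]) > std::fabs(A[piv][col])) piv = r;
        std::swap(A[col], A[piv]);
        std::swap(c[col], c[piv]);
        for (size_t r = col + 1; r < M; ++r) {
            const double f = A[r][col] / A[col][col];
            for (size_t k = col; k < M; ++k) A[r][k] -= f * A[col][k];
            c[r] -= f * c[col];
        }
    }
    std::vector<double> p(M, 0.0);                  // back-substitution
    for (size_t r = M; r-- > 0; ) {
        double s = c[r];
        for (size_t k = r + 1; k < M; ++k) s -= A[r][k] * p[k];
        p[r] = s / A[r][r];
    }
    return p;   // coefficients p_0 ... p_P of f(x) = p_0 + p_1*x + ... + p_P*x^P
}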
@@ -2401,6 +2401,11 @@ QString jkqtpstatPolynomialModel2Latex(PolyItP firstP, PolyItP lastP) {




 #endif // JKQTPSTATISTICSTOOLS_H_INCLUDED
