diff --git a/lib/jkqtcommon/jkqtpstatisticstools.h b/lib/jkqtcommon/jkqtpstatisticstools.h index 4c35b07634..ad4946b2cd 100644 --- a/lib/jkqtcommon/jkqtpstatisticstools.h +++ b/lib/jkqtcommon/jkqtpstatisticstools.h @@ -1800,7 +1800,7 @@ inline void jkqtpstatKDE1D(InputIt first, InputIt last, double binXLeft, double /*! \brief calculate the linear regression coefficients for a given data range \a firstX / \a firstY ... \a lastX / \a lastY where the model is \f$ f(x)=a+b\cdot x \f$ - So this function solves the least-squares optimization problem: \f[ (a^\ast, b^\ast)=\argmin\limits_{a,b}\sum\limits_i\left(y_i-(a+b\cdot x_i)\right)^2 \f] + So this function solves the least-squares optimization problem: \f[ (a^\ast, b^\ast)=\mathop{arg\;min}\limits_{a,b}\sum\limits_i\left(y_i-(a+b\cdot x_i)\right)^2 \f] \ingroup jkqtptools_math_statistics_regression \tparam InputItX standard iterator type of \a firstX and \a lastX. @@ -1857,7 +1857,7 @@ inline void jkqtpstatLinearRegression(InputItX firstX, InputItX lastX, InputItY /*! \brief calculate the weighted linear regression coefficients for a given for a given data range \a firstX / \a firstY / \a firstW ... \a lastX / \a lastY / \a lastW where the model is \f$ f(x)=a+b\cdot x \f$ - So this function solves the least-squares optimization problem: \f[ (a^\ast, b^\ast)=\argmin\limits_{a,b}\sum\limits_iw_i^2\cdot\left(y_i-(a+b\cdot x_i)\right)^2 \f] + So this function solves the least-squares optimization problem: \f[ (a^\ast, b^\ast)=\mathop{arg\;min}\limits_{a,b}\sum\limits_iw_i^2\cdot\left(y_i-(a+b\cdot x_i)\right)^2 \f] \ingroup jkqtptools_math_statistics_regression \tparam InputItX standard iterator type of \a firstX and \a lastX. @@ -1937,7 +1937,7 @@ inline void jkqtpstatLinearWeightedRegression(InputItX firstX, InputItX lastX, I /*! \brief calculate the (robust) iteratively reweighted least-squares (IRLS) estimate for the parameters of the model \f$ f(x)=a+b\cdot x \f$ for a given data range \a firstX / \a firstY ... 
\a lastX / \a lastY So this function finds an outlier-robust solution to the optimization problem: - \f[ (a^\ast,b^\ast)=\argmin\limits_{a,b}\sum\limits_i|a+b\cdot x_i-y_i|^p \f] + \f[ (a^\ast,b^\ast)=\mathop{arg\;min}\limits_{a,b}\sum\limits_i|a+b\cdot x_i-y_i|^p \f] \ingroup jkqtptools_math_statistics_regression \ingroup jkqtptools_math_statistics_regression @@ -1957,16 +1957,16 @@ inline void jkqtpstatLinearWeightedRegression(InputItX firstX, InputItX lastX, I This is a simple form of the IRLS algorithm to estimate the parameters a and b in a linear model \f$ f(x)=a+b\cdot x \f$. This algorithm solves the optimization problem for a \f$ L_p\f$-norm: - \f[ (a^\ast,b^\ast)=\argmin\limits_{a,b}\sum\limits_i|a+b\cdot x_i-y_i|^p \f] + \f[ (a^\ast,b^\ast)=\mathop{arg\;min}\limits_{a,b}\sum\limits_i|a+b\cdot x_i-y_i|^p \f] by iteratively optimization weights \f$ \vec{w} \f$ and solving a weighted least squares problem in each iteration: - \f[ (a_n,b_n)=\argmin\limits_{a,b}\sum\limits_i|a+b\cdot x_i-y_i|^{(p-2)}\cdot|a+b\cdot x_i-y_i|^2 \f] + \f[ (a_n,b_n)=\mathop{arg\;min}\limits_{a,b}\sum\limits_i|a+b\cdot x_i-y_i|^{(p-2)}\cdot|a+b\cdot x_i-y_i|^2 \f] The IRLS-algorithm works as follows: - calculate initial \f$ a_0\f$ and \f$ b_0\f$ with unweighted regression from x and y - perform a number of iterations (parameter \a iterations ). In each iteration \f$ n\f$: - calculate the error vector \f$\vec{e}\f$: \f[ e_i = a+b\cdot x_i -y_i \f] - - estimate new weights \f$\vec{w}\f$: \[ w_i=|e_i|^{(p-2)/2} \] + - estimate new weights \f$\vec{w}\f$: \f[ w_i=|e_i|^{(p-2)/2} \f] - calculate new estimates \f$ a_n\f$ and \f$ b_n\f$ with weighted regression from \f$ \vec{x}\f$ and \f$ \vec{y}\f$ and \f$ \vec{w}\f$ . - return the last estimates \f$ a_n\f$ and \f$ b_n\f$ @@ -2064,7 +2064,7 @@ JKQTP_LIB_EXPORT std::pair,std::function