일반적인 conic section 피팅은 주어진 데이터
의 계수
그리고 얼마나 잘 피팅되었는가에 대한 척도가 필요한데 여기서는 주어진 데이터의 대수적 거리
을 최소화시키는 계수 벡터
즉, 주어진 제한조건
이 문제의 풀이는 직전의 포스팅에서 다룬 바 있는데
이므로 이를 최소화시키기 위해서는 양의 값을 갖는 고유값 중에 최소에 해당하는 고유벡터를 고르면 된다. 그런데 고유값
Least Squares Fitting of Ellipses
일반적인 이차곡선은 다음의 이차식으로 표현이 된다:
kipl.tistory.com
Generalized eigenvalues problem
kipl.tistory.com
Ref: https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/ellipse-pami.pdf

/* Direct least-squares ellipse fitting (Fitzgibbon-Pilu-Fisher, PAMI 1999).
** Minimizes the algebraic distance ||D*u||^2 of the conic
** ax^2 + bxy + cy^2 + dx + ey + f = 0 subject to the ellipse
** constraint ~u*C*u = 1 (i.e. 4ac - b^2 = 1), solved as a
** generalized eigenvalue problem via S^(-1/2).
** points : input point set; at least 6 points are required.
** einfo  : output geometric parameters as filled by PoseEllipse();
**          [0],[1] = center, [2],[3] = radii (per the rescaling below),
**          remaining entries as defined by PoseEllipse().
** returns the fitting error from FitError(), or -1 on failure
** (too few points, degenerate/coincident point set, or no unique
** positive eigenvalue of the constrained system). */
double FitEllipse(std::vector<CPoint>& points, double einfo[6]) {
    if (points.size() < 6) return -1;
    double eigvals[6];
    std::vector<double> D(6 * points.size()); /* design matrix, row-major */
    double S[36];    /* scatter matrix: S = ~D * D */
    double C[36];    /* constraint matrix */
    double EIGV[36]; /* eigenvectors, one per row */
    /* shift all points so their centroid sits at the origin */
    double offx = 0, offy = 0;
    for (int i = points.size(); i-- > 0; ) {
        offx += points[i].x;
        offy += points[i].y;
    }
    offx /= points.size();
    offy /= points.size();
    /* for numerical stability, scale coordinates down to ~[-1:1];
    ** BUGFIX: seed the min/max search with BOTH coordinates of
    ** points[0] (the old code seeded smax with x only and smin with
    ** y only, and the loop below never revisits index 0). */
    double smax = max(points[0].x, points[0].y);
    double smin = min(points[0].x, points[0].y);
    for (int i = points.size(); i-- > 1; ) {
        smax = max(smax, max(points[i].x, points[i].y));
        smin = min(smin, min(points[i].x, points[i].y));
    }
    double scale = smax - smin;
    if (scale <= 0) return -1; /* all points coincide: no conic exists */
    double invscale = 1 / scale;
    /* fill design matrix D: row i = (x^2, xy, y^2, x, y, 1) of the
    ** normalized i-th point */
    for (int i = points.size(); i-- > 0; ) {
        double x = (points[i].x - offx) * invscale;
        double y = (points[i].y - offy) * invscale;
        D[i*6 + 0] = x*x; D[i*6 + 1] = x*y;
        D[i*6 + 2] = y*y; D[i*6 + 3] = x;
        D[i*6 + 4] = y;   D[i*6 + 5] = 1;
    }
    /* scatter matrix S = ~D * D (6x6, symmetric): compute the upper
    ** triangle, then mirror it */
    for (int i = 0; i < 6; i++)
        for (int j = i; j < 6; j++) {
            double s = 0;
            for (int k = points.size(); k-- > 0; )
                s += D[k*6 + i] * D[k*6 + j];
            S[i*6 + j] = s;
        }
    for (int i = 1; i < 6; i++)
        for (int j = 0; j < i; j++)
            S[i*6 + j] = S[j*6 + i];
    /* constraint matrix C encoding ~u*C*u = 4ac - b^2 */
    for (int i = 0; i < 36; i++) C[i] = 0;
    C[12] = 2;  /* (2,0): a*c term */
    C[2]  = 2;  /* (0,2): c*a term */
    C[7]  = -1; /* (1,1): -b^2 term */
    /* eigen-decomposition of S; each row of RT is an eigenvector */
    double RT[36];
    JacobiEigens(S, RT, eigvals, 6, 0);
    /* build INVQ = R * diag(1/sqrt(eigval)) * RT = S^(-1/2).
    ** S = ~D*D is positive semi-definite; a non-positive eigenvalue
    ** means the point set is degenerate, so fail cleanly instead of
    ** producing NaN via sqrt()/division. */
    double R[36];
    for (int i = 0; i < 6; i++) {
        if (eigvals[i] <= 0) return -1; /* degenerate configuration */
        eigvals[i] = sqrt(eigvals[i]);
        for (int k = 0; k < 6; k++) {
            R[k*6 + i] = RT[i*6 + k];   /* R = transpose(RT) */
            RT[i*6 + k] /= eigvals[i];  /* scale row i by 1/sqrt(eigval) */
        }
    }
    double INVQ[36];
    _MatrixMul(R, RT, 6, INVQ);
    /* form INVQ * C * INVQ and take its eigen-decomposition */
    double TMP1[36], TMP2[36];
    _MatrixMul(INVQ, C, 6, TMP1);
    _MatrixMul(TMP1, INVQ, 6, TMP2);
    JacobiEigens(TMP2, EIGV, eigvals, 6, 0);
    /* eigvals arrive in descending order of |eigval|; only 3 can be
    ** non-zero and exactly one of them must be positive for a valid
    ** ellipse — locate that unique positive one */
    int index = -1, count = 0;
    for (int i = 0; i < 3; i++) {
        if (eigvals[i] > 0) {
            index = i;
            count++;
        }
    }
    if ((count != 1) || (index == -1))
        return -1;
    /* recover the conic coefficient vector: u = INVQ * v */
    double u[6];
    double *vec = &EIGV[index*6];
    for (int i = 0; i < 6; i++) {
        double s = 0;
        for (int k = 0; k < 6; k++) s += INVQ[i*6 + k] * vec[k];
        u[i] = s;
    }
    /* convert conic coefficients to geometric parameters */
    PoseEllipse(u, einfo);
    /* undo normalization: rescale center(0,1) and radii(2,3),
    ** then translate the center back */
    for (int i = 0; i < 4; i++) einfo[i] *= scale;
    einfo[0] += offx;
    einfo[1] += offy;
    return FitError(points, offx, offy, scale, u);
}
'Image Recognition > Fundamental' 카테고리의 다른 글
Linear Least Square Fitting: perpendicular offsets (0) | 2024.03.22 |
---|---|
Cubic Spline Kernel (1) | 2024.03.12 |
Bilateral Filter (0) | 2024.02.18 |
파라미터 공간에서 본 최소자승 Fitting (0) | 2023.05.21 |
영상에 Impulse Noise 넣기 (2) | 2023.02.09 |