Partitioning an image into regions of uniform color is very useful for compression, segmentation, and similar tasks. One variation is to merge those of the four child nodes that do not differ much from the parent node back into the parent; a rough sketch of such a merge step is given after the implementation below.

#include "QuadTree.h"
#include <queue>
#include <math.h>
QuadNode::QuadNode() {
    for (int i = 0; i < 4; i++) children[i] = NULL;
    rect = CRect(0, 0, 0, 0);
    hasChildren = false;
    mean = -1;
    stdev = 0;
}
QuadNode::QuadNode(CRect& rc) {
    for (int i = 0; i < 4; i++) children[i] = NULL;
    rect = rc;
    hasChildren = false;
    mean = -1;
    stdev = 0;
}
void QuadNode::Subdivide() {
    hasChildren = true;
    children[0] = new QuadNode(CRect(rect.TopLeft(), rect.CenterPoint()));
    children[1] = new QuadNode(CRect(rect.left, rect.CenterPoint().y, rect.CenterPoint().x, rect.bottom));
    children[2] = new QuadNode(CRect(rect.CenterPoint(), rect.BottomRight()));
    children[3] = new QuadNode(CRect(rect.CenterPoint().x, rect.top, rect.right, rect.CenterPoint().y));
}
// Free the memory allocated to the quadtree; the root node is freed separately.
void QuadNode::Clear() {
    if (this->hasChildren)
        for (int i = 0; i < 4; i++)
            if (children[i]) {
                children[i]->Clear();
                delete children[i];
            }
}
QuadTree::QuadTree(CRect rc, CRaster& image) {
    rect = rc & image.GetRect(); // keep rect inside the image.
    raster = image;
}
void QuadTree::Build() {
    root = new QuadNode();
    root->rect = this->rect;
    std::queue<QuadNode *> TreeQueue;
    TreeQueue.push(root);
    while (!TreeQueue.empty()) {
        QuadNode* node = TreeQueue.front();
        TreeQueue.pop();
        CRect rc = node->rect;
        // evaluate IsHomogenous() first so that mean/stdev are set for every node,
        // including small leaves that will not be subdivided further;
        if (!IsHomogenous(node) && rc.Width() > 2 && rc.Height() > 2) {
            node->Subdivide();
            for (int i = 0; i < 4; i++) 
                TreeQueue.push(node->children[i]);
        }
    }
}
// Subdivision criterion: the standard deviation, the max-min difference, etc. can be used.
#define THRESH 15
bool QuadTree::IsHomogenous(QuadNode* node) {
    CRect rc = node->rect;
    double s = 0, ss = 0;
    int minval = 255, maxval = 0;
    for (int y = rc.top; y < rc.bottom; y++) {
        BYTE *p = (BYTE *)raster.GetLinePtr(y) + rc.left;
        for (int x = rc.left; x < rc.right; x++) {
            int a = *p++; 
            s += a; ss += a * a;
            maxval = max(maxval, a); minval = min(minval, a);
        }
    }
    int n = rc.Width() * rc.Height();
    node->mean = s / n;
    node->stdev = sqrt((ss - s * s / n) / n);
    return (node->stdev >= THRESH) ? false: true;
    //return (maxval - minval) > THRESH ? false: true;
};
// Collect the leaf nodes into a list.
void QuadTree::FindLeaf(QuadNode* node) {
    if (node == NULL) return;
    if (!node->hasChildren) {
        LeafList.push_back(node);
        return;
    }
    for (int i = 0; i < 4; i++) 
        FindLeaf(node->children[i]);
}
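
The merge step mentioned in the intro is not part of the class above. Below is a minimal sketch of one way to do it, assuming the QuadNode/QuadTree definitions above and that Build() has already set mean for every node: a node whose four children are all leaves with means close to the parent's mean is collapsed back into a single leaf. MergeChildren and MERGE_THRESH are illustrative names, and the all-or-nothing test is a simplification of the partial merge described in the text.

// Hypothetical helper (not in the original code): collapse a node whose four
// children are all leaves with means close to the parent's mean.
#define MERGE_THRESH 10
void MergeChildren(QuadNode* node) {
    if (node == NULL || !node->hasChildren) return;
    for (int i = 0; i < 4; i++)
        MergeChildren(node->children[i]);          // bottom-up;
    for (int i = 0; i < 4; i++) {                  // give up unless every child is a leaf
        if (node->children[i] == NULL || node->children[i]->hasChildren) return;
        if (fabs(node->children[i]->mean - node->mean) > MERGE_THRESH) return;
    }
    for (int i = 0; i < 4; i++) {                  // children are absorbed into the parent;
        delete node->children[i];
        node->children[i] = NULL;
    }
    node->hasChildren = false;                     // the parent becomes a leaf again;
}

Calling MergeChildren(qtree.root) between Build() and FindLeaf() would reduce the number of leaf rectangles at the cost of slightly less uniform regions.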

Usage example:

void test_main(CRaster& raster, CRaster& out) {
    ASSERT(raster.GetBPP() == 8);  // gray input;
    QuadTree qtree(raster.GetRect(), raster);
    qtree.Build();
    qtree.FindLeaf(qtree.root);
    out.SetDimensions(raster.GetSize(), 24); //RGB-color;
    // segment image;
    for (int i = 0; i < qtree.LeafList.size(); i++) {
        QuadNode *node = qtree.LeafList[i];
        int a = int(node->mean + .5);
        FillRegion(out, node->rect, RGB(a, a, a));  //gray average;
        // draw rectangle with red color;
        DrawRectangle(out, node->rect, RGB(0, 0, 0xFF));
    }
    qtree.root->Clear();
    delete qtree.root;
}

Harris response:

$$R = \mathrm{Det}(M) - k\,\mathrm{Trace}(M)^2, \qquad k \in [0.04,\, 0.06]$$

$$M = \begin{pmatrix} \sum w(x)\,I_x^2 & \sum w(x)\,I_x I_y \\ \sum w(x)\,I_x I_y & \sum w(x)\,I_y^2 \end{pmatrix}$$

Reference: http://www.ipol.im/pub/art/2018/229/article_lr.pdf

[Figure] Detected corners (number of corners = 86); corner response map before nonmaximal suppression.

double PI = 4.0 * atan(1.0);
double Gaussian(double x, double y, double sigma) {
    double sigma2 = sigma * sigma;
    double t = (x * x + y * y) / (2 * sigma2);
    double n = 1.0 / (2.0 * PI * sigma2);
    return n * exp( -t );
};

struct Corner {
    double x, y;
    double strength;
    int    flag;
};
int HarrisCornerDetector(BYTE *img, int w, int h,
                    double sigma/*=1.2*/, double kvalue/*=0.04~0.06*/,
                    double minMeasure/*=1e-1*/,
                    std::vector<Corner>& corners)
{
    // Sobel gradient 3x3
    const int nn[] = { -w - 1, -w, -w + 1, -1, 0, 1, w - 1, w, w + 1}; // 3x3 window
    const int sobelX[] = { -1, 0, 1, -2, 0, 2, -1, 0, 1};
    const int sobelY[] = { -1, -2, -1, 0, 0, 0, 1, 2, 1};
    const int xmax = w - 1, ymax = h - 1;
    std::vector<double> Gx(w * h, 0);
    std::vector<double> Gy(w * h, 0);
    for (int y = 1, pos = w; y < ymax; y++) { // pos = w; starting address;
        pos++; //skip x=0;
        for (int x = 1; x < xmax; x++, pos++) {
            double sx = 0, sy = 0;
            for (int k = 0; k < 9; k++) {
                int v = img[pos + nn[k]];
                sx += sobelX[k] * v;
                sy += sobelY[k] * v;
            }
            Gx[pos] = sx / (4 * 255);
            Gy[pos] = sy / (4 * 255);
        }
        pos++; // skip x=xmax;
    }
    // precompute the coefficients of the Gaussian filter
    const int radius = int(2 * sigma);
    const int window = 1 + 2 * radius;
    std::vector<double> gFilter(window * window);
    for (int j = -radius, pos = 0; j <= radius; j++)
        for (int i = -radius; i <= radius; i++, pos++)
            gFilter[pos] = Gaussian(i, j, sigma);

    // compute the Harris measure for each pixel of the image
    std::vector<double> harrismap(w * h, 0);
    double hmax = 0;
    for (int y = 0, pos = 0; y < h; y++) {
        for (int x = 0; x < w; x++, pos++) {
            // Convolve gradient with Gaussian filter:
            double filtersum = 0, sxx = 0, syy = 0, sxy = 0;
            for (int yk = y - radius, wpos = 0; yk <= y + radius; yk++) {
                for (int xk = x - radius; xk <= x + radius; xk++, wpos++) {
                    if (yk < 0 || yk >= h) continue;
                    if (xk < 0 || xk >= w) continue;
                    // Gaussian weight
                    double gauss = gFilter[wpos];
                    filtersum += gauss;
                    // convolution;
                    int loc = yk * w + xk; //image location;
                    sxx += gauss * Gx[loc] * Gx[loc];
                    syy += gauss * Gy[loc] * Gy[loc];
                    sxy += gauss * Gx[loc] * Gy[loc];
                }
            }
            sxx /= filtersum;
            syy /= filtersum;
            sxy /= filtersum;
            // Harris corner measure = Det(M) - kvalue.(Trace(M))^2
            double hmeasure = sxx * syy - sxy * sxy - kvalue * (sxx + syy) * (sxx + syy);
            if (hmeasure <= 0) continue;
            if (hmeasure > hmax) hmax = hmeasure;
            harrismap[pos] = hmeasure;
        }
    }
    // log scale
    double scale = 255 / log(1 + hmax);
    for (int pos = w * h; pos-- > 0;)
        harrismap[pos] = scale * log(1 + harrismap[pos]);
    // Nonmaximal suppression: keep only local maxima;
    minMeasure *= 255; //255 = the maximum value of Harris measure(in log scale);
    const int nn8[] = { -w - 1, -w, -w + 1, 1, w + 1, w, w - 1, -1}; // 8-neighbors;
    for (int y = 1, pos = w; y < ymax; y++) {
        pos++; //skip x=0;
        for (int x = 1; x < xmax; x++, pos++) {
            // thresholding: Harris measure > epsilon
            double hmeasure = harrismap[pos];
            if (hmeasure <= minMeasure) continue;
            bool isLocalMax = true;
            for (int k = 0; k < 8; k++) {
                double hk = harrismap[pos + nn8[k]];
                if (hk >= hmeasure) {
                    isLocalMax = false;
                    break;
                }
            }
            if (isLocalMax) {
                Corner c;
                c.x = x, c.y = y, c.strength = hmeasure, c.flag = 1;
                corners.push_back(c);
            }
        }
        pos++; //skip x=w-1;
    }

    // suppress corners too close to each other: keep the higher measure (flag = 0 marks suppressed corners);
    double minDistance = 8;
    std::vector<Corner>::iterator iter = corners.begin();
    while (iter != corners.end()) {
        for (int i = 0; i < corners.size(); i++) {
            Corner& c1 = corners[i];
            if (c1.flag == 0) continue;
            if ((c1.x == iter->x && c1.y == iter->y) ) continue;
            if (sqrt((c1.x-iter->x)*(c1.x-iter->x) + (c1.y-iter->y)*(c1.y-iter->y)) > minDistance) 
                continue;
            if (c1.strength < iter->strength) continue;
            iter->flag = 0;    // mark as suppressed instead of erasing;
            break;
        }
        iter++;
    }
    return 1;
};
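
A usage sketch, assuming the same 8-bit CRaster interface used in the quadtree example above (GetBPP, GetSize, GetLinePtr returning an MFC CSize/row pointer); the gray image is packed into a contiguous buffer before calling HarrisCornerDetector. The parameter values are illustrative defaults, not values prescribed here.

#include <vector>
#include <string.h>
void test_harris(CRaster& raster, std::vector<Corner>& corners) {
    ASSERT(raster.GetBPP() == 8);                  // gray input;
    CSize sz = raster.GetSize();
    const int w = sz.cx, h = sz.cy;
    // pack the image rows into one contiguous buffer;
    std::vector<BYTE> img(w * h);
    for (int y = 0; y < h; y++)
        memcpy(&img[y * w], (BYTE *)raster.GetLinePtr(y), w);
    HarrisCornerDetector(&img[0], w, h,
        1.2 /*sigma*/, 0.04 /*kvalue*/, 0.1 /*minMeasure*/, corners);
    // corners with flag == 0 were suppressed by the minimum-distance test;
}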

In Otsu binarization, the threshold is the pixel value that maximizes the between-class variance of foreground and background. This works well when the image histogram is bimodal, but fails for unimodal images. The valley-emphasis method finds the threshold in the same way as Otsu's method, but adds a weight that emphasizes the valleys of the histogram profile.

$$\text{Otsu's method:}\quad \text{threshold} = \underset{0\le t<256}{\operatorname{argmax}}\ \big(\omega_1(t)\,m_1^2(t) + \omega_2(t)\,m_2^2(t)\big)$$
$$\text{valley-emphasis:}\quad \text{threshold} = \underset{0\le t<256}{\operatorname{argmax}}\ (1-p_t)\big(\omega_1(t)\,m_1^2(t) + \omega_2(t)\,m_2^2(t)\big)$$

For images with a bimodal distribution, the result is almost identical to Otsu's. Separation into more than two classes is also easy to implement.

 

Reference: http://msn.iecs.fcu.edu.tw/report/data/ori_paper/2006-12-21/2006-Automatic%20thresholding%20for%20defect%20detection.pdf

[Figure] Images taken from the reference paper. Middle = Otsu, right = valley-emphasis.


int valley_emphasis_Otsu(int hist[], int N) {
    int istart = 0, iend = N - 1;
    while (!hist[istart]) istart++;
    while (!hist[iend])   iend--;
    double st = 0, sxt = 0;           // total count, total first moment;
    for (int i = istart; i <= iend;i++) { 
        st += hist[i]; 
        sxt += double(i) * hist[i];
    }
    int winner = istart; 
    double maxgain = 0;
    double s0 = 0, sx0 = 0;
    for (int i = istart; i < iend; i++) {       // i == iend would leave class 2 empty;
        s0  += hist[i];                         // class-1 weight (unnormalized w1);
        sx0 += double(i) * hist[i];             // class-1 first moment;
        double s1  = st - s0;                   // class-2 weight (unnormalized w2);
        double sx1 = sxt - sx0;                 // class-2 first moment;
        double m0  = sx0 / s0;                  // class-1 mean m1(t);
        double m1  = sx1 / s1;                  // class-2 mean m2(t);
        // valley-emphasis weight (1 - p_t) times the between-class variance term;
        double gain = (1 - hist[i] / st) * (s0 * m0 * m0 + s1 * m1 * m1);
        if (gain > maxgain) {
            maxgain = gain; 
            winner = i;
        }
    }
    return winner;
};
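
A usage sketch, again assuming the 8-bit CRaster interface from the examples above: the gray-level histogram is accumulated and passed to valley_emphasis_Otsu.

int threshold_gray(CRaster& raster) {
    ASSERT(raster.GetBPP() == 8);                  // gray input;
    int hist[256] = {0};
    CRect rc = raster.GetRect();
    for (int y = rc.top; y < rc.bottom; y++) {
        BYTE *p = (BYTE *)raster.GetLinePtr(y) + rc.left;
        for (int x = rc.left; x < rc.right; x++)
            hist[*p++]++;                          // accumulate the histogram;
    }
    return valley_emphasis_Otsu(hist, 256);        // valley-weighted Otsu threshold;
}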

 
