cvExtractSURF

classic Classic list List threaded Threaded
20 messages Options
Reply | Threaded
Open this post in threaded view
|

cvExtractSURF

yair_movshovitz
Hi Everyone,

I'm trying to understand how to use the SURF features capabilities of
openCV.
My scenario is as follows:
I have two rectangled areas in an image, which are supposed to bound
the same object. I would like to see how good is this assumption. In
other words I would like to see how many features they share.

Can someone drop me a hint on how to use the SURF implementation of
openCV (or direct me to somewhere that has some documentation of it)

Thanks,
Yair

Reply | Threaded
Open this post in threaded view
|

Re: cvExtractSURF

Jostein Austvik Jacobsen
If you've got your two rectangled areas stored as img1 and img2 you could do
this to extract its keypoints and corresponding descriptors:

// 1 = use the extended 128-element SURF descriptor (0 gives the 64-element one)
#define EXTENDED_DESCRIPTOR 1
// output sequences: keypoints (CvSURFPoint elements) and their float descriptors
CvSeq *kp1=NULL, *kp2=NULL;
CvSeq *desc1=NULL, *desc2=NULL;
// backing storage for the sequences; must stay alive while kp*/desc* are in use
CvMemStorage *storage = cvCreateMemStorage(0);
// 600 is the hessian threshold: higher -> fewer, stronger features
cvExtractSURF(img1, NULL, &kp1, &desc1, storage, cvSURFParams(600,
EXTENDED_DESCRIPTOR));
cvExtractSURF(img2, NULL, &kp2, &desc2, storage, cvSURFParams(600,
EXTENDED_DESCRIPTOR));

You will have to correlate the descriptors with each other to determine
which keypoints in each rectangle corresponds to one another. You could use
a BBF tree which is implemented in the latest version of OpenCV, but unless
your rectangle is huge, you might just as well just correlate them the
standard way, which I do like this:

#define CORRELATION_THRESHOLD 0.7
// brute-force attempt at correlating the two sets of features
// Brute-force matching of two SURF descriptor sets by normalized
// cross-correlation. Keypoint i (from kp1) and j (from kp2) are matched
// when each is the other's best correlation partner and the correlation
// exceeds CORRELATION_THRESHOLD. On success, *points1 and *points2 receive
// newly allocated 1xN CV_32FC2 matrices of matching keypoint coordinates;
// the caller owns them and must cvReleaseMat() them. The outputs are left
// untouched when no match is found.
//
// Fixes over the original: the early "no matches" return no longer leaks
// the eight work buffers; best2[best1[i]] is no longer indexed with -1
// when a keypoint has no partner; zero-deviation (constant) descriptors
// no longer cause a division by zero; malloc results are checked.
void bruteMatch(CvMat **points1, CvMat **points2, CvSeq *kp1, CvSeq *desc1,
CvSeq *kp2, CvSeq *desc2) {
    int i, j, k;
    int n1, n2;
    int descriptor_size = EXTENDED_DESCRIPTOR ? 128 : 64;
    double *avg1 = NULL, *avg2 = NULL, *dev1 = NULL, *dev2 = NULL;
    double *best1corr = NULL, *best2corr = NULL;
    int *best1 = NULL, *best2 = NULL;
    float *seq1, *seq2;
    double corr;

    if (!kp1 || !kp2 || kp1->total <= 0 || kp2->total <= 0)
        return; // nothing to correlate

    n1 = kp1->total;
    n2 = kp2->total;
    avg1 = (double*)malloc(sizeof(double)*n1);
    avg2 = (double*)malloc(sizeof(double)*n2);
    dev1 = (double*)malloc(sizeof(double)*n1);
    dev2 = (double*)malloc(sizeof(double)*n2);
    best1 = (int*)malloc(sizeof(int)*n1);
    best2 = (int*)malloc(sizeof(int)*n2);
    best1corr = (double*)malloc(sizeof(double)*n1);
    best2corr = (double*)malloc(sizeof(double)*n2);
    if (!avg1 || !avg2 || !dev1 || !dev2 ||
        !best1 || !best2 || !best1corr || !best2corr)
        goto cleanup; // allocation failure: free whatever we did get

    for (i = 0; i < n1; i++) {
        // mean and standard deviation of descriptor i
        avg1[i] = 0;
        dev1[i] = 0;
        seq1 = (float*)cvGetSeqElem(desc1, i);
        for (k = 0; k < descriptor_size; k++) avg1[i] += seq1[k];
        avg1[i] /= descriptor_size;
        for (k = 0; k < descriptor_size; k++)
            dev1[i] += (seq1[k]-avg1[i])*(seq1[k]-avg1[i]);
        dev1[i] = sqrt(dev1[i]/descriptor_size);

        best1[i] = -1;      // no partner found yet
        best1corr[i] = -1.;
    }
    for (j = 0; j < n2; j++) {
        // mean and standard deviation of descriptor j
        avg2[j] = 0;
        dev2[j] = 0;
        seq2 = (float*)cvGetSeqElem(desc2, j);
        for (k = 0; k < descriptor_size; k++) avg2[j] += seq2[k];
        avg2[j] /= descriptor_size;
        for (k = 0; k < descriptor_size; k++)
            dev2[j] += (seq2[k]-avg2[j])*(seq2[k]-avg2[j]);
        dev2[j] = sqrt(dev2[j]/descriptor_size);

        best2[j] = -1;      // no partner found yet
        best2corr[j] = -1.;
    }
    for (i = 0; i < n1; i++) {
        seq1 = (float*)cvGetSeqElem(desc1, i);
        for (j = 0; j < n2; j++) {
            // a constant descriptor has zero deviation; its correlation
            // is undefined, so skip the pair instead of dividing by zero
            if (dev1[i] == 0. || dev2[j] == 0.)
                continue;
            corr = 0;
            seq2 = (float*)cvGetSeqElem(desc2, j);
            for (k = 0; k < descriptor_size; ++k)
                corr += (seq1[k]-avg1[i])*(seq2[k]-avg2[j]);
            corr /= (descriptor_size-1)*dev1[i]*dev2[j];
            if (corr > best1corr[i]) {
                best1corr[i] = corr;
                best1[i] = j;
            }
            if (corr > best2corr[j]) {
                best2corr[j] = corr;
                best2[j] = i;
            }
        }
    }
    // count mutual best matches above the threshold; best1[i] >= 0 guards
    // the best2[] lookup against keypoints that never found a partner
    j = 0;
    for (i = 0; i < n1; i++)
        if (best1[i] >= 0 && best2[best1[i]] == i &&
            best1corr[i] > CORRELATION_THRESHOLD)
            j++;
    if (j > 0) {
        CvPoint2D32f *p1, *p2;
        *points1 = cvCreateMat(1,j,CV_32FC2);
        *points2 = cvCreateMat(1,j,CV_32FC2);
        j = 0;
        for (i = 0; i < n1; i++) {
            if (best1[i] >= 0 && best2[best1[i]] == i &&
                best1corr[i] > CORRELATION_THRESHOLD) {
                p1 = &((CvSURFPoint*)cvGetSeqElem(kp1,i))->pt;
                p2 = &((CvSURFPoint*)cvGetSeqElem(kp2,best1[i]))->pt;
                (*points1)->data.fl[j*2]   = p1->x;
                (*points1)->data.fl[j*2+1] = p1->y;
                (*points2)->data.fl[j*2]   = p2->x;
                (*points2)->data.fl[j*2+1] = p2->y;
                j++;
            }
        }
    }
cleanup:
    // the original leaked all eight buffers on the "no matches" path;
    // free(NULL) is a no-op, so unconditional frees are safe here
    free(best2corr);
    free(best1corr);
    free(best2);
    free(best1);
    free(avg1);
    free(avg2);
    free(dev1);
    free(dev2);
}

If you construct a fundamental matrix (a model) for the transformation
between the two rectangles, you can further determine which correspondences
are false (by how well they fit the model) and remove them, which I like to
do like this:

// 3x3 single-channel matrix to receive the fundamental matrix
F = cvCreateMat(3,3,CV_32FC1);
// per-correspondence inlier(1)/outlier(0) flags, filled in by RANSAC
CvMat *status = cvCreateMat(1,points1->cols,CV_8UC1);
// 1. = max distance to the epipolar line, 0.99 = RANSAC confidence level
int fm_count = cvFindFundamentalMat( points1,points2,F,
CV_FM_RANSAC,1.,0.99,status );
removeOutliers(&points1,&points2,status);

where removeOutliers() is a function I wrote to clean up after
cvFindFundamentalMat():

// iterates the set of putative correspondences and removes correspondences
marked as outliers by cvFindFundamentalMat()
void removeOutliers(CvMat **points1, CvMat **points2, CvMat *status) {
    CvMat *points1_ = *points1;
    CvMat *points2_ = *points2;
    int count = 0;
    for (int i = 0; i < status->cols; i++) if (CV_MAT_ELEM(*status,unsigned
char,0,i)) count++;
    if (!count) { // no inliers
        *points1 = NULL;
        *points2 = NULL;
    }
    else {
        *points1 = cvCreateMat(1,count,CV_32FC2);
        *points2 = cvCreateMat(1,count,CV_32FC2);
        int j = 0;
        for (int i = 0; i < status->cols; i++) {
            if (CV_MAT_ELEM(*status,unsigned char,0,i)) {
                (*points1)->data.fl[j*2] = points1_->data.fl[i*2];
//p1->x
                (*points1)->data.fl[j*2+1] = points1_->data.fl[i*2+1];
//p1->y
                (*points2)->data.fl[j*2] = points2_->data.fl[i*2];
//p2->x
                (*points2)->data.fl[j*2+1] = points2_->data.fl[i*2+1];
//p2->y
                j++;
            }
        }
    }
    cvReleaseMat(&points1_);
    cvReleaseMat(&points2_);
}


I hope this helps.

-Jostein


2009/1/8 yair_movshovitz <[hidden email]>

>   Hi Everyone,
>
> I'm trying to understand how to use the SURF features capabilities of
> openCV.
> My scenario is as follows:
> I have two rectangled areas in an image, which are supposed to bound
> the same object. I would like to see how good is this assumption. In
> other words I would like to see how many features they share.
>
> Can someone drop me a hint on how to use the SURF implementation of
> openCV (or direct me to somewhere that has some documentation of it)
>
> Thanks,
> Yair
>
>  
>


[Non-text portions of this message have been removed]

Reply | Threaded
Open this post in threaded view
|

Re: cvExtractSURF

yair_movshovitz
Hi Jostein,

Thanks a lot for your help!

Can you please explain the function parameters of cvExtractSURF?
I mean in - cvExtractSURF(img1, NULL, &kp1, &desc1, storage,
cvSURFParams(600,EXTENDED_DESCRIPTOR));
what is the role of kp1, desc1, storage and the SURFParams?
is storage just a temp area for the algorithm to use?


Thanks again
Yair

--- In [hidden email], "Jostein Austvik Jacobsen"
<josteinaj@...> wrote:
>
> If you've got your two rectangled areas stored as img1 and img2 you
could do

> this to extract its keypoints and corresponding descriptors:
>
> #define EXTENDED_DESCRIPTOR 1
> CvSeq *kp1=NULL, *kp2=NULL;
> CvSeq *desc1=NULL, *desc2=NULL;
> CvMemStorage *storage = cvCreateMemStorage(0);
> cvExtractSURF(img1, NULL, &kp1, &desc1, storage, cvSURFParams(600,
> EXTENDED_DESCRIPTOR));
> cvExtractSURF(img2, NULL, &kp2, &desc2, storage, cvSURFParams(600,
> EXTENDED_DESCRIPTOR));
>
> You will have to correlate the descriptors with each other to determine
> which keypoints in each rectangle corresponds to one another. You
could use
> a BBF tree which is implemented in the latest version of OpenCV, but
unless
> your rectangle is huge, you might just as well just correlate them the
> standard way, which I do like this:
>
> #define CORRELATION_THRESHOLD 0.7
> // brute-force attempt at correlating the two sets of features
> void bruteMatch(CvMat **points1, CvMat **points2, CvSeq *kp1, CvSeq
*desc1,

> CvSeq *kp2, CvSeq *desc2) {
>     int i,j,k;
>     double* avg1 = (double*)malloc(sizeof(double)*kp1->total);
>     double* avg2 = (double*)malloc(sizeof(double)*kp2->total);
>     double* dev1 = (double*)malloc(sizeof(double)*kp1->total);
>     double* dev2 = (double*)malloc(sizeof(double)*kp2->total);
>     int* best1 = (int*)malloc(sizeof(int)*kp1->total);
>     int* best2 = (int*)malloc(sizeof(int)*kp2->total);
>     double* best1corr = (double*)malloc(sizeof(double)*kp1->total);
>     double* best2corr = (double*)malloc(sizeof(double)*kp2->total);
>     float *seq1, *seq2;
>     int descriptor_size = EXTENDED_DESCRIPTOR ? 128 : 64;
>     for (i=0; i<kp1->total; i++) {
>         // find average and standard deviation of each descriptor
>         avg1[i] = 0;
>         dev1[i] = 0;
>         seq1 = (float*)cvGetSeqElem(desc1, i);
>         for (k=0; k<descriptor_size; k++) avg1[i] += seq1[k];
>         avg1[i] /= descriptor_size;
>         for (k=0; k<descriptor_size; k++) dev1[i] +=
> (seq1[k]-avg1[i])*(seq1[k]-avg1[i]);
>         dev1[i] = sqrt(dev1[i]/descriptor_size);
>
>         // initialize best1 and best1corr
>         best1[i] = -1;
>         best1corr[i] = -1.;
>     }
>     for (j=0; j<kp2->total; j++) {
>         // find average and standard deviation of each descriptor
>         avg2[j] = 0;
>         dev2[j] = 0;
>         seq2 = (float*)cvGetSeqElem(desc2, j);
>         for (k=0; k<descriptor_size; k++) avg2[j] += seq2[k];
>         avg2[j] /= descriptor_size;
>         for (k=0; k<descriptor_size; k++) dev2[j] +=
> (seq2[k]-avg2[j])*(seq2[k]-avg2[j]);
>         dev2[j] = sqrt(dev2[j]/descriptor_size);
>
>         // initialize best2 and best2corr
>         best2[j] = -1;
>         best2corr[j] = -1.;
>     }
>     double corr;
>     for (i = 0; i < kp1->total; ++i) {
>         seq1 = (float*)cvGetSeqElem(desc1, i);
>         for (j = 0; j < kp2->total; ++j) {
>             corr = 0;
>             seq2 = (float*)cvGetSeqElem(desc2, j);
>             for (k = 0; k < descriptor_size; ++k)
>                 corr += (seq1[k]-avg1[i])*(seq2[k]-avg2[j]);
>             corr /= (descriptor_size-1)*dev1[i]*dev2[j];
>             if (corr > best1corr[i]) {
>                 best1corr[i] = corr;
>                 best1[i] = j;
>             }
>             if (corr > best2corr[j]) {
>                 best2corr[j] = corr;
>                 best2[j] = i;
>             }
>         }
>     }
>     j = 0;
>     for (i = 0; i < kp1->total; i++)
>         if (best2[best1[i]] == i && best1corr[i] >
CORRELATION_THRESHOLD)
>             j++;
>     if (j == 0) return; // no matches found
>     *points1 = cvCreateMat(1,j,CV_32FC2);
>     *points2 = cvCreateMat(1,j,CV_32FC2);
>     CvPoint2D32f *p1, *p2;
>     j = 0;
>     for (i = 0; i < kp1->total; i++) {
>         if (best2[best1[i]] == i && best1corr[i] >
CORRELATION_THRESHOLD) {

>             p1 = &((CvSURFPoint*)cvGetSeqElem(kp1,i))->pt;
>             p2 = &((CvSURFPoint*)cvGetSeqElem(kp2,best1[i]))->pt;
>             (*points1)->data.fl[j*2] = p1->x;
>             (*points1)->data.fl[j*2+1] = p1->y;
>             (*points2)->data.fl[j*2] = p2->x;
>             (*points2)->data.fl[j*2+1] = p2->y;
>             j++;
>         }
>     }
>     free(best2corr);
>     free(best1corr);
>     free(best2);
>     free(best1);
>     free(avg1);
>     free(avg2);
>     free(dev1);
>     free(dev2);
> }
>
> If you construct a fundamental matrix (a model) for the transformation
> between the two rectangles, you can further determine which
correspondences
> are false (by how well they fit the model) and remove them, which I
like to

> do like this:
>
> F = cvCreateMat(3,3,CV_32FC1);
> CvMat *status = cvCreateMat(1,points1->cols,CV_8UC1);
> int fm_count = cvFindFundamentalMat( points1,points2,F,
> CV_FM_RANSAC,1.,0.99,status );
> removeOutliers(&points1,&points2,status);
>
> where removeOutliers() is a function I wrote to clean up after
> cvFindFundamentalMat():
>
> // iterates the set of putative correspondences and removes
correspondences
> marked as outliers by cvFindFundamentalMat()
> void removeOutliers(CvMat **points1, CvMat **points2, CvMat *status) {
>     CvMat *points1_ = *points1;
>     CvMat *points2_ = *points2;
>     int count = 0;
>     for (int i = 0; i < status->cols; i++) if
(CV_MAT_ELEM(*status,unsigned

> char,0,i)) count++;
>     if (!count) { // no inliers
>         *points1 = NULL;
>         *points2 = NULL;
>     }
>     else {
>         *points1 = cvCreateMat(1,count,CV_32FC2);
>         *points2 = cvCreateMat(1,count,CV_32FC2);
>         int j = 0;
>         for (int i = 0; i < status->cols; i++) {
>             if (CV_MAT_ELEM(*status,unsigned char,0,i)) {
>                 (*points1)->data.fl[j*2] = points1_->data.fl[i*2];
> //p1->x
>                 (*points1)->data.fl[j*2+1] = points1_->data.fl[i*2+1];
> //p1->y
>                 (*points2)->data.fl[j*2] = points2_->data.fl[i*2];
> //p2->x
>                 (*points2)->data.fl[j*2+1] = points2_->data.fl[i*2+1];
> //p2->y
>                 j++;
>             }
>         }
>     }
>     cvReleaseMat(&points1_);
>     cvReleaseMat(&points2_);
> }
>
>
> I hope this helps.
>
> -Jostein
>
>
> 2009/1/8 yair_movshovitz <yairmov@...>
>
> >   Hi Everyone,
> >
> > I'm trying to understand how to use the SURF features capabilities of
> > openCV.
> > My scenario is as follows:
> > I have two rectangled areas in an image, which are supposed to bound
> > the same object. I would like to see how good is this assumption. In
> > other words I would like to see how many features they share.
> >
> > Can someone drop me a hint on how to use the SURF implementation of
> > openCV (or direct me to somewhere that has some documentation of it)
> >
> > Thanks,
> > Yair
> >
> >  
> >
>
>
> [Non-text portions of this message have been removed]
>


Reply | Threaded
Open this post in threaded view
|

Re: Re: cvExtractSURF

Jostein Austvik Jacobsen
You can view the implementation of *cvExtractSURF(...)* here:
http://opencvlibrary.svn.sourceforge.net/viewvc/opencvlibrary/trunk/opencv/src/cv/cvsurf.cpp,
however it doesn't contain much comments.


*cvExtractSURF( const CvArr* img, const CvArr* mask, CvSeq** keypoints,
CvSeq** descriptors, CvMemStorage* storage, CvSURFParams params )*

Here, *img* is the image. Use an
*IplImage<http://opencv.willowgarage.com/wiki/CxCore#IplImage>
* for the image. To load an image from disk, use
*cvLoadImage(...)*<http://opencv.willowgarage.com/wiki/HighGui#cvLoadImage>,
and to create your own image, use
*cvCreateImage(...)*<http://opencv.willowgarage.com/wiki/CxCore#CreateImage>.
Lets say you have a IplImage *image* and want to extract the rectangle
(x,y)->(x+dx,y+dy) from it as an IplImage rectangle, you might do this:

// Copy the dx-by-dy rectangle whose top-left corner is (x,y) out of
// *image* into a new single-channel 8-bit image.
// CV_IMAGE_ELEM(img, type, row, col) is addressed row-first, and rows
// run along the y axis. The original snippet used the x-direction
// counter as the row index, which transposes the copy and reads/writes
// out of bounds whenever dx != dy.
CvSize size = cvSize(dx,dy);            // width = dx, height = dy
IplImage* rectangle = cvCreateImage(size, IPL_DEPTH_8U, 1);
for (int j = 0; j < dy; ++j) {          // rows (y direction)
    for (int i = 0; i < dx; ++i) {      // columns (x direction)
        CV_IMAGE_ELEM(rectangle,unsigned char,j,i) =
            CV_IMAGE_ELEM(image,unsigned char,y+j,x+i);
    }
}

I'm not sure how *mask* is used, but a quick google search gives
http://www.emgu.com/wiki/files/1.4.0.0/html/ad54862f-3d1c-c177-3bdb-768619f8dd90.htm
which says "The optional input 8-bit mask. The features are only found in
the areas that contain more than 50% of non-zero mask pixels". Just set it
to NULL.

*keypoints* and
*descriptors* <http://en.wikipedia.org/wiki/Feature_%28computer_vision%29>
are where the results are placed. Initialize them as null-pointers and
cvExtractSURF will do the rest for you. Afterwards you can access a
descriptor and corresponding keypoint like this:

int k = 0; // the keypoint you want. There are *keypoints->total* keypoints.
float *seq = (float*)cvGetSeqElem(descriptors, k); // the descriptor of
length 64 or 128
CvPoint2D32f *p = &((CvSURFPoint*)cvGetSeqElem(keypoints, k))->pt; // the
(x,y) coordinates of keypoint *k* can now be accessed as *p->x* and *p->y*

The *CvMemStorage*
<http://opencv.willowgarage.com/wiki/CxCore#CvMemStorage>struct
*storage* is used as a mechanism to simplify memory management. I believe
the *keypoints* and *descriptors* structures are put into *storage*, so you
can't release *storage* until you're done using *keypoints* and *descriptors
*.Put a *CvMemStorage *storage = cvCreateMemStorage(0);* before your first
call to cvExtractSURF and *cvClearMemStorage(storage);* after you're done
using *keypoints* and *descriptors*.

SURF takes a couple of parameters through the *CvSURFParams* struct *params*.
You create *params* with *cvSURFParams(double threshold, int
extended)*where threshold represents the "edgyness" that is required
from a feature to
be recognized as a feature. It can be adjusted to retrieve more or fewer
features. In the paper
<http://www.vision.ee.ethz.ch/%7Esurf/eccv06.pdf> describing the SURF
detector, they use a threshold of 600 on an 800 x 640
image which returned 1418 features. The *extended* parameter is a simple
boolean 1 or 0 which states whether or not to use the extended descriptor.
The extended descriptor consists of 128 instead of 64 values, which should
give a better result at the cost of using more memory. Instead of creating
a new CvSURFParams struct for each call to cvExtractSURF, you could do:

CvSURFParams params = cvSURFParams(600, 1);
cvExtractSURF(..., params);
cvExtractSURF(..., params);


There you go. I hope I answered your question :)

Jostein


2009/1/12 yair_movshovitz <[hidden email]>

>   Hi Jostein,
>
> Thanks a lot for your help!
>
> Can you please explain the function parameters of cvExtractSURF?
> I mean in - cvExtractSURF(img1, NULL, &kp1, &desc1, storage,
> cvSURFParams(600,EXTENDED_DESCRIPTOR));
> what is the role of kp1, desc1, storage and the SURFParams?
> is storage just a temp area for the algorithm to use?
>
> Thanks again
> Yair
>
> --- In [hidden email] <OpenCV%40yahoogroups.com>, "Jostein Austvik
> Jacobsen"
>
> <josteinaj@...> wrote:
> >
> > If you've got your two rectangled areas stored as img1 and img2 you
> could do
> > this to extract its keypoints and corresponding descriptors:
> >
> > #define EXTENDED_DESCRIPTOR 1
> > CvSeq *kp1=NULL, *kp2=NULL;
> > CvSeq *desc1=NULL, *desc2=NULL;
> > CvMemStorage *storage = cvCreateMemStorage(0);
> > cvExtractSURF(img1, NULL, &kp1, &desc1, storage, cvSURFParams(600,
> > EXTENDED_DESCRIPTOR));
> > cvExtractSURF(img2, NULL, &kp2, &desc2, storage, cvSURFParams(600,
> > EXTENDED_DESCRIPTOR));
> >
> > You will have to correlate the descriptors with each other to determine
> > which keypoints in each rectangle corresponds to one another. You
> could use
> > a BBF tree which is implemented in the latest version of OpenCV, but
> unless
> > your rectangle is huge, you might just as well just correlate them the
> > standard way, which I do like this:
> >
> > #define CORRELATION_THRESHOLD 0.7
> > // brute-force attempt at correlating the two sets of features
> > void bruteMatch(CvMat **points1, CvMat **points2, CvSeq *kp1, CvSeq
> *desc1,
> > CvSeq *kp2, CvSeq *desc2) {
> > int i,j,k;
> > double* avg1 = (double*)malloc(sizeof(double)*kp1->total);
> > double* avg2 = (double*)malloc(sizeof(double)*kp2->total);
> > double* dev1 = (double*)malloc(sizeof(double)*kp1->total);
> > double* dev2 = (double*)malloc(sizeof(double)*kp2->total);
> > int* best1 = (int*)malloc(sizeof(int)*kp1->total);
> > int* best2 = (int*)malloc(sizeof(int)*kp2->total);
> > double* best1corr = (double*)malloc(sizeof(double)*kp1->total);
> > double* best2corr = (double*)malloc(sizeof(double)*kp2->total);
> > float *seq1, *seq2;
> > int descriptor_size = EXTENDED_DESCRIPTOR ? 128 : 64;
> > for (i=0; i<kp1->total; i++) {
> > // find average and standard deviation of each descriptor
> > avg1[i] = 0;
> > dev1[i] = 0;
> > seq1 = (float*)cvGetSeqElem(desc1, i);
> > for (k=0; k<descriptor_size; k++) avg1[i] += seq1[k];
> > avg1[i] /= descriptor_size;
> > for (k=0; k<descriptor_size; k++) dev1[i] +=
> > (seq1[k]-avg1[i])*(seq1[k]-avg1[i]);
> > dev1[i] = sqrt(dev1[i]/descriptor_size);
> >
> > // initialize best1 and best1corr
> > best1[i] = -1;
> > best1corr[i] = -1.;
> > }
> > for (j=0; j<kp2->total; j++) {
> > // find average and standard deviation of each descriptor
> > avg2[j] = 0;
> > dev2[j] = 0;
> > seq2 = (float*)cvGetSeqElem(desc2, j);
> > for (k=0; k<descriptor_size; k++) avg2[j] += seq2[k];
> > avg2[j] /= descriptor_size;
> > for (k=0; k<descriptor_size; k++) dev2[j] +=
> > (seq2[k]-avg2[j])*(seq2[k]-avg2[j]);
> > dev2[j] = sqrt(dev2[j]/descriptor_size);
> >
> > // initialize best2 and best2corr
> > best2[j] = -1;
> > best2corr[j] = -1.;
> > }
> > double corr;
> > for (i = 0; i < kp1->total; ++i) {
> > seq1 = (float*)cvGetSeqElem(desc1, i);
> > for (j = 0; j < kp2->total; ++j) {
> > corr = 0;
> > seq2 = (float*)cvGetSeqElem(desc2, j);
> > for (k = 0; k < descriptor_size; ++k)
> > corr += (seq1[k]-avg1[i])*(seq2[k]-avg2[j]);
> > corr /= (descriptor_size-1)*dev1[i]*dev2[j];
> > if (corr > best1corr[i]) {
> > best1corr[i] = corr;
> > best1[i] = j;
> > }
> > if (corr > best2corr[j]) {
> > best2corr[j] = corr;
> > best2[j] = i;
> > }
> > }
> > }
> > j = 0;
> > for (i = 0; i < kp1->total; i++)
> > if (best2[best1[i]] == i && best1corr[i] >
> CORRELATION_THRESHOLD)
> > j++;
> > if (j == 0) return; // no matches found
> > *points1 = cvCreateMat(1,j,CV_32FC2);
> > *points2 = cvCreateMat(1,j,CV_32FC2);
> > CvPoint2D32f *p1, *p2;
> > j = 0;
> > for (i = 0; i < kp1->total; i++) {
> > if (best2[best1[i]] == i && best1corr[i] >
> CORRELATION_THRESHOLD) {
> > p1 = &((CvSURFPoint*)cvGetSeqElem(kp1,i))->pt;
> > p2 = &((CvSURFPoint*)cvGetSeqElem(kp2,best1[i]))->pt;
> > (*points1)->data.fl[j*2] = p1->x;
> > (*points1)->data.fl[j*2+1] = p1->y;
> > (*points2)->data.fl[j*2] = p2->x;
> > (*points2)->data.fl[j*2+1] = p2->y;
> > j++;
> > }
> > }
> > free(best2corr);
> > free(best1corr);
> > free(best2);
> > free(best1);
> > free(avg1);
> > free(avg2);
> > free(dev1);
> > free(dev2);
> > }
> >
> > If you construct a fundamental matrix (a model) for the transformation
> > between the two rectangles, you can further determine which
> correspondences
> > are false (by how well they fit the model) and remove them, which I
> like to
> > do like this:
> >
> > F = cvCreateMat(3,3,CV_32FC1);
> > CvMat *status = cvCreateMat(1,points1->cols,CV_8UC1);
> > int fm_count = cvFindFundamentalMat( points1,points2,F,
> > CV_FM_RANSAC,1.,0.99,status );
> > removeOutliers(&points1,&points2,status);
> >
> > where removeOutliers() is a function I wrote to clean up after
> > cvFindFundamentalMat():
> >
> > // iterates the set of putative correspondences and removes
> correspondences
> > marked as outliers by cvFindFundamentalMat()
> > void removeOutliers(CvMat **points1, CvMat **points2, CvMat *status) {
> > CvMat *points1_ = *points1;
> > CvMat *points2_ = *points2;
> > int count = 0;
> > for (int i = 0; i < status->cols; i++) if
> (CV_MAT_ELEM(*status,unsigned
> > char,0,i)) count++;
> > if (!count) { // no inliers
> > *points1 = NULL;
> > *points2 = NULL;
> > }
> > else {
> > *points1 = cvCreateMat(1,count,CV_32FC2);
> > *points2 = cvCreateMat(1,count,CV_32FC2);
> > int j = 0;
> > for (int i = 0; i < status->cols; i++) {
> > if (CV_MAT_ELEM(*status,unsigned char,0,i)) {
> > (*points1)->data.fl[j*2] = points1_->data.fl[i*2];
> > //p1->x
> > (*points1)->data.fl[j*2+1] = points1_->data.fl[i*2+1];
> > //p1->y
> > (*points2)->data.fl[j*2] = points2_->data.fl[i*2];
> > //p2->x
> > (*points2)->data.fl[j*2+1] = points2_->data.fl[i*2+1];
> > //p2->y
> > j++;
> > }
> > }
> > }
> > cvReleaseMat(&points1_);
> > cvReleaseMat(&points2_);
> > }
> >
> >
> > I hope this helps.
> >
> > -Jostein
> >
> >
> > 2009/1/8 yair_movshovitz <yairmov@...>
> >
> > > Hi Everyone,
> > >
> > > I'm trying to understand how to use the SURF features capabilities of
> > > openCV.
> > > My scenario is as follows:
> > > I have two rectangled areas in an image, which are supposed to bound
> > > the same object. I would like to see how good is this assumption. In
> > > other words I would like to see how many features they share.
> > >
> > > Can someone drop me a hint on how to use the SURF implementation of
> > > openCV (or direct me to somewhere that has some documentation of it)
> > >
> > > Thanks,
> > > Yair
> > >
> > >
> > >
> >
> >
> > [Non-text portions of this message have been removed]
> >
>
>  
>


[Non-text portions of this message have been removed]

Reply | Threaded
Open this post in threaded view
|

Re: cvExtractSURF

yair_movshovitz
Hi Jostein,

Thanks again for helping me out.

I have started using the cvExtractSURF function. and I have the
following problem:
When I call the function I get this error -
Windows has triggered a breakpoint in (my program name).

This may be due to a corruption of the heap, which indicates a bug in
(my program name) or any of the DLLs it has loaded.

Have you ever encountered this error before regarding this function?

Thanks,
Yair

--- In [hidden email], "Jostein Austvik Jacobsen"
<josteinaj@...> wrote:
>
> You can view the implementation of *cvExtractSURF(...)* here:
>
http://opencvlibrary.svn.sourceforge.net/viewvc/opencvlibrary/trunk/opencv/src/cv/cvsurf.cpp,

> however it doesn't contain much comments.
>
>
> *cvExtractSURF( const CvArr* img, const CvArr* mask, CvSeq** keypoints,
> CvSeq** descriptors, CvMemStorage* storage, CvSURFParams params )*
>
> Here, *img* is the image. Use an
> *IplImage<http://opencv.willowgarage.com/wiki/CxCore#IplImage>
> * for the image. To load an image from disk, use
>
*cvLoadImage(...)*<http://opencv.willowgarage.com/wiki/HighGui#cvLoadImage>,
> and to create your own image, use
>
*cvCreateImage(...)*<http://opencv.willowgarage.com/wiki/CxCore#CreateImage>.

> Lets say you have a IplImage *image* and want to extract the rectangle
> (x,y)->(x+dx,y+dy) from it as an IplImage rectangle, you might do this:
>
> CvSize size = cvSize(dx,dy);
> IplImage* rectangle = cvCreateImage(size, IPL_DEPTH_8U, 1);
> for (int i = 0; i < dx; ++i) {
>     for (int j = 0; j < dy; ++j) {
>         CV_IMAGE_ELEM(rectangle,unsigned char,i,j) =
> CV_IMAGE_ELEM(image,unsigned char,x+i,y+j);
>     }
> }
>
> I'm not sure how *mask* is used, but a quick google search gives
>
http://www.emgu.com/wiki/files/1.4.0.0/html/ad54862f-3d1c-c177-3bdb-768619f8dd90.htmwhich
> says "The optional input 8-bit mask. The features are only found in
> the areas that contain more than 50% of non-zero mask pixels". Just
set it
> to NULL.
>
> *keypoints* and
>
*descriptors*<http://en.wikipedia.org/wiki/Feature_%28computer_vision%29>are
> where the results are placed. Initialize them as null-pointers and
> cvExtractSURF will do the rest for you. Afterwards you can access a
> descriptor and corresponding keypoint like this:
>
> int k = 0; // the keypoint you want. There are *keypoints->total*
keypoints.
> float *seq = (float*)cvGetSeqElem(descriptors, k); // the descriptor of
> length 64 or 128
> CvPoint2D32f *p = &((CvSURFPoint*)cvGetSeqElem(keypoints, k))->pt;
// the
> (x,y) coordinates of keypoint *k* can now be accessed as *p->x* and
*p->y*
>
> The *CvMemStorage*
> <http://opencv.willowgarage.com/wiki/CxCore#CvMemStorage>struct
> *storage* is used as a mechanism to simplify memory management. I
believe
> the *keypoints* and *descriptors* structures are put into *storage*,
so you
> can't release *storage* until you're done using *keypoints* and
*descriptors
> *.Put a *CvMemStorage *storage = cvCreateMemStorage(0);* before your
first
> call to cvExtractSURF and *cvClearMemStorage(storage);* after you're
done
> using *keypoints* and *descriptors*.
>
> SURF takes a couple of parameters through the *CvSURFParams* struct
*params*.
> You create *params* with *cvSURFParams(double threshold, int
> extended)*where threshold represents the "edgyness" that is required
> from a feature to
> be recognized as a feature. It can be adjusted to retrieve more or fewer
> features. In the paper
> <http://www.vision.ee.ethz.ch/%7Esurf/eccv06.pdf>describing the SURF
> detector, they use a threshold of 600 on a 800 x 640
> image which returned 1418 features. The *extended* parameter is a simple
> boolean 1 or 0 which states whether or not to use the extended
descriptor.
> The extended descriptor consists of 128 instead of 64 values which
should
> give a better result at the cost of using more memory. Instead of
creating

> a new CvSURFParams struct for each call to cvExtractSURF, you could do:
>
> CvSURFParams params = cvSURFParams(600, 1);
> cvExtractSURF(..., params);
> cvExtractSURF(..., params);
>
>
> There you go. I hope I answered your question :)
>
> Jostein
>
>
> 2009/1/12 yair_movshovitz <yairmov@...>
>
> >   Hi Jostein,
> >
> > Thanks a lot for your help!
> >
> > Can you please explain the function parameters of cvExtractSURF?
> > I mean in - cvExtractSURF(img1, NULL, &kp1, &desc1, storage,
> > cvSURFParams(600,EXTENDED_DESCRIPTOR));
> > what is the role of kp1, desc1, storage and the SURFParams?
> > is storage just a temp area for the algorithm to use?
> >
> > Thanks again
> > Yair
> >
> > --- In [hidden email] <OpenCV%40yahoogroups.com>, "Jostein
Austvik

> > Jacobsen"
> >
> > <josteinaj@> wrote:
> > >
> > > If you've got your two rectangled areas stored as img1 and img2 you
> > could do
> > > this to extract its keypoints and corresponding descriptors:
> > >
> > > #define EXTENDED_DESCRIPTOR 1
> > > CvSeq *kp1=NULL, *kp2=NULL;
> > > CvSeq *desc1=NULL, *desc2=NULL;
> > > CvMemStorage *storage = cvCreateMemStorage(0);
> > > cvExtractSURF(img1, NULL, &kp1, &desc1, storage, cvSURFParams(600,
> > > EXTENDED_DESCRIPTOR));
> > > cvExtractSURF(img2, NULL, &kp2, &desc2, storage, cvSURFParams(600,
> > > EXTENDED_DESCRIPTOR));
> > >
> > > You will have to correlate the descriptors with each other to
determine
> > > which keypoints in each rectangle corresponds to one another. You
> > could use
> > > a BBF tree which is implemented in the latest version of OpenCV, but
> > unless
> > > your rectangle is huge, you might just as well just correlate
them the

> > > standard way, which I do like this:
> > >
> > > #define CORRELATION_THRESHOLD 0.7
> > > // brute-force attempt at correlating the two sets of features
> > > void bruteMatch(CvMat **points1, CvMat **points2, CvSeq *kp1, CvSeq
> > *desc1,
> > > CvSeq *kp2, CvSeq *desc2) {
> > > int i,j,k;
> > > double* avg1 = (double*)malloc(sizeof(double)*kp1->total);
> > > double* avg2 = (double*)malloc(sizeof(double)*kp2->total);
> > > double* dev1 = (double*)malloc(sizeof(double)*kp1->total);
> > > double* dev2 = (double*)malloc(sizeof(double)*kp2->total);
> > > int* best1 = (int*)malloc(sizeof(int)*kp1->total);
> > > int* best2 = (int*)malloc(sizeof(int)*kp2->total);
> > > double* best1corr = (double*)malloc(sizeof(double)*kp1->total);
> > > double* best2corr = (double*)malloc(sizeof(double)*kp2->total);
> > > float *seq1, *seq2;
> > > int descriptor_size = EXTENDED_DESCRIPTOR ? 128 : 64;
> > > for (i=0; i<kp1->total; i++) {
> > > // find average and standard deviation of each descriptor
> > > avg1[i] = 0;
> > > dev1[i] = 0;
> > > seq1 = (float*)cvGetSeqElem(desc1, i);
> > > for (k=0; k<descriptor_size; k++) avg1[i] += seq1[k];
> > > avg1[i] /= descriptor_size;
> > > for (k=0; k<descriptor_size; k++) dev1[i] +=
> > > (seq1[k]-avg1[i])*(seq1[k]-avg1[i]);
> > > dev1[i] = sqrt(dev1[i]/descriptor_size);
> > >
> > > // initialize best1 and best1corr
> > > best1[i] = -1;
> > > best1corr[i] = -1.;
> > > }
> > > for (j=0; j<kp2->total; j++) {
> > > // find average and standard deviation of each descriptor
> > > avg2[j] = 0;
> > > dev2[j] = 0;
> > > seq2 = (float*)cvGetSeqElem(desc2, j);
> > > for (k=0; k<descriptor_size; k++) avg2[j] += seq2[k];
> > > avg2[j] /= descriptor_size;
> > > for (k=0; k<descriptor_size; k++) dev2[j] +=
> > > (seq2[k]-avg2[j])*(seq2[k]-avg2[j]);
> > > dev2[j] = sqrt(dev2[j]/descriptor_size);
> > >
> > > // initialize best2 and best2corr
> > > best2[j] = -1;
> > > best2corr[j] = -1.;
> > > }
> > > double corr;
> > > for (i = 0; i < kp1->total; ++i) {
> > > seq1 = (float*)cvGetSeqElem(desc1, i);
> > > for (j = 0; j < kp2->total; ++j) {
> > > corr = 0;
> > > seq2 = (float*)cvGetSeqElem(desc2, j);
> > > for (k = 0; k < descriptor_size; ++k)
> > > corr += (seq1[k]-avg1[i])*(seq2[k]-avg2[j]);
> > > corr /= (descriptor_size-1)*dev1[i]*dev2[j];
> > > if (corr > best1corr[i]) {
> > > best1corr[i] = corr;
> > > best1[i] = j;
> > > }
> > > if (corr > best2corr[j]) {
> > > best2corr[j] = corr;
> > > best2[j] = i;
> > > }
> > > }
> > > }
> > > j = 0;
> > > for (i = 0; i < kp1->total; i++)
> > > if (best2[best1[i]] == i && best1corr[i] >
> > CORRELATION_THRESHOLD)
> > > j++;
> > > if (j == 0) return; // no matches found
> > > *points1 = cvCreateMat(1,j,CV_32FC2);
> > > *points2 = cvCreateMat(1,j,CV_32FC2);
> > > CvPoint2D32f *p1, *p2;
> > > j = 0;
> > > for (i = 0; i < kp1->total; i++) {
> > > if (best2[best1[i]] == i && best1corr[i] >
> > CORRELATION_THRESHOLD) {
> > > p1 = &((CvSURFPoint*)cvGetSeqElem(kp1,i))->pt;
> > > p2 = &((CvSURFPoint*)cvGetSeqElem(kp2,best1[i]))->pt;
> > > (*points1)->data.fl[j*2] = p1->x;
> > > (*points1)->data.fl[j*2+1] = p1->y;
> > > (*points2)->data.fl[j*2] = p2->x;
> > > (*points2)->data.fl[j*2+1] = p2->y;
> > > j++;
> > > }
> > > }
> > > free(best2corr);
> > > free(best1corr);
> > > free(best2);
> > > free(best1);
> > > free(avg1);
> > > free(avg2);
> > > free(dev1);
> > > free(dev2);
> > > }
> > >
> > > If you construct a fundamental matrix (a model) for the
transformation

> > > between the two rectangles, you can further determine which
> > correspondences
> > > are false (by how well they fit the model) and remove them, which I
> > like to
> > > do like this:
> > >
> > > F = cvCreateMat(3,3,CV_32FC1);
> > > CvMat *status = cvCreateMat(1,points1->cols,CV_8UC1);
> > > int fm_count = cvFindFundamentalMat( points1,points2,F,
> > > CV_FM_RANSAC,1.,0.99,status );
> > > removeOutliers(&points1,&points2,status);
> > >
> > > where removeOutliers() is a function I wrote to clean up after
> > > cvFindFundamentalMat():
> > >
> > > // iterates the set of putative correspondences and removes
> > correspondences
> > > marked as outliers by cvFindFundamentalMat()
> > > void removeOutliers(CvMat **points1, CvMat **points2, CvMat
*status) {

> > > CvMat *points1_ = *points1;
> > > CvMat *points2_ = *points2;
> > > int count = 0;
> > > for (int i = 0; i < status->cols; i++) if
> > (CV_MAT_ELEM(*status,unsigned
> > > char,0,i)) count++;
> > > if (!count) { // no inliers
> > > *points1 = NULL;
> > > *points2 = NULL;
> > > }
> > > else {
> > > *points1 = cvCreateMat(1,count,CV_32FC2);
> > > *points2 = cvCreateMat(1,count,CV_32FC2);
> > > int j = 0;
> > > for (int i = 0; i < status->cols; i++) {
> > > if (CV_MAT_ELEM(*status,unsigned char,0,i)) {
> > > (*points1)->data.fl[j*2] = points1_->data.fl[i*2];
> > > //p1->x
> > > (*points1)->data.fl[j*2+1] = points1_->data.fl[i*2+1];
> > > //p1->y
> > > (*points2)->data.fl[j*2] = points2_->data.fl[i*2];
> > > //p2->x
> > > (*points2)->data.fl[j*2+1] = points2_->data.fl[i*2+1];
> > > //p2->y
> > > j++;
> > > }
> > > }
> > > }
> > > cvReleaseMat(&points1_);
> > > cvReleaseMat(&points2_);
> > > }
> > >
> > >
> > > I hope this helps.
> > >
> > > -Jostein
> > >
> > >
> > > 2009/1/8 yair_movshovitz <yairmov@>
> > >
> > > > Hi Everyone,
> > > >
> > > > I'm trying to understand how to use the SURF features
capabilities of
> > > > openCV.
> > > > My scenario is as follows:
> > > > I have two rectangled areas in an image, which are supposed to
bound
> > > > the same object. I would like to see how good is this
assumption. In
> > > > other words I would like to see how many features they share.
> > > >
> > > > Can someone drop me a hint on how to use the SURF
implementation of
> > > > openCV (or direct me to somewhere that has some documentation
of it)

> > > >
> > > > Thanks,
> > > > Yair
> > > >
> > > >
> > > >
> > >
> > >
> > > [Non-text portions of this message have been removed]
> > >
> >
> >  
> >
>
>
> [Non-text portions of this message have been removed]
>


Reply | Threaded
Open this post in threaded view
|

Re: Re: cvExtractSURF

Jostein Austvik Jacobsen
I'm using Ubuntu Linux so I can't help you there. Sorry.
Jostein

2009/1/20 yair_movshovitz <[hidden email]>

>   Hi Jostein,
>
> Thanks again for helping me out.
>
> I have started using the cvExtractSURF function. and I have the
> following problem:
> When I call the function I get this error -
> Windows has triggered a breakpoint in (my program name).
>
> This may be due to a corruption of the heap, which indicates a bug in
> (my program name) or any of the DLLs it has loaded.
>
> Have you ever encountered this error before regarding this function?
>
> Thanks,
>
> Yair
>
> --- In [hidden email] <OpenCV%40yahoogroups.com>, "Jostein Austvik
> Jacobsen"
> <josteinaj@...> wrote:
> >
> > You can view the implementation of *cvExtractSURF(...)* here:
> >
>
> http://opencvlibrary.svn.sourceforge.net/viewvc/opencvlibrary/trunk/opencv/src/cv/cvsurf.cpp,
> > however it doesn't contain many comments.
> >
> >
> > *cvExtractSURF( const CvArr* img, const CvArr* mask, CvSeq** keypoints,
> > CvSeq** descriptors, CvMemStorage* storage, CvSURFParams params )*
> >
> > Here, *img* is the image. Use an
> > *IplImage<http://opencv.willowgarage.com/wiki/CxCore#IplImage>
> > * for the image. To load an image from disk, use
> >
> *cvLoadImage(...)*<http://opencv.willowgarage.com/wiki/HighGui#cvLoadImage
> >,
> > and to create your own image, use
> >
> *cvCreateImage(...)*<
> http://opencv.willowgarage.com/wiki/CxCore#CreateImage>.
> > Lets say you have a IplImage *image* and want to extract the rectangle
> > (x,y)->(x+dx,y+dy) from it as an IplImage rectangle, you might do this:
> >
> > CvSize size = cvSize(dx,dy);
> > IplImage* rectangle = cvCreateImage(size, IPL_DEPTH_8U, 1);
> > for (int i = 0; i < dx; ++i) {
> > for (int j = 0; j < dy; ++j) {
> > CV_IMAGE_ELEM(rectangle,unsigned char,i,j) =
> > CV_IMAGE_ELEM(image,unsigned char,x+i,y+j);
> > }
> > }
> >
> > I'm not sure how *mask* is used, but a quick google search gives
> >
>
> http://www.emgu.com/wiki/files/1.4.0.0/html/ad54862f-3d1c-c177-3bdb-768619f8dd90.htm which
> > says "The optional input 8-bit mask. The features are only found in
> > the areas that contain more than 50% of non-zero mask pixels". Just
> set it
> > to NULL.
> >
> > *keypoints* and
> >
> *descriptors*<http://en.wikipedia.org/wiki/Feature_%28computer_vision%29
> >are
> > where the results are placed. Initialize them as null-pointers and
> > cvExtractSURF will do the rest for you. Afterwards you can access a
> > descriptor and corresponding keypoint like this:
> >
> > int k = 0; // the keypoint you want. There are *keypoints->total*
> keypoints.
> > float *seq = (float*)cvGetSeqElem(descriptors, k); // the descriptor of
> > length 64 or 128
> > CvPoint2D32f *p = &((CvSURFPoint*)cvGetSeqElem(keypoints, k))->pt;
> // the
> > (x,y) coordinates of keypoint *k* can now be accessed as *p->x* and
> *p->y*
> >
> > The *CvMemStorage*
> > <http://opencv.willowgarage.com/wiki/CxCore#CvMemStorage>struct
> > *storage* is used as a mechanism to simplify memory management. I
> believe
> > the *keypoints* and *descriptors* structures are put into *storage*,
> so you
> > can't release *storage* until you're done using *keypoints* and
> *descriptors
> > *.Put a *CvMemStorage *storage = cvCreateMemStorage(0);* before your
> first
> > call to cvExtractSURF and *cvClearMemStorage(storage);* after you're
> done
> > using *keypoints* and *descriptors*.
> >
> > SURF takes a couple of parameters through the *CvSURFParams* struct
> *params*.
> > You create *params* with *cvSURFParams(double threshold, int
> > extended)*where threshold represents the "edgyness" that is required
> > from a feature to
> > be recognized as a feature. It can be adjusted to retrieve more or fewer
> > features. In the paper
> > <http://www.vision.ee.ethz.ch/%7Esurf/eccv06.pdf>describing the SURF
> > detector, they use a threshold of 600 on a 800 x 640
> > image which returned 1418 features. The *extended* parameter is a simple
> > boolean 1 or 0 which states whether or not to use the extended
> descriptor.
> > The extended descriptor consists of 128 instead of 64 values which
> should
> > gives a better result at the cost of using more memory. Instead of
> creating
> > a new CvSURFParams struct for each call to cvExtractSURF, you could do:
> >
> > CvSURFParams params = cvSURFParams(600, 1);
> > cvExtractSURF(..., params);
> > cvExtractSURF(..., params);
> >
> >
> > There you go. I hope I answered your question :)
> >
> > Jostein
> >
> >
> > 2009/1/12 yair_movshovitz <yairmov@...>
> >
> > > Hi Jostein,
> > >
> > > Thanks a lot for your help!
> > >
> > > Can you please explain the function parameters of cvExtractSURF?
> > > I mean in - cvExtractSURF(img1, NULL, &kp1, &desc1, storage,
> > > cvSURFParams(600,EXTENDED_DESCRIPTOR));
> > > what is the role of kp1, desc1, storage and the SURFParams?
> > > is storage just a temp area for the algorithm to use?
> > >
> > > Thanks again
> > > Yair
> > >
> > > --- In [hidden email] <OpenCV%40yahoogroups.com> <OpenCV%
> 40yahoogroups.com>, "Jostein
>
> Austvik
> > > Jacobsen"
> > >
> > > <josteinaj@> wrote:
> > > >
> > > > If you've got your two rectangled areas stored as img1 and img2 you
> > > could do
> > > > this to extract its keypoints and corresponding descriptors:
> > > >
> > > > #define EXTENDED_DESCRIPTOR 1
> > > > CvSeq *kp1=NULL, *kp2=NULL;
> > > > CvSeq *desc1=NULL, *desc2=NULL;
> > > > CvMemStorage *storage = cvCreateMemStorage(0);
> > > > cvExtractSURF(img1, NULL, &kp1, &desc1, storage, cvSURFParams(600,
> > > > EXTENDED_DESCRIPTOR));
> > > > cvExtractSURF(img2, NULL, &kp2, &desc2, storage, cvSURFParams(600,
> > > > EXTENDED_DESCRIPTOR));
> > > >
> > > > You will have to correlate the descriptors with each other to
> determine
> > > > which keypoints in each rectangle corresponds to one another. You
> > > could use
> > > > a BBF tree which is implemented in the latest version of OpenCV, but
> > > unless
> > > > your rectangle is huge, you might just as well just correlate
> them the
> > > > standard way, which I do like this:
> > > >
> > > > #define CORRELATION_THRESHOLD 0.7
> > > > // brute-force attempt at correlating the two sets of features
> > > > void bruteMatch(CvMat **points1, CvMat **points2, CvSeq *kp1, CvSeq
> > > *desc1,
> > > > CvSeq *kp2, CvSeq *desc2) {
> > > > int i,j,k;
> > > > double* avg1 = (double*)malloc(sizeof(double)*kp1->total);
> > > > double* avg2 = (double*)malloc(sizeof(double)*kp2->total);
> > > > double* dev1 = (double*)malloc(sizeof(double)*kp1->total);
> > > > double* dev2 = (double*)malloc(sizeof(double)*kp2->total);
> > > > int* best1 = (int*)malloc(sizeof(int)*kp1->total);
> > > > int* best2 = (int*)malloc(sizeof(int)*kp2->total);
> > > > double* best1corr = (double*)malloc(sizeof(double)*kp1->total);
> > > > double* best2corr = (double*)malloc(sizeof(double)*kp2->total);
> > > > float *seq1, *seq2;
> > > > int descriptor_size = EXTENDED_DESCRIPTOR ? 128 : 64;
> > > > for (i=0; i<kp1->total; i++) {
> > > > // find average and standard deviation of each descriptor
> > > > avg1[i] = 0;
> > > > dev1[i] = 0;
> > > > seq1 = (float*)cvGetSeqElem(desc1, i);
> > > > for (k=0; k<descriptor_size; k++) avg1[i] += seq1[k];
> > > > avg1[i] /= descriptor_size;
> > > > for (k=0; k<descriptor_size; k++) dev1[i] +=
> > > > (seq1[k]-avg1[i])*(seq1[k]-avg1[i]);
> > > > dev1[i] = sqrt(dev1[i]/descriptor_size);
> > > >
> > > > // initialize best1 and best1corr
> > > > best1[i] = -1;
> > > > best1corr[i] = -1.;
> > > > }
> > > > for (j=0; j<kp2->total; j++) {
> > > > // find average and standard deviation of each descriptor
> > > > avg2[j] = 0;
> > > > dev2[j] = 0;
> > > > seq2 = (float*)cvGetSeqElem(desc2, j);
> > > > for (k=0; k<descriptor_size; k++) avg2[j] += seq2[k];
> > > > avg2[j] /= descriptor_size;
> > > > for (k=0; k<descriptor_size; k++) dev2[j] +=
> > > > (seq2[k]-avg2[j])*(seq2[k]-avg2[j]);
> > > > dev2[j] = sqrt(dev2[j]/descriptor_size);
> > > >
> > > > // initialize best2 and best2corr
> > > > best2[j] = -1;
> > > > best2corr[j] = -1.;
> > > > }
> > > > double corr;
> > > > for (i = 0; i < kp1->total; ++i) {
> > > > seq1 = (float*)cvGetSeqElem(desc1, i);
> > > > for (j = 0; j < kp2->total; ++j) {
> > > > corr = 0;
> > > > seq2 = (float*)cvGetSeqElem(desc2, j);
> > > > for (k = 0; k < descriptor_size; ++k)
> > > > corr += (seq1[k]-avg1[i])*(seq2[k]-avg2[j]);
> > > > corr /= (descriptor_size-1)*dev1[i]*dev2[j];
> > > > if (corr > best1corr[i]) {
> > > > best1corr[i] = corr;
> > > > best1[i] = j;
> > > > }
> > > > if (corr > best2corr[j]) {
> > > > best2corr[j] = corr;
> > > > best2[j] = i;
> > > > }
> > > > }
> > > > }
> > > > j = 0;
> > > > for (i = 0; i < kp1->total; i++)
> > > > if (best2[best1[i]] == i && best1corr[i] >
> > > CORRELATION_THRESHOLD)
> > > > j++;
> > > > if (j == 0) return; // no matches found
> > > > *points1 = cvCreateMat(1,j,CV_32FC2);
> > > > *points2 = cvCreateMat(1,j,CV_32FC2);
> > > > CvPoint2D32f *p1, *p2;
> > > > j = 0;
> > > > for (i = 0; i < kp1->total; i++) {
> > > > if (best2[best1[i]] == i && best1corr[i] >
> > > CORRELATION_THRESHOLD) {
> > > > p1 = &((CvSURFPoint*)cvGetSeqElem(kp1,i))->pt;
> > > > p2 = &((CvSURFPoint*)cvGetSeqElem(kp2,best1[i]))->pt;
> > > > (*points1)->data.fl[j*2] = p1->x;
> > > > (*points1)->data.fl[j*2+1] = p1->y;
> > > > (*points2)->data.fl[j*2] = p2->x;
> > > > (*points2)->data.fl[j*2+1] = p2->y;
> > > > j++;
> > > > }
> > > > }
> > > > free(best2corr);
> > > > free(best1corr);
> > > > free(best2);
> > > > free(best1);
> > > > free(avg1);
> > > > free(avg2);
> > > > free(dev1);
> > > > free(dev2);
> > > > }
> > > >
> > > > If you construct a fundamental matrix (a model) for the
> transformation
> > > > between the two rectangles, you can further determine which
> > > correspondences
> > > > are false (by how well they fit the model) and remove them, which I
> > > like to
> > > > do like this:
> > > >
> > > > F = cvCreateMat(3,3,CV_32FC1);
> > > > CvMat *status = cvCreateMat(1,points1->cols,CV_8UC1);
> > > > int fm_count = cvFindFundamentalMat( points1,points2,F,
> > > > CV_FM_RANSAC,1.,0.99,status );
> > > > removeOutliers(&points1,&points2,status);
> > > >
> > > > where removeOutliers() is a function I wrote to clean up after
> > > > cvFindFundamentalMat():
> > > >
> > > > // iterates the set of putative correspondences and removes
> > > correspondences
> > > > marked as outliers by cvFindFundamentalMat()
> > > > void removeOutliers(CvMat **points1, CvMat **points2, CvMat
> *status) {
> > > > CvMat *points1_ = *points1;
> > > > CvMat *points2_ = *points2;
> > > > int count = 0;
> > > > for (int i = 0; i < status->cols; i++) if
> > > (CV_MAT_ELEM(*status,unsigned
> > > > char,0,i)) count++;
> > > > if (!count) { // no inliers
> > > > *points1 = NULL;
> > > > *points2 = NULL;
> > > > }
> > > > else {
> > > > *points1 = cvCreateMat(1,count,CV_32FC2);
> > > > *points2 = cvCreateMat(1,count,CV_32FC2);
> > > > int j = 0;
> > > > for (int i = 0; i < status->cols; i++) {
> > > > if (CV_MAT_ELEM(*status,unsigned char,0,i)) {
> > > > (*points1)->data.fl[j*2] = points1_->data.fl[i*2];
> > > > //p1->x
> > > > (*points1)->data.fl[j*2+1] = points1_->data.fl[i*2+1];
> > > > //p1->y
> > > > (*points2)->data.fl[j*2] = points2_->data.fl[i*2];
> > > > //p2->x
> > > > (*points2)->data.fl[j*2+1] = points2_->data.fl[i*2+1];
> > > > //p2->y
> > > > j++;
> > > > }
> > > > }
> > > > }
> > > > cvReleaseMat(&points1_);
> > > > cvReleaseMat(&points2_);
> > > > }
> > > >
> > > >
> > > > I hope this helps.
> > > >
> > > > -Jostein
> > > >
> > > >
> > > > 2009/1/8 yair_movshovitz <yairmov@>
> > > >
> > > > > Hi Everyone,
> > > > >
> > > > > I'm trying to understand how to use the SURF features
> capabilities of
> > > > > openCV.
> > > > > My scenario is as follows:
> > > > > I have two rectangled areas in an image, which are supposed to
> bound
> > > > > the same object. I would like to see how good is this
> assumption. In
> > > > > other words I would like to see how many features they share.
> > > > >
> > > > > Can someone drop me a hint on how to use the SURF
> implementation of
> > > > > openCV (or direct me to somewhere that has some documentation
> of it)
> > > > >
> > > > > Thanks,
> > > > > Yair
> > > > >
> > > > >
> > > > >
> > > >
> > > >
> > > > [Non-text portions of this message have been removed]
> > > >
> > >
> > >
> > >
> >
> >
> > [Non-text portions of this message have been removed]
> >
>
>  
>


[Non-text portions of this message have been removed]

Reply | Threaded
Open this post in threaded view
|

Re: cvExtractSURF

yair_movshovitz
I did some investigating and found out the the error is happening
inside the icvFastHessianDetector() function in cvsurf.cpp
It happens when the function tries to free the memory it allocated:

  for( octave = k = 0; octave < params->nOctaves; octave++ )
      for( sc = -1; sc <= params->nOctaveLayers; sc++, k++ )
      {
          //this line causes the error at some iteration.
          cvReleaseMat( &hessians[k] );
          cvReleaseMat( &traces[k] );
      }


Anyone has an idea why this is happening?

Thanks,
Yair

--- In [hidden email], "Jostein Austvik Jacobsen"
<josteinaj@...> wrote:

>
> I'm using Ubuntu Linux so I can't help you there. Sorry.
> Jostein
>
> 2009/1/20 yair_movshovitz <yairmov@...>
>
> >   Hi Jostein,
> >
> > Thanks again for helping me out.
> >
> > I have started using the cvExtractSURF function. and I have the
> > following problem:
> > When I call the function I get this error -
> > Windows has triggered a breakpoint in (my program name).
> >
> > This may be due to a corruption of the heap, which indicates a bug in
> > (my program name) or any of the DLLs it has loaded.
> >
> > Have you ever encountered this error before regarding this function?
> >
> > Thanks,
> >
> > Yair
> >
> > --- In [hidden email] <OpenCV%40yahoogroups.com>, "Jostein
Austvik
> > Jacobsen"
> > <josteinaj@> wrote:
> > >
> > > You can view the implementation of *cvExtractSURF(...)* here:
> > >
> >
> >
http://opencvlibrary.svn.sourceforge.net/viewvc/opencvlibrary/trunk/opencv/src/cv/cvsurf.cpp,
> > > however it doesn't contain many comments.
> > >
> > >
> > > *cvExtractSURF( const CvArr* img, const CvArr* mask, CvSeq**
keypoints,
> > > CvSeq** descriptors, CvMemStorage* storage, CvSURFParams params )*
> > >
> > > Here, *img* is the image. Use an
> > > *IplImage<http://opencv.willowgarage.com/wiki/CxCore#IplImage>
> > > * for the image. To load an image from disk, use
> > >
> >
*cvLoadImage(...)*<http://opencv.willowgarage.com/wiki/HighGui#cvLoadImage
> > >,
> > > and to create your own image, use
> > >
> > *cvCreateImage(...)*<
> > http://opencv.willowgarage.com/wiki/CxCore#CreateImage>.
> > > Lets say you have a IplImage *image* and want to extract the
rectangle
> > > (x,y)->(x+dx,y+dy) from it as an IplImage rectangle, you might
do this:

> > >
> > > CvSize size = cvSize(dx,dy);
> > > IplImage* rectangle = cvCreateImage(size, IPL_DEPTH_8U, 1);
> > > for (int i = 0; i < dx; ++i) {
> > > for (int j = 0; j < dy; ++j) {
> > > CV_IMAGE_ELEM(rectangle,unsigned char,i,j) =
> > > CV_IMAGE_ELEM(image,unsigned char,x+i,y+j);
> > > }
> > > }
> > >
> > > I'm not sure how *mask* is used, but a quick google search gives
> > >
> >
> >
http://www.emgu.com/wiki/files/1.4.0.0/html/ad54862f-3d1c-c177-3bdb-768619f8dd90.htm which
> > > says "The optional input 8-bit mask. The features are only found in
> > > the areas that contain more than 50% of non-zero mask pixels". Just
> > set it
> > > to NULL.
> > >
> > > *keypoints* and
> > >
> >
*descriptors*<http://en.wikipedia.org/wiki/Feature_%28computer_vision%29
> > >are
> > > where the results are placed. Initialize them as null-pointers and
> > > cvExtractSURF will do the rest for you. Afterwards you can access a
> > > descriptor and corresponding keypoint like this:
> > >
> > > int k = 0; // the keypoint you want. There are *keypoints->total*
> > keypoints.
> > > float *seq = (float*)cvGetSeqElem(descriptors, k); // the
descriptor of

> > > length 64 or 128
> > > CvPoint2D32f *p = &((CvSURFPoint*)cvGetSeqElem(keypoints, k))->pt;
> > // the
> > > (x,y) coordinates of keypoint *k* can now be accessed as *p->x* and
> > *p->y*
> > >
> > > The *CvMemStorage*
> > > <http://opencv.willowgarage.com/wiki/CxCore#CvMemStorage>struct
> > > *storage* is used as a mechanism to simplify memory management. I
> > believe
> > > the *keypoints* and *descriptors* structures are put into *storage*,
> > so you
> > > can't release *storage* until you're done using *keypoints* and
> > *descriptors
> > > *.Put a *CvMemStorage *storage = cvCreateMemStorage(0);* before your
> > first
> > > call to cvExtractSURF and *cvClearMemStorage(storage);* after you're
> > done
> > > using *keypoints* and *descriptors*.
> > >
> > > SURF takes a couple of parameters through the *CvSURFParams* struct
> > *params*.
> > > You create *params* with *cvSURFParams(double threshold, int
> > > extended)*where threshold represents the "edgyness" that is required
> > > from a feature to
> > > be recognized as a feature. It can be adjusted to retrieve more
or fewer
> > > features. In the paper
> > > <http://www.vision.ee.ethz.ch/%7Esurf/eccv06.pdf>describing the SURF
> > > detector, they use a threshold of 600 on a 800 x 640
> > > image which returned 1418 features. The *extended* parameter is
a simple
> > > boolean 1 or 0 which states whether or not to use the extended
> > descriptor.
> > > The extended descriptor consists of 128 instead of 64 values which
> > should
> > > gives a better result at the cost of using more memory. Instead of
> > creating
> > > a new CvSURFParams struct for each call to cvExtractSURF, you
could do:

> > >
> > > CvSURFParams params = cvSURFParams(600, 1);
> > > cvExtractSURF(..., params);
> > > cvExtractSURF(..., params);
> > >
> > >
> > > There you go. I hope I answered your question :)
> > >
> > > Jostein
> > >
> > >
> > > 2009/1/12 yair_movshovitz <yairmov@>
> > >
> > > > Hi Jostein,
> > > >
> > > > Thanks a lot for your help!
> > > >
> > > > Can you please explain the function parameters of cvExtractSURF?
> > > > I mean in - cvExtractSURF(img1, NULL, &kp1, &desc1, storage,
> > > > cvSURFParams(600,EXTENDED_DESCRIPTOR));
> > > > what is the role of kp1, desc1, storage and the SURFParams?
> > > > is storage just a temp area for the algorithm to use?
> > > >
> > > > Thanks again
> > > > Yair
> > > >
> > > > --- In [hidden email] <OpenCV%40yahoogroups.com> <OpenCV%
> > 40yahoogroups.com>, "Jostein
> >
> > Austvik
> > > > Jacobsen"
> > > >
> > > > <josteinaj@> wrote:
> > > > >
> > > > > If you've got your two rectangled areas stored as img1 and
img2 you
> > > > could do
> > > > > this to extract its keypoints and corresponding descriptors:
> > > > >
> > > > > #define EXTENDED_DESCRIPTOR 1
> > > > > CvSeq *kp1=NULL, *kp2=NULL;
> > > > > CvSeq *desc1=NULL, *desc2=NULL;
> > > > > CvMemStorage *storage = cvCreateMemStorage(0);
> > > > > cvExtractSURF(img1, NULL, &kp1, &desc1, storage,
cvSURFParams(600,
> > > > > EXTENDED_DESCRIPTOR));
> > > > > cvExtractSURF(img2, NULL, &kp2, &desc2, storage,
cvSURFParams(600,
> > > > > EXTENDED_DESCRIPTOR));
> > > > >
> > > > > You will have to correlate the descriptors with each other to
> > determine
> > > > > which keypoints in each rectangle corresponds to one
another. You
> > > > could use
> > > > > a BBF tree which is implemented in the latest version of
OpenCV, but
> > > > unless
> > > > > your rectangle is huge, you might just as well just correlate
> > them the
> > > > > standard way, which I do like this:
> > > > >
> > > > > #define CORRELATION_THRESHOLD 0.7
> > > > > // brute-force attempt at correlating the two sets of features
> > > > > void bruteMatch(CvMat **points1, CvMat **points2, CvSeq
*kp1, CvSeq

> > > > *desc1,
> > > > > CvSeq *kp2, CvSeq *desc2) {
> > > > > int i,j,k;
> > > > > double* avg1 = (double*)malloc(sizeof(double)*kp1->total);
> > > > > double* avg2 = (double*)malloc(sizeof(double)*kp2->total);
> > > > > double* dev1 = (double*)malloc(sizeof(double)*kp1->total);
> > > > > double* dev2 = (double*)malloc(sizeof(double)*kp2->total);
> > > > > int* best1 = (int*)malloc(sizeof(int)*kp1->total);
> > > > > int* best2 = (int*)malloc(sizeof(int)*kp2->total);
> > > > > double* best1corr = (double*)malloc(sizeof(double)*kp1->total);
> > > > > double* best2corr = (double*)malloc(sizeof(double)*kp2->total);
> > > > > float *seq1, *seq2;
> > > > > int descriptor_size = EXTENDED_DESCRIPTOR ? 128 : 64;
> > > > > for (i=0; i<kp1->total; i++) {
> > > > > // find average and standard deviation of each descriptor
> > > > > avg1[i] = 0;
> > > > > dev1[i] = 0;
> > > > > seq1 = (float*)cvGetSeqElem(desc1, i);
> > > > > for (k=0; k<descriptor_size; k++) avg1[i] += seq1[k];
> > > > > avg1[i] /= descriptor_size;
> > > > > for (k=0; k<descriptor_size; k++) dev1[i] +=
> > > > > (seq1[k]-avg1[i])*(seq1[k]-avg1[i]);
> > > > > dev1[i] = sqrt(dev1[i]/descriptor_size);
> > > > >
> > > > > // initialize best1 and best1corr
> > > > > best1[i] = -1;
> > > > > best1corr[i] = -1.;
> > > > > }
> > > > > for (j=0; j<kp2->total; j++) {
> > > > > // find average and standard deviation of each descriptor
> > > > > avg2[j] = 0;
> > > > > dev2[j] = 0;
> > > > > seq2 = (float*)cvGetSeqElem(desc2, j);
> > > > > for (k=0; k<descriptor_size; k++) avg2[j] += seq2[k];
> > > > > avg2[j] /= descriptor_size;
> > > > > for (k=0; k<descriptor_size; k++) dev2[j] +=
> > > > > (seq2[k]-avg2[j])*(seq2[k]-avg2[j]);
> > > > > dev2[j] = sqrt(dev2[j]/descriptor_size);
> > > > >
> > > > > // initialize best2 and best2corr
> > > > > best2[j] = -1;
> > > > > best2corr[j] = -1.;
> > > > > }
> > > > > double corr;
> > > > > for (i = 0; i < kp1->total; ++i) {
> > > > > seq1 = (float*)cvGetSeqElem(desc1, i);
> > > > > for (j = 0; j < kp2->total; ++j) {
> > > > > corr = 0;
> > > > > seq2 = (float*)cvGetSeqElem(desc2, j);
> > > > > for (k = 0; k < descriptor_size; ++k)
> > > > > corr += (seq1[k]-avg1[i])*(seq2[k]-avg2[j]);
> > > > > corr /= (descriptor_size-1)*dev1[i]*dev2[j];
> > > > > if (corr > best1corr[i]) {
> > > > > best1corr[i] = corr;
> > > > > best1[i] = j;
> > > > > }
> > > > > if (corr > best2corr[j]) {
> > > > > best2corr[j] = corr;
> > > > > best2[j] = i;
> > > > > }
> > > > > }
> > > > > }
> > > > > j = 0;
> > > > > for (i = 0; i < kp1->total; i++)
> > > > > if (best2[best1[i]] == i && best1corr[i] >
> > > > CORRELATION_THRESHOLD)
> > > > > j++;
> > > > > if (j == 0) return; // no matches found
> > > > > *points1 = cvCreateMat(1,j,CV_32FC2);
> > > > > *points2 = cvCreateMat(1,j,CV_32FC2);
> > > > > CvPoint2D32f *p1, *p2;
> > > > > j = 0;
> > > > > for (i = 0; i < kp1->total; i++) {
> > > > > if (best2[best1[i]] == i && best1corr[i] >
> > > > CORRELATION_THRESHOLD) {
> > > > > p1 = &((CvSURFPoint*)cvGetSeqElem(kp1,i))->pt;
> > > > > p2 = &((CvSURFPoint*)cvGetSeqElem(kp2,best1[i]))->pt;
> > > > > (*points1)->data.fl[j*2] = p1->x;
> > > > > (*points1)->data.fl[j*2+1] = p1->y;
> > > > > (*points2)->data.fl[j*2] = p2->x;
> > > > > (*points2)->data.fl[j*2+1] = p2->y;
> > > > > j++;
> > > > > }
> > > > > }
> > > > > free(best2corr);
> > > > > free(best1corr);
> > > > > free(best2);
> > > > > free(best1);
> > > > > free(avg1);
> > > > > free(avg2);
> > > > > free(dev1);
> > > > > free(dev2);
> > > > > }
> > > > >
> > > > > If you construct a fundamental matrix (a model) for the
> > transformation
> > > > > between the two rectangles, you can further determine which
> > > > correspondences
> > > > > are false (by how well they fit the model) and remove them,
which I

> > > > like to
> > > > > do like this:
> > > > >
> > > > > F = cvCreateMat(3,3,CV_32FC1);
> > > > > CvMat *status = cvCreateMat(1,points1->cols,CV_8UC1);
> > > > > int fm_count = cvFindFundamentalMat( points1,points2,F,
> > > > > CV_FM_RANSAC,1.,0.99,status );
> > > > > removeOutliers(&points1,&points2,status);
> > > > >
> > > > > where removeOutliers() is a function I wrote to clean up after
> > > > > cvFindFundamentalMat():
> > > > >
> > > > > // iterates the set of putative correspondences and removes
> > > > correspondences
> > > > > marked as outliers by cvFindFundamentalMat()
> > > > > void removeOutliers(CvMat **points1, CvMat **points2, CvMat
> > *status) {
> > > > > CvMat *points1_ = *points1;
> > > > > CvMat *points2_ = *points2;
> > > > > int count = 0;
> > > > > for (int i = 0; i < status->cols; i++) if
> > > > (CV_MAT_ELEM(*status,unsigned
> > > > > char,0,i)) count++;
> > > > > if (!count) { // no inliers
> > > > > *points1 = NULL;
> > > > > *points2 = NULL;
> > > > > }
> > > > > else {
> > > > > *points1 = cvCreateMat(1,count,CV_32FC2);
> > > > > *points2 = cvCreateMat(1,count,CV_32FC2);
> > > > > int j = 0;
> > > > > for (int i = 0; i < status->cols; i++) {
> > > > > if (CV_MAT_ELEM(*status,unsigned char,0,i)) {
> > > > > (*points1)->data.fl[j*2] = points1_->data.fl[i*2];
> > > > > //p1->x
> > > > > (*points1)->data.fl[j*2+1] = points1_->data.fl[i*2+1];
> > > > > //p1->y
> > > > > (*points2)->data.fl[j*2] = points2_->data.fl[i*2];
> > > > > //p2->x
> > > > > (*points2)->data.fl[j*2+1] = points2_->data.fl[i*2+1];
> > > > > //p2->y
> > > > > j++;
> > > > > }
> > > > > }
> > > > > }
> > > > > cvReleaseMat(&points1_);
> > > > > cvReleaseMat(&points2_);
> > > > > }
> > > > >
> > > > >
> > > > > I hope this helps.
> > > > >
> > > > > -Jostein
> > > > >
> > > > >
> > > > > 2009/1/8 yair_movshovitz <yairmov@>
> > > > >
> > > > > > Hi Everyone,
> > > > > >
> > > > > > I'm trying to understand how to use the SURF features
> > capabilities of
> > > > > > openCV.
> > > > > > My scenario is as follows:
> > > > > > I have two rectangled areas in an image, which are supposed to
> > bound
> > > > > > the same object. I would like to see how good is this
> > assumption. In
> > > > > > other words I would like to see how many features they share.
> > > > > >
> > > > > > Can someone drop me a hint on how to use the SURF
> > implementation of
> > > > > > openCV (or direct me to somewhere that has some documentation
> > of it)
> > > > > >
> > > > > > Thanks,
> > > > > > Yair
> > > > > >
> > > > > >
> > > > > >
> > > > >
> > > > >
> > > > > [Non-text portions of this message have been removed]
> > > > >
> > > >
> > > >
> > > >
> > >
> > >
> > > [Non-text portions of this message have been removed]
> > >
> >
> >  
> >
>
>
> [Non-text portions of this message have been removed]
>


Reply | Threaded
Open this post in threaded view
|

Re: cvExtractSURF

Ricardo.
Hello!

I wonder if you found what was causing this error? 'Cause it's
happening to me too and I cannot figure it out.

I'd appreciate it if you share how you solved it -if you did of
course-.

Regards,
Ricardo

--- In [hidden email], "yair_movshovitz" <yairmov@...> wrote:

>
> I did some investigating and found out that the error is happening
> inside the icvFastHessianDetector() function in cvsurf.cpp
> It happens when the function tries to free the memory it allocated:
>
>   for( octave = k = 0; octave < params->nOctaves; octave++ )
>       for( sc = -1; sc <= params->nOctaveLayers; sc++, k++ )
>       {
>           //this line causes the error at some iteration.
>           cvReleaseMat( &hessians[k] );
>           cvReleaseMat( &traces[k] );
>       }
>
>
> Anyone has an idea why this is happening?
>
> Thanks,
> Yair
>
> --- In [hidden email], "Jostein Austvik Jacobsen"
> <josteinaj@> wrote:
> >
> > I'm using Ubuntu Linux so I can't help you there. Sorry.
> > Jostein
> >
> > 2009/1/20 yair_movshovitz <yairmov@>
> >
> > >   Hi Jostein,
> > >
> > > Thanks again for helping me out.
> > >
> > > I have started using the cvExtractSURF function. and I have the
> > > following problem:
> > > When I call the function I get this error -
> > > Windows has triggered a breakpoint in (my program name).
> > >
> > > This may be due to a corruption of the heap, which indicates a
bug in
> > > (my program name) or any of the DLLs it has loaded.
> > >
> > > Have you ever encountered this error before regarding this
function?
> > >
> > > Thanks,
> > >
> > > Yair
> > >
> > > --- In [hidden email] <OpenCV%
40yahoogroups.com>, "Jostein
> Austvik
> > > Jacobsen"
> > > <josteinaj@> wrote:
> > > >
> > > > You can view the implementation of *cvExtractSURF(...)* here:
> > > >
> > >
> > >
>
http://opencvlibrary.svn.sourceforge.net/viewvc/opencvlibrary/trunk/op
encv/src/cv/cvsurf.cpp,
> > > > however it doesn't contain much comments.
> > > >
> > > >
> > > > *cvExtractSURF( const CvArr* img, const CvArr* mask, CvSeq**
> keypoints,
> > > > CvSeq** descriptors, CvMemStorage* storage, CvSURFParams
params )*
> > > >
> > > > Here, *img* is the image. Use an
> > > > *IplImage<http://opencv.willowgarage.com/wiki/CxCore#IplImage>
> > > > * for the image. To load an image from disk, use
> > > >
> > >
> *cvLoadImage(...)
*<http://opencv.willowgarage.com/wiki/HighGui#cvLoadImage

> > > >,
> > > > and to create your own image, use
> > > >
> > > *cvCreateImage(...)*<
> > > http://opencv.willowgarage.com/wiki/CxCore#CreateImage>.
> > > > Lets say you have a IplImage *image* and want to extract the
> rectangle
> > > > (x,y)->(x+dx,y+dy) from it as an IplImage rectangle, you might
> do this:
> > > >
> > > > CvSize size = cvSize(dx,dy);
> > > > IplImage* rectangle = cvCreateImage(size, IPL_DEPTH_8U, 1);
> > > > for (int i = 0; i < dx; ++i) {
> > > > for (int j = 0; j < dy; ++j) {
> > > > CV_IMAGE_ELEM(rectangle,unsigned char,i,j) =
> > > > CV_IMAGE_ELEM(image,unsigned char,x+i,y+j);
> > > > }
> > > > }
> > > >
> > > > I'm not sure how *mask* is used, but a quick google search
gives
> > > >
> > >
> > >
> http://www.emgu.com/wiki/files/1.4.0.0/html/ad54862f-3d1c-c177-3bdb-
768619f8dd90.htmwhich
> > > > says "The optional input 8-bit mask. The features are only
found in
> > > > the areas that contain more than 50% of non-zero mask
pixels". Just
> > > set it
> > > > to NULL.
> > > >
> > > > *keypoints* and
> > > >
> > >
> *descriptors* <http://en.wikipedia.org/wiki/Feature_(computer_vision)>
> > > >are
> > > > where the results are placed. Initialize them as null-
pointers and
> > > > cvExtractSURF will do the rest for you. Afterwards you can
access a

> > > > descriptor and corresponding keypoint like this:
> > > >
> > > > int k = 0; // the keypoint you want. There are *keypoints-
>total*
> > > keypoints.
> > > > float *seq = (float*)cvGetSeqElem(descriptors, k); // the
> descriptor of
> > > > length 64 or 128
> > > > CvPoint2D32f *p = &((CvSURFPoint*)cvGetSeqElem(keypoints, k))-
>pt;
> > > // the
> > > > (x,y) coordinates of keypoint *k* can now be accessed as *p-
>x* and
> > > *p->y*
> > > >
> > > > The *CvMemStorage*
> > > >
<http://opencv.willowgarage.com/wiki/CxCore#CvMemStorage>struct
> > > > *storage* is used as a mechanism to simplify memory
management. I
> > > believe
> > > > the *keypoints* and *descriptors* structures are put into
*storage*,
> > > so you
> > > > can't release *storage* until you're done using *keypoints*
and
> > > *descriptors
> > > > *.Put a *CvMemStorage *storage = cvCreateMemStorage(0);*
before your
> > > first
> > > > call to cvExtractSURF and *cvClearMemStorage(storage);* after
you're
> > > done
> > > > using *keypoints* and *descriptors*.
> > > >
> > > > SURF takes a couple of parameters through the *CvSURFParams*
struct
> > > *params*.
> > > > You create *params* with *cvSURFParams(double threshold, int
> > > > extended)*where threshold represents the "edgyness" that is
required
> > > > from a feature to
> > > > be recognized as a feature. It can be adjusted to retrieve
more
> or fewer
> > > > features. In the paper
> > > > <http://www.vision.ee.ethz.ch/%7Esurf/eccv06.pdf>describing
the SURF
> > > > detector, they use a threshold of 600 on a 800 x 640
> > > > image which returned 1418 features. The *extended* parameter
is
> a simple
> > > > boolean 1 or 0 which states whether or not to use the extended
> > > descriptor.
> > > > The extended descriptor consists of 128 instead of 64 values
which
> > > should
> > > > gives a better result at the cost of using more memory.
Instead of

> > > creating
> > > > a new CvSURFParams struct for each call to cvExtractSURF, you
> could do:
> > > >
> > > > CvSURFParams params = cvSURFParams(600, 1);
> > > > cvExtractSURF(..., params);
> > > > cvExtractSURF(..., params);
> > > >
> > > >
> > > > There you go. I hope I answered your question :)
> > > >
> > > > Jostein
> > > >
> > > >
> > > > 2009/1/12 yair_movshovitz <yairmov@>
> > > >
> > > > > Hi Jostein,
> > > > >
> > > > > Thanks a lot for your help!
> > > > >
> > > > > Can you please explain the function parameters of
cvExtractSURF?
> > > > > I mean in - cvExtractSURF(img1, NULL, &kp1, &desc1, storage,
> > > > > cvSURFParams(600,EXTENDED_DESCRIPTOR));
> > > > > what is the role of kp1, desc1, storage and the SURFParams?
> > > > > is storage just a temp area for the algorithm to use?
> > > > >
> > > > > Thanks again
> > > > > Yair
> > > > >
> > > > > --- In [hidden email] <OpenCV%40yahoogroups.com>
<OpenCV%

> > > 40yahoogroups.com>, "Jostein
> > >
> > > Austvik
> > > > > Jacobsen"
> > > > >
> > > > > <josteinaj@> wrote:
> > > > > >
> > > > > > If you've got your two rectangled areas stored as img1 and
> img2 you
> > > > > could do
> > > > > > this to extract its keypoints and corresponding
descriptors:

> > > > > >
> > > > > > #define EXTENDED_DESCRIPTOR 1
> > > > > > CvSeq *kp1=NULL, *kp2=NULL;
> > > > > > CvSeq *desc1=NULL, *desc2=NULL;
> > > > > > CvMemStorage *storage = cvCreateMemStorage(0);
> > > > > > cvExtractSURF(img1, NULL, &kp1, &desc1, storage,
> cvSURFParams(600,
> > > > > > EXTENDED_DESCRIPTOR));
> > > > > > cvExtractSURF(img2, NULL, &kp2, &desc2, storage,
> cvSURFParams(600,
> > > > > > EXTENDED_DESCRIPTOR));
> > > > > >
> > > > > > You will have to correlate the descriptors with each
other to
> > > determine
> > > > > > which keypoints in each rectangle corresponds to one
> another. You
> > > > > could use
> > > > > > a BBF tree which is implemented in the latest version of
> OpenCV, but
> > > > > unless
> > > > > > your rectangle is huge, you might just as well just
correlate
> > > them the
> > > > > > standard way, which I do like this:
> > > > > >
> > > > > > #define CORRELATION_THRESHOLD 0.7
> > > > > > // brute-force attempt at correlating the two sets of
features

> > > > > > void bruteMatch(CvMat **points1, CvMat **points2, CvSeq
> *kp1, CvSeq
> > > > > *desc1,
> > > > > > CvSeq *kp2, CvSeq *desc2) {
> > > > > > int i,j,k;
> > > > > > double* avg1 = (double*)malloc(sizeof(double)*kp1->total);
> > > > > > double* avg2 = (double*)malloc(sizeof(double)*kp2->total);
> > > > > > double* dev1 = (double*)malloc(sizeof(double)*kp1->total);
> > > > > > double* dev2 = (double*)malloc(sizeof(double)*kp2->total);
> > > > > > int* best1 = (int*)malloc(sizeof(int)*kp1->total);
> > > > > > int* best2 = (int*)malloc(sizeof(int)*kp2->total);
> > > > > > double* best1corr = (double*)malloc(sizeof(double)*kp1-
>total);
> > > > > > double* best2corr = (double*)malloc(sizeof(double)*kp2-
>total);
> > > > > > float *seq1, *seq2;
> > > > > > int descriptor_size = EXTENDED_DESCRIPTOR ? 128 : 64;
> > > > > > for (i=0; i<kp1->total; i++) {
> > > > > > // find average and standard deviation of each descriptor
> > > > > > avg1[i] = 0;
> > > > > > dev1[i] = 0;
> > > > > > seq1 = (float*)cvGetSeqElem(desc1, i);
> > > > > > for (k=0; k<descriptor_size; k++) avg1[i] += seq1[k];
> > > > > > avg1[i] /= descriptor_size;
> > > > > > for (k=0; k<descriptor_size; k++) dev1[i] +=
> > > > > > (seq1[k]-avg1[i])*(seq1[k]-avg1[i]);
> > > > > > dev1[i] = sqrt(dev1[i]/descriptor_size);
> > > > > >
> > > > > > // initialize best1 and best1corr
> > > > > > best1[i] = -1;
> > > > > > best1corr[i] = -1.;
> > > > > > }
> > > > > > for (j=0; j<kp2->total; j++) {
> > > > > > // find average and standard deviation of each descriptor
> > > > > > avg2[j] = 0;
> > > > > > dev2[j] = 0;
> > > > > > seq2 = (float*)cvGetSeqElem(desc2, j);
> > > > > > for (k=0; k<descriptor_size; k++) avg2[j] += seq2[k];
> > > > > > avg2[j] /= descriptor_size;
> > > > > > for (k=0; k<descriptor_size; k++) dev2[j] +=
> > > > > > (seq2[k]-avg2[j])*(seq2[k]-avg2[j]);
> > > > > > dev2[j] = sqrt(dev2[j]/descriptor_size);
> > > > > >
> > > > > > // initialize best2 and best2corr
> > > > > > best2[j] = -1;
> > > > > > best2corr[j] = -1.;
> > > > > > }
> > > > > > double corr;
> > > > > > for (i = 0; i < kp1->total; ++i) {
> > > > > > seq1 = (float*)cvGetSeqElem(desc1, i);
> > > > > > for (j = 0; j < kp2->total; ++j) {
> > > > > > corr = 0;
> > > > > > seq2 = (float*)cvGetSeqElem(desc2, j);
> > > > > > for (k = 0; k < descriptor_size; ++k)
> > > > > > corr += (seq1[k]-avg1[i])*(seq2[k]-avg2[j]);
> > > > > > corr /= (descriptor_size-1)*dev1[i]*dev2[j];
> > > > > > if (corr > best1corr[i]) {
> > > > > > best1corr[i] = corr;
> > > > > > best1[i] = j;
> > > > > > }
> > > > > > if (corr > best2corr[j]) {
> > > > > > best2corr[j] = corr;
> > > > > > best2[j] = i;
> > > > > > }
> > > > > > }
> > > > > > }
> > > > > > j = 0;
> > > > > > for (i = 0; i < kp1->total; i++)
> > > > > > if (best2[best1[i]] == i && best1corr[i] >
> > > > > CORRELATION_THRESHOLD)
> > > > > > j++;
> > > > > > if (j == 0) return; // no matches found
> > > > > > *points1 = cvCreateMat(1,j,CV_32FC2);
> > > > > > *points2 = cvCreateMat(1,j,CV_32FC2);
> > > > > > CvPoint2D32f *p1, *p2;
> > > > > > j = 0;
> > > > > > for (i = 0; i < kp1->total; i++) {
> > > > > > if (best2[best1[i]] == i && best1corr[i] >
> > > > > CORRELATION_THRESHOLD) {
> > > > > > p1 = &((CvSURFPoint*)cvGetSeqElem(kp1,i))->pt;
> > > > > > p2 = &((CvSURFPoint*)cvGetSeqElem(kp2,best1[i]))->pt;
> > > > > > (*points1)->data.fl[j*2] = p1->x;
> > > > > > (*points1)->data.fl[j*2+1] = p1->y;
> > > > > > (*points2)->data.fl[j*2] = p2->x;
> > > > > > (*points2)->data.fl[j*2+1] = p2->y;
> > > > > > j++;
> > > > > > }
> > > > > > }
> > > > > > free(best2corr);
> > > > > > free(best1corr);
> > > > > > free(best2);
> > > > > > free(best1);
> > > > > > free(avg1);
> > > > > > free(avg2);
> > > > > > free(dev1);
> > > > > > free(dev2);
> > > > > > }
> > > > > >
> > > > > > If you construct a fundamental matrix (a model) for the
> > > transformation
> > > > > > between the two rectangles, you can further determine
which
> > > > > correspondences
> > > > > > are false (by how well they fit the model) and remove
them,

> which I
> > > > > like to
> > > > > > do like this:
> > > > > >
> > > > > > F = cvCreateMat(3,3,CV_32FC1);
> > > > > > CvMat *status = cvCreateMat(1,points1->cols,CV_8UC1);
> > > > > > int fm_count = cvFindFundamentalMat( points1,points2,F,
> > > > > > CV_FM_RANSAC,1.,0.99,status );
> > > > > > removeOutliers(&points1,&points2,status);
> > > > > >
> > > > > > where removeOutliers() is a function I wrote to clean up
after
> > > > > > cvFindFundamentalMat():
> > > > > >
> > > > > > // iterates the set of putative correspondences and
removes
> > > > > correspondences
> > > > > > marked as outliers by cvFindFundamentalMat()
> > > > > > void removeOutliers(CvMat **points1, CvMat **points2,
CvMat

> > > *status) {
> > > > > > CvMat *points1_ = *points1;
> > > > > > CvMat *points2_ = *points2;
> > > > > > int count = 0;
> > > > > > for (int i = 0; i < status->cols; i++) if
> > > > > (CV_MAT_ELEM(*status,unsigned
> > > > > > char,0,i)) count++;
> > > > > > if (!count) { // no inliers
> > > > > > *points1 = NULL;
> > > > > > *points2 = NULL;
> > > > > > }
> > > > > > else {
> > > > > > *points1 = cvCreateMat(1,count,CV_32FC2);
> > > > > > *points2 = cvCreateMat(1,count,CV_32FC2);
> > > > > > int j = 0;
> > > > > > for (int i = 0; i < status->cols; i++) {
> > > > > > if (CV_MAT_ELEM(*status,unsigned char,0,i)) {
> > > > > > (*points1)->data.fl[j*2] = points1_->data.fl[i*2];
> > > > > > //p1->x
> > > > > > (*points1)->data.fl[j*2+1] = points1_->data.fl[i*2+1];
> > > > > > //p1->y
> > > > > > (*points2)->data.fl[j*2] = points2_->data.fl[i*2];
> > > > > > //p2->x
> > > > > > (*points2)->data.fl[j*2+1] = points2_->data.fl[i*2+1];
> > > > > > //p2->y
> > > > > > j++;
> > > > > > }
> > > > > > }
> > > > > > }
> > > > > > cvReleaseMat(&points1_);
> > > > > > cvReleaseMat(&points2_);
> > > > > > }
> > > > > >
> > > > > >
> > > > > > I hope this helps.
> > > > > >
> > > > > > -Jostein
> > > > > >
> > > > > >
> > > > > > 2009/1/8 yair_movshovitz <yairmov@>
> > > > > >
> > > > > > > Hi Everyone,
> > > > > > >
> > > > > > > I'm trying to understand how to use the SURF features
> > > capabilities of
> > > > > > > openCV.
> > > > > > > My scenario is as follows:
> > > > > > > I have two rectangled areas in an image, which are
supposed to
> > > bound
> > > > > > > the same object. I would like to see how good is this
> > > assumption. In
> > > > > > > other words I would like to see how many features they
share.
> > > > > > >
> > > > > > > Can someone drop me a hint on how to use the SURF
> > > implementation of
> > > > > > > openCV (or direct me to somewhere that has some
documentation

> > > of it)
> > > > > > >
> > > > > > > Thanks,
> > > > > > > Yair
> > > > > > >
> > > > > > >
> > > > > > >
> > > > > >
> > > > > >
> > > > > > [Non-text portions of this message have been removed]
> > > > > >
> > > > >
> > > > >
> > > > >
> > > >
> > > >
> > > > [Non-text portions of this message have been removed]
> > > >
> > >
> > >  
> > >
> >
> >
> > [Non-text portions of this message have been removed]
> >
>


Reply | Threaded
Open this post in threaded view
|

Re: cvExtractSURF

abedabed
This post has NOT been accepted by the mailing list yet.
In reply to this post by Jostein Austvik Jacobsen
Sorry, I just have one question concerning how to use cvExtractSURF for detection, because for description I will use a random test to denote the keypoints. I mean, is it possible to show me a way to use cvExtractSURF like cvGoodFeaturesToTrack, so that I will get the coordinate points in a table? Thank you for helping.
Reply | Threaded
Open this post in threaded view
|

Re: Re: cvExtractSURF

Raluca Borca
In reply to this post by Ricardo.
Hello !

I have encountered the same problem.

Can anybody tell what is the solution ?

Thanks.

On Sun, Mar 1, 2009 at 8:21 AM, Ricardo. <[hidden email]> wrote:

>   Hello!
>
> I wonder if you found what was causing this error? 'Cause it's
> happening to me too and I cannot figure it out.
>
> I'd appreciate it if you share how you solved it -if you did of
> course-.
>
> Regards,
> Ricardo
>
>
> --- In [hidden email] <OpenCV%40yahoogroups.com>,
> "yair_movshovitz" <yairmov@...> wrote:
> >
> > I did some investigating and found out that the error is happening
> > inside the icvFastHessianDetector() function in cvsurf.cpp
> > It happens when the function tries to free the memory it allocated:
> >
> > for( octave = k = 0; octave < params->nOctaves; octave++ )
> > for( sc = -1; sc <= params->nOctaveLayers; sc++, k++ )
> > {
> > //this line causes the error at some iteration.
> > cvReleaseMat( &hessians[k] );
> > cvReleaseMat( &traces[k] );
> > }
> >
> >
> > Anyone has an idea why this is happening?
> >
> > Thanks,
> > Yair
> >
> > --- In [hidden email] <OpenCV%40yahoogroups.com>, "Jostein
> Austvik Jacobsen"
> > <josteinaj@> wrote:
> > >
> > > I'm using Ubuntu Linux so I can't help you there. Sorry.
> > > Jostein
> > >
> > > 2009/1/20 yair_movshovitz <yairmov@>
> > >
> > > > Hi Jostein,
> > > >
> > > > Thanks again for helping me out.
> > > >
> > > > I have started using the cvExtractSURF function. and I have the
> > > > following problem:
> > > > When I call the function I get this error -
> > > > Windows has triggered a breakpoint in (my program name).
> > > >
> > > > This may be due to a corruption of the heap, which indicates a
> bug in
> > > > (my program name) or any of the DLLs it has loaded.
> > > >
> > > > Have you ever encountered this error before regarding this
> function?
> > > >
> > > > Thanks,
> > > >
> > > > Yair
> > > >
> > > > --- In [hidden email] <OpenCV%40yahoogroups.com> <OpenCV%
> 40yahoogroups.com>, "Jostein
> > Austvik
> > > > Jacobsen"
> > > > <josteinaj@> wrote:
> > > > >
> > > > > You can view the implementation of *cvExtractSURF(...)* here:
> > > > >
> > > >
> > > >
> >
> http://opencvlibrary.svn.sourceforge.net/viewvc/opencvlibrary/trunk/op
> encv/src/cv/cvsurf.cpp,
> > > > > however it doesn't contain much comments.
> > > > >
> > > > >
> > > > > *cvExtractSURF( const CvArr* img, const CvArr* mask, CvSeq**
> > keypoints,
> > > > > CvSeq** descriptors, CvMemStorage* storage, CvSURFParams
> params )*
> > > > >
> > > > > Here, *img* is the image. Use an
> > > > > *IplImage<http://opencv.willowgarage.com/wiki/CxCore#IplImage>
> > > > > * for the image. To load an image from disk, use
> > > > >
> > > >
> > *cvLoadImage(...)
> *<http://opencv.willowgarage.com/wiki/HighGui#cvLoadImage
> > > > >,
> > > > > and to create your own image, use
> > > > >
> > > > *cvCreateImage(...)*<
> > > > http://opencv.willowgarage.com/wiki/CxCore#CreateImage>.
> > > > > Lets say you have a IplImage *image* and want to extract the
> > rectangle
> > > > > (x,y)->(x+dx,y+dy) from it as an IplImage rectangle, you might
> > do this:
> > > > >
> > > > > CvSize size = cvSize(dx,dy);
> > > > > IplImage* rectangle = cvCreateImage(size, IPL_DEPTH_8U, 1);
> > > > > for (int i = 0; i < dx; ++i) {
> > > > > for (int j = 0; j < dy; ++j) {
> > > > > CV_IMAGE_ELEM(rectangle,unsigned char,i,j) =
> > > > > CV_IMAGE_ELEM(image,unsigned char,x+i,y+j);
> > > > > }
> > > > > }
> > > > >
> > > > > I'm not sure how *mask* is used, but a quick google search
> gives
> > > > >
> > > >
> > > >
> > http://www.emgu.com/wiki/files/1.4.0.0/html/ad54862f-3d1c-c177-3bdb-
> 768619f8dd90.htmwhich
> > > > > says "The optional input 8-bit mask. The features are only
> found in
> > > > > the areas that contain more than 50% of non-zero mask
> pixels". Just
> > > > set it
> > > > > to NULL.
> > > > >
> > > > > *keypoints* and
> > > > >
> > > >
> > *descriptors* <http://en.wikipedia.org/wiki/Feature_(computer_vision)>
> > > > >are
> > > > > where the results are placed. Initialize them as null-
> pointers and
> > > > > cvExtractSURF will do the rest for you. Afterwards you can
> access a
> > > > > descriptor and corresponding keypoint like this:
> > > > >
> > > > > int k = 0; // the keypoint you want. There are *keypoints-
> >total*
> > > > keypoints.
> > > > > float *seq = (float*)cvGetSeqElem(descriptors, k); // the
> > descriptor of
> > > > > length 64 or 128
> > > > > CvPoint2D32f *p = &((CvSURFPoint*)cvGetSeqElem(keypoints, k))-
> >pt;
> > > > // the
> > > > > (x,y) coordinates of keypoint *k* can now be accessed as *p-
> >x* and
> > > > *p->y*
> > > > >
> > > > > The *CvMemStorage*
> > > > >
> <http://opencv.willowgarage.com/wiki/CxCore#CvMemStorage>struct
> > > > > *storage* is used as a mechanism to simplify memory
> management. I
> > > > believe
> > > > > the *keypoints* and *descriptors* structures are put into
> *storage*,
> > > > so you
> > > > > can't release *storage* until you're done using *keypoints*
> and
> > > > *descriptors
> > > > > *.Put a *CvMemStorage *storage = cvCreateMemStorage(0);*
> before your
> > > > first
> > > > > call to cvExtractSURF and *cvClearMemStorage(storage);* after
> you're
> > > > done
> > > > > using *keypoints* and *descriptors*.
> > > > >
> > > > > SURF takes a couple of parameters through the *CvSURFParams*
> struct
> > > > *params*.
> > > > > You create *params* with *cvSURFParams(double threshold, int
> > > > > extended)*where threshold represents the "edgyness" that is
> required
> > > > > from a feature to
> > > > > be recognized as a feature. It can be adjusted to retrieve
> more
> > or fewer
> > > > > features. In the paper
> > > > > <http://www.vision.ee.ethz.ch/%7Esurf/eccv06.pdf>describing
> the SURF
> > > > > detector, they use a threshold of 600 on a 800 x 640
> > > > > image which returned 1418 features. The *extended* parameter
> is
> > a simple
> > > > > boolean 1 or 0 which states whether or not to use the extended
> > > > descriptor.
> > > > > The extended descriptor consists of 128 instead of 64 values
> which
> > > > should
> > > > > gives a better result at the cost of using more memory.
> Instead of
> > > > creating
> > > > > a new CvSURFParams struct for each call to cvExtractSURF, you
> > could do:
> > > > >
> > > > > CvSURFParams params = cvSURFParams(600, 1);
> > > > > cvExtractSURF(..., params);
> > > > > cvExtractSURF(..., params);
> > > > >
> > > > >
> > > > > There you go. I hope I answered your question :)
> > > > >
> > > > > Jostein
> > > > >
> > > > >
> > > > > 2009/1/12 yair_movshovitz <yairmov@>
> > > > >
> > > > > > Hi Jostein,
> > > > > >
> > > > > > Thanks a lot for your help!
> > > > > >
> > > > > > Can you please explain the function parameters of
> cvExtractSURF?
> > > > > > I mean in - cvExtractSURF(img1, NULL, &kp1, &desc1, storage,
> > > > > > cvSURFParams(600,EXTENDED_DESCRIPTOR));
> > > > > > what is the role of kp1, desc1, storage and the SURFParams?
> > > > > > is storage just a temp area for the algorithm to use?
> > > > > >
> > > > > > Thanks again
> > > > > > Yair
> > > > > >
> > > > > > --- In [hidden email] <OpenCV%40yahoogroups.com><OpenCV%
> 40yahoogroups.com>
> <OpenCV%
> > > > 40yahoogroups.com>, "Jostein
> > > >
> > > > Austvik
> > > > > > Jacobsen"
> > > > > >
> > > > > > <josteinaj@> wrote:
> > > > > > >
> > > > > > > If you've got your two rectangled areas stored as img1 and
> > img2 you
> > > > > > could do
> > > > > > > this to extract its keypoints and corresponding
> descriptors:
> > > > > > >
> > > > > > > #define EXTENDED_DESCRIPTOR 1
> > > > > > > CvSeq *kp1=NULL, *kp2=NULL;
> > > > > > > CvSeq *desc1=NULL, *desc2=NULL;
> > > > > > > CvMemStorage *storage = cvCreateMemStorage(0);
> > > > > > > cvExtractSURF(img1, NULL, &kp1, &desc1, storage,
> > cvSURFParams(600,
> > > > > > > EXTENDED_DESCRIPTOR));
> > > > > > > cvExtractSURF(img2, NULL, &kp2, &desc2, storage,
> > cvSURFParams(600,
> > > > > > > EXTENDED_DESCRIPTOR));
> > > > > > >
> > > > > > > You will have to correlate the descriptors with each
> other to
> > > > determine
> > > > > > > which keypoints in each rectangle corresponds to one
> > another. You
> > > > > > could use
> > > > > > > a BBF tree which is implemented in the latest version of
> > OpenCV, but
> > > > > > unless
> > > > > > > your rectangle is huge, you might just as well just
> correlate
> > > > them the
> > > > > > > standard way, which I do like this:
> > > > > > >
> > > > > > > #define CORRELATION_THRESHOLD 0.7
> > > > > > > // brute-force attempt at correlating the two sets of
> features
> > > > > > > void bruteMatch(CvMat **points1, CvMat **points2, CvSeq
> > *kp1, CvSeq
> > > > > > *desc1,
> > > > > > > CvSeq *kp2, CvSeq *desc2) {
> > > > > > > int i,j,k;
> > > > > > > double* avg1 = (double*)malloc(sizeof(double)*kp1->total);
> > > > > > > double* avg2 = (double*)malloc(sizeof(double)*kp2->total);
> > > > > > > double* dev1 = (double*)malloc(sizeof(double)*kp1->total);
> > > > > > > double* dev2 = (double*)malloc(sizeof(double)*kp2->total);
> > > > > > > int* best1 = (int*)malloc(sizeof(int)*kp1->total);
> > > > > > > int* best2 = (int*)malloc(sizeof(int)*kp2->total);
> > > > > > > double* best1corr = (double*)malloc(sizeof(double)*kp1-
> >total);
> > > > > > > double* best2corr = (double*)malloc(sizeof(double)*kp2-
> >total);
> > > > > > > float *seq1, *seq2;
> > > > > > > int descriptor_size = EXTENDED_DESCRIPTOR ? 128 : 64;
> > > > > > > for (i=0; i<kp1->total; i++) {
> > > > > > > // find average and standard deviation of each descriptor
> > > > > > > avg1[i] = 0;
> > > > > > > dev1[i] = 0;
> > > > > > > seq1 = (float*)cvGetSeqElem(desc1, i);
> > > > > > > for (k=0; k<descriptor_size; k++) avg1[i] += seq1[k];
> > > > > > > avg1[i] /= descriptor_size;
> > > > > > > for (k=0; k<descriptor_size; k++) dev1[i] +=
> > > > > > > (seq1[k]-avg1[i])*(seq1[k]-avg1[i]);
> > > > > > > dev1[i] = sqrt(dev1[i]/descriptor_size);
> > > > > > >
> > > > > > > // initialize best1 and best1corr
> > > > > > > best1[i] = -1;
> > > > > > > best1corr[i] = -1.;
> > > > > > > }
> > > > > > > for (j=0; j<kp2->total; j++) {
> > > > > > > // find average and standard deviation of each descriptor
> > > > > > > avg2[j] = 0;
> > > > > > > dev2[j] = 0;
> > > > > > > seq2 = (float*)cvGetSeqElem(desc2, j);
> > > > > > > for (k=0; k<descriptor_size; k++) avg2[j] += seq2[k];
> > > > > > > avg2[j] /= descriptor_size;
> > > > > > > for (k=0; k<descriptor_size; k++) dev2[j] +=
> > > > > > > (seq2[k]-avg2[j])*(seq2[k]-avg2[j]);
> > > > > > > dev2[j] = sqrt(dev2[j]/descriptor_size);
> > > > > > >
> > > > > > > // initialize best2 and best2corr
> > > > > > > best2[j] = -1;
> > > > > > > best2corr[j] = -1.;
> > > > > > > }
> > > > > > > double corr;
> > > > > > > for (i = 0; i < kp1->total; ++i) {
> > > > > > > seq1 = (float*)cvGetSeqElem(desc1, i);
> > > > > > > for (j = 0; j < kp2->total; ++j) {
> > > > > > > corr = 0;
> > > > > > > seq2 = (float*)cvGetSeqElem(desc2, j);
> > > > > > > for (k = 0; k < descriptor_size; ++k)
> > > > > > > corr += (seq1[k]-avg1[i])*(seq2[k]-avg2[j]);
> > > > > > > corr /= (descriptor_size-1)*dev1[i]*dev2[j];
> > > > > > > if (corr > best1corr[i]) {
> > > > > > > best1corr[i] = corr;
> > > > > > > best1[i] = j;
> > > > > > > }
> > > > > > > if (corr > best2corr[j]) {
> > > > > > > best2corr[j] = corr;
> > > > > > > best2[j] = i;
> > > > > > > }
> > > > > > > }
> > > > > > > }
> > > > > > > j = 0;
> > > > > > > for (i = 0; i < kp1->total; i++)
> > > > > > > if (best2[best1[i]] == i && best1corr[i] >
> > > > > > CORRELATION_THRESHOLD)
> > > > > > > j++;
> > > > > > > if (j == 0) return; // no matches found
> > > > > > > *points1 = cvCreateMat(1,j,CV_32FC2);
> > > > > > > *points2 = cvCreateMat(1,j,CV_32FC2);
> > > > > > > CvPoint2D32f *p1, *p2;
> > > > > > > j = 0;
> > > > > > > for (i = 0; i < kp1->total; i++) {
> > > > > > > if (best2[best1[i]] == i && best1corr[i] >
> > > > > > CORRELATION_THRESHOLD) {
> > > > > > > p1 = &((CvSURFPoint*)cvGetSeqElem(kp1,i))->pt;
> > > > > > > p2 = &((CvSURFPoint*)cvGetSeqElem(kp2,best1[i]))->pt;
> > > > > > > (*points1)->data.fl[j*2] = p1->x;
> > > > > > > (*points1)->data.fl[j*2+1] = p1->y;
> > > > > > > (*points2)->data.fl[j*2] = p2->x;
> > > > > > > (*points2)->data.fl[j*2+1] = p2->y;
> > > > > > > j++;
> > > > > > > }
> > > > > > > }
> > > > > > > free(best2corr);
> > > > > > > free(best1corr);
> > > > > > > free(best2);
> > > > > > > free(best1);
> > > > > > > free(avg1);
> > > > > > > free(avg2);
> > > > > > > free(dev1);
> > > > > > > free(dev2);
> > > > > > > }
> > > > > > >
> > > > > > > If you construct a fundamental matrix (a model) for the
> > > > transformation
> > > > > > > between the two rectangles, you can further determine
> which
> > > > > > correspondences
> > > > > > > are false (by how well they fit the model) and remove
> them,
> > which I
> > > > > > like to
> > > > > > > do like this:
> > > > > > >
> > > > > > > F = cvCreateMat(3,3,CV_32FC1);
> > > > > > > CvMat *status = cvCreateMat(1,points1->cols,CV_8UC1);
> > > > > > > int fm_count = cvFindFundamentalMat( points1,points2,F,
> > > > > > > CV_FM_RANSAC,1.,0.99,status );
> > > > > > > removeOutliers(&points1,&points2,status);
> > > > > > >
> > > > > > > where removeOutliers() is a function I wrote to clean up
> after
> > > > > > > cvFindFundamentalMat():
> > > > > > >
> > > > > > > // iterates the set of putative correspondences and
> removes
> > > > > > correspondences
> > > > > > > marked as outliers by cvFindFundamentalMat()
> > > > > > > void removeOutliers(CvMat **points1, CvMat **points2,
> CvMat
> > > > *status) {
> > > > > > > CvMat *points1_ = *points1;
> > > > > > > CvMat *points2_ = *points2;
> > > > > > > int count = 0;
> > > > > > > for (int i = 0; i < status->cols; i++) if
> > > > > > (CV_MAT_ELEM(*status,unsigned
> > > > > > > char,0,i)) count++;
> > > > > > > if (!count) { // no inliers
> > > > > > > *points1 = NULL;
> > > > > > > *points2 = NULL;
> > > > > > > }
> > > > > > > else {
> > > > > > > *points1 = cvCreateMat(1,count,CV_32FC2);
> > > > > > > *points2 = cvCreateMat(1,count,CV_32FC2);
> > > > > > > int j = 0;
> > > > > > > for (int i = 0; i < status->cols; i++) {
> > > > > > > if (CV_MAT_ELEM(*status,unsigned char,0,i)) {
> > > > > > > (*points1)->data.fl[j*2] = points1_->data.fl[i*2];
> > > > > > > //p1->x
> > > > > > > (*points1)->data.fl[j*2+1] = points1_->data.fl[i*2+1];
> > > > > > > //p1->y
> > > > > > > (*points2)->data.fl[j*2] = points2_->data.fl[i*2];
> > > > > > > //p2->x
> > > > > > > (*points2)->data.fl[j*2+1] = points2_->data.fl[i*2+1];
> > > > > > > //p2->y
> > > > > > > j++;
> > > > > > > }
> > > > > > > }
> > > > > > > }
> > > > > > > cvReleaseMat(&points1_);
> > > > > > > cvReleaseMat(&points2_);
> > > > > > > }
> > > > > > >
> > > > > > >
> > > > > > > I hope this helps.
> > > > > > >
> > > > > > > -Jostein
> > > > > > >
> > > > > > >
> > > > > > > 2009/1/8 yair_movshovitz <yairmov@>
> > > > > > >
> > > > > > > > Hi Everyone,
> > > > > > > >
> > > > > > > > I'm trying to understand how to use the SURF features
> > > > capabilities of
> > > > > > > > openCV.
> > > > > > > > My scenario is as follows:
> > > > > > > > I have two rectangled areas in an image, which are
> supposed to
> > > > bound
> > > > > > > > the same object. I would like to see how good is this
> > > > assumption. In
> > > > > > > > other words I would like to see how many features they
> share.
> > > > > > > >
> > > > > > > > Can someone drop me a hint on how to use the SURF
> > > > implementation of
> > > > > > > > openCV (or direct me to somewhere that has some
> documentation
> > > > of it)
> > > > > > > >
> > > > > > > > Thanks,
> > > > > > > > Yair
> > > > > > > >
> > > > > > > >
> > > > > > > >
> > > > > > >
> > > > > > >
> > > > > > > [Non-text portions of this message have been removed]
> > > > > > >
> > > > > >
> > > > > >
> > > > > >
> > > > >
> > > > >
> > > > > [Non-text portions of this message have been removed]
> > > > >
> > > >
> > > >
> > > >
> > >
> > >
> > > [Non-text portions of this message have been removed]
> > >
> >
>
>  
>


[Non-text portions of this message have been removed]

Reply | Threaded
Open this post in threaded view
|

Re: Re: cvExtractSURF

Raluca Borca
To be more precise, I encountered the problem at the following line in
cvsurf.cpp

/* hessian detector */
    for( octave = k = 0; octave < params->nOctaves; octave++ )
    {
        for( sc = -1; sc <= params->nOctaveLayers; sc++, k++ )
        {
            if ( sc < 0 )
                sizeCache[k] = size = 7 << octave; // gaussian scale 1.0;
            else
                sizeCache[k] = size = (sc*6 + 9) << octave; // gaussian
scale size*1.2/9.;
            scaleCache[k] = scale = MAX(size, SIZE0);

            hessian_rows = (sum->rows)*SIZE0/scale;
            hessian_cols = (sum->cols)*SIZE0/scale;
            hessians[k] = cvCreateMat( hessian_rows, hessian_cols, CV_32FC1
);
            traces[k] = cvCreateMat( hessian_rows, hessian_cols, CV_32FC1
);  // this is the point where I get an out of memory exception

......

} } }

On Thu, Mar 12, 2009 at 12:26 PM, Raluca Borca <[hidden email]>wrote:

> Hello !
>
> I have encountered the same problem.
>
> Can anybody tell what is the solution ?
>
> Thanks.
>
>
> On Sun, Mar 1, 2009 at 8:21 AM, Ricardo. <[hidden email]> wrote:
>
>>   Hello!
>>
>> I wonder if you found what was causing this error? 'Cause it's
>> happening to me too and cannot figure it out.
>>
>> I'd appreciate it if you share how you solved it -if you did of
>> course-.
>>
>> Regards,
>> Ricardo
>>
>>
>> --- In [hidden email] <OpenCV%40yahoogroups.com>,
>> "yair_movshovitz" <yairmov@...> wrote:
>> >
>> > I did some investigating and found out the the error is happening
>> > inside the icvFastHessianDetector() function in cvsurf.cpp
>> > It happens when the function tries to free the memory it allocated:
>> >
>> > for( octave = k = 0; octave < params->nOctaves; octave++ )
>> > for( sc = -1; sc <= params->nOctaveLayers; sc++, k++ )
>> > {
>> > //this line causes the error at some iteration.
>> > cvReleaseMat( &hessians[k] );
>> > cvReleaseMat( &traces[k] );
>> > }
>> >
>> >
>> > Anyone has an idea why this is happening?
>> >
>> > Thanks,
>> > Yair
>> >
>> > --- In [hidden email] <OpenCV%40yahoogroups.com>, "Jostein
>> Austvik Jacobsen"
>> > <josteinaj@> wrote:
>> > >
>> > > I'm using Ubuntu Linux so I can't help you there. Sorry.
>> > > Jostein
>> > >
>> > > 2009/1/20 yair_movshovitz <yairmov@>
>> > >
>> > > > Hi Jostein,
>> > > >
>> > > > Thanks again for helping me out.
>> > > >
>> > > > I have started using the cvExtractSURF function. and I have the
>> > > > following problem:
>> > > > When I call the function I get this error -
>> > > > Windows has triggered a breakpoint in (my program name).
>> > > >
>> > > > This may be due to a corruption of the heap, which indicates a
>> bug in
>> > > > (my program name) or any of the DLLs it has loaded.
>> > > >
>> > > > Have you ever encountered this error before regarding this
>> function?
>> > > >
>> > > > Thanks,
>> > > >
>> > > > Yair
>> > > >
>> > > > --- In [hidden email] <OpenCV%40yahoogroups.com> <OpenCV%
>> 40yahoogroups.com>, "Jostein
>> > Austvik
>> > > > Jacobsen"
>> > > > <josteinaj@> wrote:
>> > > > >
>> > > > > You can view the implementation of *cvExtractSURF(...)* here:
>> > > > >
>> > > >
>> > > >
>> >
>> http://opencvlibrary.svn.sourceforge.net/viewvc/opencvlibrary/trunk/op
>> encv/src/cv/cvsurf.cpp,
>> > > > > however it doesn't contain much comments.
>> > > > >
>> > > > >
>> > > > > *cvExtractSURF( const CvArr* img, const CvArr* mask, CvSeq**
>> > keypoints,
>> > > > > CvSeq** descriptors, CvMemStorage* storage, CvSURFParams
>> params )*
>> > > > >
>> > > > > Here, *img* is the image. Use an
>> > > > > *IplImage<http://opencv.willowgarage.com/wiki/CxCore#IplImage>
>> > > > > * for the image. To load an image from disk, use
>> > > > >
>> > > >
>> > *cvLoadImage(...)
>> *<http://opencv.willowgarage.com/wiki/HighGui#cvLoadImage
>> > > > >,
>> > > > > and to create your own image, use
>> > > > >
>> > > > *cvCreateImage(...)*<
>> > > > http://opencv.willowgarage.com/wiki/CxCore#CreateImage>.
>> > > > > Lets say you have a IplImage *image* and want to extract the
>> > rectangle
>> > > > > (x,y)->(x+dx,y+dy) from it as an IplImage rectangle, you might
>> > do this:
>> > > > >
>> > > > > CvSize size = cvSize(dx,dy);
>> > > > > IplImage* rectangle = cvCreateImage(size, IPL_DEPTH_8U, 1);
>> > > > > for (int i = 0; i < dx; ++i) {
>> > > > > for (int j = 0; j < dy; ++j) {
>> > > > > CV_IMAGE_ELEM(rectangle,unsigned char,i,j) =
>> > > > > CV_IMAGE_ELEM(image,unsigned char,x+i,y+j);
>> > > > > }
>> > > > > }
>> > > > >
>> > > > > I'm not sure how *mask* is used, but a quick google search
>> gives
>> > > > >
>> > > >
>> > > >
>> > http://www.emgu.com/wiki/files/1.4.0.0/html/ad54862f-3d1c-c177-3bdb-
>> 768619f8dd90.htmwhich
>> > > > > says "The optional input 8-bit mask. The features are only
>> found in
>> > > > > the areas that contain more than 50% of non-zero mask
>> pixels". Just
>> > > > set it
>> > > > > to NULL.
>> > > > >
>> > > > > *keypoints* and
>> > > > >
>> > > >
>> > *descriptors*<<a href="http://en.wikipedia.org/wiki/Feature_%">http://en.wikipedia.org/wiki/Feature_%
>> 28computer_vision%29
>> > > > >are
>> > > > > where the results are placed. Initialize them as null-
>> pointers and
>> > > > > cvExtractSURF will do the rest for you. Afterwards you can
>> access a
>> > > > > descriptor and corresponding keypoint like this:
>> > > > >
>> > > > > int k = 0; // the keypoint you want. There are *keypoints-
>> >total*
>> > > > keypoints.
>> > > > > float *seq = (float*)cvGetSeqElem(descriptors, k); // the
>> > descriptor of
>> > > > > length 64 or 128
>> > > > > CvPoint2D32f *p = &((CvSURFPoint*)cvGetSeqElem(keypoints, k))-
>> >pt;
>> > > > // the
>> > > > > (x,y) coordinates of keypoint *k* can now be accessed as *p-
>> >x* and
>> > > > *p->y*
>> > > > >
>> > > > > The *CvMemStorage*
>> > > > >
>> <http://opencv.willowgarage.com/wiki/CxCore#CvMemStorage>struct
>> > > > > *storage* is used as a mechanism to simplify memory
>> management. I
>> > > > believe
>> > > > > the *keypoints* and *descriptors* structures are put into
>> *storage*,
>> > > > so you
>> > > > > can't release *storage* until you're done using *keypoints*
>> and
>> > > > *descriptors
>> > > > > *.Put a *CvMemStorage *storage = cvCreateMemStorage(0);*
>> before your
>> > > > first
>> > > > > call to cvExtractSURF and *cvClearMemStorage(storage);* after
>> you're
>> > > > done
>> > > > > using *keypoints* and *descriptors*.
>> > > > >
>> > > > > SURF takes a couple of parameters through the *CvSURFParams*
>> struct
>> > > > *params*.
>> > > > > You create *params* with *cvSURFParams(double threshold, int
>> > > > > extended)*where threshold represents the "edgyness" that is
>> required
>> > > > > from a feature to
>> > > > > be recognized as a feature. It can be adjusted to retrieve
>> more
>> > or fewer
>> > > > > features. In the paper
>> > > > > <http://www.vision.ee.ethz.ch/%7Esurf/eccv06.pdf>describing
>> the SURF
>> > > > > detector, they use a threshold of 600 on a 800 x 640
>> > > > > image which returned 1418 features. The *extended* parameter
>> is
>> > a simple
>> > > > > boolean 1 or 0 which states whether or not to use the extended
>> > > > descriptor.
>> > > > > The extended descriptor consists of 128 instead of 64 values
>> which
>> > > > should
>> > > > > gives a better result at the cost of using more memory.
>> Instead of
>> > > > creating
>> > > > > a new CvSURFParams struct for each call to cvExtractSURF, you
>> > could do:
>> > > > >
>> > > > > CvSURFParams params = cvSURFParams(600, 1);
>> > > > > cvExtractSURF(..., params);
>> > > > > cvExtractSURF(..., params);
>> > > > >
>> > > > >
>> > > > > There you go. I hope I answered your question :)
>> > > > >
>> > > > > Jostein
>> > > > >
>> > > > >
>> > > > > 2009/1/12 yair_movshovitz <yairmov@>
>> > > > >
>> > > > > > Hi Jostein,
>> > > > > >
>> > > > > > Thanks a lot for your help!
>> > > > > >
>> > > > > > Can you please explain the function parameters of
>> cvExtractSURF?
>> > > > > > I mean in - cvExtractSURF(img1, NULL, &kp1, &desc1, storage,
>> > > > > > cvSURFParams(600,EXTENDED_DESCRIPTOR));
>> > > > > > what is the role of kp1, desc1, storage and the SURFParams?
>> > > > > > is storage just a temp area for the algorithm to use?
>> > > > > >
>> > > > > > Thanks again
>> > > > > > Yair
>> > > > > >
>> > > > > > --- In [hidden email] <OpenCV%40yahoogroups.com><OpenCV%
>> 40yahoogroups.com>
>> <OpenCV%
>> > > > 40yahoogroups.com>, "Jostein
>> > > >
>> > > > Austvik
>> > > > > > Jacobsen"
>> > > > > >
>> > > > > > <josteinaj@> wrote:
>> > > > > > >
>> > > > > > > If you've got your two rectangled areas stored as img1 and
>> > img2 you
>> > > > > > could do
>> > > > > > > this to extract its keypoints and corresponding
>> descriptors:
>> > > > > > >
>> > > > > > > #define EXTENDED_DESCRIPTOR 1
>> > > > > > > CvSeq *kp1=NULL, *kp2=NULL;
>> > > > > > > CvSeq *desc1=NULL, *desc2=NULL;
>> > > > > > > CvMemStorage *storage = cvCreateMemStorage(0);
>> > > > > > > cvExtractSURF(img1, NULL, &kp1, &desc1, storage,
>> > cvSURFParams(600,
>> > > > > > > EXTENDED_DESCRIPTOR));
>> > > > > > > cvExtractSURF(img2, NULL, &kp2, &desc2, storage,
>> > cvSURFParams(600,
>> > > > > > > EXTENDED_DESCRIPTOR));
>> > > > > > >
>> > > > > > > You will have to correlate the descriptors with each
>> other to
>> > > > determine
>> > > > > > > which keypoints in each rectangle corresponds to one
>> > another. You
>> > > > > > could use
>> > > > > > > a BBF tree which is implemented in the latest version of
>> > OpenCV, but
>> > > > > > unless
>> > > > > > > your rectangle is huge, you might just as well just
>> correlate
>> > > > them the
>> > > > > > > standard way, which I do like this:
>> > > > > > >
>> > > > > > > #define CORRELATION_THRESHOLD 0.7
>> > > > > > > // brute-force attempt at correlating the two sets of
>> features
>> > > > > > > void bruteMatch(CvMat **points1, CvMat **points2, CvSeq
>> > *kp1, CvSeq
>> > > > > > *desc1,
>> > > > > > > CvSeq *kp2, CvSeq *desc2) {
>> > > > > > > int i,j,k;
>> > > > > > > double* avg1 = (double*)malloc(sizeof(double)*kp1->total);
>> > > > > > > double* avg2 = (double*)malloc(sizeof(double)*kp2->total);
>> > > > > > > double* dev1 = (double*)malloc(sizeof(double)*kp1->total);
>> > > > > > > double* dev2 = (double*)malloc(sizeof(double)*kp2->total);
>> > > > > > > int* best1 = (int*)malloc(sizeof(int)*kp1->total);
>> > > > > > > int* best2 = (int*)malloc(sizeof(int)*kp2->total);
>> > > > > > > double* best1corr = (double*)malloc(sizeof(double)*kp1-
>> >total);
>> > > > > > > double* best2corr = (double*)malloc(sizeof(double)*kp2-
>> >total);
>> > > > > > > float *seq1, *seq2;
>> > > > > > > int descriptor_size = EXTENDED_DESCRIPTOR ? 128 : 64;
>> > > > > > > for (i=0; i<kp1->total; i++) {
>> > > > > > > // find average and standard deviation of each descriptor
>> > > > > > > avg1[i] = 0;
>> > > > > > > dev1[i] = 0;
>> > > > > > > seq1 = (float*)cvGetSeqElem(desc1, i);
>> > > > > > > for (k=0; k<descriptor_size; k++) avg1[i] += seq1[k];
>> > > > > > > avg1[i] /= descriptor_size;
>> > > > > > > for (k=0; k<descriptor_size; k++) dev1[i] +=
>> > > > > > > (seq1[k]-avg1[i])*(seq1[k]-avg1[i]);
>> > > > > > > dev1[i] = sqrt(dev1[i]/descriptor_size);
>> > > > > > >
>> > > > > > > // initialize best1 and best1corr
>> > > > > > > best1[i] = -1;
>> > > > > > > best1corr[i] = -1.;
>> > > > > > > }
>> > > > > > > for (j=0; j<kp2->total; j++) {
>> > > > > > > // find average and standard deviation of each descriptor
>> > > > > > > avg2[j] = 0;
>> > > > > > > dev2[j] = 0;
>> > > > > > > seq2 = (float*)cvGetSeqElem(desc2, j);
>> > > > > > > for (k=0; k<descriptor_size; k++) avg2[j] += seq2[k];
>> > > > > > > avg2[j] /= descriptor_size;
>> > > > > > > for (k=0; k<descriptor_size; k++) dev2[j] +=
>> > > > > > > (seq2[k]-avg2[j])*(seq2[k]-avg2[j]);
>> > > > > > > dev2[j] = sqrt(dev2[j]/descriptor_size);
>> > > > > > >
>> > > > > > > // initialize best2 and best2corr
>> > > > > > > best2[j] = -1;
>> > > > > > > best2corr[j] = -1.;
>> > > > > > > }
>> > > > > > > double corr;
>> > > > > > > for (i = 0; i < kp1->total; ++i) {
>> > > > > > > seq1 = (float*)cvGetSeqElem(desc1, i);
>> > > > > > > for (j = 0; j < kp2->total; ++j) {
>> > > > > > > corr = 0;
>> > > > > > > seq2 = (float*)cvGetSeqElem(desc2, j);
>> > > > > > > for (k = 0; k < descriptor_size; ++k)
>> > > > > > > corr += (seq1[k]-avg1[i])*(seq2[k]-avg2[j]);
>> > > > > > > corr /= (descriptor_size-1)*dev1[i]*dev2[j];
>> > > > > > > if (corr > best1corr[i]) {
>> > > > > > > best1corr[i] = corr;
>> > > > > > > best1[i] = j;
>> > > > > > > }
>> > > > > > > if (corr > best2corr[j]) {
>> > > > > > > best2corr[j] = corr;
>> > > > > > > best2[j] = i;
>> > > > > > > }
>> > > > > > > }
>> > > > > > > }
>> > > > > > > j = 0;
>> > > > > > > for (i = 0; i < kp1->total; i++)
>> > > > > > > if (best2[best1[i]] == i && best1corr[i] >
>> > > > > > CORRELATION_THRESHOLD)
>> > > > > > > j++;
>> > > > > > > if (j == 0) return; // no matches found
>> > > > > > > *points1 = cvCreateMat(1,j,CV_32FC2);
>> > > > > > > *points2 = cvCreateMat(1,j,CV_32FC2);
>> > > > > > > CvPoint2D32f *p1, *p2;
>> > > > > > > j = 0;
>> > > > > > > for (i = 0; i < kp1->total; i++) {
>> > > > > > > if (best2[best1[i]] == i && best1corr[i] >
>> > > > > > CORRELATION_THRESHOLD) {
>> > > > > > > p1 = &((CvSURFPoint*)cvGetSeqElem(kp1,i))->pt;
>> > > > > > > p2 = &((CvSURFPoint*)cvGetSeqElem(kp2,best1[i]))->pt;
>> > > > > > > (*points1)->data.fl[j*2] = p1->x;
>> > > > > > > (*points1)->data.fl[j*2+1] = p1->y;
>> > > > > > > (*points2)->data.fl[j*2] = p2->x;
>> > > > > > > (*points2)->data.fl[j*2+1] = p2->y;
>> > > > > > > j++;
>> > > > > > > }
>> > > > > > > }
>> > > > > > > free(best2corr);
>> > > > > > > free(best1corr);
>> > > > > > > free(best2);
>> > > > > > > free(best1);
>> > > > > > > free(avg1);
>> > > > > > > free(avg2);
>> > > > > > > free(dev1);
>> > > > > > > free(dev2);
>> > > > > > > }
>> > > > > > >
>> > > > > > > If you construct a fundamental matrix (a model) for the
>> > > > transformation
>> > > > > > > between the two rectangles, you can further determine
>> which
>> > > > > > correspondences
>> > > > > > > are false (by how well they fit the model) and remove
>> them,
>> > which I
>> > > > > > like to
>> > > > > > > do like this:
>> > > > > > >
>> > > > > > > F = cvCreateMat(3,3,CV_32FC1);
>> > > > > > > CvMat *status = cvCreateMat(1,points1->cols,CV_8UC1);
>> > > > > > > int fm_count = cvFindFundamentalMat( points1,points2,F,
>> > > > > > > CV_FM_RANSAC,1.,0.99,status );
>> > > > > > > removeOutliers(&points1,&points2,status);
>> > > > > > >
>> > > > > > > where removeOutliers() is a function I wrote to clean up
>> after
>> > > > > > > cvFindFundamentalMat():
>> > > > > > >
>> > > > > > > // iterates the set of putative correspondences and
>> removes
>> > > > > > correspondences
>> > > > > > > marked as outliers by cvFindFundamentalMat()
>> > > > > > > void removeOutliers(CvMat **points1, CvMat **points2,
>> CvMat
>> > > > *status) {
>> > > > > > > CvMat *points1_ = *points1;
>> > > > > > > CvMat *points2_ = *points2;
>> > > > > > > int count = 0;
>> > > > > > > for (int i = 0; i < status->cols; i++) if
>> > > > > > (CV_MAT_ELEM(*status,unsigned
>> > > > > > > char,0,i)) count++;
>> > > > > > > if (!count) { // no inliers
>> > > > > > > *points1 = NULL;
>> > > > > > > *points2 = NULL;
>> > > > > > > }
>> > > > > > > else {
>> > > > > > > *points1 = cvCreateMat(1,count,CV_32FC2);
>> > > > > > > *points2 = cvCreateMat(1,count,CV_32FC2);
>> > > > > > > int j = 0;
>> > > > > > > for (int i = 0; i < status->cols; i++) {
>> > > > > > > if (CV_MAT_ELEM(*status,unsigned char,0,i)) {
>> > > > > > > (*points1)->data.fl[j*2] = points1_->data.fl[i*2];
>> > > > > > > //p1->x
>> > > > > > > (*points1)->data.fl[j*2+1] = points1_->data.fl[i*2+1];
>> > > > > > > //p1->y
>> > > > > > > (*points2)->data.fl[j*2] = points2_->data.fl[i*2];
>> > > > > > > //p2->x
>> > > > > > > (*points2)->data.fl[j*2+1] = points2_->data.fl[i*2+1];
>> > > > > > > //p2->y
>> > > > > > > j++;
>> > > > > > > }
>> > > > > > > }
>> > > > > > > }
>> > > > > > > cvReleaseMat(&points1_);
>> > > > > > > cvReleaseMat(&points2_);
>> > > > > > > }
>> > > > > > >
>> > > > > > >
>> > > > > > > I hope this helps.
>> > > > > > >
>> > > > > > > -Jostein
>> > > > > > >
>> > > > > > >
>> > > > > > > 2009/1/8 yair_movshovitz <yairmov@>
>> > > > > > >
>> > > > > > > > Hi Everyone,
>> > > > > > > >
>> > > > > > > > I'm trying to understand how to use the SURF features
>> > > > capabilities of
>> > > > > > > > openCV.
>> > > > > > > > My scenario is as follows:
>> > > > > > > > I have two rectangled areas in an image, which are
>> supposed to
>> > > > bound
>> > > > > > > > the same object. I would like to see how good is this
>> > > > assumption. In
>> > > > > > > > other words I would like to see how many features they
>> share.
>> > > > > > > >
>> > > > > > > > Can someone drop me a hint on how to use the SURF
>> > > > implementation of
>> > > > > > > > openCV (or direct me to somewhere that has some
>> documentation
>> > > > of it)
>> > > > > > > >
>> > > > > > > > Thanks,
>> > > > > > > > Yair
>> > > > > > > >
>> > > > > > > >
>> > > > > > > >
>> > > > > > >
>> > > > > > >
>> > > > > > > [Non-text portions of this message have been removed]
>> > > > > > >
>> > > > > >
>> > > > > >
>> > > > > >
>> > > > >
>> > > > >
>> > > > > [Non-text portions of this message have been removed]
>> > > > >
>> > > >
>> > > >
>> > > >
>> > >
>> > >
>> > > [Non-text portions of this message have been removed]
>> > >
>> >
>>
>>  
>>
>
>


[Non-text portions of this message have been removed]

Reply | Threaded
Open this post in threaded view
|

Re: cvExtractSURF

liuliu_0503
I think that the modifications made by Ian Mahon should have fixed the problem already. Check out the svn version of opencv and try again.

--- In [hidden email], Raluca Borca <raluca.borca@...> wrote:

>
> To be more precise, I encountered the problem at the following line in
> cvsurf.cpp
>
> /* hessian detector */
>     for( octave = k = 0; octave < params->nOctaves; octave++ )
>     {
>         for( sc = -1; sc <= params->nOctaveLayers; sc++, k++ )
>         {
>             if ( sc < 0 )
>                 sizeCache[k] = size = 7 << octave; // gaussian scale 1.0;
>             else
>                 sizeCache[k] = size = (sc*6 + 9) << octave; // gaussian
> scale size*1.2/9.;
>             scaleCache[k] = scale = MAX(size, SIZE0);
>
>             hessian_rows = (sum->rows)*SIZE0/scale;
>             hessian_cols = (sum->cols)*SIZE0/scale;
>             hessians[k] = cvCreateMat( hessian_rows, hessian_cols, CV_32FC1
> );
>             traces[k] = cvCreateMat( hessian_rows, hessian_cols, CV_32FC1
> );  // this is the point where I get an out of memory exception
>
> ......
>
> } } }
>
> On Thu, Mar 12, 2009 at 12:26 PM, Raluca Borca <raluca.borca@...>wrote:
>
> > Hello !
> >
> > I have encountered the same problem.
> >
> > Can anybody tell what is the solution ?
> >
> > Thanks.
> >
> >
> > On Sun, Mar 1, 2009 at 8:21 AM, Ricardo. <dadagori@...> wrote:
> >
> >>   Hello!
> >>
> >> I wonder if you found what was causing this error? 'Cause it's
> >> happening to me too and cannot figure it out.
> >>
> >> I'd appreciate it if you share how you solved it -if you did of
> >> course-.
> >>
> >> Regards,
> >> Ricardo
> >>
> >>
> >> --- In [hidden email] <OpenCV%40yahoogroups.com>,
> >> "yair_movshovitz" <yairmov@> wrote:
> >> >
> >> > I did some investigating and found out the the error is happening
> >> > inside the icvFastHessianDetector() function in cvsurf.cpp
> >> > It happens when the function tries to free the memory it allocated:
> >> >
> >> > for( octave = k = 0; octave < params->nOctaves; octave++ )
> >> > for( sc = -1; sc <= params->nOctaveLayers; sc++, k++ )
> >> > {
> >> > //this line causes the error at some iteration.
> >> > cvReleaseMat( &hessians[k] );
> >> > cvReleaseMat( &traces[k] );
> >> > }
> >> >
> >> >
> >> > Anyone has an idea why this is happening?
> >> >
> >> > Thanks,
> >> > Yair
> >> >
> >> > --- In [hidden email] <OpenCV%40yahoogroups.com>, "Jostein
> >> Austvik Jacobsen"
> >> > <josteinaj@> wrote:
> >> > >
> >> > > I'm using Ubuntu Linux so I can't help you there. Sorry.
> >> > > Jostein
> >> > >
> >> > > 2009/1/20 yair_movshovitz <yairmov@>
> >> > >
> >> > > > Hi Jostein,
> >> > > >
> >> > > > Thanks again for helping me out.
> >> > > >
> >> > > > I have started using the cvExtractSURF function. and I have the
> >> > > > following problem:
> >> > > > When I call the function I get this error -
> >> > > > Windows has triggered a breakpoint in (my program name).
> >> > > >
> >> > > > This may be due to a corruption of the heap, which indicates a
> >> bug in
> >> > > > (my program name) or any of the DLLs it has loaded.
> >> > > >
> >> > > > Have you ever encountered this error before regarding this
> >> function?
> >> > > >
> >> > > > Thanks,
> >> > > >
> >> > > > Yair
> >> > > >
> >> > > > --- In [hidden email] <OpenCV%40yahoogroups.com> <OpenCV%
> >> 40yahoogroups.com>, "Jostein
> >> > Austvik
> >> > > > Jacobsen"
> >> > > > <josteinaj@> wrote:
> >> > > > >
> >> > > > > You can view the implementation of *cvExtractSURF(...)* here:
> >> > > > >
> >> > > >
> >> > > >
> >> >
> >> http://opencvlibrary.svn.sourceforge.net/viewvc/opencvlibrary/trunk/op
> >> encv/src/cv/cvsurf.cpp,
> >> > > > > however it doesn't contain much comments.
> >> > > > >
> >> > > > >
> >> > > > > *cvExtractSURF( const CvArr* img, const CvArr* mask, CvSeq**
> >> > keypoints,
> >> > > > > CvSeq** descriptors, CvMemStorage* storage, CvSURFParams
> >> params )*
> >> > > > >
> >> > > > > Here, *img* is the image. Use an
> >> > > > > *IplImage<http://opencv.willowgarage.com/wiki/CxCore#IplImage>
> >> > > > > * for the image. To load an image from disk, use
> >> > > > >
> >> > > >
> >> > *cvLoadImage(...)
> >> *<http://opencv.willowgarage.com/wiki/HighGui#cvLoadImage
> >> > > > >,
> >> > > > > and to create your own image, use
> >> > > > >
> >> > > > *cvCreateImage(...)*<
> >> > > > http://opencv.willowgarage.com/wiki/CxCore#CreateImage>.
> >> > > > > Lets say you have a IplImage *image* and want to extract the
> >> > rectangle
> >> > > > > (x,y)->(x+dx,y+dy) from it as an IplImage rectangle, you might
> >> > do this:
> >> > > > >
> >> > > > > CvSize size = cvSize(dx,dy);
> >> > > > > IplImage* rectangle = cvCreateImage(size, IPL_DEPTH_8U, 1);
> >> > > > > for (int i = 0; i < dx; ++i) {
> >> > > > > for (int j = 0; j < dy; ++j) {
> >> > > > > CV_IMAGE_ELEM(rectangle,unsigned char,i,j) =
> >> > > > > CV_IMAGE_ELEM(image,unsigned char,x+i,y+j);
> >> > > > > }
> >> > > > > }
> >> > > > >
> >> > > > > I'm not sure how *mask* is used, but a quick google search
> >> gives
> >> > > > >
> >> > > >
> >> > > >
> >> > http://www.emgu.com/wiki/files/1.4.0.0/html/ad54862f-3d1c-c177-3bdb-
> >> 768619f8dd90.htmwhich
> >> > > > > says "The optional input 8-bit mask. The features are only
> >> found in
> >> > > > > the areas that contain more than 50% of non-zero mask
> >> pixels". Just
> >> > > > set it
> >> > > > > to NULL.
> >> > > > >
> >> > > > > *keypoints* and
> >> > > > >
> >> > > >
> >> > *descriptors*<<a href="http://en.wikipedia.org/wiki/Feature_%">http://en.wikipedia.org/wiki/Feature_%
> >> 28computer_vision%29
> >> > > > >are
> >> > > > > where the results are placed. Initialize them as null-
> >> pointers and
> >> > > > > cvExtractSURF will do the rest for you. Afterwards you can
> >> access a
> >> > > > > descriptor and corresponding keypoint like this:
> >> > > > >
> >> > > > > int k = 0; // the keypoint you want. There are *keypoints-
> >> >total*
> >> > > > keypoints.
> >> > > > > float *seq = (float*)cvGetSeqElem(descriptors, k); // the
> >> > descriptor of
> >> > > > > length 64 or 128
> >> > > > > CvPoint2D32f *p = &((CvSURFPoint*)cvGetSeqElem(keypoints, k))-
> >> >pt;
> >> > > > // the
> >> > > > > (x,y) coordinates of keypoint *k* can now be accessed as *p-
> >> >x* and
> >> > > > *p->y*
> >> > > > >
> >> > > > > The *CvMemStorage*
> >> > > > >
> >> <http://opencv.willowgarage.com/wiki/CxCore#CvMemStorage>struct
> >> > > > > *storage* is used as a mechanism to simplify memory
> >> management. I
> >> > > > believe
> >> > > > > the *keypoints* and *descriptors* structures are put into
> >> *storage*,
> >> > > > so you
> >> > > > > can't release *storage* until you're done using *keypoints*
> >> and
> >> > > > *descriptors
> >> > > > > *.Put a *CvMemStorage *storage = cvCreateMemStorage(0);*
> >> before your
> >> > > > first
> >> > > > > call to cvExtractSURF and *cvClearMemStorage(storage);* after
> >> you're
> >> > > > done
> >> > > > > using *keypoints* and *descriptors*.
> >> > > > >
> >> > > > > SURF takes a couple of parameters through the *CvSURFParams*
> >> struct
> >> > > > *params*.
> >> > > > > You create *params* with *cvSURFParams(double threshold, int
> >> > > > > extended)*where threshold represents the "edgyness" that is
> >> required
> >> > > > > from a feature to
> >> > > > > be recognized as a feature. It can be adjusted to retrieve
> >> more
> >> > or fewer
> >> > > > > features. In the paper
> >> > > > > <http://www.vision.ee.ethz.ch/%7Esurf/eccv06.pdf>describing
> >> the SURF
> >> > > > > detector, they use a threshold of 600 on a 800 x 640
> >> > > > > image which returned 1418 features. The *extended* parameter
> >> is
> >> > a simple
> >> > > > > boolean 1 or 0 which states whether or not to use the extended
> >> > > > descriptor.
> >> > > > > The extended descriptor consists of 128 instead of 64 values
> >> which
> >> > > > should
> >> > > > > gives a better result at the cost of using more memory.
> >> Instead of
> >> > > > creating
> >> > > > > a new CvSURFParams struct for each call to cvExtractSURF, you
> >> > could do:
> >> > > > >
> >> > > > > CvSURFParams params = cvSURFParams(600, 1);
> >> > > > > cvExtractSURF(..., params);
> >> > > > > cvExtractSURF(..., params);
> >> > > > >
> >> > > > >
> >> > > > > There you go. I hope I answered your question :)
> >> > > > >
> >> > > > > Jostein
> >> > > > >
> >> > > > >
> >> > > > > 2009/1/12 yair_movshovitz <yairmov@>
> >> > > > >
> >> > > > > > Hi Jostein,
> >> > > > > >
> >> > > > > > Thanks a lot for your help!
> >> > > > > >
> >> > > > > > Can you please explain the function parameters of
> >> cvExtractSURF?
> >> > > > > > I mean in - cvExtractSURF(img1, NULL, &kp1, &desc1, storage,
> >> > > > > > cvSURFParams(600,EXTENDED_DESCRIPTOR));
> >> > > > > > what is the role of kp1, desc1, storage and the SURFParams?
> >> > > > > > is storage just a temp area for the algorithm to use?
> >> > > > > >
> >> > > > > > Thanks again
> >> > > > > > Yair
> >> > > > > >
> >> > > > > > --- In [hidden email] <OpenCV%40yahoogroups.com><OpenCV%
> >> 40yahoogroups.com>
> >> <OpenCV%
> >> > > > 40yahoogroups.com>, "Jostein
> >> > > >
> >> > > > Austvik
> >> > > > > > Jacobsen"
> >> > > > > >
> >> > > > > > <josteinaj@> wrote:
> >> > > > > > >
> >> > > > > > > If you've got your two rectangled areas stored as img1 and
> >> > img2 you
> >> > > > > > could do
> >> > > > > > > this to extract its keypoints and corresponding
> >> descriptors:
> >> > > > > > >
> >> > > > > > > #define EXTENDED_DESCRIPTOR 1
> >> > > > > > > CvSeq *kp1=NULL, *kp2=NULL;
> >> > > > > > > CvSeq *desc1=NULL, *desc2=NULL;
> >> > > > > > > CvMemStorage *storage = cvCreateMemStorage(0);
> >> > > > > > > cvExtractSURF(img1, NULL, &kp1, &desc1, storage,
> >> > cvSURFParams(600,
> >> > > > > > > EXTENDED_DESCRIPTOR));
> >> > > > > > > cvExtractSURF(img2, NULL, &kp2, &desc2, storage,
> >> > cvSURFParams(600,
> >> > > > > > > EXTENDED_DESCRIPTOR));
> >> > > > > > >
> >> > > > > > > You will have to correlate the descriptors with each
> >> other to
> >> > > > determine
> >> > > > > > > which keypoints in each rectangle corresponds to one
> >> > another. You
> >> > > > > > could use
> >> > > > > > > a BBF tree which is implemented in the latest version of
> >> > OpenCV, but
> >> > > > > > unless
> >> > > > > > > your rectangle is huge, you might just as well just
> >> correlate
> >> > > > them the
> >> > > > > > > standard way, which I do like this:
> >> > > > > > >
> >> > > > > > > #define CORRELATION_THRESHOLD 0.7
> >> > > > > > > // brute-force attempt at correlating the two sets of
> >> features
> >> > > > > > > void bruteMatch(CvMat **points1, CvMat **points2, CvSeq
> >> > *kp1, CvSeq
> >> > > > > > *desc1,
> >> > > > > > > CvSeq *kp2, CvSeq *desc2) {
> >> > > > > > > int i,j,k;
> >> > > > > > > double* avg1 = (double*)malloc(sizeof(double)*kp1->total);
> >> > > > > > > double* avg2 = (double*)malloc(sizeof(double)*kp2->total);
> >> > > > > > > double* dev1 = (double*)malloc(sizeof(double)*kp1->total);
> >> > > > > > > double* dev2 = (double*)malloc(sizeof(double)*kp2->total);
> >> > > > > > > int* best1 = (int*)malloc(sizeof(int)*kp1->total);
> >> > > > > > > int* best2 = (int*)malloc(sizeof(int)*kp2->total);
> >> > > > > > > double* best1corr = (double*)malloc(sizeof(double)*kp1-
> >> >total);
> >> > > > > > > double* best2corr = (double*)malloc(sizeof(double)*kp2-
> >> >total);
> >> > > > > > > float *seq1, *seq2;
> >> > > > > > > int descriptor_size = EXTENDED_DESCRIPTOR ? 128 : 64;
> >> > > > > > > for (i=0; i<kp1->total; i++) {
> >> > > > > > > // find average and standard deviation of each descriptor
> >> > > > > > > avg1[i] = 0;
> >> > > > > > > dev1[i] = 0;
> >> > > > > > > seq1 = (float*)cvGetSeqElem(desc1, i);
> >> > > > > > > for (k=0; k<descriptor_size; k++) avg1[i] += seq1[k];
> >> > > > > > > avg1[i] /= descriptor_size;
> >> > > > > > > for (k=0; k<descriptor_size; k++) dev1[i] +=
> >> > > > > > > (seq1[k]-avg1[i])*(seq1[k]-avg1[i]);
> >> > > > > > > dev1[i] = sqrt(dev1[i]/descriptor_size);
> >> > > > > > >
> >> > > > > > > // initialize best1 and best1corr
> >> > > > > > > best1[i] = -1;
> >> > > > > > > best1corr[i] = -1.;
> >> > > > > > > }
> >> > > > > > > for (j=0; j<kp2->total; j++) {
> >> > > > > > > // find average and standard deviation of each descriptor
> >> > > > > > > avg2[j] = 0;
> >> > > > > > > dev2[j] = 0;
> >> > > > > > > seq2 = (float*)cvGetSeqElem(desc2, j);
> >> > > > > > > for (k=0; k<descriptor_size; k++) avg2[j] += seq2[k];
> >> > > > > > > avg2[j] /= descriptor_size;
> >> > > > > > > for (k=0; k<descriptor_size; k++) dev2[j] +=
> >> > > > > > > (seq2[k]-avg2[j])*(seq2[k]-avg2[j]);
> >> > > > > > > dev2[j] = sqrt(dev2[j]/descriptor_size);
> >> > > > > > >
> >> > > > > > > // initialize best2 and best2corr
> >> > > > > > > best2[j] = -1;
> >> > > > > > > best2corr[j] = -1.;
> >> > > > > > > }
> >> > > > > > > double corr;
> >> > > > > > > for (i = 0; i < kp1->total; ++i) {
> >> > > > > > > seq1 = (float*)cvGetSeqElem(desc1, i);
> >> > > > > > > for (j = 0; j < kp2->total; ++j) {
> >> > > > > > > corr = 0;
> >> > > > > > > seq2 = (float*)cvGetSeqElem(desc2, j);
> >> > > > > > > for (k = 0; k < descriptor_size; ++k)
> >> > > > > > > corr += (seq1[k]-avg1[i])*(seq2[k]-avg2[j]);
> >> > > > > > > corr /= (descriptor_size-1)*dev1[i]*dev2[j];
> >> > > > > > > if (corr > best1corr[i]) {
> >> > > > > > > best1corr[i] = corr;
> >> > > > > > > best1[i] = j;
> >> > > > > > > }
> >> > > > > > > if (corr > best2corr[j]) {
> >> > > > > > > best2corr[j] = corr;
> >> > > > > > > best2[j] = i;
> >> > > > > > > }
> >> > > > > > > }
> >> > > > > > > }
> >> > > > > > > j = 0;
> >> > > > > > > for (i = 0; i < kp1->total; i++)
> >> > > > > > > if (best2[best1[i]] == i && best1corr[i] >
> >> > > > > > CORRELATION_THRESHOLD)
> >> > > > > > > j++;
> >> > > > > > > if (j == 0) return; // no matches found
> >> > > > > > > *points1 = cvCreateMat(1,j,CV_32FC2);
> >> > > > > > > *points2 = cvCreateMat(1,j,CV_32FC2);
> >> > > > > > > CvPoint2D32f *p1, *p2;
> >> > > > > > > j = 0;
> >> > > > > > > for (i = 0; i < kp1->total; i++) {
> >> > > > > > > if (best2[best1[i]] == i && best1corr[i] >
> >> > > > > > CORRELATION_THRESHOLD) {
> >> > > > > > > p1 = &((CvSURFPoint*)cvGetSeqElem(kp1,i))->pt;
> >> > > > > > > p2 = &((CvSURFPoint*)cvGetSeqElem(kp2,best1[i]))->pt;
> >> > > > > > > (*points1)->data.fl[j*2] = p1->x;
> >> > > > > > > (*points1)->data.fl[j*2+1] = p1->y;
> >> > > > > > > (*points2)->data.fl[j*2] = p2->x;
> >> > > > > > > (*points2)->data.fl[j*2+1] = p2->y;
> >> > > > > > > j++;
> >> > > > > > > }
> >> > > > > > > }
> >> > > > > > > free(best2corr);
> >> > > > > > > free(best1corr);
> >> > > > > > > free(best2);
> >> > > > > > > free(best1);
> >> > > > > > > free(avg1);
> >> > > > > > > free(avg2);
> >> > > > > > > free(dev1);
> >> > > > > > > free(dev2);
> >> > > > > > > }
> >> > > > > > >
> >> > > > > > > If you construct a fundamental matrix (a model) for the
> >> > > > transformation
> >> > > > > > > between the two rectangles, you can further determine
> >> which
> >> > > > > > correspondences
> >> > > > > > > are false (by how well they fit the model) and remove
> >> them,
> >> > which I
> >> > > > > > like to
> >> > > > > > > do like this:
> >> > > > > > >
> >> > > > > > > F = cvCreateMat(3,3,CV_32FC1);
> >> > > > > > > CvMat *status = cvCreateMat(1,points1->cols,CV_8UC1);
> >> > > > > > > int fm_count = cvFindFundamentalMat( points1,points2,F,
> >> > > > > > > CV_FM_RANSAC,1.,0.99,status );
> >> > > > > > > removeOutliers(&points1,&points2,status);
> >> > > > > > >
> >> > > > > > > where removeOutliers() is a function I wrote to clean up
> >> after
> >> > > > > > > cvFindFundamentalMat():
> >> > > > > > >
> >> > > > > > > // iterates the set of putative correspondences and
> >> removes
> >> > > > > > correspondences
> >> > > > > > > marked as outliers by cvFindFundamentalMat()
> >> > > > > > > void removeOutliers(CvMat **points1, CvMat **points2,
> >> CvMat
> >> > > > *status) {
> >> > > > > > > CvMat *points1_ = *points1;
> >> > > > > > > CvMat *points2_ = *points2;
> >> > > > > > > int count = 0;
> >> > > > > > > for (int i = 0; i < status->cols; i++) if
> >> > > > > > (CV_MAT_ELEM(*status,unsigned
> >> > > > > > > char,0,i)) count++;
> >> > > > > > > if (!count) { // no inliers
> >> > > > > > > *points1 = NULL;
> >> > > > > > > *points2 = NULL;
> >> > > > > > > }
> >> > > > > > > else {
> >> > > > > > > *points1 = cvCreateMat(1,count,CV_32FC2);
> >> > > > > > > *points2 = cvCreateMat(1,count,CV_32FC2);
> >> > > > > > > int j = 0;
> >> > > > > > > for (int i = 0; i < status->cols; i++) {
> >> > > > > > > if (CV_MAT_ELEM(*status,unsigned char,0,i)) {
> >> > > > > > > (*points1)->data.fl[j*2] = points1_->data.fl[i*2];
> >> > > > > > > //p1->x
> >> > > > > > > (*points1)->data.fl[j*2+1] = points1_->data.fl[i*2+1];
> >> > > > > > > //p1->y
> >> > > > > > > (*points2)->data.fl[j*2] = points2_->data.fl[i*2];
> >> > > > > > > //p2->x
> >> > > > > > > (*points2)->data.fl[j*2+1] = points2_->data.fl[i*2+1];
> >> > > > > > > //p2->y
> >> > > > > > > j++;
> >> > > > > > > }
> >> > > > > > > }
> >> > > > > > > }
> >> > > > > > > cvReleaseMat(&points1_);
> >> > > > > > > cvReleaseMat(&points2_);
> >> > > > > > > }
> >> > > > > > >
> >> > > > > > >
> >> > > > > > > I hope this helps.
> >> > > > > > >
> >> > > > > > > -Jostein
> >> > > > > > >
> >> > > > > > >
> >> > > > > > > 2009/1/8 yair_movshovitz <yairmov@>
> >> > > > > > >
> >> > > > > > > > Hi Everyone,
> >> > > > > > > >
> >> > > > > > > > I'm trying to understand how to use the SURF features
> >> > > > capabilities of
> >> > > > > > > > openCV.
> >> > > > > > > > My scenario is as follows:
> >> > > > > > > > I have two rectangled areas in an image, which are
> >> supposed to
> >> > > > bound
> >> > > > > > > > the same object. I would like to see how good is this
> >> > > > assumption. In
> >> > > > > > > > other words I would like to see how many features they
> >> share.
> >> > > > > > > >
> >> > > > > > > > Can someone drop me a hint on how to use the SURF
> >> > > > implementation of
> >> > > > > > > > openCV (or direct me to somewhere that has some
> >> documentation
> >> > > > of it)
> >> > > > > > > >
> >> > > > > > > > Thanks,
> >> > > > > > > > Yair
> >> > > > > > > >
> >> > > > > > > >
> >> > > > > > > >
> >> > > > > > >
> >> > > > > > >
> >> > > > > > > [Non-text portions of this message have been removed]
> >> > > > > > >
> >> > > > > >
> >> > > > > >
> >> > > > > >
> >> > > > >
> >> > > > >
> >> > > > > [Non-text portions of this message have been removed]
> >> > > > >
> >> > > >
> >> > > >
> >> > > >
> >> > >
> >> > >
> >> > > [Non-text portions of this message have been removed]
> >> > >
> >> >
> >>
> >>  
> >>
> >
> >
>
>
> [Non-text portions of this message have been removed]
>


Reply | Threaded
Open this post in threaded view
|

Re: cvExtractSURF

osid
Hello all,

I am also having the same problem - with the latest version
of cvSurf (its version 1520).
When did Ian Mahon do the modification?

Thanks,

Oliver


--- In [hidden email], "liuliu_0503" <liuliu.1987+opencv@...> wrote:

>
> I think that the modifications made by Ian Mahon should have fixed the problem already. Check out the svn version of opencv and try again.
>
> --- In [hidden email], Raluca Borca <raluca.borca@> wrote:
> >
> > To be more precise, I encountered the problem at the following line in
> > cvsurf.cpp
> >
> > /* hessian detector */
> >     for( octave = k = 0; octave < params->nOctaves; octave++ )
> >     {
> >         for( sc = -1; sc <= params->nOctaveLayers; sc++, k++ )
> >         {
> >             if ( sc < 0 )
> >                 sizeCache[k] = size = 7 << octave; // gaussian scale 1.0;
> >             else
> >                 sizeCache[k] = size = (sc*6 + 9) << octave; // gaussian
> > scale size*1.2/9.;
> >             scaleCache[k] = scale = MAX(size, SIZE0);
> >
> >             hessian_rows = (sum->rows)*SIZE0/scale;
> >             hessian_cols = (sum->cols)*SIZE0/scale;
> >             hessians[k] = cvCreateMat( hessian_rows, hessian_cols, CV_32FC1
> > );
> >             traces[k] = cvCreateMat( hessian_rows, hessian_cols, CV_32FC1
> > );  // this is the point where I get an out of memory exception
> >
> > ......
> >
> > } } }1
> >
> > On Thu, Mar 12, 2009 at 12:26 PM, Raluca Borca <raluca.borca@>wrote:
> >
> > > Hello !
> > >
> > > I have encountered the same problem.
> > >
> > > Can anybody tell what is the solution ?
> > >
> > > Thanks.
> > >
> > >
> > > On Sun, Mar 1, 2009 at 8:21 AM, Ricardo. <dadagori@> wrote:
> > >
> > >>   Hello!
> > >>
> > >> I wonder if you found what was causing this error? 'Cause it's
> > >> happening to me too and cannot figure it out.
> > >>
> > >> I'd appreciate it if you share how you solved it -if you did of
> > >> course-.
> > >>
> > >> Regards,
> > >> Ricardo
> > >>
> > >>
> > >> --- In [hidden email] <OpenCV%40yahoogroups.com>,
> > >> "yair_movshovitz" <yairmov@> wrote:
> > >> >
> > >> > I did some investigating and found out that the error is happening
> > >> > inside the icvFastHessianDetector() function in cvsurf.cpp
> > >> > It happens when the function tries to free the memory it allocated:
> > >> >
> > >> > for( octave = k = 0; octave < params->nOctaves; octave++ )
> > >> > for( sc = -1; sc <= params->nOctaveLayers; sc++, k++ )
> > >> > {
> > >> > //this line causes the error at some iteration.
> > >> > cvReleaseMat( &hessians[k] );
> > >> > cvReleaseMat( &traces[k] );
> > >> > }
> > >> >
> > >> >
> > >> > Anyone has an idea why this is happening?
> > >> >
> > >> > Thanks,
> > >> > Yair
> > >> >
> > >> > --- In [hidden email] <OpenCV%40yahoogroups.com>, "Jostein
> > >> Austvik Jacobsen"
> > >> > <josteinaj@> wrote:
> > >> > >
> > >> > > I'm using Ubuntu Linux so I can't help you there. Sorry.
> > >> > > Jostein
> > >> > >
> > >> > > 2009/1/20 yair_movshovitz <yairmov@>
> > >> > >
> > >> > > > Hi Jostein,
> > >> > > >
> > >> > > > Thanks again for helping me out.
> > >> > > >
> > >> > > > I have started using the cvExtractSURF function. and I have the
> > >> > > > following problem:
> > >> > > > When I call the function I get this error -
> > >> > > > Windows has triggered a breakpoint in (my program name).
> > >> > > >
> > >> > > > This may be due to a corruption of the heap, which indicates a
> > >> bug in
> > >> > > > (my program name) or any of the DLLs it has loaded.
> > >> > > >
> > >> > > > Have you ever encountered this error before regarding this
> > >> function?
> > >> > > >
> > >> > > > Thanks,
> > >> > > >
> > >> > > > Yair
> > >> > > >
> > >> > > > --- In [hidden email] <OpenCV%40yahoogroups.com> <OpenCV%
> > >> 40yahoogroups.com>, "Jostein
> > >> > Austvik
> > >> > > > Jacobsen"
> > >> > > > <josteinaj@> wrote:
> > >> > > > >
> > >> > > > > You can view the implementation of *cvExtractSURF(...)* here:
> > >> > > > >
> > >> > > >
> > >> > > >
> > >> >
> > >> http://opencvlibrary.svn.sourceforge.net/viewvc/opencvlibrary/trunk/op
> > >> encv/src/cv/cvsurf.cpp,
> > >> > > > > however it doesn't contain much comments.
> > >> > > > >
> > >> > > > >
> > >> > > > > *cvExtractSURF( const CvArr* img, const CvArr* mask, CvSeq**
> > >> > keypoints,
> > >> > > > > CvSeq** descriptors, CvMemStorage* storage, CvSURFParams
> > >> params )*
> > >> > > > >
> > >> > > > > Here, *img* is the image. Use an
> > >> > > > > *IplImage<http://opencv.willowgarage.com/wiki/CxCore#IplImage>
> > >> > > > > * for the image. To load an image from disk, use
> > >> > > > >
> > >> > > >
> > >> > *cvLoadImage(...)
> > >> *<http://opencv.willowgarage.com/wiki/HighGui#cvLoadImage
> > >> > > > >,
> > >> > > > > and to create your own image, use
> > >> > > > >
> > >> > > > *cvCreateImage(...)*<
> > >> > > > http://opencv.willowgarage.com/wiki/CxCore#CreateImage>.
> > >> > > > > Lets say you have a IplImage *image* and want to extract the
> > >> > rectangle
> > >> > > > > (x,y)->(x+dx,y+dy) from it as an IplImage rectangle, you might
> > >> > do this:
> > >> > > > >
> > >> > > > > CvSize size = cvSize(dx,dy);
> > >> > > > > IplImage* rectangle = cvCreateImage(size, IPL_DEPTH_8U, 1);
> > >> > > > > for (int i = 0; i < dx; ++i) {
> > >> > > > > for (int j = 0; j < dy; ++j) {
> > >> > > > > CV_IMAGE_ELEM(rectangle,unsigned char,i,j) =
> > >> > > > > CV_IMAGE_ELEM(image,unsigned char,x+i,y+j);
> > >> > > > > }
> > >> > > > > }
> > >> > > > >
> > >> > > > > I'm not sure how *mask* is used, but a quick google search
> > >> gives
> > >> > > > >
> > >> > > >
> > >> > > >
> > >> > http://www.emgu.com/wiki/files/1.4.0.0/html/ad54862f-3d1c-c177-3bdb-
> > >> 768619f8dd90.htmwhich
> > >> > > > > says "The optional input 8-bit mask. The features are only
> > >> found in
> > >> > > > > the areas that contain more than 50% of non-zero mask
> > >> pixels". Just
> > >> > > > set it
> > >> > > > > to NULL.
> > >> > > > >
> > >> > > > > *keypoints* and
> > >> > > > >
> > >> > > >
> > >> > *descriptors* <http://en.wikipedia.org/wiki/Feature_%28computer_vision%29>
> > >> > > > >are
> > >> > > > > where the results are placed. Initialize them as null-
> > >> pointers and
> > >> > > > > cvExtractSURF will do the rest for you. Afterwards you can
> > >> access a
> > >> > > > > descriptor and corresponding keypoint like this:
> > >> > > > >
> > >> > > > > int k = 0; // the keypoint you want. There are *keypoints-
> > >> >total*
> > >> > > > keypoints.
> > >> > > > > float *seq = (float*)cvGetSeqElem(descriptors, k); // the
> > >> > descriptor of
> > >> > > > > length 64 or 128
> > >> > > > > CvPoint2D32f *p = &((CvSURFPoint*)cvGetSeqElem(keypoints, k))-
> > >> >pt;
> > >> > > > // the
> > >> > > > > (x,y) coordinates of keypoint *k* can now be accessed as *p-
> > >> >x* and
> > >> > > > *p->y*
> > >> > > > >
> > >> > > > > The *CvMemStorage*
> > >> > > > >
> > >> <http://opencv.willowgarage.com/wiki/CxCore#CvMemStorage>struct
> > >> > > > > *storage* is used as a mechanism to simplify memory
> > >> management. I
> > >> > > > believe
> > >> > > > > the *keypoints* and *descriptors* structures are put into
> > >> *storage*,
> > >> > > > so you
> > >> > > > > can't release *storage* until you're done using *keypoints*
> > >> and
> > >> > > > *descriptors
> > >> > > > > *.Put a *CvMemStorage *storage = cvCreateMemStorage(0);*
> > >> before your
> > >> > > > first
> > >> > > > > call to cvExtractSURF and *cvClearMemStorage(storage);* after
> > >> you're
> > >> > > > done
> > >> > > > > using *keypoints* and *descriptors*.
> > >> > > > >
> > >> > > > > SURF takes a couple of parameters through the *CvSURFParams*
> > >> struct
> > >> > > > *params*.
> > >> > > > > You create *params* with *cvSURFParams(double threshold, int
> > >> > > > > extended)*where threshold represents the "edgyness" that is
> > >> required
> > >> > > > > from a feature to
> > >> > > > > be recognized as a feature. It can be adjusted to retrieve
> > >> more
> > >> > or fewer
> > >> > > > > features. In the paper
> > >> > > > > <http://www.vision.ee.ethz.ch/%7Esurf/eccv06.pdf>describing
> > >> the SURF
> > >> > > > > detector, they use a threshold of 600 on a 800 x 640
> > >> > > > > image which returned 1418 features. The *extended* parameter
> > >> is
> > >> > a simple
> > >> > > > > boolean 1 or 0 which states whether or not to use the extended
> > >> > > > descriptor.
> > >> > > > > The extended descriptor consists of 128 instead of 64 values
> > >> which
> > >> > > > should
> > >> > > > > gives a better result at the cost of using more memory.
> > >> Instead of
> > >> > > > creating
> > >> > > > > a new CvSURFParams struct for each call to cvExtractSURF, you
> > >> > could do:
> > >> > > > >
> > >> > > > > CvSURFParams params = cvSURFParams(600, 1);
> > >> > > > > cvExtractSURF(..., params);
> > >> > > > > cvExtractSURF(..., params);
> > >> > > > >
> > >> > > > >
> > >> > > > > There you go. I hope I answered your question :)
> > >> > > > >
> > >> > > > > Jostein
> > >> > > > >
> > >> > > > >
> > >> > > > > 2009/1/12 yair_movshovitz <yairmov@>
> > >> > > > >
> > >> > > > > > Hi Jostein,
> > >> > > > > >
> > >> > > > > > Thanks a lot for your help!
> > >> > > > > >
> > >> > > > > > Can you please explain the function parameters of
> > >> cvExtractSURF?
> > >> > > > > > I mean in - cvExtractSURF(img1, NULL, &kp1, &desc1, storage,
> > >> > > > > > cvSURFParams(600,EXTENDED_DESCRIPTOR));
> > >> > > > > > what is the role of kp1, desc1, storage and the SURFParams?
> > >> > > > > > is storage just a temp area for the algorithm to use?
> > >> > > > > >
> > >> > > > > > Thanks again
> > >> > > > > > Yair
> > >> > > > > >
> > >> > > > > > --- In [hidden email] <OpenCV%40yahoogroups.com><OpenCV%
> > >> 40yahoogroups.com>
> > >> <OpenCV%
> > >> > > > 40yahoogroups.com>, "Jostein
> > >> > > >
> > >> > > > Austvik
> > >> > > > > > Jacobsen"
> > >> > > > > >
> > >> > > > > > <josteinaj@> wrote:
> > >> > > > > > >
> > >> > > > > > > If you've got your two rectangled areas stored as img1 and
> > >> > img2 you
> > >> > > > > > could do
> > >> > > > > > > this to extract its keypoints and corresponding
> > >> descriptors:
> > >> > > > > > >
> > >> > > > > > > #define EXTENDED_DESCRIPTOR 1
> > >> > > > > > > CvSeq *kp1=NULL, *kp2=NULL;
> > >> > > > > > > CvSeq *desc1=NULL, *desc2=NULL;
> > >> > > > > > > CvMemStorage *storage = cvCreateMemStorage(0);
> > >> > > > > > > cvExtractSURF(img1, NULL, &kp1, &desc1, storage,
> > >> > cvSURFParams(600,
> > >> > > > > > > EXTENDED_DESCRIPTOR));
> > >> > > > > > > cvExtractSURF(img2, NULL, &kp2, &desc2, storage,
> > >> > cvSURFParams(600,
> > >> > > > > > > EXTENDED_DESCRIPTOR));
> > >> > > > > > >
> > >> > > > > > > You will have to correlate the descriptors with each
> > >> other to
> > >> > > > determine
> > >> > > > > > > which keypoints in each rectangle corresponds to one
> > >> > another. You
> > >> > > > > > could use
> > >> > > > > > > a BBF tree which is implemented in the latest version of
> > >> > OpenCV, but
> > >> > > > > > unless
> > >> > > > > > > your rectangle is huge, you might just as well just
> > >> correlate
> > >> > > > them the
> > >> > > > > > > standard way, which I do like this:
> > >> > > > > > >
> > >> > > > > > > #define CORRELATION_THRESHOLD 0.7
> > >> > > > > > > // brute-force attempt at correlating the two sets of
> > >> features
> > >> > > > > > > void bruteMatch(CvMat **points1, CvMat **points2, CvSeq
> > >> > *kp1, CvSeq
> > >> > > > > > *desc1,
> > >> > > > > > > CvSeq *kp2, CvSeq *desc2) {
> > >> > > > > > > int i,j,k;
> > >> > > > > > > double* avg1 = (double*)malloc(sizeof(double)*kp1->total);
> > >> > > > > > > double* avg2 = (double*)malloc(sizeof(double)*kp2->total);
> > >> > > > > > > double* dev1 = (double*)malloc(sizeof(double)*kp1->total);
> > >> > > > > > > double* dev2 = (double*)malloc(sizeof(double)*kp2->total);
> > >> > > > > > > int* best1 = (int*)malloc(sizeof(int)*kp1->total);
> > >> > > > > > > int* best2 = (int*)malloc(sizeof(int)*kp2->total);
> > >> > > > > > > double* best1corr = (double*)malloc(sizeof(double)*kp1-
> > >> >total);
> > >> > > > > > > double* best2corr = (double*)malloc(sizeof(double)*kp2-
> > >> >total);
> > >> > > > > > > float *seq1, *seq2;
> > >> > > > > > > int descriptor_size = EXTENDED_DESCRIPTOR ? 128 : 64;
> > >> > > > > > > for (i=0; i<kp1->total; i++) {
> > >> > > > > > > // find average and standard deviation of each descriptor
> > >> > > > > > > avg1[i] = 0;
> > >> > > > > > > dev1[i] = 0;
> > >> > > > > > > seq1 = (float*)cvGetSeqElem(desc1, i);
> > >> > > > > > > for (k=0; k<descriptor_size; k++) avg1[i] += seq1[k];
> > >> > > > > > > avg1[i] /= descriptor_size;
> > >> > > > > > > for (k=0; k<descriptor_size; k++) dev1[i] +=
> > >> > > > > > > (seq1[k]-avg1[i])*(seq1[k]-avg1[i]);
> > >> > > > > > > dev1[i] = sqrt(dev1[i]/descriptor_size);
> > >> > > > > > >
> > >> > > > > > > // initialize best1 and best1corr
> > >> > > > > > > best1[i] = -1;
> > >> > > > > > > best1corr[i] = -1.;
> > >> > > > > > > }
> > >> > > > > > > for (j=0; j<kp2->total; j++) {
> > >> > > > > > > // find average and standard deviation of each descriptor
> > >> > > > > > > avg2[j] = 0;
> > >> > > > > > > dev2[j] = 0;
> > >> > > > > > > seq2 = (float*)cvGetSeqElem(desc2, j);
> > >> > > > > > > for (k=0; k<descriptor_size; k++) avg2[j] += seq2[k];
> > >> > > > > > > avg2[j] /= descriptor_size;
> > >> > > > > > > for (k=0; k<descriptor_size; k++) dev2[j] +=
> > >> > > > > > > (seq2[k]-avg2[j])*(seq2[k]-avg2[j]);
> > >> > > > > > > dev2[j] = sqrt(dev2[j]/descriptor_size);
> > >> > > > > > >
> > >> > > > > > > // initialize best2 and best2corr
> > >> > > > > > > best2[j] = -1;
> > >> > > > > > > best2corr[j] = -1.;
> > >> > > > > > > }
> > >> > > > > > > double corr;
> > >> > > > > > > for (i = 0; i < kp1->total; ++i) {
> > >> > > > > > > seq1 = (float*)cvGetSeqElem(desc1, i);
> > >> > > > > > > for (j = 0; j < kp2->total; ++j) {
> > >> > > > > > > corr = 0;
> > >> > > > > > > seq2 = (float*)cvGetSeqElem(desc2, j);
> > >> > > > > > > for (k = 0; k < descriptor_size; ++k)
> > >> > > > > > > corr += (seq1[k]-avg1[i])*(seq2[k]-avg2[j]);
> > >> > > > > > > corr /= (descriptor_size-1)*dev1[i]*dev2[j];
> > >> > > > > > > if (corr > best1corr[i]) {
> > >> > > > > > > best1corr[i] = corr;
> > >> > > > > > > best1[i] = j;
> > >> > > > > > > }
> > >> > > > > > > if (corr > best2corr[j]) {
> > >> > > > > > > best2corr[j] = corr;
> > >> > > > > > > best2[j] = i;
> > >> > > > > > > }
> > >> > > > > > > }
> > >> > > > > > > }
> > >> > > > > > > j = 0;
> > >> > > > > > > for (i = 0; i < kp1->total; i++)
> > >> > > > > > > if (best2[best1[i]] == i && best1corr[i] >
> > >> > > > > > CORRELATION_THRESHOLD)
> > >> > > > > > > j++;
> > >> > > > > > > if (j == 0) return; // no matches found
> > >> > > > > > > *points1 = cvCreateMat(1,j,CV_32FC2);
> > >> > > > > > > *points2 = cvCreateMat(1,j,CV_32FC2);
> > >> > > > > > > CvPoint2D32f *p1, *p2;
> > >> > > > > > > j = 0;
> > >> > > > > > > for (i = 0; i < kp1->total; i++) {
> > >> > > > > > > if (best2[best1[i]] == i && best1corr[i] >
> > >> > > > > > CORRELATION_THRESHOLD) {
> > >> > > > > > > p1 = &((CvSURFPoint*)cvGetSeqElem(kp1,i))->pt;
> > >> > > > > > > p2 = &((CvSURFPoint*)cvGetSeqElem(kp2,best1[i]))->pt;
> > >> > > > > > > (*points1)->data.fl[j*2] = p1->x;
> > >> > > > > > > (*points1)->data.fl[j*2+1] = p1->y;
> > >> > > > > > > (*points2)->data.fl[j*2] = p2->x;
> > >> > > > > > > (*points2)->data.fl[j*2+1] = p2->y;
> > >> > > > > > > j++;
> > >> > > > > > > }
> > >> > > > > > > }
> > >> > > > > > > free(best2corr);
> > >> > > > > > > free(best1corr);
> > >> > > > > > > free(best2);
> > >> > > > > > > free(best1);
> > >> > > > > > > free(avg1);
> > >> > > > > > > free(avg2);
> > >> > > > > > > free(dev1);
> > >> > > > > > > free(dev2);
> > >> > > > > > > }
> > >> > > > > > >
> > >> > > > > > > If you construct a fundamental matrix (a model) for the
> > >> > > > transformation
> > >> > > > > > > between the two rectangles, you can further determine
> > >> which
> > >> > > > > > correspondences
> > >> > > > > > > are false (by how well they fit the model) and remove
> > >> them,
> > >> > which I
> > >> > > > > > like to
> > >> > > > > > > do like this:
> > >> > > > > > >
> > >> > > > > > > F = cvCreateMat(3,3,CV_32FC1);
> > >> > > > > > > CvMat *status = cvCreateMat(1,points1->cols,CV_8UC1);
> > >> > > > > > > int fm_count = cvFindFundamentalMat( points1,points2,F,
> > >> > > > > > > CV_FM_RANSAC,1.,0.99,status );
> > >> > > > > > > removeOutliers(&points1,&points2,status);
> > >> > > > > > >
> > >> > > > > > > where removeOutliers() is a function I wrote to clean up
> > >> after
> > >> > > > > > > cvFindFundamentalMat():
> > >> > > > > > >
> > >> > > > > > > // iterates the set of putative correspondences and
> > >> removes
> > >> > > > > > correspondences
> > >> > > > > > > marked as outliers by cvFindFundamentalMat()
> > >> > > > > > > void removeOutliers(CvMat **points1, CvMat **points2,
> > >> CvMat
> > >> > > > *status) {
> > >> > > > > > > CvMat *points1_ = *points1;
> > >> > > > > > > CvMat *points2_ = *points2;
> > >> > > > > > > int count = 0;
> > >> > > > > > > for (int i = 0; i < status->cols; i++) if
> > >> > > > > > (CV_MAT_ELEM(*status,unsigned
> > >> > > > > > > char,0,i)) count++;
> > >> > > > > > > if (!count) { // no inliers
> > >> > > > > > > *points1 = NULL;
> > >> > > > > > > *points2 = NULL;
> > >> > > > > > > }
> > >> > > > > > > else {
> > >> > > > > > > *points1 = cvCreateMat(1,count,CV_32FC2);
> > >> > > > > > > *points2 = cvCreateMat(1,count,CV_32FC2);
> > >> > > > > > > int j = 0;
> > >> > > > > > > for (int i = 0; i < status->cols; i++) {
> > >> > > > > > > if (CV_MAT_ELEM(*status,unsigned char,0,i)) {
> > >> > > > > > > (*points1)->data.fl[j*2] = points1_->data.fl[i*2];
> > >> > > > > > > //p1->x
> > >> > > > > > > (*points1)->data.fl[j*2+1] = points1_->data.fl[i*2+1];
> > >> > > > > > > //p1->y
> > >> > > > > > > (*points2)->data.fl[j*2] = points2_->data.fl[i*2];
> > >> > > > > > > //p2->x
> > >> > > > > > > (*points2)->data.fl[j*2+1] = points2_->data.fl[i*2+1];
> > >> > > > > > > //p2->y
> > >> > > > > > > j++;
> > >> > > > > > > }
> > >> > > > > > > }
> > >> > > > > > > }
> > >> > > > > > > cvReleaseMat(&points1_);
> > >> > > > > > > cvReleaseMat(&points2_);
> > >> > > > > > > }
> > >> > > > > > >
> > >> > > > > > >
> > >> > > > > > > I hope this helps.
> > >> > > > > > >
> > >> > > > > > > -Jostein
> > >> > > > > > >
> > >> > > > > > >
> > >> > > > > > > 2009/1/8 yair_movshovitz <yairmov@>
> > >> > > > > > >
> > >> > > > > > > > Hi Everyone,
> > >> > > > > > > >
> > >> > > > > > > > I'm trying to understand how to use the SURF features
> > >> > > > capabilities of
> > >> > > > > > > > openCV.
> > >> > > > > > > > My scenario is as follows:
> > >> > > > > > > > I have two rectangled areas in an image, which are
> > >> supposed to
> > >> > > > bound
> > >> > > > > > > > the same object. I would like to see how good is this
> > >> > > > assumption. In
> > >> > > > > > > > other words I would like to see how many features they
> > >> share.
> > >> > > > > > > >
> > >> > > > > > > > Can someone drop me a hint on how to use the SURF
> > >> > > > implementation of
> > >> > > > > > > > openCV (or direct me to somewhere that has some
> > >> documentation
> > >> > > > of it)
> > >> > > > > > > >
> > >> > > > > > > > Thanks,
> > >> > > > > > > > Yair
> > >> > > > > > > >
> > >> > > > > > > >
> > >> > > > > > > >
> > >> > > > > > >
> > >> > > > > > >
> > >> > > > > > > [Non-text portions of this message have been removed]
> > >> > > > > > >
> > >> > > > > >
> > >> > > > > >
> > >> > > > > >
> > >> > > > >
> > >> > > > >
> > >> > > > > [Non-text portions of this message have been removed]
> > >> > > > >
> > >> > > >
> > >> > > >
> > >> > > >
> > >> > >
> > >> > >
> > >> > > [Non-text portions of this message have been removed]
> > >> > >
> > >> >
> > >>
> > >>  
> > >>
> > >
> > >
> >
> >
> > [Non-text portions of this message have been removed]
> >
>


Reply | Threaded
Open this post in threaded view
|

Re: cvExtractSURF

liuliu_0503
I believe this change was made later than rev1520; check out the newest rev1642 or any revision no earlier than rev1550.
--- In [hidden email], "oliver_sidla" <oliver_sidla@...> wrote:

>
> Hello all,
>
> I am also having the same problem - with the latest version
> of cvSurf (its version 1520).
> When did Ian Mahon do the modification?
>
> Thanks,
>
> Oliver
>
>
> --- In [hidden email], "liuliu_0503" <liuliu.1987+opencv@> wrote:
> >
> > I think that the modifications made by Ian Mahon should have fixed the problem already. Check out the svn version of opencv and try again.
> >
> > --- In [hidden email], Raluca Borca <raluca.borca@> wrote:
> > >
> > > To be more precise, I encountered the problem at the following line in
> > > cvsurf.cpp
> > >
> > > /* hessian detector */
> > >     for( octave = k = 0; octave < params->nOctaves; octave++ )
> > >     {
> > >         for( sc = -1; sc <= params->nOctaveLayers; sc++, k++ )
> > >         {
> > >             if ( sc < 0 )
> > >                 sizeCache[k] = size = 7 << octave; // gaussian scale 1.0;
> > >             else
> > >                 sizeCache[k] = size = (sc*6 + 9) << octave; // gaussian
> > > scale size*1.2/9.;
> > >             scaleCache[k] = scale = MAX(size, SIZE0);
> > >
> > >             hessian_rows = (sum->rows)*SIZE0/scale;
> > >             hessian_cols = (sum->cols)*SIZE0/scale;
> > >             hessians[k] = cvCreateMat( hessian_rows, hessian_cols, CV_32FC1
> > > );
> > >             traces[k] = cvCreateMat( hessian_rows, hessian_cols, CV_32FC1
> > > );  // this is the point where I get an out of memory exception
> > >
> > > ......
> > >
> > > } } }1
> > >
> > > On Thu, Mar 12, 2009 at 12:26 PM, Raluca Borca <raluca.borca@>wrote:
> > >
> > > > Hello !
> > > >
> > > > I have encountered the same problem.
> > > >
> > > > Can anybody tell what is the solution ?
> > > >
> > > > Thanks.
> > > >
> > > >
> > > > On Sun, Mar 1, 2009 at 8:21 AM, Ricardo. <dadagori@> wrote:
> > > >
> > > >>   Hello!
> > > >>
> > > >> I wonder if you found what was causing this error? 'Cause it's
> > > >> happening to me too and cannot figure it out.
> > > >>
> > > >> I'd appreciate it if you share how you solved it -if you did of
> > > >> course-.
> > > >>
> > > >> Regards,
> > > >> Ricardo
> > > >>
> > > >>
> > > >> --- In [hidden email] <OpenCV%40yahoogroups.com>,
> > > >> "yair_movshovitz" <yairmov@> wrote:
> > > >> >
> > > >> > I did some investigating and found out that the error is happening
> > > >> > inside the icvFastHessianDetector() function in cvsurf.cpp
> > > >> > It happens when the function tries to free the memory it allocated:
> > > >> >
> > > >> > for( octave = k = 0; octave < params->nOctaves; octave++ )
> > > >> > for( sc = -1; sc <= params->nOctaveLayers; sc++, k++ )
> > > >> > {
> > > >> > //this line causes the error at some iteration.
> > > >> > cvReleaseMat( &hessians[k] );
> > > >> > cvReleaseMat( &traces[k] );
> > > >> > }
> > > >> >
> > > >> >
> > > >> > Anyone has an idea why this is happening?
> > > >> >
> > > >> > Thanks,
> > > >> > Yair
> > > >> >
> > > >> > --- In [hidden email] <OpenCV%40yahoogroups.com>, "Jostein
> > > >> Austvik Jacobsen"
> > > >> > <josteinaj@> wrote:
> > > >> > >
> > > >> > > I'm using Ubuntu Linux so I can't help you there. Sorry.
> > > >> > > Jostein
> > > >> > >
> > > >> > > 2009/1/20 yair_movshovitz <yairmov@>
> > > >> > >
> > > >> > > > Hi Jostein,
> > > >> > > >
> > > >> > > > Thanks again for helping me out.
> > > >> > > >
> > > >> > > > I have started using the cvExtractSURF function. and I have the
> > > >> > > > following problem:
> > > >> > > > When I call the function I get this error -
> > > >> > > > Windows has triggered a breakpoint in (my program name).
> > > >> > > >
> > > >> > > > This may be due to a corruption of the heap, which indicates a
> > > >> bug in
> > > >> > > > (my program name) or any of the DLLs it has loaded.
> > > >> > > >
> > > >> > > > Have you ever encountered this error before regarding this
> > > >> function?
> > > >> > > >
> > > >> > > > Thanks,
> > > >> > > >
> > > >> > > > Yair
> > > >> > > >
> > > >> > > > --- In [hidden email] <OpenCV%40yahoogroups.com> <OpenCV%
> > > >> 40yahoogroups.com>, "Jostein
> > > >> > Austvik
> > > >> > > > Jacobsen"
> > > >> > > > <josteinaj@> wrote:
> > > >> > > > >
> > > >> > > > > You can view the implementation of *cvExtractSURF(...)* here:
> > > >> > > > >
> > > >> > > >
> > > >> > > >
> > > >> >
> > > >> http://opencvlibrary.svn.sourceforge.net/viewvc/opencvlibrary/trunk/op
> > > >> encv/src/cv/cvsurf.cpp,
> > > >> > > > > however it doesn't contain much comments.
> > > >> > > > >
> > > >> > > > >
> > > >> > > > > *cvExtractSURF( const CvArr* img, const CvArr* mask, CvSeq**
> > > >> > keypoints,
> > > >> > > > > CvSeq** descriptors, CvMemStorage* storage, CvSURFParams
> > > >> params )*
> > > >> > > > >
> > > >> > > > > Here, *img* is the image. Use an
> > > >> > > > > *IplImage<http://opencv.willowgarage.com/wiki/CxCore#IplImage>
> > > >> > > > > * for the image. To load an image from disk, use
> > > >> > > > >
> > > >> > > >
> > > >> > *cvLoadImage(...)
> > > >> *<http://opencv.willowgarage.com/wiki/HighGui#cvLoadImage
> > > >> > > > >,
> > > >> > > > > and to create your own image, use
> > > >> > > > >
> > > >> > > > *cvCreateImage(...)*<
> > > >> > > > http://opencv.willowgarage.com/wiki/CxCore#CreateImage>.
> > > >> > > > > Lets say you have a IplImage *image* and want to extract the
> > > >> > rectangle
> > > >> > > > > (x,y)->(x+dx,y+dy) from it as an IplImage rectangle, you might
> > > >> > do this:
> > > >> > > > >
> > > >> > > > > CvSize size = cvSize(dx,dy);
> > > >> > > > > IplImage* rectangle = cvCreateImage(size, IPL_DEPTH_8U, 1);
> > > >> > > > > for (int i = 0; i < dx; ++i) {
> > > >> > > > > for (int j = 0; j < dy; ++j) {
> > > >> > > > > CV_IMAGE_ELEM(rectangle,unsigned char,i,j) =
> > > >> > > > > CV_IMAGE_ELEM(image,unsigned char,x+i,y+j);
> > > >> > > > > }
> > > >> > > > > }
> > > >> > > > >
> > > >> > > > > I'm not sure how *mask* is used, but a quick google search
> > > >> gives
> > > >> > > > >
> > > >> > > >
> > > >> > > >
> > > >> > http://www.emgu.com/wiki/files/1.4.0.0/html/ad54862f-3d1c-c177-3bdb-
> > > >> 768619f8dd90.htm which
> > > >> > > > > says "The optional input 8-bit mask. The features are only
> > > >> found in
> > > >> > > > > the areas that contain more than 50% of non-zero mask
> > > >> pixels". Just
> > > >> > > > set it
> > > >> > > > > to NULL.
> > > >> > > > >
> > > >> > > > > *keypoints* and
> > > >> > > > >
> > > >> > > >
> > > >> > *descriptors*<<a href="http://en.wikipedia.org/wiki/Feature_%">http://en.wikipedia.org/wiki/Feature_%
> > > >> 28computer_vision%29
> > > >> > > > >are
> > > >> > > > > where the results are placed. Initialize them as null-
> > > >> pointers and
> > > >> > > > > cvExtractSURF will do the rest for you. Afterwards you can
> > > >> access a
> > > >> > > > > descriptor and corresponding keypoint like this:
> > > >> > > > >
> > > >> > > > > int k = 0; // the keypoint you want. There are *keypoints-
> > > >> >total*
> > > >> > > > keypoints.
> > > >> > > > > float *seq = (float*)cvGetSeqElem(descriptors, k); // the
> > > >> > descriptor of
> > > >> > > > > length 64 or 128
> > > >> > > > > CvPoint2D32f *p = &((CvSURFPoint*)cvGetSeqElem(keypoints, k))-
> > > >> >pt;
> > > >> > > > // the
> > > >> > > > > (x,y) coordinates of keypoint *k* can now be accessed as *p-
> > > >> >x* and
> > > >> > > > *p->y*
> > > >> > > > >
> > > >> > > > > The *CvMemStorage*
> > > >> > > > >
> > > >> <http://opencv.willowgarage.com/wiki/CxCore#CvMemStorage>struct
> > > >> > > > > *storage* is used as a mechanism to simplify memory
> > > >> management. I
> > > >> > > > believe
> > > >> > > > > the *keypoints* and *descriptors* structures are put into
> > > >> *storage*,
> > > >> > > > so you
> > > >> > > > > can't release *storage* until you're done using *keypoints*
> > > >> and
> > > >> > > > *descriptors
> > > >> > > > > *.Put a *CvMemStorage *storage = cvCreateMemStorage(0);*
> > > >> before your
> > > >> > > > first
> > > >> > > > > call to cvExtractSURF and *cvClearMemStorage(storage);* after
> > > >> you're
> > > >> > > > done
> > > >> > > > > using *keypoints* and *descriptors*.
> > > >> > > > >
> > > >> > > > > SURF takes a couple of parameters through the *CvSURFParams*
> > > >> struct
> > > >> > > > *params*.
> > > >> > > > > You create *params* with *cvSURFParams(double threshold, int
> > > >> > > > > extended)*where threshold represents the "edgyness" that is
> > > >> required
> > > >> > > > > from a feature to
> > > >> > > > > be recognized as a feature. It can be adjusted to retrieve
> > > >> more
> > > >> > or fewer
> > > >> > > > > features. In the paper
> > > >> > > > > <http://www.vision.ee.ethz.ch/%7Esurf/eccv06.pdf>describing
> > > >> the SURF
> > > >> > > > > detector, they use a threshold of 600 on a 800 x 640
> > > >> > > > > image which returned 1418 features. The *extended* parameter
> > > >> is
> > > >> > a simple
> > > >> > > > > boolean 1 or 0 which states whether or not to use the extended
> > > >> > > > descriptor.
> > > >> > > > > The extended descriptor consists of 128 instead of 64 values
> > > >> which
> > > >> > > > should
> > > >> > > > > give a better result at the cost of using more memory.
> > > >> Instead of
> > > >> > > > creating
> > > >> > > > > a new CvSURFParams struct for each call to cvExtractSURF, you
> > > >> > could do:
> > > >> > > > >
> > > >> > > > > CvSURFParams params = cvSURFParams(600, 1);
> > > >> > > > > cvExtractSURF(..., params);
> > > >> > > > > cvExtractSURF(..., params);
> > > >> > > > >
> > > >> > > > >
> > > >> > > > > There you go. I hope I answered your question :)
> > > >> > > > >
> > > >> > > > > Jostein
> > > >> > > > >
> > > >> > > > >
> > > >> > > > > 2009/1/12 yair_movshovitz <yairmov@>
> > > >> > > > >
> > > >> > > > > > Hi Jostein,
> > > >> > > > > >
> > > >> > > > > > Thanks a lot for your help!
> > > >> > > > > >
> > > >> > > > > > Can you please explain the function parameters of
> > > >> cvExtractSURF?
> > > >> > > > > > I mean in - cvExtractSURF(img1, NULL, &kp1, &desc1, storage,
> > > >> > > > > > cvSURFParams(600,EXTENDED_DESCRIPTOR));
> > > >> > > > > > what is the role of kp1, desc1, storage and the SURFParams?
> > > >> > > > > > is storage just a temp area for the algorithm to use?
> > > >> > > > > >
> > > >> > > > > > Thanks again
> > > >> > > > > > Yair
> > > >> > > > > >
> > > >> > > > > > --- In [hidden email] <OpenCV%40yahoogroups.com><OpenCV%
> > > >> 40yahoogroups.com>
> > > >> <OpenCV%
> > > >> > > > 40yahoogroups.com>, "Jostein
> > > >> > > >
> > > >> > > > Austvik
> > > >> > > > > > Jacobsen"
> > > >> > > > > >
> > > >> > > > > > <josteinaj@> wrote:
> > > >> > > > > > >
> > > >> > > > > > > If you've got your two rectangled areas stored as img1 and
> > > >> > img2 you
> > > >> > > > > > could do
> > > >> > > > > > > this to extract its keypoints and corresponding
> > > >> descriptors:
> > > >> > > > > > >
> > > >> > > > > > > #define EXTENDED_DESCRIPTOR 1
> > > >> > > > > > > CvSeq *kp1=NULL, *kp2=NULL;
> > > >> > > > > > > CvSeq *desc1=NULL, *desc2=NULL;
> > > >> > > > > > > CvMemStorage *storage = cvCreateMemStorage(0);
> > > >> > > > > > > cvExtractSURF(img1, NULL, &kp1, &desc1, storage,
> > > >> > cvSURFParams(600,
> > > >> > > > > > > EXTENDED_DESCRIPTOR));
> > > >> > > > > > > cvExtractSURF(img2, NULL, &kp2, &desc2, storage,
> > > >> > cvSURFParams(600,
> > > >> > > > > > > EXTENDED_DESCRIPTOR));
> > > >> > > > > > >
> > > >> > > > > > > You will have to correlate the descriptors with each
> > > >> other to
> > > >> > > > determine
> > > >> > > > > > > which keypoints in each rectangle corresponds to one
> > > >> > another. You
> > > >> > > > > > could use
> > > >> > > > > > > a BBF tree which is implemented in the latest version of
> > > >> > OpenCV, but
> > > >> > > > > > unless
> > > >> > > > > > > your rectangle is huge, you might just as well just
> > > >> correlate
> > > >> > > > them the
> > > >> > > > > > > standard way, which I do like this:
> > > >> > > > > > >
> > > >> > > > > > > #define CORRELATION_THRESHOLD 0.7
> > > >> > > > > > > // brute-force attempt at correlating the two sets of
> > > >> features
> > > >> > > > > > > void bruteMatch(CvMat **points1, CvMat **points2, CvSeq
> > > >> > *kp1, CvSeq
> > > >> > > > > > *desc1,
> > > >> > > > > > > CvSeq *kp2, CvSeq *desc2) {
> > > >> > > > > > > int i,j,k;
> > > >> > > > > > > double* avg1 = (double*)malloc(sizeof(double)*kp1->total);
> > > >> > > > > > > double* avg2 = (double*)malloc(sizeof(double)*kp2->total);
> > > >> > > > > > > double* dev1 = (double*)malloc(sizeof(double)*kp1->total);
> > > >> > > > > > > double* dev2 = (double*)malloc(sizeof(double)*kp2->total);
> > > >> > > > > > > int* best1 = (int*)malloc(sizeof(int)*kp1->total);
> > > >> > > > > > > int* best2 = (int*)malloc(sizeof(int)*kp2->total);
> > > >> > > > > > > double* best1corr = (double*)malloc(sizeof(double)*kp1-
> > > >> >total);
> > > >> > > > > > > double* best2corr = (double*)malloc(sizeof(double)*kp2-
> > > >> >total);
> > > >> > > > > > > float *seq1, *seq2;
> > > >> > > > > > > int descriptor_size = EXTENDED_DESCRIPTOR ? 128 : 64;
> > > >> > > > > > > for (i=0; i<kp1->total; i++) {
> > > >> > > > > > > // find average and standard deviation of each descriptor
> > > >> > > > > > > avg1[i] = 0;
> > > >> > > > > > > dev1[i] = 0;
> > > >> > > > > > > seq1 = (float*)cvGetSeqElem(desc1, i);
> > > >> > > > > > > for (k=0; k<descriptor_size; k++) avg1[i] += seq1[k];
> > > >> > > > > > > avg1[i] /= descriptor_size;
> > > >> > > > > > > for (k=0; k<descriptor_size; k++) dev1[i] +=
> > > >> > > > > > > (seq1[k]-avg1[i])*(seq1[k]-avg1[i]);
> > > >> > > > > > > dev1[i] = sqrt(dev1[i]/descriptor_size);
> > > >> > > > > > >
> > > >> > > > > > > // initialize best1 and best1corr
> > > >> > > > > > > best1[i] = -1;
> > > >> > > > > > > best1corr[i] = -1.;
> > > >> > > > > > > }
> > > >> > > > > > > for (j=0; j<kp2->total; j++) {
> > > >> > > > > > > // find average and standard deviation of each descriptor
> > > >> > > > > > > avg2[j] = 0;
> > > >> > > > > > > dev2[j] = 0;
> > > >> > > > > > > seq2 = (float*)cvGetSeqElem(desc2, j);
> > > >> > > > > > > for (k=0; k<descriptor_size; k++) avg2[j] += seq2[k];
> > > >> > > > > > > avg2[j] /= descriptor_size;
> > > >> > > > > > > for (k=0; k<descriptor_size; k++) dev2[j] +=
> > > >> > > > > > > (seq2[k]-avg2[j])*(seq2[k]-avg2[j]);
> > > >> > > > > > > dev2[j] = sqrt(dev2[j]/descriptor_size);
> > > >> > > > > > >
> > > >> > > > > > > // initialize best2 and best2corr
> > > >> > > > > > > best2[j] = -1;
> > > >> > > > > > > best2corr[j] = -1.;
> > > >> > > > > > > }
> > > >> > > > > > > double corr;
> > > >> > > > > > > for (i = 0; i < kp1->total; ++i) {
> > > >> > > > > > > seq1 = (float*)cvGetSeqElem(desc1, i);
> > > >> > > > > > > for (j = 0; j < kp2->total; ++j) {
> > > >> > > > > > > corr = 0;
> > > >> > > > > > > seq2 = (float*)cvGetSeqElem(desc2, j);
> > > >> > > > > > > for (k = 0; k < descriptor_size; ++k)
> > > >> > > > > > > corr += (seq1[k]-avg1[i])*(seq2[k]-avg2[j]);
> > > >> > > > > > > corr /= (descriptor_size-1)*dev1[i]*dev2[j];
> > > >> > > > > > > if (corr > best1corr[i]) {
> > > >> > > > > > > best1corr[i] = corr;
> > > >> > > > > > > best1[i] = j;
> > > >> > > > > > > }
> > > >> > > > > > > if (corr > best2corr[j]) {
> > > >> > > > > > > best2corr[j] = corr;
> > > >> > > > > > > best2[j] = i;
> > > >> > > > > > > }
> > > >> > > > > > > }
> > > >> > > > > > > }
> > > >> > > > > > > j = 0;
> > > >> > > > > > > for (i = 0; i < kp1->total; i++)
> > > >> > > > > > > if (best2[best1[i]] == i && best1corr[i] >
> > > >> > > > > > CORRELATION_THRESHOLD)
> > > >> > > > > > > j++;
> > > >> > > > > > > if (j == 0) return; // no matches found
> > > >> > > > > > > *points1 = cvCreateMat(1,j,CV_32FC2);
> > > >> > > > > > > *points2 = cvCreateMat(1,j,CV_32FC2);
> > > >> > > > > > > CvPoint2D32f *p1, *p2;
> > > >> > > > > > > j = 0;
> > > >> > > > > > > for (i = 0; i < kp1->total; i++) {
> > > >> > > > > > > if (best2[best1[i]] == i && best1corr[i] >
> > > >> > > > > > CORRELATION_THRESHOLD) {
> > > >> > > > > > > p1 = &((CvSURFPoint*)cvGetSeqElem(kp1,i))->pt;
> > > >> > > > > > > p2 = &((CvSURFPoint*)cvGetSeqElem(kp2,best1[i]))->pt;
> > > >> > > > > > > (*points1)->data.fl[j*2] = p1->x;
> > > >> > > > > > > (*points1)->data.fl[j*2+1] = p1->y;
> > > >> > > > > > > (*points2)->data.fl[j*2] = p2->x;
> > > >> > > > > > > (*points2)->data.fl[j*2+1] = p2->y;
> > > >> > > > > > > j++;
> > > >> > > > > > > }
> > > >> > > > > > > }
> > > >> > > > > > > free(best2corr);
> > > >> > > > > > > free(best1corr);
> > > >> > > > > > > free(best2);
> > > >> > > > > > > free(best1);
> > > >> > > > > > > free(avg1);
> > > >> > > > > > > free(avg2);
> > > >> > > > > > > free(dev1);
> > > >> > > > > > > free(dev2);
> > > >> > > > > > > }
> > > >> > > > > > >
> > > >> > > > > > > If you construct a fundamental matrix (a model) for the
> > > >> > > > transformation
> > > >> > > > > > > between the two rectangles, you can further determine
> > > >> which
> > > >> > > > > > correspondences
> > > >> > > > > > > are false (by how well they fit the model) and remove
> > > >> them,
> > > >> > which I
> > > >> > > > > > like to
> > > >> > > > > > > do like this:
> > > >> > > > > > >
> > > >> > > > > > > F = cvCreateMat(3,3,CV_32FC1);
> > > >> > > > > > > CvMat *status = cvCreateMat(1,points1->cols,CV_8UC1);
> > > >> > > > > > > int fm_count = cvFindFundamentalMat( points1,points2,F,
> > > >> > > > > > > CV_FM_RANSAC,1.,0.99,status );
> > > >> > > > > > > removeOutliers(&points1,&points2,status);
> > > >> > > > > > >
> > > >> > > > > > > where removeOutliers() is a function I wrote to clean up
> > > >> after
> > > >> > > > > > > cvFindFundamentalMat():
> > > >> > > > > > >
> > > >> > > > > > > // iterates the set of putative correspondences and
> > > >> removes
> > > >> > > > > > correspondences
> > > >> > > > > > > marked as outliers by cvFindFundamentalMat()
> > > >> > > > > > > void removeOutliers(CvMat **points1, CvMat **points2,
> > > >> CvMat
> > > >> > > > *status) {
> > > >> > > > > > > CvMat *points1_ = *points1;
> > > >> > > > > > > CvMat *points2_ = *points2;
> > > >> > > > > > > int count = 0;
> > > >> > > > > > > for (int i = 0; i < status->cols; i++) if
> > > >> > > > > > (CV_MAT_ELEM(*status,unsigned
> > > >> > > > > > > char,0,i)) count++;
> > > >> > > > > > > if (!count) { // no inliers
> > > >> > > > > > > *points1 = NULL;
> > > >> > > > > > > *points2 = NULL;
> > > >> > > > > > > }
> > > >> > > > > > > else {
> > > >> > > > > > > *points1 = cvCreateMat(1,count,CV_32FC2);
> > > >> > > > > > > *points2 = cvCreateMat(1,count,CV_32FC2);
> > > >> > > > > > > int j = 0;
> > > >> > > > > > > for (int i = 0; i < status->cols; i++) {
> > > >> > > > > > > if (CV_MAT_ELEM(*status,unsigned char,0,i)) {
> > > >> > > > > > > (*points1)->data.fl[j*2] = points1_->data.fl[i*2];
> > > >> > > > > > > //p1->x
> > > >> > > > > > > (*points1)->data.fl[j*2+1] = points1_->data.fl[i*2+1];
> > > >> > > > > > > //p1->y
> > > >> > > > > > > (*points2)->data.fl[j*2] = points2_->data.fl[i*2];
> > > >> > > > > > > //p2->x
> > > >> > > > > > > (*points2)->data.fl[j*2+1] = points2_->data.fl[i*2+1];
> > > >> > > > > > > //p2->y
> > > >> > > > > > > j++;
> > > >> > > > > > > }
> > > >> > > > > > > }
> > > >> > > > > > > }
> > > >> > > > > > > cvReleaseMat(&points1_);
> > > >> > > > > > > cvReleaseMat(&points2_);
> > > >> > > > > > > }
> > > >> > > > > > >
> > > >> > > > > > >
> > > >> > > > > > > I hope this helps.
> > > >> > > > > > >
> > > >> > > > > > > -Jostein
> > > >> > > > > > >
> > > >> > > > > > >
> > > >> > > > > > > 2009/1/8 yair_movshovitz <yairmov@>
> > > >> > > > > > >
> > > >> > > > > > > > Hi Everyone,
> > > >> > > > > > > >
> > > >> > > > > > > > I'm trying to understand how to use the SURF features
> > > >> > > > capabilities of
> > > >> > > > > > > > openCV.
> > > >> > > > > > > > My scenario is as follows:
> > > >> > > > > > > > I have two rectangled areas in an image, which are
> > > >> supposed to
> > > >> > > > bound
> > > >> > > > > > > > the same object. I would like to see how good is this
> > > >> > > > assumption. In
> > > >> > > > > > > > other words I would like to see how many features they
> > > >> share.
> > > >> > > > > > > >
> > > >> > > > > > > > Can someone drop me a hint on how to use the SURF
> > > >> > > > implementation of
> > > >> > > > > > > > openCV (or direct me to somewhere that has some
> > > >> documentation
> > > >> > > > of it)
> > > >> > > > > > > >
> > > >> > > > > > > > Thanks,
> > > >> > > > > > > > Yair
> > > >> > > > > > > >
> > > >> > > > > > > >
> > > >> > > > > > > >
> > > >> > > > > > >
> > > >> > > > > > >
> > > >> > > > > > > [Non-text portions of this message have been removed]
> > > >> > > > > > >
> > > >> > > > > >
> > > >> > > > > >
> > > >> > > > > >
> > > >> > > > >
> > > >> > > > >
> > > >> > > > > [Non-text portions of this message have been removed]
> > > >> > > > >
> > > >> > > >
> > > >> > > >
> > > >> > > >
> > > >> > >
> > > >> > >
> > > >> > > [Non-text portions of this message have been removed]
> > > >> > >
> > > >> >
> > > >>
> > > >>  
> > > >>
> > > >
> > > >
> > >
> > >
> > > [Non-text portions of this message have been removed]
> > >
> >
>


Reply | Threaded
Open this post in threaded view
|

Re: Re: cvExtractSURF

Raluca Borca
I built the current version from SVN and the problem was solved. Thanks.

On Fri, Mar 13, 2009 at 10:00 PM, liuliu_0503
<[hidden email]<liuliu.1987%[hidden email]>
> wrote:

>   i believe this change was made later than rev1520, checkout the newest
> rev1642 or any revision no earlier than rev1550.
>
> --- In [hidden email] <OpenCV%40yahoogroups.com>, "oliver_sidla"
> <oliver_sidla@...> wrote:
> >
> > Hello all,
> >
> > I am also having the same problem - with the latest version
> > of cvSurf (its version 1520).
> > When did Ian Mahon do the modification?
> >
> > Thanks,
> >
> > Oliver
> >
> >
> > --- In [hidden email] <OpenCV%40yahoogroups.com>, "liuliu_0503"
> <liuliu.1987+opencv@> wrote:
> > >
> > > I think that the modifications made by Ian Mahon should have fixed the
> problem already. Check out the svn version of opencv and try again.
> > >
> > > --- In [hidden email] <OpenCV%40yahoogroups.com>, Raluca Borca
> <raluca.borca@> wrote:
> > > >
> > > > To be more precise, I encountered the problem at the following line in
> > > > cvsurf.cpp
> > > >
> > > > /* hessian detector */
> > > > for( octave = k = 0; octave < params->nOctaves; octave++ )
> > > > {
> > > > for( sc = -1; sc <= params->nOctaveLayers; sc++, k++ )
> > > > {
> > > > if ( sc < 0 )
> > > > sizeCache[k] = size = 7 << octave; // gaussian scale 1.0;
> > > > else
> > > > sizeCache[k] = size = (sc*6 + 9) << octave; // gaussian
> > > > scale size*1.2/9.;
> > > > scaleCache[k] = scale = MAX(size, SIZE0);
> > > >
> > > > hessian_rows = (sum->rows)*SIZE0/scale;
> > > > hessian_cols = (sum->cols)*SIZE0/scale;
> > > > hessians[k] = cvCreateMat( hessian_rows, hessian_cols, CV_32FC1
> > > > );
> > > > traces[k] = cvCreateMat( hessian_rows, hessian_cols, CV_32FC1
> > > > ); // this is the point where I get an out of memory exception
> > > >
> > > > ......
> > > >
> > > > } } }1
> > > >
> > > > On Thu, Mar 12, 2009 at 12:26 PM, Raluca Borca <raluca.borca@>wrote:
> > > >
> > > > > Hello !
> > > > >
> > > > > I have encountered the same problem.
> > > > >
> > > > > Can anybody tell what is the solution ?
> > > > >
> > > > > Thanks.
> > > > >
> > > > >
> > > > > On Sun, Mar 1, 2009 at 8:21 AM, Ricardo. <dadagori@> wrote:
> > > > >
> > > > >> Hello!
> > > > >>
> > > > >> I wonder if you found what was causing this error? 'Cause it's
> > > > >> happening to me too and cannot figure it out.
> > > > >>
> > > > >> I'd appreciate it if you share how you solved it -if you did of
> > > > >> course-.
> > > > >>
> > > > >> Regards,
> > > > >> Ricardo
> > > > >>
> > > > >>
> > > > >> --- In [hidden email] <OpenCV%40yahoogroups.com> <OpenCV%
> 40yahoogroups.com>,
> > > > >> "yair_movshovitz" <yairmov@> wrote:
> > > > >> >
> > > > >> > I did some investigating and found out that the error is
> happening
> > > > >> > inside the icvFastHessianDetector() function in cvsurf.cpp
> > > > >> > It happens when the function tries to free the memory it
> allocated:
> > > > >> >
> > > > >> > for( octave = k = 0; octave < params->nOctaves; octave++ )
> > > > >> > for( sc = -1; sc <= params->nOctaveLayers; sc++, k++ )
> > > > >> > {
> > > > >> > //this line causes the error at some iteration.
> > > > >> > cvReleaseMat( &hessians[k] );
> > > > >> > cvReleaseMat( &traces[k] );
> > > > >> > }
> > > > >> >
> > > > >> >
> > > > >> > Anyone has an idea why this is happening?
> > > > >> >
> > > > >> > Thanks,
> > > > >> > Yair
> > > > >> >
> > > > >> > --- In [hidden email] <OpenCV%40yahoogroups.com><OpenCV%
> 40yahoogroups.com>, "Jostein
> > > > >> Austvik Jacobsen"
> > > > >> > <josteinaj@> wrote:
> > > > >> > >
> > > > >> > > I'm using Ubuntu Linux so I can't help you there. Sorry.
> > > > >> > > Jostein
> > > > >> > >
> > > > >> > > 2009/1/20 yair_movshovitz <yairmov@>
> > > > >> > >
> > > > >> > > > Hi Jostein,
> > > > >> > > >
> > > > >> > > > Thanks again for helping me out.
> > > > >> > > >
> > > > >> > > > I have started using the cvExtractSURF function. and I have
> the
> > > > >> > > > following problem:
> > > > >> > > > When I call the function I get this error -
> > > > >> > > > Windows has triggered a breakpoint in (my program name).
> > > > >> > > >
> > > > >> > > > This may be due to a corruption of the heap, which indicates
> a
> > > > >> bug in
> > > > >> > > > (my program name) or any of the DLLs it has loaded.
> > > > >> > > >
> > > > >> > > > Have you ever encountered this error before regarding this
> > > > >> function?
> > > > >> > > >
> > > > >> > > > Thanks,
> > > > >> > > >
> > > > >> > > > Yair
> > > > >> > > >
> > > > >> > > > --- In [hidden email] <OpenCV%40yahoogroups.com><OpenCV%
> 40yahoogroups.com> <OpenCV%
> > > > >> 40yahoogroups.com>, "Jostein
> > > > >> > Austvik
> > > > >> > > > Jacobsen"
> > > > >> > > > <josteinaj@> wrote:
> > > > >> > > > >
> > > > >> > > > > You can view the implementation of *cvExtractSURF(...)*
> here:
> > > > >> > > > >
> > > > >> > > >
> > > > >> > > >
> > > > >> >
> > > > >>
> http://opencvlibrary.svn.sourceforge.net/viewvc/opencvlibrary/trunk/op
> > > > >> encv/src/cv/cvsurf.cpp,
> > > > >> > > > > however it doesn't contain much comments.
> > > > >> > > > >
> > > > >> > > > >
> > > > >> > > > > *cvExtractSURF( const CvArr* img, const CvArr* mask,
> CvSeq**
> > > > >> > keypoints,
> > > > >> > > > > CvSeq** descriptors, CvMemStorage* storage, CvSURFParams
> > > > >> params )*
> > > > >> > > > >
> > > > >> > > > > Here, *img* is the image. Use an
> > > > >> > > > > *IplImage<
> http://opencv.willowgarage.com/wiki/CxCore#IplImage>
> > > > >> > > > > * for the image. To load an image from disk, use
> > > > >> > > > >
> > > > >> > > >
> > > > >> > *cvLoadImage(...)
> > > > >> *<http://opencv.willowgarage.com/wiki/HighGui#cvLoadImage
> > > > >> > > > >,
> > > > >> > > > > and to create your own image, use
> > > > >> > > > >
> > > > >> > > > *cvCreateImage(...)*<
> > > > >> > > > http://opencv.willowgarage.com/wiki/CxCore#CreateImage>.
> > > > >> > > > > Lets say you have a IplImage *image* and want to extract
> the
> > > > >> > rectangle
> > > > >> > > > > (x,y)->(x+dx,y+dy) from it as an IplImage rectangle, you
> might
> > > > >> > do this:
> > > > >> > > > >
> > > > >> > > > > CvSize size = cvSize(dx,dy);
> > > > >> > > > > IplImage* rectangle = cvCreateImage(size, IPL_DEPTH_8U,
> 1);
> > > > >> > > > > for (int i = 0; i < dx; ++i) {
> > > > >> > > > > for (int j = 0; j < dy; ++j) {
> > > > >> > > > > CV_IMAGE_ELEM(rectangle,unsigned char,i,j) =
> > > > >> > > > > CV_IMAGE_ELEM(image,unsigned char,x+i,y+j);
> > > > >> > > > > }
> > > > >> > > > > }
> > > > >> > > > >
> > > > >> > > > > I'm not sure how *mask* is used, but a quick google search
> > > > >> gives
> > > > >> > > > >
> > > > >> > > >
> > > > >> > > >
> > > > >> >
> http://www.emgu.com/wiki/files/1.4.0.0/html/ad54862f-3d1c-c177-3bdb-
> > > > >> 768619f8dd90.htm which
> > > > >> > > > > says "The optional input 8-bit mask. The features are only
> > > > >> found in
> > > > >> > > > > the areas that contain more than 50% of non-zero mask
> > > > >> pixels". Just
> > > > >> > > > set it
> > > > >> > > > > to NULL.
> > > > >> > > > >
> > > > >> > > > > *keypoints* and
> > > > >> > > > >
> > > > >> > > >
> > > > >> > *descriptors*<<a href="http://en.wikipedia.org/wiki/Feature_%">http://en.wikipedia.org/wiki/Feature_%
> > > > >> 28computer_vision%29
> > > > >> > > > >are
> > > > >> > > > > where the results are placed. Initialize them as null-
> > > > >> pointers and
> > > > >> > > > > cvExtractSURF will do the rest for you. Afterwards you can
> > > > >> access a
> > > > >> > > > > descriptor and corresponding keypoint like this:
> > > > >> > > > >
> > > > >> > > > > int k = 0; // the keypoint you want. There are *keypoints-
> > > > >> >total*
> > > > >> > > > keypoints.
> > > > >> > > > > float *seq = (float*)cvGetSeqElem(descriptors, k); // the
> > > > >> > descriptor of
> > > > >> > > > > length 64 or 128
> > > > >> > > > > CvPoint2D32f *p = &((CvSURFPoint*)cvGetSeqElem(keypoints,
> k))-
> > > > >> >pt;
> > > > >> > > > // the
> > > > >> > > > > (x,y) coordinates of keypoint *k* can now be accessed as
> *p-
> > > > >> >x* and
> > > > >> > > > *p->y*
> > > > >> > > > >
> > > > >> > > > > The *CvMemStorage*
> > > > >> > > > >
> > > > >> <http://opencv.willowgarage.com/wiki/CxCore#CvMemStorage>struct
> > > > >> > > > > *storage* is used as a mechanism to simplify memory
> > > > >> management. I
> > > > >> > > > believe
> > > > >> > > > > the *keypoints* and *descriptors* structures are put into
> > > > >> *storage*,
> > > > >> > > > so you
> > > > >> > > > > can't release *storage* until you're done using
> *keypoints*
> > > > >> and
> > > > >> > > > *descriptors
> > > > >> > > > > *.Put a *CvMemStorage *storage = cvCreateMemStorage(0);*
> > > > >> before your
> > > > >> > > > first
> > > > >> > > > > call to cvExtractSURF and *cvClearMemStorage(storage);*
> after
> > > > >> you're
> > > > >> > > > done
> > > > >> > > > > using *keypoints* and *descriptors*.
> > > > >> > > > >
> > > > >> > > > > SURF takes a couple of parameters through the
> *CvSURFParams*
> > > > >> struct
> > > > >> > > > *params*.
> > > > >> > > > > You create *params* with *cvSURFParams(double threshold,
> int
> > > > >> > > > > extended)*where threshold represents the "edgyness" that
> is
> > > > >> required
> > > > >> > > > > from a feature to
> > > > >> > > > > be recognized as a feature. It can be adjusted to retrieve
> > > > >> more
> > > > >> > or fewer
> > > > >> > > > > features. In the paper
> > > > >> > > > > <http://www.vision.ee.ethz.ch/%7Esurf/eccv06.pdf
> >describing
> > > > >> the SURF
> > > > >> > > > > detector, they use a threshold of 600 on a 800 x 640
> > > > >> > > > > image which returned 1418 features. The *extended*
> parameter
> > > > >> is
> > > > >> > a simple
> > > > >> > > > > boolean 1 or 0 which states whether or not to use the
> extended
> > > > >> > > > descriptor.
> > > > >> > > > > The extended descriptor consists of 128 instead of 64
> values
> > > > >> which
> > > > >> > > > should
> > > > >> > > > > give a better result at the cost of using more memory.
> > > > >> Instead of
> > > > >> > > > creating
> > > > >> > > > > a new CvSURFParams struct for each call to cvExtractSURF,
> you
> > > > >> > could do:
> > > > >> > > > >
> > > > >> > > > > CvSURFParams params = cvSURFParams(600, 1);
> > > > >> > > > > cvExtractSURF(..., params);
> > > > >> > > > > cvExtractSURF(..., params);
> > > > >> > > > >
> > > > >> > > > >
> > > > >> > > > > There you go. I hope I answered your question :)
> > > > >> > > > >
> > > > >> > > > > Jostein
> > > > >> > > > >
> > > > >> > > > >
> > > > >> > > > > 2009/1/12 yair_movshovitz <yairmov@>
> > > > >> > > > >
> > > > >> > > > > > Hi Jostein,
> > > > >> > > > > >
> > > > >> > > > > > Thanks a lot for your help!
> > > > >> > > > > >
> > > > >> > > > > > Can you please explain the function parameters of
> > > > >> cvExtractSURF?
> > > > >> > > > > > I mean in - cvExtractSURF(img1, NULL, &kp1, &desc1,
> storage,
> > > > >> > > > > > cvSURFParams(600,EXTENDED_DESCRIPTOR));
> > > > >> > > > > > what is the role of kp1, desc1, storage and the
> SURFParams?
> > > > >> > > > > > is storage just a temp area for the algorithm to use?
> > > > >> > > > > >
> > > > >> > > > > > Thanks again
> > > > >> > > > > > Yair
> > > > >> > > > > >
> > > > >> > > > > > --- In [hidden email]<OpenCV%40yahoogroups.com><OpenCV%
> 40yahoogroups.com><OpenCV%
> > > > >> 40yahoogroups.com>
> > > > >> <OpenCV%
> > > > >> > > > 40yahoogroups.com>, "Jostein
> > > > >> > > >
> > > > >> > > > Austvik
> > > > >> > > > > > Jacobsen"
> > > > >> > > > > >
> > > > >> > > > > > <josteinaj@> wrote:
> > > > >> > > > > > >
> > > > >> > > > > > > If you've got your two rectangled areas stored as img1
> and
> > > > >> > img2 you
> > > > >> > > > > > could do
> > > > >> > > > > > > this to extract its keypoints and corresponding
> > > > >> descriptors:
> > > > >> > > > > > >
> > > > >> > > > > > > #define EXTENDED_DESCRIPTOR 1
> > > > >> > > > > > > CvSeq *kp1=NULL, *kp2=NULL;
> > > > >> > > > > > > CvSeq *desc1=NULL, *desc2=NULL;
> > > > >> > > > > > > CvMemStorage *storage = cvCreateMemStorage(0);
> > > > >> > > > > > > cvExtractSURF(img1, NULL, &kp1, &desc1, storage,
> > > > >> > cvSURFParams(600,
> > > > >> > > > > > > EXTENDED_DESCRIPTOR));
> > > > >> > > > > > > cvExtractSURF(img2, NULL, &kp2, &desc2, storage,
> > > > >> > cvSURFParams(600,
> > > > >> > > > > > > EXTENDED_DESCRIPTOR));
> > > > >> > > > > > >
> > > > >> > > > > > > You will have to correlate the descriptors with each
> > > > >> other to
> > > > >> > > > determine
> > > > >> > > > > > > which keypoints in each rectangle corresponds to one
> > > > >> > another. You
> > > > >> > > > > > could use
> > > > >> > > > > > > a BBF tree which is implemented in the latest version
> of
> > > > >> > OpenCV, but
> > > > >> > > > > > unless
> > > > >> > > > > > > your rectangle is huge, you might just as well just
> > > > >> correlate
> > > > >> > > > them the
> > > > >> > > > > > > standard way, which I do like this:
> > > > >> > > > > > >
> > > > >> > > > > > > #define CORRELATION_THRESHOLD 0.7
> > > > >> > > > > > > // brute-force attempt at correlating the two sets of
> > > > >> features
> > > > >> > > > > > > void bruteMatch(CvMat **points1, CvMat **points2,
> CvSeq
> > > > >> > *kp1, CvSeq
> > > > >> > > > > > *desc1,
> > > > >> > > > > > > CvSeq *kp2, CvSeq *desc2) {
> > > > >> > > > > > > int i,j,k;
> > > > >> > > > > > > double* avg1 =
> (double*)malloc(sizeof(double)*kp1->total);
> > > > >> > > > > > > double* avg2 =
> (double*)malloc(sizeof(double)*kp2->total);
> > > > >> > > > > > > double* dev1 =
> (double*)malloc(sizeof(double)*kp1->total);
> > > > >> > > > > > > double* dev2 =
> (double*)malloc(sizeof(double)*kp2->total);
> > > > >> > > > > > > int* best1 = (int*)malloc(sizeof(int)*kp1->total);
> > > > >> > > > > > > int* best2 = (int*)malloc(sizeof(int)*kp2->total);
> > > > >> > > > > > > double* best1corr =
> (double*)malloc(sizeof(double)*kp1-
> > > > >> >total);
> > > > >> > > > > > > double* best2corr =
> (double*)malloc(sizeof(double)*kp2-
> > > > >> >total);
> > > > >> > > > > > > float *seq1, *seq2;
> > > > >> > > > > > > int descriptor_size = EXTENDED_DESCRIPTOR ? 128 : 64;
> > > > >> > > > > > > for (i=0; i<kp1->total; i++) {
> > > > >> > > > > > > // find average and standard deviation of each
> descriptor
> > > > >> > > > > > > avg1[i] = 0;
> > > > >> > > > > > > dev1[i] = 0;
> > > > >> > > > > > > seq1 = (float*)cvGetSeqElem(desc1, i);
> > > > >> > > > > > > for (k=0; k<descriptor_size; k++) avg1[i] += seq1[k];
> > > > >> > > > > > > avg1[i] /= descriptor_size;
> > > > >> > > > > > > for (k=0; k<descriptor_size; k++) dev1[i] +=
> > > > >> > > > > > > (seq1[k]-avg1[i])*(seq1[k]-avg1[i]);
> > > > >> > > > > > > dev1[i] = sqrt(dev1[i]/descriptor_size);
> > > > >> > > > > > >
> > > > >> > > > > > > // initialize best1 and best1corr
> > > > >> > > > > > > best1[i] = -1;
> > > > >> > > > > > > best1corr[i] = -1.;
> > > > >> > > > > > > }
> > > > >> > > > > > > for (j=0; j<kp2->total; j++) {
> > > > >> > > > > > > // find average and standard deviation of each
> descriptor
> > > > >> > > > > > > avg2[j] = 0;
> > > > >> > > > > > > dev2[j] = 0;
> > > > >> > > > > > > seq2 = (float*)cvGetSeqElem(desc2, j);
> > > > >> > > > > > > for (k=0; k<descriptor_size; k++) avg2[j] += seq2[k];
> > > > >> > > > > > > avg2[j] /= descriptor_size;
> > > > >> > > > > > > for (k=0; k<descriptor_size; k++) dev2[j] +=
> > > > >> > > > > > > (seq2[k]-avg2[j])*(seq2[k]-avg2[j]);
> > > > >> > > > > > > dev2[j] = sqrt(dev2[j]/descriptor_size);
> > > > >> > > > > > >
> > > > >> > > > > > > // initialize best2 and best2corr
> > > > >> > > > > > > best2[j] = -1;
> > > > >> > > > > > > best2corr[j] = -1.;
> > > > >> > > > > > > }
> > > > >> > > > > > > double corr;
> > > > >> > > > > > > for (i = 0; i < kp1->total; ++i) {
> > > > >> > > > > > > seq1 = (float*)cvGetSeqElem(desc1, i);
> > > > >> > > > > > > for (j = 0; j < kp2->total; ++j) {
> > > > >> > > > > > > corr = 0;
> > > > >> > > > > > > seq2 = (float*)cvGetSeqElem(desc2, j);
> > > > >> > > > > > > for (k = 0; k < descriptor_size; ++k)
> > > > >> > > > > > > corr += (seq1[k]-avg1[i])*(seq2[k]-avg2[j]);
> > > > >> > > > > > > corr /= (descriptor_size-1)*dev1[i]*dev2[j];
> > > > >> > > > > > > if (corr > best1corr[i]) {
> > > > >> > > > > > > best1corr[i] = corr;
> > > > >> > > > > > > best1[i] = j;
> > > > >> > > > > > > }
> > > > >> > > > > > > if (corr > best2corr[j]) {
> > > > >> > > > > > > best2corr[j] = corr;
> > > > >> > > > > > > best2[j] = i;
> > > > >> > > > > > > }
> > > > >> > > > > > > }
> > > > >> > > > > > > }
> > > > >> > > > > > > j = 0;
> > > > >> > > > > > > for (i = 0; i < kp1->total; i++)
> > > > >> > > > > > > if (best2[best1[i]] == i && best1corr[i] >
> > > > >> > > > > > CORRELATION_THRESHOLD)
> > > > >> > > > > > > j++;
> > > > >> > > > > > > if (j == 0) return; // no matches found
> > > > >> > > > > > > *points1 = cvCreateMat(1,j,CV_32FC2);
> > > > >> > > > > > > *points2 = cvCreateMat(1,j,CV_32FC2);
> > > > >> > > > > > > CvPoint2D32f *p1, *p2;
> > > > >> > > > > > > j = 0;
> > > > >> > > > > > > for (i = 0; i < kp1->total; i++) {
> > > > >> > > > > > > if (best2[best1[i]] == i && best1corr[i] >
> > > > >> > > > > > CORRELATION_THRESHOLD) {
> > > > >> > > > > > > p1 = &((CvSURFPoint*)cvGetSeqElem(kp1,i))->pt;
> > > > >> > > > > > > p2 = &((CvSURFPoint*)cvGetSeqElem(kp2,best1[i]))->pt;
> > > > >> > > > > > > (*points1)->data.fl[j*2] = p1->x;
> > > > >> > > > > > > (*points1)->data.fl[j*2+1] = p1->y;
> > > > >> > > > > > > (*points2)->data.fl[j*2] = p2->x;
> > > > >> > > > > > > (*points2)->data.fl[j*2+1] = p2->y;
> > > > >> > > > > > > j++;
> > > > >> > > > > > > }
> > > > >> > > > > > > }
> > > > >> > > > > > > free(best2corr);
> > > > >> > > > > > > free(best1corr);
> > > > >> > > > > > > free(best2);
> > > > >> > > > > > > free(best1);
> > > > >> > > > > > > free(avg1);
> > > > >> > > > > > > free(avg2);
> > > > >> > > > > > > free(dev1);
> > > > >> > > > > > > free(dev2);
> > > > >> > > > > > > }
> > > > >> > > > > > >
> > > > >> > > > > > > If you construct a fundamental matrix (a model) for
> the
> > > > >> > > > transformation
> > > > >> > > > > > > between the two rectangles, you can further determine
> > > > >> which
> > > > >> > > > > > correspondences
> > > > >> > > > > > > are false (by how well they fit the model) and remove
> > > > >> them,
> > > > >> > which I
> > > > >> > > > > > like to
> > > > >> > > > > > > do like this:
> > > > >> > > > > > >
> > > > >> > > > > > > F = cvCreateMat(3,3,CV_32FC1);
> > > > >> > > > > > > CvMat *status = cvCreateMat(1,points1->cols,CV_8UC1);
> > > > >> > > > > > > int fm_count = cvFindFundamentalMat(
> points1,points2,F,
> > > > >> > > > > > > CV_FM_RANSAC,1.,0.99,status );
> > > > >> > > > > > > removeOutliers(&points1,&points2,status);
> > > > >> > > > > > >
> > > > >> > > > > > > where removeOutliers() is a function I wrote to clean
> up
> > > > >> after
> > > > >> > > > > > > cvFindFundamentalMat():
> > > > >> > > > > > >
> > > > >> > > > > > > // iterates the set of putative correspondences and
> > > > >> removes
> > > > >> > > > > > correspondences
> > > > >> > > > > > > marked as outliers by cvFindFundamentalMat()
> > > > >> > > > > > > void removeOutliers(CvMat **points1, CvMat **points2,
> > > > >> CvMat
> > > > >> > > > *status) {
> > > > >> > > > > > > CvMat *points1_ = *points1;
> > > > >> > > > > > > CvMat *points2_ = *points2;
> > > > >> > > > > > > int count = 0;
> > > > >> > > > > > > for (int i = 0; i < status->cols; i++) if
> > > > >> > > > > > (CV_MAT_ELEM(*status,unsigned
> > > > >> > > > > > > char,0,i)) count++;
> > > > >> > > > > > > if (!count) { // no inliers
> > > > >> > > > > > > *points1 = NULL;
> > > > >> > > > > > > *points2 = NULL;
> > > > >> > > > > > > }
> > > > >> > > > > > > else {
> > > > >> > > > > > > *points1 = cvCreateMat(1,count,CV_32FC2);
> > > > >> > > > > > > *points2 = cvCreateMat(1,count,CV_32FC2);
> > > > >> > > > > > > int j = 0;
> > > > >> > > > > > > for (int i = 0; i < status->cols; i++) {
> > > > >> > > > > > > if (CV_MAT_ELEM(*status,unsigned char,0,i)) {
> > > > >> > > > > > > (*points1)->data.fl[j*2] = points1_->data.fl[i*2];
> > > > >> > > > > > > //p1->x
> > > > >> > > > > > > (*points1)->data.fl[j*2+1] = points1_->data.fl[i*2+1];
> > > > >> > > > > > > //p1->y
> > > > >> > > > > > > (*points2)->data.fl[j*2] = points2_->data.fl[i*2];
> > > > >> > > > > > > //p2->x
> > > > >> > > > > > > (*points2)->data.fl[j*2+1] = points2_->data.fl[i*2+1];
> > > > >> > > > > > > //p2->y
> > > > >> > > > > > > j++;
> > > > >> > > > > > > }
> > > > >> > > > > > > }
> > > > >> > > > > > > }
> > > > >> > > > > > > cvReleaseMat(&points1_);
> > > > >> > > > > > > cvReleaseMat(&points2_);
> > > > >> > > > > > > }
> > > > >> > > > > > >
> > > > >> > > > > > >
> > > > >> > > > > > > I hope this helps.
> > > > >> > > > > > >
> > > > >> > > > > > > -Jostein
> > > > >> > > > > > >
> > > > >> > > > > > >
> > > > >> > > > > > > 2009/1/8 yair_movshovitz <yairmov@>
> > > > >> > > > > > >
> > > > >> > > > > > > > Hi Everyone,
> > > > >> > > > > > > >
> > > > >> > > > > > > > I'm trying to understand how to use the SURF
> features
> > > > >> > > > capabilities of
> > > > >> > > > > > > > openCV.
> > > > >> > > > > > > > My scenario is as follows:
> > > > >> > > > > > > > I have two rectangled areas in an image, which are
> > > > >> supposed to
> > > > >> > > > bound
> > > > >> > > > > > > > the same object. I would like to see how good is
> this
> > > > >> > > > assumption. In
> > > > >> > > > > > > > other words I would like to see how many features
> they
> > > > >> share.
> > > > >> > > > > > > >
> > > > >> > > > > > > > Can someone drop me a hint on how to use the SURF
> > > > >> > > > implementation of
> > > > >> > > > > > > > openCV (or direct me to somewhere that has some
> > > > >> documentation
> > > > >> > > > of it)
> > > > >> > > > > > > >
> > > > >> > > > > > > > Thanks,
> > > > >> > > > > > > > Yair
> > > > >> > > > > > > >
> > > > >> > > > > > > >
> > > > >> > > > > > > >
> > > > >> > > > > > >
> > > > >> > > > > > >
> > > > >> > > > > > > [Non-text portions of this message have been removed]
> > > > >> > > > > > >
> > > > >> > > > > >
> > > > >> > > > > >
> > > > >> > > > > >
> > > > >> > > > >
> > > > >> > > > >
> > > > >> > > > > [Non-text portions of this message have been removed]
> > > > >> > > > >
> > > > >> > > >
> > > > >> > > >
> > > > >> > > >
> > > > >> > >
> > > > >> > >
> > > > >> > > [Non-text portions of this message have been removed]
> > > > >> > >
> > > > >> >
> > > > >>
> > > > >>
> > > > >>
> > > > >
> > > > >
> > > >
> > > >
> > > > [Non-text portions of this message have been removed]
> > > >
> > >
> >
>
>  
>


[Non-text portions of this message have been removed]

Reply | Threaded
Open this post in threaded view
|

Re: cvExtractSURF

osid
Hi All,

I fear the cvExtractSurf Odyssey is not yet over...

After using the current version of cvExtractSURF, I think I have found another problem which is not yet fixed. I could verify this with the debugger:

Under some circumstances the descriptor function does not get any gradient pixels (at the border of the image), so some variables remain empty and the cvCartToPolar function throws an exception:

/***** this is at about line 554 in cvsurf.cpp ***/

  /*   ... some code */
 /* To find the dominant orientation, the gradients in x and y are
           sampled in a circle of radius 6s using wavelets of size 4s.
           We ensure the gradient wavelet size is even to ensure the
           wavelet pattern is balanced and symmetric around its center */
        /** some code **/
        for( kk = 0, nangle = 0; kk < nangle0; kk++ )
        {
            const int* ptr;
            float vx, vy;
            x = cvRound( center.x + apt[kk].x*s - (float)(grad_wav_size-1)/2 );
            y = cvRound( center.y + apt[kk].y*s - (float)(grad_wav_size-1)/2 );

            if( (unsigned)y >= (unsigned)sum->rows - grad_wav_size ||
                (unsigned)x >= (unsigned)sum->cols - grad_wav_size )
                continue;


            /* .... some code */


            // ** THESE are NEVER assigned because if(...) above always triggers!
            X[nangle] = vx*apt_w[kk]; Y[nangle] = vy*apt_w[kk];
            nangle++;
        }
       
        // EMPTY here ....
        _X.cols = _Y.cols = _angle.cols = nangle;

      ====> exception HERE because nangle == 0 ! ===
        cvCartToPolar( &_X, &_Y, 0, &_angle, 1 );



The temporary solution for me was to check if nangle == 0 and then
simply continue - this leaves an unassigned descriptor, but at least it does not crash the program.


Could someone verify and patch this please...

Thanks,


Oliver




--

 In [hidden email], Raluca Borca <raluca.borca@...> wrote:

>
> I build the current version from SVN and the problem was solved. Thanks.
>
> On Fri, Mar 13, 2009 at 10:00 PM, liuliu_0503
> <liuliu.1987+opencv@...<liuliu.1987%2Bopencv@...>
> > wrote:
>
> >   i believe this change was made later than rev1520, checkout the newest
> > rev1642 or any revision no earlier than rev1550.
> >
> > --- In [hidden email] <OpenCV%40yahoogroups.com>, "oliver_sidla"
> > <oliver_sidla@> wrote:
> > >
> > > Hello all,
> > >
> > > I am also having the same problem - with the latest version
> > > of cvSurf (its version 1520).
> > > When Did Ian Mahon do the modification?
> > >
> > > Thanks,
> > >
> > > Oliver
> > >
> > >
> > > --- In [hidden email] <OpenCV%40yahoogroups.com>, "liuliu_0503"
> > <liuliu.1987+opencv@> wrote:
> > > >
> > > > I think that the modifications made by Ian Mahon should fixed the
> > problem already. Check out the svn version of opencv and try again.
> > > >
> > > > --- In [hidden email] <OpenCV%40yahoogroups.com>, Raluca Borca
> > <raluca.borca@> wrote:
> > > > >
> > > > > To be more precise, I encountered the problem at the folowing line in
> > > > > cvsurf.cpp
> > > > >
> > > > > /* hessian detector */
> > > > > for( octave = k = 0; octave < params->nOctaves; octave++ )
> > > > > {
> > > > > for( sc = -1; sc <= params->nOctaveLayers; sc++, k++ )
> > > > > {
> > > > > if ( sc < 0 )
> > > > > sizeCache[k] = size = 7 << octave; // gaussian scale 1.0;
> > > > > else
> > > > > sizeCache[k] = size = (sc*6 + 9) << octave; // gaussian
> > > > > scale size*1.2/9.;
> > > > > scaleCache[k] = scale = MAX(size, SIZE0);
> > > > >
> > > > > hessian_rows = (sum->rows)*SIZE0/scale;
> > > > > hessian_cols = (sum->cols)*SIZE0/scale;
> > > > > hessians[k] = cvCreateMat( hessian_rows, hessian_cols, CV_32FC1
> > > > > );
> > > > > traces[k] = cvCreateMat( hessian_rows, hessian_cols, CV_32FC1
> > > > > ); // this is the point where I get an out of memory exception
> > > > >
> > > > > ......
> > > > >
> > > > > } } }1
> > > > >
> > > > > On Thu, Mar 12, 2009 at 12:26 PM, Raluca Borca <raluca.borca@>wrote:
> > > > >
> > > > > > Hello !
> > > > > >
> > > > > > I have encountered the same problem.
> > > > > >
> > > > > > Can anybody tell what is the solution ?
> > > > > >
> > > > > > Thanks.
> > > > > >
> > > > > >
> > > > > > On Sun, Mar 1, 2009 at 8:21 AM, Ricardo. <dadagori@> wrote:
> > > > > >
> > > > > >> Hello!
> > > > > >>
> > > > > >> I wonder if you found what was causing this error? 'Cause it's
> > > > > >> happening to me too and cannot figure it out.
> > > > > >>
> > > > > >> I'd appreciate it if you share how you solved it -if you did of
> > > > > >> course-.
> > > > > >>
> > > > > >> Regards,
> > > > > >> Ricardo
> > > > > >>
> > > > > >>
> > > > > >> --- In [hidden email] <OpenCV%40yahoogroups.com> <OpenCV%
> > 40yahoogroups.com>,
> > > > > >> "yair_movshovitz" <yairmov@> wrote:
> > > > > >> >
> > > > > >> > I did some investigating and found out the the error is
> > happening
> > > > > >> > inside the icvFastHessianDetector() function in cvsurf.cpp
> > > > > >> > It happens when the function tries to free the memory it
> > allocated:
> > > > > >> >
> > > > > >> > for( octave = k = 0; octave < params->nOctaves; octave++ )
> > > > > >> > for( sc = -1; sc <= params->nOctaveLayers; sc++, k++ )
> > > > > >> > {
> > > > > >> > //this line causes the error at some iteration.
> > > > > >> > cvReleaseMat( &hessians[k] );
> > > > > >> > cvReleaseMat( &traces[k] );
> > > > > >> > }
> > > > > >> >
> > > > > >> >
> > > > > >> > Anyone has an idea why this is happening?
> > > > > >> >
> > > > > >> > Thanks,
> > > > > >> > Yair
> > > > > >> >
> > > > > >> > --- In [hidden email] <OpenCV%40yahoogroups.com><OpenCV%
> > 40yahoogroups.com>, "Jostein
> > > > > >> Austvik Jacobsen"
> > > > > >> > <josteinaj@> wrote:
> > > > > >> > >
> > > > > >> > > I'm using Ubuntu Linux so I can't help you there. Sorry.
> > > > > >> > > Jostein
> > > > > >> > >
> > > > > >> > > 2009/1/20 yair_movshovitz <yairmov@>
> > > > > >> > >
> > > > > >> > > > Hi Jostein,
> > > > > >> > > >
> > > > > >> > > > Thanks again for helping me out.
> > > > > >> > > >
> > > > > >> > > > I have started using the cvExtractSURF function. and I have
> > the
> > > > > >> > > > following problem:
> > > > > >> > > > When I call the function I get this error -
> > > > > >> > > > Windows has triggered a breakpoint in (my program name).
> > > > > >> > > >
> > > > > >> > > > This may be due to a corruption of the heap, which indicates
> > a
> > > > > >> bug in
> > > > > >> > > > (my program name) or any of the DLLs it has loaded.
> > > > > >> > > >
> > > > > >> > > > Have you ever encountered this error before regarding this
> > > > > >> function?
> > > > > >> > > >
> > > > > >> > > > Thanks,
> > > > > >> > > >
> > > > > >> > > > Yair
> > > > > >> > > >
> > > > > >> > > > --- In [hidden email] <OpenCV%40yahoogroups.com><OpenCV%
> > 40yahoogroups.com> <OpenCV%
> > > > > >> 40yahoogroups.com>, "Jostein
> > > > > >> > Austvik
> > > > > >> > > > Jacobsen"
> > > > > >> > > > <josteinaj@> wrote:
> > > > > >> > > > >
> > > > > >> > > > > You can view the implementation of *cvExtractSURF(...)*
> > here:
> > > > > >> > > > >
> > > > > >> > > >
> > > > > >> > > >
> > > > > >> >
> > > > > >>
> > http://opencvlibrary.svn.sourceforge.net/viewvc/opencvlibrary/trunk/op
> > > > > >> encv/src/cv/cvsurf.cpp,
> > > > > >> > > > > however it doesn't contain much comments.
> > > > > >> > > > >
> > > > > >> > > > >
> > > > > >> > > > > *cvExtractSURF( const CvArr* img, const CvArr* mask,
> > CvSeq**
> > > > > >> > keypoints,
> > > > > >> > > > > CvSeq** descriptors, CvMemStorage* storage, CvSURFParams
> > > > > >> params )*
> > > > > >> > > > >
> > > > > >> > > > > Here, *img* is the image. Use an
> > > > > >> > > > > *IplImage<
> > http://opencv.willowgarage.com/wiki/CxCore#IplImage>
> > > > > >> > > > > * for the image. To load an image from disk, use
> > > > > >> > > > >
> > > > > >> > > >
> > > > > >> > *cvLoadImage(...)
> > > > > >> *<http://opencv.willowgarage.com/wiki/HighGui#cvLoadImage
> > > > > >> > > > >,
> > > > > >> > > > > and to create your own image, use
> > > > > >> > > > >
> > > > > >> > > > *cvCreateImage(...)*<
> > > > > >> > > > http://opencv.willowgarage.com/wiki/CxCore#CreateImage>.
> > > > > >> > > > > Lets say you have a IplImage *image* and want to extract
> > the
> > > > > >> > rectangle
> > > > > >> > > > > (x,y)->(x+dx,y+dy) from it as an IplImage rectangle, you
> > might
> > > > > >> > do this:
> > > > > >> > > > >
> > > > > >> > > > > CvSize size = cvSize(dx,dy);
> > > > > >> > > > > IplImage* rectangle = cvCreateImage(size, IPL_DEPTH_8U,
> > 1);
> > > > > >> > > > > for (int i = 0; i < dx; ++i) {
> > > > > >> > > > > for (int j = 0; j < dy; ++j) {
> > > > > >> > > > > CV_IMAGE_ELEM(rectangle,unsigned char,i,j) =
> > > > > >> > > > > CV_IMAGE_ELEM(image,unsigned char,x+i,y+j);
> > > > > >> > > > > }
> > > > > >> > > > > }
> > > > > >> > > > >
> > > > > >> > > > > I'm not sure how *mask* is used, but a quick google search
> > > > > >> gives
> > > > > >> > > > >
> > > > > >> > > >
> > > > > >> > > >
> > > > > >> >
> > http://www.emgu.com/wiki/files/1.4.0.0/html/ad54862f-3d1c-c177-3bdb-
> > > > > >> 768619f8dd90.htmwhich
> > > > > >> > > > > says "The optional input 8-bit mask. The features are only
> > > > > >> found in
> > > > > >> > > > > the areas that contain more than 50% of non-zero mask
> > > > > >> pixels". Just
> > > > > >> > > > set it
> > > > > >> > > > > to NULL.
> > > > > >> > > > >
> > > > > >> > > > > *keypoints* and
> > > > > >> > > > >
> > > > > >> > > >
> > > > > >> > *descriptors*<<a href="http://en.wikipedia.org/wiki/Feature_%">http://en.wikipedia.org/wiki/Feature_%
> > > > > >> 28computer_vision%29
> > > > > >> > > > >are
> > > > > >> > > > > where the results are placed. Initialize them as null-
> > > > > >> pointers and
> > > > > >> > > > > cvExtractSURF will do the rest for you. Afterwards you can
> > > > > >> access a
> > > > > >> > > > > descriptor and corresponding keypoint like this:
> > > > > >> > > > >
> > > > > >> > > > > int k = 0; // the keypoint you want. There are *keypoints-
> > > > > >> >total*
> > > > > >> > > > keypoints.
> > > > > >> > > > > float *seq = (float*)cvGetSeqElem(descriptors, k); // the
> > > > > >> > descriptor of
> > > > > >> > > > > length 64 or 128
> > > > > >> > > > > CvPoint2D32f *p = &((CvSURFPoint*)cvGetSeqElem(keypoints,
> > k))-
> > > > > >> >pt;
> > > > > >> > > > // the
> > > > > >> > > > > (x,y) coordinates of keypoint *k* can now be accessed as
> > *p-
> > > > > >> >x* and
> > > > > >> > > > *p->y*
> > > > > >> > > > >
> > > > > >> > > > > The *CvMemStorage*
> > > > > >> > > > >
> > > > > >> <http://opencv.willowgarage.com/wiki/CxCore#CvMemStorage>struct
> > > > > >> > > > > *storage* is used as a mechanism to simplify memory
> > > > > >> management. I
> > > > > >> > > > believe
> > > > > >> > > > > the *keypoints* and *descriptors* structures are put into
> > > > > >> *storage*,
> > > > > >> > > > so you
> > > > > >> > > > > can't release *storage* until you're done using
> > *keypoints*
> > > > > >> and
> > > > > >> > > > *descriptors
> > > > > >> > > > > *.Put a *CvMemStorage *storage = cvCreateMemStorage(0);*
> > > > > >> before your
> > > > > >> > > > first
> > > > > >> > > > > call to cvExtractSURF and *cvClearMemStorage(storage);*
> > after
> > > > > >> you're
> > > > > >> > > > done
> > > > > >> > > > > using *keypoints* and *descriptors*.
> > > > > >> > > > >
> > > > > >> > > > > SURF takes a couple of parameters through the
> > *CvSURFParams*
> > > > > >> struct
> > > > > >> > > > *params*.
> > > > > >> > > > > You create *params* with *cvSURFParams(double threshold,
> > int
> > > > > >> > > > > extended)*where threshold represents the "edgyness" that
> > is
> > > > > >> required
> > > > > >> > > > > from a feature to
> > > > > >> > > > > be recognized as a feature. It can be adjusted to retrieve
> > > > > >> more
> > > > > >> > or fewer
> > > > > >> > > > > features. In the paper
> > > > > >> > > > > <http://www.vision.ee.ethz.ch/%7Esurf/eccv06.pdf
> > >describing
> > > > > >> the SURF
> > > > > >> > > > > detector, they use a threshold of 600 on a 800 x 640
> > > > > >> > > > > image which returned 1418 features. The *extended*
> > parameter
> > > > > >> is
> > > > > >> > a simple
> > > > > >> > > > > boolean 1 or 0 which states whether or not to use the
> > extended
> > > > > >> > > > descriptor.
> > > > > >> > > > > The extended descriptor consists of 128 instead of 64
> > values
> > > > > >> which
> > > > > >> > > > should
> > > > > >> > > > > gives a better result at the cost of using more memory.
> > > > > >> Instead of
> > > > > >> > > > creating
> > > > > >> > > > > a new CvSURFParams struct for each call to cvExtractSURF,
> > you
> > > > > >> > could do:
> > > > > >> > > > >
> > > > > >> > > > > CvSURFParams params = cvSURFParams(600, 1);
> > > > > >> > > > > cvExtractSURF(..., params);
> > > > > >> > > > > cvExtractSURF(..., params);
> > > > > >> > > > >
> > > > > >> > > > >
> > > > > >> > > > > There you go. I hope I answered your question :)
> > > > > >> > > > >
> > > > > >> > > > > Jostein
> > > > > >> > > > >
> > > > > >> > > > >
> > > > > >> > > > > 2009/1/12 yair_movshovitz <yairmov@>
> > > > > >> > > > >
> > > > > >> > > > > > Hi Jostein,
> > > > > >> > > > > >
> > > > > >> > > > > > Thanks a lot for your help!
> > > > > >> > > > > >
> > > > > >> > > > > > Can you please explain the function parameters of
> > > > > >> cvExtractSURF?
> > > > > >> > > > > > I mean in - cvExtractSURF(img1, NULL, &kp1, &desc1,
> > storage,
> > > > > >> > > > > > cvSURFParams(600,EXTENDED_DESCRIPTOR));
> > > > > >> > > > > > what is the role of kp1, desc1, storage and the
> > SURFParams?
> > > > > >> > > > > > is storage just a temp area for the algorithm to use?
> > > > > >> > > > > >
> > > > > >> > > > > > Thanks again
> > > > > >> > > > > > Yair
> > > > > >> > > > > >
> > > > > >> > > > > > --- In [hidden email]<OpenCV%40yahoogroups.com><OpenCV%
> > 40yahoogroups.com><OpenCV%
> > > > > >> 40yahoogroups.com>
> > > > > >> <OpenCV%
> > > > > >> > > > 40yahoogroups.com>, "Jostein
> > > > > >> > > >
> > > > > >> > > > Austvik
> > > > > >> > > > > > Jacobsen"
> > > > > >> > > > > >
> > > > > >> > > > > > <josteinaj@> wrote:
> > > > > >> > > > > > >
> > > > > >> > > > > > > If you've got your two rectangled areas stored as img1
> > and
> > > > > >> > img2 you
> > > > > >> > > > > > could do
> > > > > >> > > > > > > this to extract its keypoints and corresponding
> > > > > >> descriptors:
> > > > > >> > > > > > >
> > > > > >> > > > > > > #define EXTENDED_DESCRIPTOR 1
> > > > > >> > > > > > > CvSeq *kp1=NULL, *kp2=NULL;
> > > > > >> > > > > > > CvSeq *desc1=NULL, *desc2=NULL;
> > > > > >> > > > > > > CvMemStorage *storage = cvCreateMemStorage(0);
> > > > > >> > > > > > > cvExtractSURF(img1, NULL, &kp1, &desc1, storage,
> > > > > >> > cvSURFParams(600,
> > > > > >> > > > > > > EXTENDED_DESCRIPTOR));
> > > > > >> > > > > > > cvExtractSURF(img2, NULL, &kp2, &desc2, storage,
> > > > > >> > cvSURFParams(600,
> > > > > >> > > > > > > EXTENDED_DESCRIPTOR));
> > > > > >> > > > > > >
> > > > > >> > > > > > > You will have to correlate the descriptors with each
> > > > > >> other to
> > > > > >> > > > determine
> > > > > >> > > > > > > which keypoints in each rectangle corresponds to one
> > > > > >> > another. You
> > > > > >> > > > > > could use
> > > > > >> > > > > > > a BBF tree which is implemented in the latest version
> > of
> > > > > >> > OpenCV, but
> > > > > >> > > > > > unless
> > > > > >> > > > > > > your rectangle is huge, you might just as well just
> > > > > >> correlate
> > > > > >> > > > them the
> > > > > >> > > > > > > standard way, which I do like this:
> > > > > >> > > > > > >
> > > > > >> > > > > > > #define CORRELATION_THRESHOLD 0.7
> > > > > >> > > > > > > // brute-force attempt at correlating the two sets of
> > > > > >> features
> > > > > >> > > > > > > void bruteMatch(CvMat **points1, CvMat **points2,
> > CvSeq
> > > > > >> > *kp1, CvSeq
> > > > > >> > > > > > *desc1,
> > > > > >> > > > > > > CvSeq *kp2, CvSeq *desc2) {
> > > > > >> > > > > > > int i,j,k;
> > > > > >> > > > > > > double* avg1 =
> > (double*)malloc(sizeof(double)*kp1->total);
> > > > > >> > > > > > > double* avg2 =
> > (double*)malloc(sizeof(double)*kp2->total);
> > > > > >> > > > > > > double* dev1 =
> > (double*)malloc(sizeof(double)*kp1->total);
> > > > > >> > > > > > > double* dev2 =
> > (double*)malloc(sizeof(double)*kp2->total);
> > > > > >> > > > > > > int* best1 = (int*)malloc(sizeof(int)*kp1->total);
> > > > > >> > > > > > > int* best2 = (int*)malloc(sizeof(int)*kp2->total);
> > > > > >> > > > > > > double* best1corr =
> > (double*)malloc(sizeof(double)*kp1-
> > > > > >> >total);
> > > > > >> > > > > > > double* best2corr =
> > (double*)malloc(sizeof(double)*kp2-
> > > > > >> >total);
> > > > > >> > > > > > > float *seq1, *seq2;
> > > > > >> > > > > > > int descriptor_size = EXTENDED_DESCRIPTOR ? 128 : 64;
> > > > > >> > > > > > > for (i=0; i<kp1->total; i++) {
> > > > > >> > > > > > > // find average and standard deviation of each
> > descriptor
> > > > > >> > > > > > > avg1[i] = 0;
> > > > > >> > > > > > > dev1[i] = 0;
> > > > > >> > > > > > > seq1 = (float*)cvGetSeqElem(desc1, i);
> > > > > >> > > > > > > for (k=0; k<descriptor_size; k++) avg1[i] += seq1[k];
> > > > > >> > > > > > > avg1[i] /= descriptor_size;
> > > > > >> > > > > > > for (k=0; k<descriptor_size; k++) dev1[i] +=
> > > > > >> > > > > > > (seq1[k]-avg1[i])*(seq1[k]-avg1[i]);
> > > > > >> > > > > > > dev1[i] = sqrt(dev1[i]/descriptor_size);
> > > > > >> > > > > > >
> > > > > >> > > > > > > // initialize best1 and best1corr
> > > > > >> > > > > > > best1[i] = -1;
> > > > > >> > > > > > > best1corr[i] = -1.;
> > > > > >> > > > > > > }
> > > > > >> > > > > > > for (j=0; j<kp2->total; j++) {
> > > > > >> > > > > > > // find average and standard deviation of each
> > descriptor
> > > > > >> > > > > > > avg2[j] = 0;
> > > > > >> > > > > > > dev2[j] = 0;
> > > > > >> > > > > > > seq2 = (float*)cvGetSeqElem(desc2, j);
> > > > > >> > > > > > > for (k=0; k<descriptor_size; k++) avg2[j] += seq2[k];
> > > > > >> > > > > > > avg2[j] /= descriptor_size;
> > > > > >> > > > > > > for (k=0; k<descriptor_size; k++) dev2[j] +=
> > > > > >> > > > > > > (seq2[k]-avg2[j])*(seq2[k]-avg2[j]);
> > > > > >> > > > > > > dev2[j] = sqrt(dev2[j]/descriptor_size);
> > > > > >> > > > > > >
> > > > > >> > > > > > > // initialize best2 and best2corr
> > > > > >> > > > > > > best2[j] = -1;
> > > > > >> > > > > > > best2corr[j] = -1.;
> > > > > >> > > > > > > }
> > > > > >> > > > > > > double corr;
> > > > > >> > > > > > > for (i = 0; i < kp1->total; ++i) {
> > > > > >> > > > > > > seq1 = (float*)cvGetSeqElem(desc1, i);
> > > > > >> > > > > > > for (j = 0; j < kp2->total; ++j) {
> > > > > >> > > > > > > corr = 0;
> > > > > >> > > > > > > seq2 = (float*)cvGetSeqElem(desc2, j);
> > > > > >> > > > > > > for (k = 0; k < descriptor_size; ++k)
> > > > > >> > > > > > > corr += (seq1[k]-avg1[i])*(seq2[k]-avg2[j]);
> > > > > >> > > > > > > corr /= (descriptor_size-1)*dev1[i]*dev2[j];
> > > > > >> > > > > > > if (corr > best1corr[i]) {
> > > > > >> > > > > > > best1corr[i] = corr;
> > > > > >> > > > > > > best1[i] = j;
> > > > > >> > > > > > > }
> > > > > >> > > > > > > if (corr > best2corr[j]) {
> > > > > >> > > > > > > best2corr[j] = corr;
> > > > > >> > > > > > > best2[j] = i;
> > > > > >> > > > > > > }
> > > > > >> > > > > > > }
> > > > > >> > > > > > > }
> > > > > >> > > > > > > j = 0;
> > > > > >> > > > > > > for (i = 0; i < kp1->total; i++)
> > > > > >> > > > > > > if (best2[best1[i]] == i && best1corr[i] >
> > > > > >> > > > > > CORRELATION_THRESHOLD)
> > > > > >> > > > > > > j++;
> > > > > >> > > > > > > if (j == 0) return; // no matches found
> > > > > >> > > > > > > *points1 = cvCreateMat(1,j,CV_32FC2);
> > > > > >> > > > > > > *points2 = cvCreateMat(1,j,CV_32FC2);
> > > > > >> > > > > > > CvPoint2D32f *p1, *p2;
> > > > > >> > > > > > > j = 0;
> > > > > >> > > > > > > for (i = 0; i < kp1->total; i++) {
> > > > > >> > > > > > > if (best2[best1[i]] == i && best1corr[i] >
> > > > > >> > > > > > CORRELATION_THRESHOLD) {
> > > > > >> > > > > > > p1 = &((CvSURFPoint*)cvGetSeqElem(kp1,i))->pt;
> > > > > >> > > > > > > p2 = &((CvSURFPoint*)cvGetSeqElem(kp2,best1[i]))->pt;
> > > > > >> > > > > > > (*points1)->data.fl[j*2] = p1->x;
> > > > > >> > > > > > > (*points1)->data.fl[j*2+1] = p1->y;
> > > > > >> > > > > > > (*points2)->data.fl[j*2] = p2->x;
> > > > > >> > > > > > > (*points2)->data.fl[j*2+1] = p2->y;
> > > > > >> > > > > > > j++;
> > > > > >> > > > > > > }
> > > > > >> > > > > > > }
> > > > > >> > > > > > > free(best2corr);
> > > > > >> > > > > > > free(best1corr);
> > > > > >> > > > > > > free(best2);
> > > > > >> > > > > > > free(best1);
> > > > > >> > > > > > > free(avg1);
> > > > > >> > > > > > > free(avg2);
> > > > > >> > > > > > > free(dev1);
> > > > > >> > > > > > > free(dev2);
> > > > > >> > > > > > > }
> > > > > >> > > > > > >
> > > > > >> > > > > > > If you construct a fundamental matrix (a model) for
> > the
> > > > > >> > > > transformation
> > > > > >> > > > > > > between the two rectangles, you can further determine
> > > > > >> which
> > > > > >> > > > > > correspondences
> > > > > >> > > > > > > are false (by how well they fit the model) and remove
> > > > > >> them,
> > > > > >> > which I
> > > > > >> > > > > > like to
> > > > > >> > > > > > > do like this:
> > > > > >> > > > > > >
> > > > > >> > > > > > > F = cvCreateMat(3,3,CV_32FC1);
> > > > > >> > > > > > > CvMat *status = cvCreateMat(1,points1->cols,CV_8UC1);
> > > > > >> > > > > > > int fm_count = cvFindFundamentalMat(
> > points1,points2,F,
> > > > > >> > > > > > > CV_FM_RANSAC,1.,0.99,status );
> > > > > >> > > > > > > removeOutliers(&points1,&points2,status);
> > > > > >> > > > > > >
> > > > > >> > > > > > > where removeOutliers() is a function I wrote to clean
> > up
> > > > > >> after
> > > > > >> > > > > > > cvFindFundamentalMat():
> > > > > >> > > > > > >
> > > > > >> > > > > > > // iterates the set of putative correspondences and
> > > > > >> removes
> > > > > >> > > > > > correspondences
> > > > > >> > > > > > > marked as outliers by cvFindFundamentalMat()
> > > > > >> > > > > > > void removeOutliers(CvMat **points1, CvMat **points2,
> > > > > >> CvMat
> > > > > >> > > > *status) {
> > > > > >> > > > > > > CvMat *points1_ = *points1;
> > > > > >> > > > > > > CvMat *points2_ = *points2;
> > > > > >> > > > > > > int count = 0;
> > > > > >> > > > > > > for (int i = 0; i < status->cols; i++) if
> > > > > >> > > > > > (CV_MAT_ELEM(*status,unsigned
> > > > > >> > > > > > > char,0,i)) count++;
> > > > > >> > > > > > > if (!count) { // no inliers
> > > > > >> > > > > > > *points1 = NULL;
> > > > > >> > > > > > > *points2 = NULL;
> > > > > >> > > > > > > }
> > > > > >> > > > > > > else {
> > > > > >> > > > > > > *points1 = cvCreateMat(1,count,CV_32FC2);
> > > > > >> > > > > > > *points2 = cvCreateMat(1,count,CV_32FC2);
> > > > > >> > > > > > > int j = 0;
> > > > > >> > > > > > > for (int i = 0; i < status->cols; i++) {
> > > > > >> > > > > > > if (CV_MAT_ELEM(*status,unsigned char,0,i)) {
> > > > > >> > > > > > > (*points1)->data.fl[j*2] = points1_->data.fl[i*2];
> > > > > >> > > > > > > //p1->x
> > > > > >> > > > > > > (*points1)->data.fl[j*2+1] = points1_->data.fl[i*2+1];
> > > > > >> > > > > > > //p1->y
> > > > > >> > > > > > > (*points2)->data.fl[j*2] = points2_->data.fl[i*2];
> > > > > >> > > > > > > //p2->x
> > > > > >> > > > > > > (*points2)->data.fl[j*2+1] = points2_->data.fl[i*2+1];
> > > > > >> > > > > > > //p2->y
> > > > > >> > > > > > > j++;
> > > > > >> > > > > > > }
> > > > > >> > > > > > > }
> > > > > >> > > > > > > }
> > > > > >> > > > > > > cvReleaseMat(&points1_);
> > > > > >> > > > > > > cvReleaseMat(&points2_);
> > > > > >> > > > > > > }
> > > > > >> > > > > > >
> > > > > >> > > > > > >
> > > > > >> > > > > > > I hope this helps.
> > > > > >> > > > > > >
> > > > > >> > > > > > > -Jostein
> > > > > >> > > > > > >
> > > > > >> > > > > > >
> > > > > >> > > > > > > 2009/1/8 yair_movshovitz <yairmov@>
> > > > > >> > > > > > >
> > > > > >> > > > > > > > Hi Everyone,
> > > > > >> > > > > > > >
> > > > > >> > > > > > > > I'm trying to understand how to use the SURF
> > features
> > > > > >> > > > capabilities of
> > > > > >> > > > > > > > openCV.
> > > > > >> > > > > > > > My scenario is as follows:
> > > > > >> > > > > > > > I have two rectangled areas in an image, which are
> > > > > >> supposed to
> > > > > >> > > > bound
> > > > > >> > > > > > > > the same object. I would like to see how good is
> > this
> > > > > >> > > > assumption. In
> > > > > >> > > > > > > > other words I would like to see how many features
> > they
> > > > > >> share.
> > > > > >> > > > > > > >
> > > > > >> > > > > > > > Can someone drop me a hint on how to use the SURF
> > > > > >> > > > implementation of
> > > > > >> > > > > > > > openCV (or direct me to somewhere that has some
> > > > > >> documentation
> > > > > >> > > > of it)
> > > > > >> > > > > > > >
> > > > > >> > > > > > > > Thanks,
> > > > > >> > > > > > > > Yair
> > > > > >> > > > > > > >
> > > > > >> > > > > > > >
> > > > > >> > > > > > > >
> > > > > >> > > > > > >
> > > > > >> > > > > > >
> > > > > >> > > > > > > [Non-text portions of this message have been removed]
> > > > > >> > > > > > >
> > > > > >> > > > > >
> > > > > >> > > > > >
> > > > > >> > > > > >
> > > > > >> > > > >
> > > > > >> > > > >
> > > > > >> > > > > [Non-text portions of this message have been removed]
> > > > > >> > > > >
> > > > > >> > > >
> > > > > >> > > >
> > > > > >> > > >
> > > > > >> > >
> > > > > >> > >
> > > > > >> > > [Non-text portions of this message have been removed]
> > > > > >> > >
> > > > > >> >
> > > > > >>
> > > > > >>
> > > > > >>
> > > > > >
> > > > > >
> > > > >
> > > > >
> > > > > [Non-text portions of this message have been removed]
> > > > >
> > > >
> > >
> >
> >  
> >
>
>
> [Non-text portions of this message have been removed]
>


Reply | Threaded
Open this post in threaded view
|

Re: cvExtractSURF

Martin Bäuml
Hi Oliver,

FYI I already filed a bug report for that a couple of days ago.  You'll also find a patch attached to that bug report which removes the interest points for which no angles could be sampled.

See https://sourceforge.net/tracker2/?func=detail&aid=2676785&group_id=22870&atid=376677 for the bug report and the patch...

-Martin


--- In [hidden email], "oliver_sidla" <oliver_sidla@...> wrote:

>
> Hi All,
>
> I fear the cvExtractSurf Odyssey is not yet over...
>
> after using the current  version of cvExtractSURF, I think I have found another problem which is not yet fixed. I could verify this with the debugger:
>
> Under some circumstances the descriptor function does not get any gradient pixels (at the border of the image) and some vars become empty and the cvCartToPolar function throws an exception:
>
> /***** this is about in Line 554 in cvsurf.cpp ***/
>
>   /*   ... some code */
>  /* To find the dominant orientation, the gradients in x and y are
>            sampled in a circle of radius 6s using wavelets of size 4s.
>            We ensure the gradient wavelet size is even to ensure the
>            wavelet pattern is balanced and symmetric around its center */
>         /** some code **/
>         for( kk = 0, nangle = 0; kk < nangle0; kk++ )
>         {
>             const int* ptr;
>             float vx, vy;
>             x = cvRound( center.x + apt[kk].x*s - (float)(grad_wav_size-1)/2 );
>             y = cvRound( center.y + apt[kk].y*s - (float)(grad_wav_size-1)/2 );
>
>             if( (unsigned)y >= (unsigned)sum->rows - grad_wav_size ||
>                 (unsigned)x >= (unsigned)sum->cols - grad_wav_size )
>                 continue;
>
>
>             /* .... some code */
>
>
>             // ** THESE are NEVER assigned because if(...) above always triggers!
>             X[nangle] = vx*apt_w[kk]; Y[nangle] = vy*apt_w[kk];
>             nangle++;
>         }
>        
>         // EMPTY here ....
>         _X.cols = _Y.cols = _angle.cols = nangle;
>
>       ====> exception HERE because nangle == 0 ! ===
>         cvCartToPolar( &_X, &_Y, 0, &_angle, 1 );
>
>
>
> The temporary solution for me was to  check if nangle == 0 and then
> simply continue - this leaves an unassigned descriptor, but at least it does not crash the program.
>
>
> Could someone verify and patch this please...
>
> Thanks,
>
>
> Oliver
>
>
>
>
> --
>
>  In [hidden email], Raluca Borca <raluca.borca@> wrote:
> >
> > I build the current version from SVN and the problem was solved. Thanks.
> >
> > On Fri, Mar 13, 2009 at 10:00 PM, liuliu_0503
> > <liuliu.1987+opencv@<liuliu.1987%2Bopencv@>
> > > wrote:
> >
> > >   i believe this change was made later than rev1520, checkout the newest
> > > rev1642 or any revision no earlier than rev1550.
> > >
> > > --- In [hidden email] <OpenCV%40yahoogroups.com>, "oliver_sidla"
> > > <oliver_sidla@> wrote:
> > > >
> > > > Hello all,
> > > >
> > > > I am also having the same problem - with the latest version
> > > > of cvSurf (its version 1520).
> > > > When Did Ian Mahon do the modification?
> > > >
> > > > Thanks,
> > > >
> > > > Oliver
> > > >
> > > >
> > > > --- In [hidden email] <OpenCV%40yahoogroups.com>, "liuliu_0503"
> > > <liuliu.1987+opencv@> wrote:
> > > > >
> > > > > I think that the modifications made by Ian Mahon should fixed the
> > > problem already. Check out the svn version of opencv and try again.
> > > > >
> > > > > --- In [hidden email] <OpenCV%40yahoogroups.com>, Raluca Borca
> > > <raluca.borca@> wrote:
> > > > > >
> > > > > > To be more precise, I encountered the problem at the folowing line in
> > > > > > cvsurf.cpp
> > > > > >
> > > > > > /* hessian detector */
> > > > > > for( octave = k = 0; octave < params->nOctaves; octave++ )
> > > > > > {
> > > > > > for( sc = -1; sc <= params->nOctaveLayers; sc++, k++ )
> > > > > > {
> > > > > > if ( sc < 0 )
> > > > > > sizeCache[k] = size = 7 << octave; // gaussian scale 1.0;
> > > > > > else
> > > > > > sizeCache[k] = size = (sc*6 + 9) << octave; // gaussian
> > > > > > scale size*1.2/9.;
> > > > > > scaleCache[k] = scale = MAX(size, SIZE0);
> > > > > >
> > > > > > hessian_rows = (sum->rows)*SIZE0/scale;
> > > > > > hessian_cols = (sum->cols)*SIZE0/scale;
> > > > > > hessians[k] = cvCreateMat( hessian_rows, hessian_cols, CV_32FC1
> > > > > > );
> > > > > > traces[k] = cvCreateMat( hessian_rows, hessian_cols, CV_32FC1
> > > > > > ); // this is the point where I get an out of memory exception
> > > > > >
> > > > > > ......
> > > > > >
> > > > > > } } }
> > > > > >
> > > > > > On Thu, Mar 12, 2009 at 12:26 PM, Raluca Borca <raluca.borca@>wrote:
> > > > > >
> > > > > > > Hello !
> > > > > > >
> > > > > > > I have encountered the same problem.
> > > > > > >
> > > > > > > Can anybody tell what is the solution ?
> > > > > > >
> > > > > > > Thanks.
> > > > > > >
> > > > > > >
> > > > > > > On Sun, Mar 1, 2009 at 8:21 AM, Ricardo. <dadagori@> wrote:
> > > > > > >
> > > > > > >> Hello!
> > > > > > >>
> > > > > > >> I wonder if you found what was causing this error? 'Cause it's
> > > > > > >> happening to me too and cannot figure it out.
> > > > > > >>
> > > > > > >> I'd appreciate it if you share how you solved it -if you did of
> > > > > > >> course-.
> > > > > > >>
> > > > > > >> Regards,
> > > > > > >> Ricardo
> > > > > > >>
> > > > > > >>
> > > > > > >> --- In [hidden email] <OpenCV%40yahoogroups.com> <OpenCV%
> > > 40yahoogroups.com>,
> > > > > > >> "yair_movshovitz" <yairmov@> wrote:
> > > > > > >> >
> > > > > > >> > I did some investigating and found out the the error is
> > > happening
> > > > > > >> > inside the icvFastHessianDetector() function in cvsurf.cpp
> > > > > > >> > It happens when the function tries to free the memory it
> > > allocated:
> > > > > > >> >
> > > > > > >> > for( octave = k = 0; octave < params->nOctaves; octave++ )
> > > > > > >> > for( sc = -1; sc <= params->nOctaveLayers; sc++, k++ )
> > > > > > >> > {
> > > > > > >> > //this line causes the error at some iteration.
> > > > > > >> > cvReleaseMat( &hessians[k] );
> > > > > > >> > cvReleaseMat( &traces[k] );
> > > > > > >> > }
> > > > > > >> >
> > > > > > >> >
> > > > > > >> > Anyone has an idea why this is happening?
> > > > > > >> >
> > > > > > >> > Thanks,
> > > > > > >> > Yair
> > > > > > >> >
> > > > > > >> > --- In [hidden email] <OpenCV%40yahoogroups.com><OpenCV%
> > > 40yahoogroups.com>, "Jostein
> > > > > > >> Austvik Jacobsen"
> > > > > > >> > <josteinaj@> wrote:
> > > > > > >> > >
> > > > > > >> > > I'm using Ubuntu Linux so I can't help you there. Sorry.
> > > > > > >> > > Jostein
> > > > > > >> > >
> > > > > > >> > > 2009/1/20 yair_movshovitz <yairmov@>
> > > > > > >> > >
> > > > > > >> > > > Hi Jostein,
> > > > > > >> > > >
> > > > > > >> > > > Thanks again for helping me out.
> > > > > > >> > > >
> > > > > > >> > > > I have started using the cvExtractSURF function. and I have
> > > the
> > > > > > >> > > > following problem:
> > > > > > >> > > > When I call the function I get this error -
> > > > > > >> > > > Windows has triggered a breakpoint in (my program name).
> > > > > > >> > > >
> > > > > > >> > > > This may be due to a corruption of the heap, which indicates
> > > a
> > > > > > >> bug in
> > > > > > >> > > > (my program name) or any of the DLLs it has loaded.
> > > > > > >> > > >
> > > > > > >> > > > Have you ever encountered this error before regarding this
> > > > > > >> function?
> > > > > > >> > > >
> > > > > > >> > > > Thanks,
> > > > > > >> > > >
> > > > > > >> > > > Yair
> > > > > > >> > > >
> > > > > > >> > > > --- In [hidden email] <OpenCV%40yahoogroups.com><OpenCV%
> > > 40yahoogroups.com> <OpenCV%
> > > > > > >> 40yahoogroups.com>, "Jostein
> > > > > > >> > Austvik
> > > > > > >> > > > Jacobsen"
> > > > > > >> > > > <josteinaj@> wrote:
> > > > > > >> > > > >
> > > > > > >> > > > > You can view the implementation of *cvExtractSURF(...)*
> > > here:
> > > > > > >> > > > >
> > > > > > >> > > >
> > > > > > >> > > >
> > > > > > >> >
> > > > > > >>
> > > http://opencvlibrary.svn.sourceforge.net/viewvc/opencvlibrary/trunk/op
> > > > > > >> encv/src/cv/cvsurf.cpp,
> > > > > > >> > > > > however it doesn't contain much comments.
> > > > > > >> > > > >
> > > > > > >> > > > >
> > > > > > >> > > > > *cvExtractSURF( const CvArr* img, const CvArr* mask,
> > > CvSeq**
> > > > > > >> > keypoints,
> > > > > > >> > > > > CvSeq** descriptors, CvMemStorage* storage, CvSURFParams
> > > > > > >> params )*
> > > > > > >> > > > >
> > > > > > >> > > > > Here, *img* is the image. Use an
> > > > > > >> > > > > *IplImage<
> > > http://opencv.willowgarage.com/wiki/CxCore#IplImage>
> > > > > > >> > > > > * for the image. To load an image from disk, use
> > > > > > >> > > > >
> > > > > > >> > > >
> > > > > > >> > *cvLoadImage(...)
> > > > > > >> *<http://opencv.willowgarage.com/wiki/HighGui#cvLoadImage
> > > > > > >> > > > >,
> > > > > > >> > > > > and to create your own image, use
> > > > > > >> > > > >
> > > > > > >> > > > *cvCreateImage(...)*<
> > > > > > >> > > > http://opencv.willowgarage.com/wiki/CxCore#CreateImage>.
> > > > > > >> > > > > Lets say you have a IplImage *image* and want to extract
> > > the
> > > > > > >> > rectangle
> > > > > > >> > > > > (x,y)->(x+dx,y+dy) from it as an IplImage rectangle, you
> > > might
> > > > > > >> > do this:
> > > > > > >> > > > >
> > > > > > >> > > > > CvSize size = cvSize(dx,dy);
> > > > > > >> > > > > IplImage* rectangle = cvCreateImage(size, IPL_DEPTH_8U,
> > > 1);
> > > > > > >> > > > > for (int i = 0; i < dx; ++i) {
> > > > > > >> > > > > for (int j = 0; j < dy; ++j) {
> > > > > > >> > > > > CV_IMAGE_ELEM(rectangle,unsigned char,i,j) =
> > > > > > >> > > > > CV_IMAGE_ELEM(image,unsigned char,x+i,y+j);
> > > > > > >> > > > > }
> > > > > > >> > > > > }
> > > > > > >> > > > >
> > > > > > >> > > > > I'm not sure how *mask* is used, but a quick google search
> > > > > > >> gives
> > > > > > >> > > > >
> > > > > > >> > > >
> > > > > > >> > > >
> > > > > > >> >
> > > http://www.emgu.com/wiki/files/1.4.0.0/html/ad54862f-3d1c-c177-3bdb-
> > > > > > >> 768619f8dd90.htm which
> > > > > > >> > > > > says "The optional input 8-bit mask. The features are only
> > > > > > >> found in
> > > > > > >> > > > > the areas that contain more than 50% of non-zero mask
> > > > > > >> pixels". Just
> > > > > > >> > > > set it
> > > > > > >> > > > > to NULL.
> > > > > > >> > > > >
> > > > > > >> > > > > *keypoints* and
> > > > > > >> > > > >
> > > > > > >> > > >
> > > > > > >> > *descriptors*<http://en.wikipedia.org/wiki/Feature_%28computer_vision%29>
> > > > > > >> > > > >are
> > > > > > >> > > > > where the results are placed. Initialize them as null-
> > > > > > >> pointers and
> > > > > > >> > > > > cvExtractSURF will do the rest for you. Afterwards you can
> > > > > > >> access a
> > > > > > >> > > > > descriptor and corresponding keypoint like this:
> > > > > > >> > > > >
> > > > > > >> > > > > int k = 0; // the keypoint you want. There are *keypoints-
> > > > > > >> >total*
> > > > > > >> > > > keypoints.
> > > > > > >> > > > > float *seq = (float*)cvGetSeqElem(descriptors, k); // the
> > > > > > >> > descriptor of
> > > > > > >> > > > > length 64 or 128
> > > > > > >> > > > > CvPoint2D32f *p = &((CvSURFPoint*)cvGetSeqElem(keypoints,
> > > k))-
> > > > > > >> >pt;
> > > > > > >> > > > // the
> > > > > > >> > > > > (x,y) coordinates of keypoint *k* can now be accessed as
> > > *p-
> > > > > > >> >x* and
> > > > > > >> > > > *p->y*
> > > > > > >> > > > >
> > > > > > >> > > > > The *CvMemStorage*
> > > > > > >> > > > >
> > > > > > >> <http://opencv.willowgarage.com/wiki/CxCore#CvMemStorage>struct
> > > > > > >> > > > > *storage* is used as a mechanism to simplify memory
> > > > > > >> management. I
> > > > > > >> > > > believe
> > > > > > >> > > > > the *keypoints* and *descriptors* structures are put into
> > > > > > >> *storage*,
> > > > > > >> > > > so you
> > > > > > >> > > > > can't release *storage* until you're done using
> > > *keypoints*
> > > > > > >> and
> > > > > > >> > > > *descriptors
> > > > > > >> > > > > *.Put a *CvMemStorage *storage = cvCreateMemStorage(0);*
> > > > > > >> before your
> > > > > > >> > > > first
> > > > > > >> > > > > call to cvExtractSURF and *cvClearMemStorage(storage);*
> > > after
> > > > > > >> you're
> > > > > > >> > > > done
> > > > > > >> > > > > using *keypoints* and *descriptors*.
> > > > > > >> > > > >
> > > > > > >> > > > > SURF takes a couple of parameters through the
> > > *CvSURFParams*
> > > > > > >> struct
> > > > > > >> > > > *params*.
> > > > > > >> > > > > You create *params* with *cvSURFParams(double threshold,
> > > int
> > > > > > >> > > > > extended)*where threshold represents the "edgyness" that
> > > is
> > > > > > >> required
> > > > > > >> > > > > from a feature to
> > > > > > >> > > > > be recognized as a feature. It can be adjusted to retrieve
> > > > > > >> more
> > > > > > >> > or fewer
> > > > > > >> > > > > features. In the paper
> > > > > > >> > > > > <http://www.vision.ee.ethz.ch/%7Esurf/eccv06.pdf
> > > >describing
> > > > > > >> the SURF
> > > > > > >> > > > > detector, they use a threshold of 600 on a 800 x 640
> > > > > > >> > > > > image which returned 1418 features. The *extended*
> > > parameter
> > > > > > >> is
> > > > > > >> > a simple
> > > > > > >> > > > > boolean 1 or 0 which states whether or not to use the
> > > extended
> > > > > > >> > > > descriptor.
> > > > > > >> > > > > The extended descriptor consists of 128 instead of 64
> > > values
> > > > > > >> which
> > > > > > >> > > > should
> > > > > > >> > > > > gives a better result at the cost of using more memory.
> > > > > > >> Instead of
> > > > > > >> > > > creating
> > > > > > >> > > > > a new CvSURFParams struct for each call to cvExtractSURF,
> > > you
> > > > > > >> > could do:
> > > > > > >> > > > >
> > > > > > >> > > > > CvSURFParams params = cvSURFParams(600, 1);
> > > > > > >> > > > > cvExtractSURF(..., params);
> > > > > > >> > > > > cvExtractSURF(..., params);
> > > > > > >> > > > >
> > > > > > >> > > > >
> > > > > > >> > > > > There you go. I hope I answered your question :)
> > > > > > >> > > > >
> > > > > > >> > > > > Jostein
> > > > > > >> > > > >
> > > > > > >> > > > >
> > > > > > >> > > > > 2009/1/12 yair_movshovitz <yairmov@>
> > > > > > >> > > > >
> > > > > > >> > > > > > Hi Jostein,
> > > > > > >> > > > > >
> > > > > > >> > > > > > Thanks a lot for your help!
> > > > > > >> > > > > >
> > > > > > >> > > > > > Can you please explain the function parameters of
> > > > > > >> cvExtractSURF?
> > > > > > >> > > > > > I mean in - cvExtractSURF(img1, NULL, &kp1, &desc1,
> > > storage,
> > > > > > >> > > > > > cvSURFParams(600,EXTENDED_DESCRIPTOR));
> > > > > > >> > > > > > what is the role of kp1, desc1, storage and the
> > > SURFParams?
> > > > > > >> > > > > > is storage just a temp area for the algorithm to use?
> > > > > > >> > > > > >
> > > > > > >> > > > > > Thanks again
> > > > > > >> > > > > > Yair
> > > > > > >> > > > > >
> > > > > > >> > > > > > --- In [hidden email]<OpenCV%40yahoogroups.com><OpenCV%
> > > 40yahoogroups.com><OpenCV%
> > > > > > >> 40yahoogroups.com>
> > > > > > >> <OpenCV%
> > > > > > >> > > > 40yahoogroups.com>, "Jostein
> > > > > > >> > > >
> > > > > > >> > > > Austvik
> > > > > > >> > > > > > Jacobsen"
> > > > > > >> > > > > >
> > > > > > >> > > > > > <josteinaj@> wrote:
> > > > > > >> > > > > > >
> > > > > > >> > > > > > > If you've got your two rectangled areas stored as img1
> > > and
> > > > > > >> > img2 you
> > > > > > >> > > > > > could do
> > > > > > >> > > > > > > this to extract its keypoints and corresponding
> > > > > > >> descriptors:
> > > > > > >> > > > > > >
> > > > > > >> > > > > > > #define EXTENDED_DESCRIPTOR 1
> > > > > > >> > > > > > > CvSeq *kp1=NULL, *kp2=NULL;
> > > > > > >> > > > > > > CvSeq *desc1=NULL, *desc2=NULL;
> > > > > > >> > > > > > > CvMemStorage *storage = cvCreateMemStorage(0);
> > > > > > >> > > > > > > cvExtractSURF(img1, NULL, &kp1, &desc1, storage,
> > > > > > >> > cvSURFParams(600,
> > > > > > >> > > > > > > EXTENDED_DESCRIPTOR));
> > > > > > >> > > > > > > cvExtractSURF(img2, NULL, &kp2, &desc2, storage,
> > > > > > >> > cvSURFParams(600,
> > > > > > >> > > > > > > EXTENDED_DESCRIPTOR));
> > > > > > >> > > > > > >
> > > > > > >> > > > > > > You will have to correlate the descriptors with each
> > > > > > >> other to
> > > > > > >> > > > determine
> > > > > > >> > > > > > > which keypoints in each rectangle corresponds to one
> > > > > > >> > another. You
> > > > > > >> > > > > > could use
> > > > > > >> > > > > > > a BBF tree which is implemented in the latest version
> > > of
> > > > > > >> > OpenCV, but
> > > > > > >> > > > > > unless
> > > > > > >> > > > > > > your rectangle is huge, you might just as well just
> > > > > > >> correlate
> > > > > > >> > > > them the
> > > > > > >> > > > > > > standard way, which I do like this:
> > > > > > >> > > > > > >
> > > > > > >> > > > > > > #define CORRELATION_THRESHOLD 0.7
> > > > > > >> > > > > > > // brute-force attempt at correlating the two sets of
> > > > > > >> features
> > > > > > >> > > > > > > void bruteMatch(CvMat **points1, CvMat **points2,
> > > CvSeq
> > > > > > >> > *kp1, CvSeq
> > > > > > >> > > > > > *desc1,
> > > > > > >> > > > > > > CvSeq *kp2, CvSeq *desc2) {
> > > > > > >> > > > > > > int i,j,k;
> > > > > > >> > > > > > > double* avg1 =
> > > (double*)malloc(sizeof(double)*kp1->total);
> > > > > > >> > > > > > > double* avg2 =
> > > (double*)malloc(sizeof(double)*kp2->total);
> > > > > > >> > > > > > > double* dev1 =
> > > (double*)malloc(sizeof(double)*kp1->total);
> > > > > > >> > > > > > > double* dev2 =
> > > (double*)malloc(sizeof(double)*kp2->total);
> > > > > > >> > > > > > > int* best1 = (int*)malloc(sizeof(int)*kp1->total);
> > > > > > >> > > > > > > int* best2 = (int*)malloc(sizeof(int)*kp2->total);
> > > > > > >> > > > > > > double* best1corr =
> > > (double*)malloc(sizeof(double)*kp1-
> > > > > > >> >total);
> > > > > > >> > > > > > > double* best2corr =
> > > (double*)malloc(sizeof(double)*kp2-
> > > > > > >> >total);
> > > > > > >> > > > > > > float *seq1, *seq2;
> > > > > > >> > > > > > > int descriptor_size = EXTENDED_DESCRIPTOR ? 128 : 64;
> > > > > > >> > > > > > > for (i=0; i<kp1->total; i++) {
> > > > > > >> > > > > > > // find average and standard deviation of each
> > > descriptor
> > > > > > >> > > > > > > avg1[i] = 0;
> > > > > > >> > > > > > > dev1[i] = 0;
> > > > > > >> > > > > > > seq1 = (float*)cvGetSeqElem(desc1, i);
> > > > > > >> > > > > > > for (k=0; k<descriptor_size; k++) avg1[i] += seq1[k];
> > > > > > >> > > > > > > avg1[i] /= descriptor_size;
> > > > > > >> > > > > > > for (k=0; k<descriptor_size; k++) dev1[i] +=
> > > > > > >> > > > > > > (seq1[k]-avg1[i])*(seq1[k]-avg1[i]);
> > > > > > >> > > > > > > dev1[i] = sqrt(dev1[i]/descriptor_size);
> > > > > > >> > > > > > >
> > > > > > >> > > > > > > // initialize best1 and best1corr
> > > > > > >> > > > > > > best1[i] = -1;
> > > > > > >> > > > > > > best1corr[i] = -1.;
> > > > > > >> > > > > > > }
> > > > > > >> > > > > > > for (j=0; j<kp2->total; j++) {
> > > > > > >> > > > > > > // find average and standard deviation of each
> > > descriptor
> > > > > > >> > > > > > > avg2[j] = 0;
> > > > > > >> > > > > > > dev2[j] = 0;
> > > > > > >> > > > > > > seq2 = (float*)cvGetSeqElem(desc2, j);
> > > > > > >> > > > > > > for (k=0; k<descriptor_size; k++) avg2[j] += seq2[k];
> > > > > > >> > > > > > > avg2[j] /= descriptor_size;
> > > > > > >> > > > > > > for (k=0; k<descriptor_size; k++) dev2[j] +=
> > > > > > >> > > > > > > (seq2[k]-avg2[j])*(seq2[k]-avg2[j]);
> > > > > > >> > > > > > > dev2[j] = sqrt(dev2[j]/descriptor_size);
> > > > > > >> > > > > > >
> > > > > > >> > > > > > > // initialize best2 and best2corr
> > > > > > >> > > > > > > best2[j] = -1;
> > > > > > >> > > > > > > best2corr[j] = -1.;
> > > > > > >> > > > > > > }
> > > > > > >> > > > > > > double corr;
> > > > > > >> > > > > > > for (i = 0; i < kp1->total; ++i) {
> > > > > > >> > > > > > > seq1 = (float*)cvGetSeqElem(desc1, i);
> > > > > > >> > > > > > > for (j = 0; j < kp2->total; ++j) {
> > > > > > >> > > > > > > corr = 0;
> > > > > > >> > > > > > > seq2 = (float*)cvGetSeqElem(desc2, j);
> > > > > > >> > > > > > > for (k = 0; k < descriptor_size; ++k)
> > > > > > >> > > > > > > corr += (seq1[k]-avg1[i])*(seq2[k]-avg2[j]);
> > > > > > >> > > > > > > corr /= (descriptor_size-1)*dev1[i]*dev2[j];
> > > > > > >> > > > > > > if (corr > best1corr[i]) {
> > > > > > >> > > > > > > best1corr[i] = corr;
> > > > > > >> > > > > > > best1[i] = j;
> > > > > > >> > > > > > > }
> > > > > > >> > > > > > > if (corr > best2corr[j]) {
> > > > > > >> > > > > > > best2corr[j] = corr;
> > > > > > >> > > > > > > best2[j] = i;
> > > > > > >> > > > > > > }
> > > > > > >> > > > > > > }
> > > > > > >> > > > > > > }
> > > > > > >> > > > > > > j = 0;
> > > > > > >> > > > > > > for (i = 0; i < kp1->total; i++)
> > > > > > >> > > > > > > if (best2[best1[i]] == i && best1corr[i] >
> > > > > > >> > > > > > CORRELATION_THRESHOLD)
> > > > > > >> > > > > > > j++;
> > > > > > >> > > > > > > if (j == 0) return; // no matches found
> > > > > > >> > > > > > > *points1 = cvCreateMat(1,j,CV_32FC2);
> > > > > > >> > > > > > > *points2 = cvCreateMat(1,j,CV_32FC2);
> > > > > > >> > > > > > > CvPoint2D32f *p1, *p2;
> > > > > > >> > > > > > > j = 0;
> > > > > > >> > > > > > > for (i = 0; i < kp1->total; i++) {
> > > > > > >> > > > > > > if (best2[best1[i]] == i && best1corr[i] >
> > > > > > >> > > > > > CORRELATION_THRESHOLD) {
> > > > > > >> > > > > > > p1 = &((CvSURFPoint*)cvGetSeqElem(kp1,i))->pt;
> > > > > > >> > > > > > > p2 = &((CvSURFPoint*)cvGetSeqElem(kp2,best1[i]))->pt;
> > > > > > >> > > > > > > (*points1)->data.fl[j*2] = p1->x;
> > > > > > >> > > > > > > (*points1)->data.fl[j*2+1] = p1->y;
> > > > > > >> > > > > > > (*points2)->data.fl[j*2] = p2->x;
> > > > > > >> > > > > > > (*points2)->data.fl[j*2+1] = p2->y;
> > > > > > >> > > > > > > j++;
> > > > > > >> > > > > > > }
> > > > > > >> > > > > > > }
> > > > > > >> > > > > > > free(best2corr);
> > > > > > >> > > > > > > free(best1corr);
> > > > > > >> > > > > > > free(best2);
> > > > > > >> > > > > > > free(best1);
> > > > > > >> > > > > > > free(avg1);
> > > > > > >> > > > > > > free(avg2);
> > > > > > >> > > > > > > free(dev1);
> > > > > > >> > > > > > > free(dev2);
> > > > > > >> > > > > > > }
> > > > > > >> > > > > > >
> > > > > > >> > > > > > > If you construct a fundamental matrix (a model) for
> > > the
> > > > > > >> > > > transformation
> > > > > > >> > > > > > > between the two rectangles, you can further determine
> > > > > > >> which
> > > > > > >> > > > > > correspondences
> > > > > > >> > > > > > > are false (by how well they fit the model) and remove
> > > > > > >> them,
> > > > > > >> > which I
> > > > > > >> > > > > > like to
> > > > > > >> > > > > > > do like this:
> > > > > > >> > > > > > >
> > > > > > >> > > > > > > F = cvCreateMat(3,3,CV_32FC1);
> > > > > > >> > > > > > > CvMat *status = cvCreateMat(1,points1->cols,CV_8UC1);
> > > > > > >> > > > > > > int fm_count = cvFindFundamentalMat(
> > > points1,points2,F,
> > > > > > >> > > > > > > CV_FM_RANSAC,1.,0.99,status );
> > > > > > >> > > > > > > removeOutliers(&points1,&points2,status);
> > > > > > >> > > > > > >
> > > > > > >> > > > > > > where removeOutliers() is a function I wrote to clean
> > > up
> > > > > > >> after
> > > > > > >> > > > > > > cvFindFundamentalMat():
> > > > > > >> > > > > > >
> > > > > > >> > > > > > > // iterates the set of putative correspondences and
> > > > > > >> removes
> > > > > > >> > > > > > correspondences
> > > > > > >> > > > > > > marked as outliers by cvFindFundamentalMat()
> > > > > > >> > > > > > > void removeOutliers(CvMat **points1, CvMat **points2,
> > > > > > >> CvMat
> > > > > > >> > > > *status) {
> > > > > > >> > > > > > > CvMat *points1_ = *points1;
> > > > > > >> > > > > > > CvMat *points2_ = *points2;
> > > > > > >> > > > > > > int count = 0;
> > > > > > >> > > > > > > for (int i = 0; i < status->cols; i++) if
> > > > > > >> > > > > > (CV_MAT_ELEM(*status,unsigned
> > > > > > >> > > > > > > char,0,i)) count++;
> > > > > > >> > > > > > > if (!count) { // no inliers
> > > > > > >> > > > > > > *points1 = NULL;
> > > > > > >> > > > > > > *points2 = NULL;
> > > > > > >> > > > > > > }
> > > > > > >> > > > > > > else {
> > > > > > >> > > > > > > *points1 = cvCreateMat(1,count,CV_32FC2);
> > > > > > >> > > > > > > *points2 = cvCreateMat(1,count,CV_32FC2);
> > > > > > >> > > > > > > int j = 0;
> > > > > > >> > > > > > > for (int i = 0; i < status->cols; i++) {
> > > > > > >> > > > > > > if (CV_MAT_ELEM(*status,unsigned char,0,i)) {
> > > > > > >> > > > > > > (*points1)->data.fl[j*2] = points1_->data.fl[i*2];
> > > > > > >> > > > > > > //p1->x
> > > > > > >> > > > > > > (*points1)->data.fl[j*2+1] = points1_->data.fl[i*2+1];
> > > > > > >> > > > > > > //p1->y
> > > > > > >> > > > > > > (*points2)->data.fl[j*2] = points2_->data.fl[i*2];
> > > > > > >> > > > > > > //p2->x
> > > > > > >> > > > > > > (*points2)->data.fl[j*2+1] = points2_->data.fl[i*2+1];
> > > > > > >> > > > > > > //p2->y
> > > > > > >> > > > > > > j++;
> > > > > > >> > > > > > > }
> > > > > > >> > > > > > > }
> > > > > > >> > > > > > > }
> > > > > > >> > > > > > > cvReleaseMat(&points1_);
> > > > > > >> > > > > > > cvReleaseMat(&points2_);
> > > > > > >> > > > > > > }
> > > > > > >> > > > > > >
> > > > > > >> > > > > > >
> > > > > > >> > > > > > > I hope this helps.
> > > > > > >> > > > > > >
> > > > > > >> > > > > > > -Jostein
> > > > > > >> > > > > > >
> > > > > > >> > > > > > >
> > > > > > >> > > > > > > 2009/1/8 yair_movshovitz <yairmov@>
> > > > > > >> > > > > > >
> > > > > > >> > > > > > > > Hi Everyone,
> > > > > > >> > > > > > > >
> > > > > > >> > > > > > > > I'm trying to understand how to use the SURF
> > > features
> > > > > > >> > > > capabilities of
> > > > > > >> > > > > > > > openCV.
> > > > > > >> > > > > > > > My scenario is as follows:
> > > > > > >> > > > > > > > I have two rectangled areas in an image, which are
> > > > > > >> supposed to
> > > > > > >> > > > bound
> > > > > > >> > > > > > > > the same object. I would like to see how good is
> > > this
> > > > > > >> > > > assumption. In
> > > > > > >> > > > > > > > other words I would like to see how many features
> > > they
> > > > > > >> share.
> > > > > > >> > > > > > > >
> > > > > > >> > > > > > > > Can someone drop me a hint on how to use the SURF
> > > > > > >> > > > implementation of
> > > > > > >> > > > > > > > openCV (or direct me to somewhere that has some
> > > > > > >> documentation
> > > > > > >> > > > of it)
> > > > > > >> > > > > > > >
> > > > > > >> > > > > > > > Thanks,
> > > > > > >> > > > > > > > Yair
> > > > > > >> > > > > > > >
> > > > > > >> > > > > > > >
> > > > > > >> > > > > > > >
> > > > > > >> > > > > > >
> > > > > > >> > > > > > >
> > > > > > >> > > > > > > [Non-text portions of this message have been removed]
> > > > > > >> > > > > > >
> > > > > > >> > > > > >
> > > > > > >> > > > > >
> > > > > > >> > > > > >
> > > > > > >> > > > >
> > > > > > >> > > > >
> > > > > > >> > > > > [Non-text portions of this message have been removed]
> > > > > > >> > > > >
> > > > > > >> > > >
> > > > > > >> > > >
> > > > > > >> > > >
> > > > > > >> > >
> > > > > > >> > >
> > > > > > >> > > [Non-text portions of this message have been removed]
> > > > > > >> > >
> > > > > > >> >
> > > > > > >>
> > > > > > >>
> > > > > > >>
> > > > > > >
> > > > > > >
> > > > > >
> > > > > >
> > > > > > [Non-text portions of this message have been removed]
> > > > > >
> > > > >
> > > >
> > >
> > >  
> > >
> >
> >
> > [Non-text portions of this message have been removed]
> >
>


Reply | Threaded
Open this post in threaded view
|

Re: cvExtractSURF

osid
Hi Martin,

thanks for the info. Looks good.
But, I came across another problem in the same loop. Under some circumstances
x/y co-ordinates in this loop become negative and create a segmentation violation
(= a crash). I could verify this also in the debugger. My solution for this was
to insert a fix:

for( kk = 0, nangle = 0; kk < nangle0; kk++ )
  535         {
  536             const int* ptr;
  537             float vx, vy;
    //
   // ===>  x/y can become < 0 and crash the function
  //                fix (note: insert this check after x and y are computed below):
                      if (x < 0 || y < 0) continue;
  //
  538             x = cvRound( center.x + apt[kk].x*s - (float)(grad_wav_size-1)/2 );
  539             y = cvRound( center.y + apt[kk].y*s - (float)(grad_wav_size-1)/2 );
  540             if( (unsigned)y >= (unsigned)sum->rows - grad_wav_size ||
  541                 (unsigned)x >= (unsigned)sum->cols - grad_wav_size )
  542                 continue;
  543             ptr = sum_ptr + x + y*sum_cols;
  544             vx = icvCalcHaarPattern( ptr, dx_t, 2 );
  545             vy = icvCalcHaarPattern( ptr, dy_t, 2 );
  546             X[nangle] = vx*apt_w[kk]; Y[nangle] = vy*apt_w[kk];
  547             nangle++;
  548         }


Your patch + the new fix now work in my video sequence, finally.
I will provide liuliu with the offending image so that he can fix
those two problems in the best possible way (he is the original author of the function).

Best wishes

Oliver





--- In [hidden email], "smaddimaddin" <martinbaeuml@...> wrote:

>
> Hi Oliver,
>
> FYI I already filed a bug report for that a couple of days ago.  You'll also find a patch attached to that bug report which removes the interest points for which no angles could be sampled.
>
> See https://sourceforge.net/tracker2/?func=detail&aid=2676785&group_id=22870&atid=376677 for the bug report and the patch...
>
> -Martin
>
>
> --- In [hidden email], "oliver_sidla" <oliver_sidla@> wrote:
> >
> > Hi All,
> >
> > I fear the cvExtractSurf Odyssey is not yet over...
> >
> > after using the current  version of cvExtractSURF, I think I have found another problem which is not yet fixed. I could verify this with the debugger:
> >
> > Under some circumstances the descriptor function does not get any gradient pixels (at the border of the image)  and some vars become empty and the PolarToCart function throws an exception:
> >
> > /***** this is about in Line 554 in cvsurf.cpp ***/
> >
> >   /*   ... some code */
> >  /* To find the dominant orientation, the gradients in x and y are
> >            sampled in a circle of radius 6s using wavelets of size 4s.
> >            We ensure the gradient wavelet size is even to ensure the
> >            wavelet pattern is balanced and symmetric around its center */
> >         /** some code **/
> >         for( kk = 0, nangle = 0; kk < nangle0; kk++ )
> >         {
> >             const int* ptr;
> >             float vx, vy;
> >             x = cvRound( center.x + apt[kk].x*s - (float)(grad_wav_size-1)/2 );
> >             y = cvRound( center.y + apt[kk].y*s - (float)(grad_wav_size-1)/2 );
> >
> >             if( (unsigned)y >= (unsigned)sum->rows - grad_wav_size ||
> >                 (unsigned)x >= (unsigned)sum->cols - grad_wav_size )
> >                 continue;
> >
> >
> >             /* .... some code */
> >
> >
> >             // ** THESE are NEVER assigned because if(...) above always triggers!
> >             X[nangle] = vx*apt_w[kk]; Y[nangle] = vy*apt_w[kk];
> >             nangle++;
> >         }
> >        
> >         // EMPTY here ....
> >         _X.cols = _Y.cols = _angle.cols = nangle;
> >
> >       ====> exception HERE because nangle == 0 ! ===
> >         cvCartToPolar( &_X, &_Y, 0, &_angle, 1 );
> >
> >
> >
> > The temporary solution for me was to  check if nangle == 0 and then
> > simply continue - this leaves an unassigned descriptor, but at least it does not crash the program.
> >
> >
> > Could someone verify and patch this please...
> >
> > Thanks,
> >
> >
> > Oliver
> >
> >
> >
> >
> > --
> >
> >  In [hidden email], Raluca Borca <raluca.borca@> wrote:
> > >
> > > I build the current version from SVN and the problem was solved. Thanks.
> > >
> > > On Fri, Mar 13, 2009 at 10:00 PM, liuliu_0503
> > > <liuliu.1987+opencv@<liuliu.1987%2Bopencv@>
> > > > wrote:
> > >
> > > >   i believe this change was made later than rev1520, checkout the newest
> > > > rev1642 or any revision no earlier than rev1550.
> > > >
> > > > --- In [hidden email] <OpenCV%40yahoogroups.com>, "oliver_sidla"
> > > > <oliver_sidla@> wrote:
> > > > >
> > > > > Hello all,
> > > > >
> > > > > I am also having the same problem - with the latest version
> > > > > of cvSurf (its version 1520).
> > > > > When Did Ian Mahon do the modification?
> > > > >
> > > > > Thanks,
> > > > >
> > > > > Oliver
> > > > >
> > > > >
> > > > > --- In [hidden email] <OpenCV%40yahoogroups.com>, "liuliu_0503"
> > > > <liuliu.1987+opencv@> wrote:
> > > > > >
> > > > > > I think that the modifications made by Ian Mahon should fixed the
> > > > problem already. Check out the svn version of opencv and try again.
> > > > > >
> > > > > > --- In [hidden email] <OpenCV%40yahoogroups.com>, Raluca Borca
> > > > <raluca.borca@> wrote:
> > > > > > >
> > > > > > > To be more precise, I encountered the problem at the folowing line in
> > > > > > > cvsurf.cpp
> > > > > > >
> > > > > > > /* hessian detector */
> > > > > > > for( octave = k = 0; octave < params->nOctaves; octave++ )
> > > > > > > {
> > > > > > > for( sc = -1; sc <= params->nOctaveLayers; sc++, k++ )
> > > > > > > {
> > > > > > > if ( sc < 0 )
> > > > > > > sizeCache[k] = size = 7 << octave; // gaussian scale 1.0;
> > > > > > > else
> > > > > > > sizeCache[k] = size = (sc*6 + 9) << octave; // gaussian
> > > > > > > scale size*1.2/9.;
> > > > > > > scaleCache[k] = scale = MAX(size, SIZE0);
> > > > > > >
> > > > > > > hessian_rows = (sum->rows)*SIZE0/scale;
> > > > > > > hessian_cols = (sum->cols)*SIZE0/scale;
> > > > > > > hessians[k] = cvCreateMat( hessian_rows, hessian_cols, CV_32FC1
> > > > > > > );
> > > > > > > traces[k] = cvCreateMat( hessian_rows, hessian_cols, CV_32FC1
> > > > > > > ); // this is the point where I get an out of memory exception
> > > > > > >
> > > > > > > ......
> > > > > > >
> > > > > > > } } }
> > > > > > >
> > > > > > > On Thu, Mar 12, 2009 at 12:26 PM, Raluca Borca <raluca.borca@>wrote:
> > > > > > >
> > > > > > > > Hello !
> > > > > > > >
> > > > > > > > I have encountered the same problem.
> > > > > > > >
> > > > > > > > Can anybody tell what is the solution ?
> > > > > > > >
> > > > > > > > Thanks.
> > > > > > > >
> > > > > > > >
> > > > > > > > On Sun, Mar 1, 2009 at 8:21 AM, Ricardo. <dadagori@> wrote:
> > > > > > > >
> > > > > > > >> Hello!
> > > > > > > >>
> > > > > > > >> I wonder if you found what was causing this error? 'Cause it's
> > > > > > > >> happening to me too and cannot figure it out.
> > > > > > > >>
> > > > > > > >> I'd appreciate it if you share how you solved it -if you did of
> > > > > > > >> course-.
> > > > > > > >>
> > > > > > > >> Regards,
> > > > > > > >> Ricardo
> > > > > > > >>
> > > > > > > >>
> > > > > > > >> --- In [hidden email] <OpenCV%40yahoogroups.com> <OpenCV%
> > > > 40yahoogroups.com>,
> > > > > > > >> "yair_movshovitz" <yairmov@> wrote:
> > > > > > > >> >
> > > > > > > >> > I did some investigating and found out the the error is
> > > > happening
> > > > > > > >> > inside the icvFastHessianDetector() function in cvsurf.cpp
> > > > > > > >> > It happens when the function tries to free the memory it
> > > > allocated:
> > > > > > > >> >
> > > > > > > >> > for( octave = k = 0; octave < params->nOctaves; octave++ )
> > > > > > > >> > for( sc = -1; sc <= params->nOctaveLayers; sc++, k++ )
> > > > > > > >> > {
> > > > > > > >> > //this line causes the error at some iteration.
> > > > > > > >> > cvReleaseMat( &hessians[k] );
> > > > > > > >> > cvReleaseMat( &traces[k] );
> > > > > > > >> > }
> > > > > > > >> >
> > > > > > > >> >
> > > > > > > >> > Anyone has an idea why this is happening?
> > > > > > > >> >
> > > > > > > >> > Thanks,
> > > > > > > >> > Yair
> > > > > > > >> >
> > > > > > > >> > --- In [hidden email] <OpenCV%40yahoogroups.com><OpenCV%
> > > > 40yahoogroups.com>, "Jostein
> > > > > > > >> Austvik Jacobsen"
> > > > > > > >> > <josteinaj@> wrote:
> > > > > > > >> > >
> > > > > > > >> > > I'm using Ubuntu Linux so I can't help you there. Sorry.
> > > > > > > >> > > Jostein
> > > > > > > >> > >
> > > > > > > >> > > 2009/1/20 yair_movshovitz <yairmov@>
> > > > > > > >> > >
> > > > > > > >> > > > Hi Jostein,
> > > > > > > >> > > >
> > > > > > > >> > > > Thanks again for helping me out.
> > > > > > > >> > > >
> > > > > > > >> > > > I have started using the cvExtractSURF function. and I have
> > > > the
> > > > > > > >> > > > following problem:
> > > > > > > >> > > > When I call the function I get this error -
> > > > > > > >> > > > Windows has triggered a breakpoint in (my program name).
> > > > > > > >> > > >
> > > > > > > >> > > > This may be due to a corruption of the heap, which indicates
> > > > a
> > > > > > > >> bug in
> > > > > > > >> > > > (my program name) or any of the DLLs it has loaded.
> > > > > > > >> > > >
> > > > > > > >> > > > Have you ever encountered this error before regarding this
> > > > > > > >> function?
> > > > > > > >> > > >
> > > > > > > >> > > > Thanks,
> > > > > > > >> > > >
> > > > > > > >> > > > Yair
> > > > > > > >> > > >
> > > > > > > >> > > > --- In [hidden email] <OpenCV%40yahoogroups.com><OpenCV%
> > > > 40yahoogroups.com> <OpenCV%
> > > > > > > >> 40yahoogroups.com>, "Jostein
> > > > > > > >> > Austvik
> > > > > > > >> > > > Jacobsen"
> > > > > > > >> > > > <josteinaj@> wrote:
> > > > > > > >> > > > >
> > > > > > > >> > > > > You can view the implementation of *cvExtractSURF(...)*
> > > > here:
> > > > > > > >> > > > >
> > > > > > > >> > > >
> > > > > > > >> > > >
> > > > > > > >> >
> > > > > > > >>
> > > > http://opencvlibrary.svn.sourceforge.net/viewvc/opencvlibrary/trunk/op
> > > > > > > >> encv/src/cv/cvsurf.cpp,
> > > > > > > >> > > > > however it doesn't contain much comments.
> > > > > > > >> > > > >
> > > > > > > >> > > > >
> > > > > > > >> > > > > *cvExtractSURF( const CvArr* img, const CvArr* mask,
> > > > CvSeq**
> > > > > > > >> > keypoints,
> > > > > > > >> > > > > CvSeq** descriptors, CvMemStorage* storage, CvSURFParams
> > > > > > > >> params )*
> > > > > > > >> > > > >
> > > > > > > >> > > > > Here, *img* is the image. Use an
> > > > > > > >> > > > > *IplImage<
> > > > http://opencv.willowgarage.com/wiki/CxCore#IplImage>
> > > > > > > >> > > > > * for the image. To load an image from disk, use
> > > > > > > >> > > > >
> > > > > > > >> > > >
> > > > > > > >> > *cvLoadImage(...)
> > > > > > > >> *<http://opencv.willowgarage.com/wiki/HighGui#cvLoadImage
> > > > > > > >> > > > >,
> > > > > > > >> > > > > and to create your own image, use
> > > > > > > >> > > > >
> > > > > > > >> > > > *cvCreateImage(...)*<
> > > > > > > >> > > > http://opencv.willowgarage.com/wiki/CxCore#CreateImage>.
> > > > > > > >> > > > > Lets say you have a IplImage *image* and want to extract
> > > > the
> > > > > > > >> > rectangle
> > > > > > > >> > > > > (x,y)->(x+dx,y+dy) from it as an IplImage rectangle, you
> > > > might
> > > > > > > >> > do this:
> > > > > > > >> > > > >
> > > > > > > >> > > > > CvSize size = cvSize(dx,dy);
> > > > > > > >> > > > > IplImage* rectangle = cvCreateImage(size, IPL_DEPTH_8U,
> > > > 1);
> > > > > > > >> > > > > for (int i = 0; i < dx; ++i) {
> > > > > > > >> > > > > for (int j = 0; j < dy; ++j) {
> > > > > > > >> > > > > CV_IMAGE_ELEM(rectangle,unsigned char,i,j) =
> > > > > > > >> > > > > CV_IMAGE_ELEM(image,unsigned char,x+i,y+j);
> > > > > > > >> > > > > }
> > > > > > > >> > > > > }
> > > > > > > >> > > > >
> > > > > > > >> > > > > I'm not sure how *mask* is used, but a quick google search
> > > > > > > >> gives
> > > > > > > >> > > > >
> > > > > > > >> > > >
> > > > > > > >> > > >
> > > > > > > >> >
> > > > http://www.emgu.com/wiki/files/1.4.0.0/html/ad54862f-3d1c-c177-3bdb-
> > > > > > > >> 768619f8dd90.htmwhich
> > > > > > > >> > > > > says "The optional input 8-bit mask. The features are only
> > > > > > > >> found in
> > > > > > > >> > > > > the areas that contain more than 50% of non-zero mask
> > > > > > > >> pixels". Just
> > > > > > > >> > > > set it
> > > > > > > >> > > > > to NULL.
> > > > > > > >> > > > >
> > > > > > > >> > > > > *keypoints* and
> > > > > > > >> > > > >
> > > > > > > >> > > >
> > > > > > > >> > *descriptors*<<a href="http://en.wikipedia.org/wiki/Feature_%">http://en.wikipedia.org/wiki/Feature_%
> > > > > > > >> 28computer_vision%29
> > > > > > > >> > > > >are
> > > > > > > >> > > > > where the results are placed. Initialize them as null-
> > > > > > > >> pointers and
> > > > > > > >> > > > > cvExtractSURF will do the rest for you. Afterwards you can
> > > > > > > >> access a
> > > > > > > >> > > > > descriptor and corresponding keypoint like this:
> > > > > > > >> > > > >
> > > > > > > >> > > > > int k = 0; // the keypoint you want. There are *keypoints-
> > > > > > > >> >total*
> > > > > > > >> > > > keypoints.
> > > > > > > >> > > > > float *seq = (float*)cvGetSeqElem(descriptors, k); // the
> > > > > > > >> > descriptor of
> > > > > > > >> > > > > length 64 or 128
> > > > > > > >> > > > > CvPoint2D32f *p = &((CvSURFPoint*)cvGetSeqElem(keypoints,
> > > > k))-
> > > > > > > >> >pt;
> > > > > > > >> > > > // the
> > > > > > > >> > > > > (x,y) coordinates of keypoint *k* can now be accessed as
> > > > *p-
> > > > > > > >> >x* and
> > > > > > > >> > > > *p->y*
> > > > > > > >> > > > >
> > > > > > > >> > > > > The *CvMemStorage*
> > > > > > > >> > > > >
> > > > > > > >> <http://opencv.willowgarage.com/wiki/CxCore#CvMemStorage>struct
> > > > > > > >> > > > > *storage* is used as a mechanism to simplify memory
> > > > > > > >> management. I
> > > > > > > >> > > > believe
> > > > > > > >> > > > > the *keypoints* and *descriptors* structures are put into
> > > > > > > >> *storage*,
> > > > > > > >> > > > so you
> > > > > > > >> > > > > can't release *storage* until you're done using
> > > > *keypoints*
> > > > > > > >> and
> > > > > > > >> > > > *descriptors
> > > > > > > >> > > > > *.Put a *CvMemStorage *storage = cvCreateMemStorage(0);*
> > > > > > > >> before your
> > > > > > > >> > > > first
> > > > > > > >> > > > > call to cvExtractSURF and *cvClearMemStorage(storage);*
> > > > after
> > > > > > > >> you're
> > > > > > > >> > > > done
> > > > > > > >> > > > > using *keypoints* and *descriptors*.
> > > > > > > >> > > > >
> > > > > > > >> > > > > SURF takes a couple of parameters through the
> > > > *CvSURFParams*
> > > > > > > >> struct
> > > > > > > >> > > > *params*.
> > > > > > > >> > > > > You create *params* with *cvSURFParams(double threshold,
> > > > int
> > > > > > > >> > > > > extended)*where threshold represents the "edgyness" that
> > > > is
> > > > > > > >> required
> > > > > > > >> > > > > from a feature to
> > > > > > > >> > > > > be recognized as a feature. It can be adjusted to retrieve
> > > > > > > >> more
> > > > > > > >> > or fewer
> > > > > > > >> > > > > features. In the paper
> > > > > > > >> > > > > <http://www.vision.ee.ethz.ch/%7Esurf/eccv06.pdf
> > > > >describing
> > > > > > > >> the SURF
> > > > > > > >> > > > > detector, they use a threshold of 600 on a 800 x 640
> > > > > > > >> > > > > image which returned 1418 features. The *extended*
> > > > parameter
> > > > > > > >> is
> > > > > > > >> > a simple
> > > > > > > >> > > > > boolean 1 or 0 which states whether or not to use the
> > > > extended
> > > > > > > >> > > > descriptor.
> > > > > > > >> > > > > The extended descriptor consists of 128 instead of 64
> > > > values
> > > > > > > >> which
> > > > > > > >> > > > should
> > > > > > > >> > > > > gives a better result at the cost of using more memory.
> > > > > > > >> Instead of
> > > > > > > >> > > > creating
> > > > > > > >> > > > > a new CvSURFParams struct for each call to cvExtractSURF,
> > > > you
> > > > > > > >> > could do:
> > > > > > > >> > > > >
> > > > > > > >> > > > > CvSURFParams params = cvSURFParams(600, 1);
> > > > > > > >> > > > > cvExtractSURF(..., params);
> > > > > > > >> > > > > cvExtractSURF(..., params);
> > > > > > > >> > > > >
> > > > > > > >> > > > >
> > > > > > > >> > > > > There you go. I hope I answered your question :)
> > > > > > > >> > > > >
> > > > > > > >> > > > > Jostein
> > > > > > > >> > > > >
> > > > > > > >> > > > >
> > > > > > > >> > > > > 2009/1/12 yair_movshovitz <yairmov@>
> > > > > > > >> > > > >
> > > > > > > >> > > > > > Hi Jostein,
> > > > > > > >> > > > > >
> > > > > > > >> > > > > > Thanks a lot for your help!
> > > > > > > >> > > > > >
> > > > > > > >> > > > > > Can you please explain the function parameters of
> > > > > > > >> cvExtractSURF?
> > > > > > > >> > > > > > I mean in - cvExtractSURF(img1, NULL, &kp1, &desc1,
> > > > storage,
> > > > > > > >> > > > > > cvSURFParams(600,EXTENDED_DESCRIPTOR));
> > > > > > > >> > > > > > what is the role of kp1, desc1, storage and the
> > > > SURFParams?
> > > > > > > >> > > > > > is storage just a temp area for the algorithm to use?
> > > > > > > >> > > > > >
> > > > > > > >> > > > > > Thanks again
> > > > > > > >> > > > > > Yair
> > > > > > > >> > > > > >
> > > > > > > >> > > > > > --- In [hidden email]<OpenCV%40yahoogroups.com><OpenCV%
> > > > 40yahoogroups.com><OpenCV%
> > > > > > > >> 40yahoogroups.com>
> > > > > > > >> <OpenCV%
> > > > > > > >> > > > 40yahoogroups.com>, "Jostein
> > > > > > > >> > > >
> > > > > > > >> > > > Austvik
> > > > > > > >> > > > > > Jacobsen"
> > > > > > > >> > > > > >
> > > > > > > >> > > > > > <josteinaj@> wrote:
> > > > > > > >> > > > > > >
> > > > > > > >> > > > > > > If you've got your two rectangled areas stored as img1
> > > > and
> > > > > > > >> > img2 you
> > > > > > > >> > > > > > could do
> > > > > > > >> > > > > > > this to extract its keypoints and corresponding
> > > > > > > >> descriptors:
> > > > > > > >> > > > > > >
> > > > > > > >> > > > > > > #define EXTENDED_DESCRIPTOR 1
> > > > > > > >> > > > > > > CvSeq *kp1=NULL, *kp2=NULL;
> > > > > > > >> > > > > > > CvSeq *desc1=NULL, *desc2=NULL;
> > > > > > > >> > > > > > > CvMemStorage *storage = cvCreateMemStorage(0);
> > > > > > > >> > > > > > > cvExtractSURF(img1, NULL, &kp1, &desc1, storage,
> > > > > > > >> > cvSURFParams(600,
> > > > > > > >> > > > > > > EXTENDED_DESCRIPTOR));
> > > > > > > >> > > > > > > cvExtractSURF(img2, NULL, &kp2, &desc2, storage,
> > > > > > > >> > cvSURFParams(600,
> > > > > > > >> > > > > > > EXTENDED_DESCRIPTOR));
> > > > > > > >> > > > > > >
> > > > > > > >> > > > > > > You will have to correlate the descriptors with each
> > > > > > > >> other to
> > > > > > > >> > > > determine
> > > > > > > >> > > > > > > which keypoints in each rectangle corresponds to one
> > > > > > > >> > another. You
> > > > > > > >> > > > > > could use
> > > > > > > >> > > > > > > a BBF tree which is implemented in the latest version
> > > > of
> > > > > > > >> > OpenCV, but
> > > > > > > >> > > > > > unless
> > > > > > > >> > > > > > > your rectangle is huge, you might just as well just
> > > > > > > >> correlate
> > > > > > > >> > > > them the
> > > > > > > >> > > > > > > standard way, which I do like this:
> > > > > > > >> > > > > > >
> > > > > > > >> > > > > > > #define CORRELATION_THRESHOLD 0.7
> > > > > > > >> > > > > > > // brute-force attempt at correlating the two sets of
> > > > > > > >> features
> > > > > > > >> > > > > > > void bruteMatch(CvMat **points1, CvMat **points2,
> > > > CvSeq
> > > > > > > >> > *kp1, CvSeq
> > > > > > > >> > > > > > *desc1,
> > > > > > > >> > > > > > > CvSeq *kp2, CvSeq *desc2) {
> > > > > > > >> > > > > > > int i,j,k;
> > > > > > > >> > > > > > > double* avg1 =
> > > > (double*)malloc(sizeof(double)*kp1->total);
> > > > > > > >> > > > > > > double* avg2 =
> > > > (double*)malloc(sizeof(double)*kp2->total);
> > > > > > > >> > > > > > > double* dev1 =
> > > > (double*)malloc(sizeof(double)*kp1->total);
> > > > > > > >> > > > > > > double* dev2 =
> > > > (double*)malloc(sizeof(double)*kp2->total);
> > > > > > > >> > > > > > > int* best1 = (int*)malloc(sizeof(int)*kp1->total);
> > > > > > > >> > > > > > > int* best2 = (int*)malloc(sizeof(int)*kp2->total);
> > > > > > > >> > > > > > > double* best1corr =
> > > > (double*)malloc(sizeof(double)*kp1-
> > > > > > > >> >total);
> > > > > > > >> > > > > > > double* best2corr =
> > > > (double*)malloc(sizeof(double)*kp2-
> > > > > > > >> >total);
> > > > > > > >> > > > > > > float *seq1, *seq2;
> > > > > > > >> > > > > > > int descriptor_size = EXTENDED_DESCRIPTOR ? 128 : 64;
> > > > > > > >> > > > > > > for (i=0; i<kp1->total; i++) {
> > > > > > > >> > > > > > > // find average and standard deviation of each
> > > > descriptor
> > > > > > > >> > > > > > > avg1[i] = 0;
> > > > > > > >> > > > > > > dev1[i] = 0;
> > > > > > > >> > > > > > > seq1 = (float*)cvGetSeqElem(desc1, i);
> > > > > > > >> > > > > > > for (k=0; k<descriptor_size; k++) avg1[i] += seq1[k];
> > > > > > > >> > > > > > > avg1[i] /= descriptor_size;
> > > > > > > >> > > > > > > for (k=0; k<descriptor_size; k++) dev1[i] +=
> > > > > > > >> > > > > > > (seq1[k]-avg1[i])*(seq1[k]-avg1[i]);
> > > > > > > >> > > > > > > dev1[i] = sqrt(dev1[i]/descriptor_size);
> > > > > > > >> > > > > > >
> > > > > > > >> > > > > > > // initialize best1 and best1corr
> > > > > > > >> > > > > > > best1[i] = -1;
> > > > > > > >> > > > > > > best1corr[i] = -1.;
> > > > > > > >> > > > > > > }
> > > > > > > >> > > > > > > for (j=0; j<kp2->total; j++) {
> > > > > > > >> > > > > > > // find average and standard deviation of each
> > > > descriptor
> > > > > > > >> > > > > > > avg2[j] = 0;
> > > > > > > >> > > > > > > dev2[j] = 0;
> > > > > > > >> > > > > > > seq2 = (float*)cvGetSeqElem(desc2, j);
> > > > > > > >> > > > > > > for (k=0; k<descriptor_size; k++) avg2[j] += seq2[k];
> > > > > > > >> > > > > > > avg2[j] /= descriptor_size;
> > > > > > > >> > > > > > > for (k=0; k<descriptor_size; k++) dev2[j] +=
> > > > > > > >> > > > > > > (seq2[k]-avg2[j])*(seq2[k]-avg2[j]);
> > > > > > > >> > > > > > > dev2[j] = sqrt(dev2[j]/descriptor_size);
> > > > > > > >> > > > > > >
> > > > > > > >> > > > > > > // initialize best2 and best2corr
> > > > > > > >> > > > > > > best2[j] = -1;
> > > > > > > >> > > > > > > best2corr[j] = -1.;
> > > > > > > >> > > > > > > }
> > > > > > > >> > > > > > > double corr;
> > > > > > > >> > > > > > > for (i = 0; i < kp1->total; ++i) {
> > > > > > > >> > > > > > > seq1 = (float*)cvGetSeqElem(desc1, i);
> > > > > > > >> > > > > > > for (j = 0; j < kp2->total; ++j) {
> > > > > > > >> > > > > > > corr = 0;
> > > > > > > >> > > > > > > seq2 = (float*)cvGetSeqElem(desc2, j);
> > > > > > > >> > > > > > > for (k = 0; k < descriptor_size; ++k)
> > > > > > > >> > > > > > > corr += (seq1[k]-avg1[i])*(seq2[k]-avg2[j]);
> > > > > > > >> > > > > > > corr /= (descriptor_size-1)*dev1[i]*dev2[j];
> > > > > > > >> > > > > > > if (corr > best1corr[i]) {
> > > > > > > >> > > > > > > best1corr[i] = corr;
> > > > > > > >> > > > > > > best1[i] = j;
> > > > > > > >> > > > > > > }
> > > > > > > >> > > > > > > if (corr > best2corr[j]) {
> > > > > > > >> > > > > > > best2corr[j] = corr;
> > > > > > > >> > > > > > > best2[j] = i;
> > > > > > > >> > > > > > > }
> > > > > > > >> > > > > > > }
> > > > > > > >> > > > > > > }
> > > > > > > >> > > > > > > j = 0;
> > > > > > > >> > > > > > > for (i = 0; i < kp1->total; i++)
> > > > > > > >> > > > > > > if (best2[best1[i]] == i && best1corr[i] >
> > > > > > > >> > > > > > CORRELATION_THRESHOLD)
> > > > > > > >> > > > > > > j++;
> > > > > > > >> > > > > > > if (j == 0) return; // no matches found
> > > > > > > >> > > > > > > *points1 = cvCreateMat(1,j,CV_32FC2);
> > > > > > > >> > > > > > > *points2 = cvCreateMat(1,j,CV_32FC2);
> > > > > > > >> > > > > > > CvPoint2D32f *p1, *p2;
> > > > > > > >> > > > > > > j = 0;
> > > > > > > >> > > > > > > for (i = 0; i < kp1->total; i++) {
> > > > > > > >> > > > > > > if (best2[best1[i]] == i && best1corr[i] >
> > > > > > > >> > > > > > CORRELATION_THRESHOLD) {
> > > > > > > >> > > > > > > p1 = &((CvSURFPoint*)cvGetSeqElem(kp1,i))->pt;
> > > > > > > >> > > > > > > p2 = &((CvSURFPoint*)cvGetSeqElem(kp2,best1[i]))->pt;
> > > > > > > >> > > > > > > (*points1)->data.fl[j*2] = p1->x;
> > > > > > > >> > > > > > > (*points1)->data.fl[j*2+1] = p1->y;
> > > > > > > >> > > > > > > (*points2)->data.fl[j*2] = p2->x;
> > > > > > > >> > > > > > > (*points2)->data.fl[j*2+1] = p2->y;
> > > > > > > >> > > > > > > j++;
> > > > > > > >> > > > > > > }
> > > > > > > >> > > > > > > }
> > > > > > > >> > > > > > > free(best2corr);
> > > > > > > >> > > > > > > free(best1corr);
> > > > > > > >> > > > > > > free(best2);
> > > > > > > >> > > > > > > free(best1);
> > > > > > > >> > > > > > > free(avg1);
> > > > > > > >> > > > > > > free(avg2);
> > > > > > > >> > > > > > > free(dev1);
> > > > > > > >> > > > > > > free(dev2);
> > > > > > > >> > > > > > > }
> > > > > > > >> > > > > > >
> > > > > > > >> > > > > > > If you construct a fundamental matrix (a model) for
> > > > the
> > > > > > > >> > > > transformation
> > > > > > > >> > > > > > > between the two rectangles, you can further determine
> > > > > > > >> which
> > > > > > > >> > > > > > correspondences
> > > > > > > >> > > > > > > are false (by how well they fit the model) and remove
> > > > > > > >> them,
> > > > > > > >> > which I
> > > > > > > >> > > > > > like to
> > > > > > > >> > > > > > > do like this:
> > > > > > > >> > > > > > >
> > > > > > > >> > > > > > > F = cvCreateMat(3,3,CV_32FC1);
> > > > > > > >> > > > > > > CvMat *status = cvCreateMat(1,points1->cols,CV_8UC1);
> > > > > > > >> > > > > > > int fm_count = cvFindFundamentalMat(
> > > > points1,points2,F,
> > > > > > > >> > > > > > > CV_FM_RANSAC,1.,0.99,status );
> > > > > > > >> > > > > > > removeOutliers(&points1,&points2,status);
> > > > > > > >> > > > > > >
> > > > > > > >> > > > > > > where removeOutliers() is a function I wrote to clean
> > > > up
> > > > > > > >> after
> > > > > > > >> > > > > > > cvFindFundamentalMat():
> > > > > > > >> > > > > > >
> > > > > > > >> > > > > > > // iterates the set of putative correspondences and
> > > > > > > >> removes
> > > > > > > >> > > > > > correspondences
> > > > > > > >> > > > > > > marked as outliers by cvFindFundamentalMat()
> > > > > > > >> > > > > > > void removeOutliers(CvMat **points1, CvMat **points2,
> > > > > > > >> CvMat
> > > > > > > >> > > > *status) {
> > > > > > > >> > > > > > > CvMat *points1_ = *points1;
> > > > > > > >> > > > > > > CvMat *points2_ = *points2;
> > > > > > > >> > > > > > > int count = 0;
> > > > > > > >> > > > > > > for (int i = 0; i < status->cols; i++) if
> > > > > > > >> > > > > > (CV_MAT_ELEM(*status,unsigned
> > > > > > > >> > > > > > > char,0,i)) count++;
> > > > > > > >> > > > > > > if (!count) { // no inliers
> > > > > > > >> > > > > > > *points1 = NULL;
> > > > > > > >> > > > > > > *points2 = NULL;
> > > > > > > >> > > > > > > }
> > > > > > > >> > > > > > > else {
> > > > > > > >> > > > > > > *points1 = cvCreateMat(1,count,CV_32FC2);
> > > > > > > >> > > > > > > *points2 = cvCreateMat(1,count,CV_32FC2);
> > > > > > > >> > > > > > > int j = 0;
> > > > > > > >> > > > > > > for (int i = 0; i < status->cols; i++) {
> > > > > > > >> > > > > > > if (CV_MAT_ELEM(*status,unsigned char,0,i)) {
> > > > > > > >> > > > > > > (*points1)->data.fl[j*2] = points1_->data.fl[i*2];
> > > > > > > >> > > > > > > //p1->x
> > > > > > > >> > > > > > > (*points1)->data.fl[j*2+1] = points1_->data.fl[i*2+1];
> > > > > > > >> > > > > > > //p1->y
> > > > > > > >> > > > > > > (*points2)->data.fl[j*2] = points2_->data.fl[i*2];
> > > > > > > >> > > > > > > //p2->x
> > > > > > > >> > > > > > > (*points2)->data.fl[j*2+1] = points2_->data.fl[i*2+1];
> > > > > > > >> > > > > > > //p2->y
> > > > > > > >> > > > > > > j++;
> > > > > > > >> > > > > > > }
> > > > > > > >> > > > > > > }
> > > > > > > >> > > > > > > }
> > > > > > > >> > > > > > > cvReleaseMat(&points1_);
> > > > > > > >> > > > > > > cvReleaseMat(&points2_);
> > > > > > > >> > > > > > > }
> > > > > > > >> > > > > > >
> > > > > > > >> > > > > > >
> > > > > > > >> > > > > > > I hope this helps.
> > > > > > > >> > > > > > >
> > > > > > > >> > > > > > > -Jostein
> > > > > > > >> > > > > > >
> > > > > > > >> > > > > > >
> > > > > > > >> > > > > > > 2009/1/8 yair_movshovitz <yairmov@>
> > > > > > > >> > > > > > >
> > > > > > > >> > > > > > > > Hi Everyone,
> > > > > > > >> > > > > > > >
> > > > > > > >> > > > > > > > I'm trying to understand how to use the SURF
> > > > features
> > > > > > > >> > > > capabilities of
> > > > > > > >> > > > > > > > openCV.
> > > > > > > >> > > > > > > > My scenario is as follows:
> > > > > > > >> > > > > > > > I have two rectangled areas in an image, which are
> > > > > > > >> supposed to
> > > > > > > >> > > > bound
> > > > > > > >> > > > > > > > the same object. I would like to see how good is
> > > > this
> > > > > > > >> > > > assumption. In
> > > > > > > >> > > > > > > > other words I would like to see how many features
> > > > they
> > > > > > > >> share.
> > > > > > > >> > > > > > > >
> > > > > > > >> > > > > > > > Can someone drop me a hint on how to use the SURF
> > > > > > > >> > > > implementation of
> > > > > > > >> > > > > > > > openCV (or direct me to somewhere that has some
> > > > > > > >> documentation
> > > > > > > >> > > > of it)
> > > > > > > >> > > > > > > >
> > > > > > > >> > > > > > > > Thanks,
> > > > > > > >> > > > > > > > Yair
> > > > > > > >> > > > > > > >
> > > > > > > >> > > > > > > >
> > > > > > > >> > > > > > > >
> > > > > > > >> > > > > > >
> > > > > > > >> > > > > > >
> > > > > > > >> > > > > > > [Non-text portions of this message have been removed]
> > > > > > > >> > > > > > >
> > > > > > > >> > > > > >
> > > > > > > >> > > > > >
> > > > > > > >> > > > > >
> > > > > > > >> > > > >
> > > > > > > >> > > > >
> > > > > > > >> > > > > [Non-text portions of this message have been removed]
> > > > > > > >> > > > >
> > > > > > > >> > > >
> > > > > > > >> > > >
> > > > > > > >> > > >
> > > > > > > >> > >
> > > > > > > >> > >
> > > > > > > >> > > [Non-text portions of this message have been removed]
> > > > > > > >> > >
> > > > > > > >> >
> > > > > > > >>
> > > > > > > >>
> > > > > > > >>
> > > > > > > >
> > > > > > > >
> > > > > > >
> > > > > > >
> > > > > > > [Non-text portions of this message have been removed]
> > > > > > >
> > > > > >
> > > > >
> > > >
> > > >  
> > > >
> > >
> > >
> > > [Non-text portions of this message have been removed]
> > >
> >
>


Reply | Threaded
Open this post in threaded view
|

Re: cvExtractSURF

Jostein Austvik Jacobsen
In reply to this post by Jostein Austvik Jacobsen
Thanks. It's nice to know that its still useful :)

After running bruteMatch, you'll be left with two sets of points, named
"points1" and "points2" in the earlier code. Using these points, we make a
fundamental matrix with cvFindFundamentalMat, which also determines which
points are outliers and which points fits the model. After applying
removeOutliers, you'll be left with only points that both look like
eachother and are located in a reasonable location.

If you want to use this method for determining whether "this image closely
matches this one", then it would probably be sufficient to see whether there
are any remaining points at all, or possibly use a threshold for how many
point correspondences are needed to consider the images to be similar.

Depending on what type of images you are comparing, other methods might be
more efficient though. If you know what kind of images you can expect, then
more primitive methods like simply subtracting one image from the other and
using a threshold for determining yes/no could give good (and faster!)
results (although I haven't tried, so don't take my word for it). Then
again, point correspondences are much cooler :)

Regards
Jostein


2011/3/29 Lenny <[hidden email]>

>
>
> @Jostein Absolutely great post and example code, even a few years on, thank
> you :)
>
> I am currently working on a project that does (or at least is trying to do
> :P) a very similar thing as described here -- I didn't want to recreate a
> brand new post, and as this was high on Google I thought it would be best to
> reply here... sorry for hijacking!
>
> My question was in regard to your code - once that is implemented and
> working, so you get the keypoints/descriptors for two separate images and
> run through bruteForce, I'm not sure how to take this and basically figure
> out if yes, the first image matches the second. Do you have any pointers
> here, like further reading? I basically want to say "yes, this image closely
> matches this one" or "no, this image doesn't match (or not enough keypoints
> could be found)".
>
> Hope that makes sense... as you may tell I'm still rather new to OpenCV!
>
> Many thanks :)
>
>
> --- In [hidden email], "Jostein Austvik Jacobsen" <josteinaj@...>
> wrote:
> >
> > You can view the implementation of *cvExtractSURF(...)* here:
> >
> http://opencvlibrary.svn.sourceforge.net/viewvc/opencvlibrary/trunk/opencv/src/cv/cvsurf.cpp
> ,
> > however it doesn't contain much comments.
> >
> >
> > *cvExtractSURF( const CvArr* img, const CvArr* mask, CvSeq** keypoints,
> > CvSeq** descriptors, CvMemStorage* storage, CvSURFParams params )*
> >
> > Here, *img* is the image. Use an
> > *IplImage<http://opencv.willowgarage.com/wiki/CxCore#IplImage>
> > * for the image. To load an image from disk, use
> > *cvLoadImage(...)*<
> http://opencv.willowgarage.com/wiki/HighGui#cvLoadImage>,
> > and to create your own image, use
> > *cvCreateImage(...)*<
> http://opencv.willowgarage.com/wiki/CxCore#CreateImage>.
> > Lets say you have a IplImage *image* and want to extract the rectangle
> > (x,y)->(x+dx,y+dy) from it as an IplImage rectangle, you might do this:
> >
> > CvSize size = cvSize(dx,dy);
> > IplImage* rectangle = cvCreateImage(size, IPL_DEPTH_8U, 1);
> > for (int i = 0; i < dx; ++i) {
> >     for (int j = 0; j < dy; ++j) {
> >         CV_IMAGE_ELEM(rectangle,unsigned char,i,j) =
> > CV_IMAGE_ELEM(image,unsigned char,x+i,y+j);
> >     }
> > }
> >
> > I'm not sure how *mask* is used, but a quick google search gives
> >
> http://www.emgu.com/wiki/files/1.4.0.0/html/ad54862f-3d1c-c177-3bdb-768619f8dd90.htmwhich
> > says "The optional input 8-bit mask. The features are only found in
> > the areas that contain more than 50% of non-zero mask pixels". Just set
> it
> > to NULL.
> >
> > *keypoints* and
> > *descriptors*<http://en.wikipedia.org/wiki/Feature_%28computer_vision%29
> >are
> > where the results are placed. Initialize them as null-pointers and
> > cvExtractSURF will do the rest for you. Afterwards you can access a
> > descriptor and corresponding keypoint like this:
> >
> > int k = 0; // the keypoint you want. There are *keypoints->total*
> keypoints.
> > float *seq = (float*)cvGetSeqElem(descriptors, k); // the descriptor of
> > length 64 or 128
> > CvPoint2D32f *p = &((CvSURFPoint*)cvGetSeqElem(keypoints, k))->pt; // the
> > (x,y) coordinates of keypoint *k* can now be accessed as *p->x* and
> *p->y*
> >
> > The *CvMemStorage*
> > <http://opencv.willowgarage.com/wiki/CxCore#CvMemStorage>struct
> > *storage* is used as a mechanism to simplify memory management. I believe
> > the *keypoints* and *descriptors* structures are put into *storage*, so
> you
> > can't release *storage* until you're done using *keypoints* and
> *descriptors
> > *.Put a *CvMemStorage *storage = cvCreateMemStorage(0);* before your
> first
> > call to cvExtractSURF and *cvClearMemStorage(storage);* after you're done
> > using *keypoints* and *descriptors*.
> >
> > SURF takes a couple of parameters through the *CvSURFParams* struct
> *params*.
> > You create *params* with *cvSURFParams(double threshold, int
> > extended)*where threshold represents the "edgyness" that is required
> > from a feature to
> > be recognized as a feature. It can be adjusted to retrieve more or fewer
> > features. In the paper
> > <http://www.vision.ee.ethz.ch/%7Esurf/eccv06.pdf>describing the SURF
> > detector, they use a threshold of 600 on a 800 x 640
> > image which returned 1418 features. The *extended* parameter is a simple
> > boolean 1 or 0 which states whether or not to use the extended
> descriptor.
> > The extended descriptor consists of 128 instead of 64 values which should
> > gives a better result at the cost of using more memory. Instead of
> creating
> > a new CvSURFParams struct for each call to cvExtractSURF, you could do:
> >
> > CvSURFParams params = cvSURFParams(600, 1);
> > cvExtractSURF(..., params);
> > cvExtractSURF(..., params);
> >
> >
> > There you go. I hope I answered your question :)
> >
> > Jostein
> >
> >
> > 2009/1/12 yair_movshovitz <yairmov@...>
> >
> > >   Hi Jostein,
> > >
> > > Thanks a lot for your help!
> > >
> > > Can you please explain the function parameters of cvExtractSURF?
> > > I mean in - cvExtractSURF(img1, NULL, &kp1, &desc1, storage,
> > > cvSURFParams(600,EXTENDED_DESCRIPTOR));
> > > what is the role of kp1, desc1, storage and the SURFParams?
> > > is storage just a temp area for the algorithm to use?
> > >
> > > Thanks again
> > > Yair
> > >
> > > --- In [hidden email] <OpenCV%40yahoogroups.com>, "Jostein
> Austvik
> > > Jacobsen"
> > >
> > > <josteinaj@> wrote:
> > > >
> > > > If you've got your two rectangled areas stored as img1 and img2 you
> > > could do
> > > > this to extract its keypoints and corresponding descriptors:
> > > >
> > > > #define EXTENDED_DESCRIPTOR 1
> > > > CvSeq *kp1=NULL, *kp2=NULL;
> > > > CvSeq *desc1=NULL, *desc2=NULL;
> > > > CvMemStorage *storage = cvCreateMemStorage(0);
> > > > cvExtractSURF(img1, NULL, &kp1, &desc1, storage, cvSURFParams(600,
> > > > EXTENDED_DESCRIPTOR));
> > > > cvExtractSURF(img2, NULL, &kp2, &desc2, storage, cvSURFParams(600,
> > > > EXTENDED_DESCRIPTOR));
> > > >
> > > > You will have to correlate the descriptors with each other to
> determine
> > > > which keypoints in each rectangle corresponds to one another. You
> > > could use
> > > > a BBF tree which is implemented in the latest version of OpenCV, but
> > > unless
> > > > your rectangle is huge, you might just as well just correlate them
> the
> > > > standard way, which I do like this:
> > > >
> > > > #define CORRELATION_THRESHOLD 0.7
> > > > // brute-force attempt at correlating the two sets of features
> > > > void bruteMatch(CvMat **points1, CvMat **points2, CvSeq *kp1, CvSeq
> > > *desc1,
> > > > CvSeq *kp2, CvSeq *desc2) {
> > > > int i,j,k;
> > > > double* avg1 = (double*)malloc(sizeof(double)*kp1->total);
> > > > double* avg2 = (double*)malloc(sizeof(double)*kp2->total);
> > > > double* dev1 = (double*)malloc(sizeof(double)*kp1->total);
> > > > double* dev2 = (double*)malloc(sizeof(double)*kp2->total);
> > > > int* best1 = (int*)malloc(sizeof(int)*kp1->total);
> > > > int* best2 = (int*)malloc(sizeof(int)*kp2->total);
> > > > double* best1corr = (double*)malloc(sizeof(double)*kp1->total);
> > > > double* best2corr = (double*)malloc(sizeof(double)*kp2->total);
> > > > float *seq1, *seq2;
> > > > int descriptor_size = EXTENDED_DESCRIPTOR ? 128 : 64;
> > > > for (i=0; i<kp1->total; i++) {
> > > > // find average and standard deviation of each descriptor
> > > > avg1[i] = 0;
> > > > dev1[i] = 0;
> > > > seq1 = (float*)cvGetSeqElem(desc1, i);
> > > > for (k=0; k<descriptor_size; k++) avg1[i] += seq1[k];
> > > > avg1[i] /= descriptor_size;
> > > > for (k=0; k<descriptor_size; k++) dev1[i] +=
> > > > (seq1[k]-avg1[i])*(seq1[k]-avg1[i]);
> > > > dev1[i] = sqrt(dev1[i]/descriptor_size);
> > > >
> > > > // initialize best1 and best1corr
> > > > best1[i] = -1;
> > > > best1corr[i] = -1.;
> > > > }
> > > > for (j=0; j<kp2->total; j++) {
> > > > // find average and standard deviation of each descriptor
> > > > avg2[j] = 0;
> > > > dev2[j] = 0;
> > > > seq2 = (float*)cvGetSeqElem(desc2, j);
> > > > for (k=0; k<descriptor_size; k++) avg2[j] += seq2[k];
> > > > avg2[j] /= descriptor_size;
> > > > for (k=0; k<descriptor_size; k++) dev2[j] +=
> > > > (seq2[k]-avg2[j])*(seq2[k]-avg2[j]);
> > > > dev2[j] = sqrt(dev2[j]/descriptor_size);
> > > >
> > > > // initialize best2 and best2corr
> > > > best2[j] = -1;
> > > > best2corr[j] = -1.;
> > > > }
> > > > double corr;
> > > > for (i = 0; i < kp1->total; ++i) {
> > > > seq1 = (float*)cvGetSeqElem(desc1, i);
> > > > for (j = 0; j < kp2->total; ++j) {
> > > > corr = 0;
> > > > seq2 = (float*)cvGetSeqElem(desc2, j);
> > > > for (k = 0; k < descriptor_size; ++k)
> > > > corr += (seq1[k]-avg1[i])*(seq2[k]-avg2[j]);
> > > > corr /= (descriptor_size-1)*dev1[i]*dev2[j];
> > > > if (corr > best1corr[i]) {
> > > > best1corr[i] = corr;
> > > > best1[i] = j;
> > > > }
> > > > if (corr > best2corr[j]) {
> > > > best2corr[j] = corr;
> > > > best2[j] = i;
> > > > }
> > > > }
> > > > }
> > > > j = 0;
> > > > for (i = 0; i < kp1->total; i++)
> > > > if (best2[best1[i]] == i && best1corr[i] >
> > > CORRELATION_THRESHOLD)
> > > > j++;
> > > > if (j == 0) return; // no matches found
> > > > *points1 = cvCreateMat(1,j,CV_32FC2);
> > > > *points2 = cvCreateMat(1,j,CV_32FC2);
> > > > CvPoint2D32f *p1, *p2;
> > > > j = 0;
> > > > for (i = 0; i < kp1->total; i++) {
> > > > if (best2[best1[i]] == i && best1corr[i] >
> > > CORRELATION_THRESHOLD) {
> > > > p1 = &((CvSURFPoint*)cvGetSeqElem(kp1,i))->pt;
> > > > p2 = &((CvSURFPoint*)cvGetSeqElem(kp2,best1[i]))->pt;
> > > > (*points1)->data.fl[j*2] = p1->x;
> > > > (*points1)->data.fl[j*2+1] = p1->y;
> > > > (*points2)->data.fl[j*2] = p2->x;
> > > > (*points2)->data.fl[j*2+1] = p2->y;
> > > > j++;
> > > > }
> > > > }
> > > > free(best2corr);
> > > > free(best1corr);
> > > > free(best2);
> > > > free(best1);
> > > > free(avg1);
> > > > free(avg2);
> > > > free(dev1);
> > > > free(dev2);
> > > > }
> > > >
> > > > If you construct a fundamental matrix (a model) for the
> transformation
> > > > between the two rectangles, you can further determine which
> > > correspondences
> > > > are false (by how well they fit the model) and remove them, which I
> > > like to
> > > > do like this:
> > > >
> > > > F = cvCreateMat(3,3,CV_32FC1);
> > > > CvMat *status = cvCreateMat(1,points1->cols,CV_8UC1);
> > > > int fm_count = cvFindFundamentalMat( points1,points2,F,
> > > > CV_FM_RANSAC,1.,0.99,status );
> > > > removeOutliers(&points1,&points2,status);
> > > >
> > > > where removeOutliers() is a function I wrote to clean up after
> > > > cvFindFundamentalMat():
> > > >
> > > > // iterates the set of putative correspondences and removes
> > > correspondences
> > > > marked as outliers by cvFindFundamentalMat()
> > > > void removeOutliers(CvMat **points1, CvMat **points2, CvMat *status)
> {
> > > > CvMat *points1_ = *points1;
> > > > CvMat *points2_ = *points2;
> > > > int count = 0;
> > > > for (int i = 0; i < status->cols; i++) if
> > > (CV_MAT_ELEM(*status,unsigned
> > > > char,0,i)) count++;
> > > > if (!count) { // no inliers
> > > > *points1 = NULL;
> > > > *points2 = NULL;
> > > > }
> > > > else {
> > > > *points1 = cvCreateMat(1,count,CV_32FC2);
> > > > *points2 = cvCreateMat(1,count,CV_32FC2);
> > > > int j = 0;
> > > > for (int i = 0; i < status->cols; i++) {
> > > > if (CV_MAT_ELEM(*status,unsigned char,0,i)) {
> > > > (*points1)->data.fl[j*2] = points1_->data.fl[i*2];
> > > > //p1->x
> > > > (*points1)->data.fl[j*2+1] = points1_->data.fl[i*2+1];
> > > > //p1->y
> > > > (*points2)->data.fl[j*2] = points2_->data.fl[i*2];
> > > > //p2->x
> > > > (*points2)->data.fl[j*2+1] = points2_->data.fl[i*2+1];
> > > > //p2->y
> > > > j++;
> > > > }
> > > > }
> > > > }
> > > > cvReleaseMat(&points1_);
> > > > cvReleaseMat(&points2_);
> > > > }
> > > >
> > > >
> > > > I hope this helps.
> > > >
> > > > -Jostein
> > > >
> > > >
> > > > 2009/1/8 yair_movshovitz <yairmov@>
> > > >
> > > > > Hi Everyone,
> > > > >
> > > > > I'm trying to understand how to use the SURF features capabilities
> of
> > > > > openCV.
> > > > > My scenario is as follows:
> > > > > I have two rectangled areas in an image, which are supposed to
> bound
> > > > > the same object. I would like to see how good is this assumption.
> In
> > > > > other words I would like to see how many features they share.
> > > > >
> > > > > Can someone drop me a hint on how to use the SURF implementation of
> > > > > openCV (or direct me to somewhere that has some documentation of
> it)
> > > > >
> > > > > Thanks,
> > > > > Yair
> > > > >
> > > > >
> > > > >
> > > >
> > > >
> > > > [Non-text portions of this message have been removed]
> > > >
> > >
> > >
> > >
> >
> >
> > [Non-text portions of this message have been removed]
> >
>
>
>
Reply | Threaded
Open this post in threaded view
|

Re: cvExtractSURF

Jostein Austvik Jacobsen
Yes, each point is put into its own column, so the columns will vary while
the row length remains the same.

The return value from cvFindFundamentalMat fm_count, or "fundamental matrix
count", is the number of fundamental matrices found. For the 7-point
algorithm (CV_FM_7POINT), this can be 3. However, for CV_FM_RANSAC, you'll
get exactly one fundamental matrix, so fm_count will be 1.

You are correct in relying on points1->cols/points2->cols, and yes they will
always be the same.

(I'm CC'ing the list, others may have the same question, hope it's okay)

Regards
Jostein

2011/3/30 Lenny <[hidden email]>

> Thanks for the reply Jostein, much appreciated.
>
> I have tried doing as you suggested -- and from what I can make out I'm
> supposed to be using the value returned from cvFindFundamentalMat
> (fm_count)? The thing is, this seems to always be 1, no matter what images
> are used. When I inspect the points1 or points2 rows I always see 1 too, but
> the cols are ones that vary. For a completely different image I got < 10
> cols for a completely different image, > 500 cols for the exact same image ~
> 300 - 350 for various versions of the same image with annotations and/or
> rotations and a little less for a photo taken with a camera of the same
> object.
>
> My question, then, is am I doing something wrong, or is it correct for me
> to rely on the points1->cols/points2->cols (which are always the same,
> right?) - then either use my own threshold to see what a valid match is or
> select the match with the highest number of cols or a combination of both?
>
> Thanks again Jostein.
>
>
> --- In [hidden email], Jostein Austvik Jacobsen <josteinaj@...>
> wrote:
> >
> > Thanks. It's nice to know that its still useful :)
> >
> > After running bruteMatch, you'll be left with two sets of points, named
> > "points1" and "points2" in the earlier code. Using these points, we make
> a
> > fundamental matrix with cvFindFundamentalMat, which also determines which
> > points are outliers and which points fits the model. After applying
> > removeOutliers, you'll be left with only points that both look like
> > eachother and are located in a reasonable location.
> >
> > If you want to use this method for determining whether "this image
> closely
> > matches this one", then it would probably be sufficient to see whether
> there
> > are any remaining points at all, or possibly use a threshold for how many
> > point correspondences are needed to consider the images to be similar.
> >
> > Depending on what type of images you are comparing, other methods might
> be
> > more efficient though. If you know what kind of images you can expect,
> then
> > more primitive methods like simply subtracting one image from the other
> and
> > using a threshold for determining yes/no could give good (and faster!)
> > results (although I haven't tried, so don't take my word for it). Then
> > again, point correspondences are much cooler :)
> >
> > Regards
> > Jostein
> >
> >
> > 2011/3/29 Lenny <gisterogue@...>
> >
> > >
> > >
> > > @Jostein Absolutely great post and example code, even a few years on,
> thank
> > > you :)
> > >
> > > I am currently working on a project that does (or at least is trying to
> do
> > > :P) a very similar thing as described here -- I didn't want to recreate
> a
> > > brand new post, and as this was high on Google I thought it would be
> best to
> > > reply here... sorry for hijacking!
> > >
> > > My question was in regard to your code - once that is implemented and
> > > working, so you get the keypoints/descriptors for two separate images
> and
> > > run through bruteForce, I'm not sure how to take this and basically
> figure
> > > out if yes, the first image matches the second. Do you have any
> pointers
> > > here, like further reading? I basically want to say "yes, this image
> closely
> > > matches this one" or "no, this image doesn't match (or not enough
> keypoints
> > > could be found)".
> > >
> > > Hope that makes sense... as you may tell I'm still rather new to
> OpenCV!
> > >
> > > Many thanks :)
> > >
> > >
> > > --- In [hidden email], "Jostein Austvik Jacobsen" <josteinaj@>
> > > wrote:
> > > >
> > > > You can view the implementation of *cvExtractSURF(...)* here:
> > > >
> > >
> http://opencvlibrary.svn.sourceforge.net/viewvc/opencvlibrary/trunk/opencv/src/cv/cvsurf.cpp
> > > ,
> > > > however it doesn't contain much comments.
> > > >
> > > >
> > > > *cvExtractSURF( const CvArr* img, const CvArr* mask, CvSeq**
> keypoints,
> > > > CvSeq** descriptors, CvMemStorage* storage, CvSURFParams params )*
> > > >
> > > > Here, *img* is the image. Use an
> > > > *IplImage<http://opencv.willowgarage.com/wiki/CxCore#IplImage>
> > > > * for the image. To load an image from disk, use
> > > > *cvLoadImage(...)*<
> > > http://opencv.willowgarage.com/wiki/HighGui#cvLoadImage>,
> > > > and to create your own image, use
> > > > *cvCreateImage(...)*<
> > > http://opencv.willowgarage.com/wiki/CxCore#CreateImage>.
> > > > Lets say you have a IplImage *image* and want to extract the
> rectangle
> > > > (x,y)->(x+dx,y+dy) from it as an IplImage rectangle, you might do
> this:
> > > >
> > > > CvSize size = cvSize(dx,dy);
> > > > IplImage* rectangle = cvCreateImage(size, IPL_DEPTH_8U, 1);
> > > > for (int i = 0; i < dx; ++i) {
> > > >     for (int j = 0; j < dy; ++j) {
> > > >         CV_IMAGE_ELEM(rectangle,unsigned char,i,j) =
> > > > CV_IMAGE_ELEM(image,unsigned char,x+i,y+j);
> > > >     }
> > > > }
> > > >
> > > > I'm not sure how *mask* is used, but a quick google search gives
> > > >
> > >
> http://www.emgu.com/wiki/files/1.4.0.0/html/ad54862f-3d1c-c177-3bdb-768619f8dd90.htmwhich
> > > > says "The optional input 8-bit mask. The features are only found in
> > > > the areas that contain more than 50% of non-zero mask pixels". Just
> set
> > > it
> > > > to NULL.
> > > >
> > > > *keypoints* and
> > > > *descriptors*<
> http://en.wikipedia.org/wiki/Feature_%28computer_vision%29
> > > >are
> > > > where the results are placed. Initialize them as null-pointers and
> > > > cvExtractSURF will do the rest for you. Afterwards you can access a
> > > > descriptor and corresponding keypoint like this:
> > > >
> > > > int k = 0; // the keypoint you want. There are *keypoints->total*
> > > keypoints.
> > > > float *seq = (float*)cvGetSeqElem(descriptors, k); // the descriptor
> of
> > > > length 64 or 128
> > > > CvPoint2D32f *p = &((CvSURFPoint*)cvGetSeqElem(keypoints, k))->pt; //
> the
> > > > (x,y) coordinates of keypoint *k* can now be accessed as *p->x* and
> > > *p->y*
> > > >
> > > > The *CvMemStorage*
> > > > <http://opencv.willowgarage.com/wiki/CxCore#CvMemStorage>struct
> > > > *storage* is used as a mechanism to simplify memory management. I
> believe
> > > > the *keypoints* and *descriptors* structures are put into *storage*,
> so
> > > you
> > > > can't release *storage* until you're done using *keypoints* and
> > > *descriptors
> > > > *.Put a *CvMemStorage *storage = cvCreateMemStorage(0);* before your
> > > first
> > > > call to cvExtractSURF and *cvClearMemStorage(storage);* after you're
> done
> > > > using *keypoints* and *descriptors*.
> > > >
> > > > SURF takes a couple of parameters through the *CvSURFParams* struct
> > > *params*.
> > > > You create *params* with *cvSURFParams(double threshold, int
> > > > extended)*where threshold represents the "edgyness" that is required
> > > > from a feature to
> > > > be recognized as a feature. It can be adjusted to retrieve more or
> fewer
> > > > features. In the paper
> > > > <http://www.vision.ee.ethz.ch/%7Esurf/eccv06.pdf>describing the SURF
> > > > detector, they use a threshold of 600 on a 800 x 640
> > > > image which returned 1418 features. The *extended* parameter is a
> simple
> > > > boolean 1 or 0 which states whether or not to use the extended
> > > descriptor.
> > > > The extended descriptor consists of 128 instead of 64 values which
> should
> > > > gives a better result at the cost of using more memory. Instead of
> > > creating
> > > > a new CvSURFParams struct for each call to cvExtractSURF, you could
> do:
> > > >
> > > > CvSURFParams params = cvSURFParams(600, 1);
> > > > cvExtractSURF(..., params);
> > > > cvExtractSURF(..., params);
> > > >
> > > >
> > > > There you go. I hope I answered your question :)
> > > >
> > > > Jostein
> > > >
> > > >
> > > > 2009/1/12 yair_movshovitz <yairmov@>
> > > >
> > > > >   Hi Jostein,
> > > > >
> > > > > Thanks a lot for your help!
> > > > >
> > > > > Can you please explain the function parameters of cvExtractSURF?
> > > > > I mean in - cvExtractSURF(img1, NULL, &kp1, &desc1, storage,
> > > > > cvSURFParams(600,EXTENDED_DESCRIPTOR));
> > > > > what is the role of kp1, desc1, storage and the SURFParams?
> > > > > is storage just a temp area for the algorithm to use?
> > > > >
> > > > > Thanks again
> > > > > Yair
> > > > >
> > > > > --- In [hidden email] <OpenCV%40yahoogroups.com>, "Jostein
> > > Austvik
> > > > > Jacobsen"
> > > > >
> > > > > <josteinaj@> wrote:
> > > > > >
> > > > > > If you've got your two rectangled areas stored as img1 and img2
> you
> > > > > could do
> > > > > > this to extract its keypoints and corresponding descriptors:
> > > > > >
> > > > > > #define EXTENDED_DESCRIPTOR 1
> > > > > > CvSeq *kp1=NULL, *kp2=NULL;
> > > > > > CvSeq *desc1=NULL, *desc2=NULL;
> > > > > > CvMemStorage *storage = cvCreateMemStorage(0);
> > > > > > cvExtractSURF(img1, NULL, &kp1, &desc1, storage,
> cvSURFParams(600,
> > > > > > EXTENDED_DESCRIPTOR));
> > > > > > cvExtractSURF(img2, NULL, &kp2, &desc2, storage,
> cvSURFParams(600,
> > > > > > EXTENDED_DESCRIPTOR));
> > > > > >
> > > > > > You will have to correlate the descriptors with each other to
> > > determine
> > > > > > which keypoints in each rectangle corresponds to one another. You
> > > > > could use
> > > > > > a BBF tree which is implemented in the latest version of OpenCV,
> but
> > > > > unless
> > > > > > your rectangle is huge, you might just as well just correlate
> them
> > > the
> > > > > > standard way, which I do like this:
> > > > > >
> > > > > > #define CORRELATION_THRESHOLD 0.7
> > > > > > // brute-force attempt at correlating the two sets of features
> > > > > > void bruteMatch(CvMat **points1, CvMat **points2, CvSeq *kp1,
> CvSeq
> > > > > *desc1,
> > > > > > CvSeq *kp2, CvSeq *desc2) {
> > > > > > int i,j,k;
> > > > > > double* avg1 = (double*)malloc(sizeof(double)*kp1->total);
> > > > > > double* avg2 = (double*)malloc(sizeof(double)*kp2->total);
> > > > > > double* dev1 = (double*)malloc(sizeof(double)*kp1->total);
> > > > > > double* dev2 = (double*)malloc(sizeof(double)*kp2->total);
> > > > > > int* best1 = (int*)malloc(sizeof(int)*kp1->total);
> > > > > > int* best2 = (int*)malloc(sizeof(int)*kp2->total);
> > > > > > double* best1corr = (double*)malloc(sizeof(double)*kp1->total);
> > > > > > double* best2corr = (double*)malloc(sizeof(double)*kp2->total);
> > > > > > float *seq1, *seq2;
> > > > > > int descriptor_size = EXTENDED_DESCRIPTOR ? 128 : 64;
> > > > > > for (i=0; i<kp1->total; i++) {
> > > > > > // find average and standard deviation of each descriptor
> > > > > > avg1[i] = 0;
> > > > > > dev1[i] = 0;
> > > > > > seq1 = (float*)cvGetSeqElem(desc1, i);
> > > > > > for (k=0; k<descriptor_size; k++) avg1[i] += seq1[k];
> > > > > > avg1[i] /= descriptor_size;
> > > > > > for (k=0; k<descriptor_size; k++) dev1[i] +=
> > > > > > (seq1[k]-avg1[i])*(seq1[k]-avg1[i]);
> > > > > > dev1[i] = sqrt(dev1[i]/descriptor_size);
> > > > > >
> > > > > > // initialize best1 and best1corr
> > > > > > best1[i] = -1;
> > > > > > best1corr[i] = -1.;
> > > > > > }
> > > > > > for (j=0; j<kp2->total; j++) {
> > > > > > // find average and standard deviation of each descriptor
> > > > > > avg2[j] = 0;
> > > > > > dev2[j] = 0;
> > > > > > seq2 = (float*)cvGetSeqElem(desc2, j);
> > > > > > for (k=0; k<descriptor_size; k++) avg2[j] += seq2[k];
> > > > > > avg2[j] /= descriptor_size;
> > > > > > for (k=0; k<descriptor_size; k++) dev2[j] +=
> > > > > > (seq2[k]-avg2[j])*(seq2[k]-avg2[j]);
> > > > > > dev2[j] = sqrt(dev2[j]/descriptor_size);
> > > > > >
> > > > > > // initialize best2 and best2corr
> > > > > > best2[j] = -1;
> > > > > > best2corr[j] = -1.;
> > > > > > }
> > > > > > double corr;
> > > > > > for (i = 0; i < kp1->total; ++i) {
> > > > > > seq1 = (float*)cvGetSeqElem(desc1, i);
> > > > > > for (j = 0; j < kp2->total; ++j) {
> > > > > > corr = 0;
> > > > > > seq2 = (float*)cvGetSeqElem(desc2, j);
> > > > > > for (k = 0; k < descriptor_size; ++k)
> > > > > > corr += (seq1[k]-avg1[i])*(seq2[k]-avg2[j]);
> > > > > > corr /= (descriptor_size-1)*dev1[i]*dev2[j];
> > > > > > if (corr > best1corr[i]) {
> > > > > > best1corr[i] = corr;
> > > > > > best1[i] = j;
> > > > > > }
> > > > > > if (corr > best2corr[j]) {
> > > > > > best2corr[j] = corr;
> > > > > > best2[j] = i;
> > > > > > }
> > > > > > }
> > > > > > }
> > > > > > j = 0;
> > > > > > for (i = 0; i < kp1->total; i++)
> > > > > > if (best2[best1[i]] == i && best1corr[i] >
> > > > > CORRELATION_THRESHOLD)
> > > > > > j++;
> > > > > > if (j == 0) return; // no matches found
> > > > > > *points1 = cvCreateMat(1,j,CV_32FC2);
> > > > > > *points2 = cvCreateMat(1,j,CV_32FC2);
> > > > > > CvPoint2D32f *p1, *p2;
> > > > > > j = 0;
> > > > > > for (i = 0; i < kp1->total; i++) {
> > > > > > if (best2[best1[i]] == i && best1corr[i] >
> > > > > CORRELATION_THRESHOLD) {
> > > > > > p1 = &((CvSURFPoint*)cvGetSeqElem(kp1,i))->pt;
> > > > > > p2 = &((CvSURFPoint*)cvGetSeqElem(kp2,best1[i]))->pt;
> > > > > > (*points1)->data.fl[j*2] = p1->x;
> > > > > > (*points1)->data.fl[j*2+1] = p1->y;
> > > > > > (*points2)->data.fl[j*2] = p2->x;
> > > > > > (*points2)->data.fl[j*2+1] = p2->y;
> > > > > > j++;
> > > > > > }
> > > > > > }
> > > > > > free(best2corr);
> > > > > > free(best1corr);
> > > > > > free(best2);
> > > > > > free(best1);
> > > > > > free(avg1);
> > > > > > free(avg2);
> > > > > > free(dev1);
> > > > > > free(dev2);
> > > > > > }
> > > > > >
> > > > > > If you construct a fundamental matrix (a model) for the
> > > transformation
> > > > > > between the two rectangles, you can further determine which
> > > > > correspondences
> > > > > > are false (by how well they fit the model) and remove them, which
> I
> > > > > like to
> > > > > > do like this:
> > > > > >
> > > > > > F = cvCreateMat(3,3,CV_32FC1);
> > > > > > CvMat *status = cvCreateMat(1,points1->cols,CV_8UC1);
> > > > > > int fm_count = cvFindFundamentalMat( points1,points2,F,
> > > > > > CV_FM_RANSAC,1.,0.99,status );
> > > > > > removeOutliers(&points1,&points2,status);
> > > > > >
> > > > > > where removeOutliers() is a function I wrote to clean up after
> > > > > > cvFindFundamentalMat():
> > > > > >
> > > > > > // iterates the set of putative correspondences and removes
> > > > > correspondences
> > > > > > marked as outliers by cvFindFundamentalMat()
> > > > > > void removeOutliers(CvMat **points1, CvMat **points2, CvMat
> *status)
> > > {
> > > > > > CvMat *points1_ = *points1;
> > > > > > CvMat *points2_ = *points2;
> > > > > > int count = 0;
> > > > > > for (int i = 0; i < status->cols; i++) if
> > > > > (CV_MAT_ELEM(*status,unsigned
> > > > > > char,0,i)) count++;
> > > > > > if (!count) { // no inliers
> > > > > > *points1 = NULL;
> > > > > > *points2 = NULL;
> > > > > > }
> > > > > > else {
> > > > > > *points1 = cvCreateMat(1,count,CV_32FC2);
> > > > > > *points2 = cvCreateMat(1,count,CV_32FC2);
> > > > > > int j = 0;
> > > > > > for (int i = 0; i < status->cols; i++) {
> > > > > > if (CV_MAT_ELEM(*status,unsigned char,0,i)) {
> > > > > > (*points1)->data.fl[j*2] = points1_->data.fl[i*2];
> > > > > > //p1->x
> > > > > > (*points1)->data.fl[j*2+1] = points1_->data.fl[i*2+1];
> > > > > > //p1->y
> > > > > > (*points2)->data.fl[j*2] = points2_->data.fl[i*2];
> > > > > > //p2->x
> > > > > > (*points2)->data.fl[j*2+1] = points2_->data.fl[i*2+1];
> > > > > > //p2->y
> > > > > > j++;
> > > > > > }
> > > > > > }
> > > > > > }
> > > > > > cvReleaseMat(&points1_);
> > > > > > cvReleaseMat(&points2_);
> > > > > > }
> > > > > >
> > > > > >
> > > > > > I hope this helps.
> > > > > >
> > > > > > -Jostein
> > > > > >
> > > > > >
> > > > > > 2009/1/8 yair_movshovitz <yairmov@>
> > > > > >
> > > > > > > Hi Everyone,
> > > > > > >
> > > > > > > I'm trying to understand how to use the SURF features
> capabilities
> > > of
> > > > > > > openCV.
> > > > > > > My scenario is as follows:
> > > > > > > I have two rectangled areas in an image, which are supposed to
> > > bound
> > > > > > > the same object. I would like to see how good is this
> assumption.
> > > In
> > > > > > > other words I would like to see how many features they share.
> > > > > > >
> > > > > > > Can someone drop me a hint on how to use the SURF
> implementation of
> > > > > > > openCV (or direct me to somewhere that has some documentation
> of
> > > it)
> > > > > > >
> > > > > > > Thanks,
> > > > > > > Yair
> > > > > > >
> > > > > > >
> > > > > > >
> > > > > >
> > > > > >
> > > > > > [Non-text portions of this message have been removed]
> > > > > >
> > > > >
> > > > >
> > > > >
> > > >
> > > >
> > > > [Non-text portions of this message have been removed]
> > > >
> > >
> > >
> > >
> >
>
>
>