
Feature detection with non-patented descriptors

I need a feature-detection algorithm. I am tired of searching the web and finding nothing but SURF examples and hints on how to do it, but no example that uses anything other than patented descriptors such as SIFT or SURF.

Can someone provide an example that uses a free feature-detection algorithm (such as ORB or BRISK; as far as I understand, SURF and FLANN are non-free)?

I am using OpenCV 3.0.0.

Answer


Instead of using a SURF keypoint detector and descriptor extractor, simply switch to ORB. You only need to change the string passed to create to get different detectors and descriptor extractors.

The following applies to OpenCV 2.4.11.

Feature Detector

  • "FAST" - FastFeatureDetector
  • "Stern" - StarFeatureDetector
  • "SIFT" - SIFT (unfreie Modul)
  • "Surf" - SURF (unfreie Modul)
  • "ORB" - ORB
  • "BRISK" - BRISK
  • "MSER" - MSER
  • "GFTT" - GoodFeaturesToTrackDetector
  • "HARRIS" - DenseFeatureDetector
  • "SimpleBlob" - - SimpleBlobDetector

Descriptor Extractor

GoodFeaturesToTrackDetector mit Harris Detektor
  • "Dense" aktiviert
    • "SIFT" - SIFT
    • "Surf" - SURF
    • "kurzen" - BriefDescriptorExtractor
    • "BRISK" - Rege
    • "ORB" - ORB
    • "FREAK" - FREAK

Descriptor Matcher

  • BruteForce (uses L2)
  • BruteForce-L1
  • BruteForce-Hamming
  • BruteForce-Hamming(2)
  • FlannBased

FLANN is not in nonfree. You can, however, use other matchers such as BruteForce.
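
For binary descriptors such as ORB or BRISK, the Hamming-based matcher is usually a better fit than the plain L2 BruteForce matcher used in the example below. A minimal sketch of the OpenCV 2.4 factory strings, assuming BRISK instead of ORB and a placeholder image path:

    #include <opencv2/opencv.hpp> 
    
    int main() 
    { 
        // placeholder path; replace with a real image 
        cv::Mat img = cv::imread("box.png", CV_LOAD_IMAGE_GRAYSCALE); 
        if (img.empty()) return -1; 
    
        // create detector, extractor and matcher from the factory strings listed above 
        cv::Ptr<cv::FeatureDetector> detector = cv::FeatureDetector::create("BRISK"); 
        cv::Ptr<cv::DescriptorExtractor> extractor = cv::DescriptorExtractor::create("BRISK"); 
        cv::Ptr<cv::DescriptorMatcher> matcher = cv::DescriptorMatcher::create("BruteForce-Hamming"); 
    
        std::vector<cv::KeyPoint> keypoints; 
        cv::Mat descriptors; 
        detector->detect(img, keypoints); 
        extractor->compute(img, keypoints, descriptors); 
        // descriptors from a second image can now be matched with matcher->match(...) 
        return 0; 
    } 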

Example below:

    #include <iostream> 
    #include <opencv2/opencv.hpp> 
    
    using namespace cv; 
    
    /** @function main */ 
    int main(int argc, char** argv) 
    { 
    
        Mat img_object = imread("D:\\SO\\img\\box.png", CV_LOAD_IMAGE_GRAYSCALE); 
        Mat img_scene = imread("D:\\SO\\img\\box_in_scene.png", CV_LOAD_IMAGE_GRAYSCALE); 
    
        if (!img_object.data || !img_scene.data) 
        { 
         std::cout << " --(!) Error reading images " << std::endl; return -1; 
        } 
    
        //-- Step 1: Detect the keypoints using the ORB detector 
        Ptr<FeatureDetector> detector = FeatureDetector::create("ORB"); 
    
        std::vector<KeyPoint> keypoints_object, keypoints_scene; 
    
        detector->detect(img_object, keypoints_object); 
        detector->detect(img_scene, keypoints_scene); 
    
        //-- Step 2: Calculate descriptors (feature vectors) 
        Ptr<DescriptorExtractor> extractor = DescriptorExtractor::create("ORB"); 
    
        Mat descriptors_object, descriptors_scene; 
    
        extractor->compute(img_object, keypoints_object, descriptors_object); 
        extractor->compute(img_scene, keypoints_scene, descriptors_scene); 
    
        //-- Step 3: Match descriptor vectors using the BruteForce matcher 
        Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce"); 
        std::vector<DMatch> matches; 
        matcher->match(descriptors_object, descriptors_scene, matches); 
    
        double max_dist = 0; double min_dist = 100; 
    
        //-- Quick calculation of max and min distances between keypoints 
        for (int i = 0; i < descriptors_object.rows; i++) 
        { 
         double dist = matches[i].distance; 
         if (dist < min_dist) min_dist = dist; 
         if (dist > max_dist) max_dist = dist; 
        } 
    
        printf("-- Max dist : %f \n", max_dist); 
        printf("-- Min dist : %f \n", min_dist); 
    
        //-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist) 
        std::vector<DMatch> good_matches; 
    
        for (int i = 0; i < descriptors_object.rows; i++) 
        { 
         if (matches[i].distance < 3 * min_dist) 
         { 
          good_matches.push_back(matches[i]); 
         } 
        } 
    
        Mat img_matches; 
        drawMatches(img_object, keypoints_object, img_scene, keypoints_scene, 
         good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), 
         std::vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS); 
    
        //-- Localize the object 
        std::vector<Point2f> obj; 
        std::vector<Point2f> scene; 
    
        for (int i = 0; i < good_matches.size(); i++) 
        { 
         //-- Get the keypoints from the good matches 
         obj.push_back(keypoints_object[good_matches[i].queryIdx].pt); 
         scene.push_back(keypoints_scene[good_matches[i].trainIdx].pt); 
        } 
    
        Mat H = findHomography(obj, scene, CV_RANSAC); 
    
        //-- Get the corners from the image_1 (the object to be "detected") 
        std::vector<Point2f> obj_corners(4); 
        obj_corners[0] = cvPoint(0, 0); obj_corners[1] = cvPoint(img_object.cols, 0); 
        obj_corners[2] = cvPoint(img_object.cols, img_object.rows); obj_corners[3] = cvPoint(0, img_object.rows); 
        std::vector<Point2f> scene_corners(4); 
    
        perspectiveTransform(obj_corners, scene_corners, H); 
    
        //-- Draw lines between the corners (the mapped object in the scene - image_2) 
        line(img_matches, scene_corners[0] + Point2f(img_object.cols, 0), scene_corners[1] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4); 
        line(img_matches, scene_corners[1] + Point2f(img_object.cols, 0), scene_corners[2] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4); 
        line(img_matches, scene_corners[2] + Point2f(img_object.cols, 0), scene_corners[3] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4); 
        line(img_matches, scene_corners[3] + Point2f(img_object.cols, 0), scene_corners[0] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4); 
    
        //-- Show detected matches 
        imshow("Good Matches & Object detection", img_matches); 
    
        waitKey(0); 
        return 0; 
    } 
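
The 3 * min_dist filter above is only a quick heuristic. A common alternative is Lowe's ratio test via knnMatch; the sketch below reuses the matcher and descriptor matrices from the example above, and the 0.75 threshold is an assumption rather than a value from the original answer:

    // request the two best matches per descriptor instead of a single match 
    std::vector<std::vector<DMatch> > knn_matches; 
    matcher->knnMatch(descriptors_object, descriptors_scene, knn_matches, 2); 
    
    std::vector<DMatch> ratio_matches; 
    for (size_t i = 0; i < knn_matches.size(); i++) 
    { 
        // keep a match only if it is clearly better than the second-best candidate 
        if (knn_matches[i].size() == 2 && 
            knn_matches[i][0].distance < 0.75f * knn_matches[i][1].distance) 
        { 
            ratio_matches.push_back(knn_matches[i][0]); 
        } 
    } 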
    

UPDATE

OpenCV 3.0.0 has a different API.

You can find a list of the non-patented feature detectors and descriptor extractors in the OpenCV documentation.

    #include <iostream> 
    #include <opencv2/opencv.hpp> 
    
    using namespace cv; 
    
    /** @function main */ 
    int main(int argc, char** argv) 
    { 
    
        Mat img_object = imread("D:\\SO\\img\\box.png", IMREAD_GRAYSCALE); 
        Mat img_scene = imread("D:\\SO\\img\\box_in_scene.png", IMREAD_GRAYSCALE); 
    
        if (!img_object.data || !img_scene.data) 
        { 
         std::cout << " --(!) Error reading images " << std::endl; return -1; 
        } 
    
        //-- Step 1: Detect the keypoints using the ORB detector 
        Ptr<FeatureDetector> detector = ORB::create(); 
    
        std::vector<KeyPoint> keypoints_object, keypoints_scene; 
    
        detector->detect(img_object, keypoints_object); 
        detector->detect(img_scene, keypoints_scene); 
    
        //-- Step 2: Calculate descriptors (feature vectors) 
        Ptr<DescriptorExtractor> extractor = ORB::create(); 
    
        Mat descriptors_object, descriptors_scene; 
    
        extractor->compute(img_object, keypoints_object, descriptors_object); 
        extractor->compute(img_scene, keypoints_scene, descriptors_scene); 
    
        //-- Step 3: Match descriptor vectors using the BruteForce matcher 
        Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce"); 
        std::vector<DMatch> matches; 
        matcher->match(descriptors_object, descriptors_scene, matches); 
    
        double max_dist = 0; double min_dist = 100; 
    
        //-- Quick calculation of max and min distances between keypoints 
        for (int i = 0; i < descriptors_object.rows; i++) 
        { 
         double dist = matches[i].distance; 
         if (dist < min_dist) min_dist = dist; 
         if (dist > max_dist) max_dist = dist; 
        } 
    
        printf("-- Max dist : %f \n", max_dist); 
        printf("-- Min dist : %f \n", min_dist); 
    
        //-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist) 
        std::vector<DMatch> good_matches; 
    
        for (int i = 0; i < descriptors_object.rows; i++) 
        { 
         if (matches[i].distance < 3 * min_dist) 
         { 
          good_matches.push_back(matches[i]); 
         } 
        } 
    
        Mat img_matches; 
    
        drawMatches(img_object, keypoints_object, img_scene, keypoints_scene, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), std::vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS); 
    
        //-- Localize the object 
        std::vector<Point2f> obj; 
        std::vector<Point2f> scene; 
    
        for (int i = 0; i < good_matches.size(); i++) 
        { 
         //-- Get the keypoints from the good matches 
         obj.push_back(keypoints_object[good_matches[i].queryIdx].pt); 
         scene.push_back(keypoints_scene[good_matches[i].trainIdx].pt); 
        } 
    
        Mat H = findHomography(obj, scene, RANSAC); 
    
        //-- Get the corners from the image_1 (the object to be "detected") 
        std::vector<Point2f> obj_corners(4); 
        obj_corners[0] = Point2f(0, 0); obj_corners[1] = Point2f(img_object.cols, 0); 
        obj_corners[2] = Point2f(img_object.cols, img_object.rows); obj_corners[3] = Point2f(0, img_object.rows); 
        std::vector<Point2f> scene_corners(4); 
    
        perspectiveTransform(obj_corners, scene_corners, H); 
    
        //-- Draw lines between the corners (the mapped object in the scene - image_2) 
        line(img_matches, scene_corners[0] + Point2f(img_object.cols, 0), scene_corners[1] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4); 
        line(img_matches, scene_corners[1] + Point2f(img_object.cols, 0), scene_corners[2] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4); 
        line(img_matches, scene_corners[2] + Point2f(img_object.cols, 0), scene_corners[3] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4); 
        line(img_matches, scene_corners[3] + Point2f(img_object.cols, 0), scene_corners[0] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4); 
    
        //-- Show detected matches 
        imshow("Good Matches & Object detection", img_matches); 
    
        waitKey(0); 
        return 0; 
    } 
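
In OpenCV 3.x, detection and description can also be done in a single detectAndCompute call, and a BFMatcher constructed with NORM_HAMMING matches ORB's binary descriptors directly. A minimal sketch, assuming placeholder image paths:

    #include <iostream> 
    #include <opencv2/opencv.hpp> 
    
    int main() 
    { 
        // placeholder paths; replace with real images 
        cv::Mat img_object = cv::imread("box.png", cv::IMREAD_GRAYSCALE); 
        cv::Mat img_scene = cv::imread("box_in_scene.png", cv::IMREAD_GRAYSCALE); 
        if (img_object.empty() || img_scene.empty()) return -1; 
    
        cv::Ptr<cv::ORB> orb = cv::ORB::create(); 
    
        std::vector<cv::KeyPoint> kp_object, kp_scene; 
        cv::Mat desc_object, desc_scene; 
    
        // detect keypoints and compute descriptors in one call 
        orb->detectAndCompute(img_object, cv::noArray(), kp_object, desc_object); 
        orb->detectAndCompute(img_scene, cv::noArray(), kp_scene, desc_scene); 
    
        // Hamming distance is the natural metric for ORB's binary descriptors 
        cv::BFMatcher matcher(cv::NORM_HAMMING); 
        std::vector<cv::DMatch> matches; 
        matcher.match(desc_object, desc_scene, matches); 
    
        std::cout << "matches: " << matches.size() << std::endl; 
        return 0; 
    } 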
    
Comments

  • Which OpenCV version are you using? This works fine in OpenCV 2.4.9 and 2.4.11. – Miki
  • The latest one available for iOS on the OpenCV website. – denis631
  • No member named 'create' in 'cv::Feature2d'. – denis631