OpenCV can't find ORB

In my previous question I learned that, to use OpenCV Python with external modules such as SIFT, I need to install opencv-contrib. However, in my project I want to use ORB or something similar. Neither cv2.ORB() nor cv2.xfeatures2d.ORB_create() works, and no other combination of these calls does either.

As anyone on SO knows, OpenCV has rather poor documentation for its Python API.

How do I use ORB with OpenCV's Python bindings?

MWE:

#!/usr/bin/python2.7 
import numpy as np 
import cv2 
from matplotlib import pyplot as plt 

img = cv2.imread('smallburger.jpg',0) 

# Initiate ORB detector 
orb = cv2.ORB() 

# find the keypoints with ORB 
kp = orb.detect(img,None) 

# compute the descriptors with ORB 
kp, des = orb.compute(img, kp) 

# draw only keypoints location,not size and orientation 
img2 = cv2.drawKeypoints(img,kp,color=(0,255,0), flags=0) 
plt.imshow(img2),plt.show() 

CLI output:

Traceback (most recent call last): 
    File "./mwe.py", line 9, in <module> 
    orb = cv2.ORB() 
AttributeError: 'module' object has no attribute 'ORB' 

Answer


In OpenCV 3.x the feature detectors are created through factory functions, so ORB is constructed with cv2.ORB_create() rather than cv2.ORB(). Here is code I have from my own practice:

def featureMatchingBF(self, img1, img2, method):
    corners = cv2.goodFeaturesToTrack(img1, 7, 0.05, 25)
    corners = np.float32(corners)

    # mark the strongest corners on the first image
    for item in corners:
        x, y = item[0]
        cv2.circle(img1, (int(x), int(y)), 5, (255, 0, 0))

    cv2.imshow("Top 'k' features", img1)
    cv2.waitKey()

    #=======================================================================
    # (H1, hogImage1) = feature.hog(img1, orientations=9, pixels_per_cell=(6, 6),
    # cells_per_block=(2, 2), transform_sqrt=True, visualise=True)
    # hogImage1 = exposure.rescale_intensity(hogImage1, out_range=(0, 255))
    # hogImage1 = hogImage1.astype("uint8")
    # cv2.imshow("Input:", img1)
    # cv2.imshow("HOG Image", hogImage1)
    # cv2.waitKey(0)
    #=======================================================================
    if method == "ORB":
        # Compute keypoints and descriptors for both images
        kp1, des1 = self.computeORB(img1)
        kp2, des2 = self.computeORB(img2)
        #===================================================================
        # for i, j in zip(kp1, kp2):
        #     print("KP1:", i.pt)
        #     print("KP2:", j.pt)
        #===================================================================
        # use a brute-force matcher to match descriptor1 and descriptor2
        bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
        # Match descriptors.
        matches = bf.match(des1, des2)

        # Sort them in the order of their distance.
        matches = sorted(matches, key=lambda x: x.distance)
        self.filterMatches(matches)

        # Draw the first 20 matches.
        img3 = cv2.drawMatches(img1, kp1, img2, kp2, matches[:20], flags=2, outImg=img1)

        # show result
        cv2.imshow("Matches", img3)
        cv2.waitKey(0)

def computeORB(self, img):
    # Initiate ORB detector
    orb = cv2.ORB_create()

    # find keypoints
    kp = orb.detect(img, None)

    # compute the descriptors
    kp, des = orb.compute(img, kp)

    # draw only keypoint locations, not size and orientation
    img2 = cv2.drawKeypoints(img, kp, None, color=(0, 255, 0), flags=0)
    #plt.imshow(img2), plt.show()

    return kp, des
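
For reference, here is a minimal, self-contained sketch of the question's MWE adapted to the OpenCV 3.x API (assuming an opencv-python 3.x or later build, where cv2.ORB_create() replaces cv2.ORB() and cv2.drawKeypoints() takes an explicit outImage argument):

import cv2
from matplotlib import pyplot as plt

# load the test image as grayscale
img = cv2.imread('smallburger.jpg', 0)

# Initiate ORB detector via the OpenCV 3.x factory function
orb = cv2.ORB_create()

# find the keypoints with ORB
kp = orb.detect(img, None)

# compute the descriptors with ORB
kp, des = orb.compute(img, kp)

# draw only keypoint locations, not size and orientation
# (pass None as outImage so OpenCV allocates the output)
img2 = cv2.drawKeypoints(img, kp, None, color=(0, 255, 0), flags=0)
plt.imshow(img2), plt.show()

If you don't need the intermediate keypoint list, the detect/compute pair can also be collapsed into a single call with kp, des = orb.detectAndCompute(img, None).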