-
Notifications
You must be signed in to change notification settings - Fork 9
Expand file tree
/
Copy pathkeypoint_utils.py
More file actions
74 lines (60 loc) · 2.65 KB
/
keypoint_utils.py
File metadata and controls
74 lines (60 loc) · 2.65 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
from pyraws.utils.img_processing_utils import (
is_2D_image,
three_channel_to_grayscale_img,
normalize_img,
)
import numpy as np
import cv2
def get_matched_keypoints_sift(
    img_1: np.ndarray,
    img_2: np.ndarray,
    min_matches: int = 10,
    threshold_lowe_ratio: float = 0.7,
    max_val_grayscale: int = 255,
):
    """Detect keypoints in two different images and get keypoints that match using SIFT and Lowe's ratio test.

    Args:
        img_1 (np.ndarray): first image.
        img_2 (np.ndarray): second image.
        min_matches (int, optional): Minimum number of matches to get. Defaults to 10.
        threshold_lowe_ratio (float, optional): Threshold for Lowe's ratio test. It should be a number in [0, 1]. Defaults to 0.7.
        max_val_grayscale (int, optional): Maximum image grayscale value. Defaults to 255.

    Raises:
        ValueError: There are less than 'min_matches' keypoints matches.

    Returns:
        (np.ndarray, np.ndarray): coordinates of matching keypoints, each of shape (n_matches, 2).
    """
    # Convert images to grayscale if they are not already.
    if not is_2D_image(img_1):
        img_1 = three_channel_to_grayscale_img(img_1)
    if not is_2D_image(img_2):
        img_2 = three_channel_to_grayscale_img(img_2)
    # Rescale to [0, max_val_grayscale] and cast to uint8: OpenCV SIFT expects 8-bit input.
    img_1 = normalize_img(img_1, np.min(img_1), np.max(img_1), 0, max_val_grayscale)
    img_2 = normalize_img(img_2, np.min(img_2), np.max(img_2), 0, max_val_grayscale)
    img_1 = img_1.astype(np.uint8)
    img_2 = img_2.astype(np.uint8)
    sift = cv2.SIFT_create()
    # Find keypoints and descriptors.
    kp1, d1 = sift.detectAndCompute(img_1, None)
    kp2, d2 = sift.detectAndCompute(img_2, None)
    # detectAndCompute returns descriptors=None when no keypoints are found, and
    # knnMatch with k=2 needs at least 2 descriptors on each side. Fail with the
    # documented ValueError instead of an opaque cv2 error.
    if d1 is None or d2 is None or len(d1) < 2 or len(d2) < 2:
        raise ValueError("Not enough matches between keypoints were found.")
    # Match features between the two images with a Brute Force matcher
    # (Euclidean distance is the default norm for float descriptors like SIFT).
    matcher = cv2.BFMatcher()
    matches = matcher.knnMatch(d1, d2, k=2)
    # Keep only the good matches as per Lowe's ratio test. knnMatch may return
    # fewer than 2 neighbors for some queries, so guard the pair length before
    # applying the ratio test.
    lowe_filtered_matches = [
        pair[0]
        for pair in matches
        if len(pair) == 2 and pair[0].distance < threshold_lowe_ratio * pair[1].distance
    ]
    if len(lowe_filtered_matches) < min_matches:
        raise ValueError("Not enough matches between keypoints were found.")
    # Collect the (x, y) coordinates of each matched keypoint pair.
    # np.array of 2-tuples of floats yields the same float64 (n, 2) matrices
    # as the original pre-allocated buffers.
    p1 = np.array([kp1[m.queryIdx].pt for m in lowe_filtered_matches], dtype=np.float64)
    p2 = np.array([kp2[m.trainIdx].pt for m in lowe_filtered_matches], dtype=np.float64)
    return p1, p2