Skip to content

Commit 14ea11c

Browse files
committed
Add two more examples
1 parent 8f3772f commit 14ea11c

3 files changed

Lines changed: 109 additions & 3 deletions

File tree

README.md

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -73,7 +73,6 @@ In addition to tutorial slides, example codes are provided in the purpose of edu
7373
* Structure-from-Motion (SfM)
7474
* Global SfM
7575
* Incremental SfM
76-
* COLMAP
7776
* **Section 7. Visual SLAM and Odometry**
7877

7978

@@ -84,8 +83,8 @@ In addition to tutorial slides, example codes are provided in the purpose of edu
8483

8584

8685
### Authors
87-
* [Sunglok Choi](http://sites.google.com/site/sunglok/) (sunglok@hanmail.net)
88-
* [JunHyeok Choi](https://mint-lab.github.io/members/) (dkwnsgur12@gmail.com)
86+
* [Sunglok Choi](https://mint-lab.github.io/sunglok/)
87+
* [JunHyeok Choi](https://github.com/cjh1995-ros)
8988

9089

9190

Lines changed: 63 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,63 @@
1+
import numpy as np
2+
import cv2 as cv
3+
import random
4+
5+
def mouse_event_handler(event, x, y, flags, param):
    '''Collect clicked pixels: append `(x, y)` to the list `param` on a left-button press.'''
    if event != cv.EVENT_LBUTTONDOWN:
        return
    param.append((x, y))
8+
9+
def draw_straight_line(img, line, color, thickness=1):
    '''Draw the infinite line `ax + by + c = 0` on `img`, clipped to the image borders.

    `line` holds the coefficients (a, b, c). The two endpoints come from
    intersecting the line with the image edges along its dominant direction,
    which avoids dividing by the smaller (possibly near-zero) coefficient.
    '''
    assert img.ndim >= 2
    h, w = img.shape[0], img.shape[1]
    a, b, c = line  # Line: ax + by + c = 0
    if abs(a) > abs(b):
        # Mostly-vertical line: solve x = -(b*y + c) / a at the top (y=0) and bottom (y=h) edges
        endpoints = [(int(c / -a), 0), (int((b*h + c) / -a), h)]
    else:
        # Mostly-horizontal line: solve y = -(a*x + c) / b at the left (x=0) and right (x=w) edges
        endpoints = [(0, int(c / -b)), (w, int((a*w + c) / -b))]
    cv.line(img, endpoints[0], endpoints[1], color, thickness)
20+
21+
if __name__ == '__main__':
    # Load two images
    img1 = cv.imread('../data/KITTI07/image_0/000000.png', cv.IMREAD_COLOR)
    img2 = cv.imread('../data/KITTI07/image_0/000023.png', cv.IMREAD_COLOR)
    assert (img1 is not None) and (img2 is not None), 'Cannot read the given images'
    # Note) `F` is derived from `fundamental_mat_estimation.py`.
    F = np.array([[ 3.34638533e-07, 7.58547151e-06, -2.04147752e-03],
                  [-5.83765868e-06, 1.36498636e-06, 2.67566877e-04],
                  [ 1.45892349e-03, -4.37648316e-03, 1.00000000e+00]])

    # Register mouse handlers (each appends clicked points to its own list) and show both images
    wnd1_name, wnd2_name = 'Epipolar Line: Image #1', 'Epipolar Line: Image #2'
    img1_pts, img2_pts = [], []
    cv.namedWindow(wnd1_name)
    cv.namedWindow(wnd2_name)
    cv.setMouseCallback(wnd1_name, mouse_event_handler, img1_pts)
    cv.setMouseCallback(wnd2_name, mouse_event_handler, img2_pts)
    cv.imshow(wnd1_name, img1)
    cv.imshow(wnd2_name, img2)

    # Get a point from an image and draw its corresponding epipolar line on the other image
    while True:
        if len(img1_pts) > 0:
            for x, y in img1_pts:
                # Mark each click with a random color shared by its epipolar line
                color = (random.randrange(256), random.randrange(256), random.randrange(256))
                cv.circle(img1, (x, y), 4, color, -1)
                # Epipolar line of the clicked point on image #2: l2 = F @ x1 (homogeneous coordinates)
                epipolar_line = F @ [[x], [y], [1]]
                draw_straight_line(img2, epipolar_line, color, 2)
            img1_pts.clear()  # Consume the pending clicks so each is drawn only once
        if len(img2_pts) > 0:
            for x, y in img2_pts:
                color = (random.randrange(256), random.randrange(256), random.randrange(256))
                cv.circle(img2, (x, y), 4, color, -1)
                # Epipolar line of the clicked point on image #1: l1 = F.T @ x2
                epipolar_line = F.T @ [[x], [y], [1]]
                draw_straight_line(img1, epipolar_line, color, 2)
            img2_pts.clear()
        cv.imshow(wnd2_name, img2)
        cv.imshow(wnd1_name, img1)
        key = cv.waitKey(10)
        if key == 27: # ESC
            break

    cv.destroyAllWindows()
Lines changed: 44 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,44 @@
1+
import numpy as np
2+
import cv2 as cv
3+
4+
# Load two images
img1 = cv.imread('../data/KITTI07/image_0/000000.png')
img2 = cv.imread('../data/KITTI07/image_0/000023.png')
assert (img1 is not None) and (img2 is not None), 'Cannot read the given images'
f, cx, cy = 707.0912, 601.8873, 183.1104 # From the KITTI dataset
K = np.array([[f, 0, cx], [0, f, cy], [0, 0, 1]])  # Camera (intrinsic) matrix

# Retrieve matching points (BRISK binary features + brute-force Hamming matching)
brisk = cv.BRISK_create()
keypoints1, descriptors1 = brisk.detectAndCompute(img1, None)
keypoints2, descriptors2 = brisk.detectAndCompute(img2, None)

fmatcher = cv.DescriptorMatcher_create('BruteForce-Hamming')
match = fmatcher.match(descriptors1, descriptors2)

# Calculate the fundamental matrix robustly with RANSAC
pts1 = np.float32([keypoints1[m.queryIdx].pt for m in match])
pts2 = np.float32([keypoints2[m.trainIdx].pt for m in match])
F, inlier_mask = cv.findFundamentalMat(pts1, pts2, cv.FM_RANSAC, 0.5, 0.999)
print(f'* F = {F}')
print(f'* The number of inliers = {sum(inlier_mask.ravel())}')

# Extract relative camera pose between two images
E = K.T @ F @ K  # The essential matrix derived from F and the known intrinsics
positive_num, R, t, positive_mask = cv.recoverPose(E, pts1, pts2, K, mask=inlier_mask)
print(f'* R = {R}')
print(f'* t = {t}')
print(f'* The position of Image #2 = {-R.T @ t}') # [-0.57, 0.09, 0.82]
print(f'* The number of positive-depth inliers = {sum(positive_mask.ravel())}')

# Show the matched images
img_matched = cv.drawMatches(img1, keypoints1, img2, keypoints2, match, None, None, None,
                             matchesMask=inlier_mask.ravel().tolist()) # Remove `matchesMask` if you want to show all putative matches
cv.namedWindow('Fundamental Matrix Estimation', cv.WINDOW_NORMAL)
cv.imshow('Fundamental Matrix Estimation', img_matched)
cv.waitKey(0)
cv.destroyAllWindows()

0 commit comments

Comments
 (0)