import cv2
import numpy as np

# global show_cv because I didn't want to have show_cv as an input to every function
show_cv = None


def init_show_cv(val):
    global show_cv
    show_cv = val

def find_longest_lines(img):
    gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # sobel gradients
    sobel_x = cv2.Sobel(gray_img, cv2.CV_64F, 1, 0, ksize=3)
    sobel_y = cv2.Sobel(gray_img, cv2.CV_64F, 0, 1, ksize=3)
    abs_sobel_x = np.absolute(sobel_x)
    abs_sobel_y = np.absolute(sobel_y)

    # threshold on abs values of sobel gradients and combine them
    _, threshold_x = cv2.threshold(abs_sobel_x, 17, 255, cv2.THRESH_BINARY)
    _, threshold_y = cv2.threshold(abs_sobel_y, 17, 255, cv2.THRESH_BINARY)
    combined_threshold = cv2.bitwise_or(threshold_x, threshold_y)
    combined_threshold = np.uint8(combined_threshold)  # median blur needs 8-bit input
    combined_threshold = cv2.medianBlur(combined_threshold, 5)  # gets rid of outliers so weird diagonal lines don't get made
    edges = combined_threshold
    # edges = cv2.Canny(gray_img, 30, 150, apertureSize=3)  # didn't work as well

    if show_cv:
        cv2.imshow('Sobel Filter', edges)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 200, minLineLength=400, maxLineGap=15)

    vertical_lines = []
    horizontal_lines = []

    # separate horizontal and vertical lines
    if lines is not None:
        for line in lines:
            x1, y1, x2, y2 = line[0]
            if abs(x2 - x1) < abs(y2 - y1):  # vertical line
                vertical_lines.append(line)
            else:
                horizontal_lines.append(line)

    # filter lines too close to each other
    filtered_vertical = filter_lines(vertical_lines, 50)
    filtered_horizontal = filter_lines(horizontal_lines, 50)

    # sorted_vertical = sorted(filtered_vertical, key=lambda line: min(line[0][1], line[0][3]))
    # sorted_horizontal = sorted(filtered_horizontal, key=lambda line: min(line[0][0], line[0][2]))

    return filtered_vertical, filtered_horizontal

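
# Illustrative usage sketch (not part of the original script): how find_longest_lines
# might be called on a photo of the board. "board.jpg" is a hypothetical placeholder path.
def _demo_find_longest_lines(path="board.jpg"):
    img = cv2.imread(path)
    if img is None:
        raise FileNotFoundError(f"no image at {path} - this path is only an example")
    vertical, horizontal = find_longest_lines(img)
    print(len(vertical), "vertical and", len(horizontal), "horizontal segments survived filtering")
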
def filter_lines(lines, min_distance):
    filtered_lines = []

    # filter out lines too close to each other
    # (this assumes lines are around the same size and parallel)
    # (extremely simplified to improve computational speed because this is all we need)
    for line1 in lines:
        x1, y1, x2, y2 = line1[0]
        line1_x_avg = (x1 + x2) / 2
        line1_y_avg = (y1 + y2) / 2
        keep_line = True
        for line2 in filtered_lines:
            x3, y3, x4, y4 = line2[0]
            line2_x_avg = (x3 + x4) / 2
            line2_y_avg = (y3 + y4) / 2
            # calculate dist between the midpoints of the 2 lines
            dist = np.sqrt((line1_x_avg - line2_x_avg)**2 + (line1_y_avg - line2_y_avg)**2)
            if dist < min_distance:
                keep_line = False
                break
        if keep_line:
            filtered_lines.append(line1)

    return filtered_lines

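
# Illustration (not in the original file): filter_lines expects segments shaped the way
# cv2.HoughLinesP returns them, i.e. arrays of shape (1, 4) holding (x1, y1, x2, y2).
# Segments whose midpoints fall within min_distance of an already-kept segment are dropped.
def _demo_filter_lines():
    segments = [
        np.array([[100, 0, 100, 400]]),  # vertical segment at x = 100
        np.array([[110, 0, 110, 400]]),  # midpoint only 10 px away -> dropped
        np.array([[300, 0, 300, 400]]),  # far from the first -> kept
    ]
    kept = filter_lines(segments, 50)
    print(len(kept))  # expected output: 2
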
def detect_board(img):
    vertical_lines, horizontal_lines = find_longest_lines(img)
    print("# of Vertical:", len(vertical_lines))
    print("# of Horizontal:", len(horizontal_lines))

    height, width, _ = img.shape
    black_img = np.zeros((height, width), dtype=np.uint8)

    # create bitmasks for vert and horiz so we can get lines and intersections
    vertical_mask = np.zeros((height, width), dtype=np.uint8)
    horizontal_mask = np.zeros((height, width), dtype=np.uint8)
    for line in vertical_lines:
        x1, y1, x2, y2 = line[0]
        cv2.line(vertical_mask, (x1, y1), (x2, y2), 255, 2)
    for line in horizontal_lines:
        x1, y1, x2, y2 = line[0]
        cv2.line(horizontal_mask, (x1, y1), (x2, y2), 255, 2)

    intersection = cv2.bitwise_and(vertical_mask, horizontal_mask)
    board_lines = cv2.bitwise_or(vertical_mask, horizontal_mask)

    contours, hierarchy = cv2.findContours(board_lines, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    intersection_points, hierarchy = cv2.findContours(intersection, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    if show_cv:
        board_lines_img = img.copy()
        cv2.drawContours(board_lines_img, contours, -1, (255, 255, 0), 2)
        cv2.drawContours(board_lines_img, intersection_points, -1, (0, 0, 255), 2)
        cv2.imshow('Lines of Board', board_lines_img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    # find the largest contour and get rid of it because it contains weird edges from lines
    max_area = 100000  # we're assuming the board is going to be big (hopefully to speed up computation on the raspberry pi)
    largest = -1
    # second_largest = -1
    # max_rect = None
    for i, contour in enumerate(contours):
        area = cv2.contourArea(contour)
        if area > max_area:
            max_area = area
            largest = i

    # "largest" is the index of the largest contour
    # get rid of the contour containing the edges of the lines (only if one was actually found)
    if largest != -1:
        contours = list(contours)
        contours.pop(largest)
        contours = tuple(contours)

    # thicken lines so that connections are made
    contour_mask = np.zeros((height, width), dtype=np.uint8)
    cv2.drawContours(contour_mask, contours, -1, 255, thickness=10)
    thick_contours, _ = cv2.findContours(contour_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # obtain the largest contour of the thickened lines (the border) and approximate a 4-sided polygon onto it
    max_area = 100000
    largest = -1
    max_rect = None
    for i, contour in enumerate(thick_contours):
        area = cv2.contourArea(contour)
        if area > max_area:
            epsilon = 0.05 * cv2.arcLength(contour, True)
            rect = cv2.approxPolyDP(contour, epsilon, True)  # uses the Douglas-Peucker algorithm (probably overkill)
            if len(rect) == 4:
                max_area = area
                largest = i
                max_rect = rect

    # perspective transform based on the rectangle outline of the board
    corners = max_rect.reshape(-1, 2)  # turn the rectangle into coordinate pairs
    tl = corners[1]  # FIND A BETTER WAY TO DO THIS - sorting wasn't working for some reason
    tr = corners[0]
    bl = corners[2]
    br = corners[3]

    src = np.float32([list(tl), list(tr), list(bl), list(br)])
    dest = np.float32([[0, 0], [width, 0], [0, height], [width, height]])
    M = cv2.getPerspectiveTransform(src, dest)
    Minv = cv2.getPerspectiveTransform(dest, src)

    warped_img = img.copy()
    warped_img = cv2.warpPerspective(np.uint8(warped_img), M, (width, height))

    warped_ip = img.copy()
    warped_ip = cv2.drawContours(warped_ip, intersection_points, -1, (0, 0, 255), 2)
    warped_ip = cv2.warpPerspective(np.uint8(warped_ip), M, (width, height))

    if show_cv:
        contours_img = img.copy()
        # for i in range(63):
        #     cv2.drawContours(contours_img, [sorted_contours[i]], -1, (255-4*i, 4*i, 0), 2)
        cv2.drawContours(contours_img, thick_contours, -1, (0, 255, 0), 2)
        cv2.drawContours(contours_img, [thick_contours[largest]], -1, (0, 0, 255), 2)
        cv2.drawContours(contours_img, [max_rect], -1, (255, 0, 0), 2)
        for corner in corners:
            x, y = corner.ravel()
            cv2.circle(contours_img, (x, y), 5, (0, 255, 255), -1)
        # cv2.circle(contours_img, (int(min_x), int(min_y)), 5, (255, 0, 0), -1)
        # cv2.circle(contours_img, (int(max_x), int(max_y)), 5, (255, 0, 0), -1)
        cv2.imshow('Contours', contours_img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

        cv2.imshow('Warped', warped_img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

        # cv2.imshow('Warped', warped_ip)
        # cv2.waitKey(0)
        # cv2.destroyAllWindows()