diff --git a/board_detector.py b/board_detector.py
new file mode 100644
index 0000000000000000000000000000000000000000..ee5b76e927f1816dd257f82b8f10ccc24e3221a6
--- /dev/null
+++ b/board_detector.py
@@ -0,0 +1,197 @@
+import cv2
+import numpy as np
+
+# global show_cv because I didn't want to pass show_cv into every function
+show_cv = None
+
+def init_show_cv(val):
+    global show_cv
+    show_cv = val
+
+def find_longest_lines(img):
+    gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+
+    # Sobel gradients
+    sobel_x = cv2.Sobel(gray_img, cv2.CV_64F, 1, 0, ksize=3)
+    sobel_y = cv2.Sobel(gray_img, cv2.CV_64F, 0, 1, ksize=3)
+    abs_sobel_x = np.absolute(sobel_x)
+    abs_sobel_y = np.absolute(sobel_y)
+
+    # threshold the absolute Sobel gradients and combine them
+    _, threshold_x = cv2.threshold(abs_sobel_x, 17, 255, cv2.THRESH_BINARY)
+    _, threshold_y = cv2.threshold(abs_sobel_y, 17, 255, cv2.THRESH_BINARY)
+    combined_threshold = cv2.bitwise_or(threshold_x, threshold_y)
+    combined_threshold = np.uint8(combined_threshold)  # medianBlur can't take float64 input
+    combined_threshold = cv2.medianBlur(combined_threshold, 5)  # removes outliers so spurious diagonal lines don't get detected
+    edges = combined_threshold
+    # edges = cv2.Canny(gray_img, 30, 150, apertureSize=3)  # didn't work as well
+
+    if (show_cv):
+        cv2.imshow('Sobel Filter', edges)
+        cv2.waitKey(0)
+        cv2.destroyAllWindows()
+
+    lines = cv2.HoughLinesP(edges, 1, np.pi/180, 200, minLineLength=400, maxLineGap=15)
+
+    vertical_lines = []
+    horizontal_lines = []
+
+    # separate horizontal and vertical lines
+    if lines is not None:
+        for line in lines:
+            x1, y1, x2, y2 = line[0]
+            if abs(x2 - x1) < abs(y2 - y1):  # vertical line
+                vertical_lines.append(line)
+            else:
+                horizontal_lines.append(line)
+
+    # filter out lines that are too close to each other
+    filtered_vertical = filter_lines(vertical_lines, 50)
+    filtered_horizontal = filter_lines(horizontal_lines, 50)
+
+    # sorted_vertical = sorted(filtered_vertical, key=lambda line: min(line[0][1], line[0][3]))
+    # sorted_horizontal = sorted(filtered_horizontal, key=lambda line: min(line[0][0], line[0][2]))
+
+    return filtered_vertical, filtered_horizontal
+
+def filter_lines(lines, min_distance):
+    filtered_lines = []
+
+    # filter out lines whose midpoints are too close to each other
+    # (this assumes lines are around the same size and parallel)
+    # (extremely simplified to keep computation cheap because this is all we need)
+    for line1 in lines:
+        x1, y1, x2, y2 = line1[0]
+        line1_x_avg = (x1 + x2) / 2
+        line1_y_avg = (y1 + y2) / 2
+        keep_line = True
+        for line2 in filtered_lines:
+            x3, y3, x4, y4 = line2[0]
+            line2_x_avg = (x3 + x4) / 2
+            line2_y_avg = (y3 + y4) / 2
+
+            # distance between the midpoints of the two lines
+            dist = np.sqrt((line1_x_avg - line2_x_avg)**2 + (line1_y_avg - line2_y_avg)**2)
+
+            if dist < min_distance:
+                keep_line = False
+                break
+
+        if keep_line:
+            filtered_lines.append(line1)
+
+    return filtered_lines
+
+
+def detect_board(img):
+    vertical_lines, horizontal_lines = find_longest_lines(img)
+    print("# of Vertical:", len(vertical_lines))
+    print("# of Horizontal:", len(horizontal_lines))
+
+    # create bitmasks for vertical and horizontal lines so we can get the grid lines and their intersections
+    height, width, _ = img.shape
+    vertical_mask = np.zeros((height, width), dtype=np.uint8)
+    horizontal_mask = np.zeros((height, width), dtype=np.uint8)
+
+    for line in vertical_lines:
+        x1, y1, x2, y2 = line[0]
+        cv2.line(vertical_mask, (x1, y1), (x2, y2), (255), 2)
+
+    for line in horizontal_lines:
+        x1, y1, x2, y2 = line[0]
+        cv2.line(horizontal_mask, (x1, y1), (x2, y2), (255), 2)
+
+    intersection = cv2.bitwise_and(vertical_mask, horizontal_mask)
+    board_lines = cv2.bitwise_or(vertical_mask, horizontal_mask)
+
+    contours, hierarchy = cv2.findContours(board_lines, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
+    intersection_points, hierarchy = cv2.findContours(intersection, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
+
+    if (show_cv):
+        board_lines_img = img.copy()
+        cv2.drawContours(board_lines_img, contours, -1, (255, 255, 0), 2)
+        cv2.drawContours(board_lines_img, intersection_points, -1, (0, 0, 255), 2)
+        cv2.imshow('Lines of Board', board_lines_img)
+        cv2.waitKey(0)
+        cv2.destroyAllWindows()
+
+    # find the largest contour and discard it because it contains stray edges from the lines
+    max_area = 100000  # we assume the board is big (hopefully speeds up computation on the Raspberry Pi)
+    largest = -1
+    # second_largest = -1
+    # max_rect = None
+    for i, contour in enumerate(contours):
+        area = cv2.contourArea(contour)
+        if area > max_area:
+            max_area = area
+            largest = i
+    # "largest" is the index of the largest contour
+
+    # get rid of the contour containing the edges of the lines
+    if largest != -1:
+        contours = list(contours)
+        contours.pop(largest)
+        contours = tuple(contours)
+
+    # thicken the lines so that connections are made
+    contour_mask = np.zeros((height, width), dtype=np.uint8)
+    cv2.drawContours(contour_mask, contours, -1, (255), thickness=10)
+    thick_contours, _ = cv2.findContours(contour_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+
+    # take the largest contour of the thickened lines (the border) and approximate a 4-sided polygon onto it
+    max_area = 100000
+    largest = -1
+    max_rect = None
+    for i, contour in enumerate(thick_contours):
+        area = cv2.contourArea(contour)
+        if area > max_area:
+            epsilon = 0.05 * cv2.arcLength(contour, True)
+            rect = cv2.approxPolyDP(contour, epsilon, True)  # Douglas-Peucker algorithm (probably overkill)
+            if (len(rect) == 4):
+                max_area = area
+                largest = i
+                max_rect = rect
+
+    # perspective transform based on the rectangular outline of the board
+    corners = max_rect.reshape(-1, 2)  # turn the rectangle into coordinate pairs
+    tl = corners[1]  # FIND A BETTER WAY TO DO THIS - sorting wasn't working for some reason
+    tr = corners[0]
+    bl = corners[2]
+    br = corners[3]
+    src = np.float32([list(tl), list(tr), list(bl), list(br)])
+    dest = np.float32([[0, 0], [width, 0], [0, height], [width, height]])
+    M = cv2.getPerspectiveTransform(src, dest)
+    Minv = cv2.getPerspectiveTransform(dest, src)
+    warped_img = img.copy()
+    warped_img = cv2.warpPerspective(np.uint8(warped_img), M, (width, height))
+
+    warped_ip = img.copy()
+    warped_ip = cv2.drawContours(warped_ip, intersection_points, -1, (0, 0, 255), 2)
+    warped_ip = cv2.warpPerspective(np.uint8(warped_ip), M, (width, height))
+
+    if (show_cv):
+        contours_img = img.copy()
+        # for i in range(63):
+        #     cv2.drawContours(contours_img, [sorted_contours[i]], -1, (255-4*i, 4*i, 0), 2)
+        cv2.drawContours(contours_img, thick_contours, -1, (0, 255, 0), 2)
+        cv2.drawContours(contours_img, [thick_contours[largest]], -1, (0, 0, 255), 2)
+        cv2.drawContours(contours_img, [max_rect], -1, (255, 0, 0), 2)
+        for corner in corners:
+            x, y = corner.ravel()
+            cv2.circle(contours_img, (x, y), 5, (0, 255, 255), -1)
+        # cv2.circle(contours_img, (int(min_x), int(min_y)), 5, (255, 0, 0), -1)
+        # cv2.circle(contours_img, (int(max_x), int(max_y)), 5, (255, 0, 0), -1)
+        cv2.imshow('Contours', contours_img)
+        cv2.waitKey(0)
+        cv2.destroyAllWindows()
+
+        cv2.imshow('Warped', warped_img)
+        cv2.waitKey(0)
+        cv2.destroyAllWindows()
+
+        # cv2.imshow('Warped', warped_ip)
+        # cv2.waitKey(0)
+        # cv2.destroyAllWindows()
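
A note on the hard-coded corner indices near the end of detect_board (the "FIND A BETTER WAY TO DO THIS" comment): cv2.approxPolyDP gives no guarantee about the order of the four points, so a common alternative is to order them by the sums and differences of their coordinates. A minimal sketch; order_corners is a hypothetical helper, not something defined in this diff:

import numpy as np

def order_corners(pts):
    # pts: the 4x1x2 array from cv2.approxPolyDP (or anything reshapeable to 4x2)
    pts = np.asarray(pts, dtype=np.float32).reshape(4, 2)
    s = pts.sum(axis=1)               # x + y: smallest at top-left, largest at bottom-right
    d = np.diff(pts, axis=1).ravel()  # y - x: smallest at top-right, largest at bottom-left
    tl, br = pts[np.argmin(s)], pts[np.argmax(s)]
    tr, bl = pts[np.argmin(d)], pts[np.argmax(d)]
    return np.float32([tl, tr, bl, br])  # same order as src in detect_board

With something like this, src = order_corners(max_rect) would replace the fixed corners[1], corners[0], corners[2], corners[3] assignment, assuming the board isn't rotated close to 45 degrees in the frame.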
diff --git a/color_analyzer.py b/color_analyzer.py
new file mode 100644
index 0000000000000000000000000000000000000000..a485480e362373324307368de31b44b6668848fd
--- /dev/null
+++ b/color_analyzer.py
@@ -0,0 +1,10 @@
+import cv2
+import chess
+
+def calibrate_colors(img, show_cv):
+    # TODO
+    pass
+
+def find_pieces(img, show_cv):
+    # TODO - do cv stuff, write this as uci. then convert uci to fen
+    pass
\ No newline at end of file
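
The find_pieces TODO says to do the CV work, write the detected move as UCI, then convert it to FEN. One way to get the UCI string is to compare square occupancy before and after the player's move and pick the legal move consistent with the change. A rough sketch under that assumption; occupancy_to_uci and the 8x8 boolean grid format are hypothetical, not part of this diff:

import chess

def occupancy_to_uci(before, after, board):
    # before/after: 8x8 lists of booleans, True if a piece sits on the square,
    # indexed [rank][file] to match python-chess square numbering (assumed format)
    def occupied(grid, sq):
        return grid[chess.square_rank(sq)][chess.square_file(sq)]

    for move in board.legal_moves:
        if occupied(after, move.from_square):
            continue  # the piece never left this square
        if occupied(after, move.to_square):
            return move.uci()  # source vacated, destination occupied: plausible match
    return None

The FEN conversion then comes from python-chess itself: board.push(chess.Move.from_uci(uci)) followed by board.fen(). Castling vacates two squares and a promotion piece can't be identified from occupancy alone, so both would need special-casing.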
diff --git a/game.py b/game.py
new file mode 100644
index 0000000000000000000000000000000000000000..ef7db3f5812aa31e32422e4fd694b4c94588d90c
--- /dev/null
+++ b/game.py
@@ -0,0 +1,90 @@
+import argparse
+import chess
+import chess.pgn
+from board_detector import detect_board
+from board_detector import init_show_cv
+import color_analyzer
+import move_translator
+import cv2
+import os
+
+class ChessGame:
+    def __init__(self, difficulty, show_cv, test_img=None):
+        self.board = chess.Board()
+        self.difficulty = difficulty
+        self.show_cv = show_cv
+        self.test_img = test_img
+
+    def start_game(self):
+        print(f"Starting chess game (difficulty: {self.difficulty})")
+
+        # TODO - call board initialization in board_detector and color calibration in color_analyzer,
+        # then loop until checkmate. also handle illegal moves (writing to a screen if we end up
+        # doing that, or just LEDs)
+
+        init_show_cv(self.show_cv)
+
+        if (self.test_img):
+            img_path = os.path.join('test_images', self.test_img)
+            img = cv2.imread(img_path)
+        else:
+            img = cv2.imread('test_images/board1.jpg')  # TODO - CHANGE TO RECEIVE INPUT FROM THE CAMERA
+        img = cv2.resize(img, (512, 512))
+
+        if (self.show_cv):
+            cv2.imshow('Original Image Before Processing', img)
+            cv2.waitKey(0)
+            cv2.destroyAllWindows()
+
+        detect_board(img)
+
+        while(1):  # game loop
+            self.player_turn()
+            # handle cheating
+            if self.board.is_checkmate():
+                break
+
+            quit()  # TODO - REMOVE
+
+            self.ai_turn()
+            if self.board.is_checkmate():
+                break
+
+        # game is over
+
+    def player_turn(self):
+        # TODO - wait for the user button, then check for a valid move. loop until a valid move has been made
+
+        # while(1):  # loop until a legal move has been made
+        #     cur_state = "rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1"
+        #     potential_next_state = "rnbqkbnr/pppppppp/8/8/4P3/8/PPPP1PPP/RNBQKBNR b KQkq - 0 1"
+        #     board = chess.Board(cur_state)
+        #     next_board = chess.Board(potential_next_state)
+        #     move = next_board.peek()  # NOTHING ON STACK SO DOESN'T WORK
+        #     if next_board.is_legal(move):
+        #         board.set_fen(potential_next_state)
+        #         print("Move executed successfully.")
+        #     else:
+        #         print("Illegal move. Move not executed.")
+
+        pass
+
+    def ai_turn(self):
+        # TODO
+        # 1. use find_pieces in color_analyzer
+        # 2. update the internal representation of the board
+        # 3. give the position to python-chess and get the best move
+        # 4. call move_translator with the best move as its argument
+        pass
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description="AI Chess Robot with Computer Vision")
+    parser.add_argument("--difficulty", choices=["easy", "medium", "hard"],
+                        default="medium", help="Chess AI difficulty (how far it looks ahead)")
+    parser.add_argument("--show_cv", action="store_true", help="Show OpenCV images as processing occurs during the game")
+    parser.add_argument("--test_img", help="If specified, use this image from the test_images folder rather than camera input")
+    args = parser.parse_args()
+
+    game = ChessGame(args.difficulty, args.show_cv, args.test_img)
+    game.start_game()
\ No newline at end of file
diff --git a/move_translator.py b/move_translator.py
new file mode 100644
index 0000000000000000000000000000000000000000..962b99ab62ca970071b08e364172aa3e45ec27fa
--- /dev/null
+++ b/move_translator.py
@@ -0,0 +1,5 @@
+
+
+def move_translator():
+    # TODO
+    pass
\ No newline at end of file
diff --git a/test_images/board1.jpg b/test_images/board1.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..787b25e5a7d9bb7f670483f5661ad97ee9957901
Binary files /dev/null and b/test_images/board1.jpg differ
diff --git a/test_images/board10.jpg b/test_images/board10.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..c2a87f12e8252bce035243b8a9f6754298dd63af
Binary files /dev/null and b/test_images/board10.jpg differ
diff --git a/test_images/board2.jpg b/test_images/board2.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..9a0d9101b317c2c914bb65406f391ffde1d62dd1
Binary files /dev/null and b/test_images/board2.jpg differ
diff --git a/test_images/board3.jpg b/test_images/board3.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3a6d7df8bc1d5d27008a370ee011b2fa70834c1d
Binary files /dev/null and b/test_images/board3.jpg differ
diff --git a/test_images/board4.jpg b/test_images/board4.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..44a18862a5ef3fb3153bfa4a3a5e61b4e4e158f6
Binary files /dev/null and b/test_images/board4.jpg differ
diff --git a/test_images/board5.jpg b/test_images/board5.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..2477dfa37e4b562e2310b5ee606c7d0eed0fc5a3
Binary files /dev/null and b/test_images/board5.jpg differ
diff --git a/test_images/board6.jpg b/test_images/board6.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..6f3c82b7cc9ae16d1013c6b9bc058300ae27131c
Binary files /dev/null and b/test_images/board6.jpg differ
diff --git a/test_images/board7.jpg b/test_images/board7.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..ad7c3613e5e8859acc4122e9a114be2cfe8313bb
Binary files /dev/null and b/test_images/board7.jpg differ
diff --git a/test_images/board8.jpg b/test_images/board8.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..daeeaaceec254c25d5f16ce3bd22548fc3025695
Binary files /dev/null and b/test_images/board8.jpg differ
diff --git a/test_images/board9.jpg b/test_images/board9.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..2fda95544abaec9694fd490d1a6b7ba368cb9e04
Binary files /dev/null and b/test_images/board9.jpg differ
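
On the commented-out legality check in game.py's player_turn: next_board.peek() raises because a Board constructed straight from a FEN has an empty move stack. A workable alternative is to search the current position's legal moves for the one that reproduces the observed placement. A minimal sketch, with find_played_move as an illustrative helper that is not part of this diff:

import chess

def find_played_move(cur_fen, observed_fen):
    # return the legal move linking the two positions, or None if the move was illegal
    board = chess.Board(cur_fen)
    observed_placement = observed_fen.split(' ')[0]
    for move in board.legal_moves:
        board.push(move)
        if board.board_fen() == observed_placement:
            return move  # board now has the move applied
        board.pop()
    return None

player_turn could then push the returned move onto self.board when it isn't None, and signal an illegal move otherwise.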
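
On step 3 of ai_turn ("give the position to python-chess and get the best move"): python-chess does not search for moves on its own; the usual route is to drive a UCI engine such as Stockfish through its chess.engine module. A minimal sketch under that assumption; the engine path and the mapping from --difficulty to search depth are placeholders, not part of this diff:

import chess
import chess.engine

DEPTHS = {"easy": 2, "medium": 8, "hard": 16}  # arbitrary example mapping

def best_move(board, difficulty, engine_path="/usr/bin/stockfish"):
    engine = chess.engine.SimpleEngine.popen_uci(engine_path)
    try:
        result = engine.play(board, chess.engine.Limit(depth=DEPTHS[difficulty]))
    finally:
        engine.quit()
    return result.move

In ai_turn this could become self.board.push(best_move(self.board, self.difficulty)) before handing the move to move_translator; opening the engine once per game rather than once per move would be the obvious refinement.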