Tryag File Manager
Home
||
Turbo Force
||
B-F Config_Cpanel
Current Path :
/
paip
/
script
/
lpr
/
detectron2_LPR
/
Or
Select Your Path :
Upload File :
New :
File
Dir
//paip/script/lpr/detectron2_LPR/detectron2_lpd_inference.py
# Copyright (c) 2020~2024 PAIPTREE SmartFarm, Inc. All Rights Reserved.
# Modifier : 박정훈
# Modified : 2024-02-05
# Apply adaptive binarization (cv2.adaptiveThreshold) to preprocess license-plate crops.
# The plate crop is resized to 2x before preprocessing is applied.
import torch, torchvision
import detectron2
from detectron2.utils.logger import setup_logger

setup_logger()

# common libraries
import numpy as np
import os, cv2
import math
import imutils

# common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import detection_utils as utils
from detectron2.engine import DefaultTrainer
from detectron2.data import DatasetCatalog, MetadataCatalog, build_detection_test_loader, build_detection_train_loader
from detectron2.utils.visualizer import ColorMode
import sys
import warnings

MODEL_DIR = './model/'
num_of_layers = 6
threshold_num = 1000  # 10000 to 2000 . 2022.01.12
delta_db_std = .3
dead_detector_alpha = .4
HOME_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir)
DATA_DIR = 'data'
MODULE_DIR = 'util'
OUTPUT_DIR = 'out'
warnings.filterwarnings('ignore')

# set sys path to import PyDBconnector (two levels up, then into util/)
HOME_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir, os.pardir)
MODULE_DIR = 'util'
sys.path.append(os.path.join(HOME_PATH, MODULE_DIR))
from PyDBconnector import PyDBconnector


class LicensePlate_Detector:
    """Singleton wrapper around a detectron2 Mask R-CNN license-plate detector.

    Loads a COCO InstanceSegmentation config, overrides the weights with a
    locally trained plate-detection checkpoint, and exposes Predict_LPD()
    which crops, deskews, and binarizes the best-scoring plate in an image.
    """

    __instance = None

    def __init__(self, ):
        DatasetCatalog.clear()
        MetadataCatalog.clear()
        if not LicensePlate_Detector.__instance:
            # First construction: build the predictor config.
            self.cfg = get_cfg()
            self.cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
            self.cfg.merge_from_list(['MODEL.DEVICE', 'cpu'])  # force CPU inference
            self.cfg.DATALOADER.NUM_WORKERS = 4
            self.cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
                "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
            self.cfg.SOLVER.IMS_PER_BATCH = 4
            self.cfg.SOLVER.BASE_LR = 0.00025  # pick a good LR
            self.cfg.SOLVER.MAX_ITER = 300     # training-only setting; kept from the training config
            self.cfg.SOLVER.STEPS = []         # do not decay learning rate
            self.cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128  # faster, good enough (default: 512)
            self.cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1  # single class: the license plate
            self.cfg.MODEL_DIR = MODEL_DIR
            os.makedirs(self.cfg.MODEL_DIR, exist_ok=True)
            # Inference uses the training config with the locally trained weights
            # and a stricter score threshold.
            self.cfg.MODEL.WEIGHTS = "./weight/lpd_weight/lpr_detecting_label385.pth"
            self.cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.9  # custom testing threshold
            self.predictor = DefaultPredictor(self.cfg)
        else:
            # An instance already exists: defer to the singleton accessor.
            self.getInstance()

    # window name kept from the original perspective-scan demo (perspective_scan.py)
    win_name = "scanning"

    def onMouse(self, img, pts):
        """Perspective-rectify the quadrilateral `pts` (4 points) out of `img`.

        Orders the four corners (top-left, top-right, bottom-right, bottom-left),
        computes the output width/height from the longer opposing edges, and
        returns the warped, axis-aligned crop.
        """
        # Identify the four corners: x+y is smallest at top-left, largest at
        # bottom-right; x-y is smallest at top-right, largest at bottom-left.
        sm = pts.sum(axis=1)            # x + y per point
        diff = np.diff(pts, axis=1)     # x - y per point
        topLeft = pts[np.argmin(sm)]
        bottomRight = pts[np.argmax(sm)]
        topRight = pts[np.argmin(diff)]
        bottomLeft = pts[np.argmax(diff)]

        # Source quad, in TL, TR, BR, BL order.
        pts1 = np.float32([topLeft, topRight, bottomRight, bottomLeft])

        # Output size: the larger of the two horizontal edges and the larger
        # of the two vertical edges.
        w1 = abs(bottomRight[0] - bottomLeft[0])  # bottom edge width
        w2 = abs(topRight[0] - topLeft[0])        # top edge width
        h1 = abs(topRight[1] - bottomRight[1])    # right edge height
        h2 = abs(topLeft[1] - bottomLeft[1])      # left edge height
        width = max([w1, w2])
        height = max([h1, h2])

        # Destination quad: the full output rectangle.
        pts2 = np.float32([[0, 0], [width - 1, 0],
                           [width - 1, height - 1], [0, height - 1]])

        mtrx = cv2.getPerspectiveTransform(pts1, pts2)
        result = cv2.warpPerspective(img, mtrx, (int(width), int(height)))
        return result

    def cal_rotate_degree(self, boxes, pred_masks):
        """Estimate the plate's tilt (degrees) from its segmentation mask.

        Samples the mask columns at 25% and 75% of the box width, takes the
        first True row in each, and returns atan2 of the rise over the run.
        NOTE(review): assumes both sampled columns contain at least one True
        pixel; an all-False column would raise IndexError — confirm upstream.
        """
        x1, y1, x2, y2 = boxes
        plate_box = pred_masks[0][int(y1):int(y2), int(x1):int(x2)]
        first = plate_box.shape[1] * 0.25
        third = plate_box.shape[1] * 0.75
        width = third - first
        temp_first = np.array(plate_box).T[int(first)]
        temp_third = np.array(plate_box).T[int(third)]
        first_y = np.where(temp_first == True)
        third_y = np.where(temp_third == True)
        height = first_y[0][0] - third_y[0][0]
        rad = math.atan2(height, width)  # (dy, dx)
        deg = (rad * 180) / math.pi
        return deg

    def __angle_between(self, p1, p2):
        """Return the angle (degrees, [0, 360)) from vector p1 to vector p2."""
        ang1 = np.arctan2(*p1[::-1])
        ang2 = np.arctan2(*p2[::-1])
        res = np.rad2deg((ang1 - ang2) % (2 * np.pi))
        return res

    def getAngle3P(self, points):
        """Return the interior angle (degrees) at points[1] formed by the
        three points (points[0], points[1], points[2])."""
        p1 = points[0]
        p2 = points[1]
        p3 = points[2]
        pt1 = (p1[0] - p2[0], p1[1] - p2[1])
        pt2 = (p3[0] - p2[0], p3[1] - p2[1])
        res = self.__angle_between(pt1, pt2)
        res = (res + 360) % 360
        res = (360 - res) % 360
        return res

    @classmethod
    def getInstance(cls):
        """Return the singleton detector, creating it on first use."""
        if not cls.__instance:
            cls.__instance = LicensePlate_Detector()
        return cls.__instance

    def Predict_LPD(self, image_dir, image_name, farm_id):
        """Detect, crop, deskew, and binarize the license plate in an image.

        Args:
            image_dir: directory containing the image (joined by string concat).
            image_name: image file name (expected to end in '.jpg').
            farm_id: farm identifier used to look up a per-farm ROI, or None
                for the global ROI table.

        Returns:
            (rotated_img, out, license_plate_type, mx, my, mx2, my2) where
            rotated_img is the preprocessed plate crop, out is the detectron2
            Visualizer output, license_plate_type is 1 (single-row plate),
            2 (double-row plate) or 3 (detection outside the ROI), and
            mx/my/mx2/my2 are the ROI corners used.

        Raises:
            Exception: when no plate is detected, or any downstream failure.
        """
        print("============================== Starting to detect license plate ==============================")
        imageName = image_dir + image_name
        try:
            im = cv2.imread(imageName)
            im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
            print("image name = ", imageName)
            im_height, im_width, im_channel = im.shape
            print("image width : %d, image height : %d" % (im_width, im_height))

            outputs = self.predictor(im)
            v = Visualizer(im[:, :, ::-1], metadata=None, scale=1.0, instance_mode=ColorMode.IMAGE)
            output_cpu = outputs['instances'].to("cpu")
            boxes = output_cpu.pred_boxes
            score = output_cpu.scores
            pred_classes = output_cpu.pred_classes
            pred_masks = output_cpu.pred_masks

            # Sum up box areas; a NaN mean means nothing was detected.
            x_sum = []
            for i, x in enumerate(boxes):
                x1, y1, x2, y2 = x
                x_area = round(float((x2 - x1) * (y2 - y1)), 1)
                x_sum.append(x_area)
            x_mean = np.mean(x_sum)
            if np.isnan(x_mean) or len(x_sum) == 0:
                raise Exception(imageName + "---------------> (No Detecting LPD) \n")

            out = v.draw_instance_predictions(output_cpu)

            # Keep only the best-scoring detection.
            if len(score) > 1:
                print("1 more box detected. will get best scored box")
                score_list = list(score)
                best_score_index = score_list.index(max(score_list))
                x1, y1, x2, y2 = list(boxes)[best_score_index]
            else:
                best_score_index = 0
                x1, y1, x2, y2 = list(boxes)[best_score_index]

            crop_img = im[int(y1):int(y2), int(x1):int(x2)].copy()

            # Index of the '.jpg' extension (kept for name-derived save paths).
            jpg_index = image_name.rfind('.jpg')

            # Build a filled polygon mask from the predicted mask's True pixels.
            h, w = pred_masks[0].shape
            true_index = np.where(pred_masks[0] == True)
            listOfCoordinates = list(zip(true_index[0], true_index[1]))
            true_array = np.array([[x[1], x[0]] for x in listOfCoordinates])
            mask2 = np.zeros((h, w), dtype=np.uint8)
            mask2 = cv2.fillPoly(mask2, [true_array], 1)

            # Approximate the plate outline to a polygon for perspective warp.
            contours, hierarchy = cv2.findContours(mask2, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
            pointDp = cv2.approxPolyDP(contours[0], cv2.arcLength(contours[0], True) * 0.02, True).squeeze(1)

            # Strong tilt -> perspective rectification; mild tilt -> plain rotation.
            deg = self.cal_rotate_degree(list(boxes)[best_score_index], pred_masks)
            if deg <= -10 or deg >= 10:
                print('affine')
                rotated_img = self.onMouse(im, pointDp)
            else:
                print('rotate')
                rotated_img = imutils.rotate_bound(crop_img, deg)

            # Distinguish single-row vs double-row plates by aspect ratio.
            license_plate_type = 1
            height, width, channel = rotated_img.shape
            if height * 2.5 > width:
                license_plate_type = 2

            # ROI check: load the configured region of interest from the DB.
            # Defaults cover the full frame so a DB failure cannot leave the
            # coordinates unbound (they are printed and returned below).
            mx = 0
            my = 0
            mx2 = im_width
            my2 = im_height
            dbConn = PyDBconnector()
            if farm_id is None:
                select_str = f"select * from tbl_lpr_roi limit 1"
            else:
                # WARNING(review): farm_id is interpolated into SQL — use a
                # parameterized query if PyDBconnector supports it.
                select_str = f"select * from tbl_farm_lpr_roi where farm_id='{farm_id}' limit 1"
            try:
                rows = dbConn.select_from_db(select_str)
                # NOTE(review): rows appears to be a pandas DataFrame
                # (.empty / .item()) — confirm against PyDBconnector.
                if rows.empty or im_width < 1920:
                    mx = 0
                    my = 0
                    mx2 = im_width
                    my2 = im_height
                else:
                    mx = rows['x1'].item()
                    my = rows['y1'].item()
                    mx2 = rows['x2'].item()
                    my2 = rows['y2'].item()
            except Exception as e:
                print(e)
            finally:
                dbConn.close()

            print("roi: x, x2, y, y2", mx, mx2, my, my2)
            isX = False
            isY = False
            if (mx <= int(x1)) & (mx2 >= int(x2)):
                print("x : ok")
                isX = True
            else:
                print("x : fail ")
            if (my <= int(y1)) & (my2 >= int(y2)):
                print("y : ok")
                isY = True
            else:
                print("y : fail ")
            if isX & isY:
                print("roi : ok")
            else:
                print("roi : Fail!")
                license_plate_type = 3  # detection outside the configured ROI

            # Preprocess: 2x upscale, grayscale, local adaptive binarization,
            # then min-max normalization (histogram stretching).
            height, width, _ = rotated_img.shape
            rotated_img = cv2.resize(rotated_img, dsize=(width * 2, height * 2))
            rotated_gray = cv2.cvtColor(rotated_img, cv2.COLOR_RGB2GRAY)
            rotated_gray = cv2.adaptiveThreshold(rotated_gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                                 cv2.THRESH_BINARY, 15, 2)
            rotated_img = cv2.normalize(rotated_gray, None, 0, 255, cv2.NORM_MINMAX)

            return rotated_img, out, license_plate_type, mx, my, mx2, my2
        except Exception as e:
            # Re-raise as-is to preserve the original traceback.
            raise


if __name__ == '__main__':
    predictor = LicensePlate_Detector.getInstance()
    # NOTE(review): Predict_LPD requires (image_dir, image_name, farm_id);
    # this zero-argument call raises TypeError — supply real arguments.
    predictor.Predict_LPD()