Tryag File Manager
Home
||
Turbo Force
||
B-F Config_Cpanel
Current Path :
/
paip
/
script
/
lpr
/
detectron2_LPR
/
Or
Select Your Path :
Upload File :
New :
File
Dir
//paip/script/lpr/detectron2_LPR/detectron2_lpr_inference.py
# Copyright (c) 2020~2022 PAIPTREE SmartFarm, Inc. All Rights Reserved.
# Author: 조한별, 전규빈
"""License-plate character recognition (LPR) inference on top of detectron2.

Loads a Faster R-CNN (X-101 FPN) model fine-tuned on Korean license-plate
characters and exposes a singleton `LicensePlate_Recognition` whose
`Predict_LPR(crop_img)` returns the recognized plate string.
"""
import torch
import torchvision
import detectron2
from detectron2.utils.logger import setup_logger

setup_logger()

# common libraries
import numpy as np
import os, json, cv2, random, glob
import math
import imutils

# common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor, DefaultTrainer
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer, ColorMode
from detectron2.data import detection_utils as utils
from detectron2.data import (
    DatasetCatalog,
    MetadataCatalog,
    build_detection_test_loader,
    build_detection_train_loader,
)

import time
import matplotlib.pyplot as plt
import sys
import warnings

MODEL_DIR = './model/'
num_of_layers = 6
threshold_num = 1000  # 10000 to 2000. 2022.01.12
delta_db_std = .3
dead_detector_alpha = .4
HOME_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir)
DATA_DIR = 'data'
MODULE_DIR = 'util'
OUTPUT_DIR = 'out'

warnings.filterwarnings('ignore')

# set sys path to import PyDBconnector
sys.path.append(os.path.join(HOME_PATH, MODULE_DIR))


class LicensePlate_Recognition:
    """Singleton wrapper around a detectron2 predictor for plate characters.

    The model detects individual characters (digits + Korean syllables) on a
    cropped plate image; `class_mapping` then orders the detections spatially
    (one-line vs two-line plates) and joins them into the plate string.
    """

    __instance = None

    def __init__(self):
        DatasetCatalog.clear()
        MetadataCatalog.clear()
        if not LicensePlate_Recognition.__instance:
            # First construction: build config and load the trained predictor.
            self.cfg = get_cfg()
            self.cfg.merge_from_file(model_zoo.get_config_file(
                "COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml"))
            self.cfg.merge_from_list(['MODEL.DEVICE', 'cpu'])  # force CPU inference
            self.cfg.DATALOADER.NUM_WORKERS = 4
            self.cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
                "COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml")
            # Training-time solver settings (kept for config completeness;
            # inference only uses the ROI_HEADS / WEIGHTS settings below).
            self.cfg.SOLVER.IMS_PER_BATCH = 4
            self.cfg.SOLVER.BASE_LR = 0.00025
            self.cfg.SOLVER.MAX_ITER = 10000
            self.cfg.SOLVER.STEPS = []  # do not decay learning rate
            self.cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512
            self.cfg.MODEL.ROI_HEADS.NUM_CLASSES = 67
            self.cfg.MODEL_DIR = MODEL_DIR
            os.makedirs(self.cfg.MODEL_DIR, exist_ok=True)
            # Inference: swap in the fine-tuned LPR weights and a strict
            # score threshold so only confident character detections survive.
            self.cfg.MODEL.WEIGHTS = "./weight/lpr_weight/detectron2_lpr_920_model_0059999.pth"
            self.cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.9
            self.predictor = DefaultPredictor(self.cfg)
            # Class index -> character label.
            # NOTE(review): this array has 68 entries while NUM_CLASSES is 67 —
            # verify against the training label set (last entry may be unused).
            self.lpr_class_mapping = np.array([
                "0", "1", "2", "3", "4", "5", "6", "7", "8", "9",
                "가", "강", "거", "경", "고", "광", "구", "기", "나", "남",
                "너", "노", "누", "다", "대", "더", "도", "두", "라", "러",
                "로", "루", "마", "머", "모", "무", "바", "배", "버", "보",
                "부", "북", "사", "산", "서", "세", "소", "수", "아", "어",
                "오", "우", "울", "원", "인", "자", "저", "전", "제", "조",
                "종", "주", "천", "충", "하", "허", "호", "육",
            ])
        else:
            # Instance already exists; defer to the singleton accessor.
            self.getInstance()

    @classmethod
    def getInstance(cls):
        """Return the process-wide singleton, constructing it on first use."""
        if not cls.__instance:
            cls.__instance = LicensePlate_Recognition()
        return cls.__instance

    #######################################################################
    def class_mapping(self, pred_box, pred_class):
        """Order detected characters spatially and join them into a string.

        pred_box: per-character boxes as (x1, y1, x2, y2) rows.
        pred_class: per-character class indices into `lpr_class_mapping`.
        Returns the recognized plate string (may print debug output).
        """
        lpr_class = ''
        pred_class = np.array(pred_class)
        pred_box = np.array(pred_box)

        # Mid-height between the mean top edge and mean bottom edge of all
        # boxes; characters whose bottom lies above it belong to an upper row.
        y_avg = (pred_box[:, 1].mean() + pred_box[:, 3].mean()) / 2
        two_line = len(np.where(pred_box[:, 3] < y_avg)[0]) >= 2

        # Left-to-right reading order (by x1).
        x_order = np.argsort(pred_box[:, 0])

        if not two_line:
            # single-row plate
            print('한줄 번호판')
            if len(pred_class) < 9:
                # 7–8 characters: plain left-to-right read.
                lpr_class = "".join(self.lpr_class_mapping[pred_class[x_order]])
                print(lpr_class)
            else:
                # 9 characters: the leading region code may be stacked, so if
                # the left-most box sits below its neighbor, swap the first two.
                if pred_box[x_order[0], 1] > pred_box[x_order[1], 1]:
                    temp = self.lpr_class_mapping[pred_class[x_order]]
                    temp[0], temp[1] = temp[1], temp[0]
                    lpr_class = "".join(temp)
                    print(lpr_class)
                else:
                    lpr_class = "".join(self.lpr_class_mapping[pred_class[x_order]])
                    print(lpr_class)
        else:
            # two-row plate: read the upper row, then the lower row.
            print('두줄 번호판')
            first_index = np.where(pred_box[:, 3] < y_avg)
            second_index = np.where(pred_box[:, 3] > y_avg)
            first_line = pred_class[first_index]
            second_line = pred_class[second_index]
            final_first_line = first_line[np.argsort(pred_box[first_index, 0])[0]]
            final_second_line = second_line[np.argsort(pred_box[second_index, 0])[0]]
            lpr_class = "".join(np.concatenate([
                self.lpr_class_mapping[final_first_line],
                self.lpr_class_mapping[final_second_line],
            ]))
            print(lpr_class)
        return lpr_class

    #######################################################################
    def Predict_LPR(self, crop_img):
        """Run character detection on a cropped plate image (BGR ndarray)
        and return the recognized plate string via `class_mapping`."""
        print("============================== Starting to Recognition license plate ==============================")
        try:
            # BGR -> RGB -> BGR: the channel flip undoes the cvtColor, so the
            # net effect is a copy of crop_img (kept to preserve behavior).
            im = cv2.cvtColor(crop_img, cv2.COLOR_BGR2RGB)
            im = im[:, :, ::-1]
            outputs = self.predictor(im)
            output_cpu = outputs['instances'].to("cpu")
            boxes = output_cpu.pred_boxes.tensor.numpy()
            pred_classes = output_cpu.pred_classes
            return self.class_mapping(boxes, pred_classes)
        except Exception:
            # Re-raise unchanged to keep the original type and traceback.
            raise


if __name__ == '__main__':
    crop_img = ''
    predictor = LicensePlate_Recognition.getInstance()
    # Fixed: Predict_LPR takes only the cropped image (the old 3-arg call
    # raised TypeError).
    predictor.Predict_LPR(crop_img)