Tryag File Manager
Home
||
Turbo Force
||
B-F Config_Cpanel
Current Path :
/
paip
/
script
/
weight
/
Or
Select Your Path :
Upload File :
New :
File
Dir
//paip/script/weight/chicken_test.py
# Copyright (c) 2020~2022 PAIPTREE SmartFarm, Inc. All Rights Reserved.
# Author: 조한별, 전규빈
#
# Chicken weight estimation service: runs a detectron2 Mask R-CNN instance
# segmentation model over broiler-house images, filters detections, and maps
# segmented pixel areas to weight via a pre-trained regression model.

import torch, torchvision
# torch.cuda.empty_cache()
# print(torch.__version__, torch.cuda.is_available())
# assert torch.__version__.startswith("1.9")

# Setup detectron2 logger
import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()

# import some common libraries
import numpy as np
import os, json, cv2, random
from detectron2.structures import BoxMode

# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
import copy
from detectron2.utils.visualizer import Visualizer
from detectron2.data import detection_utils as utils
import detectron2.data.transforms as T
from detectron2.engine import DefaultTrainer
from detectron2.data import DatasetCatalog, MetadataCatalog, build_detection_test_loader, build_detection_train_loader
import torch
from detectron2.utils.visualizer import ColorMode
import pandas as pd
import flask
from flask_cors import CORS
from flask import request, jsonify
import json
import time
import os, glob
import matplotlib.pyplot as plt
import cv2
import joblib
from sklearn.mixture import GaussianMixture as GMM
import sys
import warnings
from datetime import date, datetime
from sklearn.preprocessing import PolynomialFeatures
from apscheduler.schedulers.background import BackgroundScheduler

# Module-level tuning constants.
MODEL_DIR = './model/'
num_of_layers = 6
threshold_num = 1000  # 10000 to 2000 . 2022.01.12
delta_db_std = .3
dead_detector_alpha = .4
HOME_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir)
DATA_DIR = 'data'
MODULE_DIR = 'util'
OUTPUT_DIR = 'out'
warnings.filterwarnings('ignore')

# set sys path to import PyDBconnector
sys.path.append(os.path.join(HOME_PATH, MODULE_DIR))
from PyDBconnector import PyDBconnector


class Prepare_predictor:
    """Singleton wrapper around a detectron2 DefaultPredictor configured for
    single-class ("chicken") instance segmentation.

    Holds the config, the loaded predictor, the dataset metadata, and a
    background scheduler that swaps model weights by bird age once per day.
    """

    __instance = None

    def __init__(self):
        if not Prepare_predictor.__instance:
            # Instance does not exist yet: build the full predictor.
            for d in ["train", "val"]:
                try:
                    DatasetCatalog.register("chicken_" + d, lambda d=d: self.get_chicken_dicts("/gate/script/weight/dataset/" + d))
                    MetadataCatalog.get("chicken_" + d).set(thing_classes=["chicken"])
                except Exception:
                    # Dataset already registered (e.g. re-init); just refresh metadata.
                    MetadataCatalog.get("chicken_" + d).set(thing_classes=["chicken"])
            self.chicken_metadata = MetadataCatalog.get("chicken_train")
            self.cfg = get_cfg()
            self.cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
            # Fall back to CPU when no GPU is available.
            device = 'cuda' if torch.cuda.is_available() else 'cpu'
            self.cfg.merge_from_list(['MODEL.DEVICE', device])
            self.cfg.DATASETS.TRAIN = ("chicken_train",)
            self.cfg.DATASETS.TEST = ()
            self.cfg.DATALOADER.NUM_WORKERS = 2
            # Let training initialize from model zoo
            self.cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
                "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
            self.cfg.SOLVER.IMS_PER_BATCH = 2
            self.cfg.SOLVER.BASE_LR = 0.00025  # pick a good LR
            self.cfg.SOLVER.MAX_ITER = 300  # 300 iterations seems good enough for this toy dataset; you will need to train longer for a practical dataset
            self.cfg.SOLVER.STEPS = []  # do not decay learning rate
            self.cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128  # faster, and good enough for this toy dataset (default: 512)
            self.cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1  # only has one class (chicken).
            # (see https://detectron2.readthedocs.io/tutorials/datasets.html#update-the-config-for-new-datasets)
            # NOTE: this config means the number of classes, but a few popular
            # unofficial tutorials incorrectly use num_classes+1 here.
            self.cfg.MODEL_DIR = MODEL_DIR
            os.makedirs(self.cfg.MODEL_DIR, exist_ok=True)
            # Inference uses the same config as above, adjusted for the trained weights.
            self.chicken_model_name = "weight_predict_new.pth"
            self.cfg.MODEL.WEIGHTS = os.path.join(self.cfg.MODEL_DIR, self.chicken_model_name)  # path to the model we just trained
            self.cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.8  # set a custom testing threshold
            self.predictor = DefaultPredictor(self.cfg)
            self.backgroundScheduler()
            # FIX: record the singleton so later constructions see it.
            Prepare_predictor.__instance = self
        else:
            # Instance already exists; defer to the stored singleton.
            self.getInstance()

    def model_update(self):
        """Daily job: swap model weights depending on the birds' age (days since IN_DATE)."""
        # Look up the most recent flock start date.
        sql_str = "select IN_DATE from tbl_house_breed_hist order by CREATE_TIME desc limit 1"
        dbConn = PyDBconnector()
        rows = dbConn.select_from_db(sql_str)
        db_chicken_age = rows['IN_DATE'].item()
        today = date.today()
        date_diff = (today - db_chicken_age).days
        print(date_diff)
        # Age >= 20 days: switch to the late-stage model.
        if date_diff >= 20 and self.chicken_model_name == "chery_chung_model.pth":
            self.chicken_model_name = "chery_chung_makgok_model_2.pth"
        # Age < 20 days: switch back to the early-stage model.
        if date_diff < 20 and self.chicken_model_name == "chery_chung_makgok_model_2.pth":
            self.chicken_model_name = "chery_chung_model.pth"
        self.cfg.MODEL.WEIGHTS = os.path.join(self.cfg.MODEL_DIR, self.chicken_model_name)
        self.predictor = DefaultPredictor(self.cfg)

    def backgroundScheduler(self):
        """Start an APScheduler background job that runs model_update at midnight daily."""
        self.sched = BackgroundScheduler(daemon=True)
        self.sched.add_job(self.model_update, 'cron', hour='00', minute='00')
        # self.sched.add_job(self.model_update, 'interval', seconds=10)
        self.sched.start()

    def get_chicken_dicts(self, img_dir):
        """Load a VIA-format annotation file and convert it to detectron2's
        standard dataset-dict list (one record per image, polygon segmentation).

        :param img_dir: directory containing images and via_region_data.json
        :return: list of detectron2 dataset dicts
        """
        json_file = os.path.join(img_dir, "via_region_data.json")
        with open(json_file) as f:
            imgs_anns = json.load(f)
        dataset_dicts = []
        for idx, v in enumerate(imgs_anns.values()):
            record = {}
            filename = os.path.join(img_dir, v["filename"])
            height, width = cv2.imread(filename).shape[:2]
            record["file_name"] = filename
            record["image_id"] = idx
            record["height"] = height
            record["width"] = width
            annos = v["regions"]
            objs = []
            for anno in annos:
                anno = anno["shape_attributes"]
                px = anno["all_points_x"]
                py = anno["all_points_y"]
                # Shift to pixel centers, then flatten [(x, y), ...] to [x, y, x, y, ...].
                poly = [(x + 0.5, y + 0.5) for x, y in zip(px, py)]
                poly = [p for x in poly for p in x]
                obj = {
                    "bbox": [np.min(px), np.min(py), np.max(px), np.max(py)],
                    "bbox_mode": BoxMode.XYXY_ABS,
                    "segmentation": [poly],
                    "category_id": 0,
                }
                objs.append(obj)
            record["annotations"] = objs
            dataset_dicts.append(record)
        # FIX: was `return self.dataset_dicts`, but the list is a local variable
        # (`self.dataset_dicts` is never set), which raised AttributeError.
        return dataset_dicts

    @classmethod
    def getInstance(cls):
        """Return the singleton, constructing it on first use."""
        if not cls.__instance:
            cls.__instance = Prepare_predictor()
        return cls.__instance


def get_binary_img(img, n_cluster=2, bg_img=None, era=5):
    """Binarize a grayscale image via a 2-component Gaussian mixture threshold.

    :param img: grayscale image array
    :param n_cluster: number of GMM components (foreground/background)
    :param bg_img: optional background image for background subtraction
    :param era: margin subtracted from the foreground-component mean threshold
    :return: array of 0/255 values
    """
    g_model = GMM(n_components=n_cluster, covariance_type='tied', random_state=0).fit(img.reshape(-1, 1))
    if bg_img is not None:
        img = np.where(img - bg_img > 10, img, 0)
    return np.where(img < (g_model.means_[1] - era), 0, 255)


# Mean + 1-std threshold binarization for grayscale images (0/255 output).
get_gray_binary_img2 = lambda x: np.where(x > np.mean(x) + np.std(x), 255, 0)


def detect_death(src_img: np.ndarray, ref_imgs: list, bg_img=None, plotY=False, steps: int = 2):
    """Take an image list and keep only the regions that overlap across
    consecutive frames — static blobs are candidate dead birds.

    :param src_img: reference (binary) image
    :param ref_imgs: paths of past images to compare against
    :param bg_img: background image for background subtraction (currently unused; to-do)
    :param plotY: whether to plot intermediate processing steps
    :param steps: number of images consumed per iteration
    :return: final intersection mask
    """
    ## to-do :: bg image substraction
    final_img = src_img.copy()
    print(f'no. of images : {len(ref_imgs)}')
    for i, x in enumerate(ref_imgs[1:-2:steps]):
        img_inner = cv2.imread(x)
        img_inner = cv2.cvtColor(img_inner, cv2.COLOR_BGR2GRAY)
        new_binary = get_gray_binary_img2(img_inner)
        # Pixel-wise AND via saturated addition: only pixels white in both survive.
        diff1 = src_img + new_binary
        diff1 = np.where(diff1 > 255, 255, 0)
        img_inner2 = cv2.imread(ref_imgs[(i + steps)])
        img_inner2 = cv2.cvtColor(img_inner2, cv2.COLOR_BGR2GRAY)
        new_binary2 = get_gray_binary_img2(img_inner2)
        diff2 = diff1 + new_binary2
        diff2 = np.where(diff2 > 255, 255, 0)
        if plotY:
            fig = plt.figure(figsize=(40, 10))
            _ = fig.add_subplot(141)
            _ = plt.imshow(src_img)
            _ = fig.add_subplot(142)
            _ = plt.imshow(new_binary)
            _ = fig.add_subplot(143)
            _ = plt.imshow(new_binary2)
            _ = fig.add_subplot(144)
            _ = plt.imshow(diff2)
        final_img = diff2  # rotation : diff2, linear : new_binary
        # Accumulate: the intersection so far becomes the next comparison base.
        src_img = diff2
    return final_img


def dead_body_detectron2(dead_body_img):
    """Run the global predictor on an image and draw red boxes on detections.

    :param dead_body_img: BGR image array
    :return: (visualizer output, detection count, list of int box coords)
    :raises Exception: when prediction/visualization fails or nothing is detected
    """
    try:
        outputs = predictor.predictor(dead_body_img)
        v = Visualizer(dead_body_img[:, :, ::-1], metadata=predictor.chicken_metadata, scale=0.5,
                       instance_mode=ColorMode.IMAGE)
        output_cpu = outputs['instances'].to("cpu")
        boxes = output_cpu.pred_boxes
        boxes_list = []
        for i, x in enumerate(boxes):
            boxes_list.append(x.int().tolist())
            out = v.draw_box(x, alpha=1.0, edge_color='r', line_style='-')
        count = len(boxes)
        # out = v.draw_instance_predictions(output_cpu.to("cpu"))
        return out, count, boxes_list
    except Exception as e:
        # FIX: chain the cause so the real failure is not lost.
        raise Exception(" failed : No Dead body detected ") from e


def weight_predict(no_distortion_pixel_mean, house_id, module_id, dbConn):
    """Map a mean segmented-pixel area to a weight estimate via a regression model.

    Camera distance/info factors from tbl_camera rescale the raw prediction;
    the previous predicted weight (when available) acts as a lower bound at 90%.

    :param no_distortion_pixel_mean: mean pixel area of undistorted detections
    :param house_id: house identifier (DB key)
    :param module_id: module identifier (DB key)
    :param dbConn: PyDBconnector instance
    :return: rounded weight estimate
    :raises Exception: when camera info or every regression model fails to load
    """
    needRawWeight = False
    try:
        sql_str = "select CAM_TYPE, CAM_DIST, CAM_INFO from tbl_camera where CAM_TYPE != 'ther' and HOUSE_ID = '%s' and MODULE_ID = '%s' order by CREATE_TIME desc limit 1" % (
            house_id, module_id)
        rows_ex = dbConn.select_from_db(sql_str)
        dist, info = float(rows_ex['CAM_DIST'].item()), float(rows_ex['CAM_INFO'].item())
    except Exception:
        raise Exception("cam info select failed")

    sql_str = f'''select predictedWeight from tbl_pixel_stats where 1=1 and house_id = '{house_id}' and module_id = '{module_id}' order by create_time desc limit 1 '''
    try:
        rows = dbConn.select_from_db(sql_str)
        predictedWeight = str(rows.values[0][0])
    except Exception:
        # No previous weight row: return the raw prediction without a floor.
        needRawWeight = True

    # Load the most specific regression model available, falling back in order.
    loaded_model = None
    try:
        regressor = joblib.load('./model/pixel_weight_' + str(house_id) + '_regressor.pkl')
    except Exception:
        try:
            regressor = joblib.load('./model/pixel_weight_regressor.pkl')
        except Exception:
            try:
                loaded_model = joblib.load('./model/chery_linear_model.pkl')
            except Exception:
                raise Exception("failed to load regression model")

    if loaded_model is not None:
        weight = loaded_model.predict(pd.DataFrame({no_distortion_pixel_mean}))[0]
        if needRawWeight:
            return round(weight * (dist ** 2) * info / (5.74 ** 2), 1)
        # FIX: predictedWeight is a str; the original multiplied the string by a
        # float before converting (`float(predictedWeight * ...)`) — TypeError.
        return round(max(weight * (dist ** 2) * info / (5.74 ** 2),
                         float(predictedWeight) * (dist ** 2) * info / (5.74 ** 2) * .9), 1)
    else:
        poly_feature = PolynomialFeatures(degree=2, include_bias=False)
        A_poly = poly_feature.fit_transform(pd.DataFrame({no_distortion_pixel_mean}))
        if needRawWeight:
            return round(regressor.predict(A_poly)[0], 1)
        print(f"realWeight : {predictedWeight}, predicted : {regressor.predict(A_poly)[0]}")
        return round(max(regressor.predict(A_poly)[0], float(predictedWeight) * .9), 1)


def calib_image(img_source):
    """Undistort a fisheye image with fixed intrinsics (focal length 1100).

    :param img_source: source image array (not modified)
    :return: remapped (undistorted) image
    """
    img = img_source.copy()
    cent_x, cent_y = img.shape[:2]
    # @ f_val : 1100  (tried 1100, 1200)
    f_val = 1100.
    K = np.array([[f_val, 0.0, cent_y / 2], [0.0, f_val, cent_x / 2], [0., 0., 1.0]])
    D = np.array([0.04004325, 0.00112638, 0.01004722, -0.00593285])
    map1, map2 = cv2.fisheye.initUndistortRectifyMap(K, D, np.eye(3), K, (cent_y, cent_x), cv2.CV_16SC2)
    newImg = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
    return newImg


def detect_weight():
    """Batch job: segment every test image, filter detections by area and image
    bounds, and write per-image pixel statistics to a CSV file.

    Uses the module-level `predictor` singleton set up in __main__.
    """
    df = pd.DataFrame(columns=['date', 'image_name', 'pixel_list', 'pixel_count'])
    if predictor.predictor is None:
        raise Exception("error = predictor is None")
    if predictor.chicken_metadata is None:
        raise Exception("error = chicken_metadata is None")
    data_dir = "/home/centos/data/chicken/test_500/"
    resultFileName = "chicken_weighter_fa0010_all_20221215_20230102__re.csv"
    FileList = glob.glob(data_dir + '/*.jpg', recursive=True)
    img_namess = FileList
    print(f'파일 갯수 : {len(img_namess)}')
    for idx, d in enumerate(img_namess):
        image_dir, image_name = os.path.split(d)
        im = cv2.imread(d)
        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
        # format is documented at https://detectron2.readthedocs.io/tutorials/models.html#model-output-format
        outputs = predictor.predictor(im)
        v = Visualizer(im[:, :, ::-1],
                       metadata=predictor.chicken_metadata,
                       scale=0.5,
                       instance_mode=ColorMode.IMAGE  # remove the colors of unsegmented pixels. This option is only available for segmentation models
                       )
        output_cpu = outputs['instances'].to("cpu")
        boxes = output_cpu.pred_boxes
        pred_masks = output_cpu.pred_masks
        # Reject boxes within 5 px of the image border (cropped birds distort area).
        bound_xy = torch.tensor([0, 0, pred_masks.shape[2], pred_masks.shape[1]])
        masks = torch.tensor([1, 1, -1, -1])
        bound_check = lambda x: all((bound_xy - x) * masks < -5)  # margin : 5
        num_of_interest = 40
        x_sum, x_std = [], 0.
        toggle_bound = []
        for x in boxes:
            x1, y1, x2, y2 = x
            x_area = round(float((x2 - x1) * (y2 - y1)), 1)
            x_sum.append(x_area)
            toggle_bound.append(bound_check(x))
        x_mean = np.mean(x_sum)
        x_std = np.std(x_sum)
        if np.isnan(x_mean) or np.isnan(x_std):
            # No detections in this image; skip it.
            continue
        else:
            # Keep boxes within one std of the mean area, away from the border,
            # and cap the number of kept detections at num_of_interest.
            ind_toggle = [(x_mean - x_std <= x <= x_mean + x_std) for x in x_sum]
            ind_toggle = np.array(ind_toggle) & np.array(toggle_bound)
            ind_toggle = [ind_toggle[i] if sum(ind_toggle[:i]) < num_of_interest else False
                          for i in range(len(ind_toggle))]
            total_sum = torch.sum(pred_masks, (1, 2))[ind_toggle]
            total_count = len(total_sum)
            if total_count <= 0:
                raise Exception('detection count 0')
            # Non-cctv distortion filtering was removed; all kept detections
            # are treated as distortion-free.
            no_dist_toggle = ind_toggle
            no_distortion_count = int(sum(no_dist_toggle))
            if no_distortion_count == 0:
                raise Exception('no_distortion_count is 0')
            final_toggle = no_dist_toggle
            # out = v.draw_instance_predictions(output_cpu[final_toggle])
            out = v.overlay_instances(boxes=output_cpu[final_toggle].pred_boxes,
                                      masks=output_cpu[final_toggle].pred_masks)
            # Parse the capture date (14 chars after '_202') and camera id from the path.
            index1 = d.rfind('_202')
            index2 = d.rfind('H')
            # FIX: renamed local from `date` to avoid shadowing datetime.date.
            img_date = d[index1 + 1: index1 + 15]
            df.loc[idx] = [img_date, d[index2:],
                           torch.sum(pred_masks, (1, 2))[no_dist_toggle].tolist(),
                           no_distortion_count]
    print(df)
    df.to_csv(resultFileName, index=False,
              columns=['date', 'image_name', 'pixel_list', 'pixel_count'], encoding='utf-8')


if __name__ == '__main__':
    print('--------------------model loading---------------------')
    try:
        predictor = Prepare_predictor.getInstance()
        # loaded_model = joblib.load('./model/chery_linear_model.pkl')
        detect_weight()
    except Exception as e:
        raise Exception(f" failed : {e} ")