Tryag File Manager
Home
||
Turbo Force
||
B-F Config_Cpanel
Current Path :
/
paip
/
script
/
broiler_onoff
/
Or
Select Your Path :
Upload File :
New :
File
Dir
//paip/script/broiler_onoff/broiler_input_script.py
# Copyright (c) 2020~2024 PAIPTREE SmartFarm, Inc. All Rights Reserved.
# Author: 박정훈
# modified last date : 2024-01-12
# Refactored: REST-API-related code removed.
import torch
import tensorflow as tf
# import some common libraries
import numpy as np
import os, json, cv2
import time
import glob
import matplotlib.pyplot as plt
import sys
import warnings
# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.utils.visualizer import ColorMode
from detectron2.utils.logger import setup_logger

setup_logger()

# Extend sys.path so the sibling 'util' package (PyDBconnector, logs) can be imported.
HOME_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir)
MODULE_DIR = 'util'
sys.path.append(os.path.join(HOME_PATH, MODULE_DIR))
from PyDBconnector import PyDBconnector
from logs import paiplog

IS_GATEWAY = True   # flipped to False in __main__ when the image name carries a farm id ('FA')
IS_GPU = False      # flipped to True in __main__ when TensorFlow reports a GPU
warnings.filterwarnings('ignore')

chicken_model_name = "weight_predict_new.pth"

# Global predictor handle, built in __main__ via initDetectron().
# Declared here so detect_weight()'s guard raises its intended Exception
# instead of a NameError if the module is used without running __main__.
predictor = None


def initDetectron():
    """Build and return a Detectron2 DefaultPredictor for the chicken model.

    Loads the Mask R-CNN R50-FPN COCO config, forces CPU inference when no
    GPU is available, and points the weights at the locally trained
    checkpoint (``chicken_model_name``) under the gateway- or cloud-specific
    model directory.
    """
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
    if not IS_GPU:
        print('Running CPU!!')
        cfg.merge_from_list(['MODEL.DEVICE', 'cpu'])
    # NOTE: this config means the number of classes; a few popular unofficial
    # tutorials incorrectly use num_classes + 1 here.
    # (see https://detectron2.readthedocs.io/tutorials/datasets.html#update-the-config-for-new-datasets)
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1  # only has one class
    if IS_GATEWAY:
        cfg.MODEL_DIR = '/gate/script/weight/model/'
    else:
        cfg.MODEL_DIR = '/paip/script/weight/model/'
    os.makedirs(cfg.MODEL_DIR, exist_ok=True)
    # Inference uses the locally trained weights. (The original code first
    # assigned the model-zoo checkpoint URL here, but that value was dead —
    # immediately overwritten by the line below — so it has been removed.)
    cfg.MODEL.WEIGHTS = os.path.join(cfg.MODEL_DIR, chicken_model_name)
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.8  # custom testing threshold
    return DefaultPredictor(cfg)


def calib_image(img_source):
    """Undistort a fisheye camera frame and return the remapped image.

    The intrinsic matrix K is built from a fixed focal constant and the
    image centre; D holds the fisheye distortion coefficients tuned for
    the deployed cameras. The input array is not modified.
    """
    img = img_source.copy()
    cent_x, cent_y = img.shape[:2]  # (height, width)
    # @ f_val : 1100  (1100 and 1200 were the candidate focal constants)
    f_val = 1100.
    K = np.array([[f_val, 0.0, cent_y / 2], [0.0, f_val, cent_x / 2], [0., 0., 1.0]])
    D = np.array([0.04004325, 0.00112638, 0.01004722, -0.00593285])
    map1, map2 = cv2.fisheye.initUndistortRectifyMap(K, D, np.eye(3), K, (cent_y, cent_x), cv2.CV_16SC2)
    newImg = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
    return newImg


@paiplog
def detect_weight(image_dir, image_name):
    """Run chicken detection on the image(s) matching image_dir + image_name.

    Looks up the camera type (PTZ or not) in the DB, optionally undistorts
    the frame, runs the global ``predictor``, filters detections by area
    statistics / image bounds / (for non-PTZ) distance from the image
    centre, saves a visualisation next to the input, and returns a dict::

        {"pixel_count": <kept detections>, "image_name": <result jpg>,
         "status": "success"}

    Raises Exception when the predictor is missing, no image matches,
    area statistics are NaN, or 2 or fewer detections survive filtering.
    """
    if predictor is None:
        raise Exception("error = predictor is None")
    image_data = glob.glob(image_dir + image_name)
    if len(image_data) == 0:
        raise Exception("failed : Image is Not Found")
    # Renamed from 'id' (shadowed the builtin).
    name_parts = image_name.split('_')
    if IS_GATEWAY:
        # imgName : H02_CT03,1_20231108122102_farm_image_real_2295ae414e7f.jpg
        farm_id = ''
        house_id = name_parts[0]
        module_id = name_parts[1]
    else:
        # imgName : FA0001_GW00_H02_CT03,1_20231108122102_farm_image_real_2295ae414e7f.jpg
        farm_id = name_parts[0]
        house_id = name_parts[2]
        module_id = name_parts[3]
    try:
        if IS_GATEWAY:
            sql_str = "select is_ptz from tbl_cctv where house_id = '%s' and cctv_id = '%s' limit 1" % (house_id, module_id.split(',')[0])
        else:
            sql_str = "select ptz_support from tbl_farm_cctv where farm_id = '%s' and house_id = '%s' and cctv_id = '%s' limit 1" % (farm_id, house_id, module_id.split(',')[0])
        dbConn = PyDBconnector()
        rows = dbConn.select_from_db(sql_str)
        if len(rows.index) == 0:
            camera_type = 'cctv'
        else:
            if IS_GATEWAY:
                camera_type = rows['is_ptz'].item()
            else:
                camera_type = rows['ptz_support'].item()
        print('카메라타입 : ', camera_type)
    except Exception as e:
        # Best-effort: fall back to the plain-cctv pipeline when the DB lookup fails.
        camera_type = 'cctv'
        print(e)

    ##################################### image checking ###################################
    for idx, d in enumerate(image_data):
        print(f'Image_name : {d}')
        im = cv2.imread(d)
        if 'y' not in camera_type.lower():  # if the camera type is not PTZ...
            im = calib_image(im)
        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
        # Output format is documented at
        # https://detectron2.readthedocs.io/tutorials/models.html#model-output-format
        outputs = predictor(im)
        print("디텍션 모델 이름 : ", chicken_model_name)
        v = Visualizer(im[:, :, ::-1],
                       metadata=None,
                       scale=0.5,
                       instance_mode=ColorMode.IMAGE
                       )
        output_cpu = outputs['instances'].to("cpu")
        boxes = output_cpu.pred_boxes
        pred_masks = output_cpu.pred_masks
        # bound_check(x) is True when box x = (x1, y1, x2, y2) stays strictly
        # inside the image bounds with a 5-pixel margin on every side.
        bound_xy = torch.tensor([0, 0, pred_masks.shape[2], pred_masks.shape[1]])
        masks = torch.tensor([1, 1, -1, -1])
        bound_check = lambda x: all((bound_xy - x) * masks < -5)  # margin : 5
        num_of_interest = 40  # cap on how many detections are kept
        x_sum, x_std = [], 0.
        toggle_bound = []
        for i, x in enumerate(boxes):
            x1, y1, x2, y2 = x
            x_area = round(float((x2 - x1) * (y2 - y1)), 1)
            x_sum.append(x_area)
            toggle_bound.append(bound_check(x))
        x_mean = np.mean(x_sum)
        x_std = np.std(x_sum)
        if np.isnan(x_mean) or np.isnan(x_std):
            raise Exception('x_mean or x_std is None')
        # non-PTZ camera: keep boxes within the area band, inside the image
        # bounds, and (below) outside the heavily distorted centre radius.
        if 'y' not in camera_type.lower():
            ind_toggle = [(x_mean - 1.5 * x_std <= x <= x_mean + 0.8 * x_std) for x in x_sum]
            ind_toggle = np.array(ind_toggle) & np.array(toggle_bound)
            # Keep at most num_of_interest True entries, in order.
            ind_toggle = [ind_toggle[i] if sum(ind_toggle[:i]) < num_of_interest else False
                          for i in range(len(ind_toggle))]
            image_height, image_width = output_cpu.image_size
            center_point = torch.tensor((image_width / 2, image_height / 2), dtype=torch.float)
            circle_redius_distance = round(((3 * image_height) / 8), 1)
            # distances for distortion
            distances = (boxes.get_centers() - center_point).pow(2).sum(axis=1).sqrt().numpy()
            distortion_toggle = np.array([x > circle_redius_distance for x in distances])
            # setup toggle
            dist_toggle = ind_toggle & distortion_toggle
            no_dist_toggle = ind_toggle & (~distortion_toggle)
        else:
            # PTZ/cctv case: no distortion handling, symmetric area band.
            ind_toggle = [(x_mean - 1.5 * x_std <= x <= x_mean + 1.5 * x_std) for x in x_sum]
            ind_toggle = np.array(ind_toggle) & np.array(toggle_bound)
            ind_toggle = [ind_toggle[i] if sum(ind_toggle[:i]) < num_of_interest else False
                          for i in range(len(ind_toggle))]
            # setup toggle
            no_dist_toggle = ind_toggle
        no_distortion_count = int(sum(no_dist_toggle))
        if no_distortion_count <= 2:
            # Frames with 2 or fewer surviving detections are discarded.
            raise Exception('no_distortion_count is less than 2.')
        final_toggle = no_dist_toggle
        out = v.overlay_instances(boxes=output_cpu[final_toggle].pred_boxes,
                                  masks=output_cpu[final_toggle].pred_masks)
        output_img = out.get_image()[:, :, ::-1].copy()
        jpg_index = image_name.rfind('.jpg')
        plt_image = image_name[0:jpg_index] + "_broilerInputResult.jpg"
        plt_dir = image_dir + plt_image
        # Save the visualised result next to the input (overwrites if present).
        plt.imsave(plt_dir, output_img)
        response = {
            "pixel_count": no_distortion_count,
            "image_name": plt_image,
            "status": "success"
        }
        return response


if __name__ == '__main__':
    try:
        print(sys.argv)
        jdata = json.loads(sys.argv[1].replace('\'', ''))
        image_dir = jdata.get("dir")
        image_name = jdata.get("image_name")
        # BUGFIX: the None guard must run BEFORE image_name.find() is called,
        # otherwise a missing name raised AttributeError instead of this Exception.
        if image_name is None:
            raise Exception('Not Exist FileName!!')
        if image_name.find(".jpg") == -1:
            image_name = image_name + '.jpg'
        print('1. 파일 경로 : ', image_dir)
        print('2. 이미지 이름 :', image_name)
        # Check Gateway Or Cloud
        if 'FA' in image_name:
            IS_GATEWAY = False
        # Check Exist GPU
        if len(tf.config.experimental.list_physical_devices('GPU')) > 0:
            IS_GPU = True
        start_time = time.time()
        predictor = initDetectron()
        try:
            response = detect_weight(image_dir, image_name)
        except Exception as e:
            print(e)
            response = {
                "image_name": image_name,
                "status": "fail"
            }
        end_time = time.time()
        print(f"<<<< detect_result finish {end_time - start_time:.5f} sec >>>>")
        print(response)
    except Exception as e:
        # Chain the original cause so the real failure survives in the traceback.
        raise Exception("failed : main Error") from e