Tryag File Manager
Home
||
Turbo Force
||
B-F Config_Cpanel
Current Path :
/
paip
/
script
/
weight
/
Or
Select Your Path :
Upload File :
New :
File
Dir
# /paip/script/weight/weighter_detection.py
# Copyright (c) 2020~2023 PAIPTREE SmartFarm, Inc. All Rights Reserved.
# Author: 박정훈
# modified : 2024-04-04
#   - GPU/CPU execution-logic bug fix

# Standard library
import json
import os
import sys
import traceback

# Third-party
import cv2
import matplotlib.pyplot as plt
import numpy as np
import torch

# detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.utils.visualizer import ColorMode

if __name__ == '__main__':
    # CLI protocol: argv[1] = directory to save the visualization into,
    #               argv[2] = path of the image to run detection on.
    # stdout contract read by the caller:
    #   "success:<result_image>:<count>:<pixel_list>"  or  "fail"
    if len(sys.argv) <= 2:
        print("fail - not input img_path")
        sys.exit()

    print("체중계 닭 디텍션 시작!!")
    save_dir = sys.argv[1]
    img_path = sys.argv[2]
    print("Input img_path : ", img_path)

    try:
        # Select the model directory by site ('FA' in the path => paip tree,
        # otherwise gate tree) and by operating system.
        if 'FA' in img_path:
            windows_model_path = 'D:\\paip\\script\\weight\\model\\'
            linux_model_path = '/paip/script/weight/model/'
        else:
            windows_model_path = 'D:\\gate\\script\\weight\\model\\'
            linux_model_path = '/gate/script/weight/model/'
        model_path = windows_model_path if os.name == 'nt' else linux_model_path

        cfg = get_cfg()
        cfg.merge_from_file(model_zoo.get_config_file(
            "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))

        # BUGFIX: the predictor runs under torch, so torch must be asked about
        # CUDA availability. The previous code probed tensorflow's GPU list,
        # which can disagree with torch (e.g. CPU-only tf next to CUDA torch).
        if not torch.cuda.is_available():
            print('Running CPU!!')
            cfg.merge_from_list(['MODEL.DEVICE', 'cpu'])

        # path to the trained model
        cfg.MODEL.WEIGHTS = os.path.join(model_path, "weighter_model_final_230516.pth")
        cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7   # custom testing threshold
        cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128  # faster, good enough (default: 512)
        cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1           # single class (chicken)

        predictor = DefaultPredictor(cfg)

        image_name = img_path.split('/')[-1]
        print('image name : ', image_name)
        im = cv2.imread(img_path)
        if im is None:
            raise Exception('image not found')
        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)

        # Output format:
        # https://detectron2.readthedocs.io/tutorials/models.html#model-output-format
        outputs = predictor(im)
        v = Visualizer(im[:, :, ::-1],
                       scale=1.0,
                       instance_mode=ColorMode.IMAGE)
        output_cpu = outputs['instances'].to("cpu")
        boxes = output_cpu.pred_boxes
        pred_masks = output_cpu.pred_masks

        # bound_check passes only when every box edge is more than 5 px away
        # from the corresponding image border (border-touching boxes dropped).
        bound_xy = torch.tensor([0, 0, pred_masks.shape[2], pred_masks.shape[1]])
        masks = torch.tensor([1, 1, -1, -1])
        bound_check = lambda x: all((bound_xy - x) * masks < -5)  # margin : 5

        # Per-box area and border flags. (The old pre-loop x1..y2 reads were
        # dead code and raised IndexError when there were zero detections.)
        x_sum, toggle_bound = [], []
        for x in boxes:
            x1, y1, x2, y2 = x
            x_area = round(float((x2 - x1) * (y2 - y1)), 1)
            x_sum.append(x_area)
            toggle_bound.append(bound_check(x))
        x_mean = np.mean(x_sum)
        x_std = np.std(x_sum)
        if np.isnan(x_mean) or np.isnan(x_std):
            # also covers the zero-detection case (mean/std of [] is nan)
            raise Exception('x_mean or x_std is None ')

        # Keep boxes whose area lies within 2 sigma of the mean AND that are
        # clear of the border; cap the number of accepted detections at 10.
        # NOTE(review): an older revision used num_of_interest = 40 here —
        # the active code deliberately(?) hard-codes 10; confirm the cap.
        ind_toggle = [(x_mean - 2 * x_std <= a <= x_mean + 2 * x_std) for a in x_sum]
        ind_toggle = np.array(ind_toggle) & np.array(toggle_bound)
        ind_toggle = [ind_toggle[i] if sum(ind_toggle[:i]) < 10 else False
                      for i in range(len(ind_toggle))]

        image_height, image_width = output_cpu.image_size
        center_point = torch.tensor((image_width / 2, image_height / 2), dtype=torch.float)
        # Boxes whose center is farther than 3/8 of the image height from the
        # image center are treated as lens-distorted and excluded.
        circle_radius_distance = round(((3 * image_height) / 8), 1)
        distances = (boxes.get_centers() - center_point).pow(2).sum(axis=1).sqrt().numpy()
        distortion_toggle = np.array([d > circle_radius_distance for d in distances])
        no_dist_toggle = ind_toggle & (~distortion_toggle)

        # Mask pixel counts for the surviving (undistorted) detections.
        pixel_list = torch.sum(pred_masks, (1, 2))[no_dist_toggle].tolist()
        no_distortion_count = int(sum(no_dist_toggle))

        final_toggle = no_dist_toggle
        out = v.overlay_instances(boxes=output_cpu[final_toggle].pred_boxes,
                                  masks=output_cpu[final_toggle].pred_masks)
        output_img = out.get_image()[:, :, ::-1].copy()

        jpg_index = image_name.rfind('.jpg')
        plt_image = image_name[0:jpg_index] + "_weighterDetectionResult.jpg"
        plt_dir = save_dir + plt_image
        print("success:%s:%d:%s" % (plt_image, no_distortion_count, str(pixel_list)))
        plt.imsave(plt_dir, output_img)
    except Exception:
        # Keep the stdout protocol ("fail") but surface the real error on
        # stderr instead of silently swallowing it (was a bare `except:`).
        traceback.print_exc(file=sys.stderr)
        print('fail')

# ex) success:14:[12,13,13,13,13]
#     fail