Tryag File Manager
Home
||
Turbo Force
||
B-F Config_Cpanel
Current Path :
/
paip
/
script
/
weight
/
Or
Select Your Path :
Upload File :
New :
File
Dir
//paip/script/weight/new_chicken_test.py
# Copyright (c) 2020~2022 PAIPTREE SmartFarm, Inc. All Rights Reserved.
# Authors: Jo Hanbyul, Jeon Gyubin (조한별, 전규빈)
"""Chicken (broiler) weight estimation test script.

Runs a Detectron2 Mask R-CNN instance-segmentation model over a directory
of test images and derives per-detection pixel counts, which downstream
code converts into weight estimates.
"""
import atexit
import bisect
import glob
import json
import multiprocessing as mp
import os
import time
import warnings

import cv2
import numpy as np
import pandas as pd
import torch

from detectron2.utils.logger import setup_logger

setup_logger()

from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.engine import DefaultPredictor
from detectron2.structures import BoxMode
from detectron2.utils.visualizer import ColorMode, Visualizer

MODEL_DIR = './model/'
num_of_layers = 6
threshold_num = 1000  # 10000 to 2000 . 2022.01.12
delta_db_std = .3
dead_detector_alpha = .4
HOME_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir)
DATA_DIR = 'data'
MODULE_DIR = 'util'
OUTPUT_DIR = 'out'

warnings.filterwarnings('ignore')


class AsyncPredictor:
    """A predictor that runs the model asynchronously, possibly on >1 GPUs.

    Because rendering the visualization takes a considerable amount of time,
    this helps improve throughput a little bit when rendering videos.

    @ class from detectron2 demo/predictor.py
    """

    class _StopToken:
        # Sentinel put on the task queue to tell a worker process to exit.
        pass

    class _PredictWorker(mp.Process):
        """Worker process: owns one DefaultPredictor bound to one device."""

        def __init__(self, cfg, task_queue, result_queue):
            self.cfg = cfg
            self.task_queue = task_queue
            self.result_queue = result_queue
            super().__init__()

        def run(self):
            predictor = DefaultPredictor(self.cfg)
            while True:
                task = self.task_queue.get()
                if isinstance(task, AsyncPredictor._StopToken):
                    break
                idx, data = task
                result = predictor(data)
                self.result_queue.put((idx, result))

    def __init__(self, cfg, num_gpus: int = 1):
        """
        Args:
            cfg (CfgNode):
            num_gpus (int): if 0, will run on CPU
        """
        num_workers = max(num_gpus, 1)
        self.task_queue = mp.Queue(maxsize=num_workers * 3)
        self.result_queue = mp.Queue(maxsize=num_workers * 3)
        self.procs = []
        for gpuid in range(max(num_gpus, 1)):
            cfg = cfg.clone()
            cfg.defrost()
            # One worker per GPU; a single CPU worker when num_gpus == 0.
            cfg.MODEL.DEVICE = "cuda:{}".format(gpuid) if num_gpus > 0 else "cpu"
            self.procs.append(
                AsyncPredictor._PredictWorker(cfg, self.task_queue, self.result_queue)
            )

        self.put_idx = 0
        self.get_idx = 0
        # Out-of-order results are buffered here, kept sorted by request index.
        self.result_rank = []
        self.result_data = []

        for p in self.procs:
            p.start()
        atexit.register(self.shutdown)

    def put(self, image):
        """Submit one image for asynchronous prediction."""
        self.put_idx += 1
        self.task_queue.put((self.put_idx, image))

    def get(self):
        """Return the next result, in the same order the images were put."""
        self.get_idx += 1  # the index needed for this request
        if len(self.result_rank) and self.result_rank[0] == self.get_idx:
            res = self.result_data[0]
            del self.result_data[0], self.result_rank[0]
            return res
        while True:
            # make sure the results are returned in the correct order
            idx, res = self.result_queue.get()
            if idx == self.get_idx:
                return res
            insert = bisect.bisect(self.result_rank, idx)
            self.result_rank.insert(insert, idx)
            self.result_data.insert(insert, res)

    def __len__(self):
        # Number of submitted requests whose results were not fetched yet.
        return self.put_idx - self.get_idx

    def __call__(self, image):
        self.put(image)
        return self.get()

    def shutdown(self):
        """Ask every worker process to exit (registered via atexit)."""
        for _ in self.procs:
            self.task_queue.put(AsyncPredictor._StopToken())

    @property
    def default_buffer_size(self):
        return len(self.procs) * 5


class Prepare_predictor:
    """Singleton class.

    predictor for instance segmentation
    purpose: to get chicken (broiler) pixel counts for prediction of
    chicken weights
    """

    __instance = None

    def __init__(self):
        if not Prepare_predictor.__instance:
            # Runs only when no instance exists yet: register datasets and
            # build the predictor.
            for d in ["train", "val"]:
                try:
                    DatasetCatalog.register(
                        "chicken_" + d,
                        lambda d=d: self.get_chicken_dicts("/gate/script/weight/dataset/" + d))
                    MetadataCatalog.get("chicken_" + d).set(thing_classes=["chicken"])
                except Exception:
                    # Dataset already registered (repeated init): only refresh metadata.
                    # NOTE(review): was a bare `except:`; narrowed to Exception.
                    MetadataCatalog.get("chicken_" + d).set(thing_classes=["chicken"])
            self.chicken_metadata = MetadataCatalog.get("chicken_train")

            self.cfg = get_cfg()
            self.cfg.merge_from_file(model_zoo.get_config_file(
                "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
            # Fall back to CPU when CUDA is unavailable.
            device = 'cuda' if torch.cuda.is_available() else 'cpu'
            self.cfg.merge_from_list(['MODEL.DEVICE', device])
            self.cfg.DATASETS.TRAIN = ("chicken_train",)
            self.cfg.DATASETS.TEST = ()
            self.cfg.DATALOADER.NUM_WORKERS = 2
            # Let training initialize from model zoo
            self.cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
                "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
            self.cfg.SOLVER.IMS_PER_BATCH = 2
            self.cfg.SOLVER.BASE_LR = 0.00025  # pick a good LR
            # 300 iterations seems good enough for this toy dataset; you will
            # need to train longer for a practical dataset.
            self.cfg.SOLVER.MAX_ITER = 300
            self.cfg.SOLVER.STEPS = []  # do not decay learning rate
            # faster, and good enough for this toy dataset (default: 512)
            self.cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128
            # only has one class (chicken). NOTE: this config means the number
            # of classes; a few popular unofficial tutorials incorrectly use
            # num_classes+1 here.
            # (see https://detectron2.readthedocs.io/tutorials/datasets.html#update-the-config-for-new-datasets)
            self.cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1
            self.cfg.MODEL_DIR = MODEL_DIR
            os.makedirs(self.cfg.MODEL_DIR, exist_ok=True)

            # Inference should use the config with parameters that are used in
            # training; cfg now already contains everything we've set
            # previously. We changed it a little bit for inference:
            self.chicken_model_name = "weight_predict_new.pth"
            # path to the model we just trained
            self.cfg.MODEL.WEIGHTS = os.path.join(self.cfg.MODEL_DIR, self.chicken_model_name)
            # set a custom testing threshold
            self.cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.8

            # ----------------------------------------------------------------
            # parallel (multi-GPU) inference
            # ----------------------------------------------------------------
            parallel = True  # edit this !!
            self.cfg.parallel = parallel
            if parallel:
                num_gpu = torch.cuda.device_count()
                self.predictor = AsyncPredictor(self.cfg, num_gpus=num_gpu)
            else:
                self.predictor = DefaultPredictor(self.cfg)
        else:
            # An instance already exists: delegate to the accessor.
            self.getInstance()

    def get_chicken_dicts(self, img_dir):
        """Build detectron2-format dataset dicts from a VIA annotation file.

        Args:
            img_dir: directory containing the images and `via_region_data.json`.

        Returns:
            list[dict]: one record per image with `file_name`, `image_id`,
            `height`, `width` and polygon `annotations`.
        """
        json_file = os.path.join(img_dir, "via_region_data.json")
        with open(json_file) as f:
            imgs_anns = json.load(f)

        dataset_dicts = []
        for idx, v in enumerate(imgs_anns.values()):
            record = {}
            filename = os.path.join(img_dir, v["filename"])
            height, width = cv2.imread(filename).shape[:2]

            record["file_name"] = filename
            record["image_id"] = idx
            record["height"] = height
            record["width"] = width

            annos = v["regions"]
            objs = []
            for anno in annos:
                anno = anno["shape_attributes"]
                px = anno["all_points_x"]
                py = anno["all_points_y"]
                # Shift vertices to pixel centers, then flatten to
                # [x0, y0, x1, y1, ...] as detectron2 expects.
                poly = [(x + 0.5, y + 0.5) for x, y in zip(px, py)]
                poly = [p for x in poly for p in x]
                obj = {
                    "bbox": [np.min(px), np.min(py), np.max(px), np.max(py)],
                    "bbox_mode": BoxMode.XYXY_ABS,
                    "segmentation": [poly],
                    "category_id": 0,
                }
                objs.append(obj)
            record["annotations"] = objs
            dataset_dicts.append(record)
        # BUGFIX: previously returned `self.dataset_dicts`, which is never
        # assigned (AttributeError); the local list is the intended result.
        return dataset_dicts

    @classmethod
    def getInstance(cls):
        """Return the singleton instance, creating it on first call."""
        if not cls.__instance:
            cls.__instance = Prepare_predictor()
        return cls.__instance


def detect_weight():
    """Segment chickens in every test image and derive pixel counts.

    Reads images from a hard-coded test directory, runs the (module-global)
    singleton predictor on each, filters detections by box area and image
    border proximity, and validates that at least one detection survives.

    Raises:
        Exception: if the predictor/metadata are missing, the area statistics
            are NaN (no detections), or the filtered detection count is 0.
    """
    if predictor.predictor is None:
        raise Exception("error = predictor is None")
    if predictor.chicken_metadata is None:
        raise Exception("error = chicken_metadata is None")

    data_dir = "/home/centos/data/chicken/test_500/"
    FileList = glob.glob(data_dir + '/*.jpg', recursive=True)
    img_namess = FileList
    print(f'파일 갯수 : {len(img_namess)}')

    for idx, d in enumerate(img_namess):
        image_dir, image_name = os.path.split(d)
        im = cv2.imread(d)
        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
        # Output format is documented at
        # https://detectron2.readthedocs.io/tutorials/models.html#model-output-format
        outputs = predictor.predictor(im)

        # Kept although the overlay rendering below was disabled: re-enable
        # `v.overlay_instances(...)` to visualize results.
        v = Visualizer(im[:, :, ::-1],
                       metadata=predictor.chicken_metadata,
                       scale=0.5,
                       # remove the colors of unsegmented pixels. This option
                       # is only available for segmentation models
                       instance_mode=ColorMode.IMAGE)

        output_cpu = outputs['instances'].to("cpu")
        boxes = output_cpu.pred_boxes
        pred_masks = output_cpu.pred_masks

        # Image bounds as (x1, y1, x2, y2); a box whose edge comes within
        # 5 px of the frame is treated as cut off and rejected.
        bound_xy = torch.tensor([0, 0, pred_masks.shape[2], pred_masks.shape[1]])
        masks = torch.tensor([1, 1, -1, -1])
        bound_check = lambda x: all((bound_xy - x) * masks < -5)  # margin : 5

        x_sum, x_std = [], 0.
        toggle_bound = []
        for x in boxes:
            x1, y1, x2, y2 = x
            x_area = round(float((x2 - x1) * (y2 - y1)), 1)
            x_sum.append(x_area)
            toggle_bound.append(bound_check(x))

        # BUGFIX: `ind_toggle` was used below but its computation had been
        # commented out, causing a NameError at runtime. Restored from the
        # commented code: keep detections whose box area lies within one
        # standard deviation of the mean AND that do not touch the border.
        # TODO confirm whether the additional `num_of_interest` cap from the
        # commented block should also be restored.
        x_mean = np.mean(x_sum)
        x_std = np.std(x_sum)
        if np.isnan(x_mean) or np.isnan(x_std):
            raise Exception('x_mean or x_std is None ')
        ind_toggle = [(x_mean - x_std <= x <= x_mean + x_std) for x in x_sum]
        ind_toggle = np.array(ind_toggle) & np.array(toggle_bound)

        # Per-detection segmented-pixel counts for the surviving detections.
        total_sum = torch.sum(pred_masks, (1, 2))[ind_toggle]
        total_count = len(total_sum)
        if total_count <= 0:
            raise Exception('detection count 0')
        # NOTE(review): the original carried a large commented-out section
        # here (CSV export via pandas, image overlay saving, and the actual
        # weight_predict call) — see version control history to restore it.


if __name__ == '__main__':
    # Required for CUDA + multiprocessing: workers must be spawned, not forked.
    torch.multiprocessing.set_start_method('spawn')
    print('--------------------model loading---------------------')
    try:
        predictor = Prepare_predictor.getInstance()
        sTime = time.time()
        detect_weight()
        print(f"duration : {time.time() - sTime}")
    except Exception as e:
        raise Exception(f" failed : {e} ") from e