Tryag File Manager
Home
||
Turbo Force
||
B-F Config_Cpanel
Current Path :
/
paip
/
script
/
weight
/
unused
/
Or
Select Your Path :
Upload File :
New :
File
Dir
/paip/script/weight/unused/unused_broiler_runner.py
# Detectron2-based broiler (chicken) instance-segmentation runner.
# Loads fisheye captures, registers a VIA-annotated chicken dataset, trains a
# Mask R-CNN (CPU), then runs inference and dumps per-image pixel stats to CSV.

import torch, torchvision
# torch.cuda.empty_cache()
print(torch.__version__, torch.cuda.is_available())
assert torch.__version__.startswith("1.9")  # the detectron2 build used here expects torch 1.9.x

# Some basic setup:
# Setup detectron2 logger
import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()

# import some common libraries
import numpy as np
import os, json, cv2, random
from detectron2.structures import BoxMode

# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
import glob
import copy
from detectron2.utils.visualizer import Visualizer
from detectron2.data import detection_utils as utils
import detectron2.data.transforms as T
from detectron2.engine import DefaultTrainer
from detectron2.data import DatasetCatalog, MetadataCatalog, build_detection_test_loader, build_detection_train_loader
import torch
from detectron2.utils.visualizer import ColorMode
import pandas as pd

# Source images for inference and where model/csv output lands.
DATA_DIR = data_dir = '/home/gate/jupyter-workspace/output/fisheye/20210908/'
FileList = glob.glob(data_dir + '*.jpg')
img_namess = FileList
OUTPUT_DIR = '.'
def get_chicken_dicts(img_dir):
    """Build a detectron2-format dataset list from a VIA annotation file.

    Reads ``via_region_data.json`` under *img_dir* and returns a list of
    records, one per image, each with file name, size, and polygon
    annotations converted to XYXY_ABS boxes + segmentation polygons.
    Single category (0 = chicken).
    """
    json_file = os.path.join(img_dir, "via_region_data.json")
    with open(json_file) as f:
        imgs_anns = json.load(f)

    dataset_dicts = []
    for idx, v in enumerate(imgs_anns.values()):
        record = {}
        print(v)
        filename = os.path.join(img_dir, v["filename"])
        print(filename)
        height, width = cv2.imread(filename).shape[:2]

        record["file_name"] = filename
        record["image_id"] = idx
        record["height"] = height
        record["width"] = width

        annos = v["regions"]
        print(annos[0].keys())
        objs = []
        for anno in annos:
            # assert not anno["region_attributes"]
            anno = anno["shape_attributes"]
            px = anno["all_points_x"]
            py = anno["all_points_y"]
            # shift vertices to pixel centers, then flatten to [x0, y0, x1, y1, ...]
            poly = [(x + 0.5, y + 0.5) for x, y in zip(px, py)]
            poly = [p for x in poly for p in x]
            obj = {
                "bbox": [np.min(px), np.min(py), np.max(px), np.max(py)],
                "bbox_mode": BoxMode.XYXY_ABS,
                "segmentation": [poly],
                "category_id": 0,
            }
            objs.append(obj)
        record["annotations"] = objs
        dataset_dicts.append(record)
    return dataset_dicts


def custom_mapper(dataset_dict):
    """Custom DatasetMapper: resize every image to 500x500 and rebuild instances.

    `mylist`/`resize_list` feed only the commented-out multi-scale variant below;
    the active transform is the fixed 500x500 resize.
    """
    mylist = np.arange(0.5, 1.5, 0.3)
    mylist = mylist.tolist()
    # Implement a mapper, similar to the default DatasetMapper, but with your own customizations
    dataset_dict = copy.deepcopy(dataset_dict)  # it will be modified by code below
    image = utils.read_image(dataset_dict["file_name"], format="BGR")
    resize_list = [(int(image.shape[0] * i), int(image.shape[1] * i)) for i in mylist]
    # transform_list = [T.Resize(x) for x in resize_list]
    transform_list = [T.Resize((500, 500))]
    # print(transform_list)
    image, transforms = T.apply_transform_gens(transform_list, image)
    dataset_dict["image"] = torch.as_tensor(image.transpose(2, 0, 1).astype("float32"))

    annos = [
        utils.transform_instance_annotations(obj, transforms, image.shape[:2])
        for obj in dataset_dict.pop("annotations")
        if obj.get("iscrowd", 0) == 0
    ]
    instances = utils.annotations_to_instances(annos, image.shape[:2])
    dataset_dict["instances"] = utils.filter_empty_instances(instances)
    return dataset_dict


class WheatTrainer(DefaultTrainer):
    """DefaultTrainer that plugs `custom_mapper` into the training loader."""

    @classmethod
    def build_train_loader(cls, cfg):
        return build_detection_train_loader(cfg, mapper=custom_mapper)


if __name__ == '__main__':
    # Register train/val splits; lambda default-binds d to avoid late binding.
    for d in ["train", "val"]:
        DatasetCatalog.register("chicken_" + d, lambda d=d: get_chicken_dicts("/gate/script/weight/" + d))
        MetadataCatalog.get("chicken_" + d).set(thing_classes=["chicken"])
    chicken_metadata = MetadataCatalog.get("chicken_train")

    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
    cfg.merge_from_list(['MODEL.DEVICE', 'cpu'])  # switch to CPU
    cfg.DATASETS.TRAIN = ("chicken_train",)
    cfg.DATASETS.TEST = ()
    cfg.DATALOADER.NUM_WORKERS = 2
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")  # Let training initialize from model zoo
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.SOLVER.BASE_LR = 0.00025  # pick a good LR
    cfg.SOLVER.MAX_ITER = 300    # 300 iterations seems good enough for this toy dataset; train longer for a practical dataset
    cfg.SOLVER.STEPS = []        # do not decay learning rate
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128  # faster, and good enough for this toy dataset (default: 512)
    # Only one class (chicken). This is the number of classes; do NOT use
    # num_classes + 1 as some unofficial tutorials incorrectly do.
    # (see https://detectron2.readthedocs.io/tutorials/datasets.html#update-the-config-for-new-datasets)
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1
    cfg.OUTPUT_DIR = OUTPUT_DIR

    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    # trainer = DefaultTrainer(cfg)
    trainer = WheatTrainer(cfg)

    # Inference should use the config with parameters that are used in training
    # cfg now already contains everything we've set previously.
We changed it a little bit for inference: cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "chery_chung_model.pth") # path to the model we just trained cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7 # set a custom testing threshold predictor = DefaultPredictor(cfg) # drive code df = pd.DataFrame(columns=['date', 'mean', 'area', 'horizontal', 'vertical', 'raw']) for idx, d in enumerate(img_namess): # if idx > 1: # break im = cv2.imread(d) outputs = predictor( im) # format is documented at https://detectron2.readthedocs.io/tutorials/models.html#model-output-format v = Visualizer(im[:, :, ::-1], metadata=chicken_metadata, scale=0.5, instance_mode=ColorMode.IMAGE_BW # remove the colors of unsegmented pixels. This option is only available for segmentation models ) output_cpu = outputs['instances'].to("cpu") boxes = output_cpu.pred_boxes scores = output_cpu.scores pred_classes = output_cpu.pred_classes pred_masks = output_cpu.pred_masks # print((output_cpu)) # print(pred_masks) print(pred_masks.shape) # print(f'torch.sum : {torch.sum(pred_masks)}') # print(f'torch.sum keep dim 0 : {torch.sum(pred_masks, (1,2))}, {torch.sum(torch.sum(pred_masks, (1,2)))}') # print(f'{len(boxes)}') # boundary check bound_xy = torch.tensor([0, 0, pred_masks.shape[2], pred_masks.shape[1]]) masks = torch.tensor([1, 1, -1, -1]) bound_check = lambda x: all((bound_xy - x) * masks < -5) # margin : 5 x_sum, x_std = [], 0. 
area, horizontal, vertical = [], [], [] toggle_bound = [] for i, x in enumerate(boxes): x1, y1, x2, y2 = x x_area = round(float((x2 - x1) * (y2 - y1)), 1) width = round(float(x2 - x1), 1) height = round(float(y2 - y1), 1) # print(f'{i} : {x_area}') x_sum.append(x_area) horizontal.append(width) vertical.append(height) area.append(x_area) toggle_bound.append(bound_check(x)) x_mean = np.mean(x_sum) x_std = np.std(x_sum) # print(f'{x_mean},{x_std}') # ind_toggle = [ x <= x_mean + x_std for x in x_sum] ind_toggle = [x_mean - x_std <= x <= x_mean + x_std for x in x_sum] # ind_toggle = [x <= x_mean for x in x_sum] # update toggle ind_toggle = np.array(ind_toggle) & np.array(toggle_bound) # print(output_cpu[ind_toggle]) tensor_sum = torch.sum(pred_masks, (1, 2))[ind_toggle] tensor_mean = torch.round(torch.sum(tensor_sum) / len(tensor_sum)).item() print(f'torch.sum toggle : , {tensor_sum}') print(f'torch.mean toggle : {tensor_mean}') print(f'fileName : {d}') index = d.rfind('2021') date = d[index: index + 14] df.loc[idx] = [date, tensor_mean, area, horizontal, vertical, ind_toggle] # df.to_csv('pixel_data', index=False, encoding='cp949') # out = v.draw_instance_predictions(output_cpu[ind_toggle]) # cv2_imshow(out.get_image()[:, :, ::-1]) print('------------------------------------------------') print(df) if not os.path.exists('chungju_pixel_data.csv'): df.to_csv('chungju_pixel_data', index=False, columns=['date', 'mean', 'area', 'horizontal', 'vertical', 'raw'], encoding='utf-8') else: df.to_csv('chungju_pixel_data', index=False, columns=['date', 'mean', 'area', 'horizontal', 'vertical', 'raw'], encoding='utf-8')