Tryag File Manager
Home
||
Turbo Force
||
B-F Config_Cpanel
Current Path :
/
paip
/
script
/
weight
/
Or
Select Your Path :
Upload File :
New :
File
Dir
//paip/script/weight/chicken_rest_api.py
# //paip/script/weight/chicken_rest_api.py
# Copyright (c) 2020~2023 PAIPTREE SmartFarm, Inc. All Rights Reserved.
# Author: Jo Hanbyul
# Modified by: Park Jeonghun
# Last modified: 2023-06-02 (added blackout handling)
#
# Flask REST service that detects chickens in farm CCTV images with a
# detectron2 Mask R-CNN, converts detected pixel areas to weight estimates
# through several regression models, and returns the results as JSON.

import torch
import tensorflow as tf
from tensorflow import keras

# common libraries
import numpy as np
import os
import json
import cv2

# detectron2 utilities
from detectron2.structures import BoxMode
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.utils.visualizer import Visualizer
from detectron2.utils.visualizer import ColorMode
from detectron2.utils.logger import setup_logger

setup_logger()

import pandas as pd
import flask
from flask_cors import CORS
from flask import request, jsonify
import time
import glob
import math
import matplotlib.pyplot as plt
import joblib
from sklearn.mixture import GaussianMixture as GMM
import sys
import warnings
from datetime import date
from sklearn.preprocessing import PolynomialFeatures
from apscheduler.schedulers.background import BackgroundScheduler
from scipy import interpolate
import functools

MODEL_DIR = './model/'
HOME_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir)
DATA_DIR = 'data'
MODULE_DIR = 'util'
OUTPUT_DIR = 'out'
BREED_HIST_TARGET_TABLE = 'tbl_house_breed_hist'
refTable = None

warnings.filterwarnings('ignore')

# extend sys.path so PyDBconnector / logs in the sibling util dir can be imported
sys.path.append(os.path.join(HOME_PATH, MODULE_DIR))
from PyDBconnector import PyDBconnector
from logs import paiplog


class Prepare_predictor:
    """Singleton wrapper around the detectron2 chicken-segmentation predictor.

    Registers the "chicken" datasets, builds the Mask R-CNN config (CPU mode)
    and instantiates a ``DefaultPredictor``. Use :meth:`getInstance` instead of
    constructing directly.
    """

    __instance = None

    def __init__(self):
        if not Prepare_predictor.__instance:  # first construction: do the heavy setup
            for d in ["train", "val"]:
                try:
                    DatasetCatalog.register(
                        "chicken_" + d,
                        lambda d=d: self.get_chicken_dicts("/gate/script/weight/dataset/" + d))
                    MetadataCatalog.get("chicken_" + d).set(thing_classes=["chicken"])
                except Exception:
                    # dataset already registered (e.g. module reload): just refresh metadata
                    MetadataCatalog.get("chicken_" + d).set(thing_classes=["chicken"])
            self.chicken_metadata = MetadataCatalog.get("chicken_train")
            self.cfg = get_cfg()
            self.cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
            self.cfg.merge_from_list(['MODEL.DEVICE', 'cpu'])  # force CPU inference
            self.cfg.DATASETS.TRAIN = ("chicken_train",)
            self.cfg.DATASETS.TEST = ()
            self.cfg.DATALOADER.NUM_WORKERS = 2
            # initialize from the model-zoo checkpoint (overwritten with the local model below)
            self.cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
            self.cfg.SOLVER.IMS_PER_BATCH = 2
            self.cfg.SOLVER.BASE_LR = 0.00025  # pick a good LR
            self.cfg.SOLVER.MAX_ITER = 300  # enough for this toy dataset; train longer for production
            self.cfg.SOLVER.STEPS = []  # do not decay learning rate
            self.cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128  # faster; default is 512
            # single "chicken" class; NOTE: this is the number of classes, NOT num_classes + 1
            self.cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1
            self.cfg.MODEL_DIR = MODEL_DIR
            os.makedirs(self.cfg.MODEL_DIR, exist_ok=True)
            # inference config: reuse training config with the locally trained weights
            self.chicken_model_name = "weight_predict_new.pth"
            self.cfg.MODEL.WEIGHTS = os.path.join(self.cfg.MODEL_DIR, self.chicken_model_name)
            self.cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.8  # custom testing threshold
            self.predictor = DefaultPredictor(self.cfg)
            # self.backgroundScheduler()
        else:
            # an instance already exists; nothing to (re)build
            self.getInstance()

    def model_update(self):
        """Swap the detection model based on flock age (>= 20 days vs younger).

        Reads the most recent IN_DATE from the breed-history table and, when the
        age threshold is crossed, reloads the predictor with the other weights
        file. Intended to be run daily by the background scheduler.
        """
        sql_str = f"select IN_DATE from {BREED_HIST_TARGET_TABLE} order by CREATE_TIME desc limit 1"
        dbConn = PyDBconnector()
        rows = dbConn.select_from_db(sql_str)
        db_chicken_age = rows['IN_DATE'].item()
        today = date.today()
        date_diff = (today - db_chicken_age).days
        print(date_diff)
        # age >= 20 days: switch to the late-stage model
        if date_diff >= 20 and self.chicken_model_name == "chery_chung_model.pth":
            self.chicken_model_name = "chery_chung_makgok_model_2.pth"
        # age < 20 days: switch back to the early-stage model
        if date_diff < 20 and self.chicken_model_name == "chery_chung_makgok_model_2.pth":
            self.chicken_model_name = "chery_chung_model.pth"
        self.cfg.MODEL.WEIGHTS = os.path.join(self.cfg.MODEL_DIR, self.chicken_model_name)
        self.predictor = DefaultPredictor(self.cfg)

    def backgroundScheduler(self):
        """Start an APScheduler job that runs :meth:`model_update` daily at 00:00."""
        self.sched = BackgroundScheduler(daemon=True)
        self.sched.add_job(self.model_update, 'cron', hour='00', minute='00')
        # self.sched.add_job(self.model_update, 'interval', seconds=10)
        self.sched.start()

    def get_chicken_dicts(self, img_dir):
        """Build detectron2 dataset dicts from a VIA ``via_region_data.json`` file.

        @param img_dir: directory containing the images and the VIA annotation JSON
        @return: list of detectron2 standard-format record dicts
        """
        json_file = os.path.join(img_dir, "via_region_data.json")
        with open(json_file) as f:
            imgs_anns = json.load(f)
        dataset_dicts = []
        for idx, v in enumerate(imgs_anns.values()):
            record = {}
            filename = os.path.join(img_dir, v["filename"])
            height, width = cv2.imread(filename).shape[:2]
            record["file_name"] = filename
            record["image_id"] = idx
            record["height"] = height
            record["width"] = width
            annos = v["regions"]
            objs = []
            for anno in annos:
                anno = anno["shape_attributes"]
                px = anno["all_points_x"]
                py = anno["all_points_y"]
                # polygon vertices at pixel centers, flattened to [x0, y0, x1, y1, ...]
                poly = [(x + 0.5, y + 0.5) for x, y in zip(px, py)]
                poly = [p for x in poly for p in x]
                obj = {
                    "bbox": [np.min(px), np.min(py), np.max(px), np.max(py)],
                    "bbox_mode": BoxMode.XYXY_ABS,
                    "segmentation": [poly],
                    "category_id": 0,
                }
                objs.append(obj)
            record["annotations"] = objs
            dataset_dicts.append(record)
        # BUGFIX: was `return self.dataset_dicts`, but only the local variable is
        # ever assigned, which raised AttributeError on every call.
        return dataset_dicts

    @classmethod
    def getInstance(cls):
        """Return the singleton, creating it on first use."""
        if not cls.__instance:
            cls.__instance = Prepare_predictor()
        return cls.__instance


@paiplog
def find_nearest(arrayParam, value):
    '''
    Find the element of a sorted index list closest to the actual day-age,
    e.g. 24.5312 -> index value of the global refTable.
    @param arrayParam: sorted candidate index values
    @param value: actual day-age
    '''
    idx = np.searchsorted(arrayParam, value, side="left")
    if idx > 0 and (idx == len(arrayParam) or
                    math.fabs(value - arrayParam[idx - 1]) < math.fabs(value - arrayParam[idx])):
        return arrayParam[idx - 1]
    else:
        return arrayParam[idx]


@paiplog
@functools.lru_cache()
def getRefTable(dbConn, dayAge):
    """Return a reference weight for *dayAge*, interpolated per-minute with noise.

    The reference table is read from DB, linearly interpolated to minute
    resolution, and gaussian noise (sigma ~= 10 g) is added so repeated values
    look natural. Results are memoized per (dbConn, dayAge) via lru_cache so the
    noisy value is stable within a process.

    @raises Exception: when the reference table cannot be read
    """
    sql_refTable = "select * from tbl_weight_ref"
    try:
        refTable = dbConn.select_from_db(sql_refTable)
    except Exception:
        raise Exception("Error to read refTable")
    # minute-resolution index: one row per minute across the table's day range
    ref_re = pd.DataFrame(index=[round(x / (24 * 60), 4) for x in range(24 * 60 * (len(refTable) - 1) + 1)])
    ref_re = ref_re.merge(refTable.ref_weight, left_index=True, right_index=True, how='left')
    f_linear = interpolate.interp1d(refTable.index.to_list(), refTable.ref_weight.to_list(), kind='linear')
    y_new = f_linear(ref_re.index.to_list())
    ref_re.ref_weight = y_new
    ref_re.dropna(inplace=True)
    ref_re['new_ref_weight'] = np.round(ref_re.ref_weight + list(np.random.normal(size=len(ref_re)) * 10), 2)
    refTable = ref_re.copy()
    # hotfix: when no index rows share int(dayAge), fall back to the last two
    # rows so find_nearest always receives a non-empty list
    arrayTwo = [x for x in refTable.index.to_list() if int(x) == int(dayAge)]
    return refTable.loc[
        find_nearest(arrayTwo if len(arrayTwo) > 0 else refTable.index.to_list()[-2:], dayAge),
        'new_ref_weight']


@paiplog
def weight_predict_new(no_distortion_pixel_list, no_distortion_count, house_id, module_id,
                       file_date, dbConn, camera_type):
    """Convert detected pixel areas to weight estimates.

    Tries, in order: (1) per-house / generalized DNN models, (2) the scale
    (weigher) statistics table, (3) the noisy reference table.

    @param no_distortion_pixel_list: per-detection pixel areas (distortion-free region)
    @param no_distortion_count: number of usable detections
    @param house_id, module_id: identifiers used for DB lookups
    @param file_date: capture timestamp string (parsed by pandas)
    @param dbConn: open PyDBconnector
    @param camera_type: 'yes' for PTZ, anything else for bullet/cctv
    @return: 6-tuple of model outputs when the DNN path succeeds; otherwise a
             single float from the weigher stats or the reference table.
             NOTE(review): the caller unpacks 6 values, so the scalar fallbacks
             will raise at the call site — confirm whether that is intended.
    """
    needRawWeight, needRefTable = False, False  # model availability / ref-table fallback flags
    loaded_model = None  # placeholder for a loaded model
    to_day = pd.Timestamp(file_date)

    ####################################################################################################################
    ## 0-1. fetch the flock's in-date and derive the fractional day-age
    sql_dayAge = f'''select in_date from {BREED_HIST_TARGET_TABLE} where 1=1 and house_id = '{house_id}' order by create_time desc limit 1'''
    try:
        rows = dbConn.select_from_db(sql_dayAge)
        in_date = str(rows.values[0][0])
        dayAge = (to_day - pd.Timestamp(in_date)).days + \
            round(((to_day - pd.Timestamp(in_date)).seconds // 60) / (24 * 60), 4)
        print("일령정보 : ", dayAge)
    except Exception:
        # BUGFIX: the original string lacked the f-prefix, so the table name
        # placeholder was emitted literally.
        raise Exception(f"Error to read in date from {BREED_HIST_TARGET_TABLE}")

    ## 0-2. reference table (interpolated + noise) — used as last-resort fallback
    try:
        refWeight = getRefTable(dbConn, dayAge)
    except Exception:
        raise Exception(f"failed to get ref table data for {to_day.strftime('%Y%m%d')}")

    ####################################################################################################################
    ## 0-3. shipping check (2022.12.29: disabled, moved to the platform side)
    # if checkIsShipping(dayAge, no_distortion_count):
    #     raise Exception(f'{house_id}-{file_date} shipping is in progress')

    ####################################################################################################################
    ## 1. model prediction — load the area-to-weight model
    try:
        # per-house model
        regressor = joblib.load('./model/pixel_weight_' + str(house_id) + '_regressor.pkl')
    except Exception:
        try:
            # farm-generalized DNN models, chosen by day-age
            if dayAge >= 15:
                print('무게예측모델 : ./model/miri_dnn_model_sqrt_36_230306.h5')
                loaded_model1 = tf.keras.models.load_model("./model/miri_dnn_model_sqrt_36_230306.h5")
                loaded_model3 = tf.keras.models.load_model("./model/miri_dnn_reference_sqrt_230410.h5")
            elif dayAge < 15:
                print('무게예측모델 : ./model/miri_dnn_8day_sqrt_230420.h5')
                loaded_model1 = tf.keras.models.load_model("./model/miri_dnn_8day_sqrt_230420.h5")
                loaded_model3 = tf.keras.models.load_model("./model/miri_dnn_reference_sqrt_230410.h5")
        except Exception:
            needRawWeight = True

    if not needRawWeight:
        ## 2. camera geometry needed to normalize pixel areas
        try:
            sql_str = "select a.CAM_DIST, a.HOUSE_TYPE, b.pixel_resolution from tbl_cctv AS a \
                inner join tbl_cctv_pixel_resolution as b on a.CCTV_ID = '%s' and b.cctv_id = '%s' where a.HOUSE_ID = '%s' limit 1" % (
                module_id.split(',')[0], module_id.split(',')[0], house_id)
            rows_ex = dbConn.select_from_db(sql_str)
            dist, house_type = float(rows_ex['CAM_DIST'].item()), rows_ex['HOUSE_TYPE'].item()
            resolution = float(rows_ex['pixel_resolution'].item())
            # For bullet cameras: fetch the house's PTZ resolution — pixel areas
            # must be rescaled to the PTZ basis before entering the models.
            sql_str1 = "select a.pixel_resolution from tbl_cctv_pixel_resolution AS a inner join tbl_cctv as b \
                on a.CCTV_ID = b.cctv_id where b.IS_PTZ = 'yes' and b.HOUSE_ID='%s' limit 1" % (house_id)
            rows_ex1 = dbConn.select_from_db(sql_str1)
            crisis_resolution = float(rows_ex1['pixel_resolution'].item())
        except Exception:
            raise Exception("cam info select failed")

        if camera_type == 'yes':  # PTZ camera
            temp = pd.DataFrame(no_distortion_pixel_list, columns=['pixel'])
            ## weight1: neural-network regression (modeled at the Miri farm, 5.4 m reference)
            temp_nnr = np.sqrt(temp['pixel']) * (resolution / 1.554)
            model1 = loaded_model1.predict(temp_nnr)
            model1 = model1.astype(int)
            ## weight2: integrated-weigher regression, pred_weight = 0.00017 * length^2.5961
            ## (0.36 is the integrated weigher's unit resolution)
            temp_iwr = np.sqrt(temp['pixel']) * (resolution / 0.36)
            model2 = np.power(temp_iwr, 2.5961) * 0.00017
            model2 = model2.astype(int)
            ## weight3: reference NN regression
            temp_nnr = np.sqrt(temp['pixel']) * (resolution / 1.554)
            model3 = loaded_model3.predict(temp_nnr)
            model3 = model3.astype(int)
            ## weight4..6: reserved, not implemented yet
            model4 = 0
            model5 = 0
            model6 = 0
        elif camera_type != 'yes':  # bullet camera
            temp = pd.DataFrame(no_distortion_pixel_list, columns=['pixel'])
            # rescale bullet pixel areas to the PTZ pixel basis
            temp['pixel'] = temp['pixel'] * pow(resolution / crisis_resolution, 2)
            ## weight1: neural-network regression (Miri farm 5.4 m reference)
            temp_nnr = np.sqrt(temp['pixel']) * (crisis_resolution / 1.554)
            model1 = loaded_model1.predict(temp_nnr)
            model1 = model1.astype(int)
            ## weight2: integrated-weigher regression, pred_weight = 0.00017 * length^2.5961
            temp_iwr = np.sqrt(temp['pixel']) * (crisis_resolution / 0.36)
            model2 = np.power(temp_iwr, 2.5961) * 0.00017
            model2 = model2.astype(int)
            ## weight3: reference NN regression
            temp_nnr = np.sqrt(temp['pixel']) * (crisis_resolution / 1.554)
            model3 = loaded_model3.predict(temp_nnr)
            model3 = model3.astype(int)
            ## weight4..6: reserved, not implemented yet
            model4 = 0
            model5 = 0
            model6 = 0
        return model1.flatten().tolist(), model2.tolist(), model3.flatten().tolist(), model4, model5, model6

    ####################################################################################################################
    ## 2. weigher (scale) data fallback — no model could be loaded
    else:
        sql_str = f'''select avg(rollingWeight) as rollingWeight from tbl_weight_stats where 1=1 and house_id = '{house_id}' and create_time >= '{(to_day - pd.Timedelta(4, unit='hour')).strftime("%Y-%m-%d %H:%M")}' and create_time <= '{(to_day + pd.Timedelta(2, unit='hour')).strftime("%Y-%m-%d %H:%M")}' '''
        try:
            rows = dbConn.select_from_db(sql_str)
            # no weigher rows at all -> fall through to the reference table
            if len(rows) < 1:
                raise Exception("no weight data on DB")
            predictedWeight = float(rows.values[0][0])
            # add ~1% random noise so repeated averages look natural
            predictedWeight = predictedWeight + np.random.normal() * (predictedWeight * .01)
            return predictedWeight
        except Exception:
            needRefTable = True

    ####################################################################################################################
    ## 3. reference table fallback
    if needRefTable:
        return refWeight
    raise Exception(f"failed to get [[model-weight, weight-stats, ref-weight]] ")


def calib_image(img_source):
    """Undistort a fisheye frame with fixed intrinsics.

    @param img_source: BGR image (ndarray); a copy is made, the input is untouched
    @return: undistorted image of the same shape
    '''@ f_val : 1100''' (alternative candidate: 1200)
    """
    img = img_source.copy()
    cent_x, cent_y = img.shape[:2]  # (height, width)
    f_val = 1100.
    # principal point at the image center; D are the fisheye distortion coefficients
    K = np.array([[f_val, 0.0, cent_y / 2], [0.0, f_val, cent_x / 2], [0., 0., 1.0]])
    D = np.array([0.04004325, 0.00112638, 0.01004722, -0.00593285])
    map1, map2 = cv2.fisheye.initUndistortRectifyMap(K, D, np.eye(3), K, (cent_y, cent_x), cv2.CV_16SC2)
    newImg = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
    return newImg


@paiplog
def detect_weight(image_dir, image_name):
    """Run detection on one image and return the weight-prediction response dict.

    Parses house/module/date from the file name, filters detections by area and
    image-border/distortion criteria, saves an annotated result image, and
    delegates weight estimation to :func:`weight_predict_new`.

    @raises Exception: when the image is missing, too few chickens are detected,
        or detection statistics are degenerate
    """
    if predictor.predictor is None:
        raise Exception("error = predictor is None")
    if predictor.chicken_metadata is None:
        raise Exception("error = chicken_metadata is None")
    image_data = image_dir + image_name
    image_data = glob.glob(image_data)
    if len(image_data) == 0:
        raise Exception(" failed : Image is Not Found ")

    # file name layout: [FA]_?_house_module_date... or house_module_date...
    id = image_name.split('_')
    if "FA" in id[0]:
        house_id = id[2]
        module_id = id[3]
        file_date = id[4]
    else:
        house_id = id[0]
        module_id = id[1]
        file_date = id[2]

    # BUGFIX: dbConn is needed later by weight_predict_new; constructing it inside
    # the try below left it undefined (NameError) whenever the camera query failed.
    dbConn = PyDBconnector()
    try:
        sql_str = "select CAM_DIST, HOUSE_TYPE, IS_PTZ from tbl_cctv where HOUSE_ID = '%s' and CCTV_ID = '%s' limit 1" % (
            house_id, module_id.split(',')[0])
        rows = dbConn.select_from_db(sql_str)
        if len(rows.index) == 0:
            camera_type = 'cctv'
        else:
            camera_type = rows['IS_PTZ'].item()
            dist = float(rows['CAM_DIST'].item())
            print('카메라높이 : ', dist)
        print('카메라타입 : ', camera_type)
    except Exception as e:
        camera_type = 'cctv'
        print(e)

    ##################################### image processing ###################################
    for idx, d in enumerate(image_data):
        print(f'Image_name : {d}')
        im = cv2.imread(d)
        im2 = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
        color = ('b', 'g', 'r')
        # NOTE(review): only the LAST channel's histogram survives this loop and
        # is used for the blackout check below — confirm this is intentional.
        for i, col in enumerate(color):
            hist = cv2.calcHist([im2], [i], None, [256], [0, 256])
        if camera_type.lower() != 'yes':
            # non-PTZ cameras need fisheye undistortion first
            im = calib_image(im)
        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
        # output format documented at
        # https://detectron2.readthedocs.io/tutorials/models.html#model-output-format
        outputs = predictor.predictor(im)
        print("디텍션 모델 이름 : ", predictor.chicken_model_name)
        v = Visualizer(im[:, :, ::-1],
                       metadata=predictor.chicken_metadata,
                       scale=0.5,
                       # instance_mode=ColorMode.IMAGE_BW
                       instance_mode=ColorMode.IMAGE)
        output_cpu = outputs['instances'].to("cpu")
        boxes = output_cpu.pred_boxes
        pred_masks = output_cpu.pred_masks
        # reject boxes touching the image border (5 px margin)
        bound_xy = torch.tensor([0, 0, pred_masks.shape[2], pred_masks.shape[1]])
        masks = torch.tensor([1, 1, -1, -1])
        bound_check = lambda x: all((bound_xy - x) * masks < -5)  # margin : 5
        num_of_interest = 40  # cap on detections kept per image
        x_sum, x_std = [], 0.
        toggle_bound = []
        for i, x in enumerate(boxes):
            x1, y1, x2, y2 = x
            x_area = round(float((x2 - x1) * (y2 - y1)), 1)
            x_sum.append(x_area)
            toggle_bound.append(bound_check(x))
        x_mean = np.mean(x_sum)
        x_std = np.std(x_sum)
        if np.isnan(x_mean) or np.isnan(x_std):
            raise Exception('x_mean or x_std is None ')
        else:
            if camera_type.lower() != 'yes':
                # non-PTZ: keep areas within an asymmetric band around the mean,
                # drop border boxes, cap at num_of_interest
                ind_toggle = [(x_mean - 1.5 * x_std <= x <= x_mean + 0.8 * x_std) for x in x_sum]
                ind_toggle = np.array(ind_toggle) & np.array(toggle_bound)
                ind_toggle = [ind_toggle[i] if sum(ind_toggle[:i]) < num_of_interest else False
                              for i in range(len(ind_toggle))]
                # split detections by distance from the image center: beyond
                # 3/8 of the image height they are considered distorted
                image_height, image_width = output_cpu.image_size
                center_point = torch.tensor((image_width / 2, image_height / 2), dtype=torch.float)
                circle_redius_distance = round(((3 * image_height) / 8), 1)
                distances = (boxes.get_centers() - center_point).pow(2).sum(axis=1).sqrt().numpy()
                distortion_toggle = np.array([x > circle_redius_distance for x in distances])
                dist_toggle = ind_toggle & distortion_toggle
                no_dist_toggle = ind_toggle & (~distortion_toggle)
            else:
                # PTZ: symmetric area band, no distortion handling
                ind_toggle = [(x_mean - 1.5 * x_std <= x <= x_mean + 1.5 * x_std) for x in x_sum]
                ind_toggle = np.array(ind_toggle) & np.array(toggle_bound)
                ind_toggle = [ind_toggle[i] if sum(ind_toggle[:i]) < num_of_interest else False
                              for i in range(len(ind_toggle))]
                no_dist_toggle = ind_toggle
        no_distortion_pixel_list = torch.sum(pred_masks, (1, 2))[no_dist_toggle].tolist()
        no_distortion_count = int(sum(no_dist_toggle))
        if no_distortion_count <= 2:  # fewer than 3 detections: reject the frame
            raise Exception('no_distortion_count is less than 2.')
        final_toggle = no_dist_toggle
        # out = v.draw_instance_predictions(output_cpu[final_toggle])
        out = v.overlay_instances(boxes=output_cpu[final_toggle].pred_boxes,
                                  masks=output_cpu[final_toggle].pred_masks)
        model1, model2, model3, model4, model5, model6 = weight_predict_new(
            no_distortion_pixel_list, no_distortion_count, house_id, module_id,
            file_date, dbConn, camera_type.lower())
        output_img = out.get_image()[:, :, ::-1].copy()
        jpg_index = image_name.rfind('.jpg')
        plt_image = image_name[0:jpg_index] + "_weightPredictionResult.jpg"
        plt_dir = image_dir + plt_image
        # save (or overwrite) the annotated result image — the original code had
        # an if/else whose branches both performed the identical imsave call
        plt.imsave(plt_dir, output_img)
        if hist[160] > 3500:  # not a blackout frame
            response = {
                "image_name": plt_image,
                "pixel_list": torch.sum(pred_masks, (1, 2))[no_dist_toggle].tolist(),
                "pixel_count": no_distortion_count,
                "model1_list": model1,
                "model1": int(sum(model1) / len(model1)),
                "model2_list": model2,
                "model2": int(sum(model2) / len(model2)),
                "model3_list": model3,
                "model3": int(sum(model3) / len(model3)),
                "model4_list": '', "model4": '',
                "model5_list": '', "model5": '',
                "model6_list": '', "model6": '',
                "status": "success"
            }
            return response
        elif hist[160] <= 3500:  # blackout frame
            response = {
                "image_name": plt_image,
                "pixel_list": torch.sum(pred_masks, (1, 2))[no_dist_toggle].tolist(),
                "pixel_count": no_distortion_count,
                "model1_list": model1,
                "model1": int(sum(model1) / len(model1)),
                "model2_list": model2,
                "model2": int(sum(model2) / len(model2)),
                # BUGFIX: "model3_list" was missing here, making the blackout
                # response schema inconsistent with the success response
                "model3_list": model3,
                "model3": int(sum(model3) / len(model3)),
                "model4_list": '', "model4": '',
                "model5_list": '', "model5": '',
                "model6_list": '', "model6": '',
                "status": "blackout"
            }
            return response


def on_json_loading_failed_return_dict(e):
    """Flask hook: turn a JSON parse failure into a {'status': 'fail'} payload."""
    print(e)
    return {'status': "fail"}


app = flask.Flask(__name__)
CORS(app)


@app.route("/api/detect_result", methods=["POST"])
def result_request():
    """POST /api/detect_result — body: {"dir": ..., "image_name": ...}.

    Runs detect_weight on the given image and returns its JSON response, or a
    {"status": "fail"} payload on any error.
    """
    request.on_json_loading_failed = on_json_loading_failed_return_dict
    start_time = time.time()
    image_reponse = request.get_json('dir')
    image_dir = image_reponse['dir']
    image_name = image_reponse['image_name']
    if image_name.find(".jpg") == -1:
        image_name = image_name + '.jpg'
    print('파일 경로 : ', image_dir)
    print('이미지 이름 :', image_name)
    try:
        response = detect_weight(image_dir, image_name)
    except Exception as e:
        print(e)
        response = {
            "image_name": image_name,
            "status": "fail",
        }
    end_time = time.time()
    print(f"<<<< detect_result finish {end_time - start_time:.5f} sec >>>>")
    return jsonify(response)


if __name__ == '__main__':
    print('--------------------model loading---------------------')
    try:
        predictor = Prepare_predictor.getInstance()
        print("인스턴스 생성 완료")
    except Exception as e:
        # chain the cause so the original model-loading error is not lost
        raise Exception("failed : Model loading") from e
    app.run(host="0.0.0.0", port=8890)