Tryag File Manager
Home
||
Turbo Force
||
B-F Config_Cpanel
Current Path :
/
paip
/
script
/
weight
/
Or
Select Your Path :
Upload File :
New :
File
Dir
//paip/script/weight/chicken_rest_script.py
# Copyright (c) 2020~2024 PAIPTREE SmartFarm, Inc. All Rights Reserved.
# Modify : 박정훈
# modified last date : 2024-02-21
# bugfix : blackout detection used to crop the image around its center at 0.5x of the full size;
# bugfix : changed so the blackout crop uses 1.0x of the width and 0.9x of the height.
import torch
import tensorflow as tf
from tensorflow import keras

# import some common libraries
import numpy as np
import os, json, cv2

# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.utils.visualizer import ColorMode
from detectron2.utils.logger import setup_logger

setup_logger()

import pandas as pd
import time
import glob, math
import matplotlib.pyplot as plt
import sys
import warnings
from scipy import interpolate
import functools

HOME_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir)
DATA_DIR = 'data'
MODULE_DIR = 'util'
OUTPUT_DIR = 'out'
IS_GATEWAY = True   # True when running on the gateway; flipped in __main__ for cloud ('FA...') images
IS_GPU = False      # set True in __main__ when TF detects a GPU
refTable = None
warnings.filterwarnings('ignore')
chicken_model_name = "weight_predict_new.pth"

# set sys path to import PyDBconnector, log_files
sys.path.append(os.path.join(HOME_PATH, MODULE_DIR))
from PyDBconnector import PyDBconnector
from logs import paiplog


## detectron2 cfg setup #################################################################################################
def initDetectron():
    """Build the detectron2 DefaultPredictor used for chicken instance segmentation.

    Uses the Mask R-CNN R50-FPN 3x base config, one ROI class, a 0.8 score
    threshold, CPU device unless IS_GPU is set, and weights loaded from the
    local model directory (gateway or cloud path depending on IS_GATEWAY).

    Returns:
        DefaultPredictor ready for inference.
    """
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
    if not IS_GPU:
        print('Running CPU!!')
        cfg.merge_from_list(['MODEL.DEVICE', 'cpu'])
    # only has one class (ballon).
    # (see https://detectron2.readthedocs.io/tutorials/datasets.html#update-the-config-for-new-datasets)
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1
    if IS_GATEWAY:
        cfg.MODEL_DIR = '/gate/script/weight/model/'
    else:
        cfg.MODEL_DIR = '/paip/script/weight/model/'
    os.makedirs(cfg.MODEL_DIR, exist_ok=True)
    # NOTE(review): the original first set cfg.MODEL.WEIGHTS to the model-zoo
    # checkpoint URL and then immediately overwrote it below; the dead store
    # was removed.
    cfg.MODEL.WEIGHTS = os.path.join(cfg.MODEL_DIR, chicken_model_name)  # path to the locally trained model
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.8  # custom testing threshold
    return DefaultPredictor(cfg)
## detectron2 cfg setup done ############################################################################################


########### blackout-detection logic ################################
def enhance_red_channel(image):
    """Return a per-pixel 2*R - G - B map (float32) of a BGR image.

    A result that sums to zero means there is no red-dominant signal
    (used to detect IR-mode night frames).
    """
    source = image.astype(np.float32)
    blue, green, red = cv2.split(source)
    enhanced_red = 2 * red - green - blue
    return enhanced_red


def get_h_channel_median(image):
    """Return the median of the HSV hue channel of a BGR image."""
    hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    hue_channel, _, _ = cv2.split(hsv_image)
    h_channel_median = np.median(hue_channel)
    return h_channel_median


def is_blackout(image_path):
    """Decide whether the image at image_path is a night/blackout frame.

    Crops a region around the image center (full width, 90% of the height)
    and inspects the hue median:
      * hue median >= 145  -> night without IR mode -> blackout
      * hue median == 0 and no red signal -> night with IR mode -> blackout

    Returns:
        bool: True when the frame is considered a blackout.
    """
    result = False
    ratio_width = 1
    ratio_height = 0.9
    img = cv2.imread(image_path)
    height, width, _ = img.shape
    center_x, center_y = width // 2, height // 2
    crop_width, crop_height = int(width * ratio_width), int(height * ratio_height)
    crop_half_width, crop_half_height = crop_width // 2, crop_height // 2
    # Clamped crop bounds; equivalent to the unclamped arithmetic but, per the
    # original author's note, measured to perform better.
    x1 = max(0, center_x - crop_half_width)
    y1 = max(0, center_y - crop_half_height)
    x2 = min(width, center_x + crop_half_width)
    y2 = min(height, center_y + crop_half_height)
    cropped_image = img[y1:y2, x1:x2]
    h_median = get_h_channel_median(cropped_image)
    if h_median >= 145:  # night, IR mode off: hue median 145 or above
        result = True
    elif h_median == 0:  # night, IR mode on: hue median is 0
        enhanced_red = enhance_red_channel(cropped_image)
        total_sum = np.sum(enhanced_red)
        if total_sum == 0:
            result = True
    return result
#############################################################


@paiplog
def find_nearest(arrayParam, value):
    """Return the element of sorted arrayParam closest to value (e.g. 24.5312).

    Used to pick the index into the global reference table.

    @param arrayParam: candidate index list (ascending order)
    @param value: actual day-age value to match
    """
    idx = np.searchsorted(arrayParam, value, side="left")
    if idx > 0 and (idx == len(arrayParam) or math.fabs(value - arrayParam[idx - 1]) < math.fabs(value - arrayParam[idx])):
        return arrayParam[idx - 1]
    else:
        return arrayParam[idx]


@paiplog
@functools.lru_cache()
def getRefTable(dbConn, dayAge):
    """Return the (noise-augmented) reference weight for the given day age.

    Loads the breed reference-weight table from the DB, linearly interpolates
    it to per-minute resolution, adds small gaussian noise so results look
    natural, and looks up the row closest to dayAge.

    NOTE(review): lru_cache keys on the dbConn object identity as well as
    dayAge, and keeps the connection object alive for the cache lifetime —
    confirm this caching is intended.

    Raises:
        Exception: when the reference table cannot be read.
    """
    if IS_GATEWAY:
        sql_refTable = "select * from tbl_weight_ref"
    else:
        sql_refTable = "select * from tbl_farm_weight_ref"
    try:
        refTable = dbConn.select_from_db(sql_refTable)
    except Exception as e:
        raise Exception("Error to read refTable") from e
    # Per-minute index: one row per minute over the table's day range.
    ref_re = pd.DataFrame(index=[round(x / (24 * 60), 4) for x in range(24 * 60 * (len(refTable) - 1) + 1)])
    ref_re = ref_re.merge(refTable.ref_weight, left_index=True, right_index=True, how='left')
    f_linear = interpolate.interp1d(refTable.index.to_list(), refTable.ref_weight.to_list(), kind='linear')
    y_new = f_linear(ref_re.index.to_list())
    ref_re.ref_weight = y_new
    ref_re.dropna(inplace=True)
    # interpolation & add noise to make natural results
    ref_re['new_ref_weight'] = np.round(ref_re.ref_weight + list(np.random.normal(size=len(ref_re)) * 10), 2)
    refTable = ref_re.copy()
    # hotfix: guard against arrayTwo length < 2 by falling back to the last two indices
    arrayTwo = [x for x in refTable.index.to_list() if int(x) == int(dayAge)]
    return refTable.loc[find_nearest(arrayTwo if len(arrayTwo) > 0 else refTable.index.to_list()[-2:], dayAge), 'new_ref_weight']
@paiplog
def weight_predict_new(no_distortion_pixel_list, no_distortion_count, farm_id, house_id, module_id, file_date, dbConn, camera_type):
    """Predict chicken weights from per-instance pixel areas.

    Looks up the flock's day age, loads the day-age-appropriate DNN models,
    fetches the camera pixel resolution, and runs three weight models over
    the pixel areas (models 4-6 are placeholders).

    @param no_distortion_pixel_list: per-instance mask pixel areas (list of ints)
    @param no_distortion_count: number of retained detections
    @param farm_id, house_id, module_id: identifiers parsed from the image name
    @param file_date: capture timestamp string (yyyymmddHHMMSS)
    @param dbConn: PyDBconnector instance
    @param camera_type: lower-cased PTZ flag; contains 'y' for PTZ cameras

    Returns:
        (model1_list, model2_list, model3_list, 0, 0, 0) when the DNN models
        loaded, otherwise the scalar reference weight.
        NOTE(review): the fallback returns a scalar while the caller unpacks a
        6-tuple — the fallback path would crash the caller; confirm intent.
    """
    needRawWeight, needRefTable = False, False  # model availability / ref-table-needed flags
    loaded_model = None  # placeholder kept from original
    to_day = pd.Timestamp(file_date)
    ########################################################################
    ## 0-1. fetch day-age data
    if IS_GATEWAY:
        sql_dayAge = f'''select in_date from tbl_house_breed_hist where 1=1 and house_id = '{house_id}' order by in_date desc limit 1'''
    else:
        sql_dayAge = f'''select in_date from tbl_farm_house_breed_hist where 1=1 and farm_id = '{farm_id}' and house_id = '{house_id}' order by in_date desc limit 1'''
    try:
        rows = dbConn.select_from_db(sql_dayAge)
        in_date = str(rows.values[0][0])
        # days since stocking, plus the minute fraction of the current day
        dayAge = (to_day - pd.Timestamp(in_date)).days + round(((to_day - pd.Timestamp(in_date)).seconds // 60) / (24 * 60), 4)
        print("일령정보 : ", dayAge)
    except Exception as e:
        if IS_GATEWAY:
            raise Exception("Error to read in date from tbl_house_breed_hist") from e
        else:
            raise Exception("Error to read in date from tbl_farm_house_breed_hist") from e
    ## day-age > 35 used to be clamped to -1; re-commented — just report the 35-day refTable value
    ## 0-2. fetch the reference table (interpolated, with noise added)
    try:
        refWeight = getRefTable(dbConn, dayAge)
    except Exception as e:
        raise Exception(f"failed to get ref table data for {to_day.strftime('%Y%m%d')}") from e
    ########################################################################
    ## 0-3. shipping check (added 2022.11.17; commented out 2022.12.29 —
    ## moved to the platform to detect stocking/shipping there)
    # if checkIsShipping(dayAge, no_distortion_count): raise Exception(f'{house_id}-{file_date} shipping is in progress')
    ########################################################################
    ## 1. model prediction — load the generalized area-to-weight models
    try:
        if dayAge >= 15:
            if IS_GATEWAY:
                print('무게예측모델 : /gate/script/weight/model/miri_dnn_model_sqrt_36_230306.h5')
                loaded_model1 = tf.keras.models.load_model("/gate/script/weight/model/miri_dnn_model_sqrt_36_230306.h5")
                loaded_model3 = tf.keras.models.load_model("/gate/script/weight/model/miri_dnn_reference_sqrt_230410.h5")
            else:
                print('무게예측모델 : /paip/script/weight/model/miri_dnn_model_sqrt_36_230306.h5')
                loaded_model1 = tf.keras.models.load_model("/paip/script/weight/model/miri_dnn_model_sqrt_36_230306.h5")
                loaded_model3 = tf.keras.models.load_model("/paip/script/weight/model/miri_dnn_reference_sqrt_230410.h5")
        elif dayAge < 15:
            if IS_GATEWAY:
                print('무게예측모델 : /gate/script/weight/model/miri_dnn_8day_sqrt_230420.h5')
                loaded_model1 = tf.keras.models.load_model("/gate/script/weight/model/miri_dnn_8day_sqrt_230420.h5")
                loaded_model3 = tf.keras.models.load_model("/gate/script/weight/model/miri_dnn_reference_sqrt_230410.h5")
            else:
                print('무게예측모델 : /paip/script/weight/model/miri_dnn_8day_sqrt_230420.h5')
                loaded_model1 = tf.keras.models.load_model("/paip/script/weight/model/miri_dnn_8day_sqrt_230420.h5")
                loaded_model3 = tf.keras.models.load_model("/paip/script/weight/model/miri_dnn_reference_sqrt_230410.h5")
    except Exception:
        needRawWeight = True  # best-effort: fall back to the reference weight below
    if not needRawWeight:
        ## 2. pixel-resolution lookup for the capturing camera
        try:
            if IS_GATEWAY:
                # NOTE(review): this gateway query selects only pixel_resolution,
                # yet rows_ex['pixel_resolution_ratio'] is read below — on the
                # gateway path this raises KeyError unless select_from_db adds
                # the column. Verify the tbl_cctv_pixel_resolution schema.
                sql_str = "select pixel_resolution from tbl_cctv_pixel_resolution where cctv_id = '%s' order by create_time desc limit 1" % (module_id.split(',')[0])
            else:
                sql_str = "select pixel_resolution, pixel_resolution_ratio from tbl_farm_cctv_pixel_resolution where farm_id = '%s' and cctv_id = '%s' order by create_time desc limit 1" % (farm_id, module_id.split(',')[0])
            rows_ex = dbConn.select_from_db(sql_str)
            resolution = float(rows_ex['pixel_resolution'].item())
            resolution_ratio = float(rows_ex['pixel_resolution_ratio'].item())
            print("resolution : %f, resolution_ratio : %f -> final resolution : %f" % (resolution, resolution_ratio, resolution * resolution_ratio))
            resolution = resolution * resolution_ratio
            # For bullet cameras: fetch the PTZ camera's resolution; bullet pixel
            # areas must be corrected to the PTZ resolution before the model.
            if IS_GATEWAY:
                sql_str1 = "select a.pixel_resolution from tbl_cctv_pixel_resolution AS a inner join tbl_cctv as b \
                on a.cctv_id = b.cctv_id where b.is_ptz like '%s' and b.house_id = '%s' order by create_time desc limit 1" % ('%y%', house_id)
            else:
                sql_str1 = "select a.pixel_resolution, a.pixel_resolution_ratio from tbl_farm_cctv_pixel_resolution AS a inner join tbl_farm_cctv as b \
                on a.farm_id = b.farm_id and a.cctv_id = b.cctv_id where b.ptz_support like '%s' and b.farm_id = '%s' and b.house_id = '%s' order by create_time desc limit 1" % ('%y%', farm_id, house_id)
            rows_ex1 = dbConn.select_from_db(sql_str1)
            crisis_resolution = float(rows_ex1['pixel_resolution'].item())
            crisis_resolution_ratio = float(rows_ex1['pixel_resolution_ratio'].item())
            # bugfix: the printed "final crisis_resolution" used resolution_ratio
            # instead of crisis_resolution_ratio, misreporting the value actually used.
            print("crisis_resolution : %f, crisis_resolution_ratio : %f -> final crisis_resolution : %f" % (crisis_resolution, crisis_resolution_ratio, crisis_resolution * crisis_resolution_ratio))
            crisis_resolution = crisis_resolution * crisis_resolution_ratio
        except Exception as e:
            raise Exception("cam info select failed") from e
        if 'y' in camera_type:  # PTZ camera
            temp = pd.DataFrame(no_distortion_pixel_list, columns=['pixel'])
            ## weight1 : model_nnr (neural-network regression, modeled by 박정훈)
            temp_nnr = np.sqrt(temp['pixel']) * (resolution / 1.554)  # modeled against the Miri farm at 5.4 m
            model1 = loaded_model1.predict(temp_nnr)
            model1 = model1.astype(int)
            ## weight2 : model_iwr (integrated-scale regression, modeled by 이병권; pred_weight = 0.00017*length^2.5961)
            temp_iwr = np.sqrt(temp['pixel']) * (resolution / 0.36)  # 0.36 is the integrated scale's unit resolution
            model2 = np.power(temp_iwr, 2.5961) * 0.00017
            model2 = model2.astype(int)
            ## weight3 : model_reference_nnr (modeled by 박정훈)
            temp_nnr = np.sqrt(temp['pixel']) * (resolution / 1.554)  # modeled against the Miri farm at 5.4 m
            model3 = loaded_model3.predict(temp_nnr)
            model3 = model3.astype(int)
            ## weights 4-6 : none yet
            model4 = 0
            model5 = 0
            model6 = 0
        elif 'y' not in camera_type:  # bullet camera
            temp = pd.DataFrame(no_distortion_pixel_list, columns=['pixel'])
            temp['pixel'] = temp['pixel'] * pow(resolution / crisis_resolution, 2)  # convert bullet pixel area to PTZ pixel area
            ## weight1 : model_nnr (neural-network regression, modeled by 박정훈)
            temp_nnr = np.sqrt(temp['pixel']) * (crisis_resolution / 1.554)  # modeled against the Miri farm at 5.4 m
            model1 = loaded_model1.predict(temp_nnr)
            model1 = model1.astype(int)
            ## weight2 : model_iwr (integrated-scale regression, modeled by 이병권; pred_weight = 0.00017*length^2.5961)
            temp_iwr = np.sqrt(temp['pixel']) * (crisis_resolution / 0.36)  # 0.36 is the integrated scale's unit resolution
            model2 = np.power(temp_iwr, 2.5961) * 0.00017
            model2 = model2.astype(int)
            ## weight3 : model_reference_nnr (modeled by 박정훈)
            temp_nnr = np.sqrt(temp['pixel']) * (crisis_resolution / 1.554)  # modeled against the Miri farm at 5.4 m
            model3 = loaded_model3.predict(temp_nnr)
            model3 = model3.astype(int)
            ## weights 4-6 : none yet
            model4 = 0
            model5 = 0
            model6 = 0
        return model1.flatten().tolist(), model2.tolist(), model3.flatten().tolist(), model4, model5, model6
    ########################################################################
    ## 2. no model available: fall back to the reference weight
    else:
        return refWeight  # ref weight fallback (see NOTE in docstring about arity)
def calib_image(img_source):
    """Undistort a fisheye (bullet-camera) frame.

    Remaps the image through cv2.fisheye with a fixed focal value (1100;
    1200 was also tried) and fixed distortion coefficients.

    NOTE(review): cent_x, cent_y are taken from shape[:2], i.e. (height,
    width) — the names are swapped relative to convention, but usage below
    is internally consistent.
    """
    img = img_source.copy()
    cent_x, cent_y = img.shape[:2]
    # f_val : 1100 (1200 also evaluated)
    f_val = 1100.
    K = np.array([[f_val, 0.0, cent_y / 2],
                  [0.0, f_val, cent_x / 2],
                  [0., 0., 1.0]])
    D = np.array([0.04004325, 0.00112638, 0.01004722, -0.00593285])
    map1, map2 = cv2.fisheye.initUndistortRectifyMap(K, D, np.eye(3), K, (cent_y, cent_x), cv2.CV_16SC2)
    newImg = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
    return newImg


@paiplog
def detect_weight(image_dir, image_name):
    """Run detection + weight prediction for one captured image.

    Parses farm/house/camera ids from the file name, checks the camera type
    in the DB, skips blackout frames, segments chickens with the global
    `predictor`, filters detections by area/boundary/distortion, predicts
    weights, saves an annotated image, and returns a response dict.

    Returns:
        dict with status "success" (full model outputs) or "blackout".
    Raises:
        Exception: when the predictor is missing, the image is not found,
        statistics are NaN, or fewer than 3 detections remain.
    """
    if predictor is None:
        raise Exception("error = predictor is None")
    image_data = image_dir + image_name
    image_data = glob.glob(image_data)
    if len(image_data) == 0:
        raise Exception("failed : Image is Not Found")
    # renamed from `id` (shadowed the builtin)
    name_parts = image_name.split('_')
    if IS_GATEWAY:
        # imgName : H02_CT03,1_20231108122102_farm_image_real_2295ae414e7f.jpg
        farm_id = ''
        house_id = name_parts[0]
        module_id = name_parts[1]
        file_date = name_parts[2]
    else:
        # imgName : FA0001_GW00_H02_CT03,1_20231108122102_farm_image_real_2295ae414e7f.jpg
        farm_id = name_parts[0]
        house_id = name_parts[2]
        module_id = name_parts[3]
        file_date = name_parts[4]
    try:
        if IS_GATEWAY:
            sql_str = "select is_ptz from tbl_cctv where house_id = '%s' and cctv_id = '%s' limit 1" % (house_id, module_id.split(',')[0])
        else:
            sql_str = "select ptz_support from tbl_farm_cctv where farm_id = '%s' and house_id = '%s' and cctv_id = '%s' limit 1" % (farm_id, house_id, module_id.split(',')[0])
        dbConn = PyDBconnector()
        rows = dbConn.select_from_db(sql_str)
        if len(rows.index) == 0:
            camera_type = 'cctv'
        else:
            if IS_GATEWAY:
                camera_type = rows['is_ptz'].item()
            else:
                camera_type = rows['ptz_support'].item()
        print('카메라타입 : ', camera_type)
    except Exception as e:
        # best-effort: fall back to plain cctv handling when the lookup fails
        camera_type = 'cctv'
        print(e)
    ##################################### image checking ###################################
    for idx, d in enumerate(image_data):
        print(f'Image_name : {d}')
        if not is_blackout(d):  # not a blackout frame
            im = cv2.imread(d)
            if 'y' not in camera_type.lower():  # not a PTZ camera: undistort first
                im = calib_image(im)
            im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
            # output format documented at
            # https://detectron2.readthedocs.io/tutorials/models.html#model-output-format
            outputs = predictor(im)
            print("디텍션 모델 이름 : ", chicken_model_name)
            v = Visualizer(im[:, :, ::-1],
                           metadata=None,
                           scale=0.5,
                           instance_mode=ColorMode.IMAGE  # keep colors of unsegmented pixels
                           )
            output_cpu = outputs['instances'].to("cpu")
            boxes = output_cpu.pred_boxes
            pred_masks = output_cpu.pred_masks
            # boundary filter: drop boxes closer than 5 px to any image edge
            bound_xy = torch.tensor([0, 0, pred_masks.shape[2], pred_masks.shape[1]])
            masks = torch.tensor([1, 1, -1, -1])
            bound_check = lambda x: all((bound_xy - x) * masks < -5)  # margin : 5
            num_of_interest = 40  # keep at most this many detections
            x_sum, x_std = [], 0.
            toggle_bound = []
            for i, x in enumerate(boxes):
                x1, y1, x2, y2 = x
                x_area = round(float((x2 - x1) * (y2 - y1)), 1)
                x_sum.append(x_area)
                toggle_bound.append(bound_check(x))
            x_mean = np.mean(x_sum)
            x_std = np.std(x_sum)
            if np.isnan(x_mean) or np.isnan(x_std):
                raise Exception('x_mean or x_std is None ')
            else:
                if 'y' not in camera_type.lower():  # not PTZ: also drop distorted ring
                    # keep areas within [-1.5 std, +0.8 std] of the mean
                    ind_toggle = [(x_mean - 1.5 * x_std <= x <= x_mean + 0.8 * x_std) for x in x_sum]
                    ind_toggle = np.array(ind_toggle) & np.array(toggle_bound)
                    # cap the number of kept detections at num_of_interest
                    ind_toggle = [ind_toggle[i] if sum(ind_toggle[:i]) < num_of_interest else False for i in range(len(ind_toggle))]
                    # keep only instances inside a circle around the image
                    # center (radius = 3/8 of the image height); outside is
                    # treated as lens-distorted and discarded
                    image_height, image_width = output_cpu.image_size
                    center_point = torch.tensor((image_width / 2, image_height / 2), dtype=torch.float)
                    circle_redius_distance = round(((3 * image_height) / 8), 1)
                    distances = (boxes.get_centers() - center_point).pow(2).sum(axis=1).sqrt().numpy()
                    distortion_toggle = np.array([x > circle_redius_distance for x in distances])
                    dist_toggle = ind_toggle & distortion_toggle
                    no_dist_toggle = ind_toggle & (~distortion_toggle)
                else:  # PTZ (cctv-type handling): no distortion filtering
                    ind_toggle = [(x_mean - 1.5 * x_std <= x <= x_mean + 1.5 * x_std) for x in x_sum]
                    ind_toggle = np.array(ind_toggle) & np.array(toggle_bound)
                    ind_toggle = [ind_toggle[i] if sum(ind_toggle[:i]) < num_of_interest else False for i in range(len(ind_toggle))]
                    no_dist_toggle = ind_toggle
                no_distortion_pixel_list = torch.sum(pred_masks, (1, 2))[no_dist_toggle].tolist()
                no_distortion_count = int(sum(no_dist_toggle))
                if no_distortion_count <= 2:  # drop frames with 2 or fewer detections
                    raise Exception('no_distortion_count is less than 2.')
                final_toggle = no_dist_toggle
                out = v.overlay_instances(boxes=output_cpu[final_toggle].pred_boxes, masks=output_cpu[final_toggle].pred_masks)
                model1, model2, model3, model4, model5, model6 = weight_predict_new(no_distortion_pixel_list, no_distortion_count, farm_id, house_id, module_id, file_date, dbConn, camera_type.lower())
                # save annotated result image (overwrites if present)
                output_img = out.get_image()[:, :, ::-1].copy()
                jpg_index = image_name.rfind('.jpg')
                plt_image = image_name[0:jpg_index] + "_weightPredictionResult.jpg"
                plt_dir = image_dir + plt_image
                plt.imsave(plt_dir, output_img)
                response = {
                    "image_name": plt_image,
                    "pixel_list": torch.sum(pred_masks, (1, 2))[no_dist_toggle].tolist(),
                    "pixel_count": no_distortion_count,
                    "model1_list": model1,
                    "model1": int(sum(model1) / len(model1)),
                    "model2_list": model2,
                    "model2": int(sum(model2) / len(model2)),
                    "model3_list": model3,
                    "model3": int(sum(model3) / len(model3)),
                    "model4_list": '',
                    "model4": '',
                    "model5_list": '',
                    "model5": '',
                    "model6_list": '',
                    "model6": '',
                    "status": "success"
                }
                return response
        else:  # blackout frame: report without running detection
            jpg_index = image_name.rfind('.jpg')
            plt_image = image_name[0:jpg_index] + ".jpg"
            response = {
                "image_name": plt_image,
                "status": "blackout"
            }
            return response
if __name__ == '__main__':
    try:
        print(sys.argv)
        # argv[1] is a JSON payload; single quotes are stripped before parsing
        jdata = json.loads(sys.argv[1].replace('\'', ''))
        image_dir = jdata.get("dir")
        image_name = jdata.get("image_name")
        # bugfix: the None check originally ran AFTER image_name.find(...),
        # so a missing name raised AttributeError instead of the intended error.
        if image_name is None:
            raise Exception('Not Exist FileName!!')
        if image_name.find(".jpg") == -1:
            image_name = image_name + '.jpg'
        print('1. 파일 경로 : ', image_dir)
        print('2. 이미지 이름 :', image_name)
        # Check Gateway Or Cloud (cloud image names carry a 'FA....' farm id)
        if 'FA' in image_name:
            IS_GATEWAY = False
        # Check Exist GPU
        if len(tf.config.experimental.list_physical_devices('GPU')) > 0:
            IS_GPU = True
        start_time = time.time()
        predictor = initDetectron()
        try:
            response = detect_weight(image_dir, image_name)
        except Exception as e:
            print(e)
            response = {
                "image_name": image_name,
                "status": "fail",
            }
        end_time = time.time()
        print(f"<<<< detect_result finish {end_time - start_time:.5f} sec >>>>")
        print(response)
    except Exception as e:
        # chain the cause so the original traceback is not lost
        raise Exception("failed : main Error") from e