Tryag File Manager
Home
||
Turbo Force
||
B-F Config_Cpanel
Current Path :
/
paip
/
script
/
camera_dirty
/
Or
Select Your Path :
Upload File :
New :
File
Dir
//paip/script/camera_dirty/camera_dirty.py
# Copyright (c) 2020~2024 PAIPTREE SmartFarm, Inc. All Rights Reserved.
# Title  : Improved camera-lens contamination (dirt) detection logic
# Author : Jeonghun Park
# Modify : Added visualisation of the dirty camera regions and result-image saving
# Modify Date : 2024.04.04

import os
import sys

import cv2
import numpy as np

# Grid overlay colors in BGR format.
green_color = (0, 128, 0)           # Green for the top 25% most-similar grids
lightgreen_color = (144, 238, 144)  # Light green for the top 50%


def _cosine_similarity(a, b):
    """Return the cosine similarity of two flattened vectors as a plain float.

    Replaces sklearn.metrics.pairwise.cosine_similarity for the
    single-vector case: it drops the extra dependency and returns a scalar
    instead of a (1, 1) array (assigning that array into a scalar slot such
    as ``ret[index]`` is an error on NumPy >= 1.25).
    """
    a = np.asarray(a, dtype=np.float64).ravel()
    b = np.asarray(b, dtype=np.float64).ravel()
    denom = np.linalg.norm(a) * np.linalg.norm(b)
    if denom == 0.0:
        return 0.0  # similarity with an all-zero vector is defined as 0
    return float(np.dot(a, b) / denom)


def prepare_images(img_path):
    """Load the source image and a heavily blurred copy of it.

    A dirty (smudged) lens produces an image that already looks blurred, so
    it stays very similar to its Gaussian-blurred version — that similarity
    is what the detection below measures.

    Returns:
        (base_img, blurred_img): the original BGR image and the same image
        after a large (399x399) Gaussian blur.

    Raises:
        Exception: if the image cannot be read from ``img_path``.
    """
    base_img = cv2.imread(img_path)
    if base_img is None:
        raise Exception("Image not found: " + img_path)
    blurred_img = cv2.GaussianBlur(base_img, (399, 399), sigmaX=0, sigmaY=0)
    return base_img, blurred_img


def calc_hist(base_img, blurred_img):
    """Compare original vs. blurred image with five similarity metrics.

    Returns:
        list of five floats ordered [CORREL, CHISQR, INTERSECT,
        BHATTACHARYYA, cosine]. The first four are OpenCV histogram
        comparisons over the H/S channels; the index of each name matches
        the cv2.HISTCMP_* constant passed to compareHist. The last entry is
        the cosine similarity of the raw pixel vectors.
    """
    hists = []
    for img in (base_img, blurred_img):
        # 1. Convert each image to HSV.
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        # 2. Histogram over the H and S channels.
        hist = cv2.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])
        # 3. Normalize the histogram to the 0..1 range.
        cv2.normalize(hist, hist, 0, 1, cv2.NORM_MINMAX)
        hists.append(hist)

    # Comparison methods: correlation, chi-square, intersection,
    # Bhattacharyya, and pixel-level cosine similarity.
    methods = ['CORREL', 'CHISQR', 'INTERSECT', 'BHATTACHARYYA', 'cosine']
    ret = [0.0] * len(methods)
    for index, name in enumerate(methods):
        if name == 'cosine':
            ret[index] = _cosine_similarity(base_img.flatten(),
                                            blurred_img.flatten())
        else:
            ret[index] = cv2.compareHist(hists[0], hists[1], index)
    return ret


def grid_cosine_similarity(original, blurred, grid_size):
    """Split both images into a grid and compare cell-by-cell.

    Args:
        original: BGR source image.
        blurred: blurred copy with the same shape.
        grid_size: number of cells per side.

    Returns:
        (grid_size, grid_size) float array indexed [row, col]; each entry
        is the cosine similarity between the corresponding cells.
    """
    height, width, _ = original.shape
    grid_height = height // grid_size
    grid_width = width // grid_size
    cosine_similarities = np.zeros((grid_size, grid_size))
    for i in range(grid_size):
        for j in range(grid_size):
            start_x, end_x = i * grid_width, (i + 1) * grid_width
            start_y, end_y = j * grid_height, (j + 1) * grid_height
            original_grid = original[start_y:end_y, start_x:end_x].flatten()
            blurred_grid = blurred[start_y:end_y, start_x:end_x].flatten()
            # _cosine_similarity returns a scalar float, safe to store in a
            # NumPy scalar slot (the sklearn version returned a (1,1) array).
            cosine_similarities[j, i] = _cosine_similarity(original_grid,
                                                           blurred_grid)
    return cosine_similarities


def color_grids_by_quantiles_two_colors(image, similarities, grid_size,
                                        color1, color2, alpha1, alpha2):
    """Overlay color on the grid cells in the top half of similarity.

    Cells in the top 25% of similarity (most blur-like, i.e. dirtiest) are
    blended with color1/alpha1, the next 25% with color2/alpha2; the rest
    keep the original pixels.

    Returns:
        A colored copy of ``image``; the input image is not modified.
    """
    colored_image = image.copy()
    height, width, _ = image.shape
    grid_height = height // grid_size
    grid_width = width // grid_size
    # Thresholds: median (quantiles[0]) and 75th percentile (quantiles[1]).
    quantiles = np.quantile(similarities, [0.5, 0.75, 1])
    for i in range(grid_size):
        for j in range(grid_size):
            similarity = similarities[j, i]
            if similarity > quantiles[1]:    # top 25%
                chosen_color, chosen_alpha = color1, alpha1
            elif similarity > quantiles[0]:  # top 50%
                chosen_color, chosen_alpha = color2, alpha2
            else:
                continue  # below top 50%: keep the original pixels
            start_x, end_x = i * grid_width, (i + 1) * grid_width
            start_y, end_y = j * grid_height, (j + 1) * grid_height
            # Blend a solid-color overlay with the original cell at the
            # chosen transparency level.
            overlay = np.full((grid_height, grid_width, 3), chosen_color,
                              np.uint8)
            colored_image[start_y:end_y, start_x:end_x] = cv2.addWeighted(
                overlay, chosen_alpha,
                image[start_y:end_y, start_x:end_x], 1 - chosen_alpha, 0)
    return colored_image


def calculate_green_pixel_ratio(colored_image, green_color=(0, 128, 0)):
    """Return the percentage of pixels exactly matching ``green_color``.

    Intended as a contamination score; kept for reference but currently
    unused by the main flow.
    """
    total_pixels = colored_image.shape[0] * colored_image.shape[1]
    # np.all over the channel axis checks for an exact BGR match per pixel.
    green_pixels = np.sum(np.all(colored_image == green_color, axis=-1))
    return (green_pixels / total_pixels) * 100


if __name__ == '__main__':
    if len(sys.argv) <= 1:
        print("fail - not input img_path")
        sys.exit()
    print("카메라 이물질 확인 시작!!")
    img_path = sys.argv[1]
    print("Input img_path : ", img_path)
    try:
        base_img, blurred_img = prepare_images(img_path)
        result_hist = calc_hist(base_img, blurred_img)
        # Dirty lens heuristic: H/S-histogram correlation below 1.0 AND a
        # very high pixel-level cosine similarity with the blurred copy.
        if result_hist[0] < 1.0 and result_hist[4] > 0.98:
            print("Dirty!\n")
            # Per-cell similarity on a 60x60 grid, then color the dirtiest
            # cells with the two-tier green overlay.
            grid_cosine_similarities = grid_cosine_similarity(
                base_img, blurred_img, 60)
            colored_result = color_grids_by_quantiles_two_colors(
                base_img, grid_cosine_similarities, 60,
                green_color, lightgreen_color, 0.5, 0.15)
            # splitext (instead of split('.')[0]) keeps paths intact when a
            # directory or file stem contains a '.'.
            result_image = (os.path.splitext(img_path)[0]
                            + '_cameraDirtyResult.jpg')
            cv2.imwrite(result_image, colored_result)
            print('1:' + os.path.basename(result_image))
        else:
            print("Clean\n")
            print(0)
    except Exception as e:
        # Preserve the original "fail" contract on stdout for callers that
        # parse it; the actual error detail goes to stderr.
        print("fail")
        print(e, file=sys.stderr)