import cv2
import os
import random
import numpy as np
import math
from colormath.color_diff import delta_e_cie2000
from colormath.color_objects import sRGBColor, XYZColor, LabColor
from colormath.color_conversions import convert_color
from colormath import color_conversions
import tkinter as tk
import json
import urllib.request
from model_11 import predict
import colorsys
import util as imageUtil
import re
import openpyxl
import pandas as pd
from openpyxl import load_workbook, Workbook
def save_detection(img, filename, path):
""" 根据模型发现试剂
:param im_data: (numpy.ndarray) 输入图片
:return: (numpy.ndarray) 发现区域后,裁剪置信度大于0.9的区域图片
"""
# 推理图像
result = predict.detection(img)
boxes = result.boxes
scores = result.scores
label_ids = result.label_ids
detection_results = [
DetectionResult(*box, score, label_id)
for box, score, label_id in zip(boxes, scores, label_ids)
]
highest_scores = {}
# Iterate through all results
for result in detection_results:
label_id = result.label_id
score = result.score
# Check if this label_id has been seen before or if the current score is higher
if label_id not in highest_scores or score > highest_scores[label_id]:
highest_scores[label_id] = score
# Filter the detection_results to keep only the highest-scored entry for each label_id
filtered_results = [
result
for result in detection_results
if result.score == highest_scores[result.label_id]
]
# Print the final results
for result in filtered_results:
print(
f"File: {filename}, Label ID: {result.label_id}, "
f"Box: ({result.xmin}, {result.ymin}, {result.xmax}, {result.ymax}), "
f"Score: {result.score}"
)
    # Draw the bounding boxes on a copy of the image
img_with_boxes = img.copy()
for result in filtered_results:
cv2.rectangle(img_with_boxes, (int(result.xmin), int(result.ymin)),
(int(result.xmax), int(result.ymax)), (0, 255, 0), 2)
    # file_path = os.path.join(path, filename)
# cv2.imwrite(file_path, img_with_boxes)
# vis_im = vision.vis_detection(img, detection_results, score_threshold=0.001)
# file_path = os.path.join(path, filename)
# cv2.imwrite(file_path, vis_im)
class DetectionResult:
def __init__(self, xmin, ymin, xmax, ymax, score, label_id):
self.xmin = xmin
self.ymin = ymin
self.xmax = xmax
self.ymax = ymax
self.score = score
self.label_id = label_id
# Initialize filtered attributes
self.filtered_xmin = xmin
self.filtered_ymin = ymin
self.filtered_xmax = xmax
self.filtered_ymax = ymax
self.filtered_score = score
self.filtered_label_id = label_id
def image_cutout(img, box):
left = int(box[0])
top = int(box[1])
right = int(box[2])
bottom = int(box[3])
    image_box = img[top:bottom, left:right]
return image_box
def lab_image(img):
lab_image_box = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
random_y = random.randint(0, lab_image_box.shape[0] - 1)
random_x = random.randint(0, lab_image_box.shape[1] - 1)
# Get the CIELab values at the random coordinate
random_L1 = lab_image_box[random_y, random_x, 0]
random_a1 = lab_image_box[random_y, random_x, 1]
random_b1 = lab_image_box[random_y, random_x, 2]
return random_L1, random_a1, random_b1
# Inverse gamma correction (sRGB decoding) for a single channel value in [0, 1]
def gamma_correct(c):
if c <= 0.04045:
return c / 12.92
else:
return ((c + 0.055) / 1.055) ** 2.4
# Linearize 8-bit sRGB values to linear RGB in [0, 1]
def linearize_rgb(R, G, B):
# R, G, B = rgbValue
linear_R_value = gamma_correct(R / 255.0)
linear_G_value = gamma_correct(G / 255.0)
linear_B_value = gamma_correct(B / 255.0)
return linear_R_value, linear_G_value, linear_B_value
def rgb_to_xyz(R, G, B):
r = gamma_correct(R / 255.0)
g = gamma_correct(G / 255.0)
b = gamma_correct(B / 255.0)
x = r * 0.4124564 + g * 0.3575761 + b * 0.1804375
y = r * 0.2126729 + g * 0.7151522 + b * 0.0721750
z = r * 0.0193339 + g * 0.1191920 + b * 0.9503041
return x, y, z
def xyz_to_lab(xyz, x_n=0.95047, y_n=1.0, z_n=1.08883):
    # Unpack xyz; the D65 reference white is expressed on the same 0-1 scale
    # that rgb_to_xyz() produces, so the normalization below is consistent.
    X1, Y1, Z1 = xyz
x = X1 / x_n
y = Y1 / y_n
z = Z1 / z_n
def f(t):
if t > (6 / 29) ** 3:
return t ** (1 / 3)
else:
return (1 / 3) * ((29 / 6) ** 2) * t + (4 / 29)
L = 116 * f(y) - 16
a = 500 * (f(x) - f(y))
b = 200 * (f(y) - f(z))
return L, a, b
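# A minimal sanity check (illustrative only) of the helpers above, assuming
# xyz_to_lab()'s reference white is on the same 0-1 scale as rgb_to_xyz()'s
# output: pure white should land near L=100, a=0, b=0, and mid grey near L~53.
def _demo_srgb_to_lab():
    for name, (R, G, B) in (("white", (255, 255, 255)), ("mid grey", (128, 128, 128))):
        L, a, b = xyz_to_lab(rgb_to_xyz(R, G, B))
        print(f"{name}: L={L:.1f}, a={a:.1f}, b={b:.1f}")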
def CIELab(imgData):
    height, width, _ = imgData.shape
    # Calculate the center coordinates of the cutout image
    center_x = width // 2
    center_y = height // 2
    # Extract the pixel at the center coordinate (OpenCV stores channels as B, G, R)
    B, G, R = imgData[center_y, center_x]
    # rgb_to_xyz() already applies the inverse gamma, so pass the raw 8-bit values
    return xyz_to_lab(rgb_to_xyz(R, G, B))
# RGB values at the center coordinate
def central_coordinate(imgData):
height, width, _ = imgData.shape
# Calculate the center coordinates of the cutout image
center_x = width // 2
center_y = height // 2
# Extract R, G, B values from the image data at the center coordinate
B, G, R = imgData[center_y, center_x]
return R, G, B
def RGBCIELab(R, G, B):
    # rgb_to_xyz() applies the inverse gamma internally, so raw 8-bit values go straight in
    return xyz_to_lab(rgb_to_xyz(R, G, B))
def RGB(imgData):
    # Same as central_coordinate(): R, G, B at the image center
    return central_coordinate(imgData)
def calculate_color_difference(L1, a1, b1, L2, a2, b2):
delta_L = L2 - L1
delta_a = a2 - a1
delta_b = b2 - b1
color_difference = math.sqrt(delta_L ** 2 + delta_a ** 2 + delta_b ** 2)
return color_difference
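# Worked example (illustrative numbers only) for the Euclidean delta-E above:
# for (L1, a1, b1) = (50, 0, 0) and (L2, a2, b2) = (55, 3, 4),
# delta-E = sqrt(5**2 + 3**2 + 4**2) = sqrt(50) ~= 7.07.
def _demo_color_difference():
    print(f"delta-E = {calculate_color_difference(50, 0, 0, 55, 3, 4):.2f}")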
def rgb_to_lab(R, G, B):
    # Inverse gamma correction / linearization of the 8-bit sRGB values
    linear_R, linear_G, linear_B = linearize_rgb(R, G, B)
    # Convert to the XYZ color space (sRGB D65 matrix)
    xyz = (
        linear_R * 0.4124564 + linear_G * 0.3575761 + linear_B * 0.1804375,
        linear_R * 0.2126729 + linear_G * 0.7151522 + linear_B * 0.0721750,
        linear_R * 0.0193339 + linear_G * 0.1191920 + linear_B * 0.9503041
    )
    # Convert to the Lab color space with the local helper
    lab = xyz_to_lab(xyz)
    return lab
def show_color_box_cv2(rgb, name):
blue, green, red = rgb
    # Create a blank 100x100 image
    img = np.zeros((100, 100, 3), dtype=np.uint8)
    # Fill every pixel with the given color values (OpenCV channel order)
    img[:, :] = [blue, green, red]
    # Show the image
    cv2.imshow(name, img)
    cv2.namedWindow(name, cv2.WINDOW_FREERATIO)  # window resizes freely
def parse_json_file(file_path):
with open(file_path, 'r') as f:
data = json.load(f)
    # Further parsing or processing of the JSON data can be added here if needed
    return data  # return the parsed data
def color_block(json, rgb):
    min_delta_e = float('inf')  # smallest delta_e seen so far
    closest_color = None  # closest reference color
    for i in json:
        rgb_tuple = tuple(map(int, i['RGB'].strip('()').split(',')))  # parse "(r, g, b)" string into a tuple
        delta_e = Calculated_color_difference(rgb_tuple, rgb)  # pass the RGB values as a tuple
        if delta_e < min_delta_e:  # current difference is smaller than the recorded minimum
            min_delta_e = delta_e  # update the smallest delta_e
            closest_color = rgb_tuple  # update the closest color
            closest_color_index = i['index']
    # If the closest block is the last one, there is no next block to interpolate towards
    if len(json) == closest_color_index + 1:
        return [json[closest_color_index]]
    return [json[closest_color_index], json[closest_color_index + 1]]
def Calculated_color_difference(rgb1, rgb2):
    r1, g1, b1 = rgb1  # Unpack the RGB tuple
    r2, g2, b2 = rgb2  # Unpack the RGB tuple
    # sRGBColor expects gamma-encoded sRGB in [0, 1]; convert_color applies the
    # inverse gamma itself, so the values are not linearized beforehand.
    color1_xyz = convert_color(sRGBColor(r1 / 255.0, g1 / 255.0, b1 / 255.0), XYZColor)
    color2_xyz = convert_color(sRGBColor(r2 / 255.0, g2 / 255.0, b2 / 255.0), XYZColor)
    color1_lab = convert_color(color1_xyz, LabColor)
    color2_lab = convert_color(color2_xyz, LabColor)
    delta_e = delta_e_cie2000(color1_lab, color2_lab)
    return delta_e
def compute(RGBT, RGBA, RGBB):
    rT, gT, bT = RGBT  # Unpack the RGB tuple
    rA, gA, bA = RGBA  # Unpack the RGB tuple
    rB, gB, bB = RGBB  # Unpack the RGB tuple
    # As in Calculated_color_difference(), pass gamma-encoded sRGB in [0, 1];
    # convert_color handles the linearization to XYZ.
    colorT_xyz = convert_color(sRGBColor(rT / 255.0, gT / 255.0, bT / 255.0), XYZColor)
    colorA_xyz = convert_color(sRGBColor(rA / 255.0, gA / 255.0, bA / 255.0), XYZColor)
    colorB_xyz = convert_color(sRGBColor(rB / 255.0, gB / 255.0, bB / 255.0), XYZColor)
    # Convert the XYZColor objects to LabColor objects
    colorT_lab = convert_color(colorT_xyz, LabColor)
    colorA_lab = convert_color(colorA_xyz, LabColor)
    colorB_lab = convert_color(colorB_xyz, LabColor)
    delta_TA = delta_e_cie2000(colorT_lab, colorA_lab)
    delta_TB = delta_e_cie2000(colorT_lab, colorB_lab)
    delta_AB = delta_e_cie2000(colorA_lab, colorB_lab)
    return [delta_TA, delta_TB, delta_AB]
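# A minimal sketch (not part of the pipeline) of how the three delta-E values
# returned by compute() are used below in similarity(): the test color T is
# projected onto the segment between reference colors A and B via the law of
# cosines, and the concentration is interpolated linearly along that segment.
# The concrete numbers here are illustrative only.
def _demo_projection_interpolation():
    delta_TA, delta_TB, delta_AB = 3.0, 7.0, 8.0  # hypothetical delta-E values
    # Distance from A to the foot of the perpendicular from T onto the A-B line
    oa = (delta_AB ** 2 + delta_TA ** 2 - delta_TB ** 2) / (2 * delta_AB)
    value_A, value_B = 0.0, 10.0  # hypothetical concentrations attached to A and B
    Ctest = value_A + (oa / delta_AB) * (value_B - value_A)
    print(f"oa={oa:.3f}, interpolated value={Ctest:.3f}")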
def similarity(image):
    # Fetch the image from a URL
    res = urllib.request.urlopen(image)
    img = np.asarray(bytearray(res.read()), dtype="uint8")
    original_pic = cv2.imdecode(img, cv2.IMREAD_COLOR)
    jsonData = parse_json_file("block.json")
    filtered_results = predict.detection(original_pic, "")
    data = list()
for result in filtered_results:
box = [result.xmin, result.ymin, result.xmax, result.ymax]
ID = result.label_id
rgbdata = color_block(jsonData[str(ID)], (central_coordinate(image_cutout(original_pic, box))))
if len(rgbdata) == 1:
Ctest = rgbdata[0]["value"]
return_data = json.dumps({
'name': ID,
'value': Ctest,
})
else:
            RGBA = tuple(map(int, rgbdata[0]["RGB"].strip('()').split(',')))  # parse "(r, g, b)" string into a tuple
            RGBB = tuple(map(int, rgbdata[1]["RGB"].strip('()').split(',')))  # parse "(r, g, b)" string into a tuple
            delta_list = compute(central_coordinate(image_cutout(original_pic, box)), RGBA, RGBB)
            # Law-of-cosines projection of T onto the A-B segment:
            # oa is the distance from A along A-B (delta_list = [delta_TA, delta_TB, delta_AB])
            oa = (delta_list[1] ** 2 - delta_list[2] ** 2 - delta_list[0] ** 2) / -(2 * delta_list[2])
            value1 = float(rgbdata[0]["value"])
            value2 = float(rgbdata[1]["value"])
            # Linear interpolation of the concentration along A-B
            Ctest = value1 + (oa / delta_list[2]) * (value2 - value1)
return_data = json.dumps({
'name': ID,
'value': Ctest,
})
data.append(return_data)
    return data
def showImg(name, img):
cv2.imshow(name, img)
def reestablishColorImg(img, r1, g1, b1):
    if isinstance(img, int):
        raise ValueError("Invalid image object. Ensure that img is a numpy.ndarray (OpenCV image).")
height = img.shape[0]
width = img.shape[1]
# Traverse through each pixel of the image for restoration and reconstruction
for y in range(height):
for x in range(width):
            # Get the value of the current pixel (OpenCV stores channels in B, G, R order)
            pixel_rgb = img[y, x]
new_pixel_rgb = (
safely_add(pixel_rgb[0], r1),
safely_add(pixel_rgb[1], g1),
safely_add(pixel_rgb[2], b1)
)
# Reconstruct the color of the pixel and update the image
img[y, x] = new_pixel_rgb
return img
# Compute a trimmed-mean "test value" for a grayscale image
def test_average(image):
    # Flatten the image into a 1-D array and sort the pixel values
    flattened_data = image.flatten()
    sorted_data = np.sort(flattened_data)
    num_pixels = len(sorted_data)
    # Drop the lowest and highest 20% of pixel values before averaging
    reduce_percentage = 20
    reduce_count = int(num_pixels * reduce_percentage / 100)
    trimmed_data = sorted_data[reduce_count:num_pixels - reduce_count]
    if len(trimmed_data) == 0:
        trimmed_data = sorted_data
    # Mean of the remaining pixel values
    average_pixel_value = np.mean(trimmed_data)
    return average_pixel_value
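# Quick sanity check (illustrative only): test_average on a synthetic 10x10
# gradient of values 0..99 should sit near the middle of the range once the
# extremes are trimmed.
def _demo_test_average():
    synthetic = np.arange(100, dtype=np.uint8).reshape(10, 10)
    print(f"trimmed mean of 0..99 gradient: {test_average(synthetic):.2f}")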
# Average of a list after dropping one maximum and one minimum value
def average(data_list):
    # Drop zero entries first
data_list_none_zero = []
for i in range(len(data_list)):
if data_list[i] != 0:
data_list_none_zero.append(data_list[i])
data_list = data_list_none_zero[2:-2]
if len(data_list) == 0:
return 0
if len(data_list) > 2:
data_list.remove(min(data_list))
data_list.remove(max(data_list))
average_data = float(sum(data_list)) / len(data_list)
return average_data
elif len(data_list) <= 2:
average_data = float(sum(data_list)) / len(data_list)
return average_data
# Grayscale conversion using OpenCV's default method
def calculate_gray(img):
return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def get_box_dimensions(box):
x1, y1, x2, y2 = box # Unpack the box coordinates
width = x2 - x1 # Calculate width
height = y2 - y1 # Calculate height
return width, height
def minification(box):
    # Note: as written this rebuilds the box around its own center with its own
    # width and height, so it effectively returns the same box (up to rounding);
    # no shrinking is applied.
    center_x = (box[0] + box[2]) // 2
    center_y = (box[1] + box[3]) // 2
    width, height = get_box_dimensions(box)
    left = int(center_x - width / 2)
    top = int(center_y - height / 2)
    right = int(center_x + width / 2)
    bottom = int(center_y + height / 2)
    box = [left, top, right, bottom]
    return box
def calculate_white_balance(image):
# Convert image to LAB color space for better white balance adjustment
lab_image = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
# Compute mean values for each channel
mean_channels = np.mean(lab_image, axis=(0, 1))
# Compute white balance parameters
white_balance_params = {
'L': mean_channels[0],
'A': mean_channels[1],
'B': mean_channels[2]
}
return white_balance_params
def tailor_box(box, width, height):
    # Extract the top-left and bottom-right coordinates of the box
    x1, y1, x2, y2 = box[0], box[1], box[2], box[3]
    # Compute the center point
x = (x1 + x2) / 2
y = (y1 + y2) / 2
left = int(x - width / 2)
top = int(y - height / 2)
right = int(x + width / 2)
bottom = int(y + height / 2)
new_box = [left, top, right, bottom]
return new_box
def similarity_localImage(image):
original_pic = cv2.imread(image)
# Restore white balance
filtered_results = predict.detection(original_pic, "")
    data = list()
# original_pic = reestablishColorImg(original_pic, delta_r, delta_g, delta_b)
for result in filtered_results:
box = [result.xmin, result.ymin, result.xmax, result.ymax]
original_pic1 = image_cutout(original_pic, box)
# original_pic1 = image_cutout(original_pic, minification(box))
# box = tailor_box(box, 19, 20)
# original_pic1 = image_cutout(original_pic, box)
# original_pic1 = reestablishColorImg(original_pic1, delta_r, delta_g, delta_b)
        cv2.namedWindow(str(result.label_id), cv2.WINDOW_FREERATIO)  # window resizes freely
# cv2.imshow(str(result.label_id), original_pic1)
Ctest = test_average(calculate_gray(original_pic1))
print(str(result.label_id), getHSV(original_pic1))
h, s, v = getHSV(original_pic1)
        # OpenCV HSV ranges: H in [0, 179], S and V in [0, 255]
        # h = h / 360  # scale H to [0, 1]
        s = s / 100  # scale S to [0, 1]
        v = v / 100  # scale V to [0, 1]
s = 1.0
v = 1.0
r, g, b = colorsys.hsv_to_rgb(h, s, v)
r = min(255, max(0, int(r * 255)))
g = min(255, max(0, int(g * 255)))
b = min(255, max(0, int(b * 255)))
        grayscale = imageUtil.rgb_to_grayscale(r, g, b)
return_data = json.dumps({
'name': str(result.label_id),
'value': h,
})
data.append(return_data)
    return data
def histogram_matching(source, template):
"""
Adjust the pixel values of a grayscale image such that its histogram
matches that of a target image.
"""
oldshape = source.shape
source = source.ravel()
template = template.ravel()
s_values, bin_idx, s_counts = np.unique(source, return_inverse=True, return_counts=True)
t_values, t_counts = np.unique(template, return_counts=True)
s_quantiles = np.cumsum(s_counts).astype(np.float64)
s_quantiles /= s_quantiles[-1]
t_quantiles = np.cumsum(t_counts).astype(np.float64)
t_quantiles /= t_quantiles[-1]
interp_t_values = np.interp(s_quantiles, t_quantiles, t_values)
return interp_t_values[bin_idx].reshape(oldshape)
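# A minimal usage sketch for histogram_matching(): match the tonal distribution
# of one grayscale crop to a reference crop. The file names below are placeholders.
def _demo_histogram_matching():
    source = cv2.imread("source_crop.jpg", cv2.IMREAD_GRAYSCALE)       # hypothetical path
    template = cv2.imread("reference_crop.jpg", cv2.IMREAD_GRAYSCALE)  # hypothetical path
    if source is None or template is None:
        return
    matched = histogram_matching(source, template).astype(np.uint8)
    cv2.imwrite("matched_crop.jpg", matched)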
def similarity_localImage_gray(imgPath, delta_r, delta_g, delta_b):
original_pic = cv2.imread(imgPath)
# cv2.imshow("original", original_pic)
# Restore white balance
filtered_results = predict.detection(original_pic, "")
    # cv2.namedWindow('aaaa', cv2.WINDOW_NORMAL)  # window resizes freely
    # cv2.imshow("aaaa", original_pic)
    data = list()
    # original_pic = reestablishColorImg(original_pic, delta_r, delta_g, delta_b)
for result in filtered_results:
box = [result.xmin, result.ymin, result.xmax, result.ymax]
original_pic1 = image_cutout(original_pic, minification(box))
# original_pic1 = image_cutout(original_pic, box)
# height, width, _ = original_pic1.shape
# if height * width < 100:
# print(imgPath)
h, s, v = getHSV(original_pic1)
        h = h / 360  # scale H to [0, 1]
        s = s / 100  # scale S to [0, 1]
        v = v / 100  # scale V to [0, 1]
s = 1.0
v = 1.0
r, g, b = colorsys.hsv_to_rgb(h, s, v)
r = min(255, max(0, int(r * 255)))
g = min(255, max(0, int(g * 255)))
b = min(255, max(0, int(b * 255)))
        grayscale = imageUtil.rgb_to_grayscale(r, g, b)
# original_pic1 = reestablishColorImg(original_pic1, delta_r, delta_g, delta_b)
# cv2.imshow(str(result.label_id), original_pic1)
return_data = test_average(calculate_gray(original_pic1))
dataList = json.dumps({
'name': testList[(result.label_id)]['value'],
'gray': return_data,
'hsv': getHSV(original_pic1),
})
data.append(dataList)
    return data
def safely_add(value, delta):
    return max(min(value + delta, 255), 0)  # clamp to the valid [0, 255] range
# Image calibration (white balance)
def restore_white_balance(image, white_balance_params):
# Convert image to LAB color space
lab_image = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
    # Split the LAB channels of the original image
    l_channel, a_channel, b_channel = cv2.split(lab_image)
    # Difference between the target channel means and this image's channel means
    delta_l = white_balance_params['L'] - np.mean(l_channel)
    delta_a = white_balance_params['A'] - np.mean(a_channel)
    delta_b = white_balance_params['B'] - np.mean(b_channel)
    # Apply the white balance correction
    corrected_lab = cv2.merge((
        np.clip(l_channel + delta_l, 0, 255).astype(np.uint8),
        np.clip(a_channel + delta_a, 0, 255).astype(np.uint8),
        np.clip(b_channel + delta_b, 0, 255).astype(np.uint8)
    ))
# Convert LAB image back to BGR color space
restored_image = cv2.cvtColor(corrected_lab, cv2.COLOR_LAB2BGR)
return restored_image
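# A minimal usage sketch (not called anywhere): capture the LAB channel means of
# a reference photo with calculate_white_balance() and push a second photo
# towards them with restore_white_balance(). The file names are placeholders.
def _demo_white_balance_transfer():
    reference = cv2.imread("reference_card.jpg")  # hypothetical path
    target = cv2.imread("strip_photo.jpg")        # hypothetical path
    if reference is None or target is None:
        return
    params = calculate_white_balance(reference)
    corrected = restore_white_balance(target, params)
    cv2.imwrite("strip_photo_balanced.jpg", corrected)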
def calculate_color_space_differences(image_A, image_B):
# Convert images to LAB color space
lab_A = cv2.cvtColor(image_A, cv2.COLOR_BGR2LAB)
lab_B = cv2.cvtColor(image_B, cv2.COLOR_BGR2LAB)
# Calculate mean values for LAB channels in each image
mean_A = np.mean(lab_A, axis=(0, 1))
mean_B = np.mean(lab_B, axis=(0, 1))
# Calculate differences in A and B channels
diff_A = mean_B[1] - mean_A[1]
diff_B = mean_B[2] - mean_A[2]
return diff_A, diff_B
# Shift an image's A/B channels by previously measured color-space differences
def restore_color_space(image, diff_A, diff_B):
# Convert image to LAB color space
lab_image = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
# Split LAB channels
L, A, B = cv2.split(lab_image)
# Adjust A and B channels to match the target differences
restored_A = np.clip(A + diff_A, 0, 255).astype(np.uint8)
restored_B = np.clip(B + diff_B, 0, 255).astype(np.uint8)
# Merge adjusted LAB channels
restored_lab = cv2.merge((L, restored_A, restored_B))
# Convert LAB image back to BGR color space
restored_image = cv2.cvtColor(restored_lab, cv2.COLOR_LAB2BGR)
return restored_image
def getHSV(img):
hsv_image = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
h_channel, s_channel, v_channel = cv2.split(hsv_image)
height, width, _ = img.shape
h_values = []
s_values = []
v_values = []
for y in range(height):
for x in range(width):
h_value = h_channel[y, x]
s_value = s_channel[y, x]
v_value = v_channel[y, x]
if (h_value != 0) or (s_value != 0) or (v_value != 0):
h_values.append(h_value)
s_values.append(s_value)
v_values.append(v_value)
    # Number of extreme pixels to drop from each end
    num_pixels = len(h_values)
    reduce_percentage = 20
    reduce_count = int(num_pixels * reduce_percentage / 100)
    # Sort and trim the extremes (slicing up to num_pixels - reduce_count keeps
    # everything when reduce_count is 0, e.g. for very small crops)
    sorted_h = sorted(h_values)
    sorted_s = sorted(s_values)
    sorted_v = sorted(v_values)
    reduced_h = sorted_h[reduce_count:num_pixels - reduce_count]
    reduced_s = sorted_s[reduce_count:num_pixels - reduce_count]
    reduced_v = sorted_v[reduce_count:num_pixels - reduce_count]
    # Means of the trimmed channels
meanH = np.mean(reduced_h)
meanS = np.mean(reduced_s)
meanV = np.mean(reduced_v)
return meanH, meanS, meanV
def write_to_excel(data, excel_path):
    # Create a new workbook
    wb = openpyxl.Workbook()
    ws = wb.active
    ws.title = "Barcodes"
    # Write the header row
    headers = ["gray", "hsv"]
    ws.append(headers)
if isinstance(data, str):
try:
data = json.loads(data)
except json.JSONDecodeError as e:
print(f"Error decoding JSON: {e}")
return
for json_str in data:
try:
entry = json.loads(json_str) # Parse JSON string into a dictionary
row = [
entry.get('gray', ''), # Use .get() to safely retrieve value or default to ''
entry.get('hsv', ''),
]
ws.append(row)
except json.JSONDecodeError as e:
print(f"Error decoding JSON: {e}")
    # Save the workbook
    wb.save(excel_path)
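# Urine test strip analytes, indexed by detection label_id. "text" is the Chinese
# display name and "value" the identifier used elsewhere in this module; for
# example 潜血 = occult blood, 肌酐 = creatinine, 葡萄糖 = glucose, 白细胞 = leukocytes.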
testList = [
{
"text": '白',
"value": "bai"
},
{
"text": '潜血',
"value": "qianxue"
},
{
"text": '肌酐',
"value": "jigan"
},
{
"text": '钙离子',
"value": "gai"
},
{
"text": '胆红素',
"value": "danhongsu"
},
{
"text": '酮体',
"value": "tongti"
},
{
"text": '葡萄糖',
"value": "putaotang"
},
{
"text": '微量白蛋白',
"value": "baidanbai"
},
{
"text": '蛋白质',
"value": "danbaizhi"
},
{
"text": '亚硝酸盐',
"value": "yaxiaosuanyan"
},
{
"text": '白细胞',
"value": "baixibao"
},
{
"text": 'PH',
"value": "PH"
},
{
"text": '比重',
"value": "bizhong"
},
{
"text": '胆原',
"value": "danyuan"
},
{
"text": '抗坏血酸',
"value": "xuesuan"
}
]
def start1():
data = []
fileList = ["iPhone"] ##iPhone", "Redmi", "samsung", "vivo"
testTimeList = ["30", "60", "90"]
nongdu = "1" ##检测浓度
substrings2 = "jigan" ##检测项目
substrings = [substrings2]
substrings = [substrings2, "high"]
substrings3 = substrings2 ##显示项目 high normal
for i in fileList:
path = "uploads0725/" + i + "/"
for filename in os.listdir(path):
            # List of substrings to look for in the file name
            # Use all() to require that every substring is present
            exists = all(re.search(substring, filename) for substring in substrings)
            if filename.count('_') >= 1:
                parts = filename.split('_')
                if parts[1] != substrings2:
                    continue
if filename.count('_') >= 3:
parts = filename.split('_')
quality = parts[3]
testTime = parts[4]
if filename.count('_') >= 2:
parts = filename.split('_')
                # Compare the concentration field (after the second underscore)
                if parts[2] == nongdu and exists:
file_path = os.path.join(path, filename)
dataList = json.dumps({
'phoneName': i,
'fileList': file_path,
'quality': quality,
'testTime': testTime,
})
data.append(dataList)
print(data)
testTimeLists = {}
for t in testTimeList:
testTimeLists[t] = []
for d in data:
jsonObj = json.loads(d)
currTestTime = jsonObj.get("testTime")
if currTestTime in testTimeLists:
testTimeLists[currTestTime].append(jsonObj)
# Print the classified data
for t in testTimeList:
print(f"Test Time: {t}")
for item1 in testTimeLists[t]:
data_list = similarity_localImage_gray(item1["fileList"], delta_r, delta_g, delta_b)
for item in data_list:
data1 = json.loads(item)
if data1['name'] == substrings3:
print(data1['gray'])
def save_to_excel(df, excel_path):
    test_times = ["30", "60", "90"]
    qualities = ["high", "normal"]
    # A single ExcelWriter context writes every sheet and saves the workbook on exit
    with pd.ExcelWriter(excel_path, engine='openpyxl') as writer:
        for testTime in test_times:
            for quality in qualities:
                subset_df = df[(df['quality'] == quality) & (df['testTime'] == testTime)]
                pivot_table = subset_df.pivot(index='gray', columns='phoneName', values='gray').reset_index(drop=True)
                sheet_name = f"{quality}_testTime_{testTime}_consolidated"
                # Write the consolidated data to its own sheet
                pivot_table.to_excel(writer, sheet_name=sheet_name, index=False)
    print("Data saved to Excel successfully.")
def similarity_localImage_gray_1(file_path, delta_r, delta_g, delta_b):
# Dummy implementation for illustration
# Assume it returns a list of JSON strings with a `gray` value
return [json.dumps({"name": "baidanbai", "gray": 123})]
def start2():
data = []
fileList = ["iPhone", "Redmi", "samsung", "vivo"]
testTimeList = ["30", "60", "90"]
qualityList = ["normal", "high"]
testItem = "baidanbai" ##检测项目图片
nongdu = "1" ##检测浓度
picFileNameList = [testItem]
showItem = testItem ##显示项目 14项目
for i in fileList:
path = "uploads0725/" + i + "/"
for filename in os.listdir(path):
if '_' not in filename:
continue
parts = filename.split('_')
if parts[1] != testItem:
continue
quality = parts[3]
testTime = parts[4]
exists = all(substring in filename for substring in picFileNameList)
if exists and parts[2] == nongdu:
file_path = os.path.join(path, filename)
dataList = json.dumps({
'phoneName': i,
'fileList': file_path,
'quality': quality,
'testTime': testTime,
                    'concentration': nongdu,
})
data.append(dataList)
processed_data = []
for item in data:
i = json.loads(item)
data_list = similarity_localImage_gray(i["fileList"], delta_r, delta_g, delta_b)
for data_item in data_list:
data1 = json.loads(data_item)
if data1['name'] == showItem:
i.update(data1)
processed_data.append(json.dumps(i))
writer = pd.ExcelWriter('phone_gray_data.xlsx', engine='openpyxl')
# Create data subsets and save to Excel
for testTime in testTimeList:
for quality in qualityList:
data_dict = {phone: [] for phone in fileList}
for item in processed_data:
i = json.loads(item)
if i['testTime'] == testTime and i['quality'] == quality:
gray_values = i.get('gray', [])
if isinstance(gray_values, float):
gray_values = [gray_values]
# data_dict[i['phoneName']].extend(gray_values)
data_dict[i['phoneName']].extend(gray_values)
###hsv
# if isinstance(gray_values, int):
# gray_values = [gray_values]
# if gray_values:
# data_dict[i['phoneName']].append(gray_values[0])
# Convert dictionary to DataFrame
max_length = max(len(values) for values in data_dict.values())
for phone in data_dict:
while len(data_dict[phone]) < max_length:
data_dict[phone].append('')
df = pd.DataFrame(data_dict)
sheet_name = f"{quality}_{testTime}"
df.to_excel(writer, sheet_name=sheet_name, index=False)
    writer.close()  # close() writes and saves the workbook (writer.save() was removed in newer pandas)
# df = pd.DataFrame([json.loads(item) for item in processed_data])
# save_to_excel(df, "phone_gray_data.xlsx")
print("qwq")
if __name__ == '__main__':
delta_r = 43.35056747740992
delta_g = 74.17964642280691
delta_b = 30.161377930938723
# data=similarity_localImage_gray("uploads0725/vivo/vivo_jigan_1_high_90_tmp_ad25075caaa248fc341d3fde65e0148b1c94619fea6c67b0.jpg",delta_r,delta_g,delta_b)
# print(data)
    start2()
cv2.waitKey(0)
cv2.destroyAllWindows()