import datetime
import os
import re
import sys
import threading
import time
from itertools import islice
import cv2
import numpy as np
from PIL import ImageEnhance, ImageQt
from pathlib import Path
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import QSize, QTimer, QDateTime
from PyQt5.QtGui import QPixmap, QIcon
from PyQt5.QtWidgets import QFileDialog, QApplication, QMessageBox
import pymysql
import shutil
import torch
import torch.backends.cudnn as cudnn
import os.path as osp
import smtplib
from email.mime.text import MIMEText
from email.utils import formataddr
my_sender = 'wangjiahao1220@foxmail.com'  # sender email account
my_pass = 'pqodlwmvjtplibia'  # sender email SMTP authorization code
FILE = Path(__file__).resolve()
ROOT = FILE.parents[0] # YOLOv5 root directory
if str(ROOT) not in sys.path:
sys.path.append(str(ROOT)) # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
from models.common import DetectMultiBackend
from utils.datasets import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams
from utils.general import (LOGGER, check_file, check_img_size, check_imshow, check_requirements, colorstr,
increment_path, non_max_suppression, print_args, scale_coords, strip_optimizer, xyxy2xywh)
from utils.plots import Annotator, colors, save_one_box
from utils.torch_utils import select_device, time_sync
import qwe
from main import Ui_MainWindow  # the generated main-window GUI class
from qwe import Ui_MainWindow as login1
def mail(my_user):
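    """Send a fixed fire-alert email to my_user via QQ's SMTP server; returns True on success, False otherwise."""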
ret = True
try:
        msg = MIMEText('着火咯!!!!', 'plain', 'utf-8')  # email body
        msg['From'] = formataddr(["pig", my_sender])  # sender nickname and account
        msg['To'] = formataddr(["hjd", my_user])  # recipient nickname and account
        msg['Subject'] = "着火啦速来"  # email subject line
        server = smtplib.SMTP_SSL("smtp.qq.com", 465)  # the sender account's SMTP server
        server.login(my_sender, my_pass)  # sender account and its authorization code
        server.sendmail(my_sender, [my_user, ], msg.as_string())  # sender, recipient list, message
        server.quit()  # close the connection
    except Exception:  # any failure in the try block means the mail was not sent
        ret = False
return ret
def is_valid_email(email):
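    """Return True when email matches a basic address pattern, e.g. user@example.com."""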
pattern = r'^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$'
    return re.match(pattern, email) is not None
def model_load(weights="", # model.pt path(s)
device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu
half=False, # use FP16 half-precision inference
dnn=False, # use OpenCV DNN for ONNX inference
):
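    """Load a YOLOv5 DetectMultiBackend model on the requested device; FP16 only applies to PyTorch models on CUDA."""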
    device = select_device(device)
    model = DetectMultiBackend(weights, device=device, dnn=dnn)
    stride, names, pt, jit, onnx = model.stride, model.names, model.pt, model.jit, model.onnx
    # Half precision is only supported by PyTorch models on CUDA
    half &= pt and device.type != 'cpu'
    if pt:
        model.model.half() if half else model.model.float()
    print("模型加载完成!")
return model
class mywindow(QtWidgets.QMainWindow, Ui_MainWindow):
def __init__(self):
super(mywindow, self).__init__()
self.end_video = None
self.output_path = None
self.start_time = None
self.record = False
self.f1 = 0
self.vol = 0
self.row = 0
self.lable = -1
self.email = ""
self.pro = False
self.k = 0
self.playing = True
self.webcam = True
        self.vid_source = '0'  # default video source is the webcam
self.img2predict = ""
self.setupUi(self)
self.setWindowTitle('火灾检测系统')
self.device = 'cpu'
self.output_size = 480
self.stopEvent = threading.Event()
self.stopEvent.clear()
self.process.setVisible(False)
self.model = model_load(weights="runs/train/exp8/weights/best.pt",
device=self.device)
self.out = None
self.reset_video()
self.process.valueChanged.connect(self.process_video)
self.action_1.triggered.connect(self.btn1_fun)
self.action_2.triggered.connect(self.btn2_fun)
        # Image-detection page
self.upload.clicked.connect(self.upload_img)
self.begin_pic.clicked.connect(self.detect_img)
        # Video-detection page
self.cemera.clicked.connect(self.open_camera)
icon = QIcon("./images/UI/camera.png")
self.cemera.setIcon(icon)
self.cemera.setIconSize(QSize(30, 30))
self.cemera.setFixedSize(icon.actualSize(QSize(30, 30)))
self.ved.clicked.connect(self.open_video)
icon = QIcon("./images/UI/video.png")
self.ved.setIcon(icon)
self.ved.setIconSize(QSize(30, 30))
self.ved.setFixedSize(icon.actualSize(QSize(30, 30)))
self.stop.clicked.connect(self.close_video)
icon = QIcon("./images/UI/stop.png")
self.stop.setIcon(icon)
self.stop.setIconSize(QSize(30, 30))
self.stop.setFixedSize(icon.actualSize(QSize(30, 30)))
self.pause.clicked.connect(self.pause_video)
self.again.clicked.connect(self.again_video)
icon = QIcon("./images/UI/pause.png")
self.pause.setIcon(icon)
self.pause.setIconSize(QSize(40, 40))
self.pause.setFixedSize(icon.actualSize(QSize(40, 40)))
self.pause.setVisible(False)
icon = QIcon("./images/UI/again.png")
self.again.setIcon(icon)
self.again.setIconSize(QSize(40, 40))
self.again.setFixedSize(icon.actualSize(QSize(40, 40)))
self.again.setVisible(False)
self.pushButton.clicked.connect(self.phone_click)
self.pushButton_2.clicked.connect(self.reset_video)
self.timer = QTimer()
        # Tie the clock-label update to the timer's timeout signal
self.timer.timeout.connect(self.update_label_text)
self.tableWidget.itemClicked.connect(self.item_clicked_handler)
        # Timer interval in milliseconds (update once per second)
self.timer.start(1000)
    # Refresh the clock label with the current time
    def update_label_text(self):
        # Get the current time
        current_time = QDateTime.currentDateTime().toString("yyyy-MM-dd HH:mm:ss")
        # Show it on the label
        self.label_2.setText(current_time)
def Table_Data(self, i, j, data):
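        """Write data into table cell (i, j), mapping numeric fire types in column 0 to their display names."""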
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setItem(i, j, item)
item.setText(str(data))
        # Column 0 (type): map the numeric code to a display name
        if j == 0:
            if data == 0:
                item.setText("小火")
            elif data == 1:
                item.setText("中火")
            elif data == 2:
                item.setText("大火")
        # Column 1 (time): use a smaller font
        if j == 1:
            item.setFont(QtGui.QFont("Arial", 8))  # font size 8
def item_clicked_handler(self, item):
        if item.column() == 2:  # the playback path lives in the third column
            path_value = item.text()  # the stored file path
            self.get_file_by_path(path_value)  # replay the recording at that path
def get_file_by_path(self, path):
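        """Replay a saved recording: switch the video source to the given file and start a detection thread."""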
        normalized_path = os.path.normpath(path)  # normalize the path
        if os.path.exists(normalized_path):  # check that the file still exists
print(normalized_path)
self.process.setVisible(True)
self.pause.setVisible(True)
self.again.setVisible(True)
self.pause.setEnabled(True)
self.again.setEnabled(True)
self.groupBox.setVisible(False)
self.playing = True
self.cemera.setEnabled(False)
self.ved.setEnabled(False)
self.vid_source = normalized_path
self.webcam = False
th = threading.Thread(target=self.detect_vid)
th.start()
else:
print("File does not exist.")
    def My_Sql(self):  # query MySQL and populate the table widget
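        """Load this user's fire records (type, time, video) from MySQL into tableWidget."""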
connection = pymysql.connect(host='localhost', port=3306, user='root', passwd='123456', db='pic',
charset='utf8')
        print('successfully connected')
cur = connection.cursor()
        cur.execute('select type,time,video from fire where manager=%s', (self.email,))  # fetch this user's records
total = cur.fetchall()
try:
col_result = cur.description
            self.row = cur.rowcount  # number of records -> table row count
            self.vol = len(total[0])  # number of fields -> table column count
col_result = list(col_result)
a = 0
self.tableWidget.setColumnCount(self.vol)
self.tableWidget.setRowCount(self.row)
            for i in col_result:  # copy the MySQL column headers into the table header
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(a, item)
item = self.tableWidget.horizontalHeaderItem(a)
item.setText(i[0])
a = a + 1
            total = list(total)  # convert the whole result set to a list
            for i in range(len(total)):  # and each row within it
                total[i] = list(total[i])
for i in range(self.row):
for j in range(self.vol):
self.Table_Data(i, j, total[i][j])
except Exception as e:
print("An error occurred:", str(e))
def btn1_fun(self):
self.stackedWidget.setCurrentIndex(0)
def btn2_fun(self):
self.stackedWidget.setCurrentIndex(1)
    # Upload an image
def upload_img(self):
try:
            # Pick an image file to read
fileName, fileType = QFileDialog.getOpenFileName(self, 'Choose file', '', '*.jpg *.png *.tif *.jpeg')
if fileName:
suffix = fileName.split(".")[-1]
save_path = osp.join("images/tmp", "tmp_upload." + suffix)
shutil.copy(fileName, save_path)
                # Resize the image for display
im0 = cv2.imread(save_path)
resize_scale = self.output_size / im0.shape[0]
im0 = cv2.resize(im0, (0, 0), fx=resize_scale, fy=resize_scale)
cv2.imwrite("images/tmp/upload_show_result.jpg", im0)
self.img2predict = fileName
image = QtGui.QPixmap("images/tmp/upload_show_result.jpg").scaled(self.yuan.width(),
self.yuan.height())
self.yuan.setPixmap(image)
# image1 = QtGui.QPixmap("images/UI/right.jpeg").scaled(self.pic.width(),
# self.pic.height())
# self.pic.setPixmap(image1)
except Exception as e:
print("An error occurred:", str(e))
    # Detect fire in the uploaded image
def detect_img(self):
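        """Run single-image inference, draw boxes, grade the fire size by box area, and show the result."""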
model = self.model
output_size = self.output_size
source = self.img2predict # file/dir/URL/glob, 0 for webcam
imgsz = [640, 640] # inference size (pixels)
conf_thres = 0.25 # confidence threshold
iou_thres = 0.45 # NMS IOU threshold
max_det = 1000 # maximum detections per image
device = self.device # cuda device, i.e. 0 or 0,1,2,3 or cpu
view_img = False # show results
save_txt = False # save results to *.txt
save_conf = False # save confidences in --save-txt labels
save_crop = False # save cropped prediction boxes
nosave = False # do not save images/videos
classes = None # filter by class: --class 0, or --class 0 2 3
agnostic_nms = False # class-agnostic NMS
        augment = False  # augmented inference
visualize = False # visualize features
line_thickness = 3 # bounding box thickness (pixels)
hide_labels = False # hide labels
hide_conf = False # hide confidences
half = False # use FP16 half-precision inference
dnn = False # use OpenCV DNN for ONNX inference
# print(source)
if source == "":
QMessageBox.warning(self, "请上传", "请先上传图片再进行检测")
else:
source = str(source)
device = select_device(self.device)
webcam = False
stride, names, pt, jit, onnx = model.stride, model.names, model.pt, model.jit, model.onnx
imgsz = check_img_size(imgsz, s=stride) # check image size
save_img = not nosave and not source.endswith('.txt') # save inference images
# Dataloader
if webcam:
view_img = check_imshow()
cudnn.benchmark = True # set True to speed up constant image size inference
dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt and not jit)
bs = len(dataset) # batch_size
else:
dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt and not jit)
bs = 1 # batch_size
vid_path, vid_writer = [None] * bs, [None] * bs
# Run inference
if pt and device.type != 'cpu':
model(torch.zeros(1, 3, *imgsz).to(device).type_as(next(model.model.parameters()))) # warmup
dt, seen = [0.0, 0.0, 0.0], 0
for path, im, im0s, vid_cap, s in dataset:
t1 = time_sync()
im = torch.from_numpy(im).to(device)
im = im.half() if half else im.float() # uint8 to fp16/32
im /= 255 # 0 - 255 to 0.0 - 1.0
if len(im.shape) == 3:
im = im[None] # expand for batch dim
t2 = time_sync()
dt[0] += t2 - t1
# Inference
# visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False
pred = model(im, augment=augment, visualize=visualize)
t3 = time_sync()
dt[1] += t3 - t2
# NMS
pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)
dt[2] += time_sync() - t3
# Second-stage classifier (optional)
# pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)
# Process predictions
for i, det in enumerate(pred): # per image
seen += 1
if webcam: # batch_size >= 1
p, im0, frame = path[i], im0s[i].copy(), dataset.count
s += f'{i}: '
else:
p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0)
p = Path(p) # to Path
s += '%gx%g ' % im.shape[2:] # print string
gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
imc = im0.copy() if save_crop else im0 # for save_crop
annotator = Annotator(im0, line_width=line_thickness, example=str(names))
mask = np.zeros(im0.shape[:2], dtype=np.uint8)
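                    # The mask accumulates the union of all boxes, so overlapping detections aren't double-counted.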
if len(det):
# Rescale boxes from img_size to im0 size
det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round()
# Print results
for c in det[:, -1].unique():
n = (det[:, -1] == c).sum() # detections per class
s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string
# Write results
for *xyxy, conf, cls in reversed(det):
if save_txt: # Write to file
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(
-1).tolist() # normalized xywh
line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
# with open(txt_path + '.txt', 'a') as f:
# f.write(('%g ' * len(line)).rstrip() % line + '\n')
if save_img or save_crop or view_img: # Add bbox to image
c = int(cls) # integer class
label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')
annotator.box_label(xyxy, label, color=colors(c, True))
# if save_crop:
# save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg',
# BGR=True)
x1, y1, x2, y2 = map(int, xyxy)
mask[y1:y2, x1:x2] = 1
# Calculate the total area of the union of all bounding boxes
total_area = np.sum(mask)
# Calculate total area percentage
image_area = im0.shape[0] * im0.shape[1]
area_percentage = (total_area / image_area) * 100
print(area_percentage)
if area_percentage < 10:
self.label_9.setText("目前为小火")
elif area_percentage < 50:
self.label_9.setText("中火")
else:
self.label_9.setText("大火")
LOGGER.info(f'{s}Done. ({t3 - t2:.3f}s)')
# Stream results
im0 = annotator.result()
# if view_img:
# cv2.imshow(str(p), im0)
# cv2.waitKey(1) # 1 millisecond
# Save results (image with detections)
resize_scale = output_size / im0.shape[0]
im0 = cv2.resize(im0, (0, 0), fx=resize_scale, fy=resize_scale)
cv2.imwrite("images/tmp/single_result.jpg", im0)
image = QtGui.QPixmap("images/tmp/single_result.jpg").scaled(self.pic.width(),
self.pic.height())
self.pic.setPixmap(image)
    # Open the webcam
def open_camera(self):
self.cemera.setEnabled(False)
self.ved.setEnabled(False)
self.stop.setEnabled(True)
self.vid_source = '0'
self.webcam = True
self.My_Sql()
th = threading.Thread(target=self.detect_vid)
th.start()
    # Upload a video
def open_video(self):
self.process.setVisible(True)
self.pause.setVisible(True)
self.again.setVisible(True)
self.groupBox.setVisible(False)
fileName, fileType = QFileDialog.getOpenFileName(self, 'Choose file', '', '*.mp4 *.avi')
if fileName:
self.playing = True
self.cemera.setEnabled(False)
self.ved.setEnabled(False)
self.vid_source = fileName
self.webcam = False
th = threading.Thread(target=self.detect_vid)
th.start()
    # Run detection on the current video source (webcam or file)
def detect_vid(self):
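        """Stream frames from the webcam or a video file, detect fire, grade its size, record alert clips, and email the user."""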
model = self.model
output_size = self.output_size
# source = self.img2predict # file/dir/URL/glob, 0 for webcam
imgsz = [640, 640] # inference size (pixels)
conf_thres = 0.25 # confidence threshold
iou_thres = 0.45 # NMS IOU threshold
max_det = 1000 # maximum detections per image
# device = self.device # cuda device, i.e. 0 or 0,1,2,3 or cpu
view_img = False # show results
save_txt = False # save results to *.txt
save_conf = False # save confidences in --save-txt labels
save_crop = False # save cropped prediction boxes
nosave = False # do not save images/videos
classes = None # filter by class: --class 0, or --class 0 2 3
agnostic_nms = False # class-agnostic NMS
        augment = False  # augmented inference
visualize = False # visualize features
line_thickness = 3 # bounding box thickness (pixels)
hide_labels = False # hide labels
hide_conf = False # hide confidences
half = False # use FP16 half-precision inference
dnn = False # use OpenCV DNN for ONNX inference
source = str(self.vid_source)
webcam = self.webcam
device = select_device(self.device)
stride, names, pt, jit, onnx = model.stride, model.names, model.pt, model.jit, model.onnx
imgsz = check_img_size(imgsz, s=stride) # check image size
save_img = not nosave and not source.endswith('.txt') # save inference images
# Dataloader
if webcam:
view_img = check_imshow()
cudnn.benchmark = True # set True to speed up constant image size inference
dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt and not jit)
bs = len(dataset) # batch_size
else:
dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt and not jit)
bs = 1 # batch_size
vid_path, vid_writer = [None] * bs, [None] * bs
# Run inference
db = get_conn()
if pt and device.type != 'cpu':
model(torch.zeros(1, 3, *imgsz).to(device).type_as(next(model.model.parameters()))) # warmup
dt, seen = [0.0, 0.0, 0.0], 0
flag = 0
if not webcam:
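            # NOTE: this project's LoadImages appears to be locally modified: __frames__() reports
            # the total frame count and the cc argument (used below) seeks to a frame index.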
length = dataset.__frames__()
self.process.setMaximum(length)
crm_zero = -1
crm = 0
while flag != 1:
t = self.k
if not webcam:
dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt and not jit, cc=t)
for path, im, im0s, vid_cap, s in dataset:
t1 = time_sync()
im = torch.from_numpy(im).to(device)
im = im.half() if half else im.float() # uint8 to fp16/32
im /= 255 # 0 - 255 to 0.0 - 1.0
if len(im.shape) == 3:
im = im[None] # expand for batch dim
t2 = time_sync()
dt[0] += t2 - t1
# Inference
# visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False
pred = model(im, augment=augment, visualize=visualize)
t3 = time_sync()
dt[1] += t3 - t2
# NMS
pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)
dt[2] += time_sync() - t3
fl = 0
for i, det in enumerate(pred): # per image
seen += 1
                    if not self.playing:  # paused: idle until playback resumes or stop is requested
while not self.playing and not self.stopEvent.is_set():
time.sleep(0.1)
continue
if webcam: # batch_size >= 1
p, im0, frame = path[i], im0s[i].copy(), dataset.count
s += f'{i}: '
else:
p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0)
s += '%gx%g ' % im.shape[2:] # print string
gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
imc = im0.copy() if save_crop else im0 # for save_crop
annotator = Annotator(im0, line_width=line_thickness, example=str(names))
# print(len(det))
if len(det) == 0:
crm_zero = 1
self.label_7.setText("目前无火灾发生")
else:
if webcam:
crm_zero = 0
# Rescale boxes from img_size to im0 size
det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round()
# Print results
for c in det[:, -1].unique():
n = (det[:, -1] == c).sum() # detections per class
s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string
mask = np.zeros(im0.shape[:2], dtype=np.uint8)
                        # Collect every detection box, rasterize each one into the binary mask,
                        # union the masks, and measure the area of the union.
# Write results
for *xyxy, conf, cls in reversed(det):
if save_txt: # Write to file
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(
-1).tolist() # normalized xywh
line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
# with open(txt_path + '.txt', 'a') as f:
# f.write(('%g ' * len(line)).rstrip() % line + '\n')
if save_img or save_crop or view_img: # Add bbox to image
c = int(cls) # integer class
                                # print(c)  # integer fire-class label
label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')
annotator.box_label(xyxy, label, color=colors(c, True))
# if save_crop:
# save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg',
# BGR=True)
x1, y1, x2, y2 = map(int, xyxy)
mask[y1:y2, x1:x2] = 1
# Calculate the total area of the union of all bounding boxes
total_area = np.sum(mask)
# Calculate total area percentage
image_area = im0.shape[0] * im0.shape[1]
area_percentage = (total_area / image_area) * 100
print(area_percentage)
if area_percentage < 10:
self.label_7.setText("目前火势较小")
self.lable = 0
elif area_percentage < 50:
self.label_7.setText("火势增大,请注意")
self.lable = 1
else:
self.label_7.setText("目前火势很大,请来支援")
self.lable = 2
if crm_zero == 0:
if crm == 0:
mail(self.email)
current_datetime = datetime.datetime.now()
self.f1 += 1
if self.f1 == 1:
self.record = True
                            if self.record and self.f1 == 1:
self.start_time = time.time()
fourcc = cv2.VideoWriter.fourcc(*"mp4v")
video_name = current_datetime.strftime("%Y-%m-%d_%H-%M-%S") + ".mp4"
if self.playback.text() == "":
QMessageBox.warning(self, "警告", "未输入保存回放的路径")
else:
                                    absolute_path = os.path.abspath(self.playback.text())
                                    output_path = os.path.join(absolute_path, video_name)
                                    self.output_path = output_path
self.out = cv2.VideoWriter(self.output_path, fourcc, 20.0, (640, 480))
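                                    # NOTE: the writer is opened at 640x480; cv2.VideoWriter silently
                                    # discards frames whose size does not match, so frames are assumed
                                    # to already be 640x480 BGR here.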
add_fire(self.lable, current_datetime, self.email, self.output_path, db)
self.My_Sql()
crm += 1
if self.end_video is not None:
print(time.time() - self.end_video)
if self.end_video is not None and (time.time() - self.end_video) >= 40:
self.end_video = None
crm = 0
print(crm)
# Print time (inference-only)
LOGGER.info(f'{s}Done. ({t3 - t2:.3f}s)')
# Stream results
# Save results (image with detections)
im0 = annotator.result()
frame = im0
                    if self.out is not None and webcam and self.record:  # only write while a recording is open
self.out.write(frame)
if time.time() - self.start_time > 10:
self.record = False
self.f1 = 0
print("Recording finished.")
self.end_video = time.time()
self.out.release()
resize_scale = output_size / frame.shape[0]
frame_resized = cv2.resize(frame, (0, 0), fx=resize_scale, fy=resize_scale)
cv2.imwrite("images/tmp/single_result_vid.jpg", frame_resized)
image = QtGui.QPixmap("images/tmp/single_result_vid.jpg").scaled(self.video.width(),
self.video.height())
self.video.setPixmap(image)
self.k = self.k + 1
self.process.setValue(self.k)
if self.pro:
self.pro = False
time.sleep(0.1)
self.k = self.process.value()
fl = 1
break
# self.vid_img
# if view_img:
# cv2.imshow(str(p), im0)
# self.vid_img.setPixmap(QPixmap("images/tmp/single_result_vid.jpg"))
# cv2.waitKey(1) # 1 millisecond
if fl == 1:
break
                cv2.waitKey(20)
                if self.stopEvent.is_set():
self.stopEvent.clear()
self.cemera.setEnabled(True)
self.ved.setEnabled(True)
self.reset_video()
flag = 1
break
print("kkk")
    # Stop detection
def close_video(self):
self.stopEvent.set()
self.reset_video()
def phone_click(self):
email = self.user.text()
if is_valid_email(email):
self.cemera.setEnabled(True)
self.ved.setEnabled(True)
self.pause.setEnabled(True)
self.again.setEnabled(True)
self.user.setEnabled(False)
self.pushButton.setEnabled(False)
self.pushButton_2.setEnabled(True)
self.email = email
self.My_Sql()
else:
QMessageBox.warning(self, "警告", "邮箱格式不正确,请重新输入")
    # Reset the UI
def reset_video(self):
self.video.clear()
image = QtGui.QPixmap("images/UI/fire.png").scaled(self.video.width(),
self.video.height())
self.video.setPixmap(image)
self.process.setValue(0)
self.playing = True
self.cemera.setEnabled(False)
self.ved.setEnabled(False)
self.user.setEnabled(True)
self.pause.setEnabled(False)
self.again.setEnabled(False)
self.pushButton.setEnabled(True)
self.pushButton_2.setEnabled(False)
self.groupBox.setVisible(True)
self.vid_source = '0'
self.webcam = True
self.process.setVisible(False)
self.pause.setVisible(False)
self.again.setVisible(False)
    # Pause the video
def pause_video(self):
self.playing = False
self.pause.setEnabled(False)
self.again.setEnabled(True)
    # Resume the video
def again_video(self):
self.playing = True
self.pause.setEnabled(True)
self.again.setEnabled(False)
    # Handle progress-bar seeking
def process_video(self, value):
if not self.webcam:
self.k = value
self.pro = True
def get_conn():  # open a database connection
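    """Open a pymysql connection to the local `pic` database; returns None when the connection fails."""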
try:
        conn = pymysql.connect(host='localhost',  # host
                               user='root',  # user
                               port=3306,  # port
                               password='123456',  # password
                               charset='utf8',  # encoding
                               database='pic'  # database name
                               )
        # A Connection instance is the channel all queries travel through; pymysql.connect() creates it.
        print('数据库连接成功')
        return conn  # the initialized Connection instance
    except pymysql.Error:
        print('数据库连接错误')
def add_fire(fire_type, fire_time, manager, video, connection):
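    """Insert one fire record (type, time, manager email, clip path) through the given pymysql connection."""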
    # Build the SQL statement as a parameterized query
    sql = "INSERT INTO fire (type, time, manager, video) VALUES (%s, %s, %s, %s)"
    # Try to execute it
    try:
        with connection.cursor() as cursor:  # open a cursor
            cursor.execute(sql, (fire_type, fire_time, manager, video))
            connection.commit()  # commit the transaction
print("Fire record added successfully.")
return True
except Exception as e:
print(f"Error during database query: {e}")
return False
def close_conn(conn, cursor):  # conn: Connection instance; cursor: its cursor
    # Close the cursor first, then the connection; report but tolerate failures.
    for resource in (cursor, conn):
        try:
            if resource:
                resource.close()
        except Exception as e:
            print(e)
def has_user(Account, Passwd, connection):
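    """Return True when a row in `user` matches the given account and password, False otherwise."""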
    # Build the SQL statement as a parameterized query
    sql = "SELECT * FROM user WHERE user = %s AND pwd = %s"
    # Try to execute it
    try:
        with connection.cursor() as cursor:  # open a cursor
            cursor.execute(sql, (Account, Passwd))
            result = cursor.fetchone()
            close_conn(connection, cursor)
            # A non-None result means a matching user was found
return result is not None
except Exception as e:
print(f"Error during database query: {e}")
return False
def quit_app():
    login.close()
class login(QtWidgets.QMainWindow, login1):
def __init__(self):
super(login, self).__init__()
self.setupUi(self)
self.login.clicked.connect(self.pushButtonLogin_click)
        self.quit.clicked.connect(quit_app)
def pushButtonLogin_click(self):
db = get_conn()
        # Check the entered username and password
username = self.user.text()
password = self.pwd.text()
if len(username) and len(password) and has_user(username, password, db):
            login.close()  # close the login window
window.show()
        else:  # pop a QMessageBox warning when the username or password is wrong
QMessageBox.warning(window,
"警告",
"用户名或密码错误!",
QMessageBox.Yes)
if __name__ == '__main__':
QtCore.QCoreApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling)
app = QtWidgets.QApplication(sys.argv)
window = mywindow()
login = login()
login.show()
sys.exit(app.exec_())