import cv2
import numpy as np
import mvsdk
from ultralytics import YOLO
from photo2world import pixel_to_world
from square29 import calculate_subsquare_centers
import globals
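# Note: `globals` here is a local project module, not the builtin globals().
# Based on how it is used below, it is expected to provide (an assumption, not
# verified against the module itself):
#   initial_left_up_world / initial_right_up_world /
#   initial_left_down_world / initial_right_down_world  -- board corner coordinates
#   white_ca, black_ca        -- lists of detections outside the board
#   board_ca, board_locate    -- per-subsquare colour and position, indexed from 1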

def enumerate_cameras():
    DevList = mvsdk.CameraEnumerateDevice()
    nDev = len(DevList)
    if nDev < 1:
        print("No camera was found!")
        return None
    for i, DevInfo in enumerate(DevList):
        print("{}: {} {}".format(i, DevInfo.GetFriendlyName(), DevInfo.GetPortType()))
    i = 0 if nDev == 1 else int(input("Select camera: "))
    return DevList[i] if nDev > 0 else None

def init_camera(DevInfo):
    hCamera = mvsdk.CameraInit(DevInfo, -1, -1)
    cap = mvsdk.CameraGetCapability(hCamera)
    monoCamera = (cap.sIspCapacity.bMonoSensor != 0)
    # Output MONO8 for mono sensors, BGR8 for colour sensors
    if monoCamera:
        mvsdk.CameraSetIspOutFormat(hCamera, mvsdk.CAMERA_MEDIA_TYPE_MONO8)
    else:
        mvsdk.CameraSetIspOutFormat(hCamera, mvsdk.CAMERA_MEDIA_TYPE_BGR8)
    mvsdk.CameraSetTriggerMode(hCamera, 0)  # continuous (free-run) acquisition
    mvsdk.CameraSetAeState(hCamera, True)   # enable auto exposure
    mvsdk.CameraPlay(hCamera)
    mvsdk.CameraSetWbMode(hCamera, True)    # auto white balance
    mvsdk.CameraSetAeTarget(hCamera, 110)
    mvsdk.CameraSetOnceWB(hCamera)          # trigger a one-shot white balance
    return hCamera, cap, monoCamera

def get_frame(hCamera, pFrameBuffer, monoCamera):
    # Grab a raw frame (200 ms timeout), run it through the ISP into pFrameBuffer,
    # then hand the raw buffer back to the SDK.
    pRawData, FrameHead = mvsdk.CameraGetImageBuffer(hCamera, 200)
    mvsdk.CameraImageProcess(hCamera, pRawData, pFrameBuffer, FrameHead)
    mvsdk.CameraReleaseImageBuffer(hCamera, pRawData)
    # Wrap the processed buffer as a numpy image (1 channel for mono, 3 for BGR)
    frame_data = (mvsdk.c_ubyte * FrameHead.uBytes).from_address(pFrameBuffer)
    frame = np.frombuffer(frame_data, dtype=np.uint8)
    frame = frame.reshape((FrameHead.iHeight, FrameHead.iWidth,
                           1 if FrameHead.uiMediaType == mvsdk.CAMERA_MEDIA_TYPE_MONO8 else 3))
    return frame

def crop_frame(frame):
    width, height = 1280, 1024
    crop_width, crop_height = width // 3 + 200, height // 3 + 100
    start_x = (width - crop_width) // 2
    start_y = (height - crop_height) // 2 - 100
    return frame[start_y:start_y + crop_height, start_x:start_x + crop_width]

def process_frame_for_contours(frame):
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    edged = cv2.Canny(gray, 30, 150)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
    closed = cv2.morphologyEx(edged, cv2.MORPH_CLOSE, kernel)
    contours, _ = cv2.findContours(closed.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    return contours, edged

def draw_largest_contour(frame, contours):
    if contours:
        largest_contour = max(contours, key=cv2.contourArea)
        epsilon = 0.1 * cv2.arcLength(largest_contour, True)
        approx = cv2.approxPolyDP(largest_contour, epsilon, True)
        if len(approx) == 4:
            cv2.polylines(frame, [approx], True, (0, 255, 0), 2)
            approx = sorted(approx, key=lambda x: (x[0][1], x[0][0]))
            top_points = sorted(approx[:2], key=lambda x: x[0][0])
            bottom_points = sorted(approx[2:], key=lambda x: x[0][0])
            sorted_vertices = np.array([top_points[0], top_points[1], bottom_points[1], bottom_points[0]])
            # Swap left and right
            right_up, left_up, right_down, left_down = (sorted_vertices[1][0], sorted_vertices[0][0],
                                                        sorted_vertices[3][0], sorted_vertices[2][0])
            # Swap the coordinates
            right_up_ca, right_down_ca, left_down_ca, left_up_ca = right_up, left_up, right_down, left_down
            # Mark the vertices and their coordinates
            coordinates = [
                (right_up, right_up_ca),
                (left_up, left_up_ca),
                (right_down, right_down_ca),
                (left_down, left_down_ca),
            ]
            for point, coord in coordinates:
                cv2.circle(frame, tuple(point), 5, (0, 0, 255), -1)
                cv2.putText(frame, f'{point} {tuple(map(int, coord))}', tuple(point), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                            (0, 255, 0), 2)

def detect_with_yolo(model, frame):
    results = model.predict(source=frame)
    confidence_threshold = 0.75
    filtered_results = []
    for result in results:
        boxes = result.boxes
        high_conf_boxes = [box for box in boxes if box.conf > confidence_threshold]
        result.boxes = high_conf_boxes
        filtered_results.append(result)
        for box in high_conf_boxes:
            xyxy = box.xyxy[0]
            x_center = (xyxy[0] + xyxy[2]) / 2
            y_center = (xyxy[1] + xyxy[3]) / 2
            # Map the class index to its name; the model is assumed to use the
            # class names 'white' and 'black'.
            color = 'white' if model.names[int(box.cls)] == 'white' else 'black'
            if not (globals.initial_left_up_world[0] <= x_center <= globals.initial_right_down_world[0] and
                    globals.initial_left_down_world[1] <= y_center <= globals.initial_right_up_world[1]):
                # The piece lies outside the board area: record it by colour
                if color == 'white':
                    globals.white_ca.append((color, (x_center, y_center)))
                else:
                    globals.black_ca.append((color, (x_center, y_center)))
            else:
                # The piece lies on the board: compute the centre of every subsquare
                # and snap the detection to the nearest one
                centers = calculate_subsquare_centers(globals.initial_left_up_world, globals.initial_left_down_world,
                                                      globals.initial_right_up_world, globals.initial_right_down_world)
                min_dist = float('inf')
                min_index = -1
                for i, center in enumerate(centers):
                    dist = np.linalg.norm(np.array([x_center, y_center]) - np.array(center))
                    if dist < min_dist:
                        min_dist = dist
                        min_index = i + 1  # subsquare indices start at 1
                # Update the board state with the piece colour and its location
                globals.board_ca[min_index] = color
                globals.board_locate[min_index] = (x_center, y_center)
    annotated_frame = filtered_results[0].plot()
    return annotated_frame

# Assuming the model and frame are already defined:
# annotated_frame = detect_with_yolo(model, frame)
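# A minimal offline sketch (hypothetical file names, assuming a saved test image
# and that the board-corner globals have been initialised first, e.g. via
# read_variables_from_file()):
#   model = YOLO("best.pt")
#   frame = cv2.imread("board.jpg")
#   annotated = detect_with_yolo(model, frame)
#   cv2.imshow("detections", annotated)
#   cv2.waitKey(0)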

def read_variables_from_file():
    variables = {}
    with open('stderr.txt', 'r') as file:
        for line in file:
            name, value = line.strip().split(' = ')
            variables[name] = value
    # Assign the values read from the file to the global board-corner variables
    # (note: the values are kept as the raw strings read from the file)
    globals.initial_right_up_world = variables.get('initial_right_up_ca')
    globals.initial_right_down_world = variables.get('initial_right_down_ca')
    globals.initial_left_down_world = variables.get('initial_left_down_ca')
    globals.initial_left_up_world = variables.get('initial_left_up_ca')
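# A sketch of the stderr.txt layout that read_variables_from_file() expects
# (an assumption inferred from the ` = ` split above; each value is kept as a
# raw string):
#   initial_right_up_ca = <value>
#   initial_right_down_ca = <value>
#   initial_left_down_ca = <value>
#   initial_left_up_ca = <value>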

def main_loop():
    # read_variables_from_file()
    DevInfo = enumerate_cameras()
    if not DevInfo:
        return
    hCamera, cap, monoCamera = init_camera(DevInfo)
    FrameBufferSize = cap.sResolutionRange.iWidthMax * cap.sResolutionRange.iHeightMax * (1 if monoCamera else 3)
    pFrameBuffer = mvsdk.CameraAlignMalloc(FrameBufferSize, 16)
    model = YOLO("best.pt")
    try:
        while (cv2.waitKey(1) & 0xFF) != ord('q'):
            frame = get_frame(hCamera, pFrameBuffer, monoCamera)
            frame = crop_frame(frame)
            frame = cv2.flip(frame, 1)  # horizontal flip
            # frame = cv2.flip(frame, 0)  # vertical flip
            contours, edged = process_frame_for_contours(frame)
            draw_largest_contour(frame, contours)
            annotated_frame = detect_with_yolo(model, frame)
            cv2.imshow("Edged Image", edged)
            cv2.imshow("Press 'q' to quit, 's' to save", annotated_frame)
    except mvsdk.CameraException as e:
        if e.error_code != mvsdk.CAMERA_STATUS_TIME_OUT:
            print("CameraGetImageBuffer failed({}): {}".format(e.error_code, e.message))
    finally:
        mvsdk.CameraUnInit(hCamera)
        mvsdk.CameraAlignFree(pFrameBuffer)
        cv2.destroyAllWindows()

def start():
    try:
        main_loop()
    finally:
        cv2.destroyAllWindows()
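
# Entry-point sketch: the module only defines start(); if it is meant to be run
# directly (an assumption -- it may instead be imported elsewhere), a standard
# guard would be:
if __name__ == "__main__":
    start()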