重庆大学-李睿/Badminton_3Dtrajectory

加入 Gitee
与超过 1200万 开发者一起发现、参与优秀开源项目,私有仓库也完全免费 :)
免费加入
该仓库未声明开源许可证文件(LICENSE),使用请关注具体项目描述及其代码上游依赖。
克隆/下载
Fearture_3d_wrapper.py
import os
from collections import deque
import cv2
import numpy as np
from PIL import Image, ImageDraw
import csv
import sys
from math import sqrt
from std_msgs.msg import Header
import rospy
sys.path.append('D:/vscode/Python_code/badminton_trajectory')
from msg import Info
def bounding_line(input_path):
    line_list = []
    with open(input_path) as b_lines:
        lineCSV = csv.reader(b_lines, delimiter=',')
        for row in lineCSV:
            line_list.append(row)
    p_close_left = [line_list[0][1], line_list[0][2]]  # the four court corner points
    p_close_right = []  # the remaining three corners are left unfilled here
    p_far_left = []
    p_far_right = []
    return p_close_left, p_close_right, p_far_left, p_far_right
def publish_image(real_x, real_y, real_z, stroke, drop):
    # publishes one detection on the module-level publisher `pub` created in feature_point.main()
    detect_result = Info()
    rate = rospy.Rate(30)
    header = Header(stamp=rospy.Time.now())
    header.frame_id = 'map'
    detect_result.x = real_x
    detect_result.y = real_y
    detect_result.z = real_z
    detect_result.stroke = stroke
    detect_result.drop = drop
    pub.publish(detect_result)
    rate.sleep()
def compute_badminton_position(img_high, img_width, x_world, y_world, z_world, focal, R_o2c, T_o2c):
    K = np.array([[focal[0], 0, img_width / 2],
                  [0, focal[0], img_high / 2],
                  [0, 0, 1]]).reshape(3, 3)  # intrinsic matrix
    R_T = np.array([[R_o2c[0][0][0], R_o2c[0][0][1], R_o2c[0][0][2], T_o2c[0][0]],
                    [R_o2c[0][1][0], R_o2c[0][1][1], R_o2c[0][1][2], T_o2c[0][1]],
                    [R_o2c[0][2][0], R_o2c[0][2][1], R_o2c[0][2][2], T_o2c[0][2]]]).reshape(3, 4)  # extrinsic matrix
    K_R_T = np.dot(K, R_T)
    depth = K_R_T[2][0] * x_world + K_R_T[2][1] * y_world + K_R_T[2][2] * z_world + K_R_T[2][3]
    x_pixel_position = (K_R_T[0][0] * x_world + K_R_T[0][1] * y_world + K_R_T[0][2] * z_world + K_R_T[0][3]) / depth
    y_pixel_position = (K_R_T[1][0] * x_world + K_R_T[1][1] * y_world + K_R_T[1][2] * z_world + K_R_T[1][3]) / depth
    badminton_pixel_position = np.array([x_pixel_position, y_pixel_position, depth])
    print(badminton_pixel_position.shape)
    return badminton_pixel_position
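# A minimal usage sketch of the projection above, with placeholder calibration values;
# the real focal length, R_o2c and T_o2c come from camera calibration and are not
# defined in this file. The helper below is for illustration only and is never called.
def _example_projection():
    focal = [1000.0]                      # assumed focal length in pixels
    R_o2c = np.eye(3).reshape(1, 3, 3)    # assumed world-to-camera rotation
    T_o2c = np.array([[0.0, 0.0, 3.0]])   # assumed translation: camera 3 m from the world origin
    # project the world point (1.0, 2.0, 0.0) into an assumed 1280x720 image;
    # the result is [u, v, depth] in pixel coordinates
    return compute_badminton_position(720, 1280, 1.0, 2.0, 0.0, focal, R_o2c, T_o2c)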
class feature_point:
    def __init__(self):
        ### get the video and trajectory-point files
        try:
            self.input_video_path = sys.argv[1]
            self.input_csv_path = sys.argv[2]
            # output_video_path = sys.argv[3]
            if (not self.input_video_path) or (not self.input_csv_path):
                raise ValueError('missing input path')
        except Exception:
            print('usage: python show_trajectory.py <input_video_path> <input_csv_path>')
            exit(1)
    def main(self, line_path=None, person_path=None):
        ### read the video and trajectory-point data
        with open(self.input_csv_path) as csvfile:
            readCSV = csv.reader(csvfile, delimiter=',')
            x, y, z = [], [], []    # world coordinates of every detected point
            img_x, img_y = [], []   # image (pixel) coordinates
            list1 = []              # all rows of the trajectory CSV
            feature_point = []      # feature-point coordinates and their frame indices
            for row in readCSV:
                list1.append(row)
        n_frame = len(list1)
        ### read the court boundary-line parameters
        p_close_left, p_close_right, p_far_left, p_far_right = bounding_line(line_path)
        ### read the detected player positions
        with open(person_path) as person_pos:
            read_CSV = csv.reader(person_pos, delimiter=',')
            x1, x2 = [], []          # coordinates of every detected player point
            list_person = []         # all rows of the player CSV
            for row in read_CSV:
                list_person.append(row)
        global pub                   # publisher used by publish_image()
        rospy.init_node('Feature_point')
        pub = rospy.Publisher("/feature_point", Info, queue_size=10)
        cout = 0                     # index of the current coordinate in the x/y lists
        frame_idx = -1               # current video frame; the first frame has index 0
        q = deque()                  # sliding window over the last three detected points
        for i in range(3):
            q.append(None)
        feature_2 = deque()          # frame indices of the two most recent feature points
        feature_2.append(None)
        feature_xyz = deque()        # x-list indices of the two most recent feature points
        feature_xyz.append(None)
        video = cv2.VideoCapture(self.input_video_path)
        """
        fps = 30
        output_width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
        output_height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
        output_video_path = input_video_path[:-4]+'_predict_curve'+input_video_path[-4:]
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        output_video = cv2.VideoWriter(output_video_path, fourcc, fps, (output_width, output_height))
        """
        for i in range(1, n_frame):
            frame_idx += 1
            # if int(float(list1[i][2])) != 0 or int(float(list1[i][3])) != 0:
            if list1[i][1] == '1':
                a = float(list1[i][2])  # x coordinate
                b = float(list1[i][3])  # y coordinate
                c = float(list1[i][4])  # z coordinate
                x.append(a)
                y.append(b)
                z.append(c)
                img_x.append(int(float(list1[i][6])))
                img_y.append(int(float(list1[i][7])))
                cout += 1
                q.append([frame_idx, a, b, c])
                q.popleft()
                if cout == 1:
                    feature_point.append([frame_idx, a, b, c])
                    feature_2.append(frame_idx)
                    feature_xyz.append(0)
                if cout == 2:
                    break
        first_tra = frame_idx + 1
        # print("first_tra=", first_tra)
        for i in range(first_tra, n_frame):
            # frames += [int(list1[i][0])]
            frame_idx += 1
            # if int(float(list1[i][2])) != 0 or int(float(list1[i][3])) != 0 or int(float(list1[i][4])):
            if list1[i][1] == '1':
                a = float(list1[i][2])
                b = float(list1[i][3])
                c = float(list1[i][4])
                pixel_x = int(float(list1[i][6]))
                pixel_y = int(float(list1[i][7]))
                if abs(a - q[2][1]) < 0.15 or abs(b - q[2][2]) < 0.15:  # filter out points too close to the previous one
                    list1[i][1] = list1[i][2] = list1[i][3] = list1[i][4] = list1[i][6] = list1[i][7] = '0'
                    continue
                """ ### check whether the point lies far outside the court boundary; if it does,
                    ### it is probably an outlier -- adjust this to the actual setup
                edge_y = (corner_y[1] - corner_y[0])/(corner_x[1] - corner_x[0]) * (a - corner_x[0]) + corner_y[0]
                edge_x = (corner_x[1] - corner_x[0])/(corner_y[1] - corner_y[0]) * (b - corner_y[0]) + corner_x[0]
                if edge_y > b + 2 or edge_x > a + 2:
                    list1[i][2] = list1[i][3] = list1[i][4] = '0'
                    continue
                """
                q.append([frame_idx, a, b, c])
                q.popleft()
                # skip points whose z coordinate is inconsistent with the rest of the curve
                if (q[0][1]-q[1][1]) * (q[1][1]-q[2][1]) > 0 and (q[0][3] - q[1][3]) * (q[1][3] - q[2][3]) < 0:
                    p = q.popleft()
                    q.popleft()
                    q.appendleft(p)
                    q.appendleft(None)
                    continue
                # print(q)
                # drop-point detection (unfinished): if the time gap between detections is large,
                # the shuttle is assumed to have landed
                if float(list1[i][5]) - float(list1[q[1][0]+1][5]) > 300:
                    if x[feature_xyz[0]] < x[feature_xyz[1]]:
                        drop_point_x = (- curve_fit2[1] + sqrt(curve_fit2[1]**2 - 4 * curve_fit2[0] * curve_fit2[2])) / (2 * curve_fit2[0])  # quadratic root formula
                    if x[feature_xyz[0]] > x[feature_xyz[1]]:
                        drop_point_x = (- curve_fit2[1] - sqrt(curve_fit2[1]**2 - 4 * curve_fit2[0] * curve_fit2[2])) / (2 * curve_fit2[0])
                    curve_fity = np.polyfit(y[feature_xyz[0]:(feature_xyz[1]+1)], z[feature_xyz[0]:(feature_xyz[1]+1)], 2)  # projection of the 3D curve onto the yOz plane
                    if y[feature_xyz[0]] < y[feature_xyz[1]]:
                        drop_point_y = (- curve_fity[1] + sqrt(curve_fity[1]**2 - 4 * curve_fity[0] * curve_fity[2])) / (2 * curve_fity[0])
                    if y[feature_xyz[0]] > y[feature_xyz[1]]:
                        drop_point_y = (- curve_fity[1] - sqrt(curve_fity[1]**2 - 4 * curve_fity[0] * curve_fity[2])) / (2 * curve_fity[0])
                    # img_high, img_width, focal, R_o2c and T_o2c are camera parameters that this
                    # file assumes are provided elsewhere (e.g. loaded from calibration)
                    badminton_pixel_position = compute_badminton_position(img_high, img_width, drop_point_x, drop_point_y, 0, focal, R_o2c, T_o2c)
                    # (drop_point_x, drop_point_y) converted to image coordinates via compute_badminton_position above
                    cv2.circle(img1, (int(badminton_pixel_position[0]), int(badminton_pixel_position[1])), 5, (255, 0, 0), -1)
                    cv2.putText(img1, str([drop_point_x, drop_point_y]),
                                (int(badminton_pixel_position[0]) + 20, int(badminton_pixel_position[1]) + 10), 0, 1,
                                [225, 255, 255], thickness=2, lineType=cv2.LINE_AA)  # label the drop-point coordinates
                    cv2.namedWindow('Drop_point', flags=cv2.WINDOW_NORMAL |
                                    cv2.WINDOW_KEEPRATIO | cv2.WINDOW_GUI_EXPANDED)
                    cv2.imshow('Drop_point', img1)
                    publish_image(drop_point_x, drop_point_y, 0, 0, 1)
                    # output_video.write(img1)
                    key = cv2.waitKey(50)
                    cv2.destroyWindow('Drop_point')
                    continue
                """
                # plane through three known 3D points: Ax + By + Cz + D = 0 (general form),
                # with D = -(A * x1 + B * y1 + C * z1), or A(x - x1) + B(y - y1) + C(z - z1) = 0 (point-normal form)
                A = (y2 - y1)*(z3 - z1) - (z2 - z1)*(y3 - y1)
                B = (x3 - x1)*(z2 - z1) - (x2 - x1)*(z3 - z1)
                C = (x2 - x1)*(y3 - y1) - (x3 - x1)*(y2 - y1)
                """
                # check whether the previous point is a feature point (local extremum in x)
                if (q[0][1]-q[1][1]) * (q[1][1]-q[2][1]) < 0 and abs(q[0][1] - q[1][1]) > 0.15 and abs(q[1][1] - q[2][1]) > 0.15:
                    feature_point.append([q[1][0] - 1, x[cout - 1], y[cout - 1], z[cout - 1]])  # store the feature-point coordinates
                    feature_2.append(q[1][0] - 1)  # the current frame is already one frame past the feature point, so it is not included
                    feature_2.popleft()
                    feature_xyz.append(cout - 1)
                    feature_xyz.popleft()
                    # print('feature=', feature_point)
                    # print('x=', x[feature_xyz[0]:feature_xyz[1]])
                    # print('x_num=', x[feature_xyz[0]])
                    # print('y_num=', feature_xyz[1])
                    # print(feature_2[1] - feature_2[0] + 1)
                    # print(feature_2[0])
                    # print(feature_2[1])
                    curve_fit2 = np.polyfit(x[feature_xyz[0]:(feature_xyz[1]+1)], z[feature_xyz[0]:(feature_xyz[1]+1)], 2)  # projection of the 3D curve onto the xOz plane
                    curve_pixel = np.polyfit(img_x[feature_xyz[0]:(feature_xyz[1]+1)], img_y[feature_xyz[0]:(feature_xyz[1]+1)], 2)  # curve in the pixel plane
                    if curve_pixel[0] > 0:
                        l_feature = min(img_x[feature_xyz[0]], img_x[feature_xyz[1]])
                        r_feature = max(img_x[feature_xyz[0]], img_x[feature_xyz[1]])
                        person_left = int(float(list_person[frame_idx + 1][1]))
                        person_right = int(float(list_person[frame_idx + 1][2]))
                        # user-defined distance threshold between the player and the hitting point,
                        # used to decide how far the predicted curve should be drawn
                        if l_feature - person_left >= 30 and person_right - r_feature >= 30:
                            l_feature = person_left + 30
                            r_feature = person_right - 30
                        if l_feature - person_left >= 30 and person_right - r_feature < 30:
                            l_feature = person_left + 30
                        if l_feature - person_left < 30 and person_right - r_feature >= 30:
                            r_feature = person_right - 30
                        plotx = np.linspace(l_feature, r_feature, r_feature - l_feature + 1)  # x coordinates with a step of one pixel
                        ploty = curve_pixel[0]*plotx**2 + curve_pixel[1]*plotx + curve_pixel[2]  # corresponding values of the fitted quadratic
                        curve_fited2 = np.array([np.transpose(np.vstack([plotx, ploty]))])  # point set of the fitted quadratic
                        curve_per_frame = int((abs(r_feature - l_feature) + 1) / (feature_2[1] - feature_2[0] + 1))  # curve length drawn per frame
                        for j in range(0, feature_2[1] - feature_2[0] + 1):
                            ret, img1 = video.read()
                            if not ret:
                                break
                            # red polyline through the sampled points of the fitted quadratic, approximating the curve
                            cv2.polylines(img1, np.int_([curve_fited2[::, :j * curve_per_frame]]), False, (0, 0, 255), 5)
                            for feat in range(feature_xyz[0] + 1, feature_xyz[1] - 1):
                                cv2.circle(img1, (img_x[feat], img_y[feat]), 5, (255, 0, 0), -1)
                            cv2.circle(img1, (img_x[feature_xyz[0]], img_y[feature_xyz[0]]), 5, (0, 255, 0), -1)
                            # publish_image(real_x, real_y, real_z, 1, 0)
                            cv2.circle(img1, (img_x[feature_xyz[1] - 1], img_y[feature_xyz[1] - 1]), 5, (0, 255, 0), -1)
                            cv2.namedWindow('Curve', flags=cv2.WINDOW_NORMAL |
                                            cv2.WINDOW_KEEPRATIO | cv2.WINDOW_GUI_EXPANDED)
                            cv2.imshow('Curve', img1)
                            # output_video.write(img1)
                            key = cv2.waitKey(50)
                            if key & 0xFF == ord('q') or key == 27:
                                cv2.destroyAllWindows()
                                break
                            if key & 0xFF == ord('p'):
                                action_reg = True
                    else:
                        for j in range(0, feature_2[1] - feature_2[0] + 1):
                            ret, img1 = video.read()
                            if not ret:
                                break
                            for feat in range(feature_xyz[0], feature_xyz[1]):
                                cv2.circle(img1, (img_x[feat], img_y[feat]), 5, (255, 0, 0), -1)
                            cv2.namedWindow('Curve', flags=cv2.WINDOW_NORMAL |
                                            cv2.WINDOW_KEEPRATIO | cv2.WINDOW_GUI_EXPANDED)
                            cv2.imshow('Curve', img1)
                            # output_video.write(img1)
                            key = cv2.waitKey(50)
                            if key & 0xFF == ord('q') or key == 27:
                                cv2.destroyAllWindows()
                                break
                            if key & 0xFF == ord('p'):
                                action_reg = True
                x.append(a)
                y.append(b)
                z.append(c)
                img_x.append(pixel_x)
                img_y.append(pixel_y)
                cout += 1
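# A minimal entry-point sketch, assuming the script is run with the video and trajectory
# CSV on the command line (consumed by __init__ via sys.argv); the court-line and
# player-position file names below are placeholders for illustration.
if __name__ == '__main__':
    fp = feature_point()
    fp.main(line_path='court_lines.csv', person_path='person_position.csv')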