Merge remote-tracking branch 'origin/master'

This commit is contained in:
FrankCV2048
2024-10-11 20:00:20 +08:00
8 changed files with 632 additions and 66 deletions

218
Trace/calibrate_test.py Normal file
View File

@ -0,0 +1,218 @@
import sys
import time
import numpy as np
import cv2
import glob
from math import *
import pandas as pd
from pathlib import Path
import openpyxl
from COM.COM_Robot import RobotClient
from app import MainWindow
from Vision.tool.CameraRVC import camera
from Model.RobotModel import DataAddress,MoveType
from PySide6.QtWidgets import QMainWindow, QApplication, QLineEdit
# 用于根据欧拉角计算旋转矩阵
def myRPY2R_robot(x, y, z):
    """Build a 3x3 rotation matrix from roll/pitch/yaw angles (radians).

    Composed as Rz @ Ry @ Rx, i.e. rotations about X, then Y, then Z —
    matching the angle convention shown on the teach pendant.
    """
    cx, sx = cos(x), sin(x)
    cy, sy = cos(y), sin(y)
    cz, sz = cos(z), sin(z)
    rot_x = np.array([[1, 0, 0],
                      [0, cx, -sx],
                      [0, sx, cx]])
    rot_y = np.array([[cy, 0, sy],
                      [0, 1, 0],
                      [-sy, 0, cy]])
    rot_z = np.array([[cz, -sz, 0],
                      [sz, cz, 0],
                      [0, 0, 1]])
    return rot_z @ rot_y @ rot_x
# 用于根据位姿计算变换矩阵
def pose_robot(x, y, z, Tx, Ty, Tz):
    """Build a 4x4 homogeneous end-to-base transform from a robot pose.

    Args:
        x, y, z: translation components (robot units, mm).
        Tx, Ty, Tz: orientation as Euler angles in degrees.

    Returns:
        4x4 numpy array [[R, t], [0 0 0 1]].
    """
    # Degrees -> radians for the rotation helper.
    thetaX = Tx / 180 * pi
    thetaY = Ty / 180 * pi
    thetaZ = Tz / 180 * pi
    R = myRPY2R_robot(thetaX, thetaY, thetaZ)
    t = np.array([[x], [y], [z]])
    RT1 = np.column_stack([R, t])  # append translation column
    # np.row_stack is deprecated in recent NumPy; np.vstack is the
    # supported equivalent.
    RT1 = np.vstack((RT1, np.array([0, 0, 0, 1])))
    return RT1
'''
#用来从棋盘格图片得到相机外参
:param img_path: 读取图片路径
:param chess_board_x_num: 棋盘格x方向格子数
:param chess_board_y_num: 棋盘格y方向格子数
:param chess_board_len: 单位棋盘格长度,mm
:param cam_mtx: 相机内参
:param cam_dist: 相机畸变参数
:return: 相机外参
'''
def get_RT_from_chessboard(img_path, chess_board_x_num, chess_board_y_num, chess_board_len, cam_mtx, cam_dist):
    """Estimate the board-to-camera extrinsic matrix from one chessboard image.

    Args:
        img_path: path of the image to read.
        chess_board_x_num: number of inner corners along x.
        chess_board_y_num: number of inner corners along y.
        chess_board_len: side length of one square, mm.
        cam_mtx: 3x3 camera intrinsic matrix.
        cam_dist: camera distortion coefficients.

    Returns:
        4x4 homogeneous board->camera transform, or None when the chessboard
        corners cannot be found.  (The original returned the int 0 on failure,
        which callers then indexed and crashed on.)
    """
    # Sub-pixel corner refinement termination criteria.
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    # Board corner coordinates in the board frame (z = 0 plane), in mm.
    objp = np.zeros((chess_board_y_num * chess_board_x_num, 3), np.float32)
    objp[:, :2] = np.mgrid[0:chess_board_x_num, 0:chess_board_y_num].T.reshape(-1, 2) * chess_board_len
    img = cv2.imread(img_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    found, corners = cv2.findChessboardCorners(gray, (chess_board_x_num, chess_board_y_num), None)
    if not found:
        return None
    corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
    # Visual sanity check of the detected corners.
    vis = cv2.drawChessboardCorners(img, (chess_board_x_num, chess_board_y_num), corners2, found)
    cv2.imshow('Corners', vis)
    cv2.waitKey(500)  # display for 500 ms
    # PnP gives the board pose expressed in the camera frame.
    _, rvec, tvec = cv2.solvePnP(objp, corners2, cam_mtx, cam_dist)
    RT = np.column_stack((cv2.Rodrigues(rvec)[0], tvec))
    RT = np.vstack((RT, np.array([0, 0, 0, 1])))
    return RT
# 计算board to cam 变换矩阵
# img_path = "./hand_eye" 棋盘格存放文件夹
def get_chess_to_cam_mtx(img_path, chess_board_x_num, chess_board_y_num, chess_board_len, cam_mtx, cam_dist):
    """Collect board-to-camera rotations/translations for all calibration images.

    Args:
        img_path: folder containing image_*.png calibration shots.
        (remaining parameters forwarded to get_RT_from_chessboard)

    Returns:
        (R_list, T_list): one 3x3 rotation and one 3x1 translation per image,
        in numeric filename order.
    """
    pattern = img_path + "/image_*.png"

    def _img_index(p):
        # glob order is OS-dependent, and plain lexicographic sorting puts
        # image_10 before image_2; sort by the numeric index so the i-th
        # image lines up with the i-th robot pose row in the Excel file.
        tail = Path(p).stem.split('_')[-1]
        return int(tail) if tail.isdigit() else -1

    images = sorted(glob.glob(pattern), key=_img_index)
    R_all_chess_to_cam_1 = []
    T_all_chess_to_cam_1 = []
    for fname in images:
        RT = get_RT_from_chessboard(fname, chess_board_x_num, chess_board_y_num, chess_board_len, cam_mtx, cam_dist)
        if not isinstance(RT, np.ndarray):
            # Corner detection failed (helper returns 0/None).  Skipping keeps
            # the run alive, but note the image/pose pairing with the Excel
            # rows then shifts — re-shoot the failing image if this triggers.
            print("chessboard not found, skipping", fname)
            continue
        print("chesstocam", RT)
        R_all_chess_to_cam_1.append(RT[:3, :3])
        T_all_chess_to_cam_1.append(RT[:3, 3].reshape((3, 1)))
    return R_all_chess_to_cam_1, T_all_chess_to_cam_1
def save_images(image, path, img_num):
    """Persist one captured frame as <path>/image_<img_num>.png."""
    filename = f'image_{img_num}.png'
    cv2.imwrite(str(path / filename), image)
def save_pic_and_excel():
    """Drive the robot through fixed poses, photograph each, record poses to Excel.

    For each pose in `fixed_joint_positions`: move, wait, capture an image,
    save it as calibration_images/image_<n>.png, and read back the real
    Cartesian pose.  All (pose, image-path) pairs are written to one Excel file.

    NOTE(review): relies on module-level globals `robot` and `ca`, whose
    construction is commented out in __main__ — confirm they are created
    before calling this.
    NOTE(review): writes robot_calibration_data.xlsx, while
    get_end_to_base_mtx() reads a differently named Excel file — verify the
    two stay in sync.
    """
    robot.send_switch_tool_command()
    # Initial joint position (degrees).
    # NOTE(review): appears unused below — confirm whether it should be sent.
    initial_joint_angles = [5, 0, 0, 0, -12, 0]
    # Pre-taught joint-space poses (degrees) covering varied board views.
    fixed_joint_positions = [
        [0.000,0.000,-0.005,0.001,-68.752,0.000],
        [13.944,-2.732,-0.007,0.001,-71.402,0.000],
        [7.256,-2.732,-0.011,0.000,-71.402,-13.003],
        [1.061,3.196,-4.484,0.000,-71.402,-13.003],
        [1.061,-7.045,-4.486,8.935,-60.210,-13.003],
        [-12.811,-7.045,-4.488,15.131,-60.210,-5.156],
        [-12.811,3.929,-12.911,15.131,-62.695,-5.156],
        [14.269,3.929,-12.915,-7.996,-62.695,-5.156],
        [14.269,3.929,-9.102,-9.642,-66.652,6.868],
        [14.269,10.147,-13.732,-9.642,-66.652,6.868],
        [4.356,10.147,-13.736,-2.658,-66.652,6.868],
        [4.356,10.147,-13.736,-2.658,-66.652,21.705],
        [4.356,10.147,-3.741,-2.658,-74.334,21.705]
    ]
    # Excel output path for the recorded robot coordinates.
    excel_path = Path("calibration_images//robot_calibration_data.xlsx")
    # Accumulates [pose, image path] rows for the DataFrame.
    data_list = []
    img_num = 0  # running image index
    image_save_path = Path("calibration_images")
    image_save_path.mkdir(exist_ok=True)  # create the image folder if missing
    # Visit each taught pose in turn.
    for joint_angles in fixed_joint_positions:
        # Move the arm to the pose (joint-space motion).
        robot.send_position_command(*joint_angles, MoveType.AXIS)
        time.sleep(20)  # fixed wait for the motion to finish and settle
        # Capture and save the image for this pose.
        _, rgb_image = ca.get_img()
        save_images(rgb_image, image_save_path, img_num)
        img_path = image_save_path / f'image_{img_num}.png'
        img_num += 1
        # Read back the actual Cartesian pose of the end effector.
        real_position = robot.robotClient.status_model.getRealPosition()
        position = [real_position.X, real_position.Y, real_position.Z,
                    real_position.U, real_position.V, real_position.W]
        # Pair the pose with the image it corresponds to.
        data_list.append([position, str(img_path)])
    # Persist all rows to Excel.
    df = pd.DataFrame(data_list, columns=["Position", "Image Path"])
    df.to_excel(excel_path, index=False)
    print(f"数据已保存至 {excel_path}")
# 计算end to base变换矩阵
# path = "./hand_eye" 棋盘格和坐标文件存放文件夹
def get_end_to_base_mtx(path, coord_filename='机械臂末端位姿.xlsx'):
    """Compute end-effector-to-base transforms for every calibration image.

    Args:
        path: folder containing image_*.png and the pose Excel file.
        coord_filename: Excel file with one recorded robot pose per row.
            Default kept for backward compatibility.
            NOTE(review): save_pic_and_excel() writes
            'robot_calibration_data.xlsx' — pass that name if the data came
            from it.

    Returns:
        (R_list, T_list): one 3x3 rotation and one 3x1 translation per image.
    """
    img_path = path + "/image_*.png"
    coord_file_path = path + '/' + coord_filename
    sheet_1 = pd.read_excel(coord_file_path)
    R_all_end_to_base_1 = []
    T_all_end_to_base_1 = []
    # Only the image COUNT is used here; row i of the sheet is assumed to
    # correspond to image i.
    images = glob.glob(img_path)
    for i in range(len(images)):
        # Pose is stored as a stringified list "[x, y, z, u, v, w]".
        pose_str = sheet_1.values[i][0]
        pose_array = [float(num) for num in pose_str[1:-1].split(',')]
        RT = pose_robot(pose_array[0], pose_array[1], pose_array[2],
                        pose_array[3], pose_array[4], pose_array[5])
        print("end_base", RT)
        R_all_end_to_base_1.append(RT[:3, :3])
        T_all_end_to_base_1.append(RT[:3, 3].reshape((3, 1)))
    return R_all_end_to_base_1, T_all_end_to_base_1
if __name__ == "__main__":
print("手眼标定测试\n")
np.set_printoptions(suppress=True)
# app = QApplication(sys.argv)
# robot = MainWindow()
# ca = camera()
# dataaddress = DataAddress()
# 相机内参
cam_mtx = np.array([[2402.1018066406, 0.0000000000, 739.7069091797],
[0.0000000000, 2401.7873535156, 584.7304687500],
[0.00000000e+00, 0.00000000e+00, 1.00000000e+00]], dtype=np.float64)
# 畸变系数
cam_dist = np.array([-0.0424814112, 0.2438604534, -0.3833343089, -0.3833343089, 0.0007602088],
dtype=np.float64)
# save_pic_and_excel()
chess_board_x_num = 11 # 标定板长度
chess_board_y_num = 8
chess_board_len = 30
path = "./calibration_images"
R_all_chess_to_cam_1, T_all_chess_to_cam_1 = get_chess_to_cam_mtx(path, chess_board_x_num, chess_board_y_num,
chess_board_len, cam_mtx, cam_dist)
R_all_end_to_base_1, T_all_end_to_base_1 = get_end_to_base_mtx(path)
R, T = cv2.calibrateHandEye(R_all_end_to_base_1, T_all_end_to_base_1, R_all_chess_to_cam_1, T_all_chess_to_cam_1,
cv2.CALIB_HAND_EYE_TSAI) # 手眼标定
RT = np.column_stack((R, T))
RT = np.row_stack((RT, np.array([0, 0, 0, 1]))) # 即为cam to end变换矩阵00
print('相机相对于末端的变换矩阵为:')
print(RT)

View File

@ -20,12 +20,13 @@ from Vision.tool.utils import class_names
from Vision.tool.utils import get_disk_space from Vision.tool.utils import get_disk_space
from Vision.tool.utils import remove_nan_mean_value from Vision.tool.utils import remove_nan_mean_value
from Vision.tool.utils import out_bounds_dete from Vision.tool.utils import out_bounds_dete
from Vision.tool.utils import uv_to_XY
import time import time
import os import os
class Detection: class Detection:
def __init__(self, use_openvino_model=True, cameraType = 'RVC'): # cameraType = 'RVC' or cameraType = 'Pe' def __init__(self, use_openvino_model=False, cameraType = 'RVC'): # cameraType = 'RVC' or cameraType = 'Pe'
""" """
初始化相机及模型 初始化相机及模型
:param use_openvino_model: 选择模型默认使用openvino :param use_openvino_model: 选择模型默认使用openvino
@ -33,16 +34,19 @@ class Detection:
""" """
self.use_openvino_model = use_openvino_model self.use_openvino_model = use_openvino_model
self.cameraType = cameraType
if self.use_openvino_model == False: if self.use_openvino_model == False:
model_path = ''.join([os.getcwd(), '/Vision/model/pt/best.pt']) model_path = ''.join([os.getcwd(), '/Vision/model/pt/best.pt'])
device = 'cpu' device = 'cpu'
self.cameraType = cameraType
if self.cameraType == 'RVC': if self.cameraType == 'RVC':
self.camera_rvc = camera_rvc() self.camera_rvc = camera_rvc()
self.seg_distance_threshold = 0.005 self.seg_distance_threshold = 0.005
else: elif self.cameraType == 'Pe':
self.camera_rvc = camera_pe() self.camera_rvc = camera_pe()
self.seg_distance_threshold = 10 self.seg_distance_threshold = 10
else:
print('相机参数错误')
return
self.model = yolov8_segment() self.model = yolov8_segment()
self.model.load_model(model_path, device) self.model.load_model(model_path, device)
else: else:
@ -51,17 +55,21 @@ class Detection:
if self.cameraType == 'RVC': if self.cameraType == 'RVC':
self.camera_rvc = camera_rvc() self.camera_rvc = camera_rvc()
self.seg_distance_threshold = 0.005 self.seg_distance_threshold = 0.005
else: elif self.cameraType == 'Pe':
self.camera_rvc = camera_pe() self.camera_rvc = camera_pe()
self.seg_distance_threshold = 10 self.seg_distance_threshold = 10
else:
print('相机参数错误')
return
self.model = yolov8_segment_openvino(model_path, device, conf_thres=0.3, iou_thres=0.3) self.model = yolov8_segment_openvino(model_path, device, conf_thres=0.3, iou_thres=0.3)
def get_position(self, Point_isVision=False, save_img_point=False, Height_reduce = 30, width_reduce = 30): def get_position(self, Point_isVision=False, Box_isPoint=True, save_img_point=0, Height_reduce = 30, width_reduce = 30):
""" """
检测料袋相关信息 检测料袋相关信息
:param Point_isVision: 点云可视化 :param Point_isVision: 点云可视化
:param save_img_point: 保存点云和图片 :param Box_isPoint: True 返回点云值; False 返回box相机坐标
:param save_img_point: 0不保存 ; 1保存原图 ;2保存处理后的图 ; 3保存点云和原图4 保存点云和处理后的图
:param Height_reduce: 检测框的高内缩像素 :param Height_reduce: 检测框的高内缩像素
:param width_reduce: 检测框的宽内缩像素 :param width_reduce: 检测框的宽内缩像素
:return ret: bool 相机是否正常工作 :return ret: bool 相机是否正常工作
@ -74,15 +82,18 @@ class Detection:
ret, img, pm = self.camera_rvc.get_img_and_point_map() # 拍照,获取图像及 ret, img, pm = self.camera_rvc.get_img_and_point_map() # 拍照,获取图像及
if self.camera_rvc.caminit_isok == True: if self.camera_rvc.caminit_isok == True:
if ret == 1: if ret == 1:
if save_img_point == True: if save_img_point != 0:
if get_disk_space(path=os.getcwd()) < 15: # 内存小于15G,停止保存数据 if get_disk_space(path=os.getcwd()) < 15: # 内存小于15G,停止保存数据
save_img_point = False save_img_point = 0
print('系统内存不足,无法保存数据') print('系统内存不足,无法保存数据')
else: else:
save_path = ''.join([os.getcwd(), '/Vision/model/data/', save_path = ''.join([os.getcwd(), '/Vision/model/data/',
time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime(time.time()))]) time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime(time.time()))])
save_img_name = ''.join([save_path, '.png']) save_img_name = ''.join([save_path, '.png'])
save_point_name = ''.join([save_path, '.xyz']) save_point_name = ''.join([save_path, '.xyz'])
if save_img_point==1 or save_img_point==3:
cv2.imwrite(save_img_name, img)
if save_img_point==3 or save_img_point==4:
row_list = list(range(1, img.shape[0], 2)) row_list = list(range(1, img.shape[0], 2))
column_list = list(range(1, img.shape[1], 2)) column_list = list(range(1, img.shape[1], 2))
pm_save = pm.copy() pm_save = pm.copy()
@ -165,8 +176,13 @@ class Detection:
''' '''
rect = cv2.minAreaRect(max_contour) rect = cv2.minAreaRect(max_contour)
if rect[1][0]-width_reduce < 30 or rect[1][1]-Height_reduce < 30:
rect_reduce = ( rect_reduce = (
(rect[0][0], rect[0][1]), (rect[1][0] - width_reduce, rect[1][1] - Height_reduce), rect[2]) (rect[0][0], rect[0][1]), (rect[1][0] - width_reduce, rect[1][1] - Height_reduce), rect[2])
else:
rect_reduce = (
(rect[0][0], rect[0][1]), (rect[1][0], rect[1][1]), rect[2])
# cv2.boxPoints可以将轮廓点转换为四个角点坐标 # cv2.boxPoints可以将轮廓点转换为四个角点坐标
box_outside = cv2.boxPoints(rect) box_outside = cv2.boxPoints(rect)
# 这一步不影响后面的画图,但是可以保证四个角点坐标为顺时针 # 这一步不影响后面的画图,但是可以保证四个角点坐标为顺时针
@ -223,11 +239,16 @@ class Detection:
box[1][0][1], box[1][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[1][0][1], box[1][0][0]) box[1][0][1], box[1][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[1][0][1], box[1][0][0])
box[2][0][1], box[2][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[2][0][1], box[2][0][0]) box[2][0][1], box[2][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[2][0][1], box[2][0][0])
box[3][0][1], box[3][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[3][0][1], box[3][0][0]) box[3][0][1], box[3][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[3][0][1], box[3][0][0])
if Box_isPoint == True:
box_point_x1, box_point_y1, box_point_z1 = remove_nan_mean_value(pm, box[0][0][1], box[0][0][0]) box_point_x1, box_point_y1, box_point_z1 = remove_nan_mean_value(pm, box[0][0][1], box[0][0][0])
box_point_x2, box_point_y2, box_point_z2 = remove_nan_mean_value(pm, box[1][0][1], box[1][0][0]) box_point_x2, box_point_y2, box_point_z2 = remove_nan_mean_value(pm, box[1][0][1], box[1][0][0])
box_point_x3, box_point_y3, box_point_z3 = remove_nan_mean_value(pm, box[2][0][1], box[2][0][0]) box_point_x3, box_point_y3, box_point_z3 = remove_nan_mean_value(pm, box[2][0][1], box[2][0][0])
box_point_x4, box_point_y4, box_point_z4 = remove_nan_mean_value(pm, box[3][0][1], box[3][0][0]) box_point_x4, box_point_y4, box_point_z4 = remove_nan_mean_value(pm, box[3][0][1], box[3][0][0])
else:
x1, y1, z1= uv_to_XY(box[0][0][0], box[0][0][1])
x2, y2, z2 = uv_to_XY(box[1][0][0], box[1][0][1])
x3, y3, z3 = uv_to_XY(box[2][0][0], box[2][0][1])
x4, y4, z4 = uv_to_XY(box[3][0][0], box[3][0][1])
x_rotation_center = int((box[0][0][0] + box[1][0][0] + box[2][0][0] + box[3][0][0]) / 4) x_rotation_center = int((box[0][0][0] + box[1][0][0] + box[2][0][0] + box[3][0][0]) / 4)
y_rotation_center = int((box[0][0][1] + box[1][0][1] + box[2][0][1] + box[3][0][1]) / 4) y_rotation_center = int((box[0][0][1] + box[1][0][1] + box[2][0][1] + box[3][0][1]) / 4)
point_x, point_y, point_z = remove_nan_mean_value(pm, y_rotation_center, x_rotation_center) point_x, point_y, point_z = remove_nan_mean_value(pm, y_rotation_center, x_rotation_center)
@ -235,15 +256,22 @@ class Detection:
if np.isnan(point_x): # 点云值为无效值 if np.isnan(point_x): # 点云值为无效值
continue continue
else: else:
if Box_isPoint == True:
box_list.append( box_list.append(
[[box_point_x1, box_point_y1, box_point_z1], [[box_point_x1, box_point_y1, box_point_z1],
[box_point_x2, box_point_y2, box_point_z2], [box_point_x2, box_point_y2, box_point_z2],
[box_point_x3, box_point_y3, box_point_z3], [box_point_x3, box_point_y3, box_point_z3],
[box_point_x4, box_point_y4, box_point_z4]]) [box_point_x4, box_point_y4, box_point_z4]])
else:
box_list.append([[x1, y1, z1],
[x2, y2, z2],
[x3, y3, z3],
[x4, y4, z4],
])
if self.cameraType=='RVC': if self.cameraType=='RVC':
xyz.append([point_x*1000, point_y*1000, point_z*1000]) xyz.append([point_x*1000, point_y*1000, point_z*1000])
Depth_Z.append(point_z*1000) Depth_Z.append(point_z*1000)
else: elif self.cameraType=='Pe':
xyz.append([point_x, point_y, point_z]) xyz.append([point_x, point_y, point_z])
Depth_Z.append(point_z) Depth_Z.append(point_z)
nx_ny_nz.append([a, b, c]) nx_ny_nz.append([a, b, c])
@ -271,12 +299,12 @@ class Detection:
outlier_cloud = pcd.select_by_index(inliers, invert=True) outlier_cloud = pcd.select_by_index(inliers, invert=True)
outlier_cloud.paint_uniform_color([0, 0, 1]) outlier_cloud.paint_uniform_color([0, 0, 1])
o3d.visualization.draw_geometries([inlier_cloud, outlier_cloud, pcd2]) o3d.visualization.draw_geometries([inlier_cloud, outlier_cloud, pcd2])
if save_img_point == True: if save_img_point == 2 or save_img_point ==4:
save_img = cv2.resize(img, (720, 540)) save_img = cv2.resize(img, (720, 540))
cv2.imwrite(save_img_name, save_img) cv2.imwrite(save_img_name, save_img)
return 1, img, xyz[_idx], nx_ny_nz[_idx], box_list[_idx] return 1, img, xyz[_idx], nx_ny_nz[_idx], box_list[_idx]
else: else:
if save_img_point == True: if save_img_point == 2 or save_img_point ==4:
save_img = cv2.resize(img,(720, 540)) save_img = cv2.resize(img,(720, 540))
cv2.imwrite(save_img_name, save_img) cv2.imwrite(save_img_name, save_img)
return 1, img, None, None, None return 1, img, None, None, None
@ -367,8 +395,13 @@ class Detection:
''' '''
rect = cv2.minAreaRect(max_contour) rect = cv2.minAreaRect(max_contour)
if rect[1][0] - width_reduce < 30 or rect[1][1] - Height_reduce < 30:
rect_reduce = ( rect_reduce = (
(rect[0][0], rect[0][1]), (rect[1][0] - width_reduce, rect[1][1] - Height_reduce), rect[2]) (rect[0][0], rect[0][1]), (rect[1][0] - width_reduce, rect[1][1] - Height_reduce),
rect[2])
else:
rect_reduce = (
(rect[0][0], rect[0][1]), (rect[1][0], rect[1][1]), rect[2])
# cv2.boxPoints可以将轮廓点转换为四个角点坐标 # cv2.boxPoints可以将轮廓点转换为四个角点坐标
box_outside = cv2.boxPoints(rect) box_outside = cv2.boxPoints(rect)
# 这一步不影响后面的画图,但是可以保证四个角点坐标为顺时针 # 这一步不影响后面的画图,但是可以保证四个角点坐标为顺时针
@ -402,7 +435,7 @@ class Detection:
if self.cameraType == 'RVC': if self.cameraType == 'RVC':
xyz.append([point_x * 1000, point_y * 1000, point_z * 1000]) xyz.append([point_x * 1000, point_y * 1000, point_z * 1000])
Depth_Z.append(point_z * 1000) Depth_Z.append(point_z * 1000)
else: elif self.cameraType == 'Pe':
xyz.append([point_x, point_y, point_z]) xyz.append([point_x, point_y, point_z])
Depth_Z.append(point_z) Depth_Z.append(point_z)
RegionalArea.append(cv2.contourArea(max_contour)) RegionalArea.append(cv2.contourArea(max_contour))

View File

@ -21,11 +21,12 @@ from Vision.tool.utils import class_names
from Vision.tool.utils import get_disk_space from Vision.tool.utils import get_disk_space
from Vision.tool.utils import remove_nan_mean_value from Vision.tool.utils import remove_nan_mean_value
from Vision.tool.utils import out_bounds_dete from Vision.tool.utils import out_bounds_dete
from Vision.tool.utils import uv_to_XY
import os import os
class Detection: class Detection:
def __init__(self, use_openvino_model=True, cameraType = 'RVC'): def __init__(self, use_openvino_model=False, cameraType = 'RVC'):
self.use_openvino_model = use_openvino_model self.use_openvino_model = use_openvino_model
self.cameraType = cameraType self.cameraType = cameraType
if self.use_openvino_model == False: if self.use_openvino_model == False:
@ -45,11 +46,12 @@ class Detection:
pass pass
def get_position(self, Point_isVision=False, save_img_point=True, seg_distance_threshold = 0.01, Height_reduce = 30, width_reduce = 30): def get_position(self, Point_isVision=False, Box_isPoint=False, save_img_point=0, seg_distance_threshold = 0.01, Height_reduce = 30, width_reduce = 30):
""" """
检测料袋相关信息 检测料袋相关信息
:param Point_isVision: 点云可视化 :param Point_isVision: 点云可视化
:param save_img_point: 保存点云和图片 :param Box_isPoint: True 返回点云值; False 返回box相机坐标
:param save_img_point: 0不保存 ; 1保存原图 ;2保存处理后的图 ; 3保存点云和原图4 保存点云和处理后的图
:param Height_reduce: 检测框的高内缩像素 :param Height_reduce: 检测框的高内缩像素
:param width_reduce: 检测框的宽内缩像素 :param width_reduce: 检测框的宽内缩像素
:return ret: bool 相机是否正常工作 :return ret: bool 相机是否正常工作
@ -57,21 +59,23 @@ class Detection:
:return xyz: list 目标中心点云值形如[x,y,z] :return xyz: list 目标中心点云值形如[x,y,z]
:return nx_ny_nz: list 拟合平面法向量,形如[a,b,c] :return nx_ny_nz: list 拟合平面法向量,形如[a,b,c]
:return box_list: list 内缩检测框四顶点,形如[[x1,y1],[],[],[]] :return box_list: list 内缩检测框四顶点,形如[[x1,y1],[],[],[]]
""" """
ret = 1 ret = 1
img = self.img img = self.img
pm = self.point pm = self.point
if ret == 1: if ret == 1:
if save_img_point == True: if save_img_point != 0:
if get_disk_space(path=os.getcwd())<15: # 内存小于15G,停止保存数据 if get_disk_space(path=os.getcwd())<15: # 内存小于15G,停止保存数据
save_img_point = False save_img_point = 0
print('系统内存不足,无法保存数据') print('系统内存不足,无法保存数据')
else: else:
save_path = ''.join([os.getcwd(), '/Vision/model/data/', save_path = ''.join([os.getcwd(), '/Vision/model/data/',
time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime(time.time()))]) time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime(time.time()))])
save_img_name = ''.join([save_path, '.png']) save_img_name = ''.join([save_path, '.png'])
save_point_name = ''.join([save_path, '.xyz']) save_point_name = ''.join([save_path, '.xyz'])
if save_img_point == 1 or save_img_point == 3:
cv2.imwrite(save_img_name, img)
if save_img_point == 3 or save_img_point == 4:
row_list = list(range(1, img.shape[0], 2)) row_list = list(range(1, img.shape[0], 2))
column_list = list(range(1, img.shape[1], 2)) column_list = list(range(1, img.shape[1], 2))
pm_save = pm.copy() pm_save = pm.copy()
@ -156,7 +160,12 @@ class Detection:
''' '''
rect = cv2.minAreaRect(max_contour) rect = cv2.minAreaRect(max_contour)
rect_reduce = ((rect[0][0], rect[0][1]), (rect[1][0] - width_reduce, rect[1][1] - Height_reduce), rect[2]) if rect[1][0] - width_reduce < 30 or rect[1][1] - Height_reduce < 30:
rect_reduce = (
(rect[0][0], rect[0][1]), (rect[1][0] - width_reduce, rect[1][1] - Height_reduce), rect[2])
else:
rect_reduce = (
(rect[0][0], rect[0][1]), (rect[1][0], rect[1][1]), rect[2])
# cv2.boxPoints可以将轮廓点转换为四个角点坐标 # cv2.boxPoints可以将轮廓点转换为四个角点坐标
box_outside = cv2.boxPoints(rect) box_outside = cv2.boxPoints(rect)
# 这一步不影响后面的画图,但是可以保证四个角点坐标为顺时针 # 这一步不影响后面的画图,但是可以保证四个角点坐标为顺时针
@ -201,8 +210,8 @@ class Detection:
ransac_n=5, ransac_n=5,
num_iterations=5000) num_iterations=5000)
[a, b, c, d] = plane_model [a, b, c, d] = plane_model
# print(f"Plane equation: {a:.2f}x + {b:.2f}y + {c:.2f}z + {d:.2f} = 0")
# print(f"Plane equation: {a:.2f}x + {b:.2f}y + {c:.2f}z + {d:.2f} = 0")
# inlier_cloud = pcd.select_by_index(inliers) # 点云可视化 # inlier_cloud = pcd.select_by_index(inliers) # 点云可视化
# inlier_cloud.paint_uniform_color([1.0, 0, 0]) # inlier_cloud.paint_uniform_color([1.0, 0, 0])
# outlier_cloud = pcd.select_by_index(inliers, invert=True) # outlier_cloud = pcd.select_by_index(inliers, invert=True)
@ -216,12 +225,16 @@ class Detection:
box[1][0][1], box[1][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[1][0][1], box[1][0][0]) box[1][0][1], box[1][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[1][0][1], box[1][0][0])
box[2][0][1], box[2][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[2][0][1], box[2][0][0]) box[2][0][1], box[2][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[2][0][1], box[2][0][0])
box[3][0][1], box[3][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[3][0][1], box[3][0][0]) box[3][0][1], box[3][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[3][0][1], box[3][0][0])
if Box_isPoint == True:
box_point_x1, box_point_y1, box_point_z1 = remove_nan_mean_value(pm, box[0][0][1], box[0][0][0]) box_point_x1, box_point_y1, box_point_z1 = remove_nan_mean_value(pm, box[0][0][1], box[0][0][0])
box_point_x2, box_point_y2, box_point_z2 = remove_nan_mean_value(pm, box[1][0][1], box[1][0][0]) box_point_x2, box_point_y2, box_point_z2 = remove_nan_mean_value(pm, box[1][0][1], box[1][0][0])
box_point_x3, box_point_y3, box_point_z3 = remove_nan_mean_value(pm, box[2][0][1], box[2][0][0]) box_point_x3, box_point_y3, box_point_z3 = remove_nan_mean_value(pm, box[2][0][1], box[2][0][0])
box_point_x4, box_point_y4, box_point_z4 = remove_nan_mean_value(pm, box[3][0][1], box[3][0][0]) box_point_x4, box_point_y4, box_point_z4 = remove_nan_mean_value(pm, box[3][0][1], box[3][0][0])
else:
x1, y1, z1 = uv_to_XY(self.cameraType, box[0][0][0], box[0][0][1])
x2, y2, z2 = uv_to_XY(self.cameraType, box[1][0][0], box[1][0][1])
x3, y3, z3 = uv_to_XY(self.cameraType, box[2][0][0], box[2][0][1])
x4, y4, z4 = uv_to_XY(self.cameraType, box[3][0][0], box[3][0][1])
x_rotation_center = int((box[0][0][0] + box[1][0][0] + box[2][0][0] + box[3][0][0]) / 4) x_rotation_center = int((box[0][0][0] + box[1][0][0] + box[2][0][0] + box[3][0][0]) / 4)
y_rotation_center = int((box[0][0][1] + box[1][0][1] + box[2][0][1] + box[3][0][1]) / 4) y_rotation_center = int((box[0][0][1] + box[1][0][1] + box[2][0][1] + box[3][0][1]) / 4)
point_x, point_y, point_z = remove_nan_mean_value(pm, y_rotation_center, x_rotation_center) point_x, point_y, point_z = remove_nan_mean_value(pm, y_rotation_center, x_rotation_center)
@ -229,10 +242,25 @@ class Detection:
if np.isnan(point_x): # 点云值为无效值 if np.isnan(point_x): # 点云值为无效值
continue continue
else: else:
box_list.append([[box_point_x1, box_point_y1, box_point_z1], [box_point_x2, box_point_y2, box_point_z2], if Box_isPoint == True:
[box_point_x3, box_point_y3, box_point_z3], [box_point_x4, box_point_y4, box_point_z4]]) box_list.append(
[[box_point_x1, box_point_y1, box_point_z1],
[box_point_x2, box_point_y2, box_point_z2],
[box_point_x3, box_point_y3, box_point_z3],
[box_point_x4, box_point_y4, box_point_z4]])
else:
box_list.append([[x1, y1, z1],
[x2, y2, z2],
[x3, y3, z3],
[x4, y4, z4],
])
if self.cameraType == 'RVC':
xyz.append([point_x * 1000, point_y * 1000, point_z * 1000]) xyz.append([point_x * 1000, point_y * 1000, point_z * 1000])
Depth_Z.append(point_z * 1000) Depth_Z.append(point_z * 1000)
elif self.cameraType == 'Pe':
xyz.append([point_x, point_y, point_z])
Depth_Z.append(point_z)
#Depth_Z.append(point_z * 1000)
nx_ny_nz.append([a, b, c]) nx_ny_nz.append([a, b, c])
RegionalArea.append(cv2.contourArea(max_contour)) RegionalArea.append(cv2.contourArea(max_contour))
uv.append([x_rotation_center, y_rotation_center]) uv.append([x_rotation_center, y_rotation_center])
@ -258,13 +286,13 @@ class Detection:
outlier_cloud = pcd.select_by_index(inliers, invert=True) outlier_cloud = pcd.select_by_index(inliers, invert=True)
outlier_cloud.paint_uniform_color([0, 1, 0]) outlier_cloud.paint_uniform_color([0, 1, 0])
o3d.visualization.draw_geometries([inlier_cloud, outlier_cloud, pcd2]) o3d.visualization.draw_geometries([inlier_cloud, outlier_cloud, pcd2])
if save_img_point == True: if save_img_point == 2 or save_img_point == 4:
save_img = cv2.resize(img,(720, 540)) save_img = cv2.resize(img,(720, 540))
cv2.imwrite(save_img_name, save_img) cv2.imwrite(save_img_name, save_img)
return 1, img, xyz[_idx], nx_ny_nz[_idx], box_list[_idx] return 1, img, xyz[_idx], nx_ny_nz[_idx], box_list[_idx]
else: else:
if save_img_point == True: if save_img_point == 2 or save_img_point == 4:
save_img = cv2.resize(img, (720, 540)) save_img = cv2.resize(img, (720, 540))
cv2.imwrite(save_img_name, save_img) cv2.imwrite(save_img_name, save_img)
@ -274,7 +302,7 @@ class Detection:
print("RVC X Camera capture failed!") print("RVC X Camera capture failed!")
return 0, None, None, None, None return 0, None, None, None, None
def get_take_photo_position(self, Point_isVision=False, save_img_point=False, Height_reduce = 30, width_reduce = 30): def get_take_photo_position(self, Height_reduce=30, width_reduce=30):
""" """
专用于拍照位置点查找,检测当前拍照点能否检测到料袋 专用于拍照位置点查找,检测当前拍照点能否检测到料袋
:param Height_reduce: :param Height_reduce:
@ -354,8 +382,12 @@ class Detection:
''' '''
rect = cv2.minAreaRect(max_contour) rect = cv2.minAreaRect(max_contour)
if rect[1][0] - width_reduce < 30 or rect[1][1] - Height_reduce < 30:
rect_reduce = ( rect_reduce = (
(rect[0][0], rect[0][1]), (rect[1][0] - width_reduce, rect[1][1] - Height_reduce), rect[2]) (rect[0][0], rect[0][1]), (rect[1][0] - width_reduce, rect[1][1] - Height_reduce), rect[2])
else:
rect_reduce = (
(rect[0][0], rect[0][1]), (rect[1][0], rect[1][1]), rect[2])
# cv2.boxPoints可以将轮廓点转换为四个角点坐标 # cv2.boxPoints可以将轮廓点转换为四个角点坐标
box_outside = cv2.boxPoints(rect) box_outside = cv2.boxPoints(rect)
# 这一步不影响后面的画图,但是可以保证四个角点坐标为顺时针 # 这一步不影响后面的画图,但是可以保证四个角点坐标为顺时针
@ -389,7 +421,7 @@ class Detection:
if self.cameraType == 'RVC': if self.cameraType == 'RVC':
xyz.append([point_x * 1000, point_y * 1000, point_z * 1000]) xyz.append([point_x * 1000, point_y * 1000, point_z * 1000])
Depth_Z.append(point_z * 1000) Depth_Z.append(point_z * 1000)
else: elif self.cameraType == 'Pe':
xyz.append([point_x, point_y, point_z]) xyz.append([point_x, point_y, point_z])
Depth_Z.append(point_z) Depth_Z.append(point_z)
RegionalArea.append(cv2.contourArea(max_contour)) RegionalArea.append(cv2.contourArea(max_contour))

136
Vision/detect_bag_num.py Normal file
View File

@ -0,0 +1,136 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
# @Time : 2024/10/11 9:44
# @Author : hjw
# @File : detect_bag_num.py
'''
import os.path
import random
import cv2
import numpy as np
import torch
from Vision.tool.CameraHIK import camera_HIK
from ultralytics.nn.autobackend import AutoBackend
from ultralytics.utils import ops
class DetectionBagNum:
    """Count the bags visible on the top layer using a YOLO detector.

    Frames come from an HIK RTSP camera (ip, port, name, pw); detections are
    produced by an ultralytics AutoBackend model loaded from
    Vision/model/pt/bagNum.pt and drawn onto the frame.

    get_BagNum() returns:
        ret     [bool]    camera read success
        BagNum  [int]     number of detected "bag" instances
        img_src [ndarray] the frame with boxes drawn on it
    """
    def __init__(self, ip="192.168.1.121", port=554, name="admin", pw="zlzk.123"):
        # Detector weights are expected under <cwd>/Vision/model/pt/bagNum.pt.
        model_path = ''.join([os.getcwd(), '/Vision/model/pt/bagNum.pt'])
        # NOTE(review): attribute is spelled "HIk_Camera" (lowercase k);
        # callers must use the same spelling.
        self.HIk_Camera = camera_HIK(ip, port, name, pw)
        self.imgsz = 640     # inference input size (square letterbox)
        self.cuda = 'cpu'    # torch device string
        self.conf = 0.40     # NMS confidence threshold
        self.iou = 0.45      # NMS IoU threshold
        self.model = AutoBackend(model_path, device=torch.device(self.cuda))
        self.model.eval()
        self.names = self.model.names
        self.half = False    # fp16 disabled (CPU inference)
        # Fixed font color plus one random BGR color per class name.
        self.color = {"font": (255, 255, 255)}
        self.color.update(
            {self.names[i]: (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
             for i in range(len(self.names))})
    def get_BagNum(self):
        """Grab one frame, run detection, and count "bag" instances.

        Returns:
            ret: bool, whether the camera produced a frame.
            BagNum: int, number of detections whose class name is "bag".
            img_src: ndarray, the frame annotated with boxes (None when
                the camera read failed).
        """
        BagNum = 0
        ret, img_src = self.HIk_Camera.get_img()
        if ret:
            # img_src = cv2.imread(img_path)
            img = self.precess_image(img_src, self.imgsz, self.half, self.cuda)
            preds = self.model(img)
            det = ops.non_max_suppression(preds, self.conf, self.iou, classes=None, agnostic=False, max_det=300,
                                          nc=len(self.names))
            for i, pred in enumerate(det):
                lw = max(round(sum(img_src.shape) / 2 * 0.003), 2)  # line width
                tf = max(lw - 1, 1)  # font thickness
                sf = lw / 3  # font scale
                # Rescale boxes from letterboxed input back to source image size.
                pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], img_src.shape)
                results = pred.cpu().detach().numpy()
                # cv2.imshow('img2', img_src)
                # cv2.waitKey(1)
                for result in results:
                    # result layout: x1, y1, x2, y2, conf, class-id.
                    if self.names[result[5]]=="bag":
                        BagNum += 1
                        conf = round(result[4], 2)
                        self.draw_box(img_src, result[:4], conf, self.names[result[5]], lw, sf, tf)
        return ret, BagNum, img_src
    def draw_box(self, img_src, box, conf, cls_name, lw, sf, tf):
        """Draw one labeled detection box on img_src in place."""
        color = self.color[cls_name]
        conf = str(conf)
        label = f'{cls_name} {conf}'
        p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3]))
        # Box outline.
        cv2.rectangle(img_src, p1, p2, color, thickness=lw, lineType=cv2.LINE_AA)
        # text width, height
        w, h = cv2.getTextSize(label, 0, fontScale=sf, thickness=tf)[0]
        # label fits outside box
        outside = box[1] - h - 3 >= 0
        p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3
        # Filled background behind the label text.
        cv2.rectangle(img_src, p1, p2, color, -1, cv2.LINE_AA)
        # Label text.
        cv2.putText(img_src, label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2),
                    0, sf, self.color["font"], thickness=2, lineType=cv2.LINE_AA)
    @staticmethod
    def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), scaleup=True, stride=32):
        # Resize and pad image while meeting stride-multiple constraints
        shape = im.shape[:2]  # current shape [height, width]
        if isinstance(new_shape, int):
            new_shape = (new_shape, new_shape)
        # Scale ratio (new / old)
        r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
        if not scaleup:  # only scale down, do not scale up (for better val mAP)
            r = min(r, 1.0)
        # Compute padding
        ratio = r, r  # width, height ratios
        new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
        dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
        # minimum rectangle
        dw, dh = np.mod(dw, stride), np.mod(dh, stride)  # wh padding
        dw /= 2  # divide padding into 2 sides
        dh /= 2
        if shape[::-1] != new_unpad:  # resize
            im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
        top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
        left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
        im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
        return im, ratio, (dw, dh)
    def precess_image(self, img_src, img_size, half, device):
        """Convert a BGR frame to the normalized NCHW tensor the model expects."""
        # Padded resize
        img = self.letterbox(img_src, img_size)[0]
        # Convert
        img = img.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
        img = np.ascontiguousarray(img)
        img = torch.from_numpy(img).to(device)
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img = img / 255  # 0 - 255 to 0.0 - 1.0
        if len(img.shape) == 3:
            img = img[None]  # expand for batch dim
        return img

BIN
Vision/model/pt/bagNum.pt Normal file

Binary file not shown.

63
Vision/tool/CameraHIK.py Normal file
View File

@ -0,0 +1,63 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
# @Time : 2024/10/11 10:43
# @Author : hjw
# @File : CameraHIK.py
'''
import cv2
import socket
def portisopen(ip, port):
    """Return True if a TCP connection to (ip, port) succeeds within 1 second.

    BUG FIX: the probe socket was never closed, leaking one file descriptor
    per call (and this is called on every frame by camera_HIK.get_img). The
    `with` block guarantees the socket is released.
    """
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.settimeout(1)
        # connect_ex returns 0 on success instead of raising an exception
        return sock.connect_ex((ip, port)) == 0
class camera_HIK():
    """Hikvision IP camera wrapper streaming frames over RTSP via OpenCV."""

    def __init__(self, ip, port, name, pw):
        # e.g. "rtsp://admin:zlzk.123@192.168.1.64:554"
        self.camera_url = "rtsp://" + name + ":" + pw + "@" + ip + ":" + str(port)
        ret = portisopen(ip, port)
        self.ip = ip
        self.port = port
        self.init_success = False
        if ret:
            self.cap = cv2.VideoCapture(self.camera_url)
            # BUG FIX: was "self.init_sucess = True" (typo), which left
            # init_success False forever and made get_img() reconnect the
            # stream on every single frame.
            self.init_success = True
        else:
            print('海康摄像头网络错误请检测IP')

    def get_img(self):
        """Grab one frame.

        Returns:
            (ret, frame): ret is False and frame is None when the camera is
            unreachable or the read fails.
        """
        ret = False
        frame = None
        if self.init_success:
            if portisopen(self.ip, self.port):
                ret, frame = self.cap.read()
            else:
                print('海康摄像头网络断开')
        else:
            # initialization failed earlier: retry once the port is reachable
            if portisopen(self.ip, self.port):
                self.reconnect_camera()
                ret, frame = self.cap.read()
            else:
                print('海康摄像头网络断开')
        return ret, frame

    def reconnect_camera(self):
        """Release any existing capture and reopen the RTSP stream."""
        if self.init_success:
            self.cap.release()
        self.cap = cv2.VideoCapture(self.camera_url)
        # mark the stream as initialized so subsequent get_img() calls take
        # the fast path instead of reconnecting every frame
        self.init_success = True
        print("海康摄像头重连")

    def release_camera(self):
        """Release the underlying OpenCV capture handle."""
        self.cap.release()

View File

@ -14,6 +14,66 @@ import psutil
from psutil._common import bytes2human from psutil._common import bytes2human
def uv_to_XY(cameraType, u, v):
"""
像素坐标转相机坐标
Args:
cameraType:
u:
v:
Returns:
如本:
ExtrinsicMatrix:
[-0.9916700124740601, -0.003792409785091877, 0.12874870002269745, 0.10222162306308746, -0.003501748666167259, 0.9999907612800598, 0.002483875723555684, -0.08221593499183655, -0.12875692546367645, 0.0020123394206166267, -0.9916741251945496, 0.6480034589767456, 0.0, 0.0, 0.0, 1.0]
IntrinsicParameters:
[2402.101806640625, 0.0, 739.7069091796875,
0.0, 2401.787353515625, 584.73046875,
0.0, 0.0, 1.0]
distortion:
[-0.04248141124844551, 0.24386045336723328, -0.38333430886268616, -0.0017840253422036767, 0.0007602088153362274]
图漾:
depth image format list:
0 -size[640x480] - desc:DEPTH16_640x480
1 -size[1280x960] - desc:DEPTH16_1280x960
2 -size[320x240] - desc:DEPTH16_320x240
delth calib info:
calib size :[1280x960]
calib intr :
(1048.3614501953125, 0.0, 652.146240234375,
0.0, 1048.3614501953125, 500.26397705078125,
0.0, 0.0, 1.0)
calib extr : (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
calib distortion : (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
"""
x = None
y = None
zc = 1 # 设深度z为1
if cameraType == 'RVC':
u0 = 739.70
v0 = 584.73
fx = 2402.10
fy = 2401.78
x = (u-u0)*zc/fx
y = (v-v0)*zc/fy
elif cameraType == 'Pe':
u0 = 652.14
v0 = 500.26
fx = 1048.36
fy = 1048.36
x = (u - u0) * zc / fx
y = (v - v0) * zc / fy
return x, y, zc
def out_bounds_dete(pm_y, pm_x, piont_y, piont_x):
    if piont_y>=pm_y:
        piont_y = pm_y-1

View File

@ -10,6 +10,8 @@ from Trace.handeye_calibration import *
from Vision.tool.utils import get_disk_space
from Vision.tool.utils import remove_nan_mean_value
from Vision.detect_person import DetectionPerson
from Vision.detect_bag_num import DetectionBagNum
from Vision.tool.CameraHIK import camera_HIK
import platform
import cv2
import os
@ -61,6 +63,28 @@ def detectionPerson_test():
        else:
            break
def HIK_Camera_test():
    """Continuously display frames from the Hikvision test camera."""
    hik_cam = camera_HIK("192.168.1.121", 554, "admin", "zlzk.123")
    while True:
        ok, raw = hik_cam.get_img()
        if not ok:
            continue
        preview = cv2.resize(raw, (800, 600))
        cv2.imshow('img', preview)
        cv2.waitKey(1)
def detectionBagNum_test():
    """Run bag-counting detection over a sample video, showing each frame."""
    detectionBagNum = DetectionBagNum()
    video_path = ''.join([os.getcwd(), '/Vision/model/data/2.mp4'])
    cam = cv2.VideoCapture(video_path)
    while cam.isOpened():
        ret, frame = cam.read()
        if ret:
            # BUG FIX: get_BagNum returns three values (ret, BagNum, img_src),
            # so unpacking into two raised a ValueError on the first frame.
            _, num, img = detectionBagNum.get_BagNum(frame)
            print(num)
            cv2.imshow('img', img)
            cv2.waitKey(1)
        else:
            break
    # release the capture so the video file handle is not leaked
    cam.release()
if __name__ == '__main__':
    detectionBagNum_test()