diff --git a/Vision/camera_coordinate_dete.py b/Vision/camera_coordinate_dete.py index 29266d1..62f19ee 100644 --- a/Vision/camera_coordinate_dete.py +++ b/Vision/camera_coordinate_dete.py @@ -11,13 +11,15 @@ import numpy as np import cv2 import open3d as o3d -from Vision.tool.CameraRVC import camera +# from Vision.tool.CameraRVC import camera +from Vision.tool.CameraPe import camera from Vision.yolo.yolov8_pt_seg import yolov8_segment from Vision.yolo.yolov8_openvino import yolov8_segment_openvino from Vision.tool.utils import find_position from Vision.tool.utils import class_names from Vision.tool.utils import get_disk_space -from Vision.tool.utils import remove_nan +from Vision.tool.utils import remove_nan_mean_value +from Vision.tool.utils import out_bounds_dete import time import os @@ -39,7 +41,7 @@ class Detection: - def get_position(self, Point_isVision=False, save_img_piont=True): + def get_position(self, Point_isVision=False, save_img_point=False, seg_distance_threshold= 10): "" ''' :param api: None @@ -48,22 +50,22 @@ class Detection: ret, img, pm = self.camera_rvc.get_img_and_point_map() # 拍照,获取图像及 if self.camera_rvc.caminit_isok == True: if ret == 1: - if save_img_piont == True: + if save_img_point == True: if get_disk_space(path=os.getcwd()) < 15: # 内存小于15G,停止保存数据 - save_img_piont = False + save_img_point = False print('系统内存不足,无法保存数据') else: save_path = ''.join([os.getcwd(), '/Vision/model/data/', time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime(time.time()))]) save_img_name = ''.join([save_path, '.png']) - save_piont_name = ''.join([save_path, '.xyz']) - row_list = list(range(1, 1080, 2)) - column_list = list(range(1, 1440, 2)) + save_point_name = ''.join([save_path, '.xyz']) + row_list = list(range(1, img.shape[0], 2)) + column_list = list(range(1, img.shape[1], 2)) pm_save = pm.copy() pm_save1 = np.delete(pm_save, row_list, axis=0) point_new = np.delete(pm_save1, column_list, axis=1) point_new = point_new.reshape(-1, 3) - 
np.savetxt(save_piont_name, point_new) + np.savetxt(save_point_name, point_new) if self.use_openvino_model == False: flag, det_cpu, dst_img, masks, category_names = self.model.model_inference(img, 0) else: @@ -124,8 +126,8 @@ class Detection: # cv2.imshow('bin',binary) # cv2.waitKey(1) contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE) - # all_piont_list = contours_in(contours) - # print(len(all_piont_list)) + # all_point_list = contours_in(contours) + # print(len(all_point_list)) max_contour = None max_perimeter = 0 for contour in contours: # 排除小分割区域或干扰区域 @@ -150,13 +152,13 @@ class Detection: print("分割点云数量较少,无法拟合平面") continue - # cv2.imshow('result', piont_result) + # cv2.imshow('result', point_result) ''' 拟合平面,计算法向量 ''' pcd = o3d.geometry.PointCloud() pcd.points = o3d.utility.Vector3dVector(pm_seg) - plane_model, inliers = pcd.segment_plane(distance_threshold=0.01, + plane_model, inliers = pcd.segment_plane(distance_threshold=seg_distance_threshold, ransac_n=5, num_iterations=5000) [a, b, c, d] = plane_model @@ -181,22 +183,28 @@ class Detection: # 在原图上画出预测的外接矩形 box = box.reshape((-1, 1, 2)).astype(np.int32) box = box + [[[box_x1, box_y1]], [[box_x1, box_y1]], [[box_x1, box_y1]], [[box_x1, box_y1]]] - box_piont_x1, box_piont_y1, box_piont_z1 = remove_nan(pm, box[0][0][1], box[0][0][0]) - box_piont_x2, box_piont_y2, box_piont_z2 = remove_nan(pm, box[1][0][1], box[1][0][0]) - box_piont_x3, box_piont_y3, box_piont_z3 = remove_nan(pm, box[2][0][1], box[2][0][0]) - box_piont_x4, box_piont_y4, box_piont_z4 = remove_nan(pm, box[3][0][1], box[3][0][0]) + + box[0][0][1], box[0][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[0][0][1], box[0][0][0]) + box[1][0][1], box[1][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[1][0][1], box[1][0][0]) + box[2][0][1], box[2][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[2][0][1], box[2][0][0]) + box[3][0][1], box[3][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[3][0][1], 
box[3][0][0]) + + box_point_x1, box_point_y1, box_point_z1 = remove_nan_mean_value(pm, box[0][0][1], box[0][0][0]) + box_point_x2, box_point_y2, box_point_z2 = remove_nan_mean_value(pm, box[1][0][1], box[1][0][0]) + box_point_x3, box_point_y3, box_point_z3 = remove_nan_mean_value(pm, box[2][0][1], box[2][0][0]) + box_point_x4, box_point_y4, box_point_z4 = remove_nan_mean_value(pm, box[3][0][1], box[3][0][0]) x_rotation_center = int((box[0][0][0] + box[1][0][0] + box[2][0][0] + box[3][0][0]) / 4) y_rotation_center = int((box[0][0][1] + box[1][0][1] + box[2][0][1] + box[3][0][1]) / 4) - point_x, point_y, point_z = pm[y_rotation_center, x_rotation_center] + point_x, point_y, point_z = remove_nan_mean_value(pm, y_rotation_center, x_rotation_center) cv2.circle(img, (x_rotation_center, y_rotation_center), 4, (255, 255, 255), 5) # 标出中心点 if np.isnan(point_x): # 点云值为无效值 continue else: box_list.append( - [[box_piont_x1, box_piont_y1, box_piont_z1], - [box_piont_x2, box_piont_y2, box_piont_z2], - [box_piont_x3, box_piont_y3, box_piont_z3], - [box_piont_x4, box_piont_y4, box_piont_z4]]) + [[box_point_x1, box_point_y1, box_point_z1], + [box_point_x2, box_point_y2, box_point_z2], + [box_point_x3, box_point_y3, box_point_z3], + [box_point_x4, box_point_y4, box_point_z4]]) xyz.append([point_x*1000, point_y*1000, point_z*1000]) Depth_Z.append(point_z*1000) nx_ny_nz.append([a, b, c]) @@ -208,27 +216,27 @@ class Detection: _idx = find_position(Depth_Z, RegionalArea, 100,True) if _idx == None: - return 1, img, None, None + return 1, img, None, None, None else: cv2.circle(img, (uv[_idx][0], uv[_idx][1]), 30, (0, 0, 255), 20) # 标出中心点 if Point_isVision==True: pcd = o3d.geometry.PointCloud() pcd.points = o3d.utility.Vector3dVector(seg_point[_idx]) - plane_model, inliers = pcd.segment_plane(distance_threshold=0.01, + plane_model, inliers = pcd.segment_plane(distance_threshold=seg_distance_threshold, ransac_n=5, num_iterations=5000) inlier_cloud = pcd.select_by_index(inliers) # 点云可视化 
inlier_cloud.paint_uniform_color([1.0, 0, 0]) outlier_cloud = pcd.select_by_index(inliers, invert=True) - outlier_cloud.paint_uniform_color([0, 1, 0]) + outlier_cloud.paint_uniform_color([0, 0, 1]) o3d.visualization.draw_geometries([inlier_cloud, outlier_cloud, pcd2]) - if save_img_piont == True: + if save_img_point == True: save_img = cv2.resize(img, (720, 540)) cv2.imwrite(save_img_name, save_img) return 1, img, xyz[_idx], nx_ny_nz[_idx], box_list[_idx] else: - if save_img_piont == True: + if save_img_point == True: save_img = cv2.resize(img,(720, 540)) cv2.imwrite(save_img_name, save_img) return 1, img, None, None, None diff --git a/Vision/camera_coordinate_dete_img.py b/Vision/camera_coordinate_dete_img.py index 2764f1a..a415aab 100644 --- a/Vision/camera_coordinate_dete_img.py +++ b/Vision/camera_coordinate_dete_img.py @@ -18,8 +18,8 @@ from Vision.yolo.yolov8_openvino import yolov8_segment_openvino from Vision.tool.utils import find_position from Vision.tool.utils import class_names from Vision.tool.utils import get_disk_space -from Vision.tool.utils import remove_nan - +from Vision.tool.utils import remove_nan_mean_value +from Vision.tool.utils import out_bounds_dete import os class Detection: @@ -43,7 +43,7 @@ class Detection: - def get_position(self, Point_isVision=False, save_img_piont=True): + def get_position(self, Point_isVision=False, save_img_point=True, seg_distance_threshold = 10): "" ''' :param api: None @@ -54,22 +54,22 @@ class Detection: img = self.img pm = self.point if ret == 1: - if save_img_piont == True: + if save_img_point == True: if get_disk_space(path=os.getcwd())<15: # 内存小于15G,停止保存数据 - save_img_piont = False + save_img_point = False print('系统内存不足,无法保存数据') else: save_path = ''.join([os.getcwd(), '/Vision/model/data/', time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime(time.time()))]) save_img_name = ''.join([save_path, '.png']) - save_piont_name = ''.join([save_path, '.xyz']) - row_list = list(range(1, 1080, 2)) - column_list = 
list(range(1, 1440, 2)) + save_point_name = ''.join([save_path, '.xyz']) + row_list = list(range(1, img.shape[0], 2)) + column_list = list(range(1, img.shape[1], 2)) pm_save = pm.copy() pm_save1 = np.delete(pm_save, row_list, axis=0) point_new = np.delete(pm_save1, column_list, axis=1) point_new = point_new.reshape(-1, 3) - np.savetxt(save_piont_name, point_new) + np.savetxt(save_point_name, point_new) if self.use_openvino_model == False: @@ -133,8 +133,8 @@ class Detection: # cv2.imshow('bin',binary) # cv2.waitKey(1) contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE) - # all_piont_list = contours_in(contours) - # print(len(all_piont_list)) + # all_point_list = contours_in(contours) + # print(len(all_point_list)) max_contour = None max_perimeter = 0 for contour in contours: # 排除小分割区域或干扰区域 @@ -158,13 +158,13 @@ class Detection: if pm_seg.size < 300: print("分割点云数量较少,无法拟合平面") continue - # cv2.imshow('result', piont_result) + # cv2.imshow('result', point_result) ''' 拟合平面,计算法向量 ''' pcd = o3d.geometry.PointCloud() pcd.points = o3d.utility.Vector3dVector(pm_seg) - plane_model, inliers = pcd.segment_plane(distance_threshold=0.01, + plane_model, inliers = pcd.segment_plane(distance_threshold=seg_distance_threshold, ransac_n=5, num_iterations=5000) [a, b, c, d] = plane_model @@ -190,20 +190,26 @@ class Detection: # 在原图上画出预测的外接矩形 box = box.reshape((-1, 1, 2)).astype(np.int32) box = box + [[[box_x1, box_y1]], [[box_x1, box_y1]], [[box_x1, box_y1]], [[box_x1, box_y1]]] - box_piont_x1, box_piont_y1, box_piont_z1 = remove_nan(pm, box[0][0][1], box[0][0][0]) - box_piont_x2, box_piont_y2, box_piont_z2 = remove_nan(pm, box[1][0][1], box[1][0][0]) - box_piont_x3, box_piont_y3, box_piont_z3 = remove_nan(pm, box[2][0][1], box[2][0][0]) - box_piont_x4, box_piont_y4, box_piont_z4 = remove_nan(pm, box[3][0][1], box[3][0][0]) + + box[0][0][1], box[0][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[0][0][1], box[0][0][0]) + box[1][0][1], box[1][0][0] 
= out_bounds_dete(pm.shape[0], pm.shape[1], box[1][0][1], box[1][0][0]) + box[2][0][1], box[2][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[2][0][1], box[2][0][0]) + box[3][0][1], box[3][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[3][0][1], box[3][0][0]) + + box_point_x1, box_point_y1, box_point_z1 = remove_nan_mean_value(pm, box[0][0][1], box[0][0][0]) + box_point_x2, box_point_y2, box_point_z2 = remove_nan_mean_value(pm, box[1][0][1], box[1][0][0]) + box_point_x3, box_point_y3, box_point_z3 = remove_nan_mean_value(pm, box[2][0][1], box[2][0][0]) + box_point_x4, box_point_y4, box_point_z4 = remove_nan_mean_value(pm, box[3][0][1], box[3][0][0]) x_rotation_center = int((box[0][0][0] + box[1][0][0] + box[2][0][0] + box[3][0][0]) / 4) y_rotation_center = int((box[0][0][1] + box[1][0][1] + box[2][0][1] + box[3][0][1]) / 4) - point_x, point_y, point_z = pm[y_rotation_center, x_rotation_center] + point_x, point_y, point_z = remove_nan_mean_value(pm, y_rotation_center, x_rotation_center) cv2.circle(img, (x_rotation_center, y_rotation_center), 4, (255, 255, 255), 5) # 标出中心点 if np.isnan(point_x): # 点云值为无效值 continue else: - box_list.append([[box_piont_x1, box_piont_y1, box_piont_z1], [box_piont_x2, box_piont_y2, box_piont_z2], - [box_piont_x3, box_piont_y3, box_piont_z3], [box_piont_x4, box_piont_y4, box_piont_z4]]) + box_list.append([[box_point_x1, box_point_y1, box_point_z1], [box_point_x2, box_point_y2, box_point_z2], + [box_point_x3, box_point_y3, box_point_z3], [box_point_x4, box_point_y4, box_point_z4]]) xyz.append([point_x * 1000, point_y * 1000, point_z * 1000]) Depth_Z.append(point_z * 1000) nx_ny_nz.append([a, b, c]) @@ -222,7 +228,7 @@ class Detection: if Point_isVision == True: pcd = o3d.geometry.PointCloud() pcd.points = o3d.utility.Vector3dVector(seg_point[_idx]) - plane_model, inliers = pcd.segment_plane(distance_threshold=0.01, + plane_model, inliers = pcd.segment_plane(distance_threshold=seg_distance_threshold, ransac_n=5, 
num_iterations=5000) inlier_cloud = pcd.select_by_index(inliers) # 点云可视化 @@ -230,13 +236,13 @@ class Detection: outlier_cloud = pcd.select_by_index(inliers, invert=True) outlier_cloud.paint_uniform_color([0, 1, 0]) o3d.visualization.draw_geometries([inlier_cloud, outlier_cloud, pcd2]) - if save_img_piont == True: + if save_img_point == True: save_img = cv2.resize(img,(720, 540)) cv2.imwrite(save_img_name, save_img) return 1, img, xyz[_idx], nx_ny_nz[_idx], box_list[_idx] else: - if save_img_piont == True: + if save_img_point == True: save_img = cv2.resize(img, (720, 540)) cv2.imwrite(save_img_name, save_img) diff --git a/Vision/tool/CameraPe.py b/Vision/tool/CameraPe.py index 2989966..74b15b4 100644 --- a/Vision/tool/CameraPe.py +++ b/Vision/tool/CameraPe.py @@ -145,7 +145,7 @@ class camera(): :return: ret , img, point_map ''' if self.caminit_isok == False or self.event.IsOffline(): - return 0, None + return 0, None, None else: image_list = self.cl.DeviceStreamRead(self.handle, 2000) if len(image_list) == 2: @@ -153,19 +153,18 @@ class camera(): frame = image_list[i] if frame.streamID == PERCIPIO_STREAM_DEPTH: img_depth = frame - img_depth2p3d = frame if frame.streamID == PERCIPIO_STREAM_COLOR: img_color = frame - self.cl.DeviceStreamMapDepthImageToColorCoordinate(self.depth_calib, img_depth.width, img_depth.height, - self.scale_unit, img_depth, self.color_calib, img_color.width, - img_color.height, self.img_registration_depth) - + # self.cl.DeviceStreamMapDepthImageToColorCoordinate(self.depth_calib, img_depth.width, img_depth.height, + # self.scale_unit, img_depth, self.color_calib, img_color.width, + # img_color.height, self.img_registration_depth) + # # self.cl.DeviceStreamDepthRender(self.img_registration_depth, self.img_registration_render) # mat_depth_render = self.img_registration_render.as_nparray() # cv2.imshow('registration', mat_depth_render) - self.cl.DeviceStreamMapDepthImageToPoint3D(self.img_registration_depth, self.depth_calib, self.scale_unit, + 
self.cl.DeviceStreamMapDepthImageToPoint3D(img_depth, self.depth_calib, self.scale_unit, + self.pointcloud_data_arr) # show p3d arr data diff --git a/Vision/tool/utils.py b/Vision/tool/utils.py index 99d4d27..f6d86fd 100644 --- a/Vision/tool/utils.py +++ b/Vision/tool/utils.py @@ -13,14 +13,82 @@ import cv2 import psutil from psutil._common import bytes2human -def remove_nan(pm, y, x): - piont_x, piont_y, piont_z = pm[y, x] - if np.isnan(piont_x): - for i in range(10): - piont_x, piont_y, piont_z = pm[y+i, x] - if np.isnan(piont_x)==False: + +def out_bounds_dete(pm_y, pm_x, piont_y, piont_x): + if piont_y>pm_y-1: + piont_y = pm_y-1 + print('四坐标点超出点云大小') + if piont_y<0: + piont_y=0 + print('四坐标点超出点云大小') + if piont_x>pm_x-1: + piont_x = pm_x-1 + print('四坐标点超出点云大小') + if piont_x<0: + piont_x=0 + print('四坐标点超出点云大小') + return piont_y, piont_x + +def remove_nan_mean_value(pm, y, x, iter_max=15): + point_x, point_y, point_z = pm[y, x] + if np.isnan(point_x): + print('Nan值去除') + point_x_list = [] + point_y_list = [] + point_z_list = [] + iter_current = 1 + pm_shape_y = pm.shape[0] + pm_shape_x = pm.shape[1] + remove_nan_isok = False + while iter_current < iter_max: + # 计算开始点 + if y - iter_current > 0: + y_start = y - iter_current + else: + y_start = 0 + + if x - iter_current > 0: + x_start = x - iter_current + else: + x_start = 0 + + for idx_y in range(iter_current*2 + 1): + y_current = y_start + idx_y + if y_current > pm_shape_y-1: + continue + for idx_x in range(iter_current*2 + 1): + x_current = x_start + idx_x + if x_current > pm_shape_x-1: + continue + elif np.isnan(pm[y_current, x_current][0]) == False: + point_x_list.append(pm[y_current, x_current][0]) + point_y_list.append(pm[y_current, x_current][1]) + point_z_list.append(pm[y_current, x_current][2]) + + len_point_x = len(point_x_list) + if len_point_x>0: + point_x = sum(point_x_list)/len_point_x + point_y = sum(point_y_list)/len_point_x + point_z = sum(point_z_list)/len_point_x + remove_nan_isok = True break - return 
piont_x, piont_y, piont_z + iter_current += 1 + else: + remove_nan_isok = True + if remove_nan_isok == True: + return point_x, point_y, point_z + else: + print('在15*15范围中未找到有效值,所有点云值为无效值') + return np.nan, np.nan, np.nan + +def remove_nan(pm, y, x): + point_x, point_y, point_z = pm[y, x] + if np.isnan(point_x): + for i in range(10): + point_x, point_y, point_z = pm[y+i, x] + if np.isnan(point_x)==False: + break + return point_x, point_y, point_z def get_disk_space(path='C:'): diff --git a/Vision/yolo/yolov8_openvino.py b/Vision/yolo/yolov8_openvino.py index 0871ba8..f5d6b4b 100644 --- a/Vision/yolo/yolov8_openvino.py +++ b/Vision/yolo/yolov8_openvino.py @@ -42,6 +42,9 @@ class yolov8_segment_openvino: self.boxes, self.scores, self.class_ids, mask_pred = self.process_box_output(outputs[0]) self.mask_maps = self.process_mask_output(mask_pred, outputs[1]) + if type(self.boxes) == list: + if len(self.boxes)==0: + return 0, None, None, None, None self.boxes = self.boxes.tolist() self.scores = self.scores.tolist() for t in range(len(self.scores)): diff --git a/get_position_test.py b/get_position_test.py index b50a8a0..3ebaac9 100644 --- a/get_position_test.py +++ b/get_position_test.py @@ -5,9 +5,10 @@ # @Author : hjw # @File : get_position_test.py ''' -from Vision.camera_coordinate_dete_img import Detection +from Vision.camera_coordinate_dete import Detection from Trace.handeye_calibration import * from Vision.tool.utils import get_disk_space +from Vision.tool.utils import remove_nan_mean_value import platform import cv2 @@ -19,10 +20,19 @@ while True: print('xyz点云坐标:', xyz) print('nx_ny_nz法向量:', nx_ny_nz) print('矩形框四顶点:', box) - img = cv2.resize(img,(720, 540)) - transformation_matrix = R_matrix(521.534, 0.705, 850.03, 0.0, 90.0, 0.0) # (x, y, z, u, v, w) - target_position, noraml_base = getPosition(xyz[0], xyz[1], xyz[2], nx_ny_nz[0], nx_ny_nz[1], nx_ny_nz[2], transformation_matrix, box) - print("target_position:", target_position) - print("noraml_base", noraml_base) 
+ # img = cv2.resize(img,(720, 540)) + if xyz!=None: + transformation_matrix = R_matrix(521.534, 0.705, 850.03, 0.0, 90.0, 0.0) # (x, y, z, u, v, w) + target_position, noraml_base = getPosition(xyz[0], xyz[1], xyz[2], nx_ny_nz[0], nx_ny_nz[1], nx_ny_nz[2], transformation_matrix, box) + print("target_position:", target_position) + print("noraml_base", noraml_base) cv2.imshow('img', img) - cv2.waitKey(0) \ No newline at end of file + cv2.waitKey(1) + + +# point = np.loadtxt('./Vision/model/data/test_nan.xyz').reshape((6, 8, 3)) +# a = point.copy() +# c = a.tolist() +# x, y, z = remove_nan_mean_value(point,3,3) +# print(x, y, z ) +# pass \ No newline at end of file