diff --git a/Vision/camera_coordinate_dete.py b/Vision/camera_coordinate_dete.py index e18f189..cf92e23 100644 --- a/Vision/camera_coordinate_dete.py +++ b/Vision/camera_coordinate_dete.py @@ -19,65 +19,71 @@ from Vision.tool.CameraPe_color2depth import camera_pe as camera_pe_color2depth from Vision.tool.CameraPe_depth2color import camera_pe as camera_pe_depth2color from Vision.yolo.yolov8_pt_seg import yolov8_segment from Vision.yolo.yolov8_openvino import yolov8_segment_openvino +from Vision.yolo.yolov8_pt_pose import yolov8_pose from Vision.tool.utils import find_position from Vision.tool.utils import class_names from Vision.tool.utils import get_disk_space from Vision.tool.utils import remove_nan_mean_value -from Vision.tool.utils import out_bounds_dete -from Vision.tool.utils import uv_to_XY +from Vision.tool.utils import out_bounds_dete, find_closest_point_index +from Vision.tool.utils import uv_to_XY, shrink_quadrilateral class Detection: - def __init__(self, use_openvino_model=False, cameraType = 'Pe', alignmentType = 'color2depth'): # cameraType = 'RVC' or cameraType = 'Pe' + def __init__(self, use_openvino_model=False, use_pose_model=True, use_seg_pt_model=True, cameraType = 'Pe', alignmentType = 'color2depth'): # cameraType = 'RVC' or cameraType = 'Pe' """ 初始化相机及模型 + :param use_openvino_model: 加载分割模型 + :param use_pose_model: 加载关键点pt模型 + :param use_seg_pt_model: 加载分割pt模型 :param use_openvino_model: 选择模型,默认使用openvino :param cameraType: 选择相机 如本相机 'RVC', 图漾相机 'Pe' :param alignmentType: 相机对齐方式 color2depth:彩色图对齐深度图 ;depth2color:深度图对齐彩色图 """ + if use_seg_pt_model: # 优先使用pt模型 + use_openvino_model = False + elif use_openvino_model: + use_seg_pt_model = False self.use_openvino_model = use_openvino_model self.cameraType = cameraType - self.alignmentType= alignmentType - if self.use_openvino_model == False: - model_path = ''.join([os.getcwd(), '/Vision/model/pt/one_bag.pt']) - device = 'cpu' - if self.cameraType == 'RVC': - self.camera_rvc = camera_rvc() - self.seg_distance_threshold = 10 # 1厘米 - elif self.cameraType == 'Pe': - if self.alignmentType=='color2depth': - self.camera_rvc = camera_pe_color2depth() - else: - self.camera_rvc = camera_pe_depth2color() - self.seg_distance_threshold = 15 # 2厘米 + self.use_pose_model = use_pose_model + self.use_seg_pt_model = use_seg_pt_model + self.alignmentType = alignmentType + if self.cameraType == 'RVC': + self.camera_rvc = camera_rvc() + self.seg_distance_threshold = 10 # 1厘米 + elif self.cameraType == 'Pe': + if self.alignmentType == 'color2depth': + self.camera_rvc = camera_pe_color2depth() else: - print('相机参数错误') - return - self.model = yolov8_segment() - self.model.load_model(model_path, device) + self.camera_rvc = camera_pe_depth2color() + self.seg_distance_threshold = 15 # 2厘米 else: + print('相机参数错误') + return + # 加载openvino-seg + if self.use_openvino_model: model_path = ''.join([os.getcwd(), '/Vision/model/openvino/one_bag.xml']) device = 'CPU' - if self.cameraType == 'RVC': - self.camera_rvc = camera_rvc() - self.seg_distance_threshold = 10 - elif self.cameraType == 'Pe': - if self.alignmentType == 'color2depth': - self.camera_rvc = camera_pe_color2depth() - else: - self.camera_rvc = camera_pe_depth2color() - self.seg_distance_threshold = 20 - else: - print('相机参数错误') - return - self.model = yolov8_segment_openvino(model_path, device, conf_thres=0.3, iou_thres=0.3) + self.model_seg = yolov8_segment_openvino(model_path, device, conf_thres=0.6, iou_thres=0.6) + # 加载pt-seg + if self.use_seg_pt_model: + model_path = ''.join([os.getcwd(), 
'/Vision/model/pt/one_bag.pt']) + device = 'cpu' + self.model_seg = yolov8_segment() + self.model_seg.load_model(model_path, device) + # 加载pt-pose + if self.use_pose_model: + model_path = ''.join([os.getcwd(), '/Vision/model/pt/one_bag_pose.pt']) + device = 'cpu' + self.model_pose = yolov8_pose(model_path, device) - def get_position(self, Point_isVision=False, Box_isPoint=True, First_Depth =True, Iter_Max_Pixel = 30, save_img_point=0, Height_reduce = 80, width_reduce = 60, Xmin =160, Xmax = 1050, Ymin =290 ,Ymax = 780): + def get_position(self, Use_Pose_Model_Pro=False, Point_isVision=False, Box_isPoint=True, First_Depth =True, Iter_Max_Pixel = 30, save_img_point=0, Height_reduce = 80, width_reduce = 60, Xmin =160, Xmax = 1050, Ymin =290 ,Ymax = 780): """ 检测料袋相关信息 + :param Use_Pose_Model_Pro: True: 选用关键点推理 False : 选用分割模型推理 :param Point_isVision: 点云可视化 :param Box_isPoint: True 返回点云值; False 返回box相机坐标 :param First_Depth: True 返回料袋中心点深度最小的点云值; False 返回面积最大的料袋中心点云值 @@ -124,10 +130,19 @@ class Detection: Abnormal_data_point = point_new.copy() else: np.savetxt(save_point_name, point_new) - if self.use_openvino_model == False: - flag, det_cpu, dst_img, masks, category_names = self.model.model_inference(img, 0) + + if self.use_pose_model and Use_Pose_Model_Pro: + real_model_pro_isPose = True else: - flag, det_cpu, scores, masks, category_names = self.model.segment_objects(img) + real_model_pro_isPose = False + + if real_model_pro_isPose: + flag, det_cpu, category_names, score_list = self.model_pose.model_inference(img) + else: + if self.use_openvino_model == False: + flag, det_cpu, dst_img, masks, category_names = self.model_seg.model_inference(img, 0) + else: + flag, det_cpu, scores, masks, category_names = self.model_seg.segment_objects(img) if flag == 1: xyz = [] nx_ny_nz = [] @@ -148,13 +163,23 @@ class Detection: for i, item in enumerate(det_cpu): # 画box - box_x1, box_y1, box_x2, box_y2 = item[0:4].astype(np.int32) - if self.use_openvino_model == False: - label = category_names[int(item[5])] + if real_model_pro_isPose: + label = category_names[i] + score = score_list[i] + box_x1 = item[0][0] + box_y1 = item[0][1] + box_x2 = item[3][0] + box_y2 = item[3][1] + pass else: - label = class_names[int(item[4])] + box_x1, box_y1, box_x2, box_y2 = item[0:4].astype(np.int32) + if self.use_openvino_model == False: + label = category_names[int(item[5])] + score = item[4] + else: + label = class_names[int(item[4])] + score = item[4] rand_color = (0, 255, 255) - score = item[4] org = (int((box_x1 + box_x2) / 2), int((box_y1 + box_y2) / 2)) x_center = int((box_x1 + box_x2) / 2) y_center = int((box_y1 + box_y2) / 2) @@ -164,75 +189,117 @@ class Detection: thickness=2) # 画mask # mask = masks[i].cpu().numpy().astype(int) - if self.use_openvino_model == False: - mask = masks[i].cpu().data.numpy().astype(int) + if real_model_pro_isPose: + # 创建一个与输入数组相同形状的掩码,初始值全为 0 + mask = np.zeros(pm.shape[:2], dtype=np.uint8) + # 将四点坐标转换为 numpy 数组 + if item[0][0] < item[1][0]: + arr = [[item[0][0], item[0][1]], + [item[1][0], item[1][1]], + [item[3][0], item[3][1]], + [item[2][0], item[2][1]]] + # new_points.reshape((-1, 1, 2)) + else: + arr = [[item[3][0], item[3][1]], + [item[2][0], item[2][1]], + [item[0][0], item[0][1]], + [item[1][0], item[1][1]]] + box = arr.copy() + box_outside = arr.copy() + box = shrink_quadrilateral(box, Height_reduce) + pts = np.array(box, np.int32) + # 将四点构成的四边形区域在掩码上标记为 255 + cv2.fillPoly(mask, [pts], 255) + # 根据掩码提取对应区域的数据 + pm_seg = pm[mask == 255] + # box =[[[item[0][0]+width_reduce, 
item[0][1]+Height_reduce]], + # [[item[1][0]-width_reduce, item[1][1]+Height_reduce]], + # [[item[3][0]-width_reduce, item[3][1]-Height_reduce]], + # [[item[2][0]+width_reduce, item[2][1]-Height_reduce]]] + box = box.reshape((-1, 1, 2)) + # box = np.array(box) + # 内缩 + # box_outside = [[[item[0][0], item[0][1]]], + # [[item[1][0], item[1][1]]], + # [[item[3][0], item[3][1]]], + # [[item[2][0], item[2][1]]]] # 外框 + box_outside = np.array(box_outside) + box_outside = box_outside.reshape((-1, 1, 2)) + # box_outside = np.array(box_outside) else: - mask = masks[i].astype(int) - mask = mask[box_y1:box_y2, box_x1:box_x2] + if self.use_openvino_model == False: + mask = masks[i].cpu().data.numpy().astype(int) + else: + mask = masks[i].astype(int) + mask = mask[box_y1:box_y2, box_x1:box_x2] - # mask = masks[i].numpy().astype(int) - h, w = box_y2 - box_y1, box_x2 - box_x1 - mask_colored = np.zeros((h, w, 3), dtype=np.uint8) - mask_colored[np.where(mask)] = rand_color - ################################## - imgray = cv2.cvtColor(mask_colored, cv2.COLOR_BGR2GRAY) - # cv2.imshow('mask',imgray) - # cv2.waitKey(1) - # 2、二进制图像 - ret, binary = cv2.threshold(imgray, 10, 255, 0) - # 阈值 二进制图像 - # cv2.imshow('bin',binary) - # cv2.waitKey(1) - contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE) - # all_point_list = contours_in(contours) - # print(len(all_point_list)) - max_contour = None - max_perimeter = 0 - for contour in contours: # 排除小分割区域或干扰区域 - perimeter = cv2.arcLength(contour, True) - if perimeter > max_perimeter: - max_perimeter = perimeter - max_contour = contour + # mask = masks[i].numpy().astype(int) + h, w = box_y2 - box_y1, box_x2 - box_x1 + mask_colored = np.zeros((h, w, 3), dtype=np.uint8) + mask_colored[np.where(mask)] = rand_color + ################################## + imgray = cv2.cvtColor(mask_colored, cv2.COLOR_BGR2GRAY) + # cv2.imshow('mask',imgray) + # cv2.waitKey(1) + # 2、二进制图像 + ret, binary = cv2.threshold(imgray, 10, 255, 0) + # 阈值 二进制图像 + # cv2.imshow('bin',binary) + # cv2.waitKey(1) + contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE) + # all_point_list = contours_in(contours) + # print(len(all_point_list)) + max_contour = None + max_perimeter = 0 + for contour in contours: # 排除小分割区域或干扰区域 + perimeter = cv2.arcLength(contour, True) + if perimeter > max_perimeter: + max_perimeter = perimeter + max_contour = contour - ''' - 拟合最小外接矩形,计算矩形中心 - ''' + ''' + 拟合最小外接矩形,计算矩形中心 + ''' - rect = cv2.minAreaRect(max_contour) - if rect[1][0]-width_reduce > 30 and rect[1][1]-Height_reduce > 30: - rect_reduce = ( - (rect[0][0], rect[0][1]), (rect[1][0] - width_reduce, rect[1][1] - Height_reduce), rect[2]) - else: - rect_reduce = ( - (rect[0][0], rect[0][1]), (rect[1][0], rect[1][1]), rect[2]) + rect = cv2.minAreaRect(max_contour) + if rect[1][0]-width_reduce > 30 and rect[1][1]-Height_reduce > 30: + rect_reduce = ( + (rect[0][0], rect[0][1]), (rect[1][0] - width_reduce, rect[1][1] - Height_reduce), rect[2]) + else: + rect_reduce = ( + (rect[0][0], rect[0][1]), (rect[1][0], rect[1][1]), rect[2]) - # cv2.boxPoints可以将轮廓点转换为四个角点坐标 - box_outside = cv2.boxPoints(rect) - # 这一步不影响后面的画图,但是可以保证四个角点坐标为顺时针 - startidx = box_outside.sum(axis=1).argmin() - box_outside = np.roll(box_outside, 4 - startidx, 0) - box_outside = np.intp(box_outside) - box_outside = box_outside.reshape((-1, 1, 2)).astype(np.int32) + # cv2.boxPoints可以将轮廓点转换为四个角点坐标 + box_outside = cv2.boxPoints(rect) + # 这一步不影响后面的画图,但是可以保证四个角点坐标为顺时针 + startidx = box_outside.sum(axis=1).argmin() 
+ box_outside = np.roll(box_outside, 4 - startidx, 0) + box_outside = np.intp(box_outside) + box_outside = box_outside.reshape((-1, 1, 2)).astype(np.int32) - # cv2.boxPoints可以将轮廓点转换为四个角点坐标 - box_reduce = cv2.boxPoints(rect_reduce) - startidx = box_reduce.sum(axis=1).argmin() - box_reduce = np.roll(box_reduce, 4 - startidx, 0) - box_reduce = np.intp(box_reduce) - box_reduce = box_reduce.reshape((-1, 1, 2)).astype(np.int32) + # cv2.boxPoints可以将轮廓点转换为四个角点坐标 + box_reduce = cv2.boxPoints(rect_reduce) + startidx = box_reduce.sum(axis=1).argmin() + box_reduce = np.roll(box_reduce, 4 - startidx, 0) + box_reduce = np.intp(box_reduce) + box_reduce = box_reduce.reshape((-1, 1, 2)).astype(np.int32) + box_outside = box_outside + [[[box_x1, box_y1]], [[box_x1, box_y1]], [[box_x1, box_y1]], + [[box_x1, box_y1]]] + box = box_reduce + [[[box_x1, box_y1]], [[box_x1, box_y1]], [[box_x1, box_y1]], + [[box_x1, box_y1]]] - ''' - 提取区域范围内的(x, y) - ''' - mask_inside = np.zeros(binary.shape, np.uint8) - cv2.fillPoly(mask_inside, [box_reduce], (255)) - pixel_point2 = cv2.findNonZero(mask_inside) - # result = np.zeros_like(color_image) - select_point = [] - for i in range(pixel_point2.shape[0]): - select_point.append(pm[pixel_point2[i][0][1]+box_y1, pixel_point2[i][0][0]+box_x1]) - select_point = np.array(select_point) - pm_seg = select_point.reshape(-1, 3) + ''' + 提取区域范围内的(x, y) + ''' + mask_inside = np.zeros(binary.shape, np.uint8) + cv2.fillPoly(mask_inside, [box_reduce], (255)) + pixel_point2 = cv2.findNonZero(mask_inside) + # result = np.zeros_like(color_image) + select_point = [] + for i in range(pixel_point2.shape[0]): + select_point.append(pm[pixel_point2[i][0][1]+box_y1, pixel_point2[i][0][0]+box_x1]) + select_point = np.array(select_point) + pm_seg = select_point.reshape(-1, 3) pm_seg = pm_seg[~np.isnan(pm_seg).all(axis=-1), :] # 剔除 nan if pm_seg.size < 100: print("分割点云数量较少,无法拟合平面") continue @@ -255,9 +322,6 @@ class Detection: # outlier_cloud.paint_uniform_color([0, 1, 0]) # o3d.visualization.draw_geometries([inlier_cloud, outlier_cloud]) - box_outside = box_outside + [[[box_x1, box_y1]], [[box_x1, box_y1]], [[box_x1, box_y1]],[[box_x1, box_y1]]] - box = box_reduce + [[[box_x1, box_y1]], [[box_x1, box_y1]], [[box_x1, box_y1]], [[box_x1, box_y1]]] - box[0][0][1], box[0][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[0][0][1], box[0][0][0]) box[1][0][1], box[1][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[1][0][1], box[1][0][0]) box[2][0][1], box[2][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[2][0][1], box[2][0][0]) @@ -277,7 +341,7 @@ class Detection: point_x, point_y, point_z = remove_nan_mean_value(pm, y_rotation_center, x_rotation_center, iter_max=Iter_Max_Pixel) if x_rotation_center < Xmin or x_rotation_center > Xmax or y_rotation_center < Ymin or y_rotation_center > Ymax: continue - cv2.circle(img, (x_rotation_center, y_rotation_center), 4, (255, 255, 255), 5) # 标出中心点 + cv2.circle(img, (x_rotation_center, y_rotation_center), 2, (255, 255, 255), 3) # 标出中心点 if np.isnan(point_x): # 点云值为无效值 continue else: @@ -299,8 +363,11 @@ class Detection: elif self.cameraType=='Pe': xyz.append([point_x, point_y, point_z]) Depth_Z.append(point_z) + if real_model_pro_isPose: + RegionalArea.append(0) + else: + RegionalArea.append(cv2.contourArea(max_contour)) nx_ny_nz.append([a, b, c]) - RegionalArea.append(cv2.contourArea(max_contour)) uv.append([x_rotation_center, y_rotation_center]) seg_point.append(pm_seg) cv2.polylines(img, [box], True, (0, 255, 0), 2) @@ -314,7 +381,7 @@ class Detection: np.savetxt(save_point_name, Abnormal_data_point) return 1, img, None, 
None, None else: - cv2.circle(img, (uv[_idx][0], uv[_idx][1]), 30, (0, 0, 255), 20) # 标出中心点 + cv2.circle(img, (uv[_idx][0], uv[_idx][1]), 30, (0, 0, 255), 10) # 标出中心点 if Point_isVision==True: pcd = o3d.geometry.PointCloud() diff --git a/Vision/camera_coordinate_dete_img.py b/Vision/camera_coordinate_dete_img.py index d5fdcd2..600914f 100644 --- a/Vision/camera_coordinate_dete_img.py +++ b/Vision/camera_coordinate_dete_img.py @@ -12,43 +12,86 @@ import numpy as np import cv2 import open3d as o3d import time +import os + from Vision.tool.CameraRVC import camera_rvc -from Vision.tool.CameraPe import camera_pe +from Vision.tool.CameraPe_color2depth import camera_pe as camera_pe_color2depth +from Vision.tool.CameraPe_depth2color import camera_pe as camera_pe_depth2color from Vision.yolo.yolov8_pt_seg import yolov8_segment from Vision.yolo.yolov8_openvino import yolov8_segment_openvino +from Vision.yolo.yolov8_pt_pose import yolov8_pose from Vision.tool.utils import find_position from Vision.tool.utils import class_names from Vision.tool.utils import get_disk_space from Vision.tool.utils import remove_nan_mean_value -from Vision.tool.utils import out_bounds_dete -from Vision.tool.utils import uv_to_XY -import os +from Vision.tool.utils import out_bounds_dete, find_closest_point_index +from Vision.tool.utils import uv_to_XY, shrink_quadrilateral + class Detection: - def __init__(self, use_openvino_model=False, cameraType = 'RVC'): + def __init__(self, use_openvino_model=False, use_pose_model=True, use_seg_pt_model=True, cameraType='Pe', + alignmentType='color2depth'): # cameraType = 'RVC' or cameraType = 'Pe' + """ + 初始化相机及模型 + :param use_openvino_model: 加载分割模型 + :param use_pose_model: 加载关键点pt模型 + :param use_seg_pt_model: 加载分割pt模型 + :param use_openvino_model: 选择模型,默认使用openvino + :param cameraType: 选择相机 如本相机 'RVC', 图漾相机 'Pe' + :param alignmentType: 相机对齐方式 color2depth:彩色图对齐深度图 ;depth2color:深度图对齐彩色图 + + """ + if use_seg_pt_model: # 优先使用pt模型 + use_openvino_model = False + elif use_openvino_model: + use_seg_pt_model = False self.use_openvino_model = use_openvino_model self.cameraType = cameraType - if self.use_openvino_model == False: - model_path = ''.join([os.getcwd(), '/Vision/model/pt/one_bag.pt']) - device = 'cpu' - self.model = yolov8_segment() - self.model.load_model(model_path, device) - else: - model_path = ''.join([os.getcwd(), '/Vision/model/openvino/one_bag.xml']) - device = 'CPU' - self.model = yolov8_segment_openvino(model_path, device, conf_thres=0.6, iou_thres=0.6) - + self.use_pose_model = use_pose_model + self.use_seg_pt_model = use_seg_pt_model + self.alignmentType = alignmentType + # 相机加载 + # if self.cameraType == 'RVC': + # self.camera_rvc = camera_rvc() + # self.seg_distance_threshold = 10 # 1厘米 + # elif self.cameraType == 'Pe': + # if self.alignmentType == 'color2depth': + # self.camera_rvc = camera_pe_color2depth() + # else: + # self.camera_rvc = camera_pe_depth2color() + # self.seg_distance_threshold = 15 # 2厘米 + # else: + # print('相机参数错误') + # return img_path = ''.join([os.getcwd(), '/Vision/model/data/2024_11_29_10_05_58.png']) point_path = ''.join([os.getcwd(), '/Vision/model/data/2024_11_29_10_05_58.xyz']) self.img = cv2.imread(img_path) self.point = np.loadtxt(point_path).reshape((self.img.shape[0], self.img.shape[1], 3)) - pass + self.seg_distance_threshold = 10 # 1厘米 + # 加载openvino-seg + if self.use_openvino_model: + model_path = ''.join([os.getcwd(), '/Vision/model/openvino/one_bag.xml']) + device = 'CPU' + self.model_seg = yolov8_segment_openvino(model_path, device, 
conf_thres=0.6, iou_thres=0.6) + # 加载pt-seg + if self.use_seg_pt_model: + model_path = ''.join([os.getcwd(), '/Vision/model/pt/one_bag.pt']) + device = 'cpu' + self.model_seg = yolov8_segment() + self.model_seg.load_model(model_path, device) + # 加载pt-pose + if self.use_pose_model: + model_path = ''.join([os.getcwd(), '/Vision/model/pt/one_bag_pose.pt']) + device = 'cpu' + self.model_pose = yolov8_pose(model_path, device) - - def get_position(self, Point_isVision=False, Box_isPoint=True, First_Depth =True, Iter_Max_Pixel = 30, save_img_point=0, seg_distance_threshold = 10, Height_reduce = 50, width_reduce = 50): + def get_position(self, Use_Pose_Model_Pro=False, Point_isVision=False, Box_isPoint=True, First_Depth=True, + Iter_Max_Pixel=30, save_img_point=0, Height_reduce=80, width_reduce=60, Xmin=160, Xmax=1050, + Ymin=290, Ymax=780): """ 检测料袋相关信息 + :param Use_Pose_Model_Pro: True: 选用关键点推理 False : 选用分割模型推理 :param Point_isVision: 点云可视化 :param Box_isPoint: True 返回点云值; False 返回box相机坐标 :param First_Depth: True 返回料袋中心点深度最小的点云值; False 返回面积最大的料袋中心点云值 @@ -56,267 +99,636 @@ class Detection: :param save_img_point: 0不保存 ; 1保存原图 ;2保存处理后的图 ; 3保存点云和原图;4 保存点云和处理后的图; 5 异常数据保存(点云NAN) :param Height_reduce: 检测框的高内缩像素 :param width_reduce: 检测框的宽内缩像素 + :param Xmin: 限定料袋中心点的范围 + :param Xmax: 限定料袋中心点的范围 + :param Ymin: 限定料袋中心点的范围 + :param Ymax: 限定料袋中心点的范围 + + :return ret: bool 相机是否正常工作 + :return img: ndarray 返回img + :return xyz: list 目标中心点云值形如[x,y,z] + :return nx_ny_nz: list 拟合平面法向量,形如[a,b,c] + :return box_list: list 内缩检测框四顶点,形如[[x1,y1],[],[],[]] + + """ + ret = 1 + img = self.img + pm = self.point + if True: + if ret == 1: + if save_img_point != 0: + if get_disk_space(path=os.getcwd()) < 15: # 内存小于15G,停止保存数据 + save_img_point = 0 + print('系统内存不足,无法保存数据') + else: + save_path = ''.join([os.getcwd(), '/Vision/model/data/', + time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime(time.time()))]) + save_img_name = ''.join([save_path, '.png']) + save_point_name = ''.join([save_path, '.xyz']) + if save_img_point == 5: + Abnormal_data_img = img.copy() + if save_img_point == 1 or save_img_point == 3: + cv2.imwrite(save_img_name, img) + if save_img_point == 3 or save_img_point == 4 or save_img_point == 5: + row_list = list(range(1, img.shape[0], 2)) + column_list = list(range(1, img.shape[1], 2)) + pm_save = pm.copy() + pm_save1 = np.delete(pm_save, row_list, axis=0) + point_new = np.delete(pm_save1, column_list, axis=1) + point_new = point_new.reshape(-1, 3) + if save_img_point == 5: + Abnormal_data_point = point_new.copy() + else: + np.savetxt(save_point_name, point_new) + + if self.use_pose_model and Use_Pose_Model_Pro: + real_model_pro_isPose = True + else: + real_model_pro_isPose = False + + if real_model_pro_isPose: + flag, det_cpu, category_names, score_list = self.model_pose.model_inference(img) + else: + if self.use_openvino_model == False: + flag, det_cpu, dst_img, masks, category_names = self.model_seg.model_inference(img, 0) + else: + flag, det_cpu, scores, masks, category_names = self.model_seg.segment_objects(img) + if flag == 1: + xyz = [] + nx_ny_nz = [] + RegionalArea = [] + Depth_Z = [] + uv = [] + seg_point = [] + box_list = [] + if Point_isVision == True: + pm2 = pm.copy() + pm2 = pm2.reshape(-1, 3) + pm2 = pm2[~np.isnan(pm2).all(axis=-1), :] + pm2[:, 2] = pm2[:, 2] + 0.25 + pcd2 = o3d.geometry.PointCloud() + pcd2.points = o3d.utility.Vector3dVector(pm2) + # o3d.visualization.draw_geometries([pcd2]) + + for i, item in enumerate(det_cpu): + + # 画box + if real_model_pro_isPose: + label = category_names[i] + 
score = score_list[i] + box_x1 = item[0][0] + box_y1 = item[0][1] + box_x2 = item[3][0] + box_y2 = item[3][1] + pass + else: + box_x1, box_y1, box_x2, box_y2 = item[0:4].astype(np.int32) + if self.use_openvino_model == False: + label = category_names[int(item[5])] + score = item[4] + else: + label = class_names[int(item[4])] + score = item[4] + rand_color = (0, 255, 255) + org = (int((box_x1 + box_x2) / 2), int((box_y1 + box_y2) / 2)) + x_center = int((box_x1 + box_x2) / 2) + y_center = int((box_y1 + box_y2) / 2) + text = '{}|{:.2f}'.format(label, score) + cv2.putText(img, text, org=org, fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.8, + color=rand_color, + thickness=2) + # 画mask + # mask = masks[i].cpu().numpy().astype(int) + if real_model_pro_isPose: + # 创建一个与输入数组相同形状的掩码,初始值全为 0 + mask = np.zeros(pm.shape[:2], dtype=np.uint8) + # 将四点坐标转换为 numpy 数组 + if item[0][0] < item[1][0]: + arr = [[item[0][0], item[0][1]], + [item[1][0], item[1][1]], + [item[3][0], item[3][1]], + [item[2][0], item[2][1]]] + # new_points.reshape((-1, 1, 2)) + else: + arr = [[item[3][0], item[3][1]], + [item[2][0], item[2][1]], + [item[0][0], item[0][1]], + [item[1][0], item[1][1]]] + box = arr.copy() + box_outside = arr.copy() + box = shrink_quadrilateral(box, width_reduce) + pts = np.array(box, np.int32) + # 将四点构成的四边形区域在掩码上标记为 255 + cv2.fillPoly(mask, [pts], 255) + # 根据掩码提取对应区域的数据 + pm_seg = pm[mask == 255] + # box =[[[item[0][0]+width_reduce, item[0][1]+Height_reduce]], + # [[item[1][0]-width_reduce, item[1][1]+Height_reduce]], + # [[item[3][0]-width_reduce, item[3][1]-Height_reduce]], + # [[item[2][0]+width_reduce, item[2][1]-Height_reduce]]] + box = box.reshape((-1, 1, 2)) + # box = np.array(box) + # 内缩 + # box_outside = [[[item[0][0], item[0][1]]], + # [[item[1][0], item[1][1]]], + # [[item[3][0], item[3][1]]], + # [[item[2][0], item[2][1]]]] # 外框 + box_outside = np.array(box_outside) + box_outside = box_outside.reshape((-1, 1, 2)) + # box_outside = np.array(box_outside) + else: + if self.use_openvino_model == False: + mask = masks[i].cpu().data.numpy().astype(int) + else: + mask = masks[i].astype(int) + mask = mask[box_y1:box_y2, box_x1:box_x2] + + # mask = masks[i].numpy().astype(int) + h, w = box_y2 - box_y1, box_x2 - box_x1 + mask_colored = np.zeros((h, w, 3), dtype=np.uint8) + mask_colored[np.where(mask)] = rand_color + ################################## + imgray = cv2.cvtColor(mask_colored, cv2.COLOR_BGR2GRAY) + # cv2.imshow('mask',imgray) + # cv2.waitKey(1) + # 2、二进制图像 + ret, binary = cv2.threshold(imgray, 10, 255, 0) + # 阈值 二进制图像 + # cv2.imshow('bin',binary) + # cv2.waitKey(1) + contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE) + # all_point_list = contours_in(contours) + # print(len(all_point_list)) + max_contour = None + max_perimeter = 0 + for contour in contours: # 排除小分割区域或干扰区域 + perimeter = cv2.arcLength(contour, True) + if perimeter > max_perimeter: + max_perimeter = perimeter + max_contour = contour + + ''' + 拟合最小外接矩形,计算矩形中心 + ''' + + rect = cv2.minAreaRect(max_contour) + if rect[1][0] - width_reduce > 30 and rect[1][1] - Height_reduce > 30: + rect_reduce = ( + (rect[0][0], rect[0][1]), (rect[1][0] - width_reduce, rect[1][1] - Height_reduce), + rect[2]) + else: + rect_reduce = ( + (rect[0][0], rect[0][1]), (rect[1][0], rect[1][1]), rect[2]) + + # cv2.boxPoints可以将轮廓点转换为四个角点坐标 + box_outside = cv2.boxPoints(rect) + # 这一步不影响后面的画图,但是可以保证四个角点坐标为顺时针 + startidx = box_outside.sum(axis=1).argmin() + box_outside = np.roll(box_outside, 4 - startidx, 0) + box_outside = 
np.intp(box_outside) + box_outside = box_outside.reshape((-1, 1, 2)).astype(np.int32) + + # cv2.boxPoints可以将轮廓点转换为四个角点坐标 + box_reduce = cv2.boxPoints(rect_reduce) + startidx = box_reduce.sum(axis=1).argmin() + box_reduce = np.roll(box_reduce, 4 - startidx, 0) + box_reduce = np.intp(box_reduce) + box_reduce = box_reduce.reshape((-1, 1, 2)).astype(np.int32) + box_outside = box_outside + [[[box_x1, box_y1]], [[box_x1, box_y1]], [[box_x1, box_y1]], + [[box_x1, box_y1]]] + box = box_reduce + [[[box_x1, box_y1]], [[box_x1, box_y1]], [[box_x1, box_y1]], + [[box_x1, box_y1]]] + + ''' + 提取区域范围内的(x, y) + ''' + mask_inside = np.zeros(binary.shape, np.uint8) + cv2.fillPoly(mask_inside, [box_reduce], (255)) + pixel_point2 = cv2.findNonZero(mask_inside) + # result = np.zeros_like(color_image) + select_point = [] + for i in range(pixel_point2.shape[0]): + select_point.append(pm[pixel_point2[i][0][1] + box_y1, pixel_point2[i][0][0] + box_x1]) + select_point = np.array(select_point) + pm_seg = select_point.reshape(-1, 3) + pm_seg = pm_seg[~np.isnan(pm_seg).all(axis=-1), :] # 剔除 nan + if pm_seg.size < 100: + print("分割点云数量较少,无法拟合平面") + continue + + # cv2.imshow('result', point_result) + ''' + 拟合平面,计算法向量 + ''' + pcd = o3d.geometry.PointCloud() + pcd.points = o3d.utility.Vector3dVector(pm_seg) + plane_model, inliers = pcd.segment_plane(distance_threshold=self.seg_distance_threshold, + ransac_n=5, + num_iterations=5000) + [a, b, c, d] = plane_model + # print(f"Plane equation: {a:.2f}x + {b:.2f}y + {c:.2f}z + {d:.2f} = 0") + # inlier_cloud = pcd.select_by_index(inliers) # 点云可视化 + # inlier_cloud.paint_uniform_color([1.0, 0, 0]) + # outlier_cloud = pcd.select_by_index(inliers, invert=True) + # outlier_cloud.paint_uniform_color([0, 1, 0]) + # o3d.visualization.draw_geometries([inlier_cloud, outlier_cloud]) + + box[0][0][1], box[0][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[0][0][1], + box[0][0][0]) + box[1][0][1], box[1][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[1][0][1], + box[1][0][0]) + box[2][0][1], box[2][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[2][0][1], + box[2][0][0]) + box[3][0][1], box[3][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[3][0][1], + box[3][0][0]) + if Box_isPoint == True: + box_point_x1, box_point_y1, box_point_z1 = remove_nan_mean_value(pm, box[0][0][1], + box[0][0][0], + iter_max=Iter_Max_Pixel) + box_point_x2, box_point_y2, box_point_z2 = remove_nan_mean_value(pm, box[1][0][1], + box[1][0][0], + iter_max=Iter_Max_Pixel) + box_point_x3, box_point_y3, box_point_z3 = remove_nan_mean_value(pm, box[2][0][1], + box[2][0][0], + iter_max=Iter_Max_Pixel) + box_point_x4, box_point_y4, box_point_z4 = remove_nan_mean_value(pm, box[3][0][1], + box[3][0][0], + iter_max=Iter_Max_Pixel) + else: + x1, y1, z1 = uv_to_XY(box[0][0][0], box[0][0][1]) + x2, y2, z2 = uv_to_XY(box[1][0][0], box[1][0][1]) + x3, y3, z3 = uv_to_XY(box[2][0][0], box[2][0][1]) + x4, y4, z4 = uv_to_XY(box[3][0][0], box[3][0][1]) + x_rotation_center = int((box[0][0][0] + box[1][0][0] + box[2][0][0] + box[3][0][0]) / 4) + y_rotation_center = int((box[0][0][1] + box[1][0][1] + box[2][0][1] + box[3][0][1]) / 4) + point_x, point_y, point_z = remove_nan_mean_value(pm, y_rotation_center, x_rotation_center, + iter_max=Iter_Max_Pixel) + if x_rotation_center < Xmin or x_rotation_center > Xmax or y_rotation_center < Ymin or y_rotation_center > Ymax: + continue + cv2.circle(img, (x_rotation_center, y_rotation_center), 2, (255, 255, 255), 3) # 标出中心点 + if np.isnan(point_x): # 点云值为无效值 + continue + else: + 
if Box_isPoint == True: + box_list.append( + [[box_point_x1, box_point_y1, box_point_z1], + [box_point_x2, box_point_y2, box_point_z2], + [box_point_x3, box_point_y3, box_point_z3], + [box_point_x4, box_point_y4, box_point_z4]]) + else: + box_list.append([[x1, y1, z1], + [x2, y2, z2], + [x3, y3, z3], + [x4, y4, z4], + ]) + if self.cameraType == 'RVC': + xyz.append([point_x * 1000, point_y * 1000, point_z * 1000]) + Depth_Z.append(point_z * 1000) + elif self.cameraType == 'Pe': + xyz.append([point_x, point_y, point_z]) + Depth_Z.append(point_z) + if real_model_pro_isPose: + RegionalArea.append(0) + else: + RegionalArea.append(cv2.contourArea(max_contour)) + nx_ny_nz.append([a, b, c]) + uv.append([x_rotation_center, y_rotation_center]) + seg_point.append(pm_seg) + cv2.polylines(img, [box], True, (0, 255, 0), 2) + cv2.polylines(img, [box_outside], True, (226, 12, 89), 2) + + _idx = find_position(Depth_Z, RegionalArea, 100, First_Depth) + + if _idx == None: + if save_img_point == 5: + cv2.imwrite(save_img_name, Abnormal_data_img) + np.savetxt(save_point_name, Abnormal_data_point) + return 1, img, None, None, None + else: + cv2.circle(img, (uv[_idx][0], uv[_idx][1]), 30, (0, 0, 255), 10) # 标出中心点 + + if Point_isVision == True: + pcd = o3d.geometry.PointCloud() + pcd.points = o3d.utility.Vector3dVector(seg_point[_idx]) + plane_model, inliers = pcd.segment_plane(distance_threshold=self.seg_distance_threshold, + ransac_n=5, + num_iterations=5000) + inlier_cloud = pcd.select_by_index(inliers) # 点云可视化 + inlier_cloud.paint_uniform_color([1.0, 0, 0]) + outlier_cloud = pcd.select_by_index(inliers, invert=True) + outlier_cloud.paint_uniform_color([0, 0, 1]) + o3d.visualization.draw_geometries([inlier_cloud, outlier_cloud, pcd2]) + if save_img_point == 2 or save_img_point == 4: + save_img = cv2.resize(img, (720, 540)) + cv2.imwrite(save_img_name, save_img) + return 1, img, xyz[_idx], nx_ny_nz[_idx], box_list[_idx] + else: + if save_img_point == 2 or save_img_point == 4: + save_img = cv2.resize(img, (720, 540)) + cv2.imwrite(save_img_name, save_img) + if save_img_point == 5: + cv2.imwrite(save_img_name, Abnormal_data_img) + np.savetxt(save_point_name, Abnormal_data_point) + return 1, img, None, None, None + + else: + print("RVC X Camera capture failed!") + return 0, None, None, None, None + + else: + print("RVC X Camera is not opened!") + return 0, None, None, None, None + + def get_position_and_depth(self, Point_isVision=False, Box_isPoint=True, First_Depth=True, + Target_pixel_threshold=200, Iter_Max_Pixel=30, save_img_point=0, Height_reduce=30, + width_reduce=30): + """ + 眼在手上,用于料袋顶层抓取,检测料袋相关信息 + :param Point_isVision: 点云可视化 + :param Box_isPoint: True 返回点云值; False 返回box相机坐标 + :param First_Depth: True 返回料袋中心点深度最小的点云值; False 返回面积最大的料袋中心点云值 + :param Target_pixel_threshold: [int] 设定像素阈值,判断是否可以抓取 + :param Iter_Max_Pixel: [int] 点云为NAN时,向该点周围寻找替代值,寻找最大区域(Iter_Max_Pixel×Iter_Max_Pixel) + :param save_img_point: 0不保存 ; 1保存原图 ;2保存处理后的图 ; 3保存点云和原图;4 保存点云和处理后的图; 5 异常数据保存(点云NAN) + :param Height_reduce: 检测框的高内缩像素 + :param width_reduce: 检测框的宽内缩像素 :return ret: bool 相机是否正常工作 :return img: ndarry 返回img :return xyz: list 目标中心点云值形如[x,y,z] :return nx_ny_nz: list 拟合平面法向量,形如[a,b,c] :return box_list: list 内缩检测框四顶点,形如[[x1,y1],[],[],[]] + """ - ret = 1 - img = self.img - pm = self.point - if ret == 1: - if save_img_point != 0: - if get_disk_space(path=os.getcwd())<15: # 内存小于15G,停止保存数据 - save_img_point = 0 - print('系统内存不足,无法保存数据') - else: - save_path = ''.join([os.getcwd(), '/Vision/model/data/', - 
time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime(time.time()))]) - save_img_name = ''.join([save_path, '.png']) - save_point_name = ''.join([save_path, '.xyz']) - if save_img_point == 5: - Abnormal_data_img = img.copy() - if save_img_point == 1 or save_img_point == 3: - cv2.imwrite(save_img_name, img) - if save_img_point == 3 or save_img_point == 4 or save_img_point == 5: - row_list = list(range(1, img.shape[0], 2)) - column_list = list(range(1, img.shape[1], 2)) - pm_save = pm.copy() - pm_save1 = np.delete(pm_save, row_list, axis=0) - point_new = np.delete(pm_save1, column_list, axis=1) - point_new = point_new.reshape(-1, 3) + ret, img, pm = self.camera_rvc.get_img_and_point_map() # 拍照,获取图像及 + if self.camera_rvc.caminit_isok == True: + if ret == 1: + if save_img_point != 0: + if get_disk_space(path=os.getcwd()) < 15: # 内存小于15G,停止保存数据 + save_img_point = 0 + print('系统内存不足,无法保存数据') + else: + save_path = ''.join([os.getcwd(), '/Vision/model/data/', + time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime(time.time()))]) + save_img_name = ''.join([save_path, '.png']) + save_point_name = ''.join([save_path, '.xyz']) if save_img_point == 5: - Abnormal_data_point = point_new.copy() + Abnormal_data_img = img.copy() + if save_img_point == 1 or save_img_point == 3: + cv2.imwrite(save_img_name, img) + if save_img_point == 3 or save_img_point == 4 or save_img_point == 5: + row_list = list(range(1, img.shape[0], 2)) + column_list = list(range(1, img.shape[1], 2)) + pm_save = pm.copy() + pm_save1 = np.delete(pm_save, row_list, axis=0) + point_new = np.delete(pm_save1, column_list, axis=1) + point_new = point_new.reshape(-1, 3) + if save_img_point == 5: + Abnormal_data_point = point_new.copy() + else: + np.savetxt(save_point_name, point_new) + if self.use_openvino_model == False: + flag, det_cpu, dst_img, masks, category_names = self.model.model_inference(img, 0) + else: + flag, det_cpu, scores, masks, category_names = self.model.segment_objects(img) + if flag == 1: + xyz = [] + nx_ny_nz = [] + RegionalArea = [] + Depth_Z = [] + uv = [] + seg_point = [] + box_list = [] + target_box_area = 0 + if Point_isVision == True: + pm2 = pm.copy() + pm2 = pm2.reshape(-1, 3) + pm2 = pm2[~np.isnan(pm2).all(axis=-1), :] + pm2[:, 2] = pm2[:, 2] + 0.25 + pcd2 = o3d.geometry.PointCloud() + pcd2.points = o3d.utility.Vector3dVector(pm2) + # o3d.visualization.draw_geometries([pcd2]) + + for i, item in enumerate(det_cpu): + target_box_area = 0 + # 画box + box_x1, box_y1, box_x2, box_y2 = item[0:4].astype(np.int32) + if self.use_openvino_model == False: + label = category_names[int(item[5])] else: - np.savetxt(save_point_name, point_new) + label = class_names[int(item[4])] + rand_color = (0, 255, 255) + score = item[4] + org = (int((box_x1 + box_x2) / 2), int((box_y1 + box_y2) / 2)) + x_center = int((box_x1 + box_x2) / 2) + y_center = int((box_y1 + box_y2) / 2) + text = '{}|{:.2f}'.format(label, score) + cv2.putText(img, text, org=org, fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.8, + color=rand_color, + thickness=2) + # 画mask + # mask = masks[i].cpu().numpy().astype(int) + if self.use_openvino_model == False: + mask = masks[i].cpu().data.numpy().astype(int) + else: + mask = masks[i].astype(int) + mask = mask[box_y1:box_y2, box_x1:box_x2] + # mask = masks[i].numpy().astype(int) + h, w = box_y2 - box_y1, box_x2 - box_x1 + mask_colored = np.zeros((h, w, 3), dtype=np.uint8) + mask_colored[np.where(mask)] = rand_color + ################################## + imgray = cv2.cvtColor(mask_colored, cv2.COLOR_BGR2GRAY) + # 
cv2.imshow('mask',imgray) + # cv2.waitKey(1) + # 2、二进制图像 + ret, binary = cv2.threshold(imgray, 10, 255, 0) + # 阈值 二进制图像 + # cv2.imshow('bin',binary) + # cv2.waitKey(1) + contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE) + # all_point_list = contours_in(contours) + # print(len(all_point_list)) + max_contour = None + max_perimeter = 0 + for contour in contours: # 排除小分割区域或干扰区域 + perimeter = cv2.arcLength(contour, True) + if perimeter > max_perimeter: + max_perimeter = perimeter + max_contour = contour - if self.use_openvino_model == False: - flag, det_cpu, dst_img, masks, category_names = self.model.model_inference(img, 0) - else: - flag, det_cpu, scores, masks, category_names = self.model.segment_objects(img) - if flag == 1: - xyz = [] - nx_ny_nz = [] - RegionalArea = [] - Depth_Z = [] - uv = [] - seg_point = [] - box_list = [] + ''' + 拟合最小外接矩形,计算矩形中心 + ''' - if Point_isVision == True: - pm2 = pm.copy() - pm2 = pm2.reshape(-1, 3) - pm2 = pm2[~np.isnan(pm2).all(axis=-1), :] - pm2[:, 2] = pm2[:, 2] + 0.25 - pcd2 = o3d.geometry.PointCloud() - pcd2.points = o3d.utility.Vector3dVector(pm2) - # o3d.visualization.draw_geometries([pcd2]) + rect = cv2.minAreaRect(max_contour) + if rect[1][0] - width_reduce > 30 and rect[1][1] - Height_reduce > 30: + rect_reduce = ( + (rect[0][0], rect[0][1]), (rect[1][0] - width_reduce, rect[1][1] - Height_reduce), + rect[2]) + else: + rect_reduce = ( + (rect[0][0], rect[0][1]), (rect[1][0], rect[1][1]), rect[2]) + target_box_area = rect[1][0] * rect[1][1] + # cv2.boxPoints可以将轮廓点转换为四个角点坐标 + box_outside = cv2.boxPoints(rect) + # 这一步不影响后面的画图,但是可以保证四个角点坐标为顺时针 + startidx = box_outside.sum(axis=1).argmin() + box_outside = np.roll(box_outside, 4 - startidx, 0) + box_outside = np.intp(box_outside) + box_outside = box_outside.reshape((-1, 1, 2)).astype(np.int32) - for i, item in enumerate(det_cpu): + # cv2.boxPoints可以将轮廓点转换为四个角点坐标 + box_reduce = cv2.boxPoints(rect_reduce) + startidx = box_reduce.sum(axis=1).argmin() + box_reduce = np.roll(box_reduce, 4 - startidx, 0) + box_reduce = np.intp(box_reduce) + box_reduce = box_reduce.reshape((-1, 1, 2)).astype(np.int32) - # 画box - box_x1, box_y1, box_x2, box_y2 = item[0:4].astype(np.int32) - if self.use_openvino_model == False: - label = category_names[int(item[5])] - else: - label = class_names[int(item[4])] - rand_color = (0, 255, 255) - score = item[4] - org = (int((box_x1 + box_x2) / 2), int((box_y1 + box_y2) / 2)) - x_center = int((box_x1 + box_x2) / 2) - y_center = int((box_y1 + box_y2) / 2) - text = '{}|{:.2f}'.format(label, score) - cv2.putText(img, text, org=org, fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.8, - color=rand_color, - thickness=2) - # 画mask - # mask = masks[i].cpu().numpy().astype(int) - if self.use_openvino_model == False: - mask = masks[i].cpu().data.numpy().astype(int) - else: - mask = masks[i].astype(int) - mask = mask[box_y1:box_y2, box_x1:box_x2] + ''' + 提取区域范围内的(x, y) + ''' + mask_inside = np.zeros(binary.shape, np.uint8) + cv2.fillPoly(mask_inside, [box_reduce], (255)) + pixel_point2 = cv2.findNonZero(mask_inside) + # result = np.zeros_like(color_image) + select_point = [] + for i in range(pixel_point2.shape[0]): + select_point.append(pm[pixel_point2[i][0][1] + box_y1, pixel_point2[i][0][0] + box_x1]) + select_point = np.array(select_point) + pm_seg = select_point.reshape(-1, 3) + pm_seg = pm_seg[~np.isnan(pm_seg).all(axis=-1), :] # 剔除 nan + if pm_seg.size < 100: + print("分割点云数量较少,无法拟合平面") + continue - # mask = masks[i].numpy().astype(int) - h, w = box_y2 - box_y1, 
box_x2 - box_x1 - mask_colored = np.zeros((h, w, 3), dtype=np.uint8) - mask_colored[np.where(mask)] = rand_color - ################################## - imgray = cv2.cvtColor(mask_colored, cv2.COLOR_BGR2GRAY) - # cv2.imshow('mask',imgray) - # cv2.waitKey(1) - # 2、二进制图像 - ret, binary = cv2.threshold(imgray, 10, 255, 0) - # 阈值 二进制图像 - # cv2.imshow('bin',binary) - # cv2.waitKey(0) - contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE) - # all_point_list = contours_in(contours) - # print(len(all_point_list)) - max_contour = None - max_perimeter = 0 - for contour in contours: # 排除小分割区域或干扰区域 - perimeter = cv2.arcLength(contour, True) - if perimeter > max_perimeter: - max_perimeter = perimeter - max_contour = contour - ''' - 拟合最小外接矩形,计算矩形中心 - ''' + # cv2.imshow('result', point_result) + ''' + 拟合平面,计算法向量 + ''' + pcd = o3d.geometry.PointCloud() + pcd.points = o3d.utility.Vector3dVector(pm_seg) + plane_model, inliers = pcd.segment_plane(distance_threshold=self.seg_distance_threshold, + ransac_n=5, + num_iterations=5000) + [a, b, c, d] = plane_model + # print(f"Plane equation: {a:.2f}x + {b:.2f}y + {c:.2f}z + {d:.2f} = 0") + # inlier_cloud = pcd.select_by_index(inliers) # 点云可视化 + # inlier_cloud.paint_uniform_color([1.0, 0, 0]) + # outlier_cloud = pcd.select_by_index(inliers, invert=True) + # outlier_cloud.paint_uniform_color([0, 1, 0]) + # o3d.visualization.draw_geometries([inlier_cloud, outlier_cloud]) - rect = cv2.minAreaRect(max_contour) - if rect[1][0] - width_reduce > 30 and rect[1][1] - Height_reduce > 30: - rect_reduce = ( - (rect[0][0], rect[0][1]), (rect[1][0] - width_reduce, rect[1][1] - Height_reduce), rect[2]) - else: - rect_reduce = ( - (rect[0][0], rect[0][1]), (rect[1][0], rect[1][1]), rect[2]) - # cv2.boxPoints可以将轮廓点转换为四个角点坐标 - box_outside = cv2.boxPoints(rect) - # 这一步不影响后面的画图,但是可以保证四个角点坐标为顺时针 - startidx = box_outside.sum(axis=1).argmin() - box_outside = np.roll(box_outside, 4 - startidx, 0) - box_outside = np.intp(box_outside) - box_outside = box_outside.reshape((-1, 1, 2)).astype(np.int32) + box_outside = box_outside + [[[box_x1, box_y1]], [[box_x1, box_y1]], [[box_x1, box_y1]], + [[box_x1, box_y1]]] + box = box_reduce + [[[box_x1, box_y1]], [[box_x1, box_y1]], [[box_x1, box_y1]], + [[box_x1, box_y1]]] - # cv2.boxPoints可以将轮廓点转换为四个角点坐标 - box_reduce = cv2.boxPoints(rect_reduce) - startidx = box_reduce.sum(axis=1).argmin() - box_reduce = np.roll(box_reduce, 4 - startidx, 0) - box_reduce = np.intp(box_reduce) - box_reduce = box_reduce.reshape((-1, 1, 2)).astype(np.int32) - - ''' - 提取区域范围内的(x, y) - ''' - mask_inside = np.zeros(binary.shape, np.uint8) - cv2.fillPoly(mask_inside, [box_reduce], (255)) - # cv2.drawContours(mask_inside, [max_contour], 0, 255, -1) - # cv2.imshow('mask_inside', mask_inside) - # cv2.waitKey(0) - pixel_point2 = cv2.findNonZero(mask_inside) - # result = np.zeros_like(color_image) - select_point = [] - for i in range(pixel_point2.shape[0]): - select_point.append(pm[pixel_point2[i][0][1] + box_y1, pixel_point2[i][0][0] + box_x1]) - select_point = np.array(select_point) - pm_seg = select_point.reshape(-1, 3) - pm_seg = pm_seg[~np.isnan(pm_seg).all(axis=-1), :] # 剔除 nan - if pm_seg.size < 100: - print("分割点云数量较少,无法拟合平面") - continue - # cv2.imshow('result', point_result) - ''' - 拟合平面,计算法向量 - ''' - pcd = o3d.geometry.PointCloud() - pcd.points = o3d.utility.Vector3dVector(pm_seg) - plane_model, inliers = pcd.segment_plane(distance_threshold=seg_distance_threshold, - ransac_n=5, - num_iterations=5000) - [a, b, c, d] = plane_model - - # 
print(f"Plane equation: {a:.2f}x + {b:.2f}y + {c:.2f}z + {d:.2f} = 0") - # inlier_cloud = pcd.select_by_index(inliers) # 点云可视化 - # inlier_cloud.paint_uniform_color([1.0, 0, 0]) - # outlier_cloud = pcd.select_by_index(inliers, invert=True) - # outlier_cloud.paint_uniform_color([0, 1, 0]) - # o3d.visualization.draw_geometries([inlier_cloud, outlier_cloud]) - - box = box_reduce + [[[box_x1, box_y1]], [[box_x1, box_y1]], [[box_x1, box_y1]], [[box_x1, box_y1]]] - box_outside = box_outside + [[[box_x1, box_y1]], [[box_x1, box_y1]], [[box_x1, box_y1]], [[box_x1, box_y1]]] - - box[0][0][1], box[0][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[0][0][1], box[0][0][0]) - box[1][0][1], box[1][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[1][0][1], box[1][0][0]) - box[2][0][1], box[2][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[2][0][1], box[2][0][0]) - box[3][0][1], box[3][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[3][0][1], box[3][0][0]) - if Box_isPoint == True: - box_point_x1, box_point_y1, box_point_z1 = remove_nan_mean_value(pm, box[0][0][1], box[0][0][0], iter_max=First_Depth) - box_point_x2, box_point_y2, box_point_z2 = remove_nan_mean_value(pm, box[1][0][1], box[1][0][0], iter_max=First_Depth) - box_point_x3, box_point_y3, box_point_z3 = remove_nan_mean_value(pm, box[2][0][1], box[2][0][0], iter_max=First_Depth) - box_point_x4, box_point_y4, box_point_z4 = remove_nan_mean_value(pm, box[3][0][1], box[3][0][0], iter_max=First_Depth) - else: - x1, y1, z1 = uv_to_XY(self.cameraType, box[0][0][0], box[0][0][1]) - x2, y2, z2 = uv_to_XY(self.cameraType, box[1][0][0], box[1][0][1]) - x3, y3, z3 = uv_to_XY(self.cameraType, box[2][0][0], box[2][0][1]) - x4, y4, z4 = uv_to_XY(self.cameraType, box[3][0][0], box[3][0][1]) - x_rotation_center = int((box[0][0][0] + box[1][0][0] + box[2][0][0] + box[3][0][0]) / 4) - y_rotation_center = int((box[0][0][1] + box[1][0][1] + box[2][0][1] + box[3][0][1]) / 4) - point_x, point_y, point_z = remove_nan_mean_value(pm, y_rotation_center, x_rotation_center, iter_max=First_Depth) - cv2.circle(img, (x_rotation_center, y_rotation_center), 4, (255, 255, 255), 5) # 标出中心点 - if np.isnan(point_x): # 点云值为无效值 - continue - else: + box[0][0][1], box[0][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[0][0][1], + box[0][0][0]) + box[1][0][1], box[1][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[1][0][1], + box[1][0][0]) + box[2][0][1], box[2][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[2][0][1], + box[2][0][0]) + box[3][0][1], box[3][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[3][0][1], + box[3][0][0]) if Box_isPoint == True: - box_list.append( - [[box_point_x1, box_point_y1, box_point_z1], - [box_point_x2, box_point_y2, box_point_z2], - [box_point_x3, box_point_y3, box_point_z3], - [box_point_x4, box_point_y4, box_point_z4]]) + box_point_x1, box_point_y1, box_point_z1 = remove_nan_mean_value(pm, box[0][0][1], + box[0][0][0], + iter_max=Iter_Max_Pixel) + box_point_x2, box_point_y2, box_point_z2 = remove_nan_mean_value(pm, box[1][0][1], + box[1][0][0], + iter_max=Iter_Max_Pixel) + box_point_x3, box_point_y3, box_point_z3 = remove_nan_mean_value(pm, box[2][0][1], + box[2][0][0], + iter_max=Iter_Max_Pixel) + box_point_x4, box_point_y4, box_point_z4 = remove_nan_mean_value(pm, box[3][0][1], + box[3][0][0], + iter_max=Iter_Max_Pixel) else: - box_list.append([[x1, y1, z1], - [x2, y2, z2], - [x3, y3, z3], - [x4, y4, z4], - ]) - if self.cameraType == 'RVC': - xyz.append([point_x * 1000, point_y * 1000, point_z * 1000]) - 
Depth_Z.append(point_z * 1000) - elif self.cameraType == 'Pe': - xyz.append([point_x, point_y, point_z]) - Depth_Z.append(point_z) - #Depth_Z.append(point_z * 1000) - nx_ny_nz.append([a, b, c]) - RegionalArea.append(cv2.contourArea(max_contour)) - uv.append([x_rotation_center, y_rotation_center]) - seg_point.append(pm_seg) - cv2.polylines(img, [box], True, (0, 255, 0), 2) - cv2.polylines(img, [box_outside], True, (226, 12, 89), 2) + x1, y1, z1 = uv_to_XY(box[0][0][0], box[0][0][1]) + x2, y2, z2 = uv_to_XY(box[1][0][0], box[1][0][1]) + x3, y3, z3 = uv_to_XY(box[2][0][0], box[2][0][1]) + x4, y4, z4 = uv_to_XY(box[3][0][0], box[3][0][1]) + x_rotation_center = int((box[0][0][0] + box[1][0][0] + box[2][0][0] + box[3][0][0]) / 4) + y_rotation_center = int((box[0][0][1] + box[1][0][1] + box[2][0][1] + box[3][0][1]) / 4) + point_x, point_y, point_z = remove_nan_mean_value(pm, y_rotation_center, x_rotation_center, + iter_max=Iter_Max_Pixel) + cv2.circle(img, (x_rotation_center, y_rotation_center), 4, (255, 255, 255), 5) # 标出中心点 + if np.isnan(point_x): # 点云值为无效值 + continue + else: + if Box_isPoint == True: + box_list.append( + [[box_point_x1, box_point_y1, box_point_z1], + [box_point_x2, box_point_y2, box_point_z2], + [box_point_x3, box_point_y3, box_point_z3], + [box_point_x4, box_point_y4, box_point_z4]]) + else: + box_list.append([[x1, y1, z1], + [x2, y2, z2], + [x3, y3, z3], + [x4, y4, z4], + ]) + if target_box_area > img.shape[0] * img.shape[1] * (2 / 3): # Target_pixel_threshold + if self.cameraType == 'RVC': + xyz.append([point_x * 1000, point_y * 1000, point_z * 1000]) + Depth_Z.append(point_z * 1000) + elif self.cameraType == 'Pe': + xyz.append([point_x, point_y, point_z]) + Depth_Z.append(point_z) + nx_ny_nz.append([a, b, c]) + RegionalArea.append(cv2.contourArea(max_contour)) + uv.append([x_rotation_center, y_rotation_center]) + seg_point.append(pm_seg) + cv2.polylines(img, [box], True, (0, 255, 0), 2) + cv2.polylines(img, [box_outside], True, (226, 12, 89), 2) - _idx = find_position(Depth_Z, RegionalArea, 100, first_depth = Iter_Max_Pixel) + _idx = find_position(Depth_Z, RegionalArea, 100, First_Depth) - if _idx == None: + if _idx == None: + if save_img_point == 5: + cv2.imwrite(save_img_name, Abnormal_data_img) + np.savetxt(save_point_name, Abnormal_data_point) + return 1, img, None, None, None + else: + cv2.circle(img, (uv[_idx][0], uv[_idx][1]), 30, (0, 0, 255), 20) # 标出中心点 + + if Point_isVision == True: + pcd = o3d.geometry.PointCloud() + pcd.points = o3d.utility.Vector3dVector(seg_point[_idx]) + plane_model, inliers = pcd.segment_plane(distance_threshold=self.seg_distance_threshold, + ransac_n=5, + num_iterations=5000) + inlier_cloud = pcd.select_by_index(inliers) # 点云可视化 + inlier_cloud.paint_uniform_color([1.0, 0, 0]) + outlier_cloud = pcd.select_by_index(inliers, invert=True) + outlier_cloud.paint_uniform_color([0, 0, 1]) + o3d.visualization.draw_geometries([inlier_cloud, outlier_cloud, pcd2]) + if save_img_point == 2 or save_img_point == 4: + save_img = cv2.resize(img, (720, 540)) + cv2.imwrite(save_img_name, save_img) + return 1, img, xyz[_idx], nx_ny_nz[_idx], box_list[_idx] + else: + if save_img_point == 2 or save_img_point == 4: + save_img = cv2.resize(img, (720, 540)) + cv2.imwrite(save_img_name, save_img) if save_img_point == 5: cv2.imwrite(save_img_name, Abnormal_data_img) np.savetxt(save_point_name, Abnormal_data_point) return 1, img, None, None, None - else: - cv2.circle(img, (uv[_idx][0], uv[_idx][1]), 30, (0, 0, 255), 20) # 标出中心点 - if Point_isVision == True: - pcd = 
o3d.geometry.PointCloud() - pcd.points = o3d.utility.Vector3dVector(seg_point[_idx]) - plane_model, inliers = pcd.segment_plane(distance_threshold=seg_distance_threshold, - ransac_n=5, - num_iterations=5000) - inlier_cloud = pcd.select_by_index(inliers) # 点云可视化 - inlier_cloud.paint_uniform_color([1.0, 0, 0]) - outlier_cloud = pcd.select_by_index(inliers, invert=True) - outlier_cloud.paint_uniform_color([0, 1, 0]) - o3d.visualization.draw_geometries([inlier_cloud, outlier_cloud, pcd2]) - if save_img_point == 2 or save_img_point == 4: - save_img = cv2.resize(img,(720, 540)) - cv2.imwrite(save_img_name, save_img) - - return 1, img, xyz[_idx], nx_ny_nz[_idx], box_list[_idx] else: - if save_img_point == 2 or save_img_point == 4: - save_img = cv2.resize(img, (720, 540)) - cv2.imwrite(save_img_name, save_img) - if save_img_point == 5: - cv2.imwrite(save_img_name, Abnormal_data_img) - np.savetxt(save_point_name, Abnormal_data_point) - return 1, img, None, None, None + print("RVC X Camera capture failed!") + return 0, None, None, None, None else: - print("RVC X Camera capture failed!") + print("RVC X Camera is not opened!") return 0, None, None, None, None def get_take_photo_position(self, Height_reduce=30, width_reduce=30): """ - 专用于拍照位置点查找,检测当前拍照点能否检测到料袋 + 检测当前拍照点能否检测到料袋 :param Height_reduce: :param width_reduce: :return ret: bool 相机是否正常工作 @@ -325,135 +737,167 @@ class Detection: :return xyz: list 目标中心点云值,形如[x,y,z] """ - ret = 1 - img = self.img - pm = self.point + ret, img, pm = self.camera_rvc.get_img_and_point_map() # 拍照,获取图像及 find_target = False - - if ret == 1: - if self.use_openvino_model == False: - flag, det_cpu, dst_img, masks, category_names = self.model.model_inference(img, 0) - else: - flag, det_cpu, scores, masks, category_names = self.model.segment_objects(img) - if flag == 1: - xyz = [] - RegionalArea = [] - Depth_Z = [] - uv = [] - for i, item in enumerate(det_cpu): - find_target = True - # 画box - box_x1, box_y1, box_x2, box_y2 = item[0:4].astype(np.int32) - if self.use_openvino_model == False: - label = category_names[int(item[5])] - else: - label = class_names[int(item[4])] - rand_color = (0, 255, 255) - score = item[4] - org = (int((box_x1 + box_x2) / 2), int((box_y1 + box_y2) / 2)) - x_center = int((box_x1 + box_x2) / 2) - y_center = int((box_y1 + box_y2) / 2) - text = '{}|{:.2f}'.format(label, score) - cv2.putText(img, text, org=org, fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.8, - color=rand_color, - thickness=2) - # 画mask - # mask = masks[i].cpu().numpy().astype(int) - if self.use_openvino_model == False: - mask = masks[i].cpu().data.numpy().astype(int) - else: - mask = masks[i].astype(int) - mask = mask[box_y1:box_y2, box_x1:box_x2] - - # mask = masks[i].numpy().astype(int) - h, w = box_y2 - box_y1, box_x2 - box_x1 - mask_colored = np.zeros((h, w, 3), dtype=np.uint8) - mask_colored[np.where(mask)] = rand_color - ################################## - imgray = cv2.cvtColor(mask_colored, cv2.COLOR_BGR2GRAY) - # cv2.imshow('mask',imgray) - # cv2.waitKey(1) - # 2、二进制图像 - ret, binary = cv2.threshold(imgray, 10, 255, 0) - # 阈值 二进制图像 - # cv2.imshow('bin',binary) - # cv2.waitKey(1) - contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE) - # all_point_list = contours_in(contours) - # print(len(all_point_list)) - max_contour = None - max_perimeter = 0 - for contour in contours: # 排除小分割区域或干扰区域 - perimeter = cv2.arcLength(contour, True) - if perimeter > max_perimeter: - max_perimeter = perimeter - max_contour = contour - - ''' - 拟合最小外接矩形,计算矩形中心 - ''' - 
- rect = cv2.minAreaRect(max_contour) - if rect[1][0] - width_reduce < 30 or rect[1][1] - Height_reduce < 30: - rect_reduce = ( - (rect[0][0], rect[0][1]), (rect[1][0] - width_reduce, rect[1][1] - Height_reduce), rect[2]) - else: - rect_reduce = ( - (rect[0][0], rect[0][1]), (rect[1][0], rect[1][1]), rect[2]) - # cv2.boxPoints可以将轮廓点转换为四个角点坐标 - box_outside = cv2.boxPoints(rect) - # 这一步不影响后面的画图,但是可以保证四个角点坐标为顺时针 - startidx = box_outside.sum(axis=1).argmin() - box_outside = np.roll(box_outside, 4 - startidx, 0) - box_outside = np.intp(box_outside) - box_outside = box_outside.reshape((-1, 1, 2)).astype(np.int32) - - # cv2.boxPoints可以将轮廓点转换为四个角点坐标 - box_reduce = cv2.boxPoints(rect_reduce) - startidx = box_reduce.sum(axis=1).argmin() - box_reduce = np.roll(box_reduce, 4 - startidx, 0) - box_reduce = np.intp(box_reduce) - box_reduce = box_reduce.reshape((-1, 1, 2)).astype(np.int32) - - box_outside = box_outside + [[[box_x1, box_y1]], [[box_x1, box_y1]], [[box_x1, box_y1]],[[box_x1, box_y1]]] - box = box_reduce + [[[box_x1, box_y1]], [[box_x1, box_y1]], [[box_x1, box_y1]], [[box_x1, box_y1]]] - - box[0][0][1], box[0][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[0][0][1], box[0][0][0]) - box[1][0][1], box[1][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[1][0][1], box[1][0][0]) - box[2][0][1], box[2][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[2][0][1], box[2][0][0]) - box[3][0][1], box[3][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[3][0][1], box[3][0][0]) - - x_rotation_center = int((box[0][0][0] + box[1][0][0] + box[2][0][0] + box[3][0][0]) / 4) - y_rotation_center = int((box[0][0][1] + box[1][0][1] + box[2][0][1] + box[3][0][1]) / 4) - point_x, point_y, point_z = remove_nan_mean_value(pm, y_rotation_center, x_rotation_center) - cv2.circle(img, (x_rotation_center, y_rotation_center), 4, (255, 255, 255), 5) # 标出中心点 - if np.isnan(point_x): # 点云值为无效值 - continue - else: - if self.cameraType == 'RVC': - xyz.append([point_x * 1000, point_y * 1000, point_z * 1000]) - Depth_Z.append(point_z * 1000) - elif self.cameraType == 'Pe': - xyz.append([point_x, point_y, point_z]) - Depth_Z.append(point_z) - RegionalArea.append(cv2.contourArea(max_contour)) - uv.append([x_rotation_center, y_rotation_center]) - - cv2.polylines(img, [box], True, (0, 255, 0), 2) - cv2.polylines(img, [box_outside], True, (226, 12, 89), 2) - - _idx = find_position(Depth_Z, RegionalArea, 100,True) - - if _idx == None: - return 1, img, find_target, None + if self.camera_rvc.caminit_isok == True: + if ret == 1: + if self.use_openvino_model == False: + flag, det_cpu, dst_img, masks, category_names = self.model.model_inference(img, 0) else: - cv2.circle(img, (uv[_idx][0], uv[_idx][1]), 30, (0, 0, 255), 20) # 标出中心点 - return 1, img, find_target, xyz[_idx] + flag, det_cpu, scores, masks, category_names = self.model.segment_objects(img) + if flag == 1: + xyz = [] + RegionalArea = [] + Depth_Z = [] + uv = [] + for i, item in enumerate(det_cpu): + find_target = True + # 画box + box_x1, box_y1, box_x2, box_y2 = item[0:4].astype(np.int32) + if self.use_openvino_model == False: + label = category_names[int(item[5])] + else: + label = class_names[int(item[4])] + rand_color = (0, 255, 255) + score = item[4] + org = (int((box_x1 + box_x2) / 2), int((box_y1 + box_y2) / 2)) + x_center = int((box_x1 + box_x2) / 2) + y_center = int((box_y1 + box_y2) / 2) + text = '{}|{:.2f}'.format(label, score) + cv2.putText(img, text, org=org, fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.8, + color=rand_color, + thickness=2) + # 画mask 
+ # mask = masks[i].cpu().numpy().astype(int) + if self.use_openvino_model == False: + mask = masks[i].cpu().data.numpy().astype(int) + else: + mask = masks[i].astype(int) + mask = mask[box_y1:box_y2, box_x1:box_x2] + + # mask = masks[i].numpy().astype(int) + h, w = box_y2 - box_y1, box_x2 - box_x1 + mask_colored = np.zeros((h, w, 3), dtype=np.uint8) + mask_colored[np.where(mask)] = rand_color + ################################## + imgray = cv2.cvtColor(mask_colored, cv2.COLOR_BGR2GRAY) + # cv2.imshow('mask',imgray) + # cv2.waitKey(1) + # 2、二进制图像 + ret, binary = cv2.threshold(imgray, 10, 255, 0) + # 阈值 二进制图像 + # cv2.imshow('bin',binary) + # cv2.waitKey(1) + contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE) + # all_point_list = contours_in(contours) + # print(len(all_point_list)) + max_contour = None + max_perimeter = 0 + for contour in contours: # 排除小分割区域或干扰区域 + perimeter = cv2.arcLength(contour, True) + if perimeter > max_perimeter: + max_perimeter = perimeter + max_contour = contour + + ''' + 拟合最小外接矩形,计算矩形中心 + ''' + + rect = cv2.minAreaRect(max_contour) + if rect[1][0] - width_reduce < 30 or rect[1][1] - Height_reduce < 30: + rect_reduce = ( + (rect[0][0], rect[0][1]), (rect[1][0] - width_reduce, rect[1][1] - Height_reduce), + rect[2]) + else: + rect_reduce = ( + (rect[0][0], rect[0][1]), (rect[1][0], rect[1][1]), rect[2]) + # cv2.boxPoints可以将轮廓点转换为四个角点坐标 + box_outside = cv2.boxPoints(rect) + # 这一步不影响后面的画图,但是可以保证四个角点坐标为顺时针 + startidx = box_outside.sum(axis=1).argmin() + box_outside = np.roll(box_outside, 4 - startidx, 0) + box_outside = np.intp(box_outside) + box_outside = box_outside.reshape((-1, 1, 2)).astype(np.int32) + + # cv2.boxPoints可以将轮廓点转换为四个角点坐标 + box_reduce = cv2.boxPoints(rect_reduce) + startidx = box_reduce.sum(axis=1).argmin() + box_reduce = np.roll(box_reduce, 4 - startidx, 0) + box_reduce = np.intp(box_reduce) + box_reduce = box_reduce.reshape((-1, 1, 2)).astype(np.int32) + + box_outside = box_outside + [[[box_x1, box_y1]], [[box_x1, box_y1]], [[box_x1, box_y1]], + [[box_x1, box_y1]]] + box = box_reduce + [[[box_x1, box_y1]], [[box_x1, box_y1]], [[box_x1, box_y1]], + [[box_x1, box_y1]]] + + box[0][0][1], box[0][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[0][0][1], + box[0][0][0]) + box[1][0][1], box[1][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[1][0][1], + box[1][0][0]) + box[2][0][1], box[2][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[2][0][1], + box[2][0][0]) + box[3][0][1], box[3][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[3][0][1], + box[3][0][0]) + + x_rotation_center = int((box[0][0][0] + box[1][0][0] + box[2][0][0] + box[3][0][0]) / 4) + y_rotation_center = int((box[0][0][1] + box[1][0][1] + box[2][0][1] + box[3][0][1]) / 4) + point_x, point_y, point_z = remove_nan_mean_value(pm, y_rotation_center, x_rotation_center) + cv2.circle(img, (x_rotation_center, y_rotation_center), 4, (255, 255, 255), 5) # 标出中心点 + if np.isnan(point_x): # 点云值为无效值 + continue + else: + if self.cameraType == 'RVC': + xyz.append([point_x * 1000, point_y * 1000, point_z * 1000]) + Depth_Z.append(point_z * 1000) + elif self.cameraType == 'Pe': + xyz.append([point_x, point_y, point_z]) + Depth_Z.append(point_z) + RegionalArea.append(cv2.contourArea(max_contour)) + uv.append([x_rotation_center, y_rotation_center]) + + cv2.polylines(img, [box], True, (0, 255, 0), 2) + cv2.polylines(img, [box_outside], True, (226, 12, 89), 2) + + _idx = find_position(Depth_Z, RegionalArea, 100, True) + + if _idx == None: + return 1, img, 
find_target, None
+                    else:
+                        cv2.circle(img, (uv[_idx][0], uv[_idx][1]), 30, (0, 0, 255), 20)  # 标出中心点
+                        return 1, img, find_target, xyz[_idx]
+            else:
+                return 0, None, None
         else:
             return 0, None, None
+        pass
+
+    def get_center_position(self):
+        '''
+        获取图像中心点位置对应的点云数据
+
+        :return: img, (x, y, z) 图像中心点对应的点云坐标
+        '''
+        ret, img, pm = self.camera_rvc.get_img_and_point_map()  # 拍照,获取图像及点云
+        if self.camera_rvc.caminit_isok == True:
+            if ret:
+                if pm != 'None':
+                    pm_shape_y = pm.shape[0]
+                    pm_shape_x = pm.shape[1]
+                    center_point = [int(pm_shape_y / 2), int(pm_shape_x / 2)]
+                    point_x, point_y, point_z = remove_nan_mean_value(pm, center_point[0], center_point[1])
+                    return img, [point_x, point_y, point_z]
+                else:
+                    print('点云值为NAN')
+                    return None, None
+            else:
+                return None, None
+        else:
+            return None, None

     def release(self):
+        self.camera_rvc.release()
-        self.model.clear()
+        self.model_seg.clear()
diff --git a/Vision/model/pt/one_bag_pose.pt b/Vision/model/pt/one_bag_pose.pt
new file mode 100644
index 0000000..adc3ac7
Binary files /dev/null and b/Vision/model/pt/one_bag_pose.pt differ
diff --git a/Vision/tool/utils.py b/Vision/tool/utils.py
index 27ff772..020a246 100644
--- a/Vision/tool/utils.py
+++ b/Vision/tool/utils.py
@@ -14,6 +14,59 @@ import psutil
 from psutil._common import bytes2human
 
 
+def shrink_quadrilateral(points, d):
+    """
+    给定4个点围成的四边形,沿着对角线内缩小d个像素
+    :param points: 四边形的4个顶点,形状为 (4, 2)
+    :param d: 内缩的像素距离
+    :return: 缩小后的4个顶点
+    """
+    # 将点转换为 numpy 数组
+    points = np.array(points, dtype=np.float32)
+
+    # 计算四边形的中心点
+    center = np.mean(points, axis=0)
+
+    # 计算每个点到中心点的向量
+    vectors = points - center
+
+    # 计算每个向量的长度
+    lengths = np.linalg.norm(vectors, axis=1)
+
+    # 计算缩放比例
+    scale = (lengths - d) / lengths
+
+    # 对每个点进行缩放
+    new_points = center + vectors * scale[:, np.newaxis]
+    new_points = new_points.astype(np.int32)
+
+    return new_points
+
+
+def find_closest_point_index(point_cloud, x1, y1):
+    x_coords = point_cloud[:, :, 0]
+    y_coords = point_cloud[:, :, 1]
+
+    # 创建一个掩码,标记非 NaN 的点
+    valid_mask = ~np.isnan(x_coords) & ~np.isnan(y_coords)
+
+    # 初始化最小距离为一个很大的值
+    min_distance = np.inf
+    min_index = (None, None)
+
+    # 遍历所有有效点
+    for i in range(point_cloud.shape[0]):
+        for j in range(point_cloud.shape[1]):
+            if valid_mask[i, j]:
+                # 计算当前点到 (x1, y1) 的欧几里得距离
+                distance = np.sqrt((x_coords[i, j] - x1) ** 2 + (y_coords[i, j] - y1) ** 2)
+                # 如果当前距离小于最小距离,则更新最小距离和索引
+                if distance < min_distance:
+                    min_distance = distance
+                    min_index = (i, j)
+
+    return min_index
+
 def uv_to_XY(cameraType, u, v):
     """
     像素坐标转相机坐标
diff --git a/Vision/yolo/yolov8_pt_pose.py b/Vision/yolo/yolov8_pt_pose.py
new file mode 100644
index 0000000..876f060
--- /dev/null
+++ b/Vision/yolo/yolov8_pt_pose.py
@@ -0,0 +1,214 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+'''
+# @Time : 2025/3/18 15:29
+# @Author : hjw
+# @File : yolov8_pt_pose.py
+'''
+
+import os.path
+import random
+import cv2
+import numpy as np
+import torch
+import time
+from ultralytics.nn.autobackend import AutoBackend
+from ultralytics.utils import ops
+
+
+class yolov8_pose:
+    def __init__(self, weights, cuda, conf_thres=0.45, iou_thres=0.45) -> None:
+        """
+        weights = r'./runs/pose/train25/weights/last.pt'
+        cuda = 'cpu'
+        save_path = "./img_test"
+        """
+        self.imgsz = 640
+        self.device = cuda
+        self.model = AutoBackend(weights, device=torch.device(cuda))
+        self.model.eval()
+        self.names = self.model.names
+        self.half = False
+        self.conf = conf_thres
+        self.iou = iou_thres
+        self.color = {"font": (255, 255, 255)}
+        self.color.update(
+            {self.names[i]: (random.randint(0, 255),
random.randint(0, 255), random.randint(0, 255)) + for i in range(len(self.names))}) + + # self.skeleton = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12], [7, 13], [6, 7], [6, 8], + # [7, 9], [8, 10], [9, 11], [2, 3], [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]] + # pose_palette = np.array([[255, 128, 0], [255, 153, 51], [255, 178, 102], [230, 230, 0], [255, 153, 255], + # [153, 204, 255], [255, 102, 255], [255, 51, 255], [102, 178, 255], [51, 153, 255], + # [255, 153, 153], [255, 102, 102], [255, 51, 51], [153, 255, 153], [102, 255, 102], + # [51, 255, 51], [0, 255, 0], [0, 0, 255], [255, 0, 0], [255, 255, 255]], dtype=np.uint8) + # self.kpt_color = pose_palette[[16, 16, 16, 16, 16, 0, 0, 0, 0, 0, 0, 9, 9, 9, 9, 9, 9]] + # self.limb_color = pose_palette[[9, 9, 9, 9, 7, 7, 7, 0, 0, 0, 0, 0, 16, 16, 16, 16, 16, 16, 16]] + self.skeleton = [[1, 2], [2, 3], [3, 4]] + pose_palette = np.array([[255, 0, 0], [255, 153, 51], [255, 3, 102], [0, 230, 0]], dtype=np.uint8) + self.kpt_color = pose_palette[[0, 1, 2, 3]] + self.limb_color = pose_palette[[0, 1, 2, 3]] + # print(len(self.skeleton )) + # print(len(pose_palette)) + # print(len(self.kpt_color)) + # print(len(self.limb_color)) + + def model_inference(self, img_src): + img = self.precess_image(img_src, self.imgsz, self.half, self.device) + preds = self.model(img) # shape [1, 56, 6300] + det = ops.non_max_suppression(preds, self.conf, self.iou, classes=None, agnostic=False, max_det=300, + nc=len(self.names)) + point_xy = [] + name_list = [] + score_list = [] + for i, pred in enumerate(det): + lw = max(round(sum(img_src.shape) / 2 * 0.003), 2) # line width + tf = max(lw - 1, 1) # font thickness + sf = lw / 3 # font scale + + pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], img_src.shape) + pred_bbox = pred[:, :6].cpu().detach().numpy() + + pred_kpts = pred[:, 6:].view(len(pred), *self.model.kpt_shape) if len(pred) else pred[:, 6:] + pred_kpts = ops.scale_coords(img.shape[2:], pred_kpts, img_src.shape) + pred_kpts = pred_kpts.cpu().detach().numpy() + point_xy = [] + for kpts, bbox in zip(pred_kpts, pred_bbox): + box = bbox[:4] + score = bbox[4] + name = self.names[bbox[5]] + shape = (640, 640) + radius = 5 + kpt_line = True + nkpt, ndim = kpts.shape + is_pose = nkpt == 4 and ndim in {2, 3} + kpt_line &= is_pose # `kpt_line=True` for now only supports human pose plotting + xy = [] + for i, k in enumerate(kpts): + color_k = [int(x) for x in self.kpt_color[i]] + x_coord, y_coord = k[0], k[1] + if x_coord % shape[1] != 0 and y_coord % shape[0] != 0: + if len(k) == 3: + conf = k[2] + if conf < 0.5: + continue + xy.append([int(x_coord), int(y_coord)]) + cv2.circle(img_src, (int(x_coord), int(y_coord)), radius, color_k, -1, lineType=cv2.LINE_AA) + point_xy.append(xy) + name_list.append(name) + score_list.append(score) + return True, point_xy, name_list, score_list + + + def draw_box(self, img_src, box, conf, cls_name, lw, sf, tf): + color = self.color[cls_name] + + label = f'{cls_name} {conf}' + p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3])) + # 绘制矩形框 + cv2.rectangle(img_src, p1, p2, color, thickness=lw, lineType=cv2.LINE_AA) + # text width, height + w, h = cv2.getTextSize(label, 0, fontScale=sf, thickness=tf)[0] + # label fits outside box + outside = box[1] - h - 3 >= 0 + p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3 + # 绘制矩形框填充 + cv2.rectangle(img_src, p1, p2, color, -1, cv2.LINE_AA) + # 绘制标签 + cv2.putText(img_src, label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2), + 0, sf, self.color["font"], 
thickness=2, lineType=cv2.LINE_AA) + + def draw_kpts(self, img_src, kpts, box, score, name, lw, sf, tf, shape=(640, 640), radius=5, kpt_line=True): + flag = False + nkpt, ndim = kpts.shape + is_pose = nkpt == 4 and ndim in {2, 3} + kpt_line &= is_pose # `kpt_line=True` for now only supports human pose plotting + + for i, k in enumerate(kpts): + color_k = [int(x) for x in self.kpt_color[i]] + x_coord, y_coord = k[0], k[1] + if x_coord % shape[1] != 0 and y_coord % shape[0] != 0: + if len(k) == 3: + conf = k[2] + if conf < 0.5: + continue + cv2.circle(img_src, (int(x_coord), int(y_coord)), radius, color_k, -1, lineType=cv2.LINE_AA) + + if kpt_line: + ndim = kpts.shape[-1] + for i, sk in enumerate(self.skeleton): + pos1 = (int(kpts[(sk[0] - 1), 0]), int(kpts[(sk[0] - 1), 1])) + pos2 = (int(kpts[(sk[1] - 1), 0]), int(kpts[(sk[1] - 1), 1])) + if ndim == 3: + conf1 = kpts[(sk[0] - 1), 2] + conf2 = kpts[(sk[1] - 1), 2] + if conf1 < 0.5 or conf2 < 0.5: + continue + if pos1[0] % shape[1] == 0 or pos1[1] % shape[0] == 0 or pos1[0] < 0 or pos1[1] < 0: + continue + if pos2[0] % shape[1] == 0 or pos2[1] % shape[0] == 0 or pos2[0] < 0 or pos2[1] < 0: + continue + cv2.line(img_src, pos1, pos2, [int(x) for x in self.limb_color[i]], thickness=2, lineType=cv2.LINE_AA) + flag = True + + if flag: + self.draw_box(img_src, box, score, name, lw, sf, tf) + + @staticmethod + def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), scaleup=True, stride=32): + # Resize and pad image while meeting stride-multiple constraints + shape = im.shape[:2] # current shape [height, width] + if isinstance(new_shape, int): + new_shape = (new_shape, new_shape) + + # Scale ratio (new / old) + r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) + if not scaleup: # only scale down, do not scale up (for better val mAP) + r = min(r, 1.0) + + # Compute padding + ratio = r, r # width, height ratios + new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) + dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding + # minimum rectangle + dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding + dw /= 2 # divide padding into 2 sides + dh /= 2 + + if shape[::-1] != new_unpad: # resize + im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR) + + top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) + left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) + im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border + return im, ratio, (dw, dh) + + def precess_image(self, img_src, img_size, half, device): + # Padded resize + img = self.letterbox(img_src, img_size)[0] + # Convert + img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + img = np.ascontiguousarray(img) + img = torch.from_numpy(img).to(device) + + img = img.half() if half else img.float() # uint8 to fp16/32 + img = img / 255 # 0 - 255 to 0.0 - 1.0 + if len(img.shape) == 3: + img = img[None] # expand for batch dim + return img + + +# if __name__ == '__main__': +# weights = r'./runs/pose/train25/weights/last.pt' +# cuda = 'cpu' +# save_path = "./img_test" +# start = time.time() +# if not os.path.exists(save_path): +# os.mkdir(save_path) +# +# model = yolov8_pose(weights, cuda, 0.45, 0.45) +# +# img_path = r'./1106-08-pe-518.png' +# model.infer(img_path, save_path) +# end = time.time() +# print('推理时间:',end -start) \ No newline at end of file diff --git a/get_position_test.py b/get_position_test.py index 5cec46f..2a75b71 100644 --- a/get_position_test.py +++ 
b/get_position_test.py @@ -10,7 +10,7 @@ import platform import cv2 import os -from Vision.camera_coordinate_dete import Detection +from Vision.camera_coordinate_dete_img import Detection from Vision.camera_coordinate_dete_planevison import Detection_plane_vsion from Trace.handeye_calibration import * from Vision.tool.utils import get_disk_space @@ -26,9 +26,9 @@ from Vision.bag_collection import DetectionBag """ def detectionPosition_test(): - detection = Detection() + detection = Detection(use_pose_model=True) # 模型选择 use_openvino_model=False, use_pose_model=True, use_seg_pt_model=True while True: - ret, img, xyz, nx_ny_nz, box = detection.get_position(Point_isVision=True, save_img_point=1) + ret, img, xyz, nx_ny_nz, box = detection.get_position(Use_Pose_Model_Pro=True, Point_isVision=True, save_img_point=1) if ret==1: print('xyz点云坐标:', xyz) print('nx_ny_nz法向量:', nx_ny_nz) @@ -134,4 +134,4 @@ def bag_collection_test(): if __name__ == '__main__': - Detection_plane_vsion_test() \ No newline at end of file + detectionPosition_test() \ No newline at end of file
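
Note on the new helper in Vision/tool/utils.py: find_closest_point_index scans the point map with a nested Python loop, so every query costs H*W interpreted iterations. Below is a minimal vectorized sketch of the same lookup, assuming the same (H, W, 3) point-map layout with NaN marking invalid points; the name find_closest_point_index_vectorized and its placement are illustrative only and not part of this diff.

    import numpy as np

    def find_closest_point_index_vectorized(point_cloud, x1, y1):
        # x/y channels of the (H, W, 3) point map
        x = point_cloud[:, :, 0]
        y = point_cloud[:, :, 1]
        # a point is usable only when both coordinates are real numbers
        valid = ~np.isnan(x) & ~np.isnan(y)
        if not valid.any():
            return (None, None)
        # squared distance is monotonic in distance, so the sqrt can be skipped
        dist2 = (x - x1) ** 2 + (y - y1) ** 2
        dist2 = np.where(valid, dist2, np.inf)  # mask out invalid points
        i, j = np.unravel_index(np.argmin(dist2), dist2.shape)
        return (int(i), int(j))

Assuming the semantics match the looped version (same validity rule, Euclidean distance in the x/y plane), this behaves as a drop-in replacement and avoids the per-pixel Python loop at typical depth-map resolutions.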