Merge remote-tracking branch 'origin/master'

This commit is contained in:
FrankCV2048
2024-12-19 20:22:03 +08:00
7 changed files with 19 additions and 9 deletions

View File

@@ -54,7 +54,7 @@ def R_matrix(x,y,z,u,v,w):
# 图像识别结果xyz和法向量
def getPosition(x,y,z,a,b,c,points):
def getPosition(x,y,z,a,b,c,rotation,points):
target = np.asarray([x, y, z,1])
camera2robot = np.loadtxt('./Trace/com_pose.txt', delimiter=' ') #相对目录且分隔符采用os.sep
# robot2base = rotation

View File

@@ -15,7 +15,8 @@ import time
import os
from Vision.tool.CameraRVC import camera_rvc
from Vision.tool.CameraPe_color2depth import camera_pe
from Vision.tool.CameraPe_color2depth import camera_pe as camera_pe_color2depth
from Vision.tool.CameraPe_depth2color import camera_pe as camera_pe_depth2color
from Vision.yolo.yolov8_pt_seg import yolov8_segment
from Vision.yolo.yolov8_openvino import yolov8_segment_openvino
from Vision.tool.utils import find_position
@@ -28,15 +29,17 @@ from Vision.tool.utils import uv_to_XY
class Detection:
def __init__(self, use_openvino_model=False, cameraType = 'Pe'): # cameraType = 'RVC' or cameraType = 'Pe'
def __init__(self, use_openvino_model=False, cameraType = 'Pe', alignmentType = 'color2depth'): # cameraType = 'RVC' or cameraType = 'Pe'
"""
初始化相机及模型
:param use_openvino_model: 选择模型默认使用openvino
:param cameraType: 选择相机 如本相机 'RVC', 图漾相机 'Pe'
:param alignmentType: 相机对齐方式 color2depth彩色图对齐深度图 depth2color深度图对齐彩色图
"""
self.use_openvino_model = use_openvino_model
self.cameraType = cameraType
self.alignmentType= alignmentType
if self.use_openvino_model == False:
model_path = ''.join([os.getcwd(), '/Vision/model/pt/one_bag.pt'])
device = 'cpu'
@@ -44,7 +47,10 @@ class Detection:
self.camera_rvc = camera_rvc()
self.seg_distance_threshold = 10 # 1厘米
elif self.cameraType == 'Pe':
self.camera_rvc = camera_pe()
if self.alignmentType=='color2depth':
self.camera_rvc = camera_pe_color2depth()
else:
self.camera_rvc = camera_pe_depth2color()
self.seg_distance_threshold = 15 # 2厘米
else:
print('相机参数错误')
@@ -58,7 +64,10 @@ class Detection:
self.camera_rvc = camera_rvc()
self.seg_distance_threshold = 10
elif self.cameraType == 'Pe':
self.camera_rvc = camera_pe()
if self.alignmentType == 'color2depth':
self.camera_rvc = camera_pe_color2depth()
else:
self.camera_rvc = camera_pe_depth2color()
self.seg_distance_threshold = 20
else:
print('相机参数错误')

View File

@@ -1,6 +1,6 @@
description: Ultralytics YOLOv8n-seg model trained on D:\work\ultralytics-main\ultralytics\cfg\datasets\coco8-seg-dy.yaml
author: Ultralytics
date: '2024-12-02T17:16:53.470305'
date: '2024-12-19T09:48:12.419566'
version: 8.2.86
license: AGPL-3.0 License (https://ultralytics.com/license)
docs: https://docs.ultralytics.com

Binary file not shown.

Binary file not shown.

View File

@@ -124,7 +124,7 @@ class camera_pe():
:return: ret , img, point_map
'''
if self.caminit_isok == False or self.event.IsOffline():
return 0, None, None
return 0, None, None, None
else:
image_list = self.cl.DeviceStreamRead(self.handle, 2000)
if len(image_list) == 2:

View File

@@ -80,6 +80,7 @@ class camera_pe():
# print('\t{} -size[{}x{}]\t-\t desc:{}'.format(idx, cl.Width(fmt), cl.Height(fmt), fmt.getDesc()))
self.cl.DeviceStreamFormatConfig(self.handle, PERCIPIO_STREAM_DEPTH, depth_fmt_list[2]) # 深度图大小
self.depth_calib_data = self.cl.DeviceReadCalibData(self.handle, PERCIPIO_STREAM_DEPTH)
self.color_calib_data = self.cl.DeviceReadCalibData(self.handle, PERCIPIO_STREAM_COLOR)
err = self.cl.DeviceLoadDefaultParameters(self.handle)
if err:
@@ -124,7 +125,7 @@ class camera_pe():
:return: ret , img, point_map
'''
if self.caminit_isok == False or self.event.IsOffline():
return 0, None, None
return 0, None, None, None
else:
image_list = self.cl.DeviceStreamRead(self.handle, 2000)
if len(image_list) == 2:
@@ -140,7 +141,7 @@ class camera_pe():
self.cl.DeviceStreamDepthRender(self.img_registration_depth, self.img_registration_render)
mat_depth_render = self.img_registration_render.as_nparray()
self.cl.DeviceStreamMapDepthImageToPoint3D(self.img_registration_depth, self.depth_calib_data, self.scale_unit,
self.cl.DeviceStreamMapDepthImageToPoint3D(self.img_registration_depth, self.color_calib_data, self.scale_unit,
self.pointcloud_data_arr)
p3d_nparray = self.pointcloud_data_arr.as_nparray()