Compare commits

...

10 Commits

23 changed files with 3603 additions and 4632 deletions

View File

@ -2,7 +2,7 @@
<module type="PYTHON_MODULE" version="4">
<component name="NewModuleRootManager">
<content url="file://$MODULE_DIR$" />
<orderEntry type="jdk" jdkName="Python 3.9" jdkType="Python SDK" />
<orderEntry type="jdk" jdkName="rob" jdkType="Python SDK" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
<component name="PyDocumentationSettings">

.idea/misc.xml generated
View File

@ -3,5 +3,5 @@
<component name="Black">
<option name="sdkName" value="rob" />
</component>
<component name="ProjectRootManager" version="2" project-jdk-name="Python 3.9" project-jdk-type="Python SDK" />
<component name="ProjectRootManager" version="2" project-jdk-name="rob" project-jdk-type="Python SDK" />
</project>

View File

@ -36,7 +36,7 @@ class Catch:
# on-board IO
##self.robotClient.sendIOControl(self.robotClient.con_ios[0],1)
# network relay
open(1, 0, 0)
close(1, 0, 0)
self.is_send_take_command = True
if self.catch_status == CatchStatus.CDrop:
@ -45,15 +45,16 @@ class Catch:
# self.robotClient.sendIOControl(self.robotClient.con_ios[0], 0)
# network relay
close(1, 0, 0)
time.sleep(1)
for _ in range(self.drop_count):
# self.robotClient.sendIOControl(self.robotClient.con_ios[1], 1, delay=self.robotClient.time_delay_put)
open(0, 1, 0)
open(0, 0, 1)
time.sleep(self.robotClient.time_delay_put) # blocks here, so other commands cannot be slotted in at this point; needs a separate thread
close(0, 1, 0)
close(0, 0, 1)
# self.robotClient.sendIOControl(self.robotClient.con_ios[1], 0)
# self.robotClient.sendIOControl(self.robotClient.con_ios[1], 1)
open(0, 1, 0)
close(0, 0, 1)
self.is_send_command = True
if self.drop_continue.Q(True,self.robotClient.time_delay_put*1000*self.drop_count):
# if Constant.Debug or self.robotClient.check_outputQ(self.robotClient.con_ios[1]) and not self.robotClient.check_outputQ(self.robotClient.con_ios[0]):
@ -63,23 +64,26 @@ class Catch:
if self.catch_status == CatchStatus.CShake: # 1500
self.shake_Q = not self.shake_Q # 10
if not self.shake_continue.Q(True, 600):
if not self.shake_continue.Q(True, 6000):
if self.shake_Q:
open(0, 0, 1)
open(0, 1, 0)
else:
close(0, 0, 1)
close(0, 1, 0)
else:
self.shake_continue.SetReset()
self.catch_status = CatchStatus.COk
#self.catch_status = CatchStatus.COk
#if Constant.Debug or self.robotClient.check_outputQ(self.robotClient.con_ios[2]):
# self.robotClient.sendIOControl(self.robotClient.con_ios[2], 0)
close(0, 0, 1)
close(0, 1, 0)
print("震动结束")
if self.catch_status == CatchStatus.COk :
self.shake_continue.SetReset()
# self.robotClient.sendIOControl(self.robotClient.con_ios[1], 0,emptyList='1')
open(1,0,0)
close(0, 1, 0)
close(0, 0, 1)
self.is_send_take_command = False
pass
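The comment on the drop loop above notes that time.sleep blocks run() at that point and that a separate thread would be needed. A minimal non-blocking sketch of that idea, assuming the open/close valve helpers shown in the relay module below; pulse is a hypothetical helper name, not part of the repo:

import threading

def pulse(open_fn, close_fn, flags, duration_s):
    """Energize a valve and release it after duration_s without blocking the caller."""
    open_fn(*flags)
    timer = threading.Timer(duration_s, close_fn, args=flags)
    timer.daemon = True
    timer.start()          # close_fn(*flags) fires later on a timer thread
    return timer

# e.g. pulse(open, close, (0, 0, 1), self.robotClient.time_delay_put) for the drop valve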

View File

@ -37,8 +37,12 @@ class Detect:
return
target_position, noraml_base = getPosition(*xyz, *uvw, None, points)
position = Real_Position().init_position(*target_position[:3], *noraml_base[:3])
position.Z = position.Z + 200
# position.Z = position.Z
position.a = uvw[0]
position.b = uvw[1]
position.c = uvw[2]
self.detect_position = position
self.detect_status = DetectStatus.DOk

View File

@ -36,7 +36,7 @@ def send_command(command):
print(f"收到响应: {binascii.hexlify(response)}")
# verify the response
if response == byte_data:
print("命令成功下发,继电器已执行操作。")
# print("命令成功下发,继电器已执行操作。")
return True
else:
print("命令下发失败,响应与请求不符。")
@ -67,7 +67,7 @@ def close(grasp, shake, throw):
if send_command(valve_commands[1]['close']):
time.sleep(1)
if shake:
print("关闭电磁阀 2")
# print("关闭电磁阀 2")
if send_command(valve_commands[2]['close']):
time.sleep(0.05)
if throw:
@ -76,8 +76,8 @@ def close(grasp, shake, throw):
time.sleep(0.5)
# close the solenoid valves
# open(True, False, False) # pass True/False as the flags
# close(False,False,True)
# open(False, False, True) # pass True/False as the flags
# close(True,False,True)
# for i in range(10):
# open(False,True,True)
# close(True,True,True)
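send_command above only reports success when the relay echoes the request bytes back. A minimal sketch of that echo-check pattern, assuming a TCP-connected network relay; host, port and the transport are placeholders, since the module's actual transport is not shown in this diff:

import binascii
import socket

def send_and_verify(host, port, byte_data, timeout=1.0):
    # send the raw command and read back the relay's echo
    with socket.create_connection((host, port), timeout=timeout) as sock:
        sock.sendall(byte_data)
        response = sock.recv(len(byte_data))
    print(f"response: {binascii.hexlify(response)}")
    return response == byte_data   # success only when the echo matches the request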

View File

@ -20,6 +20,7 @@ from enum import Enum, IntEnum
from COM.COM_Robot import RobotClient, DetectType
from Model.RobotModel import CMDInstructRequest, MoveType
from Trace.handeye_calibration import getPosition
from Trace.handeye_calibration import getxyz
from Util.util_math import get_distance
from Util.util_time import CRisOrFall
from Vision.camera_coordinate_dete import Detection
@ -34,7 +35,7 @@ class ResetStatus(Enum):
class FeedStatus(IntEnum):
FNone = 0
FNone = 0 #
FStart = 1
FCheck = 2
FMid = 3
@ -132,21 +133,21 @@ class FeedLine:
def set_take_position(self,position:Real_Position,dynamic_height=0):
for i in range(len(self.feeding_to_end)):
if self.feeding_to_end[i].status == FeedStatus.FTake.value:
if position != None:
befor_take_position = Real_Position().init_position(position.X,
position.Y,
position.Z+dynamic_height,
position.U,
position.V,
position.W)
after_take_position = Real_Position().init_position(position.X,
position.Y,
position.Z+dynamic_height,
position.U,
position.V,
position.W)
self.feeding_to_end[i - 1].set_position(befor_take_position)
self.feeding_to_end[i + 1].set_position(after_take_position)
xyz = getxyz(position.X, position.Y, position.Z, position.a, position.b, position.c)
befor_take_position = Real_Position().init_position(xyz[0],
xyz[1],
xyz[2],
position.U,
position.V,
position.W)
after_take_position = Real_Position().init_position(xyz[0],
xyz[1],
xyz[2],
position.U,
position.V,
position.W)
self.feeding_to_end[i - 1].set_position(befor_take_position)
self.feeding_to_end[i + 1].set_position(after_take_position)
self.feeding_to_end[i].set_position(position)
@ -242,7 +243,7 @@ class Feeding(QObject):
self.detect.run()
time.sleep(0.02)
def run(self):
def run(self):
self.catch.run()
# get the event coordinates
real_position = Real_Position()
@ -448,8 +449,13 @@ class Feeding(QObject):
#self.feedConfig.feedLine.set_take_position(None)
# time.sleep(self.robotClient.time_delay_take)
self.log_signal.emit(logging.INFO, Constant.str_feed_take_success)
self.next_position()
self.catch.catch_status = CatchStatus.COk
if self.catch.catch_status == CatchStatus.COk:
self.next_position()
self.catch.catch_status = CatchStatus.CNone
return
if self.catch.catch_status == CatchStatus.CTake:
self.catch.catch_status = CatchStatus.COk
else:
self.log_signal.emit(logging.ERROR, Constant.str_feed_takePhoto_fail)
@ -480,17 +486,23 @@ class Feeding(QObject):
if self.get_current_position().get_position().compare(real_position,is_action=True):
# TODO shaking scheme
self.log_signal.emit(logging.INFO, Constant.str_feed_shake)
if self.catch.catch_status == CatchStatus.COk:
self.catch.catch_status = CatchStatus.CNone
self.next_position()
if self.catch.catch_status == CatchStatus.CNone:
self.catch.catch_status = CatchStatus.CShake
return
if self.catch.catch_status == CatchStatus.CShake:
# if self.feedConfig.feedLine.feeding_to_end[
# self.feedConfig.feedLine.feeding2end_pos_index + 1].status != FeedStatus.FShake:
# self.catch.catch_status = CatchStatus.COk
# else:
self.catch.shake_continue.SetReset()
self.next_position()
if self.feedStatus!=FeedStatus.FShake:
self.catch.catch_status = CatchStatus.CNone
return
elif self.feedStatus == FeedStatus.FDropBag:

View File

@ -46,12 +46,12 @@ status = 3
linetype = 0
[Position8]
x = 1430.494385
y = 1765.716187
x = 1445.789185
y = 1707.384888
z = 2050.0
u = 1.57722
v = 4.174088
w = -87.506218
u = 2.204855
v = 3.428981
w = -85.25634
id = 8
order = 8
lineid = 1
@ -59,12 +59,12 @@ status = 6
linetype = 0
[Position9]
x = 1430.492554
y = 1765.717407
z = 1832.536255
u = 1.57702
v = 4.174215
w = -87.506783
x = 1445.789185
y = 1707.384888
z = 1826.260132
u = 2.204855
v = 3.428981
w = -85.25634
id = 9
order = 9
lineid = 1
@ -72,12 +72,12 @@ status = 7
linetype = 0
[Position10]
x = 1375.01416
y = 1702.021973
z = 2117.369385
u = 8.211453
v = 4.232689
w = -100.153625
x = 1339.699585
y = 1702.385742
z = 2197.976318
u = 9.554496
v = 7.15853
w = -99.243294
id = 10
order = 10
lineid = 1
@ -92,7 +92,7 @@ u = 5.812903
v = 5.431066
w = -168.01712
id = 12
order = 11
order = 13
lineid = 1
status = 9
linetype = 0
@ -188,3 +188,29 @@ lineid = 1
status = 3
linetype = 0
[Position4]
x = 1510.92981
y = 1653.713745
z = 2381.065186
u = 60.821259
v = -4.995515
w = -99.228653
id = 4
order = 11
lineid = 1
status = 8
linetype = 0
[Position11]
x = 1256.956909
y = 1809.304443
z = 2368.663574
u = -45.444492
v = 18.997807
w = -131.11731
id = 11
order = 12
lineid = 1
status = 8
linetype = 0

View File

@ -5,7 +5,7 @@ IO_EmergencyPoint = 2
max_log_len = 100
bag_height = 10 # height of one bag
position_accuracy_action = 0.1 # position tolerance when performing an action; must be tight, the action only fires once the position is reached
position_accuracy_command = 300 # position tolerance for commands
position_accuracy_command = 500 # position tolerance for commands
manual_adjust_accuracy = 1
# speed = 10
# shake_speed = 20
@ -13,7 +13,7 @@ manual_adjust_accuracy = 1
# return_speed = 10
feedLine_set_section = 'FeedLine'
position_set_section = 'Position'
feedLine_set_file = f'.{os.sep}Config{os.sep}feedLine.ini'
feedLine_set_file = f'.{os.sep}Config{os.sep}FeedLine.ini'
MAX_Position_num = 1000
MAX_Line_num = 10
set_ini = 'Seting.ini'

View File

@ -10,14 +10,21 @@ class Position:
self.U = 0.0
self.V = 0.0
self.W = 0.0
self.a = 0.0
self.b = 0.0
self.c = 0.0
def compare(self,position,is_action=False):
distance = math.sqrt((self.X-position.X)**2+
(self.Y-position.Y)**2+
(self.Z - position.Z)**2+
(self.U - position.U)**2+
(self.V - position.V)**2+
(self.W - position.W) ** 2)
# distance = math.sqrt((self.X-position.X)**2+
# (self.Y-position.Y)**2+
# (self.Z - position.Z)**2+
# (self.U - position.U)**2+
# (self.V - position.V)**2+
# (self.W - position.W) ** 2)
distance = math.sqrt((self.X - position.X) ** 2 +
(self.Y - position.Y) ** 2 +
(self.Z - position.Z) ** 2 )
if distance<=(position_accuracy_action if is_action else position_accuracy_command):
return True
else:

View File

@ -1,7 +1,7 @@
[Main]
[Robot_Feed]
ipaddress = 192.168.20.4
ipaddress = 127.0.0.1
port = 502
j1_min = -150
j1_max = +150
@ -47,7 +47,7 @@ photo_v5 = 0.0
photo_w5 = 1.0
linecount = 2
remain_linename = 1
remain_count = 0
remain_count = 999
io_take_addr = 8
io_zip_addr = 11
io_shake_addr = 12
@ -59,9 +59,9 @@ smooth = 9
dynamic_height = 350.0
[Speed]
debug_speed = 50
feed_speed = 550
reset_speed = 35
debug_speed = 100
feed_speed = 100
reset_speed = 100
[Origin]
x = 204.996765

View File

@ -1,4 +1,4 @@
9.4566884811714796e-02 -9.9470945966114444e-01 4.0127725032944608e-02 4.1471010091895931e+02
-9.9551731828304890e-01 -9.4428128820258375e-02 5.3435988155243787e-03 1.9335993881936060e+03
-1.5261424109928572e-03 -4.0453173929085366e-02 -9.9918028231904499e-01 2.7052051690106582e+03
0 0 0 1
1.1224831479369565e-01 -9.9366361198855646e-01 5.7395152958431492e-03 4.1812293824507674e+02
-9.9269850944334537e-01 -1.1239223491705273e-01 -4.3791036517882603e-02 1.9260165135520242e+03
4.4158636470522372e-02 -7.8213822690929326e-04 -9.9902422547446690e-01 2.7083490666098191e+03
0. 0. 0. 1.

View File

@ -1,4 +1,4 @@
9.6751729364544639e-02 -9.9459449602428807e-01 3.7694712403242861e-02 4.1708226127179734e+02
-9.9527317207468335e-01 -9.6998182736478769e-02 -4.7608291523444628e-03 1.9086796578832980e+03
8.3914130733227475e-03 -3.7055917530319504e-02 -9.9927796091108601e-01 2.7052748714031904e+03
0. 0. 0. 1
1.1224831479369565e-01 -9.9366361198855646e-01 5.7395152958431492e-03 4.1812293824507674e+02
-9.9269850944334537e-01 -1.1239223491705273e-01 -4.3791036517882603e-02 1.9260165135520242e+03
4.4158636470522372e-02 -7.8213822690929326e-04 -9.9902422547446690e-01 2.7083490666098191e+03
0. 0. 0. 1.
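Both calibration files above now hold the same updated 4x4 camera-to-robot homogeneous transform; getxyz in this diff loads such a matrix with np.loadtxt('./Trace/com_pose2.txt', delimiter=' '). A quick sketch of how the matrix is applied (the camera-frame point and direction are illustrative only):

import numpy as np

camera2robot = np.loadtxt('./Trace/com_pose2.txt', delimiter=' ')   # 4x4 homogeneous matrix
p_cam = np.array([100.0, 50.0, 800.0, 1.0])                         # [x, y, z, 1] in the camera frame
p_robot = camera2robot @ p_cam                                       # same point in the robot base frame
n_robot = camera2robot[:3, :3] @ np.array([0.0, 0.0, 1.0])           # directions use only the rotation part
print(p_robot[:3], n_robot)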

View File

@ -76,7 +76,7 @@ def getPosition(x,y,z,a,b,c,rotation,points):
# normalise the direction vector
short_edge_direction = edge_vector / np.linalg.norm(edge_vector)
delta = -200 # offset along the normal: a negative value raises, a positive value lowers
delta = -10 # offset along the normal: a negative value raises, a positive value lowers
angle = np.asarray([a,b,c])
noraml = camera2robot[:3, :3]@angle
normal_vector = noraml / np.linalg.norm(noraml)
@ -86,4 +86,14 @@ def getPosition(x,y,z,a,b,c,rotation,points):
return target_position,noraml_base
def getxyz(x,y,z,a,b,c):
target = np.asarray([x, y, z])
camera2robot = np.loadtxt('./Trace/com_pose2.txt', delimiter=' ')
# target_position_raw = np.dot(camera2robot, target)
delta = -500 # offset along the normal: a negative value raises, a positive value lowers
angle = np.asarray([a, b, c])
noraml = camera2robot[:3, :3] @ angle
normal_vector = noraml / np.linalg.norm(noraml)
target_position = target + delta * normal_vector
return target_position
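getxyz backs the target off 500 units along the rotated normal (delta = -500); FeedLine.set_take_position above uses it to derive the pre- and post-grasp waypoints. A small usage sketch with illustrative values:

xyz = getxyz(1430.0, 1765.0, 2050.0, 0.0, 0.0, 1.0)   # a position plus a normal direction (illustrative values)
print(xyz)                                             # the input point shifted 500 along the negated, rotated normal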

View File

@ -31,7 +31,7 @@ class DetectionBag:
model_path = ''.join([os.getcwd(), '/Vision/model/pt/bag_collection.pt'])
self.camera_rvc = camera_pe()
self.imgsz = 640
self.cuda = 'cpu'
self.cuda = '0'
self.conf = 0.40
self.iou = 0.45
self.model = AutoBackend(model_path, device=torch.device(self.cuda))
@ -82,7 +82,7 @@ class DetectionBag:
if Bag==True:
if get_disk_space(path=os.getcwd()) < 15: # less than 15 GB free, stop saving data
save_img_point = 0
print('系统内存不足,无法保存数据')
print('硬盘空间不足,无法保存数据')
else:
save_path = ''.join([os.getcwd(), '/Vision/model/data/',
time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime(time.time()))])

View File

@ -19,65 +19,71 @@ from Vision.tool.CameraPe_color2depth import camera_pe as camera_pe_color2depth
from Vision.tool.CameraPe_depth2color import camera_pe as camera_pe_depth2color
from Vision.yolo.yolov8_pt_seg import yolov8_segment
from Vision.yolo.yolov8_openvino import yolov8_segment_openvino
from Vision.yolo.yolov8_pt_pose import yolov8_pose
from Vision.tool.utils import find_position
from Vision.tool.utils import class_names
from Vision.tool.utils import get_disk_space
from Vision.tool.utils import remove_nan_mean_value
from Vision.tool.utils import out_bounds_dete
from Vision.tool.utils import uv_to_XY
from Vision.tool.utils import out_bounds_dete, find_closest_point_index
from Vision.tool.utils import uv_to_XY, shrink_quadrilateral
class Detection:
def __init__(self, use_openvino_model=False, cameraType = 'Pe', alignmentType = 'color2depth'): # cameraType = 'RVC' or cameraType = 'Pe'
def __init__(self, use_openvino_model=False, use_pose_model=True, use_seg_pt_model=True, cameraType = 'Pe', alignmentType = 'color2depth'): # cameraType = 'RVC' or cameraType = 'Pe'
"""
Initialise the camera and the models
:param use_openvino_model: load the segmentation model
:param use_pose_model: load the keypoint pt model
:param use_seg_pt_model: load the segmentation pt model
:param use_openvino_model: model selection; OpenVINO is used by default
:param cameraType: camera selection, e.g. 'RVC' for the RVC camera, 'Pe' for the Percipio (Tuyang) camera
:param alignmentType: camera alignment mode; color2depth aligns the colour image to the depth image, depth2color aligns the depth image to the colour image
"""
if use_seg_pt_model: # the pt model takes priority
use_openvino_model = False
elif use_openvino_model:
use_seg_pt_model = False
self.use_openvino_model = use_openvino_model
self.cameraType = cameraType
self.alignmentType= alignmentType
if self.use_openvino_model == False:
model_path = ''.join([os.getcwd(), '/Vision/model/pt/one_bag.pt'])
device = 'cpu'
if self.cameraType == 'RVC':
self.camera_rvc = camera_rvc()
self.seg_distance_threshold = 10 # 1厘米
elif self.cameraType == 'Pe':
if self.alignmentType=='color2depth':
self.camera_rvc = camera_pe_color2depth()
else:
self.camera_rvc = camera_pe_depth2color()
self.seg_distance_threshold = 15 # 2厘米
self.use_pose_model = use_pose_model
self.use_seg_pt_model = use_seg_pt_model
self.alignmentType = alignmentType
if self.cameraType == 'RVC':
self.camera_rvc = camera_rvc()
self.seg_distance_threshold = 10 # 1厘米
elif self.cameraType == 'Pe':
if self.alignmentType == 'color2depth':
self.camera_rvc = camera_pe_color2depth()
else:
print('相机参数错误')
return
self.model = yolov8_segment()
self.model.load_model(model_path, device)
self.camera_rvc = camera_pe_depth2color()
self.seg_distance_threshold = 15 # 2厘米
else:
model_path = ''.join([os.getcwd(), '/Vision/model/openvino/one_bag.xml'])
print('相机参数错误')
return
# load the OpenVINO seg model
if self.use_openvino_model:
model_path = ''.join([os.getcwd(), './Vision/model/openvino/one_bag.xml'])
device = 'CPU'
if self.cameraType == 'RVC':
self.camera_rvc = camera_rvc()
self.seg_distance_threshold = 10
elif self.cameraType == 'Pe':
if self.alignmentType == 'color2depth':
self.camera_rvc = camera_pe_color2depth()
else:
self.camera_rvc = camera_pe_depth2color()
self.seg_distance_threshold = 20
else:
print('相机参数错误')
return
self.model = yolov8_segment_openvino(model_path, device, conf_thres=0.3, iou_thres=0.3)
self.model_seg = yolov8_segment_openvino(model_path, device, conf_thres=0.6, iou_thres=0.6)
# load the pt seg model
if self.use_seg_pt_model:
model_path = ''.join([os.getcwd(), './Vision/model/pt/one_bag.pt'])
device = 'cpu'
self.model_seg = yolov8_segment()
self.model_seg.load_model(model_path, device)
# load the pt pose model
if self.use_pose_model:
model_path = ''.join([os.getcwd(), './Vision/model/pt/one_bag_pose.pt'])
device = 'cpu'
self.model_pose = yolov8_pose(model_path, device)
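The constructor now loads up to three backends (pt-seg by default, OpenVINO-seg, and the new pt-pose model); which one is used for a given call is decided via Use_Pose_Model_Pro. A short usage sketch mirroring the detectionPosition_test change later in this diff:

detection = Detection(use_openvino_model=False, use_pose_model=True, use_seg_pt_model=True)
ret, img, xyz, nx_ny_nz, box = detection.get_position(Use_Pose_Model_Pro=True)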
def get_position(self, Point_isVision=False, Box_isPoint=True, First_Depth =True, Iter_Max_Pixel = 30, save_img_point=0, Height_reduce = 80, width_reduce = 60, Xmin =160, Xmax = 1050, Ymin =290 ,Ymax = 780):
def get_position(self, Use_Pose_Model_Pro=False, Point_isVision=False, Box_isPoint=True, First_Depth =True, Iter_Max_Pixel = 30, save_img_point=0, Height_reduce = 80, width_reduce = 60, Xmin =160, Xmax = 1050, Ymin =290 ,Ymax = 780):
"""
Detect information about the feed bags
:param Use_Pose_Model_Pro: True: use keypoint inference; False: use the segmentation model
:param Point_isVision: visualise the point cloud
:param Box_isPoint: True returns point-cloud values; False returns the box camera coordinates
:param First_Depth: True returns the point-cloud value of the bag centre with the smallest depth; False returns the centre of the bag with the largest area
@ -97,7 +103,11 @@ class Detection:
:return box_list: list the four vertices of the shrunk detection box, e.g. [[x1,y1],[],[],[]]
"""
ret, img, pm, _depth_align = self.camera_rvc.get_img_and_point_map() # take a photo; get the image and point map
# ret, img, pm, _depth_align = self.camera_rvc.get_img_and_point_map() # take a photo; get the image and point map
ret = 1
pm1 = np.loadtxt('D:\pychram_rob\AutoControlSystem-git\Vision\model\data\\2024_11_29_10_05_58.xyz', dtype=np.float32)
img = cv2.imread('D:\pychram_rob\AutoControlSystem-git\Vision\model\data\\2024_11_29_10_05_58.png')
pm = pm1.reshape((img.shape[0], img.shape[1], 3))
if self.camera_rvc.caminit_isok == True:
if ret == 1:
if save_img_point != 0:
@ -124,10 +134,19 @@ class Detection:
Abnormal_data_point = point_new.copy()
else:
np.savetxt(save_point_name, point_new)
if self.use_openvino_model == False:
flag, det_cpu, dst_img, masks, category_names = self.model.model_inference(img, 0)
if self.use_pose_model and Use_Pose_Model_Pro:
real_model_pro_isPose = True
else:
flag, det_cpu, scores, masks, category_names = self.model.segment_objects(img)
real_model_pro_isPose = False
if real_model_pro_isPose:
flag, det_cpu, category_names, score_list = self.model_pose.model_inference(img) # use the keypoint detection model
else:
if self.use_openvino_model == False:
flag, det_cpu, dst_img, masks, category_names = self.model_seg.model_inference(img, 0) # use the segmentation model
else:
flag, det_cpu, scores, masks, category_names = self.model_seg.segment_objects(img)
if flag == 1:
xyz = []
nx_ny_nz = []
@ -145,16 +164,26 @@ class Detection:
pcd2.points = o3d.utility.Vector3dVector(pm2)
# o3d.visualization.draw_geometries([pcd2])
for i, item in enumerate(det_cpu):
for i, item in enumerate(det_cpu): # iterate over the detected box information
# 画box
box_x1, box_y1, box_x2, box_y2 = item[0:4].astype(np.int32)
if self.use_openvino_model == False:
label = category_names[int(item[5])]
if real_model_pro_isPose:
label = category_names[i]
score = score_list[i]
box_x1 = item[0][0]
box_y1 = item[0][1]
box_x2 = item[3][0]
box_y2 = item[3][1]
pass
else:
label = class_names[int(item[4])]
box_x1, box_y1, box_x2, box_y2 = item[0:4].astype(np.int32) # corner-1 and corner-3 coordinates of the nearest box
if self.use_openvino_model == False:
label = category_names[int(item[5])]
score = item[4]
else:
label = class_names[int(item[4])]
score = item[4]
rand_color = (0, 255, 255)
score = item[4]
org = (int((box_x1 + box_x2) / 2), int((box_y1 + box_y2) / 2))
x_center = int((box_x1 + box_x2) / 2)
y_center = int((box_y1 + box_y2) / 2)
@ -164,75 +193,116 @@ class Detection:
thickness=2)
# 画mask
# mask = masks[i].cpu().numpy().astype(int)
if self.use_openvino_model == False:
mask = masks[i].cpu().data.numpy().astype(int)
if real_model_pro_isPose:
# create a mask with the same shape as the input array, initialised to all 0
mask = np.zeros(pm.shape[:2], dtype=np.uint8)
# convert the four corner points to a numpy array
if item[0][0] < item[1][0]:
arr = [[item[0][0], item[0][1]],
[item[1][0], item[1][1]],
[item[3][0], item[3][1]],
[item[2][0], item[2][1]]]
# new_points.reshape((-1, 1, 2))
else:
arr = [[item[3][0], item[3][1]],
[item[2][0], item[2][1]],
[item[0][0], item[0][1]],
[item[1][0], item[1][1]]]
box = arr.copy()
box_outside = arr.copy()
box = shrink_quadrilateral(box, Height_reduce)
pts = np.array(box, np.int32)
# mark the quadrilateral formed by the four points as 255 on the mask
cv2.fillPoly(mask, [pts], 255)
# extract the data of the corresponding region using the mask
pm_seg = pm[mask == 255]
# box =[[[item[0][0]+width_reduce, item[0][1]+Height_reduce]],
# [[item[1][0]-width_reduce, item[1][1]+Height_reduce]],
# [[item[3][0]-width_reduce, item[3][1]-Height_reduce]],
# [[item[2][0]+width_reduce, item[2][1]-Height_reduce]]]
box = box.reshape((-1, 1, 2))
# box = np.array(box)
# 内缩
# box_outside = [[[item[0][0], item[0][1]]],
# [[item[1][0], item[1][1]]],
# [[item[3][0], item[3][1]]],
# [[item[2][0], item[2][1]]]]# 外框
box_outside = np.array(box_outside)
box_outside = box_outside.reshape((-1, 1, 2))
# box_outside = np.array(box_outside)
else:
mask = masks[i].astype(int)
mask = mask[box_y1:box_y2, box_x1:box_x2]
if self.use_openvino_model == False:
mask = masks[i].cpu().data.numpy().astype(int)
else:
mask = masks[i].astype(int)
mask = mask[box_y1:box_y2, box_x1:box_x2]
# mask = masks[i].numpy().astype(int)
h, w = box_y2 - box_y1, box_x2 - box_x1
mask_colored = np.zeros((h, w, 3), dtype=np.uint8)
mask_colored[np.where(mask)] = rand_color
##################################
imgray = cv2.cvtColor(mask_colored, cv2.COLOR_BGR2GRAY)
# cv2.imshow('mask',imgray)
# cv2.waitKey(1)
# 2、二进制图像
ret, binary = cv2.threshold(imgray, 10, 255, 0)
# 阈值 二进制图像
# cv2.imshow('bin',binary)
# cv2.waitKey(1)
contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
# all_point_list = contours_in(contours)
# print(len(all_point_list))
max_contour = None
max_perimeter = 0
for contour in contours: # 排除小分割区域或干扰区域
perimeter = cv2.arcLength(contour, True)
if perimeter > max_perimeter:
max_perimeter = perimeter
max_contour = contour
# mask = masks[i].numpy().astype(int)
h, w = box_y2 - box_y1, box_x2 - box_x1
mask_colored = np.zeros((h, w, 3), dtype=np.uint8)
mask_colored[np.where(mask)] = rand_color
##################################
imgray = cv2.cvtColor(mask_colored, cv2.COLOR_BGR2GRAY)
# cv2.imshow('mask',imgray)
# cv2.waitKey(1)
# 2. binary image
ret, binary = cv2.threshold(imgray, 10, 255, 0)
# threshold to a binary image
# cv2.imshow('bin',binary)
# cv2.waitKey(1)
contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE) # find object contours after greyscaling and binarisation; contours holds the contour information
# all_point_list = contours_in(contours)
# print(len(all_point_list))
max_contour = None
max_perimeter = 0
for contour in contours: # exclude small segmentation regions and interference
perimeter = cv2.arcLength(contour, True) # compute the perimeter
if perimeter > max_perimeter:
max_perimeter = perimeter
max_contour = contour
'''
Fit the minimum-area bounding rectangle and compute its centre
'''
'''
Fit the minimum-area bounding rectangle and compute its centre
'''
rect = cv2.minAreaRect(max_contour) # minimum-area bounding rectangle of a set of points
if rect[1][0]-width_reduce > 30 and rect[1][1]-Height_reduce > 30:
rect_reduce = (
(rect[0][0], rect[0][1]), (rect[1][0] - width_reduce, rect[1][1] - Height_reduce), rect[2])
else:
rect_reduce = (
(rect[0][0], rect[0][1]), (rect[1][0], rect[1][1]), rect[2])
rect = cv2.minAreaRect(max_contour)
if rect[1][0]-width_reduce > 30 and rect[1][1]-Height_reduce > 30:
rect_reduce = (
(rect[0][0], rect[0][1]), (rect[1][0] - width_reduce, rect[1][1] - Height_reduce), rect[2])
else:
rect_reduce = (
(rect[0][0], rect[0][1]), (rect[1][0], rect[1][1]), rect[2])
# cv2.boxPoints converts the rectangle into its four corner coordinates
box_outside = cv2.boxPoints(rect) # compute the corner coordinates
# this step does not affect the later drawing, but guarantees the four corners are in clockwise order
startidx = box_outside.sum(axis=1).argmin()
box_outside = np.roll(box_outside, 4 - startidx, 0) # outer box
box_outside = np.intp(box_outside)
box_outside = box_outside.reshape((-1, 1, 2)).astype(np.int32)
# cv2.boxPoints可以将轮廓点转换为四个角点坐标
box_outside = cv2.boxPoints(rect)
# 这一步不影响后面的画图,但是可以保证四个角点坐标为顺时针
startidx = box_outside.sum(axis=1).argmin()
box_outside = np.roll(box_outside, 4 - startidx, 0)
box_outside = np.intp(box_outside)
box_outside = box_outside.reshape((-1, 1, 2)).astype(np.int32)
# cv2.boxPoints可以将轮廓点转换为四个角点坐标
box_reduce = cv2.boxPoints(rect_reduce)
startidx = box_reduce.sum(axis=1).argmin()
box_reduce = np.roll(box_reduce, 4 - startidx, 0)#内框
box_reduce = np.intp(box_reduce)
box_reduce = box_reduce.reshape((-1, 1, 2)).astype(np.int32)
box_outside = box_outside + [[[box_x1, box_y1]], [[box_x1, box_y1]], [[box_x1, box_y1]],
[[box_x1, box_y1]]]
box = box_reduce + [[[box_x1, box_y1]], [[box_x1, box_y1]], [[box_x1, box_y1]],
[[box_x1, box_y1]]] # likewise treated as the anchor offset
# cv2.boxPoints可以将轮廓点转换为四个角点坐标
box_reduce = cv2.boxPoints(rect_reduce)
startidx = box_reduce.sum(axis=1).argmin()
box_reduce = np.roll(box_reduce, 4 - startidx, 0)
box_reduce = np.intp(box_reduce)
box_reduce = box_reduce.reshape((-1, 1, 2)).astype(np.int32)
'''
Extract the x, y within the region
'''
mask_inside = np.zeros(binary.shape, np.uint8)
cv2.fillPoly(mask_inside, [box_reduce], (255))
pixel_point2 = cv2.findNonZero(mask_inside)
# result = np.zeros_like(color_image)
select_point = []
for i in range(pixel_point2.shape[0]):
select_point.append(pm[pixel_point2[i][0][1]+box_y1, pixel_point2[i][0][0]+box_x1])
select_point = np.array(select_point)
pm_seg = select_point.reshape(-1, 3)
'''
Extract the x, y within the region
'''
mask_inside = np.zeros(binary.shape, np.uint8)
cv2.fillPoly(mask_inside, [box_reduce], (255))
pixel_point2 = cv2.findNonZero(mask_inside)
# result = np.zeros_like(color_image)
select_point = []
for i in range(pixel_point2.shape[0]):
select_point.append(pm[pixel_point2[i][0][1]+box_y1, pixel_point2[i][0][0]+box_x1]) # box_y1/box_x1 are added because the mask is no longer in original-image coordinates; they act as the mask's anchor in the original image and map the cropped shape back to its position
select_point = np.array(select_point)
pm_seg = select_point.reshape(-1, 3)#小框里面对应的点云
pm_seg = pm_seg[~np.isnan(pm_seg).all(axis=-1), :] # 剔除 nan
if pm_seg.size < 100:
print("分割点云数量较少,无法拟合平面")
@ -242,27 +312,24 @@ class Detection:
'''
Fit a plane and compute its normal vector
'''
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(pm_seg)
pcd = o3d.geometry.PointCloud() # create the point-cloud object
pcd.points = o3d.utility.Vector3dVector(pm_seg) # convert to Open3D format
plane_model, inliers = pcd.segment_plane(distance_threshold=self.seg_distance_threshold,
ransac_n=5,
num_iterations=5000)
[a, b, c, d] = plane_model
num_iterations=5000) # plane segmentation / plane fitting; plane_model holds the fitted plane coefficients
[a, b, c, d] = plane_model # ax + by + cz + d = 0; (a, b, c) is the normal vector
# print(f"Plane equation: {a:.2f}x + {b:.2f}y + {c:.2f}z + {d:.2f} = 0")
# inlier_cloud = pcd.select_by_index(inliers) # 点云可视化
# inlier_cloud.paint_uniform_color([1.0, 0, 0])
# outlier_cloud = pcd.select_by_index(inliers, invert=True)
# outlier_cloud.paint_uniform_color([0, 1, 0])
# o3d.visualization.draw_geometries([inlier_cloud, outlier_cloud])
box_outside = box_outside + [[[box_x1, box_y1]], [[box_x1, box_y1]], [[box_x1, box_y1]],[[box_x1, box_y1]]]
box = box_reduce + [[[box_x1, box_y1]], [[box_x1, box_y1]], [[box_x1, box_y1]], [[box_x1, box_y1]]]
box[0][0][1], box[0][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[0][0][1], box[0][0][0])
print(box)
box[0][0][1], box[0][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[0][0][1], box[0][0][0]) # check whether the box exceeds the point-cloud bounds; pm is the point cloud of the whole image, box is only the frame returned by the segmentation model
box[1][0][1], box[1][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[1][0][1], box[1][0][0])
box[2][0][1], box[2][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[2][0][1], box[2][0][0])
box[3][0][1], box[3][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[3][0][1], box[3][0][0])
if Box_isPoint == True:
if Box_isPoint == True: # make sure the box coordinates can be returned; if a point has no value, use the mean of its neighbours
box_point_x1, box_point_y1, box_point_z1 = remove_nan_mean_value(pm, box[0][0][1], box[0][0][0], iter_max=Iter_Max_Pixel)
box_point_x2, box_point_y2, box_point_z2 = remove_nan_mean_value(pm, box[1][0][1], box[1][0][0], iter_max=Iter_Max_Pixel)
box_point_x3, box_point_y3, box_point_z3 = remove_nan_mean_value(pm, box[2][0][1], box[2][0][0], iter_max=Iter_Max_Pixel)
@ -274,10 +341,10 @@ class Detection:
x4, y4, z4 = uv_to_XY(box[3][0][0], box[3][0][1])
x_rotation_center = int((box[0][0][0] + box[1][0][0] + box[2][0][0] + box[3][0][0]) / 4)
y_rotation_center = int((box[0][0][1] + box[1][0][1] + box[2][0][1] + box[3][0][1]) / 4)
point_x, point_y, point_z = remove_nan_mean_value(pm, y_rotation_center, x_rotation_center, iter_max=Iter_Max_Pixel)
point_x, point_y, point_z = remove_nan_mean_value(pm, y_rotation_center, x_rotation_center, iter_max=Iter_Max_Pixel) # get the centre-point position
if x_rotation_center<Xmin or x_rotation_center>Xmax or y_rotation_center<Ymin or y_rotation_center>Ymax:
continue
cv2.circle(img, (x_rotation_center, y_rotation_center), 4, (255, 255, 255), 5) # 标出中心点
cv2.circle(img, (x_rotation_center, y_rotation_center), 2, (255, 255, 255), 3) # 标出中心点,只是标出来
if np.isnan(point_x): # 点云值为无效值
continue
else:
@ -286,25 +353,28 @@ class Detection:
[[box_point_x1, box_point_y1, box_point_z1],
[box_point_x2, box_point_y2, box_point_z2],
[box_point_x3, box_point_y3, box_point_z3],
[box_point_x4, box_point_y4, box_point_z4]])
[box_point_x4, box_point_y4, box_point_z4]])#四个顶点加入到列表中
else:
box_list.append([[x1, y1, z1],
[x2, y2, z2],
[x3, y3, z3],
[x4, y4, z4],
])
if self.cameraType=='RVC':
if self.cameraType=='RVC':#换单位?
xyz.append([point_x*1000, point_y*1000, point_z*1000])
Depth_Z.append(point_z*1000)
elif self.cameraType=='Pe':
xyz.append([point_x, point_y, point_z])
Depth_Z.append(point_z)
nx_ny_nz.append([a, b, c])
RegionalArea.append(cv2.contourArea(max_contour))
uv.append([x_rotation_center, y_rotation_center])
seg_point.append(pm_seg)
cv2.polylines(img, [box], True, (0, 255, 0), 2)
cv2.polylines(img, [box_outside], True, (226, 12, 89), 2)
if real_model_pro_isPose:
RegionalArea.append(0)
else:
RegionalArea.append(cv2.contourArea(max_contour)) # compute the area
nx_ny_nz.append([a, b, c]) # normal vector
uv.append([x_rotation_center, y_rotation_center]) # centre point x, y
seg_point.append(pm_seg) # region point cloud
cv2.polylines(img, [box], True, (0, 255, 0), 2) # draw the inner box
cv2.polylines(img, [box_outside], True, (226, 12, 89), 2) # draw the outer box
_idx = find_position(Depth_Z, RegionalArea, 100, First_Depth)
@ -314,7 +384,7 @@ class Detection:
np.savetxt(save_point_name, Abnormal_data_point)
return 1, img, None, None, None
else:
cv2.circle(img, (uv[_idx][0], uv[_idx][1]), 30, (0, 0, 255), 20) # 标出中心点
cv2.circle(img, (uv[_idx][0], uv[_idx][1]), 30, (0, 0, 255), 10) # 标出中心点
if Point_isVision==True:
pcd = o3d.geometry.PointCloud()
@ -348,276 +418,272 @@ class Detection:
print("RVC X Camera is not opened!")
return 0, None, None, None, None
def read_data(self, xyz_path, img_path):
pm1 = np.loadtxt(xyz_path, dtype=np.float32)
img = cv2.imread(img_path)
pm = pm1.reshape((img.shape[0], img.shape[1], 3))
return img, pm
def get_position_and_depth(self, Point_isVision=False, Box_isPoint=True, First_Depth =True, Target_pixel_threshold = 200, Iter_Max_Pixel = 30, save_img_point=0, Height_reduce = 30, width_reduce = 30):
"""
Eye-in-hand; used for grasping the top layer of bags, detects information about the feed bags
:param Point_isVision: visualise the point cloud
:param Box_isPoint: True returns point-cloud values; False returns the box camera coordinates
:param First_Depth: True returns the point-cloud value of the bag centre with the smallest depth; False returns the centre of the bag with the largest area
:param Target_pixel_threshold: [int] pixel threshold used to decide whether grasping is possible
:param Iter_Max_Pixel: [int] when the point cloud is NaN, search an area of up to Iter_Max_Pixel x Iter_Max_Pixel around the point for a substitute value
:param save_img_point: 0 do not save; 1 save the raw image; 2 save the processed image; 3 save the point cloud and the raw image; 4 save the point cloud and the processed image; 5 save abnormal data (point cloud with NaN)
:param Height_reduce: pixels by which to shrink the detection-box height
:param width_reduce: pixels by which to shrink the detection-box width
:return ret: bool whether the camera is working normally
:return img: ndarray the image
:return xyz: list point-cloud value of the target centre, e.g. [x,y,z]
:return nx_ny_nz: list normal vector of the fitted plane, e.g. [a,b,c]
:return box_list: list the four vertices of the shrunk detection box, e.g. [[x1,y1],[],[],[]]
def save_data(self, img, pm, save_img_point, save_path):
if save_img_point == 0:
return
if not os.path.exists(os.path.dirname(save_path)):
os.makedirs(os.path.dirname(save_path))
save_img_name = save_path + '.png'
save_point_name = save_path + '.xyz'
"""
ret, img, pm = self.camera_rvc.get_img_and_point_map() # 拍照,获取图像及
if self.camera_rvc.caminit_isok == True:
if ret == 1:
if save_img_point != 0:
if get_disk_space(path=os.getcwd()) < 15: # 内存小于15G,停止保存数据
save_img_point = 0
print('系统内存不足,无法保存数据')
else:
save_path = ''.join([os.getcwd(), '/Vision/model/data/',
time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime(time.time()))])
save_img_name = ''.join([save_path, '.png'])
save_point_name = ''.join([save_path, '.xyz'])
if save_img_point == 5:
Abnormal_data_img = img.copy()
if save_img_point==1 or save_img_point==3:
cv2.imwrite(save_img_name, img)
if save_img_point==3 or save_img_point==4 or save_img_point==5:
row_list = list(range(1, img.shape[0], 2))
column_list = list(range(1, img.shape[1], 2))
pm_save = pm.copy()
pm_save1 = np.delete(pm_save, row_list, axis=0)
point_new = np.delete(pm_save1, column_list, axis=1)
point_new = point_new.reshape(-1, 3)
if save_img_point==5:
Abnormal_data_point = point_new.copy()
else:
np.savetxt(save_point_name, point_new)
if self.use_openvino_model == False:
flag, det_cpu, dst_img, masks, category_names = self.model.model_inference(img, 0)
else:
flag, det_cpu, scores, masks, category_names = self.model.segment_objects(img)
if flag == 1:
xyz = []
nx_ny_nz = []
RegionalArea = []
Depth_Z = []
uv = []
seg_point = []
box_list = []
target_box_area = 0
if Point_isVision==True:
pm2 = pm.copy()
pm2 = pm2.reshape(-1, 3)
pm2 = pm2[~np.isnan(pm2).all(axis=-1), :]
pm2[:, 2] = pm2[:, 2] + 0.25
pcd2 = o3d.geometry.PointCloud()
pcd2.points = o3d.utility.Vector3dVector(pm2)
# o3d.visualization.draw_geometries([pcd2])
for i, item in enumerate(det_cpu):
target_box_area = 0
# 画box
box_x1, box_y1, box_x2, box_y2 = item[0:4].astype(np.int32)
if self.use_openvino_model == False:
label = category_names[int(item[5])]
else:
label = class_names[int(item[4])]
rand_color = (0, 255, 255)
score = item[4]
org = (int((box_x1 + box_x2) / 2), int((box_y1 + box_y2) / 2))
x_center = int((box_x1 + box_x2) / 2)
y_center = int((box_y1 + box_y2) / 2)
text = '{}|{:.2f}'.format(label, score)
cv2.putText(img, text, org=org, fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.8,
color=rand_color,
thickness=2)
# 画mask
# mask = masks[i].cpu().numpy().astype(int)
if self.use_openvino_model == False:
mask = masks[i].cpu().data.numpy().astype(int)
else:
mask = masks[i].astype(int)
mask = mask[box_y1:box_y2, box_x1:box_x2]
# mask = masks[i].numpy().astype(int)
h, w = box_y2 - box_y1, box_x2 - box_x1
mask_colored = np.zeros((h, w, 3), dtype=np.uint8)
mask_colored[np.where(mask)] = rand_color
##################################
imgray = cv2.cvtColor(mask_colored, cv2.COLOR_BGR2GRAY)
# cv2.imshow('mask',imgray)
# cv2.waitKey(1)
# 2、二进制图像
ret, binary = cv2.threshold(imgray, 10, 255, 0)
# 阈值 二进制图像
# cv2.imshow('bin',binary)
# cv2.waitKey(1)
contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
# all_point_list = contours_in(contours)
# print(len(all_point_list))
max_contour = None
max_perimeter = 0
for contour in contours: # 排除小分割区域或干扰区域
perimeter = cv2.arcLength(contour, True)
if perimeter > max_perimeter:
max_perimeter = perimeter
max_contour = contour
'''
拟合最小外接矩形,计算矩形中心
'''
rect = cv2.minAreaRect(max_contour)
if rect[1][0]-width_reduce > 30 and rect[1][1]-Height_reduce > 30:
rect_reduce = (
(rect[0][0], rect[0][1]), (rect[1][0] - width_reduce, rect[1][1] - Height_reduce), rect[2])
else:
rect_reduce = (
(rect[0][0], rect[0][1]), (rect[1][0], rect[1][1]), rect[2])
target_box_area = rect[1][0] * rect[1][1]
# cv2.boxPoints可以将轮廓点转换为四个角点坐标
box_outside = cv2.boxPoints(rect)
# 这一步不影响后面的画图,但是可以保证四个角点坐标为顺时针
startidx = box_outside.sum(axis=1).argmin()
box_outside = np.roll(box_outside, 4 - startidx, 0)
box_outside = np.intp(box_outside)
box_outside = box_outside.reshape((-1, 1, 2)).astype(np.int32)
# cv2.boxPoints可以将轮廓点转换为四个角点坐标
box_reduce = cv2.boxPoints(rect_reduce)
startidx = box_reduce.sum(axis=1).argmin()
box_reduce = np.roll(box_reduce, 4 - startidx, 0)
box_reduce = np.intp(box_reduce)
box_reduce = box_reduce.reshape((-1, 1, 2)).astype(np.int32)
'''
提取区域范围内的x, y
'''
mask_inside = np.zeros(binary.shape, np.uint8)
cv2.fillPoly(mask_inside, [box_reduce], (255))
pixel_point2 = cv2.findNonZero(mask_inside)
# result = np.zeros_like(color_image)
select_point = []
for i in range(pixel_point2.shape[0]):
select_point.append(pm[pixel_point2[i][0][1]+box_y1, pixel_point2[i][0][0]+box_x1])
select_point = np.array(select_point)
pm_seg = select_point.reshape(-1, 3)
pm_seg = pm_seg[~np.isnan(pm_seg).all(axis=-1), :] # 剔除 nan
if pm_seg.size < 100:
print("分割点云数量较少,无法拟合平面")
continue
# cv2.imshow('result', point_result)
'''
拟合平面,计算法向量
'''
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(pm_seg)
plane_model, inliers = pcd.segment_plane(distance_threshold=self.seg_distance_threshold,
ransac_n=5,
num_iterations=5000)
[a, b, c, d] = plane_model
# print(f"Plane equation: {a:.2f}x + {b:.2f}y + {c:.2f}z + {d:.2f} = 0")
# inlier_cloud = pcd.select_by_index(inliers) # 点云可视化
# inlier_cloud.paint_uniform_color([1.0, 0, 0])
# outlier_cloud = pcd.select_by_index(inliers, invert=True)
# outlier_cloud.paint_uniform_color([0, 1, 0])
# o3d.visualization.draw_geometries([inlier_cloud, outlier_cloud])
box_outside = box_outside + [[[box_x1, box_y1]], [[box_x1, box_y1]], [[box_x1, box_y1]],[[box_x1, box_y1]]]
box = box_reduce + [[[box_x1, box_y1]], [[box_x1, box_y1]], [[box_x1, box_y1]], [[box_x1, box_y1]]]
box[0][0][1], box[0][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[0][0][1], box[0][0][0])
box[1][0][1], box[1][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[1][0][1], box[1][0][0])
box[2][0][1], box[2][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[2][0][1], box[2][0][0])
box[3][0][1], box[3][0][0] = out_bounds_dete(pm.shape[0], pm.shape[1], box[3][0][1], box[3][0][0])
if Box_isPoint == True:
box_point_x1, box_point_y1, box_point_z1 = remove_nan_mean_value(pm, box[0][0][1], box[0][0][0], iter_max=Iter_Max_Pixel)
box_point_x2, box_point_y2, box_point_z2 = remove_nan_mean_value(pm, box[1][0][1], box[1][0][0], iter_max=Iter_Max_Pixel)
box_point_x3, box_point_y3, box_point_z3 = remove_nan_mean_value(pm, box[2][0][1], box[2][0][0], iter_max=Iter_Max_Pixel)
box_point_x4, box_point_y4, box_point_z4 = remove_nan_mean_value(pm, box[3][0][1], box[3][0][0], iter_max=Iter_Max_Pixel)
else:
x1, y1, z1 = uv_to_XY(box[0][0][0], box[0][0][1])
x2, y2, z2 = uv_to_XY(box[1][0][0], box[1][0][1])
x3, y3, z3 = uv_to_XY(box[2][0][0], box[2][0][1])
x4, y4, z4 = uv_to_XY(box[3][0][0], box[3][0][1])
x_rotation_center = int((box[0][0][0] + box[1][0][0] + box[2][0][0] + box[3][0][0]) / 4)
y_rotation_center = int((box[0][0][1] + box[1][0][1] + box[2][0][1] + box[3][0][1]) / 4)
point_x, point_y, point_z = remove_nan_mean_value(pm, y_rotation_center, x_rotation_center, iter_max=Iter_Max_Pixel)
cv2.circle(img, (x_rotation_center, y_rotation_center), 4, (255, 255, 255), 5) # 标出中心点
if np.isnan(point_x): # 点云值为无效值
continue
else:
if Box_isPoint == True:
box_list.append(
[[box_point_x1, box_point_y1, box_point_z1],
[box_point_x2, box_point_y2, box_point_z2],
[box_point_x3, box_point_y3, box_point_z3],
[box_point_x4, box_point_y4, box_point_z4]])
else:
box_list.append([[x1, y1, z1],
[x2, y2, z2],
[x3, y3, z3],
[x4, y4, z4],
])
if target_box_area > img.shape[0]*img.shape[1]*(2/3): # Target_pixel_threshold
if self.cameraType == 'RVC':
xyz.append([point_x*1000, point_y*1000, point_z*1000])
Depth_Z.append(point_z*1000)
elif self.cameraType=='Pe':
xyz.append([point_x, point_y, point_z])
Depth_Z.append(point_z)
nx_ny_nz.append([a, b, c])
RegionalArea.append(cv2.contourArea(max_contour))
uv.append([x_rotation_center, y_rotation_center])
seg_point.append(pm_seg)
cv2.polylines(img, [box], True, (0, 255, 0), 2)
cv2.polylines(img, [box_outside], True, (226, 12, 89), 2)
_idx = find_position(Depth_Z, RegionalArea, 100, First_Depth)
if _idx == None:
if save_img_point == 5:
cv2.imwrite(save_img_name, Abnormal_data_img)
np.savetxt(save_point_name, Abnormal_data_point)
return 1, img, None, None, None
else:
cv2.circle(img, (uv[_idx][0], uv[_idx][1]), 30, (0, 0, 255), 20) # 标出中心点
if Point_isVision==True:
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(seg_point[_idx])
plane_model, inliers = pcd.segment_plane(distance_threshold=self.seg_distance_threshold,
ransac_n=5,
num_iterations=5000)
inlier_cloud = pcd.select_by_index(inliers) # 点云可视化
inlier_cloud.paint_uniform_color([1.0, 0, 0])
outlier_cloud = pcd.select_by_index(inliers, invert=True)
outlier_cloud.paint_uniform_color([0, 0, 1])
o3d.visualization.draw_geometries([inlier_cloud, outlier_cloud, pcd2])
if save_img_point == 2 or save_img_point ==4:
save_img = cv2.resize(img, (720, 540))
cv2.imwrite(save_img_name, save_img)
return 1, img, xyz[_idx], nx_ny_nz[_idx], box_list[_idx]
else:
if save_img_point == 2 or save_img_point ==4:
save_img = cv2.resize(img,(720, 540))
cv2.imwrite(save_img_name, save_img)
if save_img_point == 5:
cv2.imwrite(save_img_name, Abnormal_data_img)
np.savetxt(save_point_name, Abnormal_data_point)
return 1, img, None, None, None
else:
print("RVC X Camera capture failed!")
return 0, None, None, None, None
if save_img_point in (1, 3):
cv2.imwrite(save_img_name, img)
if save_img_point in (3, 4):
row_list = list(range(1, img.shape[0], 2))
column_list = list(range(1, img.shape[1], 2))
pm_save = pm.copy()
pm_save1 = np.delete(pm_save, row_list, axis=0)
point_new = np.delete(pm_save1, column_list, axis=1)
point_new = point_new.reshape(-1, 3)
np.savetxt(save_point_name, point_new)
def model_inference(self, img, Use_Pose_Model_Pro):
real_model_pro_isPose = self.use_pose_model and Use_Pose_Model_Pro
if real_model_pro_isPose:
flag, det_cpu, category_names, score_list = self.model_pose.model_inference(img)
return flag, det_cpu, category_names, score_list, real_model_pro_isPose
else:
if self.use_openvino_model:
flag, det_cpu, scores, masks, category_names = self.model_seg.segment_objects(img)
else:
flag, det_cpu, dst_img, masks, category_names = self.model_seg.model_inference(img, 0)
return flag, det_cpu, category_names, masks, real_model_pro_isPose
def get_box_3d_points(self, pm, box, Box_isPoint=True, Iter_Max_Pixel=30):
"""
Input: box is a set of (4, 2) pixel coordinates [[x1,y1], ..., [x4,y4]]
Output: the 3D coordinates [x, y, z] of the 4 points
"""
box = np.array(box).reshape(-1, 2) # 强制为 (4, 2)
pts_3d = []
for pt in box:
# 确保 pt 是 [x, y] 结构
x_img, y_img = int(pt[0]), int(pt[1])
if Box_isPoint:
x3d, y3d, z3d = remove_nan_mean_value(pm, y_img, x_img, iter_max=Iter_Max_Pixel)
else:
x3d, y3d, z3d = uv_to_XY(x_img, y_img)
pts_3d.append([x3d, y3d, z3d])
return pts_3d
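A short usage sketch for the helper above; the pixel corners are illustrative, pm is the point map returned by the camera, and the result is the four corners lifted into 3D either from the point map (Box_isPoint=True) or via uv_to_XY back-projection:

corners_px = [[320, 240], [480, 240], [480, 360], [320, 360]]          # illustrative (4, 2) pixel box
corners_3d = detection.get_box_3d_points(pm, corners_px, Box_isPoint=True, Iter_Max_Pixel=30)
# -> [[x1, y1, z1], [x2, y2, z2], [x3, y3, z3], [x4, y4, z4]]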
def process_mask_and_get_box(self, i,item, masks, pm, box_coords, Height_reduce, width_reduce, real_model_pro_isPose, use_openvino_model):
"""
Process the mask, extract the region point cloud, and build the shrunk and outer boxes
Returns box (shrunk), box_outside (outer box), pm_seg (region point cloud)
"""
if real_model_pro_isPose:
# the keypoint model already provides the four box corner coordinates
mask = np.zeros(pm.shape[:2], dtype=np.uint8)
if item[0][0] < item[1][0]:
arr = [[item[0][0], item[0][1]], [item[1][0], item[1][1]], [item[3][0], item[3][1]], [item[2][0], item[2][1]]]
else:
arr = [[item[3][0], item[3][1]], [item[2][0], item[2][1]], [item[0][0], item[0][1]], [item[1][0], item[1][1]]]
box = shrink_quadrilateral(arr, Height_reduce)
pts = np.array(box, np.int32)
cv2.fillPoly(mask, [pts], 255)
pm_seg = pm[mask == 255]
box = np.array(box).reshape((-1, 1, 2)).astype(np.int32)
box_outside = np.array(arr).reshape((-1, 1, 2)).astype(np.int32)
else:
# segmentation model
box_x1, box_y1, box_x2, box_y2 = box_coords
if not use_openvino_model:
mask = masks[i].cpu().data.numpy().astype(int)
else:
mask = masks[i].astype(int)
mask = mask[box_y1:box_y2, box_x1:box_x2]
h, w = box_y2 - box_y1, box_x2 - box_x1
mask_colored = np.zeros((h, w, 3), dtype=np.uint8)
mask_colored[np.where(mask)] = (0, 255, 255)
imgray = cv2.cvtColor(mask_colored, cv2.COLOR_BGR2GRAY)
ret, binary = cv2.threshold(imgray, 10, 255, 0)
contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
max_contour = None
max_perimeter = 0
for contour in contours:
perimeter = cv2.arcLength(contour, True)
if perimeter > max_perimeter:
max_perimeter = perimeter
max_contour = contour
rect = cv2.minAreaRect(max_contour)
if rect[1][0] - width_reduce > 30 and rect[1][1] - Height_reduce > 30:
rect_reduce = (rect[0], (rect[1][0] - width_reduce, rect[1][1] - Height_reduce), rect[2])
else:
rect_reduce = rect
box_outside = cv2.boxPoints(rect)
startidx = box_outside.sum(axis=1).argmin()
box_outside = np.roll(box_outside, 4 - startidx, 0).astype(np.int32).reshape((-1, 1, 2))
box_reduce = cv2.boxPoints(rect_reduce)
startidx = box_reduce.sum(axis=1).argmin()
box_reduce = np.roll(box_reduce, 4 - startidx, 0).astype(np.int32).reshape((-1, 1, 2))
box_outside += np.array([[[box_x1, box_y1]]] * 4)
box = box_reduce + np.array([[[box_x1, box_y1]]] * 4)
mask_inside = np.zeros(binary.shape, np.uint8)
cv2.fillPoly(mask_inside, [box_reduce], (255))
pixel_point2 = cv2.findNonZero(mask_inside)
select_point = []
for i in range(pixel_point2.shape[0]):
select_point.append(pm[pixel_point2[i][0][1] + box_y1, pixel_point2[i][0][0] + box_x1])
pm_seg = np.array(select_point).reshape(-1, 3)
pm_seg = pm_seg[~np.isnan(pm_seg).all(axis=1), :]
return box, box_outside, pm_seg,max_contour
def fit_plane_and_get_normal(self, pm_seg):
if pm_seg.shape[0] < 100:
print("分割点云数量较少,无法拟合平面")
return None
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(pm_seg)
plane_model, inliers = pcd.segment_plane(distance_threshold=self.seg_distance_threshold,
ransac_n=5,
num_iterations=5000)
[a, b, c, d] = plane_model
return [a, b, c]
def get_position_test(self, Use_Pose_Model_Pro=False, Point_isVision=False, Box_isPoint=True,
First_Depth=True, Iter_Max_Pixel=30, save_img_point=0,
Height_reduce=80, width_reduce=60,
Xmin=160, Xmax=1050, Ymin=290, Ymax=780):
if self.camera_rvc.caminit_isok:
print("RVC X Camera is not opened!")
return 0, None, None, None, None
# fixed paths are used here as an example; better to pass them in as parameters
xyz_path = 'D:/pychram_rob/AutoControlSystem-git/Vision/model/data/2024_11_29_10_05_58.xyz'
img_path = 'D:/pychram_rob/AutoControlSystem-git/Vision/model/data/2024_11_29_10_05_58.png'
img, pm = self.read_data(xyz_path, img_path)
if save_img_point != 0:
free_space = get_disk_space(path=os.getcwd())
if free_space < 15:
print('系统内存不足,无法保存数据')
save_img_point = 0
else:
save_path = os.path.join(os.getcwd(), 'Vision/model/data/',
time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime()))
self.save_data(img, pm, save_img_point, save_path)
flag, det_cpu, category_names, extra, real_model_pro_isPose = self.model_inference(img, Use_Pose_Model_Pro)
if flag != 1:
print("模型推理失败")
return 1, img, None, None, None
xyz_list = []
normal_list = []
area_list = []
depth_list = []
uv_list = []
seg_point_list = []
box_list = []
for i, item in enumerate(det_cpu):
if real_model_pro_isPose:
box_coords = None
else:
box_coords = item[0:4].astype(np.int32)
masks = extra if not real_model_pro_isPose else None
box, box_outside, pm_seg,max_contour = self.process_mask_and_get_box(i,
item, masks, pm, box_coords, Height_reduce, width_reduce,
real_model_pro_isPose, self.use_openvino_model)
if pm_seg.shape[0] < 100:
continue
normal = self.fit_plane_and_get_normal(pm_seg)
if normal is None:
continue
# compute the centre-point coordinates
if real_model_pro_isPose:
x_center = int((item[0][0] + item[1][0] + item[2][0] + item[3][0]) / 4)
y_center = int((item[0][1] + item[1][1] + item[2][1] + item[3][1]) / 4)
else:
x_center = int(np.mean(box[:, 0, 0]))
y_center = int(np.mean(box[:, 0, 1]))
# make sure the centre point is within the allowed range
if x_center < Xmin or x_center > Xmax or y_center < Ymin or y_center > Ymax:
continue
# get the point-cloud coordinates of the centre point
point_x, point_y, point_z = remove_nan_mean_value(pm, y_center, x_center, iter_max=Iter_Max_Pixel)
if np.isnan(point_x):
continue
# compute the area (if a contour exists)
if real_model_pro_isPose:
area = 0
else:
area = cv2.contourArea(max_contour) if 'max_contour' in locals() else 0
xyz = [point_x, point_y, point_z]
if self.cameraType == 'RVC':
xyz = [v * 1000 for v in xyz] # convert units to mm
depth_list.append(point_z * 1000)
else:
depth_list.append(point_z)
xyz_list.append(xyz)
normal_list.append(normal)
area_list.append(area)
uv_list.append([x_center, y_center])
seg_point_list.append(pm_seg)
box = box.reshape(-1,2)
print("box.shape:", box.shape)
print("box example:", box)
box_3d_points = self.get_box_3d_points(pm, box, Box_isPoint)
box_list.append(box_3d_points)
# drawing example
cv2.polylines(img, [box], True, (0, 255, 0), 2)
cv2.polylines(img, [box_outside], True, (226, 12, 89), 2)
cv2.circle(img, (x_center, y_center), 2, (255, 255, 255), 3)
# pick the index of the final result
idx = find_position(depth_list, area_list, 100, First_Depth)
if idx is None:
return 1, img, None, None, None
# mark the final centre point
cv2.circle(img, (uv_list[idx][0], uv_list[idx][1]), 30, (0, 0, 255), 10)
# point-cloud visualisation example
if Point_isVision:
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(seg_point_list[idx])
plane_model, inliers = pcd.segment_plane(distance_threshold=self.seg_distance_threshold,
ransac_n=5,
num_iterations=5000)
inlier_cloud = pcd.select_by_index(inliers)
inlier_cloud.paint_uniform_color([1.0, 0, 0])
outlier_cloud = pcd.select_by_index(inliers, invert=True)
outlier_cloud.paint_uniform_color([0, 0, 1])
o3d.visualization.draw_geometries([inlier_cloud, outlier_cloud])
# save the image
if save_img_point in (2, 4):
save_img = cv2.resize(img, (720, 540))
save_path = os.path.join(os.getcwd(), 'Vision/model/data/',
time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime()))
cv2.imwrite(save_path + '.png', save_img)
return 1, img, xyz_list[idx], normal_list[idx], box_list[idx]
def get_take_photo_position(self, Height_reduce = 30, width_reduce = 30):
"""

File diff suppressed because it is too large

View File

@ -13,7 +13,7 @@ import cv2
detection = Detection()
while True:
ret, img, xyz, nx_ny_nz, box = detection.get_position()
ret, img, xyz, nx_ny_nz, box = detection.get_position_test()
if ret==1:
print('xyz点云坐标', xyz)
print('nx_ny_nz法向量', nx_ny_nz)

Binary file not shown.

View File

@ -14,6 +14,59 @@ import psutil
from psutil._common import bytes2human
def shrink_quadrilateral(points, d):
"""
Shrink the quadrilateral formed by 4 points inward by d pixels along its diagonals
:param points: the 4 vertices of the quadrilateral, shape (4, 2)
:param d: shrink distance in pixels
:return: the 4 shrunk vertices
"""
# convert the points to a numpy array
points = np.array(points, dtype=np.float32)
# compute the centre of the quadrilateral
center = np.mean(points, axis=0)
# vector from the centre to each point
vectors = points - center
# length of each vector
lengths = np.linalg.norm(vectors, axis=1)
# scale factor
scale = (lengths - d) / lengths
# scale each point toward the centre
new_points = center + vectors * scale[:, np.newaxis]
new_points = new_points.astype(np.int32)
return new_points
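A small worked example of shrink_quadrilateral: every corner moves d pixels along the line to the quadrilateral's centre, so a 100x100 square shrunk by 10 px ends up roughly 14 px smaller per side:

quad = [[0, 0], [100, 0], [100, 100], [0, 100]]
inner = shrink_quadrilateral(quad, 10)
# centre is (50, 50); each corner moves 10 px toward it,
# giving approximately [[7, 7], [92, 7], [92, 92], [7, 92]] after the int cast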
def find_closest_point_index(point_cloud, x1, y1):
x_coords = point_cloud[:, :, 0]
y_coords = point_cloud[:, :, 1]
# mask marking the points whose x and y are not NaN
valid_mask = ~(np.isnan(x_coords) | np.isnan(y_coords))
# initialise the minimum distance to a very large value
min_distance = np.inf
min_index = (None, None)
# iterate over all valid points
for i in range(point_cloud.shape[0]):
for j in range(point_cloud.shape[1]):
if valid_mask[i, j]:
# Euclidean distance from the current point to (x1, y1)
distance = np.sqrt((x_coords[i, j] - x1) ** 2 + (y_coords[i, j] - y1) ** 2)
# if it is closer, update the minimum distance and index
if distance < min_distance:
min_distance = distance
min_index = (i, j)
return min_index
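The nested loops above scan every pixel; an equivalent vectorized sketch (assuming the same (H, W, 3) point-cloud layout) masks the NaNs and takes the argmin of the squared pixel-plane distance:

import numpy as np

def find_closest_point_index_fast(point_cloud, x1, y1):
    x = point_cloud[:, :, 0]
    y = point_cloud[:, :, 1]
    dist2 = (x - x1) ** 2 + (y - y1) ** 2
    dist2[np.isnan(x) | np.isnan(y)] = np.inf    # ignore invalid points
    if not np.isfinite(dist2).any():
        return (None, None)
    return np.unravel_index(np.argmin(dist2), dist2.shape)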
def uv_to_XY(cameraType, u, v):
"""
Convert pixel coordinates to camera coordinates
@ -91,8 +144,8 @@ def out_bounds_dete(pm_y, pm_x, piont_y, piont_x):
def remove_nan_mean_value(pm, y, x, iter_max=50):
y, x = out_bounds_dete(pm.shape[0], pm.shape[1], y, x)
point_x, point_y, point_z = pm[y, x]
if np.isnan(point_x):
point_x, point_y, point_z = pm[y, x] # point-cloud coordinates at this pixel
if np.isnan(point_x): # if this pixel is NaN, average the valid point-cloud values within up to 50 surrounding pixels and use that instead
point_x_list = []
point_y_list = []
point_z_list = []
@ -101,7 +154,7 @@ def remove_nan_mean_value(pm, y, x, iter_max=50):
pm_shape_x = pm.shape[1]
remove_nan_isok = False
print('Nan值去除')
while iter_current < iter_max:
while iter_current < iter_max: # non-NaN points inside this neighbourhood are collected into the lists
# 计算开始点
if y - iter_current > 0:
y_start = y - iter_current
@ -127,7 +180,7 @@ def remove_nan_mean_value(pm, y, x, iter_max=50):
point_z_list.append(pm[y_current, x_current][2])
len_point_x = len(point_x_list)
if len_point_x > 0:
if len_point_x > 0: # compute the mean of x, y, z
point_x = sum(point_x_list)/len_point_x
point_y = sum(point_y_list)/len_point_x
point_z = sum(point_z_list)/len_point_x
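A tiny self-contained check of the NaN fallback described above, using a toy point map (the real pm comes from the camera):

import numpy as np

pm = np.ones((5, 5, 3), dtype=np.float32)     # toy point map, every point is (1, 1, 1)
pm[2, 2] = np.nan                             # centre pixel has no valid depth
x, y, z = remove_nan_mean_value(pm, 2, 2, iter_max=3)
print(x, y, z)                                # ~ (1.0, 1.0, 1.0), averaged from the neighbours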

View File

@ -0,0 +1,214 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
# @Time : 2025/3/18 15:29
# @Author : hjw
# @File : yolov8_pt_pose.py
'''
import os.path
import random
import cv2
import numpy as np
import torch
import time
from ultralytics.nn.autobackend import AutoBackend
from ultralytics.utils import ops
class yolov8_pose:
def __init__(self, weights, cuda, conf_thres=0.45, iou_thres=0.45) -> None:
"""
weights = r'./runs/pose/train25/weights/last.pt'
cuda = 'cpu'
save_path = "./img_test"
"""
self.imgsz = 640
self.device = cuda
self.model = AutoBackend(weights, device=torch.device(cuda))
self.model.eval()
self.names = self.model.names
self.half = False
self.conf = conf_thres
self.iou = iou_thres
self.color = {"font": (255, 255, 255)}
self.color.update(
{self.names[i]: (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
for i in range(len(self.names))})
# self.skeleton = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12], [7, 13], [6, 7], [6, 8],
# [7, 9], [8, 10], [9, 11], [2, 3], [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]]
# pose_palette = np.array([[255, 128, 0], [255, 153, 51], [255, 178, 102], [230, 230, 0], [255, 153, 255],
# [153, 204, 255], [255, 102, 255], [255, 51, 255], [102, 178, 255], [51, 153, 255],
# [255, 153, 153], [255, 102, 102], [255, 51, 51], [153, 255, 153], [102, 255, 102],
# [51, 255, 51], [0, 255, 0], [0, 0, 255], [255, 0, 0], [255, 255, 255]], dtype=np.uint8)
# self.kpt_color = pose_palette[[16, 16, 16, 16, 16, 0, 0, 0, 0, 0, 0, 9, 9, 9, 9, 9, 9]]
# self.limb_color = pose_palette[[9, 9, 9, 9, 7, 7, 7, 0, 0, 0, 0, 0, 16, 16, 16, 16, 16, 16, 16]]
self.skeleton = [[1, 2], [2, 3], [3, 4]]
pose_palette = np.array([[255, 0, 0], [255, 153, 51], [255, 3, 102], [0, 230, 0]], dtype=np.uint8)
self.kpt_color = pose_palette[[0, 1, 2, 3]]
self.limb_color = pose_palette[[0, 1, 2, 3]]
# print(len(self.skeleton ))
# print(len(pose_palette))
# print(len(self.kpt_color))
# print(len(self.limb_color))
def model_inference(self, img_src):
img = self.precess_image(img_src, self.imgsz, self.half, self.device)
preds = self.model(img) # shape [1, 56, 6300]
det = ops.non_max_suppression(preds, self.conf, self.iou, classes=None, agnostic=False, max_det=300,
nc=len(self.names))
point_xy = []
name_list = []
score_list = []
for i, pred in enumerate(det):
lw = max(round(sum(img_src.shape) / 2 * 0.003), 2) # line width
tf = max(lw - 1, 1) # font thickness
sf = lw / 3 # font scale
pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], img_src.shape)
pred_bbox = pred[:, :6].cpu().detach().numpy()
pred_kpts = pred[:, 6:].view(len(pred), *self.model.kpt_shape) if len(pred) else pred[:, 6:]
pred_kpts = ops.scale_coords(img.shape[2:], pred_kpts, img_src.shape)
pred_kpts = pred_kpts.cpu().detach().numpy()
point_xy = []
for kpts, bbox in zip(pred_kpts, pred_bbox):
box = bbox[:4]
score = bbox[4]
name = self.names[bbox[5]]
shape = (640, 640)
radius = 5
kpt_line = True
nkpt, ndim = kpts.shape
is_pose = nkpt == 4 and ndim in {2, 3}
kpt_line &= is_pose # `kpt_line=True` for now only supports human pose plotting
xy = []
for i, k in enumerate(kpts):
color_k = [int(x) for x in self.kpt_color[i]]
x_coord, y_coord = k[0], k[1]
if x_coord % shape[1] != 0 and y_coord % shape[0] != 0:
if len(k) == 3:
conf = k[2]
if conf < 0.5:
continue
xy.append([int(x_coord), int(y_coord)])
cv2.circle(img_src, (int(x_coord), int(y_coord)), radius, color_k, -1, lineType=cv2.LINE_AA)
point_xy.append(xy)
name_list.append(name)
score_list.append(score)
return True, point_xy, name_list, score_list
def draw_box(self, img_src, box, conf, cls_name, lw, sf, tf):
color = self.color[cls_name]
label = f'{cls_name} {conf}'
p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3]))
# draw the bounding rectangle
cv2.rectangle(img_src, p1, p2, color, thickness=lw, lineType=cv2.LINE_AA)
# text width, height
w, h = cv2.getTextSize(label, 0, fontScale=sf, thickness=tf)[0]
# label fits outside box
outside = box[1] - h - 3 >= 0
p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3
# draw the filled label background
cv2.rectangle(img_src, p1, p2, color, -1, cv2.LINE_AA)
# draw the label text
cv2.putText(img_src, label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2),
0, sf, self.color["font"], thickness=2, lineType=cv2.LINE_AA)
def draw_kpts(self, img_src, kpts, box, score, name, lw, sf, tf, shape=(640, 640), radius=5, kpt_line=True):
flag = False
nkpt, ndim = kpts.shape
is_pose = nkpt == 4 and ndim in {2, 3}
kpt_line &= is_pose # `kpt_line=True` for now only supports human pose plotting
for i, k in enumerate(kpts):
color_k = [int(x) for x in self.kpt_color[i]]
x_coord, y_coord = k[0], k[1]
if x_coord % shape[1] != 0 and y_coord % shape[0] != 0:
if len(k) == 3:
conf = k[2]
if conf < 0.5:
continue
cv2.circle(img_src, (int(x_coord), int(y_coord)), radius, color_k, -1, lineType=cv2.LINE_AA)
if kpt_line:
ndim = kpts.shape[-1]
for i, sk in enumerate(self.skeleton):
pos1 = (int(kpts[(sk[0] - 1), 0]), int(kpts[(sk[0] - 1), 1]))
pos2 = (int(kpts[(sk[1] - 1), 0]), int(kpts[(sk[1] - 1), 1]))
if ndim == 3:
conf1 = kpts[(sk[0] - 1), 2]
conf2 = kpts[(sk[1] - 1), 2]
if conf1 < 0.5 or conf2 < 0.5:
continue
if pos1[0] % shape[1] == 0 or pos1[1] % shape[0] == 0 or pos1[0] < 0 or pos1[1] < 0:
continue
if pos2[0] % shape[1] == 0 or pos2[1] % shape[0] == 0 or pos2[0] < 0 or pos2[1] < 0:
continue
cv2.line(img_src, pos1, pos2, [int(x) for x in self.limb_color[i]], thickness=2, lineType=cv2.LINE_AA)
flag = True
if flag:
self.draw_box(img_src, box, score, name, lw, sf, tf)
@staticmethod
def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), scaleup=True, stride=32):
# Resize and pad image while meeting stride-multiple constraints
shape = im.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup: # only scale down, do not scale up (for better val mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
# minimum rectangle
dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
return im, ratio, (dw, dh)
def precess_image(self, img_src, img_size, half, device):
# Padded resize
img = self.letterbox(img_src, img_size)[0]
# Convert
img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
img = np.ascontiguousarray(img)
img = torch.from_numpy(img).to(device)
img = img.half() if half else img.float() # uint8 to fp16/32
img = img / 255 # 0 - 255 to 0.0 - 1.0
if len(img.shape) == 3:
img = img[None] # expand for batch dim
return img
# if __name__ == '__main__':
# weights = r'./runs/pose/train25/weights/last.pt'
# cuda = 'cpu'
# save_path = "./img_test"
# start = time.time()
# if not os.path.exists(save_path):
# os.mkdir(save_path)
#
# model = yolov8_pose(weights, cuda, 0.45, 0.45)
#
# img_path = r'./1106-08-pe-518.png'
# model.infer(img_path, save_path)
# end = time.time()
# print('inference time:', end - start)
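The commented-out block above calls model.infer, which this class does not define; a minimal usage sketch going through model_inference instead, reusing the weight and image paths from that comment:

import cv2

model = yolov8_pose(r'./runs/pose/train25/weights/last.pt', 'cpu', 0.45, 0.45)
img = cv2.imread(r'./1106-08-pe-518.png')
ok, point_xy, names, scores = model.model_inference(img)
# point_xy: per-detection list of keypoint [x, y] pairs; names/scores: class label and confidence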

View File

@ -332,10 +332,10 @@ class yolov8_segment():
# NMS
det = non_max_suppression(preds, conf_thres=0.4, iou_thres=0.4, nc=len(self.model.CLASSES))[0]
if det.shape[0] != 0:
# rescale the bbox to the original image size
# rescale the box to the original image size
det[:, :4] = scale_boxes(img.shape[2:], det[:, :4], ori_img.shape)
# convert the mask to the original image size and crop it
masks = process_mask(proto[0], det[:, 6:], det[:, :4], img.shape[2:], ori_img.shape[0:2])
masks = process_mask(proto[0], det[:, 6:], det[:, :4], img.shape[2:], ori_img.shape[0:2]) # why is the mask size the same as the box size?
category_names = self.model.CLASSES
# draw the result
# result_frame = plot_result(det.cpu().data.numpy(), ori_img, masks, category_names)

View File

@ -10,7 +10,7 @@ import platform
import cv2
import os
from Vision.camera_coordinate_dete import Detection
from Vision.camera_coordinate_dete_img import Detection
from Vision.camera_coordinate_dete_planevison import Detection_plane_vsion
from Trace.handeye_calibration import *
from Vision.tool.utils import get_disk_space
@ -26,9 +26,9 @@ from Vision.bag_collection import DetectionBag
"""
def detectionPosition_test():
detection = Detection()
detection = Detection(use_pose_model=True) # model selection: use_openvino_model=False, use_pose_model=True, use_seg_pt_model=True
while True:
ret, img, xyz, nx_ny_nz, box = detection.get_position(Point_isVision=True, save_img_point=1)
ret, img, xyz, nx_ny_nz, box = detection.get_position(Use_Pose_Model_Pro=True, Point_isVision=True, save_img_point=1)
if ret==1:
print('xyz点云坐标', xyz)
print('nx_ny_nz法向量', nx_ny_nz)
@ -134,4 +134,4 @@ def bag_collection_test():
if __name__ == '__main__':
Detection_plane_vsion_test()
detectionPosition_test()

File diff suppressed because it is too large