import numpy as np


def vec2attitude(a, b, c):
    """Rotate the vector (1, 1, 1) by Euler angles a, b, c given in units of pi.

    a, b and c select rotations about the x-, y- and z-axis respectively
    (each multiplied by pi before use).  Each component of the rotated
    vector is printed, and the three components are returned as a list
    [u, v, w].
    """
    pi = np.pi  # numerically identical to the original np.arccos(-1.0)
    # Elementary rotation about the x-axis by a*pi.
    r1 = np.array([[1, 0, 0],
                   [0, np.cos(a * pi), np.sin(a * pi)],
                   [0, -np.sin(a * pi), np.cos(a * pi)]])
    # Elementary rotation about the y-axis by b*pi.
    r2 = np.array([[np.cos(b * pi), 0, -np.sin(b * pi)],
                   [0, 1, 0],
                   [np.sin(b * pi), 0, np.cos(b * pi)]])
    # Elementary rotation about the z-axis by c*pi.
    r3 = np.array([[np.cos(c * pi), np.sin(c * pi), 0],
                   [-np.sin(c * pi), np.cos(c * pi), 0],
                   [0, 0, 1]])
    vector1 = np.array([[1], [1], [1]])  # initial vector, shape (3, 1)
    # Apply the combined rotation r1 @ r2 @ r3 to the initial vector.
    matrix_result = r1 @ r2 @ r3 @ vector1
    rpy = []
    for m in range(matrix_result.shape[0]):
        rpy.append(matrix_result[m][0])
        print(f"{matrix_result[m][0]:<20}")
    return rpy


# The caller supplies the target object's centre point x, y, z and the
# plane normal vector a, b, c of the target pose (camera frame).
def getPosition(x, y, z, a, b, c):
    """Transform a camera-frame target point and attitude into the robot frame.

    Loads the camera-to-robot extrinsic matrix from cam_pose.txt (assumed to
    hold at least a 3x4 [R|t] block -- TODO confirm file layout), maps the
    point (x, y, z) into robot coordinates, and converts the (a, b, c)
    attitude via vec2attitude.  Returns (target_position, target_angle),
    both length-3 arrays.
    """
    target = np.asarray([x, y, z])
    # Raw string: the original literal relied on \B, \机 and \c not being
    # escape sequences plus one real escape (\\r); the raw form has the
    # byte-identical runtime value without the fragility.
    camera2robot = np.loadtxt(r'D:\BaiduNetdiskDownload\机械臂\GRCNN\real\cam_pose.txt', delimiter=' ')
    # BUG FIX: the original added the (3,) rotated point to the (3,1)
    # translation column, which broadcast to a (3,3) matrix; taking column 0
    # of that result used only the FIRST component of the rotated point.
    # Using the flat translation vector keeps the sum at shape (3,).
    target_position = camera2robot[0:3, 0:3] @ target + camera2robot[0:3, 3]
    rpy = vec2attitude(a, b, c)
    r, p, y = rpy[0], rpy[1], rpy[2]  # r = u, p = v, y = w
    target_angle_raw = np.asarray([r, p, y])
    # NOTE(review): rotating the attitude components by the extrinsic
    # rotation reproduces the original behaviour; confirm this is the
    # intended attitude conversion for the robot controller.
    target_angle = camera2robot[0:3, 0:3] @ target_angle_raw
    print(target_position, target_angle)
    return target_position, target_angle