first commit

This commit is contained in:
2025-07-29 13:16:30 +08:00
commit dca51db4eb
145 changed files with 721268 additions and 0 deletions

12
Trace/README.md Normal file
View File

@ -0,0 +1,12 @@
# com_pose.txt 彩色图对齐深度图
# com_pose2.txt 深度图对齐彩色图
# 路径处理
使用model的Position类和Expection的code定义
## 获取世界坐标
```
def fun1(Detection_Position):
return Code, RealPosition;
```

Binary file not shown.

267
Trace/calibrate.py Normal file
View File

@ -0,0 +1,267 @@
import sys
import time
import numpy as np
import cv2
import glob
from math import *
import pandas as pd
from pathlib import Path
from COM.COM_Robot import RobotClient
from app import MainWindow
from Vision.tool.CameraRVC import camera
from Model.RobotModel import DataAddress,MoveType
from PySide6.QtWidgets import QMainWindow, QApplication, QLineEdit
# Build a rotation matrix from Euler angles.
def myRPY2R_robot(x, y, z):
    """Rotation matrix for teach-pendant roll/pitch/yaw angles (radians).

    The three elementary rotations are composed as Rz @ Ry @ Rx.
    """
    cx, sx = cos(x), sin(x)
    cy, sy = cos(y), sin(y)
    cz, sz = cos(z), sin(z)
    rot_x = np.array([[1.0, 0.0, 0.0],
                      [0.0, cx, -sx],
                      [0.0, sx, cx]])
    rot_y = np.array([[cy, 0.0, sy],
                      [0.0, 1.0, 0.0],
                      [-sy, 0.0, cy]])
    rot_z = np.array([[cz, -sz, 0.0],
                      [sz, cz, 0.0],
                      [0.0, 0.0, 1.0]])
    return rot_z @ rot_y @ rot_x
# Build the homogeneous transform matrix from a pose.
def pose_robot(x, y, z, Tx, Ty, Tz):
    """4x4 homogeneous transform from translation (x, y, z) and Euler
    angles Tx/Ty/Tz given in degrees (rotation built by myRPY2R_robot)."""
    R = myRPY2R_robot(Tx / 180 * pi, Ty / 180 * pi, Tz / 180 * pi)
    RT1 = np.eye(4)
    RT1[:3, :3] = R
    RT1[:3, 3] = (x, y, z)
    return RT1
def get_RT_from_chessboard(img_path, chess_board_x_num, chess_board_y_num, chess_board_len, cam_mtx, cam_dist):
    """Estimate the board->camera extrinsic matrix from one chessboard image.

    :param img_path: path of the image to read
    :param chess_board_x_num: number of inner corners along x
    :param chess_board_y_num: number of inner corners along y
    :param chess_board_len: single square size (same unit as the returned translation)
    :param cam_mtx: 3x3 camera intrinsic matrix
    :param cam_dist: camera distortion coefficients
    :return: 4x4 homogeneous board-to-camera transform
    :raises ValueError: if the chessboard corners cannot be detected.
        (The original code returned the int 0 here, which made callers fail
        much later with a confusing TypeError when slicing the "matrix".)
    """
    # Sub-pixel refinement termination criteria.
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    # Board corner coordinates in the board frame (z = 0 plane),
    # like (0,0,0), (1,0,0), ... scaled by the square size.
    objp = np.zeros((chess_board_y_num * chess_board_x_num, 3), np.float32)
    for m in range(chess_board_y_num):
        for n in range(chess_board_x_num):
            objp[m * chess_board_x_num + n] = [n * chess_board_len, m * chess_board_len, 0]
    img = cv2.imread(img_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Find the chessboard corners.
    ret, corners = cv2.findChessboardCorners(gray, (chess_board_x_num, chess_board_y_num), None)
    if not ret:
        raise ValueError(f"chessboard corners not found in {img_path}")
    corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
    # solvePnP yields the board pose expressed in the camera frame.
    retval, rvec, tvec = cv2.solvePnP(objp, corners2, cam_mtx, cam_dist)
    RT = np.column_stack((cv2.Rodrigues(rvec)[0], tvec))
    RT = np.row_stack((RT, np.array([0, 0, 0, 1])))
    return RT
# Compute the board->camera transform for every calibration image.
# img_path example: "./hand_eye" (folder holding the chessboard shots)
def get_chess_to_cam_mtx(img_path, chess_board_x_num, chess_board_y_num, chess_board_len, cam_mtx, cam_dist):
    """Run get_RT_from_chessboard over every *.jpg under `img_path`.

    Returns two parallel lists: the 3x3 rotation blocks and the 3x1
    translation columns of the board->camera transforms.
    """
    rotations = []
    translations = []
    for fname in glob.glob(img_path + "/*.jpg"):
        RT = get_RT_from_chessboard(fname, chess_board_x_num, chess_board_y_num,
                                    chess_board_len, cam_mtx, cam_dist)
        rotations.append(RT[:3, :3])
        translations.append(RT[:3, 3].reshape((3, 1)))
    return rotations, translations
def save_images(image, path, img_num):
    """Write `image` to <path>/image_<img_num>.png."""
    target = path / f'image_{img_num}.png'
    cv2.imwrite(str(target), image)
def save_pic_and_excel():  # Save the arm's end-effector poses to Excel and the captured images to disk: move, shoot, save.
    """Sweep each robot joint through a range of angles, capturing one image
    and the end-effector world pose at every stop.

    Images go to ./calibration_images/image_<n>.png; the six pose components
    plus each image path go to robot_calibration_data.xlsx.
    Relies on the module-level `robot` (MainWindow) and `ca` (camera)
    created in the __main__ block.
    """
    robot.send_switch_tool_command()
    # Initial joint configuration (degrees).
    initial_joint_angles = [0, 0, 0, 0, 5, 0]
    move_step = 5  # move 5 degrees per step
    move_range = [-15, 15]  # sweep limits for each joint
    # Output path for the Excel workbook.
    excel_path = Path("robot_calibration_data.xlsx")
    # DataFrame columns for the robot coordinates.
    data = {
        "x1": [], "x2": [], "x3": [],"x4": [],"x5": [],"x6": [],"image_path": []
    }
    img_num = 0  # image index
    image_save_path = Path("calibration_images")
    image_save_path.mkdir(exist_ok=True)  # directory for the captured images
    # Sweep each axis in turn.
    for axis in range(6):
        # Axis 4 is skipped (kept at its initial 5-degree offset).
        if axis==4:
            continue
        for move in range(move_range[0], move_range[1] + 1, move_step):
            # Target joints: only `axis` deviates from the start pose.
            joint_angles = initial_joint_angles.copy()
            joint_angles[axis] = move
            # Move the arm to the target joints.
            robot.send_position_command(*joint_angles,MoveType.AXIS)
            # Wait for the motion to settle, then capture.
            time.sleep(10)
            _,rgb_image = ca.get_img()
            # Persist the image.
            save_images(rgb_image, image_save_path, img_num)
            img_path = image_save_path / f'image_{img_num}.png'
            img_num += 1
            # Current end-effector pose in world coordinates.
            real_position = robot.robotClient.status_model.getRealPosition()
            x1 = real_position.X
            x2 = real_position.Y
            x3 = real_position.Z
            x4 = real_position.U
            x5 = real_position.V
            x6 = real_position.W
            # Append the pose to the DataFrame columns.
            data["x1"].append(x1)
            data["x2"].append(x2)
            data["x3"].append(x3)
            data["x4"].append(x4)
            data["x5"].append(x5)
            data["x6"].append(x6)
            data["image_path"].append(str(img_path))
    # Write everything to Excel.
    df = pd.DataFrame(data)
    df.to_excel(excel_path, index=False)
    print(f"数据已保存至 {excel_path}")
# Compute the end->base transform matrices.
# `path` is the folder holding the captured images and the pose spreadsheet.
def get_end_to_base_mtx(path):
    """Load the recorded end-effector poses and convert each to a 4x4
    end->base transform.

    :param path: folder containing image_<n>.png and the pose workbook
    :return: parallel lists of 3x3 rotations and 3x1 translations,
             one pair per image found.
    """
    # Images are written by save_images() as <path>/image_<n>.png; the
    # original pattern `path + "image_/*.png"` could never match them.
    img_path = path + "/image_*.png"
    coord_file_path = path + '/robot_calibration_data.xlsx'  # recorded robot poses
    # NOTE(review): save_pic_and_excel() writes the workbook to the CWD,
    # not into `path` — confirm where the file actually lives.
    sheet_1 = pd.read_excel(coord_file_path)
    R_all_end_to_base_1 = []
    T_all_end_to_base_1 = []
    images = glob.glob(img_path)
    for i in range(len(images)):
        # save_pic_and_excel() in this file stores the pose as six numeric
        # columns x1..x6 (X, Y, Z, U, V, W).  The original code instead
        # parsed column 0 as a stringified list, which only matches the
        # format written by calibrate_test.py and raised here.
        row = sheet_1.iloc[i]
        RT = pose_robot(row["x1"], row["x2"], row["x3"], row["x4"], row["x5"], row["x6"])
        R_all_end_to_base_1.append(RT[:3, :3])
        T_all_end_to_base_1.append(RT[:3, 3].reshape((3, 1)))
    return R_all_end_to_base_1, T_all_end_to_base_1
if __name__ == "__main__":
    # Hand-eye calibration driver: capture data, solve cam->end, then verify.
    print("手眼标定测试\n")
    app = QApplication(sys.argv)
    robot = MainWindow()
    ca = camera()
    dataaddress = DataAddress()
    # Camera intrinsic matrix.
    cam_mtx = np.array([[2402.1018066406, 0.0000000000, 739.7069091797],
                        [0.0000000000, 2401.7873535156, 584.7304687500],
                        [0.00000000e+00, 0.00000000e+00, 1.00000000e+00]], dtype=np.float64)
    # Distortion coefficients.
    cam_dist = np.array([-0.0424814112, 0.2438604534, -0.3833343089, -0.3833343089, 0.0007602088],
                        dtype=np.float64)
    save_pic_and_excel()
    chess_board_x_num = 12 - 1  # board inner-corner counts (squares minus one)
    chess_board_y_num = 9 - 1
    chess_board_len = 30 / 1000  # square size
    path = "./calibration_images"
    R_all_chess_to_cam_1, T_all_chess_to_cam_1 = get_chess_to_cam_mtx(path, chess_board_x_num, chess_board_y_num,
                                                                      chess_board_len, cam_mtx, cam_dist)
    R_all_end_to_base_1, T_all_end_to_base_1 = get_end_to_base_mtx(path)
    R, T = cv2.calibrateHandEye(R_all_end_to_base_1, T_all_end_to_base_1, R_all_chess_to_cam_1, T_all_chess_to_cam_1,
                                cv2.CALIB_HAND_EYE_TSAI)  # hand-eye calibration (Tsai method)
    RT = np.column_stack((R, T))
    RT = np.row_stack((RT, np.array([0, 0, 0, 1])))  # the cam->end transform
    print('相机相对于末端的变换矩阵为:')
    print(RT)
    chess_base_mtx = np.zeros((4, 4), np.float32)
    # Verification: the fixed board's pose in the base frame should differ
    # only slightly between captures.
    # NOTE(review): the loop assumes at least 20 image/pose pairs exist.
    for i in range(20):
        RT_end_to_base = np.column_stack((R_all_end_to_base_1[i], T_all_end_to_base_1[i]))
        RT_end_to_base = np.row_stack((RT_end_to_base, np.array([0, 0, 0, 1])))
        RT_chess_to_cam = np.column_stack((R_all_chess_to_cam_1[i], T_all_chess_to_cam_1[i]))
        RT_chess_to_cam = np.row_stack((RT_chess_to_cam, np.array([0, 0, 0, 1])))
        RT_cam_to_end = np.column_stack((R, T))
        RT_cam_to_end = np.row_stack((RT_cam_to_end, np.array([0, 0, 0, 1])))
        R_cam_to_end = RT_cam_to_end[:3, :3]
        T_cam_to_end = RT_cam_to_end[:3, 3]
        rotation_vector, _ = cv2.Rodrigues(R_cam_to_end)
        print("11111111", rotation_vector)
        print("222222222", R_cam_to_end)
        # Rotation-vector components in degrees (computed but unused below).
        thetaX = rotation_vector[0][0] * 180 / pi
        thetaY = rotation_vector[1][0] * 180 / pi
        thetaZ = rotation_vector[2][0] * 180 / pi
        # print(RT_cam_to_end)
        RT_chess_to_base = RT_end_to_base @ RT_cam_to_end @ RT_chess_to_cam  # pose of the fixed board in the robot base frame
        print('')
        print(RT_chess_to_base)
        chess_base_mtx += RT_chess_to_base
        chess_pose = np.array([0, 0, 0, 1])
        # Round trip: board origin -> base frame -> camera frame -> pixels.
        world_pose = RT_chess_to_base @ chess_pose.T
        cam_pose = np.linalg.inv(RT_end_to_base @ RT_cam_to_end) @ world_pose
        print(cam_pose)
        # cam_pose = RT_chess_to_cam@chess_pose.T
        zero3 = np.zeros((3, 1))
        imgTocam_matrix = np.hstack((cam_mtx, zero3))  # 3x4 projection matrix
        imgTocam_matrix = np.row_stack((imgTocam_matrix, np.array([0, 0, 0, 1])))
        img_pose = imgTocam_matrix @ (cam_pose / cam_pose[2])
        # Pixel coordinates back to world (robot base) coordinates.
        cam_pose = np.linalg.inv(imgTocam_matrix) @ img_pose
        cam_pose = cam_pose * (1 / cam_pose[3])
        world_pose = RT_end_to_base @ RT_cam_to_end @ cam_pose
    # Average board pose over the 20 samples.
    print(chess_base_mtx / 20)

218
Trace/calibrate_test.py Normal file
View File

@ -0,0 +1,218 @@
import sys
import time
import numpy as np
import cv2
import glob
from math import *
import pandas as pd
from pathlib import Path
import openpyxl
from COM.COM_Robot import RobotClient
from app import MainWindow
from Vision.tool.CameraRVC import camera
from Model.RobotModel import DataAddress,MoveType
from PySide6.QtWidgets import QMainWindow, QApplication, QLineEdit
# Build a rotation matrix from Euler angles.
def myRPY2R_robot(x, y, z):
    """Rotation matrix for teach-pendant roll/pitch/yaw (radians),
    composed as Rz @ Ry @ Rx."""
    roll = np.array([
        [1, 0, 0],
        [0, cos(x), -sin(x)],
        [0, sin(x), cos(x)],
    ])
    pitch = np.array([
        [cos(y), 0, sin(y)],
        [0, 1, 0],
        [-sin(y), 0, cos(y)],
    ])
    yaw = np.array([
        [cos(z), -sin(z), 0],
        [sin(z), cos(z), 0],
        [0, 0, 1],
    ])
    return yaw @ pitch @ roll
# Build the homogeneous transform matrix from a pose.
def pose_robot(x, y, z, Tx, Ty, Tz):
    """4x4 homogeneous transform from translation (x, y, z) and Euler
    angles in degrees."""
    rot = myRPY2R_robot(Tx / 180 * pi, Ty / 180 * pi, Tz / 180 * pi)
    top = np.hstack((rot, np.array([[x], [y], [z]], dtype=float)))
    bottom = np.array([[0.0, 0.0, 0.0, 1.0]])
    return np.vstack((top, bottom))
def get_RT_from_chessboard(img_path, chess_board_x_num, chess_board_y_num, chess_board_len, cam_mtx, cam_dist):
    """Estimate the board->camera extrinsic matrix from one chessboard image,
    showing the detected corners for 500 ms as a debug aid.

    :param img_path: path of the image to read
    :param chess_board_x_num: number of inner corners along x
    :param chess_board_y_num: number of inner corners along y
    :param chess_board_len: single square size (mm in this script)
    :param cam_mtx: 3x3 camera intrinsic matrix
    :param cam_dist: camera distortion coefficients
    :return: 4x4 homogeneous board-to-camera transform
    :raises ValueError: if the chessboard corners cannot be detected.
        (The original code returned the int 0 here, which made callers fail
        later with a confusing TypeError when slicing the "matrix".)
    """
    # Sub-pixel refinement termination criteria.
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    # Board corner coordinates in the board frame (z = 0 plane).
    objp = np.zeros((chess_board_y_num * chess_board_x_num, 3), np.float32)
    for m in range(chess_board_y_num):
        for n in range(chess_board_x_num):
            objp[m * chess_board_x_num + n] = [n * chess_board_len, m * chess_board_len, 0]
    img = cv2.imread(img_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Find the chessboard corners.
    ret, corners = cv2.findChessboardCorners(gray, (chess_board_x_num, chess_board_y_num), None)
    if not ret:
        raise ValueError(f"chessboard corners not found in {img_path}")
    corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
    # Debug visualisation of the detected corners.
    img = cv2.drawChessboardCorners(img, (chess_board_x_num, chess_board_y_num), corners2, ret)
    cv2.imshow('Corners', img)
    cv2.waitKey(500)  # show for 500 ms
    # solvePnP yields the board pose expressed in the camera frame.
    retval, rvec, tvec = cv2.solvePnP(objp, corners2, cam_mtx, cam_dist)
    RT = np.column_stack(((cv2.Rodrigues(rvec))[0], tvec))
    RT = np.row_stack((RT, np.array([0, 0, 0, 1])))
    return RT
# Compute the board->camera transform for every calibration image.
# img_path example: "./hand_eye" (folder holding the chessboard shots)
def get_chess_to_cam_mtx(img_path, chess_board_x_num, chess_board_y_num, chess_board_len, cam_mtx, cam_dist):
    """Run get_RT_from_chessboard over every image_*.png under `img_path`,
    printing each transform, and return parallel lists of the 3x3
    rotation blocks and 3x1 translation columns."""
    R_list = []
    T_list = []
    for fname in glob.glob(img_path + "/image_*.png"):
        RT = get_RT_from_chessboard(fname, chess_board_x_num, chess_board_y_num,
                                    chess_board_len, cam_mtx, cam_dist)
        print("chesstocam", RT)
        R_list.append(RT[:3, :3])
        T_list.append(RT[:3, 3].reshape((3, 1)))
    return R_list, T_list
def save_images(image, path, img_num):
    """Write `image` to <path>/image_<img_num>.png."""
    cv2.imwrite(str(path / f'image_{img_num}.png'), image)
def save_pic_and_excel():  # Save the arm's end-effector poses to Excel and the captured images to disk: move, shoot, save.
    """Move the robot through a fixed list of joint configurations,
    capturing one image and the end-effector world pose at each stop.

    Images go to ./calibration_images/image_<n>.png; each pose (as a
    6-element list) plus its image path becomes one Excel row.
    Relies on the module-level `robot` (MainWindow) and `ca` (camera)
    created in the __main__ block.
    """
    robot.send_switch_tool_command()
    # Initial joint configuration (degrees); not used by the loop below.
    initial_joint_angles = [5, 0, 0, 0, -12, 0]
    # Pre-taught joint configurations to visit.
    fixed_joint_positions = [
        [0.000,0.000,-0.005,0.001,-68.752,0.000],
        [13.944,-2.732,-0.007,0.001,-71.402,0.000],
        [7.256,-2.732,-0.011,0.000,-71.402,-13.003],
        [1.061,3.196,-4.484,0.000,-71.402,-13.003],
        [1.061,-7.045,-4.486,8.935,-60.210,-13.003],
        [-12.811,-7.045,-4.488,15.131,-60.210,-5.156],
        [-12.811,3.929,-12.911,15.131,-62.695,-5.156],
        [14.269,3.929,-12.915,-7.996,-62.695,-5.156],
        [14.269,3.929,-9.102,-9.642,-66.652,6.868],
        [14.269,10.147,-13.732,-9.642,-66.652,6.868],
        [4.356,10.147,-13.736,-2.658,-66.652,6.868],
        [4.356,10.147,-13.736,-2.658,-66.652,21.705],
        [4.356,10.147,-3.741,-2.658,-74.334,21.705]
    ]
    # Output path for the Excel workbook.
    excel_path = Path("calibration_images//robot_calibration_data.xlsx")
    # Rows of [pose, image path].
    data_list = []
    img_num = 0  # image index
    image_save_path = Path("calibration_images")
    image_save_path.mkdir(exist_ok=True)  # directory for the captured images
    # Visit each fixed configuration in turn.
    for joint_angles in fixed_joint_positions:
        # Move the arm to the target joints.
        robot.send_position_command(*joint_angles, MoveType.AXIS)
        time.sleep(20)  # wait for the motion to settle
        # Capture and save the image.
        _, rgb_image = ca.get_img()
        save_images(rgb_image, image_save_path, img_num)
        img_path = image_save_path / f'image_{img_num}.png'
        img_num += 1
        # Current end-effector pose in world coordinates.
        real_position = robot.robotClient.status_model.getRealPosition()
        position = [real_position.X, real_position.Y, real_position.Z,
                    real_position.U, real_position.V, real_position.W]
        # Record the pose and the image path as one row.
        data_list.append([position, str(img_path)])
    # Write everything to Excel.
    df = pd.DataFrame(data_list, columns=["Position", "Image Path"])
    df.to_excel(excel_path, index=False)
    print(f"数据已保存至 {excel_path}")
# Compute the end->base transform matrices.
# `path` is the folder holding the chessboard images and the pose file.
def get_end_to_base_mtx(path):
    """Read the recorded end-effector poses and convert each to a 4x4
    end->base transform; returns parallel lists of 3x3 rotations and
    3x1 translations, one pair per image found under `path`.

    NOTE(review): this reads '机械臂末端位姿.xlsx' while save_pic_and_excel()
    writes 'robot_calibration_data.xlsx' — confirm which workbook is the
    intended source before running.
    """
    img_path = path + "/image_*.png"
    coord_file_path = path + '/机械臂末端位姿.xlsx'  # recorded robot poses, one row per image
    sheet_1 = pd.read_excel(coord_file_path)
    R_all_end_to_base_1 = []
    T_all_end_to_base_1 = []
    # print(sheet_1.iloc[0]['ax'])
    images = glob.glob(img_path)
    # print(len(images))
    for i in range(len(images)):
        # Column 0 holds the pose as a stringified list "[x, y, z, u, v, w]"
        # (the format written by save_pic_and_excel in this file).
        pose_str = sheet_1.values[i][0]
        pose_array = [float(num) for num in pose_str[1:-1].split(',')]
        # print(pose_array)
        RT = pose_robot(pose_array[0], pose_array[1], pose_array[2], pose_array[3], pose_array[4], pose_array[5])
        print("end_base",RT)
        R_all_end_to_base_1.append(RT[:3, :3])
        T_all_end_to_base_1.append(RT[:3, 3].reshape((3, 1)))
    return R_all_end_to_base_1, T_all_end_to_base_1
if __name__ == "__main__":
    # Offline hand-eye calibration test: solves cam->end from previously
    # captured images and poses (the capture step is commented out).
    print("手眼标定测试\n")
    np.set_printoptions(suppress=True)
    # app = QApplication(sys.argv)
    # robot = MainWindow()
    # ca = camera()
    # dataaddress = DataAddress()
    # Camera intrinsic matrix.
    cam_mtx = np.array([[2402.1018066406, 0.0000000000, 739.7069091797],
                        [0.0000000000, 2401.7873535156, 584.7304687500],
                        [0.00000000e+00, 0.00000000e+00, 1.00000000e+00]], dtype=np.float64)
    # Distortion coefficients.
    cam_dist = np.array([-0.0424814112, 0.2438604534, -0.3833343089, -0.3833343089, 0.0007602088],
                        dtype=np.float64)
    # save_pic_and_excel()
    chess_board_x_num = 11  # board inner-corner counts
    chess_board_y_num = 8
    chess_board_len = 30  # square size in mm
    path = "./calibration_images"
    R_all_chess_to_cam_1, T_all_chess_to_cam_1 = get_chess_to_cam_mtx(path, chess_board_x_num, chess_board_y_num,
                                                                      chess_board_len, cam_mtx, cam_dist)
    R_all_end_to_base_1, T_all_end_to_base_1 = get_end_to_base_mtx(path)
    R, T = cv2.calibrateHandEye(R_all_end_to_base_1, T_all_end_to_base_1, R_all_chess_to_cam_1, T_all_chess_to_cam_1,
                                cv2.CALIB_HAND_EYE_TSAI)  # hand-eye calibration (Tsai method)
    RT = np.column_stack((R, T))
    RT = np.row_stack((RT, np.array([0, 0, 0, 1])))  # the cam->end transform
    print('相机相对于末端的变换矩阵为:')
    print(RT)

4
Trace/com_pose.txt Normal file
View File

@ -0,0 +1,4 @@
1.1224831479369565e-01 -9.9366361198855646e-01 5.7395152958431492e-03 4.1812293824507674e+02
-9.9269850944334537e-01 -1.1239223491705273e-01 -4.3791036517882603e-02 1.9260165135520242e+03
4.4158636470522372e-02 -7.8213822690929326e-04 -9.9902422547446690e-01 2.7083490666098191e+03
0. 0. 0. 1.

4
Trace/com_pose2.txt Normal file
View File

@ -0,0 +1,4 @@
1.1224831479369565e-01 -9.9366361198855646e-01 5.7395152958431492e-03 4.1812293824507674e+02
-9.9269850944334537e-01 -1.1239223491705273e-01 -4.3791036517882603e-02 1.9260165135520242e+03
4.4158636470522372e-02 -7.8213822690929326e-04 -9.9902422547446690e-01 2.7083490666098191e+03
0. 0. 0. 1.

View File

@ -0,0 +1,104 @@
import numpy as np
from scipy.spatial.transform import Rotation as R
def vec2rpy(normal, short_edge_direction):
    """Convert a surface normal plus an in-plane reference direction into
    roll/pitch/yaw angles (degrees, 'xyz' order).

    The tool Z axis is the *opposite* of the normal; the X axis is the
    reference direction projected onto the plane orthogonal to Z; the Y
    axis completes the right-handed frame.
    """
    # New Z axis: normalised, reversed normal.
    z_axis = -normal / np.linalg.norm(normal)
    # Project the reference direction onto the plane orthogonal to Z.
    x_ref = short_edge_direction / np.linalg.norm(short_edge_direction)
    x_axis = x_ref - np.dot(x_ref, z_axis) * z_axis
    x_axis /= np.linalg.norm(x_axis)
    y_axis = np.cross(z_axis, x_axis)
    # The frame axes become the columns of the rotation matrix.
    rotation_matrix = np.column_stack((x_axis, y_axis, z_axis))
    return R.from_matrix(rotation_matrix).as_euler('xyz', degrees=True)
# xyzuvw convention provided by Zhang Xiao.
def R_matrix(x, y, z, u, v, w):
    """4x4 homogeneous transform from position (x, y, z) and Euler angles
    u/v/w in degrees, rotation composed as Rz @ Ry @ Rx."""
    rx, ry, rz = np.radians([u, v, w])
    cx, sx = np.cos(rx), np.sin(rx)
    cy, sy = np.cos(ry), np.sin(ry)
    cz, sz = np.cos(rz), np.sin(rz)
    # Elementary rotations about X, Y and Z.
    rot_x = np.array([[1, 0, 0],
                      [0, cx, -sx],
                      [0, sx, cx]])
    rot_y = np.array([[cy, 0, sy],
                      [0, 1, 0],
                      [-sy, 0, cy]])
    rot_z = np.array([[cz, -sz, 0],
                      [sz, cz, 0],
                      [0, 0, 1]])
    # Assemble the homogeneous matrix.
    transformation_matrix = np.eye(4)
    transformation_matrix[:3, :3] = rot_z @ rot_y @ rot_x
    transformation_matrix[:3, 3] = (x, y, z)
    return transformation_matrix
# Image-recognition result: xyz position plus surface normal.
def getPosition(x, y, z, a, b, c, rotation, points):
    """Map a detection (position x/y/z, normal a/b/c, corner `points`) from
    the camera frame into the robot base frame.

    Returns (target_position, noraml_base): the 3D point shifted 10 units
    against the surface normal, and the tool RPY angles derived from the
    normal plus the selected edge direction.

    NOTE(review): `rotation` is unused — the robot->base composition is
    commented out, so the loaded matrix is assumed to map camera directly
    to base. `points` appears to be corner points (N x 3) in the camera
    frame — confirm against the caller.
    """
    target = np.asarray([x, y, z,1])
    camera2robot = np.loadtxt('./Trace/com_pose2.txt', delimiter=' ')  # relative path, space-separated matrix
    # robot2base = rotation
    # camera2base = robot2base @ camera2robot
    target_position_raw = np.dot(camera2robot, target)
    corner_points_camera = np.asarray(points)
    # Corners rotated + translated into the base frame.
    corner_points_base = np.dot(camera2robot[:3, :3], corner_points_camera.T).T + camera2robot[:3, 3]
    # Sort the corners along the x axis.
    sorted_points = corner_points_base[np.argsort(corner_points_base[:, 0])]
    # Take the two corners with the largest x.
    point_1 = sorted_points[-1]  # larger x
    point_2 = sorted_points[-2]  # smaller x
    # Orient the edge vector by the corners' y values.
    if point_1[1] < point_2[1]:
        edge_vector = point_1 - point_2
    else:
        edge_vector = point_2 - point_1
    # Normalise the direction vector.
    short_edge_direction = edge_vector / np.linalg.norm(edge_vector)
    delta = -10  # offset along the normal: negative lifts away from the surface, positive presses in
    angle = np.asarray([a,b,c])
    # (sic: "noraml") normal direction rotated into the base frame.
    noraml = camera2robot[:3, :3]@angle
    normal_vector = noraml / np.linalg.norm(noraml)
    target_position = target_position_raw[:3] + delta * normal_vector  # shift the target along the normal
    noraml_base = vec2rpy(noraml,short_edge_direction)
    return target_position,noraml_base
def getxyz(x,y,z,a,b,c):
    """Offset point (x, y, z) by 500 units against the normal (a, b, c),
    rotating the normal from the camera frame into the base frame first.

    NOTE(review): unlike getPosition(), the input point itself is NOT
    transformed by camera2robot (that line is commented out) — confirm the
    caller already supplies base-frame coordinates.
    """
    target = np.asarray([x, y, z])
    camera2robot = np.loadtxt('./Trace/com_pose2.txt', delimiter=' ')
    # target_position_raw = np.dot(camera2robot, target)
    delta = -500  # offset along the normal: negative lifts, positive presses
    angle = np.asarray([a, b, c])
    noraml = camera2robot[:3, :3] @ angle
    normal_vector = noraml / np.linalg.norm(noraml)
    target_position = target + delta * normal_vector
    return target_position
def getxyz1(x, y, z, a, b, c):
    """Identity pass-through: return the pose unchanged, with no
    transformation and no offset applied."""
    return x, y, z, a, b, c

134
Trace/vec_change.py Normal file
View File

@ -0,0 +1,134 @@
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def plot_coordinate_system(ax, T, name, color, labels):
    """Draw the coordinate frame given by the 4x4 transform `T` on the 3D
    axes `ax`: the origin as a dot in `color`, the x/y/z axes as
    red/green/blue arrows of length 300, labelled with `labels`.
    `name` is currently unused.
    """
    origin = T[:3, 3]
    x_axis = origin + T[:3, 0] * 300  # X axis endpoint
    y_axis = origin + T[:3, 1] * 300  # Y axis endpoint
    z_axis = origin + T[:3, 2] * 300  # Z axis endpoint
    # Origin marker.
    ax.scatter(*origin, color=color, s=100)
    # Axis arrows.
    ax.quiver(*origin, *(x_axis - origin), color='r', length=1, arrow_length_ratio=0.2, linewidth=2)
    ax.quiver(*origin, *(y_axis - origin), color='g', length=1, arrow_length_ratio=0.2, linewidth=2)
    ax.quiver(*origin, *(z_axis - origin), color='b', length=1, arrow_length_ratio=0.2, linewidth=2)
    # Axis labels.
    ax.text(*x_axis, f'{labels[0]}', color='r', fontsize=12)
    ax.text(*y_axis, f'{labels[1]}', color='g', fontsize=12)
    ax.text(*z_axis, f'{labels[2]}', color='b', fontsize=12)
# Homogeneous transform A->B (tool to base frame).
T_AB = np.array([[-9.36910568e-01,-4.37100341e-03, 3.49541818e-01, 5.04226000e+02],
                 [-5.82144893e-03, 9.99978253e-01, -3.09911034e-03, 2.62300000e+00],
                 [-3.49520671e-01, -4.93842907e-03, -9.36915638e-01, 5.23709000e+02],
                 [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])
# Homogeneous transform B->C (camera to tool), loaded from disk.
T_BC = np.loadtxt('./com_pose.txt', delimiter=' ')
# Compose A->C if needed.
# T_AC = T_AB @ T_BC
# Four corner points in the camera frame (earlier candidate ordering kept
# below for reference).
# corner_points_camera = np.array([
#     [-605.3829, 288.2771, 1710.0],
#     [-364.94568, 300.40274, 1634.0],
#     [-301.4996, -253.04178, 1645.0],
#     [-548.8065, -297.23093, 1748.0]
# ])
#
# # Transform the corners from the camera frame to the base frame.
#
# corner_points_base = np.dot(T_BC[:3, :3], corner_points_camera.T).T + T_BC[:3, 3]
# edges = np.array([corner_points_base[1] - corner_points_base[0]])# for i in range(len(corner_points_base))])
# edge_lengths = np.linalg.norm(edges, axis=1)
# min_edge_idx = np.argmin(edge_lengths)
# short_edge_direction = edges[min_edge_idx] / edge_lengths[min_edge_idx]  # unit direction vector
corner_points_camera = np.array([
    [-548.8065, -297.23093, 1748.0],
    [-301.4996, -253.04178, 1645.0],
    [-364.94568, 300.40274, 1634.0],
    [-605.3829, 288.2771, 1710.0]
])
# Transform the corners from the camera frame to the base frame.
corner_points_base = np.dot(T_BC[:3, :3], corner_points_camera.T).T + T_BC[:3, 3]
# Sort the corners along the x axis.
sorted_points = corner_points_base[np.argsort(corner_points_base[:, 0])]
# Take the two corners with the largest x.
point_1 = sorted_points[-1]  # larger x
point_2 = sorted_points[-2]  # smaller x
# Orient the edge by y: larger-y point minus smaller-y point.
if point_1[1] > point_2[1]:
    edge_vector = point_1 - point_2
else:
    edge_vector = point_2 - point_1
# Normalise the direction vector.
short_edge_direction = edge_vector / np.linalg.norm(edge_vector)
print("方向向量(单位化):", short_edge_direction)
# Surface normal (a, b, c) given in the camera frame.
normal_vector_camera = np.array([0.2694268969253701, 0.033645691818738714, 0.9624329143556991, 0])  # last element 0: a direction vector, not a point
# Rotate the normal from the camera frame into the flange frame.
normal_vector_flange = T_BC @ normal_vector_camera
# Optionally continue from the flange frame to the base frame.
# normal_vector_base = T_AB @ normal_vector_flange
# Create the 3D figure.
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# Plot-range limits.
ax.set_xlim([-1000, 1000])
ax.set_ylim([-1000, 1000])
ax.set_zlim([-1000, 1000])
# Draw the base frame O.
plot_coordinate_system(ax, np.eye(4), 'O', 'k', ['x', 'y', 'z'])
# Draw the flange frame B.
# plot_coordinate_system(ax, T_AB, 'B', 'm', ["x'", "y'", "z'"])
# Draw the camera frame C.
plot_coordinate_system(ax, T_BC, 'C', 'b', ["x''", "y''", "z''"])
# Draw the edge direction vector (base frame).
origin = np.zeros(3)  # base-frame origin
short_edge_endpoint = short_edge_direction * 300
ax.quiver(*origin, *(short_edge_endpoint), color='orange', length=1, arrow_length_ratio=0.2, linewidth=2)
ax.text(*short_edge_endpoint, 'Short Edge', color='orange', fontsize=12)
# Draw the normal vector (base frame).
normal_vector_endpoint = normal_vector_flange[:3] * 300
ax.quiver(*origin, *(normal_vector_endpoint), color='purple', length=1, arrow_length_ratio=0.2, linewidth=2)
ax.text(*normal_vector_endpoint, 'Normal Vector', color='purple', fontsize=12)
# Draw the four corners and the connecting edges in the base frame.
ax.scatter(corner_points_base[:, 0], corner_points_base[:, 1], corner_points_base[:, 2], color='b', s=50, label='Corners')
for i in range(len(corner_points_base)):
    ax.plot([corner_points_base[i - 1, 0], corner_points_base[i, 0]],
            [corner_points_base[i - 1, 1], corner_points_base[i, 1]],
            [corner_points_base[i - 1, 2], corner_points_base[i, 2]], 'k--')
# Axis labels.
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
# Show the figure.
plt.show()