first commit
This commit is contained in:
75
Vision/tool/CameraHIK.py
Normal file
75
Vision/tool/CameraHIK.py
Normal file
@ -0,0 +1,75 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
# @Time : 2024/10/11 10:43
|
||||
# @Author : hjw
|
||||
# @File : CameraHIK.py
|
||||
'''
|
||||
import cv2
|
||||
import socket
|
||||
|
||||
def portisopen(ip, port):
    """Return True if a TCP connection to (ip, port) succeeds within 1 second.

    Used as a cheap reachability probe before opening the camera's RTSP stream.

    :param ip: host address as a string
    :param port: TCP port number
    :return: True when the port accepts a connection, False otherwise
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        sock.settimeout(1)
        # connect_ex returns 0 on success instead of raising an exception.
        return sock.connect_ex((ip, port)) == 0
    finally:
        # Always release the socket; the original leaked one per call.
        sock.close()
|
||||
|
||||
class camera_HIK():
    """OpenCV (RTSP) wrapper for a Hikvision IP camera.

    The stream URL has the form ``rtsp://<name>:<pw>@<ip>:<port>``
    (e.g. "rtsp://admin:zlzk.123@192.168.1.64:554").
    """

    def __init__(self, ip, port, name, pw):
        """Probe the camera port and, if reachable, open the RTSP stream.

        :param ip: camera IP address
        :param port: RTSP port (typically 554)
        :param name: RTSP user name
        :param pw: RTSP password
        """
        self.camera_url = "rtsp://" + str(name) + ":" + str(pw) + "@" + str(ip) + ":" + str(port)
        self.ip = ip
        self.port = port
        self.init_success = False
        # Initialise to None so release_camera() is safe even when the
        # network probe fails (original code left self.cap undefined).
        self.cap = None
        if portisopen(ip, port):
            self.cap = cv2.VideoCapture(self.camera_url)
            self.init_success = True
        else:
            # "Hikvision camera network error, please check the IP"
            print('海康摄像头网络错误,请检测IP')

    def get_img(self):
        """Grab one frame from the camera.

        :return: (ret, frame) — ret is True on a successful read, frame is
            the BGR image or None.

        NOTE(review): the stream is re-opened on every call via
        reconnect_camera(); earlier read-first logic was commented out in
        the original, so this behavior is kept deliberately.
        """
        ret = False
        frame = None
        # The original had identical logic in both the initialized and the
        # uninitialized branch; they are collapsed into one path here.
        if portisopen(self.ip, self.port):
            self.reconnect_camera()
            ret, frame = self.cap.read()
        else:
            # "Hikvision camera network disconnected"
            print('海康摄像头网络断开')
        return ret, frame

    def reconnect_camera(self):
        """Release any existing capture and open a fresh RTSP stream.

        Sets init_success according to whether a test frame can be read
        from the new stream.
        """
        if self.init_success and self.cap is not None:
            self.cap.release()
        self.cap = cv2.VideoCapture(self.camera_url)
        # A single test read tells us whether the new stream is usable.
        ret, _ = self.cap.read()
        self.init_success = bool(ret)
        # "Hikvision camera reconnected"
        print("海康摄像头重连")

    def release_camera(self):
        """Release the underlying OpenCV capture, if one was created."""
        if self.cap is not None:
            self.cap.release()
|
||||
|
||||
|
||||
195
Vision/tool/CameraPe.py
Normal file
195
Vision/tool/CameraPe.py
Normal file
@ -0,0 +1,195 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
# @Time : 2024/9/19 16:08
|
||||
# @Author : hjw
|
||||
# @File : CameraPe.py
|
||||
'''
|
||||
from Vision.tool.percipio.win import pcammls
|
||||
from Vision.tool.percipio.win.pcammls import *
|
||||
from Expection import VisionError_Code
|
||||
import cv2
|
||||
|
||||
|
||||
class PythonPercipioDeviceEvent(pcammls.DeviceEvent):
    """Device-event callback that records when the camera goes offline."""

    # Class-level default; shadowed by an instance attribute once an
    # offline event has been observed.
    Offline = False

    def __init__(self):
        super().__init__()

    def run(self, handle, eventID):
        """SDK callback hook: latch the offline flag on the offline event."""
        is_offline_event = (eventID == TY_EVENT_DEVICE_OFFLINE)
        if is_offline_event:
            print('=== Event Callback: Device Offline!')
            self.Offline = True
        return 0

    def IsOffline(self):
        """Return True once an offline event has been observed."""
        return self.Offline
|
||||
|
||||
class camera_pe():
    """Percipio (图漾) depth camera wrapper built on the pcammls SDK.

    On construction it selects a device, configures color + depth streams,
    and starts streaming.  caminit_isok stays False if any setup step fails.
    """

    def __init__(self):
        super().__init__()
        # False until the whole setup sequence below completes.
        self.caminit_isok = False
        self.cl = PercipioSDK()
        dev_list = self.cl.ListDevice()
        for idx in range(len(dev_list)):
            dev = dev_list[idx]
            print('{} -- {} \t {}'.format(idx, dev.id, dev.iface.id))
        if len(dev_list) == 0:
            print('no device')
            return
        if len(dev_list) == 1:
            selected_idx = 0
        else:
            # Interactive prompt when several cameras are attached.
            selected_idx = int(input('select a device:'))
        if selected_idx < 0 or selected_idx >= len(dev_list):
            return

        sn = dev_list[selected_idx].id
        # Device serial number.
        self.handle = self.cl.Open(sn)
        if not self.cl.isValidHandle(self.handle):
            err = self.cl.TYGetLastErrorCodedescription()
            print('no device found : ', end='')
            print(err)
            return

        # Register the offline-event watcher before streaming starts.
        self.event = PythonPercipioDeviceEvent()
        self.cl.DeviceRegiststerCallBackEvent(self.event)

        color_fmt_list = self.cl.DeviceStreamFormatDump(self.handle, PERCIPIO_STREAM_COLOR)
        if len(color_fmt_list) == 0:
            print('device has no color stream.')
            return

        # Index 2 selects a fixed color resolution from the format list —
        # assumes the device exposes at least 3 color formats; TODO confirm.
        self.cl.DeviceStreamFormatConfig(self.handle, PERCIPIO_STREAM_COLOR, color_fmt_list[2])

        depth_fmt_list = self.cl.DeviceStreamFormatDump(self.handle, PERCIPIO_STREAM_DEPTH)
        if len(depth_fmt_list) == 0:
            print('device has no depth stream.')
            return

        # Same fixed-index selection for the depth resolution.
        self.cl.DeviceStreamFormatConfig(self.handle, PERCIPIO_STREAM_DEPTH, depth_fmt_list[2])

        err = self.cl.DeviceLoadDefaultParameters(self.handle)
        if err:
            print('Load default parameters fail: ', end='')
            print(self.cl.TYGetLastErrorCodedescription())
        else:
            print('Load default parameters successful')

        # Scale factor converting raw depth values to metric units.
        self.scale_unit = self.cl.DeviceReadCalibDepthScaleUnit(self.handle)

        self.depth_calib = self.cl.DeviceReadCalibData(self.handle, PERCIPIO_STREAM_DEPTH)
        self.color_calib = self.cl.DeviceReadCalibData(self.handle, PERCIPIO_STREAM_COLOR)

        # Reusable SDK buffers filled by the per-frame processing calls.
        self.pointcloud_data_arr = pointcloud_data_list()
        self.img_registration_depth = image_data()
        self.img_registration_render = image_data()
        self.img_parsed_color = image_data()
        self.img_undistortion_color = image_data()

        err = self.cl.DeviceStreamEnable(self.handle, PERCIPIO_STREAM_COLOR | PERCIPIO_STREAM_DEPTH)
        if err:
            print('device stream enable err:{}'.format(err))
            return

        self.cl.DeviceStreamOn(self.handle)
        self.caminit_isok = True
        print(VisionError_Code.CAMERA_SUCCESS)

    def get_img(self):
        """Read one undistorted color image.

        :return: (1, img) on success, (0, None) when the camera is not
            initialized, offline, or a full frame pair did not arrive.
        """
        if self.caminit_isok == False or self.event.IsOffline():
            return 0, None
        else:
            # 2000 ms read timeout; a complete read yields exactly two
            # frames (color + depth).
            image_list = self.cl.DeviceStreamRead(self.handle, 2000)
            if len(image_list) == 2:
                for i in range(len(image_list)):
                    frame = image_list[i]
                    if frame.streamID == PERCIPIO_STREAM_COLOR:
                        img_color = frame
                        self.cl.DeviceStreamImageDecode(frame, self.img_undistortion_color)
                        img = self.img_undistortion_color.as_nparray()
                        return 1, img
            return 0, None

    def get_point_map(self):
        """Placeholder for point-cloud-only capture.

        NOTE(review): not implemented — on the success path it falls
        through and implicitly returns None (not a tuple); confirm callers.
        """
        if self.caminit_isok == False or self.event.IsOffline():
            return 0, None
        else:
            pass

    def get_img_and_point_map(self):
        """Read one frame pair and return the color image plus point cloud.

        :return: (1, color_image, point_cloud) on success,
            (0, None, None) otherwise.  The point cloud is registered to
            the color camera's coordinate frame.
        """
        if self.caminit_isok == False or self.event.IsOffline():
            return 0, None, None
        else:
            image_list = self.cl.DeviceStreamRead(self.handle, 2000)
            if len(image_list) == 2:
                for i in range(len(image_list)):
                    frame = image_list[i]
                    if frame.streamID == PERCIPIO_STREAM_DEPTH:
                        img_depth = frame
                    if frame.streamID == PERCIPIO_STREAM_COLOR:
                        img_color = frame

                # Re-project the depth image into the color camera's view.
                self.cl.DeviceStreamMapDepthImageToColorCoordinate(self.depth_calib, img_depth.width, img_depth.height,
                                                                   self.scale_unit, img_depth, self.color_calib, img_color.width,
                                                                   img_color.height, self.img_registration_depth)

                # Optional debug rendering of the registered depth image:
                # DeviceStreamDepthRender + cv2.imshow (disabled).

                self.cl.DeviceStreamMapDepthImageToPoint3D(self.img_registration_depth, self.depth_calib, self.scale_unit,
                                                           self.pointcloud_data_arr)

                p3d_nparray = self.pointcloud_data_arr.as_nparray()

                # Decode and undistort the color frame.
                self.cl.DeviceStreamImageDecode(img_color, self.img_parsed_color)
                self.cl.DeviceStreamDoUndistortion(self.color_calib, self.img_parsed_color, self.img_undistortion_color)
                mat_undistortion_color = self.img_undistortion_color.as_nparray()
                return 1, mat_undistortion_color, p3d_nparray
            else:
                return 0, None, None

    def release(self):
        """Stop streaming and close the device handle (no-op if init failed)."""
        if self.caminit_isok == False:
            pass
        else:
            self.cl.DeviceStreamOff(self.handle)
            self.cl.Close(self.handle)
        pass
|
||||
|
||||
|
||||
# my_camera = camera()
|
||||
# while True:
|
||||
# ret, img, p3d_nparray = my_camera.get_img_and_point_map()
|
||||
# cv2.imshow('img', img)
|
||||
# cv2.imshow('3d',p3d_nparray)
|
||||
# cv2.waitKey(1)
|
||||
171
Vision/tool/CameraPe_color2depth.py
Normal file
171
Vision/tool/CameraPe_color2depth.py
Normal file
@ -0,0 +1,171 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
# @Time : 2024/9/19 16:08
|
||||
# @Author : hjw
|
||||
# @File : CameraPe.py
|
||||
'''
|
||||
from Vision.tool.tuyang import pcammls
|
||||
from Vision.tool.tuyang.pcammls import *
|
||||
from Expection import VisionError_Code
|
||||
import cv2
|
||||
|
||||
|
||||
class PythonPercipioDeviceEvent(pcammls.DeviceEvent):
    """Device-event callback that records when the camera goes offline."""

    # Class-level default; shadowed by an instance attribute once an
    # offline event has been observed.
    Offline = False

    def __init__(self):
        super().__init__()

    def run(self, handle, eventID):
        """SDK callback hook: latch the offline flag on the offline event."""
        is_offline_event = (eventID == TY_EVENT_DEVICE_OFFLINE)
        if is_offline_event:
            print('=== Event Callback: Device Offline!')
            self.Offline = True
        return 0

    def IsOffline(self):
        """Return True once an offline event has been observed."""
        return self.Offline
|
||||
|
||||
class camera_pe():
    """Percipio (图漾) depth camera wrapper, color-to-depth registration.

    Unlike the CameraPe.py variant, get_img_and_point_map() maps the RGB
    image into the depth camera's coordinate frame and returns a 4-tuple
    that also includes a rendered depth image.
    """

    def __init__(self):
        super().__init__()
        # False until the whole setup sequence below completes.
        self.caminit_isok = False
        self.cl = PercipioSDK()
        dev_list = self.cl.ListDevice()
        for idx in range(len(dev_list)):
            dev = dev_list[idx]
            print('{} -- {} \t {}'.format(idx, dev.id, dev.iface.id))
        if len(dev_list) == 0:
            print('no device')
            return
        if len(dev_list) == 1:
            selected_idx = 0
        else:
            # Interactive prompt when several cameras are attached.
            selected_idx = int(input('select a device:'))
        if selected_idx < 0 or selected_idx >= len(dev_list):
            return

        sn = dev_list[selected_idx].id
        # Device serial number.
        self.handle = self.cl.Open(sn)
        if not self.cl.isValidHandle(self.handle):
            err = self.cl.TYGetLastErrorCodedescription()
            print('no device found : ', end='')
            print(err)
            return

        # Register the offline-event watcher before streaming starts.
        self.event = PythonPercipioDeviceEvent()
        self.cl.DeviceRegiststerCallBackEvent(self.event)

        color_fmt_list = self.cl.DeviceStreamFormatDump(self.handle, PERCIPIO_STREAM_COLOR)
        if len(color_fmt_list) == 0:
            print('device has no color stream.')
            return

        # Image size: index 0 selects the first advertised color format.
        self.cl.DeviceStreamFormatConfig(self.handle, PERCIPIO_STREAM_COLOR, color_fmt_list[0])

        depth_fmt_list = self.cl.DeviceStreamFormatDump(self.handle, PERCIPIO_STREAM_DEPTH)
        if len(depth_fmt_list) == 0:
            print('device has no depth stream.')
            return

        # Depth image size: first advertised depth format.
        self.cl.DeviceStreamFormatConfig(self.handle, PERCIPIO_STREAM_DEPTH, depth_fmt_list[0])
        self.depth_calib_data = self.cl.DeviceReadCalibData(self.handle, PERCIPIO_STREAM_DEPTH)

        err = self.cl.DeviceLoadDefaultParameters(self.handle)
        if err:
            print('Load default parameters fail: ', end='')
            print(self.cl.TYGetLastErrorCodedescription())
        else:
            print('Load default parameters successful')

        # Scale factor converting raw depth values to metric units.
        self.scale_unit = self.cl.DeviceReadCalibDepthScaleUnit(self.handle)

        self.depth_calib = self.cl.DeviceReadCalibData(self.handle, PERCIPIO_STREAM_DEPTH)
        self.color_calib = self.cl.DeviceReadCalibData(self.handle, PERCIPIO_STREAM_COLOR)

        err = self.cl.DeviceStreamEnable(self.handle, PERCIPIO_STREAM_COLOR | PERCIPIO_STREAM_DEPTH)
        if err:
            print('device stream enable err:{}'.format(err))
            return
        self.cl.DeviceStreamOn(self.handle)

        # Reusable SDK buffers filled by the per-frame processing calls.
        self.img_registration_render = image_data()
        self.img_registration_depth = image_data()
        # NOTE(review): img_registration_render is assigned twice in the
        # original; the duplicate is harmless and kept for fidelity.
        self.img_registration_render = image_data()
        self.img_parsed_color = image_data()
        self.img_undistortion_color = image_data()
        self.img_registration_color = image_data()
        self.pointcloud_data_arr = pointcloud_data_list()
        self.caminit_isok = True
        print(VisionError_Code.CAMERA_SUCCESS)

    def get_img_and_point_map(self):
        """Read one frame pair; return aligned color, point cloud, depth render.

        :return: (1, registered_color, point_cloud, depth_render) on
            success, (0, None, None, None) otherwise.  The color image is
            registered to the depth camera's coordinate frame.
        """
        if self.caminit_isok == False or self.event.IsOffline():
            return 0, None, None, None
        else:
            # 2000 ms read timeout; a complete read yields exactly two
            # frames (color + depth).
            image_list = self.cl.DeviceStreamRead(self.handle, 2000)
            if len(image_list) == 2:
                for i in range(len(image_list)):
                    frame = image_list[i]
                    if frame.streamID == PERCIPIO_STREAM_DEPTH:
                        img_depth = frame
                    if frame.streamID == PERCIPIO_STREAM_COLOR:
                        img_color = frame

                # Decode and undistort the color frame first.
                self.cl.DeviceStreamImageDecode(img_color, self.img_parsed_color)
                self.cl.DeviceStreamDoUndistortion(self.color_calib, self.img_parsed_color, self.img_undistortion_color)

                # Map the RGB image onto the depth camera's coordinates
                # (registration / alignment).
                self.cl.DeviceStreamMapRGBImageToDepthCoordinate(self.depth_calib, img_depth, self.scale_unit, self.color_calib,
                                                                 self.img_undistortion_color, self.img_registration_color)

                # Render the raw depth frame for visualization.
                self.cl.DeviceStreamDepthRender(img_depth, self.img_registration_render)
                mat_depth_render = self.img_registration_render.as_nparray()

                # Point cloud computed from the raw (unregistered) depth.
                self.cl.DeviceStreamMapDepthImageToPoint3D(img_depth, self.depth_calib_data, self.scale_unit, self.pointcloud_data_arr)
                p3d_nparray = self.pointcloud_data_arr.as_nparray()

                # Color image aligned to the depth frame.
                mat_registration_color = self.img_registration_color.as_nparray()

                return 1, mat_registration_color, p3d_nparray, mat_depth_render
            else:
                return 0, None, None, None

    def release(self):
        """Stop streaming and close the device handle (no-op if init failed)."""
        if self.caminit_isok == False:
            pass
        else:
            self.cl.DeviceStreamOff(self.handle)
            self.cl.Close(self.handle)
        pass
|
||||
|
||||
|
||||
# my_camera = camera()
|
||||
# while True:
|
||||
# ret, img, p3d_nparray = my_camera.get_img_and_point_map()
|
||||
# cv2.imshow('img', img)
|
||||
# cv2.imshow('3d',p3d_nparray)
|
||||
# cv2.waitKey(1)
|
||||
183
Vision/tool/CameraPe_depth2color.py
Normal file
183
Vision/tool/CameraPe_depth2color.py
Normal file
@ -0,0 +1,183 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
# @Time : 2024/9/19 16:08
|
||||
# @Author : hjw
|
||||
# @File : CameraPe.py
|
||||
'''
|
||||
from Vision.tool.tuyang import pcammls
|
||||
from Vision.tool.tuyang.pcammls import *
|
||||
from Expection import VisionError_Code
|
||||
import cv2
|
||||
|
||||
|
||||
class PythonPercipioDeviceEvent(pcammls.DeviceEvent):
    """Device-event callback that records when the camera goes offline."""

    # Class-level default; shadowed by an instance attribute once an
    # offline event has been observed.
    Offline = False

    def __init__(self):
        super().__init__()

    def run(self, handle, eventID):
        """SDK callback hook: latch the offline flag on the offline event."""
        is_offline_event = (eventID == TY_EVENT_DEVICE_OFFLINE)
        if is_offline_event:
            print('=== Event Callback: Device Offline!')
            self.Offline = True
        return 0

    def IsOffline(self):
        """Return True once an offline event has been observed."""
        return self.Offline
|
||||
|
||||
class camera_pe():
    """Percipio (图漾) depth camera wrapper, depth-to-color registration.

    Unlike the color2depth variant, get_img_and_point_map() maps the depth
    image into the color camera's coordinate frame and returns the
    undistorted color image plus a point cloud and a rendered depth image.
    """

    def __init__(self):
        super().__init__()
        # False until the whole setup sequence below completes.
        self.caminit_isok = False
        self.cl = PercipioSDK()
        dev_list = self.cl.ListDevice()
        for idx in range(len(dev_list)):
            dev = dev_list[idx]
            print('{} -- {} \t {}'.format(idx, dev.id, dev.iface.id))
        if len(dev_list) == 0:
            print('no device')
            return
        if len(dev_list) == 1:
            selected_idx = 0
        else:
            # Interactive prompt when several cameras are attached.
            selected_idx = int(input('select a device:'))
        if selected_idx < 0 or selected_idx >= len(dev_list):
            return

        sn = dev_list[selected_idx].id
        # Device serial number.
        self.handle = self.cl.Open(sn)
        if not self.cl.isValidHandle(self.handle):
            err = self.cl.TYGetLastErrorCodedescription()
            print('no device found : ', end='')
            print(err)
            return

        # Register the offline-event watcher before streaming starts.
        self.event = PythonPercipioDeviceEvent()
        self.cl.DeviceRegiststerCallBackEvent(self.event)

        color_fmt_list = self.cl.DeviceStreamFormatDump(self.handle, PERCIPIO_STREAM_COLOR)
        if len(color_fmt_list) == 0:
            print('device has no color stream.')
            return

        # Image size: index 2 selects a fixed color resolution — assumes
        # the device exposes at least 3 color formats; TODO confirm.
        self.cl.DeviceStreamFormatConfig(self.handle, PERCIPIO_STREAM_COLOR, color_fmt_list[2])

        depth_fmt_list = self.cl.DeviceStreamFormatDump(self.handle, PERCIPIO_STREAM_DEPTH)
        if len(depth_fmt_list) == 0:
            print('device has no depth stream.')
            return

        # Depth image size: same fixed-index selection.
        self.cl.DeviceStreamFormatConfig(self.handle, PERCIPIO_STREAM_DEPTH, depth_fmt_list[2])
        self.depth_calib_data = self.cl.DeviceReadCalibData(self.handle, PERCIPIO_STREAM_DEPTH)
        self.color_calib_data = self.cl.DeviceReadCalibData(self.handle, PERCIPIO_STREAM_COLOR)

        err = self.cl.DeviceLoadDefaultParameters(self.handle)
        if err:
            print('Load default parameters fail: ', end='')
            print(self.cl.TYGetLastErrorCodedescription())
        else:
            print('Load default parameters successful')

        # Scale factor converting raw depth values to metric units.
        self.scale_unit = self.cl.DeviceReadCalibDepthScaleUnit(self.handle)

        self.depth_calib = self.cl.DeviceReadCalibData(self.handle, PERCIPIO_STREAM_DEPTH)
        self.color_calib = self.cl.DeviceReadCalibData(self.handle, PERCIPIO_STREAM_COLOR)

        err = self.cl.DeviceStreamEnable(self.handle, PERCIPIO_STREAM_COLOR | PERCIPIO_STREAM_DEPTH)
        if err:
            print('device stream enable err:{}'.format(err))
            return
        self.cl.DeviceStreamOn(self.handle)

        # Reusable SDK buffers filled by the per-frame processing calls.
        self.img_registration_render = image_data()
        self.img_registration_depth = image_data()
        # NOTE(review): img_registration_render is assigned twice in the
        # original; the duplicate is harmless and kept for fidelity.
        self.img_registration_render = image_data()
        self.img_parsed_color = image_data()
        self.img_undistortion_color = image_data()
        self.img_registration_color = image_data()
        self.pointcloud_data_arr = pointcloud_data_list()
        self.caminit_isok = True
        print(VisionError_Code.CAMERA_SUCCESS)

    def get_img_and_point_map(self):
        """Read one frame pair; return color image, point cloud, depth render.

        :return: (1, undistorted_color, point_cloud, depth_render) on
            success, (0, None, None, None) otherwise.  The depth image is
            registered to the color camera's coordinate frame before the
            point cloud is computed.
        """
        if self.caminit_isok == False or self.event.IsOffline():
            return 0, None, None, None
        else:
            # 2000 ms read timeout; a complete read yields exactly two
            # frames (color + depth).
            image_list = self.cl.DeviceStreamRead(self.handle, 2000)
            if len(image_list) == 2:
                for i in range(len(image_list)):
                    frame = image_list[i]
                    if frame.streamID == PERCIPIO_STREAM_DEPTH:
                        img_depth = frame
                    if frame.streamID == PERCIPIO_STREAM_COLOR:
                        img_color = frame

                # Re-project the depth image into the color camera's view.
                self.cl.DeviceStreamMapDepthImageToColorCoordinate(self.depth_calib, img_depth, self.scale_unit, self.color_calib,
                                                                   img_color.width, img_color.height, self.img_registration_depth)

                # Render the registered depth frame for visualization.
                self.cl.DeviceStreamDepthRender(self.img_registration_depth, self.img_registration_render)
                mat_depth_render = self.img_registration_render.as_nparray()
                # Point cloud computed from the registered depth, using the
                # color camera's calibration.
                self.cl.DeviceStreamMapDepthImageToPoint3D(self.img_registration_depth, self.color_calib_data, self.scale_unit,
                                                           self.pointcloud_data_arr)
                p3d_nparray = self.pointcloud_data_arr.as_nparray()

                # Decode and undistort the color frame.
                self.cl.DeviceStreamImageDecode(img_color, self.img_parsed_color)
                self.cl.DeviceStreamDoUndistortion(self.color_calib, self.img_parsed_color, self.img_undistortion_color)
                mat_undistortion_color = self.img_undistortion_color.as_nparray()

                # (A commented-out alternative that mapped RGB into the depth
                # frame — as in the color2depth variant — was removed here.)

                return 1, mat_undistortion_color, p3d_nparray, mat_depth_render
            else:
                return 0, None, None, None

    def release(self):
        """Stop streaming and close the device handle (no-op if init failed)."""
        if self.caminit_isok == False:
            pass
        else:
            self.cl.DeviceStreamOff(self.handle)
            self.cl.Close(self.handle)
        pass
|
||||
|
||||
|
||||
# my_camera = camera()
|
||||
# while True:
|
||||
# ret, img, p3d_nparray = my_camera.get_img_and_point_map()
|
||||
# cv2.imshow('img', img)
|
||||
# cv2.imshow('3d',p3d_nparray)
|
||||
# cv2.waitKey(1)
|
||||
149
Vision/tool/CameraRVC.py
Normal file
149
Vision/tool/CameraRVC.py
Normal file
@ -0,0 +1,149 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: UTF-8 -*-
|
||||
'''
|
||||
@Project :my_work
|
||||
@File :camera.py
|
||||
@IDE :PyCharm
|
||||
@Author :hjw
|
||||
@Date :2024/8/13 11:34
|
||||
'''
|
||||
import PyRVC as RVC
|
||||
import numpy as np
|
||||
|
||||
class camera_rvc:
    """Wrapper for an RVC X structured-light camera via the PyRVC SDK.

    Construction scans for GigE devices, opens the first one's left camera,
    and sets caminit_isok accordingly.  Capture methods return (ret, ...)
    tuples with ret == 1 on success, 0 on failure.
    """

    def __init__(self):
        """Initialize the RVC system and open the first GigE camera found."""
        self.caminit_isok = False
        RVC.SystemInit()

        # Choose RVC X Camera type (USB, GigE or All).
        opt = RVC.SystemListDeviceTypeEnum.GigE

        # Scan all RVC X Camera devices.
        ret, devices = RVC.SystemListDevices(opt)
        print("RVC X Camera devices number:%d" % len(devices))

        # Find whether any RVC X Camera is connected or not.
        if len(devices) == 0:
            print("Can not find any RVC X Camera!")
            RVC.SystemShutdown()
        else:
            print("devices size = %d" % len(devices))

            # Create a RVC X Camera and choose to use the left side camera.
            self.x = RVC.X1.Create(devices[0], RVC.CameraID_Left)
            # x = RVC.X1.Create(devices[0], RVC.CameraID_Right)

            # Test whether the RVC X Camera is valid.
            if self.x.IsValid() == True:
                print("RVC X Camera is valid!")
                # Open RVC X Camera.
                ret1 = self.x.Open()
                # Test whether the RVC X Camera is opened.
                if ret1 and self.x.IsOpen() == True:
                    print("RVC X Camera is opened!")
                    self.caminit_isok = True
                else:
                    print("RVC X Camera is not opened!")
                    RVC.X1.Destroy(self.x)
                    RVC.SystemShutdown()
                    self.caminit_isok = False
            else:
                print("RVC X Camera is not valid!")
                RVC.X1.Destroy(self.x)
                RVC.SystemShutdown()
                self.caminit_isok = False

    def get_img(self):
        """Capture and return one 2D image.

        :return: (1, img) on success — img is a numpy view over SDK memory
            (copy=False; presumably invalidated by the next capture, TODO
            confirm) — or (0, None) on failure.  On capture failure the
            camera is destroyed and the SDK shut down.
        """
        if self.caminit_isok == False:
            return 0, None
        else:
            # Capture a point map and an image in one shot.
            ret2 = self.x.Capture()

            if ret2 == True:
                print("RVC X Camera capture successed!")

                # Get image data and image size.
                img = self.x.GetImage()
                # Convert image to a numpy array (no copy).
                img = np.array(img, copy=False)
                return 1, img
            else:
                print("RVC X Camera capture failed!")
                # Tear down on failure; the instance is unusable afterwards.
                self.x.Close()
                RVC.X1.Destroy(self.x)
                RVC.SystemShutdown()
                return 0, None

    def get_point_map(self):
        """Capture and return one point map.

        :return: (1, pm) on success — pm is the point map in meters as a
            numpy view (copy=False) — or (0, None) on failure.
        """
        if self.caminit_isok == False:
            return 0, None
        else:
            # Capture a point map and an image in one shot.
            ret2 = self.x.Capture()

            if ret2 == True:
                print("RVC X Camera capture successed!")
                # Convert point map (m) to a numpy array (no copy).
                pm = np.array(self.x.GetPointMap(), copy=False)
                return 1, pm
            else:
                print("RVC X Camera capture failed!")
                # Tear down on failure; the instance is unusable afterwards.
                self.x.Close()
                RVC.X1.Destroy(self.x)
                RVC.SystemShutdown()
                return 0, None

    def get_img_and_point_map(self):
        """Capture once and return both the 2D image and the point map.

        :return: (1, img, pm) on success, (0, None, None) on failure.
        """
        if self.caminit_isok == False:
            return 0, None, None
        else:
            # Capture a point map and an image in one shot.
            ret2 = self.x.Capture()

            if ret2 == True:
                print("RVC X Camera capture successed!")

                # Get image data and image size.
                img = self.x.GetImage()
                # Convert image to a numpy array (no copy).
                img = np.array(img, copy=False)

                # Convert point map (m) to a numpy array (no copy).
                pm = np.array(self.x.GetPointMap(), copy=False)
                return 1, img, pm
            else:
                print("RVC X Camera capture failed!")
                # Tear down on failure; the instance is unusable afterwards.
                self.x.Close()
                RVC.X1.Destroy(self.x)
                RVC.SystemShutdown()
                return 0, None, None

    def release(self):
        """Destroy the camera (if created) and shut the RVC system down."""
        if self.caminit_isok == False:
            RVC.SystemShutdown()
        else:
            RVC.X1.Destroy(self.x)
            RVC.SystemShutdown()
|
||||
|
||||
|
||||
BIN
Vision/tool/__pycache__/CameraRVC.cpython-39.pyc
Normal file
BIN
Vision/tool/__pycache__/CameraRVC.cpython-39.pyc
Normal file
Binary file not shown.
BIN
Vision/tool/percipio/win/_pcammls.pyd
Normal file
BIN
Vision/tool/percipio/win/_pcammls.pyd
Normal file
Binary file not shown.
10820
Vision/tool/percipio/win/pcammls.py
Normal file
10820
Vision/tool/percipio/win/pcammls.py
Normal file
File diff suppressed because it is too large
Load Diff
BIN
Vision/tool/percipio/win/tycam.dll
Normal file
BIN
Vision/tool/percipio/win/tycam.dll
Normal file
Binary file not shown.
BIN
Vision/tool/percipio/win/tycam.lib
Normal file
BIN
Vision/tool/percipio/win/tycam.lib
Normal file
Binary file not shown.
BIN
Vision/tool/tuyang/_pcammls.pyd
Normal file
BIN
Vision/tool/tuyang/_pcammls.pyd
Normal file
Binary file not shown.
142
Vision/tool/tuyang/frame_fetch.py
Normal file
142
Vision/tool/tuyang/frame_fetch.py
Normal file
@ -0,0 +1,142 @@
|
||||
import pcammls
|
||||
from pcammls import *
|
||||
import cv2
|
||||
import numpy
|
||||
import sys
|
||||
import os
|
||||
|
||||
class PythonPercipioDeviceEvent(pcammls.DeviceEvent):
    """Device-event callback that records when the camera goes offline."""

    # Class-level default; shadowed by an instance attribute once an
    # offline event has been observed.
    Offline = False

    def __init__(self):
        super().__init__()

    def run(self, handle, eventID):
        """SDK callback hook: latch the offline flag on the offline event."""
        is_offline_event = (eventID == TY_EVENT_DEVICE_OFFLINE)
        if is_offline_event:
            print('=== Event Callback: Device Offline!')
            self.Offline = True
        return 0

    def IsOffline(self):
        """Return True once an offline event has been observed."""
        return self.Offline
|
||||
|
||||
def main():
    """Interactive Percipio demo: dump formats/calibration, then live preview.

    Opens the selected device, prints its color/depth stream formats and
    calibration data, then shows color and depth windows until the device
    goes offline or 'q' is pressed.
    """
    cl = PercipioSDK()

    dev_list = cl.ListDevice()
    for idx in range(len(dev_list)):
        dev = dev_list[idx]
        print ('{} -- {} \t {}'.format(idx,dev.id,dev.iface.id))
    if len(dev_list)==0:
        print ('no device')
        return
    if len(dev_list) == 1:
        selected_idx = 0
    else:
        # Interactive prompt when several cameras are attached.
        selected_idx = int(input('select a device:'))
    if selected_idx < 0 or selected_idx >= len(dev_list):
        return

    # Device serial number.
    sn = dev_list[selected_idx].id

    handle = cl.Open(sn)
    if not cl.isValidHandle(handle):
        err = cl.TYGetLastErrorCodedescription()
        print('no device found : ', end='')
        print(err)
        return

    # Register the offline-event watcher before streaming starts.
    event = PythonPercipioDeviceEvent()
    cl.DeviceRegiststerCallBackEvent(event)

    color_fmt_list = cl.DeviceStreamFormatDump(handle, PERCIPIO_STREAM_COLOR)
    if len(color_fmt_list) != 0:
        print ('color image format list:')
        for idx in range(len(color_fmt_list)):
            fmt = color_fmt_list[idx]
            print ('\t{} -size[{}x{}]\t-\t desc:{}'.format(idx, cl.Width(fmt), cl.Height(fmt), fmt.getDesc()))
        print('\tSelect {}'.format(fmt.getDesc()))
        # Use the first advertised color format.
        cl.DeviceStreamFormatConfig(handle, PERCIPIO_STREAM_COLOR, color_fmt_list[0])

    color_enum_desc = TY_ENUM_ENTRY()
    cl.DeviceReadCurrentEnumData(handle, PERCIPIO_STREAM_COLOR, color_enum_desc)
    print('current color image mode {}x{}'.format(cl.Width(color_enum_desc), cl.Height(color_enum_desc)))

    # Dump the color camera's calibration for inspection.
    color_calib_data = cl.DeviceReadCalibData(handle, PERCIPIO_STREAM_COLOR)
    color_calib_width = color_calib_data.Width()
    color_calib_height = color_calib_data.Height()
    color_calib_intr = color_calib_data.Intrinsic()
    color_calib_extr = color_calib_data.Extrinsic()
    color_calib_dis = color_calib_data.Distortion()
    print('color calib info:')
    print('\tcalib size :[{}x{}]'.format(color_calib_width, color_calib_height))
    print('\tcalib intr : {}'.format(color_calib_intr))
    print('\tcalib extr : {}'.format(color_calib_extr))
    print('\tcalib distortion : {}'.format(color_calib_dis))

    depth_fmt_list = cl.DeviceStreamFormatDump(handle, PERCIPIO_STREAM_DEPTH)
    if len(depth_fmt_list) != 0:
        print ('depth image format list:')
        for idx in range(len(depth_fmt_list)):
            fmt = depth_fmt_list[idx]
            print ('\t{} -size[{}x{}]\t-\t desc:{}'.format(idx, cl.Width(fmt), cl.Height(fmt), fmt.getDesc()))
        print('\tSelect {}'.format(fmt.getDesc()))
        # Use the first advertised depth format.
        cl.DeviceStreamFormatConfig(handle, PERCIPIO_STREAM_DEPTH, depth_fmt_list[0])

    depth_enum_desc = TY_ENUM_ENTRY()
    cl.DeviceReadCurrentEnumData(handle, PERCIPIO_STREAM_DEPTH, depth_enum_desc)
    print('current depth image mode {}x{}'.format(cl.Width(depth_enum_desc), cl.Height(depth_enum_desc)))

    # Dump the depth camera's calibration for inspection.
    depth_calib_data = cl.DeviceReadCalibData(handle, PERCIPIO_STREAM_DEPTH)
    depth_calib_width = depth_calib_data.Width()
    depth_calib_height = depth_calib_data.Height()
    depth_calib_intr = depth_calib_data.Intrinsic()
    depth_calib_extr = depth_calib_data.Extrinsic()
    depth_calib_dis = depth_calib_data.Distortion()
    # NOTE(review): "delth" is a typo for "depth" in the original output.
    print('delth calib info:')
    print('\tcalib size :[{}x{}]'.format(depth_calib_width, depth_calib_height))
    print('\tcalib intr : {}'.format(depth_calib_intr))
    print('\tcalib extr : {}'.format(depth_calib_extr))
    print('\tcalib distortion : {}'.format(depth_calib_dis))

    err = cl.DeviceLoadDefaultParameters(handle)
    if err:
        print('Load default parameters fail: ', end='')
        print(cl.TYGetLastErrorCodedescription())
    else:
        print('Load default parameters successful')

    err=cl.DeviceStreamEnable(handle, PERCIPIO_STREAM_COLOR | PERCIPIO_STREAM_DEPTH)
    if err:
        print('device stream enable err:{}'.format(err))
        return

    # Reusable SDK buffers for decoded color and rendered depth frames.
    rgb_image = image_data()
    depth_render = image_data()
    cl.DeviceStreamOn(handle)

    # Live preview loop: blocks on frame arrival (-1 = no read timeout).
    while True:
        if event.IsOffline():
            break
        image_list = cl.DeviceStreamRead(handle, -1)
        for i in range(len(image_list)):
            frame = image_list[i]
            if frame.streamID == PERCIPIO_STREAM_DEPTH:
                cl.DeviceStreamDepthRender(frame, depth_render)
                arr = depth_render.as_nparray()
                cv2.imshow('depth',arr)
            if frame.streamID == PERCIPIO_STREAM_COLOR:
                cl.DeviceStreamImageDecode(frame, rgb_image)
                arr = rgb_image.as_nparray()
                cv2.imshow('color',arr)
        k = cv2.waitKey(10)
        if k==ord('q'):
            break

    cl.DeviceStreamOff(handle)
    cl.Close(handle)
    pass
|
||||
|
||||
# Script entry point: run the live preview when executed directly.
if __name__=='__main__':
    main()
|
||||
|
||||
103
Vision/tool/tuyang/frame_fetchIR.py
Normal file
103
Vision/tool/tuyang/frame_fetchIR.py
Normal file
@ -0,0 +1,103 @@
|
||||
'''
|
||||
Description:
|
||||
Author: zxy
|
||||
Date: 2023-07-14 19:12:19
|
||||
LastEditors: zxy
|
||||
LastEditTime: 2023-07-18 12:07:14
|
||||
'''
|
||||
import pcammls
|
||||
from pcammls import *
|
||||
import cv2
|
||||
import numpy
|
||||
import sys
|
||||
import os
|
||||
|
||||
class PythonPercipioDeviceEvent(pcammls.DeviceEvent):
    """SDK event callback that remembers whether the device went offline."""

    # Flips to True once a TY_EVENT_DEVICE_OFFLINE event arrives.
    Offline = False

    def __init__(self):
        pcammls.DeviceEvent.__init__(self)

    def run(self, handle, eventID):
        # Called by the SDK for every device event; only offline matters here.
        if eventID == TY_EVENT_DEVICE_OFFLINE:
            print('=== Event Callback: Device Offline!')
            self.Offline = True
        return 0

    def IsOffline(self):
        return self.Offline
|
||||
|
||||
def main():
    """Grab left and right IR frames from a Percipio camera and display them."""
    cl = PercipioSDK()

    # Enumerate attached devices and let the user pick one.
    dev_list = cl.ListDevice()
    for idx, dev in enumerate(dev_list):
        print('{} -- {} \t {}'.format(idx, dev.id, dev.iface.id))
    if not dev_list:
        print('no device')
        return
    selected_idx = 0 if len(dev_list) == 1 else int(input('select a device:'))
    if selected_idx < 0 or selected_idx >= len(dev_list):
        return

    sn = dev_list[selected_idx].id

    handle = cl.Open(sn)
    if not cl.isValidHandle(handle):
        print('no device found : ', end='')
        print(cl.TYGetLastErrorCodedescription())
        return

    # Track device-offline events raised while streaming.
    event = PythonPercipioDeviceEvent()
    cl.DeviceRegiststerCallBackEvent(event)

    # Fixed laser power (80) instead of automatic control.
    cl.DeviceControlLaserPowerAutoControlEnable(handle, False)
    cl.DeviceControlLaserPowerConfig(handle, 80)

    err = cl.DeviceLoadDefaultParameters(handle)
    if err:
        print('Load default parameters fail: ', end='')
        print(cl.TYGetLastErrorCodedescription())
    else:
        print('Load default parameters successful')

    err = cl.DeviceStreamEnable(handle, PERCIPIO_STREAM_IR_LEFT | PERCIPIO_STREAM_IR_RIGHT)
    if err:
        print('device stream enable err:{}'.format(err))
        return

    img_ir = image_data()
    cl.DeviceStreamOn(handle)

    while True:
        if event.IsOffline():
            break

        # Blocking read with a 2 s timeout; both IR streams share one buffer.
        image_list = cl.DeviceStreamRead(handle, 2000)
        for frame in image_list:
            if frame.streamID == PERCIPIO_STREAM_IR_LEFT:
                cl.DeviceStreamIRRender(frame, img_ir)
                cv2.imshow('leftir', img_ir.as_nparray())
            if frame.streamID == PERCIPIO_STREAM_IR_RIGHT:
                cl.DeviceStreamIRRender(frame, img_ir)
                cv2.imshow('right ir', img_ir.as_nparray())

        if cv2.waitKey(10) == ord('q'):
            break

    cl.DeviceStreamOff(handle)
    cl.Close(handle)


if __name__ == '__main__':
    main()
|
||||
|
||||
97
Vision/tool/tuyang/frame_isp.py
Normal file
97
Vision/tool/tuyang/frame_isp.py
Normal file
@ -0,0 +1,97 @@
|
||||
import pcammls
|
||||
from pcammls import *
|
||||
import cv2
|
||||
import numpy
|
||||
import sys
|
||||
import os
|
||||
|
||||
class PythonPercipioDeviceEvent(pcammls.DeviceEvent):
    """SDK event callback that records a device-offline notification."""

    # Becomes True after a TY_EVENT_DEVICE_OFFLINE event.
    Offline = False

    def __init__(self):
        pcammls.DeviceEvent.__init__(self)

    def run(self, handle, eventID):
        # SDK callback entry point; ignore everything except going offline.
        if eventID == TY_EVENT_DEVICE_OFFLINE:
            print('=== Event Callback: Device Offline!')
            self.Offline = True
        return 0

    def IsOffline(self):
        return self.Offline
|
||||
|
||||
def main():
    """Stream color images with software ISP enabled and display them."""
    cl = PercipioSDK()

    # Enumerate attached devices and let the user pick one.
    dev_list = cl.ListDevice()
    for idx, dev in enumerate(dev_list):
        print('{} -- {} \t {}'.format(idx, dev.id, dev.iface.id))
    if not dev_list:
        print('no device')
        return
    selected_idx = 0 if len(dev_list) == 1 else int(input('select a device:'))
    if selected_idx < 0 or selected_idx >= len(dev_list):
        return

    sn = dev_list[selected_idx].id

    handle = cl.Open(sn)
    if not cl.isValidHandle(handle):
        print('no device found : ', end='')
        print(cl.TYGetLastErrorCodedescription())
        return

    # Track device-offline events raised while streaming.
    event = PythonPercipioDeviceEvent()
    cl.DeviceRegiststerCallBackEvent(event)

    err = cl.DeviceStreamEnable(handle, PERCIPIO_STREAM_COLOR)
    if err:
        print('device stream enable err:{}'.format(err))
        return

    color_fmt_list = cl.DeviceStreamFormatDump(handle, PERCIPIO_STREAM_COLOR)
    if len(color_fmt_list) != 0:
        print('color image format list:')
        for idx in range(len(color_fmt_list)):
            fmt = color_fmt_list[idx]
            print('\t{} -size[{}x{}]\t-\t desc:{}'.format(idx, cl.Width(fmt), cl.Height(fmt), fmt.getDesc()))
        # `fmt` still holds the last dumped entry, which is the one selected.
        print('\tSelect {}'.format(fmt.getDesc()))
        cl.DeviceStreamFormatConfig(handle, PERCIPIO_STREAM_COLOR, color_fmt_list[len(color_fmt_list) - 1])
    else:
        print('device has no color stream.')
        cl.Close(handle)
        return

    # Enable RGB image software ISP processing.
    cl.DeviceColorStreamIspEnable(handle, True)

    rgb_image = image_data()

    cl.DeviceStreamOn(handle)

    while True:
        if event.IsOffline():
            break
        image_list = cl.DeviceStreamRead(handle, -1)
        for frame in image_list:
            if frame.streamID == PERCIPIO_STREAM_COLOR:
                cl.DeviceStreamImageDecode(frame, rgb_image)
                cv2.imshow('color', rgb_image.as_nparray())
        if cv2.waitKey(10) == ord('q'):
            break

    cl.DeviceStreamOff(handle)
    cl.Close(handle)


if __name__ == '__main__':
    main()
|
||||
|
||||
157
Vision/tool/tuyang/frame_registration.py
Normal file
157
Vision/tool/tuyang/frame_registration.py
Normal file
@ -0,0 +1,157 @@
|
||||
'''
|
||||
Description:
|
||||
Author: zxy
|
||||
Date: 2023-07-14 09:48:00
|
||||
LastEditors: zxy
|
||||
LastEditTime: 2024-01-02 11:36:57
|
||||
'''
|
||||
import pcammls
|
||||
from pcammls import *
|
||||
import cv2
|
||||
import numpy
|
||||
import sys
|
||||
import os
|
||||
|
||||
class PythonPercipioDeviceEvent(pcammls.DeviceEvent):
    """Event sink for the Percipio SDK; latches the offline state."""

    # True once TY_EVENT_DEVICE_OFFLINE has been observed.
    Offline = False

    def __init__(self):
        pcammls.DeviceEvent.__init__(self)

    def run(self, handle, eventID):
        # Invoked by the SDK for each device event.
        if eventID == TY_EVENT_DEVICE_OFFLINE:
            print('=== Event Callback: Device Offline!')
            self.Offline = True
        return 0

    def IsOffline(self):
        return self.Offline
|
||||
|
||||
def main():
    """Demonstrate depth<->color registration and display the results.

    Mode 0 maps the depth image into the color camera's coordinate frame;
    mode 1 maps the (undistorted) color image into the depth frame.

    Bug fix: the registration-mode range check previously tested
    `selected_idx` (the device index) instead of the just-entered
    `registration_mode`, so an out-of-range mode was never corrected.
    """
    cl = PercipioSDK()

    # Enumerate attached devices and let the user pick one.
    dev_list = cl.ListDevice()
    for idx in range(len(dev_list)):
        dev = dev_list[idx]
        print('{} -- {} \t {}'.format(idx, dev.id, dev.iface.id))
    if len(dev_list) == 0:
        print('no device')
        return
    if len(dev_list) == 1:
        selected_idx = 0
    else:
        selected_idx = int(input('select a device:'))
    if selected_idx < 0 or selected_idx >= len(dev_list):
        return

    sn = dev_list[selected_idx].id

    handle = cl.Open(sn)
    if not cl.isValidHandle(handle):
        err = cl.TYGetLastErrorCodedescription()
        print('no device found : ', end='')
        print(err)
        return

    # Track device-offline events raised while streaming.
    event = PythonPercipioDeviceEvent()
    cl.DeviceRegiststerCallBackEvent(event)

    color_fmt_list = cl.DeviceStreamFormatDump(handle, PERCIPIO_STREAM_COLOR)
    if len(color_fmt_list) == 0:
        print('device has no color stream.')
        return

    print('color image format list:')
    for idx in range(len(color_fmt_list)):
        fmt = color_fmt_list[idx]
        print('\t{} -size[{}x{}]\t-\t desc:{}'.format(idx, cl.Width(fmt), cl.Height(fmt), fmt.getDesc()))
    cl.DeviceStreamFormatConfig(handle, PERCIPIO_STREAM_COLOR, color_fmt_list[0])

    depth_fmt_list = cl.DeviceStreamFormatDump(handle, PERCIPIO_STREAM_DEPTH)
    if len(depth_fmt_list) == 0:
        print('device has no depth stream.')
        return

    print('depth image format list:')
    for idx in range(len(depth_fmt_list)):
        fmt = depth_fmt_list[idx]
        print('\t{} -size[{}x{}]\t-\t desc:{}'.format(idx, cl.Width(fmt), cl.Height(fmt), fmt.getDesc()))
    cl.DeviceStreamFormatConfig(handle, PERCIPIO_STREAM_DEPTH, depth_fmt_list[0])

    err = cl.DeviceLoadDefaultParameters(handle)
    if err:
        print('Load default parameters fail: ', end='')
        print(cl.TYGetLastErrorCodedescription())
    else:
        print('Load default parameters successful')

    # Depth values are raw units times this scale factor.
    scale_unit = cl.DeviceReadCalibDepthScaleUnit(handle)
    print('depth image scale unit :{}'.format(scale_unit))

    depth_calib = cl.DeviceReadCalibData(handle, PERCIPIO_STREAM_DEPTH)
    color_calib = cl.DeviceReadCalibData(handle, PERCIPIO_STREAM_COLOR)

    err = cl.DeviceStreamEnable(handle, PERCIPIO_STREAM_COLOR | PERCIPIO_STREAM_DEPTH)
    if err:
        print('device stream enable err:{}'.format(err))
        return

    print('{} -- {} \t'.format(0, "Map depth to color coordinate(suggest)"))
    print('{} -- {} \t'.format(1, "Map color to depth coordinate"))
    registration_mode = int(input('select registration mode(0 or 1):'))
    # FIX: validate the mode that was just entered; the original checked the
    # unrelated device index `selected_idx`.
    if registration_mode < 0 or registration_mode >= 2:
        registration_mode = 0

    cl.DeviceStreamOn(handle)
    img_registration_depth = image_data()
    img_registration_render = image_data()
    img_parsed_color = image_data()
    img_undistortion_color = image_data()
    img_registration_color = image_data()
    while True:
        if event.IsOffline():
            break
        image_list = cl.DeviceStreamRead(handle, 2000)
        # Only process reads that delivered both a depth and a color frame.
        if len(image_list) == 2:
            for i in range(len(image_list)):
                frame = image_list[i]
                if frame.streamID == PERCIPIO_STREAM_DEPTH:
                    img_depth = frame
                if frame.streamID == PERCIPIO_STREAM_COLOR:
                    img_color = frame

            if 0 == registration_mode:
                # Re-project the depth image into the color camera's frame.
                cl.DeviceStreamMapDepthImageToColorCoordinate(depth_calib, img_depth, scale_unit, color_calib, img_color.width, img_color.height, img_registration_depth)

                cl.DeviceStreamDepthRender(img_registration_depth, img_registration_render)
                mat_depth_render = img_registration_render.as_nparray()
                cv2.imshow('registration', mat_depth_render)

                cl.DeviceStreamImageDecode(img_color, img_parsed_color)
                cl.DeviceStreamDoUndistortion(color_calib, img_parsed_color, img_undistortion_color)
                mat_undistortion_color = img_undistortion_color.as_nparray()
                cv2.imshow('undistortion rgb', mat_undistortion_color)
            else:
                # Re-project the undistorted color image into the depth frame.
                cl.DeviceStreamImageDecode(img_color, img_parsed_color)
                cl.DeviceStreamDoUndistortion(color_calib, img_parsed_color, img_undistortion_color)

                cl.DeviceStreamMapRGBImageToDepthCoordinate(depth_calib, img_depth, scale_unit, color_calib, img_undistortion_color, img_registration_color)

                cl.DeviceStreamDepthRender(img_depth, img_registration_render)
                mat_depth_render = img_registration_render.as_nparray()
                cv2.imshow('depth', mat_depth_render)

                mat_registration_color = img_registration_color.as_nparray()
                cv2.imshow('registration rgb', mat_registration_color)

        k = cv2.waitKey(10)
        if k == ord('q'):
            break

    cl.DeviceStreamOff(handle)
    cl.Close(handle)


if __name__ == '__main__':
    main()
|
||||
|
||||
111
Vision/tool/tuyang/frame_trigger.py
Normal file
111
Vision/tool/tuyang/frame_trigger.py
Normal file
@ -0,0 +1,111 @@
|
||||
'''
|
||||
Description:
|
||||
Author: zxy
|
||||
Date: 2023-07-13 15:38:51
|
||||
LastEditors: zxy
|
||||
LastEditTime: 2023-07-18 11:57:37
|
||||
'''
|
||||
import pcammls
|
||||
from pcammls import *
|
||||
import cv2
|
||||
import numpy
|
||||
import sys
|
||||
import os
|
||||
|
||||
class PythonPercipioDeviceEvent(pcammls.DeviceEvent):
    """Callback object that notices when the camera drops offline."""

    # Latched to True by run() on a TY_EVENT_DEVICE_OFFLINE event.
    Offline = False

    def __init__(self):
        pcammls.DeviceEvent.__init__(self)

    def run(self, handle, eventID):
        # SDK-invoked for every device event; only offline is handled.
        if eventID == TY_EVENT_DEVICE_OFFLINE:
            print('=== Event Callback: Device Offline!')
            self.Offline = True
        return 0

    def IsOffline(self):
        return self.Offline
|
||||
|
||||
def main():
    """Capture depth frames in software-trigger mode and display them.

    Fix: removed an unused `arr = frame.as_nparray()` temporary inside the
    read loop (its result was never used).
    """
    cl = PercipioSDK()

    # Enumerate attached devices and let the user pick one.
    dev_list = cl.ListDevice()
    for idx in range(len(dev_list)):
        dev = dev_list[idx]
        print('{} -- {} \t {}'.format(idx, dev.id, dev.iface.id))
    if len(dev_list) == 0:
        print('no device')
        return
    if len(dev_list) == 1:
        selected_idx = 0
    else:
        selected_idx = int(input('select a device:'))
    if selected_idx < 0 or selected_idx >= len(dev_list):
        return

    sn = dev_list[selected_idx].id

    handle = cl.Open(sn)
    if not cl.isValidHandle(handle):
        err = cl.TYGetLastErrorCodedescription()
        print('no device found : ', end='')
        print(err)
        return

    # Track device-offline events raised while streaming.
    event = PythonPercipioDeviceEvent()
    cl.DeviceRegiststerCallBackEvent(event)

    depth_fmt_list = cl.DeviceStreamFormatDump(handle, PERCIPIO_STREAM_DEPTH)
    if len(depth_fmt_list) == 0:
        print('device has no depth stream.')
        return

    print('depth image format list:')
    for idx in range(len(depth_fmt_list)):
        fmt = depth_fmt_list[idx]
        print('\t{} -size[{}x{}]\t-\t desc:{}'.format(idx, cl.Width(fmt), cl.Height(fmt), fmt.getDesc()))
    cl.DeviceStreamFormatConfig(handle, PERCIPIO_STREAM_DEPTH, depth_fmt_list[0])

    # 1 = software-trigger mode: the camera produces a frame only on demand.
    cl.DeviceControlTriggerModeEnable(handle, 1)

    err = cl.DeviceLoadDefaultParameters(handle)
    if err:
        print('Load default parameters fail: ', end='')
        print(cl.TYGetLastErrorCodedescription())
    else:
        print('Load default parameters successful')

    err = cl.DeviceStreamEnable(handle, PERCIPIO_STREAM_DEPTH)
    if err:
        print('device stream enable err:{}'.format(err))
        return

    depth_render = image_data()
    cl.DeviceStreamOn(handle)

    while True:
        if event.IsOffline():
            break

        # Fire one trigger, then wait (long 20 s timeout) for the frame.
        cl.DeviceControlTriggerModeSendTriggerSignal(handle)
        image_list = cl.DeviceStreamRead(handle, 20000)
        for i in range(len(image_list)):
            frame = image_list[i]
            if frame.streamID == PERCIPIO_STREAM_DEPTH:
                cl.DeviceStreamDepthRender(frame, depth_render)
                mat_depth_render = depth_render.as_nparray()
                cv2.imshow('depth', mat_depth_render)
        k = cv2.waitKey(10)
        if k == ord('q'):
            break

    cl.DeviceStreamOff(handle)
    cl.Close(handle)


if __name__ == '__main__':
    main()
|
||||
|
||||
106
Vision/tool/tuyang/multidevice_fetch.py
Normal file
106
Vision/tool/tuyang/multidevice_fetch.py
Normal file
@ -0,0 +1,106 @@
|
||||
import pcammls
|
||||
from pcammls import *
|
||||
import cv2
|
||||
import numpy
|
||||
import sys
|
||||
import os
|
||||
|
||||
class PythonPercipioDeviceEvent(pcammls.DeviceEvent):
    """Shared offline-event latch for all opened devices."""

    # True once any registered device reports TY_EVENT_DEVICE_OFFLINE.
    Offline = False

    def __init__(self):
        pcammls.DeviceEvent.__init__(self)

    def run(self, handle, eventID):
        # SDK callback; only the offline event changes state.
        if eventID == TY_EVENT_DEVICE_OFFLINE:
            print('=== Event Callback: Device Offline!')
            self.Offline = True
        return 0

    def IsOffline(self):
        return self.Offline
|
||||
|
||||
def main():
    """Stream depth images from every attached device at the same time."""
    cl = PercipioSDK()

    dev_list = cl.ListDevice()
    for idx, dev in enumerate(dev_list):
        print('{} -- {} \t {}'.format(idx, dev.id, dev.iface.id))
    if not dev_list:
        print('no device')
        return

    # register offline event
    event = PythonPercipioDeviceEvent()
    cl.DeviceRegiststerCallBackEvent(event)

    # Serial numbers of every enumerated device.
    sn = [dev.id for dev in dev_list]

    # Open every device; abort on the first failure.
    handle = [0] * len(dev_list)
    for i in range(len(dev_list)):
        handle[i] = cl.Open(sn[i])
        if not cl.isValidHandle(handle[i]):
            print('no device found : ', end='')
            print(cl.TYGetLastErrorCodedescription())
            return

    # Configure and start the depth stream on each device.
    for i in range(len(dev_list)):
        depth_fmt_list = cl.DeviceStreamFormatDump(handle[i], PERCIPIO_STREAM_DEPTH)
        print('depth image format list:')
        for idx in range(len(depth_fmt_list)):
            fmt = depth_fmt_list[idx]
            print('\t{} -size[{}x{}]\t-\t desc:{}'.format(idx, cl.Width(fmt), cl.Height(fmt), fmt.getDesc()))
        cl.DeviceStreamFormatConfig(handle[i], PERCIPIO_STREAM_DEPTH, depth_fmt_list[0])

        err = cl.DeviceLoadDefaultParameters(handle[i])
        if err:
            print('Load default parameters fail: ', end='')
            print(cl.TYGetLastErrorCodedescription())
        else:
            print('Load default parameters successful')

        err = cl.DeviceStreamEnable(handle[i], PERCIPIO_STREAM_DEPTH)
        if err:
            print('device stream enable err:{}'.format(err))
            return

        cl.DeviceStreamOn(handle[i])

    # One render buffer per device.
    depth_render = [image_data() for _ in dev_list]

    while True:
        if event.IsOffline():
            break

        for m in range(len(dev_list)):
            image_list = cl.DeviceStreamRead(handle[m], -1)
            for frame in image_list:
                if frame.streamID == PERCIPIO_STREAM_DEPTH:
                    cl.DeviceStreamDepthRender(frame, depth_render[m])
                    cv2.imshow(sn[m], depth_render[m].as_nparray())

        if cv2.waitKey(10) == ord('q'):
            break

    for i in range(len(dev_list)):
        cl.DeviceStreamOff(handle[i])

    for i in range(len(dev_list)):
        cl.Close(handle[i])


if __name__ == '__main__':
    main()
|
||||
|
||||
109
Vision/tool/tuyang/parameter_settings.py
Normal file
109
Vision/tool/tuyang/parameter_settings.py
Normal file
@ -0,0 +1,109 @@
|
||||
'''
|
||||
Description:
|
||||
Author: zxy
|
||||
Date: 2023-07-14 09:48:00
|
||||
LastEditors: zxy
|
||||
LastEditTime: 2024-11-25 11:36:57
|
||||
'''
|
||||
import pcammls
|
||||
from pcammls import *
|
||||
import cv2
|
||||
import numpy
|
||||
import sys
|
||||
import os
|
||||
|
||||
|
||||
def main():
    """Query and set RGB camera parameters (AEC, exposure time, image mode),
    then stream and display color frames.

    Fixes: no longer shadows the builtin `list`; prompts for exposure/mode
    only when the parameter is supported; skips a display iteration when a
    read returns no color frame (previously dereferenced an unbound name).
    """
    cl = PercipioSDK()

    # Enumerate attached devices and let the user pick one.
    dev_list = cl.ListDevice()
    for idx in range(len(dev_list)):
        dev = dev_list[idx]
        print('{} -- {} \t {}'.format(idx, dev.id, dev.iface.id))
    if len(dev_list) == 0:
        print('no device')
        return
    if len(dev_list) == 1:
        selected_idx = 0
    else:
        selected_idx = int(input('select a device:'))
    if selected_idx < 0 or selected_idx >= len(dev_list):
        return

    sn = dev_list[selected_idx].id

    handle = cl.Open(sn)
    if not cl.isValidHandle(handle):
        err = cl.TYGetLastErrorCodedescription()
        print('no device found : ', end='')
        print(err)
        return

    err = cl.DeviceStreamEnable(handle, PERCIPIO_STREAM_COLOR)
    if err:
        print('device stream enable err:{}'.format(err))
        return

    # bool: color automatic exposure control
    aec = cl.DeviceGetParameter(handle, TY_COMPONENT_RGB_CAM, TY_BOOL_AUTO_EXPOSURE)
    if aec.isEmpty():
        print('aec is not support!')
    else:
        print('current aec status : {}'.format(aec.toBool()))

    # Disable color AEC so a manual exposure time takes effect.
    aec = cl.DevParamFromBool(False)
    err = cl.DeviceSetParameter(handle, TY_COMPONENT_RGB_CAM, TY_BOOL_AUTO_EXPOSURE, aec)
    print('aec close result : ', end='')
    print(err)

    # int: color exposure time
    exp = cl.DeviceGetParameter(handle, TY_COMPONENT_RGB_CAM, TY_INT_EXPOSURE_TIME)
    if exp.isEmpty():
        print('exposure time is not support!')
    else:
        print('current exposure time status : {}, range : {} - {}, inc : {}'.format(exp.toInt(), exp.mMin(), exp.mMax(), exp.mInc()))

        exposure_time = int(input('Enter exposure time:'))
        exp = cl.DevParamFromInt(exposure_time)
        err = cl.DeviceSetParameter(handle, TY_COMPONENT_RGB_CAM, TY_INT_EXPOSURE_TIME, exp)
        print('set color exposure time result : ', end='')
        print(err)

    image_mode = cl.DeviceGetParameter(handle, TY_COMPONENT_RGB_CAM, TY_ENUM_IMAGE_MODE)
    if image_mode.isEmpty():
        print('color image mode is not support!')
    else:
        # FIX: renamed from `list`, which shadowed the builtin.
        mode_list = image_mode.eList()
        for idx in range(len(mode_list)):
            mode = mode_list[idx]
            print('{}: {}x{} - {}'.format(idx, cl.Width(mode), cl.Height(mode), cl.Description(mode)))

        index = int(input('Enter image mode index:'))
        image_mode = cl.DevParamFromEnum(cl.Value(mode_list[index]))
        err = cl.DeviceSetParameter(handle, TY_COMPONENT_RGB_CAM, TY_ENUM_IMAGE_MODE, image_mode)

    cl.DeviceStreamOn(handle)
    img_parsed_color = image_data()
    while True:
        image_list = cl.DeviceStreamRead(handle, 2000)
        img_color = None
        for i in range(len(image_list)):
            frame = image_list[i]
            if frame.streamID == PERCIPIO_STREAM_COLOR:
                img_color = frame

        # FIX: guard against a read that produced no color frame.
        if img_color is not None:
            cl.DeviceStreamImageDecode(img_color, img_parsed_color)
            mat_undistortion_color = img_parsed_color.as_nparray()
            cv2.imshow('rgb', mat_undistortion_color)

        k = cv2.waitKey(10)
        if k == ord('q'):
            break

    cl.DeviceStreamOff(handle)
    cl.Close(handle)


if __name__ == '__main__':
    main()
|
||||
|
||||
BIN
Vision/tool/tuyang/pcammls.exp
Normal file
BIN
Vision/tool/tuyang/pcammls.exp
Normal file
Binary file not shown.
BIN
Vision/tool/tuyang/pcammls.lib
Normal file
BIN
Vision/tool/tuyang/pcammls.lib
Normal file
Binary file not shown.
10936
Vision/tool/tuyang/pcammls.py
Normal file
10936
Vision/tool/tuyang/pcammls.py
Normal file
File diff suppressed because it is too large
Load Diff
129
Vision/tool/tuyang/point3d_fetch.py
Normal file
129
Vision/tool/tuyang/point3d_fetch.py
Normal file
@ -0,0 +1,129 @@
|
||||
'''
|
||||
Description:
|
||||
Author: zxy
|
||||
Date: 2023-07-18 09:55:47
|
||||
LastEditors: zxy
|
||||
LastEditTime: 2023-12-28 15:49:28
|
||||
'''
|
||||
import pcammls
|
||||
from pcammls import *
|
||||
import cv2
|
||||
import numpy
|
||||
import sys
|
||||
import os
|
||||
|
||||
class PythonPercipioDeviceEvent(pcammls.DeviceEvent):
    """Event callback that flags when the depth camera goes offline."""

    # Set once TY_EVENT_DEVICE_OFFLINE is delivered.
    Offline = False

    def __init__(self):
        pcammls.DeviceEvent.__init__(self)

    def run(self, handle, eventID):
        # Called by the SDK for every event on the registered device.
        if eventID == TY_EVENT_DEVICE_OFFLINE:
            print('=== Event Callback: Device Offline!')
            self.Offline = True
        return 0

    def IsOffline(self):
        return self.Offline
|
||||
|
||||
def main():
    """Fetch depth frames, map them to 3D point clouds, and display them.

    Fix: corrected a typo in the calibration log message
    ('delth calib info:' -> 'depth calib info:').
    """
    cl = PercipioSDK()

    # Enumerate attached devices and let the user pick one.
    dev_list = cl.ListDevice()
    for idx in range(len(dev_list)):
        dev = dev_list[idx]
        print('{} -- {} \t {}'.format(idx, dev.id, dev.iface.id))
    if len(dev_list) == 0:
        print('no device')
        return
    if len(dev_list) == 1:
        selected_idx = 0
    else:
        selected_idx = int(input('select a device:'))
    if selected_idx < 0 or selected_idx >= len(dev_list):
        return

    sn = dev_list[selected_idx].id

    handle = cl.Open(sn)
    if not cl.isValidHandle(handle):
        err = cl.TYGetLastErrorCodedescription()
        print('no device found : ', end='')
        print(err)
        return

    # Track device-offline events raised while streaming.
    event = PythonPercipioDeviceEvent()
    cl.DeviceRegiststerCallBackEvent(event)

    depth_fmt_list = cl.DeviceStreamFormatDump(handle, PERCIPIO_STREAM_DEPTH)
    if len(depth_fmt_list) == 0:
        print('device has no depth stream.')
        return

    print('depth image format list:')
    for idx in range(len(depth_fmt_list)):
        fmt = depth_fmt_list[idx]
        print('\t{} -size[{}x{}]\t-\t desc:{}'.format(idx, cl.Width(fmt), cl.Height(fmt), fmt.getDesc()))
    cl.DeviceStreamFormatConfig(handle, PERCIPIO_STREAM_DEPTH, depth_fmt_list[0])

    # Depth calibration data is needed for the depth->3D mapping below.
    depth_calib_data = cl.DeviceReadCalibData(handle, PERCIPIO_STREAM_DEPTH)
    depth_calib_width = depth_calib_data.Width()
    depth_calib_height = depth_calib_data.Height()
    depth_calib_intr = depth_calib_data.Intrinsic()
    depth_calib_extr = depth_calib_data.Extrinsic()
    depth_calib_dis = depth_calib_data.Distortion()
    # FIX: message typo 'delth' -> 'depth'.
    print('depth calib info:')
    print('\tcalib size :[{}x{}]'.format(depth_calib_width, depth_calib_height))
    print('\tcalib intr : {}'.format(depth_calib_intr))
    print('\tcalib extr : {}'.format(depth_calib_extr))
    print('\tcalib distortion : {}'.format(depth_calib_dis))

    err = cl.DeviceLoadDefaultParameters(handle)
    if err:
        print('Load default parameters fail: ', end='')
        print(cl.TYGetLastErrorCodedescription())
    else:
        print('Load default parameters successful')

    scale_unit = cl.DeviceReadCalibDepthScaleUnit(handle)
    print('depth image scale unit :{}'.format(scale_unit))

    err = cl.DeviceStreamEnable(handle, PERCIPIO_STREAM_DEPTH)
    if err:
        print('device stream enable err:{}'.format(err))
        return

    cl.DeviceStreamOn(handle)

    pointcloud_data_arr = pointcloud_data_list()
    while True:
        if event.IsOffline():
            break
        image_list = cl.DeviceStreamRead(handle, -1)

        for i in range(len(image_list)):
            frame = image_list[i]
            if frame.streamID == PERCIPIO_STREAM_DEPTH:
                cl.DeviceStreamMapDepthImageToPoint3D(frame, depth_calib_data, scale_unit, pointcloud_data_arr)
                sz = pointcloud_data_arr.size()
                print('get p3d size : {}'.format(sz))
                # Flat index of (roughly) the image center pixel.
                center = frame.width * frame.height / 2 + frame.width / 2

                # show p3d arr data
                p3d_nparray = pointcloud_data_arr.as_nparray()
                cv2.imshow('p3d', p3d_nparray)

                p3d = pointcloud_data_arr.get_value(int(center))
                print('\tp3d data : {} {} {}'.format(p3d.getX(), p3d.getY(), p3d.getZ()))
        k = cv2.waitKey(10)
        if k == ord('q'):
            break

    cl.DeviceStreamOff(handle)
    cl.Close(handle)


if __name__ == '__main__':
    main()
|
||||
|
||||
BIN
Vision/tool/tuyang/tycam.dll
Normal file
BIN
Vision/tool/tuyang/tycam.dll
Normal file
Binary file not shown.
BIN
Vision/tool/tuyang/tycam.lib
Normal file
BIN
Vision/tool/tuyang/tycam.lib
Normal file
Binary file not shown.
393
Vision/tool/utils.py
Normal file
393
Vision/tool/utils.py
Normal file
@ -0,0 +1,393 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: UTF-8 -*-
|
||||
'''
|
||||
@Project :AutoControlSystem-master
|
||||
@File :utils.py
|
||||
@IDE :PyCharm
|
||||
@Author :hjw
|
||||
@Date :2024/8/29 15:07
|
||||
'''
|
||||
|
||||
import numpy as np
|
||||
import cv2
|
||||
import psutil
|
||||
from psutil._common import bytes2human
|
||||
|
||||
|
||||
def uv_to_XY(cameraType, u, v):
    """Convert a pixel coordinate (u, v) to a camera-frame point at unit depth.

    Depth zc is fixed at 1, so the result is the normalized image-plane
    coordinate for the selected camera model.

    Args:
        cameraType: 'RVC' or 'Pe'; any other value yields (None, None, 1).
        u: pixel column.
        v: pixel row.

    Returns:
        (x, y, zc) with zc == 1; x and y are None for an unknown camera type.

    Hard-coded intrinsics (principal point u0/v0, focal lengths fx/fy):
        RVC: u0=739.70, v0=584.73, fx=2402.10, fy=2401.78
        Pe : u0=652.14, v0=500.26, fx=1048.36, fy=1048.36
    (Values taken from the calibration dumps in the original docstring.)
    """
    # (u0, v0, fx, fy) per supported camera model.
    intrinsics = {
        'RVC': (739.70, 584.73, 2402.10, 2401.78),
        'Pe': (652.14, 500.26, 1048.36, 1048.36),
    }
    zc = 1  # assumed unit depth
    params = intrinsics.get(cameraType)
    if params is None:
        return None, None, zc
    u0, v0, fx, fy = params
    return (u - u0) * zc / fx, (v - v0) * zc / fy, zc
|
||||
|
||||
|
||||
|
||||
def out_bounds_dete(pm_y, pm_x, piont_y, piont_x):
    """Clamp a point-cloud index into [0, pm_y) x [0, pm_x).

    Prints a warning (once per violated bound) when the index falls outside
    the point-cloud size, then returns the clamped (row, col) pair.
    """
    msg = '四坐标点超出点云大小'
    if piont_y >= pm_y:
        piont_y = pm_y - 1
        print(msg)
    if piont_y < 0:
        piont_y = 0
        print(msg)
    if piont_x >= pm_x:
        piont_x = pm_x - 1
        print(msg)
    if piont_x < 0:
        piont_x = 0
        print(msg)
    return piont_y, piont_x
|
||||
|
||||
def remove_nan_mean_value(pm, y, x, iter_max=50):
    """Read pm[y, x] from a point map, replacing a NaN by a neighborhood mean.

    If the point at (y, x) is invalid (NaN x-channel), search square windows
    of growing radius (1, 2, ... up to iter_max-1) around it and return the
    mean of all valid neighbors found once any window contains at least one.

    Args:
        pm: HxWx3 point map (x, y, z per pixel); NaN marks invalid points.
        y: row index; clamped into bounds first.
        x: column index; clamped into bounds first.
        iter_max: exclusive upper bound on the search radius.

    Returns:
        (x, y, z) floats, or (nan, nan, nan) if no valid neighbor was found
        within the allowed search radius.
    """
    y, x = out_bounds_dete(pm.shape[0], pm.shape[1], y, x)
    point_x, point_y, point_z = pm[y, x]
    if not np.isnan(point_x):
        # Point is already valid; return it directly.
        return point_x, point_y, point_z

    rows, cols = pm.shape[0], pm.shape[1]
    xs, ys, zs = [], [], []
    print('Nan值去除')
    radius = 1
    while radius < iter_max:
        # Top-left corner of the (2*radius+1)^2 window, clamped at 0.
        y_start = y - radius if y - radius > 0 else 0
        x_start = x - radius if x - radius > 0 else 0

        for dy in range(radius * 2 + 1):
            yy = y_start + dy
            if yy > rows - 1:
                continue
            for dx in range(radius * 2 + 1):
                xx = x_start + dx
                if xx > cols - 1:
                    continue
                if not np.isnan(pm[yy, xx][0]):
                    xs.append(pm[yy, xx][0])
                    ys.append(pm[yy, xx][1])
                    zs.append(pm[yy, xx][2])

        if xs:
            # At least one valid neighbor: return the component-wise mean.
            n = len(xs)
            return sum(xs) / n, sum(ys) / n, sum(zs) / n
        radius += 1

    print(f'在{iter_max}*{iter_max}范围中未找到有效值,所有点云值为无效值')
    return np.nan, np.nan, np.nan
|
||||
|
||||
def remove_nan(pm, y, x):
    """Read pm[y, x]; if it is NaN, scan up to 9 rows downward for a valid point.

    Args:
        pm: HxWx3 point map; NaN in the x-channel marks an invalid point.
        y: row index of the desired point.
        x: column index of the desired point.

    Returns:
        (x, y, z) of the first valid point found at rows y..y+9 in column x;
        if none is valid (or the scan hits the bottom edge first), the last
        value examined is returned and may still be NaN.
    """
    point_x, point_y, point_z = pm[y, x]
    if np.isnan(point_x):
        # FIX: bound the scan to the map height; the original indexed
        # pm[y+i, x] blindly and raised IndexError near the bottom edge.
        for yy in range(y, min(y + 10, pm.shape[0])):
            point_x, point_y, point_z = pm[yy, x]
            if not np.isnan(point_x):
                break
    return point_x, point_y, point_z
|
||||
|
||||
|
||||
def get_disk_space(path='C:'):
    """Return the free space on *path* as a bare number.

    NOTE(review): bytes2human yields a unit-suffixed string ('12.3G',
    '500.0M', ...). Stripping the last character means the returned number's
    unit depends on how much space is free — confirm callers expect that.
    """
    usage = psutil.disk_usage(path)
    human_free = bytes2human(usage.free)
    # Drop the trailing unit letter and parse the numeric part.
    return float(human_free[:-1])
|
||||
def find_position(Depth_Z, RegionalArea, RegionalArea_Threshold, first_depth=True):
    """Pick the index of the best detection candidate.

    When *first_depth* is True the candidate with the smallest depth
    (Depth_Z) wins; otherwise the candidate with the largest region area
    (RegionalArea) wins.  RegionalArea_Threshold is accepted for interface
    compatibility but is not used by the current selection logic.

    Returns:
        The winning index, or None when the relevant list is empty.
    """
    if first_depth == True:
        if len(Depth_Z) == 0:
            return None
        # Closest (smallest Z) candidate; ties keep the earliest index.
        return min(range(len(Depth_Z)), key=lambda k: Depth_Z[k])
    if len(RegionalArea) == 0:
        return None
    # Largest-area candidate; ties keep the earliest index.
    return max(range(len(RegionalArea)), key=lambda k: RegionalArea[k])
|
||||
|
||||
# Class labels used by the detector; the list index is the class id.
class_names = ['box', 'other']

# Create a list of colors for each class (one BGR triple of floats in
# [0, 255) per class); the fixed seed keeps colors stable across runs.
rng = np.random.default_rng(3)
colors = rng.uniform(0, 255, size=(len(class_names), 3))
|
||||
|
||||
|
||||
def nms(boxes, scores, iou_threshold):
    """Greedy non-maximum suppression.

    Repeatedly keeps the highest-scoring remaining box and discards every
    other box whose IoU with it reaches *iou_threshold*.

    Args:
        boxes: (N, 4) array of boxes in (x1, y1, x2, y2) format.
        scores: (N,) confidence scores.
        iou_threshold: boxes with IoU >= this value are suppressed.

    Returns:
        List of indices (into *boxes*) of the kept boxes, best score first.
    """
    order = np.argsort(scores)[::-1]  # highest score first
    keep = []

    while order.size > 0:
        best = order[0]
        keep.append(best)
        rest = order[1:]
        if rest.size == 0:
            break

        # IoU of the chosen box against every remaining candidate.
        ix1 = np.maximum(boxes[best, 0], boxes[rest, 0])
        iy1 = np.maximum(boxes[best, 1], boxes[rest, 1])
        ix2 = np.minimum(boxes[best, 2], boxes[rest, 2])
        iy2 = np.minimum(boxes[best, 3], boxes[rest, 3])
        intersection = np.maximum(0, ix2 - ix1) * np.maximum(0, iy2 - iy1)

        area_best = (boxes[best, 2] - boxes[best, 0]) * (boxes[best, 3] - boxes[best, 1])
        area_rest = (boxes[rest, 2] - boxes[rest, 0]) * (boxes[rest, 3] - boxes[rest, 1])
        ious = intersection / (area_best + area_rest - intersection)

        # Survivors are the candidates strictly below the threshold.
        order = rest[ious < iou_threshold]

    return keep
|
||||
|
||||
|
||||
def compute_iou(box, boxes):
    """Compute the IoU between one box and an array of boxes.

    Args:
        box: single box (x1, y1, x2, y2).
        boxes: (N, 4) array of boxes in the same format.

    Returns:
        (N,) array of IoU values in [0, 1].
    """
    # Intersection rectangle, clamped to zero when boxes do not overlap.
    inter_w = np.minimum(box[2], boxes[:, 2]) - np.maximum(box[0], boxes[:, 0])
    inter_h = np.minimum(box[3], boxes[:, 3]) - np.maximum(box[1], boxes[:, 1])
    intersection_area = np.maximum(0, inter_w) * np.maximum(0, inter_h)

    # Union = sum of both areas minus the doubly-counted overlap.
    box_area = (box[2] - box[0]) * (box[3] - box[1])
    boxes_area = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    union_area = box_area + boxes_area - intersection_area

    return intersection_area / union_area
|
||||
|
||||
|
||||
def xywh2xyxy(x):
    """Convert boxes from (cx, cy, w, h) to (x1, y1, x2, y2) corner format.

    Works on any array whose last axis holds the 4 box values; the input
    array is not modified.
    """
    out = np.copy(x)
    half_w = x[..., 2] / 2
    half_h = x[..., 3] / 2
    out[..., 0] = x[..., 0] - half_w  # left
    out[..., 1] = x[..., 1] - half_h  # top
    out[..., 2] = x[..., 0] + half_w  # right
    out[..., 3] = x[..., 1] + half_h  # bottom
    return out
|
||||
|
||||
|
||||
def sigmoid(x):
    """Element-wise logistic sigmoid: maps any real value into (0, 1)."""
    return 1.0 / (1.0 + np.exp(-x))
|
||||
|
||||
|
||||
def draw_detections(image, boxes, scores, class_ids, mask_alpha=0.3, mask_maps=None):
    """Render detection boxes, masks and score captions onto a copy of *image*.

    Args:
        image: input image (H, W, 3); left unmodified.
        boxes: (N, 4) boxes in (x1, y1, x2, y2) pixel coordinates.
        scores: (N,) confidence scores in [0, 1].
        class_ids: (N,) integer indices into class_names / colors.
        mask_alpha: blend factor for the mask overlay.
        mask_maps: optional per-detection masks, forwarded to draw_masks.

    Returns:
        New image with all detections drawn.
    """
    img_height, img_width = image.shape[:2]
    # Font scale and stroke thickness proportional to the smaller side.
    smaller_side = min([img_height, img_width])
    size = smaller_side * 0.0006
    text_thickness = int(smaller_side * 0.001)

    canvas = draw_masks(image, boxes, class_ids, mask_alpha, mask_maps)

    for box, score, class_id in zip(boxes, scores, class_ids):
        color = colors[class_id]
        x1, y1, x2, y2 = box.astype(int)

        # Bounding box outline.
        cv2.rectangle(canvas, (x1, y1), (x2, y2), color, 2)

        # Filled label background sized to the rendered caption text.
        label = class_names[class_id]
        caption = f'{label} {int(score * 100)}%'
        (tw, th), _ = cv2.getTextSize(text=caption, fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                                      fontScale=size, thickness=text_thickness)
        th = int(th * 1.2)
        cv2.rectangle(canvas, (x1, y1), (x1 + tw, y1 - th), color, -1)
        cv2.putText(canvas, caption, (x1, y1),
                    cv2.FONT_HERSHEY_SIMPLEX, size, (255, 255, 255), text_thickness, cv2.LINE_AA)

    return canvas
|
||||
|
||||
|
||||
def draw_masks(image, boxes, class_ids, mask_alpha=0.3, mask_maps=None):
    """Blend per-detection masks (or solid box fills) over *image*.

    A class-colored region is painted onto a working copy for every box:
    the full rectangle when *mask_maps* is None, otherwise only the pixels
    selected by that detection's mask crop.  The copy is then alpha-blended
    with the original image.

    Returns:
        The blended image; the input image itself is not modified.
    """
    overlay = image.copy()

    for i, (box, class_id) in enumerate(zip(boxes, class_ids)):
        color = colors[class_id]
        x1, y1, x2, y2 = box.astype(int)

        if mask_maps is None:
            # No mask available: fill the whole bounding box.
            cv2.rectangle(overlay, (x1, y1), (x2, y2), color, -1)
        else:
            # Blend the class color into the box crop where the mask is set.
            crop_mask = mask_maps[i][y1:y2, x1:x2, np.newaxis]
            crop = overlay[y1:y2, x1:x2]
            overlay[y1:y2, x1:x2] = crop * (1 - crop_mask) + crop_mask * color

    return cv2.addWeighted(overlay, mask_alpha, image, 1 - mask_alpha, 0)
|
||||
|
||||
|
||||
def _draw_banner(img, name, bg_color, fontsize, text_thickness):
    """Draw *name* on a filled banner near the top third of *img* (in place)."""
    (tw, th), _ = cv2.getTextSize(text=name, fontFace=cv2.FONT_HERSHEY_DUPLEX,
                                  fontScale=fontsize, thickness=text_thickness)
    x1 = img.shape[1] // 3
    y1 = th
    offset = th // 5
    cv2.rectangle(img, (x1 - offset * 2, y1 + offset),
                  (x1 + tw + offset * 2, y1 - th - offset), bg_color, -1)
    cv2.putText(img, name, (x1, y1),
                cv2.FONT_HERSHEY_DUPLEX, fontsize, (255, 255, 255), text_thickness)


def draw_comparison(img1, img2, name1, name2, fontsize=2.6, text_thickness=3):
    """Label two images and concatenate them side by side.

    Each image gets its name drawn on a filled banner (a distinct background
    color per side); both input images are modified in place.  The combined
    result is downscaled to 3840x2160 when it would be wider than 4K.

    Returns:
        The combined (possibly resized) comparison image.
    """
    # The banner-drawing logic was duplicated verbatim for both images;
    # it now lives in the _draw_banner helper.
    _draw_banner(img1, name1, (0, 115, 255), fontsize, text_thickness)
    _draw_banner(img2, name2, (94, 23, 235), fontsize, text_thickness)

    combined_img = cv2.hconcat([img1, img2])
    if combined_img.shape[1] > 3840:
        combined_img = cv2.resize(combined_img, (3840, 2160))

    return combined_img
|
||||
|
||||
|
||||
|
||||
def fit_plane_vision(box_list, normal_vector):
    """Build an Open3D triangle mesh visualizing the plane a*x+b*y+c*z+d=0
    over the footprint of the first box in *box_list*.

    Args:
        box_list: list of boxes; box_list[0] must hold at least 4 corner
            points, each indexable as (x, y, z).
        normal_vector: plane coefficients (a, b, c, d).

    Returns:
        open3d.geometry.TriangleMesh spanning the box's x/y extent, with z
        computed from the plane equation and clamped to the box's z range.
    """
    # Collect the x/y/z coordinates of the first box's four corner points
    # (replaces twelve copy-pasted append calls; leftover debug print removed).
    corners = [box_list[0][i] for i in range(4)]
    plane_x = [p[0] for p in corners]
    plane_y = [p[1] for p in corners]
    plane_z = [p[2] for p in corners]

    # Plane equation coefficients: a*x + b*y + c*z + d = 0.
    a = normal_vector[0]
    b = normal_vector[1]
    c = normal_vector[2]
    d = normal_vector[3]

    # Axis-aligned extent of the plane patch.
    x_range = (int(min(plane_x)), int(max(plane_x)))
    y_range = (int(min(plane_y)), int(max(plane_y)))
    z_range = (int(min(plane_z)), int(max(plane_z)))

    # Sample a 10x20 grid over the patch and solve the plane equation for Z.
    x = np.linspace(x_range[0], x_range[1], 10)
    y = np.linspace(y_range[0], y_range[1], 20)
    X, Y = np.meshgrid(x, y)
    Z = -(a * X + b * Y + d) / c

    # Keep Z inside the box's vertical extent.
    Z = np.clip(Z, z_range[0], z_range[1])

    # Build the mesh (open3d is imported lazily; it is a heavy dependency).
    import open3d as o3d
    plane_mesh = o3d.geometry.TriangleMesh()
    plane_mesh.vertices = o3d.utility.Vector3dVector(np.vstack((X.ravel(), Y.ravel(), Z.ravel())).T)
    # NOTE(review): these triangle indices assume a 100-vertex row stride,
    # but the grid above is 10 columns x 20 rows (row stride 10) -- the
    # connectivity looks inconsistent with the grid; confirm the intended
    # mesh topology before relying on the rendered surface.
    plane_mesh.triangles = o3d.utility.Vector3iVector(
        np.array([[i, i + 1, i + 100] for i in range(99)] + [[i + 1, i + 101, i + 100] for i in range(99)]))
    plane_mesh.paint_uniform_color([1, 0.5, 0.5])  # uniform patch color

    return plane_mesh
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
Reference in New Issue
Block a user