#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
# @Time   : 2024/9/25 11:47
# @Author : hjw
# @File   : detect_person.py
'''
import os
import os.path
import random
import time

import cv2
import numpy as np
import torch
from ultralytics.nn.autobackend import AutoBackend
from ultralytics.utils import ops

from Vision.tool.CameraHIK import camera_HIK
from Vision.tool.CameraPe import camera_pe
from Vision.tool.CameraRVC import camera_rvc
from Vision.tool.utils import get_disk_space

class DetectionBag:
    """Bag ("料袋") detector backed by a YOLO model and a 3-D camera.

    Loads weights from ``<cwd>/Vision/model/pt/bag_collection.pt`` via
    ultralytics ``AutoBackend`` and runs CPU inference on frames grabbed
    from a ``camera_pe`` device.

    :return (from :meth:`detect_bag`): bag_found [bool], img [ndarray or None]
    """

    def __init__(self) -> None:
        # Weights path is resolved relative to the current working directory.
        model_path = ''.join([os.getcwd(), '/Vision/model/pt/bag_collection.pt'])
        # NOTE(review): the attribute is named camera_rvc but holds a
        # camera_pe instance; the name is kept so existing callers work.
        self.camera_rvc = camera_pe()
        self.imgsz = 640      # square letterbox size fed to the network
        self.cuda = 'cpu'     # inference device string
        self.conf = 0.40      # NMS confidence threshold
        self.iou = 0.45       # NMS IoU threshold
        self.model = AutoBackend(model_path, device=torch.device(self.cuda))
        self.model.eval()
        self.names = self.model.names  # class-index -> class-name mapping
        self.half = False     # FP16 disabled (CPU inference)
        # Label font colour plus one random BGR colour per class.
        self.color = {"font": (255, 255, 255)}
        self.color.update(
            {self.names[i]: (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
             for i in range(len(self.names))})

    def detect_bag(self, save_img_point=0):
        """Grab one frame, detect bags, and optionally persist data.

        (检测料袋,保存料袋图像及点云)

        :param save_img_point: 0 = save nothing; 1 = save raw image;
            2 = save raw point cloud and image; 3 = save down-sampled
            point cloud and image.
        :return bag_found: bool — True when at least one "bag" detection
            survives NMS.
        :return img: ndarray — annotated source image, or None when the
            camera is not initialised or the capture failed.
        """
        # Capture an RGB frame plus the per-pixel point map from the camera.
        ret, img_src, pm = self.camera_rvc.get_img_and_point_map()
        bag_found = False
        if self.camera_rvc.caminit_isok and ret:
            img_save = img_src.copy()  # clean copy kept for saving to disk
            img = self.precess_image(img_src, self.imgsz, self.half, self.cuda)
            preds = self.model(img)
            det = ops.non_max_suppression(preds, self.conf, self.iou, classes=None,
                                          agnostic=False, max_det=300,
                                          nc=len(self.names))
            for pred in det:
                lw = max(round(sum(img_src.shape) / 2 * 0.003), 2)  # box line width
                tf = max(lw - 1, 1)  # font thickness
                sf = lw / 3          # font scale
                # Map boxes from letterboxed network space back onto img_src.
                pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], img_src.shape)
                results = pred.cpu().detach().numpy()
                for result in results:
                    # result layout: [x1, y1, x2, y2, conf, cls]; cast the
                    # class id to int so the names-dict lookup uses the
                    # dict's integer keys explicitly (the original relied
                    # on hash(2.0) == hash(2)).
                    cls_name = self.names[int(result[5])]
                    if cls_name == "bag":
                        bag_found = True
                        conf = round(result[4], 2)
                        self.draw_box(img_src, result[:4], conf, cls_name, lw, sf, tf)
            if bag_found:
                self._save_detection(save_img_point, img_save, pm)
            return bag_found, img_src
        return bag_found, None

    def _save_detection(self, save_img_point, img_save, pm):
        """Persist the captured image and/or point cloud per save_img_point."""
        if get_disk_space(path=os.getcwd()) < 15:
            # Less than 15 GB free: stop persisting data.
            print('系统内存不足,无法保存数据')
            return
        save_path = ''.join([os.getcwd(), '/Vision/model/data/',
                             time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime(time.time()))])
        save_img_name = ''.join([save_path, '.png'])
        save_point_name = ''.join([save_path, '.xyz'])
        if save_img_point in (1, 2, 3):
            cv2.imwrite(save_img_name, img_save)
        if save_img_point == 3:
            # BUG FIX: the original built row_list/column_list from the
            # network input tensor (img, NCHW: shape[0]=1, shape[1]=3), so
            # the "down-sampled" cloud was barely reduced. Drop every other
            # row and column of the point map pm itself.
            row_list = list(range(1, pm.shape[0], 2))
            column_list = list(range(1, pm.shape[1], 2))
            pm_small = np.delete(pm.copy(), row_list, axis=0)
            pm_small = np.delete(pm_small, column_list, axis=1)
            np.savetxt(save_point_name, pm_small.reshape(-1, 3))
        if save_img_point == 2:
            np.savetxt(save_point_name, pm.reshape(-1, 3))

    def draw_box(self, img_src, box, conf, cls_name, lw, sf, tf):
        """Draw one labelled detection box onto img_src in place.

        :param img_src: BGR image (modified in place)
        :param box: (x1, y1, x2, y2) corner coordinates
        :param conf: confidence value shown in the label
        :param cls_name: class name (also selects the box colour)
        :param lw: rectangle line width
        :param sf: font scale
        :param tf: font thickness
        """
        color = self.color[cls_name]
        conf = str(conf)
        label = f'{cls_name} {conf}'
        p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3]))
        # Detection rectangle.
        cv2.rectangle(img_src, p1, p2, color, thickness=lw, lineType=cv2.LINE_AA)
        # Measure the label so its filled background can be sized.
        w, h = cv2.getTextSize(label, 0, fontScale=sf, thickness=tf)[0]
        outside = box[1] - h - 3 >= 0  # does the label fit above the box?
        p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3
        # Filled label background.
        cv2.rectangle(img_src, p1, p2, color, -1, cv2.LINE_AA)
        # Label text.
        cv2.putText(img_src, label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2),
                    0, sf, self.color["font"], thickness=2, lineType=cv2.LINE_AA)

    @staticmethod
    def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), scaleup=True, stride=32):
        """Resize and pad im to new_shape keeping aspect ratio, with both
        padded dimensions rounded to stride multiples (minimum rectangle).

        :param im: HWC image
        :param new_shape: target size (int or (h, w))
        :param color: padding colour
        :param scaleup: allow enlarging (disable for better val mAP)
        :param stride: padding alignment
        :return: (padded image, (w_ratio, h_ratio), (dw, dh) half-paddings)
        """
        shape = im.shape[:2]  # current shape [height, width]
        if isinstance(new_shape, int):
            new_shape = (new_shape, new_shape)

        # Scale ratio (new / old).
        r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
        if not scaleup:  # only scale down, do not scale up
            r = min(r, 1.0)

        # Compute padding.
        ratio = r, r  # width, height ratios
        new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
        dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
        # Minimum rectangle: pad only to the next stride multiple.
        dw, dh = np.mod(dw, stride), np.mod(dh, stride)
        dw /= 2  # split padding between the two sides
        dh /= 2

        if shape[::-1] != new_unpad:  # resize
            im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)

        top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
        left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
        im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)
        return im, ratio, (dw, dh)

    def precess_image(self, img_src, img_size, half, device):
        """Convert a BGR uint8 image into the normalised NCHW float tensor
        the model expects. (Method name "precess" is a historical typo for
        "process", kept because callers depend on it.)

        :param img_src: BGR HWC uint8 image
        :param img_size: letterbox target size
        :param half: cast to fp16 when True
        :param device: torch device (string or torch.device)
        :return: (1, 3, H, W) float tensor scaled to [0, 1]
        """
        img = self.letterbox(img_src, img_size)[0]  # padded resize
        img = img.transpose((2, 0, 1))[::-1]  # HWC -> CHW, BGR -> RGB
        img = np.ascontiguousarray(img)
        img = torch.from_numpy(img).to(device)
        img = img.half() if half else img.float()  # uint8 -> fp16/32
        img = img / 255  # 0-255 -> 0.0-1.0
        if len(img.shape) == 3:
            img = img[None]  # add batch dimension
        return img