200 lines
6.3 KiB
Python
200 lines
6.3 KiB
Python
#!/usr/bin/env python
|
||
# -*- coding: UTF-8 -*-
|
||
'''
|
||
@Project :AutoControlSystem-master
|
||
@File :utils.py
|
||
@IDE :PyCharm
|
||
@Author :hjw
|
||
@Date :2024/8/29 15:07
|
||
'''
|
||
|
||
import shutil

import cv2
import numpy as np
import psutil
from psutil._common import bytes2human
|
||
def get_disk_space(path='C:'):
    """Return the free disk space of the volume containing *path*, in GiB.

    Bug fix: the previous implementation formatted ``free`` with
    ``bytes2human`` and stripped the trailing unit letter, so the returned
    number's unit depended on magnitude ("800.5M" -> 800.5 but
    "1.2G" -> 1.2).  Computing GiB directly keeps the unit consistent and
    drops the third-party dependency for this function.

    :param path: a path on the volume to inspect (default 'C:').
    :return: free space in GiB as a float, rounded to one decimal place
        (matching bytes2human's one-decimal formatting).
    """
    usage = shutil.disk_usage(path)
    # 1 GiB = 1024 ** 3 bytes.
    return round(usage.free / (1024 ** 3), 1)
|
||
def find_position(Depth_Z, RegionalArea, RegionalArea_Threshold, first_depth=True):
    """Pick the index of the best candidate region.

    When *first_depth* is true, candidates are considered nearest first
    (ascending ``Depth_Z``); otherwise largest first (descending
    ``RegionalArea``).  The first candidate whose area exceeds
    *RegionalArea_Threshold* is returned.

    Improvements over the original: the two nearly identical sort-then-scan
    branches share one loop, the ``== True`` anti-idiom is gone, and the
    "no candidate qualifies" case returns ``None`` explicitly (same value
    the implicit fall-through produced before).

    :param Depth_Z: per-candidate depth values.
    :param RegionalArea: per-candidate region areas (same length as Depth_Z).
    :param RegionalArea_Threshold: minimum area for a candidate to qualify.
    :param first_depth: prioritize by depth (True) or by area (False).
    :return: index into the original lists, or None if nothing qualifies.
    """
    if first_depth:
        # Nearest first: indices sorted by depth, ascending.
        order = sorted(range(len(Depth_Z)), key=lambda k: Depth_Z[k])
    else:
        # Largest first: indices sorted by area, descending.
        order = sorted(range(len(RegionalArea)), key=lambda k: RegionalArea[k], reverse=True)

    for idx in order:
        if RegionalArea[idx] > RegionalArea_Threshold:
            return idx
    return None
|
||
|
||
# Detection class labels; a detection's class_id indexes into this list.
class_names = ['box', 'other']

# Create one display color per class.  The RNG is seeded so the colors are
# stable across runs; values are floats drawn uniformly from [0, 255),
# one 3-component color row per class.
rng = np.random.default_rng(3)
colors = rng.uniform(0, 255, size=(len(class_names), 3))
|
||
|
||
|
||
def nms(boxes, scores, iou_threshold):
    """Non-maximum suppression.

    Greedily keeps the highest-scoring box and discards every remaining
    candidate whose IoU with it reaches *iou_threshold*, repeating until
    no candidates are left.

    :param boxes: (N, 4) array of boxes in (x1, y1, x2, y2) form.
    :param scores: (N,) array of confidence scores.
    :param iou_threshold: overlap at or above which a candidate is dropped.
    :return: list of kept box indices, best score first.
    """
    # Candidate indices ordered from highest to lowest score.
    order = np.argsort(scores)[::-1]

    selected = []
    while order.size > 0:
        # The best remaining candidate is always kept.
        best = order[0]
        selected.append(best)

        # Overlap of the kept box with every other remaining candidate.
        overlaps = compute_iou(boxes[best, :], boxes[order[1:], :])

        # Indices (into order[1:]) of candidates that survive this round.
        survivors = np.where(overlaps < iou_threshold)[0]

        # Shift by one because `overlaps` excluded the kept box itself.
        order = order[survivors + 1]

    return selected
|
||
|
||
|
||
def compute_iou(box, boxes):
    """Intersection-over-union of one box against an array of boxes.

    :param box: single box as (x1, y1, x2, y2).
    :param boxes: (N, 4) array of boxes in the same corner format.
    :return: (N,) array of IoU values.
    """
    # Corners of the overlap rectangle between `box` and each row of `boxes`.
    inter_x1 = np.maximum(box[0], boxes[:, 0])
    inter_y1 = np.maximum(box[1], boxes[:, 1])
    inter_x2 = np.minimum(box[2], boxes[:, 2])
    inter_y2 = np.minimum(box[3], boxes[:, 3])

    # Negative extents mean the boxes do not overlap, hence the clamp to 0.
    inter_w = np.maximum(0, inter_x2 - inter_x1)
    inter_h = np.maximum(0, inter_y2 - inter_y1)
    intersection = inter_w * inter_h

    # |A ∪ B| = |A| + |B| - |A ∩ B|
    area_a = (box[2] - box[0]) * (box[3] - box[1])
    areas_b = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    union = area_a + areas_b - intersection

    return intersection / union
|
||
|
||
|
||
def xywh2xyxy(x):
    """Convert boxes from center form (cx, cy, w, h) to corner form
    (x1, y1, x2, y2).

    :param x: array whose last axis holds (center-x, center-y, width, height).
    :return: new array, same shape, with corner coordinates.
    """
    out = np.copy(x)
    half_w = x[..., 2] / 2
    half_h = x[..., 3] / 2
    out[..., 0] = x[..., 0] - half_w  # left
    out[..., 1] = x[..., 1] - half_h  # top
    out[..., 2] = x[..., 0] + half_w  # right
    out[..., 3] = x[..., 1] + half_h  # bottom
    return out
|
||
|
||
|
||
def sigmoid(x):
    """Elementwise logistic function: 1 / (1 + e**(-x))."""
    denom = 1 + np.exp(-x)
    return 1 / denom
|
||
|
||
|
||
def draw_detections(image, boxes, scores, class_ids, mask_alpha=0.3, mask_maps=None):
    """Render detections (masks, boxes, and score labels) onto a copy of *image*.

    :param image: input image; not modified (drawing happens on a copy).
    :param boxes: iterable of (x1, y1, x2, y2) boxes.
    :param scores: per-detection confidence in [0, 1].
    :param class_ids: per-detection index into ``class_names`` / ``colors``.
    :param mask_alpha: opacity of the filled masks (see ``draw_masks``).
    :param mask_maps: optional per-detection mask images passed to ``draw_masks``.
    :return: annotated image.
    """
    img_height, img_width = image.shape[:2]
    # Scale label font with the image.  Bug fix: int(min_dim * 0.001) is 0
    # for images under 1000 px on the short side, and cv2 drawing functions
    # reject a zero thickness — clamp to at least 1.
    size = min([img_height, img_width]) * 0.0006
    text_thickness = max(1, int(min([img_height, img_width]) * 0.001))

    mask_img = draw_masks(image, boxes, class_ids, mask_alpha, mask_maps)

    # Draw bounding boxes and labels of detections
    for box, score, class_id in zip(boxes, scores, class_ids):
        color = colors[class_id]

        x1, y1, x2, y2 = box.astype(int)

        # Detection outline.
        cv2.rectangle(mask_img, (x1, y1), (x2, y2), color, 2)

        # Caption: class name plus score as a percentage.
        label = class_names[class_id]
        caption = f'{label} {int(score * 100)}%'
        (tw, th), _ = cv2.getTextSize(text=caption, fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                                      fontScale=size, thickness=text_thickness)
        th = int(th * 1.2)  # a little vertical padding behind the text

        # Filled background so the caption stays readable on any content.
        cv2.rectangle(mask_img, (x1, y1),
                      (x1 + tw, y1 - th), color, -1)

        cv2.putText(mask_img, caption, (x1, y1),
                    cv2.FONT_HERSHEY_SIMPLEX, size, (255, 255, 255), text_thickness, cv2.LINE_AA)

    return mask_img
|
||
|
||
|
||
def draw_masks(image, boxes, class_ids, mask_alpha=0.3, mask_maps=None):
    """Overlay per-detection masks (or filled boxes) on *image*.

    :param image: input image; not modified (drawing happens on a copy).
    :param boxes: iterable of (x1, y1, x2, y2) boxes.
    :param class_ids: per-detection index into ``colors``.
    :param mask_alpha: blend weight of the overlay in the final composite.
    :param mask_maps: optional per-detection mask images; when None each
        box is simply filled with its class color.
    :return: weighted blend of the overlay and the original image.
    """
    overlay = image.copy()

    # Paint every detection onto the overlay.
    for det_idx, (box, class_id) in enumerate(zip(boxes, class_ids)):
        color = colors[class_id]
        x1, y1, x2, y2 = box.astype(int)

        if mask_maps is None:
            # No mask available: fill the whole box with the class color.
            cv2.rectangle(overlay, (x1, y1), (x2, y2), color, -1)
        else:
            # Blend the class color into the box region, weighted per pixel
            # by this detection's mask.
            region_mask = mask_maps[det_idx][y1:y2, x1:x2, np.newaxis]
            region = overlay[y1:y2, x1:x2]
            overlay[y1:y2, x1:x2] = region * (1 - region_mask) + region_mask * color

    return cv2.addWeighted(overlay, mask_alpha, image, 1 - mask_alpha, 0)
|
||
|
||
|
||
def _draw_banner(img, name, color, fontsize, text_thickness):
    """Draw *name* on a filled banner near the top-left third of *img*, in place."""
    (tw, th), _ = cv2.getTextSize(text=name, fontFace=cv2.FONT_HERSHEY_DUPLEX,
                                  fontScale=fontsize, thickness=text_thickness)
    x1 = img.shape[1] // 3
    y1 = th
    offset = th // 5
    # Filled background rectangle, padded around the text extents.
    cv2.rectangle(img, (x1 - offset * 2, y1 + offset),
                  (x1 + tw + offset * 2, y1 - th - offset), color, -1)
    cv2.putText(img, name,
                (x1, y1),
                cv2.FONT_HERSHEY_DUPLEX, fontsize,
                (255, 255, 255), text_thickness)


def draw_comparison(img1, img2, name1, name2, fontsize=2.6, text_thickness=3):
    """Label two images and concatenate them side by side.

    The two banner-drawing halves of the original were copy-paste
    duplicates differing only in background color; they now share
    ``_draw_banner``.  Both input images are drawn on in place, exactly as
    before.

    :param img1: left image (labeled *name1*, orange banner).
    :param img2: right image (labeled *name2*, red banner).
    :param name1: caption for img1.
    :param name2: caption for img2.
    :param fontsize: cv2 font scale for both captions.
    :param text_thickness: stroke thickness for both captions.
    :return: horizontal concatenation, capped at 3840x2160.
    """
    _draw_banner(img1, name1, (0, 115, 255), fontsize, text_thickness)
    _draw_banner(img2, name2, (94, 23, 235), fontsize, text_thickness)

    combined_img = cv2.hconcat([img1, img2])
    # Cap the preview at 4K so very large inputs still display sensibly.
    if combined_img.shape[1] > 3840:
        combined_img = cv2.resize(combined_img, (3840, 2160))

    return combined_img
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|