最新推送

This commit is contained in:
琉璃月光
2026-03-10 13:58:21 +08:00
parent 032479f558
commit eb16eeada3
97 changed files with 16865 additions and 670 deletions

View File

@@ -2,10 +2,10 @@ import os
# ================== 配置参数 ==================
# 图片所在的文件夹路径
image_folder = '/home/hx/yolo/yemian/resize_p' # 修改为你的图片文件夹路径
image_folder = '/home/hx/yolo/yemian/61_lianghua' # 修改为你的图片文件夹路径
# 输出的txt文件路径
output_txt = '/home/hx/yolo/yemian/image_list.txt' # 修改为你想保存的路径
output_txt = '/home/hx/yolo/yemian/61_lianghua/image_list.txt' # 修改为你想保存的路径
# 支持的图片格式
image_extensions = {'.jpg', '.jpeg', '.png', '.bmp', '.webp', '.tiff', '.gif'}

1
yemian/new.txt Normal file
View File

@@ -0,0 +1 @@
670,623,465,178

Binary file not shown.

Before

Width:  |  Height:  |  Size: 3.6 MiB

Binary file not shown.

View File

@@ -1,170 +1,188 @@
import os
import cv2
import numpy as np
from pathlib import Path
from ultralytics import YOLO
# --------------------
# 配置参数
# 配置
# --------------------
IMAGE_PATH = "/home/hx/yolo/yemian/test_image/1.png"
MODEL_PATH = "best.pt"
OUTPUT_PATH = "./output/single_result.jpg"
TARGET_SIZE = 640
# 新增:用于计算像素到实际尺寸换算比例的函数
def calculate_pixel_to_real_ratio(real_length_mm, pixel_length):
    """Return the mm-per-pixel scale factor of the camera setup.

    Args:
        real_length_mm: known physical length of a reference object (mm).
        pixel_length: span of that same object in the image (pixels).

    Returns:
        Physical length represented by one pixel (mm/pixel).

    Raises:
        ValueError: if pixel_length is zero.
    """
    if pixel_length == 0:
        raise ValueError("像素长度不能为0")
    ratio = real_length_mm / pixel_length
    return ratio
# 在主函数infer_single_image之前设置一个默认值
PIXEL_TO_REAL_RATIO = 1.0 # 默认值,之后会被真实计算的比例替换
# 假设我们知道某物体的真实长度是real_length_mm毫米在图像中占pixel_length像素
real_length_mm = 100 # 物体的实际长度(单位:毫米)
pixel_length = 200 # 物体在图像中的像素长度
try:
PIXEL_TO_REAL_RATIO = calculate_pixel_to_real_ratio(real_length_mm, pixel_length)
print(f"换算比例已设定: {PIXEL_TO_REAL_RATIO:.4f} mm/像素")
except ValueError as e:
print(e)
# 全局 ROI 定义:(x, y, w, h)
IMAGE_DIR = "/media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/1/612/train/class0"
MODEL_PATH = "/home/hx/yolo/ultralytics_yolo11-main/runs/train/61seg/exp2/weights/best.pt"
OUTPUT_DIR = "/media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/1/612/train/class2"
'''
ROIS = [
(859, 810, 696, 328),
(445, 540, 931, 319),
]
'''
ROIS = [
(0, 0, 640, 640),
]
# --------------------
# 辅助函数(保持不变)
# 从 mask 中提取左右边界点
# --------------------
def select_edge_corners(corners, w, left_ratio=0.2, right_ratio=0.2, y_var_thresh=5):
if corners is None:
return [], []
corners = np.int32(corners).reshape(-1, 2)
left_thresh = int(w * left_ratio)
right_thresh = w - int(w * right_ratio)
def extract_left_right_edge_points(mask_bin):
h, w = mask_bin.shape
left_pts = []
right_pts = []
left_candidates = corners[corners[:, 0] <= left_thresh]
right_candidates = corners[corners[:, 0] >= right_thresh]
def filter_by_y_variation(pts):
if len(pts) < 2:
return pts
pts_sorted = pts[np.argsort(pts[:, 1])]
diffs = np.abs(np.diff(pts_sorted[:, 1]))
keep_idx = np.where(diffs > y_var_thresh)[0]
selected = [pts_sorted[i] for i in keep_idx] + [pts_sorted[i + 1] for i in keep_idx]
return np.array(selected) if len(selected) > 0 else pts_sorted
left_final = filter_by_y_variation(left_candidates)
right_final = filter_by_y_variation(right_candidates)
return left_final, right_final
def fit_line_with_outlier_removal(pts, dist_thresh=10):
    """Fit x = m*y + b to points, then refit using only inliers.

    A first least-squares fit is computed; points whose horizontal distance
    to that line reaches dist_thresh are discarded and the line is refit on
    the remainder.

    Args:
        pts: iterable of (x, y) points, or None.
        dist_thresh: max |x - x_fit| for a point to count as an inlier.

    Returns:
        ((m, b), inlier_points); ((m, b), all_points) when fewer than two
        inliers survive; (None, pts) when fewer than two points are given.
    """
    if pts is None or len(pts) < 2:
        return None, pts
    arr = np.array(pts)
    xs, ys = arr[:, 0], arr[:, 1]
    slope, intercept = np.polyfit(ys, xs, 1)
    residuals = np.abs(xs - (slope * ys + intercept))
    keep = residuals < dist_thresh
    if keep.sum() < 2:
        # Too few inliers to refit: fall back to the initial fit over all points.
        return (slope, intercept), arr
    slope, intercept = np.polyfit(ys[keep], xs[keep], 1)
    return (slope, intercept), np.stack([xs[keep], ys[keep]], axis=1)
for y in range(h):
xs = np.where(mask_bin[y] > 0)[0]
if len(xs) >= 2:
left_pts.append([xs.min(), y])
right_pts.append([xs.max(), y])
return np.array(left_pts), np.array(right_pts)
# --------------------
# 单图推理主函数
# 按 seg 的 y 百分比筛选
# --------------------
def infer_single_image(image_path, model_path, output_path):
orig_img = cv2.imread(str(image_path))
if orig_img is None:
print(f"❌ 无法读取图像: {image_path}")
def filter_by_seg_y_ratio(pts, y_start=0.35, y_end=0.85):
    """Keep only points whose y lies inside a relative band of the point set.

    The band spans [y_start, y_end] of the vertical extent of pts. Point
    sets with fewer than 2 points, or an extent under 10 px, pass through
    unchanged.
    """
    if len(pts) < 2:
        return pts
    lo = pts[:, 1].min()
    hi = pts[:, 1].max()
    span = hi - lo
    if span < 10:
        return pts
    band_top = lo + int(span * y_start)
    band_bottom = lo + int(span * y_end)
    in_band = (pts[:, 1] >= band_top) & (pts[:, 1] <= band_bottom)
    return pts[in_band]
# --------------------
# 拟合直线
# --------------------
def fit_line(pts):
    """Least-squares fit of x = m*y + b; returns (m, b) or None for <2 points."""
    if len(pts) < 2:
        return None
    m, b = np.polyfit(pts[:, 1], pts[:, 0], 1)
    return m, b
overlay_img = orig_img.copy()
x_diff_pixel = None # 像素单位的差值
# --------------------
# y 参考值seg 底部)
# --------------------
def get_y_ref(mask_bin):
    """Reference row: mean bottom-most mask pixel over the middle 60% of columns.

    Falls back to the vertical midpoint when no column in that band contains
    any foreground pixel.
    """
    h, w = mask_bin.shape
    bottoms = []
    for col in range(int(w * 0.2), int(w * 0.8)):
        rows = np.where(mask_bin[:, col] > 0)[0]
        if len(rows):
            bottoms.append(rows.max())
    return int(np.mean(bottoms)) if bottoms else h // 2
model = YOLO(model_path)
Path(output_path).parent.mkdir(parents=True, exist_ok=True)
# --------------------
# 单图处理
# --------------------
def process_one(img_path, model):
img = cv2.imread(str(img_path))
vis = img.copy()
for idx, (x, y, w, h) in enumerate(ROIS):
roi_img = orig_img[y:y+h, x:x+w]
resized_img = cv2.resize(roi_img, (TARGET_SIZE, TARGET_SIZE))
result_data = None # (XL, Y, XR, Y, diff)
results = model(source=resized_img, imgsz=TARGET_SIZE, verbose=False)
result = results[0]
for rx, ry, rw, rh in ROIS:
roi = img[ry:ry+rh, rx:rx+rw]
resized = cv2.resize(roi, (TARGET_SIZE, TARGET_SIZE))
if result.masks is None or len(result.masks.data) == 0:
print("❌ 未检测到 mask")
result = model(resized, imgsz=TARGET_SIZE, verbose=False)[0]
if result.masks is None:
continue
mask = result.masks.data[0].cpu().numpy()
mask_bin = (mask > 0.5).astype(np.uint8)
mask_bin = cv2.resize(mask_bin, (w, h), interpolation=cv2.INTER_NEAREST)
mask_bin = cv2.resize(mask_bin, (rw, rh), cv2.INTER_NEAREST)
color_mask = np.zeros_like(roi_img, dtype=np.uint8)
color_mask[mask_bin == 1] = (0, 255, 0)
overlay_img[y:y+h, x:x+w] = cv2.addWeighted(roi_img, 0.7, color_mask, 0.3, 0)
# overlay mask
green = np.zeros_like(roi)
green[mask_bin == 1] = (0, 255, 0)
vis[ry:ry+rh, rx:rx+rw] = cv2.addWeighted(roi, 0.7, green, 0.3, 0)
mask_gray = (mask_bin * 255).astype(np.uint8)
corners = cv2.goodFeaturesToTrack(mask_gray, maxCorners=200, qualityLevel=0.01, minDistance=5)
# 边界点
left_pts, right_pts = extract_left_right_edge_points(mask_bin)
left_pts = filter_by_seg_y_ratio(left_pts)
right_pts = filter_by_seg_y_ratio(right_pts)
left_pts, right_pts = select_edge_corners(corners, w)
left_line, _ = fit_line_with_outlier_removal(left_pts)
right_line, _ = fit_line_with_outlier_removal(right_pts)
left_line = fit_line(left_pts)
right_line = fit_line(right_pts)
if left_line is None or right_line is None:
continue
if left_line and right_line:
y_ref = h * 0.6
m1, b1 = left_line
m2, b2 = right_line
x1 = m1 * y_ref + b1
x2 = m2 * y_ref + b2
x_diff_pixel = abs(x2 - x1)
m1, b1 = left_line
m2, b2 = right_line
# 绘制参考线和文字(仍用像素值显示)
cv2.line(overlay_img[y:y+h, x:x+w], (0, int(y_ref)), (w, int(y_ref)), (0, 255, 255), 2)
cv2.putText(overlay_img[y:y+h, x:x+w],
f"x_diff={x_diff_pixel:.1f}px",
(10, 30), cv2.FONT_HERSHEY_SIMPLEX,
1, (0, 255, 255), 2)
y_ref = get_y_ref(mask_bin)
# 绘制左右拟合线
for (m, b), color in [(left_line, (0, 0, 255)), (right_line, (255, 0, 0))]:
y1, y2 = 0, h
x1_line, x2_line = int(m * y1 + b), int(m * y2 + b)
cv2.line(overlay_img[y:y+h, x:x+w], (x1_line, y1), (x2_line, y2), color, 3)
# ROI 坐标
x_left = int(m1 * y_ref + b1)
x_right = int(m2 * y_ref + b2)
cv2.imwrite(output_path, overlay_img)
print(f"✅ 结果已保存至: {output_path}")
# 🔴 全局坐标
X_L = rx + x_left
X_R = rx + x_right
Y = ry + y_ref
if x_diff_pixel is not None:
x_diff_real = x_diff_pixel * PIXEL_TO_REAL_RATIO
print(f"📊 x差值像素 = {x_diff_pixel:.2f} px")
print(f"📏 x差值实际 = {x_diff_real:.2f} mm") # 可改为 cm 或其他单位
else:
print("⚠️ 未能计算 x 差值")
diff = X_R - X_L
return x_diff_pixel
result_data = (X_L, Y, X_R, Y, diff)
# ---------- 可视化 ----------
roi_vis = vis[ry:ry+rh, rx:rx+rw]
# =====================
# 运行入口
# =====================
for (m, b), c in [((m1, b1), (0,0,255)), ((m2, b2), (255,0,0))]:
cv2.line(
roi_vis,
(int(m * 0 + b), 0),
(int(m * rh + b), rh),
c, 3
)
cv2.line(roi_vis, (0, y_ref), (rw, y_ref), (0,255,255), 2)
cv2.circle(roi_vis, (x_left, y_ref), 6, (0,0,255), -1)
cv2.circle(roi_vis, (x_right, y_ref), 6, (255,0,0), -1)
cv2.putText(
roi_vis,
f"diff={diff}px",
(10, 40),
cv2.FONT_HERSHEY_SIMPLEX,
1,
(0,255,255),
2
)
return vis, result_data
# --------------------
# 批处理
# --------------------
def run():
    """Batch-process every image in IMAGE_DIR and save visualizations.

    For each supported image, runs process_one() and writes the annotated
    result to OUTPUT_DIR, printing the detected edge intersections and the
    pixel difference when available.
    """
    model = YOLO(MODEL_PATH)
    # parents=True: original raised FileNotFoundError when any intermediate
    # directory of OUTPUT_DIR did not exist yet.
    Path(OUTPUT_DIR).mkdir(parents=True, exist_ok=True)
    for img in sorted(os.listdir(IMAGE_DIR)):
        if not img.lower().endswith((".jpg", ".png", ".jpeg")):
            continue
        vis, data = process_one(Path(IMAGE_DIR) / img, model)
        out = Path(OUTPUT_DIR) / f"vis_{img}"
        cv2.imwrite(str(out), vis)
        if data:
            XL, YL, XR, YR, diff = data
            print(f"[{img}]")
            print(f" 左交点: ({XL}, {YL})")
            print(f" 右交点: ({XR}, {YR})")
            print(f" diff : {diff} px")
        else:
            print(f"[{img}] 无有效结果")
# --------------------
if __name__ == "__main__":
infer_single_image(IMAGE_PATH, MODEL_PATH, OUTPUT_PATH)
run()

Binary file not shown.

Before

Width:  |  Height:  |  Size: 452 KiB

View File

@@ -1,243 +1,143 @@
import cv2
import numpy as np
from pathlib import Path
from rknnlite.api import RKNNLite
# --------------------
# 配置参数
# --------------------
IMAGE_PATH = "./11.jpg"
# ======================
# 配置
# ======================
IMAGE_PATH = "3.png"
MODEL_PATH = "segr.rknn"
OUTPUT_PATH = "./single_result.jpg"
TARGET_SIZE = 640
OUT_OVERLAY = "result_overlay.jpg"
DEBUG_INPUT = "debug_input_roi.png"
DEBUG_PROTO = "debug_proto_mask.png"
DEBUG_INST_PROTO = "debug_inst_proto.png"
# 像素到实际尺寸换算比例
def calculate_pixel_to_real_ratio(real_length_mm, pixel_length):
    """Convert a known physical length and its pixel span into mm/pixel.

    Raises ValueError when pixel_length is zero.
    """
    if pixel_length == 0:
        raise ValueError("像素长度不能为0")
    scale = real_length_mm / pixel_length
    return scale
IMG_SIZE = 640
OBJ_THRESH = 0.25
MASK_THRESH = 0.5
STRIDES = [8, 16, 32]
PIXEL_TO_REAL_RATIO = 1.0
real_length_mm = 100 # 实际长度(毫米)
pixel_length = 200 # 对应像素长度
try:
PIXEL_TO_REAL_RATIO = calculate_pixel_to_real_ratio(real_length_mm, pixel_length)
print(f"换算比例已设定: {PIXEL_TO_REAL_RATIO:.4f} mm/像素")
except ValueError as e:
print(e)
# 全局 ROI 定义:(x, y, w, h)
ROIS = [
(859, 810, 696, 328),
(670, 623, 465, 178),
]
# --------------------
# RKNN 单例管理
# --------------------
_rknn_instance = None
# ======================
# 工具函数
# ======================
def sigmoid(x):
    """Element-wise logistic function 1 / (1 + e^-x); works on scalars and arrays."""
    denom = 1.0 + np.exp(-x)
    return 1.0 / denom
def init_rknn(model_path):
global _rknn_instance
if _rknn_instance is not None:
return _rknn_instance
_rknn_instance = RKNNLite(verbose=False)
ret = _rknn_instance.load_rknn(model_path)
if ret != 0:
print(f"[ERROR] 加载 RKNN 模型失败: {ret}")
_rknn_instance = None
return None
def resize_to_640(img):
    """Seg-model input prep: plain bilinear resize to IMG_SIZE x IMG_SIZE.

    Deliberately avoids letterboxing — the model expects a direct stretch.
    """
    target = (IMG_SIZE, IMG_SIZE)
    return cv2.resize(img, target, interpolation=cv2.INTER_LINEAR)
ret = _rknn_instance.init_runtime(core_mask=RKNNLite.NPU_CORE_1)
if ret != 0:
print(f"[ERROR] 初始化 NPU 失败: {ret}")
_rknn_instance.release()
_rknn_instance = None
return None
print("[✅] RKNN 分割模型加载成功")
return _rknn_instance
def dfl_decode(dfl):
    """Decode DFL logits: sigmoid, normalize over the 16 bins, expected bin value.

    Expects the bin dimension on axis 1; returns the per-location expectation.
    """
    bin_values = np.arange(16)
    probs = sigmoid(dfl)
    probs = probs / np.sum(probs, axis=1, keepdims=True)
    return np.sum(probs * bin_values, axis=1)
def release_rknn():
global _rknn_instance
if _rknn_instance:
_rknn_instance.release()
_rknn_instance = None
print("[INFO] RKNN 模型已释放")
# --------------------
# 工具函数letterbox resize
# --------------------
def letterbox_resize(image, size, bg_color=114):
    """Resize preserving aspect ratio, centered on a bg_color canvas.

    Args:
        image: HxWx3 BGR image.
        size: (target_w, target_h) of the output canvas.
        bg_color: fill value for the padded border.

    Returns:
        (canvas, scale, dx, dy) — the padded image, the resize scale, and
        the top-left offset of the scaled image inside the canvas.
    """
    target_w, target_h = size
    src_h, src_w = image.shape[:2]
    scale = min(target_w / src_w, target_h / src_h)
    scaled_w = int(src_w * scale)
    scaled_h = int(src_h * scale)
    scaled = cv2.resize(image, (scaled_w, scaled_h), interpolation=cv2.INTER_LINEAR)
    canvas = np.full((target_h, target_w, 3), bg_color, dtype=np.uint8)
    dx = (target_w - scaled_w) // 2
    dy = (target_h - scaled_h) // 2
    canvas[dy:dy + scaled_h, dx:dx + scaled_w] = scaled
    return canvas, scale, dx, dy
def largest_cc(mask):
    """Keep only the largest connected component of a binary mask.

    Returns the mask unchanged when it has no foreground pixels.
    """
    num, labels = cv2.connectedComponents(mask.astype(np.uint8))
    if num <= 1:
        return mask
    sizes = [(labels == lbl).sum() for lbl in range(1, num)]
    biggest = 1 + int(np.argmax(sizes))
    return (labels == biggest).astype(np.uint8)
# --------------------
# 辅助函数
# --------------------
def select_edge_corners(corners, w, left_ratio=0.2, right_ratio=0.2, y_var_thresh=5):
if corners is None:
return [], []
corners = np.int32(corners).reshape(-1, 2)
left_thresh = int(w * left_ratio)
right_thresh = w - int(w * right_ratio)
left_candidates = corners[corners[:, 0] <= left_thresh]
right_candidates = corners[corners[:, 0] >= right_thresh]
# ======================
# 单 ROI 推理(完整语义 mask
# ======================
def infer_single_roi(rknn, roi):
h0, w0 = roi.shape[:2]
def filter_by_y_variation(pts):
if len(pts) < 2:
return pts
pts_sorted = pts[np.argsort(pts[:, 1])]
diffs = np.abs(np.diff(pts_sorted[:, 1]))
keep_idx = np.where(diffs > y_var_thresh)[0]
selected = [pts_sorted[i] for i in keep_idx] + [pts_sorted[i + 1] for i in keep_idx]
return np.array(selected) if len(selected) > 0 else pts_sorted
# ---------- 1⃣ 正确的 seg 输入 ----------
inp_img = resize_to_640(roi)
cv2.imwrite(DEBUG_INPUT, inp_img)
left_final = filter_by_y_variation(left_candidates)
right_final = filter_by_y_variation(right_candidates)
return left_final, right_final
inp = inp_img[..., ::-1][None, ...]
def fit_line_with_outlier_removal(pts, dist_thresh=10):
    """Robust line fit x = m*y + b: initial fit, drop far points, refit.

    Returns ((m, b), inliers); ((m, b), all points) when fewer than two
    inliers remain; (None, pts) for fewer than two input points.
    """
    if pts is None or len(pts) < 2:
        return None, pts
    pts = np.array(pts)
    coeffs = np.polyfit(pts[:, 1], pts[:, 0], 1)
    predicted = np.polyval(coeffs, pts[:, 1])
    inlier_mask = np.abs(pts[:, 0] - predicted) < dist_thresh
    if inlier_mask.sum() < 2:
        # Degenerate case: keep the first fit over the full point set.
        return (coeffs[0], coeffs[1]), pts
    m, b = np.polyfit(pts[inlier_mask, 1], pts[inlier_mask, 0], 1)
    inliers = np.stack([pts[inlier_mask, 0], pts[inlier_mask, 1]], axis=1)
    return (m, b), inliers
outputs = rknn.inference([inp])
# --------------------
# RKNN 推理生成 mask增加去除小区域
# --------------------
def get_mask_from_rknn(rknn_model, roi_img, target_size=640, min_area=100):
h_orig, w_orig = roi_img.shape[:2]
preprocessed, scale, dx, dy = letterbox_resize(roi_img, (target_size, target_size))
infer_input = preprocessed[np.newaxis, :, :, ::-1].astype(np.float32)
# ---------- 2⃣ proto ----------
proto = outputs[12][0] # (32,160,160)
try:
outputs = rknn_model.inference(inputs=[infer_input])
except Exception as e:
print(f"[ERROR] RKNN 推理异常: {e}")
return None
best_score = -1
best_coef = None
try:
proto = outputs[12][0] # (32,160,160)
mask_proto = np.mean(proto, axis=0)
mask_proto = 1 / (1 + np.exp(-mask_proto))
out_i = 0
for stride in STRIDES:
reg = outputs[out_i][0]
cls = outputs[out_i + 1][0, 0]
obj = outputs[out_i + 2][0, 0]
coef = outputs[out_i + 3][0]
out_i += 4
mask_lb = cv2.resize(mask_proto, (target_size, target_size), interpolation=cv2.INTER_LINEAR)
score_map = sigmoid(cls) * sigmoid(obj)
y, x = np.unravel_index(np.argmax(score_map), score_map.shape)
score = score_map[y, x]
scale = min(target_size / w_orig, target_size / h_orig)
new_w, new_h = int(w_orig*scale), int(h_orig*scale)
pad_x = (target_size - new_w)//2
pad_y = (target_size - new_h)//2
mask_cropped = mask_lb[pad_y:pad_y+new_h, pad_x:pad_x+new_w]
mask_resized = cv2.resize(mask_cropped, (w_orig, h_orig), interpolation=cv2.INTER_LINEAR)
mask_bin = (mask_resized > 0.5).astype(np.uint8)
# 去除小区域
num_labels, labels, stats, _ = cv2.connectedComponentsWithStats(mask_bin, connectivity=8)
mask_clean = np.zeros_like(mask_bin)
for i in range(1, num_labels):
area = stats[i, cv2.CC_STAT_AREA]
if area >= min_area:
mask_clean[labels == i] = 1
return mask_clean
except Exception as e:
print(f"[ERROR] 生成 mask 失败: {e}")
return None
# --------------------
# 主函数
# --------------------
def infer_single_image(image_path, model_path, output_path):
orig_img = cv2.imread(str(image_path))
if orig_img is None:
print(f"无法读取图像: {image_path}")
return None
overlay_img = orig_img.copy()
x_diff_pixel = None
rknn = init_rknn(model_path)
if rknn is None:
print("RKNN 初始化失败")
return None
Path(output_path).parent.mkdir(parents=True, exist_ok=True)
for idx, (x, y, w, h) in enumerate(ROIS):
roi_img = orig_img[y:y+h, x:x+w]
mask_bin = get_mask_from_rknn(rknn, roi_img, target_size=TARGET_SIZE, min_area=30000)
if mask_bin is None or mask_bin.sum() == 0:
print("未检测到有效 mask")
if score < OBJ_THRESH or score <= best_score:
continue
color_mask = np.zeros_like(roi_img, dtype=np.uint8)
color_mask[mask_bin == 1] = (0, 255, 0)
overlay_img[y:y+h, x:x+w] = cv2.addWeighted(roi_img, 0.7, color_mask, 0.3, 0)
best_score = score
best_coef = coef[:, y, x]
mask_gray = (mask_bin * 255).astype(np.uint8)
corners = cv2.goodFeaturesToTrack(mask_gray, maxCorners=200, qualityLevel=0.01, minDistance=5)
if best_coef is None:
return None
left_pts, right_pts = select_edge_corners(corners, w)
left_line, _ = fit_line_with_outlier_removal(left_pts)
right_line, _ = fit_line_with_outlier_removal(right_pts)
# ---------- 3⃣ proto_mask完整 ----------
proto_mask = sigmoid(np.tensordot(best_coef, proto, axes=1)) # (160,160)
if left_line and right_line:
y_ref = h * 0.6
m1, b1 = left_line
m2, b2 = right_line
x1 = m1 * y_ref + b1
x2 = m2 * y_ref + b2
x_diff_pixel = abs(x2 - x1)
pm = (proto_mask - proto_mask.min()) / (proto_mask.max() - proto_mask.min() + 1e-6)
cv2.imwrite(DEBUG_PROTO, (pm * 255).astype(np.uint8))
cv2.line(overlay_img[y:y+h, x:x+w], (0, int(y_ref)), (w, int(y_ref)), (0, 255, 255), 2)
cv2.putText(overlay_img[y:y+h, x:x+w],
f"x_diff={x_diff_pixel:.1f}px",
(10, 30), cv2.FONT_HERSHEY_SIMPLEX,
1, (0, 255, 255), 2)
# ---------- 4⃣ 二值化 + 最大连通域(不裁!) ----------
inst_proto = (proto_mask > MASK_THRESH).astype(np.uint8)
inst_proto = largest_cc(inst_proto)
for (m, b), color in [(left_line, (0, 0, 255)), (right_line, (255, 0, 0))]:
y1, y2 = 0, h
x1_line, x2_line = int(m * y1 + b), int(m * y2 + b)
cv2.line(overlay_img[y:y+h, x:x+w], (x1_line, y1), (x2_line, y2), color, 3)
cv2.imwrite(DEBUG_INST_PROTO, inst_proto * 255)
cv2.imwrite(output_path, overlay_img)
print(f"结果已保存至: {output_path}")
# ---------- 5⃣ proto → ROI ----------
inst_roi = cv2.resize(
inst_proto, (w0, h0), interpolation=cv2.INTER_NEAREST
)
if x_diff_pixel is not None:
x_diff_real = x_diff_pixel * PIXEL_TO_REAL_RATIO
print(f"x差值像素 = {x_diff_pixel:.2f} px")
print(f"x差值实际 = {x_diff_real:.2f} mm")
else:
print("未能计算 x 差值")
return inst_roi * 255
# ======================
# 主程序
# ======================
def main():
    """Load the image and RKNN model, segment each ROI, save a green overlay.

    Writes the blended result to OUT_OVERLAY. Exits early with a message on
    unreadable input or model initialization failure instead of crashing.
    """
    img = cv2.imread(IMAGE_PATH)
    if img is None:
        # Original crashed with AttributeError on img.copy() for a missing image.
        print(f"无法读取图像: {IMAGE_PATH}")
        return
    overlay = img.copy()
    rknn = RKNNLite()
    # Original ignored the return codes; a failed load/init surfaced later
    # as an opaque inference error.
    if rknn.load_rknn(MODEL_PATH) != 0:
        print(f"加载 RKNN 模型失败: {MODEL_PATH}")
        return
    if rknn.init_runtime() != 0:
        print("初始化 NPU 运行时失败")
        rknn.release()
        return
    for (x, y, w, h) in ROIS:
        roi = img[y:y + h, x:x + w]
        mask = infer_single_roi(rknn, roi)
        if mask is None:
            continue
        color = np.zeros_like(roi)
        color[mask == 255] = (0, 255, 0)
        overlay[y:y + h, x:x + w] = cv2.addWeighted(
            roi, 0.7, color, 0.3, 0
        )
    rknn.release()
    cv2.imwrite(OUT_OVERLAY, overlay)
    print("✅ 完成:", OUT_OVERLAY)
release_rknn()
return x_diff_pixel
# =====================
# 运行入口
# =====================
if __name__ == "__main__":
infer_single_image(IMAGE_PATH, MODEL_PATH, OUTPUT_PATH)
main()