增加cvat反向上传

This commit is contained in:
琉璃月光
2025-12-16 15:00:24 +08:00
parent 8b263167f8
commit 032479f558
16 changed files with 783 additions and 1766 deletions

View File

@ -0,0 +1,122 @@
<?xml version="1.0" encoding="utf-8"?>
<annotations>
<version>1.1</version>
<meta>
<task>
<id>258</id>
<name>12.161</name>
<size>27</size>
<mode>annotation</mode>
<overlap>0</overlap>
<bugtracker></bugtracker>
<created>2025-12-16 02:07:59.378104+00:00</created>
<updated>2025-12-16 02:14:18.382442+00:00</updated>
<subset>default</subset>
<start_frame>0</start_frame>
<stop_frame>26</stop_frame>
<frame_filter></frame_filter>
<segments>
<segment>
<id>176</id>
<start>0</start>
<stop>26</stop>
<url>http://www.xj-robot.com:9000/api/jobs/176</url>
</segment>
</segments>
<owner>
<username>huangxin</username>
<email>2193534909@qq.com</email>
</owner>
<assignee></assignee>
<labels>
<label>
<name>hole</name>
<color>#335b66</color>
<type>any</type>
<attributes>
</attributes>
</label>
<label>
<name>crack</name>
<color>#974b72</color>
<type>any</type>
<attributes>
</attributes>
</label>
</labels>
</task>
<dumped>2025-12-16 02:14:39.603909+00:00</dumped>
</meta>
<image id="0" name="1.jpg" width="1279" height="1706">
<box label="crack" source="manual" occluded="0" xtl="360.20" ytl="567.60" xbr="1210.70" ybr="985.80" z_order="0">
</box>
</image>
<image id="1" name="2.jpg" width="1279" height="1706">
<box label="hole" source="manual" occluded="0" xtl="350.15" ytl="229.28" xbr="481.67" ybr="420.92" z_order="0">
</box>
<box label="hole" source="manual" occluded="0" xtl="419.68" ytl="724.60" xbr="942.00" ybr="1046.50" z_order="0">
</box>
</image>
<image id="2" name="3.jpg" width="1706" height="1279">
<box label="hole" source="manual" occluded="0" xtl="960.40" ytl="391.70" xbr="1326.90" ybr="515.40" z_order="0">
</box>
</image>
<image id="3" name="4.jpg" width="1706" height="1279">
<box label="hole" source="manual" occluded="0" xtl="427.10" ytl="314.70" xbr="1401.10" ybr="1074.20" z_order="0">
</box>
</image>
<image id="4" name="微信图片_20251216095823_205.jpg" width="1279" height="1706">
</image>
<image id="5" name="微信图片_20251216095823_206.jpg" width="1706" height="1279">
</image>
<image id="6" name="微信图片_20251216095823_207.jpg" width="1279" height="1706">
</image>
<image id="7" name="微信图片_20251216095823_208.jpg" width="1706" height="1279">
</image>
<image id="8" name="微信图片_20251216095823_209.jpg" width="1706" height="1279">
</image>
<image id="9" name="微信图片_20251216095823_210.jpg" width="1279" height="1706">
</image>
<image id="10" name="微信图片_20251216095823_211.jpg" width="1279" height="1706">
</image>
<image id="11" name="微信图片_20251216095823_212.jpg" width="1279" height="1706">
</image>
<image id="12" name="微信图片_20251216095823_213.jpg" width="1279" height="1706">
</image>
<image id="13" name="微信图片_20251216095823_214.jpg" width="1279" height="1706">
<box label="hole" source="manual" occluded="0" xtl="352.04" ytl="150.90" xbr="910.70" ybr="1274.50" z_order="0">
</box>
</image>
<image id="14" name="微信图片_20251216095823_215.jpg" width="1279" height="1706">
</image>
<image id="15" name="微信图片_20251216095823_216.jpg" width="1706" height="1279">
</image>
<image id="16" name="微信图片_20251216095823_217.jpg" width="1279" height="1706">
</image>
<image id="17" name="微信图片_20251216095823_218.jpg" width="1279" height="1706">
</image>
<image id="18" name="微信图片_20251216095823_219.jpg" width="1706" height="1279">
<box label="crack" source="manual" occluded="0" xtl="1121.12" ytl="110.31" xbr="1207.51" ybr="489.69" z_order="0">
</box>
</image>
<image id="19" name="微信图片_20251216095823_220.jpg" width="1279" height="1706">
</image>
<image id="20" name="微信图片_20251216095823_221.jpg" width="1279" height="1706">
</image>
<image id="21" name="微信图片_20251216095823_222.jpg" width="1279" height="1706">
<box label="crack" source="manual" occluded="0" xtl="927.58" ytl="905.66" xbr="1091.67" ybr="999.61" z_order="0">
</box>
</image>
<image id="22" name="微信图片_20251216095823_223.jpg" width="1279" height="1706">
</image>
<image id="23" name="微信图片_20251216095823_224.jpg" width="1279" height="1706">
</image>
<image id="24" name="微信图片_20251216095823_225.jpg" width="1279" height="1706">
</image>
<image id="25" name="微信图片_20251216095823_226.jpg" width="1279" height="1706">
</image>
<image id="26" name="微信图片_20251216095823_227.jpg" width="1279" height="1706">
<box label="hole" source="manual" occluded="0" xtl="605.10" ytl="583.04" xbr="744.10" ybr="678.24" z_order="0">
</box>
</image>
</annotations>

View File

@ -0,0 +1,100 @@
import os
import xml.etree.ElementTree as ET
from pathlib import Path
def cvat_to_yolo_detect(xml_path, output_dir, class_name_to_id=None):
    """Convert a CVAT-exported XML (object-detection mode) to YOLO Detect labels.

    One ``<stem>.txt`` file is written per ``<image>`` element (an empty file
    when the image has no boxes), each line being
    ``<class_id> <cx> <cy> <w> <h>`` with coordinates normalized to [0, 1].

    Args:
        xml_path (str): Path to the CVAT-exported XML file.
        output_dir (str): Directory that receives the YOLO ``.txt`` files
            (created if missing).
        class_name_to_id (dict, optional): Mapping from class name to integer
            id. When ``None``, ids are assigned 0, 1, 2, ... in the order the
            labels appear under the XML ``<labels>`` element.
    """
    output_dir = Path(output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)

    tree = ET.parse(xml_path)
    root = tree.getroot()

    # Build the class map from <labels> when the caller did not supply one.
    if class_name_to_id is None:
        class_name_to_id = {}
        labels_elem = root.find(".//labels")
        if labels_elem is not None:
            for idx, label in enumerate(labels_elem.findall("label")):
                name = label.find("name").text
                class_name_to_id[name] = idx
        else:
            print("⚠️ 未找到 <labels>,请手动提供 class_name_to_id")
    print(f"类别映射: {class_name_to_id}")

    # One output .txt per <image> element.
    for image in root.findall("image"):
        img_name = image.get("name")
        width = int(image.get("width"))
        height = int(image.get("height"))

        # Label file shares the image's stem (extension dropped).
        stem = Path(img_name).stem
        txt_path = output_dir / f"{stem}.txt"

        boxes = []
        for box in image.findall("box"):
            label = box.get("label")
            if label not in class_name_to_id:
                # BUG FIX: the warning's CJK bracket was unbalanced.
                print(f"⚠️ 未知类别 '{label}',跳过(图片: {img_name})")
                continue
            class_id = class_name_to_id[label]
            xtl = float(box.get("xtl"))
            ytl = float(box.get("ytl"))
            xbr = float(box.get("xbr"))
            ybr = float(box.get("ybr"))

            # Corner coordinates -> normalized YOLO center/size.
            x_center = (xtl + xbr) / 2 / width
            y_center = (ytl + ybr) / 2 / height
            w = (xbr - xtl) / width
            h = (ybr - ytl) / height

            # Clamp to [0, 1] to guard against annotation overshoot.
            x_center = max(0.0, min(1.0, x_center))
            y_center = max(0.0, min(1.0, y_center))
            w = max(0.0, min(1.0, w))
            h = max(0.0, min(1.0, h))

            boxes.append(f"{class_id} {x_center:.6f} {y_center:.6f} {w:.6f} {h:.6f}")

        # Write the label file (an empty file when no boxes were found).
        with open(txt_path, "w") as f:
            f.write("\n".join(boxes))
        # BUG FIX: restored the separator that had been garbled out of the
        # per-image summary message.
        print(f"✅ {img_name}{len(boxes)} 个目标")

    print(f"\n🎉 转换完成YOLO 标注已保存至: {output_dir}")
if __name__ == "__main__":
    # ---- configuration ----
    xml_path = "annotations.xml"   # CVAT XML export to convert
    labels_dir = "labels"          # destination for the YOLO .txt files

    # Option 1: derive the class map automatically from the XML (recommended).
    class_map = None
    # Option 2: pin the mapping manually (must match the training config):
    # class_map = {
    #     "hole": 0,
    #     "crack": 1
    # }

    # ---- run the conversion ----
    cvat_to_yolo_detect(xml_path, labels_dir, class_map)

View File

@ -98,8 +98,8 @@ if __name__ == "__main__":
# 修改为你自己的路径
#TRAIN_DIR = "/media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/cls-new/19cc/train"
#VAL_DIR = "/media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/cls-new/19cc/val"
TRAIN_DIR = "/media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/1/11c/train"
VAL_DIR = "/media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/1/11c/val"
TRAIN_DIR = "/media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/point2/train"
VAL_DIR = "/media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/point2/val1"
split_train_to_val(
train_dir=TRAIN_DIR,
val_dir=VAL_DIR,

109
tool/divid_val1.py Normal file
View File

@ -0,0 +1,109 @@
import os
import shutil
import random
from pathlib import Path
def _move_image_with_label(img, dst_dir):
    """Move one image into dst_dir, plus its same-stem .txt label if present."""
    shutil.move(str(img), str(dst_dir / img.name))
    txt_file = img.with_suffix('.txt')
    if txt_file.exists():
        shutil.move(str(txt_file), str(dst_dir / txt_file.name))


def split_train_to_val(train_dir, val_dir, ratio=0.1, seed=42):
    """Randomly move `ratio` of the labelled images from train_dir to val_dir.

    Auto-detects the dataset layout: a classification layout (one sub-folder
    per class) when every top-level entry is a directory, otherwise a flat
    YOLO layout (images plus same-stem ``.txt`` labels side by side).

    Args:
        train_dir (str): Training-set directory.
        val_dir (str): Validation-set directory (created as needed).
        ratio (float): Fraction to move, e.g. 0.1 for 10%; at least one image
            is moved per (non-empty) folder.
        seed (int): Random seed so the split is reproducible.

    Raises:
        FileNotFoundError: If `train_dir` does not exist.
    """
    train_path = Path(train_dir)
    val_path = Path(val_dir)
    if not train_path.exists():
        raise FileNotFoundError(f"训练目录不存在: {train_path}")

    # Seed once so repeated runs pick the same files.
    random.seed(seed)

    # Top-level entries decide the layout: all directories => classification.
    # FIX: plain list() instead of a pass-through comprehension copy.
    items = list(train_path.iterdir())
    is_classification = all(p.is_dir() for p in items) and len(items) > 0

    # Recognized image extensions.
    IMG_EXTENSIONS = {'.jpg', '.jpeg', '.png', '.bmp', '.tiff', '.gif'}

    if is_classification:
        print("📁 检测到分类结构(含类别子文件夹)")
        for class_dir in items:
            class_name = class_dir.name
            src_class_dir = train_path / class_name

            # Sample only actual image files.
            image_files = [
                f for f in src_class_dir.iterdir()
                if f.is_file() and f.suffix.lower() in IMG_EXTENSIONS
            ]
            if not image_files:
                print(f" ⚠️ 类别 '{class_name}' 中无图像文件,跳过")
                continue

            # FIX: create the destination folder only for non-empty classes
            # (the original left behind empty val sub-folders for skipped ones).
            dst_class_dir = val_path / class_name
            dst_class_dir.mkdir(parents=True, exist_ok=True)

            num_val = max(1, int(len(image_files) * ratio))
            val_images = random.sample(image_files, num_val)
            for img in val_images:
                _move_image_with_label(img, dst_class_dir)
            print(f" ✅ 类别 '{class_name}': {len(val_images)} 张图像已移至 val")
    else:
        print("📄 检测到平铺结构YOLO格式图像 + 同名 .txt 标签)")
        val_path.mkdir(parents=True, exist_ok=True)

        # Images are the sampling unit; .txt labels follow their image.
        image_files = [
            f for f in train_path.iterdir()
            if f.is_file() and f.suffix.lower() in IMG_EXTENSIONS
        ]
        if not image_files:
            print("⚠️ 训练目录中未找到任何图像文件(支持格式: jpg, png 等)")
            return

        num_val = max(1, int(len(image_files) * ratio))
        val_images = random.sample(image_files, num_val)
        for img in val_images:
            _move_image_with_label(img, val_path)
        print(f"✅ 平铺结构: 已移动 {len(val_images)} 张图像及其标签到 {val_path}")

    print(f"\n🎉 分割完成!验证集已保存至: {val_path}")
# ======================
# 使用示例
# ======================
if __name__ == "__main__":
    # Paths for this machine's dataset layout.
    source_train = "/media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/point2/train"
    target_val = "/media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/point2/val1"
    # Move 10% of the labelled images with a fixed seed (reproducible split).
    split_train_to_val(source_train, target_val, 0.1, 42)

117
tool/save_bigangle.py Normal file
View File

@ -0,0 +1,117 @@
import cv2
import os
import shutil
import numpy as np
from ultralytics import YOLO
IMG_EXTENSIONS = {'.jpg', '.jpeg', '.png', '.bmp', '.tif', '.tiff', '.webp'}
def process_obb_images(
    model_path: str,
    image_dir: str,
    large_angle_output_dir: str = "./large_angle_images",  # where "large angle" originals get moved
    conf_thresh: float = 0.15,
    imgsz: int = 640,
    angle_threshold_deg: float = 10.0  # pairwise-angle threshold in degrees
) -> dict:
    """
    Batch-process a folder of images with a YOLO OBB model; when the main
    directions of any two detected boxes in one image differ by more than
    `angle_threshold_deg`, the original image is moved to
    `large_angle_output_dir`.

    Returns:
        dict: {filename: {"angles_deg": [...], "pairwise_angles_deg": [...],
        "has_large_angle": bool}} for every readable image.
    """
    os.makedirs(large_angle_output_dir, exist_ok=True)
    results_dict = {}
    print("加载 YOLO 模型...")
    model = YOLO(model_path)
    print("✅ 模型加载完成")
    # Collect candidate image files (extension match, case-insensitive).
    image_files = [f for f in os.listdir(image_dir) if os.path.splitext(f.lower())[1] in IMG_EXTENSIONS]
    if not image_files:
        print(f"❌ 未找到图像文件:{image_dir}")
        return results_dict
    print(f"发现 {len(image_files)} 张图像待处理")
    for img_filename in image_files:
        img_path = os.path.join(image_dir, img_filename)
        print(f"\n正在处理:{img_filename}")
        img = cv2.imread(img_path)
        if img is None:
            # Unreadable images are skipped and omitted from results_dict.
            print(f"❌ 跳过:无法读取图像 {img_path}")
            continue
        # OBB inference.
        # NOTE(review): 'mode' is not a documented predict() kwarg in
        # ultralytics (the task normally comes from the model itself) —
        # confirm it is accepted/ignored by the installed version.
        results = model(img, save=False, imgsz=imgsz, conf=conf_thresh, mode='obb')
        result = results[0]
        # Oriented boxes; None when the model found nothing.
        boxes = result.obb
        angles_deg = []
        has_large_angle = False  # set when any pairwise angle exceeds the threshold
        if boxes is None or len(boxes) == 0:
            print("❌ 该图像中未检测到任何目标")
        else:
            for i, box in enumerate(boxes):
                cls = int(box.cls.cpu().numpy()[0])
                conf = box.conf.cpu().numpy()[0]
                # NOTE(review): assumes xywhr is (cx, cy, w, h, rotation_rad)
                # per the ultralytics OBB results API — confirm for this version.
                cx, cy, w, h, r_rad = box.xywhr.cpu().numpy()[0]
                # Align the direction with the box's long side, then fold
                # into [0, π) so opposite headings compare equal.
                direction = r_rad if w >= h else r_rad + np.pi / 2
                direction = direction % np.pi
                angle_deg = np.degrees(direction)
                angles_deg.append(angle_deg)
                print(f" Box {i + 1}: Class={cls}, Conf={conf:.3f}, 主方向={angle_deg:.2f}°")
        # Pairwise angle between every two detected directions; the minimum
        # of diff and π-diff handles the [0, π) wrap-around.
        pairwise_angles_deg = []
        if len(angles_deg) >= 2:
            for i in range(len(angles_deg)):
                for j in range(i + 1, len(angles_deg)):
                    diff_rad = abs(np.radians(angles_deg[i]) - np.radians(angles_deg[j]))
                    min_diff_rad = min(diff_rad, np.pi - diff_rad)
                    angle_diff_deg = np.degrees(min_diff_rad)
                    pairwise_angles_deg.append(angle_diff_deg)
                    print(f" Box {i + 1} 与 Box {j + 1} 夹角: {angle_diff_deg:.2f}°")
                    if angle_diff_deg > angle_threshold_deg:
                        has_large_angle = True
        elif len(angles_deg) == 1:
            print(" 仅检测到一个目标,无法计算夹角")
        # Record per-image results.
        results_dict[img_filename] = {
            "angles_deg": angles_deg,
            "pairwise_angles_deg": pairwise_angles_deg,
            "has_large_angle": has_large_angle
        }
        # Move the original out of image_dir when a large angle was found.
        if has_large_angle:
            print(f"发现夹角 > {angle_threshold_deg}°,移动原图到大角度文件夹")
            shutil.move(img_path, os.path.join(large_angle_output_dir, img_filename))
    print(f"\n所有图像处理完成!大角度图像已移动至: {large_angle_output_dir}")
    return results_dict
# ------------------- 测试调用 -------------------
if __name__ == "__main__":
    # Local paths for this run.
    model_file = r'/home/hx/yolo/ultralytics_yolo11-main/runs/train/exp_obb_new3/weights/best.pt'
    source_dir = r"/media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/1/ready"
    moved_dir = "/media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/1/large_angle_images"

    stats = process_obb_images(
        model_file,
        source_dir,
        moved_dir,
        0.15,   # conf_thresh
        640,    # imgsz
        10.0    # angle_threshold_deg
    )

    # Report which images were flagged as containing a large angle.
    flagged = []
    for fname, info in stats.items():
        if info["has_large_angle"]:
            flagged.append(fname)
    print(f"\n共 {len(flagged)} 张图像包含 >10° 的夹角:")
    for fname in flagged:
        print(f" - {fname}")

View File

@ -0,0 +1,19 @@
from ultralytics import YOLO
if __name__ == '__main__':
    # Alternative OBB config, kept for reference:
    # model = YOLO(r'/home/hx/yolo/ultralytics_yolo11-main/ultralytics/cfg/models/11/yolo11-obb.yaml')
    model = YOLO(r'/home/hx/yolo/ultralytics_yolo11-main/ultralytics/cfg/models/11/yolo11.yaml')

    # Training configuration, collected in one place for readability.
    train_args = dict(
        data='xiantiaodata.yaml',
        epochs=2000,
        imgsz=640,
        batch=4,
        workers=10,
        device='0',
        project='runs/train',
        name='exp_detect',
        exist_ok=False,
        optimizer='AdamW',
        lr0=0.0005,
        patience=0,  # 0 disables early stopping
    )
    results = model.train(**train_args)

View File

@ -2,7 +2,7 @@
# YOLO11 object detection model with P3-P5 outputs. For Usage examples see https://docs.ultralytics.com/tasks/detect
# Parameters
nc: 1 # number of classes
nc: 2 # number of classes
scales: # model compound scaling constants, i.e. 'model=yolo11n.yaml' will call yolo11.yaml with scale 'n'
# [depth, width, max_channels]
n: [0.50, 0.25, 1024] # summary: 319 layers, 2624080 parameters, 2624064 gradients, 6.6 GFLOPs

View File

@ -0,0 +1,7 @@
path: /home/hx/开发/ML_xiantiao/class_xiantiao_pc/test_image # 数据集所在路径
train: train # 数据集路径下的train.txt
val: val # 数据集路径下的val.txt
test: test # 数据集路径下的test.txt
nc: 2
names: ['hole','crack']

File diff suppressed because it is too large Load Diff

View File

@ -3,7 +3,7 @@ import os
# =================== 配置 ===================
xml_file = 'annotations.xml' # 你的 CVAT XML 文件路径
images_dir = '/media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/1/20251208' # 图像文件夹(用于读取宽高)
images_dir = '/media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/1/20251209' # 图像文件夹(用于读取宽高)
output_dir = 'labels_keypoints' # 输出 YOLO 标签目录
os.makedirs(output_dir, exist_ok=True)

View File

@ -1,4 +1,5 @@
import os
import shutil
from pathlib import Path
import cv2
from ultralytics import YOLO
@ -6,15 +7,15 @@ from ultralytics import YOLO
# ---------------------------
# 配置路径(请按需修改)
# ---------------------------
MODEL_PATH = "gaiban.pt" # 你的二分类模型
INPUT_FOLDER = "/media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/1/12.2" # 输入图像文件夹
OUTPUT_ROOT = "/media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/1/12.2.2" # 输出根目录(会生成 合格/不合格 子文件夹)
MODEL_PATH = "xialiao.pt" # 你的二分类模型
INPUT_FOLDER = "/media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/1/ready" # 输入图像文件夹
OUTPUT_ROOT = "/media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/1/ready/result" # 输出根目录
# 类别映射(必须与训练时的 data.yaml 顺序一致)
CLASS_NAMES = {0: "不合格", 1: "合格"}
# ---------------------------
# 批量推理函数
# 批量推理函数(移动原图)
# ---------------------------
def batch_classify(model_path, input_folder, output_root):
# 加载模型
@ -36,27 +37,32 @@ def batch_classify(model_path, input_folder, output_root):
if img_path.suffix.lower() not in IMG_EXTS:
continue
# 读取图像
# 读取图像(用于推理)
img = cv2.imread(str(img_path))
if img is None:
print(f"❌ 无法读取: {img_path}")
print(f"❌ 无法读取图像(可能已损坏或被占用): {img_path}")
continue
# 推理(整图)
# 推理(整图分类
results = model(img)
probs = results[0].probs.data.cpu().numpy()
pred_class_id = int(probs.argmax())
pred_label = CLASS_NAMES[pred_class_id]
confidence = float(probs[pred_class_id])
# 保存原图到对应文件夹
# ⚠️ 关键修改:移动原图(不是复制)
dst = output_root / pred_label / img_path.name
cv2.imwrite(str(dst), img)
try:
shutil.move(str(img_path), str(dst))
except Exception as e:
print(f"❌ 移动失败 {img_path}{dst}: {e}")
continue
print(f"{img_path.name}{pred_label} ({confidence:.2f})")
processed += 1
print(f"\n🎉 共处理 {processed} 张图像,结果已保存至: {output_root}")
print(f"\n🎉 共处理并移动 {processed} 张图像,结果已保存至: {output_root}")
# ---------------------------
# 运行入口

View File

@ -0,0 +1,152 @@
# yolo_detect_to_cvat.py
import os
import xml.etree.ElementTree as ET
from pathlib import Path
import cv2
IMG_EXTENSIONS = {'.jpg', '.jpeg', '.png', '.bmp', '.tif', '.tiff', '.webp'}
def _find_image_for_stem(image_dir, stem):
    """Return the image in image_dir whose stem matches `stem`, trying every
    known extension in both lower- and upper-case; None when nothing matches."""
    for ext in IMG_EXTENSIONS:
        for candidate in (image_dir / f"{stem}{ext}", image_dir / f"{stem}{ext.upper()}"):
            if candidate.exists():
                return candidate
    return None


def yolo_detect_to_cvat_xml(label_dir, image_dir, class_id_to_name, output_xml):
    """
    Convert YOLO Detect labels (`class cx cy w h`, normalized) into a CVAT
    "images 1.1" XML annotation file.

    Args:
        label_dir: directory containing the YOLO .txt label files.
        image_dir: directory holding the matching images (same file stem).
        class_id_to_name: mapping of YOLO class id -> CVAT label name.
        output_xml: destination path for the generated XML.
    """
    label_dir = Path(label_dir)
    image_dir = Path(image_dir)

    # ======== basic XML skeleton ========
    root = ET.Element("annotations")
    ET.SubElement(root, "version").text = "1.1"
    meta = ET.SubElement(root, "meta")
    task = ET.SubElement(meta, "task")

    txt_files = sorted(label_dir.glob("*.txt"))
    total = len(txt_files)

    # Minimal <task> metadata CVAT accepts on import.
    ET.SubElement(task, "id").text = "1"
    ET.SubElement(task, "name").text = "yolo_detect_import"
    ET.SubElement(task, "size").text = str(total)
    ET.SubElement(task, "mode").text = "annotation"
    ET.SubElement(task, "overlap").text = "0"
    ET.SubElement(task, "bugtracker").text = ""
    ET.SubElement(task, "created").text = ""
    ET.SubElement(task, "updated").text = ""
    ET.SubElement(task, "subset").text = "default"
    ET.SubElement(task, "start_frame").text = "0"
    ET.SubElement(task, "stop_frame").text = str(total - 1)
    ET.SubElement(task, "frame_filter").text = ""

    # Label catalogue.
    labels_elem = ET.SubElement(task, "labels")
    for name in class_id_to_name.values():
        lab = ET.SubElement(labels_elem, "label")
        ET.SubElement(lab, "name").text = name
        ET.SubElement(lab, "color").text = "#ffffff"
        ET.SubElement(lab, "type").text = "any"
        ET.SubElement(lab, "attributes")
    ET.SubElement(meta, "dumped").text = ""

    # ======== one <image> element per label file ========
    for idx, txt_file in enumerate(txt_files):
        stem = txt_file.stem
        # BUG FIX: the original fallback tried an upper-cased *stem*
        # (f"{stem.upper()}{ext}"); what actually varies on disk is the
        # extension case (.JPG vs .jpg), so try ext.upper() instead.
        img_path = _find_image_for_stem(image_dir, stem)
        if img_path is None:
            print(f"⚠ 找不到对应图像: {stem}")
            continue

        # Image size is needed to de-normalize the YOLO coordinates.
        img = cv2.imread(str(img_path))
        if img is None:
            # BUG FIX: the old message claimed the image was skipped while the
            # code actually fell back to a default size; keep the fallback and
            # make the message truthful.
            print(f"⚠ 无法读取图像: {img_path},使用默认尺寸 1920x1080")
            H, W = 1080, 1920  # fallback
        else:
            H, W = img.shape[:2]

        image_elem = ET.SubElement(root, "image", {
            "id": str(idx),
            "name": img_path.name,
            "width": str(W),
            "height": str(H)
        })

        # Parse the YOLO Detect label lines.
        with open(txt_file, "r") as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue
                parts = line.split()
                if len(parts) != 5:
                    print(f"⚠ 标签格式错误应为5列: {line} in {txt_file}")
                    continue
                cls_id = int(parts[0])
                cx, cy, bw, bh = map(float, parts[1:])

                # De-normalize to absolute pixel coordinates.
                cx_abs = cx * W
                cy_abs = cy * H
                w_abs = bw * W
                h_abs = bh * H

                # Center/size -> corner coordinates, clamped to the image.
                xtl = max(0, min(W, cx_abs - w_abs / 2))
                ytl = max(0, min(H, cy_abs - h_abs / 2))
                xbr = max(0, min(W, cx_abs + w_abs / 2))
                ybr = max(0, min(H, cy_abs + h_abs / 2))

                # Plain axis-aligned box (no rotation attribute).
                ET.SubElement(image_elem, "box", {
                    "label": class_id_to_name.get(cls_id, f"class_{cls_id}"),
                    "source": "manual",
                    "occluded": "0",
                    "xtl": f"{xtl:.2f}",
                    "ytl": f"{ytl:.2f}",
                    "xbr": f"{xbr:.2f}",
                    "ybr": f"{ybr:.2f}",
                    "z_order": "0"
                })
        print(f"✔ 处理 {img_path.name}")

    # Write out the assembled document.
    tree = ET.ElementTree(root)
    tree.write(output_xml, encoding="utf-8", xml_declaration=True)
    print(f"\n✅ 已生成 CVAT XML 文件: {output_xml}")
# ------------------- 主函数 -------------------
if __name__ == "__main__":
    # YOLO class id -> CVAT label name.
    id_to_label = {
        0: "hole",
        1: "crack"
    }
    yolo_detect_to_cvat_xml(
        "/home/hx/yolo/推理图片反向上传CVAT/detect/inference_results/labels",
        "/home/hx/开发/ML_xiantiao/class_xiantiao_pc/test_image/train",
        id_to_label,
        "detect_annotations.xml"
    )

View File

@ -0,0 +1,135 @@
import os
import cv2
from pathlib import Path
from ultralytics import YOLO
IMG_EXTENSIONS = {'.jpg', '.jpeg', '.png', '.bmp', '.tif', '.tiff', '.webp'}
class ObjectDetector:
    """Thin wrapper around an ultralytics YOLO object-detection model."""

    def __init__(self, model_path):
        # Fail fast with a clear message instead of an opaque loader error.
        if not os.path.exists(model_path):
            raise FileNotFoundError(f"模型文件不存在: {model_path}")
        self.model = YOLO(model_path)
        print(f"[INFO] 成功加载 YOLO 目标检测模型: {model_path}")

    def detect(self, img_np, conf_threshold=0.0):
        """Return every detection with confidence >= conf_threshold.

        Each item is a dict with 'bbox_xyxy' ([x1, y1, x2, y2]),
        'confidence' (float) and 'class_id' (int).
        """
        results = self.model.predict(img_np, conf=conf_threshold, verbose=False)
        detections = []
        for result in results:
            for box in result.boxes.cpu().numpy():
                detections.append({
                    'bbox_xyxy': box.xyxy[0],
                    'confidence': float(box.conf.item()),
                    'class_id': int(box.cls.item()),
                })
        return detections
def save_yolo_detect_labels_from_folder(
    model_path,
    image_dir,
    output_dir,
    conf_threshold=0.5,
    label_map=None,  # optional {class_id: name}; informational only
):
    """
    Run YOLO Detect inference on every image in `image_dir`, keep only the
    single highest-confidence box per class, and save YOLO-format .txt labels.

    YOLO format: <class_id> <cx_norm> <cy_norm> <w_norm> <h_norm>

    Args:
        model_path: path to the detection weights (.pt).
        image_dir: folder of input images.
        output_dir: output root; labels are written to <output_dir>/labels.
        conf_threshold: minimum confidence for a detection to be considered.
        label_map: optional class-id -> name mapping, kept for backward
            compatibility. BUG FIX: the original default was a mutable dict
            literal ({0: "hole", 1: "crack"}) — a classic mutable-default
            pitfall — and the parameter is not used by the conversion logic,
            so the default is now None.
    """
    image_dir = Path(image_dir)
    output_dir = Path(output_dir)
    labels_dir = output_dir / "labels"
    labels_dir.mkdir(parents=True, exist_ok=True)

    # Collect images (sorted for a deterministic processing order).
    image_files = [
        f for f in sorted(os.listdir(image_dir))
        if os.path.splitext(f.lower())[1] in IMG_EXTENSIONS
    ]
    if not image_files:
        print(f"❌ 未在 {image_dir} 中找到支持的图像文件")
        return
    print(f"共找到 {len(image_files)} 张图像,开始推理...")

    detector = ObjectDetector(model_path)

    for img_filename in image_files:
        img_path = image_dir / img_filename
        txt_path = labels_dir / f"{Path(img_filename).stem}.txt"

        img = cv2.imread(str(img_path))
        if img is None:
            print(f"⚠️ 跳过无效图像: {img_path}")
            txt_path.write_text("")  # still emit an (empty) label file
            continue
        H, W = img.shape[:2]

        # All detections at or above the confidence threshold.
        all_detections = detector.detect(img, conf_threshold=conf_threshold)

        # Keep only the single best box per class.
        best_per_class = {}
        for det in all_detections:
            cls_id = det['class_id']
            if cls_id not in best_per_class or det['confidence'] > best_per_class[cls_id]['confidence']:
                best_per_class[cls_id] = det

        # Convert xyxy -> normalized cxcywh, clamped to [0, 1], one line each.
        lines = []
        for det in best_per_class.values():
            x1, y1, x2, y2 = det['bbox_xyxy']
            cx_norm = max(0.0, min(1.0, (x1 + x2) / 2.0 / W))
            cy_norm = max(0.0, min(1.0, (y1 + y2) / 2.0 / H))
            w_norm = max(0.0, min(1.0, (x2 - x1) / W))
            h_norm = max(0.0, min(1.0, (y2 - y1) / H))
            lines.append(f"{det['class_id']} {cx_norm:.6f} {cy_norm:.6f} {w_norm:.6f} {h_norm:.6f}")

        # Write the label file (empty when nothing was detected).
        with open(txt_path, "w") as f:
            if lines:
                f.write("\n".join(lines) + "\n")
        print(f"✅ {img_filename} -> {len(lines)} 个检测框已保存")

    print(f"\n🎉 全部完成!标签文件保存在: {labels_dir}")
# ------------------- 主函数调用 -------------------
if __name__ == "__main__":
    # Run settings for this machine.
    weights = "/home/hx/yolo/ultralytics_yolo11-main/runs/train/exp_detect/weights/best.pt"
    images = "/home/hx/开发/ML_xiantiao/class_xiantiao_pc/test_image/train"
    out_root = "./inference_results"

    save_yolo_detect_labels_from_folder(weights, images, out_root, 0.5)

View File

@ -5,7 +5,7 @@ import os
# ====================== 用户配置 ======================
MODEL_PATH = 'point.pt'
IMAGE_SOURCE_DIR = '/media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/1/20251208'
IMAGE_SOURCE_DIR = '/media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/1/20251212'
OUTPUT_DIR = './keypoints_txt'
IMG_EXTENSIONS = {'.png', '.jpg', '.jpeg', '.bmp', '.tiff', '.tif', '.webp'}

View File

@ -11,7 +11,7 @@ labels_dir = "keypoints_txt"
output_xml = "annotations_cvat.xml"
# 图片目录(用于 width/height
images_dir = "/media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/1/20251208"
images_dir = "/media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/1/20251212"
# 类别映射
class_mapping_reverse = {