diff --git a/YOLO—detect/annotations.xml b/YOLO—detect/annotations.xml
new file mode 100644
index 0000000..f4fd1ff
--- /dev/null
+++ b/YOLO—detect/annotations.xml
@@ -0,0 +1,122 @@
+
+
+ 1.1
+
+
+ 258
+ 12.161
+ 27
+ annotation
+ 0
+
+ 2025-12-16 02:07:59.378104+00:00
+ 2025-12-16 02:14:18.382442+00:00
+ default
+ 0
+ 26
+
+
+
+ 176
+ 0
+ 26
+ http://www.xj-robot.com:9000/api/jobs/176
+
+
+
+ huangxin
+ 2193534909@qq.com
+
+
+
+
+
+
+
+ 2025-12-16 02:14:39.603909+00:00
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/YOLO—detect/cvattodetect.py b/YOLO—detect/cvattodetect.py
new file mode 100644
index 0000000..c0b3cf7
--- /dev/null
+++ b/YOLO—detect/cvattodetect.py
@@ -0,0 +1,100 @@
+import os
+import xml.etree.ElementTree as ET
+from pathlib import Path
+
+
+def cvat_to_yolo_detect(xml_path, output_dir, class_name_to_id=None):
+ """
+ 将 CVAT 导出的 XML(目标检测模式)转换为 YOLO Detect 格式
+
+ Args:
+ xml_path (str): CVAT 导出的 XML 文件路径
+ output_dir (str): 输出 .txt 标注文件的目录
+ class_name_to_id (dict, optional): 类别名到 ID 的映射。
+ 如果为 None,则自动从 XML 的 <labels> 中按顺序分配(0,1,2...)
+ """
+ output_dir = Path(output_dir)
+ output_dir.mkdir(parents=True, exist_ok=True)
+
+ tree = ET.parse(xml_path)
+ root = tree.getroot()
+
+ # 自动提取类别顺序(如果未提供映射)
+ if class_name_to_id is None:
+ class_name_to_id = {}
+ labels_elem = root.find(".//labels")
+ if labels_elem is not None:
+ for idx, label in enumerate(labels_elem.findall("label")):
+ name = label.find("name").text
+ class_name_to_id[name] = idx
+ else:
+ print("⚠️ 未找到 <labels>,请手动提供 class_name_to_id")
+
+ print(f"类别映射: {class_name_to_id}")
+
+ # 遍历所有 <image> 节点
+ for image in root.findall("image"):
+ img_name = image.get("name")
+ width = int(image.get("width"))
+ height = int(image.get("height"))
+
+ # 构建 .txt 文件名(去掉扩展名)
+ stem = Path(img_name).stem
+ txt_path = output_dir / f"{stem}.txt"
+
+ boxes = []
+ for box in image.findall("box"):
+ label = box.get("label")
+ if label not in class_name_to_id:
+ print(f"⚠️ 未知类别 '{label}',跳过(图片: {img_name})")
+ continue
+
+ class_id = class_name_to_id[label]
+ xtl = float(box.get("xtl"))
+ ytl = float(box.get("ytl"))
+ xbr = float(box.get("xbr"))
+ ybr = float(box.get("ybr"))
+
+ # 转为 YOLO 格式(归一化)
+ x_center = (xtl + xbr) / 2 / width
+ y_center = (ytl + ybr) / 2 / height
+ w = (xbr - xtl) / width
+ h = (ybr - ytl) / height
+
+ # 限制在 [0,1](防止因标注误差越界)
+ x_center = max(0.0, min(1.0, x_center))
+ y_center = max(0.0, min(1.0, y_center))
+ w = max(0.0, min(1.0, w))
+ h = max(0.0, min(1.0, h))
+
+ boxes.append(f"{class_id} {x_center:.6f} {y_center:.6f} {w:.6f} {h:.6f}")
+
+ # 写入 .txt 文件(即使无框也创建空文件)
+ with open(txt_path, "w") as f:
+ f.write("\n".join(boxes))
+
+ print(f"✅ {img_name} → {len(boxes)} 个目标")
+
+ print(f"\n🎉 转换完成!YOLO 标注已保存至: {output_dir}")
+
+
+if __name__ == "__main__":
+ # ====== 配置区 ======
+ XML_PATH = "annotations.xml" # 替换为你的 CVAT XML 路径
+ OUTPUT_LABELS_DIR = "labels" # 输出的 YOLO .txt 目录
+
+ # 方式1:自动从 XML 提取类别(推荐)
+ CLASS_MAP = None
+
+ # 方式2:手动指定(确保与训练时一致)
+ # CLASS_MAP = {
+ # "hole": 0,
+ # "crack": 1
+ # }
+
+ # ====== 执行转换 ======
+ cvat_to_yolo_detect(
+ xml_path=XML_PATH,
+ output_dir=OUTPUT_LABELS_DIR,
+ class_name_to_id=CLASS_MAP
+ )
\ No newline at end of file
diff --git a/tool/divid_val.py b/tool/divid_val.py
index 35002ce..82ea9a4 100644
--- a/tool/divid_val.py
+++ b/tool/divid_val.py
@@ -98,8 +98,8 @@ if __name__ == "__main__":
# 修改为你自己的路径
#TRAIN_DIR = "/media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/cls-new/19cc/train"
#VAL_DIR = "/media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/cls-new/19cc/val"
- TRAIN_DIR = "/media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/1/11c/train"
- VAL_DIR = "/media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/1/11c/val"
+ TRAIN_DIR = "/media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/point2/train"
+ VAL_DIR = "/media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/point2/val1"
split_train_to_val(
train_dir=TRAIN_DIR,
val_dir=VAL_DIR,
diff --git a/tool/divid_val1.py b/tool/divid_val1.py
new file mode 100644
index 0000000..9411ca6
--- /dev/null
+++ b/tool/divid_val1.py
@@ -0,0 +1,109 @@
+import os
+import shutil
+import random
+from pathlib import Path
+
+
+def split_train_to_val(train_dir, val_dir, ratio=0.1, seed=42):
+ """
+ 从 train_dir 随机抽取 ratio 比例的 **带标签图像** 到 val_dir。
+ 自动判断是分类结构(有子文件夹)还是平铺结构(无子文件夹)。
+
+ Args:
+ train_dir (str): 训练集路径
+ val_dir (str): 验证集路径(会自动创建)
+ ratio (float): 抽取比例,如 0.1 表示 10%
+ seed (int): 随机种子,保证可复现
+ """
+ train_path = Path(train_dir)
+ val_path = Path(val_dir)
+
+ if not train_path.exists():
+ raise FileNotFoundError(f"训练目录不存在: {train_path}")
+
+ # 设置随机种子
+ random.seed(seed)
+
+ # 获取所有一级子项
+ items = [p for p in train_path.iterdir()]
+
+ # 判断是否为分类结构:所有子项都是目录且非空
+ is_classification = all(p.is_dir() for p in items) and len(items) > 0
+
+ # 定义图像扩展名
+ IMG_EXTENSIONS = {'.jpg', '.jpeg', '.png', '.bmp', '.tiff', '.gif'}
+
+ if is_classification:
+ print("📁 检测到分类结构(含类别子文件夹)")
+ for class_dir in items:
+ class_name = class_dir.name
+ src_class_dir = train_path / class_name
+ dst_class_dir = val_path / class_name
+ dst_class_dir.mkdir(parents=True, exist_ok=True)
+
+ # 只找图像文件
+ image_files = [
+ f for f in src_class_dir.iterdir()
+ if f.is_file() and f.suffix.lower() in IMG_EXTENSIONS
+ ]
+ if not image_files:
+ print(f" ⚠️ 类别 '{class_name}' 中无图像文件,跳过")
+ continue
+
+ num_val = max(1, int(len(image_files) * ratio))
+ val_images = random.sample(image_files, num_val)
+
+ for img in val_images:
+ # 移动图像
+ shutil.move(str(img), str(dst_class_dir / img.name))
+ # 移动同名 .txt 标签
+ txt_file = img.with_suffix('.txt')
+ if txt_file.exists():
+ shutil.move(str(txt_file), str(dst_class_dir / txt_file.name))
+
+ print(f" ✅ 类别 '{class_name}': {len(val_images)} 张图像已移至 val")
+
+ else:
+ print("📄 检测到平铺结构(YOLO格式:图像 + 同名 .txt 标签)")
+ val_path.mkdir(parents=True, exist_ok=True)
+
+ # 只收集图像文件(作为采样单元)
+ image_files = [
+ f for f in train_path.iterdir()
+ if f.is_file() and f.suffix.lower() in IMG_EXTENSIONS
+ ]
+
+ if not image_files:
+ print("⚠️ 训练目录中未找到任何图像文件(支持格式: jpg, png 等)")
+ return
+
+ # 随机抽取图像
+ num_val = max(1, int(len(image_files) * ratio))
+ val_images = random.sample(image_files, num_val)
+
+ # 移动选中的图像及其标签
+ for img in val_images:
+ # 移动图像
+ shutil.move(str(img), str(val_path / img.name))
+ # 移动同名 .txt
+ txt_file = img.with_suffix('.txt')
+ if txt_file.exists():
+ shutil.move(str(txt_file), str(val_path / txt_file.name))
+
+ print(f"✅ 平铺结构: 已移动 {len(val_images)} 张图像及其标签到 {val_path}")
+
+ print(f"\n🎉 分割完成!验证集已保存至: {val_path}")
+
+
+# ======================
+# 使用示例
+# ======================
+if __name__ == "__main__":
+ TRAIN_DIR = "/media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/point2/train"
+ VAL_DIR = "/media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/point2/val1"
+ split_train_to_val(
+ train_dir=TRAIN_DIR,
+ val_dir=VAL_DIR,
+ ratio=0.1,
+ seed=42
+ )
\ No newline at end of file
diff --git a/tool/save_bigangle.py b/tool/save_bigangle.py
new file mode 100644
index 0000000..c62c049
--- /dev/null
+++ b/tool/save_bigangle.py
@@ -0,0 +1,117 @@
+import cv2
+import os
+import shutil
+import numpy as np
+from ultralytics import YOLO
+
+IMG_EXTENSIONS = {'.jpg', '.jpeg', '.png', '.bmp', '.tif', '.tiff', '.webp'}
+
+
+def process_obb_images(
+ model_path,
+ image_dir,
+ large_angle_output_dir="./large_angle_images", # 新增:大角度图像保存目录
+ conf_thresh=0.15,
+ imgsz=640,
+ angle_threshold_deg=10.0 # 夹角阈值:超过即视为“大角度”
+):
+ """
+ 批量处理 OBB 图像,若任意两个目标的主方向夹角 > angle_threshold_deg,
+ 则将原图移动到 large_angle_output_dir。
+ """
+ os.makedirs(large_angle_output_dir, exist_ok=True)
+ results_dict = {}
+
+ print("加载 YOLO 模型...")
+ model = YOLO(model_path)
+ print("✅ 模型加载完成")
+
+ # 获取图像文件
+ image_files = [f for f in os.listdir(image_dir) if os.path.splitext(f.lower())[1] in IMG_EXTENSIONS]
+ if not image_files:
+ print(f"❌ 未找到图像文件:{image_dir}")
+ return results_dict
+
+ print(f"发现 {len(image_files)} 张图像待处理")
+
+ for img_filename in image_files:
+ img_path = os.path.join(image_dir, img_filename)
+ print(f"\n正在处理:{img_filename}")
+
+ img = cv2.imread(img_path)
+ if img is None:
+ print(f"❌ 跳过:无法读取图像 {img_path}")
+ continue
+
+ # 推理 OBB
+ results = model(img, save=False, imgsz=imgsz, conf=conf_thresh)  # task (OBB) is inferred from the model; 'mode' is not a predict() argument and is overridden internally
+ result = results[0]
+
+ # 提取旋转角
+ boxes = result.obb
+ angles_deg = []
+ has_large_angle = False # 标记是否有大角度的目标
+ if boxes is None or len(boxes) == 0:
+ print("❌ 该图像中未检测到任何目标")
+ else:
+ for i, box in enumerate(boxes):
+ cls = int(box.cls.cpu().numpy()[0])
+ conf = box.conf.cpu().numpy()[0]
+ cx, cy, w, h, r_rad = box.xywhr.cpu().numpy()[0]
+ direction = r_rad if w >= h else r_rad + np.pi / 2
+ direction = direction % np.pi
+ angle_deg = np.degrees(direction)
+ angles_deg.append(angle_deg)
+ print(f" Box {i + 1}: Class={cls}, Conf={conf:.3f}, 主方向={angle_deg:.2f}°")
+
+ # 计算两两夹角
+ pairwise_angles_deg = []
+ if len(angles_deg) >= 2:
+ for i in range(len(angles_deg)):
+ for j in range(i + 1, len(angles_deg)):
+ diff_rad = abs(np.radians(angles_deg[i]) - np.radians(angles_deg[j]))
+ min_diff_rad = min(diff_rad, np.pi - diff_rad)
+ angle_diff_deg = np.degrees(min_diff_rad)
+ pairwise_angles_deg.append(angle_diff_deg)
+ print(f" Box {i + 1} 与 Box {j + 1} 夹角: {angle_diff_deg:.2f}°")
+ if angle_diff_deg > angle_threshold_deg:
+ has_large_angle = True
+ elif len(angles_deg) == 1:
+ print(" 仅检测到一个目标,无法计算夹角")
+
+ # 保存结果
+ results_dict[img_filename] = {
+ "angles_deg": angles_deg,
+ "pairwise_angles_deg": pairwise_angles_deg,
+ "has_large_angle": has_large_angle
+ }
+
+ # 如果存在大角度,移动原图到新文件夹
+ if has_large_angle:
+ print(f"发现夹角 > {angle_threshold_deg}°,移动原图到大角度文件夹")
+ shutil.move(img_path, os.path.join(large_angle_output_dir, img_filename))
+
+ print(f"\n所有图像处理完成!大角度图像已移动至: {large_angle_output_dir}")
+ return results_dict
+
+
+# ------------------- 测试调用 -------------------
+if __name__ == "__main__":
+ MODEL_PATH = r'/home/hx/yolo/ultralytics_yolo11-main/runs/train/exp_obb_new3/weights/best.pt'
+ IMAGE_SOURCE_DIR = r"/media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/1/ready"
+ LARGE_ANGLE_DIR = "/media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/1/large_angle_images" # 新增输出目录
+
+ results = process_obb_images(
+ model_path=MODEL_PATH,
+ image_dir=IMAGE_SOURCE_DIR,
+ large_angle_output_dir=LARGE_ANGLE_DIR,
+ conf_thresh=0.15,
+ imgsz=640,
+ angle_threshold_deg=10.0
+ )
+
+ # 可选:打印大角度图像
+ large_angle_imgs = [name for name, info in results.items() if info["has_large_angle"]]
+ print(f"\n共 {len(large_angle_imgs)} 张图像包含 >10° 的夹角:")
+ for name in large_angle_imgs:
+ print(f" - {name}")
\ No newline at end of file
diff --git a/ultralytics_yolo11-main/train_xiantiao_detect.py b/ultralytics_yolo11-main/train_xiantiao_detect.py
new file mode 100644
index 0000000..7ca5ecd
--- /dev/null
+++ b/ultralytics_yolo11-main/train_xiantiao_detect.py
@@ -0,0 +1,19 @@
+from ultralytics import YOLO
+
+if __name__ == '__main__':
+ #model = YOLO(r'/home/hx/yolo/ultralytics_yolo11-main/ultralytics/cfg/models/11/yolo11-obb.yaml')
+ model = YOLO(r'/home/hx/yolo/ultralytics_yolo11-main/ultralytics/cfg/models/11/yolo11.yaml')
+ results = model.train(
+ data='xiantiaodata.yaml',
+ epochs=2000,
+ imgsz=640,
+ batch=4,
+ workers=10,
+ device='0',
+ project='runs/train',
+ name='exp_detect',
+ exist_ok=False,
+ optimizer='AdamW',
+ lr0=0.0005,
+ patience=0,
+ )
diff --git a/ultralytics_yolo11-main/ultralytics/cfg/models/11/yolo11.yaml b/ultralytics_yolo11-main/ultralytics/cfg/models/11/yolo11.yaml
index 67dd779..80e5677 100644
--- a/ultralytics_yolo11-main/ultralytics/cfg/models/11/yolo11.yaml
+++ b/ultralytics_yolo11-main/ultralytics/cfg/models/11/yolo11.yaml
@@ -2,7 +2,7 @@
# YOLO11 object detection model with P3-P5 outputs. For Usage examples see https://docs.ultralytics.com/tasks/detect
# Parameters
-nc: 1 # number of classes
+nc: 2 # number of classes
scales: # model compound scaling constants, i.e. 'model=yolo11n.yaml' will call yolo11.yaml with scale 'n'
# [depth, width, max_channels]
n: [0.50, 0.25, 1024] # summary: 319 layers, 2624080 parameters, 2624064 gradients, 6.6 GFLOPs
diff --git a/ultralytics_yolo11-main/xiantiaodata.yaml b/ultralytics_yolo11-main/xiantiaodata.yaml
new file mode 100644
index 0000000..b486868
--- /dev/null
+++ b/ultralytics_yolo11-main/xiantiaodata.yaml
@@ -0,0 +1,7 @@
+path: /home/hx/开发/ML_xiantiao/class_xiantiao_pc/test_image # 数据集所在路径
+train: train # 训练图像目录(相对 path,非 train.txt 列表文件)
+val: val # 验证图像目录(相对 path)
+test: test # 测试图像目录(相对 path)
+
+nc: 2
+names: ['hole','crack']
diff --git a/yolo11_point/annotations.xml b/yolo11_point/annotations.xml
deleted file mode 100644
index 0eb0cb7..0000000
--- a/yolo11_point/annotations.xml
+++ /dev/null
@@ -1,1750 +0,0 @@
-
-
- 1.1
-
-
- 255
- 12.08
- 292
- annotation
- 0
-
- 2025-12-08 09:30:47.955563+00:00
- 2025-12-09 08:07:45.872334+00:00
- default
- 0
- 291
-
-
-
- 173
- 0
- 291
- http://www.xj-robot.com:9000/api/jobs/173
-
-
-
- huangxin
- 2193534909@qq.com
-
-
-
-
-
-
-
- 2025-12-09 09:57:41.985282+00:00
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/yolo11_point/trans_cvattopoint.py b/yolo11_point/trans_cvattopoint.py
index 539be97..fe49750 100644
--- a/yolo11_point/trans_cvattopoint.py
+++ b/yolo11_point/trans_cvattopoint.py
@@ -3,7 +3,7 @@ import os
# =================== 配置 ===================
xml_file = 'annotations.xml' # 你的 CVAT XML 文件路径
-images_dir = '/media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/1/20251208' # 图像文件夹(用于读取宽高)
+images_dir = '/media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/1/20251209' # 图像文件夹(用于读取宽高)
output_dir = 'labels_keypoints' # 输出 YOLO 标签目录
os.makedirs(output_dir, exist_ok=True)
diff --git a/zhuangtai_class_cls/tuili_f_yuantusave—2cls.py b/zhuangtai_class_cls/tuili_f_yuantusave—2cls.py
index b9782c4..a6d1426 100644
--- a/zhuangtai_class_cls/tuili_f_yuantusave—2cls.py
+++ b/zhuangtai_class_cls/tuili_f_yuantusave—2cls.py
@@ -1,4 +1,5 @@
import os
+import shutil
from pathlib import Path
import cv2
from ultralytics import YOLO
@@ -6,15 +7,15 @@ from ultralytics import YOLO
# ---------------------------
# 配置路径(请按需修改)
# ---------------------------
-MODEL_PATH = "gaiban.pt" # 你的二分类模型
-INPUT_FOLDER = "/media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/1/12.2" # 输入图像文件夹
-OUTPUT_ROOT = "/media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/1/12.2.2" # 输出根目录(会生成 合格/不合格 子文件夹)
+MODEL_PATH = "xialiao.pt" # 你的二分类模型
+INPUT_FOLDER = "/media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/1/ready" # 输入图像文件夹
+OUTPUT_ROOT = "/media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/1/ready/result" # 输出根目录
# 类别映射(必须与训练时的 data.yaml 顺序一致)
CLASS_NAMES = {0: "不合格", 1: "合格"}
# ---------------------------
-# 批量推理函数
+# 批量推理函数(移动原图)
# ---------------------------
def batch_classify(model_path, input_folder, output_root):
# 加载模型
@@ -36,27 +37,32 @@ def batch_classify(model_path, input_folder, output_root):
if img_path.suffix.lower() not in IMG_EXTS:
continue
- # 读取图像
+ # 读取图像(用于推理)
img = cv2.imread(str(img_path))
if img is None:
- print(f"❌ 无法读取: {img_path}")
+ print(f"❌ 无法读取图像(可能已损坏或被占用): {img_path}")
continue
- # 推理(整图)
+ # 推理(整图分类)
results = model(img)
probs = results[0].probs.data.cpu().numpy()
pred_class_id = int(probs.argmax())
pred_label = CLASS_NAMES[pred_class_id]
confidence = float(probs[pred_class_id])
- # 保存原图到对应文件夹
+ # ⚠️ 关键修改:移动原图(不是复制)
dst = output_root / pred_label / img_path.name
- cv2.imwrite(str(dst), img)
+ try:
+ shutil.move(str(img_path), str(dst))
+ except Exception as e:
+ print(f"❌ 移动失败 {img_path} → {dst}: {e}")
+ continue
print(f"✅ {img_path.name} → {pred_label} ({confidence:.2f})")
processed += 1
- print(f"\n🎉 共处理 {processed} 张图像,结果已保存至: {output_root}")
+ print(f"\n🎉 共处理并移动 {processed} 张图像,结果已保存至: {output_root}")
+
# ---------------------------
# 运行入口
diff --git a/zhuangtai_class_cls/cement_cls_640v1.pt b/zhuangtai_class_cls/xialiao.pt
similarity index 100%
rename from zhuangtai_class_cls/cement_cls_640v1.pt
rename to zhuangtai_class_cls/xialiao.pt
diff --git a/推理图片反向上传CVAT/detect/trans_obbtocvat.py b/推理图片反向上传CVAT/detect/trans_obbtocvat.py
new file mode 100644
index 0000000..9f439d1
--- /dev/null
+++ b/推理图片反向上传CVAT/detect/trans_obbtocvat.py
@@ -0,0 +1,152 @@
+# yolo_detect_to_cvat.py
+import os
+import xml.etree.ElementTree as ET
+from pathlib import Path
+import cv2
+
+IMG_EXTENSIONS = {'.jpg', '.jpeg', '.png', '.bmp', '.tif', '.tiff', '.webp'}
+
+
+def yolo_detect_to_cvat_xml(label_dir, image_dir, class_id_to_name, output_xml):
+ """
+ 将 YOLO Detect 格式的标签(class cx cy w h)转换为 CVAT XML 格式。
+ """
+
+ label_dir = Path(label_dir)
+ image_dir = Path(image_dir)
+
+ # ======== 构建基本 XML 结构 ========
+ root = ET.Element("annotations")
+ ET.SubElement(root, "version").text = "1.1"
+
+ meta = ET.SubElement(root, "meta")
+ task = ET.SubElement(meta, "task")
+
+ txt_files = sorted([f for f in label_dir.glob("*.txt")])
+ total = len(txt_files)
+
+ ET.SubElement(task, "id").text = "1"
+ ET.SubElement(task, "name").text = "yolo_detect_import"
+ ET.SubElement(task, "size").text = str(total)
+ ET.SubElement(task, "mode").text = "annotation"
+ ET.SubElement(task, "overlap").text = "0"
+ ET.SubElement(task, "bugtracker").text = ""
+ ET.SubElement(task, "created").text = ""
+ ET.SubElement(task, "updated").text = ""
+ ET.SubElement(task, "subset").text = "default"
+ ET.SubElement(task, "start_frame").text = "0"
+ ET.SubElement(task, "stop_frame").text = str(max(total - 1, 0))  # clamp: avoid "-1" when no label files were found
+ ET.SubElement(task, "frame_filter").text = ""
+
+ # labels
+ labels_elem = ET.SubElement(task, "labels")
+ for name in class_id_to_name.values():
+ lab = ET.SubElement(labels_elem, "label")
+ ET.SubElement(lab, "name").text = name
+ ET.SubElement(lab, "color").text = "#ffffff"
+ ET.SubElement(lab, "type").text = "any"
+ ET.SubElement(lab, "attributes")
+
+ ET.SubElement(meta, "dumped").text = ""
+
+ # ======== 处理每张图片 ========
+ for idx, txt_file in enumerate(txt_files):
+ stem = txt_file.stem
+
+ # 自动匹配图像文件(支持多种扩展名)
+ img_path = None
+ for ext in IMG_EXTENSIONS:
+ p = image_dir / f"{stem}{ext}"
+ if p.exists():
+ img_path = p
+ break
+ p = image_dir / f"{stem}{ext.upper()}"  # also match uppercase extensions (e.g. .JPG); the stem must stay as-is to match the label file
+ if p.exists():
+ img_path = p
+ break
+
+ if img_path is None:
+ print(f"⚠ 找不到对应图像: {stem}")
+ continue
+
+ # 获取图像尺寸(用于反归一化)
+ img = cv2.imread(str(img_path))
+ if img is None:
+ print(f"⚠ 无法读取图像: {img_path},跳过")
+ H, W = 1080, 1920 # fallback
+ else:
+ H, W = img.shape[:2]
+
+ # 创建 <image> 节点
+ image_elem = ET.SubElement(root, "image", {
+ "id": str(idx),
+ "name": img_path.name,
+ "width": str(W),
+ "height": str(H)
+ })
+
+ # 读取 YOLO Detect 标签
+ with open(txt_file, "r") as f:
+ for line in f:
+ line = line.strip()
+ if not line:
+ continue
+ parts = line.split()
+ if len(parts) != 5:
+ print(f"⚠ 标签格式错误(应为5列): {line} in {txt_file}")
+ continue
+
+ cls_id = int(parts[0])
+ cx, cy, bw, bh = map(float, parts[1:])
+
+ # 反归一化
+ cx_abs = cx * W
+ cy_abs = cy * H
+ w_abs = bw * W
+ h_abs = bh * H
+
+ # 计算左上和右下
+ xtl = cx_abs - w_abs / 2
+ ytl = cy_abs - h_abs / 2
+ xbr = cx_abs + w_abs / 2
+ ybr = cy_abs + h_abs / 2
+
+ # 边界裁剪(防止越界)
+ xtl = max(0, min(W, xtl))
+ ytl = max(0, min(H, ytl))
+ xbr = max(0, min(W, xbr))
+ ybr = max(0, min(H, ybr))
+
+ # 添加 box(无 rotation 字段!)
+ ET.SubElement(image_elem, "box", {
+ "label": class_id_to_name.get(cls_id, f"class_{cls_id}"),
+ "source": "manual",
+ "occluded": "0",
+ "xtl": f"{xtl:.2f}",
+ "ytl": f"{ytl:.2f}",
+ "xbr": f"{xbr:.2f}",
+ "ybr": f"{ybr:.2f}",
+ "z_order": "0"
+ })
+
+ print(f"✔ 处理 {img_path.name}")
+
+ # 保存 XML
+ tree = ET.ElementTree(root)
+ tree.write(output_xml, encoding="utf-8", xml_declaration=True)
+ print(f"\n✅ 已生成 CVAT XML 文件: {output_xml}")
+
+
+# ------------------- 主函数 -------------------
+if __name__ == "__main__":
+ CLASS_MAP = {
+ 0: "hole",
+ 1: "crack"
+ }
+
+ yolo_detect_to_cvat_xml(
+ label_dir="/home/hx/yolo/推理图片反向上传CVAT/detect/inference_results/labels",
+ image_dir="/home/hx/开发/ML_xiantiao/class_xiantiao_pc/test_image/train",
+ class_id_to_name=CLASS_MAP,
+ output_xml="detect_annotations.xml"
+ )
\ No newline at end of file
diff --git a/推理图片反向上传CVAT/detect/tuili_save_txt_f.py b/推理图片反向上传CVAT/detect/tuili_save_txt_f.py
new file mode 100644
index 0000000..328613e
--- /dev/null
+++ b/推理图片反向上传CVAT/detect/tuili_save_txt_f.py
@@ -0,0 +1,135 @@
+import os
+import cv2
+from pathlib import Path
+from ultralytics import YOLO
+
+IMG_EXTENSIONS = {'.jpg', '.jpeg', '.png', '.bmp', '.tif', '.tiff', '.webp'}
+
+
+class ObjectDetector:
+ """封装 YOLO 目标检测模型"""
+
+ def __init__(self, model_path):
+ if not os.path.exists(model_path):
+ raise FileNotFoundError(f"模型文件不存在: {model_path}")
+ self.model = YOLO(model_path)
+ print(f"[INFO] 成功加载 YOLO 目标检测模型: {model_path}")
+
+ def detect(self, img_np, conf_threshold=0.0):
+ """返回所有置信度 >= conf_threshold 的检测结果"""
+ results = self.model.predict(img_np, conf=conf_threshold, verbose=False)
+ detections = []
+ for result in results:
+ boxes = result.boxes.cpu().numpy()
+ for box in boxes:
+ detection_info = {
+ 'bbox_xyxy': box.xyxy[0], # [x1, y1, x2, y2]
+ 'confidence': float(box.conf.item()),
+ 'class_id': int(box.cls.item())
+ }
+ detections.append(detection_info)
+ return detections
+
+
+def save_yolo_detect_labels_from_folder(
+ model_path,
+ image_dir,
+ output_dir,
+ conf_threshold=0.5,
+ label_map=None  # 未在函数体中使用;保留参数以兼容现有调用方(避免可变默认参数)
+):
+ """
+ 对 image_dir 中所有图像进行 YOLO Detect 推理,
+ 每个类别保留最高置信度框,保存为 YOLO 格式的 .txt 标签文件。
+
+ YOLO 格式:
+ """
+ image_dir = Path(image_dir)
+ output_dir = Path(output_dir)
+ labels_dir = output_dir / "labels"
+ labels_dir.mkdir(parents=True, exist_ok=True)
+
+ # 获取图像列表
+ image_files = [
+ f for f in sorted(os.listdir(image_dir))
+ if os.path.splitext(f.lower())[1] in IMG_EXTENSIONS
+ ]
+ if not image_files:
+ print(f"❌ 未在 {image_dir} 中找到支持的图像文件")
+ return
+
+ print(f"共找到 {len(image_files)} 张图像,开始推理...")
+ detector = ObjectDetector(model_path)
+
+ for img_filename in image_files:
+ img_path = image_dir / img_filename
+ stem = Path(img_filename).stem
+ txt_path = labels_dir / f"{stem}.txt"
+
+ # 读图
+ img = cv2.imread(str(img_path))
+ if img is None:
+ print(f"⚠️ 跳过无效图像: {img_path}")
+ txt_path.write_text("") # 写空文件
+ continue
+
+ H, W = img.shape[:2]
+
+ # 推理(获取所有 ≥ conf_threshold 的框)
+ all_detections = detector.detect(img, conf_threshold=conf_threshold)
+
+ # 按类别保留最高置信度框
+ best_per_class = {}
+ for det in all_detections:
+ cls_id = det['class_id']
+ if cls_id not in best_per_class or det['confidence'] > best_per_class[cls_id]['confidence']:
+ best_per_class[cls_id] = det
+
+ top_detections = list(best_per_class.values())
+
+ # 转为 YOLO 格式并写入
+ lines = []
+ for det in top_detections:
+ x1, y1, x2, y2 = det['bbox_xyxy']
+ cx = (x1 + x2) / 2.0
+ cy = (y1 + y2) / 2.0
+ bw = x2 - x1
+ bh = y2 - y1
+
+ # 归一化
+ cx_norm = cx / W
+ cy_norm = cy / H
+ w_norm = bw / W
+ h_norm = bh / H
+
+ # 限制在 [0, 1]
+ cx_norm = max(0.0, min(1.0, cx_norm))
+ cy_norm = max(0.0, min(1.0, cy_norm))
+ w_norm = max(0.0, min(1.0, w_norm))
+ h_norm = max(0.0, min(1.0, h_norm))
+
+ line = f"{det['class_id']} {cx_norm:.6f} {cy_norm:.6f} {w_norm:.6f} {h_norm:.6f}"
+ lines.append(line)
+
+ # 写入标签文件
+ with open(txt_path, "w") as f:
+ if lines:
+ f.write("\n".join(lines) + "\n")
+
+ print(f"✅ {img_filename} -> {len(lines)} 个检测框已保存")
+
+ print(f"\n🎉 全部完成!标签文件保存在: {labels_dir}")
+
+
+# ------------------- 主函数调用 -------------------
+if __name__ == "__main__":
+ MODEL_PATH = "/home/hx/yolo/ultralytics_yolo11-main/runs/train/exp_detect/weights/best.pt"
+ IMAGE_DIR = "/home/hx/开发/ML_xiantiao/class_xiantiao_pc/test_image/train"
+ OUTPUT_DIR = "./inference_results"
+
+ save_yolo_detect_labels_from_folder(
+ model_path=MODEL_PATH,
+ image_dir=IMAGE_DIR,
+ output_dir=OUTPUT_DIR,
+ conf_threshold=0.5
+ )
\ No newline at end of file
diff --git a/推理图片反向上传CVAT/point/point_test.py b/推理图片反向上传CVAT/point/point_test.py
index 4f97f72..c96c005 100644
--- a/推理图片反向上传CVAT/point/point_test.py
+++ b/推理图片反向上传CVAT/point/point_test.py
@@ -5,7 +5,7 @@ import os
# ====================== 用户配置 ======================
MODEL_PATH = 'point.pt'
-IMAGE_SOURCE_DIR = '/media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/1/20251208'
+IMAGE_SOURCE_DIR = '/media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/1/20251212'
OUTPUT_DIR = './keypoints_txt'
IMG_EXTENSIONS = {'.png', '.jpg', '.jpeg', '.bmp', '.tiff', '.tif', '.webp'}
diff --git a/推理图片反向上传CVAT/point/trans_pointtocvat.py b/推理图片反向上传CVAT/point/trans_pointtocvat.py
index fe08fe7..2ced821 100644
--- a/推理图片反向上传CVAT/point/trans_pointtocvat.py
+++ b/推理图片反向上传CVAT/point/trans_pointtocvat.py
@@ -11,7 +11,7 @@ labels_dir = "keypoints_txt"
output_xml = "annotations_cvat.xml"
# 图片目录(用于 width/height)
-images_dir = "/media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/1/20251208"
+images_dir = "/media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/1/20251212"
# 类别映射
class_mapping_reverse = {