This commit is contained in:
琉璃月光
2025-10-21 14:11:52 +08:00
parent 349449f2b7
commit df7c0730f5
363 changed files with 5386 additions and 578 deletions

View File

@ -1,4 +1,4 @@
path: /media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/seg/dataset2 # 数据集所在路径
path: /media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/seg/yemian_seg_camera01 # 数据集所在路径
train: train # train.txt (or train/ dir) under the dataset path
val: val # val.txt (or val/ dir) under the dataset path
test: test # test.txt (or test/ dir) under the dataset path

View File

@ -0,0 +1,59 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
# Custom keypoint (pose) dataset + YOLO11-pose model configuration
# 4 keypoints with 3 dims each (x, y, visible) — see kpt_shape below
# Documentation: https://docs.ultralytics.com/tasks/pose/
# Example usage: yolo train model=yolo11n-pose.yaml data=<this file>.yaml
# NOTE(review): the original header was copy-pasted DOTA8/OBB boilerplate and did not match this file's contents
# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
path: /media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/point2
train: train # train.txt (or train/ dir) under the dataset path
val: val # val.txt (or val/ dir) under the dataset path
test: test # test.txt (or test/ dir) under the dataset path
nc: 1 # number of classes
kpt_shape: [4, 3] # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible)
scales: # model compound scaling constants, i.e. 'model=yolo11n-pose.yaml' will call yolo11.yaml with scale 'n'
# [depth, width, max_channels]
n: [0.50, 0.25, 1024] # summary: 344 layers, 2908507 parameters, 2908491 gradients, 7.7 GFLOPs
s: [0.50, 0.50, 1024] # summary: 344 layers, 9948811 parameters, 9948795 gradients, 23.5 GFLOPs
m: [0.50, 1.00, 512] # summary: 434 layers, 20973273 parameters, 20973257 gradients, 72.3 GFLOPs
l: [1.00, 1.00, 512] # summary: 656 layers, 26230745 parameters, 26230729 gradients, 91.4 GFLOPs
x: [1.00, 1.50, 512] # summary: 656 layers, 58889881 parameters, 58889865 gradients, 204.3 GFLOPs
# YOLO11n backbone
backbone:
# [from, repeats, module, args]
- [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
- [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
- [-1, 2, C3k2, [256, False, 0.25]]
- [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
- [-1, 2, C3k2, [512, False, 0.25]]
- [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
- [-1, 2, C3k2, [512, True]]
- [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
- [-1, 2, C3k2, [1024, True]]
- [-1, 1, SPPF, [1024, 5]] # 9
- [-1, 2, C2PSA, [1024]] # 10
# YOLO11n head
head:
- [-1, 1, nn.Upsample, [None, 2, "nearest"]]
- [[-1, 6], 1, Concat, [1]] # cat backbone P4
- [-1, 2, C3k2, [512, False]] # 13
- [-1, 1, nn.Upsample, [None, 2, "nearest"]]
- [[-1, 4], 1, Concat, [1]] # cat backbone P3
- [-1, 2, C3k2, [256, False]] # 16 (P3/8-small)
- [-1, 1, Conv, [256, 3, 2]]
- [[-1, 13], 1, Concat, [1]] # cat head P4
- [-1, 2, C3k2, [512, False]] # 19 (P4/16-medium)
- [-1, 1, Conv, [512, 3, 2]]
- [[-1, 10], 1, Concat, [1]] # cat head P5
- [-1, 2, C3k2, [1024, True]] # 22 (P5/32-large)
- [[16, 19, 22], 1, Pose, [nc, kpt_shape]] # Detect(P3, P4, P5)

View File

@ -0,0 +1,18 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
# Custom OBB (oriented bounding box) dataset: single 'clamp' class
# Documentation: https://docs.ultralytics.com/tasks/obb/
# Example usage: yolo train model=yolo11n-obb.yaml data=<this file>.yaml
# NOTE(review): the original header was copy-pasted DOTA8 boilerplate and did not match this dataset
# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
path: /media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/obb4
train: train # train.txt (or train/ dir) under the dataset path
val: val # val.txt (or val/ dir) under the dataset path
test: test # test.txt (or test/ dir) under the dataset path
nc: 1
names: ['clamp']

View File

@ -1,4 +1,4 @@
path: /media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/seg/resize_seg2 # 数据集所在路径
path: /media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/seg/resize_camera01 # 数据集所在路径
train: train # train.txt (or train/ dir) under the dataset path
val: val # val.txt (or val/ dir) under the dataset path
test: test # test.txt (or test/ dir) under the dataset path

View File

@ -0,0 +1,19 @@
from ultralytics import YOLO

if __name__ == '__main__':
    # Resume-style training: continue from the last checkpoint of exp_ailai3.
    # model = YOLO(r'/home/hx/yolo/ultralytics_yolo11-main/ultralytics/cfg/models/11/yolo11-obb.yaml')
    model = YOLO(r'/home/hx/yolo/ultralytics_yolo11-main/runs/train/exp_ailai3/weights/last.pt')

    # Collect all training hyperparameters in one place, then launch.
    train_args = dict(
        data='data_ailai.yaml',   # dataset configuration file
        epochs=300,               # total training epochs
        imgsz=640,                # input image size
        batch=4,                  # batch size
        workers=10,               # dataloader worker processes
        device='0',               # GPU id
        project='runs/train',     # output project directory
        name='exp_ailai',         # experiment name
        exist_ok=False,           # fail instead of overwriting an existing run
        optimizer='AdamW',        # optimizer
        lr0=0.0001,               # initial learning rate
        patience=0,               # 0 disables early stopping
    )
    results = model.train(**train_args)

View File

@ -16,3 +16,4 @@ if __name__ == '__main__':
optimizer='AdamW',
lr0=0.001,
)
#

View File

@ -0,0 +1,40 @@
from ultralytics import YOLO

if __name__ == '__main__':
    # Fine-tune an OBB (oriented bounding box) model from a local checkpoint.
    # model = YOLO('yolo11m-obb.pt')  # official pretrained weights (if available)
    model = YOLO(r'/home/hx/yolo/ultralytics_yolo11-main/obb.pt')  # local checkpoint (prefer best.pt over last.pt)

    results = model.train(
        data='obb_data1.yaml',      # dataset configuration file
        epochs=300,                 # total training epochs
        patience=0,                 # NOTE: 0 disables early stopping entirely
        imgsz=640,                  # input image size
        batch=4,                    # batch size
        workers=10,                 # dataloader worker processes
        device='0',                 # GPU id
        project='runs/train',       # output project directory
        name='exp_obb5',            # experiment name (increment per run)
        exist_ok=False,             # fail instead of overwriting an existing run
        # Optimizer settings
        optimizer='AdamW',
        lr0=0.0005,                 # initial learning rate
        weight_decay=0.01,
        momentum=0.937,
        # Data augmentation (important for OBB)
        degrees=5.0,                # random rotation within ±5°
        translate=0.1,              # random translation fraction
        scale=0.5,                  # random scale gain
        shear=1.0,                  # random shear (degrees)
        flipud=0.0,                 # vertical flip disabled (angle labels are error-prone)
        fliplr=0.5,                 # horizontal flip — well supported for OBB
        hsv_h=0.015,                # hue jitter
        hsv_s=0.7,                  # saturation jitter
        hsv_v=0.4,                  # value (brightness) jitter
        # Misc
        close_mosaic=50,            # disable Mosaic augmentation for the final 50 epochs
        val=True,                   # run validation every epoch
    )

View File

@ -4,7 +4,7 @@ if __name__ == '__main__':
model = YOLO(r'/home/hx/yolo/ultralytics_yolo11-main/ultralytics/cfg/models/11/yolo11-cls-resize.yaml')
results = model.train(
data='/media/hx/04e879fa-d697-4b02-ac7e-a4148876ebb0/dataset/classdata3',
epochs=1000,
epochs=100,
imgsz=640,
batch=4,
workers=10,
@ -13,6 +13,6 @@ if __name__ == '__main__':
name='exp_cls',
exist_ok=False,
optimizer='AdamW',
lr0=0.001,
lr0=0.0003,
patience=0,
)

View File

@ -11,7 +11,7 @@ if __name__ == '__main__':
batch=4, # 每批图像数量
workers=10, # 数据加载线程数
device='0', # 使用 GPU 0
project='runs/train/seg_j', # 保存项目目录
project='runs/train/seg_01', # 保存项目目录
name='exp', # 实验名称
exist_ok=False, # 不覆盖已有实验
optimizer='AdamW', # 可选优化器

View File

@ -7,8 +7,8 @@ if __name__ == '__main__':
# 开始训练
results = model.train(
data='/home/hx/yolo/ultralytics_yolo11-main/resize_seg_data.yaml', # 数据配置文件
epochs=1000, # 训练轮数
imgsz=640,
epochs=100, # 训练轮数
imgsz=1280,
batch=4, # 每批图像数量
workers=10, # 数据加载线程数
device='0', # 使用 GPU 0
@ -16,6 +16,6 @@ if __name__ == '__main__':
name='exp', # 实验名称
exist_ok=False, # 不覆盖已有实验
optimizer='AdamW', # 可选优化器
lr0=0.001, # 初始学习率
patience=500, # 早停轮数
lr0=0.0005, # 初始学习率
patience=0, # 早停轮数
)

View File

@ -0,0 +1,48 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
# YOLO11-pose keypoints/pose estimation model. For Usage examples see https://docs.ultralytics.com/tasks/pose
# Parameters
nc: 1 # number of classes
kpt_shape: [4, 3] # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible)
scales: # model compound scaling constants, i.e. 'model=yolo11n-pose.yaml' will call yolo11.yaml with scale 'n'
# [depth, width, max_channels]
n: [0.50, 0.25, 1024] # summary: 344 layers, 2908507 parameters, 2908491 gradients, 7.7 GFLOPs
s: [0.50, 0.50, 1024] # summary: 344 layers, 9948811 parameters, 9948795 gradients, 23.5 GFLOPs
m: [0.50, 1.00, 512] # summary: 434 layers, 20973273 parameters, 20973257 gradients, 72.3 GFLOPs
l: [1.00, 1.00, 512] # summary: 656 layers, 26230745 parameters, 26230729 gradients, 91.4 GFLOPs
x: [1.00, 1.50, 512] # summary: 656 layers, 58889881 parameters, 58889865 gradients, 204.3 GFLOPs
# YOLO11n backbone
backbone:
# [from, repeats, module, args]
- [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
- [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
- [-1, 2, C3k2, [256, False, 0.25]]
- [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
- [-1, 2, C3k2, [512, False, 0.25]]
- [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
- [-1, 2, C3k2, [512, True]]
- [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
- [-1, 2, C3k2, [1024, True]]
- [-1, 1, SPPF, [1024, 5]] # 9
- [-1, 2, C2PSA, [1024]] # 10
# YOLO11n head
head:
- [-1, 1, nn.Upsample, [None, 2, "nearest"]]
- [[-1, 6], 1, Concat, [1]] # cat backbone P4
- [-1, 2, C3k2, [512, False]] # 13
- [-1, 1, nn.Upsample, [None, 2, "nearest"]]
- [[-1, 4], 1, Concat, [1]] # cat backbone P3
- [-1, 2, C3k2, [256, False]] # 16 (P3/8-small)
- [-1, 1, Conv, [256, 3, 2]]
- [[-1, 13], 1, Concat, [1]] # cat head P4
- [-1, 2, C3k2, [512, False]] # 19 (P4/16-medium)
- [-1, 1, Conv, [512, 3, 2]]
- [[-1, 10], 1, Concat, [1]] # cat head P5
- [-1, 2, C3k2, [1024, True]] # 22 (P5/32-large)
- [[16, 19, 22], 1, Pose, [nc, kpt_shape]] # Detect(P3, P4, P5)