commit e41a7455e584fe264f6547d3f30109619149e7cc Author: pengqi Date: Fri Oct 31 17:21:02 2025 +0800 分类部署例程 diff --git a/.idea/.gitignore b/.idea/.gitignore new file mode 100644 index 0000000..359bb53 --- /dev/null +++ b/.idea/.gitignore @@ -0,0 +1,3 @@ +# 默认忽略的文件 +/shelf/ +/workspace.xml diff --git a/.idea/inference_with_lite.iml b/.idea/inference_with_lite.iml new file mode 100644 index 0000000..9442837 --- /dev/null +++ b/.idea/inference_with_lite.iml @@ -0,0 +1,8 @@ + + + + + + + + \ No newline at end of file diff --git a/.idea/inspectionProfiles/Project_Default.xml b/.idea/inspectionProfiles/Project_Default.xml new file mode 100644 index 0000000..78dfc6d --- /dev/null +++ b/.idea/inspectionProfiles/Project_Default.xml @@ -0,0 +1,18 @@ + + + + \ No newline at end of file diff --git a/.idea/inspectionProfiles/profiles_settings.xml b/.idea/inspectionProfiles/profiles_settings.xml new file mode 100644 index 0000000..105ce2d --- /dev/null +++ b/.idea/inspectionProfiles/profiles_settings.xml @@ -0,0 +1,6 @@ + + + + \ No newline at end of file diff --git a/.idea/misc.xml b/.idea/misc.xml new file mode 100644 index 0000000..ff732a4 --- /dev/null +++ b/.idea/misc.xml @@ -0,0 +1,7 @@ + + + + + + \ No newline at end of file diff --git a/.idea/modules.xml b/.idea/modules.xml new file mode 100644 index 0000000..d1bc327 --- /dev/null +++ b/.idea/modules.xml @@ -0,0 +1,8 @@ + + + + + + + + \ No newline at end of file diff --git a/.idea/vcs.xml b/.idea/vcs.xml new file mode 100644 index 0000000..94a25f7 --- /dev/null +++ b/.idea/vcs.xml @@ -0,0 +1,6 @@ + + + + + + \ No newline at end of file diff --git a/222.jpg b/222.jpg new file mode 100644 index 0000000..97bbb2c Binary files /dev/null and b/222.jpg differ diff --git a/cls_inference.py b/cls_inference.py new file mode 100644 index 0000000..6b19ccc --- /dev/null +++ b/cls_inference.py @@ -0,0 +1,164 @@ +import cv2 +import numpy as np +import platform +from .labels import labels # 确保这个文件存在 +from rknnlite.api import RKNNLite + 
+ +# ------------------- 核心:全局变量存储RKNN模型实例(确保只加载一次) ------------------- +# 初始化为None,首次调用时加载模型,后续直接复用 +_global_rknn_instance = None + +# device tree for RK356x/RK3576/RK3588 +DEVICE_COMPATIBLE_NODE = '/proc/device-tree/compatible' + +def get_host(): + # get platform and device type + system = platform.system() + machine = platform.machine() + os_machine = system + '-' + machine + if os_machine == 'Linux-aarch64': + try: + with open(DEVICE_COMPATIBLE_NODE) as f: + device_compatible_str = f.read() + if 'rk3562' in device_compatible_str: + host = 'RK3562' + elif 'rk3576' in device_compatible_str: + host = 'RK3576' + elif 'rk3588' in device_compatible_str: + host = 'RK3588' + else: + host = 'RK3566_RK3568' + except IOError: + print('Read device node {} failed.'.format(DEVICE_COMPATIBLE_NODE)) + exit(-1) + else: + host = os_machine + return host + +def get_top1_class_str(result): + """ + 从推理结果中提取出得分最高的类别,并返回字符串 + + 参数: + result (list): 模型推理输出结果(格式需与原函数一致,如 [np.ndarray]) + 返回: + str:得分最高类别的格式化字符串 + 若推理失败,返回错误提示字符串 + """ + if result is None: + print("Inference failed: result is None") + return + + # 解析推理输出(与原逻辑一致:展平输出为1维数组) + output = result[0].reshape(-1) + + # 获取得分最高的类别索引(np.argmax 直接返回最大值索引,比排序更高效) + top1_index = np.argmax(output) + + # 处理标签(确保索引在 labels 列表范围内,避免越界) + if 0 <= top1_index < len(labels): + top1_class_name = labels[top1_index] + else: + top1_class_name = "Unknown Class" # 应对索引异常的边界情况 + + # 5. 
def get_top1_class_str(result):
    """Return the name of the highest-scoring class in *result*.

    Args:
        result (list): inference output; result[0] is an ndarray of
            per-class scores (any shape; it is flattened before argmax).

    Returns:
        str: label of the top-1 class, or "Unknown Class" when the index
            has no label entry; None when *result* is None (inference
            failed).  The previous docstring promised an error *string*
            on failure while the code returned None — the documentation
            now matches the actual behavior.
    """
    if result is None:
        print("Inference failed: result is None")
        return None

    # Flatten to 1-D and take the argmax — cheaper than a full sort.
    scores = result[0].reshape(-1)
    top1_index = int(np.argmax(scores))

    # EAFP lookup works whether `labels` is a dict keyed by class index
    # (as in labels.py) or a plain sequence; the old bounds check
    # `0 <= i < len(labels)` silently assumed contiguous integer keys.
    try:
        return labels[top1_index]
    except (KeyError, IndexError):
        return "Unknown Class"


def preprocess(raw_image, target_size=(640, 640)):
    """Resize an image and add a batch dimension for RKNN inference.

    No color conversion happens here; the caller passes the image already
    in the channel order the model expects (the demo converts BGR->RGB
    before calling).  The old docstring documented a non-existent
    `image_path` parameter plus file-reading exceptions; this function
    never touches the filesystem.

    Args:
        raw_image (numpy.ndarray): HxWxC image.
        target_size (tuple): (width, height) passed to cv2.resize,
            default (640, 640).

    Returns:
        numpy.ndarray: array of shape (1, target_h, target_w, C).
    """
    resized = cv2.resize(raw_image, target_size)
    return np.expand_dims(resized, 0)  # prepend the batch axis
def init_rknn_model(model_path):
    """Load the RKNN classification model exactly once (module singleton).

    First call: creates an RKNNLite instance, loads *model_path*,
    initializes the runtime on NPU core 0, and caches the instance in
    `_global_rknn_instance`.  Later calls return the cached instance and
    ignore *model_path*.

    Exits the process on load/init failure (demo-style error handling).
    """
    global _global_rknn_instance

    if _global_rknn_instance is None:
        rknn_lite = RKNNLite(verbose=False)

        ret = rknn_lite.load_rknn(model_path)
        if ret != 0:
            print(f'[ERROR] Load CLS_RKNN model failed (code: {ret})')
            exit(ret)

        # Pinned to NPU core 0 — assumes an RK3576/RK3588-class device;
        # other boards may need a plain init_runtime().  TODO confirm HW.
        ret = rknn_lite.init_runtime(core_mask=RKNNLite.NPU_CORE_0)
        if ret != 0:
            print(f'[ERROR] Init CLS_RKNN runtime failed (code: {ret})')
            exit(ret)

        _global_rknn_instance = rknn_lite
        print(f'[INFO] CLS_RKNN model loaded successfully (path: {model_path})')

    return _global_rknn_instance


def yolov11_cls_inference(model_path, raw_image, target_size=(640, 640)):
    """Classify *raw_image* with the (lazily loaded) RKNN model.

    Args:
        model_path (str): path to the .rknn model file (only used on the
            first call, which loads the model).
        raw_image (numpy.ndarray): input image, channel order as the
            model expects (the demo caller converts BGR->RGB first).
        target_size (tuple): (width, height) fed to preprocess(),
            default (640, 640).

    Returns:
        str or None: top-1 class name, or None when the model could not
            be obtained.  BUG FIX: the failure path used to return the
            tuple `(None, img)` while the success path returned a single
            value; callers now always receive exactly one value.
    """
    img = preprocess(raw_image, target_size)

    rknn = init_rknn_model(model_path)
    if rknn is None:
        return None

    outputs = rknn.inference([img])
    return get_top1_class_str(outputs)


if __name__ == '__main__':
    # Demo: read an image, convert BGR->RGB, classify it.
    image_path = "/userdata/reenrr/inference_with_lite/cover_ready.jpg"
    bgr_image = cv2.imread(image_path)
    if bgr_image is None:
        print(f"Failed to read image from {image_path}")
        exit(-1)

    rgb_frame = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
    print(f"Read image from {image_path}, shape: {rgb_frame.shape}")

    result = yolov11_cls_inference(
        model_path="/userdata/PyQt_main_test/app/view/yolo/yolov11_cls.rknn",
        raw_image=rgb_frame,
        target_size=(640, 640)
    )
    print(f"\n最终分类结果:{result}")
# ---- labels.py ----
# Class-index -> name mapping for the cover classifier
# (indices follow synset.txt conventions).
labels = \
{0: 'cover_noready',
 1: 'cover_ready'
}

# ---- mobilenetv2_inference.py: paths and input size ----
model_path = '/userdata/reenrr/inference_with_lite/mobilenetv2_640.rknn'
image_path = '/userdata/reenrr/inference_with_lite/222.jpg'
target_size = (640, 640)

# Device-tree node for RK356x/RK3576/RK3588 detection.
DEVICE_COMPATIBLE_NODE = '/proc/device-tree/compatible'


def get_host():
    """Return the device identifier used to pick an RKNN model.

    Non Linux/aarch64 hosts get the '<system>-<machine>' string back;
    on Rockchip boards the SoC name is read from the device tree.
    """
    machine_tag = '{}-{}'.format(platform.system(), platform.machine())
    if machine_tag != 'Linux-aarch64':
        return machine_tag

    try:
        with open(DEVICE_COMPATIBLE_NODE) as fh:
            compat_str = fh.read()
    except IOError:
        print('Read device node {} failed.'.format(DEVICE_COMPATIBLE_NODE))
        exit(-1)

    if 'rk3562' in compat_str:
        return 'RK3562'
    if 'rk3576' in compat_str:
        return 'RK3576'
    if 'rk3588' in compat_str:
        return 'RK3588'
    return 'RK3566_RK3568'


# Per-SoC model paths; only the RK3588 entry points at the real model here.
RK3566_RK3568_RKNN_MODEL = 'resnet18_for_rk3566_rk3568.rknn'
RK3588_RKNN_MODEL = model_path
RK3562_RKNN_MODEL = 'resnet18_for_rk3562.rknn'
RK3576_RKNN_MODEL = 'resnet18_for_rk3576.rknn'
def show_top5(result):
    """Print the five highest-scoring classes from a classification result.

    Args:
        result (list): inference output; result[0] is an ndarray of
            per-class scores, flattened and softmax-normalized before
            ranking.

    Prints one line per class in `'[idx] score:... class:"..."'` format.
    """
    if result is None:
        print("Inference failed: result is None")
        return

    output = result[0].reshape(-1)
    # Numerically stable softmax: subtracting the max before exp() avoids
    # overflow for large logits and leaves the result mathematically
    # unchanged (the old `np.exp(output)` could overflow to inf/nan).
    exps = np.exp(output - np.max(output))
    output = exps / np.sum(exps)
    # Indices of the 5 largest probabilities, best first.
    output_sorted_indices = np.argsort(output)[::-1][:5]
    top5_str = 'resnet18\n-----TOP 5-----\n'
    for index in output_sorted_indices:
        value = output[index]
        if value > 0:
            topi = '[{:>3d}] score:{:.6f} class:"{}"\n'.format(index, value, labels[index])
        else:
            topi = '-1: 0.0\n'
        top5_str += topi
    print(top5_str)


if __name__ == '__main__':

    # Pick the model that matches the detected device.
    host_name = get_host()
    if host_name == 'RK3566_RK3568':
        rknn_model = RK3566_RK3568_RKNN_MODEL
    elif host_name == 'RK3562':
        rknn_model = RK3562_RKNN_MODEL
    elif host_name == 'RK3576':
        rknn_model = RK3576_RKNN_MODEL
    elif host_name == 'RK3588':
        rknn_model = RK3588_RKNN_MODEL
    else:
        print("This demo cannot run on the current platform: {}".format(host_name))
        exit(-1)

    rknn_lite = RKNNLite()

    # Load RKNN model
    print('--> Load RKNN model')
    ret = rknn_lite.load_rknn(rknn_model)
    if ret != 0:
        print('Load RKNN model failed')
        exit(ret)
    print('done')

    # Read and preprocess the image: BGR->RGB, resize, add batch axis.
    ori_img = cv2.imread(image_path)
    img = cv2.cvtColor(ori_img, cv2.COLOR_BGR2RGB)
    img = cv2.resize(img, target_size)
    img = np.expand_dims(img, 0)

    # Init runtime environment (core pinning only exists on RK3576/RK3588).
    print('--> Init runtime environment')
    if host_name in ['RK3576', 'RK3588']:
        ret = rknn_lite.init_runtime(core_mask=RKNNLite.NPU_CORE_0)
    else:
        ret = rknn_lite.init_runtime()
    if ret != 0:
        print('Init runtime environment failed')
        exit(ret)
    print('done')

    print("host_name:", host_name)
    print("RKNNLite.NPU_CORE_0:", RKNNLite.NPU_CORE_0)

    # Inference, with wall-clock timing in milliseconds.
    print('--> Running model')
    start_time = time.time() * 1000  # ms
    outputs = rknn_lite.inference(inputs=[img])
    end_time = time.time() * 1000
    print("outputs:", outputs)
    print('Inference completed')
    print("inference_time:", end_time - start_time, "ms")

    # Show the classification results
    show_top5(outputs)
# Tail of mobilenetv2_inference.py's __main__ block: free the NPU runtime.
rknn_lite.release()


# ============ output_shape.py: probe RKNN model output shapes ============
model_path = '/userdata/reenrr/inference_with_lite/mobilenetv2_640.rknn'

rknn_lite = RKNNLite()
rknn_lite.load_rknn(model_path)
rknn_lite.init_runtime()

# One dummy forward pass (NCHW float32, 1x3x640x640 — adjust to the model's
# input size) is enough to discover the output tensor shapes.
probe = np.random.randn(1, 3, 640, 640).astype(np.float32)
outputs = rknn_lite.inference(inputs=[probe])

print("\n输出维度信息:")
for i, out in enumerate(outputs):
    print(f"Output {i} shape: {out.shape}")

rknn_lite.release()


# ============ yolov11_cls_inference.py: paths and input size ============
model_path = '/userdata/reenrr/inference_with_lite/yolov11_cls.rknn'
image_path = '/userdata/reenrr/inference_with_lite/222.jpg'
target_size = (640, 640)

# Device-tree node for RK356x/RK3576/RK3588 detection.
DEVICE_COMPATIBLE_NODE = '/proc/device-tree/compatible'
# yolov11_cls_inference.py -- classification demo (raw scores, no softmax).
# Header constants redeclared here (identical values) so this unit is
# self-contained.
model_path = '/userdata/reenrr/inference_with_lite/yolov11_cls.rknn'
image_path = '/userdata/reenrr/inference_with_lite/222.jpg'
target_size = (640, 640)

DEVICE_COMPATIBLE_NODE = '/proc/device-tree/compatible'


def get_host():
    """Return the device identifier used to select the RKNN model."""
    tag = platform.system() + '-' + platform.machine()
    if tag != 'Linux-aarch64':
        return tag

    try:
        with open(DEVICE_COMPATIBLE_NODE) as node_file:
            node_text = node_file.read()
    except IOError:
        print('Read device node {} failed.'.format(DEVICE_COMPATIBLE_NODE))
        exit(-1)

    for marker, device in (('rk3562', 'RK3562'),
                           ('rk3576', 'RK3576'),
                           ('rk3588', 'RK3588')):
        if marker in node_text:
            return device
    return 'RK3566_RK3568'


RK3566_RK3568_RKNN_MODEL = 'resnet18_for_rk3566_rk3568.rknn'
RK3588_RKNN_MODEL = model_path
RK3562_RKNN_MODEL = 'resnet18_for_rk3562.rknn'
RK3576_RKNN_MODEL = 'resnet18_for_rk3576.rknn'


def show_top5(result):
    """Print the five best classes using the raw (un-normalized) scores.

    This variant deliberately skips softmax, so the printed scores are
    the model's raw outputs rather than probabilities.
    """
    if result is None:
        print("Inference failed: result is None")
        return

    scores = result[0].reshape(-1)
    top_indices = np.argsort(scores)[::-1][:5]
    pieces = ['resnet18\n-----TOP 5-----\n']
    for idx in top_indices:
        score = scores[idx]
        if score > 0:
            pieces.append('[{:>3d}] score:{:.6f} class:"{}"\n'.format(idx, score, labels[idx]))
        else:
            pieces.append('-1: 0.0\n')
    print(''.join(pieces))


if __name__ == '__main__':

    # Map the detected device to its model file.
    host_name = get_host()
    model_by_host = {
        'RK3566_RK3568': RK3566_RK3568_RKNN_MODEL,
        'RK3562': RK3562_RKNN_MODEL,
        'RK3576': RK3576_RKNN_MODEL,
        'RK3588': RK3588_RKNN_MODEL,
    }
    if host_name not in model_by_host:
        print("This demo cannot run on the current platform: {}".format(host_name))
        exit(-1)
    rknn_model = model_by_host[host_name]

    rknn_lite = RKNNLite()

    print('--> Load RKNN model')
    ret = rknn_lite.load_rknn(rknn_model)
    if ret != 0:
        print('Load RKNN model failed')
        exit(ret)
    print('done')

    # BGR file -> RGB, model input size, batch axis.
    ori_img = cv2.imread(image_path)
    rgb = cv2.cvtColor(ori_img, cv2.COLOR_BGR2RGB)
    rgb = cv2.resize(rgb, target_size)
    batch = np.expand_dims(rgb, 0)

    print('--> Init runtime environment')
    if host_name in ['RK3576', 'RK3588']:
        ret = rknn_lite.init_runtime(core_mask=RKNNLite.NPU_CORE_0)
    else:
        ret = rknn_lite.init_runtime()
    if ret != 0:
        print('Init runtime environment failed')
        exit(ret)
    print('done')

    print("host_name:", host_name)
    print("RKNNLite.NPU_CORE_0:", RKNNLite.NPU_CORE_0)

    print('--> Running model')
    outputs = rknn_lite.inference(inputs=[batch])
    print("outputs:", outputs)
    print('Inference completed')

    show_top5(outputs)

    rknn_lite.release()