分类部署例程 (classification deployment example)

This commit is contained in:
2025-10-31 17:21:02 +08:00
commit e41a7455e5
20 changed files with 495 additions and 0 deletions

120
yolov11_cls_inference.py Normal file
View File

@ -0,0 +1,120 @@
import cv2
import numpy as np
import platform
from labels import labels # 确保这个文件存在
from rknnlite.api import RKNNLite
# Absolute paths on the target board; adjust to your deployment layout.
model_path = '/userdata/reenrr/inference_with_lite/yolov11_cls.rknn'
image_path = '/userdata/reenrr/inference_with_lite/222.jpg'
# Input resolution the .rknn model was exported with (width, height).
target_size = (640, 640)
# Device-tree node used to identify RK356x/RK3576/RK3588 boards at runtime.
DEVICE_COMPATIBLE_NODE = '/proc/device-tree/compatible'
def get_host():
    """Return the host identifier used to pick the matching RKNN model.

    On a 64-bit ARM Linux board the Rockchip SoC is detected by reading the
    device-tree compatible string; on any other platform the raw
    '<system>-<machine>' string is returned unchanged.  Exits the process
    with -1 if the device-tree node cannot be read.
    """
    os_machine = '-'.join((platform.system(), platform.machine()))
    if os_machine != 'Linux-aarch64':
        # Not an ARM Linux target board (e.g. a dev PC) — return as-is.
        return os_machine
    try:
        with open(DEVICE_COMPATIBLE_NODE) as node:
            compatible = node.read()
    except IOError:
        print('Read device node {} failed.'.format(DEVICE_COMPATIBLE_NODE))
        exit(-1)
    # Check known SoC markers in the same order as the original demo.
    for marker, host in (('rk3562', 'RK3562'),
                         ('rk3576', 'RK3576'),
                         ('rk3588', 'RK3588')):
        if marker in compatible:
            return host
    # Default family when no specific marker matched.
    return 'RK3566_RK3568'
# Per-SoC model files.  NOTE(review): only the RK3588 entry points at the
# yolov11_cls model above; the resnet18 paths look like leftovers from the
# original resnet18 demo — confirm they exist before running on those boards.
RK3566_RK3568_RKNN_MODEL = 'resnet18_for_rk3566_rk3568.rknn'
RK3588_RKNN_MODEL = model_path
RK3562_RKNN_MODEL = 'resnet18_for_rk3562.rknn'
RK3576_RKNN_MODEL = 'resnet18_for_rk3576.rknn'
def show_top5(result):
    """Print the top-5 classification scores from an inference result.

    Args:
        result: list of output arrays as returned by RKNNLite.inference(),
            or None/empty on failure.  Only result[0] is used; it is
            flattened to a 1-D score vector.

    Returns:
        None.  Output is printed to stdout.
    """
    if result is None:
        print("Inference failed: result is None")
        return
    # Robustness fix: an empty output list previously raised IndexError.
    if len(result) == 0:
        print("Inference failed: result is empty")
        return
    output = result[0].reshape(-1)
    # Softmax (kept disabled, as in the original demo)
    # output = np.exp(output) / np.sum(np.exp(output))
    # Indices of the 5 largest scores, best first.
    output_sorted_indices = np.argsort(output)[::-1][:5]
    # Fix: header previously said 'resnet18' — a copy-paste leftover; this
    # script runs the yolov11_cls model.
    top5_str = 'yolov11_cls\n-----TOP 5-----\n'
    for i, index in enumerate(output_sorted_indices):
        value = output[index]
        if value > 0:
            topi = '[{:>3d}] score:{:.6f} class:"{}"\n'.format(index, value, labels[index])
        else:
            # Non-positive scores are reported as a placeholder line.
            topi = '-1: 0.0\n'
        top5_str += topi
    print(top5_str)
if __name__ == '__main__':
    # Pick the model file matching the detected host platform.
    host_name = get_host()
    if host_name == 'RK3566_RK3568':
        rknn_model = RK3566_RK3568_RKNN_MODEL
    elif host_name == 'RK3562':
        rknn_model = RK3562_RKNN_MODEL
    elif host_name == 'RK3576':
        rknn_model = RK3576_RKNN_MODEL
    elif host_name == 'RK3588':
        rknn_model = RK3588_RKNN_MODEL
    else:
        print("This demo cannot run on the current platform: {}".format(host_name))
        exit(-1)

    rknn_lite = RKNNLite()

    # Load RKNN model
    print('--> Load RKNN model')
    ret = rknn_lite.load_rknn(rknn_model)
    if ret != 0:
        print('Load RKNN model failed')
        exit(ret)
    print('done')

    # Read and preprocess the image.
    ori_img = cv2.imread(image_path)
    # Robustness fix: cv2.imread returns None on a missing/unreadable file;
    # without this check cvtColor fails with a confusing assertion.
    if ori_img is None:
        print('Failed to read image: {}'.format(image_path))
        exit(-1)
    img = cv2.cvtColor(ori_img, cv2.COLOR_BGR2RGB)
    # Resize to the model's input resolution.
    img = cv2.resize(img, target_size)
    img = np.expand_dims(img, 0)  # add batch dimension -> (1, H, W, 3)

    # Init runtime environment
    print('--> Init runtime environment')
    if host_name in ['RK3576', 'RK3588']:
        # Multi-core NPUs: pin inference to core 0, as in the original demo.
        ret = rknn_lite.init_runtime(core_mask=RKNNLite.NPU_CORE_0)
    else:
        ret = rknn_lite.init_runtime()
    if ret != 0:
        print('Init runtime environment failed')
        exit(ret)
    print('done')
    print("host_name:", host_name)
    print("RKNNLite.NPU_CORE_0:", RKNNLite.NPU_CORE_0)

    # Inference — release the NPU runtime even if inference raises.
    try:
        print('--> Running model')
        outputs = rknn_lite.inference(inputs=[img])
        print("outputs:", outputs)
        print('Inference completed')
        # Show the classification results
        show_top5(outputs)
    finally:
        rknn_lite.release()