Closed shenhuxi-pen closed 1 month ago
跑真实场景的数据你配置随机数做校正集啊
import os
import argparse
import numpy as np
from PIL import Image
import onnxsim
import onnx
import nncase
def parse_model_input_output(model_file):
    """Load an ONNX model and collect its true (non-initializer) inputs.

    Args:
        model_file: path to the .onnx file.

    Returns:
        (onnx_model, inputs): the loaded ModelProto and a list of dicts,
        one per graph input, with keys 'name', 'dtype' and 'shape'.
    """
    onnx_model = onnx.load(model_file)
    input_all = [node.name for node in onnx_model.graph.input]
    # graph.input also lists weights (initializers); exclude them to keep
    # only the real runtime inputs.
    input_initializer = [node.name for node in onnx_model.graph.initializer]
    input_names = list(set(input_all) - set(input_initializer))
    input_tensors = [
        node for node in onnx_model.graph.input if node.name in input_names]

    # input
    inputs = []
    for _, e in enumerate(input_tensors):
        onnx_type = e.type.tensor_type
        input_dict = {}
        input_dict['name'] = e.name
        # onnx.mapping was removed in onnx >= 1.16; prefer the helper API
        # and fall back to the legacy mapping for older onnx versions.
        try:
            input_dict['dtype'] = onnx.helper.tensor_dtype_to_np_dtype(
                onnx_type.elem_type)
        except AttributeError:
            input_dict['dtype'] = onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[
                onnx_type.elem_type]
        # Dynamic (0-valued) dims fall back to the expected 1x3x640x640.
        input_dict['shape'] = [(i.dim_value if i.dim_value != 0 else d) for i, d in zip(
            onnx_type.shape.dim, [1, 3, 640, 640])]
        inputs.append(input_dict)
    return onnx_model, inputs
def onnx_simplify(model_file, dump_dir):
    """Simplify an ONNX model with onnxsim and save it into dump_dir.

    Args:
        model_file: path to the original .onnx file.
        dump_dir: directory where 'simplified.onnx' is written.

    Returns:
        Path of the simplified model file.
    """
    onnx_model, inputs = parse_model_input_output(model_file)
    onnx_model = onnx.shape_inference.infer_shapes(onnx_model)
    # Pin every input to a concrete shape so the simplifier can fold
    # shape-dependent subgraphs. (Renamed the loop variable: 'input'
    # shadowed the builtin.)
    input_shapes = {inp['name']: inp['shape'] for inp in inputs}
    # NOTE(review): onnxsim >= 0.4 renamed this kwarg to
    # 'test_input_shapes'; 'input_shapes' matches the onnxsim version
    # this script was written against — confirm against the installed one.
    onnx_model, check = onnxsim.simplify(onnx_model, input_shapes=input_shapes)
    assert check, "Simplified ONNX model could not be validated"
    model_file = os.path.join(dump_dir, 'simplified.onnx')
    onnx.save_model(onnx_model, model_file)
    return model_file
def read_model_file(model_file):
    """Read a model file from disk and return its raw bytes."""
    with open(model_file, 'rb') as f:
        model_content = f.read()
    return model_content
def generate_data_ramdom(shape, batch):
    """Generate `batch` random uint8 samples of the given shape.

    Stand-in calibration data when no real images are available.
    (The misspelled name is kept for caller compatibility.)

    Returns:
        List of `batch` samples; each sample is a single-element list
        holding one uint8 array of the given shape.
    """
    data = []
    for i in range(batch):
        data.append([np.random.randint(0, 256, shape).astype(np.uint8)])
    return data
def generate_data(shape, batch, calib_dir):
    """Load `batch` calibration images from calib_dir, resized to `shape`.

    Args:
        shape: NCHW input shape, e.g. [1, 3, 640, 640].
        batch: number of calibration samples to load.
        calib_dir: directory containing the calibration images.

    Returns:
        List of samples; each sample is a single-element list holding one
        uint8 array of shape (1, C, H, W).
    """
    # Sort for a deterministic calibration set: os.listdir order is
    # platform-dependent.
    img_paths = [os.path.join(calib_dir, p) for p in sorted(os.listdir(calib_dir))]
    data = []
    for i in range(batch):
        assert i < len(img_paths), "calibration images not enough."
        img_data = Image.open(img_paths[i]).convert('RGB')
        # PIL resize takes (width, height); shape is NCHW.
        img_data = img_data.resize((shape[3], shape[2]), Image.BILINEAR)
        img_data = np.asarray(img_data, dtype=np.uint8)
        img_data = np.transpose(img_data, (2, 0, 1))  # HWC -> CHW
        data.append([img_data[np.newaxis, ...]])      # add batch dim -> NCHW
    return data
def main():
    """Compile a YOLOv8n ONNX model into a k230 kmodel with PTQ.

    Pipeline: parse args -> simplify ONNX -> configure compile options ->
    import -> post-training quantization -> compile -> write kmodel.
    """
    parser = argparse.ArgumentParser(prog="nncase")
    parser.add_argument("--target", default="k230", type=str, help='target to run')
    parser.add_argument("--model", type=str, help='model file')
    parser.add_argument("--dataset", type=str, help='calibration_dataset')
    args = parser.parse_args()

    input_shape = [1, 3, 640, 640]

    dump_dir = 'tmp/object_det'
    if not os.path.exists(dump_dir):
        os.makedirs(dump_dir)

    # onnx simplify
    model_file = onnx_simplify(args.model, dump_dir)

    # compile_options
    compile_options = nncase.CompileOptions()
    compile_options.target = args.target
    compile_options.preprocess = True
    compile_options.swapRB = False
    compile_options.input_shape = input_shape
    compile_options.input_type = 'uint8'
    # NOTE(review): input_range [0, 1] dequantizes uint8 pixels into
    # [0, 1]; this must match the normalization used at training time.
    compile_options.input_range = [0, 1]
    compile_options.mean = [0, 0, 0]
    compile_options.std = [1, 1, 1]
    compile_options.input_layout = 'NCHW'
    # compile_options.output_layout = 'NHWC'
    compile_options.dump_ir = True
    compile_options.dump_asm = True
    compile_options.dump_dir = dump_dir
    compile_options.quant_type = 'uint8'

    # compiler
    compiler = nncase.Compiler(compile_options)

    # import
    model_content = read_model_file(model_file)
    import_options = nncase.ImportOptions()
    compiler.import_onnx(model_content, import_options)

    # ptq_options
    ptq_options = nncase.PTQTensorOptions()
    ptq_options.samples_count = 10
    ptq_options.calibrate_method = 'NoClip'
    ptq_options.quant_type = 'int16'
    # ptq_options.set_tensor_data(generate_data_ramdom(input_shape, ptq_options.samples_count))
    ptq_options.set_tensor_data(generate_data(input_shape, ptq_options.samples_count, args.dataset))
    compiler.use_ptq(ptq_options)

    # compile
    compiler.compile()

    # kmodel: ensure the output directory exists before writing
    # (the original raised FileNotFoundError when ./onnx was missing).
    kmodel = compiler.gencode_tobytes()
    kmodel_path = os.path.join('./onnx', 'yolov8n_640.kmodel')
    os.makedirs(os.path.dirname(kmodel_path), exist_ok=True)
    with open(kmodel_path, 'wb') as f:
        f.write(kmodel)


if __name__ == '__main__':
    main()
参考一下这个
这个代码可以不??

import os
import argparse
import numpy as np
from PIL import Image
import onnxsim
import onnx
import nncase
def parse_model_input_output(model_file):
    """Load an ONNX model and collect its true (non-initializer) inputs.

    Returns:
        (onnx_model, inputs): the loaded ModelProto and a list of dicts,
        one per graph input, with keys 'name', 'dtype' and 'shape'.
    """
    onnx_model = onnx.load(model_file)
    input_all = [node.name for node in onnx_model.graph.input]
    # graph.input also lists weights (initializers); keep only the real
    # runtime inputs.
    input_initializer = [node.name for node in onnx_model.graph.initializer]
    input_names = list(set(input_all) - set(input_initializer))
    input_tensors = [
        node for node in onnx_model.graph.input if node.name in input_names]

    # input
    inputs = []
    for _, e in enumerate(input_tensors):
        onnx_type = e.type.tensor_type
        input_dict = {}
        input_dict['name'] = e.name
        input_dict['dtype'] = onnx.helper.tensor_dtype_to_np_dtype(onnx_type.elem_type)
        # Dynamic (0-valued) dims fall back to the expected 1x3x640x640.
        input_dict['shape'] = [(i.dim_value if i.dim_value != 0 else d) for i, d in zip(
            onnx_type.shape.dim, [1, 3, 640, 640])]
        inputs.append(input_dict)
    return onnx_model, inputs
def model_simplify(model_file):
    """Simplify a model: run onnxsim on .onnx files, pass .tflite through.

    Args:
        model_file: path to a .onnx or .tflite file.

    Returns:
        Path of the (possibly simplified) model file.

    Raises:
        Exception: for any other file extension.
    """
    if model_file.split('.')[-1] == "onnx":
        onnx_model, inputs = parse_model_input_output(model_file)
        onnx_model = onnx.shape_inference.infer_shapes(onnx_model)
        # Pin every input to a concrete shape. (Renamed the loop variable:
        # 'input' shadowed the builtin.)
        input_shapes = {}
        for inp in inputs:
            input_shapes[inp['name']] = inp['shape']
        # onnxsim >= 0.4 kwarg name (older versions used 'input_shapes').
        onnx_model, check = onnxsim.simplify(onnx_model, test_input_shapes=input_shapes)
        assert check, "Simplified ONNX model could not be validated"
        # Written next to the original model.
        model_file = os.path.join(os.path.dirname(model_file), 'simplified.onnx')
        onnx.save_model(onnx_model, model_file)
        print("[ onnx done ]")
    elif model_file.split('.')[-1] == "tflite":
        print("[ tflite skip ]")
    else:
        raise Exception(f"Unsupport type {model_file.split('.')[-1]}")
    return model_file
def read_model_file(model_file):
    """Read a model file from disk and return its raw bytes."""
    with open(model_file, 'rb') as f:
        model_content = f.read()
    return model_content
def generate_data_ramdom(shape, batch):
    """Generate `batch` random uint8 samples of the given shape.

    Stand-in calibration data when no real images are available.
    (The misspelled name is kept for caller compatibility.)
    """
    data = []
    for i in range(batch):
        data.append([np.random.randint(0, 256, shape).astype(np.uint8)])
    return data
def generate_data(shape, batch, calib_dir):
    """Load `batch` calibration images from calib_dir, resized to `shape`.

    Args:
        shape: NCHW input shape, e.g. [1, 3, 640, 640].
        batch: number of calibration samples to load.
        calib_dir: directory containing the calibration images.

    Returns:
        np.ndarray stacking the nested per-sample lists; each sample
        wraps one uint8 array of shape (1, C, H, W).
        NOTE(review): os.listdir order is platform-dependent, so the
        calibration set is not deterministic — consider sorted().
    """
    img_paths = [os.path.join(calib_dir, p) for p in os.listdir(calib_dir)]
    data = []
    for i in range(batch):
        assert i < len(img_paths), "calibration images not enough."
        img_data = Image.open(img_paths[i]).convert('RGB')
        # PIL resize takes (width, height); shape is NCHW.
        img_data = img_data.resize((shape[3], shape[2]), Image.BILINEAR)
        img_data = np.asarray(img_data, dtype=np.uint8)
        img_data = np.transpose(img_data, (2, 0, 1))  # HWC -> CHW
        data.append([img_data[np.newaxis, ...]])      # add batch dim -> NCHW
    return np.array(data)
def main():
    """Compile best.onnx into test.kmodel for the k230 (hard-coded paths)."""
    parser = argparse.ArgumentParser(prog="nncase")
    parser.add_argument("--target", default="k230", type=str, help='target to run')
    parser.add_argument("--model", type=str, help='model file')
    parser.add_argument("--dataset", type=str, help='calibration_dataset')
    args = parser.parse_args()

    # Scratch directory for compiler dumps.
    dump_dir = 'tmp/mobile_retinaface'
    if not os.path.exists(dump_dir):
        os.makedirs(dump_dir)

    ############################ options ########################
    # 1. Compile options.
    compile_options = nncase.CompileOptions()
    # Compilation target: 'cpu' produces a kmodel for CPU inference,
    # 'k230' one for the k230 KPU.
    compile_options.target = "k230"
    # Enable built-in preprocessing.
    compile_options.preprocess = True
    # (1) Transpose: input shape is mandatory when preprocess is True.
    input_shape = [1, 3, 640, 640]
    compile_options.input_shape = input_shape
    # Input layout, default "".
    compile_options.input_layout = "NCHW"
    # compile_options.input_layout = "0,1,2,3"
    # (2) SwapRB (RGB<->BGR).
    compile_options.swapRB = False
    # (3) Dequantize: with preprocess=True, input_type must be 'uint8'
    # or 'float32'.
    compile_options.input_type = 'uint8'
    # Dequantized value range, effective when input_type is 'uint8'.
    compile_options.input_range = [0, 1]
    # (4) Normalization.
    compile_options.mean = [0, 0, 0]
    compile_options.std = [1, 1, 1]
    # Postprocess layout.
    compile_options.output_layout = "NCHW"

    # Compiler configured from the options above.
    compiler = nncase.Compiler(compile_options)

    # 2. Import options (defaults are fine).
    import_options = nncase.ImportOptions()
    model_path = "C:/Users/Administrator/Desktop/ultralytics-main/tmp1/best.onnx"
    model_file = model_simplify(model_path)
    model_content = read_model_file(model_file)
    compiler.import_onnx(model_content, import_options)

    # 3. Post-training-quantization options.
    ptq_options = nncase.PTQTensorOptions()
    ptq_options.samples_count = 100
    calib_dir2 = "C:/Users/Administrator/Desktop/ultralytics-main/datasets/bvn2/images/calibdata"
    ptq_options.set_tensor_data(generate_data(input_shape, ptq_options.samples_count, calib_dir2))
    compiler.use_ptq(ptq_options)
    ############################ options ########################

    # 4. Compile the model.
    compiler.compile()

    # 5. Save the kmodel. (Dropped the dead duplicate kmodel_path
    # assignment that was immediately overwritten.)
    kmodel = compiler.gencode_tobytes()
    dump_path = "C:/Users/Administrator/Desktop/ultralytics-main/tmp1"
    kmodel_path = os.path.join(dump_path, "test.kmodel")
    with open(kmodel_path, 'wb') as f:
        f.write(kmodel)
    print("----------------end-----------------")
    return kmodel_path


# BUG FIX: the pasted code read `if name == 'main':` — markdown stripped
# the dunder underscores, so main() never ran. Correct guard below.
if __name__ == '__main__':
    main()
我试了还是什么都识别不到
@shenhuxi-pen 目前整个流程没问题了是吧
用yolov8n训练的模型,转换成best.onnx后,再用附件代码转成test.kmodel。下载到01Studio K230板上运行,识别不到任何东西,请帮忙看看问题出在哪个环节? best.onnx模型: best.zip test.kmodel模型: test.zip
onnx在PC上推理:
01k230镜像:CanMV-K230_micropython_v1.1-0-g5a6fc54_nncase_v2.9.0.img
PC端环境:nncase==2.9.0 /nncase_kpu==2.9.0
转换代码(onnx-->kmodel): 转换代码.txt
01Studio K230 运行代码: 运行代码.txt