From 3d37af871f6206d96a4d870d07f5f02d472c41cf Mon Sep 17 00:00:00 2001
From: lxs01517514
Date: Wed, 25 Oct 2023 10:54:28 +0800
Subject: [PATCH] update

---
 configs/config_templates/yolox_itag.py | 19 +++++-----------
 easycv/apis/export.py                  | 31 +++++++++++++++-----------
 easycv/predictors/detector.py          | 16 +++++++++----
 requirements/runtime.txt               |  2 +-
 4 files changed, 36 insertions(+), 32 deletions(-)

diff --git a/configs/config_templates/yolox_itag.py b/configs/config_templates/yolox_itag.py
index 6e3396df..b190edfb 100644
--- a/configs/config_templates/yolox_itag.py
+++ b/configs/config_templates/yolox_itag.py
@@ -91,31 +91,22 @@
 train_path = 'data/coco/train2017.manifest'
 val_path = 'data/coco/val2017.manifest'
 
-train_dataset=dict(
+train_dataset = dict(
     type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourcePAI',
-        path=train_path,
-        classes=CLASSES),
+    data_source=dict(type='DetSourcePAI', path=train_path, classes=CLASSES),
     pipeline=train_pipeline,
     dynamic_scale=tuple(img_scale))
 
-val_dataset=dict(
+val_dataset = dict(
     type='DetImagesMixDataset',
     imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourcePAI',
-        path=val_path,
-        classes=CLASSES),
+    data_source=dict(type='DetSourcePAI', path=val_path, classes=CLASSES),
     pipeline=test_pipeline,
     dynamic_scale=None,
     label_padding=False)
 
 data = dict(
-    imgs_per_gpu=16,
-    workers_per_gpu=4,
-    train=train_dataset,
-    val=val_dataset)
+    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
 
 # additional hooks
 interval = 10
diff --git a/easycv/apis/export.py b/easycv/apis/export.py
index 24214067..11c07fc6 100644
--- a/easycv/apis/export.py
+++ b/easycv/apis/export.py
@@ -354,10 +354,12 @@ def _export_yolox(model, cfg, filename):
                         classes=cfg.CLASSES)
 
                     json.dump(config, ofile)
-
+
             if export_type == 'onnx':
-
-                with io.open(filename+'.config.json' if filename.endswith('onnx') else filename + '.onnx.config.json', 'w') as ofile:
+
+                with io.open(
+                        filename + '.config.json' if filename.endswith('onnx')
+                        else filename + '.onnx.config.json', 'w') as ofile:
                     config = dict(
                         model=cfg.model,
                         export=cfg.export,
@@ -365,16 +367,19 @@ def _export_yolox(model, cfg, filename):
                         classes=cfg.CLASSES)
 
                     json.dump(config, ofile)
-
-                torch.onnx.export(model, # the model being exported
-                    input.to(device), # an example input tensor
-                    filename if filename.endswith('onnx') else filename + '.onnx', # path/name of the exported file
-                    export_params=True, # export the trained parameter weights; set to False to export an untrained model
-                    opset_version=12, # ONNX opset version (currently up to 15)
-                    do_constant_folding=True, # whether to apply constant folding optimization
-                    input_names = ['input'], # name of the model's input tensor
-                    output_names = ['output'], # name of the model's output tensor
-                    )
+
+                torch.onnx.export(
+                    model,  # the model being exported
+                    input.to(device),  # an example input tensor
+                    filename if filename.endswith('onnx') else filename +
+                    '.onnx',  # path/name of the exported file
+                    export_params=
+                    True,  # export the trained parameter weights; set to False to export an untrained model
+                    opset_version=12,  # ONNX opset version (currently up to 15)
+                    do_constant_folding=True,  # whether to apply constant folding optimization
+                    input_names=['input'],  # name of the model's input tensor
+                    output_names=['output'],  # name of the model's output tensor
+                    )
 
             if export_type == 'jit':
                 with io.open(filename + '.jit', 'wb') as ofile:
diff --git a/easycv/predictors/detector.py b/easycv/predictors/detector.py
index 4bec1a43..3d2448e3 100644
--- a/easycv/predictors/detector.py
+++ b/easycv/predictors/detector.py
@@ -4,7 +4,8 @@
 from glob import glob
 
 import numpy as np
-import torch, onnxruntime
+import onnxruntime
+import torch
 
 from easycv.core.visualization import imshow_bboxes
 from easycv.datasets.utils import replace_ImageToTensor
@@ -22,9 +23,11 @@
 except Exception:
     from .interface import PredictorInterface
 
+
 # Convert a torch tensor to a numpy ndarray
 def onnx_to_numpy(tensor):
-    return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()
+    return tensor.detach().cpu().numpy(
+    ) if tensor.requires_grad else tensor.cpu().numpy()
 
 
 class DetInputProcessor(InputProcessor):
@@ -392,7 +395,8 @@ def _build_model(self):
                 model = torch.jit.load(infile, self.device)
             else:
                 if onnxruntime.get_device() == 'GPU':
-                    model = onnxruntime.InferenceSession(self.model_path, providers=['CUDAExecutionProvider'])
+                    model = onnxruntime.InferenceSession(
+                        self.model_path, providers=['CUDAExecutionProvider'])
                 else:
                     model = onnxruntime.InferenceSession(self.model_path)
         else:
@@ -422,7 +426,11 @@ def model_forward(self, inputs):
             if self.model_type != 'onnx':
                 outputs = self.model(inputs['img'])
             else:
-                outputs = self.model.run(None, {self.model.get_inputs()[0].name : onnx_to_numpy(inputs['img'])})[0]
+                outputs = self.model.run(
+                    None, {
+                        self.model.get_inputs()[0].name:
+                        onnx_to_numpy(inputs['img'])
+                    })[0]
                 outputs = torch.from_numpy(outputs)
                 outputs = {'results': outputs}  # convert to dict format
         else:
diff --git a/requirements/runtime.txt b/requirements/runtime.txt
index a31309e7..a72dda51 100644
--- a/requirements/runtime.txt
+++ b/requirements/runtime.txt
@@ -13,6 +13,7 @@ lmdb
 numba
 numpy
 nuscenes-devkit
+onnxruntime-gpu
 opencv-python
 oss2
 packaging
@@ -33,4 +34,3 @@ transformers
 wget
 xtcocotools
 yacs
-onnxruntime-gpu
\ No newline at end of file
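
For reference, a minimal sketch of how the exported ONNX model could be exercised with onnxruntime, mirroring the provider selection and feed construction added to easycv/predictors/detector.py above; the file name yolox.onnx, the batch size of 1, and the 640x640 input size are illustrative assumptions, not part of this patch:

    import numpy as np
    import onnxruntime

    # Prefer the CUDA provider when the GPU build of onnxruntime is available,
    # otherwise fall back to CPU (same check as in _build_model above).
    providers = (['CUDAExecutionProvider'] if onnxruntime.get_device() == 'GPU'
                 else ['CPUExecutionProvider'])
    session = onnxruntime.InferenceSession('yolox.onnx', providers=providers)

    # Feed a dummy NCHW float32 image through the single exported input.
    dummy = np.random.rand(1, 3, 640, 640).astype(np.float32)
    outputs = session.run(None, {session.get_inputs()[0].name: dummy})[0]
    print(outputs.shape)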