diff --git a/.gitignore b/.gitignore index eeed237..b7dc40d 100644 --- a/.gitignore +++ b/.gitignore @@ -1,36 +1,36 @@ -# Xmake Cache +# Build Cache .xmake/ build/ +# build libs +lib/ +tensorrt_yolo/libs/ + # VSCode Cache .vscode # Python Cache __pycache__/ - - -# output Cache -output/ - -# demo images -demo/**/*.jpg -demo/**/*.jpeg -demo/**/*.png -demo/**/*.gif -demo/**/*.bmp -demo/**/*.tiff -demo/**/*.webp - -# demo models -demo/**/*.pt -demo/**/*.onnx -demo/**/*.engine +venv/ # python package dist/ tensorrt_yolo.egg-info/ tensorrt_yolo/c_lib_wrap.py -# build libs -lib/ -tensorrt_yolo/libs/ +# output Cache +output/ + +# example images +examples/**/*.jpg +examples/**/*.jpeg +examples/**/*.png +examples/**/*.gif +examples/**/*.bmp +examples/**/*.tiff +examples/**/*.webp + +# example models +examples/**/*.pt +examples/**/*.onnx +examples/**/*.engine diff --git a/CMakeLists.txt b/CMakeLists.txt index 82e5d0e..7e03d05 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -2,7 +2,7 @@ cmake_minimum_required(VERSION 3.15.0) # 设置CMake的最低版本要求 cmake_policy(SET CMP0091 NEW) # 允许在CMake 3.10+中自动设置项目名作为二进制目录名 cmake_policy(SET CMP0146 OLD) # 忽略对find_package的过时警告 -project(TensorRT-YOLO VERSION 4.3.0 LANGUAGES CXX CUDA) # 定义项目名称、版本和使用的编程语言(C++和CUDA) +project(TensorRT-YOLO VERSION 5.0.0 LANGUAGES CXX CUDA) # 定义项目名称、版本和使用的编程语言(C++和CUDA) # 设置 C++ 标准 set(CMAKE_CXX_STANDARD 17) # 设置C++标准为17 diff --git a/README.en.md b/README.en.md index 778a934..d839c63 100644 --- a/README.en.md +++ b/README.en.md @@ -17,62 +17,320 @@ English | [简体中文](README.md) GitHub forks

-TensorRT-YOLO is an inference acceleration project that supports YOLOv3, YOLOv5, YOLOv6, YOLOv7, YOLOv8, YOLOv9, YOLOv10, YOLO11, PP-YOLOE and PP-YOLOE+ using NVIDIA TensorRT for optimization. The project not only integrates the TensorRT plugin to enhance post-processing effects but also utilizes CUDA kernel functions and CUDA graphs to accelerate inference. TensorRT-YOLO provides support for both C++ and Python inference, aiming to deliver a fast and optimized object detection solution.
+🚀 TensorRT-YOLO is a **user-friendly** and **extremely efficient** inference deployment tool for the **YOLO series**, designed specifically for NVIDIA devices. The project not only integrates TensorRT plugins to enhance post-processing but also uses CUDA kernels and CUDA Graphs to accelerate inference. TensorRT-YOLO provides support for both C++ and Python inference, aiming to offer a **plug-and-play** deployment experience. It covers task scenarios such as [object detection](examples/detect/), [instance segmentation](examples/segment/), [pose recognition](examples/pose/), [oriented bounding box detection](examples/obb/), and [video analysis](examples/VideoPipe), meeting developers' **multi-scenario** deployment needs.
-<img src="assets/example0.jpg">
-<img src="assets/example1.jpg">
+<table align="center">
+  <tr>
+    <td align="center"><img src="assets/detect.jpg" alt="Detect"><br>Detect</td>
+    <td align="center"><img src="assets/segment.jpg" alt="Segment"><br>Segment</td>
+    <td align="center"><img src="assets/pose.jpg" alt="Pose"><br>Pose</td>
+    <td align="center"><img src="assets/obb.png" alt="OBB"><br>OBB</td>
+  </tr>
+</table>
##
✨ Key Features
-- Support for YOLOv3, YOLOv5, YOLOv6, YOLOv7, YOLOv8, YOLOv9, YOLOv10, YOLO11, PP-YOLOE and PP-YOLOE+
-- Support Detection, OBB Detection and Segmentation models
-- Support for ONNX static and dynamic export, as well as TensorRT inference
-- Integration of TensorRT plugin for accelerated post-processing
-- Utilization of CUDA kernel functions for accelerated preprocessing
-- Utilization of CUDA graphs for accelerated inference process
-- Support for inference in both C++ and Python
-- Command-line interface for quick export and inference
-- One-click Docker deployment
+- **Diverse YOLO Support**: Fully compatible with YOLOv3 through YOLOv11, as well as PP-YOLOE and PP-YOLOE+.
+- **Multi-scenario Applications**: Provides example code for diverse scenarios such as [Detect](examples/detect/), [Segment](examples/segment/), [Pose](examples/pose/), and [OBB](examples/obb/).
+- **Model Optimization and Inference Acceleration**:
+  - **ONNX Support**: Supports static and dynamic export of ONNX models, including TensorRT custom plugin support, simplifying model deployment.
+  - **TensorRT Integration**: Integrates TensorRT plugins, including custom plugins, to accelerate post-processing for Detect, Segment, Pose, OBB, and other scenarios, improving inference efficiency.
+  - **CUDA Acceleration**: Optimizes pre-processing with CUDA kernels and accelerates inference with CUDA Graphs for high-performance computing.
+- **Language Support**: Supports C++ and Python (bound via Pybind11 to improve Python inference speed), meeting the needs of different programming languages.
+- **Deployment Convenience**:
+  - **Dynamic Library Compilation**: Provides support for dynamic library compilation, making the project easy to call and deploy.
+  - **No Third-Party Dependencies**: Apart from CUDA and TensorRT, all features are implemented with the standard library, simplifying deployment.
+- **Rapid Development and Deployment**:
+  - **CLI Tools**: Provides a command-line interface (CLI) tool for quick model export and inference.
+  - **Cross-Platform Support**: Runs on Windows and Linux, on both x86 and ARM, adapting to different hardware environments.
+  - **Docker Deployment**: Supports one-click deployment with Docker, simplifying environment configuration.
+- **TensorRT Compatibility**: Compatible with TensorRT 10.x, keeping pace with the latest NVIDIA tooling.

-##
🛠️ Requirements
+##
🔮 Documentation and Tutorials
+ +- **Installation Guide** + - [📦 Quick Compilation and Installation](docs/en/build_and_install.md) +- **Quick Start** + - [✴️ Python SDK Quick Start](#quick-start-python) + - [✴️ C++ SDK Quick Start](#quick-start-cpp) +- **Usage Examples** + - [Object Detection Example](examples/detect/README.md) + - [Instance Segmentation Example](examples/segment/README.md) + - [Pose Recognition Example](examples/pose/README.md) + - [Oriented Bounding Box Detection Example](examples/obb/README.md) + - [📹 Video Analysis Example](examples/VideoPipe/README.md) +- **API Documentation** + - Python API Documentation (⚠️ Not Implemented) + - C++ API Documentation (⚠️ Not Implemented) +- **Frequently Asked Questions** + - ⚠️ Collecting... +- **Model Support List** + - [🖥️ Model Support List](#support-models) + +##
💨 Quick Start
+
+### 🔸 Prerequisites

 - Recommended CUDA version >= 11.6
-- Recommended TensorRT version >= 8.6
+- TensorRT version >= 8.6.1 (8.6.1 is the minimum supported version)
+- OS: Linux x86_64 (recommended), Linux ARM, or Windows
+
+### 🎆 Quick Installation
+
+- Refer to the [📦 Quick Compilation and Installation](docs/en/build_and_install.md) documentation
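+
+Before moving on, you can confirm that the installed TensorRT meets the version requirement. A minimal check, assuming the TensorRT Python bindings are installed in your environment:
+
+```python
+# Sanity check for the version requirement (assumes the `tensorrt` Python package is available)
+import tensorrt as trt
+
+print(trt.__version__)  # expect 8.6.1 or newer
+```
+
+### Python SDK Quick Start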
+ +> [!IMPORTANT] +> Before inference, please refer to the [🔧 CLI Model Export](/docs/en/model_export.md) documentation to export the ONNX model suitable for this project's inference and build it into a TensorRT engine. + +#### Python CLI Inference Example + +> [!NOTE] +> Using the `--cudaGraph` option can significantly improve inference speed, but note that this feature is only available for static models. +> +> The `-m, --mode` parameter can be used to select different model types, where `0` represents detection (Detect), `1` represents oriented bounding box (OBB), `2` represents segmentation (Segment), and `3` represents pose estimation (Pose). + +1. Use the `tensorrt_yolo` library's `trtyolo` command-line tool for inference. Run the following command to view help information: + + ```bash + trtyolo infer --help + ``` + +2. Run the following command for inference: + + ```bash + trtyolo infer -e models/yolo11n.engine -m 0 -i images -o output -l labels.txt --cudaGraph + ``` + + Inference results will be saved to the `output` folder, and visualized results will be generated. -##
📦 Usage Guide
+#### Python Inference Example -- [Quick Compile and Install](docs/en/build_and_install.md) +> [!NOTE] +> `DeployDet`, `DeployOBB`, `DeploySeg`, and `DeployPose` correspond to detection (Detect), oriented bounding box (OBB), segmentation (Segment), and pose estimation (Pose) models, respectively. +> +> For these models, the `CG` version utilizes CUDA Graph to further accelerate the inference process, but please note that this feature is limited to static models. -- [Export Models using CLI](docs/en/model_export.md) +```python +import cv2 +from tensorrt_yolo.infer import DeployCGDet, DeployDet, generate_labels_with_colors, visualize -- [Model Inference Examples](demo/detect/README.en.md) +use_cudaGraph = True +engine_path = "yolo11n-with-plugin.engine" +model = DeployCGDet(engine_path) if use_cudaGraph else DeployDet(engine_path) -- [Video Analysis Example](demo/VideoPipe/README.en.md) +im = cv2.imread("test_image.jpg") +result = model.predict(cv2.cvtColor(im, cv2.COLOR_BGR2RGB)) # The model accepts images in RGB format +print(f"==> detect result: {result}") -##
📺 BiliBili
+# Visualization +labels = generate_labels_with_colors("labels.txt") +vis_im = visualize(image, result, labels) +cv2.imwrite("vis_image.jpg", vis_im) -- [【TensorRT-YOLO】你的YOLO快速部署工具](https://www.bilibili.com/video/BV12T421r7ZH) +``` -- [【TensorRT-YOLO】TensorRT 自定义插件加速 YOLO OBB 部署演示](https://www.bilibili.com/video/BV1NYYze8EST) +### C++ SDK Quick Start
-
-- [【TensorRT-YOLO】CUDA Graphs 加速推理](https://www.bilibili.com/video/BV1RZ421M7JV)
+> [!IMPORTANT]
+> Before inference, please refer to the [🔧 CLI Model Export](/docs/en/model_export.md) documentation to export the ONNX model suitable for this project's inference and build it into a TensorRT engine.

-- [【TensorRT-YOLO】3.0 Docker 部署演示](https://www.bilibili.com/video/BV1Jr42137EP)
+> [!NOTE]
+> `DeployDet`, `DeployOBB`, `DeploySeg`, and `DeployPose` correspond to detection (Detect), oriented bounding box (OBB), segmentation (Segment), and pose estimation (Pose) models, respectively.
+>
+> For these models, the `CG` version utilizes CUDA Graph to further accelerate the inference process, but please note that this feature is limited to static models.
+
+```cpp
+#include <memory>
+#include <opencv2/opencv.hpp>
+// Apart from CUDA and TensorRT, the module is implemented with the standard library only
+#include "deploy/vision/inference.hpp"
+#include "deploy/vision/result.hpp"
+
+int main(int argc, char* argv[]) {
+    bool useCudaGraph = true;
+    // DeployBase is polymorphic, so hold the concrete model behind a smart pointer
+    std::unique_ptr<deploy::DeployBase> model;
+    if (useCudaGraph) {
+        model = std::make_unique<deploy::DeployCGDet>("yolo11n-with-plugin.engine");
+    } else {
+        model = std::make_unique<deploy::DeployDet>("yolo11n-with-plugin.engine");
+    }
+    auto cvim = cv::imread("test_image.jpg");
+
+    cv::cvtColor(cvim, cvim, cv::COLOR_BGR2RGB);
+    deploy::Image im(cvim.data, cvim.cols, cvim.rows);  // The model accepts images in RGB format
+    deploy::DetResult result = model->predict(im);
+
+    // Visualization
+    // ...
+
+    return 0;
+}
+```
+
+For more deployment examples, please refer to the [Model Deployment Examples](examples) section.
+
+##
🖥️ Model Support List
+ +
+ + + + + + + + + +
+<table align="center">
+  <tr>
+    <td align="center"><img src="assets/yolo-detect.jpeg" alt="Detect"><br>Detect</td>
+    <td align="center"><img src="assets/yolo-segment.jpeg" alt="Segment"><br>Segment</td>
+    <td align="center"><img src="assets/yolo-pose.jpeg" alt="Pose"><br>Pose</td>
+    <td align="center"><img src="assets/yolo-obb.jpeg" alt="OBB"><br>OBB</td>
+  </tr>
+</table>
+
+Symbol legend: (1) ✅ : Supported; (2) ❔ : In progress; (3) ❎ : Not supported; (4) 🟢 : Export must be implemented yourself; inference is supported.
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+<table align="center">
+  <tr><th>Task Scenario</th><th>Model</th><th>CLI Export</th><th>Inference Deployment</th></tr>
+  <tr><td>Detect</td><td>ultralytics/yolov3</td><td>✅</td><td>✅</td></tr>
+  <tr><td>Detect</td><td>ultralytics/yolov5</td><td>✅</td><td>✅</td></tr>
+  <tr><td>Detect</td><td>meituan/YOLOv6</td><td>❎ Refer to official export tutorial</td><td>✅</td></tr>
+  <tr><td>Detect</td><td>WongKinYiu/yolov7</td><td>❎ Refer to official export tutorial</td><td>✅</td></tr>
+  <tr><td>Detect</td><td>WongKinYiu/yolov9</td><td>❎ Refer to official export tutorial</td><td>✅</td></tr>
+  <tr><td>Detect</td><td>THU-MIG/yolov10</td><td>✅</td><td>✅</td></tr>
+  <tr><td>Detect</td><td>ultralytics/ultralytics</td><td>✅</td><td>✅</td></tr>
+  <tr><td>Detect</td><td>PaddleDetection/PP-YOLOE+</td><td>✅</td><td>✅</td></tr>
+  <tr><td>OBB</td><td>ultralytics/ultralytics</td><td>✅</td><td>✅</td></tr>
+  <tr><td>Pose</td><td>ultralytics/ultralytics</td><td>✅</td><td>✅</td></tr>
+  <tr><td>Segment</td><td>ultralytics/yolov3</td><td>✅</td><td>✅</td></tr>
+  <tr><td>Segment</td><td>ultralytics/yolov5</td><td>✅</td><td>✅</td></tr>
+  <tr><td>Segment</td><td>meituan/YOLOv6-seg</td><td>❎ Implement yourself referring to tensorrt_yolo/export/head.py</td><td>🟢</td></tr>
+  <tr><td>Segment</td><td>WongKinYiu/yolov7</td><td>❎ Implement yourself referring to tensorrt_yolo/export/head.py</td><td>🟢</td></tr>
+  <tr><td>Segment</td><td>WongKinYiu/yolov9</td><td>❎ Implement yourself referring to tensorrt_yolo/export/head.py</td><td>🟢</td></tr>
+  <tr><td>Segment</td><td>ultralytics/ultralytics</td><td>✅</td><td>✅</td></tr>
+</table>
+
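+
+The scenarios above map one-to-one onto the Python deployment classes from the Quick Start notes. A small sketch of that mapping, assuming `DeployOBB`, `DeploySeg`, and `DeployPose` are importable from `tensorrt_yolo.infer` alongside `DeployDet` (the engine path is illustrative):
+
+```python
+from tensorrt_yolo.infer import DeployDet, DeployOBB, DeployPose, DeploySeg
+
+# Same numbering as the CLI `-m, --mode` flag: 0 = Detect, 1 = OBB, 2 = Segment, 3 = Pose
+MODE_TO_MODEL = {0: DeployDet, 1: DeployOBB, 2: DeploySeg, 3: DeployPose}
+
+model = MODE_TO_MODEL[0]("yolo11n-with-plugin.engine")
+```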
##
☕ Buy the Author a Coffee
Open source projects require effort. If this project has been helpful to you, consider buying the author a coffee. Your support is the greatest motivation for the developer to keep maintaining the project! -> It's recommended to use Alipay, as WeChat doesn't provide the avatar. Please note "TensorRT-YOLO" in the transfer. -

diff --git a/README.md b/README.md index f5d1e05..14608c2 100644 --- a/README.md +++ b/README.md @@ -17,62 +17,319 @@ GitHub forks

-TensorRT-YOLO 是一个支持 YOLOv3、YOLOv5、YOLOv6、YOLOv7、YOLOv8、YOLOv9、YOLOv10、YOLO11、PP-YOLOE 和 PP-YOLOE+ 的推理加速项目,使用 NVIDIA TensorRT 进行优化。项目不仅集成了 TensorRT 插件以增强后处理效果,还使用了 CUDA 核函数以及 CUDA 图来加速推理。TensorRT-YOLO 提供了 C++ 和 Python 推理的支持,旨在提供快速而优化的目标检测解决方案。 +🚀TensorRT-YOLO 是一款专为 NVIDIA 设备设计的**易用灵活**、**极致高效**的**YOLO系列**推理部署工具。项目不仅集成了 TensorRT 插件以增强后处理效果,还使用了 CUDA 核函数以及 CUDA 图来加速推理。TensorRT-YOLO 提供了 C++ 和 Python 推理的支持,旨在提供📦**开箱即用**的部署体验。包括 [目标检测](examples/detect/)、[实例分割](examples/segment/)、[姿态识别](examples/pose/)、[旋转目标检测](examples/obb/)、[视频分析](examples/VideoPipe)等任务场景,满足开发者**多场景**的部署需求。 +
-<img src="assets/example0.jpg">
-<img src="assets/example1.jpg">
+<table align="center">
+  <tr>
+    <td align="center"><img src="assets/detect.jpg" alt="Detect"><br>Detect</td>
+    <td align="center"><img src="assets/segment.jpg" alt="Segment"><br>Segment</td>
+    <td align="center"><img src="assets/pose.jpg" alt="Pose"><br>Pose</td>
+    <td align="center"><img src="assets/obb.png" alt="OBB"><br>OBB</td>
+  </tr>
+</table>
##
✨ 主要特性
-- 支持 YOLOv3、YOLOv5、YOLOv6、YOLOv7、YOLOv8、YOLOv9、YOLOv10、YOLO11、PP-YOLOE 和 PP-YOLOE+ -- 支持 Detection、OBB Detection 与 Segmentation 模型 -- 支持 ONNX 静态、动态导出以及 TensorRT 推理 -- 集成 TensorRT 插件加速后处理 -- 利用 CUDA 核函数加速前处理 -- 利用 CUDA 图加速推理流程 -- 支持 C++ 和 Python 推理 -- CLI 快速导出与推理 -- Docker 一键部署 +- **多样化的YOLO支持**:全面兼容YOLOv3至YOLOv11以及PP-YOLOE和PP-YOLOE+,满足不同版本需求。 +- **多场景应用**:提供[Detect](examples/detect/)、[Segment](examples/segment/)、[Pose](examples/pose/)、[OBB](examples/obb/)等多样化场景的示例代码。 +- **模型优化与推理加速**: + - **ONNX支持**:支持ONNX模型的静态和动态导出,包括TensorRT自定义插件支持,简化模型部署流程。 + - **TensorRT集成**:集成TensorRT插件,包括自定义插件,加速Detect, Segment, Pose, OBB等场景的后处理,提升推理效率。 + - **CUDA加速**:利用CUDA核函数优化前处理,CUDA图技术加速推理流程,实现高性能计算。 +- **语言支持**:支持C++和Python(通过Pybind11映射,提升Python推理速度),满足不同编程语言需求。 +- **部署便捷性**: + - **动态库编译**:提供动态库编译支持,方便调用和部署。 + - **无第三方依赖**:全部功能使用标准库实现,无需额外依赖,简化部署流程。 +- **快速开发与部署**: + - **CLI工具**:提供命令行界面(CLI)工具,实现快速模型导出和推理。 + - **跨平台支持**:支持Windows、Linux、ARM、x86等多种设备,适应不同硬件环境。 + - **Docker部署**:支持Docker一键部署,简化环境配置和部署流程。 +- **TensorRT兼容性**:兼容TensorRT 10.x版本,确保与最新技术兼容。 + +##
🔮 文档教程
+ + +- **安装文档** + - [📦 快速编译安装](docs/cn/build_and_install.md) +- **快速开始** + - [✴️ Python SDK快速使用](#quick-start-python) + - [✴️ C++ SDK快速使用](#quick-start-cpp) +- **使用示例** + - [目标检测 示例](examples/detect/README.md) + - [实例分割 示例](examples/segment/README.md) + - [姿态识别 示例](examples/pose/README.md) + - [旋转目标检测 示例](examples/obb/README.md) + - [📹视频分析 示例](examples/VideoPipe/README.md) +- **API文档** + - Python API文档(⚠️ 未实现) + - C++ API文档(⚠️ 未实现) +- **常见问题** + - ⚠️ 收集中 ... +- **模型支持列表** + - [🖥️ 模型支持列表](#support-models) -##
🛠️ 环境要求
+##
💨 快速开始
+
+### 🔸 前置依赖

 - 推荐 CUDA 版本 >= 11.6
-- 推荐 TensorRT 版本 >= 8.6
+- TensorRT 版本 >= 8.6.1(8.6.1 为最低支持版本)
+- OS: Linux x86_64(推荐)、Linux ARM、Windows
+
+### 🎆 快速安装
+
+- 参考[📦 快速编译安装](docs/cn/build_and_install.md)文档
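+
+环境就绪后,可以快速确认 TensorRT 版本是否满足要求。以下为最小自检示例(假设环境中已安装 TensorRT 的 Python 绑定):
+
+```python
+# 版本要求自检(假设已安装 `tensorrt` Python 包)
+import tensorrt as trt
+
+print(trt.__version__)  # 预期 8.6.1 及以上
+```
+
+### Python SDK快速开始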
+
+> [!IMPORTANT]
+> 在进行推理之前,请参考[🔧 CLI 导出模型](/docs/cn/model_export.md)文档,导出适用于该项目推理的ONNX模型并构建为TensorRT引擎。
+
+#### Python CLI 推理示例
+
+> [!NOTE]
+> 使用 `--cudaGraph` 选项可以显著提升推理速度,但需知此功能仅适用于静态模型。
+>
+> 通过 `-m, --mode` 参数可以选择不同的模型类型,其中 `0` 代表检测(Detect)、`1` 代表旋转边界框(OBB)、`2` 代表分割(Segment)、`3` 代表姿态估计(Pose)。
+
+1. 使用 `tensorrt_yolo` 库的 `trtyolo` 命令行工具进行推理。运行以下命令查看帮助信息:
+
+    ```bash
+    trtyolo infer --help
+    ```
+
+2. 运行以下命令进行推理:
+
+    ```bash
+    trtyolo infer -e models/yolo11n.engine -m 0 -i images -o output -l labels.txt --cudaGraph
+    ```
+
+    推理结果将保存至 `output` 文件夹,并生成可视化结果。

-##
📦 使用教程
+#### Python 推理示例 -- [快速编译安装](docs/cn/build_and_install.md) +> [!NOTE] +> `DeployDet`、`DeployOBB`、`DeploySeg` 和 `DeployPose` 分别对应于检测(Detect)、方向边界框(OBB)、分割(Segment)和姿态估计(Pose)模型。 +> +> 对于这些模型,`CG` 版本利用 CUDA Graph 来进一步加速推理过程,但请注意,这一功能仅限于静态模型。 -- [使用 CLI 模型导出](docs/cn/model_export.md) +```python +import cv2 +from tensorrt_yolo.infer import DeployCGDet, DeployDet, generate_labels_with_colors, visualize -- [模型推理示例](demo/detect/README.md) +use_cudaGraph = True +engine_path = "yolo11n-with-plugin.engine" +model = DeployCGDet(engine_path) if use_cudaGraph else DeployDet(engine_path) -- [视频分析示例](demo/VideoPipe/README.md) +im = cv2.imread("test_image.jpg") +result = model.predict(cv2.cvtColor(im, cv2.COLOR_BGR2RGB)) # model 接收的图片必须是RGB格式 +print(f"==> detect result: {result}") -##
📺 BiliBili
+# 可视化 +labels = generate_labels_with_colors("labels.txt") +vis_im = visualize(image, result, labels) +cv2.imwrite("vis_image.jpg", vis_im) -- [【TensorRT-YOLO】你的YOLO快速部署工具](https://www.bilibili.com/video/BV12T421r7ZH) +``` -- [【TensorRT-YOLO】TensorRT 自定义插件加速 YOLO OBB 部署演示](https://www.bilibili.com/video/BV1NYYze8EST) +### C++ SDK快速开始
-
-- [【TensorRT-YOLO】CUDA Graphs 加速推理](https://www.bilibili.com/video/BV1RZ421M7JV)
+> [!IMPORTANT]
+> 在进行推理之前,请参考[🔧 CLI 导出模型](/docs/cn/model_export.md)文档,导出适用于该项目推理的ONNX模型并构建为TensorRT引擎。

-- [【TensorRT-YOLO】3.0 Docker 部署演示](https://www.bilibili.com/video/BV1Jr42137EP)
+> [!NOTE]
+> `DeployDet`、`DeployOBB`、`DeploySeg` 和 `DeployPose` 分别对应于检测(Detect)、方向边界框(OBB)、分割(Segment)和姿态估计(Pose)模型。
+>
+> 对于这些模型,`CG` 版本利用 CUDA Graph 来进一步加速推理过程,但请注意,这一功能仅限于静态模型。
+
+```cpp
+#include <memory>
+#include <opencv2/opencv.hpp>
+// 为了方便调用,模块除 CUDA、TensorRT 外均使用标准库实现
+#include "deploy/vision/inference.hpp"
+#include "deploy/vision/result.hpp"
+
+int main(int argc, char* argv[]) {
+    bool useCudaGraph = true;
+    // DeployBase 为多态基类,需通过智能指针持有具体模型
+    std::unique_ptr<deploy::DeployBase> model;
+    if (useCudaGraph) {
+        model = std::make_unique<deploy::DeployCGDet>("yolo11n-with-plugin.engine");
+    } else {
+        model = std::make_unique<deploy::DeployDet>("yolo11n-with-plugin.engine");
+    }
+    auto cvim = cv::imread("test_image.jpg");
+
+    cv::cvtColor(cvim, cvim, cv::COLOR_BGR2RGB);
+    deploy::Image im(cvim.data, cvim.cols, cvim.rows);  // model 接收的图片必须是RGB格式
+    deploy::DetResult result = model->predict(im);
+
+    // 可视化
+    // ...
+
+    return 0;
+}
+```
+
+更多部署案例请参考[模型部署示例](examples)。
+
+##
🖥️ 模型支持列表
+ +
+ + + + + + + + + +
+<table align="center">
+  <tr>
+    <td align="center"><img src="assets/yolo-detect.jpeg" alt="Detect"><br>Detect</td>
+    <td align="center"><img src="assets/yolo-segment.jpeg" alt="Segment"><br>Segment</td>
+    <td align="center"><img src="assets/yolo-pose.jpeg" alt="Pose"><br>Pose</td>
+    <td align="center"><img src="assets/yolo-obb.jpeg" alt="OBB"><br>OBB</td>
+  </tr>
+</table>
+ +符号说明: (1) ✅ : 已经支持; (2) ❔: 正在进行中; (3) ❎ : 暂不支持; (4) 🟢 : 导出自行实现,即可推理.
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+<table align="center">
+  <tr><th>任务场景</th><th>模型</th><th>CLI 导出</th><th>推理部署</th></tr>
+  <tr><td>Detect</td><td>ultralytics/yolov3</td><td>✅</td><td>✅</td></tr>
+  <tr><td>Detect</td><td>ultralytics/yolov5</td><td>✅</td><td>✅</td></tr>
+  <tr><td>Detect</td><td>meituan/YOLOv6</td><td>❎ 参考官方导出教程</td><td>✅</td></tr>
+  <tr><td>Detect</td><td>WongKinYiu/yolov7</td><td>❎ 参考官方导出教程</td><td>✅</td></tr>
+  <tr><td>Detect</td><td>WongKinYiu/yolov9</td><td>❎ 参考官方导出教程</td><td>✅</td></tr>
+  <tr><td>Detect</td><td>THU-MIG/yolov10</td><td>✅</td><td>✅</td></tr>
+  <tr><td>Detect</td><td>ultralytics/ultralytics</td><td>✅</td><td>✅</td></tr>
+  <tr><td>Detect</td><td>PaddleDetection/PP-YOLOE+</td><td>✅</td><td>✅</td></tr>
+  <tr><td>OBB</td><td>ultralytics/ultralytics</td><td>✅</td><td>✅</td></tr>
+  <tr><td>Pose</td><td>ultralytics/ultralytics</td><td>✅</td><td>✅</td></tr>
+  <tr><td>Segment</td><td>ultralytics/yolov3</td><td>✅</td><td>✅</td></tr>
+  <tr><td>Segment</td><td>ultralytics/yolov5</td><td>✅</td><td>✅</td></tr>
+  <tr><td>Segment</td><td>meituan/YOLOv6-seg</td><td>❎ 参考 tensorrt_yolo/export/head.py 自行实现</td><td>🟢</td></tr>
+  <tr><td>Segment</td><td>WongKinYiu/yolov7</td><td>❎ 参考 tensorrt_yolo/export/head.py 自行实现</td><td>🟢</td></tr>
+  <tr><td>Segment</td><td>WongKinYiu/yolov9</td><td>❎ 参考 tensorrt_yolo/export/head.py 自行实现</td><td>🟢</td></tr>
+  <tr><td>Segment</td><td>ultralytics/ultralytics</td><td>✅</td><td>✅</td></tr>
+</table>
+
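+
+上表中的任务场景与快速开始中提到的 Python 部署类一一对应。以下为按 `-m, --mode` 编号选择部署类的简单示意(假设 `DeployOBB`、`DeploySeg`、`DeployPose` 与 `DeployDet` 一样可从 `tensorrt_yolo.infer` 导入;引擎路径仅作示意):
+
+```python
+from tensorrt_yolo.infer import DeployDet, DeployOBB, DeployPose, DeploySeg
+
+# 与 CLI `-m, --mode` 编号一致:0 = Detect,1 = OBB,2 = Segment,3 = Pose
+MODE_TO_MODEL = {0: DeployDet, 1: DeployOBB, 2: DeploySeg, 3: DeployPose}
+
+model = MODE_TO_MODEL[0]("yolo11n-with-plugin.engine")
+```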
##
☕ 请作者喝杯咖啡
开源不易,如果本项目有帮助到你的话,可以考虑请作者喝杯咖啡,你的支持是开发者持续维护的最大动力~ -> 推荐使用支付宝,微信获取不到头像。转账请备注【TensorRT-YOLO】。 -

diff --git a/assets/detect.jpg b/assets/detect.jpg new file mode 100644 index 0000000..c2902ca Binary files /dev/null and b/assets/detect.jpg differ diff --git a/assets/example0.jpg b/assets/example0.jpg deleted file mode 100644 index 9df3ad8..0000000 Binary files a/assets/example0.jpg and /dev/null differ diff --git a/assets/example1.jpg b/assets/example1.jpg deleted file mode 100644 index 6c87fce..0000000 Binary files a/assets/example1.jpg and /dev/null differ diff --git a/assets/obb.png b/assets/obb.png new file mode 100644 index 0000000..475cf30 Binary files /dev/null and b/assets/obb.png differ diff --git a/assets/pose.jpg b/assets/pose.jpg new file mode 100644 index 0000000..9a34d7c Binary files /dev/null and b/assets/pose.jpg differ diff --git a/assets/segment.jpg b/assets/segment.jpg new file mode 100644 index 0000000..7ff31bf Binary files /dev/null and b/assets/segment.jpg differ diff --git a/assets/yolo-detect.jpeg b/assets/yolo-detect.jpeg new file mode 100644 index 0000000..31d4057 Binary files /dev/null and b/assets/yolo-detect.jpeg differ diff --git a/assets/yolo-obb.jpeg b/assets/yolo-obb.jpeg new file mode 100644 index 0000000..6e03e66 Binary files /dev/null and b/assets/yolo-obb.jpeg differ diff --git a/assets/yolo-pose.jpeg b/assets/yolo-pose.jpeg new file mode 100644 index 0000000..40d67ce Binary files /dev/null and b/assets/yolo-pose.jpeg differ diff --git a/assets/yolo-segment.jpeg b/assets/yolo-segment.jpeg new file mode 100644 index 0000000..12568e6 Binary files /dev/null and b/assets/yolo-segment.jpeg differ diff --git a/docs/cn/build_and_install.md b/docs/cn/build_and_install.md index 7fc4c86..2810f87 100644 --- a/docs/cn/build_and_install.md +++ b/docs/cn/build_and_install.md @@ -8,8 +8,7 @@ - Linux: gcc/g++ - Windows: MSVC -- CMake -- Xmake +- CMake or Xmake - CUDA - cuDNN - TensorRT @@ -78,7 +77,7 @@ cd TensorRT-YOLO pip install --upgrade build python -m build --wheel # 仅安装推理相关依赖 -pip install dist/tensorrt_yolo-4.*-py3-none-any.whl +pip install dist/tensorrt_yolo-5.*-py3-none-any.whl # 安装模型导出相关依赖以及推理相关依赖 -pip install dist/tensorrt_yolo-4.*-py3-none-any.whl[export] +pip install dist/tensorrt_yolo-5.*-py3-none-any.whl[export] ``` diff --git a/docs/en/build_and_install.md b/docs/en/build_and_install.md index b1023a0..8c21e5c 100644 --- a/docs/en/build_and_install.md +++ b/docs/en/build_and_install.md @@ -78,7 +78,7 @@ cd TensorRT-YOLO pip install --upgrade build python -m build --wheel # Install only the inference-related dependencies -pip install dist/tensorrt_yolo-4.*-py3-none-any.whl +pip install dist/tensorrt_yolo-5.*-py3-none-any.whl # Install both the model export-related dependencies and the inference-related dependencies -pip install dist/tensorrt_yolo-4.*-py3-none-any.whl[export] +pip install dist/tensorrt_yolo-5.*-py3-none-any.whl[export] ``` diff --git a/examples/detect/README.en.md b/examples/detect/README.en.md index a23d1f0..1d037a2 100644 --- a/examples/detect/README.en.md +++ b/examples/detect/README.en.md @@ -1,8 +1,8 @@ [简体中文](README.md) | English -# Detection Model Inference Example +# Object Detection Inference Example -This example uses the YOLO11n model to demonstrate how to perform Detection model inference using the Command Line Interface (CLI), Python, and C++. +This example uses the yolo11n model to demonstrate how to perform Object Detection inference using the Command Line Interface (CLI), Python, and C++. 
[yolo11n.pt](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt),[【TestImages】COCO-part.zip](https://www.ilanzou.com/s/N5Oyq8hZ) @@ -44,7 +44,7 @@ trtexec --onnx=models/yolo11n.onnx --saveEngine=models/yolo11n.engine --fp16 > [!NOTE] > The `--cudaGraph` command added from version 4.0 can further accelerate the inference process, but this feature only supports static models. > -> From version 4.2, OBB model inference is supported, and the `-m, --mode` command is added to select between Detection and OBB models. +> From version 4.2, OBB model inference is supported, and the `-m, --mode` command is added to select between Detect and OBB. 1. Use the `trtyolo` command-line tool from the `tensorrt_yolo` library for inference. Run the following command to view help information: diff --git a/examples/detect/README.md b/examples/detect/README.md index b080a1a..6961d06 100644 --- a/examples/detect/README.md +++ b/examples/detect/README.md @@ -1,8 +1,8 @@ [English](README.en.md) | 简体中文 -# Detection 模型推理示例 +# 目标检测推理示例 -本示例以 YOLO11n 模型为例,展示如何使用命令行界面(CLI)、Python 和 C++ 三种方式进行 Detection 模型推理。 +本示例以 yolo11n 模型为例,展示如何使用命令行界面(CLI)、Python 和 C++ 三种方式进行目标检测推理。 [yolo11n.pt](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt),[【测试图片】COCO-part.zip](https://www.ilanzou.com/s/N5Oyq8hZ) @@ -44,7 +44,7 @@ trtexec --onnx=models/yolo11n.onnx --saveEngine=models/yolo11n.engine --fp16 > [!NOTE] > 从 4.0 版本开始新增的 `--cudaGraph` 指令可以进一步加速推理过程,但该功能仅支持静态模型。 > -> 从 4.2 版本开始,支持 OBB 模型推理,并新增 `-m, --mode` 指令,用于选择 Detection 还是 OBB 模型。 +> 从 4.2 版本开始,支持 OBB 推理,并新增 `-m, --mode` 指令,用于选择 Detect 还是 OBB。 1. 使用 `tensorrt_yolo` 库的 `trtyolo` 命令行工具进行推理。运行以下命令查看帮助信息: diff --git a/examples/detect/detect.cpp b/examples/detect/detect.cpp index a2d5ee8..9fbd5c9 100644 --- a/examples/detect/detect.cpp +++ b/examples/detect/detect.cpp @@ -55,13 +55,13 @@ std::vector> generateLabelColorPairs(const st } // Visualize inference results -void visualize(cv::Mat& image, const deploy::DetResult& result, const std::vector>& labelColorPairs) { +void visualize(cv::Mat& image, deploy::DetResult& result, std::vector>& labelColorPairs) { for (size_t i = 0; i < result.num; ++i) { - const auto& box = result.boxes[i]; + auto& box = result.boxes[i]; int cls = result.classes[i]; float score = result.scores[i]; - const auto& label = labelColorPairs[cls].first; - const auto& color = labelColorPairs[cls].second; + auto& label = labelColorPairs[cls].first; + auto& color = labelColorPairs[cls].second; std::string labelText = label + " " + cv::format("%.2f", score); // Draw rectangle and label diff --git a/examples/obb/README.en.md b/examples/obb/README.en.md index 6117027..48f0508 100644 --- a/examples/obb/README.en.md +++ b/examples/obb/README.en.md @@ -1,8 +1,8 @@ [简体中文](README.md) | English -# OBB Detection Model Inference Example +# Oriented Bounding Boxes Object Detection Inference Example -This example uses the YOLO11n-obb model to demonstrate how to perform OBB Detection model inference using the Command Line Interface (CLI), Python, and C++. +This example uses the YOLO11n-obb model to demonstrate how to perform Oriented Bounding Boxes Object Detection inference using the Command Line Interface (CLI), Python, and C++. The required `yolo11n-obb.pt` and test images are provided and saved in the `images` folder and `models` folder, respectively. 
@@ -46,7 +46,7 @@ trtexec --onnx=models/yolo11n-obb.onnx --saveEngine=models/yolo11n-obb.engine -- > [!NOTE] > The `--cudaGraph` command added from version 4.0 can further accelerate the inference process, but this feature only supports static models. > -> From version 4.2, OBB model inference is supported, and the `-m, --mode` command is added to select between Detection and OBB models. +> From version 4.2, OBB model inference is supported, and the `-m, --mode` command is added to select between Detect and OBB. 1. Use the `trtyolo` command-line tool from the `tensorrt_yolo` library for inference. Run the following command to view help information: diff --git a/examples/obb/README.md b/examples/obb/README.md index 190d5a3..40c628e 100644 --- a/examples/obb/README.md +++ b/examples/obb/README.md @@ -1,8 +1,8 @@ [English](README.en.md) | 简体中文 -# OBB Detection 模型推理示例 +# 旋转目标检测推理示例 -本示例以 YOLO11n-obb 模型为例,展示如何使用命令行界面(CLI)、Python 和 C++ 三种方式进行 OBB Detection 模型推理。 +本示例以 YOLO11n-obb 模型为例,展示如何使用命令行界面(CLI)、Python 和 C++ 三种方式进行旋转目标检测推理。 [yolo11n-obb.pt](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-obb.pt),[【测试图片】DOTA-part.zip](https://www.ilanzou.com/s/yK6yq8H5) @@ -44,7 +44,7 @@ trtexec --onnx=models/yolo11n-obb.onnx --saveEngine=models/yolo11n-obb.engine -- > [!NOTE] > 从 4.0 版本开始新增的 `--cudaGraph` 指令可以进一步加速推理过程,但该功能仅支持静态模型。 > -> 从 4.2 版本开始,支持 OBB 模型推理,并新增 `-m, --mode` 指令,用于选择 Detection 还是 OBB 模型。 +> 从 4.2 版本开始,支持 OBB 推理,并新增 `-m, --mode` 指令,用于选择 Detect 还是 OBB。 1. 使用 `tensorrt_yolo` 库的 `trtyolo` 命令行工具进行推理。运行以下命令查看帮助信息: diff --git a/examples/obb/obb.cpp b/examples/obb/obb.cpp index 2543907..f02e720 100644 --- a/examples/obb/obb.cpp +++ b/examples/obb/obb.cpp @@ -83,13 +83,13 @@ std::vector xyxyr2xyxyxyxy(const deploy::RotatedBox& box) { } // Visualize inference results -void visualize(cv::Mat& image, const deploy::OBBResult& result, const std::vector>& labelColorPairs) { +void visualize(cv::Mat& image, deploy::OBBResult& result, std::vector>& labelColorPairs) { for (size_t i = 0; i < result.num; ++i) { - const auto& box = result.boxes[i]; + auto& box = result.boxes[i]; int cls = result.classes[i]; float score = result.scores[i]; - const auto& label = labelColorPairs[cls].first; - const auto& color = labelColorPairs[cls].second; + auto& label = labelColorPairs[cls].first; + auto& color = labelColorPairs[cls].second; std::string labelText = label + " " + cv::format("%.2f", score); // Draw rectangle and label diff --git a/examples/pose/pose.cpp b/examples/pose/pose.cpp index a82bb81..af3821b 100644 --- a/examples/pose/pose.cpp +++ b/examples/pose/pose.cpp @@ -80,11 +80,11 @@ void visualize(cv::Mat& image, deploy::PoseResult& result, std::vector [!NOTE] > The `--cudaGraph` command added from version 4.0 can further accelerate the inference process, but this feature only supports static models. > -> From version 4.3 and later, support for Segmentation model inference is added. The command `-m 2, --mode 2` is used to select the Segmentation model. +> From version 4.3 and later, support for Instance Segmentation inference is added. The command `-m 2, --mode 2` is used to select the Instance Segmentation. 1. Use the `trtyolo` command-line tool from the `tensorrt_yolo` library for inference. 
Run the following command to view help information: diff --git a/examples/segment/README.md b/examples/segment/README.md index 9e8ba54..214c578 100644 --- a/examples/segment/README.md +++ b/examples/segment/README.md @@ -1,8 +1,8 @@ [English](README.en.md) | 简体中文 -# Segmentation 模型推理示例 +# 实例分割推理示例 -本示例以 YOLO11n-seg 模型为例,展示如何使用命令行界面(CLI)、Python 和 C++ 三种方式进行 Segmentation 模型推理。 +本示例以 YOLO11n-seg 模型为例,展示如何使用命令行界面(CLI)、Python 和 C++ 三种方式进行实例分割推理。 [yolo11n-seg.pt](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-seg.pt),[【测试图片】COCO-part.zip](https://www.ilanzou.com/s/N5Oyq8hZ) @@ -44,7 +44,7 @@ trtexec --onnx=models/yolo11n-seg.onnx --saveEngine=models/yolo11n-seg.engine -- > [!NOTE] > 从 4.0 版本开始新增的 `--cudaGraph` 指令可以进一步加速推理过程,但该功能仅支持静态模型。 > -> 从 4.3 以后的版本开始,支持 Segmentation 模型推理,指令 `-m 2, --mode 2` 用于选择 Segmentation 模型。 +> 从 4.3 以后的版本开始,支持实例分割推理,指令 `-m 2, --mode 2` 用于选择实例分割。 1. 使用 `tensorrt_yolo` 库的 `trtyolo` 命令行工具进行推理。运行以下命令查看帮助信息: @@ -94,7 +94,4 @@ trtexec --onnx=models/yolo11n-seg.onnx --saveEngine=models/yolo11n-seg.engine -- ./segment -e ../models/yolo11n-seg.engine -i ../images -o ../output -l ../labels.txt --cudaGraph ``` - - 通过以上方式,您可以顺利完成模型推理。 diff --git a/examples/segment/segment.cpp b/examples/segment/segment.cpp index 49a2f2a..32f8387 100644 --- a/examples/segment/segment.cpp +++ b/examples/segment/segment.cpp @@ -57,11 +57,11 @@ std::vector> generateLabelColorPairs(const st // Visualize inference results void visualize(cv::Mat& image, deploy::SegResult& result, std::vector>& labelColorPairs) { for (size_t i = 0; i < result.num; ++i) { - const auto& box = result.boxes[i]; + auto& box = result.boxes[i]; int cls = result.classes[i]; float score = result.scores[i]; - const auto& label = labelColorPairs[cls].first; - const auto& color = labelColorPairs[cls].second; + auto& label = labelColorPairs[cls].first; + auto& color = labelColorPairs[cls].second; std::string labelText = label + " " + cv::format("%.2f", score); // Draw rectangle and label diff --git a/pyproject.toml b/pyproject.toml index 6c5e764..fa3a999 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" # Project settings ----------------------------------------------------------------------------------------------------- [project] -version = "4.3.0" +version = "5.0.0" readme = "README.md" name = "tensorrt_yolo" requires-python = ">=3.8" diff --git a/source/deploy/pybind/deploy.cpp b/source/deploy/pybind/deploy.cpp index 38706aa..e4f5455 100644 --- a/source/deploy/pybind/deploy.cpp +++ b/source/deploy/pybind/deploy.cpp @@ -356,7 +356,7 @@ void BindResult(pybind11::module &m) { // Bind inference class template template void BindClsTemplate(pybind11::module &m, const std::string &className) { - pybind11::class_(m, className.c_str()) + pybind11::class_>(m, className.c_str()) .def(pybind11::init(), pybind11::arg("file"), pybind11::arg("cudaMem") = false, pybind11::arg("device") = 0) .def( diff --git a/tensorrt_yolo/c_lib_wrap.py.in b/tensorrt_yolo/c_lib_wrap.py.in index b55b311..0780b8f 100644 --- a/tensorrt_yolo/c_lib_wrap.py.in +++ b/tensorrt_yolo/c_lib_wrap.py.in @@ -16,7 +16,7 @@ # limitations under the License. 
# ============================================================================== # File : c_lib_wrap.py -# Version : 1.0 +# Version : 5.0.0 # Author : laugh12321 # Contact : laugh12321@vip.qq.com # Date : 2024/07/03 13:12:29 diff --git a/tensorrt_yolo/cli.py b/tensorrt_yolo/cli.py index 3897097..c820b38 100644 --- a/tensorrt_yolo/cli.py +++ b/tensorrt_yolo/cli.py @@ -16,7 +16,7 @@ # limitations under the License. # ============================================================================== # File : cli.py -# Version : 4.0 +# Version : 5.0.0 # Author : laugh12321 # Contact : laugh12321@vip.qq.com # Date : 2024/07/05 14:26:53 diff --git a/tensorrt_yolo/export/head.py b/tensorrt_yolo/export/head.py index 62440a8..943410c 100644 --- a/tensorrt_yolo/export/head.py +++ b/tensorrt_yolo/export/head.py @@ -16,7 +16,7 @@ # limitations under the License. # ============================================================================== # File : head.py -# Version : 6.0 +# Version : 5.0.0 # Author : laugh12321 # Contact : laugh12321@vip.qq.com # Date : 2024/04/22 09:45:11 diff --git a/tensorrt_yolo/export/ppyoloe.py b/tensorrt_yolo/export/ppyoloe.py index bea10d6..5da1953 100644 --- a/tensorrt_yolo/export/ppyoloe.py +++ b/tensorrt_yolo/export/ppyoloe.py @@ -16,7 +16,7 @@ # limitations under the License. # ============================================================================== # File : ppyoloe.py -# Version : 5.0 +# Version : 5.0.0 # Author : laugh12321 # Contact : laugh12321@vip.qq.com # Date : 2024/01/28 14:37:43 diff --git a/tensorrt_yolo/infer/inference.py b/tensorrt_yolo/infer/inference.py index 8d8aac3..be101c9 100644 --- a/tensorrt_yolo/infer/inference.py +++ b/tensorrt_yolo/infer/inference.py @@ -16,7 +16,7 @@ # limitations under the License. # ============================================================================== # File : inference.py -# Version : 3.0 +# Version : 5.0.0 # Author : laugh12321 # Contact : laugh12321@vip.qq.com # Date : 2024/07/03 14:06:55 diff --git a/tensorrt_yolo/infer/result.py b/tensorrt_yolo/infer/result.py index 28e4435..b7a768b 100644 --- a/tensorrt_yolo/infer/result.py +++ b/tensorrt_yolo/infer/result.py @@ -16,7 +16,7 @@ # limitations under the License. # ============================================================================== # File : result.py -# Version : 2.0 +# Version : 5.0.0 # Author : laugh12321 # Contact : laugh12321@vip.qq.com # Date : 2024/08/04 12:39:02 diff --git a/tensorrt_yolo/infer/timer.py b/tensorrt_yolo/infer/timer.py index 7258340..f363d0c 100644 --- a/tensorrt_yolo/infer/timer.py +++ b/tensorrt_yolo/infer/timer.py @@ -16,7 +16,7 @@ # limitations under the License. # ============================================================================== # File : timer.py -# Version : 1.0 +# Version : 5.0.0 # Author : laugh12321 # Contact : laugh12321@vip.qq.com # Date : 2024/07/05 13:39:01 diff --git a/tensorrt_yolo/infer/utils.py b/tensorrt_yolo/infer/utils.py index 648284f..151366b 100644 --- a/tensorrt_yolo/infer/utils.py +++ b/tensorrt_yolo/infer/utils.py @@ -15,8 +15,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================== -# File : visualize.py -# Version : 3.0 +# File : utils.py +# Version : 5.0.0 # Author : laugh12321 # Contact : laugh12321@vip.qq.com # Date : 2024/07/05 14:06:46 diff --git a/xmake.lua b/xmake.lua index 99673a8..8da9b77 100644 --- a/xmake.lua +++ b/xmake.lua @@ -1,6 +1,6 @@ -- 设置项目信息 set_project("TensorRT-YOLO") -set_version("4.3.0") +set_version("5.0.0") set_languages("cxx17") set_allowedplats("windows", "linux") @@ -9,7 +9,7 @@ add_requires("python", {system = true}) add_requires("pybind11") -- 添加编译规则 -add_rules("plugin.compile_commands.autoupdate", {outputdir = ".vscode"}) +add_rules("plugin.compile_commands.autoupdate", {outputdir = "build"}) add_rules("mode.release") -- 定义选项