diff --git a/assets/detect.jpg b/assets/detect.jpg
new file mode 100644
index 0000000..c2902ca
Binary files /dev/null and b/assets/detect.jpg differ
diff --git a/assets/example0.jpg b/assets/example0.jpg
deleted file mode 100644
index 9df3ad8..0000000
Binary files a/assets/example0.jpg and /dev/null differ
diff --git a/assets/example1.jpg b/assets/example1.jpg
deleted file mode 100644
index 6c87fce..0000000
Binary files a/assets/example1.jpg and /dev/null differ
diff --git a/assets/obb.png b/assets/obb.png
new file mode 100644
index 0000000..475cf30
Binary files /dev/null and b/assets/obb.png differ
diff --git a/assets/pose.jpg b/assets/pose.jpg
new file mode 100644
index 0000000..9a34d7c
Binary files /dev/null and b/assets/pose.jpg differ
diff --git a/assets/segment.jpg b/assets/segment.jpg
new file mode 100644
index 0000000..7ff31bf
Binary files /dev/null and b/assets/segment.jpg differ
diff --git a/assets/yolo-detect.jpeg b/assets/yolo-detect.jpeg
new file mode 100644
index 0000000..31d4057
Binary files /dev/null and b/assets/yolo-detect.jpeg differ
diff --git a/assets/yolo-obb.jpeg b/assets/yolo-obb.jpeg
new file mode 100644
index 0000000..6e03e66
Binary files /dev/null and b/assets/yolo-obb.jpeg differ
diff --git a/assets/yolo-pose.jpeg b/assets/yolo-pose.jpeg
new file mode 100644
index 0000000..40d67ce
Binary files /dev/null and b/assets/yolo-pose.jpeg differ
diff --git a/assets/yolo-segment.jpeg b/assets/yolo-segment.jpeg
new file mode 100644
index 0000000..12568e6
Binary files /dev/null and b/assets/yolo-segment.jpeg differ
diff --git a/docs/cn/build_and_install.md b/docs/cn/build_and_install.md
index 7fc4c86..2810f87 100644
--- a/docs/cn/build_and_install.md
+++ b/docs/cn/build_and_install.md
@@ -8,8 +8,7 @@
- Linux: gcc/g++
- Windows: MSVC
-- CMake
-- Xmake
+- CMake or Xmake
- CUDA
- cuDNN
- TensorRT
@@ -78,7 +77,7 @@ cd TensorRT-YOLO
pip install --upgrade build
python -m build --wheel
# 仅安装推理相关依赖
-pip install dist/tensorrt_yolo-4.*-py3-none-any.whl
+pip install dist/tensorrt_yolo-5.*-py3-none-any.whl
# 安装模型导出相关依赖以及推理相关依赖
-pip install dist/tensorrt_yolo-4.*-py3-none-any.whl[export]
+pip install dist/tensorrt_yolo-5.*-py3-none-any.whl[export]
```
diff --git a/docs/en/build_and_install.md b/docs/en/build_and_install.md
index b1023a0..8c21e5c 100644
--- a/docs/en/build_and_install.md
+++ b/docs/en/build_and_install.md
@@ -78,7 +78,7 @@ cd TensorRT-YOLO
pip install --upgrade build
python -m build --wheel
# Install only the inference-related dependencies
-pip install dist/tensorrt_yolo-4.*-py3-none-any.whl
+pip install dist/tensorrt_yolo-5.*-py3-none-any.whl
# Install both the model export-related dependencies and the inference-related dependencies
-pip install dist/tensorrt_yolo-4.*-py3-none-any.whl[export]
+pip install dist/tensorrt_yolo-5.*-py3-none-any.whl[export]
```
diff --git a/examples/detect/README.en.md b/examples/detect/README.en.md
index a23d1f0..1d037a2 100644
--- a/examples/detect/README.en.md
+++ b/examples/detect/README.en.md
@@ -1,8 +1,8 @@
[简体中文](README.md) | English
-# Detection Model Inference Example
+# Object Detection Inference Example
-This example uses the YOLO11n model to demonstrate how to perform Detection model inference using the Command Line Interface (CLI), Python, and C++.
+This example uses the YOLO11n model to demonstrate how to perform Object Detection inference using the Command Line Interface (CLI), Python, and C++.
[yolo11n.pt](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt),[【TestImages】COCO-part.zip](https://www.ilanzou.com/s/N5Oyq8hZ)
@@ -44,7 +44,7 @@ trtexec --onnx=models/yolo11n.onnx --saveEngine=models/yolo11n.engine --fp16
> [!NOTE]
> The `--cudaGraph` command added from version 4.0 can further accelerate the inference process, but this feature only supports static models.
>
-> From version 4.2, OBB model inference is supported, and the `-m, --mode` command is added to select between Detection and OBB models.
+> From version 4.2, OBB model inference is supported, and the `-m, --mode` command is added to select between Detect and OBB.
1. Use the `trtyolo` command-line tool from the `tensorrt_yolo` library for inference. Run the following command to view help information:
diff --git a/examples/detect/README.md b/examples/detect/README.md
index b080a1a..6961d06 100644
--- a/examples/detect/README.md
+++ b/examples/detect/README.md
@@ -1,8 +1,8 @@
[English](README.en.md) | 简体中文
-# Detection 模型推理示例
+# 目标检测推理示例
-本示例以 YOLO11n 模型为例,展示如何使用命令行界面(CLI)、Python 和 C++ 三种方式进行 Detection 模型推理。
+本示例以 YOLO11n 模型为例,展示如何使用命令行界面(CLI)、Python 和 C++ 三种方式进行目标检测推理。
[yolo11n.pt](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt),[【测试图片】COCO-part.zip](https://www.ilanzou.com/s/N5Oyq8hZ)
@@ -44,7 +44,7 @@ trtexec --onnx=models/yolo11n.onnx --saveEngine=models/yolo11n.engine --fp16
> [!NOTE]
> 从 4.0 版本开始新增的 `--cudaGraph` 指令可以进一步加速推理过程,但该功能仅支持静态模型。
>
-> 从 4.2 版本开始,支持 OBB 模型推理,并新增 `-m, --mode` 指令,用于选择 Detection 还是 OBB 模型。
+> 从 4.2 版本开始,支持 OBB 推理,并新增 `-m, --mode` 指令,用于选择 Detect 还是 OBB。
1. 使用 `tensorrt_yolo` 库的 `trtyolo` 命令行工具进行推理。运行以下命令查看帮助信息:
diff --git a/examples/detect/detect.cpp b/examples/detect/detect.cpp
index a2d5ee8..9fbd5c9 100644
--- a/examples/detect/detect.cpp
+++ b/examples/detect/detect.cpp
@@ -55,13 +55,13 @@ std::vector<std::pair<std::string, cv::Scalar>> generateLabelColorPairs(const st
}
// Visualize inference results
-void visualize(cv::Mat& image, const deploy::DetResult& result, const std::vector<std::pair<std::string, cv::Scalar>>& labelColorPairs) {
+void visualize(cv::Mat& image, deploy::DetResult& result, std::vector<std::pair<std::string, cv::Scalar>>& labelColorPairs) {
for (size_t i = 0; i < result.num; ++i) {
- const auto& box = result.boxes[i];
+ auto& box = result.boxes[i];
int cls = result.classes[i];
float score = result.scores[i];
- const auto& label = labelColorPairs[cls].first;
- const auto& color = labelColorPairs[cls].second;
+ auto& label = labelColorPairs[cls].first;
+ auto& color = labelColorPairs[cls].second;
std::string labelText = label + " " + cv::format("%.2f", score);
// Draw rectangle and label
diff --git a/examples/obb/README.en.md b/examples/obb/README.en.md
index 6117027..48f0508 100644
--- a/examples/obb/README.en.md
+++ b/examples/obb/README.en.md
@@ -1,8 +1,8 @@
[简体中文](README.md) | English
-# OBB Detection Model Inference Example
+# Oriented Bounding Boxes Object Detection Inference Example
-This example uses the YOLO11n-obb model to demonstrate how to perform OBB Detection model inference using the Command Line Interface (CLI), Python, and C++.
+This example uses the YOLO11n-obb model to demonstrate how to perform Oriented Bounding Boxes Object Detection inference using the Command Line Interface (CLI), Python, and C++.
The required `yolo11n-obb.pt` and test images are provided and saved in the `images` folder and `models` folder, respectively.
@@ -46,7 +46,7 @@ trtexec --onnx=models/yolo11n-obb.onnx --saveEngine=models/yolo11n-obb.engine --
> [!NOTE]
> The `--cudaGraph` command added from version 4.0 can further accelerate the inference process, but this feature only supports static models.
>
-> From version 4.2, OBB model inference is supported, and the `-m, --mode` command is added to select between Detection and OBB models.
+> From version 4.2, OBB model inference is supported, and the `-m, --mode` command is added to select between Detect and OBB.
1. Use the `trtyolo` command-line tool from the `tensorrt_yolo` library for inference. Run the following command to view help information:
diff --git a/examples/obb/README.md b/examples/obb/README.md
index 190d5a3..40c628e 100644
--- a/examples/obb/README.md
+++ b/examples/obb/README.md
@@ -1,8 +1,8 @@
[English](README.en.md) | 简体中文
-# OBB Detection 模型推理示例
+# 旋转目标检测推理示例
-本示例以 YOLO11n-obb 模型为例,展示如何使用命令行界面(CLI)、Python 和 C++ 三种方式进行 OBB Detection 模型推理。
+本示例以 YOLO11n-obb 模型为例,展示如何使用命令行界面(CLI)、Python 和 C++ 三种方式进行旋转目标检测推理。
[yolo11n-obb.pt](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-obb.pt),[【测试图片】DOTA-part.zip](https://www.ilanzou.com/s/yK6yq8H5)
@@ -44,7 +44,7 @@ trtexec --onnx=models/yolo11n-obb.onnx --saveEngine=models/yolo11n-obb.engine --
> [!NOTE]
> 从 4.0 版本开始新增的 `--cudaGraph` 指令可以进一步加速推理过程,但该功能仅支持静态模型。
>
-> 从 4.2 版本开始,支持 OBB 模型推理,并新增 `-m, --mode` 指令,用于选择 Detection 还是 OBB 模型。
+> 从 4.2 版本开始,支持 OBB 推理,并新增 `-m, --mode` 指令,用于选择 Detect 还是 OBB。
1. 使用 `tensorrt_yolo` 库的 `trtyolo` 命令行工具进行推理。运行以下命令查看帮助信息:
diff --git a/examples/obb/obb.cpp b/examples/obb/obb.cpp
index 2543907..f02e720 100644
--- a/examples/obb/obb.cpp
+++ b/examples/obb/obb.cpp
@@ -83,13 +83,13 @@ std::vector<cv::Point> xyxyr2xyxyxyxy(const deploy::RotatedBox& box) {
}
// Visualize inference results
-void visualize(cv::Mat& image, const deploy::OBBResult& result, const std::vector<std::pair<std::string, cv::Scalar>>& labelColorPairs) {
+void visualize(cv::Mat& image, deploy::OBBResult& result, std::vector<std::pair<std::string, cv::Scalar>>& labelColorPairs) {
for (size_t i = 0; i < result.num; ++i) {
- const auto& box = result.boxes[i];
+ auto& box = result.boxes[i];
int cls = result.classes[i];
float score = result.scores[i];
- const auto& label = labelColorPairs[cls].first;
- const auto& color = labelColorPairs[cls].second;
+ auto& label = labelColorPairs[cls].first;
+ auto& color = labelColorPairs[cls].second;
std::string labelText = label + " " + cv::format("%.2f", score);
// Draw rectangle and label
diff --git a/examples/pose/pose.cpp b/examples/pose/pose.cpp
index a82bb81..af3821b 100644
--- a/examples/pose/pose.cpp
+++ b/examples/pose/pose.cpp
@@ -80,11 +80,11 @@ void visualize(cv::Mat& image, deploy::PoseResult& result, std::vector<std::pair<std::string, cv::Scalar>>& labelColorPairs) {
diff --git a/examples/segment/README.en.md b/examples/segment/README.en.md
--- a/examples/segment/README.en.md
+++ b/examples/segment/README.en.md
@@ -44,7 +44,7 @@ trtexec --onnx=models/yolo11n-seg.onnx --saveEngine=models/yolo11n-seg.engine --fp16
> The `--cudaGraph` command added from version 4.0 can further accelerate the inference process, but this feature only supports static models.
>
-> From version 4.3 and later, support for Segmentation model inference is added. The command `-m 2, --mode 2` is used to select the Segmentation model.
+> From version 4.3 onward, support for Instance Segmentation inference is added. The command `-m 2, --mode 2` is used to select Instance Segmentation.
1. Use the `trtyolo` command-line tool from the `tensorrt_yolo` library for inference. Run the following command to view help information:
diff --git a/examples/segment/README.md b/examples/segment/README.md
index 9e8ba54..214c578 100644
--- a/examples/segment/README.md
+++ b/examples/segment/README.md
@@ -1,8 +1,8 @@
[English](README.en.md) | 简体中文
-# Segmentation 模型推理示例
+# 实例分割推理示例
-本示例以 YOLO11n-seg 模型为例,展示如何使用命令行界面(CLI)、Python 和 C++ 三种方式进行 Segmentation 模型推理。
+本示例以 YOLO11n-seg 模型为例,展示如何使用命令行界面(CLI)、Python 和 C++ 三种方式进行实例分割推理。
[yolo11n-seg.pt](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-seg.pt),[【测试图片】COCO-part.zip](https://www.ilanzou.com/s/N5Oyq8hZ)
@@ -44,7 +44,7 @@ trtexec --onnx=models/yolo11n-seg.onnx --saveEngine=models/yolo11n-seg.engine --
> [!NOTE]
> 从 4.0 版本开始新增的 `--cudaGraph` 指令可以进一步加速推理过程,但该功能仅支持静态模型。
>
-> 从 4.3 以后的版本开始,支持 Segmentation 模型推理,指令 `-m 2, --mode 2` 用于选择 Segmentation 模型。
+> 从 4.3 以后的版本开始,支持实例分割推理,指令 `-m 2, --mode 2` 用于选择实例分割。
1. 使用 `tensorrt_yolo` 库的 `trtyolo` 命令行工具进行推理。运行以下命令查看帮助信息:
@@ -94,7 +94,4 @@ trtexec --onnx=models/yolo11n-seg.onnx --saveEngine=models/yolo11n-seg.engine --
./segment -e ../models/yolo11n-seg.engine -i ../images -o ../output -l ../labels.txt --cudaGraph
```
-
-
通过以上方式,您可以顺利完成模型推理。
diff --git a/examples/segment/segment.cpp b/examples/segment/segment.cpp
index 49a2f2a..32f8387 100644
--- a/examples/segment/segment.cpp
+++ b/examples/segment/segment.cpp
@@ -57,11 +57,11 @@ std::vector<std::pair<std::string, cv::Scalar>> generateLabelColorPairs(const st
// Visualize inference results
void visualize(cv::Mat& image, deploy::SegResult& result, std::vector<std::pair<std::string, cv::Scalar>>& labelColorPairs) {
for (size_t i = 0; i < result.num; ++i) {
- const auto& box = result.boxes[i];
+ auto& box = result.boxes[i];
int cls = result.classes[i];
float score = result.scores[i];
- const auto& label = labelColorPairs[cls].first;
- const auto& color = labelColorPairs[cls].second;
+ auto& label = labelColorPairs[cls].first;
+ auto& color = labelColorPairs[cls].second;
std::string labelText = label + " " + cv::format("%.2f", score);
// Draw rectangle and label
diff --git a/pyproject.toml b/pyproject.toml
index 6c5e764..fa3a999 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
# Project settings -----------------------------------------------------------------------------------------------------
[project]
-version = "4.3.0"
+version = "5.0.0"
readme = "README.md"
name = "tensorrt_yolo"
requires-python = ">=3.8"
diff --git a/source/deploy/pybind/deploy.cpp b/source/deploy/pybind/deploy.cpp
index 38706aa..e4f5455 100644
--- a/source/deploy/pybind/deploy.cpp
+++ b/source/deploy/pybind/deploy.cpp
@@ -356,7 +356,7 @@ void BindResult(pybind11::module &m) {
// Bind inference class template
template <typename T>
void BindClsTemplate(pybind11::module &m, const std::string &className) {
-    pybind11::class_<T>(m, className.c_str())
+    pybind11::class_<T, std::shared_ptr<T>>(m, className.c_str())
        .def(pybind11::init<const std::string &, bool, int>(),
pybind11::arg("file"), pybind11::arg("cudaMem") = false, pybind11::arg("device") = 0)
.def(
diff --git a/tensorrt_yolo/c_lib_wrap.py.in b/tensorrt_yolo/c_lib_wrap.py.in
index b55b311..0780b8f 100644
--- a/tensorrt_yolo/c_lib_wrap.py.in
+++ b/tensorrt_yolo/c_lib_wrap.py.in
@@ -16,7 +16,7 @@
# limitations under the License.
# ==============================================================================
# File : c_lib_wrap.py
-# Version : 1.0
+# Version : 5.0.0
# Author : laugh12321
# Contact : laugh12321@vip.qq.com
# Date : 2024/07/03 13:12:29
diff --git a/tensorrt_yolo/cli.py b/tensorrt_yolo/cli.py
index 3897097..c820b38 100644
--- a/tensorrt_yolo/cli.py
+++ b/tensorrt_yolo/cli.py
@@ -16,7 +16,7 @@
# limitations under the License.
# ==============================================================================
# File : cli.py
-# Version : 4.0
+# Version : 5.0.0
# Author : laugh12321
# Contact : laugh12321@vip.qq.com
# Date : 2024/07/05 14:26:53
diff --git a/tensorrt_yolo/export/head.py b/tensorrt_yolo/export/head.py
index 62440a8..943410c 100644
--- a/tensorrt_yolo/export/head.py
+++ b/tensorrt_yolo/export/head.py
@@ -16,7 +16,7 @@
# limitations under the License.
# ==============================================================================
# File : head.py
-# Version : 6.0
+# Version : 5.0.0
# Author : laugh12321
# Contact : laugh12321@vip.qq.com
# Date : 2024/04/22 09:45:11
diff --git a/tensorrt_yolo/export/ppyoloe.py b/tensorrt_yolo/export/ppyoloe.py
index bea10d6..5da1953 100644
--- a/tensorrt_yolo/export/ppyoloe.py
+++ b/tensorrt_yolo/export/ppyoloe.py
@@ -16,7 +16,7 @@
# limitations under the License.
# ==============================================================================
# File : ppyoloe.py
-# Version : 5.0
+# Version : 5.0.0
# Author : laugh12321
# Contact : laugh12321@vip.qq.com
# Date : 2024/01/28 14:37:43
diff --git a/tensorrt_yolo/infer/inference.py b/tensorrt_yolo/infer/inference.py
index 8d8aac3..be101c9 100644
--- a/tensorrt_yolo/infer/inference.py
+++ b/tensorrt_yolo/infer/inference.py
@@ -16,7 +16,7 @@
# limitations under the License.
# ==============================================================================
# File : inference.py
-# Version : 3.0
+# Version : 5.0.0
# Author : laugh12321
# Contact : laugh12321@vip.qq.com
# Date : 2024/07/03 14:06:55
diff --git a/tensorrt_yolo/infer/result.py b/tensorrt_yolo/infer/result.py
index 28e4435..b7a768b 100644
--- a/tensorrt_yolo/infer/result.py
+++ b/tensorrt_yolo/infer/result.py
@@ -16,7 +16,7 @@
# limitations under the License.
# ==============================================================================
# File : result.py
-# Version : 2.0
+# Version : 5.0.0
# Author : laugh12321
# Contact : laugh12321@vip.qq.com
# Date : 2024/08/04 12:39:02
diff --git a/tensorrt_yolo/infer/timer.py b/tensorrt_yolo/infer/timer.py
index 7258340..f363d0c 100644
--- a/tensorrt_yolo/infer/timer.py
+++ b/tensorrt_yolo/infer/timer.py
@@ -16,7 +16,7 @@
# limitations under the License.
# ==============================================================================
# File : timer.py
-# Version : 1.0
+# Version : 5.0.0
# Author : laugh12321
# Contact : laugh12321@vip.qq.com
# Date : 2024/07/05 13:39:01
diff --git a/tensorrt_yolo/infer/utils.py b/tensorrt_yolo/infer/utils.py
index 648284f..151366b 100644
--- a/tensorrt_yolo/infer/utils.py
+++ b/tensorrt_yolo/infer/utils.py
@@ -15,8 +15,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-# File : visualize.py
-# Version : 3.0
+# File : utils.py
+# Version : 5.0.0
# Author : laugh12321
# Contact : laugh12321@vip.qq.com
# Date : 2024/07/05 14:06:46
diff --git a/xmake.lua b/xmake.lua
index 99673a8..8da9b77 100644
--- a/xmake.lua
+++ b/xmake.lua
@@ -1,6 +1,6 @@
-- 设置项目信息
set_project("TensorRT-YOLO")
-set_version("4.3.0")
+set_version("5.0.0")
set_languages("cxx17")
set_allowedplats("windows", "linux")
@@ -9,7 +9,7 @@ add_requires("python", {system = true})
add_requires("pybind11")
-- 添加编译规则
-add_rules("plugin.compile_commands.autoupdate", {outputdir = ".vscode"})
+add_rules("plugin.compile_commands.autoupdate", {outputdir = "build"})
add_rules("mode.release")
-- 定义选项