NVIDIA Jetson: YOLOv5 TensorRT Deployment and Acceleration (C++ Version)
Posted by 一颗小树x
Preface
After setting up the deep-learning environment for YOLOv5 on the NVIDIA Jetson AGX Xavier and confirming the model could run inference normally, I found it was not fast enough, so I deployed it with TensorRT to accelerate the model. This article covers the C++ version.
Previous post: NVIDIA Jetson YOLOv5 application and deployment (一颗小树x, CSDN blog)
Versions: yolov5 v6.0 with the matching tensorrtx; JetPack 4.5 [L4T 32.5.0]; CUDA 10.2.89.
I tested 100 images from the KITTI dataset: after acceleration, the average inference time was 22 ms per image, which feels decent.
1. Download yolov5 v6.0 and tensorrtx
Download YOLOv5 from the yolov5 release v6.0 tag:
git clone -b v6.0 https://github.com/ultralytics/yolov5.git
And the matching version of tensorrtx:
git clone https://github.com/wang-xinyu/tensorrtx.git
2. Generate the xxx.wts file
First, copy tensorrtx/yolov5/gen_wts.py into the yolov5 repo. Note that the tensorrtx directory name depends on how you downloaded it; here it is tensorrtx-master. For example, with tensorrtx-master and yolov5 in the same parent directory:
cp tensorrtx-master/yolov5/gen_wts.py ./yolov5
Enter the yolov5 project directory:
cd yolov5
Put yolov5s.pt into the yolov5 directory, then generate yolov5s.wts:
python gen_wts.py -w yolov5s.pt -o yolov5s.wts
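The generated .wts file is plain text: the first line holds the number of weight tensors, and each subsequent line is "<name> <count> <hex values...>". If you want to sanity-check the export before building the engine, a minimal reader (a hypothetical helper, not part of yolov5 or tensorrtx) could look like this:
// check_wts.cpp: print the tensor count and the first tensor's name/size
#include <fstream>
#include <iostream>
#include <string>

int main() {
    std::ifstream f("yolov5s.wts");
    if (!f.good()) { std::cerr << "yolov5s.wts not found" << std::endl; return 1; }
    int count = 0;
    f >> count; // first token: number of weight tensors in the file
    std::string name;
    int len = 0;
    f >> name >> std::dec >> len; // name and element count of the first tensor
    std::cout << count << " tensors; first: " << name << " (" << len << " floats)" << std::endl;
    return 0;
}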
3. Modify the configuration
Enter the yolov5 directory inside tensorrtx:
cd tensorrtx-master/yolov5
3.1 For the C++ version, the files to pay attention to are yolov5.cpp and yololayer.h. Start with yolov5.cpp, where you can set the GPU id, NMS threshold, bounding-box confidence threshold, batch size, inference precision (INT8/FP16/FP32), and other parameters.
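For reference, these are the parameter defines near the top of yolov5.cpp (repeated from the full listing in Section 6):
#define USE_FP16 // set USE_INT8 or USE_FP16 or USE_FP32
#define DEVICE 0 // GPU id
#define NMS_THRESH 0.4
#define CONF_THRESH 0.5
#define BATCH_SIZE 1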
3.2 Then look at yololayer.h, which sets the model's class count, input size, and so on.
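In the tensorrtx version used here, the adjustable constants in yololayer.h look roughly like the sketch below (a paraphrase, not a verbatim quote; check your copy for the exact values):
namespace Yolo {
    static constexpr int CLASS_NUM = 80; // set this to your dataset's class count
    static constexpr int INPUT_H = 640;  // input height, must be divisible by 32
    static constexpr int INPUT_W = 640;  // input width, must be divisible by 32
}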
To run inference from a camera (the default camera 0), only yolov5.cpp needs modifying; the relevant excerpt is sketched below, and the full modified file appears in Section 6.
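The camera change, excerpted from the full listing in Section 6, swaps the image-directory read for an OpenCV capture:
cv::VideoCapture cap; // video capture object
cap.open(0);          // open the default camera (index 0)
// ... inside the batch loop, instead of cv::imread():
cv::Mat img;
cap >> img;           // grab one frame per batch slot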
4. Build tensorrtx
First, enter the yolov5 directory inside tensorrtx:
cd tensorrtx-master/yolov5
Create a build directory and prepare to compile:
mkdir build
cd build
Copy the yolov5s.wts generated earlier into the build directory (run from inside build, assuming yolov5 and tensorrtx-master sit side by side):
cp ../../../yolov5/yolov5s.wts .
Then compile:
cmake ..
make
5. Run
YOLOv5s model:
First serialize yolov5s.wts into yolov5s.engine, then run detection with yolov5s.engine:
sudo ./yolov5 -s yolov5s.wts yolov5s.engine s
sudo ./yolov5 -d yolov5s.engine ../samples
The trailing s in sudo ./yolov5 -s yolov5s.wts yolov5s.engine s selects the model scale (choose from n/s/m/l/x/n6/s6/m6/l6/x6), as listed below.
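For reference, the scale letter maps to the depth/width multipliers gd/gw in parse_args (shown in full in Section 6):
n: gd=0.33, gw=0.25
s: gd=0.33, gw=0.50
m: gd=0.67, gw=0.75
l: gd=1.00, gw=1.00
x: gd=1.33, gw=1.25
A trailing 6 (for example s6) selects the P6 model built by build_engine_p6.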
../samples points to two sample images; you can also create a folder of your own test images and pass its path instead.
For the YOLOv5m model:
sudo ./yolov5 -s yolov5m.wts yolov5m.engine m
sudo ./yolov5 -d yolov5m.engine ../samples
I tested 100 images from the KITTI dataset: the average inference time was 22 ms per image, which feels decent; next I plan to measure real-time video-stream throughput.
Results on other datasets (shown as images in the original post).
6. Key code walkthrough
As covered in Section 3, the key files are yolov5.cpp (GPU id, NMS threshold, confidence threshold, batch size, inference precision) and yololayer.h (class count, input size). Below is the full yolov5.cpp, modified so that inference runs on frames from the default camera 0 instead of an image directory:
#include <iostream>
#include <chrono>
#include <cmath>
#include "cuda_utils.h"
#include "logging.h"
#include "common.hpp"
#include "utils.h"
#include "calibrator.h"
#include "preprocess.h"
// OpenCV includes
#include <opencv2/opencv.hpp>
#include <opencv2/highgui.hpp>
#include <string>
#define USE_FP16 // set USE_INT8 or USE_FP16 or USE_FP32
#define DEVICE 0 // GPU id
#define NMS_THRESH 0.4
#define CONF_THRESH 0.5
#define BATCH_SIZE 1
#define MAX_IMAGE_INPUT_SIZE_THRESH 3000 * 3000 // ensure this exceeds the largest input image size!
// stuff we know about the network and the input/output blobs
static const int INPUT_H = Yolo::INPUT_H;
static const int INPUT_W = Yolo::INPUT_W;
static const int CLASS_NUM = Yolo::CLASS_NUM;
static const int OUTPUT_SIZE = Yolo::MAX_OUTPUT_BBOX_COUNT * sizeof(Yolo::Detection) / sizeof(float) + 1; // we assume the yolo layer outputs at most MAX_OUTPUT_BBOX_COUNT boxes with conf >= 0.1
const char* INPUT_BLOB_NAME = "data";
const char* OUTPUT_BLOB_NAME = "prob";
static Logger gLogger;
static int get_width(int x, float gw, int divisor = 8) {
return int(ceil((x * gw) / divisor)) * divisor;
}
static int get_depth(int x, float gd) {
if (x == 1) return 1;
int r = round(x * gd);
if (x * gd - int(x * gd) == 0.5 && (int(x * gd) % 2) == 0) --r;
return std::max<int>(r, 1);
}
ICudaEngine* build_engine(unsigned int maxBatchSize, IBuilder* builder, IBuilderConfig* config, DataType dt, float& gd, float& gw, std::string& wts_name) {
INetworkDefinition* network = builder->createNetworkV2(0U);
// Create input tensor of shape 3, INPUT_H, INPUT_W with name INPUT_BLOB_NAME
ITensor* data = network->addInput(INPUT_BLOB_NAME, dt, Dims3{ 3, INPUT_H, INPUT_W });
assert(data);
std::map<std::string, Weights> weightMap = loadWeights(wts_name);
/* ------ yolov5 backbone------ */
auto conv0 = convBlock(network, weightMap, *data, get_width(64, gw), 6, 2, 1, "model.0");
assert(conv0);
auto conv1 = convBlock(network, weightMap, *conv0->getOutput(0), get_width(128, gw), 3, 2, 1, "model.1");
auto bottleneck_CSP2 = C3(network, weightMap, *conv1->getOutput(0), get_width(128, gw), get_width(128, gw), get_depth(3, gd), true, 1, 0.5, "model.2");
auto conv3 = convBlock(network, weightMap, *bottleneck_CSP2->getOutput(0), get_width(256, gw), 3, 2, 1, "model.3");
auto bottleneck_csp4 = C3(network, weightMap, *conv3->getOutput(0), get_width(256, gw), get_width(256, gw), get_depth(6, gd), true, 1, 0.5, "model.4");
auto conv5 = convBlock(network, weightMap, *bottleneck_csp4->getOutput(0), get_width(512, gw), 3, 2, 1, "model.5");
auto bottleneck_csp6 = C3(network, weightMap, *conv5->getOutput(0), get_width(512, gw), get_width(512, gw), get_depth(9, gd), true, 1, 0.5, "model.6");
auto conv7 = convBlock(network, weightMap, *bottleneck_csp6->getOutput(0), get_width(1024, gw), 3, 2, 1, "model.7");
auto bottleneck_csp8 = C3(network, weightMap, *conv7->getOutput(0), get_width(1024, gw), get_width(1024, gw), get_depth(3, gd), false, 1, 0.5, "model.8");
auto spp9 = SPPF(network, weightMap, *bottleneck_csp8->getOutput(0), get_width(1024, gw), get_width(1024, gw), 5, "model.9");
/* ------ yolov5 head ------ */
auto conv10 = convBlock(network, weightMap, *spp9->getOutput(0), get_width(512, gw), 1, 1, 1, "model.10");
auto upsample11 = network->addResize(*conv10->getOutput(0));
assert(upsample11);
upsample11->setResizeMode(ResizeMode::kNEAREST);
upsample11->setOutputDimensions(bottleneck_csp6->getOutput(0)->getDimensions());
ITensor* inputTensors12[] = { upsample11->getOutput(0), bottleneck_csp6->getOutput(0) };
auto cat12 = network->addConcatenation(inputTensors12, 2);
auto bottleneck_csp13 = C3(network, weightMap, *cat12->getOutput(0), get_width(1024, gw), get_width(512, gw), get_depth(3, gd), false, 1, 0.5, "model.13");
auto conv14 = convBlock(network, weightMap, *bottleneck_csp13->getOutput(0), get_width(256, gw), 1, 1, 1, "model.14");
auto upsample15 = network->addResize(*conv14->getOutput(0));
assert(upsample15);
upsample15->setResizeMode(ResizeMode::kNEAREST);
upsample15->setOutputDimensions(bottleneck_csp4->getOutput(0)->getDimensions());
ITensor* inputTensors16[] = { upsample15->getOutput(0), bottleneck_csp4->getOutput(0) };
auto cat16 = network->addConcatenation(inputTensors16, 2);
auto bottleneck_csp17 = C3(network, weightMap, *cat16->getOutput(0), get_width(512, gw), get_width(256, gw), get_depth(3, gd), false, 1, 0.5, "model.17");
/* ------ detect ------ */
IConvolutionLayer* det0 = network->addConvolutionNd(*bottleneck_csp17->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW{ 1, 1 }, weightMap["model.24.m.0.weight"], weightMap["model.24.m.0.bias"]);
auto conv18 = convBlock(network, weightMap, *bottleneck_csp17->getOutput(0), get_width(256, gw), 3, 2, 1, "model.18");
ITensor* inputTensors19[] = { conv18->getOutput(0), conv14->getOutput(0) };
auto cat19 = network->addConcatenation(inputTensors19, 2);
auto bottleneck_csp20 = C3(network, weightMap, *cat19->getOutput(0), get_width(512, gw), get_width(512, gw), get_depth(3, gd), false, 1, 0.5, "model.20");
IConvolutionLayer* det1 = network->addConvolutionNd(*bottleneck_csp20->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW{ 1, 1 }, weightMap["model.24.m.1.weight"], weightMap["model.24.m.1.bias"]);
auto conv21 = convBlock(network, weightMap, *bottleneck_csp20->getOutput(0), get_width(512, gw), 3, 2, 1, "model.21");
ITensor* inputTensors22[] = { conv21->getOutput(0), conv10->getOutput(0) };
auto cat22 = network->addConcatenation(inputTensors22, 2);
auto bottleneck_csp23 = C3(network, weightMap, *cat22->getOutput(0), get_width(1024, gw), get_width(1024, gw), get_depth(3, gd), false, 1, 0.5, "model.23");
IConvolutionLayer* det2 = network->addConvolutionNd(*bottleneck_csp23->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW{ 1, 1 }, weightMap["model.24.m.2.weight"], weightMap["model.24.m.2.bias"]);
auto yolo = addYoLoLayer(network, weightMap, "model.24", std::vector<IConvolutionLayer*>{ det0, det1, det2 });
yolo->getOutput(0)->setName(OUTPUT_BLOB_NAME);
network->markOutput(*yolo->getOutput(0));
// Build engine
builder->setMaxBatchSize(maxBatchSize);
config->setMaxWorkspaceSize(16 * (1 << 20)); // 16MB
#if defined(USE_FP16)
config->setFlag(BuilderFlag::kFP16);
#elif defined(USE_INT8)
std::cout << "Your platform support int8: " << (builder->platformHasFastInt8() ? "true" : "false") << std::endl;
assert(builder->platformHasFastInt8());
config->setFlag(BuilderFlag::kINT8);
Int8EntropyCalibrator2* calibrator = new Int8EntropyCalibrator2(1, INPUT_W, INPUT_H, "./coco_calib/", "int8calib.table", INPUT_BLOB_NAME);
config->setInt8Calibrator(calibrator);
#endif
std::cout << "Building engine, please wait for a while..." << std::endl;
ICudaEngine* engine = builder->buildEngineWithConfig(*network, *config);
std::cout << "Build engine successfully!" << std::endl;
// Don't need the network any more
network->destroy();
// Release host memory
for (auto& mem : weightMap) {
free((void*)(mem.second.values));
}
return engine;
}
ICudaEngine* build_engine_p6(unsigned int maxBatchSize, IBuilder* builder, IBuilderConfig* config, DataType dt, float& gd, float& gw, std::string& wts_name) {
INetworkDefinition* network = builder->createNetworkV2(0U);
// Create input tensor of shape 3, INPUT_H, INPUT_W with name INPUT_BLOB_NAME
ITensor* data = network->addInput(INPUT_BLOB_NAME, dt, Dims3{ 3, INPUT_H, INPUT_W });
assert(data);
std::map<std::string, Weights> weightMap = loadWeights(wts_name);
/* ------ yolov5 backbone------ */
auto conv0 = convBlock(network, weightMap, *data, get_width(64, gw), 6, 2, 1, "model.0");
auto conv1 = convBlock(network, weightMap, *conv0->getOutput(0), get_width(128, gw), 3, 2, 1, "model.1");
auto c3_2 = C3(network, weightMap, *conv1->getOutput(0), get_width(128, gw), get_width(128, gw), get_depth(3, gd), true, 1, 0.5, "model.2");
auto conv3 = convBlock(network, weightMap, *c3_2->getOutput(0), get_width(256, gw), 3, 2, 1, "model.3");
auto c3_4 = C3(network, weightMap, *conv3->getOutput(0), get_width(256, gw), get_width(256, gw), get_depth(6, gd), true, 1, 0.5, "model.4");
auto conv5 = convBlock(network, weightMap, *c3_4->getOutput(0), get_width(512, gw), 3, 2, 1, "model.5");
auto c3_6 = C3(network, weightMap, *conv5->getOutput(0), get_width(512, gw), get_width(512, gw), get_depth(9, gd), true, 1, 0.5, "model.6");
auto conv7 = convBlock(network, weightMap, *c3_6->getOutput(0), get_width(768, gw), 3, 2, 1, "model.7");
auto c3_8 = C3(network, weightMap, *conv7->getOutput(0), get_width(768, gw), get_width(768, gw), get_depth(3, gd), true, 1, 0.5, "model.8");
auto conv9 = convBlock(network, weightMap, *c3_8->getOutput(0), get_width(1024, gw), 3, 2, 1, "model.9");
auto c3_10 = C3(network, weightMap, *conv9->getOutput(0), get_width(1024, gw), get_width(1024, gw), get_depth(3, gd), false, 1, 0.5, "model.10");
auto sppf11 = SPPF(network, weightMap, *c3_10->getOutput(0), get_width(1024, gw), get_width(1024, gw), 5, "model.11");
/* ------ yolov5 head ------ */
auto conv12 = convBlock(network, weightMap, *sppf11->getOutput(0), get_width(768, gw), 1, 1, 1, "model.12");
auto upsample13 = network->addResize(*conv12->getOutput(0));
assert(upsample13);
upsample13->setResizeMode(ResizeMode::kNEAREST);
upsample13->setOutputDimensions(c3_8->getOutput(0)->getDimensions());
ITensor* inputTensors14[] = { upsample13->getOutput(0), c3_8->getOutput(0) };
auto cat14 = network->addConcatenation(inputTensors14, 2);
auto c3_15 = C3(network, weightMap, *cat14->getOutput(0), get_width(1536, gw), get_width(768, gw), get_depth(3, gd), false, 1, 0.5, "model.15");
auto conv16 = convBlock(network, weightMap, *c3_15->getOutput(0), get_width(512, gw), 1, 1, 1, "model.16");
auto upsample17 = network->addResize(*conv16->getOutput(0));
assert(upsample17);
upsample17->setResizeMode(ResizeMode::kNEAREST);
upsample17->setOutputDimensions(c3_6->getOutput(0)->getDimensions());
ITensor* inputTensors18[] = { upsample17->getOutput(0), c3_6->getOutput(0) };
auto cat18 = network->addConcatenation(inputTensors18, 2);
auto c3_19 = C3(network, weightMap, *cat18->getOutput(0), get_width(1024, gw), get_width(512, gw), get_depth(3, gd), false, 1, 0.5, "model.19");
auto conv20 = convBlock(network, weightMap, *c3_19->getOutput(0), get_width(256, gw), 1, 1, 1, "model.20");
auto upsample21 = network->addResize(*conv20->getOutput(0));
assert(upsample21);
upsample21->setResizeMode(ResizeMode::kNEAREST);
upsample21->setOutputDimensions(c3_4->getOutput(0)->getDimensions());
ITensor* inputTensors21[] = { upsample21->getOutput(0), c3_4->getOutput(0) };
auto cat22 = network->addConcatenation(inputTensors21, 2);
auto c3_23 = C3(network, weightMap, *cat22->getOutput(0), get_width(512, gw), get_width(256, gw), get_depth(3, gd), false, 1, 0.5, "model.23");
auto conv24 = convBlock(network, weightMap, *c3_23->getOutput(0), get_width(256, gw), 3, 2, 1, "model.24");
ITensor* inputTensors25[] = { conv24->getOutput(0), conv20->getOutput(0) };
auto cat25 = network->addConcatenation(inputTensors25, 2);
auto c3_26 = C3(network, weightMap, *cat25->getOutput(0), get_width(1024, gw), get_width(512, gw), get_depth(3, gd), false, 1, 0.5, "model.26");
auto conv27 = convBlock(network, weightMap, *c3_26->getOutput(0), get_width(512, gw), 3, 2, 1, "model.27");
ITensor* inputTensors28[] = { conv27->getOutput(0), conv16->getOutput(0) };
auto cat28 = network->addConcatenation(inputTensors28, 2);
auto c3_29 = C3(network, weightMap, *cat28->getOutput(0), get_width(1536, gw), get_width(768, gw), get_depth(3, gd), false, 1, 0.5, "model.29");
auto conv30 = convBlock(network, weightMap, *c3_29->getOutput(0), get_width(768, gw), 3, 2, 1, "model.30");
ITensor* inputTensors31[] = { conv30->getOutput(0), conv12->getOutput(0) };
auto cat31 = network->addConcatenation(inputTensors31, 2);
auto c3_32 = C3(network, weightMap, *cat31->getOutput(0), get_width(2048, gw), get_width(1024, gw), get_depth(3, gd), false, 1, 0.5, "model.32");
/* ------ detect ------ */
IConvolutionLayer* det0 = network->addConvolutionNd(*c3_23->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW{ 1, 1 }, weightMap["model.33.m.0.weight"], weightMap["model.33.m.0.bias"]);
IConvolutionLayer* det1 = network->addConvolutionNd(*c3_26->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW{ 1, 1 }, weightMap["model.33.m.1.weight"], weightMap["model.33.m.1.bias"]);
IConvolutionLayer* det2 = network->addConvolutionNd(*c3_29->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW{ 1, 1 }, weightMap["model.33.m.2.weight"], weightMap["model.33.m.2.bias"]);
IConvolutionLayer* det3 = network->addConvolutionNd(*c3_32->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW{ 1, 1 }, weightMap["model.33.m.3.weight"], weightMap["model.33.m.3.bias"]);
auto yolo = addYoLoLayer(network, weightMap, "model.33", std::vector<IConvolutionLayer*>{ det0, det1, det2, det3 });
yolo->getOutput(0)->setName(OUTPUT_BLOB_NAME);
network->markOutput(*yolo->getOutput(0));
// Build engine
builder->setMaxBatchSize(maxBatchSize);
config->setMaxWorkspaceSize(16 * (1 << 20)); // 16MB
#if defined(USE_FP16)
config->setFlag(BuilderFlag::kFP16);
#elif defined(USE_INT8)
std::cout << "Your platform support int8: " << (builder->platformHasFastInt8() ? "true" : "false") << std::endl;
assert(builder->platformHasFastInt8());
config->setFlag(BuilderFlag::kINT8);
Int8EntropyCalibrator2* calibrator = new Int8EntropyCalibrator2(1, INPUT_W, INPUT_H, "./coco_calib/", "int8calib.table", INPUT_BLOB_NAME);
config->setInt8Calibrator(calibrator);
#endif
std::cout << "Building engine, please wait for a while..." << std::endl;
ICudaEngine* engine = builder->buildEngineWithConfig(*network, *config);
std::cout << "Build engine successfully!" << std::endl;
// Don't need the network any more
network->destroy();
// Release host memory
for (auto& mem : weightMap) {
free((void*)(mem.second.values));
}
return engine;
}
void APIToModel(unsigned int maxBatchSize, IHostMemory** modelStream, bool& is_p6, float& gd, float& gw, std::string& wts_name) {
// Create builder
IBuilder* builder = createInferBuilder(gLogger);
IBuilderConfig* config = builder->createBuilderConfig();
// Create model to populate the network, then set the outputs and create an engine
ICudaEngine *engine = nullptr;
if (is_p6) {
engine = build_engine_p6(maxBatchSize, builder, config, DataType::kFLOAT, gd, gw, wts_name);
} else {
engine = build_engine(maxBatchSize, builder, config, DataType::kFLOAT, gd, gw, wts_name);
}
assert(engine != nullptr);
// Serialize the engine
(*modelStream) = engine->serialize();
// Close everything down
engine->destroy();
builder->destroy();
config->destroy();
}
void doInference(IExecutionContext& context, cudaStream_t& stream, void **buffers, float* output, int batchSize) {
// infer on the batch asynchronously, and DMA output back to host
context.enqueue(batchSize, buffers, stream, nullptr);
CUDA_CHECK(cudaMemcpyAsync(output, buffers[1], batchSize * OUTPUT_SIZE * sizeof(float), cudaMemcpyDeviceToHost, stream));
cudaStreamSynchronize(stream);
}
bool parse_args(int argc, char** argv, std::string& wts, std::string& engine, bool& is_p6, float& gd, float& gw, std::string& img_dir) {
if (argc < 4) return false;
if (std::string(argv[1]) == "-s" && (argc == 5 || argc == 7)) {
wts = std::string(argv[2]);
engine = std::string(argv[3]);
auto net = std::string(argv[4]);
if (net[0] == 'n') {
gd = 0.33;
gw = 0.25;
} else if (net[0] == 's') {
gd = 0.33;
gw = 0.50;
} else if (net[0] == 'm') {
gd = 0.67;
gw = 0.75;
} else if (net[0] == 'l') {
gd = 1.0;
gw = 1.0;
} else if (net[0] == 'x') {
gd = 1.33;
gw = 1.25;
} else if (net[0] == 'c' && argc == 7) {
gd = atof(argv[5]);
gw = atof(argv[6]);
} else {
return false;
}
if (net.size() == 2 && net[1] == '6') is_p6 = true;
} else if (std::string(argv[1]) == "-d" && argc == 4) {
engine = std::string(argv[2]);
img_dir = std::string(argv[3]);
} else {
return false;
}
return true;
}
int main(int argc, char** argv) {
// opencv
cv::VideoCapture cap; // 1. create the video capture object
cv::Mat readImage; // frame read from the camera
cap.open(0); // 2. open the default camera (index 0)
if (!cap.isOpened()) std::cout << "open Capture error !!!" << std::endl;
else std::cout << "open Capture OK !!!" << std::endl;
// cap.release(); // the capture object is released at the end of main
cudaSetDevice(DEVICE);
std::string wts_name = "";
std::string engine_name = "";
bool is_p6 = false;
float gd = 0.0f, gw = 0.0f;
std::string img_dir;
if (!parse_args(argc, argv, wts_name, engine_name, is_p6, gd, gw, img_dir)) {
std::cerr << "arguments not right!" << std::endl;
std::cerr << "./yolov5 -s [.wts] [.engine] [n/s/m/l/x/n6/s6/m6/l6/x6 or c/c6 gd gw] // serialize model to plan file" << std::endl;
std::cerr << "./yolov5 -d [.engine] ../samples // deserialize plan file and run inference" << std::endl;
return -1;
}
// create a model using the API directly and serialize it to a stream
if (!wts_name.empty()) {
IHostMemory* modelStream{ nullptr };
APIToModel(BATCH_SIZE, &modelStream, is_p6, gd, gw, wts_name);
assert(modelStream != nullptr);
std::ofstream p(engine_name, std::ios::binary);
if (!p) {
std::cerr << "could not open plan output file" << std::endl;
return -1;
}
p.write(reinterpret_cast<const char*>(modelStream->data()), modelStream->size());
modelStream->destroy();
return 0;
}
// deserialize the .engine and run inference
std::ifstream file(engine_name, std::ios::binary);
if (!file.good()) {
std::cerr << "read " << engine_name << " error!" << std::endl;
return -1;
}
char *trtModelStream = nullptr;
size_t size = 0;
file.seekg(0, file.end);
size = file.tellg();
file.seekg(0, file.beg);
trtModelStream = new char[size];
assert(trtModelStream);
file.read(trtModelStream, size);
file.close();
std::vector<std::string> file_names;
if (read_files_in_dir(img_dir.c_str(), file_names) < 0) {
std::cerr << "read_files_in_dir failed." << std::endl;
return -1;
}
static float prob[BATCH_SIZE * OUTPUT_SIZE];
IRuntime* runtime = createInferRuntime(gLogger);
assert(runtime != nullptr);
ICudaEngine* engine = runtime->deserializeCudaEngine(trtModelStream, size);
assert(engine != nullptr);
IExecutionContext* context = engine->createExecutionContext();
assert(context != nullptr);
delete[] trtModelStream;
assert(engine->getNbBindings() == 2);
float* buffers[2];
// In order to bind the buffers, we need to know the names of the input and output tensors.
// Note that indices are guaranteed to be less than IEngine::getNbBindings()
const int inputIndex = engine->getBindingIndex(INPUT_BLOB_NAME);
const int outputIndex = engine->getBindingIndex(OUTPUT_BLOB_NAME);
assert(inputIndex == 0);
assert(outputIndex == 1);
// Create GPU buffers on device
CUDA_CHECK(cudaMalloc((void**)&buffers[inputIndex], BATCH_SIZE * 3 * INPUT_H * INPUT_W * sizeof(float)));
CUDA_CHECK(cudaMalloc((void**)&buffers[outputIndex], BATCH_SIZE * OUTPUT_SIZE * sizeof(float)));
// Create stream
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
uint8_t* img_host = nullptr;
uint8_t* img_device = nullptr;
// prepare input data cache in pinned memory
CUDA_CHECK(cudaMallocHost((void**)&img_host, MAX_IMAGE_INPUT_SIZE_THRESH * 3));
// prepare input data cache in device memory
CUDA_CHECK(cudaMalloc((void**)&img_device, MAX_IMAGE_INPUT_SIZE_THRESH * 3));
int fcount = 0;
int save_int = 0;
std::vector<cv::Mat> imgs_buffer(BATCH_SIZE);
std::vector<AffineMatrix> matrix_buffer(BATCH_SIZE);
while (true) {
// for (int f = 0; f < (int)file_names.size(); f++) // original: loop over the images in img_dir
if (cv::waitKey(1) == 'q') break; // press 'q' to quit the program
fcount++;
save_int++;
if (fcount < BATCH_SIZE) continue;
//auto start = std::chrono::system_clock::now();
float* buffer_idx = (float*)buffers[inputIndex];
for (int b = 0; b < fcount; b++) {
cv::Mat img;
cap >> img;
// cv::Mat img = cv::imread(img_dir + "/" + file_names[f - fcount + 1 + b]); // original: read images from img_dir
if (img.empty()) continue;
imgs_buffer[b] = img;
size_t size_image = img.cols * img.rows * 3;
size_t size_image_dst = INPUT_H * INPUT_W * 3;
//copy data to pinned memory
memcpy(img_host,img.data,size_image);
//copy data to device memory
CUDA_CHECK(cudaMemcpyAsync(img_device,img_host,size_image,cudaMemcpyHostToDevice,stream));
preprocess_kernel_img(img_device, img.cols, img.rows, buffer_idx, matrix_buffer[b], INPUT_W, INPUT_H, stream);
buffer_idx += size_image_dst;
}
// Run inference
auto start = std::chrono::system_clock::now();
doInference(*context, stream, (void**)buffers, prob, BATCH_SIZE);
auto end = std::chrono::system_clock::now();
std::cout << "inference time: " << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms" << std::endl;
std::vector<std::vector<Yolo::Detection>> batch_res(fcount);
for (int b = 0; b < fcount; b++) {
auto& res = batch_res[b];
nms(res, &prob[b * OUTPUT_SIZE], CONF_THRESH, NMS_THRESH);
}
for (int b = 0; b < fcount; b++) {
auto& res = batch_res[b];
auto& bbox_affine_matrix = matrix_buffer[b];
cv::Mat img = imgs_buffer[b];
for (size_t j = 0; j < res.size(); j++) {
cv::Rect r = get_rect(res[j].bbox, bbox_affine_matrix);
cv::rectangle(img, r, cv::Scalar(0x27, 0xC1, 0x36), 2);
cv::putText(img, std::to_string((int)res[j].class_id), cv::Point(r.x, r.y - 1), cv::FONT_HERSHEY_PLAIN, 1.2, cv::Scalar(0xFF, 0xFF, 0xFF), 2);
}
// cv::imwrite("_" + file_names[f - fcount + 1 + b], img); // original: name the output after the input file
cv::imwrite(std::to_string(save_int) + ".jpg", img); // save each annotated frame as <save_int>.jpg
}
fcount = 0;
}
// Release stream and buffers
cudaStreamDestroy(stream);
CUDA_CHECK(cudaFree(img_device));
CUDA_CHECK(cudaFreeHost(img_host));
CUDA_CHECK(cudaFree(buffers[inputIndex]));
CUDA_CHECK(cudaFree(buffers[outputIndex]));
// Destroy the engine
context->destroy();
engine->destroy();
runtime->destroy();
cap.release(); // release the video capture object
// Print histogram of the output distribution
//std::cout << "\nOutput:\n\n";
//for (unsigned int i = 0; i < OUTPUT_SIZE; i++)
//{
//    std::cout << prob[i] << ", ";
//    if (i % 10 == 0) std::cout << std::endl;
//}
//std::cout << std::endl;
return 0;
}
Sample output (images in the original post).
7. Further acceleration with batch size
The upstream repo reports that with batch size 8, preprocessing plus inference is about 3x faster, so I tried it, again on the same 100 KITTI images.
With batch size 8, one pass over 8 images averaged 46 ms; 46 ms / 8 = 5.75 ms per image. Compared with the 22 ms single-image time above, that is roughly 3.8x faster.
That makes it quite attractive when one device has to run inference on multiple video streams.
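Assuming the code above, the only source change for this experiment is the BATCH_SIZE define; and because maxBatchSize is baked into the serialized engine via APIToModel(BATCH_SIZE, ...), the program must be recompiled and the .engine regenerated with -s after changing it:
#define BATCH_SIZE 8 // was 1; recompile, then re-serialize the engine (-s) before running -d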
This article builds on wang-xinyu's open-source tensorrtx; many thanks: https://github.com/wang-xinyu/tensorrtx
Reference: https://github.com/wang-xinyu/tensorrtx/blob/master/yolov5/README.md