/*
************************************************************************
// Copyright (C) 2021, Raphael Poulenard.
************************************************************************
// Open_LPReditor.cpp: entry point for the LPReditor demo console application.
//
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
//////////////////////////////////////////////////////////////////////
Third-party software
C++ inference source code
OpenCV 4.5.0 and higher
Copyright © 2021, OpenCV team
Apache 2 License
ONNXRUNTIME
Copyright © 2020 Microsoft. All rights reserved.
MIT License
model production
YOLOv5
by Glenn Jocher (Ultralytics.com)
GPL-3.0 License
onnx
Copyright (c) Facebook, Inc. and Microsoft Corporation. All rights reserved.
MIT License
*/
// Open_LPReditor.cpp : Defines the entry point for the console application.
//
#include <iterator>
#include <iostream>
#include <assert.h>
#include <cmath>
#include <exception>
#include <fstream>
#include <limits>
#include <numeric>
#include <string>
#include <filesystem>
#include <onnxruntime_c_api.h>
#include <onnxruntime_cxx_api.h>
#ifdef LPR_EDITOR_USE_CUDA
#include <cuda_provider_factory.h>
#endif //LPR_EDITOR_USE_CUDA
#include <opencv2/opencv.hpp>
#include <opencv2/dnn/dnn.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include "yolov5_anpr_onnx_detector.h"
#include "ONNX_detector.h"
static void help(char** argv)
{
std::cout << "\nThis program demonstrates the automatic numberplate recognition software named LPReditor\n"
"Usage:\n" << argv[0] << "\n--model = path to the model *.onnx file\n"
<< "[--image = path to your image file (if you opt to process just one image) ]\n"
<< "[--dir = path to a directory containing images files (if you opt to process all the images in the same directory)]\n"
<< "[--show], whether to show the image in a window with license plate in banner\n"
<< "[--time_delay= time delay in ms between two consecutive images]\n" << std::endl;
std::cout << "Note : model.onnx file is in the package" << std::endl;
std::cout << "Note : options [--image ] and [--dir ] are incompatible, model argument is mandatory" << std::endl;
std::cout << "Note : if you want to see how well the engine performs, you must place the true license plate number in the image filename this way : number+underscore+license plate number\n"
<< "for instance filename 0000000001_3065WWA34.jpg will be interpreted as an image with the license plate 3065WWA34 in it." << std::endl;
}
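//illustrative helper (not part of the original program) : given the filename convention
//described in help() (number + underscore + license plate number), this sketch recovers the
//ground-truth plate from a path such as "0000000001_3065WWA34.jpg"; the function name is hypothetical
static std::string true_plate_from_filename(const std::string& path)
{
//keep only the stem, e.g. "0000000001_3065WWA34"
std::string stem = std::filesystem::path(path).stem().string();
std::size_t underscore = stem.find('_');
//everything after the first underscore is the expected plate number
if (underscore == std::string::npos)
return std::string();
return stem.substr(underscore + 1);
}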
//step 1 declare a global instance of the ONNX Runtime C API
const OrtApi* g_ort = OrtGetApiBase()->GetApi(ORT_API_VERSION);
int main(int argc, char* argv[])
{
#ifdef LPR_EDITOR_USE_CUDA
bool useCUDA{ false };//set to true to request the CUDA execution provider below
#endif //LPR_EDITOR_USE_CUDA
#ifdef LPREDITOR_DEMO_NO_ARGS
const int argc_ = 7;
const char* argv_[argc_];//string literals must not be assigned to non-const char* in standard C++
/*
argv_[0] = argv[0];
argv_[1] = "";
argv_[2] = "--image=D:\\my\\path\\to\\an\\image.jpg";//
argv_[3] = "--model=D:\\my\\path\\to\\the\\yolo\\model\\yolo_carac_detect.onnx ";//the yolo model file is provided in the repo
argv_[4] = "--dir=D:\\my\\path\\to\\a\\directory\\with\\image\\files";
argv_[5] = "--time_delay=1000";//if you want the images to be displayed by hihgui
argv_[6] = "";
*/
argv_[0] = argv[0];
argv_[1] = "";
argv_[2] = "--image=../data/images/images test/0000000001_3065WWA34.jpg";//
argv_[3] = "--model=../data/models/lpreditor_anpr.onnx";
argv_[4] = "--dir=../data/images/images test";
argv_[5] = "--time_delay=-1";//we don't want the images to be displayed by hihgui
argv_[6] = "";
cv::CommandLineParser parser(argc_, argv_, "{help h | | }{show | | }{time_delay | -1 | }{model | | }{image | | }{dir | | }");
#else //LPREDITOR_DEMO_NO_ARGS
cv::CommandLineParser parser(argc, argv, "{help h | | }{show | | }{time_delay | -1 | }{model | | }{image | | }{dir | | }");
#endif //LPREDITOR_DEMO_NO_ARGS
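//example invocation (illustrative : the executable name and the time_delay value are placeholders,
//the model and directory paths are the ones used in the demo block above) :
//./Open_LPReditor --model=../data/models/lpreditor_anpr.onnx --dir="../data/images/images test" --time_delay=500 --show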
if (parser.has("help"))
{
help(argv);
return 0;
}
bool show_image = false;
if (parser.has("show"))
{
show_image = true;
}
int time_delay = -1;
if (parser.has("time_delay"))
{
time_delay = parser.get<int>("time_delay");
}
if (!parser.has("model"))
{
std::cout << "\nYou must specify the model pathname by using mandatory arg --model=...\n" << std::endl;
help(argv);
return 0;
}
std::string model_filename = (parser.get<std::string>("model"));
if (!model_filename.size())
{
std::cout << "\nCan't find the model file\n" << std::endl;
help(argv);
return 0;
}
else {
//step 2 declare an onnx runtime environment
std::string instanceName{ "image-classification-inference" };
// https://github.com/microsoft/onnxruntime/blob/rel-1.6.0/include/onnxruntime/core/session/onnxruntime_c_api.h#L123
Ort::Env env(OrtLoggingLevel::ORT_LOGGING_LEVEL_WARNING, instanceName.c_str());
//step 3 declare options for the runtime environment
Ort::SessionOptions sessionOptions;
sessionOptions.SetIntraOpNumThreads(1);
#ifdef LPR_EDITOR_USE_CUDA
if (useCUDA)
{
// Using CUDA backend
// https://github.com/microsoft/onnxruntime/blob/rel-1.6.0/include/onnxruntime/core/providers/cuda/cuda_provider_factory.h#L13
OrtStatus* status =
OrtSessionOptionsAppendExecutionProvider_CUDA(sessionOptions, 0);
if (status != nullptr) {
//appending the CUDA execution provider failed : release the status and fall back to the default (CPU) provider
g_ort->ReleaseStatus(status);
}
}
#endif //LPR_EDITOR_USE_CUDA
// Sets graph optimization level
// Available levels are:
// ORT_DISABLE_ALL -> disable all optimizations
// ORT_ENABLE_BASIC -> enable basic optimizations (such as redundant node removals)
// ORT_ENABLE_EXTENDED -> enable extended optimizations (includes level 1 + more complex optimizations like node fusions)
// ORT_ENABLE_ALL -> enable all possible optimizations
sessionOptions.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_EXTENDED);
#ifdef _WIN32
//step 4 declare an onnx session (i.e. model), by giving references to the runtime environment, session options and file path to the model
std::wstring widestr = std::wstring(model_filename.begin(), model_filename.end());//naive narrow-to-wide conversion : assumes the model path contains only ASCII characters
Yolov5_anpr_onxx_detector onnx_net(env, widestr.c_str(), sessionOptions);
#else
Yolov5_anpr_onxx_detector onnx_net(env, model_filename.c_str(), sessionOptions);
#endif
std::cout << "\nModel load succesfully\n" << std::endl;
std::string image_filename = (parser.get<std::string>("image"));
std::string dir = (parser.get<std::string>("dir"));
if (!parser.has("image") && !parser.has("dir"))
{
std::cout << "\nYou must specify either an image filename or a directory (with images in it)\n" << std::endl;
help(argv);
return 0;
}
if (!dir.size() && image_filename.size())
{
std::string filename = cv::samples::findFile(parser.get<std::string>("image"));
//cv::Mat img0 = cv::imread(filename, cv::IMREAD_COLOR);
std::string lpn;
onnx_net.detect(filename, lpn, show_image, time_delay);
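//illustrative addition (not in the original program) : if the filename follows the convention
//described in help(), print the expected plate next to the detected one, using the
//true_plate_from_filename sketch defined above
std::string expected = true_plate_from_filename(filename);
if (expected.size())
std::cout << "expected : " << expected << " | detected : " << lpn << std::endl;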
}
else {
//process all image files of a directory
//step 5 call the detect function of the Yolov5_anpr_onxx_detector object, on a cv::Mat object or an image file.
//This retrieves the boxes and classes of the license plate characters
onnx_net.detect(dir, show_image, time_delay);
return 0;
}
}
return 0;
}