Recognize the characters of a license plate
Posted: 2016-07-24 06:53:29

[Question] I tried to use OCR, but my license plate image is of poor quality.
I am trying to somehow improve the character recognition for the OCR, but my best result so far is this: result.

Even on this image, Tesseract does not recognize any characters. My code is:
#include <cv.h> // open cv general include file
#include <highgui.h> // open cv GUI include file
#include <iostream> // standard C++ I/O
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <string>
using namespace cv;
int main( int argc, char** argv )
{
    Mat src;
    Mat dst;
    Mat const structure_elem = getStructuringElement(
        MORPH_RECT, Size(2,2));

    src = imread(argv[1], CV_LOAD_IMAGE_COLOR); // Read the file
    cvtColor(src, src, CV_BGR2GRAY);
    imshow( "plate", src );

    GaussianBlur(src, src, Size(1,1), 1.5, 1.5);
    imshow( "blur", src );

    equalizeHist(src, src);
    imshow( "equalize", src );

    adaptiveThreshold(src, src, 255, ADAPTIVE_THRESH_GAUSSIAN_C, CV_THRESH_BINARY, 15, -1);
    imshow( "threshold", src );

    morphologyEx(src, src, MORPH_CLOSE, structure_elem);
    imshow( "morphological operation", src );

    imwrite("end.jpg", src);

    waitKey(0);
    return 0;
}
My question is: do you know how to get a better result, a cleaner image, so that even though my license plate is of poor quality, the result can be read by OCR (e.g. Tesseract)?

Thank you for your answers. I really don't know how to go about it.
[Comments on the question]
community.aiim.org/blogs/richard-medina/2013/03/15/…

Any chance of starting with higher resolution input? Perhaps better lighting and a faster exposure. Maybe a better approach would be to attempt the recognition yourself. If you only want to focus on CZ plates, you only have to deal with a single font. For most plates (apart from the new custom plates) you can make some assumptions about the symbols at a given position, e.g. the last 4 are most likely only 0-9, the second is probably a letter, and so on. You could run several attempts with different constraints and see which one gives the best match.

I don't think I can get better input. I only focus on CZ plates, but since 2016 a plate can contain almost any character, so I am trying to find a procedure that cleans up the image so that I can then read the characters. Do you know how to do that?

[Answer 1]

One possible algorithm to clean up the image is as follows:
- Scale the image up, to make the letters more substantial.
- Reduce the image to only 8 colors using k-means clustering.
- Threshold the image, and erode it to fill in any small gaps and make the letters more substantial.
- Invert the image to make masking easier.
- Create a blank mask image of the same size, set to all zeros.
- Find contours in the image. For each contour:
  - Find the bounding box of the contour.
  - Find the area of the bounding box.
  - If the area is too small or too large, drop the contour (I chose 1000 and 10000 as the limits).
  - Otherwise draw a filled rectangle corresponding to the bounding box on the mask in white (255).
  - Store the bounding box and the corresponding image ROI.
- For each separated character (bounding box + image), recognize the character.

Note: I prototyped this with OpenCV 3.1 in Python 2.7. A C++ port of this code is near the end of this answer.
Character recognition

I took the inspiration for the character recognition from this question on SO.

Then I found an image that we can use to extract training images for the correct font. I reduced them to contain only the digits and letters without diacritics.
train_digits.png:

train_letters.png:
Then I wrote a script that splits out the individual characters, scales them up, and prepares training images with one character per file:
import os
import cv2
import numpy as np
# ============================================================================
def extract_chars(img):
    bw_image = cv2.bitwise_not(img)
    contours = cv2.findContours(bw_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[1]

    char_mask = np.zeros_like(img)
    bounding_boxes = []
    for contour in contours:
        x,y,w,h = cv2.boundingRect(contour)
        x,y,w,h = x-2, y-2, w+4, h+4
        bounding_boxes.append((x,y,w,h))

    characters = []
    for bbox in bounding_boxes:
        x,y,w,h = bbox
        char_image = img[y:y+h,x:x+w]
        characters.append(char_image)

    return characters

# ============================================================================

def output_chars(chars, labels):
    for i, char in enumerate(chars):
        filename = "chars/%s.png" % labels[i]
        char = cv2.resize(char
            , None
            , fx=3
            , fy=3
            , interpolation=cv2.INTER_CUBIC)
        cv2.imwrite(filename, char)

# ============================================================================

if not os.path.exists("chars"):
    os.makedirs("chars")

img_digits = cv2.imread("train_digits.png", 0)
img_letters = cv2.imread("train_letters.png", 0)

digits = extract_chars(img_digits)
letters = extract_chars(img_letters)

DIGITS = [0, 9, 8, 7, 6, 5, 4, 3, 2, 1]
LETTERS = [chr(ord('A') + i) for i in range(25,-1,-1)]

output_chars(digits, DIGITS)
output_chars(letters, LETTERS)
# ============================================================================
The next step is to generate the training data from the character files we created with the previous script.

I followed the algorithm from the answer to the question mentioned above: each character image is resized to 10x10, and all of its pixels are used as keypoints.

The training data is saved as char_samples.data and char_responses.data.

The script that generates the training data:
import cv2
import numpy as np
CHARS = [chr(ord('0') + i) for i in range(10)] + [chr(ord('A') + i) for i in range(26)]
# ============================================================================
def load_char_images():
    characters = {}
    for char in CHARS:
        char_img = cv2.imread("chars/%s.png" % char, 0)
        characters[char] = char_img
    return characters

# ============================================================================

characters = load_char_images()

samples = np.empty((0,100))
for char in CHARS:
    char_img = characters[char]
    small_char = cv2.resize(char_img,(10,10))
    sample = small_char.reshape((1,100))
    samples = np.append(samples,sample,0)

responses = np.array([ord(c) for c in CHARS],np.float32)
responses = responses.reshape((responses.size,1))

np.savetxt('char_samples.data',samples)
np.savetxt('char_responses.data',responses)
# ============================================================================
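As a quick sanity check, the two generated files can be loaded back the same way the main script below does, and their shapes verified. This is a minimal optional snippet, assuming the script above was run in the current directory:

import numpy as np

# The files written above should contain one row per character:
# 10 digits + 26 letters = 36 samples, each flattened to 10*10 = 100 values.
samples = np.loadtxt('char_samples.data', np.float32)
responses = np.loadtxt('char_responses.data', np.float32)

print(samples.shape)    # expected: (36, 100)
print(responses.shape)  # expected: (36,) before reshaping to (36, 1)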
Once we have created the training data, we can run the main script:
import cv2
import numpy as np
# ============================================================================
def reduce_colors(img, n):
    Z = img.reshape((-1,3))

    # convert to np.float32
    Z = np.float32(Z)

    # define criteria, number of clusters(K) and apply kmeans()
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    K = n
    ret,label,center = cv2.kmeans(Z,K,None,criteria,10,cv2.KMEANS_RANDOM_CENTERS)

    # Now convert back into uint8, and make original image
    center = np.uint8(center)
    res = center[label.flatten()]
    res2 = res.reshape((img.shape))

    return res2

# ============================================================================

def clean_image(img):
    gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    resized_img = cv2.resize(gray_img
        , None
        , fx=5.0
        , fy=5.0
        , interpolation=cv2.INTER_CUBIC)

    resized_img = cv2.GaussianBlur(resized_img,(5,5),0)
    cv2.imwrite('licence_plate_large.png', resized_img)

    equalized_img = cv2.equalizeHist(resized_img)
    cv2.imwrite('licence_plate_equ.png', equalized_img)

    reduced = cv2.cvtColor(reduce_colors(cv2.cvtColor(equalized_img, cv2.COLOR_GRAY2BGR), 8), cv2.COLOR_BGR2GRAY)
    cv2.imwrite('licence_plate_red.png', reduced)

    ret, mask = cv2.threshold(reduced, 64, 255, cv2.THRESH_BINARY)
    cv2.imwrite('licence_plate_mask.png', mask)

    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    mask = cv2.erode(mask, kernel, iterations = 1)
    cv2.imwrite('licence_plate_mask2.png', mask)

    return mask

# ============================================================================

def extract_characters(img):
    bw_image = cv2.bitwise_not(img)
    contours = cv2.findContours(bw_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[1]

    char_mask = np.zeros_like(img)
    bounding_boxes = []
    for contour in contours:
        x,y,w,h = cv2.boundingRect(contour)
        area = w * h
        center = (x + w/2, y + h/2)
        if (area > 1000) and (area < 10000):
            x,y,w,h = x-4, y-4, w+8, h+8
            bounding_boxes.append((center, (x,y,w,h)))
            cv2.rectangle(char_mask,(x,y),(x+w,y+h),255,-1)

    cv2.imwrite('licence_plate_mask3.png', char_mask)

    clean = cv2.bitwise_not(cv2.bitwise_and(char_mask, char_mask, mask = bw_image))

    bounding_boxes = sorted(bounding_boxes, key=lambda item: item[0][0])

    characters = []
    for center, bbox in bounding_boxes:
        x,y,w,h = bbox
        char_image = clean[y:y+h,x:x+w]
        characters.append((bbox, char_image))

    return clean, characters


def highlight_characters(img, chars):
    output_img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    for bbox, char_img in chars:
        x,y,w,h = bbox
        cv2.rectangle(output_img,(x,y),(x+w,y+h),255,1)

    return output_img

# ============================================================================

img = cv2.imread("licence_plate.jpg")

img = clean_image(img)
clean_img, chars = extract_characters(img)

output_img = highlight_characters(clean_img, chars)
cv2.imwrite('licence_plate_out.png', output_img)

samples = np.loadtxt('char_samples.data',np.float32)
responses = np.loadtxt('char_responses.data',np.float32)
responses = responses.reshape((responses.size,1))

model = cv2.ml.KNearest_create()
model.train(samples, cv2.ml.ROW_SAMPLE, responses)

plate_chars = ""
for bbox, char_img in chars:
    small_img = cv2.resize(char_img,(10,10))
    small_img = small_img.reshape((1,100))
    small_img = np.float32(small_img)
    retval, results, neigh_resp, dists = model.findNearest(small_img, k = 1)
    plate_chars += str(chr((results[0][0])))

print("Licence plate: %s" % plate_chars)
Script output

Scaled up 5x:

Equalized:

Reduced to 8 colors:

Thresholded:

Eroded:

Mask selecting only the characters:

Cleaned-up image with bounding boxes:

Console output:
Licence plate: 2B99996
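As a side note, since the question mentions Tesseract: the cleaned-up mask written by clean_image() (licence_plate_mask2.png) could also be handed to Tesseract instead of the kNN classifier above. This is a rough, untested sketch, assuming the pytesseract wrapper and the Tesseract binary are installed; whether it beats the kNN approach depends on how well Tesseract handles the plate font, and the character whitelist option may be ignored by some LSTM-based Tesseract versions.

import cv2
import pytesseract  # assumes the Tesseract binary is installed and on PATH

mask = cv2.imread('licence_plate_mask2.png', 0)  # output of clean_image() above

# Treat the plate as a single line of text and restrict the alphabet.
config = '--psm 7 -c tessedit_char_whitelist=ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
text = pytesseract.image_to_string(mask, config=config)
print("Licence plate (Tesseract): %s" % text.strip())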
C++ code, using OpenCV 2.4.11 and Boost.Filesystem to iterate over the files in a directory.
#include <boost/filesystem.hpp>
#include <opencv2/opencv.hpp>
#include <iostream>
#include <string>
// ============================================================================
namespace fs = boost::filesystem;
// ============================================================================
typedef std::vector<std::string> string_list;
struct char_match_t
{
    cv::Point2i position;
    cv::Mat image;
};

typedef std::vector<char_match_t> char_match_list;
// ----------------------------------------------------------------------------
string_list find_input_files(std::string const& dir)
{
    string_list result;

    fs::path dir_path(dir);

    fs::directory_iterator end_itr;
    for (fs::directory_iterator i(dir_path); i != end_itr; ++i) {
        if (!fs::is_regular_file(i->status())) continue;

        if (i->path().extension() == ".png") {
            result.push_back(i->path().string());
        }
    }
    return result;
}
// ----------------------------------------------------------------------------
cv::Mat reduce_image(cv::Mat const& img, int K)
{
    int n = img.rows * img.cols;
    cv::Mat data = img.reshape(1, n);
    data.convertTo(data, CV_32F);

    std::vector<int> labels;
    cv::Mat1f colors;
    cv::kmeans(data, K, labels
        , cv::TermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 10000, 0.0001)
        , 5, cv::KMEANS_PP_CENTERS, colors);

    for (int i = 0; i < n; ++i) {
        data.at<float>(i, 0) = colors(labels[i], 0);
    }

    cv::Mat reduced = data.reshape(1, img.rows);
    reduced.convertTo(reduced, CV_8U);

    return reduced;
}
// ----------------------------------------------------------------------------
cv::Mat clean_image(cv::Mat const& img)
{
    cv::Mat resized_img;
    cv::resize(img, resized_img, cv::Size(), 5.0, 5.0, cv::INTER_CUBIC);

    cv::Mat equalized_img;
    cv::equalizeHist(resized_img, equalized_img);

    cv::Mat reduced_img(reduce_image(equalized_img, 8));

    cv::Mat mask;
    cv::threshold(reduced_img
        , mask
        , 64
        , 255
        , cv::THRESH_BINARY);

    cv::Mat kernel(cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3)));
    cv::erode(mask, mask, kernel, cv::Point(-1, -1), 1);

    return mask;
}
// ----------------------------------------------------------------------------
cv::Point2i center(cv::Rect const& bounding_box)
{
    return cv::Point2i(bounding_box.x + bounding_box.width / 2
        , bounding_box.y + bounding_box.height / 2);
}
// ----------------------------------------------------------------------------
char_match_list extract_characters(cv::Mat const& img)
{
    cv::Mat inverse_img;
    cv::bitwise_not(img, inverse_img);

    std::vector<std::vector<cv::Point>> contours;
    std::vector<cv::Vec4i> hierarchy;

    cv::findContours(inverse_img.clone(), contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);

    char_match_list result;

    double const MIN_CONTOUR_AREA(1000.0);
    double const MAX_CONTOUR_AREA(6000.0);
    for (uint32_t i(0); i < contours.size(); ++i) {
        cv::Rect bounding_box(cv::boundingRect(contours[i]));
        int bb_area(bounding_box.area());
        if ((bb_area >= MIN_CONTOUR_AREA) && (bb_area <= MAX_CONTOUR_AREA)) {
            int PADDING(2);
            bounding_box.x -= PADDING;
            bounding_box.y -= PADDING;
            bounding_box.width += PADDING * 2;
            bounding_box.height += PADDING * 2;

            char_match_t match;
            match.position = center(bounding_box);
            match.image = img(bounding_box);
            result.push_back(match);
        }
    }

    std::sort(begin(result), end(result)
        , [](char_match_t const& a, char_match_t const& b) -> bool
        {
            return a.position.x < b.position.x;
        });

    return result;
}
// ----------------------------------------------------------------------------
std::pair<float, cv::Mat> train_character(char c, cv::Mat const& img)
{
    cv::Mat small_char;
    cv::resize(img, small_char, cv::Size(10, 10), 0, 0, cv::INTER_LINEAR);

    cv::Mat small_char_float;
    small_char.convertTo(small_char_float, CV_32FC1);

    cv::Mat small_char_linear(small_char_float.reshape(1, 1));

    return std::pair<float, cv::Mat>(
        static_cast<float>(c)
        , small_char_linear);
}
// ----------------------------------------------------------------------------
std::string process_image(cv::Mat const& img, cv::KNearest& knn)
{
    cv::Mat clean_img(clean_image(img));
    char_match_list characters(extract_characters(clean_img));

    std::string result;
    for (char_match_t const& match : characters) {
        cv::Mat small_char;
        cv::resize(match.image, small_char, cv::Size(10, 10), 0, 0, cv::INTER_LINEAR);

        cv::Mat small_char_float;
        small_char.convertTo(small_char_float, CV_32FC1);

        cv::Mat small_char_linear(small_char_float.reshape(1, 1));

        float p = knn.find_nearest(small_char_linear, 1);

        result.push_back(char(p));
    }
    return result;
}
// ============================================================================
int main()
{
    string_list train_files(find_input_files("./chars"));

    cv::Mat samples, responses;
    for (std::string const& file_name : train_files) {
        cv::Mat char_img(cv::imread(file_name, 0));
        std::pair<float, cv::Mat> tinfo(train_character(file_name[file_name.size() - 5], char_img));
        responses.push_back(tinfo.first);
        samples.push_back(tinfo.second);
    }

    cv::KNearest knn;
    knn.train(samples, responses);

    string_list input_files(find_input_files("./input"));
    for (std::string const& file_name : input_files) {
        cv::Mat plate_img(cv::imread(file_name, 0));
        std::string plate(process_image(plate_img, knn));

        std::cout << file_name << " : " << plate << "\n";
    }
}
// ============================================================================
C++ code, using OpenCV 3.1 and Boost.Filesystem to iterate over the files in a directory.
#include <boost/filesystem.hpp>
#include <opencv2/opencv.hpp>
#include <iostream>
#include <string>
// ============================================================================
namespace fs = boost::filesystem;
// ============================================================================
typedef std::vector<std::string> string_list;
struct char_match_t
{
    cv::Point2i position;
    cv::Mat image;
};

typedef std::vector<char_match_t> char_match_list;
// ----------------------------------------------------------------------------
string_list find_input_files(std::string const& dir)
{
    string_list result;

    fs::path dir_path(dir);

    boost::filesystem::directory_iterator end_itr;
    for (boost::filesystem::directory_iterator i(dir_path); i != end_itr; ++i) {
        if (!boost::filesystem::is_regular_file(i->status())) continue;

        if (i->path().extension() == ".png") {
            result.push_back(i->path().string());
        }
    }
    return result;
}
// ----------------------------------------------------------------------------
cv::Mat reduce_image(cv::Mat const& img, int K)
{
    int n = img.rows * img.cols;
    cv::Mat data = img.reshape(1, n);
    data.convertTo(data, CV_32F);

    std::vector<int> labels;
    cv::Mat1f colors;
    cv::kmeans(data, K, labels
        , cv::TermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 10000, 0.0001)
        , 5, cv::KMEANS_PP_CENTERS, colors);

    for (int i = 0; i < n; ++i) {
        data.at<float>(i, 0) = colors(labels[i], 0);
    }

    cv::Mat reduced = data.reshape(1, img.rows);
    reduced.convertTo(reduced, CV_8U);

    return reduced;
}
// ----------------------------------------------------------------------------
cv::Mat clean_image(cv::Mat const& img)
{
    cv::Mat resized_img;
    cv::resize(img, resized_img, cv::Size(), 5.0, 5.0, cv::INTER_CUBIC);

    cv::Mat equalized_img;
    cv::equalizeHist(resized_img, equalized_img);

    cv::Mat reduced_img(reduce_image(equalized_img, 8));

    cv::Mat mask;
    cv::threshold(reduced_img
        , mask
        , 64
        , 255
        , cv::THRESH_BINARY);

    cv::Mat kernel(cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3)));
    cv::erode(mask, mask, kernel, cv::Point(-1, -1), 1);

    return mask;
}
// ----------------------------------------------------------------------------
cv::Point2i center(cv::Rect const& bounding_box)
{
    return cv::Point2i(bounding_box.x + bounding_box.width / 2
        , bounding_box.y + bounding_box.height / 2);
}
// ----------------------------------------------------------------------------
char_match_list extract_characters(cv::Mat const& img)
{
    cv::Mat inverse_img;
    cv::bitwise_not(img, inverse_img);

    std::vector<std::vector<cv::Point>> contours;
    std::vector<cv::Vec4i> hierarchy;

    cv::findContours(inverse_img.clone(), contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);

    char_match_list result;

    double const MIN_CONTOUR_AREA(1000.0);
    double const MAX_CONTOUR_AREA(6000.0);
    for (int i(0); i < contours.size(); ++i) {
        cv::Rect bounding_box(cv::boundingRect(contours[i]));
        int bb_area(bounding_box.area());
        if ((bb_area >= MIN_CONTOUR_AREA) && (bb_area <= MAX_CONTOUR_AREA)) {
            int PADDING(2);
            bounding_box.x -= PADDING;
            bounding_box.y -= PADDING;
            bounding_box.width += PADDING * 2;
            bounding_box.height += PADDING * 2;

            char_match_t match;
            match.position = center(bounding_box);
            match.image = img(bounding_box);
            result.push_back(match);
        }
    }

    std::sort(begin(result), end(result)
        , [](char_match_t const& a, char_match_t const& b) -> bool
        {
            return a.position.x < b.position.x;
        });

    return result;
}
// ----------------------------------------------------------------------------
std::pair<float, cv::Mat> train_character(char c, cv::Mat const& img)
{
    cv::Mat small_char;
    cv::resize(img, small_char, cv::Size(10, 10), 0, 0, cv::INTER_LINEAR);

    cv::Mat small_char_float;
    small_char.convertTo(small_char_float, CV_32FC1);

    cv::Mat small_char_linear(small_char_float.reshape(1, 1));

    return std::pair<float, cv::Mat>(
        static_cast<float>(c)
        , small_char_linear);
}
// ----------------------------------------------------------------------------
std::string process_image(cv::Mat const& img, cv::Ptr<cv::ml::KNearest> knn)
{
    cv::Mat clean_img(clean_image(img));
    char_match_list characters(extract_characters(clean_img));

    std::string result;
    for (char_match_t const& match : characters) {
        cv::Mat small_char;
        cv::resize(match.image, small_char, cv::Size(10, 10), 0, 0, cv::INTER_LINEAR);

        cv::Mat small_char_float;
        small_char.convertTo(small_char_float, CV_32FC1);

        cv::Mat small_char_linear(small_char_float.reshape(1, 1));

        cv::Mat tmp;
        float p = knn->findNearest(small_char_linear, 1, tmp);

        result.push_back(char(p));
    }
    return result;
}
// ============================================================================
int main()
{
    string_list train_files(find_input_files("./chars"));

    cv::Mat samples, responses;
    for (std::string const& file_name : train_files) {
        cv::Mat char_img(cv::imread(file_name, 0));
        std::pair<float, cv::Mat> tinfo(train_character(file_name[file_name.size() - 5], char_img));
        responses.push_back(tinfo.first);
        samples.push_back(tinfo.second);
    }

    cv::Ptr<cv::ml::KNearest> knn(cv::ml::KNearest::create());

    cv::Ptr<cv::ml::TrainData> training_data =
        cv::ml::TrainData::create(samples
            , cv::ml::SampleTypes::ROW_SAMPLE
            , responses);

    knn->train(training_data);

    string_list input_files(find_input_files("./input"));
    for (std::string const& file_name : input_files) {
        cv::Mat plate_img(cv::imread(file_name, 0));
        std::string plate(process_image(plate_img, knn));

        std::cout << file_name << " : " << plate << "\n";
    }
}
// ============================================================================
[Comments on the answer]
That is some seriously impressive coding in such a short time. Nice!!

Thanks. Yes, I tried that. I put the characters into boxes; when I have a clean picture I get six images, one per character. But Tesseract cannot recognize the characters, probably because it does not know the CZ plate font, so I don't know which approach is the right one to use.

Thank you very much. Could you explain how you trained the characters from the image? I trained the localization of Czech plates using a Haar cascade, but I suppose training the individual characters will be different. If you happen to know.

Here are some. They were shot from a moving car, so there is more than one of each: filedropper.com/cartar The plate cropping is not perfect, but that's OK.

@DanMašek I have a question: I tried to run the C++ code with my CMake file pastebin.com/s0JWu07z and the Boost library I have installed, but the terminal says: pastebin.com/tWtiSF5c. Do you know what is going on?