Python-OpenCV License Plate Recognition, Simple Version (3)

Chapter 3: Recognizing Characters with Template Matching

  The project files can be downloaded as a package from https://pan.baidu.com/s/1tPttbM7oGS-4GMrqpR4hKQ?pwd=ghlh (extraction code: ghlh). The file structure is shown below.

[Figure: project file structure]

  We import all the characters that can appear on a license plate, digits first, then English letters, then Chinese characters. Note that the letters O and I are not included (they are easily confused with the digits 0 and 1).

import os
# Labels of the character templates (the template images live under ./refer/<label>/)
templates = ['0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F','G','H','J','K','L','M','N','P','Q','R','S','T','U','V','W','X','Y','Z','京','津','冀','晋','蒙','辽','吉','黑','沪','苏','浙','皖','闽','赣','鲁','豫','鄂','湘','粤','桂','琼','渝','川','贵','云','藏','陕','甘','青','宁','新']
print(templates)
templates[33]
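  As a quick sanity check (a small sketch, not part of the original notebook), you can print the three index ranges that the matching loops below rely on:

# Index layout assumed by the later loops (sketch)
print(templates[0:10])    # digits, indices 0-9
print(templates[10:34])   # 24 letters, indices 10-33 (O and I excluded)
print(templates[34:64])   # 30 province abbreviations, indices 34-63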

  You can see that the output for index 33 is Z. Next we define a function that reads all the images in a folder; its input parameter is the directory name.

def read_directory(directory_name):
    # Return the paths of all files in the given directory
    referImg_list = []
    for filename in os.listdir(directory_name):
        referImg_list.append(directory_name+"/"+filename)
    return referImg_list
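  For example, assuming the template folder layout from the download (one sub-folder per character under ./refer/), calling the function on a single sub-folder returns the paths of that character's template images:

# Hypothetical usage, assuming ./refer/0 exists in the downloaded project
paths = read_directory('./refer/0')
print(len(paths), paths[:3])  # number of '0' templates and the first few paths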

  We match the Chinese character first. The Chinese characters occupy indices 34 to 63 of the template list, so we read all the Chinese template images.

# Match the Chinese character
c_words = []
for i in range(34,64):
    c_word = read_directory('./refer/'+templates[i])
    c_words.append(c_word)
c_words

  Read the first character image, which is the Chinese character.

chinese = cv2.imread('./words/test2_1.png')
plt_show(chinese)

  Recognition follows the usual routine: Gaussian denoising, grayscale conversion, then binarization.

import numpy as np
# Gaussian denoising
chinese_Gaussian = cv2.GaussianBlur(chinese,(3,3),0)
# Grayscale
chinese_gray = cv2.cvtColor(chinese_Gaussian,cv2.COLOR_RGB2GRAY)
plt_show(chinese_gray)
# Binarization with Otsu thresholding
ret , chinese_threshold = cv2.threshold(chinese_gray,0,255,cv2.THRESH_OTSU)
mean = np.mean(chinese_threshold)
if mean > 100:  # a cutoff of 100 works better here than 128 for deciding when to invert
    chinese_threshold = 255 - chinese_threshold
plt_show(chinese_threshold)
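  The inversion step matters because cv2.matchTemplate compares pixel values directly, so the binarized character should have the same polarity as the template images (presumably white strokes on a black background in the downloaded template set). Otsu picks the threshold automatically, but which side ends up white depends on the crop, so the mean of the binary image is used as a cheap orientation test. A minimal sketch of that check in isolation (the helper name and the cutoff default are mine, for illustration only):

import numpy as np

def ensure_white_on_black(binary, mean_cutoff=100):
    # Flip a binary image whose mean is high (i.e. mostly white background)
    return 255 - binary if np.mean(binary) > mean_cutoff else binary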

  Now for the core of the template matching: each template image is scored against the character, and the template with the highest score is taken as the best match.

import numpy as np
best_score = []
for c_word in c_words:
    score = []
    for word in c_word:
        # imdecode + np.fromfile reads paths that contain Chinese characters
        template_img = cv2.imdecode(np.fromfile(word,dtype = np.uint8),1)
        template_img = cv2.cvtColor(template_img,cv2.COLOR_RGB2GRAY)
        ret , template_img = cv2.threshold(template_img,0,255,cv2.THRESH_OTSU)
        height , width = template_img.shape
        image_c = chinese_threshold.copy()  # the image to be matched
        image_c = cv2.resize(image_c,(width,height))
        result = cv2.matchTemplate(image_c,template_img,cv2.TM_CCOEFF)
        score.append(result[0][0])
    best_score.append(max(score))

print(best_score)
print(max(best_score))
best = best_score.index(max(best_score))
print(best)
best_chinese = templates[34+best]
print(best_chinese)
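  Because the character image is resized to exactly the template's size, cv2.matchTemplate returns a 1x1 result array, which is why result[0][0] can be read as the single score. As a minimal sketch (the helper name match_score is mine, not from the original code), the per-template scoring can be wrapped like this:

def match_score(binary_char, template_path):
    # Score one binarized character image against one template file (sketch)
    tpl = cv2.imdecode(np.fromfile(template_path, dtype=np.uint8), 1)
    tpl = cv2.cvtColor(tpl, cv2.COLOR_RGB2GRAY)
    _, tpl = cv2.threshold(tpl, 0, 255, cv2.THRESH_OTSU)
    h, w = tpl.shape
    img = cv2.resize(binary_char, (w, h))  # identical sizes -> 1x1 result matrix
    result = cv2.matchTemplate(img, tpl, cv2.TM_CCOEFF)
    return float(result[0][0])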

  Matching the English letters and digits works in much the same way. Since a plate contains several characters, we can simply use a for loop to traverse all the character images and print the recognition results in order.

import glob
import cv2
import numpy as np

# Folder path and image format of the segmented character images
folder_path = './words/'   # folder path
image_format = '*.png'     # image format

# Build the full paths of the character image files
images_path = glob.glob(folder_path + image_format)

# Define method_A, method_B and method_C
def method_A(imageA):
    # Match the Chinese character
    c_words = []
    for i in range(34,64):
        c_word = read_directory('./refer/'+templates[i])  # note: the list is named templates here (template in the video)
        c_words.append(c_word)

    # Gaussian denoising, grayscale, binarization
    chinese_Gaussian = cv2.GaussianBlur(imageA,(3,3),0)
    chinese_gray = cv2.cvtColor(chinese_Gaussian,cv2.COLOR_RGB2GRAY)
    #plt_show(chinese_gray)
    ret , chinese_threshold = cv2.threshold(chinese_gray,0,255,cv2.THRESH_OTSU)
    mean = np.mean(chinese_threshold)
    if mean > 100:  # invert so the polarity matches the templates
        chinese_threshold = 255 - chinese_threshold

    # Template matching
    best_score = []
    for c_word in c_words:
        score = []
        for word in c_word:
            template_img = cv2.imdecode(np.fromfile(word,dtype = np.uint8),1)
            template_img = cv2.cvtColor(template_img,cv2.COLOR_RGB2GRAY)
            ret , template_img = cv2.threshold(template_img,0,255,cv2.THRESH_OTSU)
            height , width = template_img.shape
            image_c = chinese_threshold.copy()  # the image to be matched
            image_c = cv2.resize(image_c,(width,height))
            result = cv2.matchTemplate(image_c,template_img,cv2.TM_CCOEFF)
            score.append(result[0][0])
        best_score.append(max(score))
    best = best_score.index(max(best_score))
    best_chinese = templates[34+best]
    print(best_chinese)

def method_B(imageA):
    # Match the second character, an English letter
    E_words = []
    for i in range(10,34):  # half-open range, indices 10..33
        E_word = read_directory('./refer/'+templates[i])  # note: the list is named templates here (template in the video)
        E_words.append(E_word)

    # Gaussian denoising, grayscale, binarization
    English_Gaussian = cv2.GaussianBlur(imageA,(3,3),0)
    English_gray = cv2.cvtColor(English_Gaussian,cv2.COLOR_RGB2GRAY)
    ret , English_threshold = cv2.threshold(English_gray,0,255,cv2.THRESH_OTSU)
    mean = np.mean(English_threshold)
    if mean > 100:  # invert so the polarity matches the templates
        English_threshold = 255 - English_threshold
    #plt_show(English_threshold)

    # Template matching
    best_score = []
    for E_word in E_words:
        score = []
        for word in E_word:
            template_img = cv2.imdecode(np.fromfile(word,dtype = np.uint8),1)
            template_img = cv2.cvtColor(template_img,cv2.COLOR_RGB2GRAY)
            ret , template_img = cv2.threshold(template_img,0,255,cv2.THRESH_OTSU)
            height , width = template_img.shape
            image_E = English_threshold.copy()  # the image to be matched
            image_E = cv2.resize(image_E,(width,height))
            result = cv2.matchTemplate(image_E,template_img,cv2.TM_CCOEFF)
            score.append(result[0][0])
        best_score.append(max(score))

    best = best_score.index(max(best_score))
    best_English = templates[10+best]
    print(best_English)

def method_C(imageA):
    # Match the remaining letters and digits
    E_words = []
    for i in range(0,34):  # half-open range, indices 0..33
        E_word = read_directory('./refer/'+templates[i])  # note: the list is named templates here (template in the video)
        E_words.append(E_word)

    # Gaussian denoising, grayscale, binarization
    English_Gaussian = cv2.GaussianBlur(imageA,(3,3),0)
    English_gray = cv2.cvtColor(English_Gaussian,cv2.COLOR_RGB2GRAY)
    ret , English_threshold = cv2.threshold(English_gray,0,255,cv2.THRESH_OTSU)
    mean = np.mean(English_threshold)
    if mean > 100:  # invert so the polarity matches the templates
        English_threshold = 255 - English_threshold
    #plt_show(English_threshold)

    # Template matching
    best_score = []
    for E_word in E_words:
        score = []
        for word in E_word:
            template_img = cv2.imdecode(np.fromfile(word,dtype = np.uint8),1)
            template_img = cv2.cvtColor(template_img,cv2.COLOR_RGB2GRAY)
            ret , template_img = cv2.threshold(template_img,0,255,cv2.THRESH_OTSU)
            height , width = template_img.shape
            image_E = English_threshold.copy()  # the image to be matched
            image_E = cv2.resize(image_E,(width,height))
            result = cv2.matchTemplate(image_E,template_img,cv2.TM_CCOEFF)
            score.append(result[0][0])
        best_score.append(max(score))

    best = best_score.index(max(best_score))
    best_English = templates[0+best]
    print(best_English)

# Counter that records how many character images have been processed
count = 0

# Loop over the character images
for image_path in images_path:
    # Read the image
    imageA = cv2.imread(image_path)
    # Call a different method depending on the counter
    if count == 0:
        # The first image is the Chinese character, so only match Chinese templates
        method_A(imageA)
    elif count == 1:
        # The second image is an English letter, so only match letter templates
        method_B(imageA)
    else:
        # The remaining images can be letters or digits
        method_C(imageA)
    # Update the counter
    count += 1
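  The three methods differ only in which slice of the template list they search, so, as a sketch under the same assumptions rather than the original code, they can be folded into one parameterized function. Note also that glob.glob does not guarantee file order, so sorting the paths keeps the characters in plate order:

def match_character(imageA, start, end):
    # Match one character image against templates[start:end] (sketch)
    gauss = cv2.GaussianBlur(imageA, (3, 3), 0)
    gray = cv2.cvtColor(gauss, cv2.COLOR_RGB2GRAY)
    _, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU)
    if np.mean(binary) > 100:  # keep the same polarity as the templates
        binary = 255 - binary
    best_score = []
    for label in templates[start:end]:
        scores = []
        for path in read_directory('./refer/' + label):
            tpl = cv2.imdecode(np.fromfile(path, dtype=np.uint8), 1)
            tpl = cv2.cvtColor(tpl, cv2.COLOR_RGB2GRAY)
            _, tpl = cv2.threshold(tpl, 0, 255, cv2.THRESH_OTSU)
            h, w = tpl.shape
            img = cv2.resize(binary, (w, h))
            scores.append(cv2.matchTemplate(img, tpl, cv2.TM_CCOEFF)[0][0])
        best_score.append(max(scores))
    return templates[start + best_score.index(max(best_score))]

# Hypothetical usage: first character is Chinese, second a letter, the rest letters or digits
plate = []
for idx, image_path in enumerate(sorted(images_path)):
    imageA = cv2.imread(image_path)
    if idx == 0:
        plate.append(match_character(imageA, 34, 64))
    elif idx == 1:
        plate.append(match_character(imageA, 10, 34))
    else:
        plate.append(match_character(imageA, 0, 34))
print(''.join(plate))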

  After running the script you can see the recognition results. The accuracy is not high; this project is only an exercise for getting familiar with OpenCV. For industrial-grade recognition you would need a machine-learning approach. The complete code follows.

import cv2
from matplotlib import pyplot as plt
import os
import glob
import numpy as np
def cv_show(name, img):
    cv2.imshow(name, img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def plt_show0(img):
    b, g, r = cv2.split(img)
    img = cv2.merge([r, g, b])
    plt.imshow(img)
    plt.show()
def plt_show(img):
    plt.imshow(img, cmap='gray')
    plt.show()
def read_directory(directory_name):
    referImg_list = []
    for filename in os.listdir(directory_name):
        referImg_list.append(directory_name+"/"+filename)
    return referImg_list
rawImage = cv2.imread('./img/car2.png')
plt_show0(rawImage)
image = cv2.GaussianBlur(rawImage,(3,3),0)
gray_image = cv2.cvtColor(image,cv2.COLOR_RGB2GRAY)
Sobel_y = cv2.Sobel(gray_image,cv2.CV_16S,1,0)
absY = cv2.convertScaleAbs(Sobel_y)
image = absY
ret,image = cv2.threshold(image,0,255,cv2.THRESH_OTSU)
kernelX = cv2.getStructuringElement(cv2.MORPH_RECT,(17,5))
image = cv2.morphologyEx(image,cv2.MORPH_CLOSE,kernelX,iterations = 1)
kernelX = cv2.getStructuringElement(cv2.MORPH_RECT,(17,1))
kernelY = cv2.getStructuringElement(cv2.MORPH_RECT,(1,13))
image = cv2.dilate(image,kernelX)
image = cv2.erode(image,kernelX)
image = cv2.erode(image,kernelY)
image = cv2.dilate(image,kernelY)
image = cv2.medianBlur(image,15)
contours,hierarchy = cv2.findContours(image,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
image1= rawImage.copy()
cv2.drawContours(image1,contours,-1,(0,0,255),5)
image2= rawImage.copy()
hsv = cv2.cvtColor(image2, cv2.COLOR_BGR2HSV)
lower_blue = (100, 43, 46)
upper_blue = (124, 255, 255)
lower_green = (35, 43, 46)
upper_green = (77, 255, 255)
mask_blue = cv2.inRange(hsv, lower_blue, upper_blue)
mask_green = cv2.inRange(hsv, lower_green, upper_green)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
mask_blue = cv2.morphologyEx(mask_blue, cv2.MORPH_OPEN, kernel)
mask_green = cv2.morphologyEx(mask_green, cv2.MORPH_OPEN, kernel)
mask = cv2.bitwise_or(mask_blue, mask_green)
filtered = cv2.bitwise_and(image2, image2, mask=mask)
plates = []
for contour in contours:
    area = cv2.contourArea(contour)
    x, y, w, h = cv2.boundingRect(contour)
    ratio = w / h
    if ratio < 2 or ratio > 6:
        print('aspect ratio check failed')
        continue
    rect_area = w * h
    coverage = area / rect_area
    if coverage < 0.5:
        print('contour-to-rectangle coverage check failed')
        continue
    mask_area = cv2.countNonZero(mask[y:y+h, x:x+w])
    mask_coverage = mask_area / rect_area
    if mask_coverage < 0.5:
        print('mask coverage check failed')
        continue
    plates.append((x, y, w, h))
if len(plates) == 0:
    print("No plates found.")
elif len(plates) > 1:
    print("Multiple plates found.")
else:
    x, y, w, h = plates[0]
    image4 = image2.copy()
    cv2.rectangle(image2, (x, y), (x+w, y+h), (0, 255, 0), 1)
    plt_show0(image2)
    image3 = image4[y:y+h,x:x+w]
    cv2.imwrite('./car_license/test1.png',image3)
license = cv2.imread('./car_license/test1.png')
licence_GB = cv2.GaussianBlur(license,(1,3),0)
licence_gray = cv2.cvtColor(licence_GB,cv2.COLOR_BGR2GRAY)
ret , img = cv2.threshold(licence_gray,0,255,cv2.THRESH_OTSU)
import numpy as np
mean = np.mean(img)
if mean > 128:
    img = 255 - img
kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(2,1))
img_close = cv2.dilate(img,kernel)
number_contours , hierarchy = cv2.findContours(img_close,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
img1 = license.copy()
cv2.drawContours(img1,number_contours,-1,(0,255,0),1)
plt_show(img1)
chars = []
for cnt in number_contours:
    x, y, w, h = cv2.boundingRect(cnt)
    if w < 5 or h < 10 or w > 50 or h > 50:
        continue
    ratio = w / h
    if ratio < 0.25 or ratio > 0.7:
        continue
    chars.append((x, y, w, h))
chars = sorted(chars, key=lambda x: x[0])
i = 0
for x, y, w, h in chars:
    i = i+1
    char = license[y:y+h, x:x+w]
    char = cv2.resize(char, (64, 64))
    cv2.imwrite('./words/test2_'+str(i)+'.png',char)

templates = ['0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F','G','H','J','K','L','M','N','P','Q','R','S','T','U','V','W','X','Y','Z','京','津','冀','晋','蒙','辽','吉','黑','沪','苏','浙','皖','闽','赣','鲁','豫','鄂','湘','粤','桂','琼','渝','川','贵','云','藏','陕','甘','青','宁','新']
folder_path = './words/'
image_format = '*.png'
images_path = glob.glob(folder_path + image_format)
def method_A(imageA):
    c_words = []
    for i in range(34, 64):
        c_word = read_directory('./refer/' + templates[i])
        c_words.append(c_word)
    chinese_Gaussian = cv2.GaussianBlur(imageA, (3, 3), 0)
    chinese_gray = cv2.cvtColor(chinese_Gaussian, cv2.COLOR_RGB2GRAY)
    ret, chinese_threshold = cv2.threshold(chinese_gray, 0, 255, cv2.THRESH_OTSU)
    mean = np.mean(chinese_threshold)
    if mean > 100:
        chinese_threshold = 255 - chinese_threshold
    best_score = []
    for c_word in c_words:
        score = []
        for word in c_word:
            template_img = cv2.imdecode(np.fromfile(word, dtype=np.uint8), 1)
            template_img = cv2.cvtColor(template_img, cv2.COLOR_RGB2GRAY)
            ret, template_img = cv2.threshold(template_img, 0, 255, cv2.THRESH_OTSU)
            height, width = template_img.shape
            image_c = chinese_threshold.copy()
            image_c = cv2.resize(image_c, (width, height))
            result = cv2.matchTemplate(image_c, template_img, cv2.TM_CCOEFF)
            score.append(result[0][0])
        best_score.append(max(score))
    best = best_score.index(max(best_score))
    best_chinese = templates[34 + best]
    print(best_chinese)
def method_B(imageA):
    E_words = []
    for i in range(10, 34):
        E_word = read_directory('./refer/' + templates[i])
        E_words.append(E_word)
    English_Gaussian = cv2.GaussianBlur(imageA, (3, 3), 0)
    English_gray = cv2.cvtColor(English_Gaussian, cv2.COLOR_RGB2GRAY)
    ret, English_threshold = cv2.threshold(English_gray, 0, 255, cv2.THRESH_OTSU)
    mean = np.mean(English_threshold)
    if mean > 100:
        English_threshold = 255 - English_threshold
    best_score = []
    for E_word in E_words:
        score = []
        for word in E_word:
            template_img = cv2.imdecode(np.fromfile(word, dtype=np.uint8), 1)
            template_img = cv2.cvtColor(template_img, cv2.COLOR_RGB2GRAY)
            ret, template_img = cv2.threshold(template_img, 0, 255, cv2.THRESH_OTSU)
            height, width = template_img.shape
            image_E = English_threshold.copy()
            image_E = cv2.resize(image_E, (width, height))
            result = cv2.matchTemplate(image_E, template_img, cv2.TM_CCOEFF)
            score.append(result[0][0])
        best_score.append(max(score))
    best = best_score.index(max(best_score))
    best_English = templates[10 + best]
    print(best_English)
def method_C(imageA):
    E_words = []
    for i in range(0, 34):
        E_word = read_directory('./refer/' + templates[i])
        E_words.append(E_word)
    English_Gaussian = cv2.GaussianBlur(imageA, (3, 3), 0)
    English_gray = cv2.cvtColor(English_Gaussian, cv2.COLOR_RGB2GRAY)
    ret, English_threshold = cv2.threshold(English_gray, 0, 255, cv2.THRESH_OTSU)
    mean = np.mean(English_threshold)
    if mean > 100:
        English_threshold = 255 - English_threshold
    best_score = []
    for E_word in E_words:
        score = []
        for word in E_word:
            template_img = cv2.imdecode(np.fromfile(word, dtype=np.uint8), 1)
            template_img = cv2.cvtColor(template_img, cv2.COLOR_RGB2GRAY)
            ret, template_img = cv2.threshold(template_img, 0, 255, cv2.THRESH_OTSU)
            height, width = template_img.shape
            image_E = English_threshold.copy()
            image_E = cv2.resize(image_E, (width, height))
            result = cv2.matchTemplate(image_E, template_img, cv2.TM_CCOEFF)
            score.append(result[0][0])
        best_score.append(max(score))
    best = best_score.index(max(best_score))
    best_English = templates[0 + best]
    print(best_English)
count = 0
for image_path in images_path:
    imageA = cv2.imread(image_path)
    if count == 0:
        method_A(imageA)
    elif count == 1:
        method_B(imageA)
    else:
        method_C(imageA)
    count += 1