from tensorflow.keras import layers, losses, models
import numpy as np
import cv2
import os
'''
def cnn_train():
    # Mapping from plate characters to class indices: 31 province abbreviations,
    # digits 0-9, and letters excluding I and O (65 classes in total)
    char_dict = {"京": 0, "沪": 1, "津": 2, "渝": 3, "冀": 4, "晋": 5, "蒙": 6, "辽": 7, "吉": 8, "黑": 9, "苏": 10,
                 "浙": 11, "皖": 12, "闽": 13, "赣": 14, "鲁": 15, "豫": 16, "鄂": 17, "湘": 18, "粤": 19, "桂": 20,
                 "琼": 21, "川": 22, "贵": 23, "云": 24, "藏": 25, "陕": 26, "甘": 27, "青": 28, "宁": 29, "新": 30,
                 "0": 31, "1": 32, "2": 33, "3": 34, "4": 35, "5": 36, "6": 37, "7": 38, "8": 39, "9": 40,
                 "A": 41, "B": 42, "C": 43, "D": 44, "E": 45, "F": 46, "G": 47, "H": 48, "J": 49, "K": 50,
                 "L": 51, "M": 52, "N": 53, "P": 54, "Q": 55, "R": 56, "S": 57, "T": 58, "U": 59, "V": 60,
                 "W": 61, "X": 62, "Y": 63, "Z": 64}

    # Training images: the first 7 characters of each filename are the plate string
    path = 'home/cnn_datasets/'
    pic_name = sorted(os.listdir(path))
    n = len(pic_name)
    X_train, y_train = [], []
    for i in range(n):
        print("Reading image %d" % i)
        # imdecode + np.fromfile handles paths containing non-ASCII characters
        img = cv2.imdecode(np.fromfile(path + pic_name[i], dtype=np.uint8), -1)
        label = [char_dict[name] for name in pic_name[i][0:7]]
        X_train.append(img)
        y_train.append(label)
    X_train = np.array(X_train)
    # Split labels into 7 per-position arrays, one for each output head
    y_train = [np.array(y_train)[:, i] for i in range(7)]

    # CNN with a shared convolutional trunk and 7 softmax heads (65 classes each)
    Input = layers.Input((80, 240, 3))
    x = Input
    x = layers.Conv2D(filters=16, kernel_size=(3, 3), strides=1, padding='same', activation='relu')(x)
    x = layers.MaxPool2D(pool_size=(2, 2), padding='same', strides=2)(x)
    for i in range(3):
        x = layers.Conv2D(filters=32 * 2 ** i, kernel_size=(3, 3), padding='valid', activation='relu')(x)
        x = layers.Conv2D(filters=32 * 2 ** i, kernel_size=(3, 3), padding='valid', activation='relu')(x)
        x = layers.MaxPool2D(pool_size=(2, 2), padding='same', strides=2)(x)
        x = layers.Dropout(0.5)(x)
    x = layers.Flatten()(x)
    x = layers.Dropout(0.3)(x)
    Output = [layers.Dense(65, activation='softmax', name='c%d' % (i + 1))(x) for i in range(7)]
    model = models.Model(inputs=Input, outputs=Output)
    model.summary()
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    print("Starting CNN training")
    model.fit(X_train, y_train, epochs=35)  # the total loss is the sum of the 7 per-head losses
    model.save('cnn.h5')
    print('cnn.h5 saved successfully!')
'''
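
# A minimal worked example of the label encoding used in the (disabled) trainer above,
# assuming a hypothetical training file named '京A12345.jpg': the first 7 filename
# characters are the plate string, so its label becomes
#     [char_dict[c] for c in '京A12345']  ->  [0, 41, 32, 33, 34, 35, 36]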
def cnn_predict(cnn, Lic_img):
    # Index-to-character table; must stay consistent with char_dict used for training
    characters = ["京", "沪", "津", "渝", "冀", "晋", "蒙", "辽", "吉", "黑", "苏", "浙", "皖", "闽", "赣", "鲁", "豫",
                  "鄂", "湘", "粤", "桂", "琼", "川", "贵", "云", "藏", "陕", "甘", "青", "宁", "新", "0", "1", "2",
                  "3", "4", "5", "6", "7", "8", "9", "A", "B", "C", "D", "E", "F", "G", "H", "J", "K", "L", "M",
                  "N", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z"]
    Lic_pred = []
    for lic in Lic_img:
        lic_pred = cnn.predict(lic.reshape(1, 80, 240, 3))  # list of 7 softmax outputs, one per character position
        lic_pred = np.array(lic_pred).reshape(7, 65)  # stack into a (7, 65) probability matrix
        if len(lic_pred[lic_pred >= 0.8]) >= 4:  # accept only if at least 4 positions are predicted with >= 0.8 confidence
            chars = ''
            for arg in np.argmax(lic_pred, axis=1):  # take the most probable class in each row and map it back to a character
                chars += characters[arg]
            chars = chars[0:2] + '·' + chars[2:]  # insert the separator dot after the province abbreviation and city letter
            Lic_pred.append((lic, chars))  # store the plate image together with its recognized string
    return Lic_pred
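
# Minimal usage sketch (not part of the original module), assuming the trained model was
# saved as 'cnn.h5' by cnn_train and that 'test_plate.jpg' is an already-cropped plate
# image; both file names are placeholders for illustration.
if __name__ == '__main__':
    cnn = models.load_model('cnn.h5')  # restore the 7-output Keras model saved by cnn_train
    plate = cv2.imdecode(np.fromfile('test_plate.jpg', dtype=np.uint8), -1)  # read image (handles non-ASCII paths)
    plate = cv2.resize(plate, (240, 80))  # cv2.resize takes (width, height); yields the expected (80, 240, 3) input
    for img, chars in cnn_predict(cnn, [plate]):
        print('Recognized plate:', chars)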