3 Star 0 Fork 0

银梓萌/emotion-detection-keras-master

加入 Gitee
与超过 1200万 开发者一起发现、参与优秀开源项目,私有仓库也完全免费 :)
免费加入
文件
该仓库未声明开源许可证文件(LICENSE),使用请关注具体项目描述及其代码上游依赖。
克隆/下载
visualize cnn processing data 3.50 KB
一键复制 编辑 原始数据 按行查看 历史
银梓萌 提交于 2022-03-30 14:34 . update visualize cnn processing data.
# Demo: load a sample "happy" face, display it, and run the model on it.
from keras.preprocessing import image

# Load the demo image directly as single-channel grayscale.
# NOTE(fix): the original loaded RGB and sliced out the red channel with
# `[:48, :48, :1]`; `color_mode='grayscale'` performs a proper luminance
# conversion instead.
img = image.load_img('./data/demo_happy.jpg',
                     target_size=(48, 48), color_mode='grayscale')
img_tensor = image.img_to_array(img)           # shape (48, 48, 1)
img_tensor = img_tensor.reshape(1, 48, 48, 1)  # add batch dimension
img_tensor /= 255.                             # scale pixels to [0, 1]
print(img_tensor.shape)
plt.imshow(img_tensor[0].reshape(48, 48), cmap='gray')

# Capture per-layer activations of the first 20 layers for visualization.
layer_output = [layer.output for layer in model.layers[:20]]
activation_model = models.Model(inputs=model.input, outputs=layer_output)
activations = activation_model.predict(img_tensor)

# Display the picture and the predicted result.
plt.imshow(img_tensor.reshape(48, 48), cmap='gray')
plt.title('happy')
predict_result = np.argmax(model.predict(img_tensor))
print('The model predict the picture to be {}'.format(emotion_label[predict_result]))
# Demo: load a sample "fear" face, display it, and run the model on it.
from keras.preprocessing import image

# Load directly as grayscale (proper luminance conversion) instead of the
# original red-channel slice `[:48, :48, :1]` of an RGB load.
img = image.load_img('./data/demo_fear.jpg',
                     target_size=(48, 48), color_mode='grayscale')
img_tensor = image.img_to_array(img)           # shape (48, 48, 1)
img_tensor = img_tensor.reshape(1, 48, 48, 1)  # add batch dimension
img_tensor /= 255.                             # scale pixels to [0, 1]
print(img_tensor.shape)
plt.imshow(img_tensor[0].reshape(48, 48), cmap='gray')

# Capture per-layer activations of the first 20 layers for visualization.
layer_output = [layer.output for layer in model.layers[:20]]
activation_model = models.Model(inputs=model.input, outputs=layer_output)
activations = activation_model.predict(img_tensor)

# Display the picture and the predicted result.
plt.imshow(img_tensor.reshape(48, 48), cmap='gray')
plt.title('fear')
predict_result = np.argmax(model.predict(img_tensor))
print('The model predict the picture to be {}'.format(emotion_label[predict_result]))
# Pick one test-set sample, visualize it, and collect per-layer activations.
chosen_picture_index = 3
sample = x_test[chosen_picture_index].reshape(1, 48, 48, 1)

# Build a model exposing the outputs of the first 23 layers.
layer_output = [layer.output for layer in model.layers[:23]]
activation_model = models.Model(inputs=model.input, outputs=layer_output)
activations = activation_model.predict(sample)

# Show the picture with its ground-truth label (one-hot y_test) as the title.
plt.imshow(sample.reshape(48, 48), cmap='gray')
true_label = int(np.argwhere(y_test[chosen_picture_index]))
plt.title(emotion_label[true_label])

# Print the model's prediction for the same sample.
predict_result = np.argmax(model.predict(sample))
print('The model predict the picture to be {}'.format(emotion_label[predict_result]))
def _show_activation_channels(layer_activation, n_rows=3, n_cols=8):
    """Plot the first n_rows * n_cols channels of one layer's activation map.

    layer_activation: array of shape (1, H, W, channels), as produced by
    activation_model.predict — e.g. (1, 48, 48, 32) for the first Conv2d.
    Assumes channels >= n_rows * n_cols; IndexError otherwise.
    """
    fig, axes = plt.subplots(n_rows, n_cols, figsize=(9, 6),
                             subplot_kw={'xticks': [], 'yticks': []})
    fig.subplots_adjust(left=0.03, right=0.97, hspace=0.3, wspace=0.05)
    # enumerate over the flattened axes grid instead of range(len(...)).
    for index, ax in enumerate(axes.flat):
        ax.imshow(layer_activation[0, :, :, index], cmap='gray')

# First Conv2d: visualize channels 1-24 of the first layer's activation.
first_layer_activation = activations[0]
_show_activation_channels(first_layer_activation)

# Second Conv2d
second_layer_activation = activations[7]
_show_activation_channels(second_layer_activation)

# Third Conv2d
third_layer_activation = activations[14]
_show_activation_channels(third_layer_activation)
马建仓 AI 助手
尝试更多
代码解读
代码找茬
代码优化
1
https://gitee.com/yinzimeng11/emotion-detection-keras-master.git
git@gitee.com:yinzimeng11/emotion-detection-keras-master.git
yinzimeng11
emotion-detection-keras-master
emotion-detection-keras-master
master

搜索帮助

0d507c66 1850385 C8b1a773 1850385