char74-lenet-check / forward.py
'''
forward.py -- forward propagation of a LeNet-style CNN for the char74 letter
dataset (26-class output), written against the TensorFlow 1.x API.

@Date: 2020-06-27 22:37:16
@LastEditTime: 2020-06-30 00:13:39
@FilePath: \char74-lenet-check\forward.py
'''
import tensorflow as tf
INPUT_NODE = 784        # number of input nodes for the fully connected network (28*28)
IMAGE_SIZE = 28         # image resolution is 28x28
NUM_CHANNELS = 1        # grayscale images, so the channel count is 1
CONV1_SIZE = 5          # first convolutional layer: 5x5 kernels
CONV1_KERNEL_NUM = 32   # first convolutional layer: 32 kernels
CONV2_SIZE = 5          # second convolutional layer: 5x5 kernels
CONV2_KERNEL_NUM = 64   # second convolutional layer: 64 kernels
FC_SIZE = 512           # first fully connected layer: 512 neurons
OUTPUT_NODE = 26        # second fully connected layer: 26 neurons, one per letter class
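# Shape walk-through (a sanity check derived from the constants above; with
# 'SAME' padding the convolutions keep the spatial size and each 2x2/stride-2
# pooling halves it):
#   input          28 x 28 x 1
#   conv1 + pool1  14 x 14 x 32
#   conv2 + pool2   7 x  7 x 64  -> flattened to 7*7*64 = 3136 nodes for fc1
assert IMAGE_SIZE * IMAGE_SIZE * NUM_CHANNELS == INPUT_NODE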
# Create a weight tensor
def get_weight(shape, regularizer):
    # Initialize w from a truncated normal distribution with the given shape
    w = tf.Variable(tf.truncated_normal(shape, stddev=0.1))
    # When a regularizer coefficient is given, add the L2 regularization term
    # for w to the 'losses' collection so the training step can include it in
    # the loss function during back-propagation
    if regularizer is not None:
        tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(regularizer)(w))
    return w
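# A minimal sketch of how the collected terms are typically consumed in the
# training script (not part of this file; the name cross_entropy_mean is
# illustrative):
#   loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))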
# Create a bias tensor
def get_bias(shape):
    b = tf.Variable(tf.zeros(shape))  # biases are initialized to zero
    return b
# Convolution layer
def conv2d(x, w):
    # x: input tensor [batch, height, width, channels]
    # w: kernel tensor [height, width, in_channels, kernel_count]
    # strides: kernel stride [1, row_stride, col_stride, 1]
    # padding: 'SAME' means zero padding
    return tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')
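# With stride 1 and 'SAME' padding, the output spatial size equals the input
# spatial size (output dim = ceil(input dim / stride)).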
# Max-pooling layer
def max_pool_2x2(x):
    # x: input tensor [batch, height, width, channels]
    # ksize: pooling window [1, height, width, 1]
    # strides: pooling stride [1, row_stride, col_stride, 1]
    # padding: 'SAME' means zero padding
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
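# A 2x2 window with stride 2 halves each spatial dimension
# (e.g. 28x28 -> 14x14, 14x14 -> 7x7).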
# Forward propagation
def forward(x, train, regularizer):
    # First convolutional layer
    # Weights: 5x5 kernels, 1 input channel, 32 kernels
    conv1_w = get_weight([CONV1_SIZE, CONV1_SIZE, NUM_CHANNELS, CONV1_KERNEL_NUM], regularizer)
    # Biases: one per kernel
    conv1_b = get_bias([CONV1_KERNEL_NUM])
    # Convolve the input with the first-layer kernels
    conv1 = conv2d(x, conv1_w)
    # Add the bias with tf.nn.bias_add(), then apply the ReLU non-linearity
    relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_b))
    # Max-pool the activated output of the first layer
    pool1 = max_pool_2x2(relu1)

    # Second convolutional layer
    # Weights: 5x5 kernels, 32 input channels, 64 kernels
    conv2_w = get_weight([CONV2_SIZE, CONV2_SIZE, CONV1_KERNEL_NUM, CONV2_KERNEL_NUM], regularizer)
    # Biases: one per kernel
    conv2_b = get_bias([CONV2_KERNEL_NUM])
    # Convolve the first pooling output with the second-layer kernels
    conv2 = conv2d(pool1, conv2_w)
    # Add the bias, then apply the ReLU non-linearity
    relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_b))
    # Max-pool the activated output of the second layer
    pool2 = max_pool_2x2(relu2)

    # Flatten the pooled feature maps into vectors for the fully connected layers
    # get_shape().as_list() returns the static shape of pool2 as a Python list
    pool_shape = pool2.get_shape().as_list()
    # Multiply height, width and depth to get the length of the flattened vector
    nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]
    # Reshape pool2 into [batch, nodes]; pool_shape[0] is the (statically known) batch size
    reshaped = tf.reshape(pool2, [pool_shape[0], nodes])

    # Third layer: fully connected
    # Initialize the weights, with regularization
    fc1_w = get_weight([nodes, FC_SIZE], regularizer)
    # Initialize the biases
    fc1_b = get_bias([FC_SIZE])
    # Multiply the flattened vector by fc1_w, add the bias, then apply ReLU
    fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_w) + fc1_b)
    # During training, apply dropout with keep probability 0.5 to reduce overfitting
    if train:
        fc1 = tf.nn.dropout(fc1, 0.5)

    # Fourth layer: fully connected output
    # Initialize the weights
    fc2_w = get_weight([FC_SIZE, OUTPUT_NODE], regularizer)
    # Initialize the biases
    fc2_b = get_bias([OUTPUT_NODE])
    # Multiply fc1 by fc2_w and add the bias to get the 26-class logits
    y = tf.matmul(fc1, fc2_w) + fc2_b
    # Return the logits y, completing the forward pass
    return y
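A minimal smoke test of the graph built by forward(), assuming the TensorFlow 1.x runtime; the batch size, the separate test script, and the random input below are illustrative, not part of the repository:

import numpy as np
import tensorflow as tf

import forward

BATCH_SIZE = 4  # illustrative fixed batch size (forward() needs a known batch dimension)

# Build the graph: a fixed-size image batch fed through forward() in training mode
x = tf.placeholder(tf.float32,
                   [BATCH_SIZE, forward.IMAGE_SIZE, forward.IMAGE_SIZE, forward.NUM_CHANNELS])
y = forward.forward(x, train=True, regularizer=0.0001)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Feed random grayscale images and check the shape of the logits
    logits = sess.run(y, feed_dict={x: np.random.rand(BATCH_SIZE, 28, 28, 1)})
    print(logits.shape)  # expected: (4, 26)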