The FCN network structure:
FCN stands for fully convolutional network. It builds on the classic VGGNet by removing all of VGG's final fully connected layers and replacing them with convolutional layers. To segment an image, FCN then applies deconvolution (transposed convolution) to the convolutional output, producing an output of the same size as the original image, and a softmax classifies each pixel. The concrete network structure is as follows:
For the paper, see "Fully Convolutional Networks for Semantic Segmentation"; the code implementation is based on https://github.com/shekkizh/FCN.tensorflow
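Before going through the implementation, here is a minimal standalone sketch of that idea (not taken from the repo above; the names features, score, w_up and the single 32x upsampling step are illustrative assumptions): a 1x1 convolution turns the last feature map into per-class score maps, a transposed convolution upsamples them back to the input resolution, and an argmax over the class channel labels each pixel.

import tensorflow as tf

NUM_CLASSES = 151
# Assume `features` is the last VGG feature map, downsampled 32x, e.g. [N, 7, 7, 512] for a 224x224 input.
images = tf.placeholder(tf.float32, [None, 224, 224, 3])
features = tf.placeholder(tf.float32, [None, 7, 7, 512])

# 1x1 convolution replaces the fully connected classifier: one score map per class.
w_score = tf.get_variable("w_score", [1, 1, 512, NUM_CLASSES])
b_score = tf.get_variable("b_score", [NUM_CLASSES])
score = tf.nn.bias_add(tf.nn.conv2d(features, w_score, strides=[1, 1, 1, 1], padding="SAME"), b_score)

# Transposed convolution ("deconvolution") upsamples the score maps 32x back to the input size.
w_up = tf.get_variable("w_up", [64, 64, NUM_CLASSES, NUM_CLASSES])
out_shape = tf.stack([tf.shape(images)[0], 224, 224, NUM_CLASSES])
logits = tf.nn.conv2d_transpose(score, w_up, out_shape, strides=[1, 32, 32, 1], padding="SAME")

# Per-pixel classification: argmax over the class channel.
prediction = tf.argmax(logits, axis=3)

The paper refines this single 32x jump with skip connections from earlier pooling layers (the FCN-16s/8s variants); the sketch only shows the coarsest version.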
Code walkthrough:
The implementation consists of four Python files: FCN.py, BatchDatsetReader.py, TensorflowUtils.py, and read_MITSceneParsingData.py. Place these four files in the current directory (.), then download the pretrained VGG weights (the MODEL_URL defined in FCN.py) so that the file sits at ./Model_zoo/imagenet-vgg-verydeep-19.mat, and download the training dataset and extract it to ./Data_zoo/MIT_SceneParsing/ADEChallengeData2016. To train, set the global variable mode in FCN.py to "train" and run the file; to test, change mode to "visualize" and run it again.
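As a quick sanity check before training, a small standalone snippet like the one below (my own helper, not part of the repo) can confirm that the downloaded files sit at the paths FCN.py expects:

import os

# Paths assumed by FCN.py (relative to the current directory ".")
model_path = "Model_zoo/imagenet-vgg-verydeep-19.mat"          # pretrained VGG-19 weights
data_path = "Data_zoo/MIT_SceneParsing/ADEChallengeData2016"   # extracted scene-parsing dataset

for p in (model_path, data_path):
    if not os.path.exists(p):
        raise IOError("missing %s - download/extract it before running FCN.py" % p)

print("Files found. Set mode='train' (or 'visualize') in FCN.py and run it.")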
FCN.py is the main file; its code is as follows:
from __future__ import print_function
import tensorflow as tf
import numpy as np
import TensorflowUtils as utils
import read_MITSceneParsingData as scene_parsing
import datetime
import BatchDatsetReader as dataset
from six.moves import xrange
batch_size = 2  # batch size
logs_dir = "logs/"
data_dir = "Data_zoo/MIT_SceneParsing/"  # dataset directory (must be downloaded in advance)
data_name = "ADEChallengeData2016"
learning_rate = 1e-4  # learning rate
model_path = "Model_zoo/imagenet-vgg-verydeep-19.mat"  # pretrained VGG weights file (must be downloaded in advance)
debug = False
mode = 'train'  # run mode: train | visualize
MODEL_URL = 'http://www.vlfeat.org/matconvnet/models/beta16/imagenet-vgg-verydeep-19.mat'  # pretrained VGGNet weights
MAX_ITERATION = int(1e5 + 1)  # maximum number of training iterations
NUM_OF_CLASSESS = 151  # number of classes
IMAGE_SIZE = 224  # input image size
# Build the original VGGNet graph from the loaded pretrained weights
def vgg_net(weights, image):
    layers = (
        'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',
        'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
        'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'conv3_4', 'relu3_4', 'pool3',
        'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3', 'relu4_3', 'conv4_4', 'relu4_4', 'pool4',
        'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3', 'conv5_4', 'relu5_4'
    )
    net = {}
    current = image
    for i, name in enumerate(layers):
        kind = name[:4]
        if kind == 'conv':
            kernels, bias = weights[i][0][0][0][0]
            # matconvnet: weights are [width, height, in_channels, out_channels]
            # tensorflow: weights are [height, width, in_channels, out_channels]
            kernels = utils.get_variable(np.transpose(kernels, (1, 0, 2, 3)), name=name + "_w")
            bias = utils.get_variable(bias.reshape(-1), name=name + "_b")
            current = utils.conv2d_basic(current, kernels, bias)
            print("current shape:", np.shape(current))
        elif kind == 'relu':
            current = tf.nn.relu(current, name=name)
            if debug:
                utils.add_activation_summary(current)
        elif kind == 'pool':
            current = utils.avg_pool_2x2(current)
            print("current shape:", np.shape(current))
        net[name] = current
    return net
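The nested indexing weights[i][0][0][0][0] above follows the MATLAB struct layout of the matconvnet .mat file. The standalone snippet below is not part of FCN.py; it assumes scipy is installed and the weights file has already been downloaded, and it shows that layout and why the (1, 0, 2, 3) transpose is needed.

import numpy as np
import scipy.io

# Load the .mat weights file directly (utils.get_model_data presumably wraps a similar loadmat call)
model_data = scipy.io.loadmat("Model_zoo/imagenet-vgg-verydeep-19.mat")
weights = np.squeeze(model_data['layers'])      # one entry per VGG layer

kernels, bias = weights[0][0][0][0][0]          # first layer: conv1_1
print("conv1_1 kernels:", kernels.shape)        # matconvnet order: [width, height, in_channels, out_channels]
print("conv1_1 bias:", bias.reshape(-1).shape)

# TensorFlow expects [height, width, in_channels, out_channels], hence the transpose in vgg_net
print("after transpose:", np.transpose(kernels, (1, 0, 2, 3)).shape)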
# Definition of the FCN network; its convolutional weights are transferred from the pretrained VGG
def inference(image, keep_prob):
    """
    Semantic segmentation network definition
    :param image: input image. Should have values in range 0-255
    :param keep_prob: dropout keep probability
    :return:
    """
    # Load the pretrained model data
    print("input image:", np.shape(image))
    model_data = utils.get_model_data(model_path)
    mean = model_data['normalization'][0][0][0]
    mean_pixel = np.mean(mean, axis=(0, 1))
    weights = np.squeeze(model_data['layers'])
    # Image preprocessing (subtract the mean pixel)
    processed_image = utils.process_image(image, mean_pixel)
    print("preprocessed image:", np.shape(processed_image))
    with tf.variable_scope("inference"):
        # Build the original VGGNet-19 network
        print("building the VGG network:")
        image_net = vgg_net(weights, processed_image)
        # After VGGNet-19, add one pooling layer and three convolutional layers
        conv_final_layer = image_net["conv5_3"]
        print("feature map after VGG:", np.shape(conv_final_layer))
        pool5 = utils.max_pool_2x2(conv_final_layer)
        print("pool5:", np.shape(pool5))
        W6 = utils.weight_variable([7, 7, 512, 4096], name="W6")
        b6 = utils.bias_variable([4096], name="b6")
        conv6 = utils.conv2d_basic(pool5, W6, b6)
        relu6 = tf.nn.relu(conv6, name="relu6")
        if debug:
            utils.add_activation_summary(relu6)
        relu_dropout6 = tf.nn.dropout(relu6, keep_prob=keep_prob)
        print("conv6:", np.shape(relu_dropout6))
        W7 = utils.weight_variable([1, 1