Classic Convolutional Networks: A DenseNet Implementation in Code

This post dissects the DenseNet convolutional network architecture, covering the Dense Block, the Transition Layer, and the Bottleneck Layer, and provides a detailed TensorFlow implementation. Thanks to its dense connectivity strategy, DenseNet keeps the parameter count low while remaining efficient.


Paper: Densely Connected Convolutional Networks

Paper link: https://arxiv.org/pdf/1608.06993.pdf

Comparison of common convolutional network architectures:

Figure 1. Comparison of classic convolutional architectures

DenseNet network topology:

Figure 2. DenseNet network topology

From Figure 2 we can see that DenseNet is built from a few recurring modules: the Dense Block and the Transition Layer; to reduce the parameter count, each Dense Block internally uses a bottleneck layer. Each of these modules is illustrated in the figures below, followed by a small worked example of how the feature-map count grows.

Figure 3. Dense connectivity inside a Dense Block

Figure 4. Internal implementation of a Dense Block

Figure 5. Internal implementation of the Transition Layer

Figure 6. The overall DenseNet architecture
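
Before diving into the code, a small worked example helps with the dense-connectivity bookkeeping (the concrete numbers below are illustrative assumptions, not a specific configuration from the paper): if a dense block receives k0 feature maps and each of its layers adds growth_rate new ones, the block's output carries k0 + layers_per_block * growth_rate channels.

# Channel bookkeeping for one dense block (illustrative values only)
k0 = 16               # feature maps entering the block
growth_rate = 12      # new feature maps added by every internal layer
layers_per_block = 12
out_channels = k0 + layers_per_block * growth_rate
print(out_channels)   # 160: each layer's output is concatenated with all earlier feature maps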

Code implementation (TensorFlow version)

import tensorflow as tf

BC_MODE = False  # whether to use the bottleneck (DenseNet-BC) structure

TF_VERSION = float('.'.join(tf.__version__.split('.')[:2]))


def batch_norm(_input, is_training):
	'''Batch normalization; is_training selects between batch and moving statistics.'''

	output = tf.contrib.layers.batch_norm(_input, scale=True,
										  is_training=is_training,
										  updates_collections=None)

	return output

def dropout(_input, is_training, keep_prob):
	'''Apply dropout only during training (keep_prob < 1); pass through otherwise.'''

	if keep_prob < 1:
		output = tf.cond(is_training, lambda: tf.nn.dropout(_input, keep_prob), lambda: _input)
	else:
		output = _input

	return output

def composite_function(_input, is_training, keep_prob, out_features, kernel_size=3):
	'''Composite function from the paper: BN -> ReLU -> Conv -> Dropout.'''

	with tf.variable_scope("composite_function"):
		output = batch_norm(_input, is_training)
		output = tf.nn.relu(output)
		output = conv2d(output, out_features=out_features, kernel_size=kernel_size)
		output = dropout(output, is_training, keep_prob)

	return output

def bottleneck(_input, out_features, is_training, keep_prob):
	'''
	Bottleneck layer: BN -> ReLU -> 1x1 conv -> Dropout.
	The 1x1 convolution limits the number of feature maps (to 4 * growth_rate)
	before they are processed by the following 3x3 convolution in composite_function.
	'''
	with tf.variable_scope("bottleneck"):
		output = batch_norm(_input, is_training)
		output = tf.nn.relu(output)
		inter_features = out_features * 4
		output = conv2d(output, out_features=inter_features, kernel_size=1, padding='VALID')
		output = dropout(output, is_training, keep_prob)

	return output

def add_internal_layer(_input, bc_mode, growth_rate, is_training, keep_prob):
	'''One layer inside a dense block: compute growth_rate new feature maps and concatenate them with the input.'''

	if not bc_mode:
		comp_out = composite_function(_input, is_training, keep_prob, out_features=growth_rate, kernel_size=3)
	else:
		bottleneck_out = bottleneck(_input, growth_rate, is_training, keep_prob)
		comp_out = composite_function(bottleneck_out, is_training, keep_prob, out_features=growth_rate, kernel_size=3)

	# dense connectivity: append the new feature maps along the channel axis
	if TF_VERSION >= 1.0:
		output = tf.concat(axis=3, values=(_input, comp_out))
	else:
		output = tf.concat(3, (_input, comp_out))

	return output

def add_block(_input, bc_mode, growth_rate, layers_per_block, is_training, keep_prob):
	'''Stack layers_per_block internal layers to form one dense block.'''

	output = _input
	for layer in range(layers_per_block):
		with tf.variable_scope("layer_%d" % layer):
			output = add_internal_layer(output, bc_mode, growth_rate, is_training, keep_prob)

	return output

def weight_variable_msra(shape, name):
	'''MSRA (He) initialization for 2D convolution kernels.'''

	return tf.get_variable(name=name, shape=shape,
						   initializer=tf.contrib.layers.variance_scaling_initializer())


def conv2d(_input,
		   out_features,
		   kernel_size,
		   strides=[1, 1, 1, 1],
		   padding='SAME'):
	'''Thin wrapper around tf.nn.conv2d that creates its own kernel variable.'''

	in_features = int(_input.get_shape()[-1])
	kernel = weight_variable_msra([kernel_size, kernel_size, in_features, out_features], name='kernel')
	output = tf.nn.conv2d(_input, kernel, strides, padding)
	return output

def avg_pool(_input, k):
	'''k x k average pooling with stride k.'''

	ksize = [1, k, k, 1]
	strides = [1, k, k, 1]
	padding = 'VALID'
	output = tf.nn.avg_pool(_input, ksize, strides, padding)
	return output

def transition_layer(_input, reduction, is_training, keep_prob):
	'''
	Transition layer between two dense blocks. Each dense block outputs a large
	number of feature maps; feeding them all directly into the next block would
	greatly increase the number of parameters, so the transition layer's main
	job is to compress them (channel reduction plus spatial downsampling).
	'''
	# call the composite function with a 1x1 kernel to compress the channels
	out_features = int(int(_input.get_shape()[-1]) * reduction)
	output = composite_function(_input, is_training, keep_prob, out_features=out_features, kernel_size=1)
	# then halve the spatial resolution with 2x2 average pooling
	output = avg_pool(output, k=2)

	return output
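
# Illustrative transition-layer effect (assumed numbers): with reduction = 0.5, a block
# output of 160 feature maps at 32x32 is compressed to 80 maps by the 1x1 composite
# function and then downsampled to 16x16 by the 2x2 average pooling.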

def transition_layer_to_classes(_input, is_training, n_classes):
	'''Classification head: BN -> ReLU -> global average pooling -> fully connected layer.'''

	output = batch_norm(_input, is_training)
	output = tf.nn.relu(output)
	last_pool_kernel = int(output.get_shape()[-2])
	output = avg_pool(output, k=last_pool_kernel)
	features_total = int(output.get_shape()[-1])
	output = tf.reshape(output, [-1, features_total])
	W = weight_variable_xavier([features_total, n_classes], name='W')
	bias = bias_variable([n_classes])
	logits = tf.matmul(output, W) + bias
	return logits

def weight_variable_xavier(shape, name):
	'''Xavier initialization for fully connected layer weights.'''

	return tf.get_variable(name,
						   shape=shape,
						   initializer=tf.contrib.layers.xavier_initializer())


def bias_variable(shape, name='bias'):
	'''Zero initialization for biases.'''

	initial = tf.constant(0.0, shape=shape)
	return tf.get_variable(name, initializer=initial)

def loss(logits, labels, weight_decay):
	'''Softmax cross-entropy plus L2 weight decay over all trainable variables.'''

	cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
	l2_loss = tf.add_n([tf.nn.l2_loss(var) for var in tf.trainable_variables()])
	total_loss = cross_entropy + l2_loss * weight_decay

	return total_loss

def optimizer(learning_rate, nesterov_momentum, total_loss):
	'''SGD with Nesterov momentum.'''

	optimizer = tf.train.MomentumOptimizer(learning_rate, nesterov_momentum, use_nesterov=True)
	train_step = optimizer.minimize(total_loss)

	return train_step

def accuracy(prediction, labels):
	'''Fraction of samples whose predicted class matches the one-hot label.'''

	correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(labels, 1))
	accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

	return accuracy

def DenseNet(x,
			 first_output_features,
			 total_blocks,
			 growth_rate,
			 depth,
			 is_training,
			 keep_prob,
			 reduction,
			 bc_mode,
			 n_classes):
	'''
	x: input feature maps, shape [BATCH_SIZE, CROP_SIZE, CROP_SIZE, CHANNELS]
	first_output_features: number of feature maps produced by the initial convolution (int)
	total_blocks: number of dense blocks (int)
	growth_rate: number of feature maps each layer adds (int)
	depth: total depth of the network
	is_training: switches BN / dropout layers between training and inference behaviour
	keep_prob: dropout keep probability
	reduction: compression rate (0.0-1.0) used in the transition layers
	bc_mode: whether to use bottleneck layers and feature-map compression (DenseNet-BC)
	n_classes: number of classes
	'''
	# number of layers inside each dense block
	layers_per_block = (depth - (total_blocks + 1)) // total_blocks
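	# e.g. depth = 40 with total_blocks = 3 gives layers_per_block = (40 - 4) // 3 = 12,
	# i.e. 12 layers in every dense block (the classic CIFAR DenseNet-40 setting)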

	with tf.variable_scope("Initial_convolution"):
		output = conv2d(x,out_features=first_output_features,kernel_size=3)

	for block in range(total_blocks):
		with tf.variable_scope("Block_%d" % block):
			output = add_block(output, bc_mode, growth_rate,
							   layers_per_block, is_training, keep_prob)

		if block != total_blocks - 1:
			with tf.variable_scope("Transition_after_block_%d" % block):
				output = transition_layer(output,reduction,is_training,keep_prob)


	with tf.variable_scope("Transition_to_classes"):
	 	logits = transition_layer_to_classes(output,is_training,n_classes)

	prediction = tf.nn.softmax(logits)

	return {"logits":logits,"prediction":prediction}

The code above is only the network definition; the loss function and evaluation metric can be defined however you like — in your own code you simply need to call the DenseNet() function above.
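
For completeness, here is a minimal sketch of how the functions above might be wired together, assuming CIFAR-10-sized inputs and that all the functions above are in scope; the placeholder names, hyperparameter values, and the commented feed_dict are illustrative assumptions, not part of the original code:

# Minimal usage sketch (assumed hyperparameters)
images = tf.placeholder(tf.float32, [None, 32, 32, 3])
labels = tf.placeholder(tf.float32, [None, 10])
is_training = tf.placeholder(tf.bool, shape=[])

net = DenseNet(images,
               first_output_features=16,
               total_blocks=3,
               growth_rate=12,
               depth=40,
               is_training=is_training,
               keep_prob=0.8,
               reduction=0.5,
               bc_mode=False,
               n_classes=10)

total_loss = loss(net["logits"], labels, weight_decay=1e-4)
train_step = optimizer(learning_rate=0.1, nesterov_momentum=0.9, total_loss=total_loss)
acc = accuracy(net["prediction"], labels)

with tf.Session() as sess:
	sess.run(tf.global_variables_initializer())
	# per training step, feed a real image/label batch:
	# sess.run([train_step, total_loss, acc],
	#          feed_dict={images: batch_x, labels: batch_y, is_training: True})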
