Building a WeChat Mini Program for Face Attributes (Ubuntu 16.04 + TensorFlow)

This article walks through deep learning with TensorFlow on Ubuntu 16.04, covering the whole pipeline: packing the data, training and testing the model, converting it to a pb file, wrapping the model with Flask, and finally bringing face-attribute recognition to a WeChat mini program.


I have recently been learning mini-program development, and the project involves the following steps:

1. Packing the data

## create_data.py
## Packs the CelebA images and their attribute labels into TFRecord files
import cv2
import tensorflow as tf
## dlib provides the face detector used to crop the face region
import dlib

## Read the attribute annotation file; each attribute name maps to a column index
anno_file = "/home/lsy/CelebA/Anno/list_attr_celeba.txt"
ff = open(anno_file)

anno_info = ff.readlines()

attribute_class = anno_info[1].strip().split(" ")

print(attribute_class)
## We only keep four attributes: Eyeglasses, Male, Young, Smiling
idx = 0
## Print the column index of each attribute we care about
for i in attribute_class:
    if i == "Eyeglasses":
        print("Eyeglasses", idx)
    elif i == "Male":
        print("Male", idx)
    elif i == "Young":
        print("Young", idx)
    elif i == "Smiling":
        print("Smiling", idx)

    idx += 1
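
## With the standard CelebA attribute ordering, the loop above should print
## (these reference values are an assumption based on the public
## list_attr_celeba.txt header, 0-based): Eyeglasses 15, Male 20, Smiling 31, Young 39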


## Pack the data into TFRecord files for training and testing
writer_train = tf.python_io.TFRecordWriter("train.tfrecords")
writer_test = tf.python_io.TFRecordWriter("test.tfrecords")
## dlib frontal face detector
detector = dlib.get_frontal_face_detector()
## Walk over every annotation row (row 0 is the image count, row 1 is the header)
for idx in range(2, len(anno_info)):
    info = anno_info[idx]
## Some rows contain double spaces that must be collapsed before splitting
    attr_val = info.replace("  ", " ").split(" ")

    #print(len(attr_val))
## Attribute columns are shifted by +1: column 0 of each row is the image file name
    print(attr_val[0])
    print(attr_val[16])
    print(attr_val[21])
    print(attr_val[32])
    print(attr_val[40])
## Directory of the downloaded CelebA images
    im_data = cv2.imread("/home/lsy/CelebA/Img/img_celeba.7z/img_celeba/" + attr_val[0])

    rects = detector(im_data,0)
## Skip images in which no face is detected
    if len(rects) == 0:
        continue

    x1 = rects[0].left()
    y1 = rects[0].top()
    x2 = rects[0].right()
    y2 = rects[0].bottom()
## Crop out the detected face; extend the box upward by 30% of its height to keep the forehead
    y1 = int(max(y1 - 0.3 * (y2 - y1), 0))


## Visualization (uncomment to inspect the detections)
    #cv2.rectangle(im_data,(x1,y1),(x2,y2),(255,0,0),2)
    #cv2.imshow("11",im_data)
    #cv2.waitKey(0)


## Data cleaning: drop faces that are too small or whose box falls outside the image

    if y2 - y1 < 50 or x2 - x1 < 50 or x1 < 0 or y1 < 0:
        continue

    im_data = im_data[y1:y2,x1:x2]
    im_data = cv2.resize(im_data,(128,128))

## CelebA attribute labels are -1/1; they are written as-is here and need to be
## mapped to 0/1 when the records are decoded for the 2-class softmax heads
    ex = tf.train.Example(
        features = tf.train.Features(
            feature = {
                "image":tf.train.Feature(
                    bytes_list = tf.train.BytesList(value=[im_data.tobytes()])
                ),
                "Eyeglasses":tf.train.Feature(
                    int64_list = tf.train.Int64List(
                        value = [int(attr_val[16])]
                    )
                ),
                "Male": tf.train.Feature(
                    int64_list=tf.train.Int64List(
                        value=[int(attr_val[21])]
                    )
                ),
                "Young": tf.train.Feature(
                    int64_list=tf.train.Int64List(
                         value=[int(attr_val[32])]
                    )
                ),
                "Smiling": tf.train.Feature(
                    int64_list=tf.train.Int64List(
                        value=[int(attr_val[40])]
                    )
                )
            }
        )
    )
## Serialize: roughly the last 5% of the rows become the test set, the rest the training set
    if idx > len(anno_info) * 0.95:
        writer_test.write(ex.SerializeToString())
    else:
        writer_train.write(ex.SerializeToString())

writer_train.close()
writer_test.close()
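
Before moving on to training, it is worth reading one record back to confirm the packing worked. Below is a minimal sanity-check sketch; it assumes only the feature names written above and the TF 1.x tf.python_io API, and is not part of the packing script itself.

## check_records.py
import tensorflow as tf

for record in tf.python_io.tf_record_iterator("train.tfrecords"):
    ex = tf.train.Example()
    ex.ParseFromString(record)
    feats = ex.features.feature
    ## 128 * 128 * 3 = 49152 raw bytes per image
    print("image bytes:", len(feats["image"].bytes_list.value[0]))
    for name in ["Eyeglasses", "Male", "Young", "Smiling"]:
        print(name, feats[name].int64_list.value[0])
    break  ## inspect only the first record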

2. Training the model

## train.py
## Build the model
import tensorflow as tf
from tensorflow.contrib.layers import batch_norm, flatten
from tensorflow.contrib.slim.python.slim.nets.inception_v3 import inception_v3_base
slim = tf.contrib.slim
## slim ships many backbones under nets; the Inception-v3 base can be swapped for another one
def inception_v3(images, drop_out=0.5, is_training=True):
    batch_norm_params ={
        "is_training":is_training,
        "trainable":True,
        "decay":0.9997,
        "epsilon":0.00001,
        "variables_collections":{
            "beta":None,
            "gamma":None,
            "moving_mean":["moving_vars"],
            "moving_variance":["moving_var"]
        }
    }
    weights_regularizer = tf.contrib.layers.l2_regularizer(0.00004)
    with tf.contrib.slim.arg_scope(
        [tf.contrib.slim.conv2d,tf.contrib.slim.fully_connected],
        weights_regularizer = weights_regularizer,
        trainable = True):
        with tf.contrib.slim.arg_scope(
            [tf.contrib.slim.conv2d],
            ## conv weights are initialized from a truncated normal
            weights_initializer = tf.truncated_normal_initializer(stddev=0.1),
            activation_fn = tf.nn.relu,
            normalizer_fn = batch_norm,
            normalizer_params = batch_norm_params):
            ## Run the images through the Inception-v3 backbone
            nets, endpoints = inception_v3_base(images)
            print(nets)
            print(endpoints)
            ## Global average pooling over the spatial dimensions (NHWC)
            net = tf.reduce_mean(nets, axis=[1, 2])
            net = tf.nn.dropout(net, drop_out, name="droplast")
            net = flatten(net, scope="flatten")
## Four fully connected heads: one 2-class (binary) classifier per attribute
    net_eyeglass = slim.fully_connected(net, 2, activation_fn=None)
    net_young = slim.fully_connected(net, 2, activation_fn=None)
    net_male = slim.fully_connected(net, 2, activation_fn=None)
    net_smiling = slim.fully_connected(net, 2, activation_fn=None)

    return net_eyeglass, net_young, net_male, net_smiling
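## All four heads share one backbone and their cross-entropy losses are simply
## summed further down, so this is a small multi-task network: a single forward
## pass predicts all four attributes at once.
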
## Placeholders for the input images and the four attribute labels
input_x = tf.placeholder(tf.float32, shape=[None, 128, 128, 3])
label_eyeglasses = tf.placeholder(tf.int64, shape=[None, 1])
label_young = tf.placeholder(tf.int64, shape=[None, 1])
label_male = tf.placeholder(tf.int64, shape=[None, 1])
label_smiling = tf.placeholder(tf.int64, shape=[None, 1])

logits_eyeglasses, logits_young, logits_male, logits_smiling = inception_v3(input_x,0.5,True)

loss_eyeglasses = tf.losses.sparse_softmax_cross_entropy(labels= label_eyeglasses,
                                                         logits = logits_eyeglasses)
loss_young = tf.losses.sparse_softmax_cross_entropy(labels= label_young,
                                                    logits = logits_young)
loss_male = tf.losses.sparse_softmax_cross_entropy(labels= label_male,
                                                   logits = logits_male)
loss_smiling = tf.losses.sparse_softmax_cross_entropy(labels= label_smiling,
                                                      logits = logits_smiling)

logits_eyeglasses = tf.nn.softmax(logits_eyeglasses)
logits_young = tf.nn.softmax(logits_young)
logits_male = tf.nn.softmax(logits_male)
logits_smiling = tf.nn.softmax(logits_smiling)

loss = loss_eyeglasses + loss_young + loss_male +loss_smiling
## Collect the L2 regularization losses added by the slim layers
reg_set =tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
l2_loss = tf.add_n(reg_set)
## Learning-rate schedule
global_step = tf.Variable(0, trainable=False)
lr = tf.train.exponential_decay(0.0001, global_step, decay_steps=1000,
                                                   decay_rate=0.98,
                                                   staircase=False)

## UPDATE_OPS holds the batch-norm moving-average updates, which must run with every training step
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.AdamOptimizer(lr).minimize(loss + l2_loss, global_step)


## Input pipeline: read one batch from the TFRecord files (type 0 = train, otherwise test)
def get_one_batch(batch_size, type):
    if type == 0:
        file_list = tf.gfile.Glob("train.tfrecords")
    else:
        file_list = tf.gfile.Glob("test.tfrecords")
    reader = tf.TFRecordReader()
## num_epochs=None keeps the queue producing data indefinitely, so training simply runs until the loss converges
    file_queue = tf.train.string_input_producer(
        file_list, num_epochs=None, shuffle=True)
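## What follows is a minimal sketch of the rest of the input pipeline: the feature
## names match create_data.py, CelebA's -1/1 labels are mapped to 0/1 for the
## softmax heads, and the batching parameters are assumptions
    _, serialized = reader.read(file_queue)
    features = tf.parse_single_example(
        serialized,
        features={
            "image": tf.FixedLenFeature([], tf.string),
            "Eyeglasses": tf.FixedLenFeature([], tf.int64),
            "Young": tf.FixedLenFeature([], tf.int64),
            "Male": tf.FixedLenFeature([], tf.int64),
            "Smiling": tf.FixedLenFeature([], tf.int64),
        })
    image = tf.decode_raw(features["image"], tf.uint8)
    image = tf.reshape(image, [128, 128, 3])
    ## scaling to [0, 1] is a preprocessing choice, not prescribed by the record format
    image = tf.cast(image, tf.float32) / 255.0
    labels = tf.stack([tf.cast(tf.equal(features[k], 1), tf.int64)
                       for k in ["Eyeglasses", "Young", "Male", "Smiling"]])
    images_batch, labels_batch = tf.train.shuffle_batch(
        [image, labels], batch_size=batch_size,
        capacity=2000, min_after_dequeue=1000, num_threads=2)
    ## images_batch: [batch, 128, 128, 3]; labels_batch: [batch, 4]
    return images_batch, labels_batch

With the graph and the input pipeline in place, the training loop just pulls batches from the queue and feeds the placeholders. A minimal sketch, with the batch size, step counts and checkpoint path picked arbitrarily:

images_train, labels_train = get_one_batch(64, 0)

saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    for step in range(100000):
        b_images, b_labels = sess.run([images_train, labels_train])
        ## feed each attribute its own [batch, 1] label column
        _, cur_loss = sess.run([train_op, loss], feed_dict={
            input_x: b_images,
            label_eyeglasses: b_labels[:, 0:1],
            label_young: b_labels[:, 1:2],
            label_male: b_labels[:, 2:3],
            label_smiling: b_labels[:, 3:4]})
        if step % 100 == 0:
            print(step, cur_loss)
        if step % 1000 == 0:
            saver.save(sess, "face_attr.ckpt", global_step=global_step)
    coord.request_stop()
    coord.join(threads)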