# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
import logging
import pickle
import sys
from time import localtime, strftime
from net_input_everything_featparts4 import *
from ops import *
from utils import *
relu = tf.nn.relu
import tensorflow as tf
import os
import heapq
from sklearn.decomposition import PCA
import Test2 as T
import cell as C
import os.path
import time
import numpy as np
import shutil
import face_recognition
import os
# X = np.array([[-1,2,66,-1], [-2,6,58,-1], [-3,8,45,-2], [1,9,36,1], [2,10,62,1], [3,5,83,2]]) # load data, 4 dimensions
# pca = PCA(n_components=2) # reduce to 2 dimensions
# pca.fit(X) # fit
# newX=pca.fit_transform(X) # data after dimensionality reduction
# Let GPU memory grow on demand
config = tf.ConfigProto()
config.gpu_options.allow_growth = True  # allocate GPU memory incrementally instead of all at once
# sess = tf.Session(config=config)
os.system('echo $PATH')  # debug: print the shell PATH at startup
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"  # expose only the first two GPUs
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # silence TF C++ log spam (doesn't enable AVX/FMA)
# Loss-weight hyperparameters. These should provide a good initialization,
# but for specific refinement you can adjust them during training.
# SYM_W = 0.3 #lambda1 (symmetry loss weight, currently disabled)
ALPHA_ADVER = 1  # lambda2: adversarial loss weight
BELTA_FEATURE = 1e-3  # lambda3: feature loss weight
# FEATURE = 1e-3
TV_WEIGHT = 1e-3  # lambda4: total-variation loss weight
# IDEN_W = 1e-3  # alpha (identity loss weight, currently disabled)
UPDATE_G = 1  # optimize D once and UPDATE_G times G per step
UPDATE_D = 1  # number of D optimization passes per step
MODE = 'fs60'  # 'f' feature loss enabled. 'v' verification enabled. 'o' original, 'm' masked is mandatory and no need to specify
LOAD_60_LABEL = False  # otherwise load frontal label
WITHOUT_CODEMAP = True  # NOTE(review): presumably drops the pose code-map input — confirm against the data loader
USE_MASK = False  # NOTE(review): presumably toggles a face-region mask on the loss — confirm in build/loss code
RANDOM_VERIFY = False
CHANNEL = 3  # number of image color channels (RGB)
###########################################################
############################################################
# Command-line flags (TF1 tf.app.flags). The bracketed values in the help
# strings are leftovers from the original TP-GAN code and may disagree with
# the actual defaults passed here.
flags = tf.app.flags
flags.DEFINE_integer("epoch", 300, "Epoch to train [250]")
flags.DEFINE_float("learning_rate", 2e-4, "Learning rate of for adam [0.0002]")
flags.DEFINE_float("beta1", 0.5, "Momentum term of adam [0.5]")
#flags.DEFINE_integer("train_size", np.inf, "The size of train images [np.inf]")
flags.DEFINE_float("train_size", np.inf, "The size of train images [np.inf]")
flags.DEFINE_integer("batch_size", 1, "The size of batch images [64]")
flags.DEFINE_integer("image_size", 128, "The size of image to use (will be center cropped) [108]")
flags.DEFINE_integer("output_size", 128, "The size of the output images to produce [64]")
flags.DEFINE_integer("c_dim", 3, "Dimension of image color. [3]")
flags.DEFINE_string("dataset", "MultiPIE", "The name of dataset [celebA, mnist, lsun]")
flags.DEFINE_string("checkpoint_dir", "data1/check_3_17", "Directory name to save the checkpoints [checkpoint]")
flags.DEFINE_string("sample_dir", "data1/samples", "Directory name to save the image samples [samples]")
flags.DEFINE_boolean("is_train", True, "True for training, False for testing [False]")
flags.DEFINE_boolean("is_crop", True, "True for training, False for testing [False]")
flags.DEFINE_boolean("visualize", True, "True for visualizing, False for nothing [False]")
FLAGS = flags.FLAGS
#tf.subtract(x, y, name=None) # subtraction
class DCGAN(object):
def __init__(self, sess, batch_size=10, output_size=128, gf_dim=48, df_dim=48,
             dataset_name='MultiPIE', checkpoint_dir=None):
    """Construct the DCGAN model wrapper and load the DeepFace weights.

    Args:
        sess: TensorFlow session used to run the graph.
        batch_size: nominal batch size. NOTE(review): this argument is
            ignored — the constructor hard-codes self.batch_size = 2 below.
        output_size: resolution in pixels of the generated images. [128]
        gf_dim: filter count of the generator's first conv layer. [48]
        df_dim: filter count of the discriminator's first conv layer. [48]
        dataset_name: name of the training dataset. ['MultiPIE']
        checkpoint_dir: directory for saving/restoring checkpoints.
    """
    self.flage = False
    self.testing = False  # False True
    self.sess = sess
    self.batch_size = 2  # NOTE(review): overrides the batch_size argument
    self.time_size = 6  # number of frames per input sequence
    self.lstm = True
    # NOTE(review): presumably maps pose angle (degrees) -> index — confirm usage.
    self.Positions = {15: 0, 30: 1, 45: 2, 60: 3, 75: 4, 90: 5}
    self.test_batch_size = self.batch_size  ## 5
    self.output_size = output_size  ## 128
    self.gf_dim = gf_dim  # filter dimension of the generator's first conv layer
    self.df_dim = df_dim  # filter dimension of the discriminator's first conv layer
    self.z_dim = 100  # dimensionality of the noise input z
    random.seed()
    # Path to the pre-trained DeepFace weights pickle loaded below.
    self.DeepFacePath = '/media/gpu/文档/lsf/TP-GAN/DeepFace168.pickle'
    self.dataset_name = dataset_name  # MultiPIE
    self.checkpoint_dir = checkpoint_dir  # None
    self.loadDeepFace(self.DeepFacePath)
    # self.G = np.empty([2,128,128,3])
    # self.labels = np.empty([2, 128, 128, 3])
def build_model(self):
self.images_with_code2 = tf.placeholder(tf.float32, [self.batch_size] + [self.time_size] + [self.output_size, self.output_size, CHANNEL], name='images_with_code')
self.labels = tf.placeholder(tf.float32, [self.batch_size] + [self.output_size, self.output_size, CHANNEL], name='label_images')
self.g32_labels = tf.image.resize_bilinear(self.labels, [32, 32]) #双线性插值法 float32 (10,32,32,3)
self.g64_labels = tf.image.resize_bilinear(self.labels, [64, 64]) #双线性插值法 float32 (10,64,64,3)
# self.z = tf.random_normal([self.batch_size, 128,128,2], mean=0.0, stddev=0.02, seed=2017) # (10,100)正太分布
self.z = tf.random_normal([self.batch_size, 128, 128, 1], mean=0.0, stddev=0.02, seed=2017) # (10,100)正太分布
self.feats_lstms = []
self.feats2D = []
self.feats3D = []
self.img128 = []
self.g_loss = self.d_loss = 0
for i in range(self.time_size):
self.featsfus = []
if i == 0:
reuse = False
else:
reuse = True
self.feats = self.generator(self.images_with_code2[:,i,...], name="encoder_2D",reuse=reuse)
self.feats = list(self.feats)
self.feats_lstm = []
for j in range(len(self.feats)):
feats_0 = tf.reshape(self.feats[j],[self.feats[j].shape[0], 1, self.feats[j].shape[1], self.feats[j].shape[2],self.feats[j].shape[3]])
if i == 0:
self.feats_lstm.append(feats_0)
if i > 0:
feats_1 = tf.concat([feats_0, self.feats_lstms[i - 1][j]], 1)
self.feats_lstm.append(feats_1)
self.feats_lstms.append(self.feats_lstm)
if i == 0:
self.fts = self.feats
continue
if i == 1:
reuse = False
# a = tf.image.rgb_to_grayscale(self.images_with_code2[:, i - 1, ...])
# b = tf.image.rgb_to_grayscale(self.images_with_code2[:, i, ...])
# c = tf.reduce_mean(tf.concat([self.images_with_code2[:, i - 1, ...], self.images_with_code2[:, i, ...]], -1), -1,keepdims=True)
# self.G = tf.concat([a, c, b], -1)
self.G = self.images_with_code2[:, i-1, ...]
else:
reuse = True
h_state_0 = self.network2(self.feats_lstms[-1][0], "network2_h0",reuse=reuse)
h_state_1 = self.network2(self.feats_lstms[-1][1], "network2_h1",reuse=reuse)
h_state_2 = self.network2(self.feats_lstms[-1][2], "network2_h2",reuse=reuse)
h_state_3 = self.network2(self.feats_lstms[-1][3], "network2_h3",reuse=reuse) # (6,16,16,256)
self.feats_2D = h_state_0, h_state_1, h_state_2, h_state_3# self.h_state_5
self.feats2D.append(self.feats_2D)
self.fts_feats = []
for m in ran