In [1]:
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
 
# Load the MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("./mnist/data/", one_hot=True)
WARNING:tensorflow:From <ipython-input-1-9be9699d9187>:7: read_data_sets (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use alternatives such as official/mnist/dataset.py from tensorflow/models.
WARNING:tensorflow:From /home/lab1_lsh/anaconda3/envs/py36/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:260: maybe_download (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.
Instructions for updating:
Please write your own downloading logic.
WARNING:tensorflow:From /home/lab1_lsh/anaconda3/envs/py36/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:262: extract_images (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.data to implement this functionality.
Extracting ./mnist/data/train-images-idx3-ubyte.gz
WARNING:tensorflow:From /home/lab1_lsh/anaconda3/envs/py36/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:267: extract_labels (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.data to implement this functionality.
Extracting ./mnist/data/train-labels-idx1-ubyte.gz
WARNING:tensorflow:From /home/lab1_lsh/anaconda3/envs/py36/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:110: dense_to_one_hot (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.one_hot on tensors.
Extracting ./mnist/data/t10k-images-idx3-ubyte.gz
Extracting ./mnist/data/t10k-labels-idx1-ubyte.gz
WARNING:tensorflow:From /home/lab1_lsh/anaconda3/envs/py36/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:290: DataSet.__init__ (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use alternatives such as official/mnist/dataset.py from tensorflow/models.
In [2]:
# Constants for the number of epochs, the batch size, and the noise dimension
total_epoch = 100
batch_size = 100
n_noise = 100
 
# Global step counters, initialized to 0 (not trainable)
D_global_step = tf.Variable(0, trainable=False, name='D_global_step')
G_global_step = tf.Variable(0, trainable=False, name='G_global_step')
 
# Placeholder X holds the input images; Z holds the noise vector
X = tf.placeholder(tf.float32, [None, 28, 28, 1])
Z = tf.placeholder(tf.float32, [None, n_noise])

is_training = tf.placeholder(tf.bool)  # toggles batch normalization between training and inference mode
In [3]:
# Leaky ReLU: unlike ReLU, which outputs 0 in the negative region, it outputs a value with a small slope there.
def leaky_relu(x, leak=0.2):
    return tf.maximum(x, x * leak)
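
A tiny numeric check (my own illustration, not part of the original notebook): with leak=0.2, positive inputs pass through unchanged while negative inputs are scaled by 0.2.

import numpy as np
x = np.array([3.0, -3.0])
print(np.maximum(x, x * 0.2))  # [ 3.  -0.6], slope 0.2 on the negative side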

In [4]:
# Generator model
# Hallmarks of DCGAN: (transposed) convolution layers with batch normalization; the final output passes through tanh.
def generator(noise):
    with tf.variable_scope('generator'):
        output = tf.layers.dense(noise, 128*7*7)
        output = tf.reshape(output, [-1, 7, 7, 128])
        output = tf.nn.relu(tf.layers.batch_normalization(output, training=is_training))
        output = tf.layers.conv2d_transpose(output, 64, [5, 5], strides=(2, 2), padding='SAME')
        output = tf.nn.relu(tf.layers.batch_normalization(output, training=is_training))
        output = tf.layers.conv2d_transpose(output, 32, [5, 5], strides=(2, 2), padding='SAME')
        output = tf.nn.relu(tf.layers.batch_normalization(output, training=is_training))
        output = tf.layers.conv2d_transpose(output, 1, [5, 5], strides=(1, 1), padding='SAME')
        output = tf.tanh(output)
    return output
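
For reference, a shape trace of the generator above (my own annotation, derived from the layer parameters; N is the batch size):

# Z:                  [N, 100]
# dense + reshape ->  [N, 7, 7, 128]
# conv2d_transpose -> [N, 14, 14, 64]   (stride 2, SAME)
# conv2d_transpose -> [N, 28, 28, 32]   (stride 2, SAME)
# conv2d_transpose -> [N, 28, 28, 1]    (stride 1, SAME), then tanh -> values in [-1, 1]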

In [5]:
# Discriminator model
# Uses leaky_relu and convolution layers, then flattens at the end. (The diagram shows a sigmoid, but this code returns the raw logit; the sigmoid is applied later inside the loss function.)
def discriminator(inputs, reuse=None):
    with tf.variable_scope('discriminator') as scope:
        
        # After scope.reuse_variables(), variables with the same name inside this scope are reused rather than created anew.
        if reuse:
            scope.reuse_variables()
            
        output = tf.layers.conv2d(inputs, 32, [5, 5], strides=(2, 2), padding='SAME')
        output = leaky_relu(output)
        output = tf.layers.conv2d(output, 64, [5, 5], strides=(2, 2), padding='SAME')
        output = leaky_relu(tf.layers.batch_normalization(output, training=is_training))
        output = tf.layers.conv2d(output, 128, [5, 5], strides=(2, 2), padding='SAME')
        output = leaky_relu(tf.layers.batch_normalization(output, training=is_training))
        flat = tf.contrib.layers.flatten(output)
        output = tf.layers.dense(flat, 1, activation=None)
    return output
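
And the matching shape trace for the discriminator (again my own annotation; SAME padding yields ceil(n / stride) per spatial dimension):

# input X:  [N, 28, 28, 1]
# conv2d -> [N, 14, 14, 32]   (stride 2, SAME)
# conv2d -> [N, 7, 7, 64]     (stride 2, SAME)
# conv2d -> [N, 4, 4, 128]    (stride 2, SAME; ceil(7/2) = 4)
# flatten -> [N, 2048], dense -> [N, 1] raw logit
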
In [6]:
# Generate uniform random noise
def get_noise(batch_size, n_noise):
    return np.random.uniform(-1.0, 1.0, size=[batch_size, n_noise])
In [7]:
# Generates two noise vectors and steps between them so the interpolation process can be visualized
def get_moving_noise(batch_size, n_noise):
    assert batch_size > 0
 
    # Create the two endpoint noise vectors
    noise_list = []
    base_noise = np.random.uniform(-1.0, 1.0, size=[n_noise])
    end_noise = np.random.uniform(-1.0, 1.0, size=[n_noise])
 
    # Step from base toward end, divided evenly across the batch
    step = (end_noise - base_noise) / batch_size
    noise = np.copy(base_noise)
    for _ in range(batch_size - 1):
        noise_list.append(noise)
        noise = noise + step
    noise_list.append(end_noise)   # build the list from base to end so the gradual change is visible
    
    return noise_list
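
An essentially equivalent vectorized formulation (a sketch of my own, not part of the original) makes the linear interpolation explicit:

import numpy as np

def get_moving_noise_vectorized(batch_size, n_noise):
    base = np.random.uniform(-1.0, 1.0, size=n_noise)
    end = np.random.uniform(-1.0, 1.0, size=n_noise)
    t = np.linspace(0.0, 1.0, batch_size)[:, None]  # interpolation weights from 0 to 1
    return (1.0 - t) * base + t * end               # shape [batch_size, n_noise]
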
In [8]:
G = generator(Z)
D_real = discriminator(X)
D_gene = discriminator(G, True)

# Loss functions using sigmoid cross-entropy
loss_D_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_real, labels=tf.ones_like(D_real)))
loss_D_gene = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_gene, labels=tf.zeros_like(D_gene)))
loss_D = loss_D_real + loss_D_gene
loss_G = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_gene, labels=tf.ones_like(D_gene)))

Cross Entropy

Cross entropy is a loss function used mainly with categorical data:

loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=y_true, logits=y_pred)
loss = tf.reduce_mean(loss)

Cross entropy measures the similarity between two distributions. A deep-learning classifier computes a probability for each class, so the true class (y_true) can be compared against the model's prediction (y_pred); the closer the two distributions are, the smaller the cross-entropy value becomes.

Source: https://excelsior-cjh.tistory.com/151
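
As a quick numeric illustration (my own, based on the formula the TensorFlow documentation gives for sigmoid_cross_entropy_with_logits): for a logit x and label z the loss is max(x, 0) - x*z + log(1 + exp(-|x|)), which for z = 1 reduces to -log(sigmoid(x)).

import numpy as np
x, z = 0.7, 1.0  # illustrative logit and label
loss = max(x, 0) - x * z + np.log1p(np.exp(-abs(x)))
print(loss)                               # ~0.4032
print(-np.log(1.0 / (1.0 + np.exp(-x))))  # same value: -log(sigmoid(0.7))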

In [9]:
# To reuse variables via TensorFlow name scopes, fetch the variables that belong to each collection.
# Calling tf.get_collection(key) returns the list of variables in the collection named by key.
# Source: https://eyeofneedle.tistory.com/24 [Technology worth spreading]

vars_D = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator')
vars_G = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='generator')
 
# The batch-norm moving statistics are updated through the ops collected in UPDATE_OPS, so those ops must run along with each training step.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
In [10]:
# Train by minimizing each loss with AdamOptimizer
with tf.control_dependencies(update_ops):
    train_D = tf.train.AdamOptimizer().minimize(loss_D, var_list=vars_D, global_step=D_global_step)
    train_G = tf.train.AdamOptimizer().minimize(loss_G, var_list=vars_G, global_step=G_global_step)
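
A side note, not part of the cell above: tf.train.AdamOptimizer() runs with its defaults (learning_rate=0.001, beta1=0.9), while the DCGAN paper (Radford et al., 2016) recommends a smaller learning rate and lower momentum for more stable GAN training. A sketch of that alternative:

d_opt = tf.train.AdamOptimizer(learning_rate=0.0002, beta1=0.5)
g_opt = tf.train.AdamOptimizer(learning_rate=0.0002, beta1=0.5)
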
In [11]:
# Record the loss values for use with TensorBoard
tf.summary.scalar('loss_D', loss_D)
tf.summary.scalar('loss_G', loss_G)
Out[11]:
<tf.Tensor 'loss_G:0' shape=() dtype=string>
In [14]:
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
 
    merged = tf.summary.merge_all()
    writer = tf.summary.FileWriter('./logs', sess.graph)  # write logs for TensorBoard
 
    total_batch = int(mnist.train.num_examples / batch_size)  # number of batches per epoch
 
    for epoch in range(total_epoch):
        loss_val_D, loss_val_G = 0, 0
 
        batch_xs, batch_ys = None, None
        noise = None
 
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            batch_xs = batch_xs.reshape(-1, 28, 28, 1)
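            # Note (not in the original): the generator ends in tanh, so its
            # outputs lie in [-1, 1], while these pixels stay in [0, 1]; a common
            # DCGAN fix is batch_xs = batch_xs * 2 - 1 so both ranges match.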
            noise = get_noise(batch_size, n_noise)
 
            # Run one training step each for the discriminator and the generator
            _, loss_val_D = sess.run([train_D, loss_D], feed_dict={X: batch_xs, Z: noise, is_training: True})
            _, loss_val_G = sess.run([train_G, loss_G], feed_dict={X: batch_xs, Z: noise, is_training: True})
 
        summary = sess.run(merged, feed_dict={X: batch_xs, Z: noise, is_training: True})
        writer.add_summary(summary, global_step=sess.run(G_global_step))  # write the summary to the log
 
        # Print the epoch number and losses
        print('Epoch:', '%04d' % epoch,
            'D loss: {:.4}'.format(loss_val_D),
            'G loss: {:.4}'.format(loss_val_G))
     
        # Every 5 epochs (and at epoch 0), generate sample images and save them
        if epoch == 0 or (epoch + 1) % 5 == 0:
            sample_size = 10
            noise = get_noise(sample_size, n_noise)
            samples = sess.run(G, feed_dict={Z: noise, is_training: False})
            test_noise = get_moving_noise(sample_size, n_noise)  # noise list stepping from one endpoint vector to the other
            test_samples = sess.run(G, feed_dict={Z: test_noise, is_training: False})
 
            fig, ax = plt.subplots(2, sample_size, figsize=(sample_size, 2))
 
            for i in range(sample_size):
                ax[0][i].set_axis_off()
                ax[1][i].set_axis_off()
                ax[0][i].imshow(np.reshape(samples[i], (28, 28)))  # images produced by the generator
                ax[1][i].imshow(np.reshape(test_samples[i], (28, 28)))  # interpolation between the two noise vectors
             
            
            plt.savefig('{}.png'.format(str(epoch).zfill(3)), bbox_inches='tight')
            
            plt.close(fig)
Epoch: 0000 D loss: 0.02907 G loss: 6.078
Epoch: 0001 D loss: 0.1202 G loss: 4.496
Epoch: 0002 D loss: 0.06606 G loss: 5.314
Epoch: 0003 D loss: 0.1393 G loss: 8.347
Epoch: 0004 D loss: 0.4249 G loss: 3.941
Epoch: 0005 D loss: 0.4041 G loss: 4.288
Epoch: 0006 D loss: 0.4792 G loss: 2.279
Epoch: 0007 D loss: 0.3829 G loss: 3.653
Epoch: 0008 D loss: 0.5859 G loss: 3.193
Epoch: 0009 D loss: 0.734 G loss: 3.176
Epoch: 0010 D loss: 1.302 G loss: 2.147
Epoch: 0011 D loss: 0.4142 G loss: 3.214
Epoch: 0012 D loss: 0.525 G loss: 2.279
Epoch: 0013 D loss: 0.4454 G loss: 3.085
Epoch: 0014 D loss: 0.4857 G loss: 2.556
Epoch: 0015 D loss: 0.478 G loss: 2.82
Epoch: 0016 D loss: 0.5847 G loss: 1.487
Epoch: 0017 D loss: 0.6741 G loss: 3.33
Epoch: 0018 D loss: 0.46 G loss: 1.847
Epoch: 0019 D loss: 0.8492 G loss: 3.479
Epoch: 0020 D loss: 0.8634 G loss: 1.13
Epoch: 0021 D loss: 1.167 G loss: 1.825
Epoch: 0022 D loss: 0.639 G loss: 2.068
Epoch: 0023 D loss: 0.6697 G loss: 1.984
Epoch: 0024 D loss: 0.6301 G loss: 1.972
Epoch: 0025 D loss: 0.5148 G loss: 2.327
Epoch: 0026 D loss: 0.6565 G loss: 2.3
Epoch: 0027 D loss: 0.6536 G loss: 1.604
Epoch: 0028 D loss: 0.7611 G loss: 1.795
Epoch: 0029 D loss: 0.4582 G loss: 2.852
Epoch: 0030 D loss: 0.4666 G loss: 3.431
Epoch: 0031 D loss: 1.154 G loss: 2.66
Epoch: 0032 D loss: 0.6258 G loss: 2.951
Epoch: 0033 D loss: 0.4886 G loss: 1.489
Epoch: 0034 D loss: 0.4574 G loss: 2.827
Epoch: 0035 D loss: 0.7684 G loss: 2.315
Epoch: 0036 D loss: 0.5545 G loss: 2.547
Epoch: 0037 D loss: 1.026 G loss: 2.609
Epoch: 0038 D loss: 0.3344 G loss: 4.168
Epoch: 0039 D loss: 0.4717 G loss: 1.897
Epoch: 0040 D loss: 0.4616 G loss: 2.854
Epoch: 0041 D loss: 0.5543 G loss: 2.563
Epoch: 0042 D loss: 0.5416 G loss: 3.799
Epoch: 0043 D loss: 1.257 G loss: 1.067
Epoch: 0044 D loss: 0.2191 G loss: 3.314
Epoch: 0045 D loss: 0.3463 G loss: 3.754
Epoch: 0046 D loss: 0.2322 G loss: 3.58
Epoch: 0047 D loss: 0.2073 G loss: 2.277
Epoch: 0048 D loss: 0.4137 G loss: 2.734
Epoch: 0049 D loss: 0.3166 G loss: 5.358
Epoch: 0050 D loss: 0.7031 G loss: 1.085
Epoch: 0051 D loss: 0.1023 G loss: 4.362
Epoch: 0052 D loss: 0.09769 G loss: 5.473
Epoch: 0053 D loss: 0.312 G loss: 3.066
Epoch: 0054 D loss: 0.4291 G loss: 3.258
Epoch: 0055 D loss: 0.2741 G loss: 3.247
Epoch: 0056 D loss: 0.4392 G loss: 2.743
Epoch: 0057 D loss: 0.2945 G loss: 2.48
Epoch: 0058 D loss: 0.4327 G loss: 4.422
Epoch: 0059 D loss: 0.2953 G loss: 4.567
Epoch: 0060 D loss: 0.1427 G loss: 5.014
Epoch: 0061 D loss: 0.4876 G loss: 1.515
Epoch: 0062 D loss: 0.2276 G loss: 3.55
Epoch: 0063 D loss: 0.2033 G loss: 3.615
Epoch: 0064 D loss: 0.2513 G loss: 3.213
Epoch: 0065 D loss: 0.1801 G loss: 3.902
Epoch: 0066 D loss: 0.08173 G loss: 4.24
Epoch: 0067 D loss: 0.73 G loss: 4.982
Epoch: 0068 D loss: 0.3479 G loss: 2.658
Epoch: 0069 D loss: 0.139 G loss: 4.621
Epoch: 0070 D loss: 0.186 G loss: 4.804
Epoch: 0071 D loss: 0.09694 G loss: 3.157
Epoch: 0072 D loss: 0.1273 G loss: 3.242
Epoch: 0073 D loss: 0.8062 G loss: 1.48
Epoch: 0074 D loss: 0.3094 G loss: 6.895
Epoch: 0075 D loss: 0.09878 G loss: 5.624
Epoch: 0076 D loss: 0.3249 G loss: 2.962
Epoch: 0077 D loss: 0.1587 G loss: 3.929
Epoch: 0078 D loss: 0.2165 G loss: 3.993
Epoch: 0079 D loss: 0.1463 G loss: 3.276
Epoch: 0080 D loss: 0.603 G loss: 2.965
Epoch: 0081 D loss: 0.08803 G loss: 3.016
Epoch: 0082 D loss: 0.09373 G loss: 3.686
Epoch: 0083 D loss: 0.1198 G loss: 4.307
Epoch: 0084 D loss: 0.2539 G loss: 4.461
Epoch: 0085 D loss: 0.07144 G loss: 4.255
Epoch: 0086 D loss: 0.1534 G loss: 3.823
Epoch: 0087 D loss: 0.1359 G loss: 4.853
Epoch: 0088 D loss: 0.1508 G loss: 2.507
Epoch: 0089 D loss: 0.09045 G loss: 4.937
Epoch: 0090 D loss: 0.2825 G loss: 3.82
Epoch: 0091 D loss: 0.1382 G loss: 6.275
Epoch: 0092 D loss: 0.09481 G loss: 5.916
Epoch: 0093 D loss: 0.3046 G loss: 5.934
Epoch: 0094 D loss: 0.287 G loss: 2.843
Epoch: 0095 D loss: 0.2304 G loss: 3.66
Epoch: 0096 D loss: 0.4144 G loss: 3.291
Epoch: 0097 D loss: 0.0673 G loss: 5.339
Epoch: 0098 D loss: 0.3471 G loss: 3.676
Epoch: 0099 D loss: 0.1081 G loss: 4.82

First row - images generated by the Generator

Second row - two noise vectors are created and interpolated through Z (the latent vector), showing the image gradually changing from one to the other

[Figure: 009.png (generated samples and interpolation at epoch 9)]

[Figure: 054.png (generated samples and interpolation at epoch 54)]

[Figure: 074.png (generated samples and interpolation at epoch 74)]

[Figure: 099.png (generated samples and interpolation at epoch 99)]