Convolutional AutoEncoder

TensorFlow 2.x — Sequential model trained with model.fit

1. 필요한 패키지 import and tensorflow 2.x 버전 설정

In [8]:
%tensorflow_version 2.x

import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
import os 

import tensorflow as tf
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout, UpSampling2D, Input, Convolution2D, Reshape
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import plot_model

import scipy.io
from sklearn.model_selection import train_test_split

print(tf.__version__)
2.1.0

2. 컴퓨터의 파일을 colab으로 upload

In [3]:
from google.colab import files 

# Open Colab's file-upload widget; execution blocks until files are chosen.
uploaded = files.upload()

# After this cell starts running, click the file-select button to pick the file(s) to upload
for fn in uploaded.keys():
  print('User uploaded file "{name}" with length {length} bytes'.format(
      name=fn, length=len(uploaded[fn]))) 
Upload widget is only available when the cell has been executed in the current browser session. Please rerun this cell to enable.
Saving data_3000.zip to data_3000.zip
User uploaded file "data_3000.zip" with length 3309996 bytes

3. colab 드라이브 연결 후, 파일 zip 풀기

In [ ]:
from google.colab import drive
# Mount Google Drive (requires entering an authorization code)
drive.mount('/gdrive')

# Create the data_3000 folder and unzip the archive into it
! mkdir data_3000
! unzip data_3000.zip -d ./data_3000

4. 데이터 전처리

In [5]:
# Path to the unzipped data (set to the folder that contains the images)
src = './data_3000/'

# Read a single image as grayscale.
# BUG FIX: the original passed cv.COLOR_BGR2GRAY (a color-CONVERSION code,
# value 6) as the imread flag; the correct imread flag for grayscale
# loading is cv.IMREAD_GRAYSCALE.
def img_read(src, file):
    return cv.imread(src + file, cv.IMREAD_GRAYSCALE)

# Read a single image as grayscale, display it, and return it.
# (Delegates to img_read instead of duplicating the read logic.)
def img_read_plot(src, file):
    img = img_read(src, file)
    plt.imshow(img)
    plt.xticks([])  # hide x-axis ticks
    plt.yticks([])  # hide y-axis ticks
    plt.show()
    return img

# File names found under src
files = os.listdir(src)

X, Y = [], []

# Normalize pixels to [0, 1] by dividing by 255 and collect into X;
# the label Y is parsed from the file name with the extension stripped.
# The first 10 images are also displayed as a sanity check.
for count, file in enumerate(files):
  if count < 10:
    print(count)
    X.append(img_read_plot(src, file) / 255.)
  else:
    X.append(img_read(src, file) / 255.)
  Y.append(float(file[:-4]))

# Shapes of one sample and of the full data set
print('X_shape:', np.shape(X[0]), 'Y_shape:', np.shape(Y[0]))
print('X_list shape:', np.shape(X), 'Y_list shape:', np.shape(Y))

# Hyperparameters
img_size = 56       # image side length in pixels
latent_dim = 32     # size of the latent dimension
BUFFER_SIZE = 5600  # NOTE(review): comment claimed "total image count" but the dataset has 3000 images; unused below — confirm before relying on it
BATCH_SIZE = 20     # batch size (should divide the number of images evenly)

# Split into train (80%) / test (20%) sets
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=1, shuffle=True)

# Reshape to (n, 56, 56, 1) so the data matches the CNN input layer
X_train = np.reshape(X_train, (len(X_train), img_size, img_size, 1))
X_test = np.reshape(X_test, (len(X_test), img_size, img_size, 1))
print(np.shape(X_train), np.shape(X_test))
0
1
2
3
4
5
6
7
8
9
X_shape: (56, 56) Y_shape: ()
X_list shape: (3000, 56, 56) Y_list shape: (3000,)
(2400, 56, 56, 1) (600, 56, 56, 1)

5. 모델 구성

In [ ]:
# Encoder: 56x56x1 image -> latent vector of size latent_dim
def encoder():
  """Build the encoder as a Keras Sequential model.

  Five Conv2D stages (interleaved with 2x2 max-pooling) shrink the
  56x56x1 input down to a 4x4x25 feature map, which is flattened and
  projected to the latent dimension by a final Dense layer.
  """
  stack = [
      Conv2D(5, (3, 3), activation='relu', padding='same', input_shape=(img_size, img_size, 1)),
      MaxPooling2D((2, 2), padding='same'),
      Conv2D(10, (3, 3), activation='relu', padding='same'),
      MaxPooling2D((2, 2), padding='same'),
      Conv2D(15, (3, 3), activation='relu', padding='same'),
      MaxPooling2D((2, 2), padding='same'),
      Conv2D(20, (3, 3), activation='relu', padding='same'),
      MaxPooling2D((2, 2), padding='same'),
      Conv2D(25, (3, 3), activation='relu', padding='same'),
      Flatten(),
      Dense(latent_dim),  # compress to the latent dimension
  ]
  return tf.keras.Sequential(stack)
In [9]:
# Build the encoder, store it in a variable, and inspect its architecture
e_model = encoder()
e_model.summary()
plot_model(e_model, show_shapes=True)
Model: "sequential_1"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d_5 (Conv2D)            (None, 56, 56, 5)         50        
_________________________________________________________________
max_pooling2d_4 (MaxPooling2 (None, 28, 28, 5)         0         
_________________________________________________________________
conv2d_6 (Conv2D)            (None, 28, 28, 10)        460       
_________________________________________________________________
max_pooling2d_5 (MaxPooling2 (None, 14, 14, 10)        0         
_________________________________________________________________
conv2d_7 (Conv2D)            (None, 14, 14, 15)        1365      
_________________________________________________________________
max_pooling2d_6 (MaxPooling2 (None, 7, 7, 15)          0         
_________________________________________________________________
conv2d_8 (Conv2D)            (None, 7, 7, 20)          2720      
_________________________________________________________________
max_pooling2d_7 (MaxPooling2 (None, 4, 4, 20)          0         
_________________________________________________________________
conv2d_9 (Conv2D)            (None, 4, 4, 25)          4525      
_________________________________________________________________
flatten_1 (Flatten)          (None, 400)               0         
_________________________________________________________________
dense_1 (Dense)              (None, 32)                12832     
=================================================================
Total params: 21,952
Trainable params: 21,952
Non-trainable params: 0
_________________________________________________________________
Out[9]:
In [ ]:
# Decoder: latent vector -> reconstructed 56x56x1 image
def decoder():
  """Build the decoder as a Keras Sequential model.

  A Dense + Reshape expands the latent vector into a 7x7x56 feature map;
  three Conv2D/UpSampling2D stages grow it back to 56x56, and a final
  1-channel Conv2D (linear activation) produces the reconstruction.
  """
  stack = [
      Dense(7 * 7 * 56, input_shape=(latent_dim,)),
      Reshape((7, 7, 56)),
      Conv2D(56, (2, 2), activation='relu', padding='same'),
      UpSampling2D((2, 2)),
      Conv2D(56, (2, 2), activation='relu', padding='same'),
      UpSampling2D((2, 2)),
      Conv2D(56, (2, 2), activation='relu', padding='same'),
      UpSampling2D((2, 2)),
      Dropout(0.5),
      Conv2D(1, (2, 2), padding='same'),  # single-channel linear output
  ]
  return tf.keras.Sequential(stack)
In [11]:
# Build the decoder, store it in a variable, and inspect its architecture
d_model = decoder()
d_model.summary()
plot_model(d_model, show_shapes=True)
Model: "sequential_2"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
dense_2 (Dense)              (None, 2744)              90552     
_________________________________________________________________
reshape (Reshape)            (None, 7, 7, 56)          0         
_________________________________________________________________
conv2d_10 (Conv2D)           (None, 7, 7, 56)          12600     
_________________________________________________________________
up_sampling2d (UpSampling2D) (None, 14, 14, 56)        0         
_________________________________________________________________
conv2d_11 (Conv2D)           (None, 14, 14, 56)        12600     
_________________________________________________________________
up_sampling2d_1 (UpSampling2 (None, 28, 28, 56)        0         
_________________________________________________________________
conv2d_12 (Conv2D)           (None, 28, 28, 56)        12600     
_________________________________________________________________
up_sampling2d_2 (UpSampling2 (None, 56, 56, 56)        0         
_________________________________________________________________
dropout (Dropout)            (None, 56, 56, 56)        0         
_________________________________________________________________
conv2d_13 (Conv2D)           (None, 56, 56, 1)         225       
=================================================================
Total params: 128,577
Trainable params: 128,577
Non-trainable params: 0
_________________________________________________________________
Out[11]:
In [ ]:
# Chain the encoder and decoder into the full autoencoder
input_img = Input(shape=(img_size, img_size, 1))
model = Model(input_img, d_model(e_model(input_img)), name='autoencoder')

# Compile with Adam; MSE reconstruction loss, MAE as an extra metric.
# FIX: 'learning_rate' replaces the deprecated 'lr' keyword in TF 2.x Keras optimizers.
adam = Adam(learning_rate=0.00008, beta_1=0.9)
model.compile(optimizer=adam, loss='mean_squared_error', metrics=['mae'])

6. train

In [13]:
# Train with model.fit; an autoencoder learns to reproduce its input, so
# X_train serves as both input and target. Training history is kept for plotting.
# FIX: use the BATCH_SIZE hyperparameter declared earlier instead of a magic 20.
history = model.fit(X_train, X_train,
                    batch_size=BATCH_SIZE,
                    epochs=100,
                    verbose=2,
                    validation_data=(X_test, X_test),
                    shuffle=True)
Train on 2400 samples, validate on 600 samples
Epoch 1/100
2400/2400 - 9s - loss: 0.2522 - mae: 0.3872 - val_loss: 0.1125 - val_mae: 0.2731
Epoch 2/100
2400/2400 - 1s - loss: 0.1209 - mae: 0.2773 - val_loss: 0.1011 - val_mae: 0.2542
Epoch 3/100
2400/2400 - 1s - loss: 0.1113 - mae: 0.2642 - val_loss: 0.0969 - val_mae: 0.2488
Epoch 4/100
2400/2400 - 1s - loss: 0.1013 - mae: 0.2505 - val_loss: 0.0788 - val_mae: 0.2154
Epoch 5/100
2400/2400 - 1s - loss: 0.0781 - mae: 0.2136 - val_loss: 0.0594 - val_mae: 0.1803
Epoch 6/100
2400/2400 - 1s - loss: 0.0643 - mae: 0.1901 - val_loss: 0.0503 - val_mae: 0.1621
Epoch 7/100
2400/2400 - 1s - loss: 0.0560 - mae: 0.1754 - val_loss: 0.0442 - val_mae: 0.1483
Epoch 8/100
2400/2400 - 1s - loss: 0.0500 - mae: 0.1644 - val_loss: 0.0397 - val_mae: 0.1407
Epoch 9/100
2400/2400 - 1s - loss: 0.0457 - mae: 0.1560 - val_loss: 0.0366 - val_mae: 0.1333
Epoch 10/100
2400/2400 - 1s - loss: 0.0426 - mae: 0.1497 - val_loss: 0.0343 - val_mae: 0.1274
Epoch 11/100
2400/2400 - 1s - loss: 0.0402 - mae: 0.1446 - val_loss: 0.0324 - val_mae: 0.1231
Epoch 12/100
2400/2400 - 1s - loss: 0.0381 - mae: 0.1403 - val_loss: 0.0309 - val_mae: 0.1192
Epoch 13/100
2400/2400 - 1s - loss: 0.0363 - mae: 0.1366 - val_loss: 0.0298 - val_mae: 0.1173
Epoch 14/100
2400/2400 - 1s - loss: 0.0348 - mae: 0.1333 - val_loss: 0.0286 - val_mae: 0.1129
Epoch 15/100
2400/2400 - 1s - loss: 0.0335 - mae: 0.1303 - val_loss: 0.0275 - val_mae: 0.1105
Epoch 16/100
2400/2400 - 1s - loss: 0.0322 - mae: 0.1275 - val_loss: 0.0266 - val_mae: 0.1080
Epoch 17/100
2400/2400 - 1s - loss: 0.0310 - mae: 0.1248 - val_loss: 0.0259 - val_mae: 0.1061
Epoch 18/100
2400/2400 - 1s - loss: 0.0299 - mae: 0.1223 - val_loss: 0.0250 - val_mae: 0.1040
Epoch 19/100
2400/2400 - 1s - loss: 0.0291 - mae: 0.1202 - val_loss: 0.0243 - val_mae: 0.1022
Epoch 20/100
2400/2400 - 1s - loss: 0.0282 - mae: 0.1183 - val_loss: 0.0239 - val_mae: 0.1008
Epoch 21/100
2400/2400 - 1s - loss: 0.0275 - mae: 0.1164 - val_loss: 0.0232 - val_mae: 0.0991
Epoch 22/100
2400/2400 - 1s - loss: 0.0268 - mae: 0.1148 - val_loss: 0.0227 - val_mae: 0.0975
Epoch 23/100
2400/2400 - 1s - loss: 0.0262 - mae: 0.1134 - val_loss: 0.0221 - val_mae: 0.0954
Epoch 24/100
2400/2400 - 1s - loss: 0.0256 - mae: 0.1120 - val_loss: 0.0219 - val_mae: 0.0954
Epoch 25/100
2400/2400 - 1s - loss: 0.0250 - mae: 0.1106 - val_loss: 0.0214 - val_mae: 0.0937
Epoch 26/100
2400/2400 - 1s - loss: 0.0245 - mae: 0.1093 - val_loss: 0.0210 - val_mae: 0.0928
Epoch 27/100
2400/2400 - 1s - loss: 0.0240 - mae: 0.1081 - val_loss: 0.0208 - val_mae: 0.0915
Epoch 28/100
2400/2400 - 1s - loss: 0.0235 - mae: 0.1069 - val_loss: 0.0205 - val_mae: 0.0908
Epoch 29/100
2400/2400 - 1s - loss: 0.0230 - mae: 0.1058 - val_loss: 0.0202 - val_mae: 0.0899
Epoch 30/100
2400/2400 - 1s - loss: 0.0226 - mae: 0.1048 - val_loss: 0.0199 - val_mae: 0.0896
Epoch 31/100
2400/2400 - 1s - loss: 0.0222 - mae: 0.1039 - val_loss: 0.0197 - val_mae: 0.0881
Epoch 32/100
2400/2400 - 1s - loss: 0.0219 - mae: 0.1030 - val_loss: 0.0194 - val_mae: 0.0880
Epoch 33/100
2400/2400 - 1s - loss: 0.0215 - mae: 0.1022 - val_loss: 0.0192 - val_mae: 0.0874
Epoch 34/100
2400/2400 - 1s - loss: 0.0212 - mae: 0.1013 - val_loss: 0.0188 - val_mae: 0.0858
Epoch 35/100
2400/2400 - 1s - loss: 0.0208 - mae: 0.1004 - val_loss: 0.0186 - val_mae: 0.0859
Epoch 36/100
2400/2400 - 1s - loss: 0.0205 - mae: 0.0997 - val_loss: 0.0184 - val_mae: 0.0849
Epoch 37/100
2400/2400 - 1s - loss: 0.0202 - mae: 0.0990 - val_loss: 0.0182 - val_mae: 0.0842
Epoch 38/100
2400/2400 - 1s - loss: 0.0199 - mae: 0.0983 - val_loss: 0.0180 - val_mae: 0.0837
Epoch 39/100
2400/2400 - 1s - loss: 0.0196 - mae: 0.0975 - val_loss: 0.0178 - val_mae: 0.0833
Epoch 40/100
2400/2400 - 1s - loss: 0.0194 - mae: 0.0969 - val_loss: 0.0176 - val_mae: 0.0830
Epoch 41/100
2400/2400 - 1s - loss: 0.0191 - mae: 0.0963 - val_loss: 0.0175 - val_mae: 0.0829
Epoch 42/100
2400/2400 - 1s - loss: 0.0189 - mae: 0.0958 - val_loss: 0.0172 - val_mae: 0.0813
Epoch 43/100
2400/2400 - 1s - loss: 0.0186 - mae: 0.0951 - val_loss: 0.0170 - val_mae: 0.0812
Epoch 44/100
2400/2400 - 1s - loss: 0.0184 - mae: 0.0946 - val_loss: 0.0171 - val_mae: 0.0814
Epoch 45/100
2400/2400 - 1s - loss: 0.0182 - mae: 0.0941 - val_loss: 0.0169 - val_mae: 0.0812
Epoch 46/100
2400/2400 - 1s - loss: 0.0179 - mae: 0.0936 - val_loss: 0.0167 - val_mae: 0.0799
Epoch 47/100
2400/2400 - 1s - loss: 0.0177 - mae: 0.0930 - val_loss: 0.0164 - val_mae: 0.0791
Epoch 48/100
2400/2400 - 1s - loss: 0.0175 - mae: 0.0925 - val_loss: 0.0164 - val_mae: 0.0795
Epoch 49/100
2400/2400 - 1s - loss: 0.0173 - mae: 0.0920 - val_loss: 0.0161 - val_mae: 0.0783
Epoch 50/100
2400/2400 - 1s - loss: 0.0171 - mae: 0.0916 - val_loss: 0.0161 - val_mae: 0.0786
Epoch 51/100
2400/2400 - 1s - loss: 0.0169 - mae: 0.0911 - val_loss: 0.0161 - val_mae: 0.0783
Epoch 52/100
2400/2400 - 1s - loss: 0.0167 - mae: 0.0907 - val_loss: 0.0158 - val_mae: 0.0779
Epoch 53/100
2400/2400 - 1s - loss: 0.0166 - mae: 0.0903 - val_loss: 0.0157 - val_mae: 0.0773
Epoch 54/100
2400/2400 - 1s - loss: 0.0164 - mae: 0.0898 - val_loss: 0.0155 - val_mae: 0.0767
Epoch 55/100
2400/2400 - 1s - loss: 0.0162 - mae: 0.0894 - val_loss: 0.0154 - val_mae: 0.0762
Epoch 56/100
2400/2400 - 1s - loss: 0.0161 - mae: 0.0890 - val_loss: 0.0154 - val_mae: 0.0765
Epoch 57/100
2400/2400 - 1s - loss: 0.0159 - mae: 0.0888 - val_loss: 0.0152 - val_mae: 0.0757
Epoch 58/100
2400/2400 - 1s - loss: 0.0157 - mae: 0.0883 - val_loss: 0.0152 - val_mae: 0.0759
Epoch 59/100
2400/2400 - 1s - loss: 0.0156 - mae: 0.0879 - val_loss: 0.0151 - val_mae: 0.0753
Epoch 60/100
2400/2400 - 1s - loss: 0.0155 - mae: 0.0876 - val_loss: 0.0149 - val_mae: 0.0753
Epoch 61/100
2400/2400 - 1s - loss: 0.0153 - mae: 0.0872 - val_loss: 0.0148 - val_mae: 0.0750
Epoch 62/100
2400/2400 - 1s - loss: 0.0152 - mae: 0.0868 - val_loss: 0.0148 - val_mae: 0.0746
Epoch 63/100
2400/2400 - 1s - loss: 0.0151 - mae: 0.0866 - val_loss: 0.0147 - val_mae: 0.0742
Epoch 64/100
2400/2400 - 1s - loss: 0.0149 - mae: 0.0863 - val_loss: 0.0146 - val_mae: 0.0743
Epoch 65/100
2400/2400 - 1s - loss: 0.0148 - mae: 0.0860 - val_loss: 0.0145 - val_mae: 0.0740
Epoch 66/100
2400/2400 - 1s - loss: 0.0147 - mae: 0.0856 - val_loss: 0.0144 - val_mae: 0.0737
Epoch 67/100
2400/2400 - 1s - loss: 0.0146 - mae: 0.0853 - val_loss: 0.0143 - val_mae: 0.0734
Epoch 68/100
2400/2400 - 1s - loss: 0.0144 - mae: 0.0849 - val_loss: 0.0143 - val_mae: 0.0732
Epoch 69/100
2400/2400 - 1s - loss: 0.0143 - mae: 0.0846 - val_loss: 0.0141 - val_mae: 0.0730
Epoch 70/100
2400/2400 - 1s - loss: 0.0142 - mae: 0.0843 - val_loss: 0.0141 - val_mae: 0.0730
Epoch 71/100
2400/2400 - 1s - loss: 0.0140 - mae: 0.0839 - val_loss: 0.0140 - val_mae: 0.0723
Epoch 72/100
2400/2400 - 1s - loss: 0.0140 - mae: 0.0838 - val_loss: 0.0140 - val_mae: 0.0719
Epoch 73/100
2400/2400 - 1s - loss: 0.0139 - mae: 0.0835 - val_loss: 0.0138 - val_mae: 0.0716
Epoch 74/100
2400/2400 - 1s - loss: 0.0138 - mae: 0.0832 - val_loss: 0.0138 - val_mae: 0.0716
Epoch 75/100
2400/2400 - 1s - loss: 0.0137 - mae: 0.0829 - val_loss: 0.0138 - val_mae: 0.0722
Epoch 76/100
2400/2400 - 1s - loss: 0.0136 - mae: 0.0827 - val_loss: 0.0136 - val_mae: 0.0713
Epoch 77/100
2400/2400 - 1s - loss: 0.0135 - mae: 0.0824 - val_loss: 0.0136 - val_mae: 0.0711
Epoch 78/100
2400/2400 - 1s - loss: 0.0134 - mae: 0.0823 - val_loss: 0.0137 - val_mae: 0.0719
Epoch 79/100
2400/2400 - 1s - loss: 0.0133 - mae: 0.0820 - val_loss: 0.0134 - val_mae: 0.0707
Epoch 80/100
2400/2400 - 1s - loss: 0.0132 - mae: 0.0818 - val_loss: 0.0134 - val_mae: 0.0701
Epoch 81/100
2400/2400 - 1s - loss: 0.0131 - mae: 0.0815 - val_loss: 0.0134 - val_mae: 0.0704
Epoch 82/100
2400/2400 - 1s - loss: 0.0130 - mae: 0.0812 - val_loss: 0.0133 - val_mae: 0.0699
Epoch 83/100
2400/2400 - 1s - loss: 0.0130 - mae: 0.0811 - val_loss: 0.0132 - val_mae: 0.0700
Epoch 84/100
2400/2400 - 1s - loss: 0.0129 - mae: 0.0808 - val_loss: 0.0131 - val_mae: 0.0696
Epoch 85/100
2400/2400 - 1s - loss: 0.0128 - mae: 0.0807 - val_loss: 0.0132 - val_mae: 0.0698
Epoch 86/100
2400/2400 - 1s - loss: 0.0127 - mae: 0.0805 - val_loss: 0.0131 - val_mae: 0.0691
Epoch 87/100
2400/2400 - 1s - loss: 0.0126 - mae: 0.0802 - val_loss: 0.0130 - val_mae: 0.0692
Epoch 88/100
2400/2400 - 1s - loss: 0.0125 - mae: 0.0800 - val_loss: 0.0130 - val_mae: 0.0688
Epoch 89/100
2400/2400 - 1s - loss: 0.0125 - mae: 0.0799 - val_loss: 0.0129 - val_mae: 0.0688
Epoch 90/100
2400/2400 - 1s - loss: 0.0124 - mae: 0.0796 - val_loss: 0.0128 - val_mae: 0.0685
Epoch 91/100
2400/2400 - 1s - loss: 0.0123 - mae: 0.0794 - val_loss: 0.0128 - val_mae: 0.0684
Epoch 92/100
2400/2400 - 1s - loss: 0.0123 - mae: 0.0792 - val_loss: 0.0128 - val_mae: 0.0684
Epoch 93/100
2400/2400 - 1s - loss: 0.0122 - mae: 0.0790 - val_loss: 0.0128 - val_mae: 0.0681
Epoch 94/100
2400/2400 - 1s - loss: 0.0121 - mae: 0.0789 - val_loss: 0.0127 - val_mae: 0.0684
Epoch 95/100
2400/2400 - 1s - loss: 0.0120 - mae: 0.0786 - val_loss: 0.0127 - val_mae: 0.0685
Epoch 96/100
2400/2400 - 1s - loss: 0.0120 - mae: 0.0785 - val_loss: 0.0126 - val_mae: 0.0679
Epoch 97/100
2400/2400 - 1s - loss: 0.0119 - mae: 0.0783 - val_loss: 0.0126 - val_mae: 0.0684
Epoch 98/100
2400/2400 - 1s - loss: 0.0119 - mae: 0.0781 - val_loss: 0.0124 - val_mae: 0.0670
Epoch 99/100
2400/2400 - 1s - loss: 0.0118 - mae: 0.0779 - val_loss: 0.0124 - val_mae: 0.0670
Epoch 100/100
2400/2400 - 1s - loss: 0.0118 - mae: 0.0779 - val_loss: 0.0125 - val_mae: 0.0678

7. train 과 test의 loss그래프 출력

In [14]:
# Plot training and validation loss together.
# FIX: the original drew two separate single-line figures whose legends each
# wrongly claimed two series ('train' and 'test'), and the second figure's
# comment said "mean_absolute_error" while it actually plotted val_loss.
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()

8. 원본이미지와 test data를 복원해서 출력 후 비교

In [15]:
sample_size = 10                        # 보여줄 sample 갯수 설정
prediction = model.predict(X_test)      # test 데이터로 이미지 복원(predict 함수 사용)

# plot 하기
fig, ax = plt.subplots(2, sample_size, figsize=(15, 4))
for i in range(sample_size):
  org_img = X_test[i].reshape(56,56)
  rec_img = prediction[i].reshape(56,56)
  
  ax[0][i].set_axis_off()
  ax[1][i].set_axis_off()
  
  ax[0][i].imshow(org_img, cmap=plt.cm.bone)
  ax[1][i].imshow(rec_img, cmap=plt.cm.bone)

plt.show()