[논문읽기] 07-2. DCGAN Pokemon With Keras
📲PROJECT/논문읽기

[논문읽기] 07-2. DCGAN Pokemon With Keras

728x90
반응형
from __future__ import absolute_import, division, print_function, unicode_literals
In [306]:
import tensorflow as tf
tf.__version__
Out[306]:
'1.14.0-rc1'
In [0]:
import glob
import imageio
import matplotlib.pyplot as plt
import numpy as np
import os
import PIL
from PIL import Image
from tensorflow.keras.layers import *
import time
import cv2
import tqdm

from IPython import display
In [5]:
!apt-get install -y -qq software-properties-common python-software-properties module-init-tools
!add-apt-repository -y ppa:alessandro-strada/ppa 2>&1 > /dev/null
!apt-get update -qq 2>&1 > /dev/null
!apt-get -y install -qq google-drive-ocamlfuse fuse
from google.colab import auth
auth.authenticate_user()
from oauth2client.client import GoogleCredentials
creds = GoogleCredentials.get_application_default()
import getpass
!google-drive-ocamlfuse -headless -id={creds.client_id} -secret={creds.client_secret} < /dev/null 2>&1 | grep URL
vcode = getpass.getpass()
!echo {vcode} | google-drive-ocamlfuse -headless -id={creds.client_id} -secret={creds.client_secret}
E: Package 'python-software-properties' has no installation candidate
Selecting previously unselected package google-drive-ocamlfuse.
(Reading database ... 130912 files and directories currently installed.)
Preparing to unpack .../google-drive-ocamlfuse_0.7.3-0ubuntu3~ubuntu18.04.1_amd64.deb ...
Unpacking google-drive-ocamlfuse (0.7.3-0ubuntu3~ubuntu18.04.1) ...
Setting up google-drive-ocamlfuse (0.7.3-0ubuntu3~ubuntu18.04.1) ...
Processing triggers for man-db (2.8.3-2ubuntu0.1) ...
Please, open the following URL in a web browser: https://accounts.google.com/o/oauth2/auth?client_id=32555940559.apps.googleusercontent.com&redirect_uri=urn%3Aietf%3Awg%3Aoauth%3A2.0%3Aoob&scope=https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdrive&response_type=code&access_type=offline&approval_prompt=force
··········
Please, open the following URL in a web browser: https://accounts.google.com/o/oauth2/auth?client_id=32555940559.apps.googleusercontent.com&redirect_uri=urn%3Aietf%3Awg%3Aoauth%3A2.0%3Aoob&scope=https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdrive&response_type=code&access_type=offline&approval_prompt=force
Please enter the verification code: Access token retrieved correctly.
In [9]:
!mkdir -p drive 
!google-drive-ocamlfuse drive
!ls drive/"Colab Notebooks"
 진돗개    Dog_Classification.ipynb  'Pokemon_DCGAN의 사본'   Untitled1.ipynb
 bbox.py  'fast R-CNN.ipynb'	      shiba
 DCGAN	   GAN_Pytorch_MyData	      Untitled0.ipynb
In [0]:
path = './drive/Colab Notebooks/DCGAN/pokemon'
In [0]:
filenames = np.array(glob.glob('./drive/Colab Notebooks/DCGAN/pokemon/*.jpg'))
In [424]:
pokemon = list()
for i in tqdm.tqdm_notebook(range(len(filenames))):
    img = cv2.imread(filenames[i],cv2.IMREAD_COLOR)
    img = cv2.resize(img,(64,64))
    pokemon.append(img)
In [425]:
plt.imshow(pokemon[2])
print(pokemon[2].shape)
(64, 64, 3)
In [0]:
def preprocess(x):
    return (x/255)*2-1

def deprocess(x):
    return np.uint8((x+1)/2*255) 
In [0]:
from tensorflow.keras.models import *
from tensorflow.keras.layers import *
In [0]:
def get_generator():
  gen_model = Sequential()

  gen_model.add(Dense(input_dim=100, units=2048))
  gen_model.add(LeakyReLU(alpha=0.2))

  gen_model.add(Dense(256 * 8 * 8))
  gen_model.add(BatchNormalization())
  gen_model.add(LeakyReLU(alpha=0.2))

  gen_model.add(Reshape((8, 8, 256), input_shape=(256 * 8 * 8,)))
  gen_model.add(UpSampling2D(size=(2, 2)))

  gen_model.add(Conv2D(128, (5, 5), padding='same'))
  gen_model.add(LeakyReLU(alpha=0.2))

  gen_model.add(UpSampling2D(size=(2, 2)))

  gen_model.add(Conv2D(64, (5, 5), padding='same'))
  gen_model.add(LeakyReLU(alpha=0.2))

  gen_model.add(UpSampling2D(size=(2, 2)))

  gen_model.add(Conv2D(3, (5, 5), padding='same'))
  gen_model.add(Activation('tanh'))
  return gen_model
In [0]:
def get_discriminator():
  dis_model = Sequential()
  dis_model.add(Conv2D(128, (5, 5), padding='same', input_shape=(64, 64, 3)))
  dis_model.add(LeakyReLU(alpha=0.2))
  dis_model.add(MaxPooling2D(pool_size=(2, 2)))

  dis_model.add(Conv2D(256, (3, 3)))
  dis_model.add(LeakyReLU(alpha=0.2))
  dis_model.add(MaxPooling2D(pool_size=(2, 2)))

  dis_model.add(Conv2D(512, (3, 3)))
  dis_model.add(LeakyReLU(alpha=0.2))
  dis_model.add(MaxPooling2D(pool_size=(2, 2)))

  dis_model.add(Flatten())
  dis_model.add(Dense(1024))
  dis_model.add(LeakyReLU(alpha=0.2))

  dis_model.add(Dense(1))
  dis_model.add(Activation('sigmoid'))

  return dis_model
In [0]:
batch_size = 128
z_shape = 100
epochs = 1000
dis_learning_rate = 0.0002
gen_learning_rate = 0.0002
dis_momentum = 0.5
gen_momentum = 0.5
dis_nesterov = True
gen_nesterov = True
In [0]:
X = np.array(pokemon)
X = (X-127.5) / 127.5
In [0]:
# Define optimizers
from tensorflow.keras.optimizers import *
dis_optimizer = Adam(lr=dis_learning_rate, beta_1=dis_momentum)
gen_optimizer = Adam(lr=gen_learning_rate, beta_1=gen_momentum)

# dis_optimizer = SGD(lr=dis_learning_rate, momentum=dis_momentum, nesterov=dis_nesterov)
# gen_optimizer = SGD(lr=gen_learning_rate, momentum=gen_momentum, nesterov=gen_nesterov)
In [0]:
gen_model = get_generator()
gen_model.compile(loss='binary_crossentropy', optimizer=gen_optimizer)
In [0]:
dis_model = get_discriminator()
dis_model.compile(loss='binary_crossentropy', optimizer=dis_optimizer)
In [0]:
adversarial_model = Sequential()
adversarial_model.add(gen_model)
dis_model.trainable = False
adversarial_model.add(dis_model)
In [0]:
adversarial_model.compile(loss='binary_crossentropy', optimizer=gen_optimizer)
In [0]:
from tensorflow.keras.callbacks import TensorBoard
tensorboard = TensorBoard(log_dir="logs/{}".format(time.time()), write_images=True, write_grads=True, write_graph=True)
tensorboard.set_model(gen_model)
tensorboard.set_model(dis_model)
In [0]:
def save_rgb_img(img,path):
  fig = plt.figure()
  ax = fig.add_subplot(1,1,1)
  ax.imshow(img)
  ax.axis('off')
  ax.set_title("Image")
  
  plt.savefig(path)
  plt.close()
In [439]:
device_name = tf.test.gpu_device_name()
device_name

with tf.device('/GPU:0'):
  a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')
  b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')
c = tf.matmul(a, b)
# log_device_placement을 True로 설정하여 세션을 만듭니다.
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
# op를 실행합니다.
print(sess.run(c))
[[22. 28.]
 [49. 64.]]
In [0]:
for epoch in range(epochs):
  print("Epoch is", epoch)
  number_of_batches = int(X.shape[0] / batch_size)
  print("Number of batches", number_of_batches)
  for index in range(number_of_batches):
    z_noise = np.random.normal(0, 1, size=(batch_size, z_shape))
    image_batch = X[index * batch_size:(index + 1) * batch_size]
    generated_images = gen_model.predict_on_batch(z_noise)
    y_real = np.ones(batch_size) - np.random.random_sample(batch_size) * 0.2
    y_fake = np.random.random_sample(batch_size) * 0.2
    dis_loss_real = dis_model.train_on_batch(image_batch, y_real)
    dis_loss_fake = dis_model.train_on_batch(generated_images, y_fake)
    d_loss = (dis_loss_real+dis_loss_fake)/2
    print("d_loss:", d_loss)

    z_noise = np.random.normal(0, 1, size=(batch_size, z_shape))
    g_loss = adversarial_model.train_on_batch(z_noise, [1] * batch_size)
    print("g_loss:", g_loss)

    if epoch % 10 == 0:
        z_noise = np.random.normal(0, 1, size=(batch_size, z_shape))
        gen_images1 = gen_model.predict_on_batch(z_noise)
        for img in gen_images1[:2]:     # [:2]        
          save_rgb_img(img, "./drive/Colab Notebooks/DCGAN/results/one_{}.png".format(epoch))

  gen_model.save("./drive/Colab Notebooks/DCGAN/results/gen_model.h5")
  dis_model.save("./drive/Colab Notebooks/DCGAN/results/dis_model.h5")  
W0614 08:42:35.636625 140408559871872 training.py:2197] Discrepancy between trainable weights and collected trainable weights, did you set `model.trainable` without calling `model.compile` after ?
W0614 08:42:35.769290 140408559871872 training.py:2197] Discrepancy between trainable weights and collected trainable weights, did you set `model.trainable` without calling `model.compile` after ?
Epoch is 0
Number of batches 6
d_loss: 0.7466840744018555
g_loss: 0.6913359
W0614 08:42:36.105803 140408559871872 image.py:648] Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
W0614 08:42:37.369902 140408559871872 image.py:648] Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
d_loss: 0.7059701085090637
g_loss: 0.31180447
W0614 08:42:39.188244 140408559871872 image.py:648] Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
W0614 08:42:40.805777 140408559871872 image.py:648] Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
d_loss: 0.5790705680847168
g_loss: 0.47519732
W0614 08:42:42.713602 140408559871872 image.py:648] Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
W0614 08:42:43.940738 140408559871872 image.py:648] Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
d_loss: 0.5532870888710022
g_loss: 0.54871285
W0614 08:42:45.673555 140408559871872 image.py:648] Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
W0614 08:42:47.309546 140408559871872 image.py:648] Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
d_loss: 0.578808069229126
g_loss: 0.41854602
W0614 08:42:49.043815 140408559871872 image.py:648] Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
W0614 08:42:50.203110 140408559871872 image.py:648] Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
d_loss: 0.5492187738418579
g_loss: 1.1136755
W0614 08:42:52.059267 140408559871872 image.py:648] Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
W0614 08:42:53.165419 140408559871872 image.py:648] Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).

Epoch is 1 Number of batches 6 d_loss: 0.6293489336967468 g_loss: 0.5364257 d_loss: 0.5963907241821289 g_loss: 1.455703 d_loss: 0.5225332975387573 g_loss: 2.9206076 d_loss: 0.6771973967552185 g_loss: 1.497695 d_loss: 0.972356915473938 g_loss: 0.5699434 d_loss: 0.5080163478851318 g_loss: 1.1352361 Epoch is 2 Number of batches 6 d_loss: 0.451909601688385 g_loss: 2.2073097 d_loss: 0.3696335256099701 g_loss: 0.9562659 d_loss: 0.39729052782058716 g_loss: 0.22962993 d_loss: 0.8683243989944458 g_loss: 2.7547913 d_loss: 0.515442430973053 g_loss: 2.0335221 d_loss: 0.362031489610672 g_loss: 2.8030224 Epoch is 3 Number of batches 6 d_loss: 0.3458408713340759 g_loss: 2.399647 d_loss: 0.3334394693374634 g_loss: 2.2035112 d_loss: 0.30878710746765137 g_loss: 2.4823642 d_loss: 0.33320263028144836 g_loss: 2.0898116 d_loss: 0.3258468806743622 g_loss: 2.1524444 d_loss: 0.33179548382759094 g_loss: 2.00511 Epoch is 4 Number of batches 6 d_loss: 0.3442322313785553 g_loss: 1.9363065 d_loss: 0.3317245841026306 g_loss: 2.0686688 d_loss: 0.32227054238319397 g_loss: 2.1408176 d_loss: 0.33648762106895447 g_loss: 1.6672324 d_loss: 0.3482271134853363 g_loss: 2.460092 d_loss: 0.34325671195983887 g_loss: 2.1117969 Epoch is 5 Number of batches 6 d_loss: 0.3304678499698639 g_loss: 2.5013168 d_loss: 0.32904696464538574 g_loss: 2.5717883 d_loss: 0.5494174957275391 g_loss: 1.6007113 d_loss: 0.7298337817192078 g_loss: 3.4888923 d_loss: 0.40815338492393494 g_loss: 1.3344698 d_loss: 0.5579795241355896 g_loss: 1.5787725 Epoch is 6 Number of batches 6 d_loss: 0.4741252660751343 g_loss: 2.81844 d_loss: 0.3507201075553894 g_loss: 2.3492258 d_loss: 0.3459780514240265 g_loss: 2.3569021 d_loss: 0.33787813782691956 g_loss: 2.5125778 d_loss: 0.3236125409603119 g_loss: 2.4686031 d_loss: 0.31742018461227417 g_loss: 2.4283364 Epoch is 7 Number of batches 6 d_loss: 0.31923753023147583 g_loss: 2.5662096 d_loss: 0.3212466239929199

Epoch is 649 Number of batches 6 d_loss: 0.3312387466430664 g_loss: 2.3445854 d_loss: 0.32529839873313904 g_loss: 2.3411825 d_loss: 0.32071375846862793 g_loss: 2.4808054 d_loss: 0.3185819387435913 g_loss: 2.296806 d_loss: 0.32163211703300476 g_loss: 2.3788013 d_loss: 0.33558154106140137 g_loss: 2.2589333 Epoch is 650 Number of batches 6 d_loss: 0.3124457001686096 g_loss: 2.4288573 d_loss: 0.33641475439071655 g_loss: 2.291899 d_loss: 0.3393487334251404 g_loss: 2.2327538 d_loss: 0.32311615347862244 g_loss: 2.383648 d_loss: 0.33118799328804016 g_loss: 2.288849 d_loss: 0.33236902952194214 g_loss: 2.2025032

In [0]:
path2 = './drive/Colab Notebooks/DCGAN/results'
In [0]:
img_test = cv2.imread(os.path.join(path2,'one_400.png'))
In [16]:
plt.imshow(img_test)
Out[16]:
<matplotlib.image.AxesImage at 0x7effa5af5c50>
In [22]:
glob.glob(path2+'/'+'one_*.png')
Out[22]:
['./drive/Colab Notebooks/DCGAN/results/one_0.png',
 './drive/Colab Notebooks/DCGAN/results/one_10.png',
 './drive/Colab Notebooks/DCGAN/results/one_20.png',
 './drive/Colab Notebooks/DCGAN/results/one_30.png',
 './drive/Colab Notebooks/DCGAN/results/one_40.png',
 './drive/Colab Notebooks/DCGAN/results/one_50.png',
 './drive/Colab Notebooks/DCGAN/results/one_60.png',
 './drive/Colab Notebooks/DCGAN/results/one_70.png',
 './drive/Colab Notebooks/DCGAN/results/one_80.png',
 './drive/Colab Notebooks/DCGAN/results/one_90.png',
 './drive/Colab Notebooks/DCGAN/results/one_100.png',
 './drive/Colab Notebooks/DCGAN/results/one_110.png',
 './drive/Colab Notebooks/DCGAN/results/one_120.png',
 './drive/Colab Notebooks/DCGAN/results/one_130.png',
 './drive/Colab Notebooks/DCGAN/results/one_140.png',
 './drive/Colab Notebooks/DCGAN/results/one_150.png',
 './drive/Colab Notebooks/DCGAN/results/one_160.png',
 './drive/Colab Notebooks/DCGAN/results/one_170.png',
 './drive/Colab Notebooks/DCGAN/results/one_180.png',
 './drive/Colab Notebooks/DCGAN/results/one_190.png',
 './drive/Colab Notebooks/DCGAN/results/one_200.png',
 './drive/Colab Notebooks/DCGAN/results/one_210.png',
 './drive/Colab Notebooks/DCGAN/results/one_220.png',
 './drive/Colab Notebooks/DCGAN/results/one_230.png',
 './drive/Colab Notebooks/DCGAN/results/one_240.png',
 './drive/Colab Notebooks/DCGAN/results/one_250.png',
 './drive/Colab Notebooks/DCGAN/results/one_260.png',
 './drive/Colab Notebooks/DCGAN/results/one_270.png',
 './drive/Colab Notebooks/DCGAN/results/one_280.png',
 './drive/Colab Notebooks/DCGAN/results/one_290.png',
 './drive/Colab Notebooks/DCGAN/results/one_300.png',
 './drive/Colab Notebooks/DCGAN/results/one_310.png',
 './drive/Colab Notebooks/DCGAN/results/one_320.png',
 './drive/Colab Notebooks/DCGAN/results/one_330.png',
 './drive/Colab Notebooks/DCGAN/results/one_340.png',
 './drive/Colab Notebooks/DCGAN/results/one_350.png',
 './drive/Colab Notebooks/DCGAN/results/one_360.png',
 './drive/Colab Notebooks/DCGAN/results/one_370.png',
 './drive/Colab Notebooks/DCGAN/results/one_380.png',
 './drive/Colab Notebooks/DCGAN/results/one_390.png',
 './drive/Colab Notebooks/DCGAN/results/one_400.png',
 './drive/Colab Notebooks/DCGAN/results/one_410.png',
 './drive/Colab Notebooks/DCGAN/results/one_420.png',
 './drive/Colab Notebooks/DCGAN/results/one_430.png',
 './drive/Colab Notebooks/DCGAN/results/one_440.png',
 './drive/Colab Notebooks/DCGAN/results/one_450.png',
 './drive/Colab Notebooks/DCGAN/results/one_460.png',
 './drive/Colab Notebooks/DCGAN/results/one_470.png',
 './drive/Colab Notebooks/DCGAN/results/one_480.png',
 './drive/Colab Notebooks/DCGAN/results/one_490.png',
 './drive/Colab Notebooks/DCGAN/results/one_500.png',
 './drive/Colab Notebooks/DCGAN/results/one_510.png',
 './drive/Colab Notebooks/DCGAN/results/one_520.png',
 './drive/Colab Notebooks/DCGAN/results/one_530.png',
 './drive/Colab Notebooks/DCGAN/results/one_540.png',
 './drive/Colab Notebooks/DCGAN/results/one_550.png',
 './drive/Colab Notebooks/DCGAN/results/one_560.png',
 './drive/Colab Notebooks/DCGAN/results/one_570.png',
 './drive/Colab Notebooks/DCGAN/results/one_580.png',
 './drive/Colab Notebooks/DCGAN/results/one_590.png',
 './drive/Colab Notebooks/DCGAN/results/one_600.png',
 './drive/Colab Notebooks/DCGAN/results/one_610.png',
 './drive/Colab Notebooks/DCGAN/results/one_620.png',
 './drive/Colab Notebooks/DCGAN/results/one_630.png',
 './drive/Colab Notebooks/DCGAN/results/one_640.png',
 './drive/Colab Notebooks/DCGAN/results/one_650.png']
In [0]:
# Create GIF

anim_file = 'pokemon_dcgan.gif'

with imageio.get_writer(anim_file,mode='I') as writer:
  filenames = glob.glob(path2+'/'+'one_*.png')
  filenames = sorted(filenames)
  last = -1
  for i,filename in enumerate(filenames):
    frame = 2*(i**0.5)
    if round(frame) > round(last):
      last = frame
    else:
      continue
    image = imageio.imread(filename)
    writer.append_data(image)
  image = imageio.imread(filename)
  writer.append_data(image)
  
import IPython
if IPython.version_info > (6,2,0,''):
  displya.Image(filename=anim_file)
In [0]:
try:
  from google.colab import files
except ImportError:
  pass
else:
  files.download(anim_file)



Epoch 650이후 부터 흰색 이미지를 내뱉어서 모델 내부를 다시 수정해야할 것 같다.

중간 중간 결과물들을 보면 언뜻 포켓몬 처럼 생긴 캐릭터가 보이기도 한다. 




728x90
반응형