[Paper Reading] 08-1. WGAN MNIST with Keras

From GAN to WGAN: Implementation with Keras

In [63]:
from keras.models import *
from keras.layers import *
from keras.optimizers import *
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import cv2, PIL

1. View the MNIST Dataset (Quick Look)

In [7]:
(X_train,y_train),(X_test,y_test) = tf.keras.datasets.mnist.load_data()
In [ ]:
X_train = X_train.astype(np.float32).reshape(-1, 28,28) / 255.0
X_test = X_test.astype(np.float32).reshape(-1, 28,28) / 255.0

y_train = y_train.astype(np.int32)
y_test = y_test.astype(np.int32)
In [9]:
# Take the validation split first, then remove those samples from the training set
X_valid = X_train[:5000]
X_train = X_train[5000:]

y_valid = y_train[:5000]
y_train = y_train[5000:]
In [16]:
# Print a sample image with its label
plt.rcParams['font.family'] = 'NanumBarunGothic'

plt.imshow(X_train[1])
plt.title("The number is : {0}".format(y_train[1]))
plt.show()
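As a slightly broader sanity check, a few digits can be shown side by side (a small addition, not in the original post):

In [ ]:
# Show the first five training digits with their labels
fig, axs = plt.subplots(1, 5, figsize=(10, 2))
for i in range(5):
    axs[i].imshow(X_train[i], cmap='gray')
    axs[i].set_title("Label: {0}".format(y_train[i]))
    axs[i].axis('off')
plt.show()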

2. Build Network
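Compared with a vanilla GAN, the WGAN recipe below changes three things: the discriminator becomes a critic trained with the Wasserstein loss, the critic's weights are clipped to [-0.01, 0.01] to keep it roughly Lipschitz, and the critic is updated n_critic = 5 times per generator step using RMSprop (lr = 0.00005). To see why the loss works with the labels used later (valid = -1, fake = +1), here is a minimal numpy sketch (illustrative only, with made-up score values):

In [ ]:
import numpy as np

def wasserstein_loss(y_true, y_pred):
    # Same formula as WGAN.wasserstein_loss below, in plain numpy
    return np.mean(y_true * y_pred)

real_scores = np.array([0.8, 0.9])  # critic scores on real images (made up)
fake_scores = np.array([0.1, 0.2])  # critic scores on generated images (made up)

# With y_true = -1 on real and +1 on fake, the total critic loss is
# -mean(D(real)) + mean(D(fake)); minimizing it maximizes
# E[D(real)] - E[D(fake)], an estimate of the Wasserstein distance
critic_loss = wasserstein_loss(-np.ones(2), real_scores) + wasserstein_loss(np.ones(2), fake_scores)
print(critic_loss)  # -0.7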

In [1]:
from __future__ import print_function, division

from keras.datasets import mnist
from keras.layers import Input, Dense, Reshape, Flatten, Dropout
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import RMSprop

import keras.backend as K

import matplotlib.pyplot as plt

import os
import sys

import numpy as np

class WGAN():
    def __init__(self):
        self.img_rows = 28
        self.img_cols = 28
        self.channels = 1
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
        self.latent_dim = 100

        # The following parameters and optimizer are set as recommended in the paper
        self.n_critic = 5
        self.clip_value = 0.01
        optimizer = RMSprop(lr=0.00005)

        # Build and compile the critic
        self.critic = self.build_critic()
        self.critic.compile(loss=self.wasserstein_loss,
            optimizer=optimizer,
            metrics=['accuracy'])

        # Build the generator
        self.generator = self.build_generator()

        # The generator takes noise as input and generates images
        z = Input(shape=(self.latent_dim,))
        img = self.generator(z)

        # For the combined model we will only train the generator
        self.critic.trainable = False

        # The critic takes generated images as input and determines validity
        valid = self.critic(img)

        # The combined model  (stacked generator and critic)
        self.combined = Model(z, valid)
        self.combined.compile(loss=self.wasserstein_loss,
            optimizer=optimizer,
            metrics=['accuracy'])

    def wasserstein_loss(self, y_true, y_pred):
        # With the labels used in train() (valid = -1, fake = +1), minimizing
        # mean(y_true * y_pred) trains the critic to maximize
        # E[D(real)] - E[D(fake)], the WGAN critic objective
        return K.mean(y_true * y_pred)

    def build_generator(self):

        model = Sequential()

        model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
        model.add(Reshape((7, 7, 128)))
        model.add(UpSampling2D())
        model.add(Conv2D(128, kernel_size=4, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        model.add(UpSampling2D())
        model.add(Conv2D(64, kernel_size=4, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        model.add(Conv2D(self.channels, kernel_size=4, padding="same"))
        model.add(Activation("tanh"))

        model.summary()

        noise = Input(shape=(self.latent_dim,))
        img = model(noise)

        return Model(noise, img)

    def build_critic(self):

        model = Sequential()

        model.add(Conv2D(16, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(32, kernel_size=3, strides=2, padding="same"))
        model.add(ZeroPadding2D(padding=((0,1),(0,1))))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(128, kernel_size=3, strides=1, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Flatten())
        # Linear output (no sigmoid): the critic scores realness on an unbounded scale
        model.add(Dense(1))

        model.summary()

        img = Input(shape=self.img_shape)
        validity = model(img)

        return Model(img, validity)

    def train(self, epochs, batch_size=128, sample_interval=50):

        # Load the dataset
        (X_train, _), (_, _) = mnist.load_data()

        # Rescale -1 to 1
        X_train = (X_train.astype(np.float32) - 127.5) / 127.5
        X_train = np.expand_dims(X_train, axis=3)

        # Adversarial ground truths
        valid = -np.ones((batch_size, 1))
        fake = np.ones((batch_size, 1))

        for epoch in range(epochs):

            for _ in range(self.n_critic):

                # ---------------------
                #  Train Discriminator
                # ---------------------

                # Select a random batch of images
                idx = np.random.randint(0, X_train.shape[0], batch_size)
                imgs = X_train[idx]
                
                # Sample noise as generator input
                noise = np.random.normal(0, 1, (batch_size, self.latent_dim))

                # Generate a batch of new images
                gen_imgs = self.generator.predict(noise)

                # Train the critic
                d_loss_real = self.critic.train_on_batch(imgs, valid)
                d_loss_fake = self.critic.train_on_batch(gen_imgs, fake)
                d_loss = 0.5 * np.add(d_loss_fake, d_loss_real)

                # Clip critic weights to [-c, c] to keep the critic (approximately) Lipschitz
                for l in self.critic.layers:
                    weights = l.get_weights()
                    weights = [np.clip(w, -self.clip_value, self.clip_value) for w in weights]
                    l.set_weights(weights)


            # ---------------------
            #  Train Generator
            # ---------------------

            g_loss = self.combined.train_on_batch(noise, valid)

            # Print progress (as 1 - loss, so values near 1 mean raw losses near 0)
            print ("%d [D loss: %f] [G loss: %f]" % (epoch, 1 - d_loss[0], 1 - g_loss[0]))

            # If at save interval => save generated image samples
            if epoch % sample_interval == 0:
                self.sample_images(epoch)

    def sample_images(self, epoch):
        r, c = 5, 5
        noise = np.random.normal(0, 1, (r * c, self.latent_dim))
        gen_imgs = self.generator.predict(noise)

        # Rescale images 0 - 1
        gen_imgs = 0.5 * gen_imgs + 0.5

        fig, axs = plt.subplots(r, c)
        cnt = 0
        for i in range(r):
            for j in range(c):
                axs[i,j].imshow(gen_imgs[cnt, :,:,0], cmap='gray')
                axs[i,j].axis('off')
                cnt += 1
        os.makedirs("images", exist_ok=True)  # make sure the save directory exists
        fig.savefig("images/mnist_%d.png" % epoch)
        plt.close()


if __name__ == '__main__':
    wgan = WGAN()
    wgan.train(epochs=4000, batch_size=32, sample_interval=50)
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d_1 (Conv2D)            (None, 14, 14, 16)        160       
_________________________________________________________________
leaky_re_lu_1 (LeakyReLU)    (None, 14, 14, 16)        0         
_________________________________________________________________
dropout_1 (Dropout)          (None, 14, 14, 16)        0         
_________________________________________________________________
conv2d_2 (Conv2D)            (None, 7, 7, 32)          4640      
_________________________________________________________________
zero_padding2d_1 (ZeroPaddin (None, 8, 8, 32)          0         
_________________________________________________________________
batch_normalization_1 (Batch (None, 8, 8, 32)          128       
_________________________________________________________________
leaky_re_lu_2 (LeakyReLU)    (None, 8, 8, 32)          0         
_________________________________________________________________
dropout_2 (Dropout)          (None, 8, 8, 32)          0         
_________________________________________________________________
conv2d_3 (Conv2D)            (None, 4, 4, 64)          18496     
_________________________________________________________________
batch_normalization_2 (Batch (None, 4, 4, 64)          256       
_________________________________________________________________
leaky_re_lu_3 (LeakyReLU)    (None, 4, 4, 64)          0         
_________________________________________________________________
dropout_3 (Dropout)          (None, 4, 4, 64)          0         
_________________________________________________________________
conv2d_4 (Conv2D)            (None, 4, 4, 128)         73856     
_________________________________________________________________
batch_normalization_3 (Batch (None, 4, 4, 128)         512       
_________________________________________________________________
leaky_re_lu_4 (LeakyReLU)    (None, 4, 4, 128)         0         
_________________________________________________________________
dropout_4 (Dropout)          (None, 4, 4, 128)         0         
_________________________________________________________________
flatten_1 (Flatten)          (None, 2048)              0         
_________________________________________________________________
dense_1 (Dense)              (None, 1)                 2049      
=================================================================
Total params: 100,097
Trainable params: 99,649
Non-trainable params: 448
_________________________________________________________________
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
dense_2 (Dense)              (None, 6272)              633472    
_________________________________________________________________
reshape_1 (Reshape)          (None, 7, 7, 128)         0         
_________________________________________________________________
up_sampling2d_1 (UpSampling2 (None, 14, 14, 128)       0         
_________________________________________________________________
conv2d_5 (Conv2D)            (None, 14, 14, 128)       262272    
_________________________________________________________________
batch_normalization_4 (Batch (None, 14, 14, 128)       512       
_________________________________________________________________
activation_1 (Activation)    (None, 14, 14, 128)       0         
_________________________________________________________________
up_sampling2d_2 (UpSampling2 (None, 28, 28, 128)       0         
_________________________________________________________________
conv2d_6 (Conv2D)            (None, 28, 28, 64)        131136    
_________________________________________________________________
batch_normalization_5 (Batch (None, 28, 28, 64)        256       
_________________________________________________________________
activation_2 (Activation)    (None, 28, 28, 64)        0         
_________________________________________________________________
conv2d_7 (Conv2D)            (None, 28, 28, 1)         1025      
_________________________________________________________________
activation_3 (Activation)    (None, 28, 28, 1)         0         
=================================================================
Total params: 1,028,673
Trainable params: 1,028,289
Non-trainable params: 384
_________________________________________________________________
/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/keras/engine/training.py:490: UserWarning: Discrepancy between trainable weights and collected trainable weights, did you set `model.trainable` without calling `model.compile` after ?
  'Discrepancy between trainable weights and collected trainable'

0 [D loss: 0.999909] [G loss: 1.000171]
1 [D loss: 0.999919] [G loss: 1.000168]
2 [D loss: 0.999919] [G loss: 1.000177]
3 [D loss: 0.999921] [G loss: 1.000172]
4 [D loss: 0.999924] [G loss: 1.000174]
...
3996 [D loss: 0.999952] [G loss: 1.000040]
3997 [D loss: 0.999928] [G loss: 1.000043]
3998 [D loss: 0.999947] [G loss: 0.999993]
3999 [D loss: 0.999948] [G loss: 1.000094]
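Note that the progress line prints 1 - loss, so values hovering near 1 just mean the raw Wasserstein losses are near 0. To probe the trained critic directly, one can compare its mean scores on real and generated batches (a hedged sketch, not in the original post; it assumes wgan and the section-1 X_train are still in scope):

In [ ]:
# Estimate the Wasserstein distance as mean critic score on real images
# minus mean score on generated ones
noise = np.random.normal(0, 1, (64, wgan.latent_dim))
fake_imgs = wgan.generator.predict(noise)

idx = np.random.randint(0, X_train.shape[0], 64)
real_imgs = np.expand_dims(X_train[idx] * 2 - 1, axis=3)  # rescale [0,1] -> [-1,1], add channel dim

w_estimate = wgan.critic.predict(real_imgs).mean() - wgan.critic.predict(fake_imgs).mean()
print("Wasserstein estimate: %.4f" % w_estimate)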

In [177]:
import os
path2 = os.getcwd()
path2
Out[177]:
'/Users/charming/Python/0_Paper_Review/08. From GAN to WGAN'
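The next cell references f (a list of sample image paths) and path, both defined in cells omitted from the post. A plausible reconstruction (an assumption, not the original code):

In [ ]:
import glob

# Hypothetical reconstruction: collect the samples saved by sample_images(),
# sorted numerically by epoch so the GIF plays in training order
# (80 files for 4000 epochs at sample_interval = 50)
f = sorted(glob.glob('images/mnist_*.png'), key=lambda p: int(p.split('_')[-1][:-4]))
path = path2 + '/'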
In [180]:
import imageio

generated_image_array = [imageio.imread(generated_image) for generated_image in f]
imageio.mimsave(path + 'WGAN_MNIST2.gif', generated_image_array, fps=5)
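To preview the result inline (optional; assumes a Jupyter environment):

In [ ]:
from IPython.display import Image

# Render the animated GIF inside the notebook
Image(filename=path + 'WGAN_MNIST2.gif')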

[GIF: WGAN_MNIST2.gif, generated MNIST samples over the course of training]
The GIF above shows how the samples generated by the WGAN evolve over training.


The images do not come out any sharper than DCGAN's, so the model likely needs another look.



