In [357]:
import cv2
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import os
import tqdm
import random
from sklearn.datasets import load_files
In [358]:
X_dog = list()
# The Naver images were of poor quality, so keep only the Google and augmented files.
for fileName in os.listdir('./shiba'):
    if fileName.startswith('google') or fileName.startswith('aug'):
        X_dog.append(fileName)
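The 'aug'-prefixed files are pre-augmented copies. The notebook doesn't show how they were generated; a minimal sketch of how such files could be produced with OpenCV (the horizontal flip here is an assumption, not the original method) might look like:

# Hypothetical sketch: write a horizontally flipped copy of each Google image
# with an 'aug_' prefix, matching the naming convention filtered for above.
for fileName in os.listdir('./shiba'):
    if not fileName.startswith('google'):
        continue
    img = cv2.imread('./shiba/' + fileName)
    if img is None:  # skip unreadable files
        continue
    flipped = cv2.flip(img, 1)  # 1 = flip around the vertical axis
    cv2.imwrite('./shiba/aug_' + fileName, flipped)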
In [359]:
len(X_dog)
Out[359]:
In [360]:
for fileName in os.listdir('./진돗개'):
    X_dog.append(fileName)
In [361]:
len(X_dog)
Out[361]:
In [362]:
n_shiba = 1160
n_jindo = 1219
In [363]:
shiba_labels = [[1, 0] for _ in range(n_shiba)]  # one-hot: Shiba
jindo_labels = [[0, 1] for _ in range(n_jindo)]  # one-hot: Jindo
In [364]:
len(shiba_labels), len(jindo_labels)
Out[364]:
In [365]:
labels = shiba_labels + jindo_labels
In [367]:
len(labels),labels[:3]
Out[367]:
In [368]:
resize_dog = list()
for dog in X_dog[:n_shiba]:
    img = cv2.imread('./shiba/' + dog, cv2.IMREAD_GRAYSCALE)
    resize = cv2.resize(img, (224, 224))
    resize_dog.append(resize)
In [369]:
len(resize_dog)
Out[369]:
In [370]:
for i in X_dog[n_shiba:]:
    img = cv2.imread('./진돗개/' + i, cv2.IMREAD_GRAYSCALE)
    resize = cv2.resize(img, (224, 224))
    resize_dog.append(resize)
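One caveat with these loops: cv2.imread returns None instead of raising when a file is unreadable, so a single broken image makes cv2.resize fail with a cryptic error mid-loop. A defensive variant of the same loading logic, as a sketch:

def load_gray_resized(path, size=224):
    # Read an image as grayscale and resize it; return None if unreadable.
    img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    if img is None:
        return None
    return cv2.resize(img, (size, size))

# Example: report broken files instead of crashing.
for i in X_dog[n_shiba:]:
    resized = load_gray_resized('./진돗개/' + i)
    if resized is None:
        print('skipping unreadable file:', i)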
In [371]:
len(resize_dog)
Out[371]:
In [372]:
# Display one sample to verify the resizing worked.
plt.imshow(resize_dog[2], cmap=plt.cm.gray)
print(labels[2])
plt.show()
In [373]:
# Shuffle images and labels together so the pairs stay aligned.
random.seed(42)  # seed the random module, which random.shuffle uses
tmp = [[x, y] for x, y in zip(resize_dog, labels)]
random.shuffle(tmp)
X_sample = [n[0] for n in tmp]
y_sample = [n[1] for n in tmp]
In [374]:
# Train / Test split: 80 : 20
train_size = np.ceil(0.8 * len(resize_dog)).astype(int)  # remaining 20% goes to the test set
X_train = X_sample[:train_size]
y_train = y_sample[:train_size]
X_test = X_sample[train_size:]
y_test = y_sample[train_size:]
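The shuffle and split above can also be done in one call with scikit-learn (already a dependency of this notebook); a sketch assuming the same 80:20 ratio and seed:

from sklearn.model_selection import train_test_split

# Equivalent shuffle-and-split in one step; random_state pins the shuffle.
X_train, X_test, y_train, y_test = train_test_split(
    resize_dog, labels, test_size=0.2, random_state=42)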
In [375]:
from keras_preprocessing.image import img_to_array
In [376]:
# img_to_array expects a single image; on this list of 2-D images it
# effectively acts as np.asarray(..., dtype='float32'), giving shape (N, 224, 224).
X_train = img_to_array(X_train)
y_train = np.array(y_train)
X_test = img_to_array(X_test)
y_test = np.array(y_test)
In [377]:
len(X_train), len(y_train), len(X_test), len(y_test)
Out[377]:
In [378]:
plt.imshow(X_test[1],cmap=plt.cm.gray)
print(y_test[1])
plt.show()
In [379]:
IMG_SIZE = 224
# Reshape to (None, 224, 224, 1) to add the single grayscale channel.
X_train = X_train.reshape(X_train.shape[0],IMG_SIZE,IMG_SIZE,1)
X_test = X_test.reshape(X_test.shape[0],IMG_SIZE,IMG_SIZE,1)
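One step the notebook skips is pixel scaling: the inputs stay as raw 0-255 values, which the BatchNormalization layers partly compensate for. An optional refinement, not part of the original run, would be:

# Scale pixel intensities from [0, 255] to [0, 1] before training.
X_train = X_train.astype('float32') / 255.0
X_test = X_test.astype('float32') / 255.0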
In [380]:
from keras import models
from keras.layers import Conv2D, MaxPooling2D,BatchNormalization,Dropout,Flatten,Dense
In [381]:
IMG_SIZE = 224
def Network(model):
    model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(IMG_SIZE, IMG_SIZE, 1)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(BatchNormalization())
    model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(BatchNormalization())
    model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(BatchNormalization())
    model.add(Conv2D(96, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(BatchNormalization())
    model.add(Conv2D(32, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(2, activation='softmax'))
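Before training, model.summary() is a quick way to check the layer shapes and parameter count that Network() produces (tmp_model here is just a throwaway instance for illustration):

# Inspect the architecture: five Conv/MaxPool/BatchNorm blocks followed by
# a dense head ending in a 2-way softmax.
tmp_model = models.Sequential()
Network(tmp_model)
tmp_model.summary()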
In [382]:
model1 = models.Sequential()
model2 = models.Sequential()
Network(model1)
Network(model2)
In [383]:
model1.compile(optimizer='rmsprop',loss='categorical_crossentropy',metrics=['accuracy'])
In [384]:
model2.compile(optimizer='Adam',loss='categorical_crossentropy',metrics=['accuracy'])
In [386]:
model_sgd = models.Sequential()
Network(model_sgd)
model_sgd.compile(optimizer='sgd',loss='categorical_crossentropy',metrics=['accuracy'])
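Passing the string 'sgd' uses the Keras defaults (learning rate 0.01, no momentum). To tune these, pass an optimizer object instead; a sketch with illustrative hyperparameters, not values from the original run:

from keras.optimizers import SGD

# Same architecture, but with an explicit learning rate and momentum
# (0.01 / 0.9 are illustrative assumptions).
model_sgd_tuned = models.Sequential()
Network(model_sgd_tuned)
model_sgd_tuned.compile(optimizer=SGD(lr=0.01, momentum=0.9),
                        loss='categorical_crossentropy',
                        metrics=['accuracy'])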
In [34]:
model1.fit(X_train, y_train, epochs=15, batch_size=20, verbose=1, validation_split=0.2)
Out[34]:
In [35]:
# optimizer: RMSprop / epochs: 15 / batch_size: 20
# Same network architecture as above
loss, acc = model1.evaluate(X_test,y_test,verbose=0)
print("Accuracy : %0.2f" % (acc*100))
In [37]:
model2.fit(X_train, y_train, epochs=15, batch_size=20, verbose=1, validation_split=0.2)
Out[37]:
In [38]:
# optimizer: Adam / epochs: 15 / batch_size: 20
# Same network architecture as model1
loss, acc = model2.evaluate(X_test,y_test,verbose=0)
print("Accuracy : %0.2f" % (acc*100))
In [216]:
model2.fit(X_train,y_train,epochs=50,batch_size=32,verbose=1,validation_split=0.2)
Out[216]:
In [219]:
# Save the model weights
model2.save_weights("epoch50")
# To restore later: model.load_weights(filename)
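Note that save_weights stores only the parameters, not the architecture, so restoring requires rebuilding the same network first. A sketch (the name `restored` is just for illustration):

# Rebuild the identical architecture, then load the saved parameters into it.
restored = models.Sequential()
Network(restored)
restored.compile(optimizer='Adam', loss='categorical_crossentropy',
                 metrics=['accuracy'])
restored.load_weights("epoch50")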
In [430]:
model2.save('epoch50_Adam')
In [431]:
from keras.models import load_model
model3 = load_model("epoch50_Adam")
In [217]:
# optimizer: Adam / epochs: 50 / batch_size: 32
# Same network architecture
loss, acc = model2.evaluate(X_test,y_test,verbose=0)
print("Accuracy : %0.2f" % (acc*100))
In [388]:
history_sgd = model_sgd.fit(X_train,y_train,epochs=50,batch_size=32,verbose=1,validation_split=0.2)
In [426]:
plt.subplot(1,2,1)
plt.plot(history_sgd.history['acc'], 'r-',label='acc')
plt.plot(history_sgd.history['loss'],'b-',label='loss')
plt.legend()
plt.subplot(1,2,2)
plt.plot(history_sgd.history['val_loss'],label='val_loss')
plt.plot(history_sgd.history['val_acc'],label='val_acc')
plt.ylim(0.4,3)
plt.legend()
plt.show()
In [427]:
loss, acc = model_sgd.evaluate(X_test,y_test,verbose=0)
print("Accuracy : %0.2f" % (acc*100))
In [428]:
model_sgd.save('epoch50_sgd')