In [2]:
import cv2
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import os
import tqdm
import random
from sklearn.datasets import load_files
In [170]:
X_dog = list()
# Naver-crawled images are unreliable, so only keep the files crawled from Google.
for fileName in os.listdir('./shiba'):
    if fileName.startswith('google'):  # or fileName.startswith('aug'):
        X_dog.append(fileName)
In [171]:
len(X_dog)
Out[171]:
In [172]:
# './비글' is the Beagle image folder (the second class in this first experiment).
for fileName in os.listdir('./비글'):
    X_dog.append(fileName)
In [173]:
len(X_dog)
Out[173]:
In [175]:
n_shiba = 183   # number of Shiba images kept above
n_jindo = 195   # number of images in './비글'
In [176]:
shiba_labels = [[1, 0] for _ in range(n_shiba)]
jindo_labels = [[0, 1] for _ in range(n_jindo)]
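The [1, 0] / [0, 1] pairs are one-hot labels for the two classes. As an aside (my sketch, not part of the original notebook), the same arrays could be built from integer class ids with Keras' built-in helper:

# Hypothetical alternative: integer class ids 0 and 1 converted to one-hot vectors.
from keras.utils import to_categorical

int_labels = [0] * n_shiba + [1] * n_jindo
one_hot = to_categorical(int_labels, num_classes=2)  # shape (n_shiba + n_jindo, 2)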
In [177]:
len(shiba_labels), len(jindo_labels)
Out[177]:
In [178]:
labels = shiba_labels + jindo_labels
In [179]:
len(labels),labels[:3]
Out[179]:
In [180]:
resize_dog = list()
# Load each Shiba image as grayscale, resize to 224x224, and min-max normalize to the 0-255 range.
for dog in X_dog[:n_shiba]:
    img = cv2.imread('./shiba/' + dog, cv2.IMREAD_GRAYSCALE)
    resize = cv2.resize(img, (224, 224))
    resize = cv2.normalize(resize, resize, 0, 255, cv2.NORM_MINMAX)
    resize_dog.append(resize)
In [181]:
len(resize_dog)
Out[181]:
In [183]:
# Same preprocessing for the second class ('./비글').
for i in X_dog[n_shiba:]:
    img = cv2.imread('./비글/' + i, cv2.IMREAD_GRAYSCALE)
    resize = cv2.resize(img, (224, 224))
    resize = cv2.normalize(resize, resize, 0, 255, cv2.NORM_MINMAX)
    resize_dog.append(resize)
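Since the two loops above repeat the same load/resize/normalize steps, a small helper could remove the duplication. This is a sketch with a hypothetical name (load_gray_resized) and an added guard for unreadable files:

def load_gray_resized(path, size=224):
    # Read as grayscale, resize, and min-max normalize to the 0-255 range.
    img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    if img is None:  # cv2.imread returns None for unreadable files
        return None
    resized = cv2.resize(img, (size, size))
    return cv2.normalize(resized, None, 0, 255, cv2.NORM_MINMAX)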
In [184]:
len(resize_dog)
Out[184]:
In [185]:
# Display one image to check that preprocessing worked.
plt.imshow(resize_dog[2], cmap=plt.cm.gray)
print(labels[2])
plt.show()
In [186]:
# Shuffle images and labels together so the class order is mixed.
random.seed(42)  # seed the random module (random.shuffle ignores np.random.seed)
tmp = [[x, y] for x, y in zip(resize_dog, labels)]
random.shuffle(tmp)
X_sample = [n[0] for n in tmp]
y_sample = [n[1] for n in tmp]
In [187]:
# Train / Test split, 80 : 20
train_size = np.ceil(0.8 * len(resize_dog)).astype(int)  # 80% for training, the rest for testing
X_train = X_sample[:train_size]
y_train = y_sample[:train_size]
X_test = X_sample[train_size:]
y_test = y_sample[train_size:]
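As an aside (not the author's step), sklearn's train_test_split could shuffle and split in a single call; this sketch assumes the same 80/20 ratio, a fixed random_state for reproducibility, and hypothetical variable names:

from sklearn.model_selection import train_test_split

# Shuffle and split in one step; stratification by class could also be added.
X_tr, X_te, y_tr, y_te = train_test_split(
    resize_dog, labels, test_size=0.2, random_state=42)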
In [188]:
from keras_preprocessing.image import img_to_array
In [189]:
# img_to_array is meant for a single image, but passing the whole list here
# returns a float32 array of shape (num_images, 224, 224), which is what we need.
X_train = img_to_array(X_train)
y_train = np.array(y_train)
X_test = img_to_array(X_test)
y_test = np.array(y_test)
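For reference (my aside, not the author's step), the same conversion could be done with plain NumPy on the original lists:

# Hypothetical equivalent: convert the Python lists straight to float32 arrays.
X_train_arr = np.asarray(X_sample[:train_size], dtype='float32')
X_test_arr = np.asarray(X_sample[train_size:], dtype='float32')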
In [190]:
len(X_train), len(y_train), len(X_test), len(y_test)
Out[190]:
In [191]:
plt.imshow(X_test[1],cmap=plt.cm.gray)
print(y_test[1])
plt.show()
In [192]:
IMG_SIZE = 224
# Reshape into (None, 224, 224, 1) so Conv2D gets an explicit channel dimension.
X_train = X_train.reshape(X_train.shape[0], IMG_SIZE, IMG_SIZE, 1)
X_test = X_test.reshape(X_test.shape[0], IMG_SIZE, IMG_SIZE, 1)
In [193]:
from keras import models
from keras.layers import Conv2D, MaxPooling2D,BatchNormalization,Dropout,Flatten,Dense
In [197]:
IMG_SIZE = 224
def Network(model):
    # Five Conv -> MaxPool -> BatchNorm blocks followed by a small dense classifier.
    model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(IMG_SIZE, IMG_SIZE, 1)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(BatchNormalization())
    model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(BatchNormalization())
    model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(BatchNormalization())
    model.add(Conv2D(96, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(BatchNormalization())
    model.add(Conv2D(32, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(BatchNormalization())
    model.add(Dropout(0.7))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.7))
    model.add(Dense(2, activation='softmax'))  # two-class softmax output
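To sanity-check the architecture (not shown in the original), the layer output shapes and parameter counts could be inspected right after building:

# Hypothetical check: build a throwaway model and print its layer summary.
check_model = models.Sequential()
Network(check_model)
check_model.summary()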
In [198]:
from keras.callbacks import EarlyStopping
early_stopping = EarlyStopping(patience=5)  # stop when val_loss has not improved for 5 epochs
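Depending on the Keras version (an assumption; 2.2.3 or later), the callback can also roll back to the weights from the best epoch:

# Hypothetical variant: keep the weights from the epoch with the lowest val_loss.
early_stopping = EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)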
In [199]:
from keras import optimizers
model_early_sgd = models.Sequential()
Network(model_early_sgd)
model_early_sgd.compile(loss='categorical_crossentropy',
                        optimizer=optimizers.SGD(lr=0.01, momentum=0.9),
                        metrics=['accuracy'])
In [200]:
len(X_train), len(y_train)
Out[200]:
In [201]:
history_early = model_early_sgd.fit(X_train,y_train,epochs=50,batch_size=32,verbose=1,validation_split=0.2,callbacks=[early_stopping])
In [203]:
loss, acc = model_early_sgd.evaluate(X_test,y_test,verbose=0)
print("Accuracy : %0.2f" % (acc*100))
In [208]:
plt.subplot(1,2,1)
plt.plot(history_early.history['val_loss'],'r-',label='val_loss')
plt.legend()
plt.subplot(1,2,2)
plt.plot(history_early.history['val_acc'],label='val_acc')
plt.legend()
plt.show()
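Plotting the training curves next to the validation curves makes over- or under-fitting easier to spot. This is a small extension of the plot above, assuming the old Keras history keys 'loss' and 'acc':

# Hypothetical: compare training and validation curves side by side.
plt.figure(figsize=(10, 4))
plt.subplot(1, 2, 1)
plt.plot(history_early.history['loss'], label='loss')
plt.plot(history_early.history['val_loss'], 'r-', label='val_loss')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(history_early.history['acc'], label='acc')
plt.plot(history_early.history['val_acc'], label='val_acc')
plt.legend()
plt.show()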
In [3]:
X_dog = list()
# Naver-crawled images are unreliable, so only keep the Google-crawled and augmented files.
for fileName in os.listdir('./shiba'):
    if fileName.startswith('google') or fileName.startswith('aug'):
        X_dog.append(fileName)
In [4]:
len(X_dog)
Out[4]:
In [5]:
# './진돗개2' is the Jindo image folder.
for fileName in os.listdir('./진돗개2'):
    X_dog.append(fileName)
In [6]:
len(X_dog)
Out[6]:
In [7]:
1865 - 915  # total files minus Shiba images = number of Jindo images
Out[7]:
In [8]:
n_shiba = 915   # Shiba images (google + augmented)
n_jindo = 950   # Jindo images in './진돗개2'
In [9]:
shiba_labels = [[1, 0] for _ in range(n_shiba)]
jindo_labels = [[0, 1] for _ in range(n_jindo)]
In [10]:
len(shiba_labels), len(jindo_labels)
Out[10]:
In [11]:
labels = shiba_labels + jindo_labels
In [12]:
len(labels),labels[:3]
Out[12]:
In [13]:
resize_dog = list()
# Load each Shiba image as grayscale, resize to 224x224, and min-max normalize to the 0-255 range.
for dog in X_dog[:n_shiba]:
    img = cv2.imread('./shiba/' + dog, cv2.IMREAD_GRAYSCALE)
    resize = cv2.resize(img, (224, 224))
    resize = cv2.normalize(resize, resize, 0, 255, cv2.NORM_MINMAX)
    resize_dog.append(resize)
In [14]:
len(resize_dog)
Out[14]:
In [15]:
# Same preprocessing for the Jindo images.
for i in X_dog[n_shiba:]:
    img = cv2.imread('./진돗개2/' + i, cv2.IMREAD_GRAYSCALE)
    resize = cv2.resize(img, (224, 224))
    resize = cv2.normalize(resize, resize, 0, 255, cv2.NORM_MINMAX)
    resize_dog.append(resize)
In [16]:
len(resize_dog)
Out[16]:
In [17]:
# Display one image to check that preprocessing worked.
plt.imshow(resize_dog[2], cmap=plt.cm.gray)
print(labels[2])
plt.show()
In [18]:
# Shuffle images and labels together so the class order is mixed.
random.seed(42)  # seed the random module (random.shuffle ignores np.random.seed)
tmp = [[x, y] for x, y in zip(resize_dog, labels)]
random.shuffle(tmp)
X_sample = [n[0] for n in tmp]
y_sample = [n[1] for n in tmp]
In [19]:
# Train / Test split, 80 : 20
train_size = np.ceil(0.8 * len(resize_dog)).astype(int)  # 80% for training, the rest for testing
X_train = X_sample[:train_size]
y_train = y_sample[:train_size]
X_test = X_sample[train_size:]
y_test = y_sample[train_size:]
In [20]:
from keras_preprocessing.image import img_to_array
In [21]:
X_train = img_to_array(X_train)
y_train = np.array(y_train)
X_test = img_to_array(X_test)
y_test = np.array(y_test)
In [22]:
len(X_train), len(y_train), len(X_test), len(y_test)
Out[22]:
In [23]:
plt.imshow(X_test[1],cmap=plt.cm.gray)
print(y_test[1])
plt.show()
In [24]:
IMG_SIZE = 224
# Reshape into (None, 224, 224, 1) so Conv2D gets an explicit channel dimension.
X_train = X_train.reshape(X_train.shape[0], IMG_SIZE, IMG_SIZE, 1)
X_test = X_test.reshape(X_test.shape[0], IMG_SIZE, IMG_SIZE, 1)
In [25]:
X_train.shape, X_test.shape
Out[25]:
In [26]:
from keras import models
from keras.layers import Conv2D, MaxPooling2D,BatchNormalization,Dropout,Flatten,Dense
In [27]:
IMG_SIZE = 224
def Network(model):
    # Five Conv -> MaxPool -> BatchNorm blocks followed by a small dense classifier.
    model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(IMG_SIZE, IMG_SIZE, 1)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(BatchNormalization())
    model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(BatchNormalization())
    model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(BatchNormalization())
    model.add(Conv2D(96, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(BatchNormalization())
    model.add(Conv2D(32, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(BatchNormalization())
    model.add(Dropout(0.7))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.7))
    model.add(Dense(2, activation='softmax'))  # two-class softmax output
In [66]:
from keras.callbacks import EarlyStopping
early_stopping = EarlyStopping(patience = 10)
In [45]:
from keras import optimizers
model_early_sgd = models.Sequential()
Network(model_early_sgd)
model_early_sgd.compile(loss='categorical_crossentropy',
                        optimizer=optimizers.SGD(lr=0.01, momentum=0.9),
                        metrics=['accuracy'])
In [33]:
len(X_train), len(y_train)
Out[33]:
In [47]:
history_sgd_early = model_early_sgd.fit(X_train,y_train,epochs=30,batch_size=32,verbose=1,validation_split=0.2,callbacks=[early_stopping])
In [48]:
loss, acc = model_early_sgd.evaluate(X_test,y_test,verbose=0)
print("Accuracy : %0.2f" % (acc*100))
In [49]:
plt.subplot(1,2,1)
plt.plot(history_sgd_early.history['val_loss'],'r-',label='val_loss')
plt.legend()
plt.subplot(1,2,2)
plt.plot(history_sgd_early.history['val_acc'],label='val_acc')
plt.legend()
plt.show()
In [50]:
model_early_sgd.save('model_early_sgd30')  # saves architecture, weights, and optimizer state so training can resume
In [53]:
from keras.models import load_model
model_early_sgd100 = load_model("model_early_sgd30")
In [67]:
# Continue for up to 100 more epochs after the first 30.
# The train/val loss and accuracy curves suggest more epochs should improve performance.
history_sgd_early100 = model_early_sgd100.fit(X_train,y_train,epochs=100,batch_size=32,verbose=1,validation_split=0.2,callbacks=[early_stopping])
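A small aside (my assumption, not the author's code): fit's initial_epoch argument would keep the epoch counter continuous across the two training runs, so logs and curves line up with the first 30 epochs:

# Hypothetical: resume from epoch 30 instead of restarting the count at 0.
history_resumed = model_early_sgd100.fit(X_train, y_train, initial_epoch=30, epochs=130,
                                         batch_size=32, validation_split=0.2,
                                         callbacks=[early_stopping])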
In [68]:
loss, acc = model_early_sgd100.evaluate(X_test,y_test,verbose=0)
print("Accuracy : %0.2f" % (acc*100))
In [69]:
plt.subplot(1,2,1)
plt.plot(history_sgd_early100.history['val_loss'],'r-',label='val_loss')
plt.legend()
plt.subplot(1,2,2)
plt.plot(history_sgd_early100.history['val_acc'],label='val_acc')
plt.legend()
plt.show()
In [70]:
from keras import optimizers
model_early_ada = models.Sequential()
Network(model_early_ada)
model_early_ada.compile(loss='categorical_crossentropy',
                        optimizer=optimizers.Adadelta(lr=0.1),
                        metrics=['accuracy'])
In [71]:
history_early = model_early_ada.fit(X_train,y_train,epochs=50,batch_size=32,verbose=1,validation_split=0.2,callbacks=[early_stopping])
In [72]:
loss, acc = model_early_ada.evaluate(X_test,y_test,verbose=0)
print("Accuracy : %0.2f" % (acc*100))
In [73]:
plt.subplot(1,2,1)
plt.plot(history_early.history['val_loss'],'r-',label='val_loss')
plt.legend()
plt.subplot(1,2,2)
plt.plot(history_early.history['val_acc'],label='val_acc')
plt.legend()
plt.show()
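As a closing usage sketch (not in the original), a single test image could be classified by taking the argmax of the softmax output; with the labels defined above, class 0 corresponds to Shiba and class 1 to Jindo:

# Hypothetical single-image prediction with the Adadelta model.
probs = model_early_ada.predict(X_test[:1])   # shape (1, 2) softmax probabilities
pred_class = np.argmax(probs, axis=1)[0]      # 0 = Shiba, 1 = Jindo
print('predicted:', 'Shiba' if pred_class == 0 else 'Jindo', probs)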