반응형
lenet-5-using-keras-99-2
In [1]:
import gc
import numpy as np 
import pandas as pd
import matplotlib.pyplot as plt

# cross-validation / split utilities
from sklearn.model_selection import StratifiedKFold,train_test_split
from tqdm import tqdm_notebook

# model-building libraries
from keras.datasets import mnist
from keras.utils.np_utils import to_categorical
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential, Model
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
from keras.layers import Dense, Dropout, Flatten, Activation, Conv2D, AveragePooling2D
from keras import layers
from keras.optimizers import Adam,RMSprop

# pre-trained application models (not used further in this notebook)
from keras.applications import VGG16, VGG19, resnet50

# suppress warning messages
import warnings
warnings.filterwarnings(action='ignore')
Using TensorFlow backend.

LeNet-5

  • 최초로 산업에 성공적으로 적용된 CNN 모델입니다.

data load

In [2]:
import os

# Kaggle digit-recognizer competition input directory.
datapath = '../input/digit-recognizer'
# The original cell called os.listdir() on a non-final line, so its return
# value was silently discarded; print it so the directory contents are shown.
print(os.listdir(datapath))
In [3]:
# Load the Kaggle training set: one 'label' column + 784 pixel columns.
train = pd.read_csv(f'{datapath}/train.csv')
print(train.shape)
train.head()
(42000, 785)
Out[3]:
label pixel0 pixel1 pixel2 pixel3 pixel4 pixel5 pixel6 pixel7 pixel8 ... pixel774 pixel775 pixel776 pixel777 pixel778 pixel779 pixel780 pixel781 pixel782 pixel783
0 1 0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0 0 0
1 0 0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0 0 0
2 1 0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0 0 0
3 4 0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0 0 0
4 0 0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0 0 0

5 rows × 785 columns

In [4]:
# Load the unlabeled test set: 784 pixel columns, no 'label'.
test = pd.read_csv(f'{datapath}/test.csv')
print(test.shape)
test.head()
(28000, 784)
Out[4]:
pixel0 pixel1 pixel2 pixel3 pixel4 pixel5 pixel6 pixel7 pixel8 pixel9 ... pixel774 pixel775 pixel776 pixel777 pixel778 pixel779 pixel780 pixel781 pixel782 pixel783
0 0 0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0 0 0
1 0 0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0 0 0
2 0 0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0 0 0
3 0 0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0 0 0
4 0 0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0 0 0

5 rows × 784 columns

In [5]:
# Separate the target column, then convert the pixel columns to float32 arrays.
train_labels = train['label']
train = train.iloc[:, 1:].to_numpy(dtype='float32')
test = test.to_numpy(dtype='float32')
In [6]:
#Visualizing the data
# Sanity check: render one training example and print its label.
sample_idx = 10
digit = train[sample_idx].reshape(28, 28)
plt.imshow(digit, cmap='gray')
plt.show()
print('label : ', train_labels[sample_idx])
label :  8

Preprocessing

  • LeNet-1 모델은 28x28의 이미지를 사용했습니다.
  • LeNet-5에서는 MNIST의 28x28 입력 영상을 32x32 이미지의 중심에 배치하여 처리하였습니다. 입력 크기를 키우면 획의 끝점 같은 세부 특징이 수용 영역의 중심부에 더 잘 포착되어 작은 입력을 쓸 때보다 성능이 향상되었습니다.
In [7]:
# Reshape the flat 784-pixel rows into 28x28x1 images.  Using -1 for the
# sample axis generalizes this cell to any number of rows instead of the
# hard-coded 42000/28000 counts.
train = train.reshape(-1, 28, 28, 1)
test = test.reshape(-1, 28, 28, 1)

# LeNet-5 expects 32x32 inputs: zero-pad 2 pixels on each spatial edge
# (pad only the height/width axes, not samples or channels).
train = np.pad(train, ((0, 0), (2, 2), (2, 2), (0, 0)), 'constant')
test = np.pad(test, ((0, 0), (2, 2), (2, 2), (0, 0)), 'constant')

print('train shape : ', train.shape)
print('test shape : ', test.shape)
train shape :  (42000, 32, 32, 1)
test shape :  (28000, 32, 32, 1)
In [8]:
# int64 -> float32 ,  scaling
# Scale pixel intensities from [0, 255] down to [0, 1].
train = train.astype('float32')/255
test = test.astype('float32')/255

# Hold out 20% for validation.  Stratifying on the labels keeps the digit
# class frequencies identical in both splits, which a purely random split
# does not guarantee.
X_train, X_val, y_train, y_val = train_test_split(
    train, train_labels, test_size=0.20, random_state=42,
    stratify=train_labels)

print('X_train shape : ', X_train.shape)
print('X_val shape : ', X_val.shape)
print('y_train : ', y_train.shape)
print('y_val : ', y_val.shape)

# One-hot encode the labels for categorical_crossentropy.
y_train = to_categorical(y_train)
y_val = to_categorical(y_val)
print('y_train_to_categorical : ', y_train.shape)
print('y_val_to_categorical : ', y_val.shape)
X_train shape :  (33600, 32, 32, 1)
X_val shape :  (8400, 32, 32, 1)
y_train :  (33600,)
y_val :  (8400,)
y_train_to_categorical :  (33600, 10)
y_val_to_categorical :  (8400, 10)

Model

  • [32x32x1] INPUT
  • [28x28x6] CONV1: 6 5x5 filters at stride 1, pad 0
  • [14x14x6] Average POOL1: 2x2 filters at stride 2
  • [10x10x16] CONV2: 16 5x5 filters at stride 1, pad 0
  • [5x5x16] Average POOL2: 2x2 filters at stride 2
  • [120] FC6: 120 neurons
  • [84] FC7: 84 neurons
  • [10] FC8: 10 neurons (class scores)
  • LeNet-5모델은 Non-overlapping pooling을 사용했다. image
In [9]:
#lenet-5 model
# LeNet-5: two conv/average-pool stages followed by three dense layers.
# (ReLU replaces the original tanh activations; softmax output over the
# 10 digit classes.)
model = Sequential([
    # C1: 32x32x1 -> 28x28x6
    layers.Conv2D(filters=6, kernel_size=(5, 5), strides=1,
                  activation='relu', input_shape=(32, 32, 1)),
    # S2: 28x28x6 -> 14x14x6
    AveragePooling2D(pool_size=2, strides=2),
    # C3: 14x14x6 -> 10x10x16
    layers.Conv2D(filters=16, kernel_size=(5, 5), strides=1,
                  activation='relu'),
    # S4: 10x10x16 -> 5x5x16
    AveragePooling2D(pool_size=2, strides=2),
    # Flatten to a 400-vector, then FC 120 -> 84 -> 10.
    layers.Flatten(),
    layers.Dense(120, activation='relu'),
    layers.Dense(84, activation='relu'),
    layers.Dense(10, activation='softmax'),
])

# compile
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()
Model: "sequential_1"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d_1 (Conv2D)            (None, 28, 28, 6)         156       
_________________________________________________________________
average_pooling2d_1 (Average (None, 14, 14, 6)         0         
_________________________________________________________________
conv2d_2 (Conv2D)            (None, 10, 10, 16)        2416      
_________________________________________________________________
average_pooling2d_2 (Average (None, 5, 5, 16)          0         
_________________________________________________________________
flatten_1 (Flatten)          (None, 400)               0         
_________________________________________________________________
dense_1 (Dense)              (None, 120)               48120     
_________________________________________________________________
dense_2 (Dense)              (None, 84)                10164     
_________________________________________________________________
dense_3 (Dense)              (None, 10)                850       
=================================================================
Total params: 61,706
Trainable params: 61,706
Non-trainable params: 0
_________________________________________________________________

Train and predict

In [10]:
# Light augmentation: small random rotations, zooms, and shifts expand the
# effective training set while keeping the digits readable.
augment_args = dict(
    rotation_range=10,
    zoom_range=0.10,
    width_shift_range=0.1,
    height_shift_range=0.1,
)
datagen = ImageDataGenerator(**augment_args)
In [11]:
patient = 4
callbacks_list = [
    ReduceLROnPlateau(
        monitor = 'val_loss', 
        # Halve the learning rate on plateau.
        factor = 0.5, 
        # Reduce the LR when val_loss has not improved for this many
        # epochs.  `patience` is an epoch count, so use integer floor
        # division: the original `patient / 2` produced the float 2.0
        # under Python 3 true division.
        patience = patient // 2, 
        # Never reduce the learning rate below this floor.
        min_lr=0.00001,
        verbose=1,
        mode='min'
    )]
In [12]:
%%time
epochs =30
batch_size = 64
history = model.fit_generator(datagen.flow(X_train,y_train, batch_size=batch_size),
                              epochs = epochs, validation_data = (X_val,y_val),
                              steps_per_epoch=X_train.shape[0] // batch_size
                              ,callbacks=callbacks_list,verbose = 1)
Epoch 1/30
525/525 [==============================] - 18s 35ms/step - loss: 0.6216 - accuracy: 0.8020 - val_loss: 0.1816 - val_accuracy: 0.9450
Epoch 2/30
525/525 [==============================] - 15s 29ms/step - loss: 0.2233 - accuracy: 0.9317 - val_loss: 0.1542 - val_accuracy: 0.9527
Epoch 3/30
525/525 [==============================] - 15s 29ms/step - loss: 0.1758 - accuracy: 0.9464 - val_loss: 0.0894 - val_accuracy: 0.9735
Epoch 4/30
525/525 [==============================] - 15s 29ms/step - loss: 0.1458 - accuracy: 0.9562 - val_loss: 0.0741 - val_accuracy: 0.9777
Epoch 5/30
525/525 [==============================] - 15s 29ms/step - loss: 0.1208 - accuracy: 0.9624 - val_loss: 0.0798 - val_accuracy: 0.9755
Epoch 6/30
525/525 [==============================] - 16s 31ms/step - loss: 0.1092 - accuracy: 0.9667 - val_loss: 0.0671 - val_accuracy: 0.9792
Epoch 7/30
525/525 [==============================] - 15s 29ms/step - loss: 0.0936 - accuracy: 0.9712 - val_loss: 0.0548 - val_accuracy: 0.9832
Epoch 8/30
525/525 [==============================] - 15s 29ms/step - loss: 0.0918 - accuracy: 0.9715 - val_loss: 0.0622 - val_accuracy: 0.9814
Epoch 9/30
525/525 [==============================] - 15s 28ms/step - loss: 0.0811 - accuracy: 0.9744 - val_loss: 0.0531 - val_accuracy: 0.9832
Epoch 10/30
525/525 [==============================] - 15s 28ms/step - loss: 0.0761 - accuracy: 0.9767 - val_loss: 0.0439 - val_accuracy: 0.9862
Epoch 11/30
525/525 [==============================] - 15s 29ms/step - loss: 0.0712 - accuracy: 0.9774 - val_loss: 0.0421 - val_accuracy: 0.9880
Epoch 12/30
525/525 [==============================] - 16s 30ms/step - loss: 0.0662 - accuracy: 0.9788 - val_loss: 0.0393 - val_accuracy: 0.9882
Epoch 13/30
525/525 [==============================] - 15s 29ms/step - loss: 0.0635 - accuracy: 0.9799 - val_loss: 0.0487 - val_accuracy: 0.9852
Epoch 14/30
525/525 [==============================] - 15s 29ms/step - loss: 0.0624 - accuracy: 0.9806 - val_loss: 0.0409 - val_accuracy: 0.9879

Epoch 00014: ReduceLROnPlateau reducing learning rate to 0.0005000000237487257.
Epoch 15/30
525/525 [==============================] - 15s 29ms/step - loss: 0.0490 - accuracy: 0.9850 - val_loss: 0.0346 - val_accuracy: 0.9899
Epoch 16/30
525/525 [==============================] - 15s 29ms/step - loss: 0.0445 - accuracy: 0.9861 - val_loss: 0.0357 - val_accuracy: 0.9887
Epoch 17/30
525/525 [==============================] - 15s 29ms/step - loss: 0.0413 - accuracy: 0.9871 - val_loss: 0.0367 - val_accuracy: 0.9890

Epoch 00017: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
Epoch 18/30
525/525 [==============================] - 15s 29ms/step - loss: 0.0405 - accuracy: 0.9870 - val_loss: 0.0279 - val_accuracy: 0.9913
Epoch 19/30
525/525 [==============================] - 15s 29ms/step - loss: 0.0370 - accuracy: 0.9885 - val_loss: 0.0314 - val_accuracy: 0.9907
Epoch 20/30
525/525 [==============================] - 15s 29ms/step - loss: 0.0355 - accuracy: 0.9885 - val_loss: 0.0278 - val_accuracy: 0.9914

Epoch 00020: ReduceLROnPlateau reducing learning rate to 0.0001250000059371814.
Epoch 21/30
525/525 [==============================] - 15s 28ms/step - loss: 0.0331 - accuracy: 0.9895 - val_loss: 0.0283 - val_accuracy: 0.9923
Epoch 22/30
525/525 [==============================] - 15s 28ms/step - loss: 0.0321 - accuracy: 0.9897 - val_loss: 0.0282 - val_accuracy: 0.9915

Epoch 00022: ReduceLROnPlateau reducing learning rate to 6.25000029685907e-05.
Epoch 23/30
525/525 [==============================] - 16s 30ms/step - loss: 0.0310 - accuracy: 0.9901 - val_loss: 0.0264 - val_accuracy: 0.9924
Epoch 24/30
525/525 [==============================] - 15s 29ms/step - loss: 0.0274 - accuracy: 0.9917 - val_loss: 0.0263 - val_accuracy: 0.9919
CPU times: user 9min 34s, sys: 49.4 s, total: 10min 23s
Wall time: 7min 42s
In [13]:
#predict
# Predict class probabilities for the test set, take the argmax as the digit
# label, and write the Kaggle submission file.
submission = pd.read_csv(f'{datapath}/sample_submission.csv')
pred = model.predict(test)
pred = pred.argmax(axis=1)
submission['Label'] = pred
submission.to_csv('submission.csv', index=False)
submission.head()
Out[13]:
ImageId Label
0 1 2
1 2 0
2 3 9
3 4 0
4 5 3

Acc/Loss plot

In [14]:
# Accuracy curves: training vs. validation per epoch.
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']

# Note: rebinds `epochs` (previously the int 30) to the epoch index range;
# the loss-plot cell below reuses it.
epochs = range(len(acc))

plt.plot(epochs, acc, label='Training acc')
plt.plot(epochs, val_acc, label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.ylim(0.9, 1)
plt.show()
In [15]:
# Loss curves: training vs. validation per epoch (reuses `epochs` from the
# accuracy-plot cell).
loss = history.history['loss']
val_loss = history.history['val_loss']

plt.plot(epochs, loss, label='Training loss')
plt.plot(epochs, val_loss, label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.ylim(0, 0.5)
plt.show()

conclusion

image

  • 간단한 LeNet-5 모델로 정확도 99% 이상을 달성했습니다.
  • 교차검증을 통한 앙상블로 정확도를 더 높일 수 있을 것 같습니다.
In [1]:
# Widen the notebook container to 90% of the browser window.
from IPython.core.display import display, HTML
wide_css = "<style>.container {width:90% !important;}</style>"
display(HTML(wide_css))
반응형

+ Recent posts