# eye_training.py
import numpy as np
import matplotlib.pyplot as plt
from keras.layers import Input, Activation, Conv2D, Flatten, Dense, MaxPooling2D
from keras.models import Model, load_model
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
plt.style.use('dark_background')


x_train = np.load('sauce/x_train.npy').astype(np.float32)
y_train = np.load('sauce/y_train.npy').astype(np.float32)
x_val = np.load('sauce/x_val.npy').astype(np.float32)
y_val = np.load('sauce/y_val.npy').astype(np.float32)


'''
print(x_train.shape, y_train.shape)
(2586, 26, 34, 1) (2586, 1)
2,586 grayscale eye images, 26 px tall by 34 px wide

print(x_val.shape, y_val.shape)
(288, 26, 34, 1) (288, 1)
'''
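
# Quick sanity check (an addition for illustration, not part of the original
# pipeline): display one training sample to confirm the eye crops loaded
# correctly. The label convention (1 = open eye, 0 = closed) is assumed here.
plt.imshow(x_train[0].reshape((26, 34)), cmap='gray')
plt.title('label = %s' % float(y_train[0]))
plt.show()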


train_datagen = ImageDataGenerator(
    # data augmentation: random transforms effectively enlarge the training set
    rescale=1./255,
    rotation_range=10,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2
)
val_datagen = ImageDataGenerator(
    # the validation set is only rescaled, never augmented, so that the
    # validation metrics reflect the real data
    rescale=1. / 255
)
train_generator = train_datagen.flow(
    # stream the raw arrays through the generator, applying the augmentations above
    x=x_train, y=y_train,
    batch_size=32,
    shuffle=True
)
val_generator = val_datagen.flow(
    x=x_val, y=y_val,
    batch_size=32,
    shuffle=False
)
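
# Sketch of how to inspect a single augmented batch (an illustrative addition;
# the generator loops forever, so consuming one batch here is harmless):
sample_x, sample_y = next(train_generator)
print(sample_x.shape, sample_y.shape)  # (32, 26, 34, 1) (32, 1)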

inputs = Input(shape=(26, 34, 1))
net = Conv2D(32, kernel_size=3, strides=1, padding='same', activation='relu')(inputs)
# first convolution layer; 'same' padding keeps the 26x34 spatial size
net = MaxPooling2D(pool_size=2)(net)
# max pooling keeps the largest value in each 2x2 window, halving both
# spatial dimensions (dimensionality reduction) and cutting computation
net = Conv2D(64, kernel_size=3, strides=1, padding='same', activation='relu')(net)
net = MaxPooling2D(pool_size=2)(net)
net = Conv2D(128, kernel_size=3, strides=1, padding='same', activation='relu')(net)
net = MaxPooling2D(pool_size=2)(net)
net = Flatten()(net)
# fully connected head:
# flatten the feature maps into a 1-D vector
net = Dense(512)(net)
# a fully connected layer with 512 units
net = Activation('relu')(net)
net = Dense(1)(net)
# final layer producing a single value
outputs = Activation('sigmoid')(net)
# sigmoid maps the output into the range 0 to 1
model = Model(inputs=inputs, outputs=outputs)
model.compile(optimizer='Nadam', loss='binary_crossentropy', metrics=['acc'])
# binary cross-entropy because each label is either 0 or 1; also track accuracy

model.summary()
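
# For reference, the spatial sizes the summary should report: 26x34 input ->
# 13x17 after the first pool -> 6x8 after the second -> 3x4 after the third,
# so Flatten produces a 3*4*128 = 1536-dimensional vector.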

model.fit_generator(
    # the generators above produce augmented batches on the fly, so the model
    # is trained with fit_generator (deprecated in recent Keras in favour of
    # model.fit, which accepts generators directly)
    train_generator, epochs=50, validation_data=val_generator,
    callbacks=[
        # save_best_only: write a checkpoint only when the monitored val_acc improves
        ModelCheckpoint('sauce/models.h5', monitor='val_acc', save_best_only=True, mode='max', verbose=1),

        # if val_acc stops improving for 10 epochs, multiply the learning rate by 0.2
        ReduceLROnPlateau(monitor='val_acc', factor=0.2, patience=10, verbose=1, mode='auto', min_lr=1e-05)
    ]
)
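
# A minimal sketch of how the best checkpoint could be used afterwards
# (load_model is already imported above); 'sauce/models.h5' is the path
# written by the ModelCheckpoint callback, and the label convention
# (1 = open eye) is an assumption:
best_model = load_model('sauce/models.h5')
preds = best_model.predict(x_val / 255.)  # rescale exactly as the generators do
print('predicted probability that the first eye is open:', preds[0][0])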