Training a larger model
In the previous sections we used images of size 150×150 because the model trains faster on them (here: about 4 times faster). This is a good approach when experimenting with parameters. Now we want to train on bigger images – let's use images of size 299×299. Therefore we reuse the previous training code and make a few changes.
We add a new parameter, input_size, with a default value of 150.
Train a 299×299 model
from tensorflow import keras
from tensorflow.keras.applications.xception import Xception, preprocess_input
from tensorflow.keras.preprocessing.image import ImageDataGenerator


def make_model(input_size=150, learning_rate=0.01, size_inner=100,
               droprate=0.5):
    # pretrained convolutional base without the ImageNet classification head
    base_model = Xception(
        weights='imagenet',
        include_top=False,
        input_shape=(input_size, input_size, 3)
    )

    # freeze the base so only the new layers are trained
    base_model.trainable = False

    #########################################

    inputs = keras.Input(shape=(input_size, input_size, 3))
    base = base_model(inputs, training=False)
    vectors = keras.layers.GlobalAveragePooling2D()(base)
    inner = keras.layers.Dense(size_inner, activation='relu')(vectors)
    drop = keras.layers.Dropout(droprate)(inner)
    outputs = keras.layers.Dense(10)(drop)
    model = keras.Model(inputs, outputs)

    #########################################

    optimizer = keras.optimizers.Adam(learning_rate=learning_rate)
    loss = keras.losses.CategoricalCrossentropy(from_logits=True)

    model.compile(
        optimizer=optimizer,
        loss=loss,
        metrics=['accuracy']
    )

    return model
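Because input_size is a parameter with a default, the same function works for both resolutions. A minimal sketch of how it can be called (the variable names here are illustrative, not from the original code):

    # the 150×150 model from the previous sections (default input_size)
    model_150 = make_model()

    # the larger 299×299 model we train in this section
    model_299 = make_model(input_size=299)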
input_size = 299

train_gen = ImageDataGenerator(
    preprocessing_function=preprocess_input,
    shear_range=10,
    zoom_range=0.1,
    horizontal_flip=True
)

train_ds = train_gen.flow_from_directory(
    './clothing-dataset-small/train',
    target_size=(input_size, input_size),
    batch_size=32
)

# validation data must not be augmented, so we use val_gen here,
# not train_gen
val_gen = ImageDataGenerator(preprocessing_function=preprocess_input)

val_ds = val_gen.flow_from_directory(
    './clothing-dataset-small/validation',
    target_size=(input_size, input_size),
    batch_size=32,
    shuffle=False
)
# Output:
# Found 3068 images belonging to 10 classes.
# Found 341 images belonging to 10 classes.
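Since flow_from_directory derives the label mapping from the subdirectory names, both generators should map class names to the same indices. A quick sanity check (not part of the original code) can confirm this:

    # train and validation should agree on the class-to-index mapping
    assert train_ds.class_indices == val_ds.class_indices
    print(train_ds.class_indices)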
checkpoint = keras.callbacks.ModelCheckpoint(
    'xception_v4_1_{epoch:02d}_{val_accuracy:.3f}.h5',
    save_best_only=True,
    monitor='val_accuracy',
    mode='max'
)
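Keras fills in the {epoch:02d} and {val_accuracy:.3f} placeholders, so every epoch that improves val_accuracy produces its own file. A saved checkpoint can later be restored with keras.models.load_model; the filename below is a hypothetical example of what the template produces:

    # restore a saved checkpoint (the exact filename depends on the run)
    best_model = keras.models.load_model('xception_v4_1_50_0.883.h5')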
learning_rate = 0.0005
size = 100
droprate = 0.2

model = make_model(
    input_size=input_size,
    learning_rate=learning_rate,
    size_inner=size,
    droprate=droprate
)

history = model.fit(train_ds, epochs=50, validation_data=val_ds,
                    callbacks=[checkpoint])
# Output:
# Epoch 1/50
# 96/96 [==============================] - 574s 6s/step - loss: 1.0308 - accuracy: 0.6734 - val_loss: 0.5814 - val_accuracy: 0.8152
# Epoch 2/50
# 96/96 [==============================] - 572s 6s/step - loss: 0.5457 - accuracy: 0.8106 - val_loss: 0.4663 - val_accuracy: 0.8416
# Epoch 3/50
# 96/96 [==============================] - 546s 6s/step - loss: 0.4452 - accuracy: 0.8494 - val_loss: 0.4256 - val_accuracy: 0.8446
# ...
# Epoch 49/50
# 96/96 [==============================] - 573s 6s/step - loss: 0.0436 - accuracy: 0.9899 - val_loss: 0.4458 - val_accuracy: 0.8534
# Epoch 50/50
# 96/96 [==============================] - 587s 6s/step - loss: 0.0476 - accuracy: 0.9863 - val_loss: 0.4051 - val_accuracy: 0.8827
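By the last epoch the validation accuracy reaches about 88%. To see how it evolved over the 50 epochs, we can plot the history object; a minimal sketch, assuming matplotlib is available:

    import matplotlib.pyplot as plt

    # compare train and validation accuracy per epoch
    plt.plot(history.history['accuracy'], label='train')
    plt.plot(history.history['val_accuracy'], label='validation')
    plt.xlabel('epoch')
    plt.ylabel('accuracy')
    plt.legend()
    plt.show()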