NMJ406 - VGG16 Python Code
Before starting to run this code, make sure your images are ready in your Google Drive. Images must be
divided into three types of folders: training, validation and testing.
# View the structure of the model (layers, output shapes, parameter counts).
model.summary()

# Tell the model what cost and optimization method to use.
# Categorical cross-entropy matches the softmax/one-hot 'categorical' labels.
model.compile(
    loss='categorical_crossentropy',
    optimizer='adam',
    metrics=['accuracy']
)
# Step 7: use ImageDataGenerator to read images from directories.
# Rescale all images by 1/255 so pixel values fall in [0, 1].
from keras.preprocessing.image import ImageDataGenerator

# Training images get light augmentation to reduce overfitting.
train_datagen = ImageDataGenerator(
    rescale=1./255,
    shear_range=0.2,       # random shear
    zoom_range=0.2,        # random zoom
    horizontal_flip=True,  # random left-right flips
)
# Validation images are only rescaled — never augmented.
test_datagen = ImageDataGenerator(rescale=1./255)

# train_path / valid_path are Google Drive folder paths defined earlier;
# each class must live in its own sub-folder.
training_set = train_datagen.flow_from_directory(
    train_path,
    target_size=(224, 224),  # VGG16's expected input resolution
    batch_size=32,
    class_mode='categorical',
)
validation_set = test_datagen.flow_from_directory(
    valid_path,
    target_size=(224, 224),
    batch_size=32,
    class_mode='categorical',
)
# Step 8: derive the class labels from the training sub-folder names.
from os import listdir

label = listdir(train_path)  # one sub-folder per class
numClass = len(label)        # number of output classes
print(label)
# Step 9: train the classifier head for 20 epochs.
# validation_steps=1 evaluates only one validation batch per epoch —
# fast, but the reported val metrics will be noisy.
r = model.fit(
    training_set,
    epochs=20,
    validation_data=validation_set,
    validation_steps=1,
)
# Plot training vs validation loss per epoch.
plt.figure(1)
plt.plot(r.history['loss'], label='train loss')
plt.plot(r.history['val_loss'], label='val loss')
plt.legend()
plt.title("train-val loss graph")
plt.xlabel("Epoch")
plt.ylabel("Loss")
# NOTE: if saving, call savefig() BEFORE plt.show() — after show() the
# figure is cleared and the saved image would be blank.
# plt.savefig("/content/drive/MyDrive/FYP/8to2ep20/LossVal_loss.png")
plt.show()
# Plot training vs validation accuracy per epoch.
plt.figure(2)
plt.plot(r.history['accuracy'], label='train acc')
plt.plot(r.history['val_accuracy'], label='val acc')
plt.legend()
plt.title("train-val accuracy graph")
plt.xlabel("Epoch")
plt.ylabel("Accuracy")
# NOTE: if saving, call savefig() BEFORE plt.show() — after show() the
# figure is cleared and the saved image would be blank.
# plt.savefig("/content/drive/MyDrive/FYP/8to2ep20/AccVal_acc.png")
plt.show()
# Step 12: training evaluation — accuracy, specificity, sensitivity, etc.
# Find the code by yourselves.
Example: VGG16 testing using Google Colab
# Step 6: load VGG16 pre-trained on ImageNet, without its classifier head.
# IMAGE_SIZE is defined earlier (e.g. [224, 224]); "+ [3]" adds the RGB channels.
vgg = VGG16(input_shape=IMAGE_SIZE + [3], weights='imagenet', include_top=False)

# Freeze the pre-trained convolutional base — don't train existing weights.
for layer in vgg.layers:
    layer.trainable = False

# Our layers - you can add more if you want.
x = Flatten()(vgg.output)
prediction = Dense(3, activation='softmax')(x)  # 3 output classes

# Create a model object joining the frozen base and the new head.
model = Model(inputs=vgg.input, outputs=prediction)

# Tell the model what cost and optimization method to use.
model.compile(
    loss='categorical_crossentropy',
    optimizer='adam',
    metrics=['accuracy']
)
# Training images get light augmentation; test images are only rescaled.
train_datagen = ImageDataGenerator(
    rescale=1./255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
)
valid_datagen = ImageDataGenerator(rescale=1./255)

# train_path / test_path are Google Drive folder paths defined earlier.
training_set = train_datagen.flow_from_directory(
    train_path,
    target_size=(224, 224),  # VGG16's expected input resolution
    batch_size=32,
    class_mode='categorical',
)
testing_set = valid_datagen.flow_from_directory(
    test_path,
    target_size=(224, 224),
    batch_size=32,
    class_mode='categorical',
)
# Derive the class labels from the training sub-folder names.
from os import listdir

label = listdir(train_path)  # one sub-folder per class
numClass = len(label)        # number of output classes
print(label)
# Train for 20 epochs; `checkpoint` is a ModelCheckpoint callback defined
# earlier that saves the model during training.
r = model.fit(
    training_set,
    epochs=20,
    validation_data=testing_set,
    validation_steps=1,  # one validation batch per epoch — quick but noisy
    shuffle=True,
    callbacks=[checkpoint],  # was `callbacks=checkpoint`; fit() expects a LIST of callbacks
)
# Confusion matrix — find the code by yourselves.