import sys
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.optimizers import RMSprop, Adam, SGD
from tensorflow.keras import layers
from tensorflow.keras.models import load_model, Model
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img
from tensorflow.keras.applications.vgg16 import VGG16
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
#sys.path.append("/home/ia-serveur/ia/my_lib_functions")
sys.path.append("/home/ia/ia/my_lib_functions")
from my_functions import *
sys.path.append('./my_plot_results')
from my_functions_plot import *
# Sanity check: ``tf.function`` only exists in TensorFlow 2.x.
assert hasattr(tf, "function") # Be sure to use tensorflow 2.X
print('tensorflow :',tf.__version__)
# Smoke-test the project helper library imported from my_functions above.
first_function()
tensorflow : 2.3.1 This is the first function
# Visual sanity check: show the first 18 "dog" training images in a 5x6 grid.
folder = './data/train/'
plt.figure(figsize=(15, 15))
for idx in range(18):
    plt.subplot(5, 6, idx + 1)
    img_path = folder + 'dog.' + str(idx) + '.jpg'
    picture = plt.imread(img_path)
    plt.imshow(picture)
    plt.grid(False)
    plt.title('Dog')
plt.show()
# Same visual sanity check for the first 18 "cat" training images.
plt.figure(figsize=(15, 15))
for idx in range(18):
    plt.subplot(5, 6, idx + 1)
    img_path = folder + 'cat.' + str(idx) + '.jpg'
    picture = plt.imread(img_path)
    plt.imshow(picture)
    plt.grid(False)
    plt.title('Cat')
plt.show()
# Inspect the raw pixel distribution of the first dog image:
# min/max/mean/std statistics plus a side-by-side image / histogram figure.
print("Distribution Data First Image of DataSet")
filename = folder + 'dog.' + str(0) + '.jpg'
image = plt.imread(filename)
max_h, min_h = np.max(image), np.min(image)
abstract(image)  # project helper: prints class/shape/dtype/size
print('\nmax = ', max_h)
print('min = ', min_h)
print("Mean = {t:3.2f}".format(t=image.mean()))
print("Std = {t:3.2f}".format(t=image.std()))

plt.figure(figsize=(16, 4))
# Left panel: the image itself with a colorbar.
plt.subplot(1, 2, 1)
plt.imshow(image)
plt.grid(False)
plt.title('Dog')
plt.colorbar()
# Right panel: histogram over all flattened pixel values.
plt.subplot(1, 2, 2)
plt.hist(image.ravel(), color='gray', bins=100)
plt.title('Histogram Distribution Data')
plt.xlabel('Valeur Data')
plt.ylabel('Frequency')
plt.grid(True)
plt.show()
Distribution Data First Image of DataSet Abstract: Class=ndarray Shape=(375, 499, 3) Type=uint8 Nb_Bytes=561 Kb max = 255 min = 0 Mean = 116.79 Std = 50.96
# Build the training DataFrame: one row per image file with its class label.
filenames = os.listdir("./data/train/")
number_files = len(filenames)
# Optional cap on the number of files kept (0 = keep everything).
# FIX: the original named this variable ``max``, shadowing the builtin.
limit = 0
if limit > 0:
    number_max = (number_files // limit) - 1
else:
    number_max = number_files - 1
# A file named 'dog.<n>.jpg' is a Dog; anything else is a Cat.
# (Slice + comprehension replaces the original manual while/append loop.)
list_files = filenames[:number_max + 1]
categories = ['Dog' if name.split('.')[0] == 'dog' else 'Cat'
              for name in list_files]
df = pd.DataFrame(
    {'filename': list_files,
     'category': categories
     }
)
abstract(df)
print('\ndf',df)
df.head()
Abstract: Class=pandas DataFrame Shape=(24000, 2) Type=int64 Nb_Bytes=384 Kb df filename category 0 dog.2728.jpg Dog 1 dog.1683.jpg Dog 2 cat.5192.jpg Cat 3 dog.3749.jpg Dog 4 cat.11130.jpg Cat ... ... ... 23995 dog.5011.jpg Dog 23996 dog.4587.jpg Dog 23997 dog.3416.jpg Dog 23998 dog.1474.jpg Dog 23999 dog.7497.jpg Dog [24000 rows x 2 columns]
filename | category | |
---|---|---|
0 | dog.2728.jpg | Dog |
1 | dog.1683.jpg | Dog |
2 | cat.5192.jpg | Cat |
3 | dog.3749.jpg | Dog |
4 | cat.11130.jpg | Cat |
# Labelled hold-out set: ./data/test-val/ holds images whose class is
# recoverable from the filename prefix ('dog.*' -> Dog, otherwise Cat).
test_val_filenames = os.listdir("./data/test-val/")
categories = ["Dog" if f_name.split('.')[0] == 'dog' else "Cat"
              for f_name in test_val_filenames]
test_val_df = pd.DataFrame(
    {
        'filename_val': test_val_filenames,
        'category_val': categories
    }
)
# Shuffle the rows (sample the whole frame without replacement).
test_val_df = test_val_df.sample(frac=1)
abstract(test_val_df)
print('\ntest_val_df',test_val_df)
test_val_df.head()
# Re-index from 0 after the shuffle (pandas).
test_images_validate_df = test_val_df.reset_index(drop=True)
Abstract: Class=pandas DataFrame Shape=(1000, 2) Type=int64 Nb_Bytes=16 Kb test_val_df filename_val category_val 68 cat.12381.jpg Cat 59 dog.12173.jpg Dog 42 cat.12217.jpg Cat 879 dog.12194.jpg Dog 502 dog.12174.jpg Dog .. ... ... 745 dog.12144.jpg Dog 802 dog.12060.jpg Dog 427 cat.12409.jpg Cat 431 cat.12420.jpg Cat 774 dog.12062.jpg Dog [1000 rows x 2 columns]
# Unlabelled test set: no ground truth, so every row gets the placeholder
# category 'none'.
test_filenames = os.listdir("./data/test/")
# FIX: build the constant list directly instead of appending in a loop.
categories_none = ['none'] * len(test_filenames)
test_df = pd.DataFrame(
    {
        'filename_test' : test_filenames,
        'category_test' : categories_none
    }
)
abstract(test_df)
print('\ntest_df',test_df)
test_df.head()
# reset the index of the DataFrame (Pandas)
test_images_df = test_df.reset_index(drop = True)
Abstract: Class=pandas DataFrame Shape=(12500, 2) Type=int64 Nb_Bytes=200 Kb test_df filename_test category_test 0 4497.jpg none 1 10532.jpg none 2 7939.jpg none 3 10992.jpg none 4 7981.jpg none ... ... ... 12495 10975.jpg none 12496 3542.jpg none 12497 12219.jpg none 12498 10617.jpg none 12499 7019.jpg none [12500 rows x 2 columns]
# Class names ordered to match the generators' one-hot encoding
# (alphabetical class_indices: 0 = Cat, 1 = Dog).
targets_names = ['Cat', 'Dog']
print('targets_names',targets_names)
targets_names ['Cat', 'Dog']
# 80/20 train/validation split of the labelled frame, seeded for reproducibility.
train_images_df, train_images_validate_df = train_test_split(df, test_size = 0.2, random_state = 42)
# reset the index of the DataFrame (Pandas)
train_images_df = train_images_df.reset_index(drop = True)
train_images_validate_df = train_images_validate_df.reset_index(drop = True)
print("(train_images_df)")
abstract(train_images_df)
print("\n(train_images_validate_df)")
abstract(train_images_validate_df)
(train_images_df) Abstract: Class=pandas DataFrame Shape=(19200, 2) Type=int64 Nb_Bytes=307 Kb (train_images_validate_df) Abstract: Class=pandas DataFrame Shape=(4800, 2) Type=int64 Nb_Bytes=76 Kb
# Target image geometry shared by all generators and models.
# NOTE(review): "Images_With" is a typo for "Images_Width"; kept as-is
# because the name is referenced throughout the rest of the file.
Images_With = 128 #128
Images_Height = 128 #128
Images_channel = 3
Images_Size = (Images_With, Images_Height)
print('Images_With =',Images_With,'- Images_Height =',Images_Height,'- Images_channel =',Images_channel)
Images_With = 128 - Images_Height = 128 - Images_channel = 3
# Batch size shared by every ImageDataGenerator flow below.
BT_ImGen = 64 # Batch Size 64
# Dataset sizes (number of rows in each frame).
Max_train = train_images_df.shape[0]
Max_train_validate = train_images_validate_df.shape[0]
Max_test_validate = test_images_validate_df.shape[0]
Max_test = test_images_df.shape[0]
# Bare expression: only echoes the frame in a notebook; no effect as a script.
test_images_validate_df
print('Batch Size ImGen =',BT_ImGen)
print('Max_train =',Max_train)
print('Max_train_validate =',Max_train_validate)
print('Max_test_validate =',Max_test_validate)
print('Max_test =',Max_test)
Batch Size ImGen = 64 Max_train = 19200 Max_train_validate = 4800 Max_test_validate = 1000 Max_test = 12500
# Training generator: rescale pixels to [0, 1] plus light augmentation
# (rotation, shear, zoom, shifts, horizontal flip).
train_images_datagen = ImageDataGenerator(rotation_range = 15,
rescale = 1.0/255,
shear_range = 0.1,
zoom_range = 0.2,
horizontal_flip = True,
width_shift_range = 0.1,
height_shift_range = 0.1,
featurewise_center = False,
samplewise_std_normalization = False)
# Stream (image, one-hot label) batches from the DataFrame's filename/category columns.
train_images_generator = train_images_datagen.flow_from_dataframe(train_images_df,
"./data/train/",
x_col = 'filename',
y_col = 'category',
target_size = Images_Size,
class_mode = 'categorical',
batch_size = BT_ImGen,
shuffle = True)
Found 19200 validated image filenames belonging to 2 classes.
# Project helper ("firts" typo is in the library): prints stats of the first batch.
get_firts_from_ImageDataGenerator(train_images_generator, targets_names)
Distribution Data First Image From ImageDataGenerator type ImageDataGenerator = <class 'tuple'> - Shape= (64, 128, 128, 3) Abstract: Class=ndarray Shape=(128, 128, 3) Type=float32 Nb_Bytes=196 Kb Abstract: Class=ndarray Shape=(2,) Type=float32 Nb_Bytes=8 (Max) = 1.00 (Min) = 0.01 (Mean) = 0.49 (Std) = 0.22
# Validation generator.
# NOTE(review): it applies the same *augmentation* as the training
# generator; validation is usually run on rescale-only data — confirm intent.
train_images_validate_datagen = ImageDataGenerator(rotation_range = 15,
rescale = 1.0/255,
shear_range = 0.1,
zoom_range = 0.2,
horizontal_flip = True,
width_shift_range = 0.1,
height_shift_range = 0.1,
featurewise_center = False,
samplewise_std_normalization = False)
train_images_validate_generator = train_images_validate_datagen.flow_from_dataframe(train_images_validate_df,
"./data/train/",
x_col = 'filename',
y_col = 'category',
target_size = Images_Size,
class_mode = 'categorical',
batch_size = BT_ImGen,
shuffle = True)
Found 4800 validated image filenames belonging to 2 classes.
# Print stats of the first validation batch (project helper).
get_firts_from_ImageDataGenerator(train_images_validate_generator, targets_names)
Distribution Data First Image From ImageDataGenerator type ImageDataGenerator = <class 'tuple'> - Shape= (64, 128, 128, 3) Abstract: Class=ndarray Shape=(128, 128, 3) Type=float32 Nb_Bytes=196 Kb Abstract: Class=ndarray Shape=(2,) Type=float32 Nb_Bytes=8 (Max) = 1.00 (Min) = 0.00 (Mean) = 0.30 (Std) = 0.21
# Labelled hold-out generator: rescale only, no augmentation, no shuffling
# (order must stay aligned with ``filenames`` for later prediction display).
test_images_validate_datagen = ImageDataGenerator(rescale = 1.0/255)
test_images_validate_generator = test_images_validate_datagen.flow_from_dataframe(test_images_validate_df,
"./data/test-val/",
x_col = 'filename_val',
y_col = 'category_val',
target_size = Images_Size,
class_mode = 'categorical',
batch_size = BT_ImGen,
shuffle = False)
Found 1000 validated image filenames belonging to 2 classes.
# Print stats of the first hold-out batch (project helper).
get_firts_from_ImageDataGenerator(test_images_validate_generator, targets_names)
Distribution Data First Image From ImageDataGenerator type ImageDataGenerator = <class 'tuple'> - Shape= (64, 128, 128, 3) Abstract: Class=ndarray Shape=(128, 128, 3) Type=float32 Nb_Bytes=196 Kb Abstract: Class=ndarray Shape=(2,) Type=float32 Nb_Bytes=8 (Max) = 0.92 (Min) = 0.00 (Mean) = 0.44 (Std) = 0.12
# Unlabelled test generator: rescale only; the placeholder 'none' category
# yields a single dummy class (see "belonging to 1 classes" below).
test_images_datagen = ImageDataGenerator(rescale = 1.0/255)
test_images_generator = test_images_datagen.flow_from_dataframe(test_images_df,
"./data/test/",
x_col = 'filename_test',
y_col = 'category_test',
target_size = Images_Size,
class_mode = 'categorical',
batch_size = BT_ImGen,
shuffle = False)
Found 12500 validated image filenames belonging to 1 classes.
# Print stats of the first test batch (project helper).
get_firts_from_ImageDataGenerator(test_images_generator, targets_names)
Distribution Data First Image From ImageDataGenerator type ImageDataGenerator = <class 'tuple'> - Shape= (64, 128, 128, 3) Abstract: Class=ndarray Shape=(128, 128, 3) Type=float32 Nb_Bytes=196 Kb Abstract: Class=ndarray Shape=(1,) Type=float32 Nb_Bytes=4 (Max) = 1.00 (Min) = 0.00 (Mean) = 0.39 (Std) = 0.27
def save_model(model_name, model_format, model_to_save=None):
    """Save a Keras model under ./results/<model_name>.

    Parameters
    ----------
    model_name : str
        File name (e.g. 'cnn.h5'); joined onto './results'.
    model_format : str
        Passed to ``Model.save`` as ``save_format`` (e.g. 'h5').
    model_to_save : optional
        Model instance to save.  Defaults to the module-level ``model``
        (the original implementation's implicit global), so existing
        two-argument callers keep working.
    """
    path = './results'
    model_path = os.path.join(path, model_name)
    # Fall back on the global ``model`` only when no instance is given.
    target = model if model_to_save is None else model_to_save
    target.save(model_path, save_format=model_format)
def compile_model(Optimizer, Learning_rate, Opt):
    """Compile the module-level ``model`` for one-hot 2-class training.

    Parameters
    ----------
    Optimizer : str
        One of "RMSprop", "Adam" or "SGD".
    Learning_rate : float or None
        Initial learning rate; only used when ``Opt`` is True.
    Opt : bool
        True  -> instantiate the optimizer class with ``Learning_rate``.
        False -> pass the Keras string alias (framework default settings).

    Raises
    ------
    ValueError
        If ``Optimizer`` is not a supported name.  (The original code left
        ``opt`` unbound in that case and crashed later with NameError.)
    """
    if Optimizer == "RMSprop":
        if Opt == True:
            print('RMSprop & Learning_rate')
            # ``learning_rate`` is the non-deprecated spelling of ``lr``.
            opt = RMSprop(learning_rate=Learning_rate)
        else:
            print('RMSprop')
            opt = "rmsprop"
    elif Optimizer == "Adam":
        if Opt == True:
            print('Adam & Learning_rate')
            opt = Adam(learning_rate=Learning_rate)
        else:
            print('Adam')
            opt = "adam"
    elif Optimizer == "SGD":
        if Opt == True:
            print('SGD & Learning_rate')
            opt = SGD(learning_rate=Learning_rate)
        else:
            print('SGD')
            opt = "sgd"
    else:
        raise ValueError("Unsupported optimizer: " + str(Optimizer))
    # Generator labels are one-hot vectors -> categorical crossentropy.
    model.compile(
        loss="categorical_crossentropy",
        optimizer= opt,
        metrics=["accuracy"]
    )
def show_img_predict(resultat, targets_n):
    """Display the generator's first image: original file vs batch tensor.

    Left panel: the original file read from disk, titled with its filename.
    Right panel: the first preprocessed image of the first batch, titled
    with its class name decoded from the generator's one-hot label.

    FIX: the original titled the left panel with the *global* ``img_org``
    set elsewhere in the script; the filename is now taken from
    ``resultat`` itself, making the function self-contained.
    """
    folder = './data/test-val/'
    batch = resultat[0]            # (images, labels) tuple of batch 0
    images = batch[0]
    labels = batch[1]
    ind = np.argmax(labels[0])     # index of the one-hot true class
    lab = targets_n[ind]
    plt.figure(figsize=(16, 4))
    plt.subplot(1, 2, 1)
    name_file = folder + resultat.filenames[0]
    image = plt.imread(name_file)
    plt.title(resultat.filenames[0])
    plt.imshow(image)
    plt.grid(False)
    plt.subplot(1, 2, 2)
    plt.title(lab)
    plt.imshow(images[0])
    plt.grid(False)
    plt.show()
def img_predict_n(resultat, predict, targets_n):
    """Plot 12 generator images with their labels and confidences.

    Each subplot shows the first image of batch ``i`` titled with its
    generator label, and ``predict[i]``'s max probability as the x-label.

    FIX: removed the dead ``i = 0`` initializer (the for loop rebinds it).
    NOTE(review): the image shown is the first image of *batch* i while
    ``predict[i]`` is the prediction of the i-th image overall — these
    only refer to the same image for i == 0; confirm intent.
    """
    plt.figure(figsize=(18,18))
    for i in range(12):
        plt.subplot(5,6,i+1)
        x = resultat[i]
        plt.imshow((x[0])[0])
        plt.title(targets_n[np.argmax((x[1])[0])])
        plt.xlabel('validated at = {t:3.2f} %'.format(t= np.max(predict[i]*100)))
        plt.grid(False)
    plt.show()
def model_dataflair():
    """Baseline CNN: three Conv/BN/Pool/Dropout blocks plus a dense head.

    Input shape is (Images_With, Images_Height, Images_channel); output is
    a 2-way softmax (Cat vs Dog).  Returned uncompiled.
    """
    net = keras.models.Sequential()
    # Convolutional blocks with doubling filter counts: 32 -> 64 -> 128.
    for block_idx, n_filters in enumerate((32, 64, 128)):
        if block_idx == 0:
            # Only the first layer declares the input shape.
            net.add(layers.Conv2D(filters=n_filters, kernel_size=(3, 3),
                                  activation="relu",
                                  input_shape=(Images_With,
                                               Images_Height,
                                               Images_channel)))
        else:
            net.add(layers.Conv2D(filters=n_filters, kernel_size=(3, 3),
                                  activation="relu"))
        net.add(layers.BatchNormalization())
        net.add(layers.MaxPool2D((2, 2)))
        net.add(layers.Dropout(0.25))
    # Classification head.
    net.add(layers.Flatten())
    net.add(layers.Dense(128, activation="relu"))
    net.add(layers.BatchNormalization())
    net.add(layers.Dropout(0.5))
    net.add(layers.Dense(2, activation="softmax", name="softmax"))
    return net
def model_vgg16_fixe():
    """Hand-built VGG-style CNN trained from scratch (no pretrained weights).

    Five conv stages — (64 x2, 128 x2, 256 x2, 512 x3, 512 x3), each ending
    in a 2x2 max-pool — followed by two 4096-unit dense layers and a 2-way
    softmax.  Returned uncompiled.
    """
    # (filters, number of conv layers) for each stage, in order.
    stages = ((64, 2), (128, 2), (256, 2), (512, 3), (512, 3))
    net = keras.models.Sequential()
    is_first_layer = True
    for n_filters, n_convs in stages:
        for _ in range(n_convs):
            conv_kwargs = dict(filters=n_filters, kernel_size=(3, 3),
                               padding="same", activation="relu")
            if is_first_layer:
                # Only the very first layer declares the input shape.
                conv_kwargs['input_shape'] = (Images_With,
                                              Images_Height,
                                              Images_channel)
                is_first_layer = False
            net.add(layers.Conv2D(**conv_kwargs))
        net.add(layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
    # Classifier head.
    net.add(layers.Flatten())
    net.add(layers.Dense(units=4096, activation="relu"))
    net.add(layers.Dense(units=4096, activation="relu"))
    net.add(layers.Dense(units=2, activation="softmax", name="softmax"))
    return net
def model_vgg16_adap(W, H, Ch, n_classes=2, dense_units=128):
    """Transfer learning: frozen pretrained VGG16 backbone + trainable head.

    Parameters
    ----------
    W, H, Ch : int
        Input image width, height and channel count.
    n_classes : int, optional
        Size of the softmax output layer (default 2, as in the original).
    dense_units : int, optional
        Width of the hidden dense layer (default 128, as in the original).

    Returns
    -------
    tensorflow.keras.Model
        Uncompiled functional model; only the new head is trainable.
    """
    # Load the ImageNet-pretrained convolutional base without its classifier.
    base = VGG16(include_top = False, input_shape = (W, H, Ch))
    # Freeze the backbone so only the new head learns.
    for layer in base.layers:
        layer.trainable = False
    # New classifier head on top of the last conv output.
    x = tf.keras.layers.Flatten(name = 'flatten')(base.layers[-1].output)
    x = tf.keras.layers.Dense(dense_units, activation = 'relu', name = 'dense_1')(x)
    Output = tf.keras.layers.Dense(n_classes, activation = 'softmax', name = 'softmax')(x)
    # Assemble the functional model.
    model = Model(inputs = base.inputs, outputs = Output)
    return model
# Callback set 1: stop after 10 stagnant epochs; halve the learning rate
# whenever val_accuracy plateaus for 2 epochs (floor 1e-5).
earlystop = EarlyStopping(patience = 10)
learning_rate_reduction = ReduceLROnPlateau(monitor = 'val_accuracy',
patience = 2,
verbose = True,
factor = 0.5,
min_lr = 0.00001)
Callback_list_1 = [earlystop, learning_rate_reduction]
# Callback set 2: longer patience on val_accuracy plus checkpointing of the
# best full model.  (Rebinds ``earlystop``; list 1 keeps the old object.)
earlystop = EarlyStopping(monitor='val_accuracy',min_delta=0, patience=20, verbose=True, mode='auto')
checkpoint = ModelCheckpoint(filepath="./results/cnn_dog&cat_cb2.h5", monitor='val_accuracy',
verbose=True,
save_best_only=True,
save_weights_only=False,
mode='auto')
Callback_list_2 = [earlystop, checkpoint]
# Callback set 3: checkpoint-only, keep the best-val_accuracy model.
checkpoint = ModelCheckpoint(filepath='./results/cnn_dog&cat_cb3.h5', monitor='val_accuracy',
save_best_only=True,
mode='max')
Callback_list_3 = [checkpoint]
# Whole batches per epoch for each generator (floor division drops the
# final partial batch).  NOTE(review): "Setp" is a typo for "Step"; names
# kept because they are reused throughout the file.
Setp_Size_Train = train_images_generator.n//train_images_generator.batch_size
Setp_Size_Train_validate = train_images_validate_generator.n//train_images_validate_generator.batch_size
Setp_Size_Test_validate = test_images_validate_generator.n//test_images_validate_generator.batch_size
print('Setp_Size_Train =',Setp_Size_Train)
print('Setp_Size_Train_validate =',Setp_Size_Train_validate)
print('Setp_Size_Test_validate =',Setp_Size_Test_validate)
Setp_Size_Train = 300 Setp_Size_Train_validate = 75 Setp_Size_Test_validate = 15
# Build the (uncompiled) baseline CNN as the global ``model``.
model = model_dataflair()
Metrics — used to monitor the training and validation steps.
If the classes/labels were encoded as a single integer per sample (e.g. 2 -> [2]), the loss would have to be `sparse_categorical_crossentropy`; here the generators emit one-hot vectors, so `categorical_crossentropy` is used.
# Compile the global ``model`` with plain RMSprop (framework defaults);
# the commented alternatives were used in other experiments.
compile_model(Optimizer="RMSprop", Learning_rate=None, Opt=False)
#compile_model(Optimizer="Adam", Learning_rate=None, Opt=False)
#compile_model(Optimizer="SGD", Learning_rate=None, Opt=False)
RMSprop
model.summary()
Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 126, 126, 32) 896 _________________________________________________________________ batch_normalization (BatchNo (None, 126, 126, 32) 128 _________________________________________________________________ max_pooling2d (MaxPooling2D) (None, 63, 63, 32) 0 _________________________________________________________________ dropout (Dropout) (None, 63, 63, 32) 0 _________________________________________________________________ conv2d_1 (Conv2D) (None, 61, 61, 64) 18496 _________________________________________________________________ batch_normalization_1 (Batch (None, 61, 61, 64) 256 _________________________________________________________________ max_pooling2d_1 (MaxPooling2 (None, 30, 30, 64) 0 _________________________________________________________________ dropout_1 (Dropout) (None, 30, 30, 64) 0 _________________________________________________________________ conv2d_2 (Conv2D) (None, 28, 28, 128) 73856 _________________________________________________________________ batch_normalization_2 (Batch (None, 28, 28, 128) 512 _________________________________________________________________ max_pooling2d_2 (MaxPooling2 (None, 14, 14, 128) 0 _________________________________________________________________ dropout_2 (Dropout) (None, 14, 14, 128) 0 _________________________________________________________________ flatten (Flatten) (None, 25088) 0 _________________________________________________________________ dense (Dense) (None, 128) 3211392 _________________________________________________________________ batch_normalization_3 (Batch (None, 128) 512 _________________________________________________________________ dropout_3 (Dropout) (None, 128) 0 _________________________________________________________________ softmax (Dense) (None, 2) 258 
================================================================= Total params: 3,306,306 Trainable params: 3,305,602 Non-trainable params: 704 _________________________________________________________________
# Training configuration for the baseline CNN.
EP = 50 # Epochs
print('Epochs =',EP)
print('Batch Size ImGen =',BT_ImGen)
Epochs = 50 Batch Size ImGen = 64
# Training call kept commented out: this run reuses previously saved weights.
#history = model.fit(train_images_generator, epochs = EP,
# validation_data = train_images_validate_generator,
# validation_steps = Setp_Size_Train_validate,
# steps_per_epoch = Setp_Size_Train,
# verbose=True,
# callbacks = Callback_list_1)
#plotting_full_axisy_train_metrics(history,save=False,name='fig_dataflair_RMSprop_cb1.png',
# accuracy_curve=True,loss_curve=False)
# NOTE(review): the file name contains a space after 'dataflair_' — confirm intent.
model_name = 'fit1_dataflair_ RMSprop_cb1.h5'
model_format ='h5'
#save_model(model_name, model_format)
# Free the model before the next experiment.
del model
Conclusions :
# Reload the previously saved baseline model to continue the experiment.
model_name = './results/fit1_dataflair_ RMSprop_cb1.h5'
model = load_model(model_name)
EP = 50 # Epochs
print('Epochs =',EP)
print('Batch Size ImGen =',BT_ImGen)
Epochs = 50 Batch Size ImGen = 64
# Second training round kept commented out (results already recorded).
#history = model.fit(train_images_generator, epochs = EP,
# validation_data = train_images_validate_generator,
# validation_steps = Setp_Size_Train_validate,
# steps_per_epoch = Setp_Size_Train,
# verbose=True,
# callbacks = Callback_list_1)
#plotting_full_axisy_train_metrics(history,save=False,name='fig2_dataflair_RMSprop_cb1.png',
# accuracy_curve=True,loss_curve=False)
model_name = 'fit2_dataflair_ RMSprop_cb1.h5'
model_format ='h5'
#save_model(model_name, model_format)
# Free the model before the next experiment.
del model
Conclusions :
# Experiment 2: VGG-style network trained from scratch, Adam @ 1e-4.
model = model_vgg16_fixe()
#compile_model(Optimizer="RMSprop", Learning_rate=None, Opt=False)
compile_model(Optimizer="Adam", Learning_rate=0.0001, Opt=True)
#compile_model(Optimizer="SGD", Learning_rate=None, Opt=False)
Adam & Learning_rate
model.summary()
Model: "sequential_1" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_3 (Conv2D) (None, 128, 128, 64) 1792 _________________________________________________________________ conv2d_4 (Conv2D) (None, 128, 128, 64) 36928 _________________________________________________________________ max_pooling2d_3 (MaxPooling2 (None, 64, 64, 64) 0 _________________________________________________________________ conv2d_5 (Conv2D) (None, 64, 64, 128) 73856 _________________________________________________________________ conv2d_6 (Conv2D) (None, 64, 64, 128) 147584 _________________________________________________________________ max_pooling2d_4 (MaxPooling2 (None, 32, 32, 128) 0 _________________________________________________________________ conv2d_7 (Conv2D) (None, 32, 32, 256) 295168 _________________________________________________________________ conv2d_8 (Conv2D) (None, 32, 32, 256) 590080 _________________________________________________________________ max_pooling2d_5 (MaxPooling2 (None, 16, 16, 256) 0 _________________________________________________________________ conv2d_9 (Conv2D) (None, 16, 16, 512) 1180160 _________________________________________________________________ conv2d_10 (Conv2D) (None, 16, 16, 512) 2359808 _________________________________________________________________ conv2d_11 (Conv2D) (None, 16, 16, 512) 2359808 _________________________________________________________________ max_pooling2d_6 (MaxPooling2 (None, 8, 8, 512) 0 _________________________________________________________________ conv2d_12 (Conv2D) (None, 8, 8, 512) 2359808 _________________________________________________________________ conv2d_13 (Conv2D) (None, 8, 8, 512) 2359808 _________________________________________________________________ conv2d_14 (Conv2D) (None, 8, 8, 512) 2359808 _________________________________________________________________ 
max_pooling2d_7 (MaxPooling2 (None, 4, 4, 512) 0 _________________________________________________________________ flatten_1 (Flatten) (None, 8192) 0 _________________________________________________________________ dense_1 (Dense) (None, 4096) 33558528 _________________________________________________________________ dense_2 (Dense) (None, 4096) 16781312 _________________________________________________________________ softmax (Dense) (None, 2) 8194 ================================================================= Total params: 64,472,642 Trainable params: 64,472,642 Non-trainable params: 0 _________________________________________________________________
# Train the from-scratch VGG for up to 50 epochs with early stopping and
# best-model checkpointing (Callback_list_2).
EP = 50 # Epochs
print('Epochs =',EP)
print('Batch Size ImGen =',BT_ImGen)
Epochs = 50 Batch Size ImGen = 64
history = model.fit(train_images_generator, epochs = EP,
validation_data = train_images_validate_generator,
validation_steps = Setp_Size_Train_validate,
steps_per_epoch = Setp_Size_Train,
verbose=True,
callbacks = Callback_list_2)
# Plot accuracy and loss curves (project helper).
plotting_full_axisy_train_metrics(history,save=False,name='fit1_fig_vgg16_F_Adam&lr_cb2.png',
accuracy_curve=True,loss_curve=True)
# Free the in-memory model; the best checkpoint is reloaded below.
del model
Conclusions :
# Reload the best checkpoint of the from-scratch VGG and evaluate it.
model_name = './results/fit1_vgg16_F_Adam&lr_cb2.h5'
model = load_model(model_name)
model.summary()
Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 128, 128, 64) 1792 _________________________________________________________________ conv2d_1 (Conv2D) (None, 128, 128, 64) 36928 _________________________________________________________________ max_pooling2d (MaxPooling2D) (None, 64, 64, 64) 0 _________________________________________________________________ conv2d_2 (Conv2D) (None, 64, 64, 128) 73856 _________________________________________________________________ conv2d_3 (Conv2D) (None, 64, 64, 128) 147584 _________________________________________________________________ max_pooling2d_1 (MaxPooling2 (None, 32, 32, 128) 0 _________________________________________________________________ conv2d_4 (Conv2D) (None, 32, 32, 256) 295168 _________________________________________________________________ conv2d_5 (Conv2D) (None, 32, 32, 256) 590080 _________________________________________________________________ max_pooling2d_2 (MaxPooling2 (None, 16, 16, 256) 0 _________________________________________________________________ conv2d_6 (Conv2D) (None, 16, 16, 512) 1180160 _________________________________________________________________ conv2d_7 (Conv2D) (None, 16, 16, 512) 2359808 _________________________________________________________________ conv2d_8 (Conv2D) (None, 16, 16, 512) 2359808 _________________________________________________________________ max_pooling2d_3 (MaxPooling2 (None, 8, 8, 512) 0 _________________________________________________________________ conv2d_9 (Conv2D) (None, 8, 8, 512) 2359808 _________________________________________________________________ conv2d_10 (Conv2D) (None, 8, 8, 512) 2359808 _________________________________________________________________ conv2d_11 (Conv2D) (None, 8, 8, 512) 2359808 _________________________________________________________________ max_pooling2d_4 
(MaxPooling2 (None, 4, 4, 512) 0 _________________________________________________________________ flatten (Flatten) (None, 8192) 0 _________________________________________________________________ dense (Dense) (None, 4096) 33558528 _________________________________________________________________ dense_1 (Dense) (None, 4096) 16781312 _________________________________________________________________ dense_2 (Dense) (None, 2) 8194 ================================================================= Total params: 64,472,642 Trainable params: 64,472,642 Non-trainable params: 0 _________________________________________________________________
# NOTE(review): this evaluates the *train* validation generator but for only
# Setp_Size_Test_validate (15) steps — 960 of 4800 images; the hold-out
# generator (test_images_validate_generator) was likely intended. Confirm.
results = model.evaluate(train_images_validate_generator, steps=Setp_Size_Test_validate, verbose=True)
print('test_loss, test_accuracy', results)
print('test_accuracy = {t1:3.2f} %'.format(t1=results[1]*100.0))
print('test_loss = {t1:3.2f} %'.format(t1=results[0]*100.0))
15/15 [==============================] - 69s 5s/step - loss: 0.1081 - accuracy: 0.9573 test_loss, test_accuracy [0.10809040069580078, 0.9572916626930237] test_accuracy = 95.73 % test_loss = 10.81 %
With the model trained, you can use it to make predictions about some images.
# Rewind the (unshuffled) hold-out generator so predictions align with
# ``filenames``, then predict 15 full batches.
# NOTE(review): steps=15 covers 960 of the 1000 images (floor division
# above dropped the last partial batch) — shape (960, 2) below confirms.
test_images_validate_generator.reset()
predictions = model.predict(test_images_validate_generator, steps=Setp_Size_Test_validate, verbose=True)
predictions.shape
15/15 [==============================] - 67s 4s/step
(960, 2)
accuracy of the prediction first img in %
# Decode the first prediction and compare it with the filename's prefix.
print('validated at = {t:3.2f} %'.format(t= np.max(predictions[0]*100)))
img_org = test_images_validate_generator.filenames[0]
predict_class = targets_names[np.argmax(predictions[0])]
true_class = test_images_validate_generator.filenames[0].split('.')[0]
print('predict Class = ', predict_class,' - true Class is = ', true_class,' - filename =',img_org)
validated at = 100.00 % predict Class = Cat - true Class is = cat - filename = cat.12381.jpg
First Predicted Img
# Visualise the first hold-out prediction and a grid of sample images.
show_img_predict(test_images_validate_generator, targets_names)
Predictions about some images.
img_predict_n(test_images_validate_generator, predictions, targets_names)
# Free the from-scratch VGG before building the transfer-learning model.
del model
# Experiment 3: frozen pretrained VGG16 backbone + small trainable head.
model = model_vgg16_adap(Images_With, Images_Height, Images_channel)
#compile_model(Optimizer="RMSprop", Learning_rate=None, Opt=False)
compile_model(Optimizer="Adam", Learning_rate=0.0001, Opt=True)
#compile_model(Optimizer="SGD", Learning_rate=None, Opt=False)
Adam & Learning_rate
model.summary()
Model: "functional_1" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= input_1 (InputLayer) [(None, 128, 128, 3)] 0 _________________________________________________________________ block1_conv1 (Conv2D) (None, 128, 128, 64) 1792 _________________________________________________________________ block1_conv2 (Conv2D) (None, 128, 128, 64) 36928 _________________________________________________________________ block1_pool (MaxPooling2D) (None, 64, 64, 64) 0 _________________________________________________________________ block2_conv1 (Conv2D) (None, 64, 64, 128) 73856 _________________________________________________________________ block2_conv2 (Conv2D) (None, 64, 64, 128) 147584 _________________________________________________________________ block2_pool (MaxPooling2D) (None, 32, 32, 128) 0 _________________________________________________________________ block3_conv1 (Conv2D) (None, 32, 32, 256) 295168 _________________________________________________________________ block3_conv2 (Conv2D) (None, 32, 32, 256) 590080 _________________________________________________________________ block3_conv3 (Conv2D) (None, 32, 32, 256) 590080 _________________________________________________________________ block3_pool (MaxPooling2D) (None, 16, 16, 256) 0 _________________________________________________________________ block4_conv1 (Conv2D) (None, 16, 16, 512) 1180160 _________________________________________________________________ block4_conv2 (Conv2D) (None, 16, 16, 512) 2359808 _________________________________________________________________ block4_conv3 (Conv2D) (None, 16, 16, 512) 2359808 _________________________________________________________________ block4_pool (MaxPooling2D) (None, 8, 8, 512) 0 _________________________________________________________________ block5_conv1 (Conv2D) (None, 8, 8, 512) 2359808 
_________________________________________________________________ block5_conv2 (Conv2D) (None, 8, 8, 512) 2359808 _________________________________________________________________ block5_conv3 (Conv2D) (None, 8, 8, 512) 2359808 _________________________________________________________________ block5_pool (MaxPooling2D) (None, 4, 4, 512) 0 _________________________________________________________________ flatten (Flatten) (None, 8192) 0 _________________________________________________________________ dense_1 (Dense) (None, 128) 1048704 _________________________________________________________________ softmax (Dense) (None, 2) 258 ================================================================= Total params: 15,763,650 Trainable params: 1,048,962 Non-trainable params: 14,714,688 _________________________________________________________________
# Train the transfer-learning model (only the head is trainable).
EP = 50 # Epochs
print('Epochs =',EP)
print('Batch Size ImGen =',BT_ImGen)
Epochs = 50 Batch Size ImGen = 64
history = model.fit(train_images_generator, epochs = EP,
validation_data = train_images_validate_generator,
validation_steps = Setp_Size_Train_validate,
steps_per_epoch = Setp_Size_Train,
verbose=True,
callbacks = Callback_list_2)
# Plot accuracy and loss curves (project helper).
plotting_full_axisy_train_metrics(history,save=False,name='fit1_fig_vgg16_K_Adam&lr_cb2.png',
accuracy_curve=True,loss_curve=True)
# Free the in-memory model; the best checkpoint is reloaded below.
del model
Conclusions :
# Reload the best transfer-learning checkpoint and evaluate it.
model_name = './results/fit1_vgg16_K_Adam&lr_cb2.h5'
model = load_model(model_name)
model.summary()
Model: "model" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= input_1 (InputLayer) [(None, 128, 128, 3)] 0 _________________________________________________________________ block1_conv1 (Conv2D) (None, 128, 128, 64) 1792 _________________________________________________________________ block1_conv2 (Conv2D) (None, 128, 128, 64) 36928 _________________________________________________________________ block1_pool (MaxPooling2D) (None, 64, 64, 64) 0 _________________________________________________________________ block2_conv1 (Conv2D) (None, 64, 64, 128) 73856 _________________________________________________________________ block2_conv2 (Conv2D) (None, 64, 64, 128) 147584 _________________________________________________________________ block2_pool (MaxPooling2D) (None, 32, 32, 128) 0 _________________________________________________________________ block3_conv1 (Conv2D) (None, 32, 32, 256) 295168 _________________________________________________________________ block3_conv2 (Conv2D) (None, 32, 32, 256) 590080 _________________________________________________________________ block3_conv3 (Conv2D) (None, 32, 32, 256) 590080 _________________________________________________________________ block3_pool (MaxPooling2D) (None, 16, 16, 256) 0 _________________________________________________________________ block4_conv1 (Conv2D) (None, 16, 16, 512) 1180160 _________________________________________________________________ block4_conv2 (Conv2D) (None, 16, 16, 512) 2359808 _________________________________________________________________ block4_conv3 (Conv2D) (None, 16, 16, 512) 2359808 _________________________________________________________________ block4_pool (MaxPooling2D) (None, 8, 8, 512) 0 _________________________________________________________________ block5_conv1 (Conv2D) (None, 8, 8, 512) 2359808 
_________________________________________________________________ block5_conv2 (Conv2D) (None, 8, 8, 512) 2359808 _________________________________________________________________ block5_conv3 (Conv2D) (None, 8, 8, 512) 2359808 _________________________________________________________________ block5_pool (MaxPooling2D) (None, 4, 4, 512) 0 _________________________________________________________________ flatten (Flatten) (None, 8192) 0 _________________________________________________________________ dense_1 (Dense) (None, 128) 1048704 _________________________________________________________________ softmax (Dense) (None, 2) 258 ================================================================= Total params: 15,763,650 Trainable params: 1,048,962 Non-trainable params: 14,714,688 _________________________________________________________________
# NOTE(review): same issue as above — evaluates the *train* validation
# generator for only 15 steps; the hold-out generator was likely intended.
results = model.evaluate(train_images_validate_generator, steps=Setp_Size_Test_validate, verbose=True)
print('test_loss, test_accuracy', results)
print('test_accuracy = {t1:3.2f} %'.format(t1=results[1]*100.0))
print('test_loss = {t1:3.2f} %'.format(t1=results[0]*100.0))
15/15 [==============================] - 78s 5s/step - loss: 0.1720 - accuracy: 0.9219 test_loss, test_accuracy [0.1720123291015625, 0.921875] test_accuracy = 92.19 % test_loss = 17.20 %
With the model trained, you can use it to make predictions about some images.
# Rewind the hold-out generator and predict 15 full batches (960 of the
# 1000 images — the last partial batch is dropped, see shape (960, 2)).
test_images_validate_generator.reset()
predictions = model.predict(test_images_validate_generator, steps=Setp_Size_Test_validate, verbose=True)
predictions.shape
15/15 [==============================] - 74s 5s/step
(960, 2)
accuracy of the prediction first img in %
# Decode the first prediction and compare it with the filename's prefix.
print('validated at = {t:3.2f} %'.format(t= np.max(predictions[0]*100)))
img_org = test_images_validate_generator.filenames[0]
predict_class = targets_names[np.argmax(predictions[0])]
true_class = test_images_validate_generator.filenames[0].split('.')[0]
print('predict Class = ', predict_class,' - true Class is = ', true_class,' - filename =',img_org)
validated at = 99.99 % predict Class = Cat - true Class is = cat - filename = cat.12381.jpg
First Predicted Img
# Visualise the first prediction and a grid of sample predictions.
show_img_predict(test_images_validate_generator, targets_names)
Predictions about some images.
img_predict_n(test_images_validate_generator, predictions, targets_names)