# CNN/Models/model_sk_vgg16.py
import os
from time import time

import numpy as np  # used by the commented-out thresholding example below
import tensorflow
import keras
from keras import regularizers
from keras.callbacks import ModelCheckpoint
# Conv2D and BatchNormalization were previously picked up only implicitly via
# "from inception_module import *"; import everything the model uses explicitly.
from keras.layers import (AveragePooling2D, BatchNormalization, Conv2D,
                          Dense, Dropout, Flatten)
from keras.models import Sequential
from tensorflow.keras.callbacks import EarlyStopping, TensorBoard
# VGG16 ARCHITECTURE
image_size = (128, 128)
model_weights_file = 'vggnet16.h5.keras'
data_dir = 'C:\\boneyard\\DeepLearning\\data'
# Both calls must use the same seed and validation_split so the training and
# validation subsets partition the data consistently.
train_ds = tensorflow.keras.preprocessing.image_dataset_from_directory(
    data_dir,
    label_mode="binary",
    subset="training",
    validation_split=0.20,
    image_size=image_size,
    color_mode='grayscale',
    batch_size=32,
    seed=50
)
val_ds = tensorflow.keras.preprocessing.image_dataset_from_directory(
    data_dir,
    label_mode="binary",
    subset="validation",
    validation_split=0.20,
    image_size=image_size,
    color_mode='grayscale',
    batch_size=32,
    seed=50
)
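# Optional input-pipeline tweak (an addition, not in the original script):
# cache decoded images and prefetch batches so file I/O overlaps with
# training. This affects throughput only, not the learned model.
AUTOTUNE = tensorflow.data.AUTOTUNE
train_ds = train_ds.cache().prefetch(AUTOTUNE)
val_ds = val_ds.cache().prefetch(AUTOTUNE)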
weight_decay = .005
model = Sequential()
input_shape = (128, 128, 1)
base_filters = 32
learning_rate = .00001
epochs = 200
patience_on_early_stop = 5
# Note: the batch size is fixed to 32 in image_dataset_from_directory above;
# model.fit() must not be passed batch_size when training on a tf.data Dataset.
# Create a TensorBoard instance with the path to the logs directory. Before training, in Terminal, run tensorboard --logdir=logs/
tensorboard = TensorBoard(log_dir='logs/{}'.format(time()))
# Build a VGG16-style network: 13 convolutional layers in five blocks, with a
# pooling layer after each block, followed by two fully connected layers.
# Unlike the original VGG16, this variant uses average pooling instead of max pooling.
#CONV1
model.add(Conv2D(filters=base_filters,kernel_size=(3,3),padding='same',kernel_regularizer=regularizers.l2(weight_decay), input_shape=input_shape,activation='relu'))
model.add(BatchNormalization())
#CONV2
model.add(Conv2D(filters=base_filters,kernel_size=(3,3),padding='same',kernel_regularizer=regularizers.l2(weight_decay),activation='relu'))
model.add(BatchNormalization())
#POOL + Dropout
model.add(AveragePooling2D(pool_size=(2,2),strides=(2,2),padding='same'))
model.add(Dropout(0.2))
#CONV3
model.add(Conv2D(filters=base_filters*2,kernel_size=(3,3),padding='same',kernel_regularizer=regularizers.l2(weight_decay),activation='relu'))
model.add(BatchNormalization())
#CONV4
model.add(Conv2D(filters=base_filters*2,kernel_size=(3,3),padding='same',kernel_regularizer=regularizers.l2(weight_decay),activation='relu'))
model.add(BatchNormalization())
#POOL
model.add(AveragePooling2D(pool_size=(2,2),strides=(2,2),padding='same'))
#CONV5
model.add(Conv2D(filters=base_filters*4,kernel_size=(3,3),padding='same',kernel_regularizer=regularizers.l2(weight_decay),activation='relu'))
model.add(BatchNormalization())
#CONV6
model.add(Conv2D(filters=base_filters*4,kernel_size=(3,3),padding='same',kernel_regularizer=regularizers.l2(weight_decay),activation='relu'))
model.add(BatchNormalization())
#CONV7
model.add(Conv2D(filters=base_filters*4,kernel_size=(3,3),padding='same',kernel_regularizer=regularizers.l2(weight_decay),activation='relu'))
model.add(BatchNormalization())
#POOL
model.add(AveragePooling2D(pool_size=(2,2),strides=(2,2),padding='same'))
#CONV8
model.add(Conv2D(filters=base_filters*8,kernel_size=(3,3),padding='same',kernel_regularizer=regularizers.l2(weight_decay),activation='relu'))
model.add(BatchNormalization())
#CONV9
model.add(Conv2D(filters=base_filters*8,kernel_size=(3,3),padding='same',kernel_regularizer=regularizers.l2(weight_decay),activation='relu'))
model.add(BatchNormalization())
#CONV10
model.add(Conv2D(filters=base_filters*8,kernel_size=(3,3),padding='same',kernel_regularizer=regularizers.l2(weight_decay),activation='relu'))
model.add(BatchNormalization())
#POOL
model.add(AveragePooling2D(pool_size=(2,2),strides=(2,2),padding='same'))
#CONV11
model.add(Conv2D(filters=base_filters*8,kernel_size=(3,3),padding='same',kernel_regularizer=regularizers.l2(weight_decay),activation='relu'))
model.add(BatchNormalization())
#CONV12
model.add(Conv2D(filters=base_filters*8,kernel_size=(3,3),padding='same',kernel_regularizer=regularizers.l2(weight_decay),activation='relu'))
model.add(BatchNormalization())
#CONV13
model.add(Conv2D(filters=base_filters*8,kernel_size=(3,3),padding='same',kernel_regularizer=regularizers.l2(weight_decay),activation='relu'))
model.add(BatchNormalization())
#POOL
model.add(AveragePooling2D(pool_size=(2,2),strides=(2,2),padding='same'))
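# Shape check (worked arithmetic): five stride-2 poolings reduce the 128x128
# input to 4x4, and the final blocks have base_filters*8 = 256 channels, so
# Flatten below emits 4*4*256 = 4096 features per image.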
#Flatten
model.add(Flatten())
#FC14
model.add(Dense(4096,activation='relu'))
#DROPOUT
model.add(Dropout(.50))
#FC15
model.add(Dense(4096,activation='relu'))
#DROPOUT
model.add(Dropout(.50))
#SIGMOID
model.add(Dense(1,activation='sigmoid'))
model.summary()
# Checkpoint to model_weights_file; save the full model only when the
# monitored validation loss improves.
checkpointer = ModelCheckpoint(filepath=model_weights_file, verbose=1, save_best_only=True)
#Early Stopping
early_stopping=EarlyStopping(monitor='val_loss',mode='min',verbose=1,patience=patience_on_early_stop)
# Adam optimizer with learning rate .00001 (set via learning_rate above)
optimizer = keras.optimizers.Adam(learning_rate=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
# Binary cross-entropy loss to match the single sigmoid output
model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
# The legacy ImageDataGenerator/fit_generator path below did real-time data
# augmentation on the CPU in parallel with training on the GPU; it is kept
# only for reference. The checkpointer callback saves the best model weights,
# and other callbacks, such as early stopping, can be added alongside it.
# history=model.fit_generator(dataGen.flow(x_train,y_train,batch_size=batch_size),callbacks=[checkpointer],steps_per_epoch=x_train.shape[0] //
# batch_size,epochs=epochs,verbose=2,validation_data=(x_valid,y_valid))
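# A minimal tf.data stand-in for the legacy augmentation above (an assumed
# addition, not part of the original pipeline): random horizontal flips and
# small rotations applied on the fly. Uncomment the map() line to enable it.
augmenter = tensorflow.keras.Sequential([
    tensorflow.keras.layers.RandomFlip('horizontal'),
    tensorflow.keras.layers.RandomRotation(0.05),
])
# train_ds = train_ds.map(lambda x, y: (augmenter(x, training=True), y))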
history = model.fit(train_ds, epochs=epochs, validation_data=val_ds, verbose=2, callbacks=[early_stopping, tensorboard, checkpointer])
# Evaluation: load the best weights saved during the fitting exercise and
# report the model's accuracy. The x_test/y_test and batch_test_array names
# below are placeholders for a held-out test set this script does not build.
# model.load_weights(model_weights_file)
# score=model.evaluate(val_ds,verbose=1)
# print('\n', 'Validation Accuracy:', score[1])
# scores=model.evaluate(x_test,y_test,batch_size=128,verbose=1)
# print('\nTest result: %.3f loss: %.3f' % (scores[1]*100,scores[0]))
# predictions=model.predict(batch_test_array)
# print(predictions)
# threshold_output = np.where(predictions > 0.5, 1, 0)
# print(threshold_output)
# Plot the learning curves (requires: from matplotlib import pyplot)
# pyplot.plot(history.history['accuracy'],label='train')
# pyplot.plot(history.history['val_accuracy'],label='validation')
# pyplot.legend()
# pyplot.show()