Updated all models October 2024
@@ -1,49 +1,32 @@
import sys
import os

from keras.layers.pooling import MaxPool2D

sys.path.append('c:/git/keras')
sys.path.append('c:/git/absl')

# installed
# py -mpip install numpy
# py -mpip show numpy
# py -mpip install tensorflow
# py -mpip show tensorflow
# py -mpip install matplotlib
# c:\users\skess\appdata\local\programs\python\python39\lib\site-packages

from keras.optimizers import SGD
from keras.optimizers import Adam
from tensorflow.keras.callbacks import TensorBoard
import keras
from keras.optimizers import adam_v2
from tensorflow.keras.optimizers import Adam
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.utils import np_utils
from keras.layers import Dense
from keras.layers import Activation
from keras.layers import Flatten
from keras.layers import Dropout
from keras.layers import BatchNormalization
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import AveragePooling2D
from keras.src.legacy.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint
from tensorflow.keras.callbacks import EarlyStopping
from keras import regularizers
from keras import optimizers
from matplotlib import pyplot
from keras.layers import Flatten
from keras.layers import Dense
from keras.layers import MaxPool2D
from keras.layers import Dropout
from keras.models import Model
from keras.layers import AveragePooling2D
from keras.models import Sequential
import numpy as np
import tensorflow

from inception_module import *
import math
from time import time

# VGG 16 ARCHITECTURE

image_size = (128, 128)

model_name='vgg16'
model_weights_file='model_vgg16.h5'
#model_name='vgg16'
model_weights_file='model_vgg16.h5.keras'

train_ds = tensorflow.keras.preprocessing.image_dataset_from_directory(
    'C:\\boneyard\\DeepLearning\\data',
@@ -76,6 +59,9 @@ learning_rate=.00001
batch_size=256
epochs=200

# Create a TensorBoard instance with the path to the logs directory. Before training, run tensorboard --logdir=logs/ in a terminal.
tensorboard = TensorBoard(log_dir='logs/{}'.format(time()))
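# Once training is running, TensorBoard serves its dashboards at http://localhost:6006 by default.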

# Build the network based on AlexNet, but using 6 convolutional layers and 1 fully connected layer.
# Inspired by VGGNet, we will add a pooling layer after every two convolutional layers.
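# The layer stack itself sits outside this hunk. As a rough sketch of the description
# above -- six Conv2D layers, a pooling layer after every pair, then one fully
# connected layer before the sigmoid output -- it could look like the following;
# the filter counts and kernel sizes are illustrative assumptions, not the exact
# values in this file:
#
#   model = Sequential()
#   model.add(Conv2D(64, (3, 3), activation='relu', padding='same', input_shape=(128, 128, 3)))
#   model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
#   model.add(MaxPooling2D(pool_size=(2, 2)))
#   model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
#   model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
#   model.add(MaxPooling2D(pool_size=(2, 2)))
#   model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
#   model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
#   model.add(MaxPooling2D(pool_size=(2, 2)))
#   model.add(Flatten())
#   model.add(Dense(256, activation='relu'))
#   model.add(Dropout(.50))
#   model.add(Dense(1, activation='sigmoid'))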

@@ -165,11 +151,8 @@ model.add(Dropout(.50))
#SIGMOID
model.add(Dense(1,activation='sigmoid'))


model.summary()


# File path for saving the model. Only save the weights when there is an improvement.
checkpointer=ModelCheckpoint(filepath=model_weights_file,verbose=1,save_best_only=True)
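# EarlyStopping is imported above but never used; as a sketch (the patience value is
# an assumption, not from this file), it could run alongside the checkpointer:
#
#   early_stopping = EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=True)
#   # ...then pass callbacks=[checkpointer, early_stopping] to model.fit()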

@@ -178,7 +161,9 @@ checkpointer=ModelCheckpoint(filepath=model_weights_file,verbose=1,save_best_onl
# Adam optimizer with the learning rate set above
#optimizer=adam_v2.Adam(learning_rate=10e-6)
#optimizer=adam_v2.Adam(learning_rate=10e-6)
optimizer=adam_v2.Adam(learning_rate=learning_rate)
#optimizer=adam_v2.Adam(learning_rate=learning_rate)
#Optimizer
optimizer = keras.optimizers.Adam(learning_rate=learning_rate, beta_1=0.9, beta_2=0.999,epsilon=1e-8)
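# Note: 10e-6 in the commented lines above is 1e-5, not 1e-6. If both uncommented
# assignments were kept, the later keras.optimizers.Adam instance would be the one
# compiled in; its beta_1/beta_2 match the Keras defaults, while epsilon=1e-8 is
# slightly smaller than the default 1e-7.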

# Cross entropy loss function
#model.compile(loss='categorical_crossentropy', optimizer=optimizer,metrics=['accuracy'])
@@ -190,17 +175,17 @@ model.compile(loss='binary_crossentropy', optimizer=optimizer,metrics=['accuracy
# The checkpointer callback saves the model weights. Other callbacks can be added, like a stopping function.
# history=model.fit_generator(dataGen.flow(x_train,y_train,batch_size=batch_size),callbacks=[checkpointer],steps_per_epoch=x_train.shape[0] //
# batch_size,epochs=epochs,verbose=2,validation_data=(x_valid,y_valid))
history=model.fit(train_ds, epochs=epochs, validation_data=val_ds,batch_size=batch_size,verbose=2,callbacks=[checkpointer])
history=model.fit(train_ds, epochs=epochs, validation_data=val_ds,batch_size=batch_size,verbose=2,callbacks=[tensorboard,checkpointer])
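# Note: the two fit() calls above read as the before/after of this change; only one
# should be active. Also, batch_size is redundant when fitting on a tf.data dataset
# such as train_ds -- the dataset is already batched, and recent Keras versions
# reject the argument for dataset inputs.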

# Here is the evaluation part
score=model.evaluate(x=train_ds,verbose=1)
print('\n', 'Test Accuracy:', score[1])
model.save(model_name)
# score=model.evaluate(x=train_ds,verbose=1)
# print('\n', 'Test Accuracy:', score[1])
# model.save(model_name)
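# Caveat: evaluating on train_ds measures accuracy on the training data, so the
# 'Test Accuracy' label is optimistic; passing the held-out val_ds (or a separate
# test split) to model.evaluate() would give the fairer number.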

# This loads the best weights we saved from the fitting exercise and displays the accuracy of the model
model.load_weights(model_weights_file)
score=model.evaluate(train_ds,verbose=1)
print('\n', 'Test Accuracy:', score[1])
# model.load_weights(model_weights_file)
# score=model.evaluate(train_ds,verbose=1)
# print('\n', 'Test Accuracy:', score[1])

# scores=model.evaluate(x_test,y_test,batch_size=128,verbose=1)
# print('\nTest result: %.3f loss: %.3f' % (scores[1]*100,scores[0]))
@@ -211,10 +196,10 @@ print('\n', 'Test Accuracy:', score[1])
# print(threshold_output)
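# The commented print(threshold_output) above suggests turning the sigmoid scores
# into hard 0/1 labels. A sketch, with the 0.5 cutoff as an assumption:
#
#   probs = model.predict(val_ds)
#   threshold_output = (probs > 0.5).astype('int32')
#   print(threshold_output)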

# plot the learning curves
pyplot.plot(history.history['accuracy'],label='train')
pyplot.plot(history.history['val_accuracy'],label='validation')
pyplot.legend()
pyplot.show()
# pyplot.plot(history.history['accuracy'],label='train')
# pyplot.plot(history.history['val_accuracy'],label='validation')
# pyplot.legend()
# pyplot.show()
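# The same pattern plots the loss curves that the History object records:
#
#   pyplot.plot(history.history['loss'], label='train')
#   pyplot.plot(history.history['val_loss'], label='validation')
#   pyplot.legend()
#   pyplot.show()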