import sys

sys.path.append('c:/git/keras')
sys.path.append('c:/git/absl')

# installed
# py -mpip install numpy
# py -mpip show numpy
# py -mpip install tensorflow
# py -mpip show tensorflow
# py -mpip install matplotlib
# c:\users\skess\appdata\local\programs\python\python39\lib\site-packages

import numpy as np
import tensorflow
from matplotlib import pyplot
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import AveragePooling2D
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras import regularizers

# VGG16 ARCHITECTURE
image_size = (128, 128)
model_name = 'vgg16'
model_weights_file = 'model_vgg16.h5'

# Training split: 80% of the images, read as 128x128 grayscale with binary labels.
train_ds = tensorflow.keras.preprocessing.image_dataset_from_directory(
    'C:\\boneyard\\DeepLearning\\data',
    label_mode="binary",
    subset="training",
    validation_split=.20,
    image_size=image_size,
    color_mode='grayscale',
    batch_size=32,
    seed=50
)

# Validation split: the remaining 20%; the same seed keeps the two subsets disjoint.
val_ds = tensorflow.keras.preprocessing.image_dataset_from_directory(
    'C:\\boneyard\\DeepLearning\\data',
    label_mode="binary",
    subset="validation",
    validation_split=.20,
    image_size=image_size,
    color_mode='grayscale',
    batch_size=32,
    seed=50
)
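# Note (an assumption, not wired into the pipeline below):
# image_dataset_from_directory yields raw pixel values in [0, 255]. A common
# refinement is to map inputs to [0, 1] by making a Rescaling layer the first
# layer of the model, e.g. (available as tensorflow.keras.layers.Rescaling in
# TF >= 2.6):
#   model.add(tensorflow.keras.layers.Rescaling(1.0 / 255, input_shape=input_shape))
# This is only a sketch; the script below feeds unscaled pixels to CONV1.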
# Hyperparameters
# base_hidden_units=32
weight_decay = .005
input_shape = (128, 128, 1)
base_filters = 32
learning_rate = .00001
batch_size = 256  # unused with dataset inputs; batching is fixed at 32 above
epochs = 200

model = Sequential()

# Build the network following the VGG16 layout: 13 convolutional layers in five
# blocks, a pooling layer after each block, then two fully connected layers and
# a sigmoid output for binary classification.

# CONV1
model.add(Conv2D(filters=base_filters, kernel_size=(3, 3), padding='same',
                 kernel_regularizer=regularizers.l2(weight_decay),
                 input_shape=input_shape, activation='relu'))
model.add(BatchNormalization())
# CONV2
model.add(Conv2D(filters=base_filters, kernel_size=(3, 3), padding='same',
                 kernel_regularizer=regularizers.l2(weight_decay), activation='relu'))
model.add(BatchNormalization())
# POOL + Dropout
model.add(AveragePooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
model.add(Dropout(0.2))

# CONV3
model.add(Conv2D(filters=base_filters * 2, kernel_size=(3, 3), padding='same',
                 kernel_regularizer=regularizers.l2(weight_decay), activation='relu'))
model.add(BatchNormalization())
# CONV4
model.add(Conv2D(filters=base_filters * 2, kernel_size=(3, 3), padding='same',
                 kernel_regularizer=regularizers.l2(weight_decay), activation='relu'))
model.add(BatchNormalization())
# POOL
model.add(AveragePooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))

# CONV5
model.add(Conv2D(filters=base_filters * 4, kernel_size=(3, 3), padding='same',
                 kernel_regularizer=regularizers.l2(weight_decay), activation='relu'))
model.add(BatchNormalization())
# CONV6
model.add(Conv2D(filters=base_filters * 4, kernel_size=(3, 3), padding='same',
                 kernel_regularizer=regularizers.l2(weight_decay), activation='relu'))
model.add(BatchNormalization())
# CONV7
model.add(Conv2D(filters=base_filters * 4, kernel_size=(3, 3), padding='same',
                 kernel_regularizer=regularizers.l2(weight_decay), activation='relu'))
model.add(BatchNormalization())
# POOL
model.add(AveragePooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))

# CONV8
model.add(Conv2D(filters=base_filters * 8, kernel_size=(3, 3), padding='same',
                 kernel_regularizer=regularizers.l2(weight_decay), activation='relu'))
model.add(BatchNormalization())
# CONV9
model.add(Conv2D(filters=base_filters * 8, kernel_size=(3, 3), padding='same',
                 kernel_regularizer=regularizers.l2(weight_decay), activation='relu'))
model.add(BatchNormalization())
# CONV10
model.add(Conv2D(filters=base_filters * 8, kernel_size=(3, 3), padding='same',
                 kernel_regularizer=regularizers.l2(weight_decay), activation='relu'))
model.add(BatchNormalization())
# POOL
model.add(AveragePooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))

# CONV11
model.add(Conv2D(filters=base_filters * 8, kernel_size=(3, 3), padding='same',
                 kernel_regularizer=regularizers.l2(weight_decay), activation='relu'))
model.add(BatchNormalization())
# CONV12
model.add(Conv2D(filters=base_filters * 8, kernel_size=(3, 3), padding='same',
                 kernel_regularizer=regularizers.l2(weight_decay), activation='relu'))
model.add(BatchNormalization())
# CONV13
model.add(Conv2D(filters=base_filters * 8, kernel_size=(3, 3), padding='same',
                 kernel_regularizer=regularizers.l2(weight_decay), activation='relu'))
model.add(BatchNormalization())
# POOL
model.add(AveragePooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))

# Flatten
model.add(Flatten())
# FC14
model.add(Dense(4096, activation='relu'))
# DROPOUT
model.add(Dropout(.50))
# FC15
model.add(Dense(4096, activation='relu'))
# DROPOUT
model.add(Dropout(.50))
# SIGMOID output
model.add(Dense(1, activation='sigmoid'))

model.summary()

# File path to save the weights. Only save when validation loss improves.
checkpointer = ModelCheckpoint(filepath=model_weights_file, verbose=1, save_best_only=True)
# earlystopping = EarlyStopping(monitor='val_loss', patience=5)

# Adam optimizer with the learning rate set above (1e-5)
optimizer = Adam(learning_rate=learning_rate)

# Binary cross-entropy loss for the single sigmoid output
# model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
# model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
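# Sketch (an optional refinement, disabled here): training could stop
# automatically once validation loss stalls by enabling the EarlyStopping
# callback above and passing it to fit alongside the checkpointer, e.g.:
#   earlystopping = EarlyStopping(monitor='val_loss', patience=5,
#                                 restore_best_weights=True)
#   history = model.fit(train_ds, epochs=epochs, validation_data=val_ds,
#                       verbose=2, callbacks=[checkpointer, earlystopping])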
# Train the model. Batches stream from the tf.data pipeline (the batch size was
# fixed at 32 when the datasets were built, so no batch_size argument is passed
# here; Keras rejects it for dataset inputs). The checkpointer callback saves
# the model weights; other callbacks, such as early stopping, can be added.
history = model.fit(train_ds, epochs=epochs, validation_data=val_ds,
                    verbose=2, callbacks=[checkpointer])

# Evaluate the final-epoch weights on the validation set.
score = model.evaluate(val_ds, verbose=1)
print('\n', 'Validation Accuracy:', score[1])

model.save(model_name)

# Load the best weights saved during the fitting exercise and report the
# accuracy of that model.
model.load_weights(model_weights_file)
score = model.evaluate(val_ds, verbose=1)
print('\n', 'Validation Accuracy (best checkpoint):', score[1])

# Plot the learning curves
pyplot.plot(history.history['accuracy'], label='train')
pyplot.plot(history.history['val_accuracy'], label='validation')
pyplot.legend()
pyplot.show()
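# Sketch (reconstructed from commented-out scratch in the original; it uses the
# validation set, since no separate test batch is defined in this script): the
# sigmoid output is a probability, so hard 0/1 class predictions come from
# thresholding at 0.5. Shown here on a single batch:
for images, labels in val_ds.take(1):
    probs = model.predict(images)
    preds = np.where(probs > 0.5, 1, 0)
    print(preds.flatten())
    print(labels.numpy().flatten())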