Compare commits
3 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 0b41f82896 | |||
| 0a34130ebe | |||
| 548143cbfc |
@@ -11,13 +11,24 @@ import matplotlib.pyplot as plt
|
||||
# This model was trained with 13,185 images
|
||||
# See CNNImageProcessor solution for creating the test images for training this model
|
||||
# **************************************** I M P O R T A N T ********************************************************
|
||||
# There is a shell script in the Scripts folder. setup_tf_gpu.sh Copy the script to the CNN folder and run it. It will
|
||||
# create the venv environment and install python 3.10 and tensorflow (gpu)
|
||||
# to start the environment "source tf_gpu/bin/activate"
|
||||
# then type "code ."
|
||||
# Train the model on EUPORIE laptop using the GPU card with WSL2. (Windows Subsystem For Linux). I am running Ubuntu 22.04.2
|
||||
# To launch WSL open up a command prompt, run powershell and type "wsl".
|
||||
# The folder structure will be /home/pi/CNN.
|
||||
# You can access the folder structure through windows explorer. type "\\wsl$" in explorer and navigate to the folder.
|
||||
# drop in the Data and Model and run the model
|
||||
# There is a shell script in the Scripts folder. setup_tf_gpu.sh Copy the script to the CNN folder and run it. It will
|
||||
# create the venv environment and install python 3.10 and tensorflow (gpu)
|
||||
#
|
||||
# If you want to run the model_host.py from the WSL2 environment (recommended for backtesting speed) you first need to get the address that was assigned to the
|
||||
# WSL environment. On the WSL instance type "hostname -I". Next, from the host environment you need to forward port requests
|
||||
# on port 5000 (Flask Listener Port) to the WSL environment. Here is how to do that. Predictions on the WSL environment
|
||||
# run considerably faster than on the host because the WSL environment is able to utilize the GPU.
|
||||
# {To Create a Port Forward} netsh interface portproxy add v4tov4 listenport=5000 listenaddress=0.0.0.0 connectport=5000 connectaddress=172.29.110.64
|
||||
# {To List Port Forwards} netsh interface portproxy show all
|
||||
# {To Remove the Port Forward} netsh interface portproxy delete v4tov4 listenport=5000 listenaddress=0.0.0.0
|
||||
# hostname -I
|
||||
# ********************************************************************************************************************
|
||||
|
||||
# Figure out if we are training in CPU or GPU
|
||||
@@ -39,7 +50,7 @@ tensorboard = TensorBoard(log_dir=log_dir)
|
||||
# Configuration
|
||||
# -----------------------
|
||||
shuffle_count=3000
|
||||
dataset_path = 'C:\\boneyard\\DeepLearning\\data'
|
||||
dataset_path = '/home/pi/DeepLearning/Data'
|
||||
image_size = (actualImageDimension, actualImageDimension)
|
||||
batch_size = 16 # try 16 was 32
|
||||
image_size=(actualImageDimension, actualImageDimension)
|
||||
@@ -74,22 +85,7 @@ val_ds = tf.keras.preprocessing.image_dataset_from_directory(
|
||||
# -----------------------
|
||||
# Data Augmentation
|
||||
# -----------------------
|
||||
|
||||
# data_augmentation = tf.keras.Sequential([
|
||||
# layers.RandomFlip("horizontal"),
|
||||
# layers.RandomRotation(0.1)
|
||||
# ])
|
||||
#data_augmentation = tf.keras.Sequential([
|
||||
# layers.RandomFlip("horizontal"),
|
||||
# layers.RandomRotation(0.1),
|
||||
# layers.RandomRotation(0.1, fill_mode="nearest"),
|
||||
# layers.RandomZoom(0.1)
|
||||
#])
|
||||
|
||||
|
||||
# def preprocess_train(x, y):
|
||||
# x = data_augmentation(x, training=True)
|
||||
# return x, y
|
||||
# I do this in c#-land
|
||||
|
||||
def preprocess_val(x, y):
    """Identity pass-through for validation batches.

    No augmentation is applied to validation data; the (image, label)
    pair is returned unchanged.
    """
    return (x, y)
|
||||
@@ -106,22 +102,6 @@ train_ds = (
|
||||
)
|
||||
|
||||
|
||||
# for images, labels in train_ds.take(1):
|
||||
|
||||
# plt.figure(figsize=(10,10))
|
||||
|
||||
# for i in range(12):
|
||||
# ax = plt.subplot(3,4,i+1)
|
||||
# plt.imshow(images[i].numpy().astype("uint8"))
|
||||
# plt.title(int(labels[i].numpy()))
|
||||
# plt.axis("off")
|
||||
|
||||
# plt.tight_layout()
|
||||
# plt.show()
|
||||
|
||||
|
||||
|
||||
|
||||
# -----------------------
|
||||
# ConvNeXt-Tiny Base Model
|
||||
# -----------------------
|
||||
@@ -143,13 +123,6 @@ inputs = tf.keras.Input(shape=(actualImageDimension, actualImageDimension, 3))
|
||||
|
||||
x = preprocess_input(inputs)
|
||||
x = base_model(x)
|
||||
# Dense Head
|
||||
# x = layers.GlobalAveragePooling2D()(x)
|
||||
# x = layers.BatchNormalization()(x)
|
||||
# x = layers.Dense(512, activation="relu")(x)
|
||||
# x = layers.Dropout(0.3)(x)
|
||||
# x = layers.Dense(128, activation="relu")(x)
|
||||
|
||||
x = layers.GlobalAveragePooling2D()(x)
|
||||
x = layers.BatchNormalization()(x)
|
||||
x = layers.Dense(256, activation="relu")(x)
|
||||
@@ -228,36 +201,3 @@ history_fine = model.fit(
|
||||
validation_data=val_ds,
|
||||
callbacks=[tensorboard, lr_scheduler, early_stopping, checkpointer]
|
||||
)
|
||||
|
||||
|
||||
# -----------------------
|
||||
# Plot Results
|
||||
# -----------------------
|
||||
|
||||
def plot_history(hist, title_prefix=""):
    """Plot training vs. validation curves from a Keras History object.

    Produces two figures in sequence — one for accuracy, one for loss —
    each showing the training and validation series with a legend.

    Args:
        hist: object with a ``history`` dict containing the keys
            'accuracy', 'val_accuracy', 'loss', and 'val_loss'.
        title_prefix: optional string prepended to each figure title
            (e.g. "Initial Training" or "Fine-Tuning").
    """
    # One figure per metric; the val_ series shares the metric key prefix.
    for metric, label in (("accuracy", "Accuracy"), ("loss", "Loss")):
        plt.figure()
        plt.plot(hist.history[metric], label=f"Train {label}")
        plt.plot(hist.history[f"val_{metric}"], label=f"Val {label}")
        plt.title(f"{title_prefix} {label}")
        plt.xlabel("Epochs")
        plt.ylabel(label)
        plt.legend()
        plt.show()
|
||||
|
||||
plot_history(history, "Initial Training")
|
||||
plot_history(history_fine, "Fine-Tuning")
|
||||
|
||||
# -----------------------
|
||||
# Save Final Model
|
||||
# -----------------------
|
||||
|
||||
#model.save(modelname)
|
||||
Binary file not shown.
Reference in New Issue
Block a user