Adding Automatic Validation
Table of Contents
Beginning
Imports
Python
from functools import partial
from pathlib import Path
PyPi
from holoviews.operation.datashader import datashade
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import cv2
import holoviews
import numpy
import tensorflow
Graeae
from graeae import EmbedHoloviews, ZipDownloader
Set Up
The Plotting
# Pre-bind the output folder so plots can be embedded with just a
# file name later on.
Embed = partial(EmbedHoloviews,
                folder_path=("../../files/posts/keras/"
                             "adding-automatic-validation/"))
# use the Bokeh backend for rendering
holoviews.extension("bokeh")
The Training Images
# The zipped training images (horse-or-human dataset).
URL = ("https://storage.googleapis.com/"
       "laurencemoroney-blog.appspot.com/"
       "horse-or-human.zip")
# NOTE: BASE is re-used below when setting up the validation download.
BASE = "~/data/datasets/images/horse-or-human/"
TARGET = f"{BASE}training"
# ZipDownloader downloads and unpacks the archive, skipping the
# download when the files already exist.
download = ZipDownloader(url=URL, target=TARGET)
download()
training_path = download.target
Files exist, not downloading
The Validation Images
# The zipped validation images (a separate hold-out set).
URL = (
    "https://storage.googleapis.com/"
    "laurencemoroney-blog.appspot.com/"
    "validation-horse-or-human.zip")
# BASE was defined by the training-download cell above.
TARGET = f"{BASE}validation"
download = ZipDownloader(url=URL, target=TARGET)
download()
validation_path = download.target
Downloading the zip file
Middle
Examining the Data
The training set is the same one that I used before to train a model to recognize whether a picture contained a human or a horse, but the validation set is new.
# Show the sub-folders (class labels) in each dataset.
print("Training")
for sub_directory in training_path.iterdir():
    print(sub_directory.name)

print("\nValidation")
for sub_directory in validation_path.iterdir():
    print(sub_directory.name)
Training horses humans Validation horses humans
How many images do we have?
# Count the images per class in each dataset.  A single data-driven
# loop replaces the two copy-pasted loops; the printed output is
# unchanged (the "\n" prefix reproduces the original blank line).
for header, dataset in (("Training", training_path),
                        ("\nValidation", validation_path)):
    print(header)
    for category in dataset.iterdir():
        print(f"{len(list(category.iterdir())):,} images in {category.name}")
Training 500 images in horses 527 images in humans Validation 128 images in horses 128 images in humans
Looking At A Few Images
As I noted, the training set is the same one I looked at before, but still, it never hurts to look.
# Plot two random humans and two random horses from the training set.
height = width = 300
human_files = list((training_path/"humans").iterdir())
horse_files = list((training_path/"horses").iterdir())

# Sample indexes from the actual file counts instead of the
# hard-coded 527 and 500, so this keeps working (and can't index past
# the end) if the dataset ever changes size.
human_indexes = numpy.random.randint(0, len(human_files), 2)
horse_indexes = numpy.random.randint(0, len(horse_files), 2)

humans = [holoviews.RGB.load_image(str(human_files[index])).opts(
    width=width,
    height=height,
) for index in human_indexes]
horses = [holoviews.RGB.load_image(str(horse_files[index])).opts(
    width=width,
    height=height,
) for index in horse_indexes]

# Lay the four images out in a 2x2 grid and embed the plot.
plot = holoviews.Layout(humans + horses).cols(2).opts(
    title="Sample Training Images"
)
Embed(plot=plot, file_name="training_images", height_in_pixels=700)()
Preprocessing the Data
When we train the model we'll use a batch generator. This next bit of code is just a convenience class to bundle the code together.
class Data:
    """Bundles up the Keras batch generator for one dataset folder.

    Args:
     path: path to the dataset
     target_size: tuple of pixel size for the generated images
    """
    def __init__(self, path: str, target_size: tuple=(300, 300)) -> None:
        self.path = path
        self.target_size = target_size
        # lazily-built generator, created on first access of `batches`
        self._batches = None

    @property
    def batches(self) -> tensorflow.keras.preprocessing.image.DirectoryIterator:
        """Generator of image batches"""
        if self._batches is None:
            # rescale pixels from 0-255 to 0-1 before feeding the model
            generator = ImageDataGenerator(rescale=1/255)
            self._batches = generator.flow_from_directory(
                self.path,
                target_size=self.target_size,
                batch_size=128,
                class_mode="binary",  # binary labels: horse vs human
            )
        return self._batches
The Model
This bundles together the different parts needed to train and use the model.
class Model:
    """A CNN Builder

    Args:
     training_path: training data folder path
     validation_path: validation data folder path
     image_size: single-dimension for the inputs to the model
     epochs: number of training epochs
     callback: something to stop the training
    """
    def __init__(self, training_path: str, validation_path: str,
                 image_size: int=300,
                 epochs: int=15,
                 callback: tensorflow.keras.callbacks.Callback=None) -> None:
        self.training_path = training_path
        self.validation_path = validation_path
        self.image_size = image_size
        self.epochs = epochs
        self.callback = callback
        # lazily-built attributes
        self._model = None
        self._training_data = None
        self._validation_data = None
        return

    @property
    def training_data(self) -> (tensorflow.keras.preprocessing
                                .image.DirectoryIterator):
        """generator of training data batches"""
        if self._training_data is None:
            self._training_data = Data(
                self.training_path,
                (self.image_size, self.image_size)).batches
        return self._training_data

    @property
    def validation_data(self) -> (tensorflow.keras.preprocessing
                                  .image.DirectoryIterator):
        """generator of validation batches"""
        if self._validation_data is None:
            self._validation_data = Data(
                self.validation_path,
                (self.image_size, self.image_size)).batches
        return self._validation_data

    @property
    def model(self) -> tensorflow.keras.models.Sequential:
        """A compiled model with five CNN layers"""
        if self._model is None:
            self._model = tensorflow.keras.models.Sequential()
            # five conv/pool pairs, then a dense head with a single
            # sigmoid output for the binary horse/human decision
            for layer in (
                    tensorflow.keras.layers.Conv2D(
                        16, (3, 3),
                        activation='relu',
                        input_shape=(self.image_size, self.image_size, 3)),
                    tensorflow.keras.layers.MaxPooling2D(2, 2),
                    tensorflow.keras.layers.Conv2D(32, (3, 3),
                                                   activation='relu'),
                    tensorflow.keras.layers.MaxPooling2D(2, 2),
                    tensorflow.keras.layers.Conv2D(64, (3, 3),
                                                   activation='relu'),
                    tensorflow.keras.layers.MaxPooling2D(2, 2),
                    tensorflow.keras.layers.Conv2D(64, (3, 3),
                                                   activation='relu'),
                    tensorflow.keras.layers.MaxPooling2D(2, 2),
                    tensorflow.keras.layers.Conv2D(64, (3, 3),
                                                   activation='relu'),
                    tensorflow.keras.layers.MaxPooling2D(2, 2),
                    tensorflow.keras.layers.Flatten(),
                    tensorflow.keras.layers.Dense(512,
                                                  activation='relu'),
                    tensorflow.keras.layers.Dense(1, activation='sigmoid'),
            ):
                self._model.add(layer)
            # NOTE(review): `lr` is the legacy spelling of
            # `learning_rate`; kept for compatibility with the TF
            # version this post was written against.
            self._model.compile(loss='binary_crossentropy',
                                optimizer=RMSprop(lr=0.001),
                                metrics=['acc'])
        return self._model

    def print_summary(self) -> None:
        """Prints a summary of the model's layers"""
        print(self.model.summary())
        return

    def train(self) -> None:
        """Trains the model, attaching `self.callback` if one was given"""
        # NOTE(review): fit_generator is deprecated in newer TF
        # releases (Model.fit accepts generators directly); kept as-is
        # to match the rest of the post.
        fit = partial(self.model.fit_generator,
                      self.training_data,
                      steps_per_epoch=8,
                      epochs=self.epochs,
                      verbose=2,
                      validation_data=self.validation_data,
                      validation_steps=8)
        if self.callback:
            fit(callbacks=[self.callback])
        else:
            fit()
        return

    def predict(self, image) -> str:
        """Predicts whether the image contains a horse or a human

        Args:
         image: batch of one image, shape (1, image_size, image_size, 3)

        Returns:
         label: "human" or "horse"
        """
        classes = self.model.predict(image)
        # The sigmoid output is a probability, so threshold it at 0.5.
        # The original truthiness test (`if classes[0]`) labeled any
        # non-zero probability -- i.e. almost everything -- as "human".
        return "human" if classes[0][0] > 0.5 else "horse"
Training The Model
# Train with the defaults: 300x300 inputs, 15 epochs, no callback.
model = Model(str(training_path), str(validation_path))
model.train()
Found 1027 images belonging to 2 classes. Found 256 images belonging to 2 classes. Epoch 1/15 8/8 - 9s - loss: 1.5885 - acc: 0.5640 - val_loss: 0.9410 - val_acc: 0.5000 Epoch 2/15 8/8 - 7s - loss: 0.7624 - acc: 0.6407 - val_loss: 0.7195 - val_acc: 0.5000 Epoch 3/15 8/8 - 7s - loss: 0.8388 - acc: 0.6908 - val_loss: 0.6150 - val_acc: 0.6758 Epoch 4/15 8/8 - 7s - loss: 0.3347 - acc: 0.8818 - val_loss: 1.4559 - val_acc: 0.7070 Epoch 5/15 8/8 - 7s - loss: 0.2710 - acc: 0.8832 - val_loss: 1.2360 - val_acc: 0.8242 Epoch 6/15 8/8 - 6s - loss: 0.1465 - acc: 0.9433 - val_loss: 1.5440 - val_acc: 0.8320 Epoch 7/15 8/8 - 6s - loss: 0.4357 - acc: 0.8454 - val_loss: 1.2532 - val_acc: 0.8242 Epoch 8/15 8/8 - 6s - loss: 0.3896 - acc: 0.8888 - val_loss: 1.4711 - val_acc: 0.8008 Epoch 9/15 8/8 - 5s - loss: 0.1057 - acc: 0.9588 - val_loss: 2.0512 - val_acc: 0.8164 Epoch 10/15 8/8 - 5s - loss: 0.1610 - acc: 0.9366 - val_loss: 1.3215 - val_acc: 0.6602 Epoch 11/15 8/8 - 8s - loss: 0.0889 - acc: 0.9736 - val_loss: 1.7946 - val_acc: 0.8281 Epoch 12/15 8/8 - 7s - loss: 0.0163 - acc: 0.9944 - val_loss: 1.6159 - val_acc: 0.8672 Epoch 13/15 8/8 - 7s - loss: 0.5203 - acc: 0.8915 - val_loss: 0.9708 - val_acc: 0.8125 Epoch 14/15 8/8 - 6s - loss: 0.1073 - acc: 0.9800 - val_loss: 1.1768 - val_acc: 0.8438 Epoch 15/15 8/8 - 7s - loss: 0.0305 - acc: 0.9922 - val_loss: 1.4107 - val_acc: 0.8555
It looks like the accuracy for both the training and the validation sets are going up. Maybe a little more training will help.
# Continue training the same (already-trained) model for 5 more epochs.
model.epochs = 5
model.train()
Epoch 1/5 8/8 - 7s - loss: 0.0109 - acc: 0.9978 - val_loss: 1.6156 - val_acc: 0.8672 Epoch 2/5 8/8 - 7s - loss: 0.0067 - acc: 0.9989 - val_loss: 2.5671 - val_acc: 0.8242 Epoch 3/5 8/8 - 7s - loss: 0.2348 - acc: 0.9477 - val_loss: 1.2397 - val_acc: 0.8633 Epoch 4/5 8/8 - 7s - loss: 0.0132 - acc: 0.9961 - val_loss: 1.5193 - val_acc: 0.8750 Epoch 5/5 8/8 - 7s - loss: 0.0101 - acc: 0.9978 - val_loss: 0.9305 - val_acc: 0.8945
Everything is still improving. Try a little more.
# And another 10 epochs on top of that.
model.epochs = 10
model.train()
Epoch 1/10 8/8 - 8s - loss: 0.0413 - acc: 0.9844 - val_loss: 0.8631 - val_acc: 0.9062 Epoch 2/10 8/8 - 7s - loss: 0.2625 - acc: 0.9244 - val_loss: 1.3837 - val_acc: 0.8438 Epoch 3/10 8/8 - 7s - loss: 0.7150 - acc: 0.8776 - val_loss: 8.2253 - val_acc: 0.6328 Epoch 4/10 8/8 - 7s - loss: 0.0937 - acc: 0.9785 - val_loss: 1.9342 - val_acc: 0.8281 Epoch 5/10 8/8 - 7s - loss: 0.0126 - acc: 0.9978 - val_loss: 1.7459 - val_acc: 0.8672 Epoch 6/10 8/8 - 7s - loss: 0.0064 - acc: 1.0000 - val_loss: 1.8857 - val_acc: 0.8633 Epoch 7/10 8/8 - 6s - loss: 0.0025 - acc: 1.0000 - val_loss: 2.1456 - val_acc: 0.8672 Epoch 8/10 8/8 - 6s - loss: 0.0027 - acc: 1.0000 - val_loss: 2.0877 - val_acc: 0.8711 Epoch 9/10 8/8 - 6s - loss: 9.8538e-04 - acc: 1.0000 - val_loss: 2.3224 - val_acc: 0.8672 Epoch 10/10 8/8 - 6s - loss: 4.4454e-04 - acc: 1.0000 - val_loss: 2.8453 - val_acc: 0.8672
The training loss and accuracy keeps getting better but it looks like it might be overfitting, after about epoch 21, since the validation metrics start to get worse.
I'll try making a callback that stops the training when the validation accuracy reaches 90%.
class Stop(tensorflow.keras.callbacks.Callback):
    """Callback that halts training once validation accuracy reaches 90%."""
    def on_epoch_end(self, epoch: int, logs: dict=None) -> None:
        """Checks the epoch's validation accuracy and maybe stops training

        Args:
         epoch: index of the epoch that just finished
         logs: metrics for the epoch (may omit "val_acc")
        """
        # Use None (not a mutable {} default) and guard the lookup:
        # the original `logs.get("val_acc") >= 0.9` raises a TypeError
        # (None >= float) whenever the metric is missing.
        val_acc = (logs or {}).get("val_acc")
        if val_acc is not None and val_acc >= 0.9:
            print(f"Stopping point reached at epoch {epoch}")
            self.model.stop_training = True
# Train a fresh model for up to 30 epochs, letting the Stop callback
# end training early if validation accuracy reaches 90%.
callback = Stop()
model = Model(str(training_path), str(validation_path),
              epochs=30,
              callback=callback)
model.train()
Found 1027 images belonging to 2 classes. Found 256 images belonging to 2 classes. Epoch 1/30 8/8 - 8s - loss: 1.7387 - acc: 0.5006 - val_loss: 0.6752 - val_acc: 0.5000 Epoch 2/30 8/8 - 7s - loss: 0.6397 - acc: 0.6630 - val_loss: 0.4168 - val_acc: 0.8438 Epoch 3/30 8/8 - 7s - loss: 0.8124 - acc: 0.6162 - val_loss: 0.5096 - val_acc: 0.7617 Epoch 4/30 8/8 - 7s - loss: 0.3740 - acc: 0.8498 - val_loss: 0.8950 - val_acc: 0.7891 Epoch 5/30 8/8 - 7s - loss: 0.2619 - acc: 0.8867 - val_loss: 0.8874 - val_acc: 0.8477 Epoch 6/30 8/8 - 6s - loss: 0.2136 - acc: 0.9010 - val_loss: 0.5653 - val_acc: 0.8789 Epoch 7/30 8/8 - 6s - loss: 0.0980 - acc: 0.9566 - val_loss: 1.4001 - val_acc: 0.8320 Epoch 8/30 8/8 - 6s - loss: 0.2865 - acc: 0.8665 - val_loss: 0.5963 - val_acc: 0.8906 Epoch 9/30 8/8 - 5s - loss: 0.1949 - acc: 0.9288 - val_loss: 0.9161 - val_acc: 0.8984 Epoch 10/30 8/8 - 5s - loss: 0.1328 - acc: 0.9488 - val_loss: 1.7331 - val_acc: 0.8164 Epoch 11/30 8/8 - 7s - loss: 0.1825 - acc: 0.9266 - val_loss: 1.1965 - val_acc: 0.8438 Epoch 12/30 8/8 - 7s - loss: 0.1108 - acc: 0.9633 - val_loss: 1.8896 - val_acc: 0.7852 Epoch 13/30 8/8 - 7s - loss: 0.0309 - acc: 0.9883 - val_loss: 1.7577 - val_acc: 0.8477 Epoch 14/30 8/8 - 7s - loss: 0.0140 - acc: 0.9956 - val_loss: 2.0667 - val_acc: 0.8320 Epoch 15/30 8/8 - 6s - loss: 1.5402 - acc: 0.8359 - val_loss: 1.3396 - val_acc: 0.8203 Epoch 16/30 8/8 - 6s - loss: 0.0144 - acc: 0.9990 - val_loss: 1.8488 - val_acc: 0.8203 Epoch 17/30 8/8 - 6s - loss: 0.0092 - acc: 0.9989 - val_loss: 2.0972 - val_acc: 0.8320 Epoch 18/30 8/8 - 5s - loss: 0.0031 - acc: 1.0000 - val_loss: 1.9660 - val_acc: 0.8594 Epoch 19/30 8/8 - 6s - loss: 0.0752 - acc: 0.9785 - val_loss: 2.6233 - val_acc: 0.7578 Epoch 20/30 8/8 - 7s - loss: 0.0086 - acc: 0.9987 - val_loss: 2.2535 - val_acc: 0.8203 Epoch 21/30 8/8 - 7s - loss: 0.0012 - acc: 1.0000 - val_loss: 2.5086 - val_acc: 0.8242 Epoch 22/30 8/8 - 7s - loss: 8.1537e-04 - acc: 1.0000 - val_loss: 2.6183 - val_acc: 0.8203 Epoch 
23/30 8/8 - 7s - loss: 4.3476e-04 - acc: 1.0000 - val_loss: 2.5576 - val_acc: 0.8477 Epoch 24/30 8/8 - 7s - loss: 1.6678e-04 - acc: 1.0000 - val_loss: 2.7958 - val_acc: 0.8398 Epoch 25/30 8/8 - 6s - loss: 2.6736e-04 - acc: 1.0000 - val_loss: 2.8162 - val_acc: 0.8398 Epoch 26/30 8/8 - 6s - loss: 6.3831e-05 - acc: 1.0000 - val_loss: 3.0070 - val_acc: 0.8398 Epoch 27/30 8/8 - 6s - loss: 3.5260e-05 - acc: 1.0000 - val_loss: 3.4427 - val_acc: 0.8320 Epoch 28/30 8/8 - 5s - loss: 2.8581e-05 - acc: 1.0000 - val_loss: 3.0836 - val_acc: 0.8594 Epoch 29/30 8/8 - 7s - loss: 1.9179 - acc: 0.8610 - val_loss: 1.5853 - val_acc: 0.8281 Epoch 30/30 8/8 - 7s - loss: 0.0118 - acc: 0.9951 - val_loss: 2.7055 - val_acc: 0.8086
This time the validation accuracy never reached 90% the way it did previously, so the callback never fired. Maybe I'll just set it to use 21 epochs.
# Train a fresh model for a fixed 21 epochs (no early-stopping callback).
model = Model(str(training_path), str(validation_path), epochs=21)
model.train()
Found 1027 images belonging to 2 classes. Found 256 images belonging to 2 classes. Epoch 1/21 8/8 - 8s - loss: 0.8662 - acc: 0.5428 - val_loss: 0.6637 - val_acc: 0.5000 Epoch 2/21 8/8 - 7s - loss: 0.7301 - acc: 0.6118 - val_loss: 0.5114 - val_acc: 0.8398 Epoch 3/21 8/8 - 7s - loss: 0.5781 - acc: 0.8516 - val_loss: 0.4985 - val_acc: 0.8203 Epoch 4/21 8/8 - 6s - loss: 0.6889 - acc: 0.8346 - val_loss: 0.8576 - val_acc: 0.7969 Epoch 5/21 8/8 - 6s - loss: 0.2113 - acc: 0.9310 - val_loss: 2.0597 - val_acc: 0.6875 Epoch 6/21 8/8 - 6s - loss: 0.3143 - acc: 0.8865 - val_loss: 0.8110 - val_acc: 0.8320 Epoch 7/21 8/8 - 6s - loss: 0.1289 - acc: 0.9570 - val_loss: 1.1169 - val_acc: 0.8672 Epoch 8/21 8/8 - 6s - loss: 0.1513 - acc: 0.9288 - val_loss: 1.1159 - val_acc: 0.8398 Epoch 9/21 8/8 - 6s - loss: 0.0882 - acc: 0.9700 - val_loss: 1.4653 - val_acc: 0.8125 Epoch 10/21 8/8 - 5s - loss: 0.1803 - acc: 0.9522 - val_loss: 1.2575 - val_acc: 0.8711 Epoch 11/21 8/8 - 7s - loss: 0.0753 - acc: 0.9766 - val_loss: 1.0846 - val_acc: 0.8633 Epoch 12/21 8/8 - 8s - loss: 0.1993 - acc: 0.9580 - val_loss: 0.9569 - val_acc: 0.8672 Epoch 13/21 8/8 - 8s - loss: 0.0452 - acc: 0.9867 - val_loss: 1.1035 - val_acc: 0.8906 Epoch 14/21 8/8 - 6s - loss: 0.0139 - acc: 0.9948 - val_loss: 1.7541 - val_acc: 0.8516 Epoch 15/21 8/8 - 6s - loss: 0.0191 - acc: 0.9911 - val_loss: 1.6554 - val_acc: 0.8555 Epoch 16/21 8/8 - 6s - loss: 0.0327 - acc: 0.9967 - val_loss: 10.3868 - val_acc: 0.6523 Epoch 17/21 8/8 - 6s - loss: 2.2541 - acc: 0.9004 - val_loss: 0.9508 - val_acc: 0.8594 Epoch 18/21 8/8 - 6s - loss: 0.0282 - acc: 0.9889 - val_loss: 1.3172 - val_acc: 0.8672 Epoch 19/21 8/8 - 5s - loss: 0.0064 - acc: 0.9989 - val_loss: 1.6202 - val_acc: 0.8477 Epoch 20/21 8/8 - 7s - loss: 0.0033 - acc: 1.0000 - val_loss: 2.0371 - val_acc: 0.8125 Epoch 21/21 8/8 - 8s - loss: 0.0066 - acc: 0.9990 - val_loss: 1.8340 - val_acc: 0.8672
It looks like the 90 % validation accuracy was a fluke.
Looking At Some Predictions
These are the same images I tested previously. The architecture of the model is the same, but I didn't train it for as many epochs on the current pass through this data set.
# Render the four hand-picked test images in a 2x2 grid.
test_path = Path("~/test_images/").expanduser()
height = width = 400
plots = []
for image_path in test_path.iterdir():
    shaded = datashade(holoviews.RGB.load_image(str(image_path)))
    plots.append(shaded.opts(title=image_path.name,
                             height=height,
                             width=width))
plot = holoviews.Layout(plots).cols(2).opts(title="Test Images")
Embed(plot=plot, file_name="test_images", height_in_pixels=900)()
# Run the model over the test images and print its label for each.
target_size = (300, 300)
images = (("horse.jpg", "Horse"),
          ("centaur.jpg", "Centaur"),
          ("tomb_figure.jpg", "Statue of a Man Riding a Horse"),
          ("rembrandt.jpg", "Woman"))
for filename, label in images:
    loaded = cv2.imread(str(test_path/filename))
    # cv2.imread returns BGR channel order, but the model was trained
    # on RGB images -- convert before predicting.
    loaded = cv2.cvtColor(loaded, cv2.COLOR_BGR2RGB)
    x = cv2.resize(loaded, target_size)
    # Match the training pre-processing: add the batch dimension and
    # rescale 0-255 pixels to 0-1 (the training generator used
    # rescale=1/255; the original code skipped this for predictions).
    # The shape is derived from target_size instead of hard-coding 300.
    x = numpy.reshape(x, (1, *target_size, 3)) / 255
    prediction = model.predict(x)
    print(f"The {label} is a {prediction}.")
The Horse is a horse. The Centaur is a horse. The Statue of a Man Riding a Horse is a human. The Woman is a horse.
Well, now it got the horse right and the woman wrong. Peculiar.
A re-try with smaller images.
# Re-train from scratch with smaller (150x150) inputs, 21 epochs.
model = Model(str(training_path), str(validation_path),
              image_size=150,
              epochs=21)
model.train()
Found 1027 images belonging to 2 classes. Found 256 images belonging to 2 classes. Epoch 1/21 8/8 - 6s - loss: 0.7257 - acc: 0.5072 - val_loss: 0.6794 - val_acc: 0.6719 Epoch 2/21 8/8 - 6s - loss: 0.6691 - acc: 0.6118 - val_loss: 0.4503 - val_acc: 0.8633 Epoch 3/21 8/8 - 6s - loss: 0.5535 - acc: 0.7402 - val_loss: 0.4486 - val_acc: 0.7969 Epoch 4/21 8/8 - 5s - loss: 0.5850 - acc: 0.7959 - val_loss: 0.4330 - val_acc: 0.8555 Epoch 5/21 8/8 - 4s - loss: 0.1967 - acc: 0.9321 - val_loss: 1.1319 - val_acc: 0.7891 Epoch 6/21 8/8 - 4s - loss: 0.1969 - acc: 0.9310 - val_loss: 0.8440 - val_acc: 0.8125 Epoch 7/21 8/8 - 4s - loss: 0.1309 - acc: 0.9522 - val_loss: 1.4648 - val_acc: 0.8008 Epoch 8/21 8/8 - 5s - loss: 0.2732 - acc: 0.9023 - val_loss: 0.8364 - val_acc: 0.8398 Epoch 9/21 8/8 - 4s - loss: 0.1071 - acc: 0.9611 - val_loss: 1.2082 - val_acc: 0.8359 Epoch 10/21 8/8 - 4s - loss: 0.0725 - acc: 0.9711 - val_loss: 1.9165 - val_acc: 0.7148 Epoch 11/21 8/8 - 6s - loss: 0.2651 - acc: 0.9062 - val_loss: 0.8687 - val_acc: 0.8398 Epoch 12/21 8/8 - 6s - loss: 0.0568 - acc: 0.9789 - val_loss: 1.0587 - val_acc: 0.8359 Epoch 13/21 8/8 - 5s - loss: 0.1405 - acc: 0.9522 - val_loss: 1.3749 - val_acc: 0.7773 Epoch 14/21 8/8 - 5s - loss: 0.2003 - acc: 0.9395 - val_loss: 0.7942 - val_acc: 0.8555 Epoch 15/21 8/8 - 4s - loss: 0.0313 - acc: 0.9889 - val_loss: 0.8540 - val_acc: 0.8594 Epoch 16/21 8/8 - 4s - loss: 0.0280 - acc: 0.9922 - val_loss: 0.9602 - val_acc: 0.8516 Epoch 17/21 8/8 - 4s - loss: 0.1560 - acc: 0.9544 - val_loss: 0.6488 - val_acc: 0.8359 Epoch 18/21 8/8 - 4s - loss: 0.0366 - acc: 0.9933 - val_loss: 1.0103 - val_acc: 0.8555 Epoch 19/21 8/8 - 4s - loss: 0.0238 - acc: 0.9967 - val_loss: 0.7084 - val_acc: 0.8555 Epoch 20/21 8/8 - 5s - loss: 0.0555 - acc: 0.9778 - val_loss: 0.9348 - val_acc: 0.8594 Epoch 21/21 8/8 - 6s - loss: 0.0046 - acc: 1.0000 - val_loss: 1.1267 - val_acc: 0.8633
# Predict the test images with the 150x150 model.
target_size = (150, 150)
images = (("horse.jpg", "Horse"),
          ("centaur.jpg", "Centaur"),
          ("tomb_figure.jpg", "Statue of a Man Riding a Horse"),
          ("rembrandt.jpg", "Woman"))
for filename, label in images:
    loaded = cv2.imread(str(test_path/filename))
    # cv2.imread returns BGR channel order, but the model was trained
    # on RGB images -- convert before predicting.
    loaded = cv2.cvtColor(loaded, cv2.COLOR_BGR2RGB)
    x = cv2.resize(loaded, target_size)
    # Match the training pre-processing: add the batch dimension and
    # rescale 0-255 pixels to 0-1 (the training generator used
    # rescale=1/255; the original code skipped this for predictions).
    # The shape is derived from target_size instead of hard-coding 150.
    x = numpy.reshape(x, (1, *target_size, 3)) / 255
    prediction = model.predict(x)
    print(f"The {label} is a {prediction}.")
The Horse is a horse. The Centaur is a horse. The Statue of a Man Riding a Horse is a horse. The Woman is a horse.
Although the training metrics looked about the same (aside from taking longer to reach high accuracy), the model now appears to predict that everything is a horse.
A re-try with smaller images.
# Re-train from scratch with smaller (150x150) inputs, 21 epochs.
model = Model(str(training_path), str(validation_path),
              image_size=150,
              epochs=21)
model.train()
Found 1027 images belonging to 2 classes. Found 256 images belonging to 2 classes. Epoch 1/21 8/8 - 6s - loss: 0.7257 - acc: 0.5072 - val_loss: 0.6794 - val_acc: 0.6719 Epoch 2/21 8/8 - 6s - loss: 0.6691 - acc: 0.6118 - val_loss: 0.4503 - val_acc: 0.8633 Epoch 3/21 8/8 - 6s - loss: 0.5535 - acc: 0.7402 - val_loss: 0.4486 - val_acc: 0.7969 Epoch 4/21 8/8 - 5s - loss: 0.5850 - acc: 0.7959 - val_loss: 0.4330 - val_acc: 0.8555 Epoch 5/21 8/8 - 4s - loss: 0.1967 - acc: 0.9321 - val_loss: 1.1319 - val_acc: 0.7891 Epoch 6/21 8/8 - 4s - loss: 0.1969 - acc: 0.9310 - val_loss: 0.8440 - val_acc: 0.8125 Epoch 7/21 8/8 - 4s - loss: 0.1309 - acc: 0.9522 - val_loss: 1.4648 - val_acc: 0.8008 Epoch 8/21 8/8 - 5s - loss: 0.2732 - acc: 0.9023 - val_loss: 0.8364 - val_acc: 0.8398 Epoch 9/21 8/8 - 4s - loss: 0.1071 - acc: 0.9611 - val_loss: 1.2082 - val_acc: 0.8359 Epoch 10/21 8/8 - 4s - loss: 0.0725 - acc: 0.9711 - val_loss: 1.9165 - val_acc: 0.7148 Epoch 11/21 8/8 - 6s - loss: 0.2651 - acc: 0.9062 - val_loss: 0.8687 - val_acc: 0.8398 Epoch 12/21 8/8 - 6s - loss: 0.0568 - acc: 0.9789 - val_loss: 1.0587 - val_acc: 0.8359 Epoch 13/21 8/8 - 5s - loss: 0.1405 - acc: 0.9522 - val_loss: 1.3749 - val_acc: 0.7773 Epoch 14/21 8/8 - 5s - loss: 0.2003 - acc: 0.9395 - val_loss: 0.7942 - val_acc: 0.8555 Epoch 15/21 8/8 - 4s - loss: 0.0313 - acc: 0.9889 - val_loss: 0.8540 - val_acc: 0.8594 Epoch 16/21 8/8 - 4s - loss: 0.0280 - acc: 0.9922 - val_loss: 0.9602 - val_acc: 0.8516 Epoch 17/21 8/8 - 4s - loss: 0.1560 - acc: 0.9544 - val_loss: 0.6488 - val_acc: 0.8359 Epoch 18/21 8/8 - 4s - loss: 0.0366 - acc: 0.9933 - val_loss: 1.0103 - val_acc: 0.8555 Epoch 19/21 8/8 - 4s - loss: 0.0238 - acc: 0.9967 - val_loss: 0.7084 - val_acc: 0.8555 Epoch 20/21 8/8 - 5s - loss: 0.0555 - acc: 0.9778 - val_loss: 0.9348 - val_acc: 0.8594 Epoch 21/21 8/8 - 6s - loss: 0.0046 - acc: 1.0000 - val_loss: 1.1267 - val_acc: 0.8633
# Predict the test images with the 150x150 model.
target_size = (150, 150)
images = (("horse.jpg", "Horse"),
          ("centaur.jpg", "Centaur"),
          ("tomb_figure.jpg", "Statue of a Man Riding a Horse"),
          ("rembrandt.jpg", "Woman"))
for filename, label in images:
    loaded = cv2.imread(str(test_path/filename))
    # cv2.imread returns BGR channel order, but the model was trained
    # on RGB images -- convert before predicting.
    loaded = cv2.cvtColor(loaded, cv2.COLOR_BGR2RGB)
    x = cv2.resize(loaded, target_size)
    # Match the training pre-processing: add the batch dimension and
    # rescale 0-255 pixels to 0-1 (the training generator used
    # rescale=1/255; the original code skipped this for predictions).
    # The shape is derived from target_size instead of hard-coding 150.
    x = numpy.reshape(x, (1, *target_size, 3)) / 255
    prediction = model.predict(x)
    print(f"The {label} is a {prediction}.")
The Horse is a horse. The Centaur is a horse. The Statue of a Man Riding a Horse is a horse. The Woman is a horse.
Although the training metrics looked about the same (aside from taking longer to reach high accuracy), the model now appears to predict that everything is a horse.
End
Source
This is a walk-through of the Course 1 - Part 8 - Lesson 3 - Notebook.ipynb on github.