Face Mask Detection Using MobileNetV2 Transfer Learning
In this article, we will learn how computer vision can detect whether people are wearing a face mask, a timely problem given the global coronavirus outbreak.
Note
We will train the model on Google Colab, which comes with all the deep learning libraries preinstalled, so we avoid the trouble of installing them locally.
Face mask dataset
#download dataset
!wget https://s3-us-west-2.amazonaws.com/static.pyimagesearch.com/face-mask-detection/face-mask-detector.zip
unzip the dataset in the Google Colab root
!unzip /content/face-mask-detector.zip
import the necessary packages
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.layers import AveragePooling2D
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.utils import to_categorical
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from imutils import paths
import matplotlib.pyplot as plt
import numpy as np
import os
initialize the initial learning rate, number of epochs, and batch size
INIT_LR = 1e-4
EPOCHS = 10
BS = 32
grab the list of images in our dataset directory, then initialize the lists of data and class labels
print("[INFO] loading images...")
dataset = "/content/face-mask-detector/dataset"  # path to the unzipped dataset; adjust if yours differs
imagePaths = list(paths.list_images(dataset))
data = []
labels = []
# loop over the image paths
for imagePath in imagePaths:
    # extract the class label from the directory name
    label = imagePath.split(os.path.sep)[-2]
    # load the input image (224x224) and preprocess it
    image = load_img(imagePath, target_size=(224, 224))
    image = img_to_array(image)
    image = preprocess_input(image)
    # update the data and labels lists, respectively
    data.append(image)
    labels.append(label)
convert the data and labels to arrays
data = np.array(data, dtype="float32")
labels = np.array(labels)
perform one-hot encoding on the labels
lb = LabelBinarizer()
labels = lb.fit_transform(labels)
labels = to_categorical(labels)
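Since there are only two classes, LabelBinarizer returns a single 0/1 column and to_categorical expands it into two one-hot columns. Here is a quick optional sanity check on dummy labels (assuming the dataset folders are named with_mask and without_mask, as in the PyImageSearch dataset):
# demonstrate the label pipeline on a few dummy labels
demo_labels = ["with_mask", "without_mask", "with_mask"]
lb_demo = LabelBinarizer()
binary = lb_demo.fit_transform(demo_labels)  # shape (3, 1): one 0/1 column
one_hot = to_categorical(binary)             # shape (3, 2): one column per class
print(lb_demo.classes_)                      # classes in alphabetical order
print(one_hot)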
partition the data into training and testing splits using 80% of the data for training and the remaining 20% for testing
(trainX, testX, trainY, testY) = train_test_split(data, labels,
    test_size=0.20, stratify=labels, random_state=42)
construct the training image generator for data augmentation
aug = ImageDataGenerator(
    rotation_range=20,
    zoom_range=0.15,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.15,
    horizontal_flip=True,
    fill_mode="nearest")
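To verify what the generator will feed the network, you can optionally preview a few augmented variants of one training image. Note that preprocess_input already scaled pixels to [-1, 1], so we shift them back to [0, 1] just for display:
# preview four augmented variants of the first training image
sample = trainX[0:1]
plt.figure(figsize=(8, 2))
for i, batch in enumerate(aug.flow(sample, batch_size=1)):
    plt.subplot(1, 4, i + 1)
    plt.imshow((batch[0] + 1.0) / 2.0)  # undo the [-1, 1] scaling for display
    plt.axis("off")
    if i == 3:
        break
plt.show()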
load the MobileNetV2 network, ensuring the head FC layer sets are left off
baseModel = MobileNetV2(weights="imagenet", include_top=False,
    input_tensor=Input(shape=(224, 224, 3)))
construct the head of the model that will be placed on top of the base model
headModel = baseModel.output
headModel = AveragePooling2D(pool_size=(7, 7))(headModel)
headModel = Flatten(name="flatten")(headModel)
headModel = Dense(128, activation="relu")(headModel)
headModel = Dropout(0.5)(headModel)
headModel = Dense(2, activation="softmax")(headModel)
place the head FC model on top of the base model
model = Model(inputs=baseModel.input, outputs=headModel)
loop over all layers in the base model and freeze them
for layer in baseModel.layers:
    layer.trainable = False
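A quick optional check that freezing worked and only the new head will be trained:
# count trainable vs. frozen parameters; only the head layers should be trainable
trainable = sum(w.shape.num_elements() for w in model.trainable_weights)
frozen = sum(w.shape.num_elements() for w in model.non_trainable_weights)
print("[INFO] trainable params:", trainable, "| frozen params:", frozen)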
compile our model
print("[INFO] compiling model...")
opt = Adam(learning_rate=INIT_LR, decay=INIT_LR / EPOCHS)
model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])
train the head of the network
print("[INFO] training head...")
H = model.fit(
    aug.flow(trainX, trainY, batch_size=BS),
    steps_per_epoch=len(trainX) // BS,
    validation_data=(testX, testY),
    validation_steps=len(testX) // BS,
    epochs=EPOCHS)
make predictions on the testing set and show a nicely formatted classification report
print("[INFO] evaluating network...")
predIdxs = model.predict(testX, batch_size=BS)
# for each image in the testing set we need to find the index of the
# label with the largest predicted probability
predIdxs = np.argmax(predIdxs, axis=1)
print(classification_report(testY.argmax(axis=1), predIdxs,
    target_names=lb.classes_))
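Optionally, a confusion matrix shows exactly which class gets misclassified (rows are true classes, columns are predictions, in the order of lb.classes_):
# print the raw confusion matrix alongside the classification report
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(testY.argmax(axis=1), predIdxs)
print(cm)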
plot the training loss and accuracy
N = EPOCHS
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, N), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, N), H.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, N), H.history["accuracy"], label="train_acc")
plt.plot(np.arange(0, N), H.history["val_accuracy"], label="val_acc")
plt.title("Training Loss and Accuracy")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="lower left")
plt.savefig("plot.png")
save model
print("[INFO] saving mask detector model...")
model.save('mask-no-mask.h5')
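Before converting the model, it is worth reloading the saved file and scoring one test image to confirm it round-trips correctly. A minimal sketch; the probability order follows lb.classes_:
# sanity check: reload the saved model and predict a single test image
from tensorflow.keras.models import load_model
reloaded = load_model('mask-no-mask.h5')
probs = reloaded.predict(testX[0:1])[0]
for cls, p in zip(lb.classes_, probs):
    print("{}: {:.4f}".format(cls, p))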
convert the model to TensorFlow.js
# install tensorflowjs
!pip install tensorflowjs
and import the modules
import tensorflowjs as tfjs
from tensorflow.keras.models import load_model
create a function to convert the Keras model to TensorFlow.js format
def keras2tfjs(model_path, dir_out):
    # load the trained Keras model and save it in TensorFlow.js format
    print('Model loading...')
    model = load_model(model_path)
    tfjs.converters.save_keras_model(model, dir_out)
and call the function
model_path='/content/mask-no-mask.h5'
dir_out='/content/keras2js'
keras2tfjs(model_path,dir_out)
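To get the converted model.json and weight shards out of Colab, zip the output directory and download it (this assumes you are running inside Colab, where google.colab.files is available):
# zip the TensorFlow.js output directory and download it from Colab
!zip -r /content/keras2js.zip /content/keras2js
from google.colab import files
files.download('/content/keras2js.zip')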
Finally, run this code on a PC or laptop for real-time detection with a camera.
# import the necessary packages
from datetime import datetime
from mtcnn.mtcnn import MTCNN
import numpy as np
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.models import load_model
import cv2
import os
from resizeimage import resizeimage
# initialize the MTCNN face detector
detector = MTCNN()
load the model
model = load_model("./models/mask-no-mask.h5")  # the model trained in Colab, copied to ./models/
load and run the camera with the OpenCV module
font = cv2.FONT_HERSHEY_TRIPLEX
font2 = cv2.FONT_HERSHEY_COMPLEX_SMALL
font3 = cv2.FONT_HERSHEY_SCRIPT_COMPLEX
font4 = cv2.FONT_HERSHEY_SIMPLEX
cap = cv2.VideoCapture(0)
while True:
    # capture frame-by-frame
    __, frame = cap.read()
    cv2.putText(frame, str(datetime.now()), (10, 30), font3, 1, (255, 255, 255), 2, cv2.LINE_AA)
    cv2.putText(frame, 'ARR-AB\'s CAM', (480, 450), font2, 0.9, (255, 255, 255), 2, cv2.LINE_AA)
    # use MTCNN to detect faces
    result = detector.detect_faces(frame)
    if result != []:
        for person in result:
            bounding_box = person['box']
            keypoints = person['keypoints']
            # save the frame, reload it, and resize it to the 224x224 input size
            cv2.imwrite('opencv.png', frame)
            image_file = load_img('opencv.png')
            cover = resizeimage.resize_cover(image_file, [224, 224], validate=False)
            x = img_to_array(cover)
            face = preprocess_input(x)
            face = np.expand_dims(face, axis=0)
            # predict mask / no mask and build a label with the winning probability
            (mask, withoutMask) = model.predict(face)[0]
            label = "Mask" if mask > withoutMask else "No Mask"
            color = (0, 255, 0) if label == "Mask" else (0, 0, 255)
            label = "{}: {:.2f}%".format(label, max(mask, withoutMask) * 100)
            cv2.putText(frame, label, (100, 100), font4, 0.8, color, 2, cv2.LINE_AA)
            # draw the face bounding box
            cv2.rectangle(frame,
                (bounding_box[0], bounding_box[1]),
                (bounding_box[0] + bounding_box[2], bounding_box[1] + bounding_box[3]),
                (0, 155, 255), 2)
    # display the resulting frame
    cv2.imshow('frame', frame)
    if cv2.waitKey(10) & 0xFF == ord('q'):
        break
When everything is done, release the capture
cap.release()
cv2.destroyAllWindows()
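If you also want to record the annotated stream to disk, create a cv2.VideoWriter before the loop and write each frame inside it. This is a sketch only; codec support and the required frame size vary by platform and camera:
# optional: record the annotated frames to a video file
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('mask_cam.avi', fourcc, 20.0, (640, 480))  # size must match your camera frames
# inside the while loop, after drawing on the frame:
#     out.write(frame)
# after the loop, next to cap.release():
#     out.release()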
All the code and the original notebook are available on Google Colab.
Special thanks to the PyImageSearch blog.
Thanks for reading! If you enjoyed this tutorial, give it some claps.
Connect with me on Facebook, GitHub, LinkedIn, my blog, PyPI, Google Play Store, and my YouTube channel.
Email :falahgs07@gmail.com