Question 7 - YOLOv8¶
In [1]:
import cv2
import torch
import numpy as np
from collections import Counter
import matplotlib.pyplot as plt
from ultralytics import YOLO
import os
import glob
from tqdm import tqdm
In [2]:
MODEL_NAME = "data/yolov8.pt"
Load the dataset¶
In [3]:
val_labels = "./data/MaskedFace/val/labels"
val_imgs = "./data/MaskedFace/val/images"
y_true = glob.glob(os.path.join(val_labels,"*.txt"))
y_true.sort()
images = glob.glob(os.path.join(val_imgs,"*.png"))
images.sort()
In [4]:
test_dataset = {
    'images': images,  # list of image paths
    'y_true': y_true,  # list of label paths
}
In [5]:
def count_obj(txt_file, n_class):
    with open(txt_file, 'r') as file:
        lines = file.readlines()
    # Extract the class identifier from each line
    class_ids = [int(line.split()[0]) for line in lines]
    # Count the occurrences of each class
    class_counts = Counter(class_ids)
    # Sort by class id and convert to a list of counts
    sorted_counts = [class_counts[i] if i in class_counts else 0 for i in range(n_class)]
    return sorted_counts
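For reference, each line of a YOLO-format label file starts with the class id followed by the normalized box coordinates (class cx cy w h); count_obj only reads the first token. Below is a quick, hypothetical check on a throwaway label file, just to illustrate the expected output.
In [ ]:
import tempfile

# Hypothetical label file: two objects of class 0 and one of class 2
sample = "0 0.50 0.50 0.20 0.30\n0 0.10 0.20 0.10 0.10\n2 0.70 0.80 0.20 0.20\n"
with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
    f.write(sample)
    tmp_path = f.name

print(count_obj(tmp_path, 3))  # expected: [2, 0, 1]
os.remove(tmp_path)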
In [6]:
gt_counts = []
for idx, (img, txt) in enumerate(tqdm(zip(test_dataset['images'], test_dataset['y_true']))):
    # get ground truth
    obj_count = count_obj(txt, 3)
    gt_counts.append(obj_count)
Load the model¶
In [7]:
model = YOLO(MODEL_NAME)
Test on the validation set¶
In [8]:
from collections import Counter

def calculate_mape(actual, forecast):
    if len(actual) != len(forecast):
        raise ValueError("The length of actual and forecast arrays must be the same.")
    n = len(actual)
    sum_error = 0
    for a, f in zip(actual, forecast):
        sum_error += abs(a - f) / max(a, 1)
    mape_value = (sum_error / n) * 100
    return mape_value

def count_masks(model, dataset):
    n_class = 3
    mape_scores = []
    all_pred_counts = []
    all_obj_counts = []
    for idx, (img, txt) in enumerate(tqdm(zip(dataset['images'], dataset['y_true']))):
        # get the predicted class of every detected box
        preds = model.predict(img)
        pred = preds[0]
        predict_list = [box.cls[0].item() for box in pred.boxes]
        count = Counter(predict_list)
        predict_count = [count[i] if i in count else 0 for i in range(n_class)]
        # get the ground-truth counts
        obj_count = count_obj(txt, n_class)
        all_obj_counts.append(obj_count)
        all_pred_counts.append(predict_count)
    '''
    After training the model, I found that the class order I defined in data.yaml is
    [without_mask, with_mask, mask_weared_incorrect], which is the wrong order.
    Therefore, I swap the true and predicted labels to
    [with_mask, without_mask, mask_weared_incorrect] so that the returned values
    indicate the number of faces wearing a mask, without a mask, and wearing a mask
    incorrectly, respectively.
    I did not correct data.yaml and retrain the model due to time constraints.
    '''
    all_pred_counts = np.array(all_pred_counts)
    all_obj_counts = np.array(all_obj_counts)
    # all_pred_counts[:, [0, 1]] = all_pred_counts[:, [1, 0]]
    # all_obj_counts[:, [0, 1]] = all_obj_counts[:, [1, 0]]
    mape_scores = [calculate_mape(a, p) for a, p in zip(all_obj_counts, all_pred_counts)]
    # Convert all_pred_counts to int64 before returning
    all_pred_counts = all_pred_counts.astype(np.int64)
    return np.array(all_pred_counts), np.mean(mape_scores)
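The note inside count_masks concerns remapping count columns when the class order in data.yaml differs from the required output order (with_mask, without_mask, mask_weared_incorrect). A minimal sketch of such a remap on a toy counts array is shown below; the class names and permutation here are illustrative and not necessarily the indices used later in this notebook.
In [ ]:
# Illustrative only: reorder count columns from a source class order to a target order.
src_order = ["without_mask", "with_mask", "mask_weared_incorrect"]  # assumed order in data.yaml
dst_order = ["with_mask", "without_mask", "mask_weared_incorrect"]  # required output order
perm = [src_order.index(name) for name in dst_order]                # -> [1, 0, 2]

toy_counts = np.array([[4, 2, 1]])  # toy counts in src_order
print(toy_counts[:, perm])          # -> [[2, 4, 1]] in dst_order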
In [9]:
predicted_counts, mape_score = count_masks(model, test_dataset)
MAPE¶
In [10]:
def compute_mape(prediction, truth):
    mape = np.mean(np.abs(truth - prediction) / np.maximum(truth, np.ones_like(truth))) * 100
    return mape
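As a quick sanity check (toy numbers chosen arbitrarily), compute_mape averages |truth - prediction| / max(truth, 1) over all entries, so images with a zero ground-truth count do not cause a division by zero.
In [ ]:
toy_truth = np.array([[2, 0, 1],
                      [3, 1, 0]])
toy_pred = np.array([[2, 1, 1],
                     [2, 1, 0]])
# per-entry errors: [0, 1/1, 0, 1/3, 0, 0] -> mean ≈ 0.2222 -> 22.22 %
print(compute_mape(toy_pred, toy_truth))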
In [11]:
# X2d0f9f39
# predicted_counts[:, [0, 1]] = predicted_counts[:, [1, 0]]
In [12]:
predicted_counts[:, [1, 2]] = predicted_counts[:, [2, 1]]
In [13]:
MAPE = compute_mape(predicted_counts, gt_counts)
In [14]:
print(MAPE)
Final Score¶
In [15]:
if MAPE <= 10:
    print("Score: ", 25*1.0)
elif MAPE <= 15:
    print("Score: ", 25*0.875)
elif MAPE <= 20:
    print("Score: ", 25*0.75)
elif MAPE <= 25:
    print("Score: ", 25*0.625)
elif MAPE <= 30:
    print("Score: ", 25*0.5)
else:
    print("Score: ", 0)
In [ ]: