{
"cells": [
{
"cell_type": "markdown",
"id": "a619e638",
"metadata": {},
"source": [
"## Question 7 - YOLOv8"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "cb626037",
"metadata": {},
"outputs": [],
"source": [
"import cv2\n",
"import torch\n",
"import numpy as np\n",
"from collections import Counter\n",
"import matplotlib.pyplot as plt\n",
"\n",
"from ultralytics import YOLO\n",
"\n",
"import os\n",
"import glob\n",
"from tqdm import tqdm"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "39beaeb6",
"metadata": {},
"outputs": [],
"source": [
"MODEL_NAME = \"data/yolov8.pt\""
]
},
{
"cell_type": "markdown",
"id": "78f8f8d3",
"metadata": {},
"source": [
"## Load the dataset"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "f920de25",
"metadata": {},
"outputs": [],
"source": [
"val_labels = \"./data/MaskedFace/val/labels\"\n",
"val_imgs = \"./data/MaskedFace/val/images\"\n",
"\n",
"y_true = glob.glob(os.path.join(val_labels, \"*.txt\"))\n",
"y_true.sort()\n",
"\n",
"images = glob.glob(os.path.join(val_imgs, \"*.png\"))\n",
"images.sort()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "78f3faca",
"metadata": {},
"outputs": [],
"source": [
"test_dataset = {\n",
"    'images': images, # list of image paths\n",
"    'y_true': y_true, # list of label paths\n",
"}"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "dace1605",
"metadata": {},
"outputs": [],
"source": [
"def count_obj(txt_file, n_class):\n",
"    with open(txt_file, 'r') as file:\n",
"        lines = file.readlines()\n",
"    # Extracting the class identifiers from each line\n",
"    class_ids = [int(line.split()[0]) for line in lines]\n",
"\n",
"    # Counting the occurrences of each class\n",
"    class_counts = Counter(class_ids)\n",
"\n",
"    # Sorting the dictionary by class id and converting it to a list of counts\n",
"    sorted_counts = [class_counts[i] if i in class_counts else 0 for i in range(n_class)]\n",
"    return sorted_counts"
]
},
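{
"cell_type": "markdown",
"id": "count-obj-demo-note",
"metadata": {},
"source": [
"Optional illustration (added note, not part of the original submission): the cell below is a minimal sketch of what `count_obj` returns. It writes a small synthetic YOLO-format label file (three made-up boxes with class ids 0, 1 and 1) to a temporary path and counts it with `n_class = 3`; the expected result is `[1, 2, 0]`."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "count-obj-demo",
"metadata": {},
"outputs": [],
"source": [
"# Illustrative sketch only: synthetic label data, not from the MaskedFace dataset.\n",
"import os\n",
"import tempfile\n",
"\n",
"# Each YOLO label line is: <class_id> <x_center> <y_center> <width> <height>\n",
"demo_labels = \"0 0.50 0.50 0.20 0.30\\n1 0.30 0.40 0.10 0.10\\n1 0.60 0.20 0.20 0.20\\n\"\n",
"\n",
"with tempfile.NamedTemporaryFile(\"w\", suffix=\".txt\", delete=False) as f:\n",
"    f.write(demo_labels)\n",
"    demo_path = f.name\n",
"\n",
"# Expected output: [1, 2, 0] -> one class-0 box, two class-1 boxes, no class-2 boxes\n",
"print(count_obj(demo_path, 3))\n",
"\n",
"os.remove(demo_path)"
]
},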
{
"cell_type": "code",
"execution_count": 6,
"id": "bfc50534",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"85it [00:00, 7354.03it/s]\n"
]
}
],
"source": [
"gt_counts = []\n",
"for idx, (img, txt) in enumerate(tqdm(zip(test_dataset['images'], test_dataset['y_true']))):\n",
"    # get ground truth\n",
"    obj_count = count_obj(txt, 3)\n",
"    gt_counts.append(obj_count)"
]
},
{
"cell_type": "markdown",
"id": "44602de6",
"metadata": {},
"source": [
"## Load the model"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "e5ff04e4",
"metadata": {},
"outputs": [],
"source": [
"model = YOLO(MODEL_NAME)"
]
},
{
"cell_type": "markdown",
"id": "5ea8aa59",
"metadata": {},
"source": [
"## Test on the validation set"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "3d15ae87",
"metadata": {},
"outputs": [],
"source": [
"from collections import Counter\n",
"\n",
"def calculate_mape(actual, forecast):\n",
"    if len(actual) != len(forecast):\n",
"        raise ValueError(\"The length of actual and forecast arrays must be the same.\")\n",
"\n",
"    n = len(actual)\n",
"    sum_error = 0\n",
"\n",
"    for a, f in zip(actual, forecast):\n",
"        # Clamp the denominator at 1 so a ground-truth count of 0 does not divide by zero\n",
"        sum_error += abs(a - f) / max(a, 1)\n",
"\n",
"    mape_value = (sum_error / n) * 100\n",
"    return mape_value\n",
"\n",
"def count_masks(model, dataset):\n",
"    n_class = 3\n",
"    mape_scores = []\n",
"    all_pred_counts = []\n",
"    all_obj_counts = []\n",
"    for idx, (img, txt) in enumerate(tqdm(zip(dataset['images'], dataset['y_true']))):\n",
"        # get predicted list\n",
"        preds = model.predict(img)\n",
"        pred = preds[0]\n",
"        predict_list = [box.cls[0].item() for box in pred.boxes]\n",
"        count = Counter(predict_list)\n",
"        predict_count = [count[i] if i in count else 0 for i in range(n_class)]\n",
"        # get ground truth\n",
"        obj_count = count_obj(txt, n_class)\n",
"        all_obj_counts.append(obj_count)\n",
"        all_pred_counts.append(predict_count)\n",
"\n",
"    '''\n",
"    After training the model, I noticed that the class order I defined in data.yaml is [without_mask, with_mask, mask_weared_incorrect], which is the wrong order.\n",
"    Therefore, the true and predicted labels are swapped to [with_mask, without_mask, mask_weared_incorrect] in count_masks, so that the returned values respectively indicate the number of faces wearing a mask, not wearing a mask, and wearing a mask incorrectly.\n",
"    I did not correct data.yaml and retrain the model because of time constraints.\n",
"    '''\n",
"    all_pred_counts = np.array(all_pred_counts)\n",
"    all_obj_counts = np.array(all_obj_counts)\n",
"\n",
"# all_pred_counts[:, [0, 1]] = all_pred_counts[:, [1, 0]]\n",
"# all_obj_counts[:, [0, 1]] = all_obj_counts[:, [1, 0]]\n",
"\n",
"    mape_scores = [calculate_mape(a, p) for a, p in zip(all_obj_counts, all_pred_counts)]\n",
"\n",
"    # Convert all_pred_counts to int64 before returning\n",
"    all_pred_counts = all_pred_counts.astype(np.int64)\n",
"\n",
"    return np.array(all_pred_counts), np.mean(mape_scores)"
]
},
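{
"cell_type": "markdown",
"id": "calc-mape-demo-note",
"metadata": {},
"source": [
"Optional illustration (added note, not part of the original analysis): a quick sanity check of `calculate_mape` on made-up per-image counts. With ground truth `[2, 0, 1]` and prediction `[3, 1, 1]`, the per-class terms are 1/2, 1/1 and 0, so the per-image MAPE is 50%."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "calc-mape-demo",
"metadata": {},
"outputs": [],
"source": [
"# Illustrative sketch only: the counts below are made up.\n",
"demo_actual = [2, 0, 1]    # ground-truth counts per class\n",
"demo_forecast = [3, 1, 1]  # predicted counts per class\n",
"\n",
"# (|2 - 3| / 2 + |0 - 1| / 1 + |1 - 1| / 1) / 3 * 100 = 50.0\n",
"print(calculate_mape(demo_actual, demo_forecast))"
]
},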
{
"cell_type": "code",
"execution_count": 9,
"id": "1428b97d",
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"0it [00:00, ?it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-023.png: 480x640 1 with_mask, 4.3ms\n",
"Speed: 1.5ms preprocess, 4.3ms inference, 0.8ms postprocess per image at shape (1, 3, 480, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"1it [00:01, 1.65s/it]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-819.png: 384x640 1 with_mask, 4.4ms\n",
"Speed: 0.8ms preprocess, 4.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-131.png: 448x640 4 with_masks, 2 without_masks, 4.2ms\n",
"Speed: 0.8ms preprocess, 4.2ms inference, 0.7ms postprocess per image at shape (1, 3, 448, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-032.png: 384x640 2 with_masks, 4.0ms\n",
"Speed: 0.8ms preprocess, 4.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-256.png: 448x640 13 with_masks, 4.0ms\n",
"Speed: 0.8ms preprocess, 4.0ms inference, 0.7ms postprocess per image at shape (1, 3, 448, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-201.png: 384x640 12 with_masks, 4.0ms\n",
"Speed: 0.7ms preprocess, 4.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-610.png: 352x640 5 with_masks, 4.3ms\n",
"Speed: 0.7ms preprocess, 4.3ms inference, 0.7ms postprocess per image at shape (1, 3, 352, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-143.png: 640x512 1 with_mask, 4.4ms\n",
"Speed: 0.9ms preprocess, 4.4ms inference, 0.6ms postprocess per image at shape (1, 3, 640, 512)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-323.png: 608x640 2 with_masks, 4.3ms\n",
"Speed: 1.1ms preprocess, 4.3ms inference, 0.7ms postprocess per image at shape (1, 3, 608, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-383.png: 640x512 1 with_mask, 4.0ms\n",
"Speed: 1.0ms preprocess, 4.0ms inference, 0.6ms postprocess per image at shape (1, 3, 640, 512)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"10it [00:01, 7.73it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-307.png: 384x640 2 with_masks, 1 mask_weared_incorrect, 4.2ms\n",
"Speed: 0.7ms preprocess, 4.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-011.png: 448x640 26 with_masks, 1 mask_weared_incorrect, 4.0ms\n",
"Speed: 0.9ms preprocess, 4.0ms inference, 0.7ms postprocess per image at shape (1, 3, 448, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-043.png: 640x448 1 with_mask, 4.2ms\n",
"Speed: 0.9ms preprocess, 4.2ms inference, 0.6ms postprocess per image at shape (1, 3, 640, 448)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-098.png: 448x640 4 with_masks, 1 without_mask, 4.0ms\n",
"Speed: 0.8ms preprocess, 4.0ms inference, 0.7ms postprocess per image at shape (1, 3, 448, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-368.png: 448x640 9 with_masks, 3.8ms\n",
"Speed: 0.9ms preprocess, 3.8ms inference, 0.7ms postprocess per image at shape (1, 3, 448, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-802.png: 448x640 4 with_masks, 3.8ms\n",
"Speed: 0.9ms preprocess, 3.8ms inference, 0.7ms postprocess per image at shape (1, 3, 448, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-461.png: 448x640 8 with_masks, 3.9ms\n",
"Speed: 0.9ms preprocess, 3.9ms inference, 0.8ms postprocess per image at shape (1, 3, 448, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-574.png: 384x640 5 with_masks, 4.1ms\n",
"Speed: 0.7ms preprocess, 4.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-226.png: 384x640 1 with_mask, 3.7ms\n",
"Speed: 0.7ms preprocess, 3.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-444.png: 416x640 3 with_masks, 4.3ms\n",
"Speed: 0.8ms preprocess, 4.3ms inference, 0.7ms postprocess per image at shape (1, 3, 416, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"20it [00:01, 16.99it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-450.png: 480x640 4 with_masks, 4.0ms\n",
"Speed: 0.9ms preprocess, 4.0ms inference, 0.7ms postprocess per image at shape (1, 3, 480, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-095.png: 640x512 1 without_mask, 4.0ms\n",
"Speed: 0.9ms preprocess, 4.0ms inference, 0.6ms postprocess per image at shape (1, 3, 640, 512)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-269.png: 480x640 2 with_masks, 4.0ms\n",
"Speed: 0.9ms preprocess, 4.0ms inference, 0.7ms postprocess per image at shape (1, 3, 480, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-555.png: 384x640 4 with_masks, 4.1ms\n",
"Speed: 0.8ms preprocess, 4.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-139.png: 384x640 17 with_masks, 2 without_masks, 2 mask_weared_incorrects, 3.8ms\n",
"Speed: 0.9ms preprocess, 3.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-080.png: 384x640 1 with_mask, 3.8ms\n",
"Speed: 0.8ms preprocess, 3.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-371.png: 384x640 1 with_mask, 1 mask_weared_incorrect, 3.8ms\n",
"Speed: 0.7ms preprocess, 3.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-554.png: 480x640 5 with_masks, 4.1ms\n",
"Speed: 0.9ms preprocess, 4.1ms inference, 0.7ms postprocess per image at shape (1, 3, 480, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-660.png: 640x512 1 without_mask, 4.1ms\n",
"Speed: 0.9ms preprocess, 4.1ms inference, 0.6ms postprocess per image at shape (1, 3, 640, 512)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-180.png: 640x512 1 with_mask, 3.8ms\n",
"Speed: 0.9ms preprocess, 3.8ms inference, 0.6ms postprocess per image at shape (1, 3, 640, 512)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"30it [00:01, 27.19it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-019.png: 384x640 3 with_masks, 2 without_masks, 1 mask_weared_incorrect, 4.1ms\n",
"Speed: 0.8ms preprocess, 4.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-719.png: 384x640 4 with_masks, 1 without_mask, 3.8ms\n",
"Speed: 0.7ms preprocess, 3.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-410.png: 448x640 18 with_masks, 2 without_masks, 1 mask_weared_incorrect, 4.1ms\n",
"Speed: 0.9ms preprocess, 4.1ms inference, 0.7ms postprocess per image at shape (1, 3, 448, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-796.png: 448x640 12 with_masks, 3.7ms\n",
"Speed: 0.9ms preprocess, 3.7ms inference, 0.7ms postprocess per image at shape (1, 3, 448, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-765.png: 640x512 1 with_mask, 4.2ms\n",
"Speed: 1.0ms preprocess, 4.2ms inference, 0.6ms postprocess per image at shape (1, 3, 640, 512)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-305.png: 480x640 6 with_masks, 9 without_masks, 4 mask_weared_incorrects, 4.1ms\n",
"Speed: 0.9ms preprocess, 4.1ms inference, 0.7ms postprocess per image at shape (1, 3, 480, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-528.png: 640x512 1 without_mask, 4.1ms\n",
"Speed: 1.0ms preprocess, 4.1ms inference, 0.7ms postprocess per image at shape (1, 3, 640, 512)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-209.png: 352x640 4 with_masks, 13 without_masks, 4.1ms\n",
"Speed: 0.7ms preprocess, 4.1ms inference, 0.7ms postprocess per image at shape (1, 3, 352, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-328.png: 640x512 1 with_mask, 4.0ms\n",
"Speed: 1.0ms preprocess, 4.0ms inference, 0.7ms postprocess per image at shape (1, 3, 640, 512)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"39it [00:02, 36.68it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-833.png: 384x640 2 with_masks, 4.1ms\n",
"Speed: 0.7ms preprocess, 4.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-512.png: 448x640 8 with_masks, 4.0ms\n",
"Speed: 0.9ms preprocess, 4.0ms inference, 0.7ms postprocess per image at shape (1, 3, 448, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-286.png: 480x640 3 with_masks, 4.1ms\n",
"Speed: 0.9ms preprocess, 4.1ms inference, 0.7ms postprocess per image at shape (1, 3, 480, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-370.png: 640x512 1 without_mask, 4.1ms\n",
"Speed: 0.9ms preprocess, 4.1ms inference, 0.6ms postprocess per image at shape (1, 3, 640, 512)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-123.png: 512x640 3 with_masks, 4.6ms\n",
"Speed: 1.0ms preprocess, 4.6ms inference, 0.7ms postprocess per image at shape (1, 3, 512, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-829.png: 320x640 6 with_masks, 1 without_mask, 4.3ms\n",
"Speed: 0.7ms preprocess, 4.3ms inference, 0.7ms postprocess per image at shape (1, 3, 320, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-197.png: 384x640 1 with_mask, 4.2ms\n",
"Speed: 0.8ms preprocess, 4.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-793.png: 640x512 1 with_mask, 4.0ms\n",
"Speed: 1.0ms preprocess, 4.0ms inference, 0.6ms postprocess per image at shape (1, 3, 640, 512)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-562.png: 352x640 1 with_mask, 1 mask_weared_incorrect, 4.1ms\n",
"Speed: 0.7ms preprocess, 4.1ms inference, 0.7ms postprocess per image at shape (1, 3, 352, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-182.png: 448x640 2 with_masks, 1 mask_weared_incorrect, 4.1ms\n",
"Speed: 0.9ms preprocess, 4.1ms inference, 0.7ms postprocess per image at shape (1, 3, 448, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"49it [00:02, 47.10it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-149.png: 480x640 7 with_masks, 1 without_mask, 4.1ms\n",
"Speed: 0.9ms preprocess, 4.1ms inference, 0.7ms postprocess per image at shape (1, 3, 480, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-560.png: 448x640 1 with_mask, 4.1ms\n",
"Speed: 0.9ms preprocess, 4.1ms inference, 0.7ms postprocess per image at shape (1, 3, 448, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-373.png: 448x640 13 with_masks, 2 without_masks, 3.8ms\n",
"Speed: 0.9ms preprocess, 3.8ms inference, 0.7ms postprocess per image at shape (1, 3, 448, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-248.png: 640x512 1 without_mask, 4.2ms\n",
"Speed: 0.9ms preprocess, 4.2ms inference, 0.6ms postprocess per image at shape (1, 3, 640, 512)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-292.png: 480x640 1 with_mask, 4.1ms\n",
"Speed: 0.9ms preprocess, 4.1ms inference, 0.6ms postprocess per image at shape (1, 3, 480, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-041.png: 480x640 8 with_masks, 3.9ms\n",
"Speed: 1.0ms preprocess, 3.9ms inference, 0.7ms postprocess per image at shape (1, 3, 480, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-515.png: 640x512 1 with_mask, 4.1ms\n",
"Speed: 1.0ms preprocess, 4.1ms inference, 0.7ms postprocess per image at shape (1, 3, 640, 512)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-148.png: 608x640 2 with_masks, 3 without_masks, 4.2ms\n",
"Speed: 1.2ms preprocess, 4.2ms inference, 0.7ms postprocess per image at shape (1, 3, 608, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-036.png: 384x640 6 with_masks, 1 without_mask, 4.1ms\n",
"Speed: 0.7ms preprocess, 4.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"58it [00:02, 55.21it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-275.png: 640x544 1 with_mask, 4.5ms\n",
"Speed: 1.1ms preprocess, 4.5ms inference, 0.6ms postprocess per image at shape (1, 3, 640, 544)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-690.png: 416x640 19 with_masks, 1 without_mask, 4.2ms\n",
"Speed: 0.8ms preprocess, 4.2ms inference, 0.7ms postprocess per image at shape (1, 3, 416, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-581.png: 640x512 1 with_mask, 4.3ms\n",
"Speed: 0.9ms preprocess, 4.3ms inference, 0.6ms postprocess per image at shape (1, 3, 640, 512)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-607.png: 448x640 2 with_masks, 1 without_mask, 4.1ms\n",
"Speed: 0.9ms preprocess, 4.1ms inference, 0.7ms postprocess per image at shape (1, 3, 448, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-227.png: 448x640 11 with_masks, 3.8ms\n",
"Speed: 0.9ms preprocess, 3.8ms inference, 0.7ms postprocess per image at shape (1, 3, 448, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-184.png: 352x640 16 with_masks, 4.1ms\n",
"Speed: 0.7ms preprocess, 4.1ms inference, 0.7ms postprocess per image at shape (1, 3, 352, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-387.png: 384x640 5 with_masks, 1 mask_weared_incorrect, 4.1ms\n",
"Speed: 0.8ms preprocess, 4.1ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-169.png: 640x512 1 with_mask, 4.1ms\n",
"Speed: 1.0ms preprocess, 4.1ms inference, 0.6ms postprocess per image at shape (1, 3, 640, 512)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-411.png: 448x640 7 with_masks, 1 without_mask, 4.1ms\n",
"Speed: 0.8ms preprocess, 4.1ms inference, 0.7ms postprocess per image at shape (1, 3, 448, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"67it [00:02, 62.86it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-742.png: 640x512 1 with_mask, 4.2ms\n",
"Speed: 0.9ms preprocess, 4.2ms inference, 0.6ms postprocess per image at shape (1, 3, 640, 512)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-621.png: 384x640 3 with_masks, 2 without_masks, 4.1ms\n",
"Speed: 0.8ms preprocess, 4.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-280.png: 448x640 14 with_masks, 7 without_masks, 4.1ms\n",
"Speed: 0.9ms preprocess, 4.1ms inference, 0.7ms postprocess per image at shape (1, 3, 448, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-637.png: 384x640 6 with_masks, 4.1ms\n",
"Speed: 0.7ms preprocess, 4.1ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-745.png: 640x512 1 with_mask, 4.1ms\n",
"Speed: 1.0ms preprocess, 4.1ms inference, 0.7ms postprocess per image at shape (1, 3, 640, 512)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-606.png: 448x640 4 with_masks, 4.1ms\n",
"Speed: 0.9ms preprocess, 4.1ms inference, 0.7ms postprocess per image at shape (1, 3, 448, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-152.png: 480x640 8 with_masks, 4.1ms\n",
"Speed: 1.0ms preprocess, 4.1ms inference, 0.7ms postprocess per image at shape (1, 3, 480, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-296.png: 384x640 27 with_masks, 15 without_masks, 4.1ms\n",
"Speed: 0.8ms preprocess, 4.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-699.png: 448x640 1 with_mask, 6 without_masks, 4.0ms\n",
"Speed: 0.9ms preprocess, 4.0ms inference, 0.7ms postprocess per image at shape (1, 3, 448, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"76it [00:02, 69.31it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-094.png: 384x640 5 with_masks, 1 without_mask, 1 mask_weared_incorrect, 4.0ms\n",
"Speed: 0.8ms preprocess, 4.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-130.png: 640x544 2 with_masks, 2 without_masks, 4.0ms\n",
"Speed: 1.0ms preprocess, 4.0ms inference, 0.7ms postprocess per image at shape (1, 3, 640, 544)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-086.png: 480x640 3 with_masks, 1 without_mask, 4.1ms\n",
"Speed: 0.9ms preprocess, 4.1ms inference, 0.6ms postprocess per image at shape (1, 3, 480, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-589.png: 640x448 1 with_mask, 1 mask_weared_incorrect, 4.2ms\n",
"Speed: 0.9ms preprocess, 4.2ms inference, 0.7ms postprocess per image at shape (1, 3, 640, 448)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-058.png: 448x640 13 with_masks, 4.0ms\n",
"Speed: 0.9ms preprocess, 4.0ms inference, 0.7ms postprocess per image at shape (1, 3, 448, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-377.png: 480x640 1 with_mask, 4.1ms\n",
"Speed: 0.9ms preprocess, 4.1ms inference, 0.7ms postprocess per image at shape (1, 3, 480, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-260.png: 480x640 50 with_masks, 3.8ms\n",
"Speed: 0.9ms preprocess, 3.8ms inference, 0.7ms postprocess per image at shape (1, 3, 480, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-594.png: 448x640 8 with_masks, 4.1ms\n",
"Speed: 0.8ms preprocess, 4.1ms inference, 0.7ms postprocess per image at shape (1, 3, 448, 640)\n",
"\n",
"image 1/1 /home/wuhanstudio/Documents/Marking/Template/data/MaskedFace/val/images/mask-598.png: 640x512 1 with_mask, 3.9ms\n",
"Speed: 1.0ms preprocess, 3.9ms inference, 0.6ms postprocess per image at shape (1, 3, 640, 512)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"85it [00:02, 32.88it/s]\n"
]
}
],
"source": [
"predicted_counts, mape_score = count_masks(model, test_dataset)"
]
},
{
"cell_type": "markdown",
"id": "67dda1aa",
"metadata": {},
"source": [
"## MAPE"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "e7624ff3",
"metadata": {},
"outputs": [],
"source": [
"def compute_mape(prediction, truth):\n",
"    mape = np.mean(np.abs(truth - prediction) / np.maximum(truth, np.ones_like(truth))) * 100\n",
"    return mape"
]
},
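{
"cell_type": "markdown",
"id": "mape-formula-note",
"metadata": {},
"source": [
"Added note (not part of the original submission): `compute_mape` above averages the absolute count error over every image and class, with the denominator clamped at 1 so that classes absent from the ground truth do not cause division by zero:\n",
"\n",
"$$\\mathrm{MAPE} = \\frac{100}{N \\cdot C} \\sum_{i=1}^{N} \\sum_{c=1}^{C} \\frac{\\lvert t_{i,c} - p_{i,c} \\rvert}{\\max(t_{i,c},\\, 1)}$$\n",
"\n",
"where $N$ is the number of validation images, $C = 3$ is the number of classes, $t_{i,c}$ is the ground-truth count and $p_{i,c}$ the predicted count for class $c$ in image $i$."
]
},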
{
"cell_type": "code",
"execution_count": 11,
"id": "fbb7aa74",
"metadata": {},
"outputs": [],
"source": [
"# X2d0f9f39\n",
"# predicted_counts[:, [0, 1]] = predicted_counts[:, [1, 0]]"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "028f3e71",
"metadata": {},
"outputs": [],
"source": [
"predicted_counts[:, [1, 2]] = predicted_counts[:, [2, 1]]"
]
},
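{
"cell_type": "markdown",
"id": "swap-demo-note",
"metadata": {},
"source": [
"Optional illustration (added note, not part of the original submission): the cell above swaps columns 1 and 2 of `predicted_counts` in place, i.e. it exchanges which class occupies the second and third positions, presumably to align the predicted class order with the ground-truth order discussed inside `count_masks`. The sketch below uses made-up numbers to show what the column swap does."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "swap-demo",
"metadata": {},
"outputs": [],
"source": [
"# Illustrative sketch only: dummy per-image counts, columns are [class 0, class 1, class 2].\n",
"import numpy as np\n",
"\n",
"demo_counts = np.array([[3, 1, 2],\n",
"                        [0, 4, 5]])\n",
"demo_counts[:, [1, 2]] = demo_counts[:, [2, 1]]  # swap the last two columns in place\n",
"print(demo_counts)  # [[3 2 1]\n",
"                    #  [0 5 4]]"
]
},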
{
"cell_type": "code",
"execution_count": 13,
"id": "c9176cc8",
"metadata": {},
"outputs": [],
"source": [
"MAPE = compute_mape(predicted_counts, gt_counts)"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "828484ae",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"133.83205417471694\n"
]
}
],
"source": [
"print(MAPE)"
]
},
{
"cell_type": "markdown",
"id": "b29e3ba9",
"metadata": {},
"source": [
"## Final Score"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "9b170114",
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Score: 0\n"
]
}
],
"source": [
"if MAPE <= 10:\n",
"    print(\"Score: \", 25*1.0)\n",
"elif MAPE <= 15:\n",
"    print(\"Score: \", 25*0.875)\n",
"elif MAPE <= 20:\n",
"    print(\"Score: \", 25*0.75)\n",
"elif MAPE <= 25:\n",
"    print(\"Score: \", 25*0.625)\n",
"elif MAPE <= 30:\n",
"    print(\"Score: \", 25*0.5)\n",
"else:\n",
"    print(\"Score: \", 0)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "258ec405",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "what",
"language": "python",
"name": "what"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.13"
}
},
"nbformat": 4,
"nbformat_minor": 5
}