ECMM426-Template/Question 7 - YOLOv8.ipynb

{
"cells": [
{
"cell_type": "markdown",
"id": "a619e638",
"metadata": {},
"source": [
"## Question 7 - YOLOv8"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "cb626037",
"metadata": {},
"outputs": [],
"source": [
"import cv2\n",
"import torch\n",
"import numpy as np\n",
"from collections import Counter\n",
"import matplotlib.pyplot as plt\n",
"\n",
"from ultralytics import YOLO\n",
"\n",
"import os\n",
"import glob\n",
"from tqdm import tqdm"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "39beaeb6",
"metadata": {},
"outputs": [],
"source": [
"MODEL_NAME = \"data/yolov8.pt\""
]
},
{
"cell_type": "markdown",
"id": "78f8f8d3",
"metadata": {},
"source": [
"## Load the dataset"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "f920de25",
"metadata": {},
"outputs": [],
"source": [
"val_labels = \"./data/MaskedFace/val/labels\"\n",
"val_imgs = \"./data/MaskedFace/val/images\"\n",
"\n",
"y_true = glob.glob(os.path.join(val_labels,\"*.txt\"))\n",
"images = glob.glob(os.path.join(val_imgs,\"*.png\"))"
]
},
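{
"cell_type": "markdown",
"id": "3fa1c0de",
"metadata": {},
"source": [
"The evaluation below pairs each image with its label file by position in the two lists, so a quick check that the filename stems line up can catch a mismatched dataset early. The next cell is an added illustrative check, not part of the original pipeline."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4b7e91aa",
"metadata": {},
"outputs": [],
"source": [
"# Illustrative sanity check (added): confirm that images and labels pair up by filename stem.\n",
"assert len(images) == len(y_true), \"Number of images and label files differ.\"\n",
"for img_path, lbl_path in zip(images, y_true):\n",
"    img_stem = os.path.splitext(os.path.basename(img_path))[0]\n",
"    lbl_stem = os.path.splitext(os.path.basename(lbl_path))[0]\n",
"    assert img_stem == lbl_stem, f\"Mismatched pair: {img_path} vs {lbl_path}\"\n",
"print(f\"{len(images)} image/label pairs verified.\")"
]
},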
{
"cell_type": "code",
"execution_count": 4,
"id": "78f3faca",
"metadata": {},
"outputs": [],
"source": [
"test_dataset = {\n",
" 'images': images, # list of image paths\n",
" 'y_true': y_true, # list of label paths\n",
"}"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "dace1605",
"metadata": {},
"outputs": [],
"source": [
"def count_obj(txt_file, n_class):\n",
" with open(txt_file, 'r') as file:\n",
" lines = file.readlines()\n",
" # Extracting the class identifiers from each line\n",
" class_ids = [int(line.split()[0]) for line in lines]\n",
"\n",
" # Counting the occurrences of each class\n",
" class_counts = Counter(class_ids)\n",
"\n",
" # Sorting the dictionary by class id and converting it to a list of counts\n",
" sorted_counts = [class_counts[i] if i in class_counts else 0 for i in range(n_class)]\n",
" return sorted_counts"
]
},
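{
"cell_type": "markdown",
"id": "5c2d88f1",
"metadata": {},
"source": [
"A tiny usage example for `count_obj` (added for illustration): it writes a throwaway YOLO-format label file with hand-picked class ids and checks the returned counts. The temporary filename is arbitrary and is removed afterwards."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6d3e99b2",
"metadata": {},
"outputs": [],
"source": [
"# Illustrative example (added): count_obj on a small, hand-written YOLO-format label file.\n",
"sample_label = \"_tmp_example_label.txt\"  # arbitrary temporary path, deleted below\n",
"with open(sample_label, \"w\") as f:\n",
"    f.write(\"0 0.5 0.5 0.2 0.2\\n\")  # one object of class 0\n",
"    f.write(\"1 0.3 0.3 0.1 0.1\\n\")  # two objects of class 1\n",
"    f.write(\"1 0.7 0.7 0.1 0.1\\n\")\n",
"print(count_obj(sample_label, 3))  # expected: [1, 2, 0]\n",
"os.remove(sample_label)"
]
},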
{
"cell_type": "code",
"execution_count": 6,
"id": "bfc50534",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"85it [00:00, 1838.59it/s]\n"
]
}
],
"source": [
"gt_counts = []\n",
"for idx , (img , txt) in enumerate(tqdm(zip(test_dataset['images'], test_dataset['y_true']))):\n",
" # get ground truth\n",
" obj_count = count_obj(txt, 3)\n",
" gt_counts.append(obj_count)"
]
},
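{
"cell_type": "markdown",
"id": "7e4faac3",
"metadata": {},
"source": [
"As a quick check of the ground truth just collected, the per-class totals over the validation set can be summarised (added sketch; class ids follow the order used in the label files)."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8f5abbd4",
"metadata": {},
"outputs": [],
"source": [
"# Illustrative summary (added) of the ground-truth counts collected above.\n",
"gt_totals = np.array(gt_counts).sum(axis=0)\n",
"print(\"Total ground-truth objects per class id 0..2:\", gt_totals)"
]
},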
{
"cell_type": "markdown",
"id": "44602de6",
"metadata": {},
"source": [
"## Load the model"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "e5ff04e4",
"metadata": {},
"outputs": [],
"source": [
"model = YOLO(MODEL_NAME)"
]
},
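{
"cell_type": "markdown",
"id": "9a6bcce5",
"metadata": {},
"source": [
"Given the class-order issue described in `count_masks` below, it is worth inspecting the id-to-name mapping stored with the trained weights via the `names` attribute exposed by the Ultralytics `YOLO` class. This cell is an added check, not part of the original pipeline."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0b7cddf6",
"metadata": {},
"outputs": [],
"source": [
"# Inspect the class id -> name mapping baked into the checkpoint (added check),\n",
"# to confirm the order in which the classes were defined in data.yaml.\n",
"print(model.names)"
]
},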
{
"cell_type": "markdown",
"id": "5ea8aa59",
"metadata": {},
"source": [
"## Test on the validation set"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "3d15ae87",
"metadata": {},
"outputs": [],
"source": [
"from collections import Counter\n",
"\n",
"def calculate_mape(actual, forecast):\n",
" if len(actual) != len(forecast):\n",
" raise ValueError(\"The length of actual and forecast arrays must be the same.\")\n",
" \n",
" n = len(actual)\n",
" sum_error = 0\n",
" \n",
" for a, f in zip(actual, forecast):\n",
" sum_error += abs(a - f) / max(a, 1)\n",
" \n",
" mape_value = (sum_error / n) * 100\n",
" return mape_value\n",
"\n",
"def count_masks(model, dataset):\n",
" n_class = 3\n",
" mape_scores = []\n",
" all_pred_counts = []\n",
" all_obj_counts = []\n",
" for idx , (img , txt) in enumerate(tqdm(zip(dataset['images'],dataset['y_true']))):\n",
" # get predicted list\n",
" preds = model.predict(img)\n",
" pred = preds[0]\n",
" predict_list = [ box.cls[0].item() for box in pred.boxes]\n",
" count = Counter(predict_list)\n",
" predict_count = [count[i] if i in count else 0 for i in range(n_class)]\n",
" # get ground truth\n",
" obj_count = count_obj(txt, n_class)\n",
" all_obj_counts.append(obj_count)\n",
" all_pred_counts.append(predict_count)\n",
"\n",
" '''\n",
" After the model was trained, I just found that I defined the format class in data.yaml is [without_mask, with_mask, mask_weared_incorrect] which is wrong in order. \n",
" Therefore, I will swap the true label and predicted label to [with_mask, without_mask, mask_weared_incorrect] in the count_masks function to return the values should respectively indicate the number of faces wearing mask, without mask and incorrectly wearing mask.\n",
" The reason why I did not correct the data.yaml and train the model again because of the limitation of time.\n",
" '''\n",
" all_pred_counts = np.array(all_pred_counts)\n",
" all_obj_counts = np.array(all_obj_counts)\n",
"\n",
"# all_pred_counts[:, [0, 1]] = all_pred_counts[:, [1, 0]]\n",
"# all_obj_counts[:, [0, 1]] = all_obj_counts[:, [1, 0]]\n",
"\n",
" mape_scores = [calculate_mape(a, p) for a, p in zip(all_obj_counts, all_pred_counts)]\n",
"\n",
" # Convert all_pred_counts to int64 before returning\n",
" all_pred_counts = all_pred_counts.astype(np.int64)\n",
" \n",
" return np.array(all_pred_counts), np.mean(mape_scores)"
]
},
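{
"cell_type": "markdown",
"id": "1c8deea7",
"metadata": {},
"source": [
"A small worked example of `calculate_mape` (added for illustration), using hand-picked counts for a single image, to show how each per-class error is normalised by `max(actual, 1)` before averaging."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2d9effb8",
"metadata": {},
"outputs": [],
"source": [
"# Worked example (added): per-image MAPE for one image with three classes.\n",
"actual = [4, 0, 1]    # ground-truth counts for classes 0, 1, 2\n",
"forecast = [5, 1, 1]  # predicted counts\n",
"# (|4-5|/4 + |0-1|/1 + |1-1|/1) / 3 * 100 = (0.25 + 1.0 + 0.0) / 3 * 100 = 41.67 (approx.)\n",
"print(calculate_mape(actual, forecast))"
]
},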
{
"cell_type": "code",
"execution_count": 9,
"id": "1428b97d",
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"0it [00:00, ?it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-011.png: 448x640 26 with_masks, 1 mask_weared_incorrect, 311.0ms\n",
"Speed: 5.7ms preprocess, 311.0ms inference, 6.6ms postprocess per image at shape (1, 3, 448, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"1it [00:02, 2.99s/it]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-019.png: 384x640 3 with_masks, 2 without_masks, 1 mask_weared_incorrect, 237.2ms\n",
"Speed: 6.9ms preprocess, 237.2ms inference, 1.7ms postprocess per image at shape (1, 3, 384, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"2it [00:03, 1.39s/it]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-023.png: 480x640 1 with_mask, 293.8ms\n",
"Speed: 6.0ms preprocess, 293.8ms inference, 2.8ms postprocess per image at shape (1, 3, 480, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"3it [00:03, 1.11it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-032.png: 384x640 2 with_masks, 187.6ms\n",
"Speed: 3.9ms preprocess, 187.6ms inference, 0.0ms postprocess per image at shape (1, 3, 384, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"4it [00:03, 1.58it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-036.png: 384x640 6 with_masks, 1 without_mask, 190.1ms\n",
"Speed: 5.0ms preprocess, 190.1ms inference, 0.0ms postprocess per image at shape (1, 3, 384, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"5it [00:04, 2.06it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-041.png: 480x640 8 with_masks, 220.2ms\n",
"Speed: 5.4ms preprocess, 220.2ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"6it [00:04, 2.48it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-043.png: 640x448 1 with_mask, 255.3ms\n",
"Speed: 4.9ms preprocess, 255.3ms inference, 2.2ms postprocess per image at shape (1, 3, 640, 448)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"7it [00:04, 2.75it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-058.png: 448x640 13 with_masks, 245.6ms\n",
"Speed: 7.3ms preprocess, 245.6ms inference, 1.0ms postprocess per image at shape (1, 3, 448, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"8it [00:04, 2.98it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-080.png: 384x640 1 with_mask, 201.2ms\n",
"Speed: 5.1ms preprocess, 201.2ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"9it [00:05, 3.31it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-086.png: 480x640 3 with_masks, 1 without_mask, 256.2ms\n",
"Speed: 5.1ms preprocess, 256.2ms inference, 1.8ms postprocess per image at shape (1, 3, 480, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"10it [00:05, 3.37it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-094.png: 384x640 5 with_masks, 1 without_mask, 1 mask_weared_incorrect, 199.7ms\n",
"Speed: 4.1ms preprocess, 199.7ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"11it [00:05, 3.64it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-095.png: 640x512 1 without_mask, 292.5ms\n",
"Speed: 6.2ms preprocess, 292.5ms inference, 3.0ms postprocess per image at shape (1, 3, 640, 512)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"12it [00:05, 3.45it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-098.png: 448x640 4 with_masks, 1 without_mask, 230.2ms\n",
"Speed: 5.6ms preprocess, 230.2ms inference, 1.0ms postprocess per image at shape (1, 3, 448, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"13it [00:06, 3.57it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-123.png: 512x640 3 with_masks, 253.3ms\n",
"Speed: 4.0ms preprocess, 253.3ms inference, 0.0ms postprocess per image at shape (1, 3, 512, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"14it [00:06, 3.56it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-130.png: 640x544 2 with_masks, 2 without_masks, 264.4ms\n",
"Speed: 10.2ms preprocess, 264.4ms inference, 2.1ms postprocess per image at shape (1, 3, 640, 544)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"15it [00:06, 3.50it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-131.png: 448x640 4 with_masks, 2 without_masks, 222.3ms\n",
"Speed: 2.4ms preprocess, 222.3ms inference, 0.0ms postprocess per image at shape (1, 3, 448, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"16it [00:06, 3.64it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-139.png: 384x640 17 with_masks, 2 without_masks, 2 mask_weared_incorrects, 175.8ms\n",
"Speed: 4.0ms preprocess, 175.8ms inference, 0.0ms postprocess per image at shape (1, 3, 384, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"17it [00:07, 3.94it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-143.png: 640x512 1 with_mask, 231.2ms\n",
"Speed: 6.1ms preprocess, 231.2ms inference, 2.1ms postprocess per image at shape (1, 3, 640, 512)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"18it [00:07, 3.92it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-148.png: 608x640 2 with_masks, 3 without_masks, 295.3ms\n",
"Speed: 8.5ms preprocess, 295.3ms inference, 0.0ms postprocess per image at shape (1, 3, 608, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"19it [00:07, 3.59it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-149.png: 480x640 7 with_masks, 1 without_mask, 229.2ms\n",
"Speed: 6.6ms preprocess, 229.2ms inference, 8.1ms postprocess per image at shape (1, 3, 480, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"20it [00:08, 3.64it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-152.png: 480x640 8 with_masks, 209.4ms\n",
"Speed: 4.9ms preprocess, 209.4ms inference, 8.1ms postprocess per image at shape (1, 3, 480, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"21it [00:08, 3.78it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-169.png: 640x512 1 with_mask, 242.2ms\n",
"Speed: 6.1ms preprocess, 242.2ms inference, 0.0ms postprocess per image at shape (1, 3, 640, 512)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"22it [00:08, 3.75it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-180.png: 640x512 1 with_mask, 232.8ms\n",
"Speed: 8.4ms preprocess, 232.8ms inference, 7.1ms postprocess per image at shape (1, 3, 640, 512)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"23it [00:08, 3.75it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-182.png: 448x640 2 with_masks, 1 mask_weared_incorrect, 222.5ms\n",
"Speed: 5.3ms preprocess, 222.5ms inference, 0.0ms postprocess per image at shape (1, 3, 448, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"24it [00:09, 3.83it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-184.png: 352x640 16 with_masks, 172.3ms\n",
"Speed: 2.0ms preprocess, 172.3ms inference, 7.1ms postprocess per image at shape (1, 3, 352, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"25it [00:09, 4.12it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-197.png: 384x640 1 with_mask, 182.8ms\n",
"Speed: 2.5ms preprocess, 182.8ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"26it [00:09, 4.32it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-201.png: 384x640 12 with_masks, 177.6ms\n",
"Speed: 3.8ms preprocess, 177.6ms inference, 2.1ms postprocess per image at shape (1, 3, 384, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"27it [00:09, 4.49it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-209.png: 352x640 4 with_masks, 13 without_masks, 168.5ms\n",
"Speed: 14.7ms preprocess, 168.5ms inference, 7.6ms postprocess per image at shape (1, 3, 352, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"28it [00:09, 4.56it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-226.png: 384x640 1 with_mask, 171.7ms\n",
"Speed: 3.9ms preprocess, 171.7ms inference, 0.0ms postprocess per image at shape (1, 3, 384, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"29it [00:10, 4.68it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-227.png: 448x640 11 with_masks, 223.3ms\n",
"Speed: 4.7ms preprocess, 223.3ms inference, 2.0ms postprocess per image at shape (1, 3, 448, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"30it [00:10, 4.47it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-248.png: 640x512 1 without_mask, 237.8ms\n",
"Speed: 5.6ms preprocess, 237.8ms inference, 0.0ms postprocess per image at shape (1, 3, 640, 512)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"31it [00:10, 4.20it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-256.png: 448x640 13 with_masks, 222.1ms\n",
"Speed: 4.2ms preprocess, 222.1ms inference, 1.0ms postprocess per image at shape (1, 3, 448, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"32it [00:10, 4.16it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-260.png: 480x640 50 with_masks, 290.6ms\n",
"Speed: 6.5ms preprocess, 290.6ms inference, 2.4ms postprocess per image at shape (1, 3, 480, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"33it [00:11, 3.76it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-269.png: 480x640 2 with_masks, 240.2ms\n",
"Speed: 4.7ms preprocess, 240.2ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"34it [00:11, 3.75it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-275.png: 640x544 1 with_mask, 259.9ms\n",
"Speed: 6.7ms preprocess, 259.9ms inference, 0.0ms postprocess per image at shape (1, 3, 640, 544)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"35it [00:11, 3.66it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-280.png: 448x640 14 with_masks, 7 without_masks, 221.5ms\n",
"Speed: 3.9ms preprocess, 221.5ms inference, 2.0ms postprocess per image at shape (1, 3, 448, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"36it [00:11, 3.76it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-286.png: 480x640 3 with_masks, 286.4ms\n",
"Speed: 5.1ms preprocess, 286.4ms inference, 2.6ms postprocess per image at shape (1, 3, 480, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"37it [00:12, 3.56it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-292.png: 480x640 1 with_mask, 226.3ms\n",
"Speed: 7.4ms preprocess, 226.3ms inference, 2.1ms postprocess per image at shape (1, 3, 480, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"38it [00:12, 3.65it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-296.png: 384x640 27 with_masks, 15 without_masks, 199.4ms\n",
"Speed: 4.5ms preprocess, 199.4ms inference, 2.0ms postprocess per image at shape (1, 3, 384, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"39it [00:12, 3.84it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-305.png: 480x640 6 with_masks, 9 without_masks, 4 mask_weared_incorrects, 268.3ms\n",
"Speed: 6.8ms preprocess, 268.3ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"40it [00:13, 3.67it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-307.png: 384x640 2 with_masks, 1 mask_weared_incorrect, 182.9ms\n",
"Speed: 3.7ms preprocess, 182.9ms inference, 1.6ms postprocess per image at shape (1, 3, 384, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"41it [00:13, 3.96it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-323.png: 608x640 2 with_masks, 261.5ms\n",
"Speed: 9.3ms preprocess, 261.5ms inference, 0.0ms postprocess per image at shape (1, 3, 608, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"42it [00:13, 3.75it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-328.png: 640x512 1 with_mask, 230.3ms\n",
"Speed: 6.3ms preprocess, 230.3ms inference, 2.0ms postprocess per image at shape (1, 3, 640, 512)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"43it [00:13, 3.78it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-368.png: 448x640 9 with_masks, 225.7ms\n",
"Speed: 2.9ms preprocess, 225.7ms inference, 2.0ms postprocess per image at shape (1, 3, 448, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"44it [00:14, 3.84it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-370.png: 640x512 1 without_mask, 232.2ms\n",
"Speed: 7.4ms preprocess, 232.2ms inference, 0.0ms postprocess per image at shape (1, 3, 640, 512)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"45it [00:14, 3.83it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-371.png: 384x640 1 with_mask, 1 mask_weared_incorrect, 232.0ms\n",
"Speed: 3.5ms preprocess, 232.0ms inference, 2.4ms postprocess per image at shape (1, 3, 384, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"46it [00:14, 3.84it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-373.png: 448x640 13 with_masks, 2 without_masks, 222.2ms\n",
"Speed: 4.5ms preprocess, 222.2ms inference, 3.0ms postprocess per image at shape (1, 3, 448, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"47it [00:14, 3.86it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-377.png: 480x640 1 with_mask, 282.4ms\n",
"Speed: 5.3ms preprocess, 282.4ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"48it [00:15, 3.66it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-383.png: 640x512 1 with_mask, 251.2ms\n",
"Speed: 4.3ms preprocess, 251.2ms inference, 2.0ms postprocess per image at shape (1, 3, 640, 512)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"49it [00:15, 3.64it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-387.png: 384x640 5 with_masks, 1 mask_weared_incorrect, 182.5ms\n",
"Speed: 3.4ms preprocess, 182.5ms inference, 0.0ms postprocess per image at shape (1, 3, 384, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"50it [00:15, 3.93it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-410.png: 448x640 18 with_masks, 2 without_masks, 1 mask_weared_incorrect, 261.8ms\n",
"Speed: 7.9ms preprocess, 261.8ms inference, 1.9ms postprocess per image at shape (1, 3, 448, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"51it [00:15, 3.76it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-411.png: 448x640 7 with_masks, 1 without_mask, 326.2ms\n",
"Speed: 4.8ms preprocess, 326.2ms inference, 1.5ms postprocess per image at shape (1, 3, 448, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"52it [00:16, 3.42it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-444.png: 416x640 3 with_masks, 247.0ms\n",
"Speed: 6.0ms preprocess, 247.0ms inference, 2.1ms postprocess per image at shape (1, 3, 416, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"53it [00:16, 3.48it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-450.png: 480x640 4 with_masks, 290.4ms\n",
"Speed: 8.8ms preprocess, 290.4ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"54it [00:16, 3.36it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-461.png: 448x640 8 with_masks, 228.1ms\n",
"Speed: 8.0ms preprocess, 228.1ms inference, 2.0ms postprocess per image at shape (1, 3, 448, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"55it [00:17, 3.50it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-512.png: 448x640 8 with_masks, 195.1ms\n",
"Speed: 5.8ms preprocess, 195.1ms inference, 7.5ms postprocess per image at shape (1, 3, 448, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"56it [00:17, 3.72it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-515.png: 640x512 1 with_mask, 241.7ms\n",
"Speed: 7.8ms preprocess, 241.7ms inference, 2.0ms postprocess per image at shape (1, 3, 640, 512)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"57it [00:17, 3.71it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-528.png: 640x512 1 without_mask, 287.1ms\n",
"Speed: 6.2ms preprocess, 287.1ms inference, 1.8ms postprocess per image at shape (1, 3, 640, 512)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"58it [00:17, 3.52it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-554.png: 480x640 5 with_masks, 262.2ms\n",
"Speed: 4.4ms preprocess, 262.2ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"59it [00:18, 3.50it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-555.png: 384x640 4 with_masks, 203.6ms\n",
"Speed: 4.0ms preprocess, 203.6ms inference, 2.4ms postprocess per image at shape (1, 3, 384, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"60it [00:18, 3.72it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-560.png: 448x640 1 with_mask, 259.8ms\n",
"Speed: 3.6ms preprocess, 259.8ms inference, 0.0ms postprocess per image at shape (1, 3, 448, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"61it [00:18, 3.65it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-562.png: 352x640 1 with_mask, 1 mask_weared_incorrect, 178.5ms\n",
"Speed: 0.0ms preprocess, 178.5ms inference, 1.0ms postprocess per image at shape (1, 3, 352, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"62it [00:18, 4.00it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-574.png: 384x640 5 with_masks, 181.9ms\n",
"Speed: 4.9ms preprocess, 181.9ms inference, 0.0ms postprocess per image at shape (1, 3, 384, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"63it [00:19, 4.21it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-581.png: 640x512 1 with_mask, 197.9ms\n",
"Speed: 8.3ms preprocess, 197.9ms inference, 6.7ms postprocess per image at shape (1, 3, 640, 512)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"64it [00:19, 4.25it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-589.png: 640x448 1 with_mask, 1 mask_weared_incorrect, 197.9ms\n",
"Speed: 5.3ms preprocess, 197.9ms inference, 6.1ms postprocess per image at shape (1, 3, 640, 448)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"65it [00:19, 4.31it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-594.png: 448x640 8 with_masks, 204.8ms\n",
"Speed: 3.6ms preprocess, 204.8ms inference, 0.0ms postprocess per image at shape (1, 3, 448, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"66it [00:19, 4.32it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-598.png: 640x512 1 with_mask, 204.8ms\n",
"Speed: 6.3ms preprocess, 204.8ms inference, 0.0ms postprocess per image at shape (1, 3, 640, 512)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"67it [00:20, 4.31it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-606.png: 448x640 4 with_masks, 239.3ms\n",
"Speed: 4.0ms preprocess, 239.3ms inference, 2.0ms postprocess per image at shape (1, 3, 448, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"68it [00:20, 4.14it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-607.png: 448x640 2 with_masks, 1 without_mask, 177.7ms\n",
"Speed: 4.7ms preprocess, 177.7ms inference, 0.0ms postprocess per image at shape (1, 3, 448, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"69it [00:20, 4.33it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-610.png: 352x640 5 with_masks, 158.2ms\n",
"Speed: 4.2ms preprocess, 158.2ms inference, 0.0ms postprocess per image at shape (1, 3, 352, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"70it [00:20, 4.61it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-621.png: 384x640 3 with_masks, 2 without_masks, 175.1ms\n",
"Speed: 4.0ms preprocess, 175.1ms inference, 7.1ms postprocess per image at shape (1, 3, 384, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"71it [00:20, 4.68it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-637.png: 384x640 6 with_masks, 171.3ms\n",
"Speed: 4.1ms preprocess, 171.3ms inference, 9.2ms postprocess per image at shape (1, 3, 384, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"72it [00:21, 4.75it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-660.png: 640x512 1 without_mask, 212.6ms\n",
"Speed: 4.5ms preprocess, 212.6ms inference, 0.0ms postprocess per image at shape (1, 3, 640, 512)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"73it [00:21, 4.59it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-690.png: 416x640 19 with_masks, 1 without_mask, 181.0ms\n",
"Speed: 4.0ms preprocess, 181.0ms inference, 0.0ms postprocess per image at shape (1, 3, 416, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"74it [00:21, 4.64it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-699.png: 448x640 1 with_mask, 6 without_masks, 212.2ms\n",
"Speed: 3.6ms preprocess, 212.2ms inference, 2.0ms postprocess per image at shape (1, 3, 448, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"75it [00:21, 4.50it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-719.png: 384x640 4 with_masks, 1 without_mask, 206.2ms\n",
"Speed: 2.6ms preprocess, 206.2ms inference, 2.0ms postprocess per image at shape (1, 3, 384, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"76it [00:22, 4.46it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-742.png: 640x512 1 with_mask, 233.0ms\n",
"Speed: 8.2ms preprocess, 233.0ms inference, 2.7ms postprocess per image at shape (1, 3, 640, 512)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"77it [00:22, 4.23it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-745.png: 640x512 1 with_mask, 317.3ms\n",
"Speed: 8.0ms preprocess, 317.3ms inference, 2.0ms postprocess per image at shape (1, 3, 640, 512)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"78it [00:22, 3.71it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-765.png: 640x512 1 with_mask, 329.8ms\n",
"Speed: 5.8ms preprocess, 329.8ms inference, 0.7ms postprocess per image at shape (1, 3, 640, 512)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"79it [00:23, 3.38it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-793.png: 640x512 1 with_mask, 232.5ms\n",
"Speed: 11.2ms preprocess, 232.5ms inference, 0.0ms postprocess per image at shape (1, 3, 640, 512)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"80it [00:23, 3.48it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-796.png: 448x640 12 with_masks, 185.0ms\n",
"Speed: 4.4ms preprocess, 185.0ms inference, 8.2ms postprocess per image at shape (1, 3, 448, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"81it [00:23, 3.76it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-802.png: 448x640 4 with_masks, 190.2ms\n",
"Speed: 3.0ms preprocess, 190.2ms inference, 2.0ms postprocess per image at shape (1, 3, 448, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"82it [00:23, 4.01it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-819.png: 384x640 1 with_mask, 177.0ms\n",
"Speed: 4.0ms preprocess, 177.0ms inference, 1.6ms postprocess per image at shape (1, 3, 384, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"83it [00:23, 4.27it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-829.png: 320x640 6 with_masks, 1 without_mask, 203.6ms\n",
"Speed: 3.2ms preprocess, 203.6ms inference, 2.0ms postprocess per image at shape (1, 3, 320, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
"84it [00:24, 4.30it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 F:\\ECMM426_CV_Workshop\\Marking\\Template\\data\\MaskedFace\\val\\images\\mask-833.png: 384x640 2 with_masks, 178.7ms\n",
"Speed: 7.6ms preprocess, 178.7ms inference, 2.1ms postprocess per image at shape (1, 3, 384, 640)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"85it [00:24, 3.49it/s]\n"
]
}
],
"source": [
"predicted_counts, mape_score = count_masks(model, test_dataset)"
]
},
{
"cell_type": "markdown",
"id": "67dda1aa",
"metadata": {},
"source": [
"## MAPE"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "e7624ff3",
"metadata": {},
"outputs": [],
"source": [
"def compute_mape(prediction, truth):\n",
" mape = np.mean( np.abs(truth - prediction) / np.maximum(truth, np.ones_like(truth)) ) * 100\n",
" return mape"
]
},
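{
"cell_type": "markdown",
"id": "3eaf00c9",
"metadata": {},
"source": [
"As implemented above, with $t_{ij}$ and $p_{ij}$ the true and predicted counts of class $j$ in image $i$, over $N$ validation images and $C = 3$ classes,\n",
"\n",
"$$\\mathrm{MAPE} = \\frac{100}{NC} \\sum_{i=1}^{N} \\sum_{j=1}^{C} \\frac{\\lvert t_{ij} - p_{ij} \\rvert}{\\max(t_{ij},\\, 1)},$$\n",
"\n",
"where the $\\max(\\cdot, 1)$ in the denominator avoids division by zero when a class has no instances in an image."
]
},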
{
"cell_type": "code",
"execution_count": 11,
"id": "fbb7aa74",
"metadata": {},
"outputs": [],
"source": [
"# X2d0f9f39\n",
"# predicted_counts[:, [0, 1]] = predicted_counts[:, [1, 0]]"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "028f3e71",
"metadata": {},
"outputs": [],
"source": [
"predicted_counts[:, [1, 2]] = predicted_counts[:, [2, 1]]"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "c9176cc8",
"metadata": {},
"outputs": [],
"source": [
"MAPE = compute_mape(predicted_counts, gt_counts)"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "828484ae",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"16.415378480087284\n"
]
}
],
"source": [
"print(MAPE)"
]
},
{
"cell_type": "markdown",
"id": "b29e3ba9",
"metadata": {},
"source": [
"## Final Score"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "9b170114",
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Score: 18.75\n"
]
}
],
"source": [
"if MAPE <= 10:\n",
" print(\"Score: \", 25*1.0)\n",
"elif MAPE <= 15:\n",
" print(\"Score: \", 25*0.875)\n",
"elif MAPE <= 20:\n",
" print(\"Score: \", 25*0.75)\n",
"elif MAPE <= 25:\n",
" print(\"Score: \", 25*0.625)\n",
"elif MAPE <= 30:\n",
" print(\"Score: \", 25*0.5)\n",
"else:\n",
" print(\"Score: \", 0) "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "258ec405",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "what",
"language": "python",
"name": "what"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.16"
}
},
"nbformat": 4,
"nbformat_minor": 5
}