Question 4 (10 marks)¶
In [1]:
import torch
import torch.nn.functional as F
from ca_utils import ResNet, BasicBlock
In [2]:
model = ResNet(block=BasicBlock, layers=[1, 1, 1], num_classes=10)
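The layers=[1, 1, 1] argument requests one BasicBlock per stage, giving a small ResNet head suited to a 10-class dataset. A quick way to gauge the model's size before loading weights (a minimal sketch using standard PyTorch calls):

# Count the model's parameters as a quick size check (sketch)
n_params = sum(p.numel() for p in model.parameters())
print(f"total parameters: {n_params:,}")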
Load the Model¶
In [3]:
checkpoint = torch.load("data/weights_resnet.pth", map_location=torch.device('cpu'))
model.load_state_dict(checkpoint)
model.eval()
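Before evaluating on real data, one way to confirm the checkpoint loaded and the forward pass runs is to push a dummy batch through the network. A minimal sketch; note the 3x32x32 input size is an assumption based on the CIFAR-style 10-class setup, not something the notebook states:

# Sanity check: forward a random batch through the restored model
# (sketch; the 3x32x32 input shape is an assumption, adjust to the dataset)
with torch.no_grad():
    dummy = torch.randn(2, 3, 32, 32)
    out = model(dummy)
print(out.shape)  # expected: torch.Size([2, 10]) -- one logit per class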
Question 5 (15 marks)¶
In [4]:
import torchvision
from torch.utils.data import DataLoader
from torchvision import transforms
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
In [5]:
image_transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
test_data = torchvision.datasets.ImageFolder('data/EXCV10/val/', transform=image_transform)
test_loader = DataLoader(test_data, batch_size=64, shuffle=False, num_workers=4, pin_memory=True)
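To confirm the pipeline produces what the model expects, it helps to inspect one batch: with the (0.5, 0.5, 0.5) mean/std normalization, pixel values should land in [-1, 1], and ImageFolder maps each class folder to an integer label. A short sketch:

# Peek at a single batch from the loader (sketch)
images, labels = next(iter(test_loader))
print(images.shape, labels.shape)                 # e.g. [64, 3, H, W] and [64]
print(images.min().item(), images.max().item())   # should fall within [-1.0, 1.0]
print(test_data.classes)                          # folder names in label-index order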
Method 1¶
In [6]:
import numpy as np
In [7]:
def m1_test_cnn(model, test_loader):
    model.to(device)
    model.eval()
    correct = 0
    total = 0
    all_predicted_labels = []
    with torch.no_grad():
        for images, labels in test_loader:
            # Make predictions
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            _, predicted = torch.max(outputs.data, 1)
            # Save results
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
            all_predicted_labels.append(predicted.cpu().numpy())
    accuracy = 100 * correct / total
    all_predicted_labels = np.concatenate(all_predicted_labels)
    return all_predicted_labels, accuracy
In [8]:
m1_predicted_labels, m1_test_accuracy = m1_test_cnn(model, test_loader)
print(f'Test Accuracy: {m1_test_accuracy}%')
Put Students' implementations here¶
In [9]:
def test_cnn(model, test_loader):
    """
    Test the trained ResNet model on the test dataset.

    Args:
        model (nn.Module): The trained ResNet model.
        test_loader (DataLoader): Data loader for the test data.

    Returns:
        list: Predicted labels.
        float: Test accuracy as a percentage.
    """
    model.eval()
    correct = 0
    total = 0
    predicted_labels = []
    with torch.no_grad():
        for images, labels in test_loader:
            # Move the batch to the same device as the model
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
            predicted_labels.extend(predicted.cpu().tolist())
    accuracy = correct / total
    return predicted_labels, accuracy * 100
In [10]:
predicted_labels, test_accuracy = test_cnn(model, test_loader)
print(f'Test Accuracy: {test_accuracy}%')
Test (Should output ALL PASS)¶
In [11]:
assert np.allclose(predicted_labels, m1_predicted_labels)
assert np.allclose(test_accuracy, m1_test_accuracy)
print("Test accuracy: ", test_accuracy)
if test_accuracy >= 75:
    print("Score 100%:", 15 * 1.0)
elif test_accuracy >= 70:
    print("Score 90%:", 15 * 0.90)
elif test_accuracy >= 65:
    print("Score 80%:", 15 * 0.80)
elif test_accuracy >= 60:
    print("Score 70%:", 15 * 0.70)
elif test_accuracy >= 55:
    print("Score 60%:", 15 * 0.60)
elif test_accuracy >= 50:
    print("Score 50%:", 15 * 0.50)
else:
    print("Accuracy less than 50%")
print("ALL PASS")
Question 6 (6 marks)¶
In [12]:
true_labels = []
for images, labels in test_loader:
    images, labels = images.to(device), labels.to(device)
    true_labels.extend(labels.cpu().numpy())
true_labels = np.array(true_labels)
In [13]:
def m1_compute_confusion_matrix(true, predictions):
    unique_labels = np.unique(np.concatenate((true, predictions)))
    confusion_mat = np.zeros((len(unique_labels), len(unique_labels)), dtype=np.int64)
    label_to_index = {label: index for index, label in enumerate(unique_labels)}
    for t, p in zip(true, predictions):
        t_index = label_to_index[t]
        p_index = label_to_index[p]
        confusion_mat[t_index][p_index] += 1
    return confusion_mat
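An equivalent loop-free construction is possible because ImageFolder targets are already contiguous integers 0..C-1. This sketch (not part of the marking scheme) uses np.add.at to accumulate one count per (true, predicted) pair in place; the num_classes=10 default is an assumption matching this dataset:

def vectorized_confusion_matrix(true, predictions, num_classes=10):
    # Accumulate counts at each (true_label, predicted_label) index pair
    cm = np.zeros((num_classes, num_classes), dtype=np.int64)
    np.add.at(cm, (np.asarray(true), np.asarray(predictions)), 1)
    return cm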
In [14]:
m1_confusion_matrix = m1_compute_confusion_matrix(true_labels, m1_predicted_labels)
Put Students' implementations here¶
In [15]:
def compute_confusion_matrix(true_labels, predicted_labels):
    # Ensure inputs are NumPy arrays
    true_labels = np.array(true_labels)
    predicted_labels = np.array(predicted_labels)
    # Determine the number of classes
    num_classes = len(np.unique(true_labels))
    # Initialize the confusion matrix with zeros
    cm = np.zeros((num_classes, num_classes))
    # Count occurrences of true-predicted label pairs
    for i in range(len(true_labels)):
        cm[true_labels[i]][predicted_labels[i]] += 1
    return cm
In [16]:
confusion_matrix = compute_confusion_matrix(true_labels, predicted_labels)
Test (Should output ALL PASS)¶
In [17]:
assert np.allclose(confusion_matrix, m1_confusion_matrix)
print("ALL PASS")