Question 4 (10 marks)¶
In [1]:
import torch
import torch.nn.functional as F
from ca_utils import ResNet, BasicBlock
In [2]:
model = ResNet(block=BasicBlock, layers=[1, 1, 1], num_classes=10)
Load the Model¶
In [3]:
checkpoint = torch.load("data/weights_resnet.pth", map_location=torch.device('cpu'))
model.load_state_dict(checkpoint)
model.eval()
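As an optional sanity check (not part of the marking scheme), one can confirm the checkpoint actually populated the model, e.g. by counting parameters and verifying they are finite after loading. This is a minimal sketch assuming nothing beyond the objects created above.
In [ ]:
# Optional sanity check: confirm the loaded weights look reasonable.
num_params = sum(p.numel() for p in model.parameters())
print(f"Loaded model with {num_params:,} parameters")
# load_state_dict would have raised on a key/shape mismatch, so this is
# belt-and-braces: check no parameter contains NaN/inf values.
assert all(torch.isfinite(p).all() for p in model.parameters())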
Question 5 (15 marks)¶
In [4]:
import torchvision
from torch.utils.data import DataLoader
from torchvision import transforms
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
In [5]:
image_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])  # scale [0, 1] to [-1, 1] per channel
# image_transform = transforms.Compose([
# # transforms.Resize(256),
# # transforms.CenterCrop(224),
# transforms.ToTensor(),
# transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
# ])
test_data = torchvision.datasets.ImageFolder('data/EXCV10/val/', transform=image_transform)
test_loader = DataLoader(test_data, batch_size=64, shuffle=False, num_workers=4, pin_memory=True)
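Before evaluating, it can help to eyeball what the loader yields. A quick sketch (the class names come from the EXCV10 folder layout, which ImageFolder reads in alphabetical order):
In [ ]:
# Peek at the evaluation data: class names, dataset size, and one batch.
print("Classes:", test_data.classes)
print("Validation images:", len(test_data))
images, labels = next(iter(test_loader))
print("Batch shape:", tuple(images.shape), "labels shape:", tuple(labels.shape))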
Method 1¶
In [6]:
import numpy as np
In [7]:
def m1_test_cnn(model, test_loader):
    model.to(device)
    model.eval()
    correct = 0
    total = 0
    all_predicted_labels = []
    with torch.no_grad():
        for images, labels in test_loader:
            # Make predictions
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            _, predicted = torch.max(outputs.data, 1)
            # Save results
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
            all_predicted_labels.append(predicted.cpu().numpy())
    accuracy = 100 * correct / total
    all_predicted_labels = np.concatenate(all_predicted_labels)
    return all_predicted_labels, accuracy
In [8]:
m1_predicted_labels, m1_test_accuracy = m1_test_cnn(model, test_loader)
print(f'Test Accuracy: {m1_test_accuracy}%')
Put Students' implementations here¶
In [9]:
def test_cnn(model, test_loader, device='cpu'):
    model.to(device)
    model.eval()
    total = 0
    correct_num = 0
    all_predicted_labels = []
    with torch.no_grad():  # No need to track gradients for testing
        for images, labels in test_loader:
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct_num += (predicted == labels).sum().item()
            all_predicted_labels.append(predicted.cpu().numpy())
    accuracy = (correct_num / total) * 100
    all_predicted_labels = np.concatenate(all_predicted_labels)
    return all_predicted_labels, accuracy
In [10]:
predicted_labels, test_accuracy = test_cnn(model, test_loader)
print(f'Test Accuracy: {test_accuracy}%')
Test (Should output ALL PASS)¶
In [11]:
assert np.allclose(predicted_labels, m1_predicted_labels)
assert np.allclose(test_accuracy, m1_test_accuracy)
print("Test accuracy: ", test_accuracy)
if test_accuracy >= 75:
    print("Score 100%:", 15 * 1.0)
elif test_accuracy >= 70:
    print("Score 90%:", 15 * 0.90)
elif test_accuracy >= 65:
    print("Score 80%:", 15 * 0.80)
elif test_accuracy >= 60:
    print("Score 70%:", 15 * 0.70)
elif test_accuracy >= 55:
    print("Score 60%:", 15 * 0.60)
elif test_accuracy >= 50:
    print("Score 50%:", 15 * 0.50)
else:
    print("Accuracy less than 50%")
print("ALL PASS")
Question 6 (6 marks)¶
In [12]:
true_labels = []
for images, labels in test_loader:
    images, labels = images.to(device), labels.to(device)
    true_labels.extend(labels.cpu().numpy())
true_labels = np.array(true_labels)
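This relies on the DataLoader above using shuffle=False: the labels come out in the same deterministic order as the predictions collected earlier. A one-line check of that alignment:
In [ ]:
# Labels and predictions must be index-aligned for the confusion matrix below.
assert len(true_labels) == len(m1_predicted_labels)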
In [13]:
def m1_compute_confusion_matrix(true, predictions):
    unique_labels = np.unique(np.concatenate((true, predictions)))
    confusion_mat = np.zeros((len(unique_labels), len(unique_labels)), dtype=np.int64)
    label_to_index = {label: index for index, label in enumerate(unique_labels)}
    for t, p in zip(true, predictions):
        t_index = label_to_index[t]
        p_index = label_to_index[p]
        confusion_mat[t_index][p_index] += 1
    return confusion_mat
In [14]:
m1_confusion_matrix = m1_compute_confusion_matrix(true_labels, m1_predicted_labels)
In [17]:
from sklearn.metrics import confusion_matrix
def m2_compute_confusion_matrix(true, predictions):
    return confusion_matrix(true, predictions)
In [18]:
m2_confusion_matrix = m2_compute_confusion_matrix(true_labels, m1_predicted_labels)
Put Students' implementations here¶
In [19]:
def compute_confusion_matrix(true, predictions):
    unique_labels = np.unique(np.concatenate((true, predictions)))
    confusion_matrix = np.zeros((len(unique_labels), len(unique_labels)), dtype=np.int64)
    for i, true_label in enumerate(unique_labels):
        for j, predicted_label in enumerate(unique_labels):
            confusion_matrix[i, j] = np.sum((true == true_label) & (predictions == predicted_label))
    return confusion_matrix
In [20]:
# Note: this rebinds the name `confusion_matrix` imported from sklearn above;
# m2_compute_confusion_matrix has already been evaluated, so nothing breaks.
confusion_matrix = compute_confusion_matrix(true_labels, predicted_labels)
Test (Should output ALL PASS)¶
In [21]:
assert np.allclose(m1_confusion_matrix, m2_confusion_matrix)
assert np.allclose(confusion_matrix, m1_confusion_matrix)
print("ALL PASS")