@adnan-bashir-fuaad
Created August 26, 2018 18:02
Transfer learning example with PyTorch on ImageNet-200; the base model is SqueezeNet 1.1.
#!/home/adnan/anaconda3/bin/python
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision
from torchvision import datasets, models, transforms
import time
import os
import copy
batch_size = 128
num_workers = 4
data_transforms = {
    'train': transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    'val': transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
}
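# Note: the 'train' pipeline has no Resize/RandomResizedCrop, so it assumes the
# training images are already 224x224 (a guess based on the "Enlarged-ImageNet-200"
# dataset name); if they are not, add e.g. transforms.RandomResizedCrop(224) first.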
save_dir = '/home/adnan/A-New-Start/ImageNet-200_Classifier_Pre-trained_Models/'
data_dir = '/home/adnan/A-New-Start/Enlarged-ImageNet-200'
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in ['train', 'val']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size, shuffle=True, num_workers=num_workers) for x in ['train', 'val']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def train_model(model, criterion, optimizer, scheduler=None, num_epochs=25):
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        # Each epoch has a training and validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                if scheduler is not None:
                    scheduler.step()
                model.train()  # Set model to training mode
            else:
                model.eval()  # Set model to evaluation mode
            running_loss = 0.0
            running_corrects = 0
            # Iterate over data.
            iter_count = 0
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward
                # track history only if in train
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)
                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # statistics
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
                iter_count += 1
                print('Done with batch ' + str(iter_count))
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
            # checkpoint and deep-copy the model on every validation improvement
            if phase == 'val' and epoch_acc > best_acc:
                torch.save(model.state_dict(), os.path.join(save_dir, 'ImageNet-200-Classifier-Pretrained-Model-' + str(epoch_acc.item()) + '.pkl'))
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
        print()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:.4f}'.format(best_acc))
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
model_ft = models.squeezenet1_1(pretrained=True)
# print(model_ft.classifier[1])
# Freeze the pretrained backbone; only the replacement classifier below will train.
for param in model_ft.parameters():
    param.requires_grad = False
# For fc-headed architectures (e.g. ResNet) the swap would be:
# num_ftrs = model_ft.fc.in_features
# model_ft.fc = nn.Linear(num_ftrs, len(class_names))
# SqueezeNet instead classifies with a 1x1 convolution, so replace that layer;
# the new Conv2d's parameters default to requires_grad=True.
num_ftrs = model_ft.classifier[1].in_channels
model_ft.classifier[1] = nn.Conv2d(in_channels=num_ftrs, out_channels=200, kernel_size=(1, 1), stride=(1, 1))
# SqueezeNet's forward() reshapes its output using self.num_classes, so keep it in sync.
model_ft.num_classes = 200
# Sanity check: only the classifier parameters should print True.
for param in model_ft.parameters():
    print(param.requires_grad)
print(model_ft)
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that all parameters are being optimized
# optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
optimizer_ft = optim.Adam(params=model_ft.parameters(), lr=0.001)
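# Since the backbone is frozen, an equivalent (and slightly cheaper) setup is to
# hand Adam only the trainable parameters, e.g.:
# optimizer_ft = optim.Adam(filter(lambda p: p.requires_grad, model_ft.parameters()), lr=0.001)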
# Decay LR by a factor of 0.1 every 7 epochs
# exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
model_ft = train_model(model=model_ft, criterion=criterion, optimizer=optimizer_ft, num_epochs=50)
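# A minimal sketch (not part of the original gist) of reloading a saved checkpoint
# for inference; the path below is a placeholder, not a real filename:
# model = models.squeezenet1_1(pretrained=False)
# model.classifier[1] = nn.Conv2d(model.classifier[1].in_channels, 200, kernel_size=(1, 1))
# model.num_classes = 200
# model.load_state_dict(torch.load('<path-to-checkpoint.pkl>'))
# model = model.to(device).eval()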