In [2]:
import torch
workDir='/usr/data/'
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
In [1]:
# This mounts your Google Drive to the Colab VM.
from google.colab import drive
drive.mount('/content/drive', force_remount=True)
%cd '/usr'
!mkdir 'data'
%cd '/usr/data'
In [3]:
!pip install --upgrade fastai
!pip install -U aicrowd-cli
In [4]:
API_KEY = '52ab6eb031245b7028158e2f3e993174' #Please enter your API Key from [https://www.aicrowd.com/participants/me]
!aicrowd login --api-key $API_KEY
In [5]:
!aicrowd dataset download --challenge f1-car-rotation -j 3
In [6]:
!rm -rf data
!mkdir data
!unzip -q train.zip -d data/train
!unzip -q val.zip -d data/val
!unzip -q test.zip -d data/test
!mv train.csv data/train.csv
!mv val.csv data/val.csv
!mv sample_submission.csv data/sample_submission.csv
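A quick, optional sanity check of the unpacked layout can catch path or column-name mistakes before training; this sketch assumes the CSVs use the `ImageID`/`label` columns that the sample submission uses.
In [ ]:
# Optional sanity check of the extracted data (hypothetical cell, not required for training).
import os
import pandas as pd
print(len(os.listdir('data/train')), 'train images,',
      len(os.listdir('data/val')), 'val images,',
      len(os.listdir('data/test')), 'test images')
print(pd.read_csv('data/train.csv').head())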
In [7]:
import torch
from torch.utils.data import Dataset,DataLoader,RandomSampler
from torchvision import transforms as T
import pandas as pd
from PIL import Image
class ImageDataset(Dataset):
    def __init__(self, ImageFold, lblDict, df, transforms):
        self.ImageFold = ImageFold
        self.df = df
        self.trans = transforms
        self.lblDict = lblDict

    def __len__(self):
        return len(self.df)

    def __getitem__(self, ind):
        # First column holds the image id, second column the label string.
        im = self.load_image(self.df.iloc[ind, 0])
        im = self.trans(im)
        return im, self.lblDict[self.df.iloc[ind, 1]]

    def load_image(self, image_id):
        return Image.open(self.ImageFold + str(image_id) + '.jpg')
In [13]:
trainResnet = T.Compose([
    # T.Resize(imSize),
    # T.RandomHorizontalFlip(),
    T.ToTensor(),
    T.Normalize(mean=[0.485, 0.456, 0.406],
                std=[0.229, 0.224, 0.225])
])
lblDict={'front':0,'back':1,'right':2,'left':3}
df_train=pd.read_csv('data/train.csv')
ds_train_resnet=ImageDataset(workDir+'data/train/',lblDict,df_train,trainResnet)
dl_train_resnet=DataLoader(ds_train_resnet,batch_size=64,shuffle=True,num_workers=2)
df_val=pd.read_csv('data/val.csv')
ds_val_resnet=ImageDataset(workDir+'data/val/',lblDict,df_val,trainResnet)
dl_val_resnet=DataLoader(ds_val_resnet,batch_size=64,shuffle=False,num_workers=2)
dataloaders_dict={'train':dl_train_resnet,'val':dl_val_resnet}
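Before training, it can help to pull a single batch and confirm tensor shapes and label encoding; a minimal optional sketch, assuming the dataloaders above were built without errors.
In [ ]:
# Optional: inspect one training batch (shapes and label range).
xb, yb = next(iter(dataloaders_dict['train']))
print(xb.shape, xb.dtype)                          # expected: torch.Size([64, 3, 256, 256])
print(yb[:10], yb.min().item(), yb.max().item())   # labels should lie in 0..3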
In [14]:
def train_model(model, dataloaders, criterion, optimizer, num_epochs=25):
    since = time.time()
    val_acc_history = []
    best_acc = 0
    best_model_wts = copy.deepcopy(model.state_dict())
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        # Each epoch has a training and a validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()   # Set model to training mode
            else:
                model.eval()    # Set model to evaluation mode
            running_loss = 0.0
            running_corrects = 0
            seen = 0  # number of samples processed, used to throttle loss printing
            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward; track history only in train
                with torch.set_grad_enabled(phase == 'train'):
                    seen += len(labels)
                    outputs = model(inputs)
                    loss = criterion(outputs, labels)
                    _, preds = torch.max(outputs, 1)
                    if seen % 8192 == 0:
                        print(loss)
                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # statistics
                running_corrects += torch.sum(preds == labels.data)
                running_loss += loss.detach().item() * len(labels)
            epoch_loss = running_loss / len(dataloaders[phase].dataset)
            epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)
            # keep the weights with the best validation accuracy
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
            if phase == 'val':
                val_acc_history.append(epoch_acc)
            print('{} Loss: {:.4f}, acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
        print()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
In [15]:
import random
import time
import os
import copy
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
from torchvision import datasets, models, transforms
EfficientNet
In [18]:
import torch
!pip install efficientnet_pytorch
from efficientnet_pytorch import EfficientNet
In [19]:
model = EfficientNet.from_pretrained('efficientnet-b3',num_classes = 4)
model.to(device)
criterion=nn.CrossEntropyLoss()
num_epochs=7
optimizer =torch.optim.Adam(model.parameters(), lr=0.001)
model_ft = train_model(model, dataloaders_dict, criterion, optimizer, num_epochs=num_epochs)
torch.save(model.state_dict(), '/content/drive/MyDrive/weights_4_chel_ef_adam1_crossentropy.txt')
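If the Colab runtime resets between these training stages, the saved state dict can be reloaded before continuing with the lower learning rates; a minimal optional sketch using the checkpoint path saved above.
In [ ]:
# Optional: restore the checkpoint saved above if the runtime was reset.
model = EfficientNet.from_pretrained('efficientnet-b3', num_classes=4)
model.load_state_dict(torch.load('/content/drive/MyDrive/weights_4_chel_ef_adam1_crossentropy.txt',
                                 map_location=device))
model.to(device)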
In [20]:
optimizer =torch.optim.Adam(model.parameters(), lr=0.0003)
model_ft = train_model(model, dataloaders_dict, criterion, optimizer, num_epochs=5)
torch.save(model.state_dict(), '/content/drive/MyDrive/weights_4_chel_ef_adam2_entropy.txt')
In [21]:
optimizer =torch.optim.Adam(model.parameters(), lr=0.00004)
model_ft = train_model(model, dataloaders_dict, criterion, optimizer, num_epochs=4)
torch.save(model.state_dict(), '/content/drive/MyDrive/weights_4_chel_ef_adam3_entropy.txt')
In [23]:
model.eval()
clsDict = {0: 'front', 1: 'back', 2: 'right', 3: 'left'}
A = [[i for i in range(10000)], [''] * 10000]
df = pd.DataFrame(A).transpose()
df.columns = ['ImageID', 'label']
for f in os.listdir('data/test/'):
    im = Image.open('data/test/' + f)
    tens = trainResnet(im).unsqueeze(0)   # add batch dimension: (1, 3, 256, 256)
    inputs = tens.to(device)
    with torch.no_grad():
        outputs = np.argmax(model(inputs).detach().cpu().numpy())
    df.iloc[int(f.split('.')[0]), 1] = clsDict[outputs]
df.to_csv('/content/drive/MyDrive/submission.csv', index=False)
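Before submitting, a quick optional check that every test image received a label avoids wasting a submission attempt; this assumes the 10000-row `ImageID`/`label` format written above.
In [ ]:
# Optional: verify the submission has the expected row count and no missing labels.
sub = pd.read_csv('/content/drive/MyDrive/submission.csv')
print(len(sub), 'rows')                 # expected: 10000
print(sub['label'].value_counts())
assert sub['label'].notna().all() and (sub['label'] != '').all()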
In [24]:
!aicrowd submission create -c f1-car-rotation -f '/content/drive/MyDrive/submission.csv'