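"""Train EAD with distributed data parallelism.

Spawns one worker per GPU. Each worker trains a MoCo_KL student and distills
from a frozen CLIP ViT-B/16 teacher: both models' image features are compared
against precomputed CLIP text embeddings, and the normalized entropy of the
teacher's text-similarity distribution weights the contrastive loss against
the distillation (KL) loss per sample.
"""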
import argparse
import math
import os

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.multiprocessing as mp
from torch.utils.data import DataLoader
from torchvision import transforms
from tqdm import tqdm

import clip

from utils import set_seed, get_lr, GaussianBlur
from dataset import CUB200Pair, MyPair
from models import MoCo_KL
parser = argparse.ArgumentParser(description='Train EAD')
parser.add_argument('-t', '--task', default='bird', help='task name: bird, car, or aircraft')
parser.add_argument('--lr', '--learning-rate', default=0.03, type=float, metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--epochs', default=100, type=int, metavar='N', help='number of total epochs to run')
parser.add_argument('--schedule', default=[60, 80], nargs='*', type=int, help='learning rate schedule (when to drop lr by 10x); has no effect if --cos is on')
parser.add_argument('--cos', default=True, help='use cosine lr schedule (on by default; pass an empty string to disable)')
parser.add_argument('--seed', default=123, type=int, metavar='N', help='random seed')
parser.add_argument('--batch-size', default=64, type=int, metavar='N', help='mini-batch size (total across all GPUs)')
parser.add_argument('--wd', default=1e-4, type=float, metavar='W', help='weight decay')
# moco specific configs:
parser.add_argument('--moco-dim', default=512, type=int, help='feature dimension')
parser.add_argument('--moco-k', default=65536, type=int, help='queue size; number of negative keys')
parser.add_argument('--moco-m', default=0.999, type=float, help='moco momentum for updating the key encoder')
parser.add_argument('--moco-t', default=0.2, type=float, help='softmax temperature')
# utils
parser.add_argument('--pretrained', default='resnet50-19c8e357.pth', type=str, metavar='PATH', help='path to pretrained checkpoint')
parser.add_argument('--root', default='bird/', type=str, metavar='PATH', help='path to dataset')
parser.add_argument('--checkpoints', default='checkpoints/EAD_bird/', type=str, metavar='PATH', help='path to save checkpoints')
parser.add_argument('--num-classes', default=200, type=int, metavar='N', help='total number of categories')
parser.add_argument('--alpha', default=0.5, type=float, help='contrastive loss initial weight')
parser.add_argument('--beta', default=10, type=float, help='distill loss initial weight')
parser.add_argument('--text-load', default='text_description_tensor/bird_text_tensor.pt', type=str, help='text descriptions encoded by the CLIP text encoder')
parser.add_argument('--temperature', default=0.02, type=float, help='softmax temperature for text similarities')
parser.add_argument('--port', default=10002, type=int, help='TCP port for distributed init')
parser.add_argument('--world-size', default=-1, type=int, help='number of processes; -1 to use all local GPUs')
parser.add_argument('--num-workers', default=16, type=int, help='dataloader workers per process')
def main():
    args = parser.parse_args()
    set_seed(args.seed)
    # default to one process per visible GPU
    if args.world_size == -1:
        world_size = torch.cuda.device_count()
    else:
        world_size = args.world_size
    print('GPUs on this node:', world_size)
    # spawn one worker process per GPU
    mp.spawn(main_worker, nprocs=world_size, args=(world_size, args))
def main_worker(rank, world_size, args):
    print('==> Start rank:', rank)
    local_rank = rank % 8  # assumes at most 8 GPUs per node
    torch.cuda.set_device(local_rank)
    torch.distributed.init_process_group(backend='nccl', init_method=f'tcp://localhost:{args.port}',
                                         world_size=world_size, rank=rank)

    # build data loader
    bsz_gpu = int(args.batch_size / world_size)
    print('batch_size per gpu:', bsz_gpu)
    train_transform = transforms.Compose([
        transforms.RandomResizedCrop(224, scale=(0.2, 1.0)),
        transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8),
        transforms.RandomGrayscale(p=0.2),
        transforms.RandomApply([GaussianBlur([0.1, 2.0])], p=0.5),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
    if args.task == 'bird':
        train_data = CUB200Pair(root=args.root, train=True, transform=train_transform)
    else:
        traindir = os.path.join(args.root, 'train')
        train_data = MyPair(img_root=traindir, transform=train_transform)
    train_sampler = torch.utils.data.distributed.DistributedSampler(train_data, shuffle=True)
    train_loader = DataLoader(train_data, batch_size=bsz_gpu, num_workers=args.num_workers,
                              pin_memory=True, sampler=train_sampler, drop_last=True)

    # frozen CLIP teacher
    device = torch.device(f'cuda:{local_rank}')
    model_t, _ = clip.load("ViT-B/16", device=device)
    model_t.float()
    model_t_dim = model_t.ln_final.weight.shape[0]
    model_t = torch.nn.parallel.DistributedDataParallel(model_t, device_ids=[local_rank])
    model_t.eval()
    # MoCo student; teacher_dim sizes the feature projected toward the teacher space
    model = MoCo_KL(
        dim=args.moco_dim,
        K=args.moco_k,
        m=args.moco_m,
        T=args.moco_t,
        teacher_dim=model_t_dim,
        mlp=True,
    ).cuda()
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank])
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.wd, momentum=0.9)
    criterion = nn.CrossEntropyLoss(reduction='none').cuda()
    criterion_distill = torch.nn.KLDivLoss(reduction='none').cuda()
    # load ImageNet-pretrained weights into both the query and key encoders
    if args.pretrained:
        if os.path.isfile(args.pretrained):
            print("=> loading checkpoint '{}'".format(args.pretrained))
            checkpoint = torch.load(args.pretrained, map_location="cpu")
            state_dict = checkpoint
            new_state_dict = {}
            for k, v in state_dict.items():
                if not k.startswith('fc'):  # skip the ImageNet classification head
                    new_state_dict["module.encoder_q." + k] = v
                    new_state_dict["module.encoder_k." + k] = v
            msg = model.load_state_dict(new_state_dict, strict=False)
            print(msg.missing_keys)
            print("=> loaded pre-trained model '{}'".format(args.pretrained))
        else:
            print("=> no checkpoint found at '{}'".format(args.pretrained))
    # precomputed CLIP text embeddings, one row per category
    text_tensor = torch.load(args.text_load, map_location='cpu').cuda().to(torch.float32)
    epoch_start = 1

    # Start training
    print("==> Start training...")
    for epoch in range(epoch_start, args.epochs + 1):
        train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch, args)
        train_loss = train(model, model_t, train_loader, criterion, criterion_distill,
                           optimizer, epoch, text_tensor, args)
        # save the last model; master process only
        if rank == 0:
            os.makedirs(args.checkpoints, exist_ok=True)
            torch.save({'epoch': epoch, 'state_dict': model.state_dict(),
                        'optimizer': optimizer.state_dict()},
                       os.path.join(args.checkpoints, 'model_last.pth'))
# train for one epoch
def train(net, model_t, data_loader, criterion, criterion_distill, train_optimizer, epoch, text_tensor, args):
    net.train()
    model_t.eval()
    total_loss, total_num, train_bar = 0.0, 0, tqdm(data_loader)
    for im_1, im_2 in train_bar:
        im_1, im_2 = im_1.cuda(non_blocking=True), im_2.cuda(non_blocking=True)
        # student forward: MoCo logits/labels plus a feature aligned to the teacher space
        output, target, f_s = net(im_1, im_2)
        # teacher features; no gradients flow into the frozen CLIP encoder
        with torch.no_grad():
            f_t = model_t.module.encode_image(im_1)
            f_t = nn.functional.normalize(f_t, dim=1, p=2.0)
        # image-to-text similarity distributions for teacher and student
        similarity_t = f_t @ text_tensor.T / args.temperature
        similarity_t_softmax = F.softmax(similarity_t, dim=1)
        similarity_s = f_s @ text_tensor.T / args.temperature
        similarity_s_softmax = F.softmax(similarity_s, dim=1)
        loss_distill = criterion_distill(similarity_s_softmax.log(), similarity_t_softmax)
        loss1 = criterion(output, target)
        # Calculate normalized information entropy of the teacher's distribution
        entropy = -torch.sum(similarity_t_softmax * torch.log(similarity_t_softmax + 1e-10), dim=1)
        max_entropy = math.log(args.num_classes)
        entropy = entropy / max_entropy
        # Entropy-based weights: an uncertain teacher (high entropy) favours the
        # contrastive loss; a confident teacher favours distillation
        weights_contrastive = entropy ** 2
        weights_distill = 1.0 - weights_contrastive
        loss_distill = (loss_distill.mean(dim=1) * weights_distill * args.beta).sum()
        loss1 = torch.mean(args.alpha * loss1 * weights_contrastive)
        loss = loss1 + loss_distill
        train_optimizer.zero_grad()
        loss.backward()
        train_optimizer.step()
        total_num += data_loader.batch_size
        total_loss += loss.item() * data_loader.batch_size
        train_bar.set_description('Train Epoch: [{}/{}], lr: {:.10f}, Loss: {:.4f}'.format(
            epoch, args.epochs, get_lr(train_optimizer), total_loss / total_num))
    return total_loss / total_num
def adjust_learning_rate(optimizer, epoch, args):
    """Decay the learning rate based on schedule"""
    lr = args.lr
    if args.cos:  # cosine lr schedule
        lr *= 0.5 * (1. + math.cos(math.pi * epoch / args.epochs))
    else:  # stepwise lr schedule
        for milestone in args.schedule:
            lr *= 0.1 if epoch >= milestone else 1.
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
if __name__ == '__main__':
    main()
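# Example launch using the default bird configuration (paths match the argparse
# defaults above; adjust them to your setup):
#   python main.py --task bird --root bird/ --pretrained resnet50-19c8e357.pth \
#       --text-load text_description_tensor/bird_text_tensor.pt --checkpoints checkpoints/EAD_bird/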