In Masterarbeit:"Anomalie-Detektion in Zellbildern zur Anwendung der Leukämieerkennung" verwendete CSI Methode.

simclr_CSI.py: the CSI training routine of this repository, with extra branches for the 'CNMC' and 'CNMC_grayscale' leukemia cell-image datasets.
import time

import torch
import torch.optim

import models.transform_layers as TL
from training.contrastive_loss import get_similarity_matrix, NT_xent
from utils.utils import AverageMeter, normalize

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Batched horizontal-flip layer, applied on the GPU.
hflip = TL.HorizontalFlipLayer().to(device)
def train(P, epoch, model, criterion, optimizer, scheduler, loader, logger=None,
          simclr_aug=None, linear=None, linear_optim=None):

    assert simclr_aug is not None
    assert P.sim_lambda == 1.0  # to avoid mistakes
    assert P.K_shift > 1
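    # NOTE (added): `P` is the run configuration object. From this file and
    # the CSI codebase, the fields used here appear to be: P.sim_lambda
    # (weight of the SimCLR loss), P.K_shift (number of shifting
    # transformations, e.g. 4 rotations), P.shift_trans (the shifting
    # transformation itself), P.n_gpus, P.multi_gpu, and P.dataset. Their
    # exact definitions live outside this file, so treat this as an assumption.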
    if logger is None:
        log_ = print
    else:
        log_ = logger.log

    batch_time = AverageMeter()
    data_time = AverageMeter()

    losses = dict()
    losses['cls'] = AverageMeter()
    losses['sim'] = AverageMeter()
    losses['shift'] = AverageMeter()

    check = time.time()
    for n, (images, labels) in enumerate(loader):
        model.train()
        count = n * P.n_gpus  # number of trained samples

        data_time.update(time.time() - check)
        check = time.time()

        ### SimCLR loss ###
        if P.dataset != 'imagenet' and P.dataset != 'CNMC' and P.dataset != 'CNMC_grayscale':
            # The loader returns one batch; build two views via random horizontal flip.
            batch_size = images.size(0)
            images = images.to(device)
            images1, images2 = hflip(images.repeat(2, 1, 1, 1)).chunk(2)  # hflip
        else:
            # For imagenet/CNMC, the loader already returns two augmented views.
            batch_size = images[0].size(0)
            images1, images2 = images[0].to(device), images[1].to(device)
        labels = labels.to(device)

        images1 = torch.cat([P.shift_trans(images1, k) for k in range(P.K_shift)])
        images2 = torch.cat([P.shift_trans(images2, k) for k in range(P.K_shift)])
        shift_labels = torch.cat([torch.ones_like(labels) * k for k in range(P.K_shift)], 0)  # B -> 4B
        shift_labels = shift_labels.repeat(2)
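        # NOTE (added): with batch size B and K = P.K_shift, each view is
        # expanded to K*B samples (one copy per shifting transformation), so
        # images_pair below holds 2*K*B samples and shift_labels marks which
        # transformation produced each one. The inline "4B"/"8B" comments
        # assume the default K_shift = 4.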
        images_pair = torch.cat([images1, images2], dim=0)  # 8B
        images_pair = simclr_aug(images_pair)  # transform

        _, outputs_aux = model(images_pair, simclr=True, penultimate=True, shift=True)

        simclr = normalize(outputs_aux['simclr'])  # normalize projections before cosine similarity
        sim_matrix = get_similarity_matrix(simclr, multi_gpu=P.multi_gpu)
        loss_sim = NT_xent(sim_matrix, temperature=0.5) * P.sim_lambda  # see the NT-Xent sketch after this file

        loss_shift = criterion(outputs_aux['shift'], shift_labels)
        ### total loss ###
        loss = loss_sim + loss_shift

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        scheduler.step(epoch - 1 + n / len(loader))
        lr = optimizer.param_groups[0]['lr']

        batch_time.update(time.time() - check)

        ### Post-processing ###
        simclr_norm = outputs_aux['simclr'].norm(dim=1).mean()  # mean projection norm (not used further in this file)
        penul_1 = outputs_aux['penultimate'][:batch_size]
        penul_2 = outputs_aux['penultimate'][P.K_shift * batch_size: (P.K_shift + 1) * batch_size]
        outputs_aux['penultimate'] = torch.cat([penul_1, penul_2])  # keep only the unshifted (k = 0) samples
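        # NOTE (added): images_pair is ordered [view 1 with k = 0..K-1,
        # view 2 with k = 0..K-1], so rows [0, B) are view 1 at k = 0 and
        # rows [K*B, K*B + B) are view 2 at k = 0. The linear probe below is
        # therefore trained only on features of untransformed images.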
        ### Linear evaluation ###
        outputs_linear_eval = linear(outputs_aux['penultimate'].detach())
        loss_linear = criterion(outputs_linear_eval, labels.repeat(2))

        linear_optim.zero_grad()
        loss_linear.backward()
        linear_optim.step()

        losses['cls'].update(0, batch_size)
        losses['sim'].update(loss_sim.item(), batch_size)
        losses['shift'].update(loss_shift.item(), batch_size)
        if count % 50 == 0:
            log_('[Epoch %3d; %3d] [Time %.3f] [Data %.3f] [LR %.5f]\n'
                 '[LossC %f] [LossSim %f] [LossShift %f]' %
                 (epoch, count, batch_time.value, data_time.value, lr,
                  losses['cls'].value, losses['sim'].value, losses['shift'].value))

        check = time.time()  # reset so data_time measures only loader latency; this line follows the upstream CSI trainer
    log_('[DONE] [Time %.3f] [Data %.3f] [LossC %f] [LossSim %f] [LossShift %f]' %
         (batch_time.average, data_time.average,
          losses['cls'].average, losses['sim'].average, losses['shift'].average))

    if logger is not None:
        logger.scalar_summary('train/loss_cls', losses['cls'].average, epoch)
        logger.scalar_summary('train/loss_sim', losses['sim'].average, epoch)
        logger.scalar_summary('train/loss_shift', losses['shift'].average, epoch)
        logger.scalar_summary('train/batch_time', batch_time.average, epoch)
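
The SimCLR term above (loss_sim) calls NT_xent from training.contrastive_loss, which is not shown on this page. For orientation, here is a minimal, self-contained sketch of the standard NT-Xent (normalized temperature-scaled cross-entropy) loss over a precomputed similarity matrix; the function name and the pairing convention (row i is positive with row i + N) are illustrative assumptions, not necessarily the repository's implementation.

import torch
import torch.nn.functional as F

def nt_xent_sketch(sim_matrix, temperature=0.5):
    """Minimal NT-Xent sketch. Expects a (2N, 2N) cosine-similarity
    matrix whose two halves are two views of the same N samples, so the
    positive for row i is row i + N (mod 2N). Illustration only."""
    two_n = sim_matrix.size(0)
    n = two_n // 2
    logits = sim_matrix / temperature
    # Remove self-similarities from the softmax denominator.
    eye = torch.eye(two_n, dtype=torch.bool, device=sim_matrix.device)
    logits = logits.masked_fill(eye, float('-inf'))
    # Each sample's positive is its counterpart from the other view.
    targets = torch.cat([torch.arange(n, two_n), torch.arange(0, n)]).to(sim_matrix.device)
    return F.cross_entropy(logits, targets)

# Usage sketch: with L2-normalized embeddings z of shape (2N, d),
#   sim = z @ z.t()
#   loss = nt_xent_sketch(sim, temperature=0.5)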