commit b58a8defaf
Author: wudong
Date:   2022-11-23 21:15:19 +08:00


@@ -217,16 +217,16 @@ def train(hyp, opt, device, data_list,id,callbacks): # hyp is path/to/hyp.yaml
     best_fitness, start_epoch, epochs = smart_resume(ckpt, optimizer, ema, weights, epochs, resume)
     del ckpt, csd

-    # DP mode
-    if cuda and RANK == -1 and torch.cuda.device_count() > 1:
-        LOGGER.warning('WARNING: DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n'
-                       'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.')
-        model = torch.nn.DataParallel(model)
-
-    # SyncBatchNorm
-    if opt.sync_bn and cuda and RANK != -1:
-        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
-        LOGGER.info('Using SyncBatchNorm()')
+    # # DP mode
+    # if cuda and RANK == -1 and torch.cuda.device_count() > 1:
+    #     LOGGER.warning('WARNING: DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n'
+    #                    'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.')
+    #     model = torch.nn.DataParallel(model)
+
+    # # SyncBatchNorm
+    # if opt.sync_bn and cuda and RANK != -1:
+    #     model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
+    #     LOGGER.info('Using SyncBatchNorm()')
     print("Trainloader")
     # Trainloader
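
For reference, the two branches this commit comments out follow the standard PyTorch pattern for single-process multi-GPU (DataParallel) and cross-process BatchNorm synchronisation (SyncBatchNorm). The sketch below is not the repository's code; it assumes a plain nn.Module and uses a hypothetical wrap_model helper only to illustrate what the disabled lines did.

import torch
import torch.nn as nn

def wrap_model(model: nn.Module, device: torch.device, rank: int, sync_bn: bool) -> nn.Module:
    # Hypothetical helper, not part of train.py: mirrors the two branches disabled above.
    cuda = device.type == "cuda"
    # DP mode: single-process multi-GPU; generally discouraged in favour of DDP via torch.distributed.run.
    if cuda and rank == -1 and torch.cuda.device_count() > 1:
        model = nn.DataParallel(model)
    # SyncBatchNorm: only meaningful under DDP (rank != -1); keeps BatchNorm statistics in sync across GPUs.
    if sync_bn and cuda and rank != -1:
        model = nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
    return model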