
ModuleDict.update should be called with an iterable of key/value pairs, but got ResNet #87

@manza-ari

My code is below; I am facing the error in the title. I also want to ask one more thing: does LRFinder not work with DataParallel? I have commented out the scheduler so that LRFinder can work properly.
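For context, the DataParallel variant I am asking about looks roughly like this. This is just a sketch: `resnet18`, `ModelWrapper`, `method`, and the other wrappers are the ones defined in the full script below, and the DataParallel line is currently commented out there.

```python
# Rough sketch of the DataParallel setup I am asking about (currently commented
# out in the full script below); assumes LRFinder accepts a DataParallel-wrapped model.
models = torch.nn.DataParallel(resnet18, device_ids=[0])
model_wrapper = ModelWrapper(models, method)
lr_finder = LRFinder(model_wrapper, optimizer_wrapper, loss_wrapper, device='cuda')
lr_finder.range_test(train_loader, end_lr=1, num_iter=100)
```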

```python
# Main

if __name__ == '__main__':

    method = args.method_type
    methods = ['Random']
    datasets = ['cifar10', 'cifar100', 'fashionmnist','svhn']
    assert method in methods, 'No method %s! Try options %s'%(method, methods)
    assert args.dataset in datasets, 'No dataset %s! Try options %s'%(args.dataset, datasets)

    results = open('results_'+str(args.method_type)+"_"+args.dataset +'_main'+str(args.cycles)+str(args.total)+'.txt','w')
    print("Dataset: %s"%args.dataset)
    print("Method type:%s"%method)

    if args.total:
        TRIALS = 1
        CYCLES = 1
    else:
        CYCLES = args.cycles

    for trial in range(TRIALS):

        # Load training and testing dataset
        data_train, data_unlabeled, data_test, adden, NO_CLASSES, no_train = load_dataset(args.dataset)
        # Don't predefine budget size. Configure it in the config.py: ADDENDUM = adden
        NUM_TRAIN = no_train
        indices = list(range(NUM_TRAIN))
        random.shuffle(indices)

        if args.total:
            labeled_set = indices
        else:
            labeled_set = indices[:ADDENDUM]
            unlabeled_set = [x for x in indices if x not in labeled_set]

        train_loader = DataLoader(data_train, batch_size=BATCH,
                                  sampler=SubsetRandomSampler(labeled_set),
                                  pin_memory=True, drop_last=True)
        test_loader  = DataLoader(data_test, batch_size=BATCH)
        dataloaders  = {'train': train_loader, 'test': test_loader}

        for cycle in range(CYCLES):

            # Randomly sample 10000 unlabeled data points
            if not args.total:
                random.shuffle(unlabeled_set)
                subset = unlabeled_set[:SUBSET]

            # Model - create new instance for every cycle so that it resets
            with torch.cuda.device(CUDA_VISIBLE_DEVICES):
                args.dataset == "cifar100"
                resnet18    = resnet.resnet18(num_classes=NO_CLASSES).cuda()

            args.dataset == "cifar100"
            models = resnet18

            torch.backends.cudnn.benchmark = True
            #models = torch.nn.DataParallel(models, device_ids=[0])

            # Loss, criterion and scheduler (re)initialization
            criterion      = nn.CrossEntropyLoss(reduction='none')
            optim_backbone = optim.SGD(models.parameters(), lr=LR, weight_decay=WDECAY) #, momentum=MOMENTUM

            #sched_backbone = lr_scheduler.MultiStepLR(optim_backbone, milestones=MILESTONES)

            optimizers = optim_backbone
            #schedulers = {'backbone': sched_backbone}

            # Training and testing
            model_wrapper = ModelWrapper(models, method)
            loss_wrapper = LossWrapper(criterion, models, method)
            optimizer_wrapper = OptimizerWrapper(optimizers)

            # Manually create an axis and pass it into `LRFinder.plot()` to avoid popping window
            # of figure blocking the procedure.
            fig, ax = plt.subplots()

            lr_finder = LRFinder(model_wrapper, optimizer_wrapper, loss_wrapper, device='cuda')
            lr_finder.range_test(train_loader, end_lr=1, num_iter=100)
            ax, suggested_lr = lr_finder.plot(ax=ax, skip_start=0, skip_end=0, suggest_lr=True)

            lr_finder.reset()  # to reset the model and optimizer to their initial state

            for name in optimizers:
                optimizers[name].param_groups[0]['lr'] = suggested_lr

            print('----- Updated optimizers -----')
            print(optimizers)

            criterion = nn.CrossEntropyLoss(reduction='none')

            # LR Finder

            train(models, method, criterion, optimizers, dataloaders, args.no_of_epochs, EPOCHL)  #schedulers,

            acc = test(models, EPOCH, method, dataloaders, mode='test')
            print('Trial {}/{} || Cycle {}/{} || Label set size {}: Test acc {}'.format(trial+1, TRIALS, cycle+1, CYCLES, len(labeled_set), acc))
            np.array([method, trial+1, TRIALS, cycle+1, CYCLES, len(labeled_set), acc]).tofile(results, sep=" ")
            results.write("\n")

            if cycle == (CYCLES-1):
                # Reached final training cycle
                print("Finished.")
                break
            # Get the indices of the unlabeled samples to train on next cycle
            arg = query_samples(models, method, data_unlabeled, subset, labeled_set, cycle, args)

            # Update the labeled dataset and the unlabeled dataset, respectively
            labeled_set += list(torch.tensor(subset)[arg][-ADDENDUM:].numpy())
            listd = list(torch.tensor(subset)[arg][:-ADDENDUM].numpy())
            unlabeled_set = listd + unlabeled_set[SUBSET:]
            print(len(labeled_set), min(labeled_set), max(labeled_set))
            # Create a new dataloader for the updated labeled dataset
            dataloaders['train'] = DataLoader(data_train, batch_size=BATCH,
                                              sampler=SubsetRandomSampler(labeled_set),
                                              pin_memory=True)
        results.close()
```
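For what it's worth, the `for name in optimizers:` loop above already indexes the optimizers by name, and the `ModuleDict.update` error in the title suggests that `ModelWrapper` wraps whatever it receives in an `nn.ModuleDict`. My guess (just a sketch, assuming the wrappers expect name-keyed dicts rather than bare objects) is that the model and optimizer would be passed like this:

```python
# Sketch only -- my assumption that ModelWrapper / OptimizerWrapper expect
# name-keyed dicts (an nn.ModuleDict needs key/value pairs, not a bare ResNet).
models = {'backbone': resnet18}
optimizers = {'backbone': optim_backbone}

model_wrapper = ModelWrapper(models, method)
loss_wrapper = LossWrapper(criterion, models, method)
optimizer_wrapper = OptimizerWrapper(optimizers)

# The later LR update then works as written in the script above:
for name in optimizers:
    optimizers[name].param_groups[0]['lr'] = suggested_lr
```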
