Overfit_batches flag throwing error

The overfit_batches flag throws an error when used in combination with a DataModule:
TypeError: 'generator' object is not callable

The error does not occur when I leave out the overfit_batches flag.

My Code:

trainer = pl.Trainer(gpus=1, overfit_batches=1)
trainer.fit(model, dm)

Full Stack Trace:

TypeError                                 Traceback (most recent call last)
<ipython-input-28-5c4f8ec623ff> in <module>()
      1 trainer = pl.Trainer(gpus=1, overfit_batches=1)
----> 2 trainer.fit(model, dm)

12 frames
/usr/local/lib/python3.6/dist-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloader, val_dataloaders, datamodule)
    442         self.call_hook('on_fit_start')
    443 
--> 444         results = self.accelerator_backend.train()
    445         self.accelerator_backend.teardown()
    446 

/usr/local/lib/python3.6/dist-packages/pytorch_lightning/accelerators/gpu_accelerator.py in train(self)
     61 
     62         # train or test
---> 63         results = self.train_or_test()
     64         return results
     65 

/usr/local/lib/python3.6/dist-packages/pytorch_lightning/accelerators/accelerator.py in train_or_test(self)
     72             results = self.trainer.run_test()
     73         else:
---> 74             results = self.trainer.train()
     75         return results
     76 

/usr/local/lib/python3.6/dist-packages/pytorch_lightning/trainer/trainer.py in train(self)
    464 
    465     def train(self):
--> 466         self.run_sanity_check(self.get_model())
    467 
    468         self.checkpoint_connector.has_trained = False

/usr/local/lib/python3.6/dist-packages/pytorch_lightning/trainer/trainer.py in run_sanity_check(self, ref_model)
    646         # to make sure program won't crash during val
    647         if should_sanity_check:
--> 648             self.reset_val_dataloader(ref_model)
    649             self.num_sanity_val_batches = [
    650                 min(self.num_sanity_val_steps, val_batches) for val_batches in self.num_val_batches

/usr/local/lib/python3.6/dist-packages/pytorch_lightning/trainer/data_loading.py in reset_val_dataloader(self, model)
    316         has_step = is_overridden('validation_step', model)
    317         if has_loader and has_step:
--> 318             self.num_val_batches, self.val_dataloaders = self._reset_eval_dataloader(model, 'val')
    319 
    320     def reset_test_dataloader(self, model) -> None:

/usr/local/lib/python3.6/dist-packages/pytorch_lightning/trainer/data_loading.py in _reset_eval_dataloader(self, model, mode)
    246             num_loaders = len(dataloaders)
    247             train_dataloader = self.request_dataloader(getattr(model, 'train_dataloader'))
--> 248             dataloaders = [deepcopy(train_dataloader) for _ in range(num_loaders)]
    249 
    250         self.dev_debugger.track_load_dataloader_call(loader_name, dataloaders=dataloaders)

/usr/local/lib/python3.6/dist-packages/pytorch_lightning/trainer/data_loading.py in <listcomp>(.0)
    246             num_loaders = len(dataloaders)
    247             train_dataloader = self.request_dataloader(getattr(model, 'train_dataloader'))
--> 248             dataloaders = [deepcopy(train_dataloader) for _ in range(num_loaders)]
    249 
    250         self.dev_debugger.track_load_dataloader_call(loader_name, dataloaders=dataloaders)

/usr/lib/python3.6/copy.py in deepcopy(x, memo, _nil)
    178                     y = x
    179                 else:
--> 180                     y = _reconstruct(x, memo, *rv)
    181 
    182     # If is its own copy, don't memoize.

/usr/lib/python3.6/copy.py in _reconstruct(x, memo, func, args, state, listiter, dictiter, deepcopy)
    278     if state is not None:
    279         if deep:
--> 280             state = deepcopy(state, memo)
    281         if hasattr(y, '__setstate__'):
    282             y.__setstate__(state)

/usr/lib/python3.6/copy.py in deepcopy(x, memo, _nil)
    148     copier = _deepcopy_dispatch.get(cls)
    149     if copier:
--> 150         y = copier(x, memo)
    151     else:
    152         try:

/usr/lib/python3.6/copy.py in _deepcopy_dict(x, memo, deepcopy)
    238     memo[id(x)] = y
    239     for key, value in x.items():
--> 240         y[deepcopy(key, memo)] = deepcopy(value, memo)
    241     return y
    242 d[dict] = _deepcopy_dict

/usr/lib/python3.6/copy.py in deepcopy(x, memo, _nil)
    159             copier = getattr(x, "__deepcopy__", None)
    160             if copier:
--> 161                 y = copier(memo)
    162             else:
    163                 reductor = dispatch_table.get(cls)

TypeError: 'generator' object is not callable

Hey! I can't reproduce the bug with our datamodule example.

Can you try to reproduce it using our simple model colab, and open an issue if the error persists?

Hi @asvskartheek, make sure you return a PyTorch DataLoader from your DataModule's dataloader hooks.
It looks like you are returning some kind of generator object instead.
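
To illustrate, here is a minimal sketch of a DataModule whose hooks return DataLoader objects (the class name, dataset, and sizes are placeholders, not taken from this thread). As the traceback shows, with overfit_batches the trainer deep-copies the train dataloader to build the validation loaders, so a hook written as a generator (using yield) is the likely culprit:

import torch
from torch.utils.data import DataLoader, TensorDataset
import pytorch_lightning as pl

class ExampleDataModule(pl.LightningDataModule):
    def setup(self, stage=None):
        # Placeholder random data; substitute your own datasets here.
        self.train_set = TensorDataset(torch.randn(64, 32), torch.randint(0, 2, (64,)))
        self.val_set = TensorDataset(torch.randn(16, 32), torch.randint(0, 2, (16,)))

    def train_dataloader(self):
        # Correct: return the DataLoader object itself.
        # A hook written with `yield` hands Lightning a generator instead,
        # which cannot be deep-copied the way overfit_batches requires.
        return DataLoader(self.train_set, batch_size=8)

    def val_dataloader(self):
        return DataLoader(self.val_set, batch_size=8)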