ulissigroup / amptorch

AMPtorch: Atomistic Machine Learning Package (AMP) - PyTorch
GNU General Public License v3.0

examples do not work #90

Closed: rsdmse closed this issue 3 years ago

rsdmse commented 3 years ago

I installed the latest commit of amptorch together with all of the dependencies from env_gpu.yml. I can import amptorch, but I cannot run the example scripts. I wonder whether the updated dependency versions are causing these examples to break.

custom_descriptor_example.py:

---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-9-3ad6331c597d> in <module>()
     38     descriptor_setup=("gaussian", Gs, elements),
     39     forcetraining=False,
---> 40     save_fps=True,
     41 )
     42 

/amptorch/lib/python3.6/site-packages/amptorch/dataset.py in __init__(self, images, descriptor_setup, forcetraining, save_fps, scaling, cores, process)
     26         self.forcetraining = forcetraining
     27         self.scaling = scaling
---> 28         self.descriptor = construct_descriptor(descriptor_setup)
     29 
     30         self.a2d = AtomsToData(

/amptorch/lib/python3.6/site-packages/amptorch/dataset.py in construct_descriptor(descriptor_setup)
     91 
     92 def construct_descriptor(descriptor_setup):
---> 93     fp_scheme, fp_params, cutoff_params, elements = descriptor_setup
     94     if fp_scheme == "gaussian":
     95         descriptor = Gaussian(Gs=fp_params, elements=elements, **cutoff_params)

ValueError: not enough values to unpack (expected 4, got 3)
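
For context, construct_descriptor now unpacks four items (fp_scheme, fp_params, cutoff_params, elements), while the example builds a three-item tuple ("gaussian", Gs, elements). Below is a minimal sketch of a tuple shape that would at least satisfy the unpacking; the placeholder Gs, elements, and the empty cutoff_params dict are assumptions for illustration, not values taken from the example script.

# Sketch only: construct_descriptor does
#   fp_scheme, fp_params, cutoff_params, elements = descriptor_setup
# so the example's 3-tuple no longer matches. Whether an empty cutoff_params
# dict is accepted by Gaussian(Gs=..., elements=..., **cutoff_params) is an
# assumption.
Gs = {}                  # placeholder for the symmetry-function settings used in the example
elements = ["Cu"]        # placeholder element list
cutoff_params = {}       # assumed extra keyword args forwarded to Gaussian(...)

descriptor_setup = ("gaussian", Gs, cutoff_params, elements)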

train_example.py:

---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-3-7ab00cf4fa51> in <module>()
     73 torch.set_num_threads(1)
     74 trainer = AtomsTrainer(config)
---> 75 trainer.train()
     76 
     77 predictions = trainer.predict(images)

/amptorch/lib/python3.6/site-packages/amptorch/trainer.py in train(self, raw_data)
    233 
    234         stime = time.time()
--> 235         self.net.fit(self.train_dataset, None)
    236         elapsed_time = time.time() - stime
    237         print(f"Training completed in {elapsed_time}s")

/amptorch/lib/python3.6/site-packages/skorch/regressor.py in fit(self, X, y, **fit_params)
     89         # this is actually a pylint bug:
     90         # https://github.com/PyCQA/pylint/issues/1085
---> 91         return super(NeuralNetRegressor, self).fit(X, y, **fit_params)

/amptorch/lib/python3.6/site-packages/skorch/net.py in fit(self, X, y, **fit_params)
    901             self.initialize()
    902 
--> 903         self.partial_fit(X, y, **fit_params)
    904         return self
    905 

/amptorch/lib/python3.6/site-packages/skorch/net.py in partial_fit(self, X, y, classes, **fit_params)
    860         self.notify('on_train_begin', X=X, y=y)
    861         try:
--> 862             self.fit_loop(X, y, **fit_params)
    863         except KeyboardInterrupt:
    864             pass

/amptorch/lib/python3.6/site-packages/skorch/net.py in fit_loop(self, X, y, epochs, **fit_params)
    774 
    775             self.run_single_epoch(dataset_train, training=True, prefix="train",
--> 776                                   step_fn=self.train_step, **fit_params)
    777 
    778             if dataset_valid is not None:

/amptorch/lib/python3.6/site-packages/skorch/net.py in run_single_epoch(self, dataset, training, prefix, step_fn, **fit_params)
    806 
    807         batch_count = 0
--> 808         for data in self.get_iterator(dataset, training=training):
    809             Xi, yi = unpack_data(data)
    810             yi_res = yi if not is_placeholder_y else None

/amptorch/lib/python3.6/site-packages/torch/utils/data/dataloader.py in __next__(self)
    361 
    362     def __next__(self):
--> 363         data = self._next_data()
    364         self._num_yielded += 1
    365         if self._dataset_kind == _DatasetKind.Iterable and \

/amptorch/lib/python3.6/site-packages/torch/utils/data/dataloader.py in _next_data(self)
    403         data = self._dataset_fetcher.fetch(index)  # may raise StopIteration
    404         if self._pin_memory:
--> 405             data = _utils.pin_memory.pin_memory(data)
    406         return data
    407 

/amptorch/lib/python3.6/site-packages/torch/utils/data/_utils/pin_memory.py in pin_memory(data)
     53         return type(data)(*(pin_memory(sample) for sample in data))
     54     elif isinstance(data, container_abcs.Sequence):
---> 55         return [pin_memory(sample) for sample in data]
     56     elif hasattr(data, "pin_memory"):
     57         return data.pin_memory()

/amptorch/lib/python3.6/site-packages/torch/utils/data/_utils/pin_memory.py in <listcomp>(.0)
     53         return type(data)(*(pin_memory(sample) for sample in data))
     54     elif isinstance(data, container_abcs.Sequence):
---> 55         return [pin_memory(sample) for sample in data]
     56     elif hasattr(data, "pin_memory"):
     57         return data.pin_memory()

/amptorch/lib/python3.6/site-packages/torch/utils/data/_utils/pin_memory.py in pin_memory(data)
     53         return type(data)(*(pin_memory(sample) for sample in data))
     54     elif isinstance(data, container_abcs.Sequence):
---> 55         return [pin_memory(sample) for sample in data]
     56     elif hasattr(data, "pin_memory"):
     57         return data.pin_memory()

/amptorch/lib/python3.6/site-packages/torch/utils/data/_utils/pin_memory.py in <listcomp>(.0)
     53         return type(data)(*(pin_memory(sample) for sample in data))
     54     elif isinstance(data, container_abcs.Sequence):
---> 55         return [pin_memory(sample) for sample in data]
     56     elif hasattr(data, "pin_memory"):
     57         return data.pin_memory()

/amptorch/lib/python3.6/site-packages/torch/utils/data/_utils/pin_memory.py in pin_memory(data)
     55         return [pin_memory(sample) for sample in data]
     56     elif hasattr(data, "pin_memory"):
---> 57         return data.pin_memory()
     58     else:
     59         return data

/amptorch/lib/python3.6/site-packages/torch_geometric/data/data.py in pin_memory(self, *keys)
    363         If :obj:`*keys` is not given, the conversion is applied to all present
    364         attributes."""
--> 365         return self.apply(lambda x: x.pin_memory(), *keys)
    366 
    367     def debug(self):

/amptorch/lib/python3.6/site-packages/torch_geometric/data/data.py in apply(self, func, *keys)
    324         """
    325         for key, item in self(*keys):
--> 326             self[key] = self.__apply__(item, func)
    327         return self
    328 

/amptorch/lib/python3.6/site-packages/torch_geometric/data/data.py in __apply__(self, item, func)
    303     def __apply__(self, item, func):
    304         if torch.is_tensor(item):
--> 305             return func(item)
    306         elif isinstance(item, SparseTensor):
    307             # Not all apply methods are supported for `SparseTensor`, e.g.,

/amptorch/lib/python3.6/site-packages/torch_geometric/data/data.py in <lambda>(x)
    363         If :obj:`*keys` is not given, the conversion is applied to all present
    364         attributes."""
--> 365         return self.apply(lambda x: x.pin_memory(), *keys)
    366 
    367     def debug(self):

RuntimeError: cannot pin 'torch.sparse.FloatTensor' only dense CPU tensors can be pinned
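
The failure comes from the DataLoader's pin_memory path: every tensor in the batch gets pinned, and the sparse fingerprint tensor cannot be pinned on this torch version. Here is a minimal sketch that reproduces the underlying limitation; it assumes a CUDA-enabled, torch 1.6-era build, and newer releases may behave differently.

import torch

# Dense CPU tensors can be pinned; sparse ones raise the RuntimeError above
# (behaviour observed on torch 1.6-era builds with CUDA available).
dense = torch.zeros(3, 3)
dense.pin_memory()                 # works

sparse = torch.zeros(3, 3).to_sparse()
try:
    sparse.pin_memory()            # raises: cannot pin 'torch.sparse.FloatTensor' ...
except RuntimeError as err:
    print(err)

Since the error is only triggered when the loader pins memory, one possible (untested) workaround is to keep pin_memory disabled for these batches, e.g. via skorch's iterator_train__pin_memory=False; whether amptorch's trainer config exposes that knob is an assumption.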

The torch-related versions are listed below:

amptorch                      0.1
torch                         1.6.0
torch-cluster                 1.5.9
torch-geometric               1.7.0
torch-scatter                 2.0.6
torch-sparse                  0.6.9
torch-spline-conv             1.2.1
rsdmse commented 3 years ago

Our user reported that it's now working.