Traceback (most recent call last):
  File "train.py", line 237, in <module>
    adj = attack(args.attack_type, ptb_rate, adj, features, labels, sens, idx_train, idx_val, idx_test,
  File "/home/hhussain/attack-gnn-fairness/src/attack/attack.py", line 161, in attack
    modified_adj = apply_perturbation(builds[attack_name], attacks[attack_name], adj, features, labels, sens,
  File "/home/hhussain/attack-gnn-fairness/src/attack/attack.py", line 80, in apply_perturbation
    modified_adj = attack(model, adj, features, labels, n_perturbations, idx_train, idx_unlabeled, sens)
  File "/home/hhussain/attack-gnn-fairness/src/attack/attack.py", line 121, in attack_prbcd
    adversary.attack(n_perturbations)
  File "/home/hhussain/anaconda3/envs/fair-prbcd-1/lib/python3.8/site-packages/typeguard/__init__.py", line 1015, in wrapper
    retval = func(*args, **kwargs)
  File "/home/hhussain/attack-gnn-fairness/src/rgnn_at_scale/attacks/base_attack.py", line 118, in attack
    return self._attack(n_perturbations, **kwargs)
  File "/home/hhussain/attack-gnn-fairness/src/rgnn_at_scale/attacks/prbcd.py", line 163, in _attack
    edge_index = self.sample_final_edges(n_perturbations)[0]
  File "/home/hhussain/.local/lib/python3.8/site-packages/torch/autograd/grad_mode.py", line 28, in decorate_context
    return func(*args, **kwargs)
  File "/home/hhussain/attack-gnn-fairness/src/rgnn_at_scale/attacks/prbcd.py", line 193, in sample_final_edges
    sampled_edges = torch.bernoulli(perturbed_edge_weight).float()
RuntimeError: Expected p_in >= 0 && p_in <= 1 to be true, but got false. (Could this error message be improved? If so, please report an enhancement request to PyTorch.)
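
The RuntimeError comes from torch.bernoulli, which requires every element of its input tensor to be a valid probability in [0, 1]. The message therefore means that perturbed_edge_weight contains at least one out-of-range value at sampling time; a NaN also triggers it, since NaN fails both comparisons. In PRBCD the perturbed edge weights are meant to be kept inside [0, 1] during the attack, so a value escaping that range usually points to a numerical problem in the gradient updates that precede the final sampling (e.g. a diverging step size) rather than to the sampling code itself.

Below is a minimal diagnostic and defensive sketch, assuming it replaces the failing call in sample_final_edges (prbcd.py, line 193 in this traceback). perturbed_edge_weight is the tensor named in the traceback; the helper sample_edges_safely and the clamping are illustrative assumptions, not part of the rgnn_at_scale code:

import torch

def sample_edges_safely(perturbed_edge_weight: torch.Tensor) -> torch.Tensor:
    # Diagnostic: report what went out of range before touching anything.
    n_nan = torch.isnan(perturbed_edge_weight).sum().item()
    if n_nan:
        print(f"perturbed_edge_weight contains {n_nan} NaN values")
    print(f"min={perturbed_edge_weight.min().item():.4g}, "
          f"max={perturbed_edge_weight.max().item():.4g}")

    # Defensive repair: map NaNs to 0 (those edges are never sampled) and
    # force the remaining weights back into the valid range [0, 1].
    weights = torch.nan_to_num(perturbed_edge_weight, nan=0.0)
    weights = weights.clamp(min=0.0, max=1.0)
    return torch.bernoulli(weights).float()

Note that the clamp only treats the symptom: if the diagnostic shows NaNs or values far outside [0, 1], the underlying cause is likely upstream in the attack's weight updates, and reducing the attack's step size or re-checking the projection onto [0, 1] is the more robust fix.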