Closed dnguyen1196 closed 4 years ago
The only lines changed were from "h0" to "h", but a lot of these tests are breaking because of a dimension mismatch. Any ideas, @yuanqing-wang?
2020-05-21T19:16:15.7525955Z =================================== FAILURES ===================================
2020-05-21T19:16:15.7526289Z __________________________________ test_test ___________________________________
2020-05-21T19:16:15.7526585Z
2020-05-21T19:16:15.7527724Z def test_test():
2020-05-21T19:16:15.7528111Z import pinot
2020-05-21T19:16:15.7528441Z import copy
2020-05-21T19:16:15.7528711Z
2020-05-21T19:16:15.7529196Z layer = pinot.representation.dgl_legacy.gn()
2020-05-21T19:16:15.7529671Z net_representation = pinot.representation.Sequential(
2020-05-21T19:16:15.7530101Z layer, [32, "tanh", 32, "tanh", 32, "tanh"]
2020-05-21T19:16:15.7530385Z )
2020-05-21T19:16:15.7530799Z net = pinot.Net(net_representation)
2020-05-21T19:16:15.7531086Z
2020-05-21T19:16:15.7531442Z train = pinot.Train(
2020-05-21T19:16:15.7531733Z net=net,
2020-05-21T19:16:15.7532466Z data=pinot.data.utils.batch(pinot.data.esol()[:10], 5),
2020-05-21T19:16:15.7532635Z n_epochs=1,
2020-05-21T19:16:15.7533135Z optimizer=torch.optim.Adam(net.parameters()),
2020-05-21T19:16:15.7533418Z )
2020-05-21T19:16:15.7533656Z
2020-05-21T19:16:15.7534041Z train.train()
2020-05-21T19:16:15.7534305Z
2020-05-21T19:16:15.7534676Z test = pinot.Test(
2020-05-21T19:16:15.7535004Z net=net,
2020-05-21T19:16:15.7535478Z data=pinot.data.utils.batch(pinot.data.esol()[:10], 5),
2020-05-21T19:16:15.7535919Z metrics=[pinot.mse, pinot.rmse, pinot.r2],
2020-05-21T19:16:15.7536281Z states=train.states,
2020-05-21T19:16:15.7536585Z )
2020-05-21T19:16:15.7536851Z
2020-05-21T19:16:15.7537156Z > test.test()
2020-05-21T19:16:15.7537471Z
2020-05-21T19:16:15.7538096Z pinot/app/tests/test_experiment.py:55:
2020-05-21T19:16:15.7539039Z _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
2020-05-21T19:16:15.7540141Z /usr/share/miniconda/envs/test/lib/python3.6/site-packages/pinot/app/experiment.py:132: in test
2020-05-21T19:16:15.7540497Z results[metric.__name__][state_name] = metric(self.net, g, y).detach().numpy()
2020-05-21T19:16:15.7541102Z /usr/share/miniconda/envs/test/lib/python3.6/site-packages/pinot/metrics.py:28: in rmse
2020-05-21T19:16:15.7541357Z y_hat = net.condition(g).mean
2020-05-21T19:16:15.7541791Z /usr/share/miniconda/envs/test/lib/python3.6/site-packages/pinot/net.py:94: in condition
2020-05-21T19:16:15.7542034Z mu, log_sigma = self.forward(g)
2020-05-21T19:16:15.7542482Z /usr/share/miniconda/envs/test/lib/python3.6/site-packages/pinot/net.py:80: in forward
2020-05-21T19:16:15.7542731Z h = self.representation(g)
2020-05-21T19:16:15.7543220Z /usr/share/miniconda/envs/test/lib/python3.6/site-packages/torch/nn/modules/module.py:550: in __call__
2020-05-21T19:16:15.7543490Z result = self.forward(*input, **kwargs)
2020-05-21T19:16:15.7543942Z /usr/share/miniconda/envs/test/lib/python3.6/site-packages/pinot/representation/sequential.py:73: in forward
2020-05-21T19:16:15.7544220Z g.apply_nodes(lambda nodes: {"h": self.f_in(nodes.data["h"])})
2020-05-21T19:16:15.7544771Z /usr/share/miniconda/envs/test/lib/python3.6/site-packages/dgl/graph.py:2572: in apply_nodes
2020-05-21T19:16:15.7545177Z Runtime.run(prog)
2020-05-21T19:16:15.7545632Z /usr/share/miniconda/envs/test/lib/python3.6/site-packages/dgl/runtime/runtime.py:11: in run
2020-05-21T19:16:15.7545902Z exe.run()
2020-05-21T19:16:15.7546334Z /usr/share/miniconda/envs/test/lib/python3.6/site-packages/dgl/runtime/ir/executor.py:129: in run
2020-05-21T19:16:15.7546597Z udf_ret = fn_data(node_data)
2020-05-21T19:16:15.7547071Z /usr/share/miniconda/envs/test/lib/python3.6/site-packages/dgl/runtime/scheduler.py:288: in _afunc_wrapper
2020-05-21T19:16:15.7547337Z return apply_func(nbatch)
2020-05-21T19:16:15.7547897Z /usr/share/miniconda/envs/test/lib/python3.6/site-packages/pinot/representation/sequential.py:73: in <lambda>
2020-05-21T19:16:15.7548173Z g.apply_nodes(lambda nodes: {"h": self.f_in(nodes.data["h"])})
2020-05-21T19:16:15.7548823Z /usr/share/miniconda/envs/test/lib/python3.6/site-packages/torch/nn/modules/module.py:550: in __call__
2020-05-21T19:16:15.7549099Z result = self.forward(*input, **kwargs)
2020-05-21T19:16:15.7549722Z /usr/share/miniconda/envs/test/lib/python3.6/site-packages/torch/nn/modules/container.py:100: in forward
2020-05-21T19:16:15.7549991Z input = module(input)
2020-05-21T19:16:15.7550535Z /usr/share/miniconda/envs/test/lib/python3.6/site-packages/torch/nn/modules/module.py:550: in __call__
2020-05-21T19:16:15.7550828Z result = self.forward(*input, **kwargs)
2020-05-21T19:16:15.7551358Z /usr/share/miniconda/envs/test/lib/python3.6/site-packages/torch/nn/modules/linear.py:87: in forward
2020-05-21T19:16:15.7551628Z return F.linear(input, self.weight, self.bias)
2020-05-21T19:16:15.7552168Z _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
2020-05-21T19:16:15.7552383Z
2020-05-21T19:16:15.7552812Z input = tensor([[ 0.4230, -0.4313, 0.2328, ..., -0.0289, 0.2030, -0.1921],
2020-05-21T19:16:15.7553286Z [ 0.6931, -0.7159, 0.3054, ..., -0.007...0.2768, -0.3093],
2020-05-21T19:16:15.7553729Z [ 0.5729, -0.6100, 0.3289, ..., -0.0402, 0.2761, -0.2934]],
2020-05-21T19:16:15.7553989Z grad_fn=<TanhBackward>)
2020-05-21T19:16:15.7554202Z weight = Parameter containing:
2020-05-21T19:16:15.7554594Z tensor([[ 0.0651, -0.0627, -0.0126, ..., -0.0667, 0.0905, 0.0272],
2020-05-21T19:16:15.7555004Z [ 0.0144, -0.0314,...0, -0.0188, 0.0536],
2020-05-21T19:16:15.7555432Z [ 0.0530, 0.0902, 0.0720, ..., 0.0010, 0.0360, -0.0446]],
2020-05-21T19:16:15.7555662Z requires_grad=True)
2020-05-21T19:16:15.7555872Z bias = Parameter containing:
2020-05-21T19:16:15.7556278Z tensor([-0.0411, -0.0834, -0.0739, -0.0913, -0.0316, 0.0134, -0.0019, -0.0084,
2020-05-21T19:16:15.7556523Z 0.0793,...3, 0.0852,
2020-05-21T19:16:15.7556966Z 0.0316, -0.0668, -0.0685, -0.0450, -0.0221, -0.0614, -0.0620, -0.0669],
2020-05-21T19:16:15.7557183Z requires_grad=True)
2020-05-21T19:16:15.7557357Z
2020-05-21T19:16:15.7557567Z def linear(input, weight, bias=None):
2020-05-21T19:16:15.7558101Z # type: (Tensor, Tensor, Optional[Tensor]) -> Tensor
2020-05-21T19:16:15.7560819Z r"""
2020-05-21T19:16:15.7560947Z Applies a linear transformation to the incoming data: :math:`y = xA^T + b`.
2020-05-21T19:16:15.7561074Z
2020-05-21T19:16:15.7561183Z Shape:
2020-05-21T19:16:15.7561271Z
2020-05-21T19:16:15.7561603Z - Input: :math:`(N, *, in\_features)` where `*` means any number of
2020-05-21T19:16:15.7561732Z additional dimensions
2020-05-21T19:16:15.7562008Z - Weight: :math:`(out\_features, in\_features)`
2020-05-21T19:16:15.7562254Z - Bias: :math:`(out\_features)`
2020-05-21T19:16:15.7562529Z - Output: :math:`(N, *, out\_features)`
2020-05-21T19:16:15.7562647Z """
2020-05-21T19:16:15.7562758Z tens_ops = (input, weight)
2020-05-21T19:16:15.7562854Z if not torch.jit.is_scripting():
2020-05-21T19:16:15.7563084Z if any([type(t) is not Tensor for t in tens_ops]) and has_torch_function(tens_ops):
2020-05-21T19:16:15.7563222Z return handle_torch_function(linear, tens_ops, input, weight, bias=bias)
2020-05-21T19:16:15.7563354Z if input.dim() == 2 and bias is not None:
2020-05-21T19:16:15.7563474Z # fused op is marginally faster
2020-05-21T19:16:15.7563569Z > ret = torch.addmm(bias, input, weight.t())
2020-05-21T19:16:15.7563711Z E RuntimeError: size mismatch, m1: [128 x 32], m2: [117 x 128] at /pytorch/aten/src/TH/generic/THTensorMath.cpp:41
Changed "h0" to "h" for consistency. Pull request #36 turns out to require much more work than initially expected, so this pull request just addresses this small issue.