# Tensor.new_* factory methods take sizes and reuse the source tensor's
# properties (device, and dtype unless overridden). The original called
# `x.newones`, which does not exist — the API is `new_ones`.
# NOTE(review): assumes `x` is a torch.Tensor defined earlier in the file.
x = x.new_ones(5, 3, dtype=torch.double)  # new_* methods take in sizes
x = torch.randn_like(x, dtype=torch.float)  # same size as x, override dtype!
# Element-wise addition; assumes x and y are broadcast-compatible tensors
# defined earlier in the file — TODO confirm against the surrounding code.
torch.add(x, y)  # out-of-place: returns a new tensor (result discarded here)
y.add_(x)  # in-place: the trailing underscore means y itself is mutated
# Tensor.view reshapes without copying: the result shares storage with x.
x = torch.randn(4, 4)
y = x.view(-1)     # flatten; the single -1 dimension is inferred (16 elements)
z = x.view(-1, 8)  # -1 here is inferred from the other dims: 16 / 8 = 2 rows
# A one-element tensor converts to a plain Python number via .item().
x = torch.randn(1)
print(x.item())
# NumPy bridge. A CPU torch.Tensor and the array returned by .numpy() share
# the same underlying memory, so mutating one mutates the other.
a = torch.ones(5)  # restored: `a` must be a CPU tensor before calling .numpy()
b = a.numpy()  # shares memory with `a` (CPU tensors only): changing one changes the other
a = np.ones(5)
b = torch.from_numpy(a)  # from_numpy also shares memory with the source array
np.add(a, 1, out=a)  # changing the np array changed the Torch Tensor automatically
device = torch.device("cuda") # a CUDA device object
y = torch.ones_like(x, device=device) # directly create a tensor on GPU
x = x.to(device) # or just use strings .to("cuda")
torch.cuda.is_available()
# Autograd
# Autograd: setting requires_grad=True makes torch record every operation
# performed on the tensor so gradients can be computed later.
x = torch.ones(2, 2, requires_grad=True)
# y results from an operation on a tracked tensor, so it carries a grad_fn.
y = torch.add(x, 2)
.to("cuda")
Autograd