XanaduAI / MrMustard

A differentiable bridge between phase space and Fock space
https://mrmustard.readthedocs.io/
Apache License 2.0

TensorFlow causing an error during optimization #172

Closed · nquesada closed this issue 6 months ago

nquesada commented 1 year ago

Expected behavior

The optimization should run to completion without errors.

Actual behavior

The optimization fails after roughly 100 steps with an InvalidArgumentError (see the traceback below).

Reproduces how often

100% of the time.

System information

Mr Mustard: a differentiable bridge between phase space and Fock space.
Copyright 2018-2021 Xanadu Quantum Technologies Inc.

Python version:            3.9.7
Platform info:             Linux-5.17.5-76051705-generic-x86_64-with-glibc2.34
Installation path:         None of your business
Mr Mustard version:        0.2.0-dev
Numpy version:             1.21.5
Numba version:             0.55.1
Scipy version:             1.8.0
The Walrus version:        0.20.0-dev
TensorFlow version:        2.6.2
Torch version:             None

Source code

import numpy as np
import tensorflow as tf

from mrmustard.lab import *
from mrmustard.math import Math
from mrmustard.utils.training import Optimizer

math = Math()

# Target cat state: Normalized(|alpha> - |-alpha>)
alpha = 2.0
cutoff = 50
cat_amps = Coherent(alpha).ket([cutoff]) - Coherent(-alpha).ket([cutoff])
cat_amps = cat_amps / np.linalg.norm(cat_amps)
cat = State(ket=cat_amps)
cat_ket = cat.ket(cutoffs=[cutoff]).numpy()
cat_ket /= np.linalg.norm(cat_ket)

def cost_fn_cat():
    # negative overlap |<cat|psi_out>|^2 with the target cat ket
    ket = output().ket(cutoffs=[cutoff])
    return -math.abs(math.sum(math.conj(cat_ket) * ket)) ** 2

np.random.seed(21)
S = Sgate(
    r=[np.random.uniform(0, 3.14), np.random.uniform(0, 3.14)],
    phi=[np.random.uniform(0, 3.14), np.random.uniform(0, 3.14)],
    r_trainable=True,
    phi_trainable=True,
)
B = BSgate(
    theta=np.random.uniform(0, 3.14),
    phi=np.random.uniform(0, 3.14),
    theta_trainable=True,
    phi_trainable=True,
)

# Full train circuit: squeeze both modes, mix them on a beamsplitter,
# then project mode 0 onto Fock(3)
def output():
    return Vacuum(2) >> S >> B << Fock(3, modes=[0], normalize=True)

opt = Optimizer(euclidean_lr=0.001)
opt.minimize(cost_fn_cat, by_optimizing=[S, B], max_steps=2000)

Tracebacks

2022-11-02 15:05:24.361254: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcudart.so.11.0'; dlerror: libcudart.so.11.0: cannot open shared object file: No such file or directory
2022-11-02 15:05:24.361270: I tensorflow/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine.
2022-11-02 15:05:26.024104: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcuda.so.1'; dlerror: libcuda.so.1: cannot open shared object file: No such file or directory
2022-11-02 15:05:26.024131: W tensorflow/stream_executor/cuda/cuda_driver.cc:269] failed call to cuInit: UNKNOWN ERROR (303)
2022-11-02 15:05:26.024145: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:156] kernel driver does not appear to be running on this host (pop-os): /proc/driver/nvidia/version does not exist
2022-11-02 15:05:26.025019: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations:  AVX2 AVX512F FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.

Step 106/2000 | 39.8 it/s ━╸━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━   5% Cost = -0.03122 | ⏳  0:00:48

2022-11-02 15:05:30.483723: W tensorflow/core/framework/op_kernel.cc:1692] OP_REQUIRES failed at strided_slice_op.cc:108 : Invalid argument: slice index 3 of dimension 0 out of bounds.

Step 106/2000 | 39.8 it/s ━╸━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━   5% Cost = -0.03122 | ⏳  0:00:48

---------------------------------------------------------------------------
InvalidArgumentError                      Traceback (most recent call last)
/tmp/ipykernel_362528/3454949026.py in <module>
     24     return Vacuum(2) >> S >> B << Fock(3, modes =[0], normalize=True)
     25 opt = Optimizer(euclidean_lr = 0.001)
---> 26 opt.minimize(cost_fn_cat, by_optimizing=[S,B], max_steps=2000)

~/Code/MrMustard/mrmustard/utils/training.py in minimize(self, cost_fn, by_optimizing, max_steps)
     72             with bar:
     73                 while not self.should_stop(max_steps):
---> 74                     cost, grads = math.value_and_gradients(cost_fn, params)
     75                     update_symplectic(params["symplectic"], grads["symplectic"], self.symplectic_lr)
     76                     update_orthogonal(params["orthogonal"], grads["orthogonal"], self.orthogonal_lr)

~/Code/MrMustard/mrmustard/math/tensorflow.py in value_and_gradients(self, cost_fn, parameters)
    315         """
    316         with tf.GradientTape() as tape:
--> 317             loss = cost_fn()
    318         gradients = tape.gradient(loss, list(parameters.values()))
    319         return loss, dict(zip(parameters.keys(), gradients))

/tmp/ipykernel_362528/3454949026.py in cost_fn_cat()
     14 cat_ket /= np.linalg.norm(cat_ket)
     15 def cost_fn_cat():
---> 16     ket = output().ket(cutoffs=[cutoff])
     17     return -math.abs(math.sum(math.conj(cat_ket) * ket))**2
     18 

/tmp/ipykernel_362528/3454949026.py in output()
     22 #Full train circuit
     23 def output():
---> 24     return Vacuum(2) >> S >> B << Fock(3, modes =[0], normalize=True)
     25 opt = Optimizer(euclidean_lr = 0.001)
     26 opt.minimize(cost_fn_cat, by_optimizing=[S,B], max_steps=2000)

~/Code/MrMustard/mrmustard/lab/abstract/state.py in __lshift__(self, other)
    471         E.g., ``self << other`` where other is a ``State`` and ``self`` is either a ``State`` or a ``Transformation``.
    472         """
--> 473         return other.primal(self)
    474 
    475     def __add__(self, other: State):

~/Code/MrMustard/mrmustard/lab/abstract/state.py in primal(self, other)
    356             ]
    357             try:
--> 358                 out_fock = self._preferred_projection(other, other.indices(self.modes))
    359             except AttributeError:
    360                 # matching other's cutoffs

~/Code/MrMustard/mrmustard/lab/states.py in _preferred_projection(self, other, mode_indices)
    500             else:
    501                 getitem.append(slice(None))
--> 502         output = other.fock[tuple(getitem)] if other.is_pure else other.fock[tuple(getitem) * 2]
    503         if self._normalize:
    504             return fock.normalize(output, is_dm=other.is_mixed)

~/miniconda3/envs/mm/lib/python3.9/site-packages/tensorflow/python/util/dispatch.py in wrapper(*args, **kwargs)
    204     """Call target, and fall back on dispatchers if there is a TypeError."""
    205     try:
--> 206       return target(*args, **kwargs)
    207     except (TypeError, ValueError):
    208       # Note: convert_to_eager_tensor currently raises a ValueError, not a

~/miniconda3/envs/mm/lib/python3.9/site-packages/tensorflow/python/ops/array_ops.py in _slice_helper(tensor, slice_spec, var)
   1039       var_empty = constant([], dtype=dtypes.int32)
   1040       packed_begin = packed_end = packed_strides = var_empty
-> 1041     return strided_slice(
   1042         tensor,
   1043         packed_begin,

~/miniconda3/envs/mm/lib/python3.9/site-packages/tensorflow/python/util/dispatch.py in wrapper(*args, **kwargs)
    204     """Call target, and fall back on dispatchers if there is a TypeError."""
    205     try:
--> 206       return target(*args, **kwargs)
    207     except (TypeError, ValueError):
    208       # Note: convert_to_eager_tensor currently raises a ValueError, not a

~/miniconda3/envs/mm/lib/python3.9/site-packages/tensorflow/python/ops/array_ops.py in strided_slice(input_, begin, end, strides, begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask, var, name)
   1212     strides = ones_like(begin)
   1213 
-> 1214   op = gen_array_ops.strided_slice(
   1215       input=input_,
   1216       begin=begin,

~/miniconda3/envs/mm/lib/python3.9/site-packages/tensorflow/python/ops/gen_array_ops.py in strided_slice(input, begin, end, strides, begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask, name)
  10509       return _result
  10510     except _core._NotOkStatusException as e:
> 10511       _ops.raise_from_not_ok_status(e, name)
  10512     except _core._FallbackException:
  10513       pass

~/miniconda3/envs/mm/lib/python3.9/site-packages/tensorflow/python/framework/ops.py in raise_from_not_ok_status(e, name)
   6939   message = e.message + (" name: " + name if name is not None else "")
   6940   # pylint: disable=protected-access
-> 6941   six.raise_from(core._status_to_exception(e.code, message), None)
   6942   # pylint: enable=protected-access
   6943 

~/miniconda3/envs/mm/lib/python3.9/site-packages/six.py in raise_from(value, from_value)

InvalidArgumentError: slice index 3 of dimension 0 out of bounds. [Op:StridedSlice] name: strided_slice/
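For context on where this blows up: `_preferred_projection` indexes the output state's Fock array at index 3 (the outcome of the `Fock(3)` projector on mode 0), and the "slice index 3 of dimension 0 out of bounds" message suggests the automatically chosen cutoff on that mode came out at 3 or less for this seed. A standalone sketch of the same TensorFlow failure mode, independent of MrMustard (the shapes are made up, just to illustrate the error):

import tensorflow as tf

amps = tf.zeros((3, 50))  # pretend the auto-chosen cutoff on mode 0 is only 3
amps[3]                   # raises InvalidArgumentError: slice index 3 of dimension 0 out of bounds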

Additional information

No response

nquesada commented 1 year ago

Note that if you change the random seed to np.random.seed(20), the optimization works.
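A quick way to confirm the seed dependence is to rebuild the circuit for a few seeds and check whether the Fock(3) projection goes through. The helper below is hypothetical; it just reuses the constructors and operators from the script above:

import numpy as np
import tensorflow as tf
from mrmustard.lab import Vacuum, Sgate, BSgate, Fock

def projection_succeeds(seed, cutoff=50):
    # Hypothetical helper: same circuit as in the issue body (non-trainable),
    # returning False if the Fock(3) projection hits the out-of-bounds slice.
    np.random.seed(seed)
    S = Sgate(
        r=[np.random.uniform(0, 3.14), np.random.uniform(0, 3.14)],
        phi=[np.random.uniform(0, 3.14), np.random.uniform(0, 3.14)],
    )
    B = BSgate(theta=np.random.uniform(0, 3.14), phi=np.random.uniform(0, 3.14))
    try:
        (Vacuum(2) >> S >> B << Fock(3, modes=[0], normalize=True)).ket(cutoffs=[cutoff])
        return True
    except tf.errors.InvalidArgumentError:
        return False

for seed in (20, 21):
    print(seed, projection_succeeds(seed))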

ziofil commented 1 year ago

Try increasing the cutoff. An easy way is:

from mrmustard import settings
settings.AUTOCUTOFF_MIN_CUTOFF = 20  # or whatever
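
For reference, in the script from the issue body the projector is Fock(3), so the projected mode needs at least 4 Fock levels; presumably any minimum cutoff of 4 or more avoids this particular out-of-bounds slice. A sketch, to be run before any states are built:

from mrmustard import settings

# make sure auto-chosen cutoffs can accommodate the Fock(3) projection on mode 0
settings.AUTOCUTOFF_MIN_CUTOFF = 4  # any value >= 4; ziofil's 20 is a safe choice

# ... then run the reproduction script from the issue body unchanged ...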

sduquemesa commented 1 year ago

resolved?