/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/linear_model/base.py:35: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88
from ..utils.seq_dataset import ArrayDataset, CSRDataset
/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/linear_model/least_angle.py:23: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88
from ..utils import arrayfuncs, as_float_array, check_X_y, deprecated
/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/utils/random.py:10: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88
from ._random import sample_without_replacement
/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/linear_model/coordinate_descent.py:30: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88
from . import cd_fast
/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/linear_model/__init__.py:22: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88
from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/linear_model/__init__.py:22: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88
from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/linear_model/sag.py:12: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88
from .sag_fast import sag
/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/svm/base.py:8: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88
from . import libsvm, liblinear
/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/svm/base.py:8: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88
from . import libsvm, liblinear
/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/svm/base.py:9: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88
from . import libsvm_sparse
/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/neighbors/__init__.py:6: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88
from .ball_tree import BallTree
/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/neighbors/__init__.py:6: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88
from .ball_tree import BallTree
/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/neighbors/__init__.py:6: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88
from .ball_tree import BallTree
/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/neighbors/__init__.py:7: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88
from .kd_tree import KDTree
/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/decomposition/online_lda.py:28: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88
from ._online_lda import (mean_change, _dirichlet_expectation_1d,
/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/utils/graph.py:16: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88
from .graph_shortest_path import graph_shortest_path # noqa
/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/isotonic.py:11: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88
from ._isotonic import _inplace_contiguous_isotonic_regression, _make_unique
/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/manifold/t_sne.py:26: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88
from . import _utils
/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/manifold/t_sne.py:27: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88
from . import _barnes_hut_tsne
/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/manifold/t_sne.py:27: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88
from . import _barnes_hut_tsne
/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/tree/tree.py:40: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88
from ._criterion import Criterion
/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/tree/tree.py:40: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88
from ._criterion import Criterion
/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/tree/tree.py:40: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88
from ._criterion import Criterion
/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/tree/tree.py:40: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88
from ._criterion import Criterion
/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/cluster/k_means_.py:37: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88
from . import _k_means
/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/cluster/k_means_.py:38: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88
from ._k_means_elkan import k_means_elkan
/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/cluster/hierarchical.py:23: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88
from . import _hierarchical
/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/cluster/hierarchical.py:23: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88
from . import _hierarchical
/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/cluster/dbscan_.py:20: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88
from ._dbscan_inner import dbscan_inner
/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/feature_extraction/hashing.py:14: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88
from ._hashing import transform as _hashing_transform
INFO:tensorflow:Loading hparams from existing json /home/usuario/Escritorio/ds/hparams.json
INFO:tensorflow:Overwrite key label_smoothing: 0.1 -> 0.0
INFO:tensorflow:Overwrite key max_length: 256 -> 0
INFO:tensorflow:Overwrite key max_target_seq_length: 0 -> 2048
INFO:tensorflow:Overriding hparams in transformer_base with label_smoothing=0.0,max_length=0,max_target_seq_length=2048
WARNING:tensorflow:From /home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensor2tensor/utils/trainer_lib.py:278: __init__ (from tensorflow.contrib.learn.python.learn.estimators.run_config) is deprecated and will be removed in a future version.
Instructions for updating:
When switching to tf.estimator.Estimator, use tf.estimator.RunConfig instead.
INFO:tensorflow:Configuring DataParallelism to replicate the model.
INFO:tensorflow:schedule=continuous_train_and_eval
INFO:tensorflow:worker_gpu=1
INFO:tensorflow:sync=False
WARNING:tensorflow:Schedule=continuous_train_and_eval. Assuming that training is running on a single machine.
INFO:tensorflow:datashard_devices: ['gpu:0']
INFO:tensorflow:caching_devices: None
INFO:tensorflow:ps_devices: ['gpu:0']
INFO:tensorflow:Using config: {'_save_checkpoints_secs': None, '_num_ps_replicas': 0, '_keep_checkpoint_max': 20, '_task_type': None, '_train_distribute': None, '_is_chief': True, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x7efc3abc9510>, '_tf_config': gpu_options {
per_process_gpu_memory_fraction: 1.0
}
, '_protocol': None, '_save_checkpoints_steps': 1000, '_keep_checkpoint_every_n_hours': 10000, '_session_config': gpu_options {
per_process_gpu_memory_fraction: 0.95
}
allow_soft_placement: true
graph_options {
optimizer_options {
global_jit_level: OFF
}
}
, '_model_dir': '/home/usuario/Escritorio/ds', 'use_tpu': False, '_tf_random_seed': None, '_master': '', '_device_fn': None, '_num_worker_replicas': 0, '_task_id': 0, '_log_step_count_steps': 100, '_evaluation_master': '', '_eval_distribute': None, 'data_parallelism': <tensor2tensor.utils.expert_utils.Parallelism object at 0x7efc3b4c3dd0>, '_environment': 'local', '_save_summary_steps': 100, 't2t_device_info': {'num_async_replicas': 1}}
WARNING:tensorflow:Estimator's model_fn (<function wrapping_model_fn at 0x7efc3abc31b8>) includes params argument, but params are not passed to Estimator.
WARNING:tensorflow:ValidationMonitor only works with --schedule=train_and_evaluate
INFO:tensorflow:Not using Distribute Coordinator.
INFO:tensorflow:Running training and evaluation locally (non-distributed).
INFO:tensorflow:Start train and evaluate loop. The evaluate will happen after every checkpoint. Checkpoint frequency is determined based on RunConfig arguments: save_checkpoints_steps 1000 or save_checkpoints_secs None.
INFO:tensorflow:Reading data files from /home/usuario/Escritorio/ds/score2perf_maestro_language_uncropped_aug-train*
INFO:tensorflow:partition: 0 num_data_files: 1
WARNING:tensorflow:From /home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/ops/sparse_ops.py:1165: sparse_to_dense (from tensorflow.python.ops.sparse_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Create a tf.sparse.SparseTensor and use tf.sparse.to_dense instead.
WARNING:tensorflow:Shapes are not fully defined. Assuming batch_size means tokens.
INFO:tensorflow:Calling model_fn.
INFO:tensorflow:Setting T2TModel mode to 'train'
INFO:tensorflow:Using variable initializer: uniform_unit_scaling
INFO:tensorflow:Transforming feature 'targets' with symbol_modality_310_512.targets_bottom
WARNING:tensorflow:From /home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/framework/function.py:987: calling create_op (from tensorflow.python.framework.ops) with compute_shapes is deprecated and will be removed in a future version.
Instructions for updating:
Shapes are always computed; don't use the compute_shapes as it has no effect.
INFO:tensorflow:Building model body
INFO:tensorflow:Transforming body output with symbol_modality_310_512.top
INFO:tensorflow:Base learning rate: 2.000000
INFO:tensorflow:Trainable Variables Total size: 19061760
INFO:tensorflow:Non-trainable variables Total size: 5
INFO:tensorflow:Using optimizer Adam
INFO:tensorflow:Done calling model_fn.
INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Graph was finalized.
2019-02-16 17:30:27.735273: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
2019-02-16 17:30:27.821970: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:964] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2019-02-16 17:30:27.822387: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1432] Found device 0 with properties:
name: GeForce GTX 1070 major: 6 minor: 1 memoryClockRate(GHz): 1.645
pciBusID: 0000:01:00.0
totalMemory: 7.93GiB freeMemory: 7.60GiB
2019-02-16 17:30:27.822419: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1511] Adding visible gpu devices: 0
2019-02-16 17:30:28.036805: I tensorflow/core/common_runtime/gpu/gpu_device.cc:982] Device interconnect StreamExecutor with strength 1 edge matrix:
2019-02-16 17:30:28.036885: I tensorflow/core/common_runtime/gpu/gpu_device.cc:988] 0
2019-02-16 17:30:28.036891: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1001] 0: N
2019-02-16 17:30:28.037097: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1115] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 7711 MB memory) -> physical GPU (device: 0, name: GeForce GTX 1070, pci bus id: 0000:01:00.0, compute capability: 6.1)
INFO:tensorflow:Restoring parameters from /home/usuario/Escritorio/ds/model.ckpt-0
INFO:tensorflow:Running local_init_op.
INFO:tensorflow:Done running local_init_op.
INFO:tensorflow:Saving checkpoints for 0 into /home/usuario/Escritorio/ds/model.ckpt.
2019-02-16 17:30:37.134709: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.136520: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.138138: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.140584: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.143127: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.145318: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.148035: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.149650: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.151261: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.153675: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.156145: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.158038: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.160499: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.162547: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.164320: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.166616: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.169107: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.170976: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.173398: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.175018: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.176643: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.179173: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.181886: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.183832: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.186242: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.187922: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.189518: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.191852: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.194489: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.196683: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.199091: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.200664: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.202178: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.204392: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.206710: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.208562: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.210872: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.213397: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.214951: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.217271: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.218810: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.220619: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.222160: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.224491: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.226124: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.228021: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.229809: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.231640: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.233268: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.234916: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.236479: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.238621: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.240217: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.241846: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.243441: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.245441: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.247033: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.248677: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.250221: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.251950: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.253640: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.255367: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.256938: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.259079: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.260704: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.262334: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.264171: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.266418: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.268087: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.269736: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.271325: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.273002: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.274691: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.276325: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.277877: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.280079: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.281632: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.283299: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.284877: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.287094: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.288717: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.290681: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.292376: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.294078: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.295679: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.297268: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.298802: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.301206: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.302774: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.304487: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.306026: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.308171: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.309720: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.311383: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.312921: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.314617: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.316230: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.317902: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.319492: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.321615: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.323160: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.324815: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.326351: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.328421: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.329969: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.331948: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.333532: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.335254: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.336814: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.338401: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:37.339992: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:40.245546: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:40.245579: W tensorflow/stream_executor/stream.cc:2127] attempting to perform BLAS operation using StreamExecutor without BLAS support
Traceback (most recent call last):
File "/home/usuario/.conda/envs/magenta/bin/t2t_trainer", line 10, in
sys.exit(console_entry_point())
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/magenta/tensor2tensor/t2t_trainer.py", line 34, in console_entry_point
tf.app.run(main)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/platform/app.py", line 125, in run
_sys.exit(main(argv))
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/magenta/tensor2tensor/t2t_trainer.py", line 29, in main
t2t_trainer.main(argv)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensor2tensor/bin/t2t_trainer.py", line 393, in main
execute_schedule(exp)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensor2tensor/bin/t2t_trainer.py", line 349, in execute_schedule
getattr(exp, FLAGS.schedule)()
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensor2tensor/utils/trainer_lib.py", line 438, in continuous_train_and_eval
self._eval_spec)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/estimator/training.py", line 471, in train_and_evaluate
return executor.run()
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/estimator/training.py", line 610, in run
return self.run_local()
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/estimator/training.py", line 711, in run_local
saving_listeners=saving_listeners)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/estimator/estimator.py", line 354, in train
loss = self._train_model(input_fn, hooks, saving_listeners)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/estimator/estimator.py", line 1207, in _train_model
return self._train_model_default(input_fn, hooks, saving_listeners)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/estimator/estimator.py", line 1241, in _train_model_default
saving_listeners)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/estimator/estimator.py", line 1471, in _train_with_estimatorspec
, loss = mon_sess.run([estimator_spec.train_op, estimator_spec.loss])
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/training/monitored_session.py", line 671, in run
run_metadata=run_metadata)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/training/monitored_session.py", line 1156, in run
run_metadata=run_metadata)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/training/monitored_session.py", line 1255, in run
raise six.reraise(*original_exc_info)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/training/monitored_session.py", line 1240, in run
return self._sess.run(*args, **kwargs)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/training/monitored_session.py", line 1312, in run
run_metadata=run_metadata)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/training/monitored_session.py", line 1076, in run
return self._sess.run(*args, **kwargs)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 929, in run
run_metadata_ptr)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 1152, in _run
feed_dict_tensor, options, run_metadata)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 1328, in _do_run
run_metadata)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 1348, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InternalError: Blas GEMM launch failed : a.shape=(3825, 512), b.shape=(512, 512), m=3825, n=512, k=512
[[node transformer/parallel_0_4/transformer/transformer/body/decoder/layer_0/self_attention/multihead_attention/q/Tensordot/MatMul (defined at /home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensor2tensor/layers/common_layers.py:2935) = MatMul[T=DT_FLOAT, _class=["loc:@train...d/MatMul_1"], transpose_a=false, transpose_b=false, _device="/job:localhost/replica:0/task:0/device:GPU:0"](transformer/parallel_0_4/transformer/transformer/body/decoder/layer_0/self_attention/multihead_attention/v/Tensordot/Reshape, Read_18/ReadVariableOp)]]
[[{{node transformer/parallel_0_4/transformer/transformer/body/decoder/layer_2/self_attention/multihead_attention/v/Tensordot/Shape/_2433}} = _Recv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/device:CPU:0", send_device="/job:localhost/replica:0/task:0/device:GPU:0", send_device_incarnation=1, tensor_name="edge1404...rdot/Shape", tensor_type=DT_INT32, _device="/job:localhost/replica:0/task:0/device:CPU:0"]()]]
Caused by op u'transformer/parallel_0_4/transformer/transformer/body/decoder/layer_0/self_attention/multihead_attention/q/Tensordot/MatMul', defined at:
File "/home/usuario/.conda/envs/magenta/bin/t2t_trainer", line 10, in
sys.exit(console_entry_point())
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/magenta/tensor2tensor/t2t_trainer.py", line 34, in console_entry_point
tf.app.run(main)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/platform/app.py", line 125, in run
_sys.exit(main(argv))
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/magenta/tensor2tensor/t2t_trainer.py", line 29, in main
t2t_trainer.main(argv)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensor2tensor/bin/t2t_trainer.py", line 393, in main
execute_schedule(exp)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensor2tensor/bin/t2t_trainer.py", line 349, in execute_schedule
getattr(exp, FLAGS.schedule)()
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensor2tensor/utils/trainer_lib.py", line 438, in continuous_train_and_eval
self._eval_spec)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/estimator/training.py", line 471, in train_and_evaluate
return executor.run()
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/estimator/training.py", line 610, in run
return self.run_local()
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/estimator/training.py", line 711, in run_local
saving_listeners=saving_listeners)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/estimator/estimator.py", line 354, in train
loss = self._train_model(input_fn, hooks, saving_listeners)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/estimator/estimator.py", line 1207, in _train_model
return self._train_model_default(input_fn, hooks, saving_listeners)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/estimator/estimator.py", line 1237, in _train_model_default
features, labels, model_fn_lib.ModeKeys.TRAIN, self.config)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/estimator/estimator.py", line 1195, in _call_model_fn
model_fn_results = self._model_fn(features=features, **kwargs)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensor2tensor/utils/t2t_model.py", line 1368, in wrapping_model_fn
use_tpu=use_tpu)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensor2tensor/utils/t2t_model.py", line 1429, in estimator_model_fn
logits, losses_dict = model(features) # pylint: disable=not-callable
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/layers/base.py", line 374, in call
outputs = super(Layer, self).call(inputs, *args, kwargs)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/keras/engine/base_layer.py", line 757, in call*
outputs = self.call(inputs, args, kwargs)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensor2tensor/utils/t2t_model.py", line 295, in call
sharded_logits, losses = self.model_fn_sharded(sharded_features)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensor2tensor/utils/t2t_model.py", line 356, in model_fn_sharded
sharded_logits, sharded_losses = dp(self.model_fn, datashard_to_features)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensor2tensor/utils/expert_utils.py", line 231, in call
outputs.append(fns[i](my_args[i], my_kwargs[i]))
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensor2tensor/utils/t2t_model.py", line 392, in model_fn
body_out = self.body(transformed_features)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensor2tensor/models/transformer.py", line 210, in body
losses=losses)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensor2tensor/models/transformer.py", line 161, in decode
losses=losses)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensor2tensor/models/transformer.py", line 1316, in transformer_decoder
vars_3d=hparams.get("attention_variables_3d"))
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensor2tensor/layers/common_attention.py", line 3408, in multihead_attention
vars_3d_num_heads=vars_3d_num_heads)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensor2tensor/layers/common_attention.py", line 3275, in compute_qkv
vars_3d_num_heads=vars_3d_num_heads)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensor2tensor/layers/common_attention.py", line 3235, in compute_attention_component
antecedent, total_depth, use_bias=False, name=name)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensor2tensor/layers/common_layers.py", line 2935, in dense
return tf.layers.dense(x, units, **kwargs)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/layers/core.py", line 184, in dense
return layer.apply(inputs)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/keras/engine/base_layer.py", line 817, in apply
return self.__call__(inputs, *args, **kwargs)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/layers/base.py", line 374, in __call__
outputs = super(Layer, self).__call__(inputs, *args, **kwargs)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/keras/engine/base_layer.py", line 757, in __call__
outputs = self.call(inputs, *args, **kwargs)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/keras/layers/core.py", line 963, in call
outputs = standard_ops.tensordot(inputs, self.kernel, [[rank - 1], [0]])
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/ops/math_ops.py", line 2985, in tensordot
ab_matmul = matmul(a_reshape, b_reshape)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/ops/math_ops.py", line 2057, in matmul
a, b, transpose_a=transpose_a, transpose_b=transpose_b, name=name)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/ops/gen_math_ops.py", line 4560, in mat_mul
name=name)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/framework/op_def_library.py", line 787, in _apply_op_helper
op_def=op_def)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/util/deprecation.py", line 488, in new_func
return func(*args, **kwargs)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 3274, in create_op
op_def=op_def)
File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 1770, in init**
self._traceback = tf_stack.extract_stack()
I'm getting the error above when trying to use t2t_trainer with magenta-gpu installed. This is the command I ran:
(magenta) usuario@Strix:~/Escritorio/train$ t2t_trainer --data_dir="${DATA_DIR}" --hparams=${HPARAMS} --hparams_set=${HPARAMS_SET} --model=${MODEL} --output_dir=${TRAIN_DIR} --problem=${PROBLEM} --train_steps=1000000
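For context, a sketch of what those shell variables expand to, taken from the values the log above reports (MODEL is presumed from the "transformer/..." graph scope names and the transformer_base hparams set; the rest appear verbatim in the log):

PROBLEM=score2perf_maestro_language_uncropped_aug   # from "Reading data files from .../score2perf_maestro_language_uncropped_aug-train*"
MODEL=transformer                                   # presumed from the graph scope names in the error
HPARAMS_SET=transformer_base                        # from "Overriding hparams in transformer_base with ..."
HPARAMS=label_smoothing=0.0,max_length=0,max_target_seq_length=2048   # from the same override message
DATA_DIR=/home/usuario/Escritorio/ds                # directory of the data files being read
TRAIN_DIR=/home/usuario/Escritorio/ds               # '_model_dir' in the RunConfig dump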
/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/linear_model/base.py:35: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88 from ..utils.seq_dataset import ArrayDataset, CSRDataset /home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/linear_model/least_angle.py:23: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88 from ..utils import arrayfuncs, as_float_array, check_X_y, deprecated /home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/utils/random.py:10: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88 from ._random import sample_without_replacement /home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/linear_model/coordinate_descent.py:30: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88 from . import cd_fast /home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/linear_model/init.py:22: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88 from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber /home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/linear_model/init.py:22: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88 from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber /home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/linear_model/sag.py:12: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88 from .sag_fast import sag /home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/svm/base.py:8: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88 from . import libsvm, liblinear /home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/svm/base.py:8: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88 from . import libsvm, liblinear /home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/svm/base.py:9: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88 from . import libsvm_sparse /home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/neighbors/init.py:6: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88 from .ball_tree import BallTree /home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/neighbors/init.py:6: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88 from .ball_tree import BallTree /home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/neighbors/init.py:6: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88 from .ball_tree import BallTree /home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/neighbors/init.py:7: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88 from .kd_tree import KDTree /home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/decomposition/online_lda.py:28: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. 
Expected 96, got 88 from ._online_lda import (mean_change, _dirichlet_expectation_1d, /home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/utils/graph.py:16: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88 from .graph_shortest_path import graph_shortest_path # noqa /home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/isotonic.py:11: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88 from ._isotonic import _inplace_contiguous_isotonic_regression, _make_unique /home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/manifold/t_sne.py:26: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88 from . import _utils /home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/manifold/t_sne.py:27: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88 from . import _barnes_hut_tsne /home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/manifold/t_sne.py:27: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88 from . import _barnes_hut_tsne /home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/tree/tree.py:40: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88 from ._criterion import Criterion /home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/tree/tree.py:40: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88 from ._criterion import Criterion /home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/tree/tree.py:40: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88 from ._criterion import Criterion /home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/tree/tree.py:40: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88 from ._criterion import Criterion /home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/cluster/kmeans.py:37: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88 from . import _k_means /home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/cluster/kmeans.py:38: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88 from ._k_means_elkan import k_means_elkan /home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/cluster/hierarchical.py:23: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88 from . import _hierarchical /home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/cluster/hierarchical.py:23: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88 from . import hierarchical /home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/cluster/dbscan.py:20: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88 from ._dbscan_inner import dbscan_inner /home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/sklearn/feature_extraction/hashing.py:14: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. 
Expected 96, got 88 from ._hashing import transform as _hashing_transform INFO:tensorflow:Loading hparams from existing json /home/usuario/Escritorio/ds/hparams.json INFO:tensorflow:Overwrite key label_smoothing: 0.1 -> 0.0 INFO:tensorflow:Overwrite key max_length: 256 -> 0 INFO:tensorflow:Overwrite key max_target_seq_length: 0 -> 2048 INFO:tensorflow:Overriding hparams in transformer_base with label_smoothing=0.0,max_length=0,max_target_seq_length=2048 WARNING:tensorflow:From /home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensor2tensor/utils/trainer_lib.py:278: init (from tensorflow.contrib.learn.python.learn.estimators.run_config) is deprecated and will be removed in a future version. Instructions for updating: When switching to tf.estimator.Estimator, use tf.estimator.RunConfig instead. INFO:tensorflow:Configuring DataParallelism to replicate the model. INFO:tensorflow:schedule=continuous_train_and_eval INFO:tensorflow:worker_gpu=1 INFO:tensorflow:sync=False WARNING:tensorflow:Schedule=continuous_train_and_eval. Assuming that training is running on a single machine. INFO:tensorflow:datashard_devices: ['gpu:0'] INFO:tensorflow:caching_devices: None INFO:tensorflow:ps_devices: ['gpu:0'] INFO:tensorflow:Using config: {'_save_checkpoints_secs': None, '_num_ps_replicas': 0, '_keep_checkpoint_max': 20, '_task_type': None, '_train_distribute': None, '_is_chief': True, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x7efc3abc9510>, '_tf_config': gpu_options { per_process_gpu_memory_fraction: 1.0 } , '_protocol': None, '_save_checkpoints_steps': 1000, '_keep_checkpoint_every_n_hours': 10000, '_session_config': gpu_options { per_process_gpu_memory_fraction: 0.95 } allow_soft_placement: true graph_options { optimizer_options { global_jit_level: OFF } } , '_model_dir': '/home/usuario/Escritorio/ds', 'use_tpu': False, '_tf_random_seed': None, '_master': '', '_device_fn': None, '_num_worker_replicas': 0, '_task_id': 0, '_log_step_count_steps': 100, '_evaluation_master': '', '_eval_distribute': None, 'data_parallelism': <tensor2tensor.utils.expert_utils.Parallelism object at 0x7efc3b4c3dd0>, '_environment': 'local', '_save_summary_steps': 100, 't2t_device_info': {'num_async_replicas': 1}} WARNING:tensorflow:Estimator's model_fn (<function wrapping_model_fn at 0x7efc3abc31b8>) includes params argument, but params are not passed to Estimator. WARNING:tensorflow:ValidationMonitor only works with --schedule=train_and_evaluate INFO:tensorflow:Not using Distribute Coordinator. INFO:tensorflow:Running training and evaluation locally (non-distributed). INFO:tensorflow:Start train and evaluate loop. The evaluate will happen after every checkpoint. Checkpoint frequency is determined based on RunConfig arguments: save_checkpoints_steps 1000 or save_checkpoints_secs None. INFO:tensorflow:Reading data files from /home/usuario/Escritorio/ds/score2perf_maestro_language_uncropped_aug-train* INFO:tensorflow:partition: 0 num_data_files: 1 WARNING:tensorflow:From /home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/ops/sparse_ops.py:1165: sparse_to_dense (from tensorflow.python.ops.sparse_ops) is deprecated and will be removed in a future version. Instructions for updating: Create a
tf.sparse.SparseTensor
and usetf.sparse.to_dense
instead. WARNING:tensorflow:Shapes are not fully defined. Assuming batch_size means tokens. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Setting T2TModel mode to 'train' INFO:tensorflow:Using variable initializer: uniform_unit_scaling INFO:tensorflow:Transforming feature 'targets' with symbol_modality_310_512.targets_bottom WARNING:tensorflow:From /home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/framework/function.py:987: calling create_op (from tensorflow.python.framework.ops) with compute_shapes is deprecated and will be removed in a future version. Instructions for updating: Shapes are always computed; don't use the compute_shapes as it has no effect. INFO:tensorflow:Building model body INFO:tensorflow:Transforming body output with symbol_modality_310_512.top INFO:tensorflow:Base learning rate: 2.000000 INFO:tensorflow:Trainable Variables Total size: 19061760 INFO:tensorflow:Non-trainable variables Total size: 5 INFO:tensorflow:Using optimizer Adam INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Create CheckpointSaverHook. INFO:tensorflow:Graph was finalized. 2019-02-16 17:30:27.735273: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA 2019-02-16 17:30:27.821970: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:964] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero 2019-02-16 17:30:27.822387: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1432] Found device 0 with properties: name: GeForce GTX 1070 major: 6 minor: 1 memoryClockRate(GHz): 1.645 pciBusID: 0000:01:00.0 totalMemory: 7.93GiB freeMemory: 7.60GiB 2019-02-16 17:30:27.822419: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1511] Adding visible gpu devices: 0 2019-02-16 17:30:28.036805: I tensorflow/core/common_runtime/gpu/gpu_device.cc:982] Device interconnect StreamExecutor with strength 1 edge matrix: 2019-02-16 17:30:28.036885: I tensorflow/core/common_runtime/gpu/gpu_device.cc:988] 0 2019-02-16 17:30:28.036891: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1001] 0: N 2019-02-16 17:30:28.037097: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1115] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 7711 MB memory) -> physical GPU (device: 0, name: GeForce GTX 1070, pci bus id: 0000:01:00.0, compute capability: 6.1) INFO:tensorflow:Restoring parameters from /home/usuario/Escritorio/ds/model.ckpt-0 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. INFO:tensorflow:Saving checkpoints for 0 into /home/usuario/Escritorio/ds/model.ckpt. 
2019-02-16 17:30:37.134709: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
[... the same cuda_blas.cc:464 "failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED" line is repeated roughly 110 more times, with timestamps running up to 17:30:37.339992; repetitions omitted ...]
2019-02-16 17:30:40.245546: E tensorflow/stream_executor/cuda/cuda_blas.cc:464] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2019-02-16 17:30:40.245579: W tensorflow/stream_executor/stream.cc:2127] attempting to perform BLAS operation using StreamExecutor without BLAS support
Traceback (most recent call last):
  File "/home/usuario/.conda/envs/magenta/bin/t2t_trainer", line 10, in <module>
    sys.exit(console_entry_point())
  File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/magenta/tensor2tensor/t2t_trainer.py", line 34, in console_entry_point
    tf.app.run(main)
  File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/platform/app.py", line 125, in run
    _sys.exit(main(argv))
  File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/magenta/tensor2tensor/t2t_trainer.py", line 29, in main
    t2t_trainer.main(argv)
  File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensor2tensor/bin/t2t_trainer.py", line 393, in main
    execute_schedule(exp)
  File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensor2tensor/bin/t2t_trainer.py", line 349, in execute_schedule
    getattr(exp, FLAGS.schedule)()
  File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensor2tensor/utils/trainer_lib.py", line 438, in continuous_train_and_eval
    self._eval_spec)
  File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/estimator/training.py", line 471, in train_and_evaluate
    return executor.run()
  File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/estimator/training.py", line 610, in run
    return self.run_local()
  File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/estimator/training.py", line 711, in run_local
    saving_listeners=saving_listeners)
  File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/estimator/estimator.py", line 354, in train
    loss = self._train_model(input_fn, hooks, saving_listeners)
  File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/estimator/estimator.py", line 1207, in _train_model
    return self._train_model_default(input_fn, hooks, saving_listeners)
  File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/estimator/estimator.py", line 1241, in _train_model_default
    saving_listeners)
  File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/estimator/estimator.py", line 1471, in _train_with_estimator_spec
    _, loss = mon_sess.run([estimator_spec.train_op, estimator_spec.loss])
  File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/training/monitored_session.py", line 671, in run
    run_metadata=run_metadata)
  File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/training/monitored_session.py", line 1156, in run
    run_metadata=run_metadata)
  File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/training/monitored_session.py", line 1255, in run
    raise six.reraise(*original_exc_info)
  File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/training/monitored_session.py", line 1240, in run
    return self._sess.run(*args, **kwargs)
  File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/training/monitored_session.py", line 1312, in run
    run_metadata=run_metadata)
  File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/training/monitored_session.py", line 1076, in run
    return self._sess.run(*args, **kwargs)
  File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 929, in run
    run_metadata_ptr)
  File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 1152, in _run
    feed_dict_tensor, options, run_metadata)
  File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 1328, in _do_run
    run_metadata)
  File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 1348, in _do_call
    raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InternalError: Blas GEMM launch failed : a.shape=(3825, 512), b.shape=(512, 512), m=3825, n=512, k=512
  [[node transformer/parallel_0_4/transformer/transformer/body/decoder/layer_0/self_attention/multihead_attention/q/Tensordot/MatMul (defined at /home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensor2tensor/layers/common_layers.py:2935) = MatMul[T=DT_FLOAT, _class=["loc:@train...d/MatMul_1"], transpose_a=false, transpose_b=false, _device="/job:localhost/replica:0/task:0/device:GPU:0"](transformer/parallel_0_4/transformer/transformer/body/decoder/layer_0/self_attention/multihead_attention/v/Tensordot/Reshape, Read_18/ReadVariableOp)]]
  [[{{node transformer/parallel_0_4/transformer/transformer/body/decoder/layer_2/self_attention/multihead_attention/v/Tensordot/Shape/_2433}} = _Recv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/device:CPU:0", send_device="/job:localhost/replica:0/task:0/device:GPU:0", send_device_incarnation=1, tensor_name="edge1404...rdot/Shape", tensor_type=DT_INT32, _device="/job:localhost/replica:0/task:0/device:CPU:0"]]

Caused by op u'transformer/parallel_0_4/transformer/transformer/body/decoder/layer_0/self_attention/multihead_attention/q/Tensordot/MatMul', defined at:
"/home/usuario/.conda/envs/magenta/bin/t2t_trainer", line 10, in sys.exit(console_entry_point()) File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/magenta/tensor2tensor/t2t_trainer.py", line 34, in console_entry_point tf.app.run(main) File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/platform/app.py", line 125, in run _sys.exit(main(argv)) File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/magenta/tensor2tensor/t2t_trainer.py", line 29, in main t2t_trainer.main(argv) File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensor2tensor/bin/t2t_trainer.py", line 393, in main execute_schedule(exp) File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensor2tensor/bin/t2t_trainer.py", line 349, in execute_schedule getattr(exp, FLAGS.schedule)() File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensor2tensor/utils/trainer_lib.py", line 438, in continuous_train_and_eval self._eval_spec) File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/estimator/training.py", line 471, in train_and_evaluate return executor.run() File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/estimator/training.py", line 610, in run return self.run_local() File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/estimator/training.py", line 711, in run_local saving_listeners=saving_listeners) File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/estimator/estimator.py", line 354, in train loss = self._train_model(input_fn, hooks, saving_listeners) File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/estimator/estimator.py", line 1207, in _train_model return self._train_model_default(input_fn, hooks, saving_listeners) File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/estimator/estimator.py", line 1237, in _train_model_default features, labels, model_fn_lib.ModeKeys.TRAIN, self.config) File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/estimator/estimator.py", line 1195, in _call_model_fn model_fn_results = self._model_fn(features=features, kwargs) File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensor2tensor/utils/t2t_model.py", line 1368, in wrapping_model_fn use_tpu=use_tpu) File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensor2tensor/utils/t2t_model.py", line 1429, in estimator_model_fn logits, losses_dict = model(features) # pylint: disable=not-callable File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/layers/base.py", line 374, in call outputs = super(Layer, self).call(inputs, *args, kwargs) File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/keras/engine/base_layer.py", line 757, in call* outputs = self.call(inputs, args, kwargs) File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensor2tensor/utils/t2t_model.py", line 295, in call sharded_logits, losses = self.model_fn_sharded(sharded_features) File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensor2tensor/utils/t2t_model.py", line 356, in model_fn_sharded sharded_logits, sharded_losses = dp(self.model_fn, datashard_to_features) File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensor2tensor/utils/expert_utils.py", line 231, in call 
    outputs.append(fns[i](*my_args[i], **my_kwargs[i]))
  File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensor2tensor/utils/t2t_model.py", line 392, in model_fn
    body_out = self.body(transformed_features)
  File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensor2tensor/models/transformer.py", line 210, in body
    losses=losses)
  File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensor2tensor/models/transformer.py", line 161, in decode
    losses=losses)
  File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensor2tensor/models/transformer.py", line 1316, in transformer_decoder
    vars_3d=hparams.get("attention_variables_3d"))
  File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensor2tensor/layers/common_attention.py", line 3408, in multihead_attention
    vars_3d_num_heads=vars_3d_num_heads)
  File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensor2tensor/layers/common_attention.py", line 3275, in compute_qkv
    vars_3d_num_heads=vars_3d_num_heads)
  File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensor2tensor/layers/common_attention.py", line 3235, in compute_attention_component
    antecedent, total_depth, use_bias=False, name=name)
  File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensor2tensor/layers/common_layers.py", line 2935, in dense
    return tf.layers.dense(x, units, **kwargs)
  File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/layers/core.py", line 184, in dense
    return layer.apply(inputs)
  File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/keras/engine/base_layer.py", line 817, in apply
    return self.__call__(inputs, *args, **kwargs)
  File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/layers/base.py", line 374, in __call__
    outputs = super(Layer, self).__call__(inputs, *args, **kwargs)
  File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/keras/engine/base_layer.py", line 757, in __call__
    outputs = self.call(inputs, *args, **kwargs)
  File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/keras/layers/core.py", line 963, in call
    outputs = standard_ops.tensordot(inputs, self.kernel, [[rank - 1], [0]])
  File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/ops/math_ops.py", line 2985, in tensordot
    ab_matmul = matmul(a_reshape, b_reshape)
  File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/ops/math_ops.py", line 2057, in matmul
    a, b, transpose_a=transpose_a, transpose_b=transpose_b, name=name)
  File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/ops/gen_math_ops.py", line 4560, in mat_mul
    name=name)
  File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/framework/op_def_library.py", line 787, in _apply_op_helper
    op_def=op_def)
  File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/util/deprecation.py", line 488, in new_func
    return func(*args, **kwargs)
  File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 3274, in create_op
    op_def=op_def)
  File "/home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 1770, in __init__
    self._traceback = tf_stack.extract_stack()
InternalError (see above for traceback): Blas GEMM launch failed : a.shape=(3825, 512), b.shape=(512, 512), m=3825, n=512, k=512
  [[node transformer/parallel_0_4/transformer/transformer/body/decoder/layer_0/self_attention/multihead_attention/q/Tensordot/MatMul (defined at /home/usuario/.conda/envs/magenta/lib/python2.7/site-packages/tensor2tensor/layers/common_layers.py:2935) = MatMul[T=DT_FLOAT, _class=["loc:@train...d/MatMul_1"], transpose_a=false, transpose_b=false, _device="/job:localhost/replica:0/task:0/device:GPU:0"](transformer/parallel_0_4/transformer/transformer/body/decoder/layer_0/self_attention/multihead_attention/v/Tensordot/Reshape, Read_18/ReadVariableOp)]]
  [[{{node transformer/parallel_0_4/transformer/transformer/body/decoder/layer_2/self_attention/multihead_attention/v/Tensordot/Shape/_2433}} = _Recv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/device:CPU:0", send_device="/job:localhost/replica:0/task:0/device:GPU:0", send_device_incarnation=1, tensor_name="edge1404...rdot/Shape", tensor_type=DT_INT32, _device="/job:localhost/replica:0/task:0/device:CPU:0"]]
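
Note (not part of the log above): the long run of "failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED" messages followed by "Blas GEMM launch failed" usually means cuBLAS could not get GPU memory for its handle, for example because TensorFlow has already reserved essentially all of the card's memory or another process is holding it, rather than pointing to a bug in the Transformer model itself. Below is a minimal, generic TF 1.x sketch of the usual workaround, letting the GPU allocator grow on demand instead of pre-allocating everything; it is only an illustration under that assumption, not the t2t_trainer command from this run, and the matrix shapes are simply the ones from the failing MatMul.

    # Minimal sketch (assumption: the failure is GPU-memory related, TF 1.x API).
    import tensorflow as tf

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  # allocate GPU memory lazily
    # config.gpu_options.per_process_gpu_memory_fraction = 0.8  # or cap it explicitly

    with tf.Session(config=config) as sess:
        # A trivial 3825x512 * 512x512 matmul just to confirm cuBLAS can
        # create its handle with the chosen memory settings.
        a = tf.random_normal([3825, 512])
        b = tf.random_normal([512, 512])
        print(sess.run(tf.matmul(a, b)).shape)

When training goes through t2t_trainer / tf.estimator rather than a hand-built Session, the same idea is usually applied indirectly: reduce the training batch size, make sure no other process is already holding GPU memory (check nvidia-smi), and, where the installed trainer exposes it, cap the per-process GPU memory fraction.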