tensorflow / adanet

Fast and flexible AutoML with learning guarantees.
https://adanet.readthedocs.io
Apache License 2.0

Problem with placeholder #117

Open augustodelscenario opened 5 years ago

augustodelscenario commented 5 years ago

Hello, I'm trying to create a more complex NN. Can anyone help me figure out what's going wrong?

Here is the code:

```python
class _SimpleCNNBuilder(adanet.subnetwork.Builder):

  def __init__(self, optimizer, layer_size, num_layers, learn_mixture_weights,
               seed):
    self._optimizer = optimizer
    self._layer_size = layer_size
    self._num_layers = num_layers
    self._learn_mixture_weights = learn_mixture_weights
    self._seed = seed

  def build_subnetwork(self, features, logits_dimension, training,
                       iteration_step, summary, previous_ensemble=None):
    input_layer = tf.to_float(features[FEATURES_KEY])
    kernel_initializer = tf.glorot_uniform_initializer(seed=self._seed)
    last_layer = input_layer
    array_layer = []
    for _ in range(self._num_layers):
      # NOTE: this creates a brand-new Keras Input placeholder each iteration.
      in_layer = tf.keras.Input((1, 98))
      tmp_layer = tf.keras.layers.Conv1D(
          filters=16,
          kernel_size=3,
          padding="same",
          activation="relu",
          kernel_initializer=kernel_initializer)(in_layer)
      tmp_layer = tf.keras.layers.MaxPool1D(pool_size=1, strides=1)(tmp_layer)
      tmp_layer = tf.keras.layers.Flatten()(tmp_layer)
      tmp_layer = tf.keras.layers.Dense(
          units=64, activation="relu",
          kernel_initializer=kernel_initializer)(tmp_layer)
      array_layer.append(tmp_layer)

    if len(array_layer) > 1:
      last_layer = tf.keras.layers.concatenate(array_layer)
    else:
      last_layer = array_layer[0]

    logits = tf.layers.dense(
        last_layer,
        units=logits_dimension,
        kernel_initializer=kernel_initializer)

    persisted_tensors = {_NUM_LAYERS_KEY: tf.constant(self._num_layers)}
    return adanet.Subnetwork(
        last_layer=last_layer,
        logits=logits,
        complexity=self._measure_complexity(),
        persisted_tensors=persisted_tensors)

  def _measure_complexity(self):
    return tf.sqrt(tf.to_float(self._num_layers))

  def build_subnetwork_train_op(self, subnetwork, loss, var_list, labels,
                                iteration_step, summary, previous_ensemble):
    return self._optimizer.minimize(loss=loss, var_list=var_list)

  def build_mixture_weights_train_op(self, loss, var_list, logits, labels,
                                     iteration_step, summary):
    if not self._learn_mixture_weights:
      return tf.no_op()
    return self._optimizer.minimize(loss=loss, var_list=var_list)

  @property
  def name(self):
    if self._num_layers == 0:
      return "linear"
    return "{}_layer_dnn".format(self._num_layers)

class SimpleCNNGenerator(adanet.subnetwork.Generator):

  def __init__(self, optimizer, layer_size=64, learn_mixture_weights=False,
               seed=None):
    self._seed = seed
    self._dnn_builder_fn = functools.partial(
        _SimpleCNNBuilder,
        optimizer=optimizer,
        layer_size=layer_size,
        learn_mixture_weights=learn_mixture_weights)

  def generate_candidates(self, previous_ensemble, iteration_number,
                          previous_ensemble_reports, all_reports):
    num_layers = 1
    seed = self._seed
    if previous_ensemble:
      num_layers = tf.contrib.util.constant_value(
          previous_ensemble.weighted_subnetworks[
              -1].subnetwork.persisted_tensors[_NUM_LAYERS_KEY])
    if seed is not None:
      seed += iteration_number
    return [
        self._dnn_builder_fn(num_layers=num_layers, seed=seed),
        self._dnn_builder_fn(num_layers=num_layers + 1, seed=seed),
    ]

LEARNING_RATE = 0.001
TRAIN_STEPS = 80000
BATCH_SIZE = 98

LEARN_MIXTURE_WEIGHTS = False
ADANET_LAMBDA = 0
ADANET_ITERATIONS = 4


def train_and_evaluate(experiment_name,
                       learn_mixture_weights=LEARN_MIXTURE_WEIGHTS,
                       adanet_lambda=ADANET_LAMBDA):
  model_dir = os.path.join(LOG_DIR, experiment_name)

  estimator = adanet.Estimator(
      head=tf.contrib.estimator.regression_head(
          label_dimension=2,
          loss_reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE),
      subnetwork_generator=SimpleCNNGenerator(
          optimizer=tf.train.RMSPropOptimizer(learning_rate=LEARNING_RATE),
          learn_mixture_weights=learn_mixture_weights,
          seed=RANDOM_SEED),
      adanet_lambda=adanet_lambda,
      max_iteration_steps=TRAIN_STEPS // ADANET_ITERATIONS,
      evaluator=adanet.Evaluator(
          input_fn=input_fn("train", training=False, batch_size=BATCH_SIZE)),
      config=tf.estimator.RunConfig(
          save_summary_steps=80000,
          save_checkpoints_steps=80000,
          tf_random_seed=RANDOM_SEED,
          model_dir=model_dir))

  train_spec = tf.estimator.TrainSpec(
      input_fn=input_fn("train", training=True, batch_size=BATCH_SIZE),
      max_steps=TRAIN_STEPS)
  eval_spec = tf.estimator.EvalSpec(
      input_fn=input_fn("test", training=False, batch_size=BATCH_SIZE),
      steps=None,
      start_delay_secs=1,
      throttle_secs=30)

  test = tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)

  print("Loss:", test[0]["average_loss"])
  print("Architecture:", ensemble_architecture(test[0]))

  return estimator
```

I get this error:

```
InvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'adanet/iteration_0/subnetwork_t0_2_layer_dnn/input_2' with dtype float and shape [?,1,98]
	 [[{{node adanet/iteration_0/subnetwork_t0_2_layer_dnn/input_2}} = Placeholder[dtype=DT_FLOAT, shape=[?,1,98], _device="/job:localhost/replica:0/task:0/device:CPU:0"]()]]
```
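The placeholder named in the traceback (`.../input_2`, shape `[?,1,98]`) matches the `tf.keras.Input((1, 98))` created inside `build_subnetwork`: each loop iteration adds a brand-new placeholder that nothing ever feeds, since an Estimator only feeds the tensors produced by its `input_fn`. Below is a minimal sketch of the likely fix, assuming the convolution stack is meant to consume `features[FEATURES_KEY]` (shape `[batch, 1, 98]`) directly; all other methods stay as above:

```python
  def build_subnetwork(self, features, logits_dimension, training,
                       iteration_step, summary, previous_ensemble=None):
    # Sketch (TF 1.x): feed the Keras layers from the `features` tensor
    # instead of creating an unfed tf.keras.Input placeholder per iteration.
    input_layer = tf.to_float(features[FEATURES_KEY])  # [batch, 1, 98]
    kernel_initializer = tf.glorot_uniform_initializer(seed=self._seed)
    array_layer = []
    for _ in range(self._num_layers):
      tmp_layer = tf.keras.layers.Conv1D(
          filters=16, kernel_size=3, padding="same", activation="relu",
          kernel_initializer=kernel_initializer)(input_layer)  # real data in
      tmp_layer = tf.keras.layers.MaxPool1D(pool_size=1, strides=1)(tmp_layer)
      tmp_layer = tf.keras.layers.Flatten()(tmp_layer)
      tmp_layer = tf.keras.layers.Dense(
          units=64, activation="relu",
          kernel_initializer=kernel_initializer)(tmp_layer)
      array_layer.append(tmp_layer)

    # Same branch-merging logic as before, now fed by real input data.
    if len(array_layer) > 1:
      last_layer = tf.keras.layers.concatenate(array_layer)
    else:
      last_layer = array_layer[0]

    logits = tf.layers.dense(
        last_layer,
        units=logits_dimension,
        kernel_initializer=kernel_initializer)

    persisted_tensors = {_NUM_LAYERS_KEY: tf.constant(self._num_layers)}
    return adanet.Subnetwork(
        last_layer=last_layer,
        logits=logits,
        complexity=self._measure_complexity(),
        persisted_tensors=persisted_tensors)
```

With no free-standing `tf.keras.Input` left in the graph, there is nothing for the session to demand a feed for, which should make the `InvalidArgumentError` go away.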