PaddlePaddle / models

Officially maintained, supported by PaddlePaddle, including CV, NLP, Speech, Rec, TS, big models and so on.
Apache License 2.0
6.9k stars 2.91k forks source link

Configure OCR Attention model. #837

Closed wanghaoshuang closed 6 years ago

shiwenguo commented 6 years ago
import paddle.fluid as fluid

decoder_size = 128
word_vector_dim = 128
max_length = 100
sos = 0
eos = 1

def conv_bn_pool(input,
                 group,
                 out_ch,
                 act="relu",
                 param=None,
                 bias=None,
                 param_0=None,
                 is_test=False,
                 pool=True):
    """Stack `group` conv(3x3, pad=1) + batch-norm pairs, optionally
    followed by a 2x2/stride-2 max-pool.

    Args:
        input: input feature map (NCHW tensor).
        group: number of conv + batch-norm pairs to stack.
        out_ch: per-conv output channel counts; must have len >= group.
        act: activation applied by each batch_norm layer.
        param: ParamAttr for the conv weights and batch-norm scale.
        bias: ParamAttr for the batch-norm bias.
        param_0: if not None, overrides `param` for every conv in this call
            (callers use it only for the first stage of the backbone).
        is_test: True for inference (freezes batch-norm statistics).
        pool: whether to append the final max-pool.

    Returns:
        The transformed tensor.
    """
    tmp = input
    # `range` works on both Python 2 and 3; the original Py2-only `xrange`
    # raises NameError under Python 3. `group` is tiny, so this is free.
    for i in range(group):
        tmp = fluid.layers.conv2d(
            input=tmp,
            num_filters=out_ch[i],
            filter_size=3,
            padding=1,
            param_attr=param if param_0 is None else param_0,
            act=None,  # activation is applied by the batch_norm below
            use_cudnn=True)
        tmp = fluid.layers.batch_norm(
            input=tmp,
            act=act,
            param_attr=param,
            bias_attr=bias,
            is_test=is_test)
    if pool:  # idiomatic truthiness test instead of `pool == True`
        tmp = fluid.layers.pool2d(
            input=tmp,
            pool_size=2,
            pool_type='max',
            pool_stride=2,
            use_cudnn=True,
            ceil_mode=True)

    return tmp

def ocr_convs(input,
              num,
              with_bn,
              regularizer=None,
              gradient_clip=None,
              is_test=False):
    """Build the CNN backbone: four conv_bn_pool stages with 16, 32, 64 and
    128 channels, the last stage without pooling.

    `num` must be a multiple of 4. `with_bn` is accepted for interface
    compatibility; batch-norm is always applied by conv_bn_pool.
    """
    assert (num % 4 == 0)

    # Biases start at zero (Normal with zero stddev).
    zero_bias = fluid.ParamAttr(
        regularizer=regularizer,
        gradient_clip=gradient_clip,
        initializer=fluid.initializer.Normal(0.0, 0.0))
    # Tighter Gaussian for the very first stage's conv weights.
    first_stage_w = fluid.ParamAttr(
        regularizer=regularizer,
        gradient_clip=gradient_clip,
        initializer=fluid.initializer.Normal(0.0, 0.0005))
    # Default Gaussian for all remaining conv weights.
    conv_w = fluid.ParamAttr(
        regularizer=regularizer,
        gradient_clip=gradient_clip,
        initializer=fluid.initializer.Normal(0.0, 0.01))

    features = conv_bn_pool(
        input, 2, [16, 16],
        param=conv_w, bias=zero_bias, param_0=first_stage_w, is_test=is_test)
    features = conv_bn_pool(
        features, 2, [32, 32], param=conv_w, bias=zero_bias, is_test=is_test)
    features = conv_bn_pool(
        features, 2, [64, 64], param=conv_w, bias=zero_bias, is_test=is_test)
    features = conv_bn_pool(
        features, 2, [128, 128],
        param=conv_w, bias=zero_bias, is_test=is_test, pool=False)
    return features

def encoder_net(images,
                rnn_hidden_size=200,
                regularizer=None,
                gradient_clip=None,
                is_test=False):
    """Encode input images into a sequence for the attention decoder.

    Pipeline: CNN backbone -> 1-pixel-wide column slicing (im2sequence) ->
    two fc projections -> forward/backward GRUs -> concatenation plus a
    projection down to `decoder_size`.

    Returns:
        (gru_backward, encoded_vector, encoded_proj)
    """
    conv_features = ocr_convs(
        images,
        8,
        True,
        regularizer=regularizer,
        gradient_clip=gradient_clip,
        is_test=is_test)
    # Slice the feature map into a sequence of full-height, width-1 columns.
    sliced_feature = fluid.layers.im2sequence(
        input=conv_features,
        stride=[1, 1],
        filter_size=[conv_features.shape[2], 1])

    # NOTE: the layer-call order below is kept exactly as in the original so
    # that fluid's auto-generated parameter names stay stable.
    weight_attr = fluid.ParamAttr(
        regularizer=regularizer,
        gradient_clip=gradient_clip,
        initializer=fluid.initializer.Normal(0.0, 0.02))
    gru_bias_attr = fluid.ParamAttr(
        regularizer=regularizer,
        gradient_clip=gradient_clip,
        initializer=fluid.initializer.Normal(0.0, 0.02),
        learning_rate=2.0)
    fc_bias_attr = fluid.ParamAttr(
        regularizer=regularizer,
        gradient_clip=gradient_clip,
        initializer=fluid.initializer.Normal(0.0, 0.02))

    # Separate input projections for the two GRU directions.
    fwd_input = fluid.layers.fc(input=sliced_feature,
                                size=rnn_hidden_size * 3,
                                param_attr=weight_attr,
                                bias_attr=fc_bias_attr)
    bwd_input = fluid.layers.fc(input=sliced_feature,
                                size=rnn_hidden_size * 3,
                                param_attr=weight_attr,
                                bias_attr=fc_bias_attr)

    gru_forward = fluid.layers.dynamic_gru(
        input=fwd_input,
        size=rnn_hidden_size,
        param_attr=weight_attr,
        bias_attr=gru_bias_attr,
        candidate_activation='relu')
    gru_backward = fluid.layers.dynamic_gru(
        input=bwd_input,
        size=rnn_hidden_size,
        is_reverse=True,
        param_attr=weight_attr,
        bias_attr=gru_bias_attr,
        candidate_activation='relu')

    encoded_vector = fluid.layers.concat(
        input=[gru_forward, gru_backward], axis=1)
    encoded_proj = fluid.layers.fc(input=encoded_vector,
                                   size=decoder_size,
                                   bias_attr=False)

    return gru_backward, encoded_vector, encoded_proj

def attention_train_net(images, label_in, label_out, args, num_classes):
    """Build the OCR attention training network.

    Args:
        images: input image batch.
        label_in: decoder input token ids (shifted target sequence).
        label_out: decoder target token ids.
        args: namespace providing `l2` (weight decay) and `learning_rate`.
        num_classes: number of character classes (sos/eos add two more).

    Returns:
        (sum_cost, error_evaluator): summed cross-entropy loss (with the
        Adadelta minimize op already attached) and an edit-distance
        evaluator over the argmax predictions.
    """
    regularizer = fluid.regularizer.L2Decay(args.l2)
    gradient_clip = None
    gru_backward, encoded_vector, encoded_proj = encoder_net(
        images,
        regularizer=regularizer,
        gradient_clip=gradient_clip)

    # First step of the backward GRU seeds the decoder's initial state.
    backward_first = fluid.layers.sequence_pool(input=gru_backward,
                                                pool_type='first')
    decoder_boot = fluid.layers.fc(input=backward_first,
                                   size=decoder_size,
                                   bias_attr=False,
                                   act="relu")

    def gru_decoder_with_attention(target_embedding, encoder_vec,
                                   encoder_proj, decoder_boot, decoder_size):
        """Attention decoder: one DynamicRNN step per target token."""

        def simple_attention(encoder_vec, encoder_proj, decoder_state):
            # Additive attention: project decoder state, broadcast it over
            # the encoder sequence, score, softmax, and pool a context.
            decoder_state_proj = fluid.layers.fc(input=decoder_state,
                                                 size=decoder_size,
                                                 bias_attr=False)
            decoder_state_expand = fluid.layers.sequence_expand(
                x=decoder_state_proj, y=encoder_proj)
            concated = fluid.layers.concat(
                input=[encoder_proj, decoder_state_expand], axis=1)
            attention_weights = fluid.layers.fc(input=concated,
                                                size=1,
                                                act='tanh',
                                                bias_attr=False)
            attention_weights = fluid.layers.sequence_softmax(
                input=attention_weights)
            weigths_reshape = fluid.layers.reshape(
                x=attention_weights, shape=[-1])
            scaled = fluid.layers.elementwise_mul(
                x=encoder_vec, y=weigths_reshape, axis=0)
            context = fluid.layers.sequence_pool(input=scaled, pool_type='sum')
            return context

        rnn = fluid.layers.DynamicRNN()

        # Zero initial cell state, batch-sized like decoder_boot.
        cell_init = fluid.layers.fill_constant_batch_size_like(
            input=decoder_boot,
            value=0.0,
            shape=[-1, decoder_size],
            dtype='float32')
        cell_init.stop_gradient = False

        with rnn.block():
            current_word = rnn.step_input(target_embedding)
            encoder_vec = rnn.static_input(encoder_vec)
            encoder_proj = rnn.static_input(encoder_proj)
            hidden_mem = rnn.memory(init=decoder_boot, need_reorder=True)
            cell_mem = rnn.memory(init=cell_init)
            context = simple_attention(encoder_vec, encoder_proj, hidden_mem)
            decoder_inputs = fluid.layers.concat(
                input=[context, current_word], axis=1)
            # NOTE(review): `lstm_step` is not defined in this file — it must
            # be provided elsewhere (expected contract: returns (h, c)).
            h, c = lstm_step(decoder_inputs, hidden_mem, cell_mem, decoder_size)
            rnn.update_memory(hidden_mem, h)
            rnn.update_memory(cell_mem, c)
            out = fluid.layers.fc(input=h,
                                  size=num_classes + 2,  # + sos and eos
                                  bias_attr=True,
                                  act='softmax')
            rnn.output(out)
        return rnn()

    # --- training graph ---
    trg_embedding = fluid.layers.embedding(
        input=label_in,
        size=[num_classes + 2, word_vector_dim],
        dtype='float32')
    prediction = gru_decoder_with_attention(trg_embedding, encoded_vector,
                                            encoded_proj, decoder_boot,
                                            decoder_size)
    cost = fluid.layers.cross_entropy(input=prediction, label=label_out)
    sum_cost = fluid.layers.reduce_sum(cost)

    optimizer = fluid.optimizer.Adadelta(
        learning_rate=args.learning_rate,
        epsilon=1.0e-6,
        rho=0.9)
    optimizer.minimize(sum_cost)

    # BUG FIX: the original called fluid.layers.maxid_layer(input=decoder),
    # but `decoder` is undefined here and `maxid_layer` does not exist in
    # fluid — take the argmax of `prediction` via topk(k=1) instead.
    _, maxid = fluid.layers.topk(input=prediction, k=1)
    # Cast labels to int64 as required by EditDistance.
    casted_label = fluid.layers.cast(x=label_out, dtype='int64')
    error_evaluator = fluid.evaluator.EditDistance(
        input=maxid,
        label=casted_label,
        ignored_tokens=[sos, eos])  # skip the start/end markers

    # Beam-search inference graph intentionally omitted here; build it in a
    # separate inference function rather than keeping dead commented-out code.
    return sum_cost, error_evaluator
shiwenguo commented 6 years ago
ooooooooooooooooo
(-64L, 200L)
(-64L, 400L)
(-64L, 128L)
eeeeeeeeeeeeeeeeeeeee
(-1L, 128L)
(-64L, 400L)
(-64L, 128L)
(-64L, 128L)
aaaaaaaaaaaaaaaaaaaaaaa
(-64L, 400L)
(-1L, 128L)
shanyi15 commented 6 years ago

您好，此issue在近一个月内暂无更新，我们将于今天内关闭。若在关闭后您仍需跟进提问，可重新开启此问题，我们将在24小时内回复您。因关闭带来的不便我们深表歉意，请您谅解~感谢您对PaddlePaddle的支持！ Hello, this issue has not been updated in the past month. We will close it today for the sake of other users' experience. If you still need to follow up on this question after it is closed, please feel free to reopen it; in that case, we will get back to you within 24 hours. We apologize for the inconvenience caused by the closure and thank you so much for your support of the PaddlePaddle Group!