chilynn / sequence-labeling

307 stars 167 forks source link

Why is my loss very small and below 0? #30

Open qlwang25 opened 6 years ago

qlwang25 commented 6 years ago

Is my code wrong, or is something else wrong? If you know how to fix it, please tell me. Thanks.

    @staticmethod
    def argmax(vec):
        _, idx = torch.max(vec, 1)
        return idx.item()

    def log_sum_exp(self, vec):
        """Numerically stable log(sum(exp(vec))) over a 1 x N score row.

        Subtracts the row maximum before exponentiating so large scores
        do not overflow; adds it back afterwards.
        """
        _, top_idx = torch.max(vec, 1)
        top = vec[0, top_idx.item()]
        shifted = vec - top.view(1, -1).expand(1, vec.size()[1])
        return top + torch.log(torch.sum(torch.exp(shifted)))

    def _forward_alg(self, feats, mask):
        """CRF forward algorithm: log-partition score, summed over the batch.

        feats: (batch, step, tag) emission scores.
        mask: per-token mask; mask.sum(1) is taken as the sentence lengths,
              so entries are presumably 0/1 — TODO confirm against caller.
        Returns a 1-element tensor holding the sum of per-sentence
        log-partition scores Z(x).
        """
        batch_size, step_num, tag_size = feats.size()
        # lengths[b] = number of real (unpadded) tokens in sentence b
        lengths = mask.sum(1).tolist()
        alpha = torch.FloatTensor([0]).to(self.device)
        # NOTE(review): triple Python loop (batch x steps x tags) mirrors the
        # classic PyTorch CRF tutorial; correct but slow for large batches.
        for b in range(batch_size):
            # Everything starts "impossible" (-10000) except the START tag.
            init_alphas = torch.full((1, self.tag_size), -10000.).to(self.device)
            init_alphas[0][self.tag_to_ix[START_TAG]] = 0.

            forward_var = init_alphas

            # Only iterate over the real tokens; padding is skipped entirely.
            for k in range(lengths[b]):
                alphas_t = []  # The forward tensors at this timestep
                for next_tag in range(self.tag_size):
                    # Emission score is the same regardless of previous tag.
                    emit_score = feats[b][k][next_tag].view(1, -1).expand(1, self.tag_size)
                    # transitions[next_tag] = scores of arriving at next_tag
                    # from each possible previous tag (row-indexed by "to").
                    trans_score = self.transitions[next_tag].view(1, -1)
                    next_tag_var = forward_var + trans_score + emit_score
                    alphas_t.append(self.log_sum_exp(next_tag_var).view(1))
                forward_var = torch.cat(alphas_t).view(1, -1)
            # Close the path with the transition into STOP.
            terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]]
            alpha += self.log_sum_exp(terminal_var)
        return alpha

    def _score_sentence(self, features, arguments, mask):
        """Score of the provided gold tag sequences, summed over the batch.

        features: (batch, step, tag) emission scores.
        arguments: per-sentence tag id sequences (may include padding).
        mask: per-token mask; mask.sum(1) gives the real sentence lengths.
        Returns a 1-element tensor with the summed gold-path scores.

        BUG FIX: the STOP transition previously used tags[-1], the last
        element of the possibly PADDED tag sequence. When arguments[b] is
        longer than the real sentence, the gold score was computed from a
        pad tag and could exceed the partition score, yielding a negative
        loss (the symptom reported in this issue). Tags are now truncated
        to the real length so tags[-1] is the last real tag.
        """
        batch_size, step_num, tag_size = features.size()
        lengths = mask.sum(1).tolist()
        score = torch.zeros(1).to(self.device)
        for b in range(batch_size):
            length = int(lengths[b])
            # Keep only the real (unpadded) tags for this sentence.
            tags = torch.Tensor(arguments[b][:length]).long().to(self.device)
            # Prepend START so tags[i] / tags[i+1] line up as (from, to) pairs.
            tags = torch.cat([torch.Tensor([self.tag_to_ix[START_TAG]]).long().to(self.device), tags])
            feats = features[b][:length, :]
            for i, feat in enumerate(feats):
                # transitions[to, from] + emission score of the gold tag.
                score = score + self.transitions[tags[i + 1], tags[i]] + feat[tags[i + 1]]
            # Transition from the last REAL tag into STOP.
            score = score + self.transitions[self.tag_to_ix[STOP_TAG], tags[-1]]
        return score

    def neg_log_likelihood(self, inps):
        """Negative log-likelihood of the gold tags under the CRF.

        inps: a (sentences, mask, tags) triple.
        Returns the partition (forward) score minus the gold-path score.
        """
        sentences, mask, gold_tags = inps
        lstm_feats = self._get_lstm_features(sentences, mask)
        partition_score = self._forward_alg(lstm_feats, mask)
        gold_score = self._score_sentence(lstm_feats, gold_tags, mask)
        return partition_score - gold_score
train---epoch: 8, learn rate: 0.001000, global step: 1525
loss: -13789.84375000
macro arg---P: 0.592268, R: 0.567828, F: 0.579791
---------------------------------------
train---epoch: 8, learn rate: 0.001000, global step: 1526
loss: -370.70312500
macro arg---P: 0.573383, R: 0.599768, F: 0.586279
---------------------------------------
train---epoch: 8, learn rate: 0.001000, global step: 1527
loss: -9303.57812500
macro arg---P: 0.578675, R: 0.598240, F: 0.588295
---------------------------------------
train---epoch: 8, learn rate: 0.001000, global step: 1528
loss: 718.17187500
macro arg---P: 0.585216, R: 0.577113, F: 0.581136
---------------------------------------
train---epoch: 8, learn rate: 0.001000, global step: 1529
loss: -2091.90625000
macro arg---P: 0.601942, R: 0.591456, F: 0.596653
---------------------------------------
train---epoch: 8, learn rate: 0.001000, global step: 1530
loss: -12369.98437500
macro arg---P: 0.602512, R: 0.591223, F: 0.596814
---------------------------------------