The dynamic-shape plugin interface methods (the first two are pure virtual and must be overridden):

```cpp
virtual nvinfer1::DimsExprs getOutputDimensions(
    int output_index,
    const nvinfer1::DimsExprs* inputs,
    int nb_inputs,
    nvinfer1::IExprBuilder& expr_builder) TRT_NOEXCEPT = 0;  // NOLINT

virtual bool supportsFormatCombination(
    int pos,
    const nvinfer1::PluginTensorDesc* in_out,
    int nb_inputs,
    int nb_outputs) TRT_NOEXCEPT = 0;

virtual void configurePlugin(const nvinfer1::DynamicPluginTensorDesc* in,
                             int nb_inputs,
                             const nvinfer1::DynamicPluginTensorDesc* out,
                             int nb_outputs) TRT_NOEXCEPT;
```
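For orientation, below is a minimal sketch of how a derived plugin might override these three methods. The class name `IdentityShapePlugin` and the pass-through shape logic are illustrative assumptions, not Paddle's actual `TransformerInputConvertPlugin`; it assumes a TensorRT 8+ build where `TRT_NOEXCEPT` expands to `noexcept`.

```cpp
#include <NvInfer.h>

// Illustrative only: a dynamic-shape plugin whose output 0 mirrors input 0.
// A real plugin must also implement the remaining pure-virtual methods of
// nvinfer1::IPluginV2DynamicExt (clone, enqueue, serialization, ...), so this
// class stays abstract as written.
class IdentityShapePlugin : public nvinfer1::IPluginV2DynamicExt {
 public:
  nvinfer1::DimsExprs getOutputDimensions(
      int output_index,
      const nvinfer1::DimsExprs* inputs,
      int nb_inputs,
      nvinfer1::IExprBuilder& expr_builder) noexcept override {
    // The output shape is the same symbolic expression as the first input's.
    return inputs[0];
  }

  bool supportsFormatCombination(
      int pos,
      const nvinfer1::PluginTensorDesc* in_out,
      int nb_inputs,
      int nb_outputs) noexcept override {
    // Accept only linear FP32 tensors at every input/output position.
    return in_out[pos].format == nvinfer1::TensorFormat::kLINEAR &&
           in_out[pos].type == nvinfer1::DataType::kFLOAT;
  }

  void configurePlugin(const nvinfer1::DynamicPluginTensorDesc* in,
                       int nb_inputs,
                       const nvinfer1::DynamicPluginTensorDesc* out,
                       int nb_outputs) noexcept override {
    // Nothing to pre-compute for this identity example.
  }
};
```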
The d2l `masked_softmax` implementation:

```python
import math
import torch
from torch import nn
from d2l import torch as d2l


def masked_softmax(X, valid_lens):
    """Perform a softmax over the last axis while masking out elements."""
    # X: 3D tensor; valid_lens: 1D or 2D tensor of valid sequence lengths.
    if valid_lens is None:
        # dim=-1 applies softmax along the last axis of X
        # (for a 3D X this is the same as dim=2, i.e. softmax over each row).
        return nn.functional.softmax(X, dim=-1)
    else:
        shape = X.shape
        if valid_lens.dim() == 1:
            # Repeat each length once per row so it lines up with the
            # flattened (batch * rows, columns) view of X below.
            valid_lens = torch.repeat_interleave(valid_lens, shape[1])
        else:
            # Flatten the 2D lengths into a 1D vector.
            valid_lens = valid_lens.reshape(-1)
        # Replace masked elements on the last axis with a very large negative
        # value, so that their softmax output becomes 0.
        X = d2l.sequence_mask(X.reshape(-1, shape[-1]), valid_lens,
                              value=-1e6)
        return nn.functional.softmax(X.reshape(shape), dim=-1)
```
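A quick sanity check of the function above (shapes chosen arbitrarily): with valid lengths 2 and 3, only the first two entries of each row in the first batch and the first three in the second carry probability mass; the rest come out as 0.

```python
# Two batches of 2x4 scores; keep 2 columns in the first batch, 3 in the second.
print(masked_softmax(torch.rand(2, 2, 4), torch.tensor([2, 3])))
```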
Plugin referenced: `TransformerInputConvertPlugin`