This commit is contained in:
parent f4d6e2f5e1
commit ba38079ff4

  0  layers/__init__.py  Normal file
  0  layers/attentions/__init__.py  Normal file
397  layers/attentions/atten_encoder.py  Normal file

@@ -0,0 +1,397 @@
# _*_ coding:utf-8 _*_
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F

PAD = 0


class VanillaAttention(nn.Module):
    """
    VanillaAttention: a single query vector attends over a sequence of keys/values.
    """

    def __init__(self, p):
        super(VanillaAttention, self).__init__()

        self.dropout = nn.Dropout(p)
        self.mask = None

    def forward(self, q, k, v, mask=None):
        dim_q = list(q.size())
        b_k, t_k, dim_k = list(k.size())
        b_v, t_v, dim_v = list(v.size())

        assert (b_k == b_v)  # batch sizes should be equal
        assert (t_k == t_v)  # time steps should be equal

        # q: (b, d) -> (b, d, 1); scores qk: (b, t_k)
        qk = torch.matmul(k, q.unsqueeze(2)).squeeze(2)
        # qk.div_(dim_k ** 0.5)
        if mask is not None:
            qk.masked_fill_(mask, -1e30)
        qk = F.softmax(qk, 1)

        return torch.bmm(qk.unsqueeze(1), v).squeeze(1)  # b,n


def get_attn_padding_mask(seq_q, seq_k):
    """
    Indicate the padding-related part to mask.

    :param seq_q: b x len_q token ids
    :param seq_k: b x len_k token ids
    :return: b x len_q x len_k mask, 1 where the key position is PAD
    """

    assert seq_q.dim() == 2 and seq_k.dim() == 2
    mb_size, len_q = seq_q.size()
    mb_size, len_k = seq_k.size()
    pad_attn_mask = seq_k.data.eq(PAD).unsqueeze(1)  # bx1xsk
    pad_attn_mask = pad_attn_mask.expand(mb_size, len_q, len_k)  # bxsqxsk
    return pad_attn_mask


class BiAttention(nn.Module):
    """
    BI-DIRECTIONAL ATTENTION FLOW as in the BiDAF model.
    """

    def __init__(self, dim):
        super(BiAttention, self).__init__()
        self.linear = nn.Linear(3 * dim, 1)

    def forward(self, x1, x1_mask, x2, x2_mask):
        """
        :param x1: b x n x d
        :param x2: b x m x d
        :param x1_mask: b x n
        :param x2_mask: b x m

        ####### calculation of the similarity matrix #######
        The similarity is computed at token level, i.e. between individual
        tokens of x1 and x2:
        S(n,m) = alpha(x1, x2) = W [x1, x2, x1*x2]

        ####### context to query attention #######
        # (b,n,m) = (b,1,m) --> (b,n,m)
        # x2_mask is broadcast over the rows, masking padded x2 columns:
        #          q1  q2  q3  PAD PAD
        # c1        1   1   1   0   0
        # c2        1   1   1   0   0
        # c3        1   1   1   0   0
        # c4        1   1   1   0   0
        # c5        1   1   1   0   0
        # PAD       1   1   1   0   0
        # PAD       1   1   1   0   0

        ####### query to context attention #######

        ******
        Here q2c first aligns the context to the query, and then c2q is applied
        once more to align the query back to the context.
        ******

        # (b,n,m) = (b,n,1) --> (b,n,m)
        # x1_mask is broadcast over the columns, masking padded x1 rows:
        #          q1  q2  q3  PAD PAD
        # c1        1   1   1   1   1
        # c2        1   1   1   1   1
        # c3        1   1   1   1   1
        # c4        1   1   1   1   1
        # c5        1   1   1   1   1
        # PAD       0   0   0   0   0
        # PAD       0   0   0   0   0
        """
        # (b,n,m,d) = (b,n,d) --> (b,n,1,d) --> (b,n,m,d)
        x1_aug = x1.unsqueeze(2).expand(x1.size(0), x1.size(1), x2.size(1), x1.size(2))

        # (b,n,m,d) = (b,m,d) --> (b,1,m,d) --> (b,n,m,d)
        x2_aug = x2.unsqueeze(1).expand(x1.size(0), x1.size(1), x2.size(1), x2.size(2))

        # (b,n,m,3d)
        x_input = torch.cat([x1_aug, x2_aug, x1_aug * x2_aug], dim=3)

        # (b,n,m) = (b,n,m,3d) --> (b,n,m,1) --> (b,n,m)
        similarity = self.linear(x_input).squeeze(3)

        ####### context to query attention #######
        # (b,n,m) = (b,1,m) --> (b,n,m); see the x2_mask illustration above
        # x2_mask = x2_mask[:, :x2.size(1)]
        # x1_mask = x1_mask[:, :x1.size(1)]

        # a_non_pad = x1_mask.ne(1).type(torch.float).unsqueeze(-1)
        # q2c_non_pad = x2_mask.ne(1).type(torch.float).unsqueeze(-1)

        x2_mask = x2_mask.unsqueeze(1).expand_as(similarity)
        # (b,n,m)
        similarity.data.masked_fill_(x2_mask.data, -2e20)
        # (b,n,m)
        sim_row = F.softmax(similarity, dim=2)
        # (b,n,d) = (b,n,m) * (b,m,d)
        c2q_att = sim_row.bmm(x2)

        # attn_a = a_non_pad * attn_a

        ####### query to context attention #######
        # (b,n,m) = (b,n,1) --> (b,n,m); see the x1_mask illustration above
        x1_mask = x1_mask.unsqueeze(2).expand_as(similarity)
        # (b,n,m) = (b,n,1) --> (b,n,m)
        # TODO: check similarity at this point (it no longer matters here)
        similarity.data.masked_fill_(x1_mask.data, -2e20)
        sim_col = F.softmax(similarity, dim=1)
        # (b,m,d) = (b,m,n) * (b,n,d)
        q2c = sim_col.transpose(1, 2).bmm(x1)
        # q2c = q2c_non_pad * q2c

        # (b,n,d) = (b,n,m) * (b,m,d)
        # q2c aligns the context to the query; c2q is then applied once more to
        # align the query back to the context.

        q2c_att = sim_row.bmm(q2c)
        # attn_b = a_non_pad * attn_b

        return torch.cat([x1, c2q_att, x1 * c2q_att, x1 * q2c_att], dim=-1)


class SDPAttention(nn.Module):
    """
    Scaled Dot-Product Attention from the Transformer.

    ####### self attention #######
    This is the simplest possible self-attention setup: q and k share the same
    input dimension and no linear projection is applied.
    # Padding mask for a sequence of 6 tokens followed by 2 PADs:
    #          t1  t2  t3  t4  t5  t6  PAD PAD
    # t1        1   1   1   1   1   1   0   0
    # t2        1   1   1   1   1   1   0   0
    # t3        1   1   1   1   1   1   0   0
    # t4        1   1   1   1   1   1   0   0
    # t5        1   1   1   1   1   1   0   0
    # t6        1   1   1   1   1   1   0   0
    # PAD       0   0   0   0   0   0   0   0
    # PAD       0   0   0   0   0   0   0   0
    """

    def __init__(self, p=0.1):
        super(SDPAttention, self).__init__()
        self.dropout = nn.Dropout(p)
        self.mask = None

    def set_mask(self, masked):
        # applies a mask of b x tq length
        self.mask = masked

    def forward(self, q, k, v):
        b_q, t_q, dim_q = list(q.size())
        b_k, t_k, dim_k = list(k.size())
        b_v, t_v, dim_v = list(v.size())

        assert (b_q == b_k and b_k == b_v)  # batch sizes should be equal
        assert (dim_q == dim_k)  # dims should be equal
        assert (t_k == t_v)  # time steps should be equal

        """
        similarity matrix calculation
        sm_qk = softmax(Q K^T / sqrt(d_k))      # in the paper d_k = 512 / 8
        """
        qk = torch.bmm(q, k.transpose(1, 2))  # b x t_q x t_k
        qk.div_(dim_k ** 0.5)
        qk.masked_fill_(self.mask, -1e9)
        sm_qk = self.dropout(F.softmax(qk, 2))

        return torch.bmm(sm_qk, v), sm_qk  # b x t_q x dim_v


class MultiHeadAttention(nn.Module):
    """
    Multi-head attention built from scaled dot-product (SDPAttention) heads.
    """

    def __init__(self, embed_dim=None, num_heads=None, dropout=0.1):

        super(MultiHeadAttention, self).__init__()

        self.head_dim = embed_dim // num_heads
        assert self.head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
        self.num_heads = num_heads
        self.input_size = self.head_dim * num_heads
        self.output_size = self.head_dim * num_heads
        self.linear_q = nn.Linear(self.input_size, self.input_size)
        self.linear_k = nn.Linear(self.input_size, self.input_size)
        self.linear_v = nn.Linear(self.input_size, self.input_size)
        self.linear_out = nn.Linear(self.input_size, self.output_size)
        self.sdp_attention = SDPAttention(p=dropout)
        self.relu = nn.ReLU()
        self.dp_out = nn.Dropout()

    def set_mask_sdp(self, masked):
        self.sdp_attention.set_mask(masked)

    def forward(self, q, k, v, attn_mask=None):

        if attn_mask is not None:
            self.set_mask_sdp(attn_mask)

        qw = self.linear_q(q)
        kw = self.linear_k(k)
        vw = self.linear_v(v)

        # use extra dim_size and chunk to simplify the repeat process
        qw = qw.chunk(self.num_heads, 2)
        kw = kw.chunk(self.num_heads, 2)
        vw = vw.chunk(self.num_heads, 2)

        output = []
        attention_scores = []
        for i in range(self.num_heads):
            out_h, score = self.sdp_attention(qw[i], kw[i], vw[i])
            output.append(out_h)
            attention_scores.append(score)

        output = torch.cat(output, 2)

        return self.linear_out(self.dp_out(self.relu(output)))


class AttentionLayer(nn.Module):
    """
    Params:
        num_units: Number of units used in the attention layer
    """

    def __init__(self, query_size, key_size, value_size=None, mode='bahdanau',
                 normalize=False, dropout=0, batch_first=False,
                 output_transform=True, output_nonlinearity='tanh', output_size=None):
        super(AttentionLayer, self).__init__()
        assert mode == 'bahdanau' or mode == 'dot_prod'
        value_size = value_size or key_size  # Usually key and values are the same
        self.mode = mode
        self.query_size = query_size
        self.key_size = key_size
        self.value_size = value_size
        self.normalize = normalize
        if mode == 'bahdanau':
            self.linear_att = nn.Linear(key_size, 1)
            if normalize:
                self.linear_att = nn.utils.weight_norm(self.linear_att)
        if output_transform:
            output_size = output_size or query_size
            self.linear_out = nn.Linear(query_size + key_size, output_size)
            self.output_size = output_size
        else:
            self.output_size = value_size
        self.linear_q = nn.Linear(query_size, key_size)
        self.dropout = nn.Dropout(dropout)
        self.batch_first = batch_first
        self.output_nonlinearity = output_nonlinearity
        self.mask = None

    def set_mask(self, mask):
        # applies a mask of b x t length
        self.mask = mask
        if mask is not None and not self.batch_first:
            self.mask = self.mask.t()

    def calc_score(self, att_query, att_keys):
        """
        att_query is: b x t_q x n
        att_keys is b x t_k x n
        return b x t_q x t_k scores
        """

        b, t_k, n = list(att_keys.size())
        t_q = att_query.size(1)
        if self.mode == 'bahdanau':
            att_query = att_query.unsqueeze(2).expand(b, t_q, t_k, n)
            att_keys = att_keys.unsqueeze(1).expand(b, t_q, t_k, n)
            sum_qk = att_query + att_keys
            sum_qk = sum_qk.view(b * t_k * t_q, n)
            out = self.linear_att(F.tanh(sum_qk)).view(b, t_q, t_k)
        elif self.mode == 'dot_prod':
            out = torch.bmm(att_query, att_keys.transpose(1, 2))
            if self.normalize:
                out.div_(n ** 0.5)
        return out

    def forward(self, query, keys, values=None):
        if not self.batch_first:
            keys = keys.transpose(0, 1)
            if values is not None:
                values = values.transpose(0, 1)
            if query.dim() == 3:
                query = query.transpose(0, 1)
        if query.dim() == 2:
            single_query = True
            query = query.unsqueeze(1)
        else:
            single_query = False
        values = keys if values is None else values

        b = query.size(0)
        t_k = keys.size(1)
        t_q = query.size(1)

        # Fully connected layers to transform query
        att_query = self.linear_q(query)

        scores = self.calc_score(att_query, keys)  # size b x t_q x t_k
        if self.mask is not None:
            mask = self.mask.unsqueeze(1).expand(b, t_q, t_k)
            scores.masked_fill_(mask, -1e12)

        # Normalize the scores
        scores_normalized = F.softmax(scores, dim=2)

        # Calculate the weighted average of the attention inputs
        # according to the scores
        scores_normalized = self.dropout(scores_normalized)
        context = torch.bmm(scores_normalized, values)  # b x t_q x n

        if hasattr(self, 'linear_out'):
            context = self.linear_out(torch.cat([query, context], 2))
            if self.output_nonlinearity == 'tanh':
                context = F.tanh(context)
            elif self.output_nonlinearity == 'relu':
                context = F.relu(context, inplace=True)
        if single_query:
            context = context.squeeze(1)
            scores_normalized = scores_normalized.squeeze(1)
        elif not self.batch_first:
            context = context.transpose(0, 1)
            scores_normalized = scores_normalized.transpose(0, 1)

        return context, scores_normalized


if __name__ == '__main__':
    feats = autograd.Variable(torch.LongTensor([list(range(10))] * 45))
    vocab_s = 8888
    embedding_d = 100
    repeat_t = 1
    interm_c = 10
    tagset_s = 18
    batch_s = 10
    seq_l = 45
    dilation_r = [1, 2, 4, 8, 16, 32, 64, 128]
    word_embeds = nn.Embedding(vocab_s, embedding_d)

    # emb = word_embeds(feats)
    # print emb.size()
    #
    # emb0 = emb[:, :5, :]
    # print emb0.size()
    #
    # ap = AttentionPooling(100, [100], 75, True)
    #
    # o = ap(emb, emb0, emb0)
    # print o
  0  layers/encoders/__init__.py  Normal file
  0  layers/encoders/cnns/__init__.py  Normal file
  0  layers/encoders/rnns/__init__.py  Normal file
141  layers/encoders/rnns/stacked_rnn.py  Normal file

@@ -0,0 +1,141 @@
# -*- encoding:utf-8 -*-

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable


class StackedBRNN(nn.Module):
    """Stacked Bi-directional RNNs.
    """

    def __init__(self, input_size, hidden_size, num_layers=1,
                 dropout_rate=0, dropout_output=True, rnn_type=nn.LSTM,
                 concat_layers=False, padding=True):
        super(StackedBRNN, self).__init__()
        self.padding = padding
        self.dropout_output = dropout_output
        self.dropout_rate = dropout_rate
        self.num_layers = num_layers
        self.concat_layers = concat_layers
        self.rnns = nn.ModuleList()
        for i in range(num_layers):
            input_size = input_size if i == 0 else 2 * hidden_size
            self.rnns.append(rnn_type(input_size, hidden_size,
                                      num_layers=1,
                                      bidirectional=True))

    def forward(self, x, x_mask):
        """Encode either padded or non-padded sequences.
        """
        if x_mask.data.sum() == 0:
            # No padding necessary.
            output = self._forward_unpadded(x, x_mask)
        elif self.padding or not self.training:
            # Pad if we care or if it's during eval.
            output = self._forward_padded(x, x_mask)
        else:
            # We don't care.
            output = self._forward_unpadded(x, x_mask)

        return output.contiguous()

    def _forward_unpadded(self, x, x_mask):
        """Faster encoding that ignores any padding."""
        # Transpose batch and sequence dims
        x = x.transpose(0, 1)

        # Encode all layers
        outputs = [x]
        for i in range(self.num_layers):
            rnn_input = outputs[-1]

            # Apply dropout to hidden input
            if self.dropout_rate > 0:
                rnn_input = F.dropout(rnn_input,
                                      p=self.dropout_rate,
                                      training=self.training)
            # Forward
            rnn_output = self.rnns[i](rnn_input)[0]
            outputs.append(rnn_output)

        # Concat hidden layers
        if self.concat_layers:
            output = torch.cat(outputs[1:], 2)
        else:
            output = outputs[-1]

        # Transpose back
        output = output.transpose(0, 1)

        # Dropout on output layer
        if self.dropout_output and self.dropout_rate > 0:
            output = F.dropout(output,
                               p=self.dropout_rate,
                               training=self.training)
        return output

    def _forward_padded(self, x, x_mask):
        """Slower (significantly), but more precise, encoding that handles
        padding.
        """
        # Compute sorted sequence lengths
        lengths = x_mask.data.eq(0).long().sum(1).squeeze()
        _, idx_sort = torch.sort(lengths, dim=0, descending=True)
        _, idx_unsort = torch.sort(idx_sort, dim=0)

        lengths = list(lengths[idx_sort])
        idx_sort = Variable(idx_sort)
        idx_unsort = Variable(idx_unsort)

        # Sort x
        x = x.index_select(0, idx_sort)

        # Transpose batch and sequence dims
        x = x.transpose(0, 1)

        # Pack it up
        rnn_input = nn.utils.rnn.pack_padded_sequence(x, lengths)

        # Encode all layers
        outputs = [rnn_input]
        for i in range(self.num_layers):
            rnn_input = outputs[-1]

            # Apply dropout to input
            if self.dropout_rate > 0:
                dropout_input = F.dropout(rnn_input.data,
                                          p=self.dropout_rate,
                                          training=self.training)
                rnn_input = nn.utils.rnn.PackedSequence(dropout_input,
                                                        rnn_input.batch_sizes)
            outputs.append(self.rnns[i](rnn_input)[0])

        # Unpack everything
        for i, o in enumerate(outputs[1:], 1):
            outputs[i] = nn.utils.rnn.pad_packed_sequence(o)[0]

        # Concat hidden layers or take final
        if self.concat_layers:
            output = torch.cat(outputs[1:], 2)
        else:
            output = outputs[-1]

        # Transpose and unsort
        output = output.transpose(0, 1)
        output = output.index_select(0, idx_unsort)

        # Pad up to original batch sequence length
        if output.size(1) != x_mask.size(1):
            padding = torch.zeros(output.size(0),
                                  x_mask.size(1) - output.size(1),
                                  output.size(2)).type(output.data.type())
            output = torch.cat([output, Variable(padding)], 1)

        # Dropout on output layer
        if self.dropout_output and self.dropout_rate > 0:
            output = F.dropout(output,
                               p=self.dropout_rate,
                               training=self.training)
        return output
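A short usage sketch (not part of the committed file): it assumes the DrQA-style mask convention used above, where `x_mask` is 1 at padded positions and 0 at real tokens, and an older PyTorch (0.4/1.x) matching the `Variable` calls in the class.

```python
# Hypothetical example: encode a padded batch with StackedBRNN.
import torch
from layers.encoders.rnns.stacked_rnn import StackedBRNN

batch, seq_len, emb_dim, hidden = 4, 12, 50, 64
x = torch.randn(batch, seq_len, emb_dim)
x_mask = torch.zeros(batch, seq_len).byte()   # 1 = PAD, 0 = real token
x_mask[2, 9:] = 1                             # third example is shorter

encoder = StackedBRNN(input_size=emb_dim, hidden_size=hidden,
                      num_layers=2, dropout_rate=0.2,
                      concat_layers=True, padding=True)
encoder.eval()                                # eval() takes the packed/padded path
out = encoder(x, x_mask)
print(out.size())                             # (batch, seq_len, 2 * hidden * num_layers)
```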
  0  layers/encoders/transformers/__init__.py  Normal file
  0  layers/encoders/transformers/bert/__init__.py  Normal file
302  layers/encoders/transformers/bert/bert_model.py  Normal file

@@ -0,0 +1,302 @@
# _*_ coding:utf-8 _*_

from __future__ import absolute_import, division, print_function, unicode_literals

import copy
import math
import sys

import torch
from torch import nn

from layers.encoders.transformers.bert.layernorm import BertLayerNorm
from layers.encoders.transformers.bert.bert_pretrain import BertPreTrainedModel

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "pytorch_model.bin"


def gelu(x):
    """Implementation of the gelu activation function.
    For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
    0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    Also see https://arxiv.org/abs/1606.08415
    """
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))


def swish(x):
    return x * torch.sigmoid(x)


ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}


class BertSelfOutput(nn.Module):
    def __init__(self, config):
        super(BertSelfOutput, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class BertSelfAttention(nn.Module):
    def __init__(self, config):
        super(BertSelfAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads))
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask):
        mixed_query_layer = self.query(hidden_states)
        mixed_key_layer = self.key(hidden_states)
        mixed_value_layer = self.value(hidden_states)

        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask (precomputed for all layers in BertModel forward() function)
        attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        return context_layer


class BertAttention(nn.Module):
    def __init__(self, config):
        super(BertAttention, self).__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)

    def forward(self, input_tensor, attention_mask):
        self_output = self.self(input_tensor, attention_mask)
        attention_output = self.output(self_output, input_tensor)
        return attention_output


class BertIntermediate(nn.Module):
    def __init__(self, config):
        super(BertIntermediate, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states


class BertOutput(nn.Module):
    def __init__(self, config):
        super(BertOutput, self).__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class BertLayer(nn.Module):
    def __init__(self, config):
        super(BertLayer, self).__init__()
        self.attention = BertAttention(config)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)

    def forward(self, hidden_states, attention_mask):
        attention_output = self.attention(hidden_states, attention_mask)
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output


class BertEncoder(nn.Module):
    def __init__(self, config):
        super(BertEncoder, self).__init__()
        layer = BertLayer(config)
        self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])

    def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
        all_encoder_layers = []
        for layer_module in self.layer:
            hidden_states = layer_module(hidden_states, attention_mask)
            if output_all_encoded_layers:
                all_encoder_layers.append(hidden_states)
        if not output_all_encoded_layers:
            all_encoder_layers.append(hidden_states)
        return all_encoder_layers


class BertPooler(nn.Module):
    def __init__(self, config):
        super(BertPooler, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token.
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output


class BertEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings.
    """

    def __init__(self, config):
        super(BertEmbeddings, self).__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids, token_type_ids=None):
        seq_length = input_ids.size(1)
        position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
        position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)

        words_embeddings = self.word_embeddings(input_ids)
        position_embeddings = self.position_embeddings(position_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)

        embeddings = words_embeddings + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings


class BertModel(BertPreTrainedModel):
    """BERT model ("Bidirectional Embedding Representations from a Transformer").

    Params:
        config: a BertConfig class instance with the configuration to build a new model

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
            `extract_features.py`, `run_classifier.py` and `run_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`.

    Outputs: Tuple of (encoded_layers, pooled_output)
        `encoded_layers`: controlled by `output_all_encoded_layers` argument:
            - `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end
                of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each
                encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size],
            - `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding
                to the last attention block of shape [batch_size, sequence_length, hidden_size],
        `pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a
            classifier pretrained on top of the hidden state associated to the first character of the
            input (`CLS`) to train on the Next-Sentence task (see BERT's paper).

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

    config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)

    model = modeling.BertModel(config=config)
    all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
    ```
    """

    def __init__(self, config):
        super(BertModel, self).__init__(config)
        self.embeddings = BertEmbeddings(config)
        self.encoder = BertEncoder(config)
        self.pooler = BertPooler(config)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids=None, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True):
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)

        # We create a 3D attention mask from a 2D tensor mask.
        # Sizes are [batch_size, 1, 1, to_seq_length]
        # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is more simple than the triangular masking of causal attention
        # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)

        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0

        embedding_output = self.embeddings(input_ids, token_type_ids)
        encoded_layers = self.encoder(embedding_output,
                                      extended_attention_mask,
                                      output_all_encoded_layers=output_all_encoded_layers)
        sequence_output = encoded_layers[-1]
        pooled_output = self.pooler(sequence_output)
        if not output_all_encoded_layers:
            encoded_layers = encoded_layers[-1]
        return encoded_layers, pooled_output
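The mask arithmetic in `BertModel.forward` can be checked in isolation; a small illustration with plain tensors (values are placeholders, no model is needed):

```python
# How the 2-D padding mask becomes an additive attention bias.
import torch

attention_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])    # 1 = real token, 0 = PAD
extended = attention_mask.unsqueeze(1).unsqueeze(2)          # [batch, 1, 1, seq_len]
extended = extended.to(dtype=torch.float)
extended = (1.0 - extended) * -10000.0
print(extended[1, 0, 0])   # real tokens get 0., the PAD column gets -10000.
# Added to the raw scores before the softmax, -10000 effectively removes PAD keys.
```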
302  layers/encoders/transformers/bert/bert_optimization.py  Normal file

@@ -0,0 +1,302 @@
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch optimization for BERT model."""

import math
import torch
from torch.optim import Optimizer
from torch.optim.optimizer import required
from torch.nn.utils import clip_grad_norm_
import logging
import abc
import sys

logger = logging.getLogger(__name__)


if sys.version_info >= (3, 4):
    ABC = abc.ABC
else:
    ABC = abc.ABCMeta('ABC', (), {})


class _LRSchedule(ABC):
    """ Parent of all LRSchedules here. """
    warn_t_total = False  # is set to True for schedules where progressing beyond t_total steps doesn't make sense

    def __init__(self, warmup=0.002, t_total=-1, **kw):
        """
        :param warmup: what fraction of t_total steps will be used for linear warmup
        :param t_total: how many training steps (updates) are planned
        :param kw:
        """
        super(_LRSchedule, self).__init__(**kw)
        if t_total < 0:
            logger.warning("t_total value of {} results in schedule not being applied".format(t_total))
        if not 0.0 <= warmup < 1.0 and not warmup == -1:
            raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup))
        warmup = max(warmup, 0.)
        self.warmup, self.t_total = float(warmup), float(t_total)
        self.warned_for_t_total_at_progress = -1

    def get_lr(self, step, nowarn=False):
        """
        :param step: which of t_total steps we're on
        :param nowarn: set to True to suppress warning regarding training beyond specified 't_total' steps
        :return: learning rate multiplier for current update
        """
        if self.t_total < 0:
            return 1.
        progress = float(step) / self.t_total
        ret = self.get_lr_(progress)
        # warning for exceeding t_total (only active with warmup_linear)
        if not nowarn and self.warn_t_total and progress > 1. and progress > self.warned_for_t_total_at_progress:
            logger.warning(
                "Training beyond specified 't_total'. Learning rate multiplier set to {}. Please set 't_total' of {} correctly."
                .format(ret, self.__class__.__name__))
            self.warned_for_t_total_at_progress = progress
        # end warning
        return ret

    @abc.abstractmethod
    def get_lr_(self, progress):
        """
        :param progress: value between 0 and 1 (unless going beyond t_total steps) specifying training progress
        :return: learning rate multiplier for current update
        """
        return 1.


class ConstantLR(_LRSchedule):
    def get_lr_(self, progress):
        return 1.


class WarmupCosineSchedule(_LRSchedule):
    """
    Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps.
    Decreases learning rate from 1. to 0. over remaining `1 - warmup` steps following a cosine curve.
    If `cycles` (default=0.5) is different from default, learning rate follows cosine function after warmup.
    """
    warn_t_total = True

    def __init__(self, warmup=0.002, t_total=-1, cycles=.5, **kw):
        """
        :param warmup: see LRSchedule
        :param t_total: see LRSchedule
        :param cycles: number of cycles. Default: 0.5, corresponding to cosine decay from 1. at progress==warmup and 0 at progress==1.
        :param kw:
        """
        super(WarmupCosineSchedule, self).__init__(warmup=warmup, t_total=t_total, **kw)
        self.cycles = cycles

    def get_lr_(self, progress):
        if progress < self.warmup:
            return progress / self.warmup
        else:
            progress = (progress - self.warmup) / (1 - self.warmup)  # progress after warmup
            return 0.5 * (1. + math.cos(math.pi * self.cycles * 2 * progress))


class WarmupCosineWithHardRestartsSchedule(WarmupCosineSchedule):
    """
    Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps.
    If `cycles` (default=1.) is different from default, learning rate follows `cycles` times a cosine decaying
    learning rate (with hard restarts).
    """
    def __init__(self, warmup=0.002, t_total=-1, cycles=1., **kw):
        super(WarmupCosineWithHardRestartsSchedule, self).__init__(warmup=warmup, t_total=t_total, cycles=cycles, **kw)
        assert(cycles >= 1.)

    def get_lr_(self, progress):
        if progress < self.warmup:
            return progress / self.warmup
        else:
            progress = (progress - self.warmup) / (1 - self.warmup)  # progress after warmup
            ret = 0.5 * (1. + math.cos(math.pi * ((self.cycles * progress) % 1)))
            return ret


class WarmupCosineWithWarmupRestartsSchedule(WarmupCosineWithHardRestartsSchedule):
    """
    All training progress is divided in `cycles` (default=1.) parts of equal length.
    Every part follows a schedule with the first `warmup` fraction of the training steps linearly increasing from 0. to 1.,
    followed by a learning rate decreasing from 1. to 0. following a cosine curve.
    """
    def __init__(self, warmup=0.002, t_total=-1, cycles=1., **kw):
        assert(warmup * cycles < 1.)
        warmup = warmup * cycles if warmup >= 0 else warmup
        super(WarmupCosineWithWarmupRestartsSchedule, self).__init__(warmup=warmup, t_total=t_total, cycles=cycles, **kw)

    def get_lr_(self, progress):
        progress = progress * self.cycles % 1.
        if progress < self.warmup:
            return progress / self.warmup
        else:
            progress = (progress - self.warmup) / (1 - self.warmup)  # progress after warmup
            ret = 0.5 * (1. + math.cos(math.pi * progress))
            return ret


class WarmupConstantSchedule(_LRSchedule):
    """
    Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps.
    Keeps learning rate equal to 1. after warmup.
    """
    def get_lr_(self, progress):
        if progress < self.warmup:
            return progress / self.warmup
        return 1.


class WarmupLinearSchedule(_LRSchedule):
    """
    Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps.
    Linearly decreases learning rate from 1. to 0. over remaining `1 - warmup` steps.
    """
    warn_t_total = True

    def get_lr_(self, progress):
        if progress < self.warmup:
            return progress / self.warmup
        return max((progress - 1.) / (self.warmup - 1.), 0.)


SCHEDULES = {
    None: ConstantLR,
    "none": ConstantLR,
    "warmup_cosine": WarmupCosineSchedule,
    "warmup_constant": WarmupConstantSchedule,
    "warmup_linear": WarmupLinearSchedule
}


class BertAdam(Optimizer):
    """Implements BERT version of Adam algorithm with weight decay fix.
    Params:
        lr: learning rate
        warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
        t_total: total number of training steps for the learning
            rate schedule, -1 means constant learning rate of 1. (no warmup regardless of warmup setting). Default: -1
        schedule: schedule to use for the warmup (see above).
            Can be `'warmup_linear'`, `'warmup_constant'`, `'warmup_cosine'`, `'none'`, `None` or a `_LRSchedule` object (see below).
            If `None` or `'none'`, learning rate is always kept constant.
            Default : `'warmup_linear'`
        b1: Adams b1. Default: 0.9
        b2: Adams b2. Default: 0.999
        e: Adams epsilon. Default: 1e-6
        weight_decay: Weight decay. Default: 0.01
        max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0
    """

    def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear',
                 b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01, max_grad_norm=1.0, **kwargs):
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
        if not isinstance(schedule, _LRSchedule) and schedule not in SCHEDULES:
            raise ValueError("Invalid schedule parameter: {}".format(schedule))
        if not 0.0 <= b1 < 1.0:
            raise ValueError("Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1))
        if not 0.0 <= b2 < 1.0:
            raise ValueError("Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2))
        if not e >= 0.0:
            raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e))
        # initialize schedule object
        if not isinstance(schedule, _LRSchedule):
            schedule_type = SCHEDULES[schedule]
            schedule = schedule_type(warmup=warmup, t_total=t_total)
        else:
            if warmup != -1 or t_total != -1:
                logger.warning("warmup and t_total on the optimizer are ineffective when _LRSchedule object is provided as schedule. "
                               "Please specify custom warmup and t_total in _LRSchedule object.")
        defaults = dict(lr=lr, schedule=schedule,
                        b1=b1, b2=b2, e=e, weight_decay=weight_decay,
                        max_grad_norm=max_grad_norm)
        super(BertAdam, self).__init__(params, defaults)

    def get_lr(self):
        lr = []
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                if len(state) == 0:
                    return [0]
                lr_scheduled = group['lr']
                lr_scheduled *= group['schedule'].get_lr(state['step'])
                lr.append(lr_scheduled)
        return lr

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['next_m'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['next_v'] = torch.zeros_like(p.data)

                next_m, next_v = state['next_m'], state['next_v']
                beta1, beta2 = group['b1'], group['b2']

                # Add grad clipping
                if group['max_grad_norm'] > 0:
                    clip_grad_norm_(p, group['max_grad_norm'])

                # Decay the first and second moment running average coefficient
                # In-place operations to update the averages at the same time
                next_m.mul_(beta1).add_(1 - beta1, grad)
                next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad)
                update = next_m / (next_v.sqrt() + group['e'])

                # Just adding the square of the weights to the loss function is *not*
                # the correct way of using L2 regularization/weight decay with Adam,
                # since that will interact with the m and v parameters in strange ways.
                #
                # Instead we want to decay the weights in a manner that doesn't interact
                # with the m/v parameters. This is equivalent to adding the square
                # of the weights to the loss with plain (non-momentum) SGD.
                if group['weight_decay'] > 0.0:
                    update += group['weight_decay'] * p.data

                lr_scheduled = group['lr']
                lr_scheduled *= group['schedule'].get_lr(state['step'])

                update_with_lr = lr_scheduled * update
                p.data.add_(-update_with_lr)

                state['step'] += 1

                # step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1
                # No bias correction
                # bias_correction1 = 1 - beta1 ** state['step']
                # bias_correction2 = 1 - beta2 ** state['step']

        return loss
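A hedged usage sketch for `BertAdam` (not part of the committed file): the parameter grouping mirrors the usual BERT fine-tuning recipe of disabling weight decay on biases and LayerNorm weights, and `model` plus `num_train_steps` are placeholders.

```python
# Hypothetical fine-tuning setup: BertAdam with a warmup-linear schedule.
import torch.nn as nn
from layers.encoders.transformers.bert.bert_optimization import BertAdam

model = nn.Linear(768, 2)          # stand-in for a real BERT-based classifier
num_train_steps = 1000

no_decay = ['bias', 'LayerNorm.weight']
grouped_params = [
    {'params': [p for n, p in model.named_parameters()
                if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
    {'params': [p for n, p in model.named_parameters()
                if any(nd in n for nd in no_decay)], 'weight_decay': 0.0},
]
optimizer = BertAdam(grouped_params, lr=5e-5,
                     warmup=0.1,               # first 10% of steps ramp the LR up
                     t_total=num_train_steps,  # then decay linearly to 0
                     schedule='warmup_linear')
```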
248  layers/encoders/transformers/bert/bert_pretrain.py  Normal file

@@ -0,0 +1,248 @@
from __future__ import absolute_import, division, print_function, unicode_literals

import copy
import json
import logging
import os
import sys
from io import open

import torch
from torch import nn

from layers.encoders.transformers.bert.layernorm import BertLayerNorm

logger = logging.getLogger(__name__)

BERT_CONFIG_NAME = 'bert_config.json'
WEIGHTS_NAME = "pytorch_model.bin"


def cached_path(filename):
    """
    Given something that might be a URL (or might be a local path),
    determine which. If it's a URL, download the file and cache it, and
    return the path to the cached file. If it's already a local path,
    make sure the file exists and then return the path.
    """
    if sys.version_info[0] == 3:
        filename = str(filename)

    if os.path.exists(filename):
        # File, and it exists.
        return filename


class BertConfig(object):
    """Configuration class to store the configuration of a `BertModel`.
    """

    def __init__(self,
                 vocab_size_or_config_json_file,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 type_vocab_size=2,
                 initializer_range=0.02,
                 layer_norm_eps=1e-12):
        """Constructs BertConfig.

        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads for each attention layer in
                the Transformer encoder.
            intermediate_size: The size of the "intermediate" (i.e., feed-forward)
                layer in the Transformer encoder.
            hidden_act: The non-linear activation function (function or string) in the
                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: The dropout ratio for the attention
                probabilities.
            max_position_embeddings: The maximum sequence length that this model might
                ever be used with. Typically set this to something large just in case
                (e.g., 512 or 1024 or 2048).
            type_vocab_size: The vocabulary size of the `token_type_ids` passed into
                `BertModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
                initializing all weight matrices.
            layer_norm_eps: The epsilon used by LayerNorm.
        """
        if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
                                                               and isinstance(vocab_size_or_config_json_file, unicode)):
            with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
            self.layer_norm_eps = layer_norm_eps
        else:
            raise ValueError("First argument must be either a vocabulary size (int) "
                             "or the path to a pretrained model config file (str)")

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `BertConfig` from a Python dictionary of parameters."""
        config = BertConfig(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `BertConfig` from a json file of parameters."""
        with open(json_file, "r", encoding='utf-8') as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        return output

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"

    def to_json_file(self, json_file_path):
        """ Save this instance to a json file."""
        with open(json_file_path, "w", encoding='utf-8') as writer:
            writer.write(self.to_json_string())


class BertPreTrainedModel(nn.Module):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """

    def __init__(self, config, *inputs, **kwargs):
        super(BertPreTrainedModel, self).__init__()
        if not isinstance(config, BertConfig):
            raise ValueError(
                "Parameter config in `{}(config)` should be an instance of class `BertConfig`. "
                "To create a model from a Google pretrained model use "
                "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
                    self.__class__.__name__, self.__class__.__name__
                ))
        self.config = config

    def init_bert_weights(self, module):
        """ Initialize the weights.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, BertLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
        """
        Instantiate a BertPreTrainedModel from a pre-trained model file or a pytorch state dict.
        Download and cache the pre-trained model file if needed.

        Params:
            pretrained_model_name_or_path: either:
                - a path to a pretrained model archive containing:
                    . `bert_config.json` a configuration file for the model
                    . `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance
        """

        archive_file = pretrained_model_name_or_path

        # redirect to the cache, if necessary
        try:
            resolved_archive_file = cached_path(archive_file)
        except EnvironmentError:
            logger.error(
                "We assumed '{}' was a path or url but couldn't find any file "
                "associated to this path or url.".format(archive_file))
            return None

        if resolved_archive_file == archive_file:
            logger.info("loading archive file {}".format(archive_file))

        serialization_dir = resolved_archive_file

        # Load config
        config_file = os.path.join(serialization_dir, BERT_CONFIG_NAME)
        config = BertConfig.from_json_file(config_file)
        logger.info("Model config {}".format(config))

        # Instantiate model.
        model = cls(config, *inputs, **kwargs)

        weights_path = os.path.join(serialization_dir, WEIGHTS_NAME)
        state_dict = torch.load(weights_path, map_location='cpu')

        # Load from a PyTorch state_dict
        old_keys = []
        new_keys = []
        for key in state_dict.keys():
            new_key = None
            if 'gamma' in key:
                new_key = key.replace('gamma', 'weight')
            if 'beta' in key:
                new_key = key.replace('beta', 'bias')
            if new_key:
                old_keys.append(key)
                new_keys.append(new_key)
        for old_key, new_key in zip(old_keys, new_keys):
            state_dict[new_key] = state_dict.pop(old_key)

        missing_keys = []
        unexpected_keys = []
        error_msgs = []
        # copy state_dict so _load_from_state_dict can modify it
        metadata = getattr(state_dict, '_metadata', None)
        state_dict = state_dict.copy()
        if metadata is not None:
            state_dict._metadata = metadata

        def load(module, prefix=''):
            local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
            module._load_from_state_dict(
                state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
            for name, child in module._modules.items():
                if child is not None:
                    load(child, prefix + name + '.')

        start_prefix = ''
        if not hasattr(model, 'bert') and any(s.startswith('bert.') for s in state_dict.keys()):
            start_prefix = 'bert.'
        load(model, prefix=start_prefix)
        if len(missing_keys) > 0:
            logger.info("Weights of {} not initialized from pretrained model: {}".format(
                model.__class__.__name__, missing_keys))
        if len(unexpected_keys) > 0:
            logger.info("Weights from pretrained model not used in {}: {}".format(
                model.__class__.__name__, unexpected_keys))
        if len(error_msgs) > 0:
            raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
                model.__class__.__name__, "\n\t".join(error_msgs)))
        return model
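A small sketch of how this loader is meant to be called (assumption: the path is a placeholder for a local directory that holds `bert_config.json` and `pytorch_model.bin`, as the docstring above describes):

```python
# Hypothetical loading of a local pretrained checkpoint (path is a placeholder).
from layers.encoders.transformers.bert.bert_model import BertModel
from layers.encoders.transformers.bert.bert_pretrain import BertConfig

config = BertConfig.from_json_file("/path/to/bert-base-chinese/bert_config.json")
print(config)      # dumps the JSON config

# from_pretrained resolves the directory, reads bert_config.json and
# pytorch_model.bin, and renames old gamma/beta keys to weight/bias.
model = BertModel.from_pretrained("/path/to/bert-base-chinese")
```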
430  layers/encoders/transformers/bert/bert_tokenization.py  Normal file

@@ -0,0 +1,430 @@
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""

from __future__ import absolute_import, division, print_function, unicode_literals

import collections
import logging
import os
import sys
import unicodedata
from io import open

logger = logging.getLogger(__name__)

PRETRAINED_VOCAB_ARCHIVE_MAP = {
    'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt",
    'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt",
    'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt",
    'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt",
    'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt",
    'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt",
    'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt",
}
PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP = {
    'bert-base-uncased': 512,
    'bert-large-uncased': 512,
    'bert-base-cased': 512,
    'bert-large-cased': 512,
    'bert-base-multilingual-uncased': 512,
    'bert-base-multilingual-cased': 512,
    'bert-base-chinese': 512,
}
VOCAB_NAME = 'vocab.txt'


def cached_path(filename):
    """
    Given something that might be a URL (or might be a local path), resolve it
    to a local file path. This trimmed-down version only handles paths that
    already exist locally and returns them unchanged; the download-and-cache
    branch of the original implementation is not included, so anything else
    resolves to None.
    """
    if sys.version_info[0] == 3:
        filename = str(filename)

    if os.path.exists(filename):
        # Local file, and it exists.
        return filename


def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    index = 0
    with open(vocab_file, "r", encoding="utf-8") as reader:
        while True:
            token = reader.readline()
            if not token:
                break
            token = token.strip()
            vocab[token] = index
            index += 1
    return vocab
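
def _demo_load_vocab():
    # Illustrative sketch (not part of the original commit): `load_vocab`
    # returns a token -> index OrderedDict in file order. The tiny vocabulary
    # below is hypothetical and written to a temp file just for the demo.
    import tempfile

    with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False, encoding='utf-8') as f:
        f.write('[PAD]\n[UNK]\n[CLS]\n[SEP]\nhello\n')
        tmp_vocab_path = f.name

    vocab = load_vocab(tmp_vocab_path)
    # OrderedDict([('[PAD]', 0), ('[UNK]', 1), ('[CLS]', 2), ('[SEP]', 3), ('hello', 4)])
    return vocab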
def whitespace_tokenize(text):
    """Runs basic whitespace cleaning and splitting on a piece of text."""
    text = text.strip()
    if not text:
        return []
    tokens = text.split()
    return tokens


class BertTokenizer(object):
    """Runs end-to-end tokenization: punctuation splitting + wordpiece."""

    def __init__(self, vocab_file, do_lower_case=True, max_len=None, do_basic_tokenize=True,
                 never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")):
        """Constructs a BertTokenizer.

        Args:
            vocab_file: Path to a one-wordpiece-per-line vocabulary file.
            do_lower_case: Whether to lower case the input.
                Only has an effect when do_basic_tokenize=True.
            do_basic_tokenize: Whether to do basic tokenization before wordpiece.
            max_len: An artificial maximum length to truncate tokenized sequences to;
                the effective maximum length is always the minimum of this
                value (if specified) and the underlying BERT model's
                sequence length.
            never_split: List of tokens which will never be split during tokenization.
                Only has an effect when do_basic_tokenize=True.
        """
        if not os.path.isfile(vocab_file):
            raise ValueError(
                "Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
                "model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
        self.vocab = load_vocab(vocab_file)
        self.ids_to_tokens = collections.OrderedDict(
            [(ids, tok) for tok, ids in self.vocab.items()])
        self.do_basic_tokenize = do_basic_tokenize
        if do_basic_tokenize:
            self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case,
                                                  never_split=never_split)
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
        self.max_len = max_len if max_len is not None else int(1e12)

    def tokenize(self, text):
        split_tokens = []
        if self.do_basic_tokenize:
            for token in self.basic_tokenizer.tokenize(text):
                for sub_token in self.wordpiece_tokenizer.tokenize(token):
                    split_tokens.append(sub_token)
        else:
            split_tokens = self.wordpiece_tokenizer.tokenize(text)
        return split_tokens

    def convert_tokens_to_ids(self, tokens):
        """Converts a sequence of tokens into ids using the vocab."""
        ids = []
        for token in tokens:
            # Tokens missing from the vocabulary fall back to id 100
            # ([UNK] in the released BERT vocabularies).
            ids.append(self.vocab.get(token, 100))
        if len(ids) > self.max_len:
            logger.warning(
                "Token indices sequence length is longer than the specified maximum "
                "sequence length for this BERT model ({} > {}). Running this"
                " sequence through BERT will result in indexing errors".format(len(ids), self.max_len)
            )
        return ids

    def convert_ids_to_tokens(self, ids):
        """Converts a sequence of ids into wordpiece tokens using the vocab."""
        tokens = []
        for i in ids:
            tokens.append(self.ids_to_tokens[i])
        return tokens

    def save_vocabulary(self, vocab_path):
        """Save the tokenizer vocabulary to a directory or file."""
        index = 0
        if os.path.isdir(vocab_path):
            vocab_file = os.path.join(vocab_path, VOCAB_NAME)
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning("Saving vocabulary to {}: vocabulary indices are not consecutive."
                                   " Please check that the vocabulary is not corrupted!".format(vocab_file))
                    index = token_index
                writer.write(token + u'\n')
                index += 1
        return vocab_file

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
        """
        Instantiate a BertTokenizer from a pre-trained vocabulary file.
        Resolve (and, in the original implementation, download and cache)
        the vocabulary file if needed.
        """
        if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP:
            vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path]
            if '-cased' in pretrained_model_name_or_path and kwargs.get('do_lower_case', True):
                logger.warning("The pre-trained model you are loading is a cased model but you have not set "
                               "`do_lower_case` to False. We are setting `do_lower_case=False` for you but "
                               "you may want to check this behavior.")
                kwargs['do_lower_case'] = False
            elif '-cased' not in pretrained_model_name_or_path and not kwargs.get('do_lower_case', True):
                logger.warning("The pre-trained model you are loading is an uncased model but you have set "
                               "`do_lower_case` to False. We are setting `do_lower_case=True` for you "
                               "but you may want to check this behavior.")
                kwargs['do_lower_case'] = True
        else:
            vocab_file = pretrained_model_name_or_path
        if os.path.isdir(vocab_file):
            vocab_file = os.path.join(vocab_file, VOCAB_NAME)
        # redirect to the cache, if necessary
        try:
            resolved_vocab_file = cached_path(vocab_file)
        except EnvironmentError:
            logger.error(
                "Model name '{}' was not found in model name list ({}). "
                "We assumed '{}' was a path or url but couldn't find any file "
                "associated to this path or url.".format(
                    pretrained_model_name_or_path,
                    ', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
                    vocab_file))
            return None
        if resolved_vocab_file == vocab_file:
            logger.info("loading vocabulary file {}".format(vocab_file))
        else:
            logger.info("loading vocabulary file {} from cache at {}".format(
                vocab_file, resolved_vocab_file))
        if pretrained_model_name_or_path in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP:
            # If we're using a pretrained model, make sure the tokenizer won't index
            # sequences longer than the number of positional embeddings.
            max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name_or_path]
            kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len)
        # Instantiate tokenizer.
        tokenizer = cls(resolved_vocab_file, *inputs, **kwargs)
        return tokenizer
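
def _demo_bert_tokenizer():
    # Illustrative sketch (not part of the original commit): end-to-end use of
    # BertTokenizer with a tiny hypothetical vocabulary written to a temp file.
    # With a real checkpoint one would call
    # BertTokenizer.from_pretrained('bert-base-chinese') instead; since this
    # module's cached_path does not download, a local file is used here.
    import tempfile

    tokens = ['[PAD]', '[UNK]', '[CLS]', '[SEP]', '[MASK]', 'un', '##aff', '##able', 'hello']
    with tempfile.NamedTemporaryFile('w', delete=False, encoding='utf-8') as f:
        f.write('\n'.join(tokens) + '\n')
        vocab_path = f.name

    tokenizer = BertTokenizer(vocab_path, do_lower_case=True)
    pieces = tokenizer.tokenize('Hello unaffable')
    ids = tokenizer.convert_tokens_to_ids(pieces)
    # pieces == ['hello', 'un', '##aff', '##able']
    # ids    == [8, 5, 6, 7]
    return pieces, ids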
class BasicTokenizer(object):
    """Runs basic tokenization (punctuation splitting, lower casing, etc.)."""

    def __init__(self,
                 do_lower_case=True,
                 never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")):
        """Constructs a BasicTokenizer.

        Args:
            do_lower_case: Whether to lower case the input.
        """
        self.do_lower_case = do_lower_case
        self.never_split = never_split

    def tokenize(self, text):
        """Tokenizes a piece of text."""
        text = self._clean_text(text)
        # This was added on November 1st, 2018 for the multilingual and Chinese
        # models. It is now also applied to the English models, but it doesn't
        # matter since the English models were not trained on any Chinese data
        # and generally don't have any Chinese data in them (there are Chinese
        # characters in the vocabulary only because English Wikipedia contains
        # some Chinese words).
        text = self._tokenize_chinese_chars(text)
        orig_tokens = whitespace_tokenize(text)
        split_tokens = []
        for token in orig_tokens:
            if self.do_lower_case and token not in self.never_split:
                token = token.lower()
                token = self._run_strip_accents(token)
            split_tokens.extend(self._run_split_on_punc(token))

        output_tokens = whitespace_tokenize(" ".join(split_tokens))
        return output_tokens

    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _run_split_on_punc(self, text):
        """Splits punctuation on a piece of text."""
        if text in self.never_split:
            return [text]
        chars = list(text)
        i = 0
        start_new_word = True
        output = []
        while i < len(chars):
            char = chars[i]
            if _is_punctuation(char):
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                start_new_word = False
                output[-1].append(char)
            i += 1

        return ["".join(x) for x in output]

    def _tokenize_chinese_chars(self, text):
        """Adds whitespace around any CJK character."""
        output = []
        for char in text:
            cp = ord(char)
            if self._is_chinese_char(cp):
                output.append(" ")
                output.append(char)
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # This defines a "Chinese character" as anything in the CJK Unicode blocks:
        # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as are Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and are handled
        # like all of the other languages.
        if ((cp >= 0x4E00 and cp <= 0x9FFF) or
                (cp >= 0x3400 and cp <= 0x4DBF) or
                (cp >= 0x20000 and cp <= 0x2A6DF) or
                (cp >= 0x2A700 and cp <= 0x2B73F) or
                (cp >= 0x2B740 and cp <= 0x2B81F) or
                (cp >= 0x2B820 and cp <= 0x2CEAF) or
                (cp >= 0xF900 and cp <= 0xFAFF) or
                (cp >= 0x2F800 and cp <= 0x2FA1F)):
            return True

        return False

    def _clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        output = []
        for char in text:
            cp = ord(char)
            if cp == 0 or cp == 0xfffd or _is_control(char):
                continue
            if _is_whitespace(char):
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)
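
def _demo_basic_tokenizer():
    # Illustrative sketch (not part of the original commit): BasicTokenizer
    # lower-cases, strips accents, splits punctuation, and puts spaces around
    # CJK characters, so Chinese text ends up character-tokenized.
    tokenizer = BasicTokenizer(do_lower_case=True)
    tokens = tokenizer.tokenize(u'Héllo, 北京!')
    # tokens == ['hello', ',', '北', '京', '!']
    return tokens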
class WordpieceTokenizer(object):
    """Runs WordPiece tokenization."""

    def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        """Tokenizes a piece of text into its word pieces.

        This uses a greedy longest-match-first algorithm to perform tokenization
        using the given vocabulary.

        For example:
            input = "unaffable"
            output = ["un", "##aff", "##able"]

        Args:
            text: A single token or whitespace separated tokens. This should have
                already been passed through `BasicTokenizer`.

        Returns:
            A list of wordpiece tokens.
        """

        output_tokens = []
        for token in whitespace_tokenize(text):
            chars = list(token)
            if len(chars) > self.max_input_chars_per_word:
                output_tokens.append(self.unk_token)
                continue

            is_bad = False
            start = 0
            sub_tokens = []
            while start < len(chars):
                end = len(chars)
                cur_substr = None
                while start < end:
                    substr = "".join(chars[start:end])
                    if start > 0:
                        substr = "##" + substr
                    if substr in self.vocab:
                        cur_substr = substr
                        break
                    end -= 1
                if cur_substr is None:
                    is_bad = True
                    break
                sub_tokens.append(cur_substr)
                start = end

            if is_bad:
                output_tokens.append(self.unk_token)
            else:
                output_tokens.extend(sub_tokens)
        return output_tokens
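
def _demo_wordpiece_tokenizer():
    # Illustrative sketch (not part of the original commit): the greedy
    # longest-match-first behaviour described in the docstring above, using a
    # tiny hypothetical vocabulary.
    vocab = {'[UNK]': 0, 'un': 1, '##aff': 2, '##able': 3}
    tokenizer = WordpieceTokenizer(vocab=vocab)
    assert tokenizer.tokenize('unaffable') == ['un', '##aff', '##able']
    # A word that cannot be fully covered by the vocabulary collapses to [UNK].
    assert tokenizer.tokenize('xyz') == ['[UNK]']
    return True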
def _is_whitespace(char):
    """Checks whether `char` is a whitespace character."""
    # \t, \n, and \r are technically control characters but we treat them
    # as whitespace since they are generally considered as such.
    if char == " " or char == "\t" or char == "\n" or char == "\r":
        return True
    cat = unicodedata.category(char)
    if cat == "Zs":
        return True
    return False


def _is_control(char):
    """Checks whether `char` is a control character."""
    # \t, \n, and \r are technically control characters, but we count them as
    # whitespace characters here.
    if char == "\t" or char == "\n" or char == "\r":
        return False
    cat = unicodedata.category(char)
    if cat.startswith("C"):
        return True
    return False


def _is_punctuation(char):
    """Checks whether `char` is a punctuation character."""
    cp = ord(char)
    # We treat all non-letter/number ASCII as punctuation.
    # Characters such as "^", "$", and "`" are not in the Unicode
    # Punctuation class but we treat them as punctuation anyway, for
    # consistency.
    if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
            (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
22
layers/encoders/transformers/bert/layernorm.py
Normal file
22
layers/encoders/transformers/bert/layernorm.py
Normal file
@ -0,0 +1,22 @@
# _*_ coding:utf-8 _*_

from __future__ import absolute_import, division, print_function, unicode_literals

import torch
from torch import nn


class BertLayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-12):
        """Construct a layernorm module in the TF style (epsilon inside the square root)."""
        super(BertLayerNorm, self).__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.bias = nn.Parameter(torch.zeros(hidden_size))
        self.variance_epsilon = eps

    def forward(self, x):
        u = x.mean(-1, keepdim=True)               # per-position mean over the hidden dimension
        s = (x - u).pow(2).mean(-1, keepdim=True)  # per-position (biased) variance
        x = (x - u) / torch.sqrt(s + self.variance_epsilon)
        return self.weight * x + self.bias
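
def _demo_bert_layernorm():
    # Illustrative sketch (not part of the original commit): normalisation is
    # over the last (hidden) dimension, then a learned per-feature affine is
    # applied. With the default weight=1, bias=0 initialisation, each position
    # of the output has roughly zero mean and unit variance.
    layer_norm = BertLayerNorm(hidden_size=8)
    x = torch.randn(2, 4, 8)  # (batch, seq_len, hidden)
    y = layer_norm(x)
    print(y.mean(-1), y.var(-1, unbiased=False))
    return y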