mirror of
https://github.com/awslabs/mlm-scoring.git
synced 2021-10-10 02:35:08 +03:00
Update to transformers~=3.3.1
setup.py
@@ -18,7 +18,7 @@ setup(
         'regex',
         'sacrebleu',
         'mosestokenizer',
-        'transformers~=2.9.1'
+        'transformers~=3.3.1'
     ],

     extras_require={
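The only substantive change here is the version pin: `~=` is pip's compatible-release operator, so `transformers~=3.3.1` accepts any 3.3.x release at or above 3.3.1 but rejects 3.4 and later. A minimal sketch of what the specifier accepts, using the `packaging` library (the candidate versions below are illustrative only):

    # Sketch: what the '~=' (compatible release) pin accepts.
    # Requires the 'packaging' package, which pip/setuptools already depend on.
    from packaging.specifiers import SpecifierSet

    spec = SpecifierSet("~=3.3.1")           # equivalent to >=3.3.1, ==3.3.*
    for candidate in ["3.3.1", "3.3.9", "3.4.0", "2.9.1"]:
        print(candidate, candidate in spec)  # True, True, False, False

The remaining hunks belong to the package's BERT model module (its path is not shown in this mirrored view); they port `BertForMaskedLMOptimized` from the transformers 2.9 API to the 3.3 API.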
@@ -27,7 +27,10 @@ __all__ = ['BERTClassifier', 'BERTRegression']
 from mxnet.gluon import Block
 from mxnet.gluon import nn
 # PyTorch-based
-import transformers
+from transformers import BertForMaskedLM
+from transformers.modeling_outputs import MaskedLMOutput
+from torch.nn import CrossEntropyLoss
+
 
 class BERTRegression(Block):
     """Model for sentence (pair) regression task with BERT.
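A quick sanity check that an environment matches the new imports — a sketch, assuming a transformers 3.3.x install; the logits tensor is made up:

    import torch
    import transformers
    from transformers.modeling_outputs import MaskedLMOutput

    print(transformers.__version__)  # expected to be a 3.3.x release under the new pin

    # MaskedLMOutput drops None fields when treated as a tuple, so index 0 is the logits here.
    out = MaskedLMOutput(loss=None, logits=torch.zeros(1, 1, 30522))
    print(out.logits.shape)  # attribute access
    print(out[0].shape)      # tuple-style access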
@@ -135,11 +138,11 @@ class BERTClassifier(Block):
         _, pooler_out = self.bert(inputs, token_types, valid_length)
         return self.classifier(pooler_out)
 
-# This overrides HuggingFace Transformer's implementation to only return targeted positions, similar to what MXNet does
+### CURRENT AS OF TRANSFORMERS 3.3.1 ###
+# These override HuggingFace Transformers' implementations to only compute targeted positions (for speed), similar to MXNet
 
-class BertForMaskedLMOptimized(transformers.BertForMaskedLM):
+class BertForMaskedLMOptimized(BertForMaskedLM):
 
-    # @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)
     def forward(
         self,
         input_ids=None,
@@ -148,49 +151,35 @@ class BertForMaskedLMOptimized(transformers.BertForMaskedLM):
         position_ids=None,
         head_mask=None,
         inputs_embeds=None,
-        masked_lm_labels=None,
         encoder_hidden_states=None,
         encoder_attention_mask=None,
-        lm_labels=None,
-        select_positions=None
+        labels=None,
+        output_attentions=None,
+        output_hidden_states=None,
+        return_dict=None,
+        # New argument:
+        select_positions=None,
+        **kwargs
     ):
         r"""
-        masked_lm_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
+        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
             Labels for computing the masked language modeling loss.
             Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
             Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels
             in ``[0, ..., config.vocab_size]``
-        lm_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
-            Labels for computing the left-to-right language modeling loss (next word prediction).
-            Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
-            Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels
-            in ``[0, ..., config.vocab_size]``
-        Returns:
-            :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
-            masked_lm_loss (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
-                Masked language modeling loss.
-            ltr_lm_loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`lm_labels` is provided):
-                Next token prediction loss.
-            prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`)
-                Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
-            hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
-                Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
-                of shape :obj:`(batch_size, sequence_length, hidden_size)`.
-                Hidden-states of the model at the output of each layer plus the initial embedding outputs.
-            attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
-                Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
-                :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
-                Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
-                heads.
-        Examples::
-            from transformers import BertTokenizer, BertForMaskedLM
-            import torch
-            tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
-            model = BertForMaskedLM.from_pretrained('bert-base-uncased')
-            input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)  # Batch size 1
-            outputs = model(input_ids, masked_lm_labels=input_ids)
-            loss, prediction_scores = outputs[:2]
+        kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
+            Used to hide legacy arguments that have been deprecated.
         """
+        if "masked_lm_labels" in kwargs:
+            warnings.warn(
+                "The `masked_lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.",
+                FutureWarning,
+            )
+            labels = kwargs.pop("masked_lm_labels")
+        assert "lm_labels" not in kwargs, "Use `BertWithLMHead` for autoregressive language modeling task."
+        assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}."
+
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
 
         outputs = self.bert(
             input_ids,
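The renamed `labels` argument keeps the usual Hugging Face masked-LM convention: positions set to -100 are skipped by the loss, which is why the code below can construct `CrossEntropyLoss()` with its default `ignore_index`. A toy sketch of that behaviour (shapes and values are made up):

    import torch
    from torch.nn import CrossEntropyLoss

    vocab_size = 6
    logits = torch.randn(1, 4, vocab_size)       # (batch, seq_len, vocab)
    labels = torch.tensor([[-100, 2, -100, 5]])  # only positions 1 and 3 are scored

    loss_fct = CrossEntropyLoss()                # ignore_index defaults to -100
    loss = loss_fct(logits.view(-1, vocab_size), labels.view(-1))
    print(loss.item())                           # mean over the two labelled positions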
@@ -201,18 +190,33 @@ class BertForMaskedLMOptimized(transformers.BertForMaskedLM):
             inputs_embeds=inputs_embeds,
             encoder_hidden_states=encoder_hidden_states,
             encoder_attention_mask=encoder_attention_mask,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
         )
 
         sequence_output = outputs[0]
 
+        ### START MODIFICATION
+        # Only apply MLM head to desired positions
         if select_positions is not None:
             sequence_output = sequence_output[[[i] for i in range(sequence_output.shape[0])],select_positions,:]
+        ### END MODIFICATION
 
         prediction_scores = self.cls(sequence_output)
 
-        outputs = (prediction_scores,) + outputs[2:]  # Add hidden states and attention if they are here
+        masked_lm_loss = None
+        if labels is not None:
+            loss_fct = CrossEntropyLoss()  # -100 index = padding token
+            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
 
-        assert masked_lm_labels is None
-        assert lm_labels is None
+        if not return_dict:
+            output = (prediction_scores,) + outputs[2:]
+            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
 
-        return outputs  # prediction_scores, (hidden_states), (attentions)
+        return MaskedLMOutput(
+            loss=masked_lm_loss,
+            logits=prediction_scores,
+            hidden_states=outputs.hidden_states,
+            attentions=outputs.attentions,
+        )
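The `select_positions` slice above uses advanced indexing to keep only one (or a few) hidden states per batch element before the MLM head runs, which is where the speedup comes from. A standalone sketch of that indexing, with shapes chosen purely for illustration:

    import torch

    batch_size, seq_len, hidden = 2, 5, 4
    sequence_output = torch.arange(batch_size * seq_len * hidden, dtype=torch.float).view(batch_size, seq_len, hidden)

    # One target position per sentence: position 1 in the first, position 3 in the second.
    select_positions = torch.tensor([[1], [3]])

    # Row indices [[0], [1]] broadcast against the selected columns, giving
    # shape (batch_size, num_selected, hidden) instead of (batch_size, seq_len, hidden).
    picked = sequence_output[[[i] for i in range(sequence_output.shape[0])], select_positions, :]
    print(picked.shape)  # torch.Size([2, 1, 4])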
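Putting the pieces together, a usage sketch of the updated class (assuming `BertForMaskedLMOptimized` as defined above is importable from the package; the model name, sentence, and position are illustrative only):

    import torch
    from transformers import BertTokenizer

    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    model = BertForMaskedLMOptimized.from_pretrained("bert-base-uncased")  # from_pretrained works on the subclass
    model.eval()

    input_ids = torch.tensor([tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)])

    # Mask position 4 ("dog") and score only that position instead of the whole sequence.
    masked = input_ids.clone()
    masked[0, 4] = tokenizer.mask_token_id
    with torch.no_grad():
        out = model(masked, select_positions=torch.tensor([[4]]), return_dict=True)

    print(out.logits.shape)  # (1, 1, vocab_size): one scored position
    log_probs = out.logits.log_softmax(dim=-1)
    print(log_probs[0, 0, input_ids[0, 4]].item())  # log-probability of the original token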