Fix input embeddings
LysandreJik committed Nov 26, 2019
1 parent bdfe21a commit f2f3294
Showing 3 changed files with 8 additions and 3 deletions.
2 changes: 2 additions & 0 deletions transformers/tests/modeling_albert_test.py
@@ -49,6 +49,7 @@ def __init__(self,
                      use_token_type_ids=True,
                      use_labels=True,
                      vocab_size=99,
+                     embedding_size=16,
                      hidden_size=36,
                      num_hidden_layers=6,
                      num_hidden_groups=6,
@@ -73,6 +74,7 @@ def __init__(self,
             self.use_token_type_ids = use_token_type_ids
             self.use_labels = use_labels
             self.vocab_size = vocab_size
+            self.embedding_size = embedding_size
             self.hidden_size = hidden_size
             self.num_hidden_layers = num_hidden_layers
             self.num_attention_heads = num_attention_heads
2 changes: 2 additions & 0 deletions transformers/tests/modeling_tf_albert_test.py
@@ -54,6 +54,7 @@ def __init__(self,
                      use_token_type_ids=True,
                      use_labels=True,
                      vocab_size=99,
+                     embedding_size=16,
                      hidden_size=32,
                      num_hidden_layers=5,
                      num_attention_heads=4,
@@ -77,6 +78,7 @@ def __init__(self,
             self.use_token_type_ids = use_token_type_ids
             self.use_labels = use_labels
             self.vocab_size = vocab_size
+            self.embedding_size = embedding_size
             self.hidden_size = hidden_size
             self.num_hidden_layers = num_hidden_layers
             self.num_attention_heads = num_attention_heads
7 changes: 4 additions & 3 deletions transformers/tests/modeling_tf_common_test.py
@@ -426,9 +426,10 @@ def test_inputs_embeds(self):
             try:
                 x = wte([input_ids], mode="embedding")
             except:
-                x = tf.ones(input_ids.shape + [self.model_tester.hidden_size], dtype=tf.dtypes.float32)
-                # ^^ In our TF models, the input_embeddings can take slightly different forms,
-                # so we try two of them and fall back to just synthetically creating a dummy tensor of ones.
+                if hasattr(self.model_tester, "embedding_size"):
+                    x = tf.ones(input_ids.shape + [model.config.embedding_size], dtype=tf.dtypes.float32)
+                else:
+                    x = tf.ones(input_ids.shape + [self.model_tester.hidden_size], dtype=tf.dtypes.float32)
             inputs_dict["inputs_embeds"] = x
             outputs = model(inputs_dict)

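Note (not part of the commit): a minimal, self-contained sketch of the fallback this change introduces. The names DummyTester and dummy_inputs_embeds are hypothetical, and the width is read from the tester in both branches here, whereas the real test reads model.config.embedding_size when the tester declares an embedding_size.

import tensorflow as tf

class DummyTester:
    """Hypothetical stand-in for the model tester fixtures above."""
    hidden_size = 36
    embedding_size = 16  # only declared by testers whose models use a separate embedding width

def dummy_inputs_embeds(tester, input_ids):
    # Mirror the patched fallback: size the dummy embeddings tensor by
    # embedding_size when the tester declares one, otherwise by hidden_size.
    width = getattr(tester, "embedding_size", tester.hidden_size)
    return tf.ones(input_ids.shape + [width], dtype=tf.float32)

input_ids = tf.constant([[5, 3, 8], [1, 2, 4]])
print(dummy_inputs_embeds(DummyTester(), input_ids).shape)  # (2, 3, 16)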
