Skip to content

Commit

Permalink
[WIP] Fix attention option when using fast beam search (#1073)
Browse files — browse the repository at this point in the history
Fix attention option when using fast beam search
Branch information:
shhshn authored and vince62s committed Nov 26, 2018
1 parent b085d57 commit 254d3d9
Showing 1 changed file with 4 additions and 3 deletions.
7 changes: 4 additions & 3 deletions onmt/translate/translator.py
Original file line number Diff line number Diff line change
Expand Up @@ -188,7 +188,8 @@ def translate(self,
all_predictions = []

for batch in data_iter:
batch_data = self.translate_batch(batch, data, fast=self.fast)
batch_data = self.translate_batch(batch, data, attn_debug,
fast=self.fast)
translations = builder.from_batch(batch_data)

for trans in translations:
Expand Down Expand Up @@ -268,7 +269,7 @@ def translate(self,
codecs.open(self.dump_beam, 'w', 'utf-8'))
return all_scores, all_predictions

def translate_batch(self, batch, data, fast=False):
def translate_batch(self, batch, data, attn_debug, fast=False):
"""
Translate a batch of sentences.
Expand All @@ -290,7 +291,7 @@ def translate_batch(self, batch, data, fast=False):
self.max_length,
min_length=self.min_length,
n_best=self.n_best,
return_attention=self.replace_unk)
return_attention=attn_debug or self.replace_unk)
else:
return self._translate_batch(batch, data)

Expand Down

0 comments on commit 254d3d9

Please sign in to comment.