Skip to content

Commit acb6fba

Browse files
Myle Ott (facebook-github-bot)
authored and committed
Fix torch.hub to not depend on libnat
Summary: Pull Request resolved: fairinternal/fairseq-py#878 Differential Revision: D17661768 Pulled By: myleott fbshipit-source-id: 1e4c5f09eb14c40d491ca2459fd2adb8382fb6d2
1 parent 1351972 commit acb6fba

File tree

2 files changed

+30
-2
lines changed

2 files changed

+30
-2
lines changed

fairseq/models/insertion_transformer.py

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@
66
import numpy as np
77
import torch
88
import torch.nn.functional as F
9-
from fairseq import libnat
9+
1010
from fairseq.models import register_model, register_model_architecture
1111
from fairseq.models.levenshtein_transformer import (
1212
LevenshteinTransformerDecoder,
@@ -51,6 +51,13 @@ def compute_score_full(self, L, tau):
5151

5252

5353
def _get_ins_targets(in_tokens, out_tokens, padding_idx, unk_idx, vocab_size, tau=None):
54+
try:
55+
from fairseq import libnat
56+
except ImportError as e:
57+
import sys
58+
sys.stderr.write('ERROR: missing libnat. run `pip install --editable .`\n')
59+
raise e
60+
5461
B = in_tokens.size(0)
5562
T = in_tokens.size(1)
5663
V = vocab_size

fairseq/models/levenshtein_transformer.py

Lines changed: 22 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@
55

66
import torch
77
import torch.nn.functional as F
8-
from fairseq import libnat
8+
99
from fairseq.models import register_model, register_model_architecture
1010
from fairseq.models.model_utils import fill_tensors as _fill, skip_tensors as _skip
1111
from fairseq.models.transformer import (
@@ -18,6 +18,13 @@
1818

1919

2020
def _get_ins_targets(in_tokens, out_tokens, padding_idx, unk_idx):
21+
try:
22+
from fairseq import libnat
23+
except ImportError as e:
24+
import sys
25+
sys.stderr.write('ERROR: missing libnat. run `pip install --editable .`\n')
26+
raise e
27+
2128
in_seq_len, out_seq_len = in_tokens.size(1), out_tokens.size(1)
2229

2330
with torch.cuda.device_of(in_tokens):
@@ -60,6 +67,13 @@ def _get_ins_targets(in_tokens, out_tokens, padding_idx, unk_idx):
6067

6168

6269
def _get_del_targets(in_tokens, out_tokens, padding_idx):
70+
try:
71+
from fairseq import libnat
72+
except ImportError as e:
73+
import sys
74+
sys.stderr.write('ERROR: missing libnat. run `pip install --editable .`\n')
75+
raise e
76+
6377
out_seq_len = out_tokens.size(1)
6478

6579
with torch.cuda.device_of(in_tokens):
@@ -86,6 +100,13 @@ def _get_del_targets(in_tokens, out_tokens, padding_idx):
86100

87101

88102
def _get_del_ins_targets(in_tokens, out_tokens, padding_idx):
103+
try:
104+
from fairseq import libnat
105+
except ImportError as e:
106+
import sys
107+
sys.stderr.write('ERROR: missing libnat. run `pip install --editable .`\n')
108+
raise e
109+
89110
in_seq_len, out_seq_len = in_tokens.size(1), out_tokens.size(1)
90111

91112
with torch.cuda.device_of(in_tokens):

0 commit comments

Comments
 (0)