Skip to content

Commit

Permalink
v0.7.1: fix PyPI setup and tests
Browse files Browse the repository at this point in the history
Summary: Pull Request resolved: #818

Differential Revision: D15916265

Pulled By: myleott

fbshipit-source-id: c66c0bd988d3472c4150226952f34ee8d4c3db86
  • Loading branch information
myleott authored and facebook-github-bot committed Jun 20, 2019
1 parent 9462a81 commit 881381c
Show file tree
Hide file tree
Showing 6 changed files with 34 additions and 24 deletions.
4 changes: 2 additions & 2 deletions docs/conf.py
Original file line number Diff line number Diff line change
Expand Up @@ -60,9 +60,9 @@
# built documents.
#
# The short X.Y version.
version = '0.7.0'
version = '0.7.1'
# The full version, including alpha/beta/rc tags.
release = '0.7.0'
release = '0.7.1'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
Expand Down
2 changes: 1 addition & 1 deletion fairseq/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
# can be found in the PATENTS file in the same directory.

__all__ = ['pdb']
__version__ = '0.7.0'
__version__ = '0.7.1'

import fairseq.criterions
import fairseq.models
Expand Down
Empty file added fairseq/data/audio/__init__.py
Empty file.
15 changes: 9 additions & 6 deletions fairseq/trainer.py
Original file line number Diff line number Diff line change
Expand Up @@ -233,11 +233,11 @@ def train_step(self, samples, dummy_batch=False, raise_oom=False):
# forward and backward pass
logging_outputs, sample_sizes, ooms = [], [], 0
for i, sample in enumerate(samples):
sample = self._prepare_sample(sample, self.args.fp16)
sample = self._prepare_sample(sample)
if sample is None:
# when sample is None, run forward/backward on a dummy batch
# and ignore the resulting gradients
sample = self._prepare_sample(self._dummy_batch, self.args.fp16)
sample = self._prepare_sample(self._dummy_batch)
ignore_grad = True
else:
ignore_grad = False
Expand Down Expand Up @@ -381,9 +381,9 @@ def valid_step(self, sample, raise_oom=False):
self.model.eval()
self.criterion.eval()

sample = self._prepare_sample(sample, self.args.fp16)
sample = self._prepare_sample(sample)
if sample is None:
sample = self._prepare_sample(self._dummy_batch, self.args.fp16)
sample = self._prepare_sample(self._dummy_batch)
ignore_results = True
else:
ignore_results = False
Expand Down Expand Up @@ -488,7 +488,7 @@ def set_num_updates(self, num_updates):
self._num_updates = num_updates
self.lr_step_update()

def _prepare_sample(self, sample, fp16):
def _prepare_sample(self, sample):
if sample is None or len(sample) == 0:
return None

Expand All @@ -500,7 +500,10 @@ def apply_half(t):
return t.half()
return t

return utils.apply(apply_half, sample) if fp16 else sample
if self.args.fp16:
sample = utils.apply_to_sample(apply_half, sample)

return sample

def _set_seed(self):
# Set seed based on args.seed and the update number so that we get
Expand Down
34 changes: 20 additions & 14 deletions fairseq/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,26 +31,32 @@ def load_ensemble_for_inference(filenames, task, model_arg_overrides=None):
)


def apply_to_sample(f, sample):
    """Recursively apply *f* to every tensor contained in *sample*.

    *sample* may be a tensor, a dict, or a list, nested to any depth.
    The container structure is preserved: only tensor leaves are
    transformed, and any non-tensor leaf is passed through unchanged.

    Args:
        f: callable taking a single tensor and returning a tensor.
        sample: arbitrarily nested structure of tensors, dicts and lists.

    Returns:
        A structure mirroring *sample* with ``f`` applied to each tensor;
        an empty *sample* yields an empty dict.
    """
    if len(sample) == 0:
        # Preserve the historical contract: empty input maps to {}.
        return {}

    def _apply(x):
        if torch.is_tensor(x):
            return f(x)
        elif isinstance(x, dict):
            return {key: _apply(value) for key, value in x.items()}
        elif isinstance(x, list):
            # Distinct loop variable: the original comprehension reused
            # ``x`` as both the list and the element, shadowing the outer
            # name inside its own branch.
            return [_apply(item) for item in x]
        else:
            # Non-tensor leaves (ints, strings, None, ...) pass through.
            return x

    return _apply(sample)


def move_to_cuda(sample):
    """Return *sample* with every contained tensor moved to the GPU.

    The nested dict/list structure is preserved; non-tensor leaves are
    returned unchanged (delegates traversal to ``apply_to_sample``).
    """

    def _to_cuda(tensor):
        # ``.cuda()`` places the tensor on the current CUDA device.
        return tensor.cuda()

    return apply_to_sample(_to_cuda, sample)


INCREMENTAL_STATE_INSTANCE_ID = defaultdict(lambda: 0)
Expand Down
3 changes: 2 additions & 1 deletion setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@

setup(
name='fairseq',
version='0.7.0',
version='0.7.1',
description='Facebook AI Research Sequence-to-Sequence Toolkit',
url='https://github.com/pytorch/fairseq',
classifiers=[
Expand All @@ -40,6 +40,7 @@
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
long_description=readme,
long_description_content_type='text/markdown',
install_requires=[
'cffi',
'numpy',
Expand Down

0 comments on commit 881381c

Please sign in to comment.