MNT: Compat with pytest 8.1 #219

Merged: 5 commits, Jan 10, 2024
7 changes: 5 additions & 2 deletions .github/workflows/test_and_publish.yml
@@ -13,6 +13,10 @@ on:
   # Allow manual runs through the web UI
   workflow_dispatch:
 
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
 jobs:
   test:
     uses: OpenAstronomy/github-actions-workflows/.github/workflows/tox.yml@v1
@@ -42,8 +46,7 @@ jobs:
         - linux: py311-test-mpl36
         - linux: py311-test-mpl37
         # Test different versions of pytest
-        # Skip pytestdev until hook wrapper issue is fixed
-        # - linux: py312-test-mpldev-pytestdev
+        - linux: py312-test-mpldev-pytestdev
         - linux: py39-test-mpl33-pytest62
         - linux: py38-test-mpl31-pytest54
       coverage: 'codecov'
123 changes: 66 additions & 57 deletions pytest_mpl/plugin.py
@@ -128,7 +128,7 @@ def wrapper(*args, **kwargs):
     item.obj = figure_interceptor(plugin, item.obj)
 
 
-def pytest_report_header(config, startdir):
+def pytest_report_header():
    import matplotlib
    import matplotlib.ft2font
    return ["Matplotlib: {0}".format(matplotlib.__version__),
@@ -803,67 +803,76 @@ def pytest_runtest_call(self, item): # noqa
 
         # Run test and get figure object
         wrap_figure_interceptor(self, item)
-        yield
-        if test_name not in self.return_value:
-            # Test function did not complete successfully
-            summary['status'] = 'failed'
-            summary['status_msg'] = ('Test function raised an exception '
-                                     'before returning a figure.')
-            self._test_results[test_name] = summary
-            return
-        fig = self.return_value[test_name]
-
-        if remove_text:
-            remove_ticks_and_titles(fig)
-
-        result_dir = self.make_test_results_dir(item)
-
-        # What we do now depends on whether we are generating the
-        # reference images or simply running the test.
-        if self.generate_dir is not None:
-            summary['status'] = 'skipped'
-            summary['image_status'] = 'generated'
-            summary['status_msg'] = 'Skipped test, since generating image.'
-            generate_image = self.generate_baseline_image(item, fig)
-            if self.results_always:  # Make baseline image available in HTML
-                result_image = (result_dir / f"baseline.{ext}").absolute()
-                shutil.copy(generate_image, result_image)
-                summary['baseline_image'] = \
-                    result_image.relative_to(self.results_dir).as_posix()
-
-        if self.generate_hash_library is not None:
-            summary['hash_status'] = 'generated'
-            image_hash = self.generate_image_hash(item, fig)
-            self._generated_hash_library[test_name] = image_hash
-            summary['baseline_hash'] = image_hash
-
-        # Only test figures if not generating images
-        if self.generate_dir is None:
-            # Compare to hash library
-            if self.hash_library or compare.kwargs.get('hash_library', None):
-                msg = self.compare_image_to_hash_library(item, fig, result_dir, summary=summary)
-
-            # Compare against a baseline if specified
-            else:
-                msg = self.compare_image_to_baseline(item, fig, result_dir, summary=summary)
-
-        close_mpl_figure(fig)
-
-        if msg is None:
-            if not self.results_always:
-                shutil.rmtree(result_dir)
-                for image_type in ['baseline_image', 'diff_image', 'result_image']:
-                    summary[image_type] = None  # image no longer exists
-        else:
-            self._test_results[test_name] = summary
-            pytest.fail(msg, pytrace=False)
-
-        close_mpl_figure(fig)
-
-        self._test_results[test_name] = summary
-
-        if summary['status'] == 'skipped':
-            pytest.skip(summary['status_msg'])
+        # See https://github.com/pytest-dev/pytest/issues/11714
+        result = yield
+        try:
+            if test_name not in self.return_value:
+                # Test function did not complete successfully
+                summary['status'] = 'failed'
+                summary['status_msg'] = ('Test function raised an exception '
+                                         'before returning a figure.')
+                self._test_results[test_name] = summary
+                return
+
+            fig = self.return_value[test_name]
+
+            if remove_text:
+                remove_ticks_and_titles(fig)
+
+            result_dir = self.make_test_results_dir(item)
+
+            # What we do now depends on whether we are generating the
+            # reference images or simply running the test.
+            if self.generate_dir is not None:
+                summary['status'] = 'skipped'
+                summary['image_status'] = 'generated'
+                summary['status_msg'] = 'Skipped test, since generating image.'
+                generate_image = self.generate_baseline_image(item, fig)
+                if self.results_always:  # Make baseline image available in HTML
+                    result_image = (result_dir / f"baseline.{ext}").absolute()
+                    shutil.copy(generate_image, result_image)
+                    summary['baseline_image'] = \
+                        result_image.relative_to(self.results_dir).as_posix()
+
+            if self.generate_hash_library is not None:
+                summary['hash_status'] = 'generated'
+                image_hash = self.generate_image_hash(item, fig)
+                self._generated_hash_library[test_name] = image_hash
+                summary['baseline_hash'] = image_hash
+
+            # Only test figures if not generating images
+            if self.generate_dir is None:
+                # Compare to hash library
+                if self.hash_library or compare.kwargs.get('hash_library', None):
+                    msg = self.compare_image_to_hash_library(item, fig, result_dir, summary=summary)
+
+                # Compare against a baseline if specified
+                else:
+                    msg = self.compare_image_to_baseline(item, fig, result_dir, summary=summary)
+
+            close_mpl_figure(fig)
+
+            if msg is None:
+                if not self.results_always:
+                    shutil.rmtree(result_dir)
+                    for image_type in ['baseline_image', 'diff_image', 'result_image']:
+                        summary[image_type] = None  # image no longer exists
+            else:
+                self._test_results[test_name] = summary
+                pytest.fail(msg, pytrace=False)
+
+            close_mpl_figure(fig)
+
+            self._test_results[test_name] = summary
+
+            if summary['status'] == 'skipped':
+                pytest.skip(summary['status_msg'])
+        except BaseException as e:
+            if hasattr(result, "force_exception"):  # pluggy>=1.2.0
+                result.force_exception(e)
+            else:
+                result._result = None
+                result._excinfo = (type(e), e, e.__traceback__)
 
     def generate_summary_json(self):
         json_file = self.results_dir / 'results.json'
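The rewritten block above is the heart of the pytest 8.1 fix: per the issue linked in the code (pytest-dev/pytest#11714), an exception raised after the ``yield`` of an old-style hookwrapper is no longer reported reliably, so the wrapper captures the pluggy Result object and forces the exception into it instead. A minimal self-contained sketch of the same pattern, with a hypothetical ``post_process`` step standing in for the plugin's figure-comparison logic:

import pytest


def post_process(item):
    # Hypothetical follow-up step; anything raised here (including
    # pytest.fail, which raises a BaseException subclass) must reach
    # the test report.
    if item is None:
        raise RuntimeError("no test item to process")


@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_call(item):
    # Capture the pluggy Result object rather than using a bare ``yield``.
    result = yield
    try:
        post_process(item)
    except BaseException as e:
        if hasattr(result, "force_exception"):  # pluggy>=1.2.0
            result.force_exception(e)
        else:
            # Older pluggy has no force_exception; set the private
            # fields it uses internally so the exception is reported.
            result._result = None
            result._excinfo = (type(e), e, e.__traceback__)

``Result.force_exception`` only exists on pluggy 1.2.0 and newer, hence the guarded fallback to pluggy's private attributes, mirroring the compatibility branch in the diff.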
6 changes: 3 additions & 3 deletions setup.cfg
@@ -55,11 +55,11 @@ filterwarnings =
     ignore:The NumPy module was reloaded
 
 [flake8]
-max-line-length = 100
-ignore = W504
+max-line-length = 120
+ignore = W503,W504
 
 [pycodestyle]
-max_line_length = 100
+max_line_length = 120
 
 [isort]
 balanced_wrapping = True
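For reference, W503 and W504 are pycodestyle's two mutually exclusive warnings about wrapping lines at binary operators, so ignoring both permits either wrapping style. A small illustration (not code from this repository):

subtotal = 1200
tax = 96

# W503 flags a line break *before* a binary operator ...
total = (subtotal
         + tax)

# ... while W504 flags a break *after* one. Ignoring both in the
# flake8 config above allows either form to pass linting.
total = (subtotal +
         tax)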