Remove test_all_sets_modified_image_counts.py
- Remove `all_sets_modified.cfg` too
- Update `build_workflow.yml` to separate testing steps
- Move `complete_run.py` to `tests/integration`
- Move `_compare_images()` to `tests/integration/utils.py`
- Update `testing.rst` with the correct `complete_run.py` path and `pytest` commands
tomvothecoder committed Dec 13, 2023
1 parent d3e0c9a commit 5eaf7b3
Showing 10 changed files with 96 additions and 310 deletions.
12 changes: 10 additions & 2 deletions .github/workflows/build_workflow.yml
@@ -99,10 +99,18 @@ jobs:
           mamba info

       - if: ${{ steps.skip_check.outputs.should_skip != 'true' }}
-        name: Run Tests
+        name: Run Unit Tests
+        run: pytest tests/e3sm_diags
+
+      - if: ${{ steps.skip_check.outputs.should_skip != 'true' }}
+        name: Download Integration Test Data
+        run: python -m tests.integration.download_data
+
+      - if: ${{ steps.skip_check.outputs.should_skip != 'true' }}
+        name: Run Integration Tests
         env:
           CHECK_IMAGES: False
-        run: bash tests/test.sh
+        run: pytest tests/integration

   publish-docs:
     if: ${{ github.event_name == 'push' }}
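Taken together, the single "Run Tests" step becomes three: unit tests, data download, then integration tests. A sketch of reproducing the same sequence locally from the repository root, assuming a fully configured development environment (the env handling mirrors the workflow's `CHECK_IMAGES: False`):

    import os
    import subprocess

    # Step 1: run only the fast unit tests, as in "Run Unit Tests".
    subprocess.run(["pytest", "tests/e3sm_diags"], check=True)

    # Step 2: fetch the integration test data, as in "Download Integration Test Data".
    subprocess.run(["python", "-m", "tests.integration.download_data"], check=True)

    # Step 3: run the integration tests with image checks disabled,
    # as in "Run Integration Tests".
    env = {**os.environ, "CHECK_IMAGES": "False"}
    subprocess.run(["pytest", "tests/integration"], check=True, env=env)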
10 changes: 5 additions & 5 deletions docs/source/dev_guide/testing.rst
@@ -56,7 +56,7 @@ The unit and integration tests are run automatically as part of this.
 Complete run test
 -----------------

-``tests/complete_run.py`` checks the images generated by all diagnostics to
+``tests/integration/complete_run.py`` checks the images generated by all diagnostics to
 see if any differ from expected.
 This test is not run as part of the unit test suite, because it relies on a large
 quantity of data found on LCRC (Anvil/Chrysalis).
@@ -78,7 +78,7 @@ Now that you have your changes on LCRC, enter your development environment. Then
 .. code::

     pip install . # Install your changes
-    python -m unittest tests/complete_run.py
+    pytest tests/integration/complete_run.py

 If this test passes, you're done. If it fails however, that means
 your code has changed what the output looks like.
@@ -108,11 +108,11 @@ then you need to update the expected images.
     find . -type f -name '*.png' > ../image_list_all_sets.txt
     cd ..

-Run ``python -m unittest tests/complete_run.py`` again. Now, the test should pass.
+Run ``pytest tests/integration/complete_run.py`` again. Now, the test should pass.

 After merging your pull request, edit ``README.md``.
 The version should be the version of E3SM Diags you ran
-``python -m unittest tests/complete_run.py`` with,
-the date should be the date you ran ``python -m unittest tests/complete_run.py`` on,
+``pytest tests/integration/complete_run.py`` with,
+the date should be the date you ran ``pytest tests/integration/complete_run.py`` on,
 and the hash should be for the top commit shown by ``git log`` or on
 https://github.com/E3SM-Project/e3sm_diags/commits/main.
3 changes: 2 additions & 1 deletion pyproject.toml
@@ -37,7 +37,8 @@ skip = "e3sm_diags/e3sm_diags_driver.py"
 junit_family = "xunit2"
 addopts = "--cov=e3sm_diags --cov-report term --cov-report html:tests_coverage_reports/htmlcov --cov-report xml:tests_coverage_reports/coverage.xml -s"
 python_files = ["tests.py", "test_*.py"]
-# testpaths = "tests/e3sm_diags"
+# Only run the unit tests because integration tests take a long time.
+testpaths = "tests/e3sm_diags"

 [tool.mypy]
 # Docs: https://mypy.readthedocs.io/en/stable/config_file.html
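Since `testpaths` only sets the default collection roots, a bare `pytest` now collects just the unit tests, while an explicit path argument still reaches the integration tests (which is how the updated workflow invokes them). A minimal sketch of both invocations through pytest's Python entry point:

    import pytest

    # No path arguments: collection falls back to `testpaths`,
    # so only the unit tests under tests/e3sm_diags run.
    pytest.main([])

    # An explicit path overrides `testpaths` and collects the
    # integration tests, matching the CI's `pytest tests/integration`.
    pytest.main(["tests/integration"])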
10 changes: 0 additions & 10 deletions tests/integration/all_sets.py

This file was deleted.

187 changes: 0 additions & 187 deletions tests/integration/all_sets_modified.cfg

This file was deleted.

tests/integration/complete_run.py (moved from tests/complete_run.py)
@@ -12,7 +12,7 @@

 # This test should be run with the latest E3SM Diags tutorial code.
 from examples.run_v2_6_0_all_sets_E3SM_machines import run_lcrc
-from tests.integration.test_all_sets_image_diffs import _compare_images
+from tests.integration.utils import _compare_images


 class TestCompleteRun:
4 changes: 2 additions & 2 deletions tests/integration/config.py
@@ -1,5 +1,5 @@
-# Paths and directories used in the integration test. Configurations in
-# `all_sets.cfg` and `all_sets_modified.cfg` should match these paths.
+# Paths and directories used in the integration test. All `.cfg` paths
+# should align with these (e.g., `all_sets.cfg`).
 TEST_ROOT_PATH = "tests/integration/"
 TEST_DATA_DIR = "integration_test_data"
 TEST_IMAGES_DIR = "integration_test_images"
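The image-diff test below imports `TEST_IMAGES_PATH` alongside `TEST_ROOT_PATH`, a constant not visible in this hunk. A plausible sketch of how it could be derived from the constants shown here; the exact definitions in `config.py` are an assumption:

    # Hypothetical derived paths -- the real config.py may differ.
    TEST_ROOT_PATH = "tests/integration/"
    TEST_DATA_DIR = "integration_test_data"
    TEST_IMAGES_DIR = "integration_test_images"

    TEST_DATA_PATH = TEST_ROOT_PATH + TEST_DATA_DIR
    TEST_IMAGES_PATH = TEST_ROOT_PATH + TEST_IMAGES_DIR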
69 changes: 1 addition & 68 deletions tests/integration/test_all_sets_image_diffs.py
@@ -1,16 +1,14 @@
 import os
 import re
-import shutil
 import sys
 from typing import List

 import pytest
-from PIL import Image, ImageChops, ImageDraw

 from e3sm_diags.logger import custom_logger
 from e3sm_diags.run import runner
 from tests.integration.config import TEST_IMAGES_PATH, TEST_ROOT_PATH
-from tests.integration.utils import _get_test_params
+from tests.integration.utils import _compare_images, _get_test_params

 CFG_PATH = os.path.join(TEST_ROOT_PATH, "all_sets.cfg")
@@ -375,68 +373,3 @@ def _check_streamflow_plots(self):
         # Check the full HTML path is the same as the expected.
         full_html_path = os.path.join(self.results_dir, html_path)
         self._check_html_image(full_html_path, png_path, full_png_path)
-
-
-def _compare_images(
-    mismatched_images: List[str],
-    image_name: str,
-    path_to_actual_png: str,
-    path_to_expected_png: str,
-) -> List[str]:
-    # https://stackoverflow.com/questions/35176639/compare-images-python-pil
-
-    actual_png = Image.open(path_to_actual_png).convert("RGB")
-    expected_png = Image.open(path_to_expected_png).convert("RGB")
-    diff = ImageChops.difference(actual_png, expected_png)
-
-    diff_dir = f"{TEST_ROOT_PATH}image_check_failures"
-    if not os.path.isdir(diff_dir):
-        os.mkdir(diff_dir)
-
-    bbox = diff.getbbox()
-    # If `diff.getbbox()` is None, then the images are in theory equal
-    if bbox is None:
-        pass
-    else:
-        # Sometimes, a few pixels will differ, but the two images appear identical.
-        # https://codereview.stackexchange.com/questions/55902/fastest-way-to-count-non-zero-pixels-using-python-and-pillow
-        nonzero_pixels = (
-            diff.crop(bbox)
-            .point(lambda x: 255 if x else 0)
-            .convert("L")
-            .point(bool)
-            .getdata()
-        )
-        num_nonzero_pixels = sum(nonzero_pixels)
-        logger.info("\npath_to_actual_png={}".format(path_to_actual_png))
-        logger.info("path_to_expected_png={}".format(path_to_expected_png))
-        logger.info("diff has {} nonzero pixels.".format(num_nonzero_pixels))
-        width, height = expected_png.size
-        num_pixels = width * height
-        logger.info("total number of pixels={}".format(num_pixels))
-        fraction = num_nonzero_pixels / num_pixels
-        logger.info("num_nonzero_pixels/num_pixels fraction={}".format(fraction))
-
-        # Fraction of mismatched pixels should be less than 0.02%
-        if fraction >= 0.0002:
-            mismatched_images.append(image_name)
-
-            simple_image_name = image_name.split("/")[-1].split(".")[0]
-            shutil.copy(
-                path_to_actual_png,
-                os.path.join(diff_dir, "{}_actual.png".format(simple_image_name)),
-            )
-            shutil.copy(
-                path_to_expected_png,
-                os.path.join(diff_dir, "{}_expected.png".format(simple_image_name)),
-            )
-            # https://stackoverflow.com/questions/41405632/draw-a-rectangle-and-a-text-in-it-using-pil
-            draw = ImageDraw.Draw(diff)
-            (left, upper, right, lower) = diff.getbbox()
-            draw.rectangle(((left, upper), (right, lower)), outline="red")
-            diff.save(
-                os.path.join(diff_dir, "{}_diff.png".format(simple_image_name)),
-                "PNG",
-            )
-
-    return mismatched_images
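With `_compare_images()` now housed in `tests/integration/utils.py`, both `complete_run.py` and `test_all_sets_image_diffs.py` import one shared copy instead of duplicating it. A minimal usage sketch based on the signature above; the PNG paths are hypothetical:

    from typing import List

    from tests.integration.utils import _compare_images

    # Accumulates the names of images whose pixel mismatch exceeds 0.02%.
    mismatched_images: List[str] = []

    mismatched_images = _compare_images(
        mismatched_images,
        "lat_lon/model_vs_obs.png",                   # hypothetical image name
        "actual_results/lat_lon/model_vs_obs.png",    # hypothetical actual PNG
        "expected_results/lat_lon/model_vs_obs.png",  # hypothetical expected PNG
    )
    assert len(mismatched_images) == 0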