
Allow custom metadata for replay file #82

Merged
4 changes: 4 additions & 0 deletions CHANGELOG.rst
@@ -6,6 +6,10 @@ UNRELEASED
* Add support for Python 3.13.
* Dropped support for EOL Python 3.8.
* Change build to use ``pyproject.toml``.
* Allow customization of metadata in the replay file (`#78`_).

.. _`#78`: https://github.com/ESSS/pytest-replay/issues/78


1.5.3
=====
34 changes: 34 additions & 0 deletions README.rst
@@ -69,6 +69,40 @@ execute the tests in the same order with::

Hopefully this will make it easier to reproduce the problem and fix it.

Additional metadata
-------------------

*Version added: 1.6*

In cases where extra metadata must be recorded in the replay file to make a test reproducible, ``pytest-replay``
provides a ``replay_metadata`` fixture whose ``metadata`` attribute lets tests and fixtures store that
information.

Example:

.. code-block:: python

    import pytest
    import numpy as np
    import random

    @pytest.fixture
    def rng(replay_metadata):
        seed = replay_metadata.metadata.setdefault("seed", random.randint(0, 100))
        return np.random.default_rng(seed=seed)

    def test_random(rng):
        data = rng.standard_normal((100, 100))
        assert data.shape == (100, 100)


When used with ``pytest-replay``, this generates a replay file similar to:

.. code-block:: json

    {"nodeid": "test_bar.py::test_random", "start": 0.000}
    {"nodeid": "test_bar.py::test_random", "start": 0.000, "finish": 1.5, "outcome": "passed", "metadata": {"seed": 12}}


FAQ
~~~
58 changes: 42 additions & 16 deletions src/pytest_replay/__init__.py
@@ -1,7 +1,12 @@
+import collections
+import dataclasses
 import json
 import os
 import time
+from dataclasses import asdict
 from glob import glob
+from typing import Any
+from typing import Optional
 
 import pytest
 
@@ -39,6 +44,24 @@ def pytest_addoption(parser):
     )
 
 
+@dataclasses.dataclass
+class ReplayTestInfo:
+    nodeid: str
+    start: float = 0.0
+    finish: Optional[float] = None
+    outcome: Optional[str] = None
+    metadata: dict[str, Any] = dataclasses.field(default_factory=dict)
+
+    def to_clean_dict(self) -> dict[str, Any]:
+        return {k: v for k, v in asdict(self).items() if v}
+
+
+class _ReplayTestInfoDefaultDict(collections.defaultdict):
+    def __missing__(self, key):
+        self[key] = ReplayTestInfo(nodeid=key)
+        return self[key]
+
+
 class ReplayPlugin:
     def __init__(self, config):
         self.dir = config.getoption("replay_record_dir")
@@ -53,10 +76,13 @@ def __init__(self, config):
         skip_cleanup = config.getoption("skip_cleanup", False)
         if not skip_cleanup:
             self.cleanup_scripts()
-        self.node_start_time = {}
-        self.node_outcome = {}
+        self.nodes = _ReplayTestInfoDefaultDict()
         self.session_start_time = config.replay_start_time
 
+    @pytest.fixture(scope="function")
+    def replay_metadata(self, request):
+        return self.nodes[request.node.nodeid]
+
     def cleanup_scripts(self):
         if self.xdist_worker_name:
             # only cleanup scripts on the master node
@@ -79,31 +105,28 @@ def pytest_runtest_logstart(self, nodeid):
             # only workers report running tests when running in xdist
             return
         if self.dir:
-            self.node_start_time[nodeid] = time.perf_counter() - self.session_start_time
-            json_content = json.dumps(
-                {"nodeid": nodeid, "start": self.node_start_time[nodeid]}
-            )
+            self.nodes[nodeid].start = time.perf_counter() - self.session_start_time
+            json_content = json.dumps(self.nodes[nodeid].to_clean_dict())
             self.append_test_to_script(nodeid, json_content)
 
     @pytest.hookimpl(hookwrapper=True)
     def pytest_runtest_makereport(self, item):
         report = yield
         result = report.get_result()
         if self.dir:
-            current = self.node_outcome.setdefault(item.nodeid, result.outcome)
+            self.nodes[item.nodeid].outcome = (
+                self.nodes[item.nodeid].outcome or result.outcome
+            )
+            current = self.nodes[item.nodeid].outcome
             if not result.passed and current != "failed":
                 # do not overwrite a failed outcome with a skipped one
-                self.node_outcome[item.nodeid] = result.outcome
+                self.nodes[item.nodeid].outcome = result.outcome
 
             if result.when == "teardown":
-                json_content = json.dumps(
-                    {
-                        "nodeid": item.nodeid,
-                        "start": self.node_start_time[item.nodeid],
-                        "finish": time.perf_counter() - self.session_start_time,
-                        "outcome": self.node_outcome.pop(item.nodeid),
-                    }
-                )
+                self.nodes[item.nodeid].finish = (
+                    time.perf_counter() - self.session_start_time
+                )
+                json_content = json.dumps(self.nodes[item.nodeid].to_clean_dict())
                 self.append_test_to_script(item.nodeid, json_content)
 
     def pytest_collection_modifyitems(self, items, config):
@@ -119,7 +142,10 @@ def pytest_collection_modifyitems(self, items, config):
                 stripped = line.strip()
                 # Ignore blank lines and comments. (#70)
                 if stripped and not stripped.startswith(("#", "//")):
-                    nodeid = json.loads(stripped)["nodeid"]
+                    node_info = json.loads(stripped)
+                    nodeid = node_info["nodeid"]
+                    if "finish" in node_info:
+                        self.nodes[nodeid] = ReplayTestInfo(**node_info)
                     nodeids[nodeid] = None
 
         items_dict = {item.nodeid: item for item in items}
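
For reference, a small self-contained sketch (mirroring the dataclass added in the diff above; the node id and
timing values are illustrative) of how ``ReplayTestInfo.to_clean_dict`` shapes the lines written to the replay
file: fields that are still unset or empty are omitted, which is why the line emitted at test start only
carries ``nodeid`` and ``start``.

.. code-block:: python

    import dataclasses
    import json
    from dataclasses import asdict
    from typing import Any, Optional


    @dataclasses.dataclass
    class ReplayTestInfo:
        nodeid: str
        start: float = 0.0
        finish: Optional[float] = None
        outcome: Optional[str] = None
        metadata: dict[str, Any] = dataclasses.field(default_factory=dict)

        def to_clean_dict(self) -> dict[str, Any]:
            # Drop unset/empty fields so they do not clutter the replay file.
            return {k: v for k, v in asdict(self).items() if v}


    info = ReplayTestInfo(nodeid="test_bar.py::test_random", start=0.25)
    print(json.dumps(info.to_clean_dict()))
    # {"nodeid": "test_bar.py::test_random", "start": 0.25}

    # After the test finishes, the remaining fields are filled in and serialized.
    info.finish = 1.5
    info.outcome = "passed"
    info.metadata["seed"] = 12
    print(json.dumps(info.to_clean_dict()))
    # {"nodeid": "test_bar.py::test_random", "start": 0.25, "finish": 1.5, "outcome": "passed", "metadata": {"seed": 12}}
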
36 changes: 36 additions & 0 deletions tests/test_replay.py
@@ -274,6 +274,42 @@ def test_filter_out_tests_not_in_file(testdir):
)


def test_metadata(pytester, tmp_path):
    pytester.makepyfile(
        """
        import pytest

        @pytest.fixture
        def seed(replay_metadata):
            assert replay_metadata.metadata == {}
            replay_metadata.metadata["seed"] = seed = 1234
            return seed

        def test_foo(seed):
            assert seed == 1234
        """
    )
    dir = tmp_path / "replay"
    result = pytester.runpytest(f"--replay-record-dir={dir}")
    assert result.ret == 0

    # Rewrite the fixture so it always returns the metadata recorded in the previous run.
    pytester.makepyfile(
        """
        import pytest

        @pytest.fixture
        def seed(replay_metadata):
            return replay_metadata.metadata["seed"]

        def test_foo(seed):
            assert seed == 1234
        """
    )
    result = pytester.runpytest(f"--replay={dir / '.pytest-replay.txt'}")
    assert result.ret == 0


def test_replay_file_outcome_is_correct(testdir):
"""Tests that the outcomes in the replay file are correct."""
testdir.makepyfile(