skipping: fix dynamic xfail mark added in runtest not respected #7490

Merged · 2 commits · Jul 15, 2020
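The underlying report (issue #7486): an xfail mark added to a test item while the test is already running was not respected, so the test showed up as a plain failure instead of an xfail. A minimal reproduction, adapted from the regression test added below (the file name is hypothetical):

# test_dynamic_xfail.py
# Before this fix the run reports "1 failed"; after it, "1 xfailed".
import pytest

def test_this(request):
    # Add the xfail mark dynamically, from inside the test body.
    request.node.add_marker(pytest.mark.xfail(reason="xfail"))
    assert 0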
21 changes: 11 additions & 10 deletions src/_pytest/skipping.py
@@ -231,17 +231,14 @@ def evaluate_xfail_marks(item: Item) -> Optional[Xfail]:
 
 @hookimpl(tryfirst=True)
 def pytest_runtest_setup(item: Item) -> None:
-    item._store[skipped_by_mark_key] = False
-
     skipped = evaluate_skip_marks(item)
+    item._store[skipped_by_mark_key] = skipped is not None
     if skipped:
-        item._store[skipped_by_mark_key] = True
         skip(skipped.reason)
 
-    if not item.config.option.runxfail:
-        item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item)
-        if xfailed and not xfailed.run:
-            xfail("[NOTRUN] " + xfailed.reason)
+    item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item)
+    if xfailed and not item.config.option.runxfail and not xfailed.run:
+        xfail("[NOTRUN] " + xfailed.reason)
 
 
 @hookimpl(hookwrapper=True)
@@ -250,12 +247,16 @@ def pytest_runtest_call(item: Item) -> Generator[None, None, None]:
     if xfailed is None:
         item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item)
 
-    if not item.config.option.runxfail:
-        if xfailed and not xfailed.run:
-            xfail("[NOTRUN] " + xfailed.reason)
+    if xfailed and not item.config.option.runxfail and not xfailed.run:
+        xfail("[NOTRUN] " + xfailed.reason)
 
     yield
+
+    # The test run may have added an xfail mark dynamically.
+    xfailed = item._store.get(xfailed_key, None)
+    if xfailed is None:
+        item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item)
 
 
 @hookimpl(hookwrapper=True)
 def pytest_runtest_makereport(item: Item, call: CallInfo[None]):
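Taken together, the hunks make two behavioral changes: pytest_runtest_setup now stores the xfail evaluation unconditionally (previously xfailed_key was only set when --runxfail was off), and pytest_runtest_call re-evaluates the xfail marks after the test body has run. The re-evaluation works because evaluate_xfail_marks returns None when the item carries no xfail mark, so a mark added during the test leaves a stored None that the post-yield check replaces. For reference, the call hook as it reads after this change (an excerpt with comments expanded; hookimpl, Item, xfailed_key, and the evaluate/xfail helpers are defined elsewhere in _pytest/skipping.py):

@hookimpl(hookwrapper=True)
def pytest_runtest_call(item: Item) -> Generator[None, None, None]:
    xfailed = item._store.get(xfailed_key, None)
    if xfailed is None:
        item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item)

    # The [NOTRUN] short-circuit, now gated on a single combined condition.
    if xfailed and not item.config.option.runxfail and not xfailed.run:
        xfail("[NOTRUN] " + xfailed.reason)

    yield

    # The test run may have added an xfail mark dynamically. If nothing was
    # stored before the run (i.e. the stored value is None), evaluate again
    # so that pytest_runtest_makereport sees the new mark.
    xfailed = item._store.get(xfailed_key, None)
    if xfailed is None:
        item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item)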
28 changes: 28 additions & 0 deletions testing/test_skipping.py
@@ -1,6 +1,7 @@
 import sys
 
 import pytest
+from _pytest.pytester import Testdir
 from _pytest.runner import runtestprotocol
 from _pytest.skipping import evaluate_skip_marks
 from _pytest.skipping import evaluate_xfail_marks
@@ -425,6 +426,33 @@ def test_this2(arg):
         result = testdir.runpytest(p)
         result.stdout.fnmatch_lines(["*1 xfailed*"])
 
+    def test_dynamic_xfail_set_during_runtest_failed(self, testdir: Testdir) -> None:
+        # Issue #7486.
+        p = testdir.makepyfile(
+            """
+            import pytest
+            def test_this(request):
+                request.node.add_marker(pytest.mark.xfail(reason="xfail"))
+                assert 0
+            """
+        )
+        result = testdir.runpytest(p)
+        result.assert_outcomes(xfailed=1)
+
+    def test_dynamic_xfail_set_during_runtest_passed_strict(
+        self, testdir: Testdir
+    ) -> None:
+        # Issue #7486.
+        p = testdir.makepyfile(
+            """
+            import pytest
+            def test_this(request):
+                request.node.add_marker(pytest.mark.xfail(reason="xfail", strict=True))
+            """
+        )
+        result = testdir.runpytest(p)
+        result.assert_outcomes(failed=1)
+
     @pytest.mark.parametrize(
         "expected, actual, matchline",
         [
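Why the asserted outcomes differ: in the first test the dynamically added mark turns the failing test into an xfail (xfailed=1); in the second the mark is strict, and a strict xfail that then passes is reported as a failure, XPASS(strict), hence failed=1. Before this change the two runs would have been reported as plain failed and passed, respectively.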