Coverage for tests / unit / tools / pytest_tool / test_output_processing.py: 100%
36 statements
« prev ^ index » next coverage.py v7.13.0, created at 2026-04-03 18:53 +0000
1"""Tests for pytest output processing functions."""
3from __future__ import annotations
5from assertpy import assert_that
7from lintro.parsers.pytest.pytest_issue import PytestIssue
8from lintro.tools.implementations.pytest.pytest_output_processor import (
9 build_output_with_failures,
10 detect_and_log_slow_tests,
11 parse_pytest_output_with_fallback,
12 process_test_summary,
13)
15# =============================================================================
16# Tests for process_test_summary function
17# =============================================================================
def test_process_test_summary_all_passed(
    mock_test_success_output: str,
) -> None:
    """Summarize a run in which every test passed.

    Args:
        mock_test_success_output: Mock pytest output with all tests passing.
    """
    result = process_test_summary(
        output=mock_test_success_output,
        issues=[],
        total_available_tests=10,
    )
    # All 10 available tests pass, so nothing should be counted as failed.
    assert_that(result["passed"]).is_equal_to(10)
    assert_that(result["failed"]).is_equal_to(0)
def test_process_test_summary_with_failures(
    mock_test_failure_output: str,
    sample_pytest_issues: list[PytestIssue],
) -> None:
    """Summarize a run that contains failing tests.

    Args:
        mock_test_failure_output: Mock pytest output with test failures.
        sample_pytest_issues: List of sample pytest issues for testing.
    """
    # Only FAILED/ERROR statuses are relevant to the failure count.
    relevant = [
        issue
        for issue in sample_pytest_issues
        if issue.test_status in ("FAILED", "ERROR")
    ]
    result = process_test_summary(
        output=mock_test_failure_output,
        issues=relevant,
        total_available_tests=10,
    )
    assert_that(result["failed"]).is_equal_to(2)
59# =============================================================================
60# Tests for detect_and_log_slow_tests function
61# =============================================================================
def test_detect_slow_tests_finds_slow() -> None:
    """Report a test whose duration exceeds the configured threshold."""
    slow_issue = PytestIssue(
        file="test.py",
        line=1,
        test_name="test_slow",
        message="",
        test_status="PASSED",
        duration=5.0,
    )
    # 5.0s duration against a 1.0s threshold must be flagged as slow.
    result = detect_and_log_slow_tests([slow_issue], {"slow_test_threshold": 1.0})
    assert_that(result).is_length(1)
    assert_that(result[0][0]).is_equal_to("test_slow")
    assert_that(result[0][1]).is_equal_to(5.0)
def test_detect_slow_tests_none_slow() -> None:
    """Report nothing when every test runs under the threshold."""
    fast_issue = PytestIssue(
        file="test.py",
        line=1,
        test_name="test_fast",
        message="",
        test_status="PASSED",
        duration=0.1,
    )
    # 0.1s is well under the 1.0s threshold, so no slow tests are expected.
    result = detect_and_log_slow_tests([fast_issue], {"slow_test_threshold": 1.0})
    assert_that(result).is_empty()
98# =============================================================================
99# Tests for build_output_with_failures function
100# =============================================================================
def test_build_output_with_failures_includes_summary() -> None:
    """Rendered output embeds the summary counts as JSON."""
    summary = {
        "passed": 10,
        "failed": 0,
        "skipped": 0,
        "error": 0,
        "duration": 0.12,
        "total": 10,
    }
    rendered = build_output_with_failures(summary, [])
    # The JSON-serialized summary should appear verbatim in the output.
    assert_that(rendered).contains('"passed": 10')
117# =============================================================================
118# Tests for parse_pytest_output_with_fallback function
119# =============================================================================
def test_fallback_returns_empty_for_empty_output() -> None:
    """Empty output with no junitxml configured yields no issues."""
    parsed = parse_pytest_output_with_fallback(
        output="",
        return_code=0,
        options={},
    )
    assert_that(parsed).is_empty()
def test_fallback_uses_json_when_option_set(
    mock_test_json_failure: str,
) -> None:
    """Parse via the JSON path when the json_report option is enabled.

    Args:
        mock_test_json_failure: Mock pytest JSON output with test failures.
    """
    parsed = parse_pytest_output_with_fallback(
        output=mock_test_json_failure,
        return_code=1,
        options={"json_report": True},
    )
    # The JSON fixture describes exactly one failing test.
    assert_that(parsed).is_length(1)
def test_fallback_to_text_when_format_fails() -> None:
    """Fall back to text parsing when the primary format cannot be parsed."""
    # Not valid JSON, but a well-formed pytest text failure line.
    raw_output = "FAILED tests/test.py::test_fail - AssertionError"
    parsed = parse_pytest_output_with_fallback(
        output=raw_output,
        return_code=1,
        options={"json_report": True},  # Try JSON first but will fail
    )
    # Text fallback should still recover the single failure.
    assert_that(parsed).is_length(1)