Coverage for lintro / tools / implementations / pytest / formatters.py: 65%

137 statements  

« prev     ^ index     » next       coverage.py v7.13.0, created at 2026-04-03 18:53 +0000

1"""Output formatters for pytest results. 

2 

3This module provides functions for formatting pytest output for display. 

4""" 

5 

6from __future__ import annotations 

7 

8import json 

9from typing import Any 

10 

11from loguru import logger 

12 

13from lintro.parsers.pytest.pytest_issue import PytestIssue 

14from lintro.parsers.pytest.pytest_parser import extract_pytest_summary 

15from lintro.tools.implementations.pytest.coverage_processor import ( 

16 parse_coverage_summary, 

17) 

18 

19 

def process_test_summary(
    output: str,
    issues: list[PytestIssue],
    total_available_tests: int,
) -> dict[str, Any]:
    """Process test summary and calculate skipped tests.

    Args:
        output: Raw output from pytest.
        issues: Parsed test issues.
        total_available_tests: Total number of available tests.

    Returns:
        dict: Summary data dictionary.
    """
    summary = extract_pytest_summary(output)

    # Count failures from the parsed issues rather than the regex summary,
    # since the two can disagree when parsing is inconsistent.
    failure_count = sum(
        1 for issue in issues if issue.test_status in ("FAILED", "ERROR")
    )

    # Tests that were collected and ran (or were runtime-skipped).
    # xfailed/xpassed count here because they are tests that executed.
    # summary.error is already reflected in failure_count, so it is left
    # out to avoid double-counting.
    ran_or_skipped = (
        summary.passed
        + failure_count
        + summary.skipped
        + summary.xfailed
        + summary.xpassed
    )
    # Tests that exist but were never run — includes deselected tests,
    # which pytest does not report in its summary line.
    deselected_estimate = max(0, total_available_tests - ran_or_skipped)

    logger.debug(f"Total available tests: {total_available_tests}")
    logger.debug(f"Collected tests: {ran_or_skipped}")
    logger.debug(
        f"Summary: passed={summary.passed}, "
        f"failed={failure_count}, "
        f"skipped={summary.skipped}, "
        f"error={summary.error}",
    )
    logger.debug(f"Actual skipped: {deselected_estimate}")

    # Report whichever skip count is larger: summary.skipped covers runtime
    # skips, deselected_estimate additionally covers deselected tests.
    return {
        "passed": summary.passed,
        # Use actual parsed failures, not the regex summary count
        "failed": failure_count,
        "skipped": max(summary.skipped, deselected_estimate),
        "error": summary.error,
        "duration": summary.duration,
        "total": total_available_tests,
    }

85 

86 

def format_pytest_issue(issue: PytestIssue) -> str:
    """Format a single pytest issue in a clean, readable format.

    Args:
        issue: PytestIssue to format.

    Returns:
        str: Formatted issue string.
    """
    status = issue.test_status.upper() if issue.test_status else "UNKNOWN"

    # Status marker lookup; anything unrecognized gets "?".
    markers = {"FAILED": "X", "ERROR": "!", "SKIPPED": "-"}
    marker = markers.get(status, "?")

    # Test identifier: prefer node_id, fall back to test_name.
    test_id = issue.node_id or issue.test_name or "unknown test"

    out = [f"{marker} {status} {test_id}"]

    # Append a one-line summary of the failure message, if any.
    if issue.message:
        raw_lines = issue.message.strip().split("\n")
        summary_line = None

        # First pass: the first informative line that is not a pytest
        # source marker (">") or assertion-detail line ("E ").
        for raw in raw_lines:
            candidate = raw.strip()
            if not candidate or candidate.startswith((">", "E ")):
                continue
            if (
                "Error" in candidate
                or "assert" in candidate.lower()
                or ":" in candidate
            ):
                summary_line = candidate
                break

        # Second pass: fall back to the first "E " line (pytest's error
        # indicator), with the prefix stripped off.
        if not summary_line:
            for raw in raw_lines:
                if raw.strip().startswith("E "):
                    summary_line = raw.strip()[2:].strip()
                    break

        if summary_line:
            # Keep the summary short enough for a single display line.
            if len(summary_line) > 100:
                summary_line = summary_line[:97] + "..."
            out.append(f"  {summary_line}")

    return "\n".join(out)

142 

143 

144def _extract_brief_message(message: str | None) -> str: 

145 """Extract a brief, single-line message from pytest error output. 

146 

147 Args: 

148 message: Full error message from pytest. 

149 

150 Returns: 

151 str: Brief message suitable for table display. 

152 """ 

153 if not message: 

154 return "-" 

155 

156 msg_lines = message.strip().split("\n") 

157 brief_msg = None 

158 

159 # Look for an informative line 

160 for line in msg_lines: 

161 line = line.strip() 

162 # Skip empty lines and pytest output markers 

163 is_valid_line = line and not line.startswith(">") and not line.startswith("E ") 

164 has_error_info = "Error" in line or "assert" in line.lower() or ":" in line 

165 if is_valid_line and has_error_info: 

166 brief_msg = line 

167 break 

168 

169 # If no good line found, try to find an "E " line (pytest error indicator) 

170 if not brief_msg: 

171 for line in msg_lines: 

172 if line.strip().startswith("E "): 

173 brief_msg = line.strip()[2:].strip() 

174 break 

175 

176 # Fall back to first non-empty line 

177 if not brief_msg: 

178 for line in msg_lines: 

179 if line.strip(): 

180 brief_msg = line.strip() 

181 break 

182 

183 if not brief_msg: 

184 return "-" 

185 

186 # Truncate if too long for table display 

187 if len(brief_msg) > 60: 

188 brief_msg = brief_msg[:57] + "..." 

189 

190 return brief_msg 

191 

192 

def format_pytest_issues_table(issues: list[PytestIssue]) -> str:
    """Format pytest issues as a table similar to check command output.

    Args:
        issues: List of PytestIssue objects to format.

    Returns:
        str: Formatted table string.
    """
    if not issues:
        return ""

    try:
        from tabulate import tabulate
    except ImportError:
        # Fall back to simple per-issue formatting if tabulate is missing.
        return "\n".join(format_pytest_issue(issue) for issue in issues)

    # ANSI-colored status cells; unknown statuses get a plain "?" prefix.
    status_cells = {
        "FAILED": "\033[91mX FAILED\033[0m",
        "ERROR": "\033[91m! ERROR\033[0m",
        "SKIPPED": "\033[93m- SKIPPED\033[0m",
    }

    rows: list[list[str]] = []
    for issue in issues:
        status = issue.test_status.upper() if issue.test_status else "UNKNOWN"
        status_cell = status_cells.get(status, f"? {status}")

        # Identifier: prefer node_id, then test_name, then file. Long IDs
        # keep their (more informative) tail when shortened.
        test_id = issue.node_id or issue.test_name or issue.file or "-"
        if len(test_id) > 70:
            test_id = "..." + test_id[-67:]

        # Location column: "file:line" when both known, abbreviating long
        # paths from the left; otherwise whichever part is available.
        if issue.file and issue.line:
            path = issue.file
            if len(path) > 40:
                path = "..." + path[-37:]
            location = f"{path}:{issue.line}"
        elif issue.file:
            location = (
                issue.file
                if len(issue.file) <= 45
                else "..." + issue.file[-42:]
            )
        elif issue.line:
            location = str(issue.line)
        else:
            location = "-"

        rows.append(
            [test_id, location, status_cell, _extract_brief_message(issue.message)]
        )

    return tabulate(
        tabular_data=rows,
        headers=["Test", "Location", "Status", "Message"],
        tablefmt="grid",
        stralign="left",
        disable_numparse=True,
    )

259 

260 

def build_output_with_failures(
    summary_data: dict[str, Any],
    all_issues: list[PytestIssue],
    raw_output: str | None = None,
) -> str:
    """Build output string with summary and test details.

    Args:
        summary_data: Summary data dictionary.
        all_issues: List of all test issues (failures, errors, skips).
        raw_output: Optional raw pytest output to extract coverage report from.

    Returns:
        str: Formatted output string.
    """
    # Attach the coverage summary (if present) so downstream result
    # formatters can render it. The verbose coverage report itself is
    # intentionally NOT included in the output.
    if raw_output:
        coverage = parse_coverage_summary(raw_output)
        if coverage:
            summary_data["coverage"] = coverage

    parts = [json.dumps(summary_data)]

    if all_issues:
        # Bucket issues by status in a single pass; unknown statuses are
        # ignored, matching the original per-status filters.
        failures: list[PytestIssue] = []
        errors: list[PytestIssue] = []
        skips: list[PytestIssue] = []
        for issue in all_issues:
            if issue.test_status == "FAILED":
                failures.append(issue)
            elif issue.test_status == "ERROR":
                errors.append(issue)
            elif issue.test_status == "SKIPPED":
                skips.append(issue)

        # Failures and errors are the most important — render as a table.
        if failures or errors:
            parts.append("")
            table = format_pytest_issues_table(failures + errors)
            if table:
                parts.append(table)

        # Skipped tests get their own table only when the list is short;
        # otherwise just report the count.
        if skips and len(skips) <= 10:
            parts.append("")
            parts.append("Skipped Tests")
            table = format_pytest_issues_table(skips)
            if table:
                parts.append(table)
        elif skips:
            parts.append("")
            parts.append(f"- {len(skips)} tests skipped")

    return "\n".join(parts)