Coverage for lintro / utils / summary_tables.py: 78%

210 statements  

« prev     ^ index     » next       coverage.py v7.13.0, created at 2026-04-03 18:53 +0000

1"""Summary table generation for Lintro tool output. 

2 

3Handles formatting and display of execution summary tables with tabulate. 

4""" 

5 

6import contextlib 

7import sys 

8from collections.abc import Callable, Sequence 

9from typing import Any 

10 

11from lintro.enums.action import Action 

12from lintro.enums.tool_name import ToolName, normalize_tool_name 

13from lintro.utils.ai_metadata import get_ai_count 

14from lintro.utils.console import ( 

15 RE_CANNOT_AUTOFIX, 

16 RE_REMAINING_OR_CANNOT, 

17 get_summary_value, 

18 get_tool_emoji, 

19) 

20 

# Constants
# Sentinel rendered in the "Remaining" column when the true count is unknown.
DEFAULT_REMAINING_COUNT: str = "?"

# ANSI color codes — only emit when stdout is a terminal
# (isatty() is False when output is piped/redirected, so the escape
# sequences collapse to empty strings and logs stay clean).
_USE_COLOR: bool = sys.stdout.isatty()
_GREEN: str = "\033[92m" if _USE_COLOR else ""
_RED: str = "\033[91m" if _USE_COLOR else ""
_YELLOW: str = "\033[93m" if _USE_COLOR else ""
_RESET: str = "\033[0m" if _USE_COLOR else ""

30 

31 

32def _extract_skip_reason(output: str) -> str: 

33 """Extract abbreviated skip reason from tool output. 

34 

35 Skip messages have format: "Skipping {tool}: {error}. Minimum required: ..." 

36 

37 Args: 

38 output: The tool output containing the skip message. 

39 

40 Returns: 

41 Abbreviated reason string for display in the summary table. 

42 """ 

43 if ":" in output and ". Minimum" in output: 

44 colon_idx = output.index(":") 

45 minimum_idx = output.index(". Minimum") 

46 # Ensure colon comes before ". Minimum" to get a valid slice 

47 if colon_idx >= minimum_idx: 

48 return "SKIPPED" 

49 start = colon_idx + 1 

50 end = minimum_idx 

51 reason = output[start:end].strip() 

52 # Abbreviate common error messages 

53 if "Command failed" in reason: 

54 return "Cmd failed" 

55 if "Could not parse version" in reason: 

56 return "No version" 

57 if "below minimum" in reason: 

58 return "Outdated" 

59 if "Failed to run" in reason: 

60 return "Not found" 

61 # Truncate if too long 

62 return reason[:20] if len(reason) > 20 else reason 

63 return "SKIPPED" 

64 

65 

def _safe_cast(
    summary: dict[str, Any],
    key: str,
    default: int | float,
    converter: Callable[[Any], int | float],
) -> int | float:
    """Safely extract and cast a value from a summary dictionary.

    Args:
        summary: Dictionary containing summary data.
        key: Key to extract from summary.
        default: Default value if extraction/conversion fails.
        converter: Function to convert the extracted value (e.g., int, float).

    Returns:
        Converted value or default if extraction/conversion fails.
    """
    try:
        # Both the lookup and the conversion are guarded: a ValueError or
        # TypeError from either falls back to the caller's default.
        raw = get_summary_value(summary, key, default)
        return converter(raw)
    except (ValueError, TypeError):
        return default

87 

88 

89def _format_tool_display_name(tool_name: str) -> str: 

90 """Format tool name for display (convert underscores to hyphens). 

91 

92 Args: 

93 tool_name: Raw tool name (may contain underscores). 

94 

95 Returns: 

96 Display name with hyphens instead of underscores. 

97 """ 

98 return tool_name.replace("_", "-") 

99 

100 

def _get_ai_applied_count(result: object) -> int:
    """Get AI-applied fix count from tool result metadata.

    Args:
        result: Tool result object to read the count from.

    Returns:
        The ``applied_count`` value reported by ``get_ai_count``.
    """
    return get_ai_count(result, "applied_count")

104 

105 

def _get_ai_verified_count(result: object) -> int:
    """Get count of AI-applied fixes verified as resolved.

    Args:
        result: Tool result object to read the count from.

    Returns:
        The ``verified_count`` value reported by ``get_ai_count``.
    """
    return get_ai_count(result, "verified_count")

109 

110 

def _get_ai_unverified_count(result: object) -> int:
    """Get count of AI-applied fixes that remain unresolved.

    Args:
        result: Tool result object to read the count from.

    Returns:
        The ``unverified_count`` value reported by ``get_ai_count``.
    """
    return get_ai_count(result, "unverified_count")

114 

115 

116def _is_result_skipped(result: object) -> tuple[bool, str]: 

117 """Check if a tool result represents a skipped tool. 

118 

119 Uses the first-class ``skipped`` field if available, falling back to 

120 legacy output string matching for backward compatibility. 

121 

122 Args: 

123 result: Tool result object. 

124 

125 Returns: 

126 Tuple of (is_skipped, skip_reason). 

127 """ 

128 # First-class field (preferred) 

129 skipped = getattr(result, "skipped", False) 

130 if skipped: 

131 skip_reason = getattr(result, "skip_reason", None) or "" 

132 return True, skip_reason 

133 

134 # Legacy fallback: match "Skipping {tool}: ..." pattern in output 

135 tool_name = getattr(result, "name", "unknown") 

136 result_output = getattr(result, "output", "") or "" 

137 if ( 

138 result_output 

139 and isinstance(result_output, str) 

140 and result_output.lower().startswith(f"skipping {tool_name.lower()}:") 

141 ): 

142 return True, _extract_skip_reason(result_output) 

143 

144 return False, "" 

145 

146 

def count_affected_files(tool_results: Sequence[object]) -> int:
    """Count unique file paths with issues across all tool results.

    Args:
        tool_results: Sequence of tool results to inspect.

    Returns:
        Number of unique files that have at least one issue.
    """
    unique_paths: set[str] = set()
    for result in tool_results:
        # A result without issues (None or empty) contributes nothing.
        for issue in getattr(result, "issues", None) or ():
            path = getattr(issue, "file", "")
            if path:
                unique_paths.add(str(path))
    return len(unique_paths)

165 

166 

def print_summary_table(
    console_output_func: Callable[..., None],
    action: Action,
    tool_results: Sequence[object],
) -> None:
    """Print the summary table for the run.

    Renders one row per tool result (sorted by tool name). The column
    layout depends on ``action``:

    - TEST: Tool, Status, Passed, Failed, Skipped, Total, Duration, Notes
    - FIX: Tool, Status, Fixed, AI-Applied, AI-Resolved, Remaining, Notes
    - otherwise (check): Tool, Status, Issues, Notes

    Falls back to a plain message if ``tabulate`` is not installed.

    Args:
        console_output_func: Function to output text to console
        action: The action being performed.
        tool_results: Sequence of tool results.
    """
    try:
        from tabulate import tabulate

        # Sort results alphabetically by tool name for consistent output
        sorted_results = sorted(
            tool_results,
            key=lambda r: getattr(r, "name", "unknown").lower(),
        )

        summary_data: list[list[str]] = []
        for result in sorted_results:
            tool_name: str = getattr(result, "name", "unknown")
            issues_count: int = getattr(result, "issues_count", 0)
            success: bool = getattr(result, "success", True)

            emoji: str = get_tool_emoji(tool_name)
            display_name: str = _format_tool_display_name(tool_name)
            tool_display: str = f"{emoji} {display_name}"

            # Check skip status (first-class field or legacy fallback)
            is_skipped, skip_reason = _is_result_skipped(result)

            # Special handling for pytest/test action
            # Safely check if this is pytest by normalizing the tool name
            # (suppress ValueError for tool names normalize doesn't know).
            is_pytest = False
            with contextlib.suppress(ValueError):
                is_pytest = normalize_tool_name(tool_name) == ToolName.PYTEST

            if action == Action.TEST and is_pytest:
                if is_skipped:
                    # 8-column TEST row with placeholder metrics.
                    summary_data.append(
                        [
                            tool_display,
                            f"{_YELLOW}⏭️ SKIP{_RESET}",
                            "-",
                            "-",
                            "-",
                            "-",
                            "-",
                            f"{_YELLOW}{skip_reason}{_RESET}" if skip_reason else "",
                        ],
                    )
                    continue

                pytest_summary = getattr(result, "pytest_summary", None)
                if pytest_summary:
                    # Use pytest summary data for more detailed display
                    passed = _safe_cast(pytest_summary, "passed", 0, int)
                    failed = _safe_cast(pytest_summary, "failed", 0, int)
                    skipped_count = _safe_cast(pytest_summary, "skipped", 0, int)
                    duration = _safe_cast(pytest_summary, "duration", 0.0, float)
                    total = _safe_cast(pytest_summary, "total", 0, int)

                    # Create detailed status display
                    status_display = (
                        f"{_GREEN}✅ PASS{_RESET}"
                        if failed == 0
                        else f"{_RED}❌ FAIL{_RESET}"
                    )

                    # Format duration with proper units
                    duration_str = f"{duration:.2f}s"

                    # Create row with separate columns for each metric
                    summary_data.append(
                        [
                            tool_display,
                            status_display,
                            str(passed),
                            str(failed),
                            str(skipped_count),
                            str(total),
                            duration_str,
                            "",  # Notes
                        ],
                    )
                    continue
                # NOTE: a pytest result with no pytest_summary falls
                # through to the generic TEST handling below.

            # Handle TEST action for non-pytest tools
            if action == Action.TEST:
                if is_skipped:
                    summary_data.append(
                        [
                            tool_display,
                            f"{_YELLOW}⏭️ SKIP{_RESET}",
                            "-",
                            "-",
                            "-",
                            "-",
                            "-",
                            f"{_YELLOW}{skip_reason}{_RESET}" if skip_reason else "",
                        ],
                    )
                    continue

                # Non-pytest tool in test mode - show basic pass/fail
                # (per-test metric columns are not applicable: "-").
                status_display = (
                    f"{_GREEN}✅ PASS{_RESET}"
                    if (success and issues_count == 0)
                    else f"{_RED}❌ FAIL{_RESET}"
                )
                summary_data.append(
                    [
                        tool_display,
                        status_display,
                        "-",
                        "-",
                        "-",
                        "-",
                        "-",
                        "",  # Notes
                    ],
                )
                continue

            # For format operations, success means tool ran
            # (regardless of fixes made)
            # For check operations, success means no issues found
            if action == Action.FIX:
                if is_skipped:
                    summary_data.append(
                        [
                            tool_display,
                            f"{_YELLOW}⏭️ SKIP{_RESET}",
                            "-",  # Fixed
                            "-",  # AI-Applied
                            "-",  # AI-Resolved
                            "-",  # Remaining
                            f"{_YELLOW}{skip_reason}{_RESET}" if skip_reason else "",
                        ],
                    )
                    continue

                # Format operations: show fixed count and remaining status
                if success:
                    status_display = f"{_GREEN}✅ PASS{_RESET}"
                else:
                    status_display = f"{_RED}❌ FAIL{_RESET}"

                # Get result output for parsing
                result_output: str = getattr(result, "output", "")

                # Prefer standardized counts from ToolResult
                remaining_std = getattr(result, "remaining_issues_count", None)
                fixed_std = getattr(result, "fixed_issues_count", None)

                if remaining_std is not None:
                    try:
                        remaining_count: int | str = int(remaining_std)
                    except (ValueError, TypeError):
                        remaining_count = DEFAULT_REMAINING_COUNT
                else:
                    # Parse output to determine remaining issues
                    remaining_count = 0
                    if result_output and (
                        "remaining" in result_output.lower()
                        or "cannot be auto-fixed" in result_output.lower()
                    ):
                        remaining_match = RE_CANNOT_AUTOFIX.search(
                            result_output,
                        )
                        if not remaining_match:
                            remaining_match = RE_REMAINING_OR_CANNOT.search(
                                result_output.lower(),
                            )
                        if remaining_match:
                            try:
                                remaining_count = int(remaining_match.group(1))
                            except (ValueError, TypeError):
                                remaining_count = DEFAULT_REMAINING_COUNT
                    elif not success:
                        # Tool failed and output gave no count — unknown.
                        remaining_count = DEFAULT_REMAINING_COUNT

                if fixed_std is not None:
                    try:
                        fixed_display_value = int(fixed_std)
                    except (ValueError, TypeError):
                        fixed_display_value = 0
                else:
                    # Fall back to issues_count when no standardized
                    # fixed count is available.
                    try:
                        fixed_display_value = int(issues_count)
                    except (ValueError, TypeError):
                        fixed_display_value = 0

                # Fixed issues display
                fixed_display: str = f"{_GREEN}{fixed_display_value}{_RESET}"
                ai_applied_value = _get_ai_applied_count(result)
                ai_applied_display: str = f"{_GREEN}{ai_applied_value}{_RESET}"
                ai_verified_value = _get_ai_verified_count(result)
                ai_verified_display: str = f"{_GREEN}{ai_verified_value}{_RESET}"
                ai_unverified_value = _get_ai_unverified_count(result)
                notes_display = (
                    f"{_YELLOW}{ai_unverified_value} unresolved{_RESET}"
                    if ai_unverified_value > 0
                    else ""
                )

                # Remaining issues display
                if isinstance(remaining_count, str):
                    # Unknown count (DEFAULT_REMAINING_COUNT) shows yellow.
                    remaining_display: str = f"{_YELLOW}{remaining_count}{_RESET}"
                else:
                    remaining_display = (
                        f"{_RED}{remaining_count}{_RESET}"
                        if remaining_count > 0
                        else f"{_GREEN}{remaining_count}{_RESET}"
                    )

                summary_data.append(
                    [
                        tool_display,
                        status_display,
                        fixed_display,
                        ai_applied_display,
                        ai_verified_display,
                        remaining_display,
                        notes_display,
                    ],
                )
            else:  # check
                if is_skipped:
                    summary_data.append(
                        [
                            tool_display,
                            f"{_YELLOW}⏭️ SKIP{_RESET}",
                            "-",  # Issues
                            f"{_YELLOW}{skip_reason}{_RESET}" if skip_reason else "",
                        ],
                    )
                    continue

                # Check if this is an execution failure (timeout/error)
                result_output = getattr(result, "output", "") or ""

                has_execution_failure = result_output and (
                    "timeout" in result_output.lower()
                    or "error processing" in result_output.lower()
                    or "tool execution failed" in result_output.lower()
                )

                notes_display = ""

                # Check for framework deferral pattern in output
                if (
                    result_output
                    and result_output.startswith("SKIPPED:")
                    and "detected" in result_output
                ):
                    notes_display = f"{_YELLOW}deferred to framework checker{_RESET}"

                # Surface stale/expired suppression counts for security tools
                ai_meta = getattr(result, "ai_metadata", None)
                if not isinstance(ai_meta, dict):
                    ai_meta = {}
                suppressions = ai_meta.get("suppressions", [])
                if suppressions:
                    stale = sum(
                        1
                        for s in suppressions
                        if isinstance(s, dict) and s.get("status") == "stale"
                    )
                    expired = sum(
                        1
                        for s in suppressions
                        if isinstance(s, dict) and s.get("status") == "expired"
                    )
                    parts: list[str] = []
                    if expired:
                        parts.append(f"{expired} expired")
                    if stale:
                        parts.append(f"{stale} stale")
                    if parts:
                        # Suppression notes overwrite the deferral note
                        # set above when both apply.
                        notes_display = (
                            f"{_YELLOW}{', '.join(parts)} suppression(s){_RESET}"
                        )

                if (has_execution_failure and issues_count == 0) or (
                    not success and issues_count == 0
                ):
                    # Failed with no reported issues — treat as an
                    # execution error rather than a clean pass.
                    status_display = f"{_RED}❌ FAIL{_RESET}"
                    issues_display = f"{_RED}ERROR{_RESET}"
                else:
                    status_display = (
                        f"{_GREEN}✅ PASS{_RESET}"
                        if (success and issues_count == 0)
                        else f"{_RED}❌ FAIL{_RESET}"
                    )
                    issues_display = (
                        f"{_GREEN}{issues_count}{_RESET}"
                        if issues_count == 0
                        else f"{_RED}{issues_count}{_RESET}"
                    )

                summary_data.append(
                    [
                        tool_display,
                        status_display,
                        issues_display,
                        notes_display,
                    ],
                )

        # Set headers based on action
        # Use plain headers to avoid ANSI/emojis width misalignment
        headers: list[str]
        if action == Action.TEST:
            headers = [
                "Tool",
                "Status",
                "Passed",
                "Failed",
                "Skipped",
                "Total",
                "Duration",
                "Notes",
            ]
        elif action == Action.FIX:
            headers = [
                "Tool",
                "Status",
                "Fixed",
                "AI-Applied",
                "AI-Resolved",
                "Remaining",
                "Notes",
            ]
        else:
            headers = ["Tool", "Status", "Issues", "Notes"]

        # Render with plain values to ensure proper alignment across terminals
        table: str = tabulate(
            tabular_data=summary_data,
            headers=headers,
            tablefmt="grid",
            stralign="left",
            disable_numparse=True,
        )
        console_output_func(text=table)
        console_output_func(text="")

    except ImportError:
        # Fallback if tabulate not available
        console_output_func(text="Summary table requires tabulate package")

521 

522 

def print_totals_table(
    console_output_func: Callable[..., None],
    action: Action,
    total_issues: int = 0,
    total_fixed: int = 0,
    total_remaining: int = 0,
    affected_files: int = 0,
    severity_errors: int = 0,
    severity_warnings: int = 0,
    severity_info: int = 0,
    total_ai_applied: int = 0,
    total_ai_verified: int = 0,
) -> None:
    """Print a totals summary table for the run.

    Args:
        console_output_func: Function to output text to console.
        action: The action being performed.
        total_issues: Total number of issues found (CHECK/TEST mode).
        total_fixed: Total number of native-tool issues fixed (FIX mode).
        total_remaining: Total number of remaining issues (FIX mode).
        affected_files: Number of unique files with issues.
        severity_errors: Number of issues at ERROR severity.
        severity_warnings: Number of issues at WARNING severity.
        severity_info: Number of issues at INFO severity.
        total_ai_applied: Total number of AI-applied fixes (FIX mode).
        total_ai_verified: Total number of AI-verified fixes (FIX mode).
    """
    try:
        import click
        from tabulate import tabulate
    except ImportError:
        # Fallback if tabulate (or click) is not available.
        console_output_func(text="Totals table requires tabulate package")
        return

    # Styled section header.
    console_output_func(
        text=click.style("\U0001f4ca TOTALS", fg="cyan", bold=True),
    )

    if action == Action.FIX:
        rows: list[list[str | int]] = [
            ["Fixed Issues (Native)", total_fixed],
            ["AI Applied Fixes", total_ai_applied],
            ["AI Resolved Fixes", total_ai_verified],
            ["Total Resolved", total_fixed + total_ai_verified],
            ["Remaining Issues", total_remaining],
            ["Affected Files", affected_files],
        ]
    else:
        rows = [["Total Issues", total_issues]]
        if total_issues > 0:
            # Severity breakdown is only shown when issues exist.
            rows.append([" Errors", severity_errors])
            rows.append([" Warnings", severity_warnings])
            rows.append([" Info", severity_info])
        rows.append(["Affected Files", affected_files])

    console_output_func(
        text=tabulate(
            tabular_data=rows,
            headers=["Metric", "Count"],
            tablefmt="grid",
            stralign="left",
            disable_numparse=True,
        ),
    )
    console_output_func(text="")