Coverage for lintro/tools/definitions/pytest.py: 69%

125 statements

« prev     ^ index     » next       coverage.py v7.13.0, created at 2026-04-03 18:53 +0000

"""Pytest tool definition.

Pytest is a mature full-featured Python testing tool that helps you write
better programs. It supports various testing patterns, fixtures, parametrization,
and provides extensive plugin support for customization.
"""

7 

8from __future__ import annotations 

9 

10import subprocess # nosec B404 - used safely with shell disabled 

11from dataclasses import dataclass, field 

12from typing import Any 

13 

14from loguru import logger 

15 

16from lintro._tool_versions import get_min_version 

17from lintro.enums.tool_name import ToolName 

18from lintro.enums.tool_type import ToolType 

19from lintro.models.core.tool_result import ToolResult 

20from lintro.plugins.base import BaseToolPlugin 

21from lintro.plugins.protocol import ToolDefinition 

22from lintro.plugins.registry import register_tool 

23from lintro.tools.implementations.pytest.output import ( 

24 load_file_patterns_from_config, 

25 load_pytest_config, 

26) 

27from lintro.tools.implementations.pytest.pytest_command_builder import ( 

28 build_check_command, 

29) 

30from lintro.tools.implementations.pytest.pytest_config import PytestConfiguration 

31from lintro.tools.implementations.pytest.pytest_error_handler import PytestErrorHandler 

32from lintro.tools.implementations.pytest.pytest_executor import PytestExecutor 

33from lintro.tools.implementations.pytest.pytest_output_processor import ( 

34 parse_pytest_output_with_fallback, 

35) 

36from lintro.tools.implementations.pytest.pytest_result_processor import ( 

37 PytestResultProcessor, 

38) 

39from lintro.utils.path_utils import load_lintro_ignore 

40 

# Constants for pytest configuration
PYTEST_DEFAULT_TIMEOUT: int = 300  # 5 minutes for test runs
# Scheduling priority relative to other lintro tools (higher runs earlier).
PYTEST_DEFAULT_PRIORITY: int = 90
# Default filename globs used for test discovery (pytest's conventional names).
PYTEST_FILE_PATTERNS: list[str] = ["test_*.py", "*_test.py"]

45 

46 

@register_tool
@dataclass
class PytestPlugin(BaseToolPlugin):
    """Pytest test runner plugin.

    This plugin integrates Pytest with Lintro for running Python tests
    and collecting test results.

    Attributes:
        pytest_config: Pytest-specific configuration.
        executor: Test execution handler.
        result_processor: Result processing handler.
        error_handler: Error handling handler.
    """

    # Pytest-specific components. The executor/processors are wired in
    # __post_init__ because they need a reference back to this instance.
    pytest_config: PytestConfiguration = field(default_factory=PytestConfiguration)
    executor: PytestExecutor | None = field(default=None, init=False)
    result_processor: PytestResultProcessor | None = field(default=None, init=False)
    error_handler: PytestErrorHandler | None = field(default=None, init=False)

    # Internal storage for file patterns loaded from pytest's own config
    _file_patterns_from_config: list[str] = field(default_factory=list, init=False)

    def __post_init__(self) -> None:
        """Initialize plugin with pytest-specific components."""
        super().__post_init__()

        # Merge lintro ignore patterns into the exclude list without duplicates.
        for pattern in load_lintro_ignore():
            if pattern not in self.exclude_patterns:
                self.exclude_patterns.append(pattern)

        # Load pytest configuration and any file patterns it declares.
        pytest_config = load_pytest_config()
        config_file_patterns = load_file_patterns_from_config(pytest_config)
        if config_file_patterns:
            self._file_patterns_from_config = config_file_patterns

        # Apply any additional config options from pytest_config.
        if pytest_config and "options" in pytest_config:
            self.options.update(pytest_config.get("options", {}))

        # Initialize the components with a tool reference.
        self.executor = PytestExecutor(
            config=self.pytest_config,
            tool=self,
        )
        self.result_processor = PytestResultProcessor(
            self.pytest_config,
            self.definition.name,
        )
        self.error_handler = PytestErrorHandler(self.definition.name)

    @property
    def definition(self) -> ToolDefinition:
        """Return the tool definition.

        Returns:
            ToolDefinition containing tool metadata.
        """
        return ToolDefinition(
            name="pytest",
            description="Mature full-featured Python testing tool",
            can_fix=False,
            tool_type=ToolType.TEST_RUNNER,
            file_patterns=PYTEST_FILE_PATTERNS,
            priority=PYTEST_DEFAULT_PRIORITY,
            conflicts_with=[],
            native_configs=[
                "pytest.ini",
                "pyproject.toml",
                "setup.cfg",
                "tox.ini",
                "conftest.py",
            ],
            version_command=["pytest", "--version"],
            min_version=get_min_version(ToolName.PYTEST),
            default_options={
                "timeout": PYTEST_DEFAULT_TIMEOUT,
                "verbose": False,
                "capture": None,
                "markers": None,
                "keywords": None,
                "maxfail": None,
                "exitfirst": False,
                "last_failed": False,
                "collect_only": False,
            },
            default_timeout=PYTEST_DEFAULT_TIMEOUT,
        )

    def set_options(self, **kwargs: Any) -> None:
        """Set pytest-specific options.

        Args:
            **kwargs: Option key-value pairs to set.

        Delegates to PytestConfiguration for option management and validation.
        Forwards unrecognized options (like timeout) to the base class.
        """
        # The keys of __dataclass_fields__ are exactly the field names, so no
        # need to iterate .values() and read .name from each Field object.
        config_fields = set(self.pytest_config.__dataclass_fields__)
        pytest_options = {k: v for k, v in kwargs.items() if k in config_fields}
        base_options = {k: v for k, v in kwargs.items() if k not in config_fields}

        # Set pytest-specific options.
        self.pytest_config.set_options(**pytest_options)

        # Forward unrecognized options (like timeout) to base class.
        if base_options:
            super().set_options(**base_options)

        # Mirror pytest options on the parent class (backward compatibility).
        super().set_options(**self.pytest_config.get_options_dict())

    def _parse_output(
        self,
        output: str,
        return_code: int,
        junitxml_path: str | None = None,
        subprocess_start_time: float | None = None,
    ) -> list[Any]:
        """Parse pytest output into issues.

        Args:
            output: Raw output from pytest.
            return_code: Return code from pytest.
            junitxml_path: Optional path to JUnit XML file (from auto_junitxml).
            subprocess_start_time: Optional Unix timestamp when subprocess started.

        Returns:
            list: Parsed test failures and errors.
        """
        # Only copy the options dict when we must inject the junitxml path;
        # otherwise pass self.options through unmodified.
        if junitxml_path:
            options = self.options.copy()
            options["junitxml"] = junitxml_path
        else:
            options = self.options

        return parse_pytest_output_with_fallback(
            output=output,
            return_code=return_code,
            options=options,
            subprocess_start_time=subprocess_start_time,
        )

    @staticmethod
    def _resolve_targets(paths: list[str]) -> list[str]:
        """Normalize target paths, defaulting to the ``tests`` directory.

        Args:
            paths: Raw paths supplied by the caller (may be None or empty).

        Returns:
            list[str]: Paths to hand to pytest for discovery.
        """
        # When no specific files are provided (or just "."), default to the
        # "tests" directory so pytest discovers the whole suite, matching
        # pytest conventions. `not paths` covers both None and empty list.
        if not paths or paths == ["."]:
            return ["tests"]
        return paths

    def _dispatch_special_mode(self, target_files: list[str]) -> ToolResult | None:
        """Handle special pytest modes that do not run tests.

        Args:
            target_files: Normalized target paths for modes that need them.

        Returns:
            ToolResult | None: Result if a special mode handled the request,
            otherwise None (normal test execution should proceed).
        """
        # Imported lazily, matching the original deferred import in check().
        from lintro.enums.pytest_enums import PytestSpecialMode
        from lintro.tools.implementations.pytest.pytest_handlers import (
            handle_check_plugins,
            handle_collect_only,
            handle_fixture_info,
            handle_list_fixtures,
            handle_list_markers,
            handle_list_plugins,
            handle_parametrize_help,
        )

        special_mode = self.pytest_config.get_special_mode()
        if not special_mode:
            return None

        mode_value = self.pytest_config.get_special_mode_value(special_mode)

        if special_mode == PytestSpecialMode.LIST_PLUGINS:
            return handle_list_plugins(self)
        if special_mode == PytestSpecialMode.CHECK_PLUGINS:
            return handle_check_plugins(self, mode_value)
        if special_mode == PytestSpecialMode.COLLECT_ONLY:
            return handle_collect_only(self, target_files)
        if special_mode == PytestSpecialMode.LIST_FIXTURES:
            return handle_list_fixtures(self, target_files)
        if special_mode == PytestSpecialMode.FIXTURE_INFO:
            return handle_fixture_info(self, mode_value, target_files)
        if special_mode == PytestSpecialMode.LIST_MARKERS:
            return handle_list_markers(self)
        if special_mode == PytestSpecialMode.PARAMETRIZE_HELP:
            return handle_parametrize_help(self)
        # Unknown special mode: fall through to normal execution, as before.
        return None

    def _effective_timeout(self) -> int:
        """Resolve the configured timeout to an integer number of seconds.

        Returns:
            int: Timeout in seconds, falling back to PYTEST_DEFAULT_TIMEOUT.
        """
        timeout_opt = self.options.get("timeout", PYTEST_DEFAULT_TIMEOUT)
        if isinstance(timeout_opt, int):
            return timeout_opt
        if timeout_opt is not None:
            return int(str(timeout_opt))
        return PYTEST_DEFAULT_TIMEOUT

    def _component_error(self, message: str) -> ToolResult:
        """Build a failed ToolResult for an uninitialized internal component.

        Args:
            message: Human-readable description of the failure.

        Returns:
            ToolResult: Failed result with zero issues counted.
        """
        return ToolResult(
            name=self.definition.name,
            success=False,
            output=message,
            issues_count=0,
        )

    def check(self, paths: list[str], options: dict[str, object]) -> ToolResult:
        """Run pytest on specified files.

        Args:
            paths: List of file or directory paths to test.
            options: Runtime options that override defaults.

        Returns:
            ToolResult: Results from pytest execution.
        """
        import time

        # Merge runtime options over defaults.
        # NOTE(review): merged_options is never consumed below — build_check_command
        # and the executor read self.options directly. Confirm whether runtime
        # overrides are intentionally ignored here or should be applied.
        merged_options = dict(self.options)
        merged_options.update(options)

        # Check version requirements before doing any work.
        version_result = self._verify_tool_version()
        if version_result is not None:
            return version_result

        target_files = self._resolve_targets(paths)

        # Handle special modes first (these don't run tests).
        special_result = self._dispatch_special_mode(target_files)
        if special_result is not None:
            return special_result

        # Normal test execution.
        cmd, auto_junitxml_path = build_check_command(self, target_files, fix=False)

        logger.debug(f"Running pytest with command: {' '.join(cmd)}")
        logger.debug(f"Target files: {target_files}")

        if self.executor is None:
            return self._component_error("Pytest executor not initialized")

        total_available_tests = self.executor.prepare_test_execution(target_files)

        # Display run configuration summary.
        self.executor.display_run_config(total_available_tests, target_files)

        try:
            # Record start time to filter out stale junitxml files.
            subprocess_start_time = time.time()

            # Execute tests using executor.
            success, output, return_code = self.executor.execute_tests(cmd)

            # Parse output into issue objects.
            issues = self._parse_output(
                output,
                return_code,
                auto_junitxml_path,
                subprocess_start_time,
            )

            if self.result_processor is None:
                return self._component_error(
                    "Pytest result processor not initialized",
                )

            summary_data, all_issues = self.result_processor.process_test_results(
                output=output,
                return_code=return_code,
                issues=issues,
                total_available_tests=total_available_tests,
            )

            # Build the final result using the result processor.
            return self.result_processor.build_result(
                success,
                summary_data,
                all_issues,
                raw_output=output,
            )

        except subprocess.TimeoutExpired:
            timeout_val = self._effective_timeout()
            if self.error_handler is None:
                return self._component_error(
                    f"Pytest execution timed out ({timeout_val}s)",
                )
            return self.error_handler.handle_timeout_error(
                timeout_val,
                cmd,
                initial_count=0,
            )
        except (OSError, ValueError, RuntimeError) as e:
            if self.error_handler is None:
                return self._component_error(f"Pytest execution failed: {e}")
            return self.error_handler.handle_execution_error(e, cmd)

    def fix(self, paths: list[str], options: dict[str, object]) -> ToolResult:
        """Pytest does not support fixing issues.

        Args:
            paths: List of file paths (unused).
            options: Runtime options (unused).

        Returns:
            ToolResult: Never returns, always raises NotImplementedError.

        Raises:
            NotImplementedError: pytest does not support fixing issues.
        """
        raise NotImplementedError(
            "Pytest cannot automatically fix issues. It only runs tests - "
            "fix test failures by modifying your code or tests directly.",
        )