Coverage for tests / unit / tools / test_common_behaviors.py: 92%
105 statements
« prev ^ index » next coverage.py v7.13.0, created at 2026-04-03 18:53 +0000
1"""Parametrized tests for common tool plugin behaviors.
3This module consolidates duplicate tests from individual tool test files,
4following DRY principles. Tests here cover common patterns that all tools share.
5"""
from __future__ import annotations

import subprocess
from contextlib import ExitStack
from typing import TYPE_CHECKING, Any, cast
from unittest.mock import MagicMock, patch

import pytest
from assertpy import assert_that

from lintro.enums.tool_name import ToolName
from lintro.plugins.base import BaseToolPlugin
19if TYPE_CHECKING:
20 from collections.abc import Callable
23# =============================================================================
24# Helper function for dynamic plugin instantiation
25# =============================================================================
def _get_plugin_instance(plugin_class_path: str) -> BaseToolPlugin:
    """Import the named plugin class and return a fresh instance of it.

    Args:
        plugin_class_path: Dotted module path ending in the plugin class name.

    Returns:
        An instance of the plugin class.
    """
    import importlib

    mod_name, cls_name = plugin_class_path.rsplit(".", 1)
    # Safe: mod_name comes from hardcoded test constants, not user input
    mod = importlib.import_module(mod_name)  # nosemgrep: non-literal-import
    plugin_cls = getattr(mod, cls_name)
    instance = plugin_cls()
    return cast(BaseToolPlugin, instance)
# =============================================================================
# Test Data: Tools that support check operation
# =============================================================================

# Tools with their check success configurations.
# Each param is (plugin_class_path, tool_name, sample_file, success_output),
# where success_output is the (success, output) pair that the mocked
# _run_subprocess returns for a clean run.
TOOL_CHECK_SUCCESS_CONFIGS = [
    pytest.param(
        "lintro.tools.definitions.black.BlackPlugin",
        ToolName.BLACK,
        "test.py",
        (True, "All done! 1 file left unchanged."),
        id="black",
    ),
]
# Tools with their check failure configurations.
# Each param is (plugin_class_path, tool_name, sample_file, failure_output),
# where failure_output is the (success, output) pair that the mocked
# _run_subprocess returns when the tool reports issues.
TOOL_CHECK_FAILURE_CONFIGS = [
    pytest.param(
        "lintro.tools.definitions.black.BlackPlugin",
        ToolName.BLACK,
        "test.py",
        (False, "would reformat test.py\nOh no! 1 file would be reformatted."),
        id="black",
    ),
]
# Tools with their timeout configurations.
# Each param is (plugin_class_path, tool_name, executable_cmd); executable_cmd
# is the argv list used to build the subprocess.TimeoutExpired side effect.
TOOL_TIMEOUT_CONFIGS = [
    pytest.param(
        "lintro.tools.definitions.black.BlackPlugin",
        ToolName.BLACK,
        ["black"],
        id="black",
    ),
]
# Tools that cannot fix issues (their fix() raises NotImplementedError).
# Each param is (plugin_class_path, error_match_pattern); the pattern is
# matched against the NotImplementedError message via pytest.raises(match=...).
TOOLS_THAT_CANNOT_FIX = [
    pytest.param(
        "lintro.tools.definitions.hadolint.HadolintPlugin",
        "cannot automatically fix",
        id="hadolint",
    ),
    pytest.param(
        "lintro.tools.definitions.yamllint.YamllintPlugin",
        "cannot automatically fix",
        id="yamllint",
    ),
    pytest.param(
        # NOTE: markdownlint uses a slightly different message wording.
        "lintro.tools.definitions.markdownlint.MarkdownlintPlugin",
        "cannot fix issues",
        id="markdownlint",
    ),
    pytest.param(
        "lintro.tools.definitions.mypy.MypyPlugin",
        "cannot automatically fix",
        id="mypy",
    ),
    pytest.param(
        "lintro.tools.definitions.pytest.PytestPlugin",
        "cannot automatically fix",
        id="pytest",
    ),
]
# Tools with early skip behavior.
# Each param is (plugin_class_path, tool_name); used to verify that check()
# returns the prepared early_result when the execution context sets
# should_skip=True.
TOOL_EARLY_SKIP_CONFIGS = [
    pytest.param(
        "lintro.tools.definitions.black.BlackPlugin",
        ToolName.BLACK,
        id="black",
    ),
    pytest.param(
        "lintro.tools.definitions.hadolint.HadolintPlugin",
        ToolName.HADOLINT,
        id="hadolint",
    ),
]
# =============================================================================
# Fixtures
# =============================================================================


@pytest.fixture
def mock_execution_context() -> Callable[..., MagicMock]:
    """Provide a factory that builds mock ExecutionContext objects.

    Returns:
        A factory function that creates configured MagicMock objects.
    """

    def _create(
        files: list[str] | None = None,
        rel_files: list[str] | None = None,
        cwd: str = "/tmp",
        timeout: int = 30,
        should_skip: bool = False,
        early_result: Any = None,
    ) -> MagicMock:
        # Build a bare mock and attach the attributes the plugins read;
        # None for the file lists is normalized to an empty list.
        context = MagicMock()
        context.files = [] if files is None else files
        context.rel_files = [] if rel_files is None else rel_files
        context.cwd = cwd
        context.timeout = timeout
        context.should_skip = should_skip
        context.early_result = early_result
        return context

    return _create
# =============================================================================
# Test: Check Success (No Issues)
# =============================================================================


@pytest.mark.parametrize(
    ("plugin_class_path", "expected_name", "sample_file", "subprocess_result"),
    TOOL_CHECK_SUCCESS_CONFIGS,
)
def test_check_success_no_issues(
    plugin_class_path: str,
    expected_name: ToolName,
    sample_file: str,
    subprocess_result: tuple[bool, str],
    mock_execution_context: Callable[..., MagicMock],
) -> None:
    """Check returns success when no issues found.

    This test is parametrized across multiple tools to verify the common
    behavior pattern of returning success with zero issues.

    Args:
        plugin_class_path: Full module path to the plugin class.
        expected_name: The expected tool name.
        sample_file: Sample file for testing.
        subprocess_result: Mock result tuple (success, output).
        mock_execution_context: Factory for mock execution contexts.
    """
    plugin = _get_plugin_instance(plugin_class_path)
    ctx = mock_execution_context(
        files=[sample_file],
        rel_files=[sample_file],
    )

    # ExitStack replaces the previous three-way nested if/else (which
    # duplicated the plugin.check() call) and matches the pattern used by
    # test_check_timeout_handling.
    with ExitStack() as stack:
        stack.enter_context(
            patch.object(plugin, "_prepare_execution", return_value=ctx),
        )
        stack.enter_context(
            patch.object(plugin, "_run_subprocess", return_value=subprocess_result),
        )
        stack.enter_context(
            patch.object(
                plugin,
                "_get_executable_command",
                return_value=[str(expected_name).lower()],
            ),
        )
        # Neutralize optional tool-specific helpers (e.g. Black's) so only
        # the common check flow is exercised; each is patched only when the
        # plugin actually defines it.
        for helper_name in (
            "_build_common_args",
            "_build_config_args",
            "_check_line_length_violations",
        ):
            if hasattr(plugin, helper_name):
                stack.enter_context(
                    patch.object(plugin, helper_name, return_value=[]),
                )
        result = plugin.check([f"/tmp/{sample_file}"], {})

    assert_that(result.success).is_true()
    assert_that(result.issues_count).is_equal_to(0)
    assert_that(result.name).is_equal_to(expected_name)
# =============================================================================
# Test: Check Failure (With Issues)
# =============================================================================


@pytest.mark.parametrize(
    ("plugin_class_path", "expected_name", "sample_file", "subprocess_result"),
    TOOL_CHECK_FAILURE_CONFIGS,
)
def test_check_failure_with_issues(
    plugin_class_path: str,
    expected_name: ToolName,
    sample_file: str,
    subprocess_result: tuple[bool, str],
    mock_execution_context: Callable[..., MagicMock],
) -> None:
    """Check returns failure when issues found.

    This test is parametrized across multiple tools to verify the common
    behavior pattern of returning failure when issues are detected.

    Args:
        plugin_class_path: Full module path to the plugin class.
        expected_name: The expected tool name.
        sample_file: Sample file for testing.
        subprocess_result: Mock result tuple (success, output).
        mock_execution_context: Factory for mock execution contexts.
    """
    plugin = _get_plugin_instance(plugin_class_path)
    ctx = mock_execution_context(
        files=[sample_file],
        rel_files=[sample_file],
    )

    # ExitStack replaces the previous nested if/elif/else (which duplicated
    # the plugin.check() call four times) and matches the pattern used by
    # test_check_timeout_handling.
    with ExitStack() as stack:
        stack.enter_context(
            patch.object(plugin, "_prepare_execution", return_value=ctx),
        )
        stack.enter_context(
            patch.object(plugin, "_run_subprocess", return_value=subprocess_result),
        )
        stack.enter_context(
            patch.object(
                plugin,
                "_get_executable_command",
                return_value=[str(expected_name).lower()],
            ),
        )
        # Neutralize optional tool-specific helpers so only the common check
        # flow is exercised; each is patched only when the plugin defines it.
        for helper_name in (
            "_build_common_args",
            "_build_config_args",
            "_check_line_length_violations",
        ):
            if hasattr(plugin, helper_name):
                stack.enter_context(
                    patch.object(plugin, helper_name, return_value=[]),
                )
        result = plugin.check([f"/tmp/{sample_file}"], {})

    assert_that(result.success).is_false()
    assert_that(result.issues_count).is_greater_than(0)
293# =============================================================================
294# Test: Check Timeout Handling
295# =============================================================================
298def _create_mock_timeout_result(tool_name: str) -> MagicMock:
299 """Create a mock timeout result for testing.
301 Args:
302 tool_name: Name of the tool.
304 Returns:
305 A mock ToolResult representing a timeout.
306 """
307 result = MagicMock()
308 result.success = False
309 result.output = f"{tool_name} execution timed out (30s limit exceeded)."
310 result.issues_count = 1
311 return result
@pytest.mark.parametrize(
    ("plugin_class_path", "expected_name", "executable_cmd"),
    TOOL_TIMEOUT_CONFIGS,
)
def test_check_timeout_handling(
    plugin_class_path: str,
    expected_name: ToolName,
    executable_cmd: list[str],
    mock_execution_context: Callable[..., MagicMock],
) -> None:
    """Check handles timeout correctly across tools.

    This test is parametrized across multiple tools to verify that
    timeout exceptions are properly caught and handled.

    Args:
        plugin_class_path: Full module path to the plugin class.
        expected_name: The expected tool name.
        executable_cmd: The executable command list.
        mock_execution_context: Factory for mock execution contexts.
    """
    plugin = _get_plugin_instance(plugin_class_path)
    ctx = mock_execution_context(
        files=["test_file"],
        rel_files=["test_file"],
    )

    timeout_result = _create_mock_timeout_result(str(expected_name).lower())

    # ExitStack (now imported at module level instead of mid-function) holds
    # both the mandatory patches and the optional tool-specific ones.
    with ExitStack() as stack:
        stack.enter_context(
            patch.object(plugin, "_prepare_execution", return_value=ctx),
        )
        stack.enter_context(
            patch.object(
                plugin,
                "_run_subprocess",
                side_effect=subprocess.TimeoutExpired(cmd=executable_cmd, timeout=30),
            ),
        )
        stack.enter_context(
            patch.object(
                plugin,
                "_get_executable_command",
                return_value=executable_cmd,
            ),
        )
        # Optional helpers are patched only when the plugin defines them;
        # _create_timeout_result is stubbed to return the canned mock result.
        for helper_name, helper_return in (
            ("_build_common_args", []),
            ("_build_config_args", []),
            ("_create_timeout_result", timeout_result),
        ):
            if hasattr(plugin, helper_name):
                stack.enter_context(
                    patch.object(plugin, helper_name, return_value=helper_return),
                )
        result = plugin.check(["/tmp/test_file"], {})

    assert_that(result.success).is_false()
    assert_that(result.output).is_not_none()
    assert_that(result.output.lower() if result.output else "").contains("timed out")
# =============================================================================
# Test: Fix Raises NotImplementedError
# =============================================================================


@pytest.mark.parametrize(
    ("plugin_class_path", "error_match"),
    TOOLS_THAT_CANNOT_FIX,
)
def test_fix_raises_not_implemented(
    plugin_class_path: str,
    error_match: str,
) -> None:
    """Verify fix() raises NotImplementedError for tools without fix support.

    This test is parametrized across tools that do not support
    automatic fixing of issues.

    Args:
        plugin_class_path: Full module path to the plugin class.
        error_match: Pattern expected in the error message.
    """
    fixless_plugin = _get_plugin_instance(plugin_class_path)

    # fix() must refuse to run; the message is matched against error_match.
    with pytest.raises(NotImplementedError, match=error_match):
        fixless_plugin.fix([], {})
# =============================================================================
# Test: Check Early Return When Should Skip
# =============================================================================


@pytest.mark.parametrize(
    ("plugin_class_path", "expected_name"),
    TOOL_EARLY_SKIP_CONFIGS,
)
def test_check_early_return_when_should_skip(
    plugin_class_path: str,
    expected_name: ToolName,
    mock_execution_context: Callable[..., MagicMock],
) -> None:
    """Check short-circuits to the prepared early result when skipping.

    This test is parametrized across tools to verify that the early
    skip logic is implemented consistently.

    Args:
        plugin_class_path: Full module path to the plugin class.
        expected_name: The expected tool name.
        mock_execution_context: Factory for mock execution contexts.
    """
    plugin = _get_plugin_instance(plugin_class_path)

    # Canned successful result that _prepare_execution hands back when the
    # context says to skip execution entirely.
    skip_result = MagicMock()
    skip_result.success = True
    skip_result.issues_count = 0

    skip_ctx = mock_execution_context(
        should_skip=True,
        early_result=skip_result,
    )

    with patch.object(plugin, "_prepare_execution", return_value=skip_ctx):
        outcome = plugin.check(["/tmp"], {})

    assert_that(outcome.success).is_true()