Coverage for tests / unit / ai / test_pipeline.py: 100%
184 statements
« prev ^ index » next coverage.py v7.13.0, created at 2026-04-03 18:53 +0000
1"""Tests for the AI fix pipeline (lintro.ai.pipeline.run_fix_pipeline)."""
3from __future__ import annotations
5from pathlib import Path
6from unittest.mock import MagicMock, patch
8from assertpy import assert_that
10from lintro.ai.config import AIConfig
11from lintro.ai.models import AIFixSuggestion
12from lintro.ai.pipeline import run_fix_pipeline
13from lintro.ai.validation import ValidationResult
14from lintro.models.core.tool_result import ToolResult
15from lintro.parsers.base_issue import BaseIssue
16from tests.unit.ai.conftest import MockAIProvider, MockIssue
18# ---------------------------------------------------------------------------
19# Helpers
20# ---------------------------------------------------------------------------
# Dotted module path used as the prefix for every @patch target below, so the
# mocks intercept the names as looked up inside lintro.ai.pipeline itself.
_PIPELINE = "lintro.ai.pipeline"
def _make_suggestion(
    *,
    file: str = "src/main.py",
    line: int = 1,
    code: str = "E501",
    tool_name: str = "ruff",
    risk_level: str = "",
    confidence: str = "high",
    explanation: str = "fix",
) -> AIFixSuggestion:
    """Build an ``AIFixSuggestion`` with test-friendly defaults.

    ``tool_name`` is not a constructor field, so it is attached to the
    instance after construction.
    """
    fields = {
        "file": file,
        "line": line,
        "code": code,
        "explanation": explanation,
        "risk_level": risk_level,
        "confidence": confidence,
    }
    suggestion = AIFixSuggestion(**fields)
    suggestion.tool_name = tool_name
    return suggestion
def _default_ai_config(**overrides: object) -> AIConfig:
    """Create an enabled ``AIConfig``; keyword overrides replace defaults."""
    merged: dict[str, object] = {
        "enabled": True,
        "max_fix_attempts": 20,
        **overrides,
    }
    return AIConfig(**merged)  # type: ignore[arg-type]
def _make_result(name: str, issues: list[MockIssue]) -> ToolResult:
    """Wrap *issues* in a failed ``ToolResult`` for the named tool."""
    issue_total = len(issues)
    return ToolResult(name=name, success=False, issues_count=issue_total, issues=issues)
def _make_fix_issues(
    result: ToolResult,
    issues: list[MockIssue],
) -> list[tuple[ToolResult, BaseIssue]]:
    """Pair each issue with its owning ``ToolResult``, as the pipeline expects."""
    pairs: list[tuple[ToolResult, BaseIssue]] = []
    for issue in issues:
        pairs.append((result, issue))
    return pairs
72# ---------------------------------------------------------------------------
73# Tests
74# ---------------------------------------------------------------------------
@patch(f"{_PIPELINE}.render_validation")
@patch(f"{_PIPELINE}.render_summary")
@patch(f"{_PIPELINE}.verify_fixes")
@patch(f"{_PIPELINE}.generate_post_fix_summary")
@patch(f"{_PIPELINE}.review_fixes_interactive")
@patch(f"{_PIPELINE}.apply_fixes")
@patch(f"{_PIPELINE}.generate_fixes_from_params")
def test_budget_tracking_across_multiple_tools(
    mock_generate_fixes_from_params,
    mock_apply_fixes,
    mock_review_fixes_interactive,
    mock_generate_post_fix_summary,
    mock_verify_fixes,
    mock_render_summary,
    mock_render_validation,
):
    """When two tools have issues, budget (max_fix_attempts) is consumed correctly."""
    ruff_issues = [
        MockIssue(file="a.py", line=1, code="E501", message="err"),
        MockIssue(file="b.py", line=1, code="E501", message="err"),
    ]
    mypy_issues = [MockIssue(file="c.py", line=1, code="W001", message="err")]

    result_ruff = _make_result("ruff", ruff_issues)
    result_mypy = _make_result("mypy", mypy_issues)
    fix_issues = _make_fix_issues(result_ruff, ruff_issues) + _make_fix_issues(
        result_mypy,
        mypy_issues,
    )

    # One suggestion per issue, delivered per-tool in generation order.
    mock_generate_fixes_from_params.side_effect = [
        [
            _make_suggestion(file="a.py", tool_name="ruff"),
            _make_suggestion(file="b.py", tool_name="ruff"),
        ],
        [_make_suggestion(file="c.py", tool_name="mypy", code="W001")],
    ]
    mock_apply_fixes.return_value = []
    mock_review_fixes_interactive.return_value = (0, 0, [])
    mock_verify_fixes.return_value = ValidationResult()

    run_fix_pipeline(
        fix_issues=fix_issues,
        provider=MockAIProvider(),
        ai_config=_default_ai_config(max_fix_attempts=3),
        logger=MagicMock(),
        output_format="terminal",
        workspace_root=Path("/tmp"),
    )

    assert_that(mock_generate_fixes_from_params.call_count).is_equal_to(2)

    params_per_call = [
        call.args[2] for call in mock_generate_fixes_from_params.call_args_list
    ]
    # First tool sees the full budget of 3.
    assert_that(params_per_call[0].max_issues).is_equal_to(3)
    # Second tool sees the remainder: 3 - 2 (issues consumed by ruff) = 1.
    assert_that(params_per_call[1].max_issues).is_equal_to(1)
@patch(f"{_PIPELINE}.is_safe_style_fix")
@patch(f"{_PIPELINE}.render_validation")
@patch(f"{_PIPELINE}.render_summary")
@patch(f"{_PIPELINE}.verify_fixes")
@patch(f"{_PIPELINE}.generate_post_fix_summary")
@patch(f"{_PIPELINE}.review_fixes_interactive")
@patch(f"{_PIPELINE}.apply_fixes")
@patch(f"{_PIPELINE}.generate_fixes_from_params")
def test_safe_vs_risky_suggestion_splitting(
    mock_generate_fixes_from_params,
    mock_apply_fixes,
    mock_review_fixes_interactive,
    mock_generate_post_fix_summary,
    mock_verify_fixes,
    mock_render_summary,
    mock_render_validation,
    mock_is_safe,
):
    """Suggestions split into safe and risky via is_safe_style_fix."""
    issue = MockIssue(file="a.py", line=1, code="E501", message="err")
    result = _make_result("ruff", [issue])

    safe_fix = _make_suggestion(code="E501", risk_level="safe-style")
    risky_fix = _make_suggestion(code="B101", risk_level="behavioral-risk")

    mock_generate_fixes_from_params.return_value = [safe_fix, risky_fix]
    # Classify purely by the risk_level stamped on each suggestion.
    mock_is_safe.side_effect = lambda suggestion: suggestion.risk_level == "safe-style"
    mock_apply_fixes.return_value = [safe_fix]
    mock_review_fixes_interactive.return_value = (0, 0, [])
    mock_verify_fixes.return_value = ValidationResult()

    run_fix_pipeline(
        fix_issues=_make_fix_issues(result, [issue]),
        provider=MockAIProvider(),
        ai_config=_default_ai_config(auto_apply_safe_fixes=True),
        logger=MagicMock(),
        output_format="json",
        workspace_root=Path("/tmp"),
    )

    # Only the safe suggestion reaches apply_fixes on the fast path.
    applied_batch = mock_apply_fixes.call_args.args[0]
    assert_that(applied_batch).is_length(1)
    assert_that(applied_batch[0].risk_level).is_equal_to("safe-style")
@patch(f"{_PIPELINE}.render_validation")
@patch(f"{_PIPELINE}.render_summary")
@patch(f"{_PIPELINE}.verify_fixes")
@patch(f"{_PIPELINE}.generate_post_fix_summary")
@patch(f"{_PIPELINE}.review_fixes_interactive")
@patch(f"{_PIPELINE}.apply_fixes")
@patch(f"{_PIPELINE}.generate_fixes_from_params")
def test_auto_apply_fast_path_json_mode(
    mock_generate_fixes_from_params,
    mock_apply_fixes,
    mock_review_fixes_interactive,
    mock_generate_post_fix_summary,
    mock_verify_fixes,
    mock_render_summary,
    mock_render_validation,
):
    """Safe fixes auto-apply when auto_apply_safe_fixes + json."""
    issue = MockIssue(file="a.py", line=1, code="E501", message="err")
    result = _make_result("ruff", [issue])
    safe_fix = _make_suggestion(code="E501", risk_level="safe-style", confidence="high")

    mock_generate_fixes_from_params.return_value = [safe_fix]
    mock_apply_fixes.return_value = [safe_fix]
    mock_verify_fixes.return_value = ValidationResult()

    run_fix_pipeline(
        fix_issues=_make_fix_issues(result, [issue]),
        provider=MockAIProvider(),
        ai_config=_default_ai_config(auto_apply_safe_fixes=True, auto_apply=False),
        logger=MagicMock(),
        output_format="json",
        workspace_root=Path("/tmp"),
    )

    assert_that(mock_apply_fixes.call_count).is_greater_than_or_equal_to(1)
    assert_that(mock_apply_fixes.call_args.kwargs["auto_apply"]).is_true()
    # json output is non-interactive, so the review step must be skipped.
    mock_review_fixes_interactive.assert_not_called()
@patch(f"{_PIPELINE}.render_validation")
@patch(f"{_PIPELINE}.render_summary")
@patch(f"{_PIPELINE}.verify_fixes")
@patch(f"{_PIPELINE}.generate_post_fix_summary")
@patch(f"{_PIPELINE}.review_fixes_interactive")
@patch(f"{_PIPELINE}.apply_fixes")
@patch(f"{_PIPELINE}.generate_fixes_from_params")
@patch(f"{_PIPELINE}.sys.stdin.isatty", return_value=True)
def test_interactive_review_path(
    _mock_isatty,
    mock_generate_fixes_from_params,
    mock_apply_fixes,
    mock_review_fixes_interactive,
    mock_generate_post_fix_summary,
    mock_verify_fixes,
    mock_render_summary,
    mock_render_validation,
):
    """When not json and not auto_apply, review_fixes_interactive is called."""
    issue = MockIssue(file="a.py", line=1, code="B101", message="err")
    result = _make_result("ruff", [issue])
    risky_fix = _make_suggestion(code="B101", risk_level="behavioral-risk")

    mock_generate_fixes_from_params.return_value = [risky_fix]
    mock_review_fixes_interactive.return_value = (1, 0, [risky_fix])
    mock_verify_fixes.return_value = ValidationResult()

    run_fix_pipeline(
        fix_issues=_make_fix_issues(result, [issue]),
        provider=MockAIProvider(),
        ai_config=_default_ai_config(auto_apply=False, auto_apply_safe_fixes=False),
        logger=MagicMock(),
        output_format="terminal",
        workspace_root=Path("/tmp"),
    )

    assert_that(mock_review_fixes_interactive.call_count).is_equal_to(1)
    reviewed_batch = mock_review_fixes_interactive.call_args.args[0]
    assert_that(reviewed_batch).is_length(1)
    assert_that(reviewed_batch[0].code).is_equal_to("B101")
@patch(f"{_PIPELINE}.render_validation")
@patch(f"{_PIPELINE}.render_summary")
@patch(f"{_PIPELINE}.verify_fixes")
@patch(f"{_PIPELINE}.generate_post_fix_summary")
@patch(f"{_PIPELINE}.review_fixes_interactive")
@patch(f"{_PIPELINE}.apply_fixes")
@patch(f"{_PIPELINE}.generate_fixes_from_params")
def test_no_suggestions_returns_early(
    mock_generate_fixes_from_params,
    mock_apply_fixes,
    mock_review_fixes_interactive,
    mock_generate_post_fix_summary,
    mock_verify_fixes,
    mock_render_summary,
    mock_render_validation,
):
    """Empty generate_fixes_from_params exits without calling apply/review."""
    issue = MockIssue(file="a.py", line=1, code="E501", message="err")
    result = _make_result("ruff", [issue])
    mock_generate_fixes_from_params.return_value = []

    run_fix_pipeline(
        fix_issues=_make_fix_issues(result, [issue]),
        provider=MockAIProvider(),
        ai_config=_default_ai_config(),
        logger=MagicMock(),
        output_format="json",
        workspace_root=Path("/tmp"),
    )

    assert_that(mock_generate_fixes_from_params.call_count).is_equal_to(1)
    # With nothing to apply, every downstream stage is skipped.
    for downstream_mock in (
        mock_apply_fixes,
        mock_review_fixes_interactive,
        mock_verify_fixes,
        mock_generate_post_fix_summary,
    ):
        downstream_mock.assert_not_called()
@patch(f"{_PIPELINE}.render_validation")
@patch(f"{_PIPELINE}.render_summary")
@patch(f"{_PIPELINE}.verify_fixes")
@patch(f"{_PIPELINE}.generate_post_fix_summary")
@patch(f"{_PIPELINE}.review_fixes_interactive")
@patch(f"{_PIPELINE}.apply_fixes")
@patch(f"{_PIPELINE}.generate_fixes_from_params")
def test_post_fix_summary_generation(
    mock_generate_fixes_from_params,
    mock_apply_fixes,
    mock_review_fixes_interactive,
    mock_generate_post_fix_summary,
    mock_verify_fixes,
    mock_render_summary,
    mock_render_validation,
):
    """Applied fixes + non-json -> post_fix_summary is called."""
    issue = MockIssue(file="a.py", line=1, code="B101", message="err")
    result = _make_result("ruff", [issue])
    suggestion = _make_suggestion(code="B101", tool_name="ruff")

    mock_generate_fixes_from_params.return_value = [suggestion]
    mock_apply_fixes.return_value = [suggestion]
    mock_verify_fixes.return_value = ValidationResult(
        verified=1,
        unverified=0,
        verified_by_tool={"ruff": 1},
        unverified_by_tool={"ruff": 0},
    )
    mock_generate_post_fix_summary.return_value = None

    run_fix_pipeline(
        fix_issues=_make_fix_issues(result, [issue]),
        provider=MockAIProvider(),
        ai_config=_default_ai_config(auto_apply=True),
        logger=MagicMock(),
        output_format="terminal",
        workspace_root=Path("/tmp"),
    )

    assert_that(mock_generate_post_fix_summary.call_count).is_equal_to(1)
    summary_kwargs = mock_generate_post_fix_summary.call_args.kwargs
    # The summary must receive the remaining results plus both fix tallies.
    for expected_key in ("remaining_results", "applied", "rejected"):
        assert_that(summary_kwargs).contains_key(expected_key)
@patch(f"{_PIPELINE}.render_validation")
@patch(f"{_PIPELINE}.render_summary")
@patch(f"{_PIPELINE}.verify_fixes")
@patch(f"{_PIPELINE}.generate_post_fix_summary")
@patch(f"{_PIPELINE}.review_fixes_interactive")
@patch(f"{_PIPELINE}.apply_fixes")
@patch(f"{_PIPELINE}.generate_fixes_from_params")
def test_verify_fixes_flow(
    mock_generate_fixes_from_params,
    mock_apply_fixes,
    mock_review_fixes_interactive,
    mock_generate_post_fix_summary,
    mock_verify_fixes,
    mock_render_summary,
    mock_render_validation,
):
    """When fixes are applied, verify_fixes is called with suggestions and by_tool."""
    issue = MockIssue(file="a.py", line=1, code="B101", message="err")
    result = _make_result("ruff", [issue])
    suggestion = _make_suggestion(code="B101", tool_name="ruff")

    mock_generate_fixes_from_params.return_value = [suggestion]
    mock_apply_fixes.return_value = [suggestion]
    mock_verify_fixes.return_value = ValidationResult(
        verified=1,
        unverified=0,
        verified_by_tool={"ruff": 1},
        unverified_by_tool={"ruff": 0},
    )
    mock_generate_post_fix_summary.return_value = None

    run_fix_pipeline(
        fix_issues=_make_fix_issues(result, [issue]),
        provider=MockAIProvider(),
        ai_config=_default_ai_config(auto_apply=True),
        logger=MagicMock(),
        output_format="terminal",
        workspace_root=Path("/tmp"),
    )

    assert_that(mock_verify_fixes.call_count).is_equal_to(1)
    verify_kwargs = mock_verify_fixes.call_args.kwargs
    assert_that(verify_kwargs).contains_key("applied_suggestions")
    assert_that(verify_kwargs).contains_key("by_tool")
    # The exact applied batch must be handed to verification unchanged.
    assert_that(verify_kwargs["applied_suggestions"]).is_equal_to([suggestion])