Coverage for tests / unit / ai / test_orchestrator_fix.py: 100%
107 statements
« prev ^ index » next — coverage.py v7.13.0, created at 2026-04-03 18:53 +0000
1"""Tests for AI orchestrator fix action."""
3from __future__ import annotations
5from pathlib import Path
6from unittest.mock import MagicMock, patch
8from assertpy import assert_that
10from lintro.ai.config import AIConfig
11from lintro.ai.models import AIFixSuggestion
12from lintro.ai.orchestrator import run_ai_enhancement
13from lintro.ai.validation import ValidationResult
14from lintro.config.lintro_config import LintroConfig
15from lintro.enums.action import Action
16from lintro.models.core.tool_result import ToolResult
17from tests.unit.ai.conftest import MockAIProvider, MockIssue
19# ---------------------------------------------------------------------------
20# Fix action tests
21# ---------------------------------------------------------------------------
@patch("lintro.ai.orchestrator.require_ai")
@patch("lintro.ai.orchestrator.get_provider")
@patch("lintro.ai.pipeline.generate_fixes_from_params")
@patch("lintro.ai.pipeline.verify_fixes")
@patch("lintro.ai.pipeline.apply_fixes")
@patch(
    "lintro.ai.orchestrator._resolve_issue_path",
    side_effect=lambda *, file, workspace_root, cwd: Path(file),
)
def test_run_ai_enhancement_fix_action_generates_fix_metadata(
    _mock_normalize,
    mock_apply_fixes,
    mock_verify_fixes,
    mock_generate_fixes,
    mock_get_provider,
    _mock_require_ai,
):
    """Verify fix action populates applied/verified counts.

    With ``auto_apply=True`` and the generate/apply/verify pipeline mocked to
    report one applied and verified fix, ``run_ai_enhancement`` must attach
    ``ai_metadata`` to the result with ``fix_suggestions``, ``applied_count``,
    ``verified_count``, and ``unverified_count`` populated.
    """
    # One failing ruff issue for the fix pipeline to process.
    result = ToolResult(
        name="ruff",
        success=False,
        issues_count=1,
        issues=[
            MockIssue(
                file="src/main.py",
                line=1,
                message="Use of assert",
                code="B101",
            ),
        ],
    )
    config = LintroConfig(
        ai=AIConfig(
            enabled=True,
            max_fix_attempts=5,
            auto_apply=True,  # skip interactive review; apply fixes directly
        ),
    )
    logger = MagicMock()

    mock_get_provider.return_value = MockAIProvider()
    # tool_name passed via constructor for consistency with the other tests
    # in this module (previously assigned post-construction).
    suggestion = AIFixSuggestion(
        file="src/main.py",
        line=1,
        code="B101",
        explanation="Replace assert",
        tool_name="ruff",
    )
    mock_generate_fixes.return_value = [suggestion]
    mock_apply_fixes.return_value = [suggestion]
    mock_verify_fixes.return_value = ValidationResult(
        verified=1,
        unverified=0,
        verified_by_tool={"ruff": 1},
        unverified_by_tool={"ruff": 0},
    )

    run_ai_enhancement(
        action=Action.FIX,
        all_results=[result],
        lintro_config=config,
        logger=logger,
        output_format="terminal",
    )

    assert_that(result.ai_metadata).is_not_none()
    assert_that(result.ai_metadata).contains_key("fix_suggestions")
    assert_that(result.ai_metadata).contains_key("applied_count")
    assert_that(result.ai_metadata).contains_key("verified_count")
    assert_that(result.ai_metadata).contains_key("unverified_count")
    assert_that(result.ai_metadata["fix_suggestions"]).is_length(1)  # type: ignore[index] # assertpy is_not_none narrows this
    assert_that(result.ai_metadata["applied_count"]).is_equal_to(1)  # type: ignore[index] # assertpy is_not_none narrows this
    assert_that(result.ai_metadata["verified_count"]).is_equal_to(1)  # type: ignore[index] # assertpy is_not_none narrows this
    assert_that(result.ai_metadata["unverified_count"]).is_equal_to(0)  # type: ignore[index] # assertpy is_not_none narrows this
@patch("lintro.ai.orchestrator.require_ai")
@patch("lintro.ai.orchestrator.get_provider")
@patch("lintro.ai.pipeline.generate_fixes_from_params")
@patch("lintro.ai.pipeline.review_fixes_interactive")
@patch("lintro.ai.pipeline.sys.stdin.isatty", return_value=True)
@patch(
    "lintro.ai.orchestrator._resolve_issue_path",
    side_effect=lambda *, file, workspace_root, cwd: Path(file),
)
def test_run_ai_enhancement_fix_action_passes_validate_mode_to_interactive_review(
    _mock_normalize,
    _mock_isatty,
    mock_review_fixes_interactive,
    mock_generate_fixes,
    mock_get_provider,
    _mock_require_ai,
):
    """Verify validate_after_group config flag is forwarded to interactive review."""
    # A single unresolved issue so the fix pipeline has something to review.
    open_issue = MockIssue(
        file="src/main.py",
        line=1,
        message="Use of assert",
        code="B101",
    )
    tool_result = ToolResult(
        name="ruff",
        success=False,
        issues_count=1,
        issues=[open_issue],
    )
    # validate_after_group=True is the setting under test.
    cfg = LintroConfig(
        ai=AIConfig(enabled=True, max_fix_attempts=5, validate_after_group=True),
    )
    fake_logger = MagicMock()

    mock_get_provider.return_value = MockAIProvider()
    fix = AIFixSuggestion(
        file="src/main.py",
        line=1,
        code="B101",
        explanation="Replace assert",
    )
    mock_generate_fixes.return_value = [fix]
    # Reviewer reports nothing applied/verified; only the call matters here.
    mock_review_fixes_interactive.return_value = (0, 0, [])

    run_ai_enhancement(
        action=Action.FIX,
        all_results=[tool_result],
        lintro_config=cfg,
        logger=fake_logger,
        output_format="terminal",
    )

    # Interactive review runs once and receives the flag via keyword.
    assert_that(mock_review_fixes_interactive.call_count).is_equal_to(1)
    call_kwargs = mock_review_fixes_interactive.call_args.kwargs
    assert_that(call_kwargs.get("validate_after_group")).is_true()
@patch("lintro.ai.orchestrator.require_ai")
@patch("lintro.ai.orchestrator.get_provider")
@patch("lintro.ai.pipeline.generate_fixes_from_params")
@patch(
    "lintro.ai.orchestrator._resolve_issue_path",
    side_effect=lambda *, file, workspace_root, cwd: Path(file),
)
def test_run_ai_enhancement_fix_action_uses_only_remaining_issue_tail(
    _mock_normalize,
    mock_generate_fixes,
    mock_get_provider,
    _mock_require_ai,
):
    """Fix generation receives only remaining issues, not fixed."""
    # Two issues in the list, but remaining_issues_count=1 marks only the
    # trailing one as still outstanding.
    already_fixed = MockIssue(
        file="src/main.py",
        line=1,
        message="Already fixed",
        code="FORMAT",
    )
    still_open = MockIssue(
        file="src/main.py",
        line=2,
        message="Still failing",
        code="E501",
    )
    tool_result = ToolResult(
        name="prettier",
        success=False,
        issues_count=1,
        issues=[already_fixed, still_open],
        remaining_issues_count=1,
    )
    cfg = LintroConfig(ai=AIConfig(enabled=True, max_fix_attempts=5))
    fake_logger = MagicMock()

    mock_generate_fixes.return_value = []
    mock_get_provider.return_value = MockAIProvider()

    run_ai_enhancement(
        action=Action.FIX,
        all_results=[tool_result],
        lintro_config=cfg,
        logger=fake_logger,
        output_format="json",
    )

    # Only the tail (remaining) issue reaches fix generation.
    assert_that(mock_generate_fixes.call_count).is_equal_to(1)
    passed_issues = mock_generate_fixes.call_args.args[0]
    assert_that(passed_issues).is_length(1)
    assert_that(passed_issues[0].code).is_equal_to("E501")
    # Generation params carry token budget and workspace root.
    passed_params = mock_generate_fixes.call_args.args[2]
    assert_that(passed_params.max_tokens).is_equal_to(4096)
    assert_that(passed_params.workspace_root).is_not_none()
@patch("lintro.ai.orchestrator.require_ai")
@patch("lintro.ai.orchestrator.get_provider")
@patch("lintro.ai.pipeline.generate_fixes_from_params")
def test_run_ai_enhancement_fix_action_skips_tools_with_zero_remaining_issues(
    mock_generate_fixes,
    mock_get_provider,
    _mock_require_ai,
):
    """Verify fix generation is skipped for tools with zero remaining issues."""
    # The tool once reported an issue, but everything has since been resolved
    # (remaining_issues_count=0), so there is nothing left for AI to fix.
    resolved_issue = MockIssue(
        file="src/main.py",
        line=1,
        message="Initial issue",
        code="FORMAT",
    )
    tool_result = ToolResult(
        name="prettier",
        success=True,
        issues_count=0,
        issues=[resolved_issue],
        remaining_issues_count=0,
    )
    cfg = LintroConfig(ai=AIConfig(enabled=True, max_fix_attempts=5))
    fake_logger = MagicMock()
    mock_get_provider.return_value = MockAIProvider()

    run_ai_enhancement(
        action=Action.FIX,
        all_results=[tool_result],
        lintro_config=cfg,
        logger=fake_logger,
        output_format="json",
    )

    # With nothing outstanding, the fix generator must never be invoked.
    mock_generate_fixes.assert_not_called()
@patch("lintro.ai.orchestrator.require_ai")
@patch("lintro.ai.orchestrator.get_provider")
@patch("lintro.ai.pipeline.generate_fixes_from_params")
@patch("lintro.ai.pipeline.apply_fixes")
@patch("lintro.ai.pipeline.verify_fixes")
@patch("lintro.ai.pipeline.generate_post_fix_summary")
@patch(
    "lintro.ai.orchestrator._resolve_issue_path",
    side_effect=lambda *, file, workspace_root, cwd: Path(file),
)
def test_run_ai_enhancement_fix_action_uses_fresh_rerun_results_for_post_summary(
    _mock_normalize,
    mock_generate_post_fix_summary,
    mock_verify_fixes,
    mock_apply_fixes,
    mock_generate_fixes,
    mock_get_provider,
    _mock_require_ai,
):
    """Post-fix summary receives results from by_tool after verify_fixes."""
    # One failing ruff issue plus the suggestion the pipeline will "apply".
    tool_result = ToolResult(
        name="ruff",
        success=False,
        issues_count=1,
        issues=[
            MockIssue(
                file="src/main.py",
                line=1,
                message="Use of assert",
                code="B101",
            ),
        ],
    )
    fix = AIFixSuggestion(
        file="src/main.py",
        line=1,
        code="B101",
        explanation="Replace assert",
        tool_name="ruff",
    )
    # auto_apply avoids the interactive review path.
    cfg = LintroConfig(ai=AIConfig(enabled=True, auto_apply=True))
    fake_logger = MagicMock()

    mock_get_provider.return_value = MockAIProvider()
    mock_generate_fixes.return_value = [fix]
    mock_apply_fixes.return_value = [fix]
    mock_verify_fixes.return_value = ValidationResult(
        verified=1,
        unverified=0,
        verified_by_tool={"ruff": 1},
        unverified_by_tool={"ruff": 0},
    )
    mock_generate_post_fix_summary.return_value = None

    run_ai_enhancement(
        action=Action.FIX,
        all_results=[tool_result],
        lintro_config=cfg,
        logger=fake_logger,
        output_format="terminal",
    )

    # Verification ran once, then the summary was built from fresh results.
    assert_that(mock_verify_fixes.call_count).is_equal_to(1)
    assert_that(mock_generate_post_fix_summary.call_count).is_equal_to(1)
    summary_kwargs = mock_generate_post_fix_summary.call_args.kwargs
    assert_that(summary_kwargs.get("remaining_results")).is_not_none()