Coverage for tests/unit/ai/test_orchestrator_multi.py: 100%
147 statements
« prev ^ index » next coverage.py v7.13.0, created at 2026-04-03 18:53 +0000
« prev ^ index » next coverage.py v7.13.0, created at 2026-04-03 18:53 +0000
1"""Tests for multi-tool AI orchestration scenarios and complex workflows."""
3from __future__ import annotations
5from pathlib import Path
6from typing import Any
7from unittest.mock import MagicMock, patch
9from assertpy import assert_that
11from lintro.ai.config import AIConfig
12from lintro.ai.models import AIFixSuggestion
13from lintro.ai.orchestrator import run_ai_enhancement
14from lintro.ai.rerun import _rerun_cwd_lock, paths_for_context, rerun_tools
15from lintro.ai.validation import ValidationResult
16from lintro.config.lintro_config import LintroConfig
17from lintro.enums.action import Action
18from lintro.models.core.tool_result import ToolResult
19from tests.unit.ai.conftest import MockAIProvider, MockIssue
21# ---------------------------------------------------------------------------
22# Multi-tool fix scenarios
23# ---------------------------------------------------------------------------
@patch("lintro.ai.orchestrator.require_ai")
@patch("lintro.ai.orchestrator.get_provider")
@patch("lintro.ai.pipeline.generate_fixes_from_params")
@patch("lintro.ai.pipeline.apply_fixes")
@patch("lintro.ai.pipeline.review_fixes_interactive")
@patch("lintro.ai.pipeline.sys.stdin.isatty", return_value=False)
@patch(
    "lintro.ai.orchestrator._resolve_issue_path",
    side_effect=lambda *, file, workspace_root, cwd: Path(file),
)
def test_run_ai_enhancement_fix_action_noninteractive_applies_safe_then_reviews_risky(
    _mock_normalize,
    _mock_isatty,
    mock_review_fixes_interactive,
    mock_apply_fixes,
    mock_generate_fixes,
    mock_get_provider,
    _mock_require_ai,
):
    """Non-interactive mode auto-applies safe fixes, reviews risky."""
    # One purely stylistic issue and one behavior-affecting issue.
    style_issue = MockIssue(
        file="src/main.py",
        line=1,
        message="Line too long",
        code="E501",
    )
    assert_issue = MockIssue(
        file="src/main.py",
        line=2,
        message="Use of assert",
        code="B101",
    )
    tool_result = ToolResult(
        name="ruff",
        success=False,
        issues_count=2,
        issues=[style_issue, assert_issue],
    )
    lintro_cfg = LintroConfig(
        ai=AIConfig(
            enabled=True,
            auto_apply=False,
            auto_apply_safe_fixes=True,
        ),
    )
    fake_logger = MagicMock()

    # Matching AI suggestions: one safe-style, one behavioral-risk.
    safe_fix = AIFixSuggestion(
        file="src/main.py",
        line=1,
        code="E501",
        explanation="Break long line",
        risk_level="safe-style",
        confidence="high",
    )
    risky_fix = AIFixSuggestion(
        file="src/main.py",
        line=2,
        code="B101",
        explanation="Replace assert",
        risk_level="behavioral-risk",
        confidence="medium",
    )
    mock_get_provider.return_value = MockAIProvider()
    mock_generate_fixes.return_value = [safe_fix, risky_fix]
    mock_apply_fixes.return_value = [safe_fix]
    mock_review_fixes_interactive.return_value = (0, 0, [])

    run_ai_enhancement(
        action=Action.FIX,
        all_results=[tool_result],
        lintro_config=lintro_cfg,
        logger=fake_logger,
        output_format="terminal",
    )

    # Safe-style fix was applied automatically, without review.
    assert_that(mock_apply_fixes.call_count).is_equal_to(1)
    applied = mock_apply_fixes.call_args.args[0]
    assert_that(applied).is_length(1)
    assert_that(applied[0].code).is_equal_to("E501")

    # Risky fix went through the interactive review path instead.
    assert_that(mock_review_fixes_interactive.call_count).is_equal_to(1)
    reviewed = mock_review_fixes_interactive.call_args.args[0]
    assert_that(reviewed).is_length(1)
    assert_that(reviewed[0].code).is_equal_to("B101")
@patch("lintro.ai.orchestrator.require_ai")
@patch("lintro.ai.orchestrator.get_provider")
@patch("lintro.ai.pipeline.generate_fixes_from_params")
@patch("lintro.ai.pipeline.apply_fixes")
@patch("lintro.ai.pipeline.verify_fixes")
@patch(
    "lintro.ai.orchestrator._resolve_issue_path",
    side_effect=lambda *, file, workspace_root, cwd: Path(file),
)
def test_run_ai_enhancement_fix_action_json_auto_applies_safe_style_suggestions(
    _mock_normalize,
    mock_verify_fixes,
    mock_apply_fixes,
    mock_generate_fixes,
    mock_get_provider,
    _mock_require_ai,
):
    """JSON mode auto-applies only safe-style suggestions and reruns."""
    # Two issues from the same tool: one stylistic, one behavioral.
    tool_result = ToolResult(
        name="ruff",
        success=False,
        issues_count=2,
        issues=[
            MockIssue(file="src/main.py", line=1, message="Line too long", code="E501"),
            MockIssue(file="src/main.py", line=2, message="Use of assert", code="B101"),
        ],
    )
    lintro_cfg = LintroConfig(
        ai=AIConfig(
            enabled=True,
            max_fix_attempts=5,
            auto_apply=False,
            auto_apply_safe_fixes=True,
        ),
    )
    fake_logger = MagicMock()

    safe_fix = AIFixSuggestion(
        file="src/main.py",
        line=1,
        code="E501",
        explanation="Break long line",
        risk_level="safe-style",
        confidence="high",
    )
    risky_fix = AIFixSuggestion(
        file="src/main.py",
        line=2,
        code="B101",
        explanation="Replace assert",
        risk_level="behavioral-risk",
        confidence="medium",
    )
    mock_get_provider.return_value = MockAIProvider()
    mock_generate_fixes.return_value = [safe_fix, risky_fix]
    mock_apply_fixes.return_value = [safe_fix]
    mock_verify_fixes.return_value = ValidationResult()

    run_ai_enhancement(
        action=Action.FIX,
        all_results=[tool_result],
        lintro_config=lintro_cfg,
        logger=fake_logger,
        output_format="json",
    )

    # Only the safe-style suggestion was auto-applied in JSON mode.
    assert_that(mock_apply_fixes.call_count).is_equal_to(1)
    applied = mock_apply_fixes.call_args.args[0]
    assert_that(applied).is_length(1)
    assert_that(applied[0].code).is_equal_to("E501")
    # Applied fixes were verified via a rerun, and metadata reflects it.
    assert_that(mock_verify_fixes.call_count).is_equal_to(1)
    assert_that(tool_result.ai_metadata).is_not_none()
    assert_that(tool_result.ai_metadata).contains_key("fixed_count")
    assert_that(tool_result.ai_metadata["fixed_count"]).is_equal_to(1)  # type: ignore[index] # assertpy is_not_none narrows this
@patch("lintro.ai.orchestrator.require_ai")
@patch("lintro.ai.orchestrator.get_provider")
@patch("lintro.ai.pipeline.generate_fixes_from_params")
@patch("lintro.ai.pipeline.apply_fixes")
@patch("lintro.ai.pipeline.verify_fixes")
@patch(
    "lintro.ai.orchestrator._resolve_issue_path",
    side_effect=lambda *, file, workspace_root, cwd: Path(file),
)
def test_run_ai_enhancement_fix_action_json_uses_fresh_rerun_results(
    _mock_normalize,
    mock_verify_fixes,
    mock_apply_fixes,
    mock_generate_fixes,
    mock_get_provider,
    _mock_require_ai,
):
    """Verify JSON fix action updates result counts via verify_fixes."""
    single_issue = MockIssue(
        file="src/main.py",
        line=1,
        message="Use of assert",
        code="B101",
    )
    tool_result = ToolResult(
        name="ruff",
        success=False,
        issues_count=1,
        issues=[single_issue],
        remaining_issues_count=1,
    )
    fix = AIFixSuggestion(
        file="src/main.py",
        line=1,
        code="B101",
        explanation="Replace assert",
        tool_name="ruff",
    )
    lintro_cfg = LintroConfig(
        ai=AIConfig(
            enabled=True,
            auto_apply=True,
        ),
    )
    fake_logger = MagicMock()

    mock_get_provider.return_value = MockAIProvider()
    mock_generate_fixes.return_value = [fix]
    mock_apply_fixes.return_value = [fix]
    # Rerun reports the single fix as verified by ruff.
    mock_verify_fixes.return_value = ValidationResult(
        verified=1,
        unverified=0,
        verified_by_tool={"ruff": 1},
        unverified_by_tool={"ruff": 0},
    )

    run_ai_enhancement(
        action=Action.FIX,
        all_results=[tool_result],
        lintro_config=lintro_cfg,
        logger=fake_logger,
        output_format="json",
    )

    assert_that(mock_verify_fixes.call_count).is_equal_to(1)
270# ---------------------------------------------------------------------------
271# TestRerunContext
272# ---------------------------------------------------------------------------
def test_rerun_context_paths_for_context_relativizes_to_tool_cwd(tmp_path):
    """Paths inside tool cwd become relative; outside stay absolute."""
    # A file nested under the tool's working directory...
    tool_cwd = tmp_path / "tool"
    tool_cwd.mkdir(parents=True)
    inner_file = tool_cwd / "src" / "main.py"
    inner_file.parent.mkdir(parents=True)
    inner_file.write_text("x = 1\n", encoding="utf-8")
    # ...and one that lives outside it entirely.
    external_file = tmp_path / "outside.py"
    external_file.write_text("x = 1\n", encoding="utf-8")

    relativized = paths_for_context(
        file_paths=[str(inner_file), str(external_file)],
        cwd=str(tool_cwd),
    )

    # Inside path is made cwd-relative; outside path stays absolute.
    assert_that(relativized[0]).is_equal_to("src/main.py")
    assert_that(relativized[1]).is_equal_to(str(external_file.resolve()))
@patch("lintro.tools.tool_manager.get_tool")
def test_rerun_context_rerun_uses_original_tool_cwd(mock_get_tool, tmp_path):
    """Verify rerun_tools changes cwd to the original tool working directory."""
    tool_cwd = tmp_path / "tool"
    tool_cwd.mkdir(parents=True)
    src_file = tool_cwd / "src" / "main.py"
    src_file.parent.mkdir(parents=True)
    src_file.write_text("x = 1\n", encoding="utf-8")

    issue = MockIssue(
        file=str(src_file),
        line=1,
        code="E501",
        message="Line too long",
    )
    # Original result recorded the cwd the tool originally ran from.
    original_result = ToolResult(
        name="ruff",
        success=False,
        issues_count=1,
        issues=[issue],
        remaining_issues_count=1,
        cwd=str(tool_cwd),
    )
    by_tool = {"ruff": (original_result, [issue])}

    observed: dict[str, object] = {}

    class _SpyTool:
        # Records the cwd and paths the rerun invokes check() with.
        def check(self, paths: Any, options: Any) -> ToolResult:
            import os

            observed["cwd"] = os.getcwd()
            observed["paths"] = paths
            return ToolResult(name="ruff", success=True, issues_count=0, issues=[])

    mock_get_tool.return_value = _SpyTool()
    rerun_results = rerun_tools(by_tool)  # type: ignore[arg-type] # test uses simplified mock data

    assert_that(rerun_results).is_length(1)
    # The rerun executed from the tool's original cwd with relative paths.
    assert_that(observed.get("cwd")).is_equal_to(str(tool_cwd))
    assert_that(observed.get("paths")).is_equal_to(["src/main.py"])
def test_rerun_context_rerun_cwd_lock_exists():
    """Verify the module-level threading lock is a Lock instance."""
    # threading.Lock() returns a _thread.lock instance, so instead of
    # comparing type identity we confirm the acquire/release protocol.
    for lock_method in ("acquire", "release"):
        assert_that(hasattr(_rerun_cwd_lock, lock_method)).is_true()
@patch("lintro.tools.tool_manager.get_tool")
def test_rerun_context_rerun_continues_on_tool_failure(mock_get_tool, tmp_path):
    """When one tool fails, other tools still get rerun."""
    # One issue per tool, each pointing at a real file on disk.
    file_a = tmp_path / "a.py"
    file_a.write_text("x = 1\n", encoding="utf-8")
    issue_a = MockIssue(file=str(file_a), line=1, code="E501", message="err")

    file_b = tmp_path / "b.py"
    file_b.write_text("y = 2\n", encoding="utf-8")
    issue_b = MockIssue(file=str(file_b), line=1, code="E501", message="err")

    result_a = ToolResult(name="failing-tool", success=False, issues=[issue_a])
    result_b = ToolResult(name="passing-tool", success=False, issues=[issue_b])

    invocations = {"failing": 0, "passing": 0}

    class _BoomTool:
        # Simulates a tool whose rerun blows up.
        def check(self, paths: Any, options: Any) -> ToolResult:
            invocations["failing"] += 1
            raise RuntimeError("boom")

    class _OkTool:
        # Simulates a tool whose rerun completes cleanly.
        def check(self, paths: Any, options: Any) -> ToolResult:
            invocations["passing"] += 1
            return ToolResult(
                name="passing-tool",
                success=True,
                issues_count=0,
                issues=[],
            )

    def _pick_tool(name):
        return _BoomTool() if name == "failing-tool" else _OkTool()

    mock_get_tool.side_effect = _pick_tool

    by_tool = {
        "failing-tool": (result_a, [issue_a]),
        "passing-tool": (result_b, [issue_b]),
    }
    rerun_results = rerun_tools(by_tool)  # type: ignore[arg-type] # test uses simplified mock data

    # Both tools were attempted; only the surviving tool produced a result.
    assert_that(invocations["failing"]).is_equal_to(1)
    assert_that(invocations["passing"]).is_equal_to(1)
    assert_that(rerun_results).is_not_none()
    assert_that(rerun_results).is_length(1)
    assert_that(rerun_results[0].name).is_equal_to("passing-tool")  # type: ignore[index] # assertpy is_not_none narrows this