Coverage for tests/unit/ai/test_orchestrator_edge.py: 100% (84 statements)
1"""Tests for AI orchestration edge cases, error handling, and fail_on_unfixed."""
3from __future__ import annotations
5from pathlib import Path
6from unittest.mock import MagicMock, patch
8import pytest
9from assertpy import assert_that
11from lintro.ai.config import AIConfig
12from lintro.ai.models import AIFixSuggestion, AIResult, AISummary
13from lintro.ai.orchestrator import run_ai_enhancement
14from lintro.ai.validation import ValidationResult
15from lintro.config.lintro_config import LintroConfig
16from lintro.enums.action import Action
17from lintro.models.core.tool_result import ToolResult
18from tests.unit.ai.conftest import MockAIProvider, MockIssue

# ---------------------------------------------------------------------------
# TestAIResultExitCode
# ---------------------------------------------------------------------------
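

# NOTE: stacked @patch decorators are applied bottom-up, so the mock arguments
# in the tests below are listed in reverse decorator order; underscore-prefixed
# parameters are mocks the test never inspects.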
@patch("lintro.ai.orchestrator.require_ai")
@patch("lintro.ai.orchestrator.get_provider")
@patch("lintro.ai.orchestrator.generate_summary")
def test_ai_result_default_no_error(
    mock_generate_summary,
    mock_get_provider,
    _mock_require_ai,
):
    """Default behavior: AI returns AIResult with no error flag."""
    result = ToolResult(
        name="ruff",
        success=False,
        issues_count=1,
        issues=[
            MockIssue(
                file="src/main.py",
                line=1,
                message="Use of assert",
                code="B101",
            ),
        ],
    )
    config = LintroConfig(ai=AIConfig(enabled=True))
    logger = MagicMock()

    mock_get_provider.return_value = MockAIProvider()
    mock_generate_summary.return_value = AISummary(overview="AI overview")

    ai_result = run_ai_enhancement(
        action=Action.CHECK,
        all_results=[result],
        lintro_config=config,
        logger=logger,
        output_format="json",
    )

    assert_that(ai_result).is_instance_of(AIResult)
    assert_that(ai_result.error).is_false()
    assert_that(ai_result.fixes_applied).is_equal_to(0)
    assert_that(ai_result.fixes_failed).is_equal_to(0)
    assert_that(ai_result.unfixed_issues).is_equal_to(0)
    assert_that(ai_result.budget_exceeded).is_false()


@patch("lintro.ai.orchestrator.require_ai")
@patch("lintro.ai.orchestrator.get_provider")
@patch("lintro.ai.orchestrator.generate_summary")
@patch("lintro.ai.pipeline.generate_fixes_from_params")
@patch(
    "lintro.ai.orchestrator._resolve_issue_path",
    side_effect=lambda *, file, workspace_root, cwd: Path(file),
)
def test_ai_result_unfixed_issues_when_fixes_fail(
    _mock_normalize,
    mock_generate_fixes,
    mock_generate_summary,
    mock_get_provider,
    _mock_require_ai,
):
    """AIResult reports unfixed issues when fix generation returns nothing."""
    result = ToolResult(
        name="ruff",
        success=False,
        issues_count=1,
        issues=[
            MockIssue(
                file="src/main.py",
                line=1,
                message="Use of assert",
                code="B101",
            ),
        ],
    )
    config = LintroConfig(
        ai=AIConfig(enabled=True, max_fix_attempts=5, fail_on_unfixed=True),
    )
    logger = MagicMock()

    mock_get_provider.return_value = MockAIProvider()
    mock_generate_summary.return_value = None
    mock_generate_fixes.return_value = []

    ai_result = run_ai_enhancement(
        action=Action.CHECK,
        all_results=[result],
        lintro_config=config,
        logger=logger,
        output_format="json",
        ai_fix=True,
    )
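
    # generate_fixes_from_params is mocked to return no suggestions, so the
    # single issue remains unfixed and nothing is applied.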
    assert_that(ai_result).is_instance_of(AIResult)
    assert_that(ai_result.unfixed_issues).is_equal_to(1)
    assert_that(ai_result.fixes_applied).is_equal_to(0)


def test_ai_result_error_on_exception():
    """AIResult.error is True when AI enhancement raises an exception."""
    config = LintroConfig(ai=AIConfig(enabled=True))
    logger = MagicMock()

    with patch(
        "lintro.ai.orchestrator.require_ai",
        side_effect=RuntimeError("boom"),
    ):
        ai_result = run_ai_enhancement(
            action=Action.CHECK,
            all_results=[],
            lintro_config=config,
            logger=logger,
            output_format="json",
        )

    assert_that(ai_result).is_instance_of(AIResult)
    assert_that(ai_result.error).is_true()
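

# By default, exceptions raised during AI enhancement are swallowed and
# surfaced via AIResult.error; with fail_on_ai_error=True they propagate.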
def test_ai_result_error_propagates_when_fail_on_ai_error():
    """Exceptions propagate when fail_on_ai_error=True."""
    config = LintroConfig(ai=AIConfig(enabled=True, fail_on_ai_error=True))
    logger = MagicMock()

    with (
        patch(
            "lintro.ai.orchestrator.require_ai",
            side_effect=RuntimeError("boom"),
        ),
        pytest.raises(RuntimeError, match="boom"),
    ):
        run_ai_enhancement(
            action=Action.CHECK,
            all_results=[],
            lintro_config=config,
            logger=logger,
            output_format="json",
        )
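

# The test below patches the full fix pipeline (generate -> apply -> verify)
# and stubs _resolve_issue_path so issue paths are used as-is.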
@patch("lintro.ai.orchestrator.require_ai")
@patch("lintro.ai.orchestrator.get_provider")
@patch("lintro.ai.pipeline.generate_fixes_from_params")
@patch("lintro.ai.pipeline.apply_fixes")
@patch("lintro.ai.pipeline.verify_fixes")
@patch(
    "lintro.ai.orchestrator._resolve_issue_path",
    side_effect=lambda *, file, workspace_root, cwd: Path(file),
)
def test_ai_result_tracks_applied_fixes(
    _mock_normalize,
    mock_verify_fixes,
    mock_apply_fixes,
    mock_generate_fixes,
    mock_get_provider,
    _mock_require_ai,
):
    """AIResult correctly reports fixes_applied and fixes_failed."""
    result = ToolResult(
        name="ruff",
        success=False,
        issues_count=2,
        issues=[
            MockIssue(
                file="src/main.py",
                line=1,
                message="Use of assert",
                code="B101",
            ),
            MockIssue(
                file="src/main.py",
                line=2,
                message="Line too long",
                code="E501",
            ),
        ],
        remaining_issues_count=2,
    )
    suggestion1 = AIFixSuggestion(
        file="src/main.py",
        line=1,
        code="B101",
        explanation="Replace assert",
        tool_name="ruff",
    )
    suggestion2 = AIFixSuggestion(
        file="src/main.py",
        line=2,
        code="E501",
        explanation="Break line",
        tool_name="ruff",
    )
    config = LintroConfig(
        ai=AIConfig(enabled=True, auto_apply=True),
    )
    logger = MagicMock()

    mock_get_provider.return_value = MockAIProvider()
    mock_generate_fixes.return_value = [suggestion1, suggestion2]
    # Only one fix applies successfully
    mock_apply_fixes.return_value = [suggestion1]
    mock_verify_fixes.return_value = ValidationResult(
        verified=1,
        unverified=0,
        verified_by_tool={"ruff": 1},
        unverified_by_tool={"ruff": 0},
    )

    ai_result = run_ai_enhancement(
        action=Action.FIX,
        all_results=[result],
        lintro_config=config,
        logger=logger,
        output_format="json",
    )
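
    # Two suggestions were generated but only suggestion1 was applied, so one
    # fix counts as failed and one issue remains unfixed.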
    assert_that(ai_result).is_instance_of(AIResult)
    assert_that(ai_result.fixes_applied).is_equal_to(1)
    assert_that(ai_result.fixes_failed).is_equal_to(1)
    assert_that(ai_result.unfixed_issues).is_equal_to(1)


# ---------------------------------------------------------------------------
# TestFailOnUnfixed
# ---------------------------------------------------------------------------


def test_fail_on_unfixed_config_default_is_false():
    """Verify fail_on_unfixed defaults to False."""
    config = AIConfig()
    assert_that(config.fail_on_unfixed).is_false()


def test_fail_on_unfixed_config_can_be_set():
    """Verify fail_on_unfixed can be set to True."""
    config = AIConfig(fail_on_unfixed=True)
    assert_that(config.fail_on_unfixed).is_true()