Coverage for tests / unit / ai / test_config_wiring.py: 100%

108 statements  

« prev     ^ index     » next       coverage.py v7.13.0, created at 2026-04-03 18:53 +0000

1"""Tests verifying config knobs flow through to the functions that use them. 

2 

3Phase 3.4: After context_lines, fix_search_radius, retry delays, and 

4timeout were wired (Phase 2.1-2.5), these tests confirm the values 

5actually arrive at the downstream functions. 

6""" 

7 

8from __future__ import annotations 

9 

10from pathlib import Path 

11from unittest.mock import MagicMock, patch 

12 

13from assertpy import assert_that 

14 

15from lintro.ai.config import AIConfig 

16from lintro.ai.models import AIFixSuggestion 

17from lintro.ai.pipeline import run_fix_pipeline 

18from lintro.ai.validation import ValidationResult 

19from lintro.models.core.tool_result import ToolResult 

20from lintro.parsers.base_issue import BaseIssue 

21from tests.unit.ai.conftest import MockAIProvider, MockIssue 

22 

23_PIPELINE = "lintro.ai.pipeline" 

24 

25 

def _make_suggestion(
    *,
    tool_name: str = "ruff",
    code: str = "E501",
) -> AIFixSuggestion:
    """Build a minimal AIFixSuggestion tagged with a tool name.

    Args:
        tool_name: Tool the suggestion is attributed to.
        code: Lint rule code for the suggestion.

    Returns:
        An AIFixSuggestion for a.py line 1 with ``tool_name`` attached.
    """
    suggestion = AIFixSuggestion(file="a.py", line=1, code=code)
    suggestion.tool_name = tool_name
    return suggestion

34 

35 

def _make_fix_issues() -> tuple[
    list[tuple[ToolResult, BaseIssue]],
    ToolResult,
    MockIssue,
]:
    """Create a single failing ruff issue and its enclosing ToolResult.

    Returns:
        A 3-tuple of (fix_issues pairs for the pipeline, the ToolResult,
        the MockIssue it contains).
    """
    mock_issue = MockIssue(file="a.py", line=1, code="E501", message="err")
    tool_result = ToolResult(
        name="ruff",
        success=False,
        issues_count=1,
        issues=[mock_issue],
    )
    return [(tool_result, mock_issue)], tool_result, mock_issue

49 

50 

51# -- context_lines wiring --------------------------------------------------- 

52 

53 

@patch(f"{_PIPELINE}.render_validation")
@patch(f"{_PIPELINE}.render_summary")
@patch(f"{_PIPELINE}.verify_fixes")
@patch(f"{_PIPELINE}.generate_post_fix_summary")
@patch(f"{_PIPELINE}.review_fixes_interactive")
@patch(f"{_PIPELINE}.apply_fixes")
@patch(f"{_PIPELINE}.generate_fixes_from_params")
def test_context_lines_flows_to_generate_fixes(
    gen_mock,
    _apply_mock,
    _review_mock,
    _post_summary_mock,
    _verify_mock,
    _render_summary_mock,
    _render_validation_mock,
):
    """ai_config.context_lines is passed through to generate_fixes()."""
    fix_issues, *_ = _make_fix_issues()
    gen_mock.return_value = []

    run_fix_pipeline(
        fix_issues=fix_issues,
        provider=MockAIProvider(),
        ai_config=AIConfig(enabled=True, context_lines=42),
        logger=MagicMock(),
        output_format="json",
        workspace_root=Path("/tmp"),
    )

    # The params object is the third positional argument to
    # generate_fixes_from_params; the configured context_lines must survive.
    params = gen_mock.call_args.args[2]
    assert_that(params.context_lines).is_equal_to(42)

87 

88 

89# -- fix_search_radius wiring ----------------------------------------------- 

90 

91 

@patch(f"{_PIPELINE}.render_validation")
@patch(f"{_PIPELINE}.render_summary")
@patch(f"{_PIPELINE}.verify_fixes")
@patch(f"{_PIPELINE}.generate_post_fix_summary")
@patch(f"{_PIPELINE}.review_fixes_interactive")
@patch(f"{_PIPELINE}.apply_fixes")
@patch(f"{_PIPELINE}.generate_fixes_from_params")
def test_fix_search_radius_flows_to_apply_fixes(
    gen_mock,
    apply_mock,
    _review_mock,
    _post_summary_mock,
    verify_mock,
    _render_summary_mock,
    _render_validation_mock,
):
    """ai_config.fix_search_radius is passed through to apply_fixes()."""
    fix_issues, *_ = _make_fix_issues()
    fix = _make_suggestion()

    # One suggestion goes in and comes out applied; verification is a no-op.
    gen_mock.return_value = [fix]
    apply_mock.return_value = [fix]
    verify_mock.return_value = ValidationResult()

    run_fix_pipeline(
        fix_issues=fix_issues,
        provider=MockAIProvider(),
        ai_config=AIConfig(
            enabled=True,
            auto_apply=True,
            fix_search_radius=25,
        ),
        logger=MagicMock(),
        output_format="json",
        workspace_root=Path("/tmp"),
    )

    applied_kwargs = apply_mock.call_args.kwargs
    assert_that(applied_kwargs["search_radius"]).is_equal_to(25)

132 

133 

134# -- retry delay wiring ----------------------------------------------------- 

135 

136 

@patch(f"{_PIPELINE}.render_validation")
@patch(f"{_PIPELINE}.render_summary")
@patch(f"{_PIPELINE}.verify_fixes")
@patch(f"{_PIPELINE}.generate_post_fix_summary")
@patch(f"{_PIPELINE}.review_fixes_interactive")
@patch(f"{_PIPELINE}.apply_fixes")
@patch(f"{_PIPELINE}.generate_fixes_from_params")
def test_retry_delays_flow_to_generate_fixes(
    gen_mock,
    _apply_mock,
    _review_mock,
    _post_summary_mock,
    _verify_mock,
    _render_summary_mock,
    _render_validation_mock,
):
    """Retry delay config values are passed through to generate_fixes()."""
    fix_issues, *_ = _make_fix_issues()
    gen_mock.return_value = []

    run_fix_pipeline(
        fix_issues=fix_issues,
        provider=MockAIProvider(),
        ai_config=AIConfig(
            enabled=True,
            retry_base_delay=0.5,
            retry_max_delay=10.0,
            retry_backoff_factor=3.0,
        ),
        logger=MagicMock(),
        output_format="json",
        workspace_root=Path("/tmp"),
    )

    # All three retry knobs must land on the params object passed as the
    # third positional argument.
    params = gen_mock.call_args.args[2]
    assert_that(params.base_delay).is_equal_to(0.5)
    assert_that(params.max_delay).is_equal_to(10.0)
    assert_that(params.backoff_factor).is_equal_to(3.0)

177 

178 

179# -- timeout wiring to post-fix summary ------------------------------------ 

180 

181 

@patch(f"{_PIPELINE}.render_validation")
@patch(f"{_PIPELINE}.render_summary")
@patch(f"{_PIPELINE}.verify_fixes")
@patch(f"{_PIPELINE}.generate_post_fix_summary")
@patch(f"{_PIPELINE}.review_fixes_interactive")
@patch(f"{_PIPELINE}.apply_fixes")
@patch(f"{_PIPELINE}.generate_fixes_from_params")
def test_timeout_and_retries_flow_to_post_fix_summary(
    gen_mock,
    apply_mock,
    _review_mock,
    post_summary_mock,
    verify_mock,
    _render_summary_mock,
    _render_validation_mock,
):
    """api_timeout and retry config flow through to generate_post_fix_summary()."""
    fix_issues, *_ = _make_fix_issues()
    fix = _make_suggestion()

    gen_mock.return_value = [fix]
    apply_mock.return_value = [fix]
    # Report the single fix as verified so the pipeline reaches the
    # post-fix summary stage.
    verify_mock.return_value = ValidationResult(
        verified=1,
        unverified=0,
        verified_by_tool={"ruff": 1},
        unverified_by_tool={"ruff": 0},
    )
    post_summary_mock.return_value = None

    run_fix_pipeline(
        fix_issues=fix_issues,
        provider=MockAIProvider(),
        ai_config=AIConfig(
            enabled=True,
            auto_apply=True,
            api_timeout=120.0,
            max_retries=5,
            retry_base_delay=2.0,
            retry_max_delay=60.0,
            retry_backoff_factor=4.0,
        ),
        logger=MagicMock(),
        output_format="terminal",
        workspace_root=Path("/tmp"),
    )

    summary_kwargs = post_summary_mock.call_args.kwargs
    assert_that(summary_kwargs["timeout"]).is_equal_to(120.0)
    assert_that(summary_kwargs["max_retries"]).is_equal_to(5)
    assert_that(summary_kwargs["base_delay"]).is_equal_to(2.0)
    assert_that(summary_kwargs["max_delay"]).is_equal_to(60.0)
    assert_that(summary_kwargs["backoff_factor"]).is_equal_to(4.0)

236 

237 

238# -- timeout wiring to summary in orchestrator ----------------------------- 

239 

240 

@patch("lintro.ai.orchestrator.run_fix_pipeline")
@patch("lintro.ai.orchestrator.get_provider")
@patch("lintro.ai.orchestrator.require_ai")
@patch("lintro.ai.orchestrator.generate_summary")
def test_timeout_and_retries_flow_to_generate_summary(
    summary_mock,
    _require_mock,
    provider_mock,
    _pipeline_mock,
):
    """api_timeout and retry config flow through to generate_summary()."""
    # Imported here to avoid importing the orchestrator at module load.
    from lintro.ai.orchestrator import run_ai_enhancement
    from lintro.config.lintro_config import LintroConfig
    from lintro.enums.action import Action

    provider_mock.return_value = MockAIProvider()
    summary_mock.return_value = None

    lintro_config = LintroConfig(
        ai=AIConfig(
            enabled=True,
            api_timeout=90.0,
            max_retries=4,
            retry_base_delay=1.5,
            retry_max_delay=20.0,
            retry_backoff_factor=2.5,
        ),
    )

    failing_result = ToolResult(
        name="ruff",
        success=False,
        issues_count=1,
        issues=[MockIssue(file="x.py", line=1, message="err", code="E501")],
    )

    run_ai_enhancement(
        action=Action.CHECK,
        all_results=[failing_result],
        lintro_config=lintro_config,
        logger=MagicMock(),
        output_format="terminal",
    )

    summary_kwargs = summary_mock.call_args.kwargs
    assert_that(summary_kwargs["timeout"]).is_equal_to(90.0)
    assert_that(summary_kwargs["max_retries"]).is_equal_to(4)
    assert_that(summary_kwargs["base_delay"]).is_equal_to(1.5)
    assert_that(summary_kwargs["max_delay"]).is_equal_to(20.0)
    assert_that(summary_kwargs["backoff_factor"]).is_equal_to(2.5)