Coverage for tests / unit / ai / test_summary_generation.py: 100%

101 statements  

« prev     ^ index     » next       coverage.py v7.13.0, created at 2026-04-03 18:53 +0000

1"""Tests for AI summary generation. 

2 

3Covers _build_issues_digest, _parse_summary_response, 

4and generate_summary. 

5""" 

6 

7from __future__ import annotations 

8 

9import json 

10 

11from assertpy import assert_that 

12 

13from lintro.ai.providers.base import AIResponse 

14from lintro.ai.summary import ( 

15 _build_issues_digest, 

16 _parse_summary_response, 

17 generate_summary, 

18) 

19from lintro.models.core.tool_result import ToolResult 

20from tests.unit.ai.conftest import MockAIProvider, MockIssue 

21 

22# -- _build_issues_digest ----------------------------------------------------- 

23 

24 

def test_build_issues_digest_builds_digest_from_results():
    """Verify digest includes tool names, issue codes, and occurrence counts."""
    tool_result = ToolResult(
        name="ruff",
        success=True,
        issues_count=3,
        issues=[
            MockIssue(file="src/a.py", line=10, message="Use of assert", code="B101"),
            MockIssue(file="src/b.py", line=20, message="Use of assert", code="B101"),
            MockIssue(file="src/a.py", line=42, message="Line too long", code="E501"),
        ],
    )

    digest = _build_issues_digest([tool_result])

    for expected_fragment in ("ruff", "[B101]", "[E501]"):
        assert_that(digest).contains(expected_fragment)
    # B101 appears twice above, so its occurrence count renders as "x2".
    assert_that(digest).contains("x2")

43 

44 

def test_build_issues_digest_empty_results():
    """Verify empty results list produces an empty digest string."""
    assert_that(_build_issues_digest([])).is_empty()

49 

50 

def test_build_issues_digest_skipped_results_excluded():
    """Verify skipped tool results are excluded from the digest."""
    skipped_result = ToolResult(
        name="ruff",
        success=True,
        issues_count=0,
        skipped=True,
        skip_reason="not installed",
    )

    # A skipped tool contributes nothing, so the digest stays empty.
    assert_that(_build_issues_digest([skipped_result])).is_empty()

62 

63 

def test_build_issues_digest_sample_locations_capped():
    """Verify sample locations are capped at 3 with a count of remaining shown."""
    many_issues = [
        MockIssue(file=f"src/f{idx}.py", line=idx, message="test", code="E501")
        for idx in range(10)
    ]
    tool_result = ToolResult(
        name="ruff",
        success=True,
        issues_count=10,
        issues=many_issues,
    )

    digest = _build_issues_digest([tool_result])

    # Only 3 sample locations are listed; the other 7 are summarized.
    assert_that(digest).contains("+7 more")

79 

80 

def test_build_issues_digest_redacts_absolute_paths_for_provider(tmp_path):
    """Verify absolute file paths are converted to workspace-relative in the digest."""
    hidden_file = tmp_path / "src" / "hidden.py"
    hidden_file.parent.mkdir(parents=True)
    hidden_file.write_text("x = 1\n", encoding="utf-8")

    tool_result = ToolResult(
        name="ruff",
        success=True,
        issues_count=1,
        issues=[
            MockIssue(
                file=str(hidden_file),
                line=1,
                message="Line too long",
                code="E501",
            ),
        ],
    )

    digest = _build_issues_digest([tool_result], workspace_root=tmp_path)

    # The digest must show the relative path and never leak the absolute one.
    assert_that(digest).contains("src/hidden.py:1")
    assert_that(digest).does_not_contain(str(hidden_file))

104 

105 

106# -- _parse_summary_response -------------------------------------------------- 

107 

108 

def test_parse_summary_response_valid_json():
    """Verify valid JSON response is parsed into an AISummary with all fields."""
    payload = {
        "overview": "Code needs work",
        "key_patterns": ["Pattern 1", "Pattern 2"],
        "priority_actions": ["Action 1"],
        "estimated_effort": "30 minutes",
    }

    summary = _parse_summary_response(
        json.dumps(payload),
        input_tokens=100,
        output_tokens=50,
        cost_estimate=0.01,
    )

    assert_that(summary.overview).is_equal_to("Code needs work")
    assert_that(summary.key_patterns).is_length(2)
    assert_that(summary.priority_actions).is_length(1)
    assert_that(summary.estimated_effort).is_equal_to("30 minutes")
    # Token/cost metadata is carried through from the keyword arguments.
    assert_that(summary.input_tokens).is_equal_to(100)
    assert_that(summary.cost_estimate).is_equal_to(0.01)

131 

132 

def test_parse_summary_response_parses_triage_suggestions():
    """Verify triage_suggestions field is parsed from the JSON response."""
    payload = {
        "overview": "Some issues",
        "key_patterns": [],
        "priority_actions": [],
        "triage_suggestions": [
            "B101 in tests — assert is idiomatic, add # noqa: B101",
        ],
        "estimated_effort": "5 minutes",
    }

    summary = _parse_summary_response(json.dumps(payload))

    assert_that(summary.triage_suggestions).is_length(1)
    assert_that(summary.triage_suggestions[0]).contains("B101")

149 

150 

def test_parse_summary_response_missing_triage_defaults_to_empty():
    """Verify missing triage_suggestions key defaults to an empty list."""
    summary = _parse_summary_response(
        json.dumps({"overview": "Clean", "key_patterns": []}),
    )
    assert_that(summary.triage_suggestions).is_empty()

156 

157 

def test_parse_summary_response_invalid_json_fallback():
    """Verify invalid JSON falls back to using raw content as the overview."""
    summary = _parse_summary_response("not json at all")

    # Raw content becomes the overview; structured fields stay empty.
    assert_that(summary.overview).contains("not json")
    assert_that(summary.key_patterns).is_empty()

163 

164 

def test_parse_summary_response_empty_content():
    """Verify empty content returns a summary with 'Summary unavailable' overview."""
    summary = _parse_summary_response("")
    assert_that(summary.overview).is_equal_to("Summary unavailable")

169 

170 

def test_parse_summary_response_non_dict_json_list():
    """When json.loads returns a list, the isinstance(data, dict) check triggers."""
    summary = _parse_summary_response(
        json.dumps(["item1", "item2"]),
        input_tokens=50,
        output_tokens=25,
        cost_estimate=0.003,
    )

    # The raw list content lands in the overview; structured fields are empty.
    assert_that(summary.overview).contains("item1")
    assert_that(summary.key_patterns).is_empty()
    # Token/cost metadata is still recorded on the fallback path.
    assert_that(summary.input_tokens).is_equal_to(50)
    assert_that(summary.cost_estimate).is_equal_to(0.003)

184 

185 

def test_parse_summary_response_non_dict_json_string():
    """Non-dict JSON triggers the isinstance(data, dict) fallback."""
    summary = _parse_summary_response(
        json.dumps("just a string"),
        input_tokens=10,
        output_tokens=5,
        cost_estimate=0.001,
    )

    assert_that(summary.overview).contains("just a string")
    assert_that(summary.key_patterns).is_empty()
    assert_that(summary.input_tokens).is_equal_to(10)

198 

199 

def test_parse_summary_response_non_dict_json_int():
    """When json.loads returns an integer, the isinstance(data, dict) check triggers."""
    summary = _parse_summary_response(json.dumps(42))

    assert_that(summary.overview).contains("42")
    assert_that(summary.key_patterns).is_empty()

206 

207 

208# -- generate_summary --------------------------------------------------------- 

209 

210 

def test_generate_summary_returns_none_for_no_issues():
    """Returns None and skips provider when no issues exist."""
    mock_provider = MockAIProvider()
    clean_result = ToolResult(name="ruff", success=True, issues_count=0)

    summary = generate_summary([clean_result], mock_provider)

    assert_that(summary).is_none()
    # The provider must never be invoked when there is nothing to summarize.
    assert_that(mock_provider.calls).is_empty()

218 

219 

def test_generate_summary_generates_summary():
    """Verify generate_summary calls the provider and returns a parsed AISummary."""
    tool_result = ToolResult(
        name="ruff",
        success=True,
        issues_count=1,
        issues=[
            MockIssue(file="a.py", line=1, message="bad code", code="E501"),
        ],
    )
    payload = {
        "overview": "One issue found",
        "key_patterns": [],
        "priority_actions": [],
        "estimated_effort": "5 minutes",
    }
    canned_response = AIResponse(
        content=json.dumps(payload),
        model="mock",
        input_tokens=200,
        output_tokens=100,
        cost_estimate=0.005,
        provider="mock",
    )
    mock_provider = MockAIProvider(responses=[canned_response])

    summary = generate_summary([tool_result], mock_provider)

    assert_that(summary).is_not_none()
    assert_that(summary.overview).is_equal_to("One issue found")  # type: ignore[union-attr] # assertpy is_not_none narrows this
    # Exactly one round trip to the provider.
    assert_that(mock_provider.calls).is_length(1)

252 

253 

def test_generate_summary_handles_provider_error():
    """Verify generate_summary returns None when the provider raises an error."""
    tool_result = ToolResult(
        name="ruff",
        success=True,
        issues_count=1,
        issues=[
            MockIssue(file="a.py", line=1, message="bad", code="E501"),
        ],
    )

    class ErrorProvider(MockAIProvider):
        # Provider stub whose completion call always fails.
        def complete(self, prompt, **kwargs):
            raise RuntimeError("API down")

    # Provider failures are swallowed; the caller simply gets no summary.
    assert_that(generate_summary([tool_result], ErrorProvider())).is_none()