Coverage for tests/unit/parsers/test_bandit_parser.py: 100%
88 statements
« prev ^ index » next coverage.py v7.13.0, created at 2026-04-03 18:53 +0000
1"""Unit tests for Bandit output parsing and tool JSON extraction."""
3from __future__ import annotations
5import json
6from pathlib import Path
7from types import SimpleNamespace
8from typing import Any
10import pytest
11from assertpy import assert_that
13from lintro.models.core.tool_result import ToolResult
14from lintro.parsers.bandit.bandit_parser import parse_bandit_output
15from lintro.plugins import ToolRegistry
def test_parse_bandit_valid_output() -> None:
    """Parse a representative Bandit JSON result and validate fields."""
    # A single high-severity finding shaped like real `bandit -f json` output.
    finding = {
        "filename": "test.py",
        "line_number": 10,
        "col_offset": 4,
        "test_id": "B602",
        "test_name": "subprocess_popen_with_shell_equals_true",
        "issue_severity": "HIGH",
        "issue_confidence": "HIGH",
        "issue_text": (
            "subprocess call with shell=True identified, security issue."
        ),
        "more_info": (
            "https://bandit.readthedocs.io/en/1.8.6/plugins/"
            "b602_subprocess_popen_with_shell_equals_true.html"
        ),
        "issue_cwe": {
            "id": 78,
            "link": "https://cwe.mitre.org/data/definitions/78.html",
        },
        "code": "subprocess.call(user_input, shell=True)",
        "line_range": [10],
    }
    issues = parse_bandit_output({"results": [finding]})
    assert_that(issues).is_length(1)
    # Destructure the single parsed issue and check every mapped field.
    (issue,) = issues
    assert_that(issue.file).is_equal_to("test.py")
    assert_that(issue.line).is_equal_to(10)
    assert_that(issue.col_offset).is_equal_to(4)
    assert_that(issue.test_id).is_equal_to("B602")
    assert_that(issue.issue_severity).is_equal_to("HIGH")
    assert_that(issue.issue_confidence).is_equal_to("HIGH")
    assert_that(issue.issue_text).contains("shell=True")
def test_parse_bandit_multiple_issues_and_errors_array() -> None:
    """Parser should handle multiple results and ignore errors array."""
    # Fields identical across both findings are merged in via unpacking.
    shared = {"issue_text_suffix_unused": None}
    first = {
        "filename": "a.py",
        "line_number": 1,
        "col_offset": 0,
        "test_id": "B101",
        "test_name": "assert_used",
        "issue_severity": "LOW",
        "issue_confidence": "HIGH",
        "issue_text": "Use of assert.",
        "more_info": "https://example.com",
        "line_range": [1],
    }
    second = {
        "filename": "b.py",
        "line_number": 2,
        "col_offset": 1,
        "test_id": "B102",
        "test_name": "exec_used",
        "issue_severity": "MEDIUM",
        "issue_confidence": "LOW",
        "issue_text": "Use of exec.",
        "more_info": "https://example.com",
        "line_range": [2],
    }
    payload = {
        # The errors array must not surface as parsed issues.
        "errors": [{"filename": "z.py", "reason": "bad config"}],
        "results": [first, second],
    }
    issues = parse_bandit_output(payload)
    assert_that(issues).is_length(2)
    assert_that([issue.file for issue in issues]).is_equal_to(["a.py", "b.py"])
def test_parse_bandit_empty_results() -> None:
    """Ensure an empty results list returns no issues."""
    assert_that(parse_bandit_output({"results": []})).is_empty()
def test_parse_bandit_missing_results_key() -> None:
    """Missing results should behave as empty list (no crash)."""
    assert_that(parse_bandit_output({})).is_empty()
def test_parse_bandit_handles_malformed_issue_gracefully(
    caplog: pytest.LogCaptureFixture,
) -> None:
    """Malformed issue entries should be skipped with a warning.

    Args:
        caplog: Pytest logging capture fixture.
    """
    # NOTE(review): caplog is accepted but never asserted — confirm whether
    # the parser logs a warning and, if so, assert on caplog.records.
    bad_entries = [None, 42, {"filename": "x.py", "line_number": "NaN"}]
    issues = parse_bandit_output({"results": bad_entries})
    assert_that(issues).is_empty()
def test_bandit_check_parses_mixed_output_json(
    monkeypatch: pytest.MonkeyPatch,
    tmp_path: Path,
) -> None:
    """BanditTool.check should parse JSON amidst mixed stdout/stderr text.

    Args:
        monkeypatch: Pytest monkeypatch fixture.
        tmp_path: Temporary directory path fixture.
    """
    p = tmp_path / "a.py"
    p.write_text("print('hello')\n")
    sample = {
        "errors": [],
        "results": [
            {
                "filename": str(p),
                "line_number": 1,
                "col_offset": 0,
                "issue_severity": "LOW",
                "issue_confidence": "HIGH",
                "test_id": "B101",
                "test_name": "assert_used",
                "issue_text": "Use of assert detected.",
                "more_info": "https://example.com",
                "line_range": [1],
            },
        ],
    }
    # Progress noise before the JSON document and log noise on stderr.
    mixed_stdout = "Working... 100%\n" + json.dumps(sample) + "\n"
    mixed_stderr = "[main] INFO done\n"

    def fake_run(
        cmd: list[str],
        capture_output: bool,
        text: bool,
        timeout: int,
        **kwargs: Any,
    ) -> SimpleNamespace:
        # Handle version check calls separately so they do not receive scan
        # output (consistent with the other fake_run stubs in this module).
        if "--version" in cmd:
            return SimpleNamespace(stdout="bandit 1.9.2", stderr="", returncode=0)
        # Handle actual check calls with mixed (noisy) output.
        return SimpleNamespace(stdout=mixed_stdout, stderr=mixed_stderr, returncode=0)

    monkeypatch.setattr("subprocess.run", fake_run)
    tool = ToolRegistry.get("bandit")
    assert_that(tool).is_not_none()
    result: ToolResult = tool.check([str(p)], {})
    # Use assertpy's dedicated matchers for clearer failure diagnostics
    # (was: assert_that(isinstance(...)).is_true() / `x is True` checks).
    assert_that(result).is_instance_of(ToolResult)
    assert_that(result.name).is_equal_to("bandit")
    assert_that(result.success).is_true()
    assert_that(result.issues_count).is_equal_to(1)
def test_bandit_check_handles_nonzero_rc_with_errors_array(
    monkeypatch: pytest.MonkeyPatch,
    tmp_path: Path,
) -> None:
    """Ensure nonzero return with JSON errors[] sets success False but parses.

    Args:
        monkeypatch: Pytest monkeypatch fixture.
        tmp_path: Temporary directory path fixture.
    """
    p = tmp_path / "c.py"
    p.write_text("print('x')\n")
    sample = {
        "errors": [
            {"filename": str(p), "reason": "config error"},
        ],
        "results": [
            {
                "filename": str(p),
                "line_number": 1,
                "col_offset": 0,
                "issue_severity": "LOW",
                "issue_confidence": "HIGH",
                "test_id": "B101",
                "test_name": "assert_used",
                "issue_text": "Use of assert detected.",
                "more_info": "https://example.com",
                "line_range": [1],
            },
        ],
    }

    def fake_run(
        cmd: list[str],
        capture_output: bool,
        text: bool,
        timeout: int,
        **kwargs: Any,
    ) -> SimpleNamespace:
        # Handle version check calls
        if "--version" in cmd:
            return SimpleNamespace(stdout="bandit 1.9.2", stderr="", returncode=0)
        # Handle actual check calls: valid JSON, but nonzero rc and errors[].
        # (Uses the already-imported SimpleNamespace instead of a local NS
        # class, consistent with the other fake_run stubs in this module.)
        return SimpleNamespace(stdout=json.dumps(sample), stderr="", returncode=1)

    monkeypatch.setattr("subprocess.run", fake_run)
    tool = ToolRegistry.get("bandit")
    assert_that(tool).is_not_none()
    result: ToolResult = tool.check([str(p)], {})
    assert_that(result.success).is_false()
    assert_that(result.issues_count).is_equal_to(1)
def test_bandit_check_handles_unparseable_output(
    monkeypatch: pytest.MonkeyPatch,
    tmp_path: Path,
) -> None:
    """On unparseable output, BanditTool.check should fail gracefully.

    Args:
        monkeypatch: Pytest monkeypatch fixture.
        tmp_path: Temporary directory path fixture.
    """
    p = tmp_path / "b.py"
    p.write_text("x=1\n")

    def fake_run(
        cmd: list[str],
        capture_output: bool,
        text: bool,
        timeout: int,
        **kwargs: Any,
    ) -> SimpleNamespace:
        # Handle version check calls
        if "--version" in cmd:
            return SimpleNamespace(stdout="bandit 1.9.2", stderr="", returncode=0)
        # Handle actual check calls with output containing no JSON at all.
        return SimpleNamespace(stdout="nonsense", stderr="also nonsense", returncode=1)

    monkeypatch.setattr("subprocess.run", fake_run)
    tool = ToolRegistry.get("bandit")
    assert_that(tool).is_not_none()
    result: ToolResult = tool.check([str(p)], {})
    # Use assertpy's dedicated matchers for clearer failure diagnostics
    # (was: assert_that(isinstance(...)).is_true() / `x is False` checks).
    assert_that(result).is_instance_of(ToolResult)
    assert_that(result.name).is_equal_to("bandit")
    assert_that(result.success).is_false()
    assert_that(result.issues_count).is_equal_to(0)