Coverage for lintro/ai/metadata/helpers.py: 97%

67 statements  

« prev     ^ index     » next       coverage.py v7.13.0, created at 2026-04-03 18:53 +0000

1"""Helper functions for attaching and normalizing AI metadata.""" 

2 

3from __future__ import annotations 

4 

5from typing import TYPE_CHECKING, Any 

6 

7from lintro.ai.metadata.fix_suggestion_payload import AIFixSuggestionPayload 

8from lintro.ai.metadata.summary_payload import AISummaryPayload 

9 

10if TYPE_CHECKING: 

11 from lintro.ai.models import AIFixSuggestion, AISummary 

12 from lintro.ai.telemetry import AITelemetry 

13 from lintro.models.core.tool_result import ToolResult 

14 

15 

def summary_to_payload(summary: AISummary) -> AISummaryPayload:
    """Build a JSON-serializable payload from an AISummary model.

    Args:
        summary: Summary model whose fields are copied verbatim.

    Returns:
        An ``AISummaryPayload`` mirroring the summary's fields.
    """
    # Field names are identical on both sides, so copy them by name.
    field_names = (
        "overview",
        "key_patterns",
        "priority_actions",
        "triage_suggestions",
        "estimated_effort",
        "input_tokens",
        "output_tokens",
        "cost_estimate",
    )
    return AISummaryPayload(
        **{name: getattr(summary, name) for name in field_names}
    )

28 

29 

def suggestion_to_payload(
    suggestion: AIFixSuggestion,
) -> AIFixSuggestionPayload:
    """Build a JSON-serializable payload from an AIFixSuggestion model.

    Args:
        suggestion: Fix suggestion model whose fields are copied verbatim.

    Returns:
        An ``AIFixSuggestionPayload`` mirroring the suggestion's fields.
    """
    # Field names are identical on both sides, so copy them by name.
    field_names = (
        "file",
        "line",
        "code",
        "tool_name",
        "original_code",
        "suggested_code",
        "explanation",
        "confidence",
        "risk_level",
        "diff",
        "input_tokens",
        "output_tokens",
        "cost_estimate",
    )
    return AIFixSuggestionPayload(
        **{name: getattr(suggestion, name) for name in field_names}
    )

49 

50 

def ensure_ai_metadata(result: ToolResult) -> dict[str, Any]:
    """Return the result's AI metadata dict, creating it when absent.

    Args:
        result: Tool result whose ``ai_metadata`` attribute is ensured.

    Returns:
        The (possibly newly created) mutable metadata mapping.
    """
    metadata = result.ai_metadata
    if metadata is None:
        metadata = {}
        result.ai_metadata = metadata
    return metadata

56 

57 

def attach_summary_metadata(
    result: ToolResult,
    summary: AISummary,
) -> None:
    """Store the serialized summary under the ``"summary"`` metadata key.

    Other AI metadata keys already present on the result are left intact.

    Args:
        result: Tool result to annotate.
        summary: Summary model to serialize and attach.
    """
    container = ensure_ai_metadata(result)
    container["summary"] = summary_to_payload(summary).to_dict()

66 

67 

def attach_fix_suggestions_metadata(
    result: ToolResult,
    suggestions: list[AIFixSuggestion],
) -> None:
    """Append serialized fix suggestions to the result's AI metadata.

    Suggestions attached by earlier calls are preserved; other metadata
    keys (e.g. ``"summary"``) are untouched.

    Args:
        result: Tool result to annotate.
        suggestions: Fix suggestion models to serialize and append.
    """
    metadata = ensure_ai_metadata(result)
    new_payloads = [suggestion_to_payload(item).to_dict() for item in suggestions]
    metadata["fix_suggestions"] = (
        list(metadata.get("fix_suggestions", [])) + new_payloads
    )

77 

78 

def attach_fixed_count_metadata(
    result: ToolResult,
    fixed_count: int,
) -> None:
    """Record the per-tool AI-applied fix count for summary rendering.

    The value is clamped to be non-negative and written under both the
    ``"fixed_count"`` and ``"applied_count"`` keys.

    Args:
        result: Tool result to annotate.
        fixed_count: Number of AI-applied fixes; negatives become 0.
    """
    metadata = ensure_ai_metadata(result)
    count = int(fixed_count)
    if count < 0:
        count = 0
    metadata["fixed_count"] = count
    metadata["applied_count"] = count

88 

89 

def attach_validation_counts_metadata(
    result: ToolResult,
    *,
    verified_count: int,
    unverified_count: int,
) -> None:
    """Record per-tool validation counts for AI-applied fixes.

    Both counts are clamped to be non-negative before being stored.

    Args:
        result: Tool result to annotate.
        verified_count: Count of verified fixes; negatives become 0.
        unverified_count: Count of unverified fixes; negatives become 0.
    """
    metadata = ensure_ai_metadata(result)
    for key, value in (
        ("verified_count", verified_count),
        ("unverified_count", unverified_count),
    ):
        metadata[key] = max(0, int(value))

100 

101 

def attach_telemetry_metadata(
    results: list[ToolResult],
    telemetry: AITelemetry,
) -> None:
    """Attach telemetry metrics to the first result's AI metadata.

    No-op when ``results`` is empty.

    Args:
        results: Tool results; only the first one receives the metrics.
        telemetry: Telemetry object serialized via its ``to_dict`` method.
    """
    if not results:
        return
    ensure_ai_metadata(results[0])["ai_metrics"] = telemetry.to_dict()

111 

112 

def normalize_ai_metadata(raw: dict[str, Any]) -> dict[str, Any]:
    """Normalize legacy and current AI metadata into one stable shape.

    Only recognized, well-typed entries are carried over; everything else
    is dropped. The input mapping is never mutated.

    Args:
        raw: Raw AI metadata mapping, possibly using legacy key names.

    Returns:
        A new dict with the normalized metadata entries.
    """
    import copy

    out: dict[str, Any] = {}

    if isinstance(raw.get("summary"), dict):
        out["summary"] = raw["summary"]

    # Older payloads stored suggestions under the "suggestions" key;
    # fall back to it only when "fix_suggestions" is missing entirely.
    suggestion_items = raw.get("fix_suggestions")
    if suggestion_items is None:
        suggestion_items = raw.get("suggestions")
    if isinstance(suggestion_items, list):
        out["fix_suggestions"] = [
            entry for entry in suggestion_items if isinstance(entry, dict)
        ]

    fixed = raw.get("fixed_count")
    if isinstance(fixed, int):
        out["fixed_count"] = fixed

    applied = raw.get("applied_count")
    if isinstance(applied, int):
        out["applied_count"] = applied
    elif isinstance(fixed, int):
        # Metadata that only recorded fixed_count mirrors it as applied_count.
        out["applied_count"] = fixed

    for count_key in ("verified_count", "unverified_count"):
        count_value = raw.get(count_key)
        if isinstance(count_value, int):
            out[count_key] = count_value

    metrics = raw.get("ai_metrics")
    if isinstance(metrics, dict):
        # Deep-copy so later mutation of the source cannot leak through.
        out["ai_metrics"] = copy.deepcopy(metrics)

    # Pass through tool-specific metadata (e.g. osv-scanner suppressions)
    suppression_items = raw.get("suppressions")
    if isinstance(suppression_items, list):
        out["suppressions"] = [
            entry for entry in suppression_items if isinstance(entry, dict)
        ]

    return out