Coverage for src/alprina_cli/agents/guardrails.py: 29%
72 statements
« prev ^ index » next coverage.py v7.11.3, created at 2025-11-14 11:27 +0100
"""
Alprina Guardrails Agent.

Safety validation before executing scans.
Integrated from the Alprina framework for use in the Alprina platform.
"""
import asyncio
import re
from typing import Dict, Any, List

from loguru import logger
# Probe for the real CAI Guardrails Agent; record availability in a
# module-level flag so the wrapper can decide between real and mock scans.
try:
    from alprina.agents import get_agent_by_name
except ImportError as e:
    CAI_AVAILABLE = False
    logger.debug(f"Alprina agents not available: {e}")  # DEBUG level - not shown to users
else:
    CAI_AVAILABLE = True
    logger.debug("CAI Guardrails Agent available")  # DEBUG level - not shown to users
class GuardrailsWrapper:
    """
    Wrapper for the CAI Guardrails Agent.

    Provides a synchronous interface to the async Alprina agent. When the
    ``alprina`` package is unavailable (``CAI_AVAILABLE`` is False) or the
    agent cannot be initialized, scans fall back to a mock implementation
    that returns the same result shape.
    """

    def __init__(self):
        self.name = "Guardrails Agent"
        self.agent_type = "safety-check"
        self.description = "Safety validation before executing scans"
        # Lazily created by _get_alprina_agent(); None until first use.
        self._alprina_agent = None

    def _get_alprina_agent(self):
        """Return the cached Alprina agent, creating it on first call.

        Returns:
            The agent instance, or None when CAI is unavailable or
            initialization fails (callers then use the mock path).
        """
        if not CAI_AVAILABLE:
            return None

        if self._alprina_agent is None:
            try:
                # Get the real Alprina agent by its registered name.
                self._alprina_agent = get_agent_by_name("guardrails")
                logger.info("CAI Guardrails Agent initialized")
            except Exception as e:
                logger.error(f"Failed to initialize CAI Guardrails Agent: {e}")
                return None

        return self._alprina_agent

    async def _scan_async(self, target: str, safe_only: bool = True) -> Dict[str, Any]:
        """
        Async scan using the real Alprina agent.

        Args:
            target: Target system, application, or path
            safe_only: If True, only perform safe, non-destructive tests

        Returns:
            Dictionary with scan results; falls back to the mock scan when
            the agent is unavailable or raises.
        """
        alprina_agent = self._get_alprina_agent()

        if alprina_agent is None:
            # Fallback to mock implementation
            return self._mock_scan(target, safe_only)

        try:
            # Build prompt for Alprina agent
            prompt = f"""Perform safety-check analysis on: {target}

Focus on:
- Security guardrails
- Safe operation validation
- Risk assessment
- Compliance checks

Provide detailed findings with severity levels."""

            # Create message for Alprina agent
            messages = [
                {"role": "user", "content": prompt}
            ]

            # Run Alprina agent (async)
            result = await alprina_agent.run(messages)

            # Parse Alprina agent response into findings
            findings = self._parse_cai_response(result.value, target)

            return {
                "agent": self.name,
                "type": self.agent_type,
                "target": target,
                "findings": findings,
                "summary": {
                    "total_findings": len(findings),
                    "alprina_powered": True
                }
            }

        except Exception as e:
            logger.error(f"CAI Guardrails Agent error: {e}")
            # Fallback to mock
            return self._mock_scan(target, safe_only)

    def _mock_scan(self, target: str, safe_only: bool = True) -> Dict[str, Any]:
        """Mock scan implementation (fallback when CAI is not available).

        Always returns a single INFO finding so downstream consumers see
        the same result shape as a real scan.
        """
        findings = [{
            "type": "Security Finding",
            "severity": "INFO",
            "title": "Mock scan result",
            "description": "This is a mock implementation. Enable CAI for real analysis.",
            "file": target,
            "line": 0,
            "confidence": 0.5
        }]

        return {
            "agent": self.name,
            "type": self.agent_type,
            "target": target,
            "findings": findings,
            "summary": {
                "total_findings": len(findings),
                "alprina_powered": False
            }
        }

    def _parse_cai_response(self, response: str, target: str) -> List[Dict[str, Any]]:
        """
        Parse an Alprina agent response into structured findings.

        Heuristic: any paragraph (text up to a blank line or end of input)
        that mentions a severity keyword becomes one finding at that level.

        Args:
            response: Alprina agent response text
            target: Target that was scanned

        Returns:
            List of finding dictionaries; when nothing matches but the
            response is substantive (> 50 chars), a single INFO summary
            finding carrying the raw text.
        """
        findings: List[Dict[str, Any]] = []

        # (severity label, pattern) pairs, checked highest-first.
        severity_patterns = [
            ("HIGH", r"(?i)(critical|high|severe).*?(?=\n\n|\Z)"),
            ("MEDIUM", r"(?i)(medium|moderate).*?(?=\n\n|\Z)"),
            ("LOW", r"(?i)(low|minor|info).*?(?=\n\n|\Z)"),
        ]

        for severity, pattern in severity_patterns:
            for match in re.finditer(pattern, response, re.DOTALL):
                finding_text = match.group(0)
                lines = finding_text.strip().split('\n')
                title = lines[0] if lines else "Security Finding"

                findings.append({
                    "type": "Safety Check",
                    "severity": severity,
                    "title": title[:100],          # keep titles short
                    "description": finding_text[:500],
                    "file": target,
                    "line": 0,
                    "confidence": 0.8
                })

        # If no findings parsed, surface the raw analysis as one finding.
        if not findings and len(response) > 50:
            findings.append({
                "type": "Safety Check",
                "severity": "INFO",
                "title": "Guardrails Agent Analysis Complete",
                "description": response[:500],
                "file": target,
                "line": 0,
                "confidence": 1.0
            })

        return findings

    def scan(self, target: str, safe_only: bool = True) -> Dict[str, Any]:
        """
        Perform security scan (synchronous wrapper).

        Args:
            target: Target system, application, or path
            safe_only: If True, only perform safe, non-destructive tests

        Returns:
            Dictionary with scan results

        Raises:
            RuntimeError: if called from within a running event loop
                (``await _scan_async(...)`` there instead).
        """
        logger.info(f"Guardrails Agent scanning: {target} (safe_only={safe_only}, CAI={CAI_AVAILABLE})")

        # Bug fix: the previous code used the deprecated
        # asyncio.get_event_loop() and never closed loops it created with
        # new_event_loop(). asyncio.run() creates, runs, and closes a
        # fresh loop for each call.
        try:
            asyncio.get_running_loop()
        except RuntimeError:
            # Normal synchronous-context path.
            return asyncio.run(self._scan_async(target, safe_only))

        # A loop is already running in this thread; blocking on another
        # coroutine is impossible. The old code also raised RuntimeError
        # here ("this event loop is already running"), so the exception
        # type callers may catch is unchanged.
        raise RuntimeError(
            "GuardrailsWrapper.scan() cannot be used inside a running "
            "event loop; await GuardrailsWrapper._scan_async() instead."
        )
# Module-level singleton shared by all callers of run_guardrails_scan().
guardrails_agent = GuardrailsWrapper()


def run_guardrails_scan(target: str, safe_only: bool = True) -> Dict[str, Any]:
    """
    Run a guardrails security scan via the shared singleton agent.

    Args:
        target: Target to scan
        safe_only: Only perform safe tests

    Returns:
        Scan results
    """
    return guardrails_agent.scan(target, safe_only=safe_only)