Coverage for src/alprina_cli/chat.py: 0%

327 statements  

« prev     ^ index     » next       coverage.py v7.11.3, created at 2025-11-14 11:27 +0100

1""" 

2Interactive chat interface for Alprina. 

3Provides conversational AI assistant for security scanning. 

4""" 

5 

import re
import sys
from pathlib import Path
from typing import Optional

from loguru import logger
from prompt_toolkit import PromptSession
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
from prompt_toolkit.formatted_text import HTML
from prompt_toolkit.history import FileHistory
from prompt_toolkit.key_binding import KeyBindings
from rich.console import Console
from rich.markdown import Markdown
from rich.panel import Panel
from rich.progress import Progress, SpinnerColumn, TextColumn
from rich.table import Table

from .context_manager import ConversationContext
from .llm_provider import get_llm_client
from .main_agent import MainAlprinaAgent
from .mitigation import mitigate_command
from .scanner import scan_command

27 

28console = Console() 

29 

30 

class AlprinaChatSession:
    """Interactive chat session with Alprina AI."""

    def __init__(
        self,
        model: str = "claude-3-5-sonnet-20241022",
        streaming: bool = True,
        context_file: Optional[Path] = None
    ):
        """
        Initialize chat session.

        Args:
            model: LLM model to use
            streaming: Enable streaming responses
            context_file: Load previous scan context
        """
        self.model = model
        self.streaming = streaming
        self.context = ConversationContext()
        self.llm = get_llm_client(model=model)

        # Session statistics, surfaced by the welcome banner and /stats.
        # NOTE(review): tokens_used/estimated_cost are initialized here but
        # never updated elsewhere in this file — confirm who maintains them.
        self.stats = {
            "messages": 0,
            "tokens_used": 0,
            "estimated_cost": 0.0,
            "scans_run": 0,
            "start_time": None
        }

        # time is imported locally (matching _show_stats) rather than at
        # module level.
        import time
        self.stats["start_time"] = time.time()

        # Initialize Main Alprina Agent (orchestrator) — it routes chat
        # requests to the specialized security agents in _process_message.
        self.main_agent = MainAlprinaAgent(model=model)
        logger.info("Main Alprina Agent initialized in chat session")

        # Set up prompt session with persistent history under ~/.alprina/.
        history_file = Path.home() / '.alprina' / 'chat_history.txt'
        history_file.parent.mkdir(parents=True, exist_ok=True)

        self.session = PromptSession(
            history=FileHistory(str(history_file)),
            auto_suggest=AutoSuggestFromHistory(),
            key_bindings=self._create_key_bindings()
        )

        # Load previous scan results for follow-up questions, if provided.
        if context_file:
            self.context.load_scan_results(context_file)

        logger.info(f"Chat session initialized with model: {model}")

84 

85 def _create_key_bindings(self): 

86 """Create custom key bindings.""" 

87 kb = KeyBindings() 

88 

89 @kb.add('c-c') 

90 def _(event): 

91 """Handle Ctrl+C gracefully.""" 

92 event.app.exit() 

93 

94 return kb 

95 

96 def start(self): 

97 """Start interactive chat loop.""" 

98 self._show_welcome() 

99 

100 while True: 

101 try: 

102 # Get user input 

103 user_input = self.session.prompt("\n[bold green]You:[/bold green] ") 

104 

105 if not user_input.strip(): 

106 continue 

107 

108 # Check for exit 

109 if user_input.lower() in ['exit', 'quit', 'q', 'bye']: 

110 self._handle_exit() 

111 break 

112 

113 # Handle special commands 

114 if user_input.startswith('/'): 

115 self._handle_command(user_input) 

116 continue 

117 

118 # Process as chat message 

119 self._process_message(user_input) 

120 

121 except KeyboardInterrupt: 

122 console.print("\n[yellow]Use 'exit' or Ctrl+D to quit[/yellow]") 

123 continue 

124 except EOFError: 

125 self._handle_exit() 

126 break 

127 except Exception as e: 

128 logger.error(f"Chat error: {e}", exc_info=True) 

129 console.print(f"[red]Error: {e}[/red]") 

130 

    def _show_welcome(self):
        """Show welcome message with session info (model, auth, examples)."""
        # Get auth status; imported locally to avoid a cycle at module load.
        from .config import get_api_key
        api_key = get_api_key()
        auth_status = "✅ Authenticated" if api_key else "⚠️ Not authenticated"

        console.print(Panel.fit(
            "[bold cyan]🛡️ Hey! I'm Alprina, your security expert![/bold cyan]\n\n"
            f"[dim]Model:[/dim] {self.model}\n"
            f"[dim]Status:[/dim] {auth_status}\n"
            f"[dim]Session:[/dim] {self.stats['messages']} messages\n\n"
            "[bold]💬 Chat with me naturally, like:[/bold]\n"
            ' • "Scan my Python app for vulnerabilities"\n'
            ' • "What\'s SQL injection and how do I fix it?"\n'
            ' • "Find secrets in my code"\n'
            ' • "Explain finding #3"\n\n'
            "[bold]⚡ Quick commands:[/bold]\n"
            "  [cyan]/scan <path>[/cyan] - Run security scan\n"
            "  [cyan]/status[/cyan] - Show auth status\n"
            "  [cyan]/stats[/cyan] - Show session stats\n"
            "  [cyan]/help[/cyan] - Show all commands\n"
            "  [cyan]/exit[/cyan] - Quit chat\n\n"
            "[dim]💡 Tip: I can explain vulnerabilities, show fixes, and even\n"
            "scan your code - just ask me in plain English![/dim]",
            title="🛡️ Alprina Interactive Chat",
            border_style="cyan"
        ))

        # Show context summary when previous scan results were loaded.
        if self.context.scan_results:
            console.print(f"\n[cyan]📊 Context loaded:[/cyan] {self.context.get_context_summary()}\n")

163 

    def _process_message(self, user_input: str):
        """
        Process user message using Main Alprina Agent (orchestrator).

        Routes the message through MainAlprinaAgent, displays its response
        according to the response type, and falls back to a direct LLM call
        if the agent pipeline raises.

        Args:
            user_input: User's message
        """
        # Update stats
        self.stats["messages"] += 1

        # Add to context so the agent sees the full conversation.
        self.context.add_user_message(user_input)

        console.print(f"\n[bold cyan]Alprina:[/bold cyan]")

        try:
            # Route request through Main Alprina Agent (orchestrator)
            logger.info("Routing request through Main Alprina Agent")

            # Prepare context for Main Agent
            agent_context = {
                "scan_results": self.context.scan_results,
                "conversation_history": self.context.get_messages_for_llm()
            }

            # Show enhanced thinking indicator with agent transparency
            with Progress(
                SpinnerColumn(),
                TextColumn("[progress.description]{task.description}"),
                console=console,
                transient=False  # Keep visible to show thinking process
            ) as progress:
                # Step 1: Analyze intent
                task = progress.add_task("[cyan]💭 Analyzing your request...", total=None)

                # Get intent first (to show which agents will be used).
                # NOTE(review): _analyze_intent is a private method of
                # MainAlprinaAgent and process_user_request below presumably
                # re-analyzes — confirm there is no double work.
                intent = self.main_agent._analyze_intent(user_input, agent_context)
                progress.update(task, description="[green]✓ Request analyzed")

                # Step 2: Show agent selection transparency
                if intent.get("agents"):
                    agent_names = []
                    for agent_key in intent["agents"]:
                        agent_info = self.main_agent.SECURITY_AGENTS.get(agent_key, {})
                        agent_names.append(agent_info.get("name", agent_key))

                    agents_str = ", ".join(agent_names)
                    progress.add_task(f"[cyan]🤖 Selected agents: {agents_str}", total=None)
                    progress.add_task(f"[cyan]🎯 Task type: {intent.get('type', 'unknown')}", total=None)

                # Step 3: Execute with selected agents
                progress.add_task("[cyan]⚡ Executing security analysis...", total=None)

                # Main Agent processes request and coordinates with security agents
                response_data = self.main_agent.process_user_request(
                    user_message=user_input,
                    context=agent_context
                )

                progress.add_task("[green]✓ Analysis complete!", total=None)

            # Extract response message
            response_message = response_data.get("message", "")
            response_type = response_data.get("type", "general")

            # Display response based on type
            if response_type == "scan_complete":
                # Scan was executed - display results
                console.print(Markdown(response_message))

                # Update context with scan results if available
                if response_data.get("results"):
                    self.context.scan_results = response_data["results"]

            elif response_type == "clarification_needed":
                # Main Agent needs more info
                console.print(f"[yellow]{response_message}[/yellow]")

            elif response_type == "error":
                # Error occurred
                console.print(f"[red]{response_message}[/red]")

            else:
                # General response, explanation, remediation, capabilities, etc.
                console.print(Markdown(response_message))

            # Add response to context
            self.context.add_assistant_message(response_message)

        except Exception as e:
            logger.error(f"Main Agent processing failed: {e}", exc_info=True)
            console.print(f"[red]Error: {str(e)}[/red]")
            console.print("[yellow]Falling back to direct LLM response...[/yellow]")

            # Fallback to old behavior: talk to the LLM directly, bypassing
            # the orchestrator.
            if self.streaming:
                response = self._get_streaming_response(user_input)
            else:
                with Progress(
                    SpinnerColumn(),
                    TextColumn("[progress.description]{task.description}"),
                    console=console,
                    transient=True
                ) as progress:
                    progress.add_task("Thinking...", total=None)
                    response = self._get_response(user_input)

            console.print(Markdown(response))

            self.context.add_assistant_message(response)

274 

275 def _get_response(self, user_input: str) -> str: 

276 """ 

277 Get AI response (non-streaming). 

278 

279 Args: 

280 user_input: User's message 

281 

282 Returns: 

283 AI response text 

284 """ 

285 try: 

286 system_prompt = self._build_system_prompt() 

287 messages = self.context.get_messages_for_llm() 

288 

289 response = self.llm.chat( 

290 messages=messages, 

291 system_prompt=system_prompt, 

292 max_tokens=4096, 

293 temperature=0.7 

294 ) 

295 

296 return response 

297 except Exception as e: 

298 logger.error(f"Failed to get AI response: {e}") 

299 return f"I encountered an error: {e}. Please try again." 

300 

301 def _get_streaming_response(self, user_input: str) -> str: 

302 """ 

303 Get AI response with streaming. 

304 

305 Args: 

306 user_input: User's message 

307 

308 Returns: 

309 Full AI response text 

310 """ 

311 try: 

312 system_prompt = self._build_system_prompt() 

313 messages = self.context.get_messages_for_llm() 

314 

315 full_response = "" 

316 for chunk in self.llm.chat_streaming( 

317 messages=messages, 

318 system_prompt=system_prompt, 

319 max_tokens=4096, 

320 temperature=0.7 

321 ): 

322 console.print(chunk, end="") 

323 full_response += chunk 

324 

325 console.print() # Newline after streaming 

326 return full_response 

327 

328 except Exception as e: 

329 logger.error(f"Failed to get streaming response: {e}") 

330 return f"\n\nI encountered an error: {e}. Please try again." 

331 

    def _build_system_prompt(self) -> str:
        """
        Build context-aware system prompt.

        Returns:
            System prompt string
        """
        # NOTE: the text below is runtime content sent to the LLM verbatim —
        # it is a string literal, not documentation.
        base_prompt = """You are Alprina, an expert security consultant with 20+ years of cybersecurity experience.

## YOUR PERSONALITY:

You're **friendly but professional** - like a senior security engineer who:
- Gets excited about finding (and fixing) vulnerabilities 🛡️
- Explains complex security concepts simply
- Shares war stories and real-world examples
- Celebrates when users secure their code
- Never judges - everyone's learning!

You're **patient and educational** - you:
- Break down jargon into plain English
- Use analogies that developers understand
- Show code examples (not just theory)
- Encourage good security practices
- Make security feel achievable, not scary

You're **practical and actionable** - you:
- Provide copy/paste code fixes
- Prioritize high-impact issues first
- Suggest realistic security improvements
- Know when "perfect" is the enemy of "good enough"
- Focus on what matters most

## COMMUNICATION STYLE:

**Think out loud**: Share your reasoning process
- "I'm going to scan this with CodeAgent because..."
- "Looking at this code, I notice..."
- "Let me check for SQL injection first, then XSS..."

**Be conversational**: Talk like a real person
- Instead of: "SQL injection vulnerability detected"
- Say: "Uh oh, I found a SQL injection here. This is serious - an attacker could steal your entire database!"

**Show, don't just tell**: Use examples
- Instead of: "Use parameterized queries"
- Say: "Replace `query = f'SELECT * FROM users WHERE id={user_id}'` with `cursor.execute('SELECT * FROM users WHERE id=?', (user_id,))`"

**End with next steps**: Always give users options
- "Want me to scan the rest of your app?"
- "Should I explain how this attack works?"
- "Need help implementing this fix?"

## ALPRINA SECURITY AGENTS YOU COORDINATE:

### 1. CodeAgent (code-audit)
   - SAST (Static Application Security Testing)
   - Detects: SQL injection, XSS, CSRF, authentication flaws
   - Analyzes: Python, JavaScript, Java, Go, PHP, Ruby, Rust, C/C++
   - Finds: Hardcoded secrets, insecure cryptography, input validation issues
   - Scans: Dependencies for known CVEs

### 2. Web Scanner Agent (web-recon)
   - API endpoint security testing
   - Authentication bypass detection
   - Rate limiting analysis
   - CORS misconfiguration detection
   - Session management vulnerabilities
   - HTTP security headers validation

### 3. Bug Bounty Agent (vuln-scan)
   - OWASP Top 10 vulnerability detection
   - Business logic flaws
   - Authorization issues
   - Information disclosure
   - Server misconfigurations

### 4. Secret Detection Agent
   - API keys, tokens, passwords in code
   - AWS credentials, database connection strings
   - Private keys, certificates
   - Slack tokens, GitHub tokens
   - Regex-based + entropy analysis

### 5. Config Audit Agent
   - Docker security configurations
   - Kubernetes manifests
   - CI/CD pipeline security
   - Environment variable exposure
   - Cloud infrastructure misconfigurations

## VULNERABILITY CATEGORIES YOU DETECT:

**Critical:** SQL Injection, RCE, Authentication Bypass, Hardcoded Credentials, SSRF
**High:** XSS, CSRF, Insecure Deserialization, XXE, Path Traversal
**Medium:** Security Misconfiguration, Sensitive Data Exposure, Missing Headers, Weak Crypto
**Low:** Information Disclosure, Missing Rate Limiting, Verbose Errors, Outdated Dependencies

## REPORTING CAPABILITIES:

You automatically generate professional security reports in the `.alprina/` folder:
- **SECURITY-REPORT.md** - Full vulnerability report with severity breakdown
- **FINDINGS.md** - Detailed findings with code snippets and CWE references
- **REMEDIATION.md** - Step-by-step fix instructions with code examples
- **EXECUTIVE-SUMMARY.md** - Non-technical overview for stakeholders

These reports also sync to the dashboard at https://dashboard.alprina.ai

## YOUR COMMUNICATION STYLE:

**When users ask "What can you do?" or "Help":**
- Be conversational, not robotic
- Give real examples they can try immediately
- Explain capabilities in plain English
- Offer next steps

**When explaining vulnerabilities:**
- Start with "What it is" (simple explanation)
- Show "How it works" (real attack example)
- Provide "The fix" (code you can copy/paste)
- End with "Want me to scan your code?"

**When providing fixes:**
- Show vulnerable code vs. secure code side-by-side
- Explain WHY the fix works
- Include best practices
- Offer to scan after they implement the fix

## NATURAL LANGUAGE UNDERSTANDING:

You understand requests like:
- "Scan my code" → Run CodeAgent on local files
- "Check my API" → Run Web Scanner Agent
- "Find hardcoded secrets" → Run Secret Detection Agent
- "What's SQL injection?" → Educational explanation
- "How do I fix finding #3?" → Provide remediation for specific finding
- "Create a security report" → Generate markdown reports in .alprina/ folder

## REMEMBER:

- Your goal is to make security accessible to ALL developers
- Explain complex concepts in simple terms
- Provide actionable, copy/paste solutions
- Generate reports automatically in .alprina/ folder
- Save everything to dashboard for tracking
- Be patient and encouraging
- Use analogies and real-world examples
- End responses with helpful next steps

Think of yourself as a friendly security expert who's here to help, teach, and protect - not to intimidate or overwhelm."""

        # Add scan context if available, so answers can reference findings.
        if self.context.scan_results:
            context_info = self.context.get_detailed_context()
            base_prompt += f"\n\nCurrent Scan Context:\n{context_info}"

        return base_prompt

488 

489 def _is_scan_request(self, text: str) -> bool: 

490 """Check if user wants to run a scan.""" 

491 scan_keywords = ['scan', 'analyze', 'check', 'test', 'audit'] 

492 text_lower = text.lower() 

493 return any(keyword in text_lower for keyword in scan_keywords) and \ 

494 ('my' in text_lower or './' in text or 'http' in text_lower or 'file' in text_lower) 

495 

496 def _is_mitigation_request(self, text: str) -> bool: 

497 """Check if user wants mitigation steps.""" 

498 mitigation_keywords = ['fix', 'remediate', 'solve', 'patch', 'how to fix'] 

499 return any(keyword in text.lower() for keyword in mitigation_keywords) 

500 

501 def _handle_scan_request(self, user_input: str): 

502 """Handle scan request from natural language.""" 

503 # Try to extract target from user input 

504 target = self._extract_target(user_input) 

505 

506 if not target: 

507 console.print("[yellow]I'd be happy to run a scan! What would you like me to scan?[/yellow]") 

508 console.print("[dim]Example: ./src, https://api.example.com, or /path/to/file[/dim]") 

509 return 

510 

511 console.print(f"\n[cyan]→ Running security scan on:[/cyan] {target}\n") 

512 

513 # Determine profile based on target 

514 profile = "code-audit" if Path(target).exists() else "web-recon" 

515 

516 # Run scan (this will use the existing scan_command) 

517 try: 

518 scan_command(target, profile=profile, safe_only=True, output=None) 

519 

520 # The scan results would be captured here 

521 # For now, simulate adding to context 

522 console.print(f"\n[green]✓ Scan complete![/green]") 

523 console.print("[dim]Type 'explain' to learn about findings or 'fix' for remediation steps[/dim]\n") 

524 

525 except Exception as e: 

526 console.print(f"[red]Scan failed: {e}[/red]") 

527 logger.error(f"Scan failed: {e}", exc_info=True) 

528 

529 def _handle_mitigation_request(self, user_input: str): 

530 """Handle mitigation request.""" 

531 console.print("\n[cyan]→ Getting remediation suggestions...[/cyan]\n") 

532 

533 try: 

534 # Run mitigation command 

535 mitigate_command(finding_id=None, report_file=None) 

536 

537 except Exception as e: 

538 console.print(f"[red]Failed to get mitigation: {e}[/red]") 

539 logger.error(f"Mitigation failed: {e}", exc_info=True) 

540 

541 def _extract_target(self, text: str) -> Optional[str]: 

542 """Extract scan target from natural language.""" 

543 # Look for file paths 

544 file_pattern = r'\.\/[\w\/\-\.]+' 

545 match = re.search(file_pattern, text) 

546 if match: 

547 return match.group(0) 

548 

549 # Look for URLs 

550 url_pattern = r'https?://[\w\-\.\/]+' 

551 match = re.search(url_pattern, text) 

552 if match: 

553 return match.group(0) 

554 

555 # Look for absolute paths 

556 path_pattern = r'/[\w\/\-\.]+' 

557 match = re.search(path_pattern, text) 

558 if match: 

559 return match.group(0) 

560 

561 return None 

562 

563 def _handle_command(self, command: str): 

564 """ 

565 Handle special chat commands. 

566 

567 Args: 

568 command: Command string starting with / 

569 """ 

570 cmd_parts = command.split() 

571 cmd_name = cmd_parts[0].lower() 

572 

573 if cmd_name == '/help': 

574 self._show_help() 

575 elif cmd_name == '/status': 

576 self._show_status() 

577 elif cmd_name == '/scan': 

578 if len(cmd_parts) < 2: 

579 console.print("[yellow]Usage: /scan <target>[/yellow]") 

580 else: 

581 target = cmd_parts[1] 

582 self._handle_scan_request(f"scan {target}") 

583 elif cmd_name == '/explain': 

584 if len(cmd_parts) < 2: 

585 self._show_findings() 

586 else: 

587 finding_id = cmd_parts[1] 

588 self._explain_finding(finding_id) 

589 elif cmd_name == '/fix': 

590 if len(cmd_parts) < 2: 

591 console.print("[yellow]Usage: /fix <finding_id>[/yellow]") 

592 else: 

593 finding_id = cmd_parts[1] 

594 self._fix_finding(finding_id) 

595 elif cmd_name == '/report': 

596 self._show_report() 

597 elif cmd_name == '/clear': 

598 self.context.clear() 

599 console.print("[green]✓ Conversation history cleared[/green]") 

600 elif cmd_name == '/stats': 

601 self._show_stats() 

602 elif cmd_name == '/save': 

603 self._save_conversation() 

604 elif cmd_name == '/exit' or cmd_name == '/quit': 

605 self._handle_exit() 

606 raise EOFError # Signal to exit the chat loop 

607 else: 

608 console.print(f"[red]Unknown command: {cmd_name}[/red]") 

609 console.print("[dim]Type /help for available commands[/dim]") 

610 

611 def _show_help(self): 

612 """Show help message.""" 

613 help_table = Table(title="Available Commands", show_header=True, header_style="bold cyan", title_style="bold cyan") 

614 help_table.add_column("Command", style="cyan", no_wrap=True) 

615 help_table.add_column("Description", style="white") 

616 

617 commands = [ 

618 ("/help", "Show this help message"), 

619 ("/status", "Show authentication status"), 

620 ("/scan <path>", "Run security scan on file or directory"), 

621 ("/explain [id]", "Explain vulnerability finding (or list all)"), 

622 ("/fix <id>", "Get AI-powered fix for specific finding"), 

623 ("/report", "Show current scan summary report"), 

624 ("/clear", "Clear conversation history"), 

625 ("/stats", "Show session statistics (messages, tokens, cost)"), 

626 ("/save", "Save conversation to file"), 

627 ("/exit, /quit", "Exit chat session"), 

628 ("exit, quit, bye", "Exit chat session"), 

629 ] 

630 

631 for cmd, desc in commands: 

632 help_table.add_row(cmd, desc) 

633 

634 console.print(help_table) 

635 

636 def _show_findings(self): 

637 """Show current scan findings.""" 

638 if not self.context.current_findings: 

639 console.print("[yellow]No scan findings available. Run a scan first![/yellow]") 

640 return 

641 

642 findings_table = Table(title="Current Findings", show_header=True, header_style="bold") 

643 findings_table.add_column("ID", style="dim") 

644 findings_table.add_column("Severity") 

645 findings_table.add_column("Title") 

646 findings_table.add_column("File", style="dim") 

647 

648 for finding in self.context.current_findings: 

649 severity_style = { 

650 'HIGH': 'bold red', 

651 'MEDIUM': 'bold yellow', 

652 'LOW': 'bold green' 

653 }.get(finding.get('severity', 'UNKNOWN'), 'white') 

654 

655 findings_table.add_row( 

656 finding.get('id', 'N/A'), 

657 f"[{severity_style}]{finding.get('severity', 'UNKNOWN')}[/{severity_style}]", 

658 finding.get('title', 'Unknown'), 

659 finding.get('file', 'N/A') 

660 ) 

661 

662 console.print(findings_table) 

663 

664 def _explain_finding(self, finding_id: str): 

665 """Explain specific finding.""" 

666 finding = self.context.get_finding(finding_id) 

667 if not finding: 

668 console.print(f"[red]Finding {finding_id} not found[/red]") 

669 return 

670 

671 # Use AI to explain the finding 

672 explanation_request = f"Can you explain this security finding in detail?\n\n{finding}" 

673 self._process_message(explanation_request) 

674 

675 def _fix_finding(self, finding_id: str): 

676 """Get fix for specific finding.""" 

677 finding = self.context.get_finding(finding_id) 

678 if not finding: 

679 console.print(f"[red]Finding {finding_id} not found[/red]") 

680 return 

681 

682 # Use AI to provide fix 

683 fix_request = f"How can I fix this security vulnerability?\n\n{finding}" 

684 self._process_message(fix_request) 

685 

686 def _show_report(self): 

687 """Show scan report summary.""" 

688 if not self.context.scan_results: 

689 console.print("[yellow]No scan results available[/yellow]") 

690 return 

691 

692 summary = self.context.get_context_summary() 

693 console.print(Panel( 

694 f"[bold]Scan Report[/bold]\n\n{summary}", 

695 border_style="cyan" 

696 )) 

697 

698 def _show_status(self): 

699 """Show authentication status.""" 

700 from .auth import load_token 

701 

702 auth_data = load_token() 

703 

704 if auth_data: 

705 user = auth_data.get("user", {}) 

706 api_key = auth_data.get("token", "") 

707 

708 # Show masked API key 

709 if api_key: 

710 masked_key = f"{api_key[:15]}...{api_key[-4:]}" if len(api_key) > 20 else "***" 

711 else: 

712 masked_key = "None" 

713 

714 console.print(Panel.fit( 

715 f"[green]✅ Authenticated[/green]\n\n" 

716 f"[dim]Email:[/dim] {user.get('email', 'N/A')}\n" 

717 f"[dim]Name:[/dim] {user.get('full_name', 'N/A')}\n" 

718 f"[dim]Tier:[/dim] {user.get('tier', 'free').title()}\n" 

719 f"[dim]API Key:[/dim] {masked_key}", 

720 title="Authentication Status", 

721 border_style="green" 

722 )) 

723 else: 

724 from .utils.errors import show_not_authenticated_error 

725 show_not_authenticated_error() 

726 

    def _show_stats(self):
        """Render a table of session statistics (messages, findings, duration)."""
        import time

        # Calculate session duration since __init__ recorded start_time.
        duration = time.time() - self.stats["start_time"]
        hours = int(duration // 3600)
        minutes = int((duration % 3600) // 60)
        seconds = int(duration % 60)

        # Human-readable duration; leading zero units are omitted.
        if hours > 0:
            duration_str = f"{hours}h {minutes}m {seconds}s"
        elif minutes > 0:
            duration_str = f"{minutes}m {seconds}s"
        else:
            duration_str = f"{seconds}s"

        # Get context stats (message/finding counts kept by the context).
        context_stats = self.context.get_statistics()

        stats_table = Table(title="📊 Session Statistics", show_header=False, title_style="bold cyan")
        stats_table.add_column("Metric", style="cyan", no_wrap=True)
        stats_table.add_column("Value", style="bold white")

        stats_table.add_row("Messages", str(self.stats['messages']))
        stats_table.add_row("  └─ User", str(context_stats.get('user_messages', 0)))
        stats_table.add_row("  └─ Assistant", str(context_stats.get('assistant_messages', 0)))
        stats_table.add_row("", "")  # Spacer
        stats_table.add_row("Scans Run", str(self.stats['scans_run']))
        stats_table.add_row("Findings", str(context_stats.get('total_findings', 0)))
        stats_table.add_row("  └─ Critical/High", f"[red]{context_stats.get('high_severity', 0)}[/red]")
        stats_table.add_row("  └─ Medium", f"[yellow]{context_stats.get('medium_severity', 0)}[/yellow]")
        stats_table.add_row("  └─ Low", f"[green]{context_stats.get('low_severity', 0)}[/green]")
        stats_table.add_row("", "")  # Spacer
        stats_table.add_row("Session Duration", duration_str)
        stats_table.add_row("Model", self.model)

        # Estimate tokens and cost (rough estimates).
        # NOTE(review): tokens_used/estimated_cost are never updated in this
        # file, so these rows currently never render — confirm upstream.
        if self.stats['tokens_used'] > 0:
            stats_table.add_row("Tokens Used", f"~{self.stats['tokens_used']:,}")
            stats_table.add_row("Estimated Cost", f"${self.stats['estimated_cost']:.4f}")

        console.print(stats_table)

770 

771 def _save_conversation(self): 

772 """Save conversation to file.""" 

773 output_file = Path.home() / '.alprina' / 'conversations' / f"chat_{self.context.session_start.strftime('%Y%m%d_%H%M%S')}.json" 

774 output_file.parent.mkdir(parents=True, exist_ok=True) 

775 

776 self.context.save_conversation(output_file) 

777 console.print(f"[green]✓ Conversation saved to:[/green] {output_file}") 

778 

779 def _handle_exit(self): 

780 """Handle exit gracefully.""" 

781 stats = self.context.get_statistics() 

782 console.print(f"\n[cyan]Thanks for using Alprina![/cyan]") 

783 console.print(f"[dim]Session stats: {stats['total_messages']} messages, {stats['session_duration']:.0f}s duration[/dim]") 

784 console.print("[dim]💾 Use /save before exit to save your conversation[/dim]\n") 

785 

786 

def chat_command(
    model: str = "claude-3-5-sonnet-20241022",
    streaming: bool = True,
    load_results: Optional[Path] = None
):
    """
    Start interactive chat session with Alprina AI.

    Args:
        model: LLM model to use
        streaming: Enable streaming responses
        load_results: Load previous scan results for context
    """
    try:
        session = AlprinaChatSession(
            model=model,
            streaming=streaming,
            context_file=load_results
        )
        session.start()
    except Exception as e:
        # Any unrecoverable failure is logged and reported, then the CLI
        # exits with a non-zero status.
        logger.error(f"Chat session error: {e}", exc_info=True)
        console.print(f"[red]Chat error: {e}[/red]")
        sys.exit(1)