Coverage for fastblocks/mcp/config_health.py: 0%

213 statements  

« prev     ^ index     » next       coverage.py v7.10.7, created at 2025-10-09 00:47 -0700

1"""Configuration health checks and testing system for FastBlocks.""" 

2 

3import os 

4import tempfile 

5import time 

6import typing as t 

7from contextlib import asynccontextmanager 

8from dataclasses import dataclass, field 

9from datetime import datetime 

10from enum import Enum 

11from pathlib import Path 

12from typing import Any 

13 

14from .configuration import ConfigurationSchema, ConfigurationStatus 

15from .env_manager import EnvironmentManager 

16from .health import HealthCheckSystem 

17from .registry import AdapterRegistry 

18 

19 

class ConfigurationTestType(str, Enum):
    """Types of configuration tests.

    Each member names one test category run by the health checker; the
    string values appear in serialized health reports.
    """

    VALIDATION = "validation"  # schema-level configuration validation
    ENVIRONMENT = "environment"  # environment variable checks
    ADAPTER_LOADING = "adapter_loading"  # adapter discovery/instantiation
    DEPENDENCIES = "dependencies"  # adapter dependency resolution
    SECURITY = "security"  # environment/production security audit
    PERFORMANCE = "performance"  # performance heuristics (adapter count)
    INTEGRATION = "integration"  # per-adapter health-check integration

30 

31 

class TestSeverity(str, Enum):
    """Test result severity levels.

    Listed from most to least severe; the string values appear in
    serialized health reports. INFO is used for passing results.
    """

    CRITICAL = "critical"
    HIGH = "high"
    MEDIUM = "medium"
    LOW = "low"
    INFO = "info"

40 

41 

@dataclass
class ConfigurationTestResult:
    """Result of a configuration test."""

    # Category the test belongs to (validation, environment, ...).
    test_type: ConfigurationTestType
    # Human-readable test name, e.g. "Schema Validation".
    test_name: str
    # True when the test succeeded.
    passed: bool
    # Severity of the finding; INFO is used for passing tests.
    severity: TestSeverity
    # Human-readable outcome message.
    message: str
    # Optional structured data about the outcome (errors, missing deps, ...).
    details: dict[str, Any] = field(default_factory=dict)
    # Wall-clock duration of the individual test in milliseconds.
    execution_time_ms: float = 0.0
    # Local time when the result object was created.
    timestamp: datetime = field(default_factory=datetime.now)

54 

55 

@dataclass
class ConfigurationHealthReport:
    """Comprehensive configuration health report."""

    # Name of the configuration (set to the config file stem by
    # run_configuration_test_suite; "unknown" otherwise).
    configuration_name: str
    # Configuration profile value, e.g. "production".
    profile: str
    # Aggregate status derived from the individual test results.
    overall_status: ConfigurationStatus
    # All individual test results, across categories.
    test_results: list[ConfigurationTestResult] = field(default_factory=list)
    # Aggregate statistics (pass rate, severity breakdown, ...).
    summary: dict[str, Any] = field(default_factory=dict)
    # Human-readable remediation suggestions.
    recommendations: list[str] = field(default_factory=list)
    # Total wall-clock duration of the whole check in milliseconds.
    execution_time_ms: float = 0.0
    # Local time when the report was created.
    timestamp: datetime = field(default_factory=datetime.now)

68 

69 

class ConfigurationHealthChecker:
    """Comprehensive health checking for FastBlocks configurations."""

    def __init__(
        self,
        registry: AdapterRegistry,
        env_manager: EnvironmentManager,
        base_path: Path | None = None,
    ):
        """Initialize configuration health checker.

        Args:
            registry: Adapter registry used for adapter discovery and
                instantiation.
            env_manager: Environment manager used for variable extraction,
                validation, and security audits.
            base_path: Project root for file operations; defaults to the
                current working directory.
        """
        self.registry = registry
        self.env_manager = env_manager
        # Health-check system shares the same registry for adapter lookups.
        self.health_system = HealthCheckSystem(registry)
        self.base_path = base_path or Path.cwd()

        # Test categories with their severity levels.
        # NOTE(review): not referenced by any method visible in this file —
        # confirm whether external callers rely on this mapping.
        self.test_categories = {
            ConfigurationTestType.VALIDATION: TestSeverity.CRITICAL,
            ConfigurationTestType.ENVIRONMENT: TestSeverity.HIGH,
            ConfigurationTestType.ADAPTER_LOADING: TestSeverity.HIGH,
            ConfigurationTestType.DEPENDENCIES: TestSeverity.MEDIUM,
            ConfigurationTestType.SECURITY: TestSeverity.HIGH,
            ConfigurationTestType.PERFORMANCE: TestSeverity.LOW,
            ConfigurationTestType.INTEGRATION: TestSeverity.MEDIUM,
        }

95 

96 async def run_comprehensive_health_check( 

97 self, 

98 config: ConfigurationSchema, 

99 test_types: list[ConfigurationTestType] | None = None, 

100 ) -> ConfigurationHealthReport: 

101 """Run comprehensive health check on configuration.""" 

102 start_time = time.time() 

103 

104 if test_types is None: 

105 test_types = list(ConfigurationTestType) 

106 

107 report = ConfigurationHealthReport( 

108 configuration_name="unknown", 

109 profile=config.profile.value, 

110 overall_status=ConfigurationStatus.VALID, 

111 ) 

112 

113 # Run all specified tests 

114 for test_type in test_types: 

115 test_results = await self._run_test_category(config, test_type) 

116 report.test_results.extend(test_results) 

117 

118 # Analyze results and determine overall status 

119 report.overall_status = self._determine_overall_status(report.test_results) 

120 report.summary = self._generate_summary(report.test_results) 

121 report.recommendations = self._generate_recommendations( 

122 report.test_results, config 

123 ) 

124 report.execution_time_ms = (time.time() - start_time) * 1000 

125 

126 return report 

127 

128 async def _run_test_category( 

129 self, config: ConfigurationSchema, test_type: ConfigurationTestType 

130 ) -> list[ConfigurationTestResult]: 

131 """Run all tests in a specific category.""" 

132 test_methods = { 

133 ConfigurationTestType.VALIDATION: self._test_configuration_validation, 

134 ConfigurationTestType.ENVIRONMENT: self._test_environment_variables, 

135 ConfigurationTestType.ADAPTER_LOADING: self._test_adapter_loading, 

136 ConfigurationTestType.DEPENDENCIES: self._test_adapter_dependencies, 

137 ConfigurationTestType.SECURITY: self._test_security_configuration, 

138 ConfigurationTestType.PERFORMANCE: self._test_performance_configuration, 

139 ConfigurationTestType.INTEGRATION: self._test_integration_configuration, 

140 } 

141 

142 test_method = test_methods.get(test_type) 

143 if test_method: 

144 return await test_method(config) 

145 

146 return [] 

147 

148 async def _test_configuration_validation( 

149 self, config: ConfigurationSchema 

150 ) -> list[ConfigurationTestResult]: 

151 """Test configuration validation.""" 

152 results = [] 

153 start_time = time.time() 

154 

155 try: 

156 # Test schema validation 

157 from .configuration import ConfigurationManager 

158 

159 config_manager = ConfigurationManager(self.registry) 

160 validation_result = await config_manager.validate_configuration(config) 

161 

162 execution_time = (time.time() - start_time) * 1000 

163 

164 if validation_result.status == ConfigurationStatus.VALID: 

165 results.append( 

166 ConfigurationTestResult( 

167 test_type=ConfigurationTestType.VALIDATION, 

168 test_name="Schema Validation", 

169 passed=True, 

170 severity=TestSeverity.INFO, 

171 message="Configuration schema is valid", 

172 execution_time_ms=execution_time, 

173 ) 

174 ) 

175 else: 

176 results.append( 

177 ConfigurationTestResult( 

178 test_type=ConfigurationTestType.VALIDATION, 

179 test_name="Schema Validation", 

180 passed=False, 

181 severity=TestSeverity.CRITICAL, 

182 message=f"Configuration validation failed: {validation_result.status.value}", 

183 details={ 

184 "errors": validation_result.errors, 

185 "warnings": validation_result.warnings, 

186 }, 

187 execution_time_ms=execution_time, 

188 ) 

189 ) 

190 

191 except Exception as e: 

192 results.append( 

193 ConfigurationTestResult( 

194 test_type=ConfigurationTestType.VALIDATION, 

195 test_name="Schema Validation", 

196 passed=False, 

197 severity=TestSeverity.CRITICAL, 

198 message=f"Validation test failed: {e}", 

199 execution_time_ms=(time.time() - start_time) * 1000, 

200 ) 

201 ) 

202 

203 return results 

204 

205 async def _test_environment_variables( 

206 self, config: ConfigurationSchema 

207 ) -> list[ConfigurationTestResult]: 

208 """Test environment variable configuration.""" 

209 results = [] 

210 

211 # Extract all environment variables 

212 variables = self.env_manager.extract_variables_from_configuration(config) 

213 

214 # Validate environment variables 

215 start_time = time.time() 

216 validation_result = self.env_manager.validate_environment_variables(variables) 

217 execution_time = (time.time() - start_time) * 1000 

218 

219 # Test for missing required variables 

220 if validation_result.missing_required: 

221 results.append( 

222 ConfigurationTestResult( 

223 test_type=ConfigurationTestType.ENVIRONMENT, 

224 test_name="Required Variables", 

225 passed=False, 

226 severity=TestSeverity.HIGH, 

227 message=f"Missing {len(validation_result.missing_required)} required variables", 

228 details={"missing": validation_result.missing_required}, 

229 execution_time_ms=execution_time, 

230 ) 

231 ) 

232 else: 

233 results.append( 

234 ConfigurationTestResult( 

235 test_type=ConfigurationTestType.ENVIRONMENT, 

236 test_name="Required Variables", 

237 passed=True, 

238 severity=TestSeverity.INFO, 

239 message="All required environment variables are configured", 

240 execution_time_ms=execution_time, 

241 ) 

242 ) 

243 

244 # Test for format validation 

245 if validation_result.invalid_format: 

246 results.append( 

247 ConfigurationTestResult( 

248 test_type=ConfigurationTestType.ENVIRONMENT, 

249 test_name="Format Validation", 

250 passed=False, 

251 severity=TestSeverity.MEDIUM, 

252 message=f"{len(validation_result.invalid_format)} variables have format issues", 

253 details={"invalid_format": validation_result.invalid_format}, 

254 execution_time_ms=execution_time, 

255 ) 

256 ) 

257 else: 

258 results.append( 

259 ConfigurationTestResult( 

260 test_type=ConfigurationTestType.ENVIRONMENT, 

261 test_name="Format Validation", 

262 passed=True, 

263 severity=TestSeverity.INFO, 

264 message="All environment variables have valid formats", 

265 execution_time_ms=execution_time, 

266 ) 

267 ) 

268 

269 return results 

270 

    async def _test_adapter_loading(
        self, config: ConfigurationSchema
    ) -> list[ConfigurationTestResult]:
        """Test adapter loading and instantiation.

        For every enabled adapter in *config*, verifies that the adapter is
        known to the registry and that it can be instantiated. Disabled
        adapters are skipped; a discovery failure short-circuits the
        instantiation attempt for that adapter.
        """
        results = []

        for adapter_name, adapter_config in config.adapters.items():
            if not adapter_config.enabled:
                continue

            start_time = time.time()

            try:
                # Try to get adapter info; a missing entry means the adapter
                # name is not registered at all.
                adapter_info = await self.registry.get_adapter_info(adapter_name)
                if not adapter_info:
                    results.append(
                        ConfigurationTestResult(
                            test_type=ConfigurationTestType.ADAPTER_LOADING,
                            test_name=f"Adapter Discovery ({adapter_name})",
                            passed=False,
                            severity=TestSeverity.HIGH,
                            message=f"Adapter '{adapter_name}' not found in registry",
                            execution_time_ms=(time.time() - start_time) * 1000,
                        )
                    )
                    continue

                # Try to instantiate adapter (a falsy return counts as a
                # failed instantiation).
                adapter_instance = await self.registry.get_adapter(adapter_name)
                if adapter_instance:
                    results.append(
                        ConfigurationTestResult(
                            test_type=ConfigurationTestType.ADAPTER_LOADING,
                            test_name=f"Adapter Loading ({adapter_name})",
                            passed=True,
                            severity=TestSeverity.INFO,
                            message=f"Adapter '{adapter_name}' loaded successfully",
                            details={
                                "adapter_class": adapter_instance.__class__.__name__
                            },
                            execution_time_ms=(time.time() - start_time) * 1000,
                        )
                    )
                else:
                    results.append(
                        ConfigurationTestResult(
                            test_type=ConfigurationTestType.ADAPTER_LOADING,
                            test_name=f"Adapter Loading ({adapter_name})",
                            passed=False,
                            severity=TestSeverity.HIGH,
                            message=f"Failed to instantiate adapter '{adapter_name}'",
                            execution_time_ms=(time.time() - start_time) * 1000,
                        )
                    )

            except Exception as e:
                # Registry lookup or instantiation raised — record and keep
                # checking the remaining adapters.
                results.append(
                    ConfigurationTestResult(
                        test_type=ConfigurationTestType.ADAPTER_LOADING,
                        test_name=f"Adapter Loading ({adapter_name})",
                        passed=False,
                        severity=TestSeverity.HIGH,
                        message=f"Error loading adapter '{adapter_name}': {e}",
                        execution_time_ms=(time.time() - start_time) * 1000,
                    )
                )

        return results

340 

341 async def _test_adapter_dependencies( 

342 self, config: ConfigurationSchema 

343 ) -> list[ConfigurationTestResult]: 

344 """Test adapter dependency resolution.""" 

345 results = [] 

346 

347 enabled_adapters = { 

348 name for name, adapter in config.adapters.items() if adapter.enabled 

349 } 

350 

351 for adapter_name, adapter_config in config.adapters.items(): 

352 if not adapter_config.enabled: 

353 continue 

354 

355 start_time = time.time() 

356 

357 # Check if all dependencies are enabled 

358 missing_deps = adapter_config.dependencies - enabled_adapters 

359 if missing_deps: 

360 results.append( 

361 ConfigurationTestResult( 

362 test_type=ConfigurationTestType.DEPENDENCIES, 

363 test_name=f"Dependencies ({adapter_name})", 

364 passed=False, 

365 severity=TestSeverity.MEDIUM, 

366 message=f"Missing dependencies for '{adapter_name}': {', '.join(missing_deps)}", 

367 details={"missing_dependencies": list(missing_deps)}, 

368 execution_time_ms=(time.time() - start_time) * 1000, 

369 ) 

370 ) 

371 else: 

372 results.append( 

373 ConfigurationTestResult( 

374 test_type=ConfigurationTestType.DEPENDENCIES, 

375 test_name=f"Dependencies ({adapter_name})", 

376 passed=True, 

377 severity=TestSeverity.INFO, 

378 message=f"All dependencies satisfied for '{adapter_name}'", 

379 execution_time_ms=(time.time() - start_time) * 1000, 

380 ) 

381 ) 

382 

383 return results 

384 

    async def _test_security_configuration(
        self, config: ConfigurationSchema
    ) -> list[ConfigurationTestResult]:
        """Test security aspects of configuration.

        Audits the configuration's environment variables, reports critical
        and high findings separately, runs extra production-only checks when
        the profile is "production", and emits a passing summary result when
        no critical/high issues were found.
        """
        results = []

        # Extract environment variables for security audit
        variables = self.env_manager.extract_variables_from_configuration(config)

        start_time = time.time()
        # audit_environment_security returns issues bucketed by severity key
        # (at least "critical" and "high" are read here).
        audit_results = self.env_manager.audit_environment_security(variables)
        execution_time = (time.time() - start_time) * 1000

        # Check for critical security issues
        if audit_results["critical"]:
            results.append(
                ConfigurationTestResult(
                    test_type=ConfigurationTestType.SECURITY,
                    test_name="Critical Security Issues",
                    passed=False,
                    severity=TestSeverity.CRITICAL,
                    message=f"Found {len(audit_results['critical'])} critical security issues",
                    details={"issues": audit_results["critical"]},
                    execution_time_ms=execution_time,
                )
            )

        # Check for high severity issues
        if audit_results["high"]:
            results.append(
                ConfigurationTestResult(
                    test_type=ConfigurationTestType.SECURITY,
                    test_name="High Security Issues",
                    passed=False,
                    severity=TestSeverity.HIGH,
                    message=f"Found {len(audit_results['high'])} high severity security issues",
                    details={"issues": audit_results["high"]},
                    execution_time_ms=execution_time,
                )
            )

        # Production security checks
        if config.profile.value == "production":
            prod_results = self._check_production_security(config)
            results.extend(prod_results)

        # If no critical or high issues found, report an overall pass with
        # a per-bucket count summary.
        if not audit_results["critical"] and not audit_results["high"]:
            results.append(
                ConfigurationTestResult(
                    test_type=ConfigurationTestType.SECURITY,
                    test_name="Security Audit",
                    passed=True,
                    severity=TestSeverity.INFO,
                    message="No critical security issues found",
                    details={
                        "audit_summary": {k: len(v) for k, v in audit_results.items()}
                    },
                    execution_time_ms=execution_time,
                )
            )

        return results

448 

449 def _check_production_security( 

450 self, config: ConfigurationSchema 

451 ) -> list[ConfigurationTestResult]: 

452 """Additional security checks for production configuration.""" 

453 results = [] 

454 start_time = time.time() 

455 

456 # Check debug mode 

457 debug_enabled = config.global_settings.get("debug", False) 

458 if debug_enabled: 

459 results.append( 

460 ConfigurationTestResult( 

461 test_type=ConfigurationTestType.SECURITY, 

462 test_name="Production Debug Mode", 

463 passed=False, 

464 severity=TestSeverity.HIGH, 

465 message="Debug mode is enabled in production configuration", 

466 details={"recommendation": "Set debug=false for production"}, 

467 execution_time_ms=(time.time() - start_time) * 1000, 

468 ) 

469 ) 

470 

471 # Check log level 

472 log_level = config.global_settings.get("log_level", "INFO").upper() 

473 if log_level in ("DEBUG"): 

474 results.append( 

475 ConfigurationTestResult( 

476 test_type=ConfigurationTestType.SECURITY, 

477 test_name="Production Log Level", 

478 passed=False, 

479 severity=TestSeverity.MEDIUM, 

480 message="Debug logging enabled in production", 

481 details={ 

482 "current_level": log_level, 

483 "recommendation": "Use WARNING or ERROR for production", 

484 }, 

485 execution_time_ms=(time.time() - start_time) * 1000, 

486 ) 

487 ) 

488 

489 return results 

490 

491 async def _test_performance_configuration( 

492 self, config: ConfigurationSchema 

493 ) -> list[ConfigurationTestResult]: 

494 """Test performance-related configuration.""" 

495 results = [] 

496 start_time = time.time() 

497 

498 # Count enabled adapters 

499 enabled_count = sum( 

500 1 for adapter in config.adapters.values() if adapter.enabled 

501 ) 

502 

503 if enabled_count > 20: 

504 results.append( 

505 ConfigurationTestResult( 

506 test_type=ConfigurationTestType.PERFORMANCE, 

507 test_name="Adapter Count", 

508 passed=False, 

509 severity=TestSeverity.LOW, 

510 message=f"High number of enabled adapters ({enabled_count}) may impact performance", 

511 details={"enabled_adapters": enabled_count}, 

512 execution_time_ms=(time.time() - start_time) * 1000, 

513 ) 

514 ) 

515 else: 

516 results.append( 

517 ConfigurationTestResult( 

518 test_type=ConfigurationTestType.PERFORMANCE, 

519 test_name="Adapter Count", 

520 passed=True, 

521 severity=TestSeverity.INFO, 

522 message=f"Reasonable number of enabled adapters ({enabled_count})", 

523 execution_time_ms=(time.time() - start_time) * 1000, 

524 ) 

525 ) 

526 

527 return results 

528 

    async def _test_integration_configuration(
        self, config: ConfigurationSchema
    ) -> list[ConfigurationTestResult]:
        """Test integration aspects of configuration.

        Runs the health-check system against every enabled adapter and
        reports one result per adapter. Disabled adapters are skipped; a
        raised exception is recorded as a MEDIUM failure and the loop
        continues.
        """
        results = []

        # Test adapter health checks
        for adapter_name, adapter_config in config.adapters.items():
            if not adapter_config.enabled:
                continue

            start_time = time.time()

            try:
                health_result = await self.health_system.check_adapter_health(
                    adapter_name
                )

                # NOTE(review): status is compared as the plain string
                # "healthy" here — confirm check_adapter_health exposes a
                # string status rather than an enum.
                if health_result.status == "healthy":
                    results.append(
                        ConfigurationTestResult(
                            test_type=ConfigurationTestType.INTEGRATION,
                            test_name=f"Adapter Health ({adapter_name})",
                            passed=True,
                            severity=TestSeverity.INFO,
                            message=f"Adapter '{adapter_name}' is healthy",
                            details={"health_status": health_result.status},
                            execution_time_ms=(time.time() - start_time) * 1000,
                        )
                    )
                else:
                    results.append(
                        ConfigurationTestResult(
                            test_type=ConfigurationTestType.INTEGRATION,
                            test_name=f"Adapter Health ({adapter_name})",
                            passed=False,
                            severity=TestSeverity.MEDIUM,
                            message=f"Adapter '{adapter_name}' health check failed: {health_result.message}",
                            details={
                                "health_status": health_result.status,
                                "details": health_result.details,
                            },
                            execution_time_ms=(time.time() - start_time) * 1000,
                        )
                    )

            except Exception as e:
                results.append(
                    ConfigurationTestResult(
                        test_type=ConfigurationTestType.INTEGRATION,
                        test_name=f"Adapter Health ({adapter_name})",
                        passed=False,
                        severity=TestSeverity.MEDIUM,
                        message=f"Health check failed for '{adapter_name}': {e}",
                        execution_time_ms=(time.time() - start_time) * 1000,
                    )
                )

        return results

588 

589 def _determine_overall_status( 

590 self, test_results: list[ConfigurationTestResult] 

591 ) -> ConfigurationStatus: 

592 """Determine overall configuration status from test results.""" 

593 has_critical = any( 

594 not result.passed and result.severity == TestSeverity.CRITICAL 

595 for result in test_results 

596 ) 

597 if has_critical: 

598 return ConfigurationStatus.ERROR 

599 

600 has_high = any( 

601 not result.passed and result.severity == TestSeverity.HIGH 

602 for result in test_results 

603 ) 

604 if has_high: 

605 return ConfigurationStatus.ERROR 

606 

607 has_medium = any( 

608 not result.passed and result.severity == TestSeverity.MEDIUM 

609 for result in test_results 

610 ) 

611 if has_medium: 

612 return ConfigurationStatus.WARNING 

613 

614 return ConfigurationStatus.VALID 

615 

616 def _generate_summary( 

617 self, test_results: list[ConfigurationTestResult] 

618 ) -> dict[str, Any]: 

619 """Generate summary statistics from test results.""" 

620 total_tests = len(test_results) 

621 passed_tests = sum(1 for result in test_results if result.passed) 

622 

623 severity_counts = {} 

624 for severity in TestSeverity: 

625 severity_counts[severity.value] = sum( 

626 1 

627 for result in test_results 

628 if not result.passed and result.severity == severity 

629 ) 

630 

631 test_type_summary = {} 

632 for test_type in ConfigurationTestType: 

633 type_results = [r for r in test_results if r.test_type == test_type] 

634 test_type_summary[test_type.value] = { 

635 "total": len(type_results), 

636 "passed": sum(1 for r in type_results if r.passed), 

637 "failed": sum(1 for r in type_results if not r.passed), 

638 } 

639 

640 return { 

641 "total_tests": total_tests, 

642 "passed_tests": passed_tests, 

643 "failed_tests": total_tests - passed_tests, 

644 "pass_rate": (passed_tests / total_tests * 100) if total_tests > 0 else 0, 

645 "severity_breakdown": severity_counts, 

646 "test_type_summary": test_type_summary, 

647 "avg_execution_time_ms": sum(r.execution_time_ms for r in test_results) 

648 / total_tests 

649 if total_tests > 0 

650 else 0, 

651 } 

652 

653 def _generate_recommendations( 

654 self, test_results: list[ConfigurationTestResult], config: ConfigurationSchema 

655 ) -> list[str]: 

656 """Generate recommendations based on test results.""" 

657 recommendations = [] 

658 

659 # Critical and high severity issues 

660 critical_issues = [ 

661 r 

662 for r in test_results 

663 if not r.passed and r.severity == TestSeverity.CRITICAL 

664 ] 

665 if critical_issues: 

666 recommendations.append( 

667 f"🔴 Address {len(critical_issues)} critical issues immediately before deploying" 

668 ) 

669 

670 high_issues = [ 

671 r for r in test_results if not r.passed and r.severity == TestSeverity.HIGH 

672 ] 

673 if high_issues: 

674 recommendations.append( 

675 f"🟡 Fix {len(high_issues)} high severity issues to improve reliability" 

676 ) 

677 

678 # Security recommendations 

679 security_issues = [ 

680 r 

681 for r in test_results 

682 if r.test_type == ConfigurationTestType.SECURITY and not r.passed 

683 ] 

684 if security_issues: 

685 recommendations.append("🔒 Review and fix security configuration issues") 

686 

687 # Performance recommendations 

688 if config.profile.value == "production": 

689 enabled_adapters = sum( 

690 1 for adapter in config.adapters.values() if adapter.enabled 

691 ) 

692 if enabled_adapters > 15: 

693 recommendations.append( 

694 "⚡ Consider disabling unused adapters to improve performance" 

695 ) 

696 

697 # Environment recommendations 

698 env_issues = [ 

699 r 

700 for r in test_results 

701 if r.test_type == ConfigurationTestType.ENVIRONMENT and not r.passed 

702 ] 

703 if env_issues: 

704 recommendations.append("🌍 Complete environment variable configuration") 

705 

706 return recommendations 

707 

708 async def run_configuration_test_suite( 

709 self, config_file: Path, output_file: Path | None = None 

710 ) -> ConfigurationHealthReport: 

711 """Run complete test suite on a configuration file.""" 

712 from .configuration import ConfigurationManager 

713 

714 # Load configuration 

715 config_manager = ConfigurationManager(self.registry, self.base_path) 

716 await config_manager.initialize() 

717 config = await config_manager.load_configuration(config_file) 

718 

719 # Run health check 

720 report = await self.run_comprehensive_health_check(config) 

721 report.configuration_name = config_file.stem 

722 

723 # Save report if requested 

724 if output_file: 

725 await self._save_health_report(report, output_file) 

726 

727 return report 

728 

729 async def _save_health_report( 

730 self, report: ConfigurationHealthReport, output_file: Path 

731 ) -> None: 

732 """Save health report to file.""" 

733 import json 

734 

735 # Convert report to serializable dict 

736 report_dict = { 

737 "configuration_name": report.configuration_name, 

738 "profile": report.profile, 

739 "overall_status": report.overall_status.value, 

740 "timestamp": report.timestamp.isoformat(), 

741 "execution_time_ms": report.execution_time_ms, 

742 "summary": report.summary, 

743 "recommendations": report.recommendations, 

744 "test_results": [ 

745 { 

746 "test_type": result.test_type.value, 

747 "test_name": result.test_name, 

748 "passed": result.passed, 

749 "severity": result.severity.value, 

750 "message": result.message, 

751 "details": result.details, 

752 "execution_time_ms": result.execution_time_ms, 

753 "timestamp": result.timestamp.isoformat(), 

754 } 

755 for result in report.test_results 

756 ], 

757 } 

758 

759 with output_file.open("w") as f: 

760 json.dump(report_dict, f, indent=2) 

761 

    @asynccontextmanager
    async def isolated_test_environment(
        self, config: ConfigurationSchema
    ) -> t.AsyncGenerator[Path]:
        """Create isolated environment for testing configuration.

        Generates a .env file for *config* inside a temporary directory,
        loads those variables into os.environ for the duration of the
        context, and yields the temp directory path. On exit the entire
        process environment is restored from a pre-context snapshot.

        WARNING: os.environ is process-global mutable state — this context
        manager is not safe for concurrent use, and any environment changes
        made by other code while the context is open are discarded on exit.
        """
        with tempfile.TemporaryDirectory() as temp_dir:
            temp_path = Path(temp_dir)

            # Create isolated environment manager rooted at the temp dir so
            # no project files are touched.
            isolated_env_manager = EnvironmentManager(temp_path)

            # Generate environment file in isolation
            variables = isolated_env_manager.extract_variables_from_configuration(
                config
            )
            env_file = await isolated_env_manager.generate_environment_file(  # type: ignore[misc]
                variables, temp_path / ".env"
            )

            # Backup current environment (full snapshot for restoration).
            original_env = os.environ.copy()

            try:
                # Load test environment on top of the existing one; existing
                # keys are overwritten, others are left in place.
                test_env = isolated_env_manager.load_environment_from_file(env_file)
                os.environ.update(test_env)

                yield temp_path

            finally:
                # Restore original environment: clear first so keys added
                # inside the context are removed, then reapply the snapshot.
                os.environ.clear()
                os.environ.update(original_env)