Coverage for nilearn/connectome/connectivity_matrices.py: 39%

213 statements  

coverage.py v7.9.1, created at 2025-06-20 11:02 +0200

1"""Connectivity matrices.""" 

2 

3import warnings 

4from math import floor, sqrt 

5 

6import numpy as np 

7from scipy import linalg 

8from sklearn.base import BaseEstimator, TransformerMixin, clone 

9from sklearn.covariance import LedoitWolf 

10from sklearn.utils import check_array 

11from sklearn.utils.estimator_checks import check_is_fitted 

12 

13from nilearn import signal 

14from nilearn._utils.docs import fill_doc 

15from nilearn._utils.extmath import is_spd 

16from nilearn._utils.logger import find_stack_level 

17from nilearn._utils.tags import SKLEARN_LT_1_6 

18 

19 

20 def _check_square(matrix):

21 """Raise a ValueError if the input matrix is square. 

22 

23 Parameters 

24 ---------- 

25 matrix : numpy.ndarray 

26 Input array. 

27 

28 """ 

29 if matrix.ndim != 2 or (matrix.shape[0] != matrix.shape[-1]): 

30 raise ValueError( 

31 f"Expected a square matrix, got array of shape {matrix.shape}." 

32 ) 

33 

34 

35 def _check_spd(matrix):

36 """Raise a ValueError if the input matrix is not symmetric positive \ 

37 definite. 

38 

39 Parameters 

40 ---------- 

41 matrix : numpy.ndarray 

42 Input array. 

43 

44 """ 

45 if not is_spd(matrix, decimal=7): 

46 raise ValueError("Expected a symmetric positive definite matrix.") 

47 

48 

49 def _form_symmetric(function, eigenvalues, eigenvectors):

50 """Return the symmetric matrix with the given eigenvectors and \ 

51 eigenvalues transformed by function. 

52 

53 Parameters 

54 ---------- 

55 function : function numpy.ndarray -> numpy.ndarray 

56 The transform to apply to the eigenvalues. 

57 

58 eigenvalues : numpy.ndarray, shape (n_features, ) 

59 Input argument of the function. 

60 

61 eigenvectors : numpy.ndarray, shape (n_features, n_features) 

62 Unitary matrix. 

63 

64 Returns 

65 ------- 

66 output : numpy.ndarray, shape (n_features, n_features) 

67 The symmetric matrix obtained after transforming the eigenvalues, while 

68 keeping the same eigenvectors. 

69 

70 """ 

71 return np.dot(eigenvectors * function(eigenvalues), eigenvectors.T) 

72 

73 

74 def _map_eigenvalues(function, symmetric):

75 """Matrix function, for real symmetric matrices. 

76 

77 The function is applied to the eigenvalues of symmetric. 

78 

79 Parameters 

80 ---------- 

81 function : function numpy.ndarray -> numpy.ndarray 

82 The transform to apply to the eigenvalues. 

83 

84 symmetric : numpy.ndarray, shape (n_features, n_features) 

85 The input symmetric matrix. 

86 

87 Returns 

88 ------- 

89 output : numpy.ndarray, shape (n_features, n_features) 

90 The new symmetric matrix obtained after transforming the eigenvalues, 

91 while keeping the same eigenvectors. 

92 

93 Notes 

94 ----- 

95 If the input matrix is not real symmetric, no error is reported but the

96 result will be wrong.

97 
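Examples
--------
A minimal sketch (the matrix and values below are illustrative, not part
of the original docstring): applying ``numpy.sqrt`` to the eigenvalues
yields a matrix square root.

>>> import numpy as np
>>> from nilearn.connectome.connectivity_matrices import _map_eigenvalues
>>> spd = np.array([[4.0, 0.0], [0.0, 9.0]])
>>> sqrt_spd = _map_eigenvalues(np.sqrt, spd)
>>> bool(np.allclose(sqrt_spd.dot(sqrt_spd), spd))
True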

98 """ 

99 eigenvalues, eigenvectors = linalg.eigh(symmetric) 

100 return _form_symmetric(function, eigenvalues, eigenvectors) 

101 

102 

103 def _geometric_mean(matrices, init=None, max_iter=10, tol=1e-7):

104 """Compute the geometric mean of symmetric positive definite matrices. 

105 

106 The geometric mean of n positive definite matrices 

107 M_1, ..., M_n is the minimizer of the sum of squared distances from an 

108 arbitrary matrix to each input matrix M_k 

109 

110 gmean(M_1, ..., M_n) = argmin_X sum_{k=1}^n dist(X, M_k)^2

111

112 where the distance is based on the matrix logarithm

113

114 dist(X, M_k) = ||log(X^{-1/2} M_k X^{-1/2})||

115 

116 In case of positive numbers, this mean is the usual geometric mean. 

117 

118 See Algorithm 3 of :footcite:t:`Fletcher2007`. 

119 

120 References 

121 ---------- 

122 .. footbibliography:: 

123 

124 Parameters 

125 ---------- 

126 matrices : list of numpy.ndarray, all of shape (n_features, n_features) 

127 List of matrices whose geometric mean to compute. An error is raised if

128 the matrices are not all symmetric positive definite with the same shape.

129 

130 init : numpy.ndarray, shape (n_features, n_features), optional 

131 Initialization matrix; defaults to the arithmetic mean of matrices.

132 An error is raised if the matrix is not symmetric positive definite

133 with the same shape as the elements of matrices.

134 

135 max_iter : int, default=10 

136 Maximal number of iterations. 

137 

138 tol : positive float or None, default=1e-7 

139 The tolerance to declare convergence: if the gradient norm goes below 

140 this value, the gradient descent is stopped. If None, no check is 

141 performed. 

142 

143 Returns 

144 ------- 

145 gmean : numpy.ndarray, shape (n_features, n_features) 

146 Geometric mean of the matrices. 

147 
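Examples
--------
A minimal sketch with two commuting (diagonal) SPD matrices, whose
geometric mean is the elementwise geometric mean of their diagonals;
the matrices are illustrative only.

>>> import numpy as np
>>> from nilearn.connectome.connectivity_matrices import _geometric_mean
>>> matrices = [np.diag([1.0, 4.0]), np.diag([4.0, 1.0])]
>>> gmean = _geometric_mean(matrices)
>>> bool(np.allclose(gmean, np.diag([2.0, 2.0])))
True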

148 """ 

149 # Shape and symmetry positive definiteness checks 

150 n_features = matrices[0].shape[0] 

151 for matrix in matrices: 

152 _check_square(matrix) 

153 if matrix.shape[0] != n_features: 

154 raise ValueError("Matrices are not of the same shape.") 

155 _check_spd(matrix) 

156 

157 # Initialization 

158 matrices = np.array(matrices) 

159 if init is None: 

160 gmean = np.mean(matrices, axis=0) 

161 else: 

162 _check_square(init) 

163 if init.shape[0] != n_features: 

164 raise ValueError("Initialization has incorrect shape.") 

165 _check_spd(init) 

166 gmean = init 

167 

168 norm_old = np.inf 

169 step = 1.0 

170 

171 # Gradient descent 

172 for _ in range(max_iter): 

173 # Computation of the gradient 

174 vals_gmean, vecs_gmean = linalg.eigh(gmean) 

175 gmean_inv_sqrt = _form_symmetric(np.sqrt, 1.0 / vals_gmean, vecs_gmean) 

176 whitened_matrices = [ 

177 gmean_inv_sqrt.dot(matrix).dot(gmean_inv_sqrt) 

178 for matrix in matrices 

179 ] 

180 logs = [_map_eigenvalues(np.log, w_mat) for w_mat in whitened_matrices] 

181 # Covariant derivative is - gmean.dot(logms_mean) 

182 logs_mean = np.mean(logs, axis=0) 

183 if np.any(np.isnan(logs_mean)): 

184 raise FloatingPointError("NaN value after logarithm operation.")

185 

186 # Norm of the covariant derivative on the tangent space at point gmean 

187 norm = np.linalg.norm(logs_mean) 

188 

189 # Update of the minimizer 

190 vals_log, vecs_log = linalg.eigh(logs_mean) 

191 gmean_sqrt = _form_symmetric(np.sqrt, vals_gmean, vecs_gmean) 

192 # Move along the geodesic 

193 gmean = gmean_sqrt.dot( 

194 _form_symmetric(np.exp, vals_log * step, vecs_log) 

195 ).dot(gmean_sqrt) 

196 

197 # Update the norm and the step size 

198 if norm < norm_old: 

199 norm_old = norm 

200 elif norm > norm_old: 

201 step = step / 2.0 

202 norm = norm_old 

203 if tol is not None and norm / gmean.size < tol: 

204 break 

205 if tol is not None and norm / gmean.size >= tol: 

206 warnings.warn( 

207 f"Maximum number of iterations {max_iter} reached without " 

208 f"getting to the requested tolerance level {tol}.", 

209 stacklevel=find_stack_level(), 

210 ) 

211 

212 return gmean 

213 

214 

215 def sym_matrix_to_vec(symmetric, discard_diagonal=False):

216 """Return the flattened lower triangular part of an array. 

217 

218 If diagonal is kept, diagonal elements are divided by sqrt(2) to conserve 

219 the norm. 

220 

221 Acts on the last two dimensions of the array if not 2-dimensional. 

222 

223 .. versionadded:: 0.3 

224 

225 Parameters 

226 ---------- 

227 symmetric : numpy.ndarray or :obj:`list` of numpy arrays, shape\ 

228 (..., n_features, n_features) 

229 Input array. 

230 

231 discard_diagonal : :obj:`bool`, default=False 

232 If True, the values of the diagonal are not returned. 

233 

234 Returns 

235 ------- 

236 output : numpy.ndarray 

237 The output flattened lower triangular part of symmetric. Shape is 

238 (..., n_features * (n_features + 1) / 2) if discard_diagonal is False 

239 and (..., (n_features - 1) * n_features / 2) otherwise. 

240 
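Examples
--------
A minimal sketch on a single 2x2 symmetric matrix (values are
illustrative); note the sqrt(2) scaling of the diagonal term.

>>> import numpy as np
>>> from nilearn.connectome import sym_matrix_to_vec
>>> sym = np.array([[1.0, 2.0], [2.0, 3.0]])
>>> sym_matrix_to_vec(sym).shape
(3,)
>>> sym_matrix_to_vec(sym, discard_diagonal=True).shape
(1,)
>>> vec = sym_matrix_to_vec(sym)
>>> bool(np.isclose(vec[0], 1.0 / np.sqrt(2)))
True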

241 """ 

242 if discard_diagonal: 

243 # No scaling, we directly return the values 

244 tril_mask = np.tril(np.ones(symmetric.shape[-2:]), k=-1).astype(bool) 

245 return symmetric[..., tril_mask] 

246 scaling = np.ones(symmetric.shape[-2:]) 

247 np.fill_diagonal(scaling, sqrt(2.0)) 

248 tril_mask = np.tril(np.ones(symmetric.shape[-2:])).astype(bool) 

249 return symmetric[..., tril_mask] / scaling[tril_mask] 

250 

251 

252 def vec_to_sym_matrix(vec, diagonal=None):

253 """Return the symmetric matrix given its flattened lower triangular part. 

254 

255 Acts on the last dimension of the array if not 1-dimensional. 

256 The diagonal can be included in vec or given separately. In both cases,

257 note that the diagonal elements are multiplied by sqrt(2).

258 

259 .. versionadded:: 0.3 

260 

261 Parameters 

262 ---------- 

263 vec : numpy.ndarray or :obj:`list` of numpy arrays, shape \ 

264 (..., n_columns * (n_columns + 1) /2) or 

265 (..., (n_columns - 1) * n_columns / 2) if diagonal is given separately. 

266 The input array. 

267 

268 diagonal : numpy.ndarray, shape (..., n_columns), default=None 

269 The diagonal array to be stacked to vec. If None, the diagonal is 

270 assumed to be included in vec. 

271 

272 Returns 

273 ------- 

274 sym : numpy.ndarray, shape (..., n_columns, n_columns). 

275 The output symmetric matrix. 

276 

277 Notes 

278 ----- 

279 This function is meant to be the inverse of sym_matrix_to_vec. If you have 

280 discarded the diagonal in sym_matrix_to_vec, you need to provide it 

281 separately to reconstruct the symmetric matrix. For instance this can be 

282 useful for correlation matrices for which we know the diagonal is 1. 

283 

284 See Also 

285 -------- 

286 nilearn.connectome.sym_matrix_to_vec 

287 
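Examples
--------
A minimal round-trip sketch with sym_matrix_to_vec (values are
illustrative).

>>> import numpy as np
>>> from nilearn.connectome import sym_matrix_to_vec, vec_to_sym_matrix
>>> sym = np.array([[1.0, 2.0], [2.0, 3.0]])
>>> vec = sym_matrix_to_vec(sym)
>>> bool(np.allclose(vec_to_sym_matrix(vec), sym))
True
>>> off_diag = sym_matrix_to_vec(sym, discard_diagonal=True)
>>> diag = np.array([1.0, 3.0]) / np.sqrt(2)
>>> bool(np.allclose(vec_to_sym_matrix(off_diag, diagonal=diag), sym))
True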

288 """ 

289 n = vec.shape[-1] 

290 # Compute the number of the symmetric matrix columns 

291 # solve n_columns * (n_columns + 1) / 2 = n subject to n_columns > 0 

292 n_columns = (sqrt(8 * n + 1) - 1.0) / 2 

293 if diagonal is not None: 

294 n_columns += 1 

295 

296 if n_columns > floor(n_columns): 

297 raise ValueError( 

298 f"Vector of unsuitable shape {vec.shape} cannot be transformed to " 

299 "a symmetric matrix." 

300 ) 

301 

302 n_columns = int(n_columns) 

303 first_shape = vec.shape[:-1] 

304 if diagonal is not None and ( 

305 diagonal.shape[:-1] != first_shape or diagonal.shape[-1] != n_columns 

306 ): 

307 raise ValueError( 

308 f"diagonal of shape {diagonal.shape} incompatible " 

309 f"with vector of shape {vec.shape}" 

310 ) 

311 

312 sym = np.zeros((*first_shape, n_columns, n_columns)) 

313 

314 # Fill lower triangular part 

315 skip_diagonal = diagonal is not None 

316 mask = np.tril(np.ones((n_columns, n_columns)), k=-skip_diagonal).astype( 

317 bool 

318 ) 

319 sym[..., mask] = vec 

320 

321 # Fill upper triangular part 

322 sym.swapaxes(-1, -2)[..., mask] = vec 

323 

324 # (Fill and) rescale diagonal terms 

325 mask.fill(False) 

326 np.fill_diagonal(mask, True) 

327 if diagonal is not None: 

328 sym[..., mask] = diagonal 

329 

330 sym[..., mask] *= sqrt(2) 

331 

332 return sym 

333 

334 

335 def cov_to_corr(covariance):

336 """Return correlation matrix for a given covariance matrix. 

337 

338 Parameters 

339 ---------- 

340 covariance : 2D numpy.ndarray 

341 The input covariance matrix. 

342 

343 Returns 

344 ------- 

345 correlation : 2D numpy.ndarray 

346 The output correlation matrix. 

347 
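Examples
--------
A minimal sketch on a hand-made 2x2 covariance matrix (values are
illustrative).

>>> import numpy as np
>>> from nilearn.connectome.connectivity_matrices import cov_to_corr
>>> cov = np.array([[4.0, 1.0], [1.0, 4.0]])
>>> corr = cov_to_corr(cov)
>>> float(corr[0, 1])
0.25
>>> float(corr[0, 0])
1.0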

348 """ 

349 diagonal = np.atleast_2d(1.0 / np.sqrt(np.diag(covariance))) 

350 correlation = covariance * diagonal * diagonal.T 

351 

352 # Force exact 1. on diagonal 

353 np.fill_diagonal(correlation, 1.0) 

354 return correlation 

355 

356 

357 def prec_to_partial(precision):

358 """Return partial correlation matrix for a given precision matrix. 

359 

360 Parameters 

361 ---------- 

362 precision : 2D numpy.ndarray 

363 The input precision matrix. 

364 

365 Returns 

366 ------- 

367 partial_correlation : 2D numpy.ndarray 

368 The 2D output partial correlation matrix. 

369 
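Examples
--------
A minimal sketch on a hand-made 2x2 precision matrix (values are
illustrative); off-diagonal partial correlations are the negated,
rescaled precision terms.

>>> import numpy as np
>>> from nilearn.connectome.connectivity_matrices import prec_to_partial
>>> prec = np.array([[4.0, 1.0], [1.0, 4.0]])
>>> partial = prec_to_partial(prec)
>>> float(partial[0, 1])
-0.25
>>> float(partial[0, 0])
1.0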

370 """ 

371 partial_correlation = -cov_to_corr(precision) 

372 np.fill_diagonal(partial_correlation, 1.0) 

373 return partial_correlation 

374 

375 

376 @fill_doc

377 class ConnectivityMeasure(TransformerMixin, BaseEstimator):

378 """A class that computes different kinds of \ 

379 :term:`functional connectivity` matrices. 

380 

381 .. versionadded:: 0.2 

382 

383 Parameters 

384 ---------- 

385 cov_estimator : estimator object, \ 

386 default=LedoitWolf(store_precision=False) 

387 The covariance estimator. 

388 With the default LedoitWolf estimator, correlations are slightly shrunk

389 towards zero compared to a maximum-likelihood estimate.

390 

391 kind : {"covariance", "correlation", "partial correlation",\ 

392 "tangent", "precision"}, default='covariance' 

393 The matrix kind. 

394 For the use of "tangent" see :footcite:t:`Varoquaux2010b`. 

395 

396 vectorize : :obj:`bool`, default=False 

397 If True, connectivity matrices are reshaped into 1D arrays and only 

398 their flattened lower triangular parts are returned. 

399 

400 discard_diagonal : :obj:`bool`, default=False 

401 If True, vectorized connectivity coefficients do not include the 

402 matrices diagonal elements. Used only when vectorize is set to True. 

403 

404 %(standardize)s 

405 

406 .. note:: 

407 

408 Added to control the value passed to `standardize` in ``signal.clean``

409 so that the new behavior is used, since passing "zscore" or True

410 (the default) is deprecated. This parameter will be deprecated in

411 version 0.13 and removed in version 0.15.

412 

413 Attributes 

414 ---------- 

415 cov_estimator_ : estimator object

416 A new covariance estimator with the same parameters as cov_estimator. 

417 If ``None`` is passed, 

418 defaults to ``LedoitWolf(store_precision=False)``. 

419 

420 mean_ : numpy.ndarray 

421 The mean connectivity matrix across subjects. For 'tangent' kind, 

422 it is the geometric mean of covariances (a group covariance 

423 matrix that captures information from both correlation and partial 

424 correlation matrices). For other values of "kind", it is the

425 mean of the corresponding matrices.

426 

427 whitening_ : numpy.ndarray 

428 The inverse square root of the geometric mean of the covariance matrices.

429 

430 References 

431 ---------- 

432 .. footbibliography:: 

433 
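Examples
--------
A minimal sketch on synthetic time series; shapes and parameter values
are illustrative, and ``standardize="zscore_sample"`` assumes a nilearn
version where this option is available.

>>> import numpy as np
>>> from nilearn.connectome import ConnectivityMeasure
>>> rng = np.random.default_rng(0)
>>> time_series = [rng.standard_normal((100, 5)) for _ in range(3)]
>>> conn = ConnectivityMeasure(
...     kind="correlation", standardize="zscore_sample"
... )
>>> conn.fit_transform(time_series).shape
(3, 5, 5)
>>> conn_vec = ConnectivityMeasure(
...     kind="correlation", vectorize=True, standardize="zscore_sample"
... )
>>> conn_vec.fit_transform(time_series).shape
(3, 15)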

434 """ 

435 

436 def __init__( 

437 self, 

438 cov_estimator=None, 

439 kind="covariance", 

440 vectorize=False, 

441 discard_diagonal=False, 

442 standardize=True, 

443 ): 

444 self.cov_estimator = cov_estimator 

445 self.kind = kind 

446 self.vectorize = vectorize 

447 self.discard_diagonal = discard_diagonal 

448 self.standardize = standardize 

449 

450 def _more_tags(self): 

451 """Return estimator tags. 

452 

453 TODO remove when bumping sklearn_version > 1.5 

454 """ 

455 return self.__sklearn_tags__() 

456 

457 def __sklearn_tags__(self): 

458 """Return estimator tags. 

459 

460 See the sklearn documentation for more details on tags 

461 https://scikit-learn.org/1.6/developers/develop.html#estimator-tags 

462 """ 

463 # TODO 

464 # get rid of if block 

465 # when bumping sklearn_version > 1.5

466 # see https://github.com/scikit-learn/scikit-learn/pull/29677 

467 if SKLEARN_LT_1_6:  [467 ↛ 472: the condition on line 467 was always true]

468 from nilearn._utils.tags import tags 

469 

470 return tags(niimg_like=False) 

471 

472 from nilearn._utils.tags import InputTags 

473 

474 tags = super().__sklearn_tags__() 

475 tags.input_tags = InputTags(niimg_like=False) 

476 return tags 

477 

478 def _check_input(self, X, confounds=None): 

479 subjects_types = [type(s) for s in X] 

480 if set(subjects_types) != {np.ndarray}: 

481 raise ValueError( 

482 "Each subject must be 2D numpy.ndarray.\n " 

483 f"You provided {subjects_types}" 

484 ) 

485 

486 subjects_dims = [s.ndim for s in X] 

487 if set(subjects_dims) != {2}:  [487 ↛ 488: the condition on line 487 was never true]

488 raise ValueError( 

489 "Each subject must be 2D numpy.ndarray.\n " 

490 f"You provided arrays of dimensions {subjects_dims}" 

491 ) 

492 

493 features_dims = [s.shape[1] for s in X] 

494 if len(set(features_dims)) > 1:  [494 ↛ 495: the condition on line 494 was never true]

495 raise ValueError( 

496 "All subjects must have the same number of features.\n" 

497 f"You provided: {features_dims}" 

498 ) 

499 

500 for s in X: 

501 check_array(s, accept_sparse=False) 

502 

503 if confounds is not None and not hasattr(confounds, "__iter__"):  [503 ↛ 504: the condition on line 503 was never true]

504 raise ValueError( 

505 "'confounds' input argument must be an iterable. " 

506 f"You provided {confounds.__class__}" 

507 ) 

508 

509 @fill_doc 

510 def fit(self, X, y=None): 

511 """Fit the covariance estimator to the given time series for each \ 

512 subject. 

513 

514 Parameters 

515 ---------- 

516 X : iterable of numpy.ndarray, \ 

517 shape for each (n_samples, n_features) 

518 The input subjects time series. 

519 The number of samples may differ from one subject to another. 

520 

521 %(y_dummy)s 

522 

523 Returns 

524 ------- 

525 self : ConnectivityMeasure instance

526 The object itself. Useful for chaining operations. 

527 

528 """ 

529 del y 

530 self._fit_transform(X, do_fit=True) 

531 return self 

532 

533 def _fit_transform( 

534 self, X, do_transform=False, do_fit=False, confounds=None 

535 ): 

536 """Avoid duplication of computation.""" 

537 if self.cov_estimator is None:  [537 ↛ 538: the condition on line 537 was never true]

538 self.cov_estimator = LedoitWolf(store_precision=False) 

539 

540 # casting to a list 

541 # to make it easier to check with sklearn estimator compliance 

542 if not hasattr(X, "__iter__"):  [542 ↛ 543: the condition on line 542 was never true]

543 raise TypeError( 

544 "Input must be an iterable of numpy arrays. " 

545 f"Got {X.__class__.__name__}" 

546 ) 

547 if isinstance(X, np.ndarray) and X.ndim == 2: 

548 check_array(X, accept_sparse=False) 

549 X = [X] 

550 self._check_input(X, confounds=confounds) 

551 

552 if do_fit: 

553 self.cov_estimator_ = clone(self.cov_estimator) 

554 

555 # Compute all the matrices, stored in "connectivities" 

556 if self.kind == "correlation":  [556 ↛ 557: the condition on line 556 was never true]

557 covariances_std = [ 

558 self.cov_estimator_.fit( 

559 signal.standardize_signal( 

560 x, 

561 detrend=False, 

562 standardize=self.standardize, 

563 ) 

564 ).covariance_ 

565 for x in X 

566 ] 

567 connectivities = [cov_to_corr(cov) for cov in covariances_std] 

568 else: 

569 covariances = [self.cov_estimator_.fit(x).covariance_ for x in X] 

570 if self.kind in ("covariance", "tangent"):  [570 ↛ 572: the condition on line 570 was always true]

571 connectivities = covariances 

572 elif self.kind == "precision": 

573 connectivities = [linalg.inv(cov) for cov in covariances] 

574 elif self.kind == "partial correlation": 

575 connectivities = [ 

576 prec_to_partial(linalg.inv(cov)) for cov in covariances 

577 ] 

578 else: 

579 allowed_kinds = ( 

580 "correlation", 

581 "partial correlation", 

582 "tangent", 

583 "covariance", 

584 "precision", 

585 ) 

586 raise ValueError( 

587 f"Allowed connectivity kinds are {allowed_kinds}. " 

588 f"Got kind {self.kind}." 

589 ) 

590 

591 # Store the mean 

592 if do_fit: 

593 if self.kind == "tangent":  [593 ↛ 594: the condition on line 593 was never true]

594 self.mean_ = _geometric_mean( 

595 covariances, max_iter=30, tol=1e-7 

596 ) 

597 self.whitening_ = _map_eigenvalues( 

598 lambda x: 1.0 / np.sqrt(x), self.mean_ 

599 ) 

600 else: 

601 self.mean_ = np.mean(connectivities, axis=0) 

602 # Fight numerical instabilities: make symmetric 

603 self.mean_ = self.mean_ + self.mean_.T 

604 self.mean_ *= 0.5 

605 

606 # Compute the vector we return on transform 

607 if do_transform: 

608 if self.kind == "tangent":  [608 ↛ 609: the condition on line 608 was never true]

609 connectivities = [ 

610 _map_eigenvalues( 

611 np.log, self.whitening_.dot(cov).dot(self.whitening_) 

612 ) 

613 for cov in connectivities 

614 ] 

615 

616 connectivities = np.array(connectivities) 

617 

618 if confounds is not None and not self.vectorize:  [618 ↛ 619: the condition on line 618 was never true]

619 error_message = ( 

620 "'confounds' are provided but vectorize=False. " 

621 "Confounds are only cleaned on vectorized matrices " 

622 "as second level connectome regression " 

623 "but not on symmetric matrices." 

624 ) 

625 raise ValueError(error_message) 

626 

627 if self.vectorize:  [627 ↛ 628: the condition on line 627 was never true]

628 connectivities = sym_matrix_to_vec( 

629 connectivities, discard_diagonal=self.discard_diagonal 

630 ) 

631 if confounds is not None: 

632 connectivities = signal.clean( 

633 connectivities, confounds=confounds 

634 ) 

635 

636 return connectivities 

637 

638 @fill_doc 

639 def fit_transform(self, X, y=None, confounds=None): 

640 """Fit the covariance estimator to the given time series \ 

641 for each subject. \ 

642 Then apply transform to covariance matrices for the chosen kind. 

643 

644 Parameters 

645 ---------- 

646 X : iterable of n_subjects numpy.ndarray with shapes \ 

647 (n_samples, n_features) 

648 The input subjects time series. The number of samples may differ from one subject to another.

649 

650 %(y_dummy)s 

651 

652 confounds : np.ndarray with shape (n_samples) or \ 

653 (n_samples, n_confounds), or pandas DataFrame, default=None 

654 Confounds to be cleaned on the vectorized matrices. Only takes

655 effect when vectorize=True.

656 This parameter is passed to signal.clean. Please see the related 

657 documentation for details. 

658 

659 Returns 

660 ------- 

661 output : numpy.ndarray, shape (n_subjects, n_features, n_features) or \ 

662 (n_subjects, n_features * (n_features + 1) / 2) if vectorize \ 

663 is set to True. 

664 The transformed individual connectivities, as matrices or vectors. 

665 Vectors are cleaned when vectorize=True and confounds are provided. 

666 
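Examples
--------
A minimal sketch of tangent-space parametrization on a small synthetic
group (illustrative data; "tangent" needs more than one subject, and
``standardize="zscore_sample"`` assumes a recent nilearn).

>>> import numpy as np
>>> from nilearn.connectome import ConnectivityMeasure
>>> rng = np.random.default_rng(0)
>>> time_series = [rng.standard_normal((100, 5)) for _ in range(3)]
>>> conn = ConnectivityMeasure(kind="tangent", standardize="zscore_sample")
>>> conn.fit_transform(time_series).shape
(3, 5, 5)
>>> conn.mean_.shape
(5, 5)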

667 """ 

668 del y 

669 # casting to a list 

670 # to make it easier to check with sklearn estimator compliance 

671 if not hasattr(X, "__iter__"):  [671 ↛ 672: the condition on line 671 was never true]

672 raise TypeError( 

673 "Input must be an iterable of numpy arrays. " 

674 f"Got {X.__class__.__name__}" 

675 ) 

676 if isinstance(X, np.ndarray) and X.ndim == 2:  [676 ↛ 679: the condition on line 676 was always true]

677 check_array(X, accept_sparse=False) 

678 X = [X] 

679 if self.kind == "tangent" and len(X) <= 1:  [679 ↛ 684: the condition on line 679 was never true]

680 # Check that people are applying fit_transform to a group of 

681 # subject 

682 # We can only impose this in fit_transform, as it is legit to 

683 # fit only on a single given reference point 

684 raise ValueError( 

685 "Tangent space parametrization can only " 

686 "be applied to a group of subjects, as it returns " 

687 f"deviations to the mean. You provided {X!r}" 

688 ) 

689 return self._fit_transform( 

690 X, do_fit=True, do_transform=True, confounds=confounds 

691 ) 

692 

693 def transform(self, X, confounds=None): 

694 """Apply transform to covariances matrices to get the connectivity \ 

695 matrices for the chosen kind. 

696 

697 Parameters 

698 ---------- 

699 X : iterable of n_subjects numpy.ndarray with shapes \ 

700 (n_samples, n_features) 

701 The input subjects time series. The number of samples may differ 

702 from one subject to another. 

703 

704 confounds : numpy.ndarray with shape (n_samples) or \ 

705 (n_samples, n_confounds), default=None 

706 Confounds to be cleaned on the vectorized matrices. Only takes

707 effect when vectorize=True.

708 This parameter is passed to signal.clean. Please see the related 

709 documentation for details. 

710 

711 Returns 

712 ------- 

713 output : numpy.ndarray, shape (n_subjects, n_features, n_features) or \ 

714 (n_subjects, n_features * (n_features + 1) / 2) if vectorize \ 

715 is set to True. 

716 The transformed individual connectivities, as matrices or vectors. 

717 Vectors are cleaned when vectorize=True and confounds are provided. 

718 
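Examples
--------
A minimal sketch: fit on a small synthetic group, then transform one new
subject (illustrative data and parameter values).

>>> import numpy as np
>>> from nilearn.connectome import ConnectivityMeasure
>>> rng = np.random.default_rng(0)
>>> train = [rng.standard_normal((100, 5)) for _ in range(3)]
>>> conn = ConnectivityMeasure(
...     kind="partial correlation", standardize="zscore_sample"
... ).fit(train)
>>> new_subject = rng.standard_normal((80, 5))
>>> conn.transform([new_subject]).shape
(1, 5, 5)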

719 """ 

720 check_is_fitted(self) 

721 return self._fit_transform(X, do_transform=True, confounds=confounds) 

722 

723 def __sklearn_is_fitted__(self): 

724 return hasattr(self, "cov_estimator_") 

725 

726 def inverse_transform(self, connectivities, diagonal=None): 

727 """Return connectivity matrices from connectivities, \ 

728 vectorized or not. 

729 

730 If kind is 'tangent', the covariance matrices are reconstructed. 

731 

732 Parameters 

733 ---------- 

734 connectivities : :obj:`list` of n_subjects numpy.ndarray with shapes \ 

735 (n_features, n_features) or (n_features * (n_features + 1) / 2,) 

736 or ((n_features - 1) * n_features / 2,) 

737 Connectivities of each subject, vectorized or not. 

738 

739 diagonal : numpy.ndarray, shape (n_subjects, n_features), default=None 

740 The diagonals of the connectivity matrices. 

741 

742 Returns 

743 ------- 

744 output : numpy.ndarray, shape (n_subjects, n_features, n_features) 

745 The corresponding connectivity matrices. If kind is 'correlation'/ 

746 'partial correlation', the correlation/partial correlation 

747 matrices are returned. 

748 If kind is 'tangent', the covariance matrices are reconstructed. 

749 
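Examples
--------
A round-trip sketch with vectorized correlation matrices and a discarded
diagonal (synthetic, illustrative data).

>>> import numpy as np
>>> from nilearn.connectome import ConnectivityMeasure
>>> rng = np.random.default_rng(0)
>>> time_series = [rng.standard_normal((100, 5)) for _ in range(3)]
>>> conn = ConnectivityMeasure(
...     kind="correlation", vectorize=True, discard_diagonal=True,
...     standardize="zscore_sample"
... )
>>> vecs = conn.fit_transform(time_series)
>>> vecs.shape
(3, 10)
>>> conn.inverse_transform(vecs).shape
(3, 5, 5)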

750 """ 

751 check_is_fitted(self) 

752 

753 connectivities = np.array(connectivities) 

754 if self.vectorize: 

755 if self.discard_diagonal and diagonal is None: 

756 if self.kind in ["correlation", "partial correlation"]: 

757 diagonal = np.ones( 

758 (connectivities.shape[0], self.mean_.shape[0]) 

759 ) / sqrt(2.0) 

760 else: 

761 raise ValueError( 

762 "diagonal values has been discarded and are unknown " 

763 f"for {self.kind} kind, " 

764 "cannot reconstruct connectivity matrices." 

765 ) 

766 

767 connectivities = vec_to_sym_matrix( 

768 connectivities, diagonal=diagonal 

769 ) 

770 

771 if self.kind == "tangent": 

772 mean_sqrt = _map_eigenvalues(np.sqrt, self.mean_) 

773 connectivities = [ 

774 mean_sqrt.dot(_map_eigenvalues(np.exp, displacement)).dot( 

775 mean_sqrt 

776 ) 

777 for displacement in connectivities 

778 ] 

779 connectivities = np.array(connectivities) 

780 

781 return connectivities 

782 

783 def set_output(self, *, transform=None): 

784 """Set the output container when ``"transform"`` is called. 

785 

786 .. warning:: 

787 

788 This has not been implemented yet. 

789 """ 

790 raise NotImplementedError()