Coverage for nilearn/connectome/tests/test_connectivity_matrices.py: 19%
427 statements
« prev ^ index » next coverage.py v7.9.1, created at 2025-06-20 10:58 +0200
1import copy
2import warnings
3from math import cosh, exp, log, sinh, sqrt
5import numpy as np
6import pytest
7from numpy.testing import assert_array_almost_equal, assert_array_equal
8from pandas import DataFrame
9from scipy import linalg
10from sklearn.covariance import EmpiricalCovariance, LedoitWolf
11from sklearn.utils.estimator_checks import parametrize_with_checks
13from nilearn._utils.estimator_checks import (
14 check_estimator,
15 nilearn_check_estimator,
16 return_expected_failed_checks,
17)
18from nilearn._utils.extmath import is_spd
19from nilearn._utils.tags import SKLEARN_LT_1_6
20from nilearn.connectome.connectivity_matrices import (
21 ConnectivityMeasure,
22 _check_spd,
23 _check_square,
24 _form_symmetric,
25 _geometric_mean,
26 _map_eigenvalues,
27 prec_to_partial,
28 sym_matrix_to_vec,
29 vec_to_sym_matrix,
30)
31from nilearn.tests.test_signal import generate_signals
# All connectivity kinds accepted by ConnectivityMeasure.
CONNECTIVITY_KINDS = (
    "covariance",
    "correlation",
    "tangent",
    "precision",
    "partial correlation",
)

# Dimensions of the synthetic signals used throughout this module.
N_FEATURES = 49

N_SUBJECTS = 5

# Estimators run through the sklearn / nilearn compliance checks.
ESTIMATORS_TO_CHECK = [
    ConnectivityMeasure(cov_estimator=EmpiricalCovariance())
]
if SKLEARN_LT_1_6:

    @pytest.mark.parametrize(
        "estimator, check, name",
        check_estimator(estimators=ESTIMATORS_TO_CHECK),
    )
    def test_check_estimator_sklearn_valid(estimator, check, name):
        """Check compliance with sklearn estimators."""
        # "check_estimators_fit_returns_self" fails with sklearn 1.4
        # whether passed as a valid or invalid check, so skip it here.
        # Note it passes fine with later sklearn versions.
        if name == "check_estimators_fit_returns_self":
            pytest.skip("ignored for older sklearn")
        check(estimator)

    @pytest.mark.xfail(reason="invalid checks should fail")
    @pytest.mark.parametrize(
        "estimator, check, name",
        check_estimator(estimators=ESTIMATORS_TO_CHECK, valid=False),
    )
    def test_check_estimator_sklearn_invalid(
        estimator,
        check,
        name,  # noqa: ARG001
    ):
        """Check compliance with sklearn estimators."""
        check(estimator)

else:

    @parametrize_with_checks(
        estimators=ESTIMATORS_TO_CHECK,
        expected_failed_checks=return_expected_failed_checks,
    )
    def test_check_estimator_sklearn_2(estimator, check):
        """Check compliance with sklearn estimators."""
        check(estimator)
@pytest.mark.parametrize(
    "estimator, check, name",
    nilearn_check_estimator(estimators=ESTIMATORS_TO_CHECK),
)
def test_check_estimator_nilearn(estimator, check, name):  # noqa: ARG001
    """Check compliance with nilearn estimators rules."""
    check(estimator)
def random_diagonal(p, v_min=1.0, v_max=2.0, random_state=0):
    """Generate a random diagonal matrix.

    Parameters
    ----------
    p : int
        The first dimension of the array.

    v_min : float, optional (default to 1.)
        Minimal element.

    v_max : float, optional (default to 2.)
        Maximal element.

    %(random_state)s
        default=0

    Returns
    -------
    output : numpy.ndarray, shape (p, p)
        A diagonal matrix with the given minimal and maximal elements.

    """
    rng = np.random.default_rng(random_state)
    # Draw p values uniformly spread over [v_min, v_max).
    elements = rng.random(p) * (v_max - v_min) + v_min
    # Pin the extremes so both bounds are attained exactly.
    elements[elements == np.amax(elements)] = v_max
    elements[elements == np.amin(elements)] = v_min
    return np.diag(elements)
def random_spd(p, eig_min, cond, random_state=0):
    """Generate a random symmetric positive definite matrix.

    Parameters
    ----------
    p : int
        The first dimension of the array.

    eig_min : float
        Minimal eigenvalue.

    cond : float
        Condition number, defined as the ratio of the maximum eigenvalue to the
        minimum one.

    %(random_state)s
        default=0

    Returns
    -------
    output : numpy.ndarray, shape (p, p)
        A symmetric positive definite matrix with the given minimal eigenvalue
        and condition number.
    """
    rng = np.random.default_rng(random_state)
    # Random orthonormal basis from the QR factorization of a Gaussian matrix.
    basis, _ = linalg.qr(rng.standard_normal((p, p)))
    # Eigenvalues in [eig_min, cond * eig_min] yield the requested
    # conditioning.
    eigenvalues = random_diagonal(
        p, v_min=eig_min, v_max=cond * eig_min, random_state=random_state
    )
    return basis.dot(eigenvalues).dot(basis.T)
def _signals(n_subjects=N_SUBJECTS):
    """Generate signals and compute covariances \
    and apply confounds while computing covariances.
    """
    signals = []
    for k in range(n_subjects):
        # Each subject gets a slightly different number of samples.
        signal, _, confounds = generate_signals(
            n_features=N_FEATURES,
            n_confounds=5,
            length=200 + k,
            same_variance=False,
        )
        signals.append(signal)
        # In-place demeaning also affects the array just appended.
        signal -= signal.mean(axis=0)
    # NOTE: only the confounds of the last subject are returned.
    return signals, confounds
@pytest.fixture
def signals():
    """Signals for N_SUBJECTS subjects (confounds discarded)."""
    subject_signals, _ = _signals(N_SUBJECTS)
    return subject_signals
@pytest.fixture
def signals_and_covariances(cov_estimator):
    """Return signals plus covariances matching the given cov_estimator."""
    signals, _ = _signals()
    emp_covs = []
    ledoit_covs = []
    ledoit_estimator = LedoitWolf()
    for k, subject_signal in enumerate(signals):
        n_samples = 200 + k
        subject_signal -= subject_signal.mean(axis=0)
        emp_covs.append(subject_signal.T.dot(subject_signal) / n_samples)
        ledoit_covs.append(ledoit_estimator.fit(subject_signal).covariance_)

    # LedoitWolf is checked first because it subclasses EmpiricalCovariance.
    if isinstance(cov_estimator, LedoitWolf):
        return signals, ledoit_covs
    if isinstance(cov_estimator, EmpiricalCovariance):
        return signals, emp_covs
    # Any other estimator: returns None, as before.
    return None
def test_check_square():
    """_check_square rejects a non-square input."""
    with pytest.raises(ValueError, match="Expected a square matrix"):
        _check_square(np.ones((2, 3)))
@pytest.mark.parametrize(
    "invalid_input",
    [
        np.array([[0, 1], [0, 0]]),  # non symmetric
        np.ones((3, 3)),  # non SPD
    ],
)
def test_check_spd(invalid_input):
    """_check_spd rejects non-symmetric and non-SPD matrices."""
    with pytest.raises(
        ValueError, match="Expected a symmetric positive definite matrix."
    ):
        _check_spd(invalid_input)
def test_map_eigenvalues_on_exp_map():
    """Matrix exponential of the 2x2 all-ones matrix matches closed form."""
    sym = np.ones((2, 2))
    expected = exp(1.0) * np.array(
        [[cosh(1.0), sinh(1.0)], [sinh(1.0), cosh(1.0)]]
    )
    assert_array_almost_equal(_map_eigenvalues(np.exp, sym), expected)
def test_map_eigenvalues_on_sqrt_map():
    """Matrix square root recovers the factor of a squared SPD matrix."""
    expected_sqrt = np.array(
        [[2.0, -1.0, 0.0], [-1.0, 2.0, -1.0], [0.0, -1.0, 2.0]]
    )
    squared = expected_sqrt.dot(expected_sqrt)
    assert_array_almost_equal(
        _map_eigenvalues(np.sqrt, squared), expected_sqrt
    )
def test_map_eigenvalues_on_log_map():
    """Matrix logarithm of a known 2x2 SPD matrix matches closed form."""
    spd = np.array([[1.25, 0.75], [0.75, 1.25]])
    expected_log = np.array([[0.0, log(2.0)], [log(2.0), 0.0]])
    assert_array_almost_equal(_map_eigenvalues(np.log, spd), expected_log)
def test_geometric_mean_couple():
    """Geometric mean of two SPD matrices matches its closed form."""
    n_features = 7
    spd1 = np.ones((n_features, n_features))
    spd1 = spd1.dot(spd1) + n_features * np.eye(n_features)
    spd2 = np.tril(np.ones((n_features, n_features)))
    spd2 = spd2.dot(spd2.T)
    vals, vecs = np.linalg.eigh(spd2)
    spd2_sqrt = _form_symmetric(np.sqrt, vals, vecs)
    spd2_inv_sqrt = _form_symmetric(np.sqrt, 1.0 / vals, vecs)
    # Closed form of the geometric mean of a pair:
    # g = B^(1/2) (B^(-1/2) A B^(-1/2))^(1/2) B^(1/2)
    expected = spd2_sqrt.dot(
        _map_eigenvalues(np.sqrt, spd2_inv_sqrt.dot(spd1).dot(spd2_inv_sqrt))
    ).dot(spd2_sqrt)

    assert_array_almost_equal(_geometric_mean([spd1, spd2]), expected)
def test_geometric_mean_diagonal():
    """Geometric mean of diagonal matrices is the entrywise geometric mean."""
    n_matrices = 20
    n_features = 5
    diags = []
    for k in range(n_matrices):
        diag = np.eye(n_features)
        diag[k % n_features, k % n_features] = 1e4 + k
        pos = (n_features - 1) // (k + 1)
        diag[pos, pos] = (k + 1) * 1e-4
        diags.append(diag)
    expected = np.prod(np.array(diags), axis=0) ** (1 / float(len(diags)))

    assert_array_almost_equal(_geometric_mean(diags), expected)
def test_geometric_mean_geodesic():
    """Geometric mean of points along a geodesic is the time-midpoint."""
    n_matrices = 10
    n_features = 6
    sym = np.arange(n_features) / np.linalg.norm(np.arange(n_features))
    sym = sym * sym[:, np.newaxis]
    times = np.arange(n_matrices)
    non_singular = np.eye(n_features)
    non_singular[1:3, 1:3] = np.array([[-1, -0.5], [-0.5, -1]])
    spds = [
        non_singular.dot(_map_eigenvalues(np.exp, t * sym)).dot(
            non_singular.T
        )
        for t in times
    ]
    expected = non_singular.dot(
        _map_eigenvalues(np.exp, times.mean() * sym)
    ).dot(non_singular.T)

    assert_array_almost_equal(_geometric_mean(spds), expected)
def test_geometric_mean_properties():
    """_geometric_mean leaves its inputs untouched and returns an SPD."""
    n_matrices = 40
    n_features = 15
    spds = [
        random_spd(n_features, eig_min=1.0, cond=10.0, random_state=0)
        for _ in range(n_matrices)
    ]
    input_spds = copy.copy(spds)

    gmean = _geometric_mean(spds)

    # Generic properties
    assert isinstance(spds, list)
    # Inputs must not be modified in place.
    for spd, input_spd in zip(spds, input_spds):
        assert_array_equal(spd, input_spd)
    assert is_spd(gmean, decimal=7)
def random_non_singular(p, sing_min=1.0, sing_max=2.0, random_state=0):
    """Generate a random nonsingular matrix.

    Parameters
    ----------
    p : int
        The first dimension of the array.

    sing_min : float, optional (default to 1.)
        Minimal singular value.

    sing_max : float, optional (default to 2.)
        Maximal singular value.

    %(random_state)s
        default=0

    Returns
    -------
    output : numpy.ndarray, shape (p, p)
        A nonsingular matrix with the given minimal and maximal singular
        values.
    """
    rng = np.random.default_rng(random_state)
    singular_values = random_diagonal(
        p, v_min=sing_min, v_max=sing_max, random_state=random_state
    )
    # Build orthogonal SVD factors from two independent Gaussian draws.
    left, _ = linalg.qr(rng.standard_normal((p, p)))
    right, _ = linalg.qr(rng.standard_normal((p, p)))
    return left.dot(singular_values).dot(right.T)
def test_geometric_mean_properties_check_invariance():
    """Geometric mean is invariant to reordering, congruence, inversion."""
    n_matrices = 40
    n_features = 15
    spds = [
        random_spd(n_features, eig_min=1.0, cond=10.0, random_state=0)
        for _ in range(n_matrices)
    ]

    gmean = _geometric_mean(spds)

    # Invariance under reordering
    spds.reverse()
    spds.insert(0, spds[1])
    spds.pop(2)
    assert_array_almost_equal(_geometric_mean(spds), gmean)

    # Invariance under congruent transformation
    non_singular = random_non_singular(n_features, random_state=0)
    spds_cong = [non_singular.dot(spd).dot(non_singular.T) for spd in spds]
    assert_array_almost_equal(
        _geometric_mean(spds_cong),
        non_singular.dot(gmean).dot(non_singular.T),
    )

    # Invariance under inversion
    spds_inv = [linalg.inv(spd) for spd in spds]
    init = linalg.inv(np.mean(spds, axis=0))
    assert_array_almost_equal(
        _geometric_mean(spds_inv, init=init), linalg.inv(gmean)
    )
def grad_geometric_mean(mats, init=None, max_iter=10, tol=1e-7):
    """Return the norm of the covariant derivative at each iteration step \
    of geometric_mean. See its docstring for details.

    Norm is intrinsic norm on the tangent space of the manifold of symmetric
    positive definite matrices.

    Parameters
    ----------
    mats : list of numpy.ndarray, shape (p, p)
        SPD matrices whose geometric mean is being computed.

    init : numpy.ndarray, shape (p, p) or None, optional
        Initial guess for the mean. Defaults to the arithmetic mean.

    max_iter : int, optional (default to 10)
        Maximal number of gradient descent iterations.

    tol : float or None, optional (default to 1e-7)
        Stop once the normalized gradient norm falls below this value.

    Returns
    -------
    grad_norm : list of float
        Norm of the covariant derivative in the tangent space at each step.
    """
    mats = np.array(mats)

    # Initialization.
    # BUG FIX: the original used `gmean = init or np.mean(mats, axis=0)`,
    # which raises ValueError when `init` is an ndarray (truth value of an
    # array is ambiguous); test explicitly against None instead.
    gmean = np.mean(mats, axis=0) if init is None else init
    norm_old = np.inf
    step = 1.0
    grad_norm = []
    for _ in range(max_iter):
        # Computation of the gradient
        vals_gmean, vecs_gmean = linalg.eigh(gmean)
        gmean_inv_sqrt = _form_symmetric(np.sqrt, 1.0 / vals_gmean, vecs_gmean)
        whitened_mats = [
            gmean_inv_sqrt.dot(mat).dot(gmean_inv_sqrt) for mat in mats
        ]
        logs = [_map_eigenvalues(np.log, w_mat) for w_mat in whitened_mats]

        # Covariant derivative is - gmean.dot(logs_mean)
        logs_mean = np.mean(logs, axis=0)

        # Norm of the covariant derivative on
        # the tangent space at point gmean
        norm = np.linalg.norm(logs_mean)

        # Update of the minimizer
        vals_log, vecs_log = linalg.eigh(logs_mean)
        gmean_sqrt = _form_symmetric(np.sqrt, vals_gmean, vecs_gmean)
        gmean = gmean_sqrt.dot(
            _form_symmetric(np.exp, vals_log * step, vecs_log)
        ).dot(gmean_sqrt)

        # Update the norm and the step size
        norm_old = min(norm, norm_old)
        if norm > norm_old:
            step = step / 2.0
            norm = norm_old

        grad_norm.append(norm / gmean.size)
        if tol is not None and norm / gmean.size < tol:
            break

    return grad_norm
def test_geometric_mean_properties_check_gradient():
    """Gradient norm decreases; early stop raises a warning."""
    n_matrices = 40
    n_features = 15
    spds = [
        random_spd(n_features, eig_min=1.0, cond=10.0, random_state=0)
        for _ in range(n_matrices)
    ]

    grad_norm = grad_geometric_mean(spds, tol=1e-20)

    # Gradient norm is decreasing
    assert np.amax(np.diff(grad_norm)) <= 0.0

    # Check warning if gradient norm in the last step is less than
    # tolerance
    max_iter = 1
    tol = 1e-20
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        _geometric_mean(spds, max_iter=max_iter, tol=tol)
    assert len(caught) == 1

    grad_norm = grad_geometric_mean(spds, max_iter=max_iter, tol=tol)

    assert len(grad_norm) == max_iter
    assert grad_norm[-1] > tol
# proportion of badly conditioned matrices
@pytest.mark.parametrize("p", [0.5, 1.0])
def test_geometric_mean_properties_evaluate_convergence(p):
    """Convergence with a mix of badly and well conditioned matrices."""
    n_matrices = 40
    n_features = 15
    # A warning is printed if tolerance is not reached
    n_bad = int(p * n_matrices)
    spds = [
        random_spd(n_features, eig_min=1e-2, cond=1e6, random_state=0)
        for _ in range(n_bad)
    ]
    spds.extend(
        random_spd(n_features, eig_min=1.0, cond=10.0, random_state=0)
        for _ in range(n_bad, n_matrices)
    )
    # Harder problems need more iterations to converge.
    max_iter = 30 if p < 1 else 60

    _geometric_mean(spds, max_iter=max_iter, tol=1e-5)
def test_geometric_mean_error_non_square_matrix():
    """A non-square input raises a ValueError."""
    non_square = np.ones((5, 6))

    with pytest.raises(ValueError, match="Expected a square matrix"):
        _geometric_mean([non_square])
def test_geometric_mean_error_input_matrices_have_different_shapes():
    """Matrices of mismatched shapes raise a ValueError."""
    mat1 = np.eye(5)
    mat2 = np.ones((6, 6))

    with pytest.raises(
        ValueError, match="Matrices are not of the same shape."
    ):
        _geometric_mean([mat1, mat2])
def test_geometric_mean_error_non_spd_input_matrix():
    """A non-SPD input raises a ValueError."""
    non_spd = np.ones((6, 6))

    with pytest.raises(
        ValueError, match="Expected a symmetric positive definite matrix."
    ):
        _geometric_mean([non_spd])
def test_sym_matrix_to_vec():
    """Vectorization of a symmetric matrix, with and without diagonal."""
    sym = np.ones((3, 3))
    inv_sqrt2 = 1.0 / sqrt(2.0)
    expected = np.array([inv_sqrt2, 1.0, inv_sqrt2, 1.0, 1.0, inv_sqrt2])

    assert_array_almost_equal(sym_matrix_to_vec(sym), expected)

    expected = np.array([1.0, 1.0, 1.0])

    assert_array_almost_equal(
        sym_matrix_to_vec(sym, discard_diagonal=True), expected
    )
def test_sym_matrix_to_vec_is_the_inverse_of_vec_to_sym_matrix(rng):
    """Round-trips between vector and symmetric-matrix representations."""
    n = 5
    p = n * (n + 1) // 2

    # when diagonal is included
    vec = rng.random(p)
    sym = vec_to_sym_matrix(vec)

    assert_array_almost_equal(sym_matrix_to_vec(sym), vec)

    # when diagonal given separately: vec holds the off-diagonal
    # entries of an (n + 1) x (n + 1) matrix
    diagonal = rng.random(n + 1)
    sym = vec_to_sym_matrix(vec, diagonal=diagonal)

    assert_array_almost_equal(
        sym_matrix_to_vec(sym, discard_diagonal=True), vec
    )

    # multiple matrices case when diagonal is included
    vecs = np.asarray([vec, 2.0 * vec, 0.5 * vec])
    syms = vec_to_sym_matrix(vecs)

    assert_array_almost_equal(sym_matrix_to_vec(syms), vecs)

    # multiple matrices case when diagonal is given separately
    diagonals = np.asarray([diagonal, 3.0 * diagonal, -diagonal])
    syms = vec_to_sym_matrix(vecs, diagonal=diagonals)

    assert_array_almost_equal(
        sym_matrix_to_vec(syms, discard_diagonal=True), vecs
    )
def test_vec_to_sym_matrix():
    """vec_to_sym_matrix values and its inverse relation."""
    # Check output value is correct
    vec = np.ones(6)
    sym = np.array(
        [[sqrt(2), 1.0, 1.0], [1.0, sqrt(2), 1.0], [1.0, 1.0, sqrt(2)]]
    )

    assert_array_almost_equal(vec_to_sym_matrix(vec), sym)

    # Check output value is correct with separate diagonal
    vec = np.ones(3)
    diagonal = np.ones(3)

    assert_array_almost_equal(vec_to_sym_matrix(vec, diagonal=diagonal), sym)

    # Check vec_to_sym_matrix is the inverse function of sym_matrix_to_vec
    # when diagonal is included
    assert_array_almost_equal(vec_to_sym_matrix(sym_matrix_to_vec(sym)), sym)

    # when diagonal is discarded
    vec = sym_matrix_to_vec(sym, discard_diagonal=True)
    diagonal = np.diagonal(sym) / sqrt(2)

    assert_array_almost_equal(vec_to_sym_matrix(vec, diagonal=diagonal), sym)
def test_vec_to_sym_matrix_errors():
    """Invalid vector or diagonal sizes raise a ValueError."""
    # 31 is not a triangular number, so no symmetric matrix fits
    with pytest.raises(ValueError, match="Vector of unsuitable shape"):
        vec_to_sym_matrix(np.ones(31))

    # Diagonal shape incompatible with the given vector
    with pytest.raises(ValueError, match="incompatible with vector"):
        vec_to_sym_matrix(np.ones(3), np.zeros(4))
def test_prec_to_partial():
    """Partial correlations computed from a known precision matrix."""
    precision = np.array(
        [
            [2.0, -1.0, 1.0],
            [-1.0, 2.0, -1.0],
            [1.0, -1.0, 1.0],
        ]
    )
    half_sqrt2 = sqrt(2.0) / 2.0
    expected_partial = np.array(
        [
            [1.0, 0.5, -half_sqrt2],
            [0.5, 1.0, half_sqrt2],
            [-half_sqrt2, half_sqrt2, 1.0],
        ]
    )

    assert_array_almost_equal(prec_to_partial(precision), expected_partial)
def test_connectivity_measure_errors():
    """Invalid subject inputs to ConnectivityMeasure raise explicit errors."""
    conn_measure = ConnectivityMeasure()

    # input subjects not 2D numpy.ndarrays
    with pytest.raises(
        ValueError, match="Each subject must be 2D numpy.ndarray."
    ):
        conn_measure.fit([np.ones((100, 40)), np.ones((10,))])

    # input subjects with different number of features
    with pytest.raises(
        ValueError, match="All subjects must have the same number of features."
    ):
        conn_measure.fit([np.ones((100, 40)), np.ones((100, 41))])

    # fit_transform with a single subject and kind=tangent
    conn_measure = ConnectivityMeasure(kind="tangent")

    with pytest.raises(
        ValueError,
        match="Tangent space parametrization .* only be .* group of subjects",
    ):
        conn_measure.fit_transform([np.ones((100, 40))])
@pytest.mark.parametrize(
    "cov_estimator", [EmpiricalCovariance(), LedoitWolf()]
)
@pytest.mark.parametrize("kind", CONNECTIVITY_KINDS)
def test_connectivity_measure_generic(
    kind, cov_estimator, signals_and_covariances
):
    """Generic output properties hold for every kind and estimator."""
    signals, covs = signals_and_covariances

    # Check outputs properties
    input_covs = copy.copy(covs)
    conn_measure = ConnectivityMeasure(kind=kind, cov_estimator=cov_estimator)
    connectivities = conn_measure.fit_transform(signals)

    # Generic
    assert isinstance(connectivities, np.ndarray)
    assert len(connectivities) == len(covs)

    for k in range(len(connectivities)):
        # Reference covariances are untouched and stay SPD.
        assert_array_equal(input_covs[k], covs[k])
        assert is_spd(covs[k], decimal=7)
@pytest.mark.parametrize(
    "cov_estimator", [EmpiricalCovariance(), LedoitWolf()]
)
@pytest.mark.parametrize("kind", CONNECTIVITY_KINDS)
def test_connectivity_measure_generic_3d_array(kind, cov_estimator, signals):
    """Ensure ConnectivityMeasure accepts 3D arrays or tuple of 2D arrays."""
    conn_measure = ConnectivityMeasure(kind=kind, cov_estimator=cov_estimator)

    as_3d_array = np.asarray(
        [_signals(n_subjects=1)[0] for _ in range(5)]
    ).squeeze()
    assert as_3d_array.ndim == 3

    assert isinstance(conn_measure.fit_transform(as_3d_array), np.ndarray)

    as_tuple = tuple(signals)

    assert isinstance(conn_measure.fit_transform(as_tuple), np.ndarray)
def _assert_connectivity_tangent(connectivities, conn_measure, covs):
    """Check output value properties for tangent connectivity measure.

    Checks that outputs have the expected relationship to the input
    covariance matrices:

    - each tangent-space matrix is symmetric
    - the whitening matrix and the square root of the mean are SPD
    - whitening is the inverse of the mean's square root
    - mapping a tangent matrix back recovers the input covariance
    """
    for true_cov, displacement in zip(covs, connectivities):
        # Tangent-space matrices are symmetric.
        assert_array_almost_equal(displacement, displacement.T)

        assert is_spd(conn_measure.whitening_, decimal=7)

        gmean_sqrt = _map_eigenvalues(np.sqrt, conn_measure.mean_)
        assert is_spd(gmean_sqrt, decimal=7)
        # Whitening is the inverse of the geometric mean's square root.
        assert_array_almost_equal(
            conn_measure.whitening_.dot(gmean_sqrt),
            np.eye(N_FEATURES),
        )
        # Mapping back from the tangent space recovers the covariance.
        assert_array_almost_equal(
            gmean_sqrt.dot(_map_eigenvalues(np.exp, displacement)).dot(
                gmean_sqrt
            ),
            true_cov,
        )
def _assert_connectivity_precision(connectivities, covs):
    """Estimated precision matrix: \
    - is positive definite, \
    - its product with the true covariance matrix \
    is close to the identity matrix.
    """
    for true_cov, precision in zip(covs, connectivities):
        assert is_spd(precision, decimal=7)
        assert_array_almost_equal(
            precision.dot(true_cov), np.eye(N_FEATURES)
        )
def _assert_connectivity_correlation(connectivities, cov_estimator, covs):
    """Verify that the estimated correlation matrix: \
    - is symmetric and positive definite \
    - has values close to 1 on its diagonal.

    If the covariance estimator is exactly EmpiricalCovariance,
    the product of:
    - the square root of the diagonal of the true covariance matrix,
    - the estimated correlation matrix,
    - the square root of the diagonal of the true covariance matrix,

    should be close to the true covariance matrix.
    """
    for true_cov, correlation in zip(covs, connectivities):
        assert is_spd(correlation, decimal=7)

        assert_array_almost_equal(
            np.diag(correlation), np.ones(N_FEATURES)
        )

        # BUG FIX: the original tested `cov_estimator == EmpiricalCovariance()`.
        # sklearn estimators do not define __eq__, so that comparison uses
        # identity and is always False, making the branch below dead code.
        # A plain isinstance check would also match LedoitWolf (a subclass
        # of EmpiricalCovariance), so compare the exact type instead.
        if type(cov_estimator) is EmpiricalCovariance:
            # square root of the diagonal of the true covariance matrix
            d = np.sqrt(np.diag(np.diag(true_cov)))

            assert_array_almost_equal(
                d.dot(correlation).dot(d),
                true_cov,
            )
def _assert_connectivity_partial_correlation(connectivities, covs):
    """Partial correlations must match the rescaled negated precision."""
    for true_cov, partial in zip(covs, connectivities):
        precision = linalg.inv(true_cov)

        # square root of the diagonal elements of the precision matrix
        d = np.sqrt(np.diag(np.diag(precision)))

        # Rescale the computed partial correlation matrix: necessary to
        # ensure that its diagonal elements are equal to 1 so it can be
        # compared with the expected value below.
        normalized_partial = d.dot(partial).dot(d)

        # expected value: negated precision with positive diagonal
        expected_partial = -precision + 2 * np.diag(np.diag(precision))

        assert_array_almost_equal(normalized_partial, expected_partial)
@pytest.mark.parametrize(
    "kind",
    ["tangent", "precision", "correlation", "partial correlation"],
)
@pytest.mark.parametrize(
    "cov_estimator", [EmpiricalCovariance(), LedoitWolf()]
)
def test_connectivity_measure_specific_for_each_kind(
    kind, cov_estimator, signals_and_covariances
):
    """Dispatch kind-specific assertions on the fitted connectivities."""
    signals, covs = signals_and_covariances

    conn_measure = ConnectivityMeasure(kind=kind, cov_estimator=cov_estimator)
    connectivities = conn_measure.fit_transform(signals)

    if kind == "tangent":
        _assert_connectivity_tangent(connectivities, conn_measure, covs)
    elif kind == "precision":
        _assert_connectivity_precision(connectivities, covs)
    elif kind == "correlation":
        _assert_connectivity_correlation(connectivities, cov_estimator, covs)
    elif kind == "partial correlation":
        _assert_connectivity_partial_correlation(connectivities, covs)
@pytest.mark.parametrize("kind", CONNECTIVITY_KINDS)
def test_connectivity_measure_check_mean(kind, signals):
    """mean_ has the right shape and is not modified by transform."""
    conn_measure = ConnectivityMeasure(kind=kind)
    conn_measure.fit_transform(signals)

    assert conn_measure.mean_.shape == (N_FEATURES, N_FEATURES)

    if kind != "tangent":
        # For non-tangent kinds, mean_ is the plain average of the outputs.
        assert_array_almost_equal(
            conn_measure.mean_,
            np.mean(conn_measure.transform(signals), axis=0),
        )

    # Check that the mean isn't modified in transform
    conn_measure = ConnectivityMeasure(kind="covariance")
    conn_measure.fit(signals[:1])
    mean_after_fit = conn_measure.mean_
    conn_measure.transform(signals[1:])

    assert_array_equal(mean_after_fit, conn_measure.mean_)
@pytest.mark.parametrize("kind", CONNECTIVITY_KINDS)
def test_connectivity_measure_check_vectorization_option(kind, signals):
    """vectorize=True returns the vectorized connectivity matrices."""
    matrices = ConnectivityMeasure(kind=kind).fit_transform(signals)
    vectors = ConnectivityMeasure(
        vectorize=True, kind=kind
    ).fit_transform(signals)

    assert_array_almost_equal(vectors, sym_matrix_to_vec(matrices))
@pytest.mark.parametrize(
    "kind",
    ["covariance", "correlation", "precision", "partial correlation"],
)
def test_connectivity_measure_check_inverse_transformation(kind, signals):
    """inverse_transform undoes transform, vectorized or not."""
    # without vectorization: input matrices are returned with no change
    conn_measure = ConnectivityMeasure(kind=kind)
    connectivities = conn_measure.fit_transform(signals)

    assert_array_almost_equal(
        conn_measure.inverse_transform(connectivities), connectivities
    )

    # with vectorization: input vectors are reshaped into matrices
    # if diagonal has not been discarded
    conn_measure = ConnectivityMeasure(kind=kind, vectorize=True)
    vectorized = conn_measure.fit_transform(signals)

    assert_array_almost_equal(
        conn_measure.inverse_transform(vectorized), connectivities
    )
@pytest.mark.parametrize(
    "kind",
    ["covariance", "correlation", "precision", "partial correlation"],
)
def test_connectivity_measure_check_inverse_transformation_discard_diag(
    kind, signals
):
    """inverse_transform behavior when the diagonal has been discarded."""
    connectivities = ConnectivityMeasure(kind=kind).fit_transform(signals)
    conn_measure = ConnectivityMeasure(
        kind=kind, vectorize=True, discard_diagonal=True
    )
    vectorized = conn_measure.fit_transform(signals)

    if kind in ["correlation", "partial correlation"]:
        # The unit diagonal can be reconstructed implicitly.
        assert_array_almost_equal(
            conn_measure.inverse_transform(vectorized),
            connectivities,
        )
    elif kind in ["covariance", "precision"]:
        # The diagonal must be supplied explicitly.
        diagonal = np.array(
            [np.diagonal(conn) / sqrt(2) for conn in connectivities]
        )
        reconstructed = conn_measure.inverse_transform(
            vectorized, diagonal=diagonal
        )

        assert_array_almost_equal(reconstructed, connectivities)
        with pytest.raises(
            ValueError, match="cannot reconstruct connectivity matrices"
        ):
            conn_measure.inverse_transform(vectorized)
def test_connectivity_measure_inverse_transform_tangent(signals):
    """For 'tangent' kind, covariance matrices are reconstructed."""
    # Without vectorization
    tangent_measure = ConnectivityMeasure(kind="tangent")
    displacements = tangent_measure.fit_transform(signals)
    covariances = ConnectivityMeasure(kind="covariance").fit_transform(signals)

    assert_array_almost_equal(
        tangent_measure.inverse_transform(displacements), covariances
    )

    # With vectorization, when the diagonal has not been discarded
    tangent_measure = ConnectivityMeasure(kind="tangent", vectorize=True)
    vectorized_displacements = tangent_measure.fit_transform(signals)

    assert_array_almost_equal(
        tangent_measure.inverse_transform(vectorized_displacements),
        covariances,
    )

    # When the diagonal has been discarded, it must be passed back in.
    tangent_measure = ConnectivityMeasure(
        kind="tangent", vectorize=True, discard_diagonal=True
    )
    vectorized_displacements = tangent_measure.fit_transform(signals)

    diagonal = np.array(
        [np.diagonal(matrix) / sqrt(2) for matrix in displacements]
    )
    reconstructed = tangent_measure.inverse_transform(
        vectorized_displacements, diagonal=diagonal
    )

    assert_array_almost_equal(reconstructed, covariances)
    with pytest.raises(
        ValueError, match="cannot reconstruct connectivity matrices"
    ):
        tangent_measure.inverse_transform(vectorized_displacements)
def test_confounds_connectome_measure():
    """Confound removal makes vectorized connectomes orthogonal to them."""
    n_subjects = 10

    signals, confounds = _signals(n_subjects)

    correlation_measure = ConnectivityMeasure(
        kind="correlation", vectorize=True
    )

    # Clean confounds on 10 subjects with confounds filtered to 10 subjects in
    # length
    cleaned_vectors = correlation_measure.fit_transform(
        signals, confounds=confounds[:10]
    )

    # Cleaned vectors are orthogonal to the confounds.
    zero_matrix = np.zeros((confounds.shape[1], cleaned_vectors.shape[1]))
    assert_array_almost_equal(
        np.dot(confounds[:10].T, cleaned_vectors), zero_matrix
    )
    assert isinstance(cleaned_vectors, np.ndarray)

    # Confounds as pandas DataFrame
    correlation_measure.fit_transform(
        signals, confounds=DataFrame(confounds[:10])
    )
def test_confounds_connectome_measure_errors(signals):
    """Invalid confounds inputs raise explicit errors."""
    # NOTE(review): the `signals` fixture argument is immediately shadowed
    # below because confounds are also needed; kept for compatibility.
    signals, confounds = _signals()

    # Raising error for input confounds that are not iterable
    conn_measure = ConnectivityMeasure(vectorize=True)
    msg = "'confounds' input argument must be an iterable"

    with pytest.raises(ValueError, match=msg):
        conn_measure._check_input(X=signals, confounds=1.0)

    with pytest.raises(ValueError, match=msg):
        conn_measure._fit_transform(
            X=signals, do_fit=True, do_transform=True, confounds=1.0
        )

    with pytest.raises(ValueError, match=msg):
        conn_measure.fit_transform(X=signals, y=None, confounds=1.0)

    # Raising error when confounds are given but vectorize is False
    conn_measure = ConnectivityMeasure(vectorize=False)
    with pytest.raises(
        ValueError, match="'confounds' are provided but vectorize=False"
    ):
        conn_measure.fit_transform(signals, None, confounds[:10])
def test_connectivity_measure_standardize(signals):
    """Check warning is raised and then suppressed with setting standardize."""
    match = "default strategy for standardize"

    with pytest.deprecated_call(match=match):
        ConnectivityMeasure(kind="correlation").fit_transform(signals)

    with warnings.catch_warnings(record=True) as record:
        ConnectivityMeasure(
            kind="correlation", standardize="zscore_sample"
        ).fit_transform(signals)
    for m in record:
        # BUG FIX: `m.message` is a Warning instance, not a str, so the
        # original `match not in m.message` would raise TypeError whenever
        # any warning was recorded; compare against its string form.
        assert match not in str(m.message)