Coverage for nilearn/decomposition/tests/test_decomposition_estimators.py: 0%
115 statements
coverage.py v7.9.1, created at 2025-06-20 10:58 +0200
1"""Common test for multi_pca, dict_learning, canica."""
3import numpy as np
4import pytest
5from numpy.testing import assert_array_equal, assert_raises
6from sklearn.utils.estimator_checks import parametrize_with_checks
8from nilearn._utils.estimator_checks import (
9 check_estimator,
10 nilearn_check_estimator,
11 return_expected_failed_checks,
12)
13from nilearn._utils.tags import SKLEARN_LT_1_6
14from nilearn._utils.testing import write_imgs_to_path
15from nilearn.decomposition import CanICA, DictLearning
16from nilearn.decomposition._base import _BaseDecomposition
17from nilearn.decomposition._multi_pca import _MultiPCA
18from nilearn.decomposition.tests.conftest import (
19 N_SAMPLES,
20 N_SUBJECTS,
21 RANDOM_STATE,
22 check_decomposition_estimator,
23)

ESTIMATORS_TO_CHECK = [
    _MultiPCA(),
    DictLearning(),
    CanICA(),
    _BaseDecomposition(),
]

if SKLEARN_LT_1_6:

    @pytest.mark.parametrize(
        "estimator, check, name",
        check_estimator(estimators=ESTIMATORS_TO_CHECK),
    )
    def test_check_estimator_sklearn_valid(estimator, check, name):  # noqa: ARG001
        """Check compliance with sklearn estimators."""
        check(estimator)

    @pytest.mark.xfail(reason="invalid checks should fail")
    @pytest.mark.parametrize(
        "estimator, check, name",
        check_estimator(estimators=ESTIMATORS_TO_CHECK, valid=False),
    )
    def test_check_estimator_sklearn_invalid(estimator, check, name):  # noqa: ARG001
        """Check that invalid sklearn checks fail as expected."""
        check(estimator)

else:

    @parametrize_with_checks(
        estimators=ESTIMATORS_TO_CHECK,
        expected_failed_checks=return_expected_failed_checks,
    )
    def test_check_estimator_sklearn(estimator, check):
        """Check compliance with sklearn estimators."""
        check(estimator)


@pytest.mark.parametrize(
    "estimator, check, name",
    nilearn_check_estimator(estimators=ESTIMATORS_TO_CHECK),
)
def test_check_estimator_nilearn(estimator, check, name):  # noqa: ARG001
    """Check compliance with nilearn estimator rules."""
    check(estimator)


@pytest.mark.parametrize("estimator", [CanICA, _MultiPCA, DictLearning])
@pytest.mark.parametrize("data_type", ["nifti", "surface"])
def test_fit_errors(data_type, decomposition_images, estimator):
    """Fit and transform fail without the proper arguments."""
    est = estimator(
        smoothing_fwhm=None,
    )

    # Check that an error is raised when an empty list is provided.
    with pytest.raises(
        ValueError,
        match="Need one or more Niimg-like or SurfaceImage objects as input, "
        "an empty list was given.",
    ):
        est.fit([])

    # No mask provided
    est = estimator(
        smoothing_fwhm=None,
    )
    # The default mask computation strategy 'epi' results in an empty mask
    # for these nifti images.
    if data_type == "nifti":
        with pytest.raises(
            ValueError, match="The mask is invalid as it is empty"
        ):
            est.fit(decomposition_images)
    # With surface images, the mask encompasses all vertices, so it should
    # have as many True vertices as there are vertices in the input images.
    elif data_type == "surface":
        est.fit(decomposition_images)
        assert (
            est.masker_.n_elements_ == decomposition_images[0].mesh.n_vertices
        )
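

# Illustrative sketch (an assumption, not part of the original suite): passing
# an explicit mask sidesteps the empty-mask error seen above for nifti data.
# It reuses the conftest fixtures already used elsewhere in this module.
@pytest.mark.parametrize("estimator", [CanICA, _MultiPCA, DictLearning])
@pytest.mark.parametrize("data_type", ["nifti", "surface"])
def test_fit_with_explicit_mask_sketch(
    data_type, decomposition_images, decomposition_mask_img, estimator
):
    """Sketch: fit succeeds when an explicit mask is provided."""
    est = estimator(
        n_components=3,
        mask=decomposition_mask_img,
        random_state=RANDOM_STATE,
        smoothing_fwhm=None,
    )
    est.fit(decomposition_images)
    check_decomposition_estimator(est, data_type)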


@pytest.mark.parametrize("estimator", [CanICA, _MultiPCA, DictLearning])
@pytest.mark.parametrize("data_type", ["nifti", "surface"])
def test_masker_attributes_with_fit(
    data_type,
    canica_data,
    decomposition_mask_img,
    decomposition_masker,
    estimator,
):
    """Check mask_img_ is properly set when passing mask_img or a masker."""
    # Passing a mask image
    est = estimator(
        n_components=3,
        mask=decomposition_mask_img,
        random_state=RANDOM_STATE,
        smoothing_fwhm=None,
    )
    est.fit(canica_data)

    check_decomposition_estimator(est, data_type)

    # Passing a masker
    est = estimator(
        n_components=3,
        mask=decomposition_masker,
        random_state=RANDOM_STATE,
        smoothing_fwhm=None,
    )
    est.fit(canica_data)

    check_decomposition_estimator(est, data_type)


@pytest.mark.parametrize("estimator", [CanICA, _MultiPCA, DictLearning])
@pytest.mark.parametrize("data_type", ["nifti", "surface"])
def test_pass_masker_arg_to_estimator(
    data_type, affine_eye, decomposition_img, estimator
):
    """Masker arguments are passed to the estimator without fail."""
    shape = (
        decomposition_img.shape[:3]
        if data_type == "nifti"
        else (decomposition_img.mesh.n_vertices,)
    )
    est = estimator(
        target_affine=affine_eye,
        target_shape=shape,
        n_components=3,
        mask_strategy="background",
        random_state=RANDOM_STATE,
        smoothing_fwhm=None,
    )

    # For surface images we should get a warning about target_affine,
    # target_shape and mask_strategy being ignored.
    if data_type == "surface":
        with pytest.warns(
            UserWarning, match="The following parameters are not relevant"
        ):
            est.fit(decomposition_img)
    elif data_type == "nifti":
        est.fit(decomposition_img)

    check_decomposition_estimator(est, data_type)


@pytest.mark.timeout(0)
@pytest.mark.parametrize("estimator", [CanICA, _MultiPCA, DictLearning])
@pytest.mark.parametrize("data_type", ["nifti"])
def test_nifti_maps_masker_(canica_data_single_img, estimator):
    """Check deprecation of nifti_maps_masker_."""
    est = estimator()

    est.fit(canica_data_single_img)

    with pytest.deprecated_call(
        match="The 'nifti_maps_masker_' attribute is deprecated"
    ):
        est.nifti_maps_masker_  # noqa: B018


# TODO passing confounds does not affect output with CanICA, DictLearning
# @pytest.mark.parametrize("estimator", [CanICA, _MultiPCA, DictLearning])
@pytest.mark.parametrize("estimator", [_MultiPCA])
@pytest.mark.parametrize("data_type", ["nifti", "surface"])
def test_with_confounds(
    data_type, decomposition_images, decomposition_mask_img, estimator
):
    """Test estimator with confounds.

    Output should be different with and without confounds.
    """
    # One confounds array of shape (N_SAMPLES, 2) per subject.
    confounds = [np.arange(N_SAMPLES * 2).reshape(N_SAMPLES, 2)] * N_SUBJECTS

    est = estimator(
        n_components=3,
        random_state=RANDOM_STATE,
        mask=decomposition_mask_img,
        smoothing_fwhm=None,
    )

    est.fit(decomposition_images)

    check_decomposition_estimator(est, data_type)

    components = est.components_

    est = estimator(
        n_components=3, random_state=RANDOM_STATE, mask=decomposition_mask_img
    )
    est.fit(decomposition_images, confounds=confounds)

    components_clean = est.components_

    assert_raises(
        AssertionError, assert_array_equal, components, components_clean
    )
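

# Sketch (an assumption, not part of the original suite): confounds passed to
# fit() are given as one 2-D array per subject, shaped
# (n_timepoints, n_confounds). This helper mirrors the test above with random
# confounds instead of a deterministic ramp.
def _fit_with_dummy_confounds(estimator_class, imgs, mask_img, n_confounds=2):
    """Fit an estimator with one random confounds array per subject."""
    rng = np.random.default_rng(0)  # fixed seed for reproducibility
    confounds = [
        rng.standard_normal((N_SAMPLES, n_confounds))
        for _ in range(N_SUBJECTS)
    ]
    est = estimator_class(
        n_components=3, random_state=RANDOM_STATE, mask=mask_img
    )
    est.fit(imgs, confounds=confounds)
    return est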


@pytest.mark.parametrize("estimator", [CanICA, _MultiPCA, DictLearning])
@pytest.mark.parametrize("data_type", ["nifti", "surface"])
def test_single_subject_score(canica_data_single_img, data_type, estimator):
    """Check content of scores after fitting."""
    n_components = 3

    # Quick sanity check: the assertions below assume n_components < N_SAMPLES.
    assert n_components < N_SAMPLES

    est = estimator(
        n_components=n_components,
        random_state=RANDOM_STATE,
        smoothing_fwhm=None,
    )

    est.fit(canica_data_single_img)

    check_decomposition_estimator(est, data_type)

    # One score for all components
    scores = est.score(canica_data_single_img, per_component=False)

    assert isinstance(scores, float)
    assert 0 <= scores <= 1

    # One score per component
    scores = est.score(canica_data_single_img, per_component=True)

    assert scores.shape == (n_components,)
    assert np.all(scores <= 1)
    assert np.all(scores >= 0)
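

# Sketch (an assumption, not part of the original suite): since score() reports
# a fraction of explained variance per component, the per-component scores can
# be used to rank the learned maps from most to least informative.
def _rank_components_by_score(fitted_estimator, imgs):
    """Return component indices ordered by decreasing score."""
    per_component = fitted_estimator.score(imgs, per_component=True)
    return np.argsort(per_component)[::-1]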


@pytest.mark.parametrize("estimator", [CanICA, _MultiPCA, DictLearning])
@pytest.mark.parametrize("data_type", ["nifti"])
def test_single_subject_file(
    data_type, canica_data_single_img, estimator, tmp_path
):
    """Test with a single-subject dataset given as a glob pattern or a path.

    Only for nifti, as surface images cannot be read from file here.
    """
    est = estimator(n_components=4, random_state=RANDOM_STATE)

    # globbing
    img = write_imgs_to_path(
        canica_data_single_img,
        file_path=tmp_path,
        create_files=True,
        use_wildcards=True,
    )
    est.fit(img)

    check_decomposition_estimator(est, data_type)

    # path
    tmp_file = tmp_path / "tmp.nii.gz"
    canica_data_single_img.to_filename(tmp_file)

    est.fit(tmp_file)

    check_decomposition_estimator(est, data_type)


@pytest.mark.timeout(0)
@pytest.mark.parametrize("estimator", [CanICA, _MultiPCA, DictLearning])
@pytest.mark.parametrize("data_type", ["nifti"])
@pytest.mark.parametrize("n_subjects", [1, 3])
def test_with_globbing_patterns(
    tmp_path,
    canica_data,
    data_type,
    estimator,
    n_subjects,  # noqa: ARG001
):
    """Check that estimators can work with files on disk.

    Only for nifti, as surface images cannot be read from file here.
    """
    est = estimator(n_components=3)

    est.fit(canica_data)

    img = write_imgs_to_path(
        *canica_data, file_path=tmp_path, create_files=True, use_wildcards=True
    )

    est.fit(img)

    check_decomposition_estimator(est, data_type)
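

# Illustrative usage sketch (an assumption, not part of the test suite): a
# minimal end-to-end CanICA run mirroring what the tests above exercise through
# fixtures. `func_imgs` is a hypothetical placeholder for a list of 4D
# Niimg-like images, one per subject.
def _canica_usage_sketch(func_imgs):
    """Fit CanICA and return component maps plus per-subject loadings."""
    canica = CanICA(n_components=3, random_state=0, smoothing_fwhm=None)
    canica.fit(func_imgs)
    # 4D image with one spatial map per component.
    components_img = canica.components_img_
    # One array per subject, projecting its time series onto the components.
    loadings = canica.transform(func_imgs)
    return components_img, loadings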