# nilearn/glm/tests/test_first_level.py
1"""Test related to first level model."""
3import itertools
4import shutil
5import string
6import unittest.mock
7import warnings
8from itertools import product
9from pathlib import Path
11import numpy as np
12import pandas as pd
13import pytest
14from nibabel import Nifti1Image, load
15from numpy.testing import (
16 assert_almost_equal,
17 assert_array_almost_equal,
18 assert_array_equal,
19 assert_array_less,
20)
21from sklearn.cluster import KMeans
22from sklearn.utils.estimator_checks import parametrize_with_checks
24from nilearn._utils.data_gen import (
25 add_metadata_to_bids_dataset,
26 basic_paradigm,
27 create_fake_bids_dataset,
28 generate_fake_fmri_data_and_design,
29 write_fake_fmri_data_and_design,
30)
31from nilearn._utils.estimator_checks import (
32 check_estimator,
33 nilearn_check_estimator,
34 return_expected_failed_checks,
35)
36from nilearn._utils.tags import SKLEARN_LT_1_6
37from nilearn.glm.contrasts import compute_fixed_effects
38from nilearn.glm.first_level import (
39 FirstLevelModel,
40 first_level_from_bids,
41 mean_scaling,
42 run_glm,
43)
44from nilearn.glm.first_level.design_matrix import (
45 check_design_matrix,
46 make_first_level_design_matrix,
47)
48from nilearn.glm.first_level.first_level import (
49 _check_length_match,
50 _check_run_tables,
51 _check_trial_type,
52 _list_valid_subjects,
53 _yule_walker,
54)
55from nilearn.glm.regression import ARModel, OLSModel
56from nilearn.image import get_data
57from nilearn.interfaces.bids import get_bids_files
58from nilearn.maskers import NiftiMasker, SurfaceMasker
59from nilearn.surface import SurfaceImage
60from nilearn.surface.utils import assert_polymesh_equal

ESTIMATORS_TO_CHECK = [FirstLevelModel()]

if SKLEARN_LT_1_6:

    @pytest.mark.parametrize(
        "estimator, check, name",
        check_estimator(estimators=ESTIMATORS_TO_CHECK),
    )
    def test_check_estimator_sklearn_valid(estimator, check, name):  # noqa: ARG001
        """Check compliance with sklearn estimators."""
        check(estimator)

    @pytest.mark.xfail(reason="invalid checks should fail")
    @pytest.mark.parametrize(
        "estimator, check, name",
        check_estimator(estimators=ESTIMATORS_TO_CHECK, valid=False),
    )
    def test_check_estimator_sklearn_invalid(estimator, check, name):  # noqa: ARG001
        """Check compliance with sklearn estimators."""
        check(estimator)

else:

    @parametrize_with_checks(
        estimators=ESTIMATORS_TO_CHECK,
        expected_failed_checks=return_expected_failed_checks,
    )
    def test_check_estimator_sklearn(estimator, check):
        """Check compliance with sklearn estimators."""
        check(estimator)


@pytest.mark.parametrize(
    "estimator, check, name",
    nilearn_check_estimator(estimators=ESTIMATORS_TO_CHECK),
)
def test_check_estimator_nilearn(estimator, check, name):  # noqa: ARG001
    """Check compliance with nilearn estimators rules."""
    check(estimator)


def test_glm_fit_invalid_mask_img(shape_4d_default):
    """Raise error when an invalid mask is passed to FirstLevelModel."""
    rk = 3
    mask, fmri_data, design_matrices = generate_fake_fmri_data_and_design(
        shapes=[shape_4d_default], rk=rk
    )

    # Give an unfitted NiftiMasker as mask_img and check that we get an error
    masker = NiftiMasker(mask)
    with pytest.raises(
        ValueError, match="NiftiMasker instance is not fitted yet."
    ):
        FirstLevelModel(mask_img=masker).fit(
            fmri_data[0], design_matrices=design_matrices[0]
        )

    # Give a fitted NiftiMasker with a None mask_img_ attribute
    # and check that the masker parameters are overridden by the
    # FirstLevelModel parameters
    masker.fit()
    masker.mask_img_ = None
    with pytest.warns(
        UserWarning,
        match=(
            "Overriding provided-default estimator parameters "
            "with provided masker parameters"
        ),
    ):
        FirstLevelModel(mask_img=masker).fit(
            fmri_data[0], design_matrices=design_matrices[0]
        )


def test_glm_fit_valid_mask_img(shape_4d_default):
    """Run fit on FLM with different valid masks."""
    rk = 3
    mask, fmri_data, design_matrices = generate_fake_fmri_data_and_design(
        shapes=[shape_4d_default], rk=rk
    )

    # Give a fitted NiftiMasker
    masker = NiftiMasker(mask)
    masker.fit()
    single_run_model = FirstLevelModel(mask_img=masker).fit(
        fmri_data[0], design_matrices=design_matrices[0]
    )
    assert single_run_model.masker_ == masker

    single_run_model = FirstLevelModel(mask_img=None).fit(
        fmri_data[0], design_matrices=design_matrices[0]
    )
    assert isinstance(single_run_model.masker_.mask_img_, Nifti1Image)

    single_run_model = FirstLevelModel(mask_img=mask).fit(
        fmri_data[0], design_matrices=design_matrices[0]
    )
    z1 = single_run_model.compute_contrast(np.eye(rk)[:1])
    assert isinstance(z1, Nifti1Image)


@pytest.mark.timeout(0)
def test_explicit_fixed_effects(shape_3d_default):
    """Test the fixed effects performed manually/explicitly."""
    shapes, rk = [(*shape_3d_default, 4), (*shape_3d_default, 5)], 3
    mask, fmri_data, design_matrices = generate_fake_fmri_data_and_design(
        shapes, rk=rk
    )
    contrast = np.eye(rk)[1]

    # run 1
    multi_run_model = FirstLevelModel(mask_img=mask).fit(
        fmri_data[0], design_matrices=design_matrices[:1]
    )
    dic1 = multi_run_model.compute_contrast(contrast, output_type="all")

    # run 2
    multi_run_model.fit(fmri_data[1], design_matrices=design_matrices[1:])
    dic2 = multi_run_model.compute_contrast(contrast, output_type="all")

    # fixed effects model
    multi_run_model.fit(fmri_data, design_matrices=design_matrices)
    fixed_fx_dic = multi_run_model.compute_contrast(
        contrast, output_type="all"
    )

    contrasts = [dic1["effect_size"], dic2["effect_size"]]
    variance = [dic1["effect_variance"], dic2["effect_variance"]]

    (fixed_fx_contrast, fixed_fx_variance, fixed_fx_stat, _) = (
        compute_fixed_effects(contrasts, variance, mask, return_z_score=True)
    )

    assert_almost_equal(
        get_data(fixed_fx_contrast), get_data(fixed_fx_dic["effect_size"])
    )
    assert_almost_equal(
        get_data(fixed_fx_variance), get_data(fixed_fx_dic["effect_variance"])
    )
    assert_almost_equal(
        get_data(fixed_fx_stat), get_data(fixed_fx_dic["stat"])
    )

    # ensure that an unbalanced number of effect size and variance images
    # raises an error
    with pytest.raises(
        ValueError,
        match=(
            "The number of contrast images .* differs "
            "from the number of variance images"
        ),
    ):
        compute_fixed_effects(
            contrasts * 2, variance, mask, return_z_score=True
        )

    # ensure that providing the wrong number of dofs raises an error
    with pytest.raises(
        ValueError, match="degrees of freedom .* differs .* contrast images"
    ):
        compute_fixed_effects(
            contrasts, variance, mask, dofs=[100], return_z_score=True
        )


@pytest.mark.timeout(0)
def test_explicit_fixed_effects_without_mask(shape_3d_default):
    """Test the fixed effects performed manually/explicitly with no mask."""
    shapes, rk = [(*shape_3d_default, 4), (*shape_3d_default, 5)], 3
    _, fmri_data, design_matrices = generate_fake_fmri_data_and_design(
        shapes, rk=rk
    )
    contrast = np.eye(rk)[1]

    # run 1
    multi_run_model = FirstLevelModel().fit(
        fmri_data[0], design_matrices=design_matrices[:1]
    )
    dic1 = multi_run_model.compute_contrast(contrast, output_type="all")

    # run 2
    multi_run_model.fit(fmri_data[1], design_matrices=design_matrices[1:])
    dic2 = multi_run_model.compute_contrast(contrast, output_type="all")

    # fixed effects model
    multi_run_model.fit(fmri_data, design_matrices=design_matrices)
    fixed_fx_dic = multi_run_model.compute_contrast(
        contrast, output_type="all"
    )

    contrasts = [dic1["effect_size"], dic2["effect_size"]]
    variance = [dic1["effect_variance"], dic2["effect_variance"]]

    # test without mask variable
    (
        fixed_fx_contrast,
        fixed_fx_variance,
        fixed_fx_stat,
        _,
    ) = compute_fixed_effects(contrasts, variance, return_z_score=True)
    assert_almost_equal(
        get_data(fixed_fx_contrast), get_data(fixed_fx_dic["effect_size"])
    )
    assert_almost_equal(
        get_data(fixed_fx_variance), get_data(fixed_fx_dic["effect_variance"])
    )
    assert_almost_equal(
        get_data(fixed_fx_stat), get_data(fixed_fx_dic["stat"])
    )


def test_high_level_glm_with_data(shape_3d_default):
    """High level test of GLM."""
    shapes, rk = [(*shape_3d_default, 5)], 3
    _, fmri_data, design_matrices = generate_fake_fmri_data_and_design(
        shapes, rk=rk
    )

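    # with mask_img=None the masker derives a whole-brain mask
    # from the data itself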
    multi_run_model = FirstLevelModel(mask_img=None).fit(
        fmri_data, design_matrices=design_matrices
    )
    n_voxels = get_data(multi_run_model.masker_.mask_img_).sum()
    z_image = multi_run_model.compute_contrast(np.eye(rk)[1])

    assert np.sum(get_data(z_image) != 0) == n_voxels
    assert get_data(z_image).std() < 3.0


def test_glm_target_shape_affine(shape_3d_default, affine_eye):
    """Check that target shape and affine are applied."""
    shapes, rk = [(*shape_3d_default, 5)], 3
    _, fmri_data, design_matrices = generate_fake_fmri_data_and_design(
        shapes, rk=rk
    )

    model_1 = FirstLevelModel(mask_img=None).fit(
        fmri_data, design_matrices=design_matrices
    )

    assert model_1.masker_.mask_img_.shape == shape_3d_default

    z_image = model_1.compute_contrast(np.eye(rk)[1])

    assert z_image.shape == shape_3d_default

    model_2 = FirstLevelModel(
        mask_img=None, target_shape=(10, 11, 12), target_affine=affine_eye
    ).fit(fmri_data, design_matrices=design_matrices)
    assert model_2.masker_.mask_img_.shape != shape_3d_default
    assert model_2.masker_.mask_img_.shape == (10, 11, 12)

    z_image = model_2.compute_contrast(np.eye(rk)[1])

    assert z_image.shape != shape_3d_default
    assert z_image.shape == (10, 11, 12)


def test_high_level_glm_with_data_with_mask(shape_3d_default):
    """Test GLM can be run with a mask."""
    shapes, rk = [(*shape_3d_default, 5)], 3
    mask, fmri_data, design_matrices = generate_fake_fmri_data_and_design(
        shapes, rk=rk
    )

    multi_run_model = FirstLevelModel(mask_img=mask).fit(
        fmri_data, design_matrices=design_matrices
    )

    z_image = multi_run_model.compute_contrast(
        np.eye(rk)[:2], output_type="z_score"
    )
    p_value = multi_run_model.compute_contrast(
        np.eye(rk)[:2], output_type="p_value"
    )
    stat_image = multi_run_model.compute_contrast(
        np.eye(rk)[:2], output_type="stat"
    )
    effect_image = multi_run_model.compute_contrast(
        np.eye(rk)[:2], output_type="effect_size"
    )
    variance_image = multi_run_model.compute_contrast(
        np.eye(rk)[:2], output_type="effect_variance"
    )

    assert_array_equal(get_data(z_image) == 0.0, get_data(mask) == 0.0)
    assert (get_data(variance_image)[get_data(mask) > 0] > 0.001).all()

    all_images = multi_run_model.compute_contrast(
        np.eye(rk)[:2], output_type="all"
    )

    assert_array_equal(get_data(all_images["z_score"]), get_data(z_image))
    assert_array_equal(get_data(all_images["p_value"]), get_data(p_value))
    assert_array_equal(get_data(all_images["stat"]), get_data(stat_image))
    assert_array_equal(
        get_data(all_images["effect_size"]), get_data(effect_image)
    )
    assert_array_equal(
        get_data(all_images["effect_variance"]), get_data(variance_image)
    )


def test_fmri_inputs_type_data_smoke(tmp_path, shape_4d_default):
    """Test processing of FMRI inputs with path, str or nifti for data."""
    mask_file, fmri_files, design_files = write_fake_fmri_data_and_design(
        shapes=[shape_4d_default], file_path=tmp_path
    )
    FirstLevelModel(mask_img=mask_file).fit(
        fmri_files[0], design_matrices=design_files[0]
    )
    FirstLevelModel(mask_img=mask_file).fit(
        [Path(fmri_files[0])], design_matrices=design_files[0]
    )
    FirstLevelModel(mask_img=mask_file).fit(
        load(fmri_files[0]), design_matrices=design_files[0]
    )


def test_fmri_inputs_type_design_matrices_smoke(tmp_path, shape_4d_default):
    """Test processing of FMRI inputs with path, str for design matrix."""
    mask_file, fmri_files, design_files = write_fake_fmri_data_and_design(
        shapes=[shape_4d_default], file_path=tmp_path
    )
    FirstLevelModel(mask_img=mask_file).fit(
        fmri_files[0], design_matrices=design_files[0]
    )
    FirstLevelModel(mask_img=mask_file).fit(
        fmri_files[0], design_matrices=[pd.read_csv(design_files[0], sep="\t")]
    )
    FirstLevelModel(mask_img=mask_file).fit(
        fmri_files[0], design_matrices=[Path(design_files[0])]
    )


def test_high_level_glm_with_paths(tmp_path, shape_3d_default):
    """Test GLM can be run with files."""
    shapes, rk = [(*shape_3d_default, 5)], 3
    mask_file, fmri_files, design_files = write_fake_fmri_data_and_design(
        shapes, rk, file_path=tmp_path
    )
    multi_run_model = FirstLevelModel(mask_img=None).fit(
        fmri_files, design_matrices=design_files
    )
    z_image = multi_run_model.compute_contrast(np.eye(rk)[1])

    assert_array_equal(z_image.affine, load(mask_file).affine)
    assert get_data(z_image).std() < 3.0


def test_high_level_glm_null_contrasts(shape_3d_default):
    """Test contrast computation is resilient to 0 values."""
    shapes, rk = [(*shape_3d_default, 5), (*shape_3d_default, 6)], 3
    _, fmri_data, design_matrices = generate_fake_fmri_data_and_design(
        shapes, rk
    )

    multi_run_model = FirstLevelModel(mask_img=None).fit(
        fmri_data, design_matrices=design_matrices
    )
    single_run_model = FirstLevelModel(mask_img=None).fit(
        fmri_data[0], design_matrices=design_matrices[0]
    )
    z1 = multi_run_model.compute_contrast(
        [np.eye(rk)[:1], np.zeros((1, rk))], output_type="stat"
    )
    z2 = single_run_model.compute_contrast(np.eye(rk)[:1], output_type="stat")

    np.testing.assert_almost_equal(get_data(z1), get_data(z2))


def test_high_level_glm_different_design_matrices():
    """Test can estimate a contrast when design matrices are different."""
    shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 19)), 3
    mask, fmri_data, design_matrices = generate_fake_fmri_data_and_design(
        shapes, rk
    )

    # add a column to the second design matrix
    design_matrices[1]["new"] = np.ones((19, 1))

    # Fit a glm with two runs and design matrices
    multi_run_model = FirstLevelModel(mask_img=mask).fit(
        fmri_data, design_matrices=design_matrices
    )
    z_joint = multi_run_model.compute_contrast(
        [np.eye(rk)[:1], np.eye(rk + 1)[:1]], output_type="effect_size"
    )
    assert z_joint.shape == (7, 8, 7)

    # compare the estimated effects to separately-fitted models
    model1 = FirstLevelModel(mask_img=mask).fit(
        fmri_data[0], design_matrices=design_matrices[0]
    )
    z1 = model1.compute_contrast(np.eye(rk)[:1], output_type="effect_size")
    model2 = FirstLevelModel(mask_img=mask).fit(
        fmri_data[1], design_matrices=design_matrices[1]
    )
    z2 = model2.compute_contrast(np.eye(rk + 1)[:1], output_type="effect_size")

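    # the joint (fixed-effects) estimate should equal the average
    # of the run-wise estimates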
    assert_almost_equal(get_data(z1) + get_data(z2), 2 * get_data(z_joint))


def test_high_level_glm_different_design_matrices_formulas():
    """Test can estimate a contrast via a formula when designs differ."""
    shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 19)), 3
    mask, fmri_data, design_matrices = generate_fake_fmri_data_and_design(
        shapes, rk
    )

    # make column names identical
    design_matrices[1].columns = design_matrices[0].columns
    # add a column to the second design matrix
    design_matrices[1]["new"] = np.ones((19, 1))

    # Fit a glm with two runs and design matrices
    multi_run_model = FirstLevelModel(mask_img=mask).fit(
        fmri_data, design_matrices=design_matrices
    )

    # Compute contrast with formulas
    cols_formula = tuple(design_matrices[0].columns[:2])
    formula = f"{cols_formula[0]}-{cols_formula[1]}"

    with pytest.warns(
        UserWarning, match="One contrast given, assuming it for all 2 runs"
    ):
        multi_run_model.compute_contrast(formula, output_type="effect_size")


def test_compute_contrast_num_contrasts(shape_4d_default):
    """Check error when computing contrast with invalid contrast matrix."""
    shapes, rk = [shape_4d_default, shape_4d_default, shape_4d_default], 3
    mask, fmri_data, design_matrices = generate_fake_fmri_data_and_design(
        shapes, rk
    )

    # Fit a glm with 3 runs and design matrices
    multi_run_model = FirstLevelModel(mask_img=mask).fit(
        fmri_data, design_matrices=design_matrices
    )

    # raise when n_contrast != n_runs | 1
    with pytest.raises(
        ValueError, match="2 contrasts given, while there are 3 runs."
    ):
        multi_run_model.compute_contrast([np.eye(rk)[1]] * 2)

    multi_run_model.compute_contrast([np.eye(rk)[1]] * 3)

    with pytest.warns(
        UserWarning, match="One contrast given, assuming it for all 3 runs"
    ):
        multi_run_model.compute_contrast([np.eye(rk)[1]])


def test_run_glm_ols(rng):
    """Test run_glm with Ordinary Least Squares case."""
    n, p, q = 33, 80, 10
    X, Y = rng.standard_normal(size=(p, q)), rng.standard_normal(size=(p, n))

    labels, results = run_glm(Y, X, "ols")

    assert_array_equal(labels, np.zeros(n))
    assert list(results.keys()) == [0.0]
    assert results[0.0].theta.shape == (q, n)
    assert_almost_equal(results[0.0].theta.mean(), 0, 1)
    assert_almost_equal(results[0.0].theta.var(), 1.0 / p, 1)
    assert isinstance(results[labels[0]].model, OLSModel)


def test_run_glm_ar1(rng):
    """Test run_glm with AR(1) noise model."""
    n, p, q = 33, 80, 10
    X, Y = rng.standard_normal(size=(p, q)), rng.standard_normal(size=(p, n))

    labels, results = run_glm(Y, X, "ar1")

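    # run_glm pools voxels by their (binned) AR coefficient estimates,
    # so more than one label/model is expected here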
    assert len(labels) == n
    assert len(results.keys()) > 1
    tmp = sum(val.theta.shape[1] for val in results.values())
    assert tmp == n
    assert results[labels[0]].model.order == 1
    assert isinstance(results[labels[0]].model, ARModel)


def test_run_glm_ar3(rng):
    """Test run_glm with AR(3) noise model."""
    n, p, q = 33, 80, 10
    X, Y = rng.standard_normal(size=(p, q)), rng.standard_normal(size=(p, n))

    labels_ar3, results_ar3 = run_glm(Y, X, "ar3", bins=10)

    assert len(labels_ar3) == n
    assert len(results_ar3.keys()) > 1
    tmp = sum(val.theta.shape[1] for val in results_ar3.values())
    assert tmp == n
    assert isinstance(results_ar3[labels_ar3[0]].model, ARModel)
    assert results_ar3[labels_ar3[0]].model.order == 3
    assert len(results_ar3[labels_ar3[0]].model.rho) == 3


def test_run_glm_errors(rng):
    """Check correct errors are thrown for nonsense noise model requests."""
    n, p, q = 33, 80, 10
    X, Y = rng.standard_normal(size=(p, q)), rng.standard_normal(size=(p, n))

    with pytest.raises(ValueError, match="AR order must be positive"):
        run_glm(Y, X, "ar0")
    match = (
        "AR order must be a positive integer specified as arN, "
        "where N is an integer."
    )
    with pytest.raises(ValueError, match=match):
        run_glm(Y, X, "arfoo")
    with pytest.raises(ValueError, match=match):
        run_glm(Y, X, "arr3")
    with pytest.raises(ValueError, match=match):
        run_glm(Y, X, "ar1.2")
    with pytest.raises(ValueError, match=match):
        run_glm(Y, X, "ar")
    with pytest.raises(ValueError, match="Acceptable noise models are "):
        run_glm(Y, X, "3ar")


@pytest.mark.parametrize(
    "ar_vals", [[-0.2], [-0.2, -0.5], [-0.2, -0.5, -0.7, -0.3]]
)
def test_glm_ar_estimates(rng, ar_vals):
    """Test that Yule-Walker AR fits are correct."""
    n, p, q = 1, 500, 2
    X_orig = rng.standard_normal((p, q))
    Y_orig = rng.standard_normal((p, n))

    ar_order = len(ar_vals)
    ar_arg = f"ar{ar_order}"

    X = X_orig.copy()
    Y = Y_orig.copy()

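    # impose the AR structure on the white noise:
    # Y[t] += sum_k ar_vals[k] * Y[t - 1 - k]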
    for idx, lag in itertools.product(range(1, len(Y)), range(ar_order)):
        Y[idx] += ar_vals[lag] * Y[idx - 1 - lag]

    # Test using run_glm
    labels, results = run_glm(Y, X, ar_arg, bins=100)

    assert len(labels) == n

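    # each label encodes the binned AR coefficient estimates,
    # joined by underscores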
    for lab in results:
        ar_estimate = lab.split("_")
        for lag in range(ar_order):
            assert_almost_equal(
                float(ar_estimate[lag]), ar_vals[lag], decimal=1
            )

    # Test using _yule_walker
    yw = _yule_walker(Y.T, ar_order)
    assert_almost_equal(yw[0], ar_vals, decimal=1)


def test_glm_ar_estimates_errors(rng):
    """Test Yule-Walker errors."""
    (n, p) = (1, 500)
    Y_orig = rng.standard_normal((p, n))

    with pytest.raises(TypeError, match="AR order must be an integer"):
        _yule_walker(Y_orig, 1.2)
    with pytest.raises(ValueError, match="AR order must be positive"):
        _yule_walker(Y_orig, 0)
    with pytest.raises(ValueError, match="AR order must be positive"):
        _yule_walker(Y_orig, -2)
    with pytest.raises(TypeError, match="at least 1 dim"):
        _yule_walker(np.array(0.0), 2)


@pytest.mark.parametrize("random_state", [3, np.random.RandomState(42)])
def test_glm_random_state(random_state):
    """Test that the random state is passed to run_glm."""
    rng = np.random.RandomState(42)
    n, p, q = 33, 80, 10
    X, Y = rng.standard_normal(size=(p, q)), rng.standard_normal(size=(p, n))

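    # spy on the KMeans constructor (used internally by run_glm to pool
    # AR coefficient estimates into bins) to check that random_state
    # is forwarded to it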
    with unittest.mock.patch.object(
        KMeans,
        "__init__",
        autospec=True,
        side_effect=KMeans.__init__,
    ) as spy_kmeans:
        run_glm(Y, X, "ar3", random_state=random_state)
        spy_kmeans.assert_called_once_with(
            unittest.mock.ANY,
            n_clusters=unittest.mock.ANY,
            n_init=unittest.mock.ANY,
            random_state=random_state,
        )


def test_scaling(rng):
    """Test the scaling function."""
    shape = (400, 10)
    u = rng.standard_normal(size=shape)
    mean = 100 * rng.uniform(size=shape[1]) + 1
    Y = u + mean
    Y_, mean_ = mean_scaling(Y)
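    # mean_scaling should return data centered on 0 over time
    # together with the estimated voxel-wise mean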
    assert_almost_equal(Y_.mean(0), 0, 5)
    assert_almost_equal(mean_, mean, 0)
    assert Y.std() > 1


def test_fmri_inputs_shape(shape_4d_default):
    """Test different types of fit inputs.

    - func_img as list of single nifti, des as a single dataframe
    - func_img as single nifti, des as a list of single dataframe
    - both as lists of single nifti and single dataframe
    - both as lists of 2 nifti and 2 dataframe
    """
    mask, func_img, design_matrices = generate_fake_fmri_data_and_design(
        shapes=[shape_4d_default]
    )
    func_img = func_img[0]
    design_matrices = design_matrices[0]

    FirstLevelModel(mask_img=mask).fit(
        [func_img], design_matrices=design_matrices
    )

    FirstLevelModel(mask_img=mask).fit(
        func_img, design_matrices=[design_matrices]
    )

    FirstLevelModel(mask_img=mask).fit(
        [func_img], design_matrices=[design_matrices]
    )

    FirstLevelModel(mask_img=mask).fit(
        [func_img, func_img],
        design_matrices=[design_matrices, design_matrices],
    )


def test_fmri_inputs_design_matrices_csv(tmp_path, shape_4d_default):
    """Test design_matrices can be passed as csv."""
    mask_file, fmri_files, design_files = write_fake_fmri_data_and_design(
        shapes=[shape_4d_default], file_path=tmp_path
    )
    fmri_files = fmri_files[0]
    design_files = Path(design_files[0])
    pd.read_csv(design_files, sep="\t").to_csv(
        design_files.with_suffix(".csv"), index=False
    )
    FirstLevelModel(mask_img=mask_file).fit(
        [fmri_files], design_matrices=design_files
    )


def test_fmri_inputs_events_type(tmp_path):
    """Check events can be dataframe or pathlike to CSV / TSV."""
    n_timepoints = 10
    shapes = ((3, 4, 5, n_timepoints),)
    mask_file, fmri_files, _ = write_fake_fmri_data_and_design(
        shapes, file_path=tmp_path
    )

    events = basic_paradigm()
    FirstLevelModel(mask_img=mask_file, t_r=2.0).fit(
        fmri_files[0], events=events
    )

    events_file = tmp_path / "tmp.tsv"
    events.to_csv(events_file, index=False, sep="\t")
    FirstLevelModel(mask_img=mask_file, t_r=2.0).fit(
        fmri_files[0], events=events_file
    )


def test_fmri_inputs_with_confounds():
    """Test with confounds and events."""
    n_timepoints = 10
    shapes = ((3, 4, 5, n_timepoints),)
    mask, fmri_data, _ = generate_fake_fmri_data_and_design(shapes)

    conf = pd.DataFrame([0] * n_timepoints, columns=["conf"])

    events = basic_paradigm()

    fmri_data = fmri_data[0]

    # Provide t_r, confounds, and events but no design matrix
    flm = FirstLevelModel(mask_img=mask, t_r=2.0).fit(
        fmri_data,
        confounds=conf,
        events=events,
    )
    assert "conf" in flm.design_matrices_[0]

    # lists are OK
    FirstLevelModel(mask_img=mask, t_r=2.0).fit(
        fmri_data,
        confounds=[conf],
        events=events,
    )

    # test with confounds as numpy array
    flm = FirstLevelModel(mask_img=mask, t_r=2.0).fit(
        fmri_data,
        confounds=conf.to_numpy(),
        events=events,
    )
    assert "confound_0" in flm.design_matrices_[0]

    flm = FirstLevelModel(mask_img=mask, t_r=2.0).fit(
        fmri_data,
        confounds=[conf.to_numpy()],
        events=events,
    )
    assert "confound_0" in flm.design_matrices_[0]


def test_fmri_inputs_confounds_ignored_with_design_matrix():
    """Test confounds with a design matrix.

    Confounds are ignored if a design matrix is passed.
    """
    n_timepoints = 10
    shapes = ((3, 4, 5, n_timepoints),)
    mask, fmri_data, design_matrices = generate_fake_fmri_data_and_design(
        shapes
    )

    conf = pd.DataFrame([0] * n_timepoints, columns=["conf"])

    fmri_data = fmri_data[0]

    design_matrices = design_matrices[0]
    n_col_in_design_matrices = len(design_matrices.columns)

    flm = FirstLevelModel(mask_img=mask).fit(
        fmri_data, confounds=conf, design_matrices=design_matrices
    )

    assert len(flm.design_matrices_[0].columns) == n_col_in_design_matrices


def test_fmri_inputs_errors(shape_4d_default):
    """Check that errors are raised when incompatible inputs are passed."""
    _, fmri_data, design_matrices = generate_fake_fmri_data_and_design(
        shapes=[shape_4d_default]
    )

    fmri_data = fmri_data[0]
    design_matrices = design_matrices[0]

    # test mismatch between the number of images and events files
    match = r"len\(run_imgs\) .* does not match len\(events\) .*"
    with pytest.raises(ValueError, match=match):
        FirstLevelModel(mask_img=None, t_r=2.0).fit(
            [fmri_data, fmri_data], design_matrices
        )
    with pytest.raises(ValueError, match=match):
        FirstLevelModel(mask_img=None, t_r=2.0).fit(
            fmri_data, [design_matrices, design_matrices]
        )

    # At least paradigms or design have to be given
    with pytest.raises(
        ValueError,
        match="events or design matrices must be provided",
    ):
        FirstLevelModel(mask_img=None).fit(fmri_data)

    # If paradigms are given
    # then both t_r and slice time ref are required
    match = (
        "t_r not given to FirstLevelModel object to compute design from events"
    )
    with pytest.raises(ValueError, match=match):
        FirstLevelModel(mask_img=None).fit(fmri_data, design_matrices)
    with pytest.raises(ValueError, match=match):
        FirstLevelModel(mask_img=None, slice_time_ref=0.0).fit(
            fmri_data, design_matrices
        )
    with pytest.raises(
        ValueError,
        match="The provided events data has no onset column.",
    ):
        FirstLevelModel(mask_img=None, t_r=1.0).fit(fmri_data, design_matrices)


@pytest.mark.parametrize(
    "to_ignore",
    [{"slice_time_ref": 0.5}, {"t_r": 2}, {"hrf_model": "fir"}],
)
def test_parameter_attributes_ignored_with_design_matrix(
    shape_4d_default, to_ignore
):
    """Warn that some parameters/attributes are ignored with a design matrix.

    Test that the warning is thrown if events are passed with a design
    matrix. Also test with non-default values for some attributes.
    """
    _, fmri_data, design_matrices = generate_fake_fmri_data_and_design(
        shapes=[shape_4d_default]
    )

    fmri_data = fmri_data[0]
    design_matrices = design_matrices[0]

    with warnings.catch_warnings(record=True) as warning_list:
        FirstLevelModel().fit([fmri_data], design_matrices=[design_matrices])
    assert not warning_list

    with pytest.warns(UserWarning, match="If design matrices are supplied"):
        FirstLevelModel().fit(
            [fmri_data],
            design_matrices=[design_matrices],
            events=basic_paradigm(),
        )

    with pytest.warns(UserWarning, match="If design matrices are supplied"):
        FirstLevelModel(**to_ignore).fit(
            [fmri_data], design_matrices=[design_matrices]
        )


def test_fmri_inputs_errors_confounds(shape_4d_default):
    """Raise errors when incompatible inputs and confounds are passed."""
    mask, fmri_data, design_matrices = generate_fake_fmri_data_and_design(
        shapes=[shape_4d_default]
    )

    conf = pd.DataFrame([0, 0])

    events = basic_paradigm()

    fmri_data = fmri_data[0]
    design_matrices = design_matrices[0]

    # confounds cannot be passed with design matrix
    with pytest.warns(UserWarning, match="If design matrices are supplied"):
        FirstLevelModel(mask_img=mask).fit(
            [fmri_data], design_matrices=[design_matrices], confounds=conf
        )

    # check that an error is raised if there is a
    # mismatch in the dimensions of the inputs
    with pytest.raises(ValueError, match="Rows in confounds does not match"):
        FirstLevelModel(mask_img=mask, t_r=2.0).fit(
            fmri_data, confounds=conf, events=events
        )

    # confounds rows do not match n_scans
    with pytest.raises(
        ValueError,
        match=(
            "Rows in confounds does not match n_scans in run_img at index 0."
        ),
    ):
        FirstLevelModel(mask_img=None, t_r=2.0).fit(
            fmri_data, design_matrices, conf
        )


def test_first_level_design_creation(shape_4d_default):
    """Check that the design matrix equals one built 'manually'."""
    mask, fmri_data, _ = generate_fake_fmri_data_and_design(
        shapes=[shape_4d_default]
    )

    t_r = 10
    slice_time_ref = 0.0
    drift_model = "polynomial"
    drift_order = 3

    model = FirstLevelModel(
        t_r=t_r,
        slice_time_ref=slice_time_ref,
        mask_img=mask,
        drift_model=drift_model,
        drift_order=drift_order,
    )
    events = basic_paradigm()
    model = model.fit(fmri_data[0], events)

    frame1, X1, names1 = check_design_matrix(model.design_matrices_[0])

    # check design computation is identical
    n_scans = get_data(fmri_data[0]).shape[3]
    start_time = slice_time_ref * t_r
    end_time = (n_scans - 1 + slice_time_ref) * t_r
    frame_times = np.linspace(start_time, end_time, n_scans)
    design = make_first_level_design_matrix(
        frame_times, events, drift_model=drift_model, drift_order=drift_order
    )

    frame2, X2, names2 = check_design_matrix(design)

    assert_array_equal(frame1, frame2)
    assert_array_equal(X1, X2)
    assert_array_equal(names1, names2)


def test_first_level_glm_computation(shape_4d_default):
    """Smoke test of FirstLevelModel.fit()."""
    mask, fmri_data, _ = generate_fake_fmri_data_and_design(
        shapes=[shape_4d_default]
    )
    # basic test based on basic_paradigm and glover hrf
    model = FirstLevelModel(
        t_r=10,
        slice_time_ref=0.0,
        mask_img=mask,
        drift_model="polynomial",
        drift_order=3,
        minimize_memory=False,
    )
    events = basic_paradigm()
    model.fit(fmri_data[0], events)


def test_first_level_glm_computation_with_memory_caching(shape_4d_default):
    """Smoke test of FirstLevelModel.fit() with memory caching."""
    mask, fmri_data, _ = generate_fake_fmri_data_and_design(
        shapes=[shape_4d_default]
    )
    # initialize FirstLevelModel with memory option enabled
    model = FirstLevelModel(
        t_r=10.0,
        slice_time_ref=0.0,
        mask_img=mask,
        drift_model="polynomial",
        drift_order=3,
        memory="nilearn_cache",
        memory_level=1,
        minimize_memory=False,
    )
    events = basic_paradigm()
    model.fit(fmri_data[0], events)


def test_first_level_from_bids_set_repetition_time_warnings(tmp_path):
    """Raise a warning when there is no bold.json file in the derivatives \
    and no TR value is passed as argument.

    create_fake_bids_dataset does not add JSON files in derivatives,
    so the TR value will be inferred from the raw.
    """
    bids_path = create_fake_bids_dataset(
        base_dir=tmp_path, n_sub=10, n_ses=1, tasks=["main"], n_runs=[1]
    )
    t_r = None
    warning_msg = "No bold.json .* BIDS"
    with pytest.warns(UserWarning, match=warning_msg):
        models, *_ = first_level_from_bids(
            dataset_path=str(tmp_path / bids_path),
            task_label="main",
            space_label="MNI",
            img_filters=[("desc", "preproc")],
            t_r=t_r,
            slice_time_ref=None,
            verbose=1,
        )

    # If no t_r is provided it is inferred from the raw dataset
    # create_fake_bids_dataset generates a dataset
    # with bold data with TR=1.5 secs
    expected_t_r = 1.5
    assert models[0].t_r == expected_t_r


@pytest.mark.parametrize(
    "t_r, error_type, error_msg",
    [
        ("not a number", TypeError, "must be a float"),
        (-1, ValueError, "positive"),
    ],
)
def test_first_level_from_bids_set_repetition_time_errors(
    tmp_path, t_r, error_type, error_msg
):
    """Throw errors for impossible values of TR."""
    bids_path = create_fake_bids_dataset(
        base_dir=tmp_path, n_sub=1, n_ses=1, tasks=["main"], n_runs=[1]
    )

    with pytest.raises(error_type, match=error_msg):
        first_level_from_bids(
            dataset_path=str(tmp_path / bids_path),
            task_label="main",
            space_label="MNI",
            img_filters=[("desc", "preproc")],
            slice_time_ref=None,
            t_r=t_r,
        )


def test_first_level_from_bids_set_slice_timing_ref_warnings(tmp_path):
    """Check that a warning is raised when slice_time_ref is not provided \
    and cannot be inferred from the dataset.

    In this case the model should be created with a slice_time_ref of 0.0.
    """
    bids_path = create_fake_bids_dataset(
        base_dir=tmp_path, n_sub=10, n_ses=1, tasks=["main"], n_runs=[1]
    )

    slice_time_ref = None
    warning_msg = "not provided and cannot be inferred"
    with pytest.warns(UserWarning, match=warning_msg):
        models, *_ = first_level_from_bids(
            dataset_path=str(tmp_path / bids_path),
            task_label="main",
            space_label="MNI",
            img_filters=[("desc", "preproc")],
            slice_time_ref=slice_time_ref,
        )

    expected_slice_time_ref = 0.0
    assert models[0].slice_time_ref == expected_slice_time_ref


@pytest.mark.parametrize(
    "slice_time_ref, error_type, error_msg",
    [
        ("not a number", TypeError, "must be a float"),
        (2, ValueError, "between 0 and 1"),
    ],
)
def test_first_level_from_bids_set_slice_timing_ref_errors(
    tmp_path, slice_time_ref, error_type, error_msg
):
    """Throw errors for impossible values of slice_time_ref."""
    bids_path = create_fake_bids_dataset(
        base_dir=tmp_path, n_sub=1, n_ses=1, tasks=["main"], n_runs=[1]
    )

    with pytest.raises(error_type, match=error_msg):
        first_level_from_bids(
            dataset_path=str(tmp_path / bids_path),
            task_label="main",
            space_label="MNI",
            img_filters=[("desc", "preproc")],
            slice_time_ref=slice_time_ref,
        )


def test_first_level_from_bids_get_metadata_from_derivatives(tmp_path):
    """No warning should be thrown given derivatives have metadata.

    The model created should use the values found in the derivatives.
    """
    bids_path = create_fake_bids_dataset(
        base_dir=tmp_path, n_sub=10, n_ses=1, tasks=["main"], n_runs=[1]
    )

    RepetitionTime = 6.0
    StartTime = 2.0
    add_metadata_to_bids_dataset(
        bids_path=tmp_path / bids_path,
        metadata={"RepetitionTime": RepetitionTime, "StartTime": StartTime},
    )
    with warnings.catch_warnings():
        warnings.simplefilter("error")
        models, *_ = first_level_from_bids(
            dataset_path=str(tmp_path / bids_path),
            task_label="main",
            space_label="MNI",
            img_filters=[("desc", "preproc")],
            slice_time_ref=None,
        )
    assert models[0].t_r == RepetitionTime
    assert models[0].slice_time_ref == StartTime / RepetitionTime


def test_first_level_from_bids_get_repetition_time_from_derivatives(tmp_path):
    """Only RepetitionTime is provided in derivatives.

    Warn about the missing StartTime in derivatives.
    slice_time_ref cannot be inferred: defaults to 0.
    """
    bids_path = create_fake_bids_dataset(
        base_dir=tmp_path, n_sub=10, n_ses=1, tasks=["main"], n_runs=[1]
    )
    RepetitionTime = 6.0
    add_metadata_to_bids_dataset(
        bids_path=tmp_path / bids_path,
        metadata={"RepetitionTime": RepetitionTime},
    )

    with pytest.warns(UserWarning, match="StartTime' not found in file"):
        models, *_ = first_level_from_bids(
            dataset_path=str(tmp_path / bids_path),
            task_label="main",
            space_label="MNI",
            slice_time_ref=None,
            img_filters=[("desc", "preproc")],
        )
    assert models[0].t_r == 6.0
    assert models[0].slice_time_ref == 0.0


def test_first_level_from_bids_get_start_time_from_derivatives(tmp_path):
    """Only StartTime is provided in derivatives.

    Warn about the missing RepetitionTime in derivatives;
    RepetitionTime is still read from the raw dataset.
    """
    bids_path = create_fake_bids_dataset(
        base_dir=tmp_path, n_sub=10, n_ses=1, tasks=["main"], n_runs=[1]
    )
    StartTime = 1.0
    add_metadata_to_bids_dataset(
        bids_path=tmp_path / bids_path, metadata={"StartTime": StartTime}
    )

    with pytest.warns(UserWarning, match="RepetitionTime' not found in file"):
        models, *_ = first_level_from_bids(
            dataset_path=str(tmp_path / bids_path),
            task_label="main",
            space_label="MNI",
            img_filters=[("desc", "preproc")],
            slice_time_ref=None,
        )

    # create_fake_bids_dataset generates a dataset
    # with bold data with TR=1.5 secs
    assert models[0].t_r == 1.5
    assert models[0].slice_time_ref == StartTime / 1.5


def test_first_level_contrast_computation():
    """Check contrast computation."""
    shapes = ((7, 8, 9, 10),)
    mask, fmri_data, _ = generate_fake_fmri_data_and_design(shapes)

    # Ordinary Least Squares case
    model = FirstLevelModel(
        t_r=10.0,
        slice_time_ref=0.0,
        mask_img=mask,
        drift_model="polynomial",
        drift_order=3,
        minimize_memory=False,
    )
    c1, c2, cnull = np.eye(7)[0], np.eye(7)[1], np.zeros(7)

    # fit model
    # basic test based on basic_paradigm and glover hrf
    events = basic_paradigm()
    model = model.fit([fmri_data[0], fmri_data[0]], [events, events])

    # smoke test for different contrasts in fixed effects
    model.compute_contrast([c1, c2])

    # smoke test for same contrast in fixed effects
    model.compute_contrast([c2, c2])

    # smoke test for contrast that will be repeated
    model.compute_contrast(c2)
    model.compute_contrast(c2, "F")
    model.compute_contrast(c2, "t", "z_score")
    model.compute_contrast(c2, "t", "stat")
    model.compute_contrast(c2, "t", "p_value")
    model.compute_contrast(c2, None, "effect_size")
    model.compute_contrast(c2, None, "effect_variance")

    # formula should work (passing variable name directly)
    model.compute_contrast("c0")
    model.compute_contrast("c1")
    model.compute_contrast("c2")

    # smoke test for one null contrast in group
    model.compute_contrast([c2, cnull])


def test_first_level_contrast_computation_errors(shape_4d_default):
    """Test errors of FirstLevelModel.compute_contrast()."""
    mask, fmri_data, _ = generate_fake_fmri_data_and_design(
        shapes=[shape_4d_default]
    )

    # Ordinary Least Squares case
    model = FirstLevelModel(
        t_r=10.0,
        slice_time_ref=0.0,
        mask_img=mask,
        drift_model="polynomial",
        drift_order=3,
        minimize_memory=False,
    )
    c1, cnull = np.eye(7)[0], np.zeros(7)

    # asking for contrast before model fit gives error
    with pytest.raises(ValueError, match="not fitted yet"):
        model.compute_contrast(c1)

    # fit model
    # basic test based on basic_paradigm and glover hrf
    events = basic_paradigm()
    model = model.fit([fmri_data[0], fmri_data[0]], [events, events])

    # Check that an error is raised for invalid contrast_def
    with pytest.raises(
        ValueError, match="contrast_def must be an array or str or list"
    ):
        model.compute_contrast(37)

    # only passing null contrasts should give back a value error
    with pytest.raises(
        ValueError, match="All contrasts provided were null contrasts."
    ):
        model.compute_contrast(cnull)
    with pytest.raises(
        ValueError, match="All contrasts provided were null contrasts."
    ):
        model.compute_contrast([cnull, cnull])

    # passing wrong parameters
    match = ".* contrasts given, while there are .* runs."
    with pytest.raises(ValueError, match=match):
        model.compute_contrast([c1, c1, c1])
    with pytest.raises(ValueError, match=match):
        model.compute_contrast([])

    match = "output_type must be one of "
    with pytest.raises(ValueError, match=match):
        model.compute_contrast(c1, "", "")
    with pytest.raises(ValueError, match=match):
        model.compute_contrast(c1, "", [])

    with pytest.raises(
        ValueError,
        match="t contrasts cannot be empty",
    ):
        model.compute_contrast([c1, []])


def test_first_level_with_scaling(affine_eye):
    """Check running GLM with signal_scaling=0 and no standardization."""
    shapes, rk = [(3, 1, 1, 2)], 1
    fmri_data = [Nifti1Image(np.zeros((1, 1, 1, 2)) + 6, affine_eye)]
    design_matrices = [
        pd.DataFrame(
            np.ones((shapes[0][-1], rk)),
            columns=list(string.ascii_lowercase)[:rk],
        )
    ]
    fmri_glm = FirstLevelModel(
        mask_img=False,
        noise_model="ols",
        signal_scaling=0,
        minimize_memory=True,
    )
    assert fmri_glm.signal_scaling == 0
    assert not fmri_glm.standardize

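    # a model rebuilt from get_params() should keep signal_scaling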
    glm_parameters = fmri_glm.get_params()
    test_glm = FirstLevelModel(**glm_parameters)
    fmri_glm = fmri_glm.fit(fmri_data, design_matrices=design_matrices)
    test_glm = test_glm.fit(fmri_data, design_matrices=design_matrices)

    assert glm_parameters["signal_scaling"] == 0


def test_first_level_with_no_signal_scaling(affine_eye):
    """Test to ensure that the FirstLevelModel works correctly \
    with signal_scaling=False.

    In particular, check that the derived theta is correct for a
    constant design matrix with a single-valued fmri image.
    """
    shapes, rk = [(3, 1, 1, 2)], 1
    design_matrices = [
        pd.DataFrame(
            np.ones((shapes[0][-1], rk)),
            columns=list(string.ascii_lowercase)[:rk],
        )
    ]
    fmri_data = [Nifti1Image(np.zeros((1, 1, 1, 2)) + 6, affine_eye)]

    # Check error with invalid signal_scaling values
    with pytest.raises(ValueError, match="signal_scaling must be"):
        flm = FirstLevelModel(
            mask_img=False, noise_model="ols", signal_scaling="foo"
        )
        flm.fit(fmri_data, design_matrices=design_matrices)

    first_level = FirstLevelModel(
        mask_img=False, noise_model="ols", signal_scaling=False
    )

    first_level.fit(fmri_data, design_matrices=design_matrices)
    # trivial test of signal_scaling value
    assert first_level.signal_scaling is False
    # assert that our design matrix has one constant
    assert first_level.design_matrices_[0].equals(
        pd.DataFrame([1.0, 1.0], columns=["a"])
    )
    # assert that we only have one theta
    # as there is only one voxel in our image
    assert first_level.results_[0][0].theta.shape == (1, 1)
    # assert that the theta is equal to the one voxel value
    assert_almost_equal(first_level.results_[0][0].theta[0, 0], 6.0, 2)


def test_first_level_residuals(shape_4d_default):
    """Check residuals properties."""
    mask, fmri_data, design_matrices = generate_fake_fmri_data_and_design(
        shapes=[shape_4d_default]
    )

    for design_matrix in design_matrices:
        design_matrix[design_matrix.columns[0]] = 1

    model = FirstLevelModel(
        mask_img=mask, minimize_memory=False, noise_model="ols"
    )

    model.fit(fmri_data, design_matrices=design_matrices)

    residuals = model.residuals[0]
    mean_residuals = model.masker_.transform(residuals).mean(0)

    assert_array_almost_equal(mean_residuals, 0)


def test_first_level_residuals_errors(shape_4d_default):
    """Accessing residuals requires fit and minimize_memory set to False."""
    mask, fmri_data, design_matrices = generate_fake_fmri_data_and_design(
        shapes=[shape_4d_default]
    )

    for design_matrix in design_matrices:
        design_matrix[design_matrix.columns[0]] = 1

    # Check that voxelwise model attributes
    # cannot be accessed if minimize_memory is set to True
    model = FirstLevelModel(
        mask_img=mask, minimize_memory=True, noise_model="ols"
    )
    model.fit(fmri_data, design_matrices=design_matrices)

    with pytest.raises(ValueError, match="To access voxelwise attributes"):
        model.residuals[0]

    # Refit with minimize_memory=False
    # to check validation of the attribute name
    model = FirstLevelModel(
        mask_img=mask, minimize_memory=False, noise_model="ols"
    )

    model.fit(fmri_data, design_matrices=design_matrices)

    # For coverage
    with pytest.raises(ValueError, match="attribute must be one of"):
        model._get_element_wise_model_attribute("foo", True)


@pytest.mark.parametrize(
    "shapes",
    [
        [(10, 10, 10, 25)],
        [(10, 10, 10, 25), (10, 10, 10, 100)],
    ],
)
def test_get_element_wise_attributes_should_return_as_many_as_design_matrices(
    shapes,
):
    """Check _get_element_wise_model_attribute returns one output per run."""
    mask, fmri_data, design_matrices = generate_fake_fmri_data_and_design(
        shapes
    )

    for design_matrix in design_matrices:
        design_matrix[design_matrix.columns[0]] = 1

    model = FirstLevelModel(
        mask_img=mask, minimize_memory=False, noise_model="ols"
    )
    model.fit(fmri_data, design_matrices=design_matrices)

    assert len(
        model._get_element_wise_model_attribute("residuals", True)
    ) == len(shapes)


def test_first_level_predictions_r_square(shape_4d_default):
    """Check r_square gives sensible values."""
    mask, fmri_data, design_matrices = generate_fake_fmri_data_and_design(
        shapes=[shape_4d_default]
    )

    for design_matrix in design_matrices:
        design_matrix[design_matrix.columns[0]] = 1

    model = FirstLevelModel(
        mask_img=mask,
        signal_scaling=False,
        minimize_memory=False,
        noise_model="ols",
    )
    model.fit(fmri_data, design_matrices=design_matrices)

    pred = model.predicted[0]
    data = fmri_data[0]
    r_square_3d = model.r_square[0]

    y_predicted = model.masker_.transform(pred)
    y_measured = model.masker_.transform(data)

    assert_almost_equal(np.mean(y_predicted - y_measured), 0)

    r_square_2d = model.masker_.transform(r_square_3d)
    assert_array_less(0.0, r_square_2d)


@pytest.mark.parametrize(
    "hrf_model",
    [
        "spm",
        "spm + derivative",
        "glover",
        lambda t_r, ov: np.ones(int(t_r * ov)),
    ],
)
@pytest.mark.parametrize("spaces", [False, True])
def test_first_level_hrf_model(hrf_model, spaces, shape_4d_default):
    """Ensure that FirstLevelModel runs without raising errors \
    for different values of hrf_model.

    In particular, one checks that it runs
    without raising errors when given a custom response function.
    When :meth:`~nilearn.glm.first_level.FirstLevelModel.compute_contrast`
    is used, errors should be raised when event (i.e. condition) names are
    not valid identifiers.
    """
    mask, fmri_data, _ = generate_fake_fmri_data_and_design(
        shapes=[shape_4d_default]
    )

    events = basic_paradigm(condition_names_have_spaces=spaces)

    model = FirstLevelModel(t_r=2.0, mask_img=mask, hrf_model=hrf_model)

    model.fit(fmri_data, events)

    columns = model.design_matrices_[0].columns
    exp = f"{columns[0]}-{columns[1]}"
    try:
        model.compute_contrast(exp)
    except Exception:
        with pytest.raises(ValueError, match="invalid python identifiers"):
            model.compute_contrast(exp)


def test_glm_sample_mask(shape_4d_default):
    """Ensure the sample mask is performing correctly in GLM."""
    mask, fmri_data, design_matrix = generate_fake_fmri_data_and_design(
        [shape_4d_default]
    )
    model = FirstLevelModel(t_r=2.0, mask_img=mask, minimize_memory=False)
    # censor the first three volumes
    sample_mask = np.arange(shape_4d_default[3])[3:]
    model.fit(
        fmri_data, design_matrices=design_matrix, sample_masks=sample_mask
    )

    assert model.design_matrices_[0].shape[0] == shape_4d_default[3] - 3
    assert model.predicted[0].shape[-1] == shape_4d_default[3] - 3


def _inputs_for_new_bids_dataset():
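    """Return the parameters used to create the test BIDS datasets."""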
    n_sub = 2
    n_ses = 2
    tasks = ["main"]
    n_runs = [2]
    return n_sub, n_ses, tasks, n_runs


@pytest.fixture(scope="session")
def bids_dataset(tmp_path_factory):
    """Create a fake BIDS dataset for testing purposes.

    Only use if the dataset does not need to be modified.
    """
    base_dir = tmp_path_factory.mktemp("bids")
    n_sub, n_ses, tasks, n_runs = _inputs_for_new_bids_dataset()
    return create_fake_bids_dataset(
        base_dir=base_dir, n_sub=n_sub, n_ses=n_ses, tasks=tasks, n_runs=n_runs
    )


def _new_bids_dataset(base_dir=None):
    """Create a new BIDS dataset for testing purposes.

    Use if the dataset needs to be modified after creation.
    """
    if base_dir is None:
        base_dir = Path()
    n_sub, n_ses, tasks, n_runs = _inputs_for_new_bids_dataset()
    return create_fake_bids_dataset(
        base_dir=base_dir, n_sub=n_sub, n_ses=n_ses, tasks=tasks, n_runs=n_runs
    )


@pytest.mark.parametrize("n_runs", ([1, 0], [1, 2]))
@pytest.mark.parametrize("n_ses", [0, 2])
@pytest.mark.parametrize("task_index", [0, 1])
@pytest.mark.parametrize("space_label", ["MNI", "T1w"])
def test_first_level_from_bids(
    tmp_path, n_runs, n_ses, task_index, space_label
):
    """Test several BIDS structures."""
    n_sub = 2
    tasks = ["localizer", "main"]

    bids_path = create_fake_bids_dataset(
        base_dir=tmp_path, n_sub=n_sub, n_ses=n_ses, tasks=tasks, n_runs=n_runs
    )

    models, imgs, events, confounds = first_level_from_bids(
        dataset_path=bids_path,
        task_label=tasks[task_index],
        space_label=space_label,
        img_filters=[("desc", "preproc")],
        slice_time_ref=0.0,  # set to 0.0 to avoid warnings
    )

    _check_output_first_level_from_bids(n_sub, models, imgs, events, confounds)

    n_imgs_expected = n_ses * n_runs[task_index]

    # the run entity is dropped from filenames, and the session level
    # is dropped entirely, when their value is 0 at dataset creation
    no_run_entity = n_runs[task_index] <= 1
    no_session_level = n_ses <= 1

    if no_session_level:
        n_imgs_expected = 1 if no_run_entity else n_runs[task_index]
    elif no_run_entity:
        n_imgs_expected = n_ses

    assert len(imgs[0]) == n_imgs_expected


@pytest.mark.parametrize("slice_time_ref", [None, 0.0, 0.5, 1.0])
def test_first_level_from_bids_slice_time_ref(bids_dataset, slice_time_ref):
    """Test several valid values of slice_time_ref."""
    n_sub, *_ = _inputs_for_new_bids_dataset()
    models, imgs, events, confounds = first_level_from_bids(
        dataset_path=bids_dataset,
        task_label="main",
        space_label="MNI",
        img_filters=[("run", "01"), ("desc", "preproc")],
        slice_time_ref=slice_time_ref,
    )

    _check_output_first_level_from_bids(n_sub, models, imgs, events, confounds)


def test_first_level_from_bids_space_none(tmp_path):
    """Test behavior when no specific space is required.

    The function should look for images with MNI152NLin2009cAsym.
    """
    n_sub = 1
    bids_path = create_fake_bids_dataset(
        base_dir=tmp_path, n_sub=n_sub, spaces=["MNI152NLin2009cAsym"]
    )
    models, imgs, events, confounds = first_level_from_bids(
        dataset_path=bids_path,
        task_label="main",
        space_label=None,
        img_filters=[("run", "01"), ("desc", "preproc")],
        slice_time_ref=None,
    )

    _check_output_first_level_from_bids(n_sub, models, imgs, events, confounds)


def test_first_level_from_bids_select_one_run_per_session(bids_dataset):
    """Check that img_filters can select a single file per run per session."""
    n_sub, n_ses, *_ = _inputs_for_new_bids_dataset()

    models, imgs, events, confounds = first_level_from_bids(
        dataset_path=bids_dataset,
        task_label="main",
        space_label="MNI",
        img_filters=[("run", "01"), ("desc", "preproc")],
        slice_time_ref=0.0,  # set to 0.0 to avoid warnings
    )

    _check_output_first_level_from_bids(n_sub, models, imgs, events, confounds)

    n_imgs_expected = n_ses
    assert len(imgs[0]) == n_imgs_expected


def test_first_level_from_bids_select_all_runs_of_one_session(bids_dataset):
    """Check that img_filters can select all runs in a session."""
    n_sub, _, _, n_runs = _inputs_for_new_bids_dataset()

    models, imgs, events, confounds = first_level_from_bids(
        dataset_path=bids_dataset,
        task_label="main",
        space_label="MNI",
        img_filters=[("ses", "01"), ("desc", "preproc")],
        slice_time_ref=0.0,  # set to 0.0 to avoid warnings
    )

    _check_output_first_level_from_bids(n_sub, models, imgs, events, confounds)

    n_imgs_expected = n_runs[0]
    assert len(imgs[0]) == n_imgs_expected


def test_first_level_from_bids_smoke_test_for_verbose_argument(bids_dataset):
    """Test with verbose mode.

    verbose = 0 is the default, so it should be covered by other tests.
    """
    first_level_from_bids(
        dataset_path=bids_dataset,
        task_label="main",
        space_label="MNI",
        img_filters=[("desc", "preproc")],
        verbose=1,
        slice_time_ref=0.0,  # set to 0.0 to avoid warnings
    )


@pytest.mark.parametrize(
    "entity", ["acq", "ce", "dir", "rec", "echo", "res", "den"]
)
def test_first_level_from_bids_several_labels_per_entity(tmp_path, entity):
    """Correct files selected when an entity has several possible labels.

    Regression test for https://github.com/nilearn/nilearn/issues/3524
    """
    n_sub = 1
    n_ses = 1
    tasks = ["main"]
    n_runs = [1]

    bids_path = create_fake_bids_dataset(
        base_dir=tmp_path,
        n_sub=n_sub,
        n_ses=n_ses,
        tasks=tasks,
        n_runs=n_runs,
        entities={entity: ["A", "B"]},
    )

    models, imgs, events, confounds = first_level_from_bids(
        dataset_path=bids_path,
        task_label="main",
        space_label="MNI",
        img_filters=[("desc", "preproc"), (entity, "A")],
        slice_time_ref=0.0,  # set to 0.0 to avoid warnings
    )

    _check_output_first_level_from_bids(n_sub, models, imgs, events, confounds)
    n_imgs_expected = n_ses * n_runs[0]
    assert len(imgs[0]) == n_imgs_expected


def _check_output_first_level_from_bids(
    n_sub, models, imgs, events, confounds
):
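    """Check types and lengths of the outputs of first_level_from_bids."""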
    assert len(models) == n_sub
    assert all(isinstance(model, FirstLevelModel) for model in models)

    assert len(models) == len(imgs)
    for img_ in imgs:
        assert isinstance(img_, list)
        # We should only get lists of valid paths or lists of SurfaceImages
        if all(isinstance(x, str) for x in img_):
            assert all(Path(x).exists() for x in img_)
        else:
            assert all(isinstance(x, SurfaceImage) for x in img_)

    assert len(models) == len(events)
    for event_ in events:
        assert isinstance(event_, list)
        assert all(isinstance(x, pd.DataFrame) for x in event_)

    assert len(models) == len(confounds)
    for confound_ in confounds:
        assert isinstance(confound_, list)
        assert all(isinstance(x, pd.DataFrame) for x in confound_)


def test_first_level_from_bids_with_subject_labels(bids_dataset):
    """Test that the sub_labels argument works \
    with a proper warning for missing subjects.

    Check that the incorrect label `foo` raises a warning,
    but that we still get a model for the existing subject.
    """
    warning_message = "Subject label 'foo' is not present in*"
    with pytest.warns(UserWarning, match=warning_message):
        models, *_ = first_level_from_bids(
            dataset_path=bids_dataset,
            task_label="main",
            sub_labels=["foo", "01"],
            space_label="MNI",
            img_filters=[("desc", "preproc")],
            slice_time_ref=0.0,  # set to 0.0 to avoid warnings
        )

    assert models[0].subject_label == "01"


def test_first_level_from_bids_no_duplicate_sub_labels(bids_dataset):
    """Make sure that if a subject label is repeated, \
    only one model is created.

    See https://github.com/nilearn/nilearn/issues/3585
    """
    models, *_ = first_level_from_bids(
        dataset_path=bids_dataset,
        task_label="main",
        sub_labels=["01", "01"],
        space_label="MNI",
        img_filters=[("desc", "preproc")],
        slice_time_ref=0.0,  # set to 0.0 to avoid warnings
    )

    assert len(models) == 1
1792def test_first_level_from_bids_validation_input_dataset_path():
1793 """Raise error when dataset_path is invalid."""
1794 with pytest.raises(TypeError, match="must be a string or pathlike"):
1795 first_level_from_bids(
1796 dataset_path=2,
1797 task_label="main",
1798 space_label="MNI",
1799 slice_time_ref=0.0, # set to 0.0 to avoid warnings
1800 )
1801 with pytest.raises(ValueError, match="'dataset_path' does not exist"):
1802 first_level_from_bids(
1803 dataset_path="lolo",
1804 task_label="main",
1805 space_label="MNI",
1806 slice_time_ref=0.0, # set to 0.0 to avoid warnings
1807 )
1808 with pytest.raises(TypeError, match="derivatives_.* must be a string"):
1809 first_level_from_bids(
1810 dataset_path=Path(),
1811 task_label="main",
1812 space_label="MNI",
1813 derivatives_folder=1,
1814 slice_time_ref=0.0, # set to 0.0 to avoid warnings
1815 )
1818@pytest.mark.parametrize(
1819 "task_label, error_type",
1820 [(42, TypeError), ("$$$", ValueError)],
1821)
1822def test_first_level_from_bids_validation_task_label(
1823 bids_dataset, task_label, error_type
1824):
1825 """Raise error for invalid task_label."""
1826 with pytest.raises(error_type, match="All bids labels must be "):
1827 first_level_from_bids(
1828 dataset_path=bids_dataset, task_label=task_label, space_label="MNI"
1829 )
1832@pytest.mark.parametrize(
1833 "sub_labels, error_type, error_msg",
1834 [
1835 ("42", TypeError, "must be a list"),
1836 (["1", 1], TypeError, "must be string"),
1837 ([1], TypeError, "must be string"),
1838 ],
1839)
1840def test_first_level_from_bids_validation_sub_labels(
1841 bids_dataset, sub_labels, error_type, error_msg
1842):
1843 """Raise error for invalid sub_labels."""
1844 with pytest.raises(error_type, match=error_msg):
1845 first_level_from_bids(
1846 dataset_path=bids_dataset,
1847 task_label="main",
1848 sub_labels=sub_labels,
1849 slice_time_ref=0.0, # set to 0.0 to avoid warnings
1850 )
1853@pytest.mark.parametrize(
1854 "space_label, error_type",
1855 [(42, TypeError), ("$$$", ValueError)],
1856)
1857def test_first_level_from_bids_validation_space_label(
1858 bids_dataset, space_label, error_type
1859):
1860 """Raise error when space_label is invalid."""
1861 with pytest.raises(error_type, match="All bids labels must be "):
1862 first_level_from_bids(
1863 dataset_path=bids_dataset,
1864 task_label="main",
1865 space_label=space_label,
1866 slice_time_ref=0.0, # set to 0.0 to avoid warnings
1867 )
1870@pytest.mark.parametrize(
1871 "img_filters, error_type,match",
1872 [
1873 ("foo", TypeError, "'img_filters' must be a list"),
1874 ([(1, 2)], TypeError, "Filters in img"),
1875 ([("desc", "*/-")], ValueError, "bids labels must be alphanumeric."),
1876 ([("foo", "bar")], ValueError, "is not a possible filter."),
1877 ],
1878)
1879def test_first_level_from_bids_validation_img_filter(
1880 bids_dataset, img_filters, error_type, match
1881):
1882 """Raise error when img_filters is invalid."""
1883 with pytest.raises(error_type, match=match):
1884 first_level_from_bids(
1885 dataset_path=bids_dataset,
1886 task_label="main",
1887 img_filters=img_filters,
1888 slice_time_ref=0.0, # set to 0.0 to avoid warnings
1889 )
1892def test_first_level_from_bids_too_many_bold_files(bids_dataset):
1893 """Too many bold files if img_filters is underspecified, \
1894 should raise an error.
1896 Here there is a desc-preproc and desc-fmriprep image for the space-T1w.
1897 """
1898 with pytest.raises(ValueError, match="Too many images found"):
1899 first_level_from_bids(
1900 dataset_path=bids_dataset,
1901 task_label="main",
1902 space_label="T1w",
1903 slice_time_ref=0.0, # set to 0.0 to avoid warnings
1904 )
1907def test_first_level_from_bids_with_missing_events(tmp_path_factory):
1908 """All events.tsv files are missing, should raise an error."""
1909 bids_dataset = _new_bids_dataset(tmp_path_factory.mktemp("no_events"))
1910 events_files = get_bids_files(main_path=bids_dataset, file_tag="events")
1911 for f in events_files:
1912 Path(f).unlink()
1914 with pytest.raises(ValueError, match="No events.tsv files found"):
1915 first_level_from_bids(
1916 dataset_path=bids_dataset,
1917 task_label="main",
1918 space_label="MNI",
1919 slice_time_ref=0.0, # set to 0.0 to avoid warnings
1920 )
1923def test_first_level_from_bids_no_tr(tmp_path_factory):
1924 """Throw warning when t_r information cannot be inferred from the data \
1925 and t_r=None is passed.
1926 """
1927 bids_dataset = _new_bids_dataset(tmp_path_factory.mktemp("no_json"))
1928 json_files = get_bids_files(
1929 main_path=bids_dataset, file_tag="bold", file_type="json"
1930 )
1931 for f in json_files:
1932 Path(f).unlink()
1934 with pytest.warns(
1935 UserWarning, match="'t_r' not provided and cannot be inferred"
1936 ):
1937 first_level_from_bids(
1938 dataset_path=bids_dataset,
1939 task_label="main",
1940 space_label="MNI",
1941 slice_time_ref=0.0, # set to 0.0 to avoid warnings
1942 t_r=None,
1943 )
1946def test_first_level_from_bids_no_bold_file(tmp_path_factory):
1947 """Raise error when no bold file in BIDS dataset."""
1948 bids_dataset = _new_bids_dataset(tmp_path_factory.mktemp("no_bold"))
1949 imgs = get_bids_files(
1950 main_path=bids_dataset / "derivatives",
1951 file_tag="bold",
1952 file_type="*gz",
1953 )
1954 for img_ in imgs:
1955 Path(img_).unlink()
1957 with pytest.raises(ValueError, match="No BOLD files found "):
1958 first_level_from_bids(
1959 dataset_path=bids_dataset,
1960 task_label="main",
1961 space_label="MNI",
1962 slice_time_ref=0.0, # set to 0.0 to avoid warnings
1963 )
1966def test_first_level_from_bids_with_one_events_missing(tmp_path_factory):
1967 """Only one events.tsv file is missing, should raise an error."""
1968 bids_dataset = _new_bids_dataset(
1969 tmp_path_factory.mktemp("one_event_missing")
1970 )
1971 events_files = get_bids_files(main_path=bids_dataset, file_tag="events")
1972 Path(events_files[0]).unlink()
1974 with pytest.raises(ValueError, match="Same number of event files "):
1975 first_level_from_bids(
1976 dataset_path=bids_dataset,
1977 task_label="main",
1978 space_label="MNI",
1979 slice_time_ref=0.0, # set to 0.0 to avoid warnings
1980 )
1983def test_first_level_from_bids_one_confound_missing(tmp_path_factory):
1984 """There must be only one confound file per image or none.
1986 If only one is missing, it should raise an error.
1987 """
1988 bids_dataset = _new_bids_dataset(
1989 tmp_path_factory.mktemp("one_confound_missing")
1990 )
1991 confound_files = get_bids_files(
1992 main_path=bids_dataset / "derivatives",
1993 file_tag="desc-confounds_timeseries",
1994 )
1995 Path(confound_files[-1]).unlink()
1997 with pytest.raises(ValueError, match="Same number of confound"):
1998 first_level_from_bids(
1999 dataset_path=bids_dataset,
2000 task_label="main",
2001 space_label="MNI",
2002 slice_time_ref=0.0, # set to 0.0 to avoid warnings
2003 )
2006def test_first_level_from_bids_all_confounds_missing(tmp_path_factory):
2007 """If all confound files are missing, \
2008 confounds should be an array of None.
2009 """
2010 bids_dataset = _new_bids_dataset(tmp_path_factory.mktemp("no_confounds"))
2011 confound_files = get_bids_files(
2012 main_path=bids_dataset / "derivatives",
2013 file_tag="desc-confounds_timeseries",
2014 )
2015 for f in confound_files:
2016 Path(f).unlink()
2018 models, imgs, events, confounds = first_level_from_bids(
2019 dataset_path=bids_dataset,
2020 task_label="main",
2021 space_label="MNI",
2022 img_filters=[("desc", "preproc")],
2023 verbose=0,
2024 slice_time_ref=0.0, # set to 0.0 to avoid warnings
2025 )
2027 assert len(models) == len(imgs)
2028 assert len(models) == len(events)
2029 assert len(models) == len(confounds)
2030 for confound_ in confounds:
2031 assert confound_ is None
2034def test_first_level_from_bids_no_derivatives(tmp_path):
2035 """Raise error if the derivative folder does not exist."""
2036 bids_path = create_fake_bids_dataset(
2037 base_dir=tmp_path,
2038 n_sub=1,
2039 n_ses=1,
2040 tasks=["main"],
2041 n_runs=[1],
2042 with_derivatives=False,
2043 )
2044 with pytest.raises(ValueError, match="derivatives folder not found"):
2045 first_level_from_bids(
2046 dataset_path=bids_path,
2047 task_label="main",
2048 space_label="MNI",
2049 slice_time_ref=0.0, # set to 0.0 to avoid warnings
2050 )
2053def test_first_level_from_bids_no_session(tmp_path):
2054 """Check runs are not repeated when ses field is not used."""
2055 bids_path = create_fake_bids_dataset(
2056 base_dir=tmp_path, n_sub=3, n_ses=0, tasks=["main"], n_runs=[2]
2057 )
2058 # a "too many images" error due to the repeated run entity
2059 # (run in filenames but no ses) can arise
2060 # when desc or space is present but not specified in img_filters
2061 with pytest.raises(ValueError, match="Too many images found"):
2062 first_level_from_bids(
2063 dataset_path=bids_path,
2064 task_label="main",
2065 space_label="T1w",
2066 slice_time_ref=0.0, # set to 0.0 to avoid warnings
2067 )
2070def test_first_level_from_bids_mismatch_run_index(tmp_path_factory):
2071 """Test error when run index is zero padded in raw but not in derivatives.
2073 Regression test for https://github.com/nilearn/nilearn/issues/3029
2075 """
2076 bids_dataset = _new_bids_dataset(tmp_path_factory.mktemp("renamed_runs"))
2077 files_to_rename = (bids_dataset / "derivatives").glob(
2078 "**/func/*_task-main_*desc-*"
2079 )
2080 for file_ in files_to_rename:
2081 new_file = file_.parent / file_.name.replace("run-0", "run-")
2082 file_.rename(new_file)
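# e.g., a (hypothetical) "sub-01_task-main_run-01_desc-preproc_bold.nii.gz"
# becomes "sub-01_task-main_run-1_desc-preproc_bold.nii.gz": the derivatives
# no longer match the zero-padded "run-01" entity of the raw events.tsv files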
2084 with pytest.raises(ValueError, match=".*events.tsv files.*"):
2085 first_level_from_bids(
2086 dataset_path=bids_dataset,
2087 task_label="main",
2088 space_label="MNI",
2089 img_filters=[("desc", "preproc")],
2090 slice_time_ref=0.0, # set to 0.0 to avoid warnings
2091 )
2094def test_slice_time_ref_warning_only_when_not_provided(bids_dataset):
2095 """Catch warning when slice_time_ref is not provided."""
2096 with pytest.warns() as record:
2097 first_level_from_bids(
2098 dataset_path=bids_dataset,
2099 task_label="main",
2100 space_label="MNI",
2101 img_filters=[("desc", "preproc")],
2102 slice_time_ref=0.6,
2103 verbose=0,
2104 )
2106 # check that the 'slice_time_ref' warning was not raised
2107 for r in record:
2108 assert "'slice_time_ref' not provided" not in r.message.args[0]
2111def test_check_trial_type_warning(tmp_path):
2112 """Check that warning is thrown when an events file has no trial_type."""
2113 events = pd.DataFrame({"onset": [0, 1, 2], "duration": [1, 1, 1]})
2114 event_file = tmp_path / "events.tsv"
2115 events.to_csv(event_file, sep="\t", index=False)
2116 with pytest.warns(UserWarning, match="No column named 'trial_type' found"):
2117 _check_trial_type([event_file])
2120def test_list_valid_subjects_with_toplevel_files(tmp_path):
2121 """Test that only subject directories are returned, not file names."""
2122 (tmp_path / "sub-01").mkdir()
2123 (tmp_path / "sub-02").mkdir()
2124 (tmp_path / "sub-01.html").touch()
2126 valid_subjects = _list_valid_subjects(tmp_path, None)
2127 assert valid_subjects == ["01", "02"]
2130def test_missing_trial_type_column_warning(tmp_path_factory):
2131 """Check that warning is thrown when an events file has no trial_type.
2133 Ensure that the warning is thrown when running first_level_from_bids.
2134 """
2135 bids_dataset = _new_bids_dataset(
2136 tmp_path_factory.mktemp("one_event_missing")
2137 )
2138 events_files = get_bids_files(main_path=bids_dataset, file_tag="events")
2139 # remove trial type column from one events.tsv file
2140 events = pd.read_csv(events_files[0], sep="\t")
2141 events = events.drop(columns="trial_type")
2142 events.to_csv(events_files[0], sep="\t", index=False)
2144 with pytest.warns() as record:
2145 first_level_from_bids(
2146 dataset_path=bids_dataset,
2147 task_label="main",
2148 space_label="MNI",
2149 slice_time_ref=None,
2150 )
2151 assert any(
2152 "No column named 'trial_type' found" in r.message.args[0]
2153 for r in record
2154 )
2157def test_first_level_from_bids_load_confounds(tmp_path):
2158 """Test that only a subset of confounds can be loaded."""
2159 n_sub = 2
2161 bids_path = create_fake_bids_dataset(
2162 base_dir=tmp_path, n_sub=n_sub, n_ses=2, tasks=["main"], n_runs=[2]
2163 )
2165 _, _, _, confounds = first_level_from_bids(
2166 dataset_path=bids_path,
2167 task_label="main",
2168 space_label="MNI",
2169 img_filters=[("desc", "preproc")],
2170 )
2172 assert len(confounds[0][0].columns) == 189
2174 models, imgs, events, confounds = first_level_from_bids(
2175 dataset_path=bids_path,
2176 task_label="main",
2177 space_label="MNI",
2178 img_filters=[("desc", "preproc")],
2179 confounds_strategy=("motion", "wm_csf"),
2180 confounds_motion="full",
2181 confounds_wm_csf="basic",
2182 )
2184 _check_output_first_level_from_bids(n_sub, models, imgs, events, confounds)
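# expected column count: full motion = 2 types (rot/trans) x 3 directions
# x 2 (raw/derivative1) x 2 (raw/power2) = 24 columns,
# plus basic wm_csf (csf + white_matter): 24 + 2 = 26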
2186 assert len(confounds[0][0].columns) == 26
2188 assert all(x in confounds[0][0].columns for x in ["csf", "white_matter"])
2189 for dir, motion, der, power in product(
2190 ["x", "y", "z"],
2191 ["rot", "trans"],
2192 ["", "_derivative1"],
2193 ["", "_power2"],
2194 ):
2195 assert f"{motion}_{dir}{der}{power}" in confounds[0][0].columns
2198def test_first_level_from_bids_load_confounds_warnings(tmp_path):
2199 """Throw warning when incompatible confound loading strategy are used."""
2200 n_sub = 2
2202 bids_path = create_fake_bids_dataset(
2203 base_dir=tmp_path, n_sub=n_sub, n_ses=2, tasks=["main"], n_runs=[2]
2204 )
2206 # high pass is loaded from the confounds: no warning
2207 first_level_from_bids(
2208 dataset_path=bids_path,
2209 task_label="main",
2210 space_label="MNI",
2211 img_filters=[("desc", "preproc")],
2212 drift_model=None,
2213 confounds_strategy=("high_pass",),
2214 )
2216 with pytest.warns(
2217 UserWarning, match=("duplicate .*the cosine one used in the model.")
2218 ):
2219 # cosine loaded from confounds may duplicate
2220 # the one created during model specification
2221 first_level_from_bids(
2222 dataset_path=bids_path,
2223 task_label="main",
2224 space_label="MNI",
2225 img_filters=[("desc", "preproc")],
2226 drift_model="cosine",
2227 confounds_strategy=("high_pass",),
2228 )
2230 with pytest.warns(
2231 UserWarning, match=("conflict .*the polynomial one used in the model.")
2232 ):
2233 # cosine regressors loaded from confounds may conflict with
2234 # the polynomial drift created during model specification
2235 first_level_from_bids(
2236 dataset_path=bids_path,
2237 task_label="main",
2238 space_label="MNI",
2239 img_filters=[("desc", "preproc")],
2240 drift_model="polynomial",
2241 confounds_strategy=("high_pass",),
2242 )
2245def test_first_level_from_bids_no_subject(tmp_path):
2246 """Throw error when no subject found."""
2247 bids_path = create_fake_bids_dataset(
2248 base_dir=tmp_path, n_sub=1, n_ses=0, tasks=["main"], n_runs=[2]
2249 )
2250 shutil.rmtree(bids_path / "derivatives" / "sub-01")
2251 with pytest.raises(RuntimeError, match="No subject found in:"):
2252 first_level_from_bids(
2253 dataset_path=bids_path,
2254 task_label="main",
2255 space_label="MNI",
2256 slice_time_ref=0.0, # set to 0.0 to avoid warnings
2257 )
2260def test_first_level_from_bids_unused_kwargs(tmp_path):
2261 """Check that unused kwargs are properly handled."""
2262 bids_path = create_fake_bids_dataset(
2263 base_dir=tmp_path, n_sub=1, n_ses=1, tasks=["main"], n_runs=[2]
2264 )
2265 with pytest.raises(RuntimeError, match="Unknown keyword arguments"):
2266 # wrong kwarg name `confound_strategy`
2267 # instead of the correct `confounds_strategy`
2268 first_level_from_bids(
2269 dataset_path=bids_path,
2270 task_label="main",
2271 space_label="MNI",
2272 slice_time_ref=0.0, # set to 0.0 to avoid warnings
2273 confound_strategy="motion",
2274 )
2277def test_check_run_tables_errors():
2278 """Check high level wrapper keeps behavior."""
2279 with pytest.raises(ValueError, match="len.* does not match len.*"):
2280 _check_run_tables([""] * 2, [""], "")
2281 with pytest.raises(
2282 ValueError, match="Tables to load can only be TSV or CSV."
2283 ):
2284 _check_run_tables([""] * 2, [".csv", ".csv"], "")
2285 with pytest.raises(
2286 TypeError,
2287 match="can only be a pandas DataFrame, a Path object or a string",
2288 ):
2289 _check_run_tables([""] * 2, [[0], pd.DataFrame()], "")
2290 with pytest.raises(
2291 ValueError, match="Tables to load can only be TSV or CSV."
2292 ):
2293 _check_run_tables([""] * 2, [".csv", pd.DataFrame()], "")
2296def test_img_table_checks():
2297 """Check matching lengths."""
2298 with pytest.raises(ValueError, match="len.* does not match len.*"):
2299 _check_length_match([""] * 2, [""], "", "")
2302# -----------------------surface tests--------------------------------------- #
2305def test_flm_fit_surface_image_default_mask_img(surface_glm_data):
2306 """Test FirstLevelModel with mask_img default."""
2307 img, des = surface_glm_data(5)
2308 model = FirstLevelModel()
2309 model.fit(img, design_matrices=des)
2311 assert isinstance(model.masker_.mask_img_, SurfaceImage)
2312 assert model.masker_.mask_img_.shape == (9,)
2313 assert isinstance(model.masker_, SurfaceMasker)
2314 sum_mask = (
2315 model.masker_.mask_img_.data.parts["left"].sum()
2316 + model.masker_.mask_img_.data.parts["right"].sum()
2317 )
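# a binary mask summing to 9 over this 9-vertex mesh means the default
# mask keeps every vertex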
2318 assert sum_mask == 9
2321def test_flm_fit_surface_image(surface_glm_data):
2322 """Test FirstLevelModel with surface image and mask_img set to False."""
2323 img, des = surface_glm_data(5)
2324 model = FirstLevelModel(mask_img=False)
2325 model.fit(img, design_matrices=des)
2327 assert isinstance(model.masker_.mask_img_, SurfaceImage)
2328 assert model.masker_.mask_img_.shape == (9,)
2329 assert isinstance(model.masker_, SurfaceMasker)
2332def test_warn_flm_smooth_surface_image(surface_glm_data):
2333 """Test warning raised in FirstLevelModel with surface smoothing."""
2334 mini_img, des = surface_glm_data(5)
2335 model = FirstLevelModel(mask_img=False, smoothing_fwhm=5)
2336 with pytest.warns(
2337 UserWarning,
2338 match="Parameter smoothing_fwhm is not yet supported for surface data",
2339 ):
2340 model.fit(mini_img, design_matrices=des)
2343def test_flm_fit_surface_image_one_hemisphere(
2344 surface_glm_data, drop_surf_img_part
2345):
2346 """Test FirstLevelModel with surface image with one hemisphere."""
2347 img, des = surface_glm_data(5)
2348 mini_img_one_hemi = drop_surf_img_part(img)
2349 model = FirstLevelModel(mask_img=False)
2350 model.fit(mini_img_one_hemi, design_matrices=des)
2352 assert isinstance(model.masker_.mask_img_, SurfaceImage)
2353 assert model.masker_.mask_img_.shape == (4,)
2354 assert isinstance(model.masker_, SurfaceMasker)
2357@pytest.mark.parametrize("surf_mask_dim", [1, 2])
2358def test_flm_fit_surface_image_with_mask(
2359 surface_glm_data, surf_mask_dim, surf_mask_1d, surf_mask_2d
2360):
2361 """Test FirstLevelModel with surface mask."""
2362 surf_mask = surf_mask_1d if surf_mask_dim == 1 else surf_mask_2d()
2363 img, des = surface_glm_data(5)
2364 model = FirstLevelModel(mask_img=surf_mask)
2365 model.fit(img, design_matrices=des)
2367 assert isinstance(model.masker_.mask_img_, SurfaceImage)
2368 assert model.masker_.mask_img_.shape == (9,)
2369 assert isinstance(model.masker_, SurfaceMasker)
2372def test_error_flm_surface_mask_volume_image(
2373 surface_glm_data, surf_mask_1d, img_4d_rand_eye
2374):
2375 """Test error is raised when mask is a surface and data is in volume."""
2376 img, des = surface_glm_data(5)
2377 model = FirstLevelModel(mask_img=surf_mask_1d)
2378 with pytest.raises(
2379 TypeError, match="Mask and images to fit must be of compatible types."
2380 ):
2381 model.fit(img_4d_rand_eye, design_matrices=des)
2383 masker = SurfaceMasker().fit(img)
2384 model = FirstLevelModel(mask_img=masker)
2385 with pytest.raises(
2386 TypeError, match="Mask and images to fit must be of compatible types."
2387 ):
2388 model.fit(img_4d_rand_eye, design_matrices=des)
2391def test_error_flm_volume_mask_surface_image(surface_glm_data):
2392 """Test error is raised when mask is a volume and data is in surface."""
2393 shapes, rk = [(7, 8, 9, 15)], 3
2394 mask, _, _ = generate_fake_fmri_data_and_design(shapes, rk)
2396 img, des = surface_glm_data(5)
2397 model = FirstLevelModel(mask_img=mask)
2398 with pytest.raises(
2399 TypeError, match="Mask and images to fit must be of compatible types."
2400 ):
2401 model.fit(img, design_matrices=des)
2403 masker = NiftiMasker().fit(mask)
2404 model = FirstLevelModel(mask_img=masker)
2405 with pytest.raises(
2406 TypeError, match="Mask and images to fit must be of compatible types."
2407 ):
2408 model.fit(img, design_matrices=des)
2411def test_flm_with_surface_image_with_surface_masker(surface_glm_data):
2412 """Test FirstLevelModel with SurfaceMasker."""
2413 img, des = surface_glm_data(5)
2414 masker = SurfaceMasker().fit(img)
2415 model = FirstLevelModel(mask_img=masker)
2416 model.fit(img, design_matrices=des)
2418 assert isinstance(model.masker_.mask_img_, SurfaceImage)
2419 assert model.masker_.mask_img_.shape == (9,)
2420 assert isinstance(model.masker_, SurfaceMasker)
2423@pytest.mark.parametrize("surf_mask_dim", [1, 2])
2424def test_flm_with_surface_masker_with_mask(
2425 surface_glm_data, surf_mask_dim, surf_mask_1d, surf_mask_2d
2426):
2427 """Test FirstLevelModel with SurfaceMasker and mask image."""
2428 surf_mask = surf_mask_1d if surf_mask_dim == 1 else surf_mask_2d()
2429 img, des = surface_glm_data(5)
2430 masker = SurfaceMasker(mask_img=surf_mask).fit(img)
2431 model = FirstLevelModel(mask_img=masker)
2432 model.fit(img, design_matrices=des)
2434 assert isinstance(model.masker_.mask_img_, SurfaceImage)
2435 assert model.masker_.mask_img_.shape == (9,)
2436 assert isinstance(model.masker_, SurfaceMasker)
2439def test_flm_with_surface_data_no_design_matrix(surface_glm_data):
2440 """Smoke test FirstLevelModel with surface data and no design matrix."""
2441 img, _ = surface_glm_data(5)
2442 masker = SurfaceMasker().fit(img)
2443 model = FirstLevelModel(mask_img=masker, t_r=2.0)
2444 model.fit(img, events=basic_paradigm())
2447def test_flm_compute_contrast_with_surface_data(surface_glm_data):
2448 """Smoke test FirstLevelModel compute_contrast with surface data."""
2449 img, _ = surface_glm_data(5)
2450 masker = SurfaceMasker().fit(img)
2451 model = FirstLevelModel(mask_img=masker, t_r=2.0)
2452 events = basic_paradigm()
2453 model.fit([img, img], events=[events, events])
2454 result = model.compute_contrast("c0")
2456 assert isinstance(result, SurfaceImage)
2457 assert_polymesh_equal(img.mesh, result.mesh)
2460def test_flm_get_element_wise_model_attribute_with_surface_data(
2461 surface_glm_data,
2462):
2463 """Smoke test 'voxel wise' attribute with surface data."""
2464 img, _ = surface_glm_data(5)
2465 masker = SurfaceMasker().fit(img)
2466 model = FirstLevelModel(mask_img=masker, t_r=2.0, minimize_memory=False)
2467 events = basic_paradigm()
2468 model.fit([img, img], events=[events, events])
2470 assert len(model.residuals) == 2
2471 assert model.residuals[0].shape == img.shape
2472 assert len(model.predicted) == 2
2473 assert model.predicted[0].shape == img.shape
2474 assert len(model.r_square) == 2
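# r_square has a single column: one value per vertex, not one per time point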
2475 assert model.r_square[0].shape == (img.mesh.n_vertices, 1)
2478# -----------------------bids tests----------------------- #
2481def test_first_level_from_bids_subject_order(tmp_path):
2482 """Make sure subjects are returned in order.
2484 See https://github.com/nilearn/nilearn/issues/4581
2485 """
2486 n_sub = 10
2487 bids_path = create_fake_bids_dataset(
2488 base_dir=tmp_path, n_sub=n_sub, n_ses=1, tasks=["main"], n_runs=[1]
2489 )
2491 models, *_ = first_level_from_bids(
2492 dataset_path=str(tmp_path / bids_path),
2493 task_label="main",
2494 space_label="MNI",
2495 img_filters=[("desc", "preproc")],
2496 slice_time_ref=None,
2497 )
2499 # Check if the subjects are returned in order
2500 expected_subjects = [f"{label:02}" for label in range(1, n_sub + 1)]
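# i.e. ["01", "02", ..., "09", "10"] for n_sub = 10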
2501 returned_subjects = [model.subject_label for model in models]
2502 assert returned_subjects == expected_subjects
2505def test_first_level_from_bids_subject_order_with_labels(tmp_path):
2506 """Make sure subjects are returned in order.
2508 See https://github.com/nilearn/nilearn/issues/4581
2509 """
2510 n_sub = 10
2511 bids_path = create_fake_bids_dataset(
2512 base_dir=tmp_path, n_sub=n_sub, n_ses=1, tasks=["main"], n_runs=[1]
2513 )
2515 models, *_ = first_level_from_bids(
2516 dataset_path=str(tmp_path / bids_path),
2517 sub_labels=["01", "10", "04", "05", "02", "03"],
2518 task_label="main",
2519 space_label="MNI",
2520 img_filters=[("desc", "preproc")],
2521 slice_time_ref=None,
2522 )
2524 # Check if the subjects are returned in order
2525 expected_subjects = ["01", "02", "03", "04", "05", "10"]
2526 returned_subjects = [model.subject_label for model in models]
2527 assert returned_subjects == expected_subjects
2530def test_fixed_effect_contrast_surface(surface_glm_data):
2531 """Smoke test of compute_fixed_effects with surface data."""
2532 mini_img, _ = surface_glm_data(5)
2533 masker = SurfaceMasker().fit(mini_img)
2534 model = FirstLevelModel(mask_img=masker, t_r=2.0)
2535 events = basic_paradigm()
2536 model.fit([mini_img, mini_img], events=[events, events])
2537 result = model.compute_contrast("c0")
2539 assert isinstance(result, SurfaceImage)
2541 result = model.compute_contrast("c0", output_type="all")
2542 effect = result["effect_size"]
2543 variance = result["effect_variance"]
2544 surf_mask_ = masker.mask_img_
2545 for mask in [SurfaceMasker(mask_img=masker.mask_img_), surf_mask_, None]:
2546 outputs = compute_fixed_effects(
2547 [effect, effect],
2548 [variance, variance],
2549 mask=mask,
2550 return_z_score=True,
2551 )
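# with return_z_score=True, compute_fixed_effects is expected to return
# 4 maps: fixed-effects contrast, variance, stat, and z-score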
2552 assert len(outputs) == 4
2553 for output in outputs:
2554 assert isinstance(output, SurfaceImage)
2557def test_first_level_from_bids_surface(tmp_path):
2558 """Test finding and loading Surface data in BIDS dataset."""
2559 n_sub = 2
2560 tasks = ["main"]
2561 n_runs = [2]
2563 bids_path = create_fake_bids_dataset(
2564 base_dir=tmp_path,
2565 n_sub=n_sub,
2566 n_ses=0,
2567 tasks=tasks,
2568 n_runs=n_runs,
2569 n_vertices=10242,
2570 )
2572 models, imgs, events, confounds = first_level_from_bids(
2573 dataset_path=bids_path,
2574 task_label="main",
2575 space_label="fsaverage5",
2576 )
2578 _check_output_first_level_from_bids(n_sub, models, imgs, events, confounds)