1"""Test the second level model.""" 

2 

3from pathlib import Path 

4 

5import numpy as np 

6import pandas as pd 

7import pytest 

8from nibabel import Nifti1Image, load 

9from numpy.testing import ( 

10 assert_almost_equal, 

11 assert_array_almost_equal, 

12 assert_array_equal, 

13) 

14from scipy import stats 

15from sklearn.utils.estimator_checks import parametrize_with_checks 

16 

17from nilearn._utils import testing 

18from nilearn._utils.data_gen import ( 

19 generate_fake_fmri_data_and_design, 

20 write_fake_bold_img, 

21 write_fake_fmri_data_and_design, 

22) 

23from nilearn._utils.estimator_checks import ( 

24 check_estimator, 

25 nilearn_check_estimator, 

26 return_expected_failed_checks, 

27) 

28from nilearn._utils.tags import SKLEARN_LT_1_6 

29from nilearn.conftest import _shape_3d_default 

30from nilearn.glm.first_level import FirstLevelModel, run_glm 

31from nilearn.glm.second_level import SecondLevelModel, non_parametric_inference 

32from nilearn.glm.second_level.second_level import ( 

33 _check_confounds, 

34 _check_first_level_contrast, 

35 _check_input_as_first_level_model, 

36 _check_n_rows_desmat_vs_n_effect_maps, 

37 _check_output_type, 

38 _check_second_level_input, 

39 _infer_effect_maps, 

40 _process_second_level_input_as_dataframe, 

41 _process_second_level_input_as_firstlevelmodels, 

42 _sort_input_dataframe, 

43) 

44from nilearn.image import concat_imgs, get_data, new_img_like, smooth_img 

45from nilearn.maskers import NiftiMasker, SurfaceMasker 

46from nilearn.reporting import get_clusters_table 

47from nilearn.surface.utils import assert_surface_image_equal 

48 

49ESTIMATORS_TO_CHECK = [SecondLevelModel()] 

50 
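# Compliance checks against scikit-learn's estimator API. On scikit-learn
# < 1.6 nilearn's own check_estimator wrapper is used instead of
# parametrize_with_checks, presumably because the expected_failed_checks
# argument is only available in more recent scikit-learn releases.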

if SKLEARN_LT_1_6:

    @pytest.mark.parametrize(
        "estimator, check, name",
        check_estimator(estimators=ESTIMATORS_TO_CHECK),
    )
    def test_check_estimator_sklearn_valid(estimator, check, name):  # noqa: ARG001
        """Check compliance with sklearn estimators."""
        check(estimator)

    @pytest.mark.xfail(reason="invalid checks should fail")
    @pytest.mark.parametrize(
        "estimator, check, name",
        check_estimator(estimators=ESTIMATORS_TO_CHECK, valid=False),
    )
    def test_check_estimator_sklearn_invalid(estimator, check, name):  # noqa: ARG001
        """Check compliance with sklearn estimators."""
        check(estimator)

else:

    @parametrize_with_checks(
        estimators=ESTIMATORS_TO_CHECK,
        expected_failed_checks=return_expected_failed_checks,
    )
    def test_check_estimator_sklearn(estimator, check):
        """Check compliance with sklearn estimators."""
        check(estimator)


@pytest.mark.parametrize(
    "estimator, check, name",
    nilearn_check_estimator(estimators=ESTIMATORS_TO_CHECK),
)
def test_check_estimator_nilearn(estimator, check, name):  # noqa: ARG001
    """Check compliance with nilearn estimator rules."""
    check(estimator)

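# Module-level constants: a small permutation count and a single-volume
# default shape keep the permutation-based tests below reasonably fast.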

N_PERM = 5
SHAPE = (*_shape_3d_default(), 1)


@pytest.fixture
def input_df():
    """Input DataFrame for testing."""
    return pd.DataFrame(
        {
            "effects_map_path": ["foo.nii", "bar.nii", "baz.nii"],
            "subject_label": ["foo", "bar", "baz"],
        }
    )

def fake_fmri_data(shape=SHAPE):
    """Return a single fake functional volume and its mask image."""
    shapes = (shape,)
    mask, fmri_data, _ = generate_fake_fmri_data_and_design(shapes)
    return fmri_data[0], mask

def test_non_parametric_inference_with_flm_objects(shape_3d_default):
    """See https://github.com/nilearn/nilearn/issues/3579 ."""
    mask, fmri_data, design_matrices = generate_fake_fmri_data_and_design(
        shapes=[(*shape_3d_default, 15)]
    )

    masker = NiftiMasker(mask)
    masker.fit()
    single_run_model = FirstLevelModel(mask_img=masker).fit(
        fmri_data[0], design_matrices=design_matrices[0]
    )
    single_run_model.compute_contrast("x")

    second_level_input = [single_run_model, single_run_model]

    design_matrix = pd.DataFrame(
        [1] * len(second_level_input), columns=["intercept"]
    )

    non_parametric_inference(
        second_level_input=second_level_input,
        design_matrix=design_matrix,
        first_level_contrast="x",
        n_perm=N_PERM,
    )

def test_process_second_level_input_as_dataframe(input_df):
    """Unit tests for function _process_second_level_input_as_dataframe()."""
    sample_map, subjects_label = _process_second_level_input_as_dataframe(
        input_df
    )
    assert sample_map == "foo.nii"
    assert subjects_label == ["foo", "bar", "baz"]


def test_sort_input_dataframe(input_df):
    """Unit tests for function _sort_input_dataframe()."""
    output_df = _sort_input_dataframe(input_df)

    assert output_df["subject_label"].to_list() == [
        "bar",
        "baz",
        "foo",
    ]
    assert output_df["effects_map_path"].to_list() == [
        "bar.nii",
        "baz.nii",
        "foo.nii",
    ]

def test_second_level_input_as_3d_images(
    rng, affine_eye, tmp_path, shape_3d_default
):
    """Test second level model with a list of 3D image filenames as input.

    Should act as a regression test for:
    https://github.com/nilearn/nilearn/issues/3636

    """
    images = []
    n_subjects = 10
    for _ in range(n_subjects):
        data = rng.random(shape_3d_default)
        images.append(Nifti1Image(data, affine_eye))

    filenames = testing.write_imgs_to_path(
        *images, file_path=tmp_path, create_files=True
    )
    second_level_input = filenames
    design_matrix = pd.DataFrame(
        [1] * len(second_level_input), columns=["intercept"]
    )

    second_level_model = SecondLevelModel(smoothing_fwhm=8.0)
    second_level_model = second_level_model.fit(
        second_level_input,
        design_matrix=design_matrix,
    )

def test_process_second_level_input_as_firstlevelmodels(shape_4d_default):
    """Unit tests for function \
    _process_second_level_input_as_firstlevelmodels().
    """
    mask, fmri_data, design_matrices = generate_fake_fmri_data_and_design(
        shapes=[shape_4d_default]
    )
    list_of_flm = [
        FirstLevelModel(mask_img=mask, subject_label=f"sub-{i}").fit(
            fmri_data[0], design_matrices=design_matrices[0]
        )
        for i in range(3)
    ]
    (
        sample_map,
        subjects_label,
    ) = _process_second_level_input_as_firstlevelmodels(list_of_flm)

    assert subjects_label == [f"sub-{i}" for i in range(3)]
    assert isinstance(sample_map, Nifti1Image)
    assert sample_map.shape == shape_4d_default[:3]

def test_check_affine_first_level_models(affine_eye, shape_4d_default):
    """Check all FirstLevelModel have the same affine."""
    mask, fmri_data, design_matrices = generate_fake_fmri_data_and_design(
        shapes=[shape_4d_default]
    )
    list_of_flm = [
        FirstLevelModel(mask_img=mask, subject_label=f"sub-{i}").fit(
            fmri_data[0], design_matrices=design_matrices[0]
        )
        for i in range(3)
    ]
    # should pass
    _check_input_as_first_level_model(
        second_level_input=list_of_flm, none_confounds=False
    )

    # add a model with a different affine
    # should raise an error
    mask, fmri_data, design_matrices = generate_fake_fmri_data_and_design(
        shapes=[shape_4d_default], affine=affine_eye * 2
    )
    list_of_flm.append(
        FirstLevelModel(mask_img=mask, subject_label="sub-4").fit(
            fmri_data[0], design_matrices=design_matrices[0]
        )
    )

    with pytest.raises(
        ValueError, match="All first level models must have the same affine"
    ):
        _check_input_as_first_level_model(
            second_level_input=list_of_flm, none_confounds=False
        )

def test_check_shape_first_level_models(shape_4d_default):
    """Check all FirstLevelModel have the same shape."""
    mask, fmri_data, design_matrices = generate_fake_fmri_data_and_design(
        shapes=[shape_4d_default]
    )
    list_of_flm = [
        FirstLevelModel(mask_img=mask, subject_label=f"sub-{i}").fit(
            fmri_data[0], design_matrices=design_matrices[0]
        )
        for i in range(3)
    ]
    # should pass
    _check_input_as_first_level_model(
        second_level_input=list_of_flm, none_confounds=False
    )

    # add a model with a different shape
    # should raise an error
    mask, fmri_data, design_matrices = generate_fake_fmri_data_and_design(
        shapes=[(8, 9, 10, 15)]
    )
    list_of_flm.append(
        FirstLevelModel(mask_img=mask, subject_label="sub-4").fit(
            fmri_data[0], design_matrices=design_matrices[0]
        )
    )

    with pytest.raises(
        ValueError, match="All first level models must have the same shape"
    ):
        _check_input_as_first_level_model(
            second_level_input=list_of_flm, none_confounds=False
        )

def test_check_second_level_input(shape_4d_default):
    """Raise errors when wrong inputs are passed to SecondLevelModel."""
    with pytest.raises(TypeError, match="second_level_input must be"):
        _check_second_level_input(1, None)

    with pytest.raises(
        TypeError,
        match="A second level model requires a list with at "
        "least two first level models or niimgs",
    ):
        _check_second_level_input([FirstLevelModel()], pd.DataFrame())

    with pytest.raises(
        TypeError, match="Got object type <class 'int'> at idx 1"
    ):
        _check_second_level_input(["foo", 1], pd.DataFrame())

    mask, fmri_data, design_matrices = generate_fake_fmri_data_and_design(
        shapes=[shape_4d_default]
    )

    input_models = [
        FirstLevelModel(mask_img=mask).fit(
            fmri_data[0], design_matrices=design_matrices[0]
        )
    ]

    # duck-typed stand-in: carries the attributes of a fitted model
    # but has the wrong type, so it must be rejected
    obj = lambda: None  # noqa: E731
    obj.results_ = "foo"
    obj.labels_ = "bar"

    with pytest.raises(
        TypeError, match="Got object type <class 'function'> at idx 1"
    ):
        _check_second_level_input([*input_models, obj], pd.DataFrame())

def test_check_second_level_input_list_wrong_type():
    """Raise errors when wrong inputs are passed to SecondLevelModel.

    Integration test: slightly higher level test than those for
    _check_second_level_input.
    """
    model = SecondLevelModel()
    second_level_input = [1, 2]
    with pytest.raises(TypeError, match="second_level_input must be"):
        model.fit(second_level_input)


def test_check_second_level_input_unfit_model():
    with pytest.raises(
        ValueError, match="Model sub_1 at index 0 has not been fit yet"
    ):
        _check_second_level_input(
            [FirstLevelModel(subject_label=f"sub_{i}") for i in range(1, 3)],
            pd.DataFrame(),
        )

def test_check_second_level_input_dataframe():
    with pytest.raises(
        ValueError,
        match="'second_level_input' DataFrame must have columns "
        "'subject_label', 'map_name' and 'effects_map_path'",
    ):
        _check_second_level_input(
            pd.DataFrame(columns=["foo", "bar"]), pd.DataFrame()
        )

    with pytest.raises(
        ValueError, match="'subject_label' column must contain only strings"
    ):
        _check_second_level_input(
            pd.DataFrame(
                {
                    "subject_label": [1, 2],
                    "map_name": ["a", "b"],
                    "effects_map_path": ["c", "d"],
                }
            ),
            pd.DataFrame(),
        )

def test_check_second_level_input_confounds(shape_4d_default):
    mask, fmri_data, design_matrices = generate_fake_fmri_data_and_design(
        shapes=[shape_4d_default]
    )

    input_models = [
        FirstLevelModel(mask_img=mask).fit(
            fmri_data[0], design_matrices=design_matrices[0]
        )
    ]

    with pytest.raises(
        ValueError,
        match="In case confounds are provided, first level "
        "objects need to provide the attribute 'subject_label'",
    ):
        _check_second_level_input(
            input_models * 2, pd.DataFrame(), confounds=pd.DataFrame()
        )

def test_check_second_level_input_design_matrix(shape_4d_default):
    """Raise errors when no design matrix is passed to SecondLevelModel.

    When passing niimg-like objects.
    """
    _, fmri_data, _ = generate_fake_fmri_data_and_design(
        shapes=[shape_4d_default]
    )

    _check_second_level_input(fmri_data[0], pd.DataFrame())

    with pytest.raises(
        ValueError,
        match="List of niimgs as second_level_input "
        "require a design matrix to be provided",
    ):
        _check_second_level_input(fmri_data * 2, None)
    with pytest.raises(
        ValueError,
        match="List of niimgs as second_level_input "
        "require a design matrix to be provided",
    ):
        _check_second_level_input(fmri_data[0], None)

def test_check_output_type():
    _check_output_type(int, [str, int, float])
    with pytest.raises(ValueError, match="output_type must be one of"):
        _check_output_type("foo", [str, int, float])


def test_check_confounds():
    _check_confounds(None)  # Should not do anything
    with pytest.raises(
        ValueError, match="confounds must be a pandas DataFrame"
    ):
        _check_confounds("foo")
    with pytest.raises(
        ValueError, match="confounds DataFrame must contain column"
    ):
        _check_confounds(pd.DataFrame())
    with pytest.raises(
        ValueError, match="confounds should contain at least 2 columns"
    ):
        _check_confounds(pd.DataFrame(columns=["subject_label"]))
    with pytest.raises(
        ValueError, match="subject_label column must contain only strings"
    ):
        _check_confounds(
            pd.DataFrame(
                {"subject_label": [None, None, None], "conf": [4, 5, 6]}
            )
        )


def test_check_first_level_contrast():
    _check_first_level_contrast(["foo"], None)  # Should not do anything
    _check_first_level_contrast([FirstLevelModel()], "foo")
    with pytest.raises(ValueError, match="If second_level_input was a list"):
        _check_first_level_contrast([FirstLevelModel()], None)


def test_check_n_rows_desmat_vs_n_effect_maps():
    _check_n_rows_desmat_vs_n_effect_maps(
        [1, 2, 3], np.array([[1, 2], [3, 4], [5, 6]])
    )
    with pytest.raises(
        ValueError,
        match="design_matrix does not match the number of maps considered",
    ):
        _check_n_rows_desmat_vs_n_effect_maps(
            [1, 2], np.array([[1, 2], [3, 4], [5, 6]])
        )

def test_infer_effect_maps(tmp_path, shape_4d_default):
    """Check that the right input is inferred.

    second_level_input can for example be a list of images
    or a dataframe 'mapping' a string to an image.
    """
    rk = 3
    shapes = [SHAPE, shape_4d_default]
    mask_file, fmri_files, design_files = write_fake_fmri_data_and_design(
        shapes, rk=rk, file_path=tmp_path
    )
    second_level_input = pd.DataFrame(
        {"map_name": ["a", "b"], "effects_map_path": [fmri_files[0], "bar"]}
    )

    assert _infer_effect_maps(second_level_input, "a") == [fmri_files[0]]
    assert _infer_effect_maps([fmri_files[0]], None) == [fmri_files[0]]

    contrast = np.eye(rk)[1]
    second_level_input = [FirstLevelModel(mask_img=mask_file)] * 2
    for i, model in enumerate(second_level_input):
        model.fit(fmri_files[i], design_matrices=design_files[i])

    assert len(_infer_effect_maps(second_level_input, contrast)) == 2


def test_infer_effect_maps_error(tmp_path, shape_3d_default):
    """Check that an error is raised when an effect map cannot be inferred.

    For example, when the image referenced in the dataframe does not exist.
    """
    shapes = [(*shape_3d_default, 5), (*shape_3d_default, 6)]
    _, fmri_files, _ = write_fake_fmri_data_and_design(
        shapes, file_path=tmp_path
    )
    second_level_input = pd.DataFrame(
        {"map_name": ["a", "b"], "effects_map_path": [fmri_files[0], "bar"]}
    )
    with pytest.raises(ValueError, match="File not found: 'bar'"):
        _infer_effect_maps(second_level_input, "b")

def test_high_level_glm_with_paths(affine_eye):
    func_img, mask = fake_fmri_data()

    model = SecondLevelModel(mask_img=mask)

    # fit model
    Y = [func_img] * 4
    X = pd.DataFrame([[1]] * 4, columns=["intercept"])
    model = model.fit(Y, design_matrix=X)
    c1 = np.eye(len(model.design_matrix_.columns))[0]
    z_image = model.compute_contrast(c1, output_type="z_score")

    assert isinstance(z_image, Nifti1Image)
    assert_array_equal(z_image.affine, mask.affine)

    # try with target_shape
    target_shape = (10, 10, 10)
    target_affine = affine_eye
    target_affine[0, 3] = 1
    model = SecondLevelModel(
        mask_img=mask,
        target_shape=target_shape,
        target_affine=target_affine,
    )
    z_image = model.fit(Y, design_matrix=X).compute_contrast(c1)

    assert_array_equal(z_image.shape, target_shape)
    assert_array_equal(z_image.affine, target_affine)

def test_slm_4d_image(img_4d_mni):
    """Compute contrast with 4D images as input.

    See https://github.com/nilearn/nilearn/issues/3058
    """
    model = SecondLevelModel()
    Y = img_4d_mni
    X = pd.DataFrame([[1]] * img_4d_mni.shape[3], columns=["intercept"])
    model = model.fit(Y, design_matrix=X)
    c1 = np.eye(len(model.design_matrix_.columns))[0]
    model.compute_contrast(c1, output_type="z_score")


def test_high_level_glm_with_paths_errors():
    func_img, mask = fake_fmri_data()

    # fit model
    Y = [func_img] * 4
    X = pd.DataFrame([[1]] * 4, columns=["intercept"])

    # Provide a masker as mask_img
    masker = NiftiMasker(mask).fit()
    with pytest.warns(
        UserWarning,
        match=(
            "Overriding provided-default estimator parameters "
            "with provided masker parameters"
        ),
    ):
        SecondLevelModel(mask_img=masker, verbose=1).fit(Y, design_matrix=X)

@pytest.mark.timeout(0)
def test_high_level_non_parametric_inference_with_paths(tmp_path):
    mask_file, fmri_files, _ = write_fake_fmri_data_and_design(
        (SHAPE,), file_path=tmp_path
    )
    fmri_files = fmri_files[0]
    df_input = pd.DataFrame(
        {
            "subject_label": [f"sub-{i}" for i in range(4)],
            "effects_map_path": [fmri_files] * 4,
            "map_name": [fmri_files] * 4,
        }
    )
    func_img = load(fmri_files)
    Y = [func_img] * 4
    X = pd.DataFrame([[1]] * 4, columns=["intercept"])
    c1 = np.eye(len(X.columns))[0]
    neg_log_pvals_imgs = [
        non_parametric_inference(
            second_level_input,
            design_matrix=X,
            second_level_contrast=c1,
            first_level_contrast=fmri_files,
            mask=mask_file,
            n_perm=N_PERM,
            verbose=1,
        )
        for second_level_input in [Y, df_input]
    ]

    assert all(isinstance(img, Nifti1Image) for img in neg_log_pvals_imgs)
    for img in neg_log_pvals_imgs:
        assert_array_equal(img.affine, load(mask_file).affine)

    neg_log_pvals_list = [get_data(i) for i in neg_log_pvals_imgs]
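    # With N_PERM permutations the smallest attainable p-value is
    # 1 / (N_PERM + 1), so -log10(p) is bounded by -log10(1 / (N_PERM + 1)).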

    for neg_log_pvals in neg_log_pvals_list:
        assert np.all(neg_log_pvals <= -np.log10(1.0 / (N_PERM + 1)))
        assert np.all(neg_log_pvals >= 0)


def test_high_level_non_parametric_inference_with_paths_warning():
    func_img, mask = fake_fmri_data()
    Y = [func_img] * 4
    X = pd.DataFrame([[1]] * 4, columns=["intercept"])
    c1 = np.eye(len(X.columns))[0]

    masker = NiftiMasker(mask, smoothing_fwhm=2.0)
    with pytest.warns(
        UserWarning,
        match="Parameter 'smoothing_fwhm' of the masker overridden",
    ):
        non_parametric_inference(
            Y,
            design_matrix=X,
            second_level_contrast=c1,
            smoothing_fwhm=3.0,
            mask=masker,
            n_perm=N_PERM,
        )

@pytest.fixture
def confounds():
    """Return a confounds DataFrame with one value per subject label."""
    return pd.DataFrame(
        [["01", 1], ["02", 2], ["03", 3]],
        columns=["subject_label", "conf1"],
    )

def test_fmri_inputs(rng, confounds, shape_3d_default, shape_4d_default):
    """Test processing of fMRI inputs."""
    # prepare fake data
    mask, fmri_data, design_matrices = generate_fake_fmri_data_and_design(
        [shape_4d_default], rk=1
    )

    # prepare correct input first level models
    flm = FirstLevelModel(subject_label="01").fit(
        fmri_data, design_matrices=design_matrices
    )

    # prepare correct input dataframe and lists
    p, q = 80, 10
    X = rng.standard_normal(size=(p, q))
    sdes = pd.DataFrame(X[:3, :3], columns=["intercept", "b", "c"])

    # smoke tests with correct input
    flms = [flm, flm, flm]

    shape_3d = [(*shape_3d_default, 1)]
    _, fmri_data, _ = generate_fake_fmri_data_and_design(shape_3d)
    fmri_data = fmri_data[0]
    niimgs = [fmri_data, fmri_data, fmri_data]
    niimg_4d = concat_imgs(niimgs)

    # First level models as input
    SecondLevelModel(mask_img=mask).fit(flms)
    SecondLevelModel().fit(flms)
    # Note: the following fit creates a singular design matrix
    SecondLevelModel().fit(flms, confounds)
    SecondLevelModel().fit(flms, None, sdes)

    # niimgs as input
    SecondLevelModel().fit(niimgs, None, sdes)

    # 4d niimg as input
    SecondLevelModel().fit(niimg_4d, None, sdes)

def test_fmri_inputs_dataframes_as_input(tmp_path, rng, confounds):
    """Test processing of fMRI inputs passed as a dataframe."""
    # prepare fake data
    p, q = 80, 10
    X = rng.standard_normal(size=(p, q))

    # prepare correct input dataframe and lists
    _, fmri_files, _ = write_fake_fmri_data_and_design(
        (SHAPE,), file_path=tmp_path
    )
    fmri_files = fmri_files[0]

    sdes = pd.DataFrame(X[:3, :3], columns=["intercept", "b", "c"])

    # dataframes as input
    dfcols = ["subject_label", "map_name", "effects_map_path"]
    dfrows = [
        ["01", "a", fmri_files],
        ["02", "a", fmri_files],
        ["03", "a", fmri_files],
    ]
    niidf = pd.DataFrame(dfrows, columns=dfcols)

    SecondLevelModel().fit(niidf)
    SecondLevelModel().fit(niidf, confounds)
    SecondLevelModel().fit(niidf, confounds, sdes)
    SecondLevelModel().fit(niidf, None, sdes)

def test_fmri_pandas_series_as_input(tmp_path, rng):
    """Use pandas series of file paths as inputs."""
    # prepare correct input dataframe and lists
    p, q = 80, 10
    X = rng.standard_normal(size=(p, q))
    _, fmri_files, _ = write_fake_fmri_data_and_design(
        (SHAPE,), file_path=tmp_path
    )
    fmri_files = fmri_files[0]

    # dataframes as input
    sdes = pd.DataFrame(X[:3, :3], columns=["intercept", "b", "c"])
    niidf = pd.DataFrame({"filepaths": [fmri_files, fmri_files, fmri_files]})
    SecondLevelModel().fit(
        second_level_input=niidf["filepaths"],
        confounds=None,
        design_matrix=sdes,
    )

def test_fmri_inputs_pandas_errors():
    """Test wrong second level inputs."""
    # test wrong input for list and pandas requirements
    nii_img = ["01", "02", "03"]
    with pytest.raises(ValueError, match="File not found: "):
        SecondLevelModel().fit(nii_img)

    nii_series = pd.Series(nii_img)
    with pytest.raises(ValueError, match="File not found: "):
        SecondLevelModel().fit(nii_series)

    # test dataframe requirements
    dfcols = [
        "not_the_right_column_name",
    ]
    dfrows = [["01"], ["02"], ["03"]]
    niidf = pd.DataFrame(dfrows, columns=dfcols)
    with pytest.raises(
        ValueError,
        match=(
            "'second_level_input' DataFrame must have "
            "columns 'subject_label', 'map_name' and 'effects_map_path'."
        ),
    ):
        SecondLevelModel().fit(niidf)

def test_secondlevelmodel_fit_inputs_errors(confounds, shape_4d_default):
    """Raise the proper errors when invalid inputs are passed to fit."""
    # prepare fake data
    shapes = (shape_4d_default,)
    _, fmri_data, _ = generate_fake_fmri_data_and_design(shapes)
    fmri_data = fmri_data[0]
    n_samples = fmri_data.shape[-1]
    design_matrices = pd.DataFrame(np.ones((n_samples, 1)), columns=["a"])

    # prepare correct input first level models
    flm = FirstLevelModel(subject_label="01").fit(
        fmri_data, design_matrices=design_matrices
    )

    # test first level model requirements
    with pytest.raises(TypeError, match="second_level_input must be"):
        SecondLevelModel().fit(second_level_input=flm)
    with pytest.raises(TypeError, match="at least two"):
        SecondLevelModel().fit(second_level_input=[flm])

    # test first_level_conditions, confounds, and design
    flms = [flm, flm, flm]
    with pytest.raises(
        ValueError, match="confounds must be a pandas DataFrame"
    ):
        SecondLevelModel().fit(second_level_input=flms, confounds=["", []])
    with pytest.raises(
        ValueError, match="confounds must be a pandas DataFrame"
    ):
        SecondLevelModel().fit(second_level_input=flms, confounds=[])
    with pytest.raises(
        ValueError, match="confounds must be a pandas DataFrame"
    ):
        SecondLevelModel().fit(
            second_level_input=flms, confounds=confounds["conf1"]
        )

def test_secondlevelmodel_design_matrix_path(img_3d_mni, tmp_path):
    second_level_input = [img_3d_mni, img_3d_mni, img_3d_mni]
    des = pd.DataFrame(np.ones((len(second_level_input), 1)), columns=["a"])

    SecondLevelModel().fit(
        second_level_input=second_level_input, design_matrix=des
    )

    des_fname = tmp_path / "design.csv"
    des.to_csv(des_fname)

    SecondLevelModel().fit(
        second_level_input=second_level_input, design_matrix=des_fname
    )
    SecondLevelModel().fit(
        second_level_input=second_level_input, design_matrix=str(des_fname)
    )

    des_fname = tmp_path / "design.tsv"
    des.to_csv(des_fname, sep="\t")

    SecondLevelModel().fit(
        second_level_input=second_level_input, design_matrix=des_fname
    )
    SecondLevelModel().fit(
        second_level_input=second_level_input, design_matrix=str(des_fname)
    )

@pytest.mark.parametrize("design_matrix", ["foo", Path("foo")])
def test_secondlevelmodel_design_matrix_error_path(img_3d_mni, design_matrix):
    second_level_input = [img_3d_mni, img_3d_mni, img_3d_mni]
    with pytest.raises(
        ValueError, match="Tables to load can only be TSV or CSV."
    ):
        SecondLevelModel().fit(
            second_level_input=second_level_input, design_matrix=design_matrix
        )


@pytest.mark.parametrize("design_matrix", [1, ["foo"]])
def test_secondlevelmodel_design_matrix_error_type(img_3d_mni, design_matrix):
    second_level_input = [img_3d_mni, img_3d_mni, img_3d_mni]

    with pytest.raises(TypeError, match="'design_matrix' must be "):
        SecondLevelModel().fit(
            second_level_input=second_level_input, design_matrix=design_matrix
        )

def test_fmri_img_inputs_errors(confounds):
    # prepare correct input
    _, fmri_data, _ = generate_fake_fmri_data_and_design((SHAPE,))
    fmri_data = fmri_data[0]

    # test niimgs requirements
    niimgs = [fmri_data, fmri_data, fmri_data]
    with pytest.raises(ValueError, match="require a design matrix"):
        SecondLevelModel().fit(niimgs)
    with pytest.raises(
        TypeError,
        match="Elements of second_level_input must be of the same type.",
    ):
        SecondLevelModel().fit([*niimgs, []], confounds)

def test_fmri_inputs_for_non_parametric_inference_errors(
    rng, confounds, shape_3d_default, shape_4d_default
):
    # Test processing of FMRI inputs
    # prepare fake data
    _, fmri_data, design_matrices = generate_fake_fmri_data_and_design(
        [shape_4d_default], rk=1
    )

    # prepare correct input first level models
    flm = FirstLevelModel(subject_label="01").fit(
        fmri_data, design_matrices=design_matrices
    )

    # prepare correct input dataframe and lists
    p, q = 80, 10
    X = rng.standard_normal(size=(p, q))
    sdes = pd.DataFrame(X[:3, :3], columns=["intercept", "b", "c"])

    shape_3d = [(*shape_3d_default, 1)]
    _, fmri_data, _ = generate_fake_fmri_data_and_design(shape_3d)
    fmri_data = fmri_data[0]
    niimgs = [fmri_data, fmri_data, fmri_data]
    niimg_4d = concat_imgs(niimgs)

    # test missing second-level contrast
    match = "No second-level contrast is specified."
    # niimgs as input
    with pytest.raises(ValueError, match=match):
        non_parametric_inference(niimgs, None, sdes)
    with pytest.raises(ValueError, match=match):
        non_parametric_inference(niimgs, confounds, sdes)
    # 4d niimg as input
    with pytest.raises(ValueError, match=match):
        non_parametric_inference(niimg_4d, None, sdes)

    # test wrong input errors
    # test first level model
    with pytest.raises(TypeError, match="second_level_input must be"):
        non_parametric_inference(flm)

    # test list of less than two niimgs
    with pytest.raises(TypeError, match="at least two"):
        non_parametric_inference([fmri_data])

    # test niimgs requirements
    with pytest.raises(ValueError, match="require a design matrix"):
        non_parametric_inference(niimgs)
    with pytest.raises(TypeError):
        non_parametric_inference([*niimgs, []], confounds)

    # test other objects
    with pytest.raises(ValueError, match="File not found: .*"):
        non_parametric_inference("random string object")

def test_second_level_glm_computation():
    func_img, mask = fake_fmri_data()

    model = SecondLevelModel(mask_img=mask)
    Y = [func_img] * 4
    X = pd.DataFrame([[1]] * 4, columns=["intercept"])

    model = model.fit(Y, design_matrix=X)
    model.compute_contrast()
    labels1 = model.labels_
    results1 = model.results_

    labels2, results2 = run_glm(model.masker_.transform(Y), X.values, "ols")
    assert_almost_equal(labels1, labels2, decimal=1)

    assert len(results1) == len(results2)

@pytest.mark.parametrize("attribute", ["residuals", "predicted", "r_square"])
def test_second_level_voxelwise_attribute_errors(attribute):
    """Tests that an error is raised when trying to access \
    voxelwise attributes before computing a contrast, \
    and when ``minimize_memory`` is set to ``True``.
    """
    mask, fmri_data, _ = generate_fake_fmri_data_and_design((SHAPE,))
    model = SecondLevelModel(mask_img=mask, minimize_memory=False)

    Y = fmri_data * 4
    X = pd.DataFrame([[1]] * 4, columns=["intercept"])
    model.fit(Y, design_matrix=X)

    with pytest.raises(ValueError, match="The model has no results."):
        getattr(model, attribute)
    with pytest.raises(ValueError, match="attribute must be one of"):
        model._get_element_wise_model_attribute("foo", True)

    model = SecondLevelModel(mask_img=mask, minimize_memory=True)
    model.fit(Y, design_matrix=X)
    model.compute_contrast()

    with pytest.raises(ValueError, match="To access voxelwise attributes"):
        getattr(model, attribute)

@pytest.mark.parametrize("attribute", ["residuals", "predicted", "r_square"])
def test_second_level_voxelwise_attribute(attribute):
    """Smoke test for voxelwise attributes for SecondLevelModel."""
    mask, fmri_data, _ = generate_fake_fmri_data_and_design((SHAPE,))
    model = SecondLevelModel(mask_img=mask, minimize_memory=False)
    Y = fmri_data * 4
    X = pd.DataFrame([[1]] * 4, columns=["intercept"])
    model.fit(Y, design_matrix=X)
    model.compute_contrast()
    getattr(model, attribute)

def test_second_level_residuals():
    """Tests residuals computation for SecondLevelModel."""
    mask, fmri_data, _ = generate_fake_fmri_data_and_design((SHAPE,))
    model = SecondLevelModel(mask_img=mask, minimize_memory=False)
    n_subject = 4
    Y = fmri_data * n_subject
    X = pd.DataFrame([[1]] * n_subject, columns=["intercept"])
    model.fit(Y, design_matrix=X)
    model.compute_contrast()

    assert isinstance(model.residuals, Nifti1Image)
    assert model.residuals.shape == (*SHAPE[:3], n_subject)
    mean_residuals = model.masker_.transform(model.residuals).mean(0)
    assert_array_almost_equal(mean_residuals, 0)

def test_non_parametric_inference_permutation_computation():
    func_img, mask = fake_fmri_data()

    Y = [func_img] * 4
    X = pd.DataFrame([[1]] * 4, columns=["intercept"])

    neg_log_pvals_img = non_parametric_inference(
        Y, design_matrix=X, model_intercept=False, mask=mask, n_perm=N_PERM
    )

    assert get_data(neg_log_pvals_img).shape == SHAPE[:3]

def test_non_parametric_inference_tfce():
    """Test non-parametric inference with TFCE."""
    shapes = [SHAPE] * 4
    mask, fmri_data, _ = generate_fake_fmri_data_and_design(shapes)
    X = pd.DataFrame([[1]] * 4, columns=["intercept"])

    out = non_parametric_inference(
        fmri_data,
        design_matrix=X,
        model_intercept=False,
        mask=mask,
        n_perm=N_PERM,
        tfce=True,
    )
    assert isinstance(out, dict)
    assert "t" in out
    assert "tfce" in out
    assert "logp_max_t" in out
    assert "logp_max_tfce" in out

    assert get_data(out["tfce"]).shape == shapes[0][:3]
    assert get_data(out["logp_max_tfce"]).shape == shapes[0][:3]

@pytest.mark.timeout(0)
def test_non_parametric_inference_cluster_level():
    """Test non-parametric inference with cluster-level inference."""
    func_img, mask = fake_fmri_data()

    Y = [func_img] * 4
    X = pd.DataFrame([[1]] * 4, columns=["intercept"])

    out = non_parametric_inference(
        Y,
        design_matrix=X,
        model_intercept=False,
        mask=mask,
        n_perm=N_PERM,
        threshold=0.001,
    )
    assert isinstance(out, dict)
    assert "t" in out
    assert "logp_max_t" in out
    assert "size" in out
    assert "logp_max_size" in out
    assert "mass" in out
    assert "logp_max_mass" in out

    assert get_data(out["logp_max_t"]).shape == SHAPE[:3]

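# The next test compares permutation-corrected cluster sizes with a
# parametric baseline: clusters from an uncorrected -log10(p) map derived
# from the t-values should match, since n_perm is set to 1 / unc_pval so
# that the corrected and uncorrected thresholds correspond.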

@pytest.mark.timeout(0)
def test_non_parametric_inference_cluster_level_with_covariates(
    shape_3d_default,
    rng,
):
    """Test non-parametric inference with cluster-level inference in \
    the context of covariates.
    """
    shapes = ((*shape_3d_default, 1),)
    mask, fmri_data, _ = generate_fake_fmri_data_and_design(shapes)

    unc_pval = 0.1
    n_subjects = 2

    # Set up one sample t-test design with two random covariates
    cov1 = rng.random(n_subjects)
    cov2 = rng.random(n_subjects)
    X = pd.DataFrame({"cov1": cov1, "cov2": cov2, "intercept": 1})

    # make sure there is variability in the images
    kernels = rng.uniform(low=0, high=5, size=n_subjects)
    Y = [smooth_img(fmri_data[0], kernel) for kernel in kernels]

    # Set up non-parametric test
    out = non_parametric_inference(
        Y,
        design_matrix=X,
        mask=mask,
        model_intercept=False,
        second_level_contrast="intercept",
        n_perm=int(1 / unc_pval),
        threshold=unc_pval,
    )

    # Calculate uncorrected cluster sizes
    df = len(Y) - X.shape[1]  # noqa: PD901
    neg_log_pval = -np.log10(stats.t.sf(get_data(out["t"]), df=df))
    logp_unc = new_img_like(out["t"], neg_log_pval)
    logp_unc_cluster_sizes = list(
        get_clusters_table(logp_unc, -np.log10(unc_pval))["Cluster Size (mm3)"]
    )

    # Calculate corrected cluster sizes
    logp_max_cluster_sizes = list(
        get_clusters_table(out["logp_max_size"], unc_pval)[
            "Cluster Size (mm3)"
        ]
    )

    # Compare cluster sizes
    logp_unc_cluster_sizes.sort()
    logp_max_cluster_sizes.sort()
    assert logp_unc_cluster_sizes == logp_max_cluster_sizes

def test_non_parametric_inference_cluster_level_with_single_covariates(
    shape_3d_default,
    rng,
):
    """Test non-parametric inference with cluster-level inference \
    when the design has a single covariate.
    """
    shapes = ((*shape_3d_default, 1),)
    mask, fmri_data, _ = generate_fake_fmri_data_and_design(shapes)

    unc_pval = 0.1
    n_subjects = 2

    # make sure there is variability in the images
    kernels = rng.uniform(low=0, high=5, size=n_subjects)
    Y = [smooth_img(fmri_data[0], kernel) for kernel in kernels]

    # Test single covariate
    X = pd.DataFrame({"intercept": [1] * len(Y)})
    non_parametric_inference(
        Y,
        design_matrix=X,
        mask=mask,
        model_intercept=False,
        second_level_contrast="intercept",
        n_perm=N_PERM,
        threshold=unc_pval,
    )

@pytest.mark.timeout(0)
def test_second_level_contrast_computation_smoke():
    """Smoke test for different contrasts in fixed effects."""
    func_img, mask = fake_fmri_data()

    model = SecondLevelModel(mask_img=mask)
    Y = [func_img] * 4
    X = pd.DataFrame([[1]] * 4, columns=["intercept"])
    model = model.fit(Y, design_matrix=X)

    ncol = len(model.design_matrix_.columns)
    c1, _ = np.eye(ncol)[0, :], np.zeros(ncol)
    model.compute_contrast(second_level_contrast=c1)

    # formula should work (passing variable name directly)
    model.compute_contrast("intercept")

    # or simply pass nothing
    model.compute_contrast()

@pytest.mark.timeout(0)
@pytest.mark.parametrize(
    "output_type",
    [
        "z_score",
        "stat",
        "p_value",
        "effect_size",
        "effect_variance",
    ],
)
def test_second_level_contrast_computation_all(output_type):
    """Test output_type='all', and verify images are equivalent."""
    func_img, mask = fake_fmri_data()

    model = SecondLevelModel(mask_img=mask)
    Y = [func_img] * 4
    X = pd.DataFrame([[1]] * 4, columns=["intercept"])
    model = model.fit(Y, design_matrix=X)

    ncol = len(model.design_matrix_.columns)
    c1, _ = np.eye(ncol)[0, :], np.zeros(ncol)

    all_images = model.compute_contrast(
        second_level_contrast=c1, output_type="all"
    )

    assert_array_equal(
        get_data(all_images[output_type]),
        get_data(
            model.compute_contrast(
                second_level_contrast=c1, output_type=output_type
            )
        ),
    )

def test_second_level_contrast_computation_errors(rng):
    func_img, mask = fake_fmri_data()

    model = SecondLevelModel(mask_img=mask)

    # asking for contrast before model fit gives error
    with pytest.raises(ValueError, match="not fitted yet"):
        model.compute_contrast(second_level_contrast="intercept")

    # fit model
    Y = [func_img] * 4
    X = pd.DataFrame([[1]] * 4, columns=["intercept"])
    model = model.fit(Y, design_matrix=X)
    ncol = len(model.design_matrix_.columns)
    c1, cnull = np.eye(ncol)[0, :], np.zeros(ncol)

    # passing null contrast should give back a value error
    with pytest.raises(ValueError, match="Contrast is null"):
        model.compute_contrast(cnull)

    # passing wrong parameters
    with pytest.raises(ValueError, match="Allowed types are .*'t', 'F'"):
        model.compute_contrast(
            second_level_contrast=c1, second_level_stat_type=""
        )
    with pytest.raises(ValueError, match="Allowed types are .*'t', 'F'"):
        model.compute_contrast(
            second_level_contrast=c1, second_level_stat_type=[]
        )
    with pytest.raises(ValueError, match="output_type must be one of "):
        model.compute_contrast(second_level_contrast=c1, output_type="")

    # check that passing no explicit contrast when the design
    # matrix has more than one column raises an error
    X = pd.DataFrame(rng.uniform(size=(4, 2)), columns=["r1", "r2"])
    model = model.fit(Y, design_matrix=X)
    with pytest.raises(
        ValueError, match="No second-level contrast is specified"
    ):
        model.compute_contrast(None)

def test_second_level_t_contrast_length_errors():
    func_img, mask = fake_fmri_data()

    model = SecondLevelModel(mask_img=mask)

    Y = [func_img] * 4
    X = pd.DataFrame([[1]] * 4, columns=["intercept"])
    model = model.fit(Y, design_matrix=X)

    with pytest.raises(
        ValueError,
        match=("t contrasts should be of length P=1, but it has length 2."),
    ):
        model.compute_contrast(second_level_contrast=[1, 2])


def test_second_level_f_contrast_length_errors():
    func_img, mask = fake_fmri_data()

    model = SecondLevelModel(mask_img=mask)

    Y = [func_img] * 4
    X = pd.DataFrame([[1]] * 4, columns=["intercept"])
    model = model.fit(Y, design_matrix=X)

    with pytest.raises(
        ValueError,
        match=("F contrasts should have .* columns, but it has .*"),
    ):
        model.compute_contrast(second_level_contrast=np.eye(2))

@pytest.mark.parametrize("second_level_contrast", [None, "intercept", [1]])
def test_non_parametric_inference_contrast_computation(second_level_contrast):
    func_img, mask = fake_fmri_data()

    # prepare inputs
    Y = [func_img] * 4
    X = pd.DataFrame([[1]] * 4, columns=["intercept"])

    non_parametric_inference(
        Y,
        design_matrix=X,
        model_intercept=False,
        mask=mask,
        n_perm=N_PERM,
        second_level_contrast=second_level_contrast,
    )


@pytest.mark.parametrize(
    "second_level_contrast", [[1, 0], "r1", "r1-r2", [-1, 1]]
)
def test_non_parametric_inference_contrast_formula(second_level_contrast, rng):
    func_img, _ = fake_fmri_data()
    Y = [func_img] * 4
    X = pd.DataFrame(rng.uniform(size=(4, 2)), columns=["r1", "r2"])

    non_parametric_inference(
        second_level_input=Y,
        design_matrix=X,
        second_level_contrast=second_level_contrast,
    )

def test_non_parametric_inference_contrast_computation_errors(rng):
    func_img, mask = fake_fmri_data()

    # passing no second_level_input gives an error
    with pytest.raises(TypeError, match="second_level_input must be either"):
        non_parametric_inference(
            second_level_input=None,
            second_level_contrast="intercept",
            mask=mask,
        )

    # prepare inputs
    Y = [func_img] * 4
    X = pd.DataFrame([[1]] * 4, columns=["intercept"])

    ncol = len(X.columns)
    _, cnull = np.eye(ncol)[0, :], np.zeros(ncol)

    # passing null contrast should give back a value error
    with pytest.raises(
        ValueError,
        match=("Second_level_contrast must be a valid"),
    ):
        non_parametric_inference(
            second_level_input=Y,
            design_matrix=X,
            second_level_contrast=cnull,
            mask=mask,
        )
    with pytest.raises(
        ValueError,
        match=("Second_level_contrast must be a valid"),
    ):
        non_parametric_inference(
            second_level_input=Y,
            design_matrix=X,
            second_level_contrast=[],
            mask=mask,
        )

    # check that passing no explicit contrast when the design
    # matrix has more than one column raises an error
    X = pd.DataFrame(rng.uniform(size=(4, 2)), columns=["r1", "r2"])
    with pytest.raises(
        ValueError, match="No second-level contrast is specified."
    ):
        non_parametric_inference(
            second_level_input=Y,
            design_matrix=X,
            second_level_contrast=None,
        )

def test_second_level_contrast_computation_with_memory_caching():
    func_img, mask = fake_fmri_data()

    model = SecondLevelModel(mask_img=mask, memory="nilearn_cache")

    # fit model
    Y = [func_img] * 4
    X = pd.DataFrame([[1]] * 4, columns=["intercept"])
    model = model.fit(Y, design_matrix=X)
    ncol = len(model.design_matrix_.columns)
    c1 = np.eye(ncol)[0, :]
    # test memory caching for compute_contrast
    model.compute_contrast(c1, output_type="z_score")
    # or simply pass nothing
    model.compute_contrast()

def test_second_lvl_dataframe_computation(tmp_path, shape_3d_default):
    """Check that contrast can be computed when using dataframes as input.

    See bug https://github.com/nilearn/nilearn/issues/3871
    """
    file_path = write_fake_bold_img(
        file_path=tmp_path / "img.nii.gz", shape=shape_3d_default
    )

    dfcols = ["subject_label", "map_name", "effects_map_path"]
    dfrows = [
        ["01", "a", file_path],
        ["02", "a", file_path],
        ["03", "a", file_path],
    ]
    niidf = pd.DataFrame(dfrows, columns=dfcols)

    model = SecondLevelModel().fit(niidf)
    model.compute_contrast(first_level_contrast="a")


# -----------------------surface tests----------------------- #

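# The tests below mirror the volume-based tests above with SurfaceImage
# inputs (and SurfaceMasker where a mask is needed); surf_img_1d,
# surf_img_2d and the surf_mask_* objects are pytest fixtures coming from
# nilearn's test configuration.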

def test_second_level_input_as_surface_image(surf_img_1d):
    """Test slm with a list of surface images as input."""
    n_subjects = 5
    second_level_input = [surf_img_1d for _ in range(n_subjects)]

    design_matrix = pd.DataFrame(
        [1] * len(second_level_input), columns=["intercept"]
    )

    model = SecondLevelModel()
    model = model.fit(second_level_input, design_matrix=design_matrix)


def test_second_level_input_as_surface_image_3d(surf_img_2d):
    """Fit a single surface image with all subjects as timepoints."""
    n_subjects = 5
    second_level_input = surf_img_2d(n_subjects)

    design_matrix = pd.DataFrame([1] * n_subjects, columns=["intercept"])

    model = SecondLevelModel()

    model.fit(second_level_input, design_matrix=design_matrix)

def test_second_level_input_error_surface_image_2d(surf_img_2d):
    """Error when passing a 2D SurfaceImage with a single sample."""
    n_subjects = 1
    second_level_input = surf_img_2d(n_subjects)

    design_matrix = pd.DataFrame([1] * n_subjects, columns=["intercept"])

    model = SecondLevelModel()

    with pytest.raises(TypeError, match="must be a 3D SurfaceImage"):
        model.fit(second_level_input, design_matrix=design_matrix)

def test_second_level_input_as_surface_image_3d_same_as_list_2d(surf_img_1d):
    """Fitting all subjects as timepoints matches a list of subject images."""
    n_subjects = 5
    second_level_input = [surf_img_1d for _ in range(n_subjects)]

    design_matrix = pd.DataFrame([1] * n_subjects, columns=["intercept"])

    model = SecondLevelModel()
    model.fit(second_level_input, design_matrix=design_matrix)
    result_2d = model.compute_contrast()

    second_level_input_3d = concat_imgs(second_level_input)
    model.fit(second_level_input_3d, design_matrix=design_matrix)
    result_3d = model.compute_contrast()

    assert_surface_image_equal(result_2d, result_3d)

def test_second_level_input_as_surface_no_design_matrix(surf_img_1d):
    """Raise error when design matrix is missing."""
    n_subjects = 5
    second_level_input = [surf_img_1d for _ in range(n_subjects)]

    model = SecondLevelModel()

    with pytest.raises(
        ValueError, match="require a design matrix to be provided"
    ):
        model.fit(second_level_input, design_matrix=None)

@pytest.mark.parametrize("surf_mask_dim", [1, 2])
def test_second_level_input_as_surface_image_with_mask(
    surf_img_1d, surf_mask_dim, surf_mask_1d, surf_mask_2d
):
    """Test slm with a surface mask and a list of surface images as input."""
    n_subjects = 5
    second_level_input = [surf_img_1d for _ in range(n_subjects)]

    design_matrix = pd.DataFrame(
        [1] * len(second_level_input), columns=["intercept"]
    )
    surf_mask = surf_mask_1d if surf_mask_dim == 1 else surf_mask_2d()

    model = SecondLevelModel(mask_img=surf_mask)
    model = model.fit(second_level_input, design_matrix=design_matrix)

def test_second_level_input_with_wrong_mask(
    surf_img_1d, surf_mask_1d, img_mask_mni
):
    """Test slm with mask of the wrong type."""
    n_subjects = 5
    second_level_input = [surf_img_1d for _ in range(n_subjects)]

    design_matrix = pd.DataFrame(
        [1] * len(second_level_input), columns=["intercept"]
    )

    # volume mask with surface data
    model = SecondLevelModel(mask_img=img_mask_mni)

    with pytest.raises(
        TypeError, match="Mask and images to fit must be of compatible types."
    ):
        model = model.fit(second_level_input, design_matrix=design_matrix)

    # surface mask with volume data
    func_img, _ = fake_fmri_data()
    second_level_input = [func_img] * 4
    model = SecondLevelModel(mask_img=surf_mask_1d)

    with pytest.raises(
        TypeError, match="Mask and images to fit must be of compatible types."
    ):
        model = model.fit(second_level_input, design_matrix=design_matrix)

def test_second_level_input_as_surface_image_warning_smoothing(surf_img_1d):
    """Warn that smoothing is not yet supported for surface data."""
    n_subjects = 5
    second_level_input = [surf_img_1d for _ in range(n_subjects)]

    design_matrix = pd.DataFrame(
        [1] * len(second_level_input), columns=["intercept"]
    )

    model = SecondLevelModel(smoothing_fwhm=8.0)
    with pytest.warns(UserWarning, match="not yet supported"):
        model = model.fit(second_level_input, design_matrix=design_matrix)

def test_second_level_input_as_flm_of_surface_image(surface_glm_data):
    """Test fitting a list of first level models with surface data."""
    n_subjects = 5
    second_level_input = []
    for _ in range(n_subjects):
        img, des = surface_glm_data(5)
        model = FirstLevelModel()
        model.fit(img, design_matrices=des)
        second_level_input.append(model)

    design_matrix = pd.DataFrame(
        [1] * len(second_level_input), columns=["intercept"]
    )

    model = SecondLevelModel()
    model = model.fit(second_level_input, design_matrix=design_matrix)

def test_second_level_surface_image_contrast_computation(surf_img_1d):
    """Smoke test contrast computation with surface images as input."""
    n_subjects = 5
    second_level_input = [surf_img_1d for _ in range(n_subjects)]

    design_matrix = pd.DataFrame(
        [1] * len(second_level_input), columns=["intercept"]
    )

    model = SecondLevelModel()

    model = model.fit(second_level_input, design_matrix=design_matrix)

    # simply pass nothing
    model.compute_contrast()

    # formula should work (passing variable name directly)
    model.compute_contrast("intercept")

    # smoke test for different contrasts in fixed effects
    ncol = len(model.design_matrix_.columns)
    c1, _ = np.eye(ncol)[0, :], np.zeros(ncol)
    model.compute_contrast(second_level_contrast=c1)

    # Test output_type='all', and verify images are equivalent
    all_images = model.compute_contrast(
        second_level_contrast=c1, output_type="all"
    )
    for key in [
        "z_score",
        "stat",
        "p_value",
        "effect_size",
        "effect_variance",
    ]:
        assert_surface_image_equal(
            all_images[key],
            model.compute_contrast(second_level_contrast=c1, output_type=key),
        )

@pytest.mark.parametrize("two_sided_test", [True, False])
def test_non_parametric_inference_with_surface_images(
    surf_img_1d, two_sided_test
):
    """Smoke test non_parametric_inference on list of 1D surfaces."""
    n_subjects = 5
    second_level_input = [surf_img_1d for _ in range(n_subjects)]

    design_matrix = pd.DataFrame([1] * n_subjects, columns=["intercept"])

    non_parametric_inference(
        second_level_input=second_level_input,
        design_matrix=design_matrix,
        n_perm=N_PERM,
        two_sided_test=two_sided_test,
    )


def test_non_parametric_inference_with_surface_images_2d(surf_img_2d):
    """Smoke test non_parametric_inference on 2d surfaces."""
    n_subjects = 5
    second_level_input = surf_img_2d(n_subjects)

    design_matrix = pd.DataFrame([1] * n_subjects, columns=["intercept"])

    non_parametric_inference(
        second_level_input=second_level_input,
        design_matrix=design_matrix,
        n_perm=N_PERM,
    )

def test_non_parametric_inference_with_surface_images_2d_mask(
    surf_img_2d, surf_mask_1d
):
    """Smoke test non_parametric_inference on 2d surfaces and a mask."""
    n_subjects = 5
    second_level_input = surf_img_2d(n_subjects)

    design_matrix = pd.DataFrame([1] * n_subjects, columns=["intercept"])

    masker = SurfaceMasker(surf_mask_1d)

    non_parametric_inference(
        second_level_input=second_level_input,
        design_matrix=design_matrix,
        n_perm=N_PERM,
        mask=masker,
    )

def test_non_parametric_inference_with_surface_images_warnings(surf_img_1d):
    """Throw warnings for features not yet implemented for surface data."""
    n_subjects = 5
    second_level_input = [surf_img_1d for _ in range(n_subjects)]

    design_matrix = pd.DataFrame([1] * n_subjects, columns=["intercept"])

    with pytest.warns(
        UserWarning,
        match="'smoothing_fwhm' is not yet supported for surface data.",
    ):
        non_parametric_inference(
            second_level_input=second_level_input,
            design_matrix=design_matrix,
            n_perm=N_PERM,
            smoothing_fwhm=6,
        )
    with pytest.warns(
        UserWarning,
        match="Cluster level inference not yet implemented for surface data.",
    ):
        non_parametric_inference(
            second_level_input=second_level_input,
            design_matrix=design_matrix,
            n_perm=N_PERM,
            tfce=True,
        )
    with pytest.warns(
        UserWarning,
        match="Cluster level inference not yet implemented for surface data.",
    ):
        non_parametric_inference(
            second_level_input=second_level_input,
            design_matrix=design_matrix,
            n_perm=N_PERM,
            threshold=0.001,
        )