Coverage for nilearn/mass_univariate/tests/test_permuted_least_squares.py: 0%

324 statements  

coverage.py v7.9.1, created at 2025-06-16 12:32 +0200

"""Tests for the permuted_ols function."""

import numpy as np
import pytest
from nibabel import Nifti1Image
from numpy.testing import (
    assert_array_almost_equal,
    assert_array_less,
    assert_equal,
)
from scipy import stats

from nilearn.conftest import _rng
from nilearn.maskers import NiftiMasker
from nilearn.mass_univariate import permuted_ols
from nilearn.mass_univariate.permuted_least_squares import (
    _sanitize_inputs_permuted_ols,
)

N_COVARS = 2

N_PERM = 10

N_SAMPLES = 50


def _tfce_design():
    target_var1 = np.arange(0, 10).reshape((-1, 1))  # positive effect
    target_var = np.hstack(
        (  # corresponds to 3 x 3 x 3 x 10 niimg
            target_var1,  # voxel 1 has positive effect
            -target_var1,  # voxel 2 has negative effect
            _rng().random((10, 25)),  # 25 remaining voxels
        )
    )
    tested_var = np.arange(0, 20, 2)

    mask_img = Nifti1Image(np.ones((3, 3, 3)), np.eye(4))
    masker = NiftiMasker(mask_img)
    masker.fit(mask_img)

    n_descriptors = np.prod(mask_img.shape)
    n_regressors = 1  # tested_var is 1D

    return target_var, tested_var, masker, n_descriptors, n_regressors
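
# Note on _tfce_design(): the 3 x 3 x 3 mask gives 27 descriptors. The first
# voxel increases linearly with tested_var, the second decreases linearly, and
# the remaining 25 are random noise, so only the first two carry a true effect.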

def compare_to_ref_score(own_score, tested_var, target_var, covars=None):
    """Compare obtained score to expected score."""
    reference = ref_score(tested_var, target_var, covars)
    assert_array_almost_equal(own_score, reference, decimal=6)
    return reference


def ref_score(tested_var, target_var, covars=None):
    """Compute t-scores with linalg or statsmodels."""
    from nilearn.mass_univariate.tests._testing import (
        get_tvalue_with_alternative_library,
    )

    return get_tvalue_with_alternative_library(tested_var, target_var, covars)


def _create_design(rng, n_samples, n_descriptors, n_regressors):
    target_var = rng.standard_normal((n_samples, n_descriptors))
    tested_var = rng.standard_normal((n_samples, n_regressors))

    return target_var, tested_var, n_descriptors, n_regressors


@pytest.fixture
def design(rng):
    """Return a design to run tests on."""
    return _create_design(
        rng, n_samples=N_SAMPLES, n_descriptors=1, n_regressors=1
    )


@pytest.fixture
def dummy_design(rng):
    """Use to test errors and warnings."""
    return _create_design(rng, n_samples=10, n_descriptors=1, n_regressors=1)


@pytest.fixture
def confounding_vars(rng):
    """Return normally distributed confounds."""
    return rng.standard_normal((N_SAMPLES, N_COVARS))


@pytest.fixture()
def masker(affine_eye):
    """Return a default masker."""
    mask_img = Nifti1Image(np.ones((5, 5, 5)), affine_eye)
    masker = NiftiMasker(mask_img)
    masker.fit(mask_img)
    return masker


@pytest.fixture()
def cluster_level_design(rng):
    """Create design for cluster level tests."""
    target_var1 = np.arange(0, 10).reshape((-1, 1))  # positive effect
    voxel_vars = np.hstack(
        (
            -target_var1,  # negative effect
            target_var1,  # positive effect
            rng.random((10, 1)),  # random voxel
        )
    )

    columns = np.arange(0, voxel_vars.shape[1])
    # create 125 voxels
    chosen_columns = rng.choice(columns, size=125, p=[0.1, 0.1, 0.8])
    # corresponds to 5 x 5 x 5 x 10 niimg
    target_var = voxel_vars[:, chosen_columns]
    tested_var = np.arange(0, 20, 2)

    return target_var, tested_var
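
# Note on cluster_level_design(): 125 voxels (a 5 x 5 x 5 mask) are drawn from
# the three column types above with probabilities [0.1, 0.1, 0.8], so roughly
# 10% of voxels carry a negative effect, 10% a positive effect, and the rest
# are random noise.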

# General tests for permuted_ols function
#
# Check that h0 is close to the theoretical distribution
# for permuted OLS with label swap.
#
# Theoretical distribution is known for this simple design: t(n_samples - dof).


PERM_RANGES = [10, 1000]
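
# The checks below rely on each design having a single descriptor, so the
# "max t" recorded for every permutation is simply the t-value; under the null
# it should behave like draws from stats.t(dof). A rough sketch of the idea
# (illustrative only, not executed as part of the test suite):
#
#     h0 = permuted_ols(
#         tested_var, target_var, n_perm=1000, two_sided_test=False,
#         output_type="dict",
#     )["h0_max_t"]
#     stats.kstest(h0[0], stats.t(N_SAMPLES - 1).cdf)  # dof without intercept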

def run_permutations(tested_var, target_var, model_intercept):
    """Compute the mean squared error between cumulative density functions \
    as a proof of consistency of the permutation algorithm.
    """
    all_mse = []
    all_kstest_pvals = []

    for i, n_perm in enumerate(np.repeat(PERM_RANGES, 10)):
        if model_intercept:
            h0 = permuted_ols_with_intercept(
                tested_var, target_var, int(n_perm), i
            )
            dof = N_SAMPLES - 2
        else:
            h0 = permuted_ols_no_intercept(
                tested_var, target_var, int(n_perm), i
            )
            dof = N_SAMPLES - 1

        h0_intercept = h0[0, :]
        kstest_pval, mse = ks_stat_and_mse(dof, h0_intercept)

        all_kstest_pvals.append(kstest_pval)
        all_mse.append(mse)

    return all_kstest_pvals, all_mse


def permuted_ols_no_intercept(tested_var, target_var, n_perm, i):
    """Wrap to run permuted_ols without model_intercept."""
    n_regressors = 1
    output = permuted_ols(
        tested_var,
        target_var,
        model_intercept=False,
        n_perm=n_perm,
        two_sided_test=False,
        random_state=i,
        output_type="dict",
        verbose=1,
    )
    assert_equal(output["h0_max_t"].shape, (n_regressors, n_perm))
    return output["h0_max_t"]


def permuted_ols_with_intercept(tested_var, target_var, n_perm, i):
    """Wrap to run permuted_ols with model_intercept."""
    output = permuted_ols(
        tested_var,
        target_var,
        model_intercept=True,
        n_perm=n_perm,
        two_sided_test=False,
        random_state=i,
        output_type="dict",
        verbose=1,
    )
    # pval should not be significant
    assert_array_less(output["logp_max_t"], 1.0)
    return output["h0_max_t"]


def ks_stat_and_mse(df, h0_intercept):
    """Run Kolmogorov-Smirnov test and compute mean squared error."""
    kstest_pval = stats.kstest(h0_intercept, stats.t(df).cdf)[1]
    mse = mean_squared_error(df=df, h0_intercept=h0_intercept)
    return kstest_pval, mse


def mean_squared_error(df, h0_intercept):
    """Compute mean squared error."""
    return np.mean(
        (
            stats.t(df).cdf(np.sort(h0_intercept))
            - np.linspace(0, 1, h0_intercept.size + 1)[1:]
        )
        ** 2
    )
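
# mean_squared_error() above compares the theoretical t(df) CDF, evaluated at
# the sorted permutation scores, with the corresponding empirical CDF values
# (a uniform grid on (0, 1]): a small MSE means the permutation null is close
# to the theoretical distribution.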

def check_ktest_p_values_distribution_and_mse(all_kstest_pvals, all_mse):
    """Check that the KS test does not reject the theoretical distribution."""
    all_kstest_pvals = np.array(all_kstest_pvals).reshape(
        (len(PERM_RANGES), -1)
    )
    assert_array_less(0.01, all_kstest_pvals)

    # consistency of the algorithm: the more permutations, the lower the MSE
    all_mse = np.array(all_mse).reshape((len(PERM_RANGES), -1))
    assert_array_less(np.diff(all_mse.mean(1)), 0)


@pytest.mark.parametrize("model_intercept", [True, False])
def test_permuted_ols_check_h0_noeffect_labelswap_centered(model_intercept):
    """Check distributions of permutations when tested vars are centered."""
    # create dummy design with no effect
    rng = np.random.RandomState(0)
    target_var = rng.randn(N_SAMPLES, 1)

    centered_var = np.arange(N_SAMPLES, dtype="f8").reshape((-1, 1))
    centered_var -= centered_var.mean(0)

    all_kstest_pvals, all_mse = run_permutations(
        centered_var, target_var, model_intercept=model_intercept
    )

    check_ktest_p_values_distribution_and_mse(all_kstest_pvals, all_mse)


def test_permuted_ols_check_h0_noeffect_labelswap_uncentered():
    """Check distributions of permutations when tested vars are uncentered."""
    # create dummy design with no effect
    rng = np.random.RandomState(0)
    target_var = rng.randn(N_SAMPLES, 1)

    uncentered_var = np.arange(N_SAMPLES, dtype="f8").reshape((-1, 1))

    all_kstest_pvals, all_mse = run_permutations(
        uncentered_var, target_var, model_intercept=True
    )

    check_ktest_p_values_distribution_and_mse(all_kstest_pvals, all_mse)


def test_permuted_ols_check_h0_noeffect_signswap():
    """Check that h0 is close to the theoretical distribution \
    for permuted OLS with sign swap.

    Theoretical distribution is known for this simple design \
    (= t(n_samples - dof)).
    """
    # create dummy design with no effect
    rng = np.random.RandomState(0)
    target_var = rng.randn(N_SAMPLES, 1)

    n_regressors = 1
    tested_var = np.ones((N_SAMPLES, n_regressors))

    all_kstest_pvals, all_mse = run_permutations(
        tested_var, target_var, model_intercept=False
    )

    all_kstest_pvals = np.array(all_kstest_pvals).reshape(
        (len(PERM_RANGES), -1)
    )
    all_mse = np.array(all_mse).reshape((len(PERM_RANGES), -1))

    # check that a difference between distributions is not rejected by KS test
    assert_array_less(0.01 / (len(PERM_RANGES) * 10.0), all_kstest_pvals)
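    # (the threshold above is 0.01 divided by the number of KS tests run,
    # len(PERM_RANGES) * 10 = 20, in the spirit of a Bonferroni correction)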

    # consistency of the algorithm: the more permutations, the lower the MSE
    assert_array_less(np.diff(all_mse.mean(1)), 0)


# Tests for labels swapping permutation scheme


def test_permuted_ols_no_covar(design):
    """Check output."""
    target_var, tested_var, *_ = design
    output = permuted_ols(
        tested_var,
        target_var,
        model_intercept=False,
        n_perm=0,
        random_state=0,
        output_type="dict",
        verbose=1,
    )
    compare_to_ref_score(output["t"], tested_var, target_var)


def test_permuted_ols_no_covar_with_ravelized_tested_var(design):
    """Check output when tested var is flattened."""
    target_var, tested_var, *_ = design

    output = permuted_ols(
        np.ravel(tested_var),
        target_var,
        model_intercept=False,
        n_perm=0,
        random_state=0,
        output_type="dict",
        verbose=1,
    )
    compare_to_ref_score(output["t"], tested_var, target_var)


def test_permuted_ols_no_covar_with_intercept(design):
    """Check output when modeling intercept with no confounds."""
    # Add intercept (should be equivalent to centering variates).
    target_var, tested_var, *_ = design

    output = permuted_ols(
        tested_var,
        target_var,
        model_intercept=True,
        n_perm=0,
        random_state=0,
        output_type="dict",
        verbose=1,
    )
    target_var -= target_var.mean(0)
    tested_var -= tested_var.mean(0)

    compare_to_ref_score(
        output["t"], tested_var, target_var, np.ones((N_SAMPLES, 1))
    )
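
# Note: the reference above is computed on de-meaned variables plus an explicit
# column of ones, which is what model_intercept=True is expected to be
# equivalent to.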

def test_permuted_ols_with_covar(design, confounding_vars):
    """Check output when not modeling intercept with normal confounds."""
    target_var, tested_var, n_descriptors, n_regressors = design

    output = permuted_ols(
        tested_var,
        target_var,
        confounding_vars,
        model_intercept=False,
        n_perm=0,
        random_state=0,
        output_type="dict",
        verbose=1,
    )

    ref_score = compare_to_ref_score(
        output["t"], tested_var, target_var, confounding_vars
    )
    assert output["t"].shape == (n_regressors, n_descriptors)
    assert ref_score.shape == (n_regressors, n_descriptors)


def test_permuted_ols_with_covar_with_intercept(design, confounding_vars):
    """Check output when modeling intercept with normal confounds."""
    target_var, tested_var, n_descriptors, n_regressors = design

    output = permuted_ols(
        tested_var,
        target_var,
        confounding_vars,
        model_intercept=True,
        n_perm=0,
        random_state=0,
        output_type="dict",
        verbose=1,
    )

    confounding_vars = np.hstack((confounding_vars, np.ones((N_SAMPLES, 1))))
    ref_score = compare_to_ref_score(
        output["t"], tested_var, target_var, confounding_vars
    )
    assert output["t"].shape == (n_regressors, n_descriptors)
    assert ref_score.shape == (n_regressors, n_descriptors)


@pytest.mark.parametrize("model_intercept", [True, False])
def test_permuted_ols_with_covar_with_intercept_in_confonding_vars(
    design, model_intercept
):
    """Check output when modeling intercept or not, \
    with confounds containing an intercept.
    """
    target_var, tested_var, n_descriptors, n_regressors = design
    confounding_vars = np.ones([N_SAMPLES, 1])

    output = permuted_ols(
        tested_var,
        target_var,
        confounding_vars,
        model_intercept=model_intercept,
        n_perm=0,
        random_state=0,
        output_type="dict",
        verbose=1,
    )
    assert output["t"].shape == (n_regressors, n_descriptors)


def test_permuted_ols_with_multiple_constants_and_covars(design, rng):
    """Check output when multiple constants and covariates are passed."""
    target_var, tested_var, n_descriptors, n_regressors = design

    n_covars = 2

    confounding_vars = np.hstack(
        (rng.standard_normal((N_SAMPLES, n_covars)), np.ones([N_SAMPLES, 2]))
    )
    output = permuted_ols(
        tested_var,
        target_var,
        confounding_vars,
        model_intercept=False,
        n_perm=0,
        random_state=0,
        output_type="dict",
        verbose=1,
    )
    assert output["t"].shape == (n_regressors, n_descriptors)


def test_permuted_ols_nocovar_multivariate(rng):
    """Test permuted_ols with multiple tested variates and no covariate.

    It is equivalent to fitting several models with only one tested variate.
    """
    n_descriptors = 10
    n_regressors = 2
    target_vars, tested_var, *_ = _create_design(
        rng,
        n_samples=N_SAMPLES,
        n_descriptors=n_descriptors,
        n_regressors=n_regressors,
    )

    n_perm = N_PERM
    output = permuted_ols(
        tested_var,
        target_vars,
        model_intercept=False,
        n_perm=n_perm,
        random_state=0,
        output_type="dict",
        verbose=1,
    )

    compare_to_ref_score(output["t"], tested_var, target_vars)

    assert output["logp_max_t"].shape == (n_regressors, n_descriptors)
    assert output["h0_max_t"].shape == (n_regressors, n_perm)

    # Adds intercept (should be equivalent to centering variates)
    output_intercept = permuted_ols(
        tested_var,
        target_vars,
        model_intercept=True,
        n_perm=0,
        random_state=0,
        output_type="dict",
        verbose=1,
    )

    target_vars -= target_vars.mean(0)
    tested_var -= tested_var.mean(0)
    compare_to_ref_score(
        output_intercept["t"], tested_var, target_vars, np.ones((N_SAMPLES, 1))
    )


# Tests for sign swapping permutation scheme


def test_permuted_ols_intercept_nocovar(rng):
    """Check output when no covariate is passed."""
    n_descriptors = 10
    n_regressors = 1
    tested_var = np.ones((N_SAMPLES, n_regressors))
    target_var = rng.standard_normal((N_SAMPLES, n_descriptors))

    output = permuted_ols(
        tested_var,
        target_var,
        confounding_vars=None,
        n_perm=N_PERM,
        random_state=0,
        output_type="dict",
        verbose=1,
    )

    ref_score = compare_to_ref_score(output["t"], tested_var, target_var)
    assert ref_score.shape == (n_regressors, n_descriptors)
    assert output["logp_max_t"].shape == (n_regressors, n_descriptors)
    assert output["t"].shape == (n_regressors, n_descriptors)
    assert_array_less(
        output["logp_max_t"], 1.0
    )  # ensure sign swap is correctly done

    # same call, but with model_intercept explicitly set to False, to check
    # that the intercept setting makes no difference when tested_var is
    # already a constant column
    output_addintercept = permuted_ols(
        tested_var,
        target_var,
        confounding_vars=None,
        model_intercept=False,
        n_perm=0,
        random_state=0,
        output_type="dict",
        verbose=1,
    )
    compare_to_ref_score(output_addintercept["t"], tested_var, target_var)
    assert output_addintercept["t"].shape == (n_regressors, n_descriptors)


def test_permuted_ols_intercept_statsmodels_withcovar(
    rng,
):
    """Check output when covariate is passed."""
    n_descriptors = 10
    n_regressors = 1
    n_covars = 2
    tested_var = np.ones((N_SAMPLES, n_regressors))
    target_var = rng.standard_normal((N_SAMPLES, n_descriptors))
    confounding_vars = rng.standard_normal((N_SAMPLES, n_covars))

    output = permuted_ols(
        tested_var,
        target_var,
        confounding_vars,
        n_perm=0,
        random_state=0,
        output_type="dict",
        verbose=1,
    )
    ref_score = compare_to_ref_score(
        output["t"], tested_var, target_var, confounding_vars
    )
    assert ref_score.shape == (n_regressors, n_descriptors)
    assert output["t"].shape == (n_regressors, n_descriptors)

    # same thing but with model_intercept=True to check it has no effect
    output_intercept = permuted_ols(
        tested_var,
        target_var,
        confounding_vars,
        model_intercept=True,
        n_perm=0,
        random_state=0,
        output_type="dict",
        verbose=1,
    )
    compare_to_ref_score(
        output_intercept["t"], tested_var, target_var, confounding_vars
    )
    assert output_intercept["t"].shape == (n_regressors, n_descriptors)

def test_one_sided_versus_two_test(rng):
    """Check that a positive effect is always better \
    recovered with a one-sided test.
    """
    n_descriptors = 100
    n_regressors = 1
    target_var = rng.standard_normal((N_SAMPLES, n_descriptors))
    tested_var = rng.standard_normal((N_SAMPLES, n_regressors))

    # one-sided
    output_1_sided = permuted_ols(
        tested_var,
        target_var,
        model_intercept=False,
        two_sided_test=False,
        n_perm=N_PERM,
        random_state=0,
        output_type="dict",
        verbose=1,
    )
    assert output_1_sided["logp_max_t"].shape == (n_regressors, n_descriptors)

    # two-sided
    output_2_sided = permuted_ols(
        tested_var,
        target_var,
        model_intercept=False,
        two_sided_test=True,
        n_perm=N_PERM,
        random_state=0,
        output_type="dict",
        verbose=1,
    )
    assert output_2_sided["logp_max_t"].shape == (n_regressors, n_descriptors)

    positive_effect_location = output_1_sided["logp_max_t"] > 1
    assert_equal(
        np.sum(
            output_2_sided["logp_max_t"][positive_effect_location]
            - output_1_sided["logp_max_t"][positive_effect_location]
            > 0
        ),
        0,
    )
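
# In the test above, positive_effect_location marks descriptors flagged by the
# one-sided test (logp > 1); the assertion checks that the two-sided
# -log10(p-value) never exceeds the one-sided one at those locations.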

def test_two_sided_recover_positive_and_negative_effects():
    """Check that two-sided can actually recover \
    positive and negative effects.
    """
    target_var1 = np.arange(0, 10).reshape((-1, 1))  # positive effect
    target_var = np.hstack((target_var1, -target_var1))
    tested_var = np.arange(0, 20, 2)

    # one-sided
    output_1_sided_1 = permuted_ols(
        tested_var,
        target_var,
        model_intercept=False,
        two_sided_test=False,
        n_perm=N_PERM,
        random_state=0,
        output_type="dict",
        verbose=1,
    )
    output_1_sided_1["logp_max_t"]

    # one-sided (other side)
    output_1_sided_2 = permuted_ols(
        tested_var,
        -target_var,
        model_intercept=False,
        two_sided_test=False,
        n_perm=N_PERM,
        random_state=0,
        output_type="dict",
        verbose=1,
    )

    # two-sided
    output_2_sided = permuted_ols(
        tested_var,
        target_var,
        model_intercept=False,
        two_sided_test=True,
        n_perm=N_PERM,
        random_state=0,
        output_type="dict",
        verbose=1,
    )
    output_2_sided["logp_max_t"]

    assert_array_almost_equal(
        output_1_sided_1["logp_max_t"][0],
        output_1_sided_2["logp_max_t"][0][::-1],
    )
    assert_array_almost_equal(
        output_1_sided_1["logp_max_t"] + output_1_sided_2["logp_max_t"],
        output_2_sided["logp_max_t"],
    )
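
# Note on the test above: negating target_var swaps which column carries the
# positive effect, so the two one-sided logp maps are mirror images of each
# other; for this design their sum is expected to match the two-sided map,
# which is exactly what the two assertions check.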

def test_tfce_smoke_legacy_smoke():
    """Check tfce output of dict with or without permutations."""
    (
        target_var,
        tested_var,
        masker,
        n_descriptors,
        n_regressors,
    ) = _tfce_design()

    # no permutations and output_type is "dict", so check for "t" and
    # "tfce" maps
    out = permuted_ols(
        tested_var,
        target_var,
        model_intercept=False,
        two_sided_test=False,
        n_perm=0,
        random_state=0,
        masker=masker,
        tfce=True,
        output_type="dict",
        verbose=1,
    )

    assert isinstance(out, dict)
    assert "t" in out
    assert "tfce" in out
    assert out["t"].shape == (n_regressors, n_descriptors)
    assert out["tfce"].shape == (n_regressors, n_descriptors)

    # permutations, TFCE, and masker are defined,
    # so check for TFCE maps
    n_perm = N_PERM
    out = permuted_ols(
        tested_var,
        target_var,
        model_intercept=False,
        two_sided_test=False,
        n_perm=n_perm,
        random_state=0,
        masker=masker,
        tfce=True,
        output_type="dict",
        verbose=1,
    )

    assert isinstance(out, dict)
    assert "t" in out
    assert "tfce" in out
    assert "logp_max_t" in out
    assert "logp_max_tfce" in out
    assert "h0_max_t" in out
    assert "h0_max_tfce" in out
    assert out["t"].shape == (n_regressors, n_descriptors)
    assert out["tfce"].shape == (n_regressors, n_descriptors)
    assert out["logp_max_t"].shape == (n_regressors, n_descriptors)
    assert out["logp_max_tfce"].shape == (n_regressors, n_descriptors)
    assert out["h0_max_t"].size == n_perm
    assert out["h0_max_tfce"].size == n_perm
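
# The h0_max_t / h0_max_tfce arrays presumably hold, for each permutation, the
# maximum statistic across descriptors (the basis of max-statistic FWE
# correction), hence their size of n_perm.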

def test_cluster_level_parameters_smoke(cluster_level_design, masker):
    """Test combinations of parameters related to cluster-level inference."""
    target_var, tested_var = cluster_level_design

    # no permutations and output_type is "dict", so check for "t" map
    out = permuted_ols(
        tested_var,
        target_var,
        model_intercept=False,
        two_sided_test=False,
        n_perm=0,
        random_state=0,
        output_type="dict",
        verbose=1,
    )

    assert isinstance(out, dict)
    assert "t" in out

    # permutations, threshold, and masker are defined,
    # so check for cluster-level maps
    n_perm = N_PERM
    out = permuted_ols(
        tested_var,
        target_var,
        model_intercept=False,
        two_sided_test=True,
        n_perm=n_perm,
        random_state=0,
        threshold=0.001,
        masker=masker,
        output_type="dict",
        verbose=1,
    )

    assert isinstance(out, dict)
    assert "t" in out
    assert "logp_max_t" in out
    assert "logp_max_size" in out
    assert "logp_max_mass" in out
    assert "h0_max_t" in out
    assert "h0_max_size" in out
    assert "h0_max_mass" in out
    assert out["h0_max_t"].size == n_perm
    assert out["h0_max_size"].size == n_perm
    assert out["h0_max_mass"].size == n_perm
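
# logp_max_size and logp_max_mass appear to be the cluster-size and
# cluster-mass based FWE-corrected maps that become available when both a
# cluster-forming threshold and a masker are passed.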

def test_sanitize_inputs_permuted_ols(design):
    """Smoke test for input sanitization."""
    target_vars, tested_vars, *_ = design
    _sanitize_inputs_permuted_ols(
        n_jobs=-1,
        output_type="dict",
        tfce=False,
        threshold=None,
        target_vars=target_vars,
        tested_vars=tested_vars,
    )


def test_permuted_ols_warnings_n_perm_n_job(cluster_level_design, masker):
    """Check that proper warnings are thrown depending on n_jobs vs n_perm."""
    target_var, tested_var = cluster_level_design

    # n_perm > n_jobs --> no warning
    with pytest.warns() as record:
        permuted_ols(
            tested_var,
            target_var,
            n_perm=4,
            n_jobs=1,
            masker=masker,
        )
    assert all(
        "perform more permutations" not in str(x.message) for x in record
    )

    # n_perm <= n_jobs and n_jobs > 0 --> warning
    with pytest.warns(
        UserWarning,
        match="perform more permutations",
    ):
        permuted_ols(tested_var, target_var, n_perm=1, masker=masker, n_jobs=2)

def test_cluster_level_parameters_warnings(cluster_level_design, masker):
    """Test warnings for cluster-level inference parameter combinations."""
    target_var, tested_var = cluster_level_design

    # masker is defined, but threshold is not.
    # no cluster-level inference is performed, but there's a warning.
    with pytest.warns(
        DeprecationWarning,
        match='"legacy" output structure for "permuted_ols" is deprecated',
    ):
        out = permuted_ols(
            tested_var,
            target_var,
            model_intercept=False,
            two_sided_test=False,
            n_perm=N_PERM,
            random_state=0,
            masker=masker,
            output_type="legacy",
        )

    assert isinstance(out, tuple)

    # threshold is defined, but output_type is "legacy".
    # raise a warning, and get a dictionary.
    with pytest.warns(
        Warning,
        match='If "threshold" is not None',
    ):
        out = permuted_ols(
            tested_var,
            target_var,
            model_intercept=False,
            two_sided_test=False,
            n_perm=0,
            random_state=0,
            threshold=0.001,
            masker=masker,
            output_type="legacy",
        )

    assert isinstance(out, dict)

    # output_type is "legacy".
    # raise a deprecation warning, but get the standard output.
    with pytest.deprecated_call():
        out = permuted_ols(
            tested_var,
            target_var,
            model_intercept=False,
            two_sided_test=False,
            n_perm=N_PERM,
            random_state=0,
            output_type="legacy",
        )

    assert isinstance(out, tuple)


def test_permuted_ols_no_covar_warning(rng):
    """Ensure that a warning is raised when a given voxel has all zeros."""
    target_var, tested_var, *_ = _create_design(
        rng, n_samples=N_SAMPLES, n_descriptors=10, n_regressors=1
    )
    output_1 = permuted_ols(
        tested_var,
        target_var,
        model_intercept=False,
        n_perm=N_PERM,
        random_state=0,
        output_type="dict",
        verbose=1,
    )

    # zero out the first descriptor (and pass a ravelized tested_var)
    # to trigger the all-zeros warning
    target_var[:, 0] = 0

    with pytest.warns(UserWarning, match="have zeros across all samples"):
        output_2 = permuted_ols(
            np.ravel(tested_var),
            target_var,
            model_intercept=False,
            n_perm=N_PERM,
            random_state=0,
            output_type="dict",
        )

    assert np.array_equal(output_1["t"][:, 1:], output_2["t"][:, 1:])
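    # (the check above relies on each descriptor, i.e. each column of
    # target_var, being fit independently, so zeroing the first column should
    # leave the other t-scores unchanged)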

def test_permuted_ols_with_multiple_constants_and_covars_warnings(design):
    """Check warnings for constants and covariates."""
    target_var, tested_var, *_ = design

    # Multiple intercepts should raise a warning
    # In confounding vars
    with pytest.warns(UserWarning, match="Multiple columns across"):
        confounding_vars = np.ones([N_SAMPLES, 2])
        permuted_ols(
            tested_var,
            target_var,
            confounding_vars,
            n_perm=0,
            random_state=0,
        )

    # Across tested vars and confounding vars
    with pytest.warns(UserWarning, match="Multiple columns across"):
        confounding_vars = np.ones([N_SAMPLES, 1])
        tested_var = np.ones([N_SAMPLES, 1])
        permuted_ols(
            tested_var,
            target_var,
            confounding_vars,
            n_perm=0,
            random_state=0,
        )


def test_tfce_smoke_legacy_warnings():
    """Check that requesting a legacy output throws a warning."""
    target_var, tested_var, masker, *_ = _tfce_design()

    # tfce is True, but output_type is "legacy".
    # raise a warning, and get a dictionary.
    with pytest.warns(UserWarning, match="Overriding."):
        out = permuted_ols(
            tested_var,
            target_var,
            model_intercept=False,
            two_sided_test=False,
            n_perm=0,
            random_state=0,
            masker=masker,
            tfce=True,
            output_type="legacy",
        )

    assert isinstance(out, dict)

    # output_type is "legacy".
    # raise a deprecation warning, but get the standard output.
    with pytest.deprecated_call():
        out = permuted_ols(
            tested_var,
            target_var,
            model_intercept=False,
            two_sided_test=False,
            n_perm=N_PERM,
            random_state=0,
            output_type="legacy",
        )

    assert isinstance(out, tuple)


def test_permuted_ols_no_covar_n_job_error(dummy_design):
    """Check that an invalid n_jobs value will raise a ValueError."""
    target_var, tested_var, *_ = dummy_design

    with pytest.raises(
        ValueError, match="'n_jobs == 0' is not a valid choice."
    ):
        permuted_ols(
            tested_var,
            target_var,
            n_jobs=0,  # not allowed
        )


def test_permuted_ols_target_vars_error(dummy_design):
    """Check the dimension of target_vars."""
    target_var, tested_var, *_ = dummy_design

    with pytest.raises(
        ValueError, match="'target_vars' should be a 2D array."
    ):
        permuted_ols(
            tested_var,
            target_var.ravel(),  # must be 2D
        )


def test_permuted_ols_type_n_perm(dummy_design):
    """Check the type of n_perm."""
    target_var, tested_var, *_ = dummy_design

    with pytest.raises(TypeError, match="'n_perm' should be of type"):
        permuted_ols(tested_var, target_var, n_perm=0.1)


def test_tfce_no_masker_error():
    """Raise error when no masker is passed for TFCE."""
    target_var, tested_var, *_ = _tfce_design()

    with pytest.raises(ValueError, match="masker must be provided"):
        permuted_ols(
            tested_var,
            target_var,
            model_intercept=False,
            two_sided_test=False,
            n_perm=N_PERM,
            tfce=True,
        )


def test_cluster_level_parameters_error_no_masker(cluster_level_design):
    """Check the error raised when a threshold is given without a masker."""
    target_var, tested_var = cluster_level_design

    # threshold is defined, indicating cluster-level inference should be done,
    # but masker is not defined.
    with pytest.raises(
        ValueError,
        match='If "threshold" is not None, masker must be defined as well.',
    ):
        permuted_ols(
            tested_var,
            target_var,
            two_sided_test=False,
            threshold=0.001,
            tfce=False,
        )