1"""Tests for nilearn.mass_univariate._utils.""" 

2 

3import math 

4 

5import numpy as np 

6import pytest 

7from numpy.testing import assert_array_almost_equal 

8from scipy.ndimage import generate_binary_structure 

9 

10from nilearn.conftest import _rng 

11from nilearn.mass_univariate import _utils 

12from nilearn.mass_univariate.tests._testing import ( 

13 get_tvalue_with_alternative_library, 

14) 

15 

16 

17@pytest.fixture 

18def null(): 

19 """Return a dummy null distribution that can be reused across tests.""" 

    return [-10, -9, -9, -3, -2, -1, -1, 0, 1, 1, 1, 2, 3, 3, 4, 4, 7, 8, 8, 9]


@pytest.mark.parametrize(
    "two_sided_test, dh, true_max_tfce",
    [
        (
            False,
            "auto",
            5050,
        ),  # One-sided where positive cluster has highest TFCE
        (
            True,
            "auto",
            5555,
        ),  # Two-sided where negative cluster has highest TFCE
        (False, 1, 550),  # One-sided with preset dh
    ],
)
def test_calculate_tfce(two_sided_test, dh, true_max_tfce):
    """Test calculate_tfce."""

    arr4d = np.zeros((10, 10, 10, 1))
    bin_struct = generate_binary_structure(3, 1)

    # 10-voxel positive cluster, high intensity
    arr4d[:2, :2, :2, 0] = 10
    arr4d[0, 2, 0, 0] = 10
    arr4d[2, 0, 0, 0] = 10

    # 10-voxel negative cluster, higher intensity
    arr4d[3:5, 3:5, 3:5, 0] = -11
    arr4d[3, 5, 3, 0] = -11
    arr4d[5, 3, 3, 0] = -11

    test_tfce_arr4d = _utils.calculate_tfce(
        arr4d,
        bin_struct=bin_struct,
        E=1,
        H=1,
        dh=dh,
        two_sided_test=two_sided_test,
    )

    assert test_tfce_arr4d.shape == arr4d.shape
    assert np.max(np.abs(test_tfce_arr4d)) == true_max_tfce


@pytest.mark.parametrize(
    "test_values, expected_p_value", [(9, 0.95), (-9, 0.15), (0, 0.4)]
)
def test_null_to_p_float_1_tailed_lower_tailed(
    null, test_values, expected_p_value
):
    """Test null_to_p with single float input, lower-tailed."""
    assert math.isclose(
        _utils.null_to_p(test_values, null, alternative="smaller"),
        expected_p_value,
    )


@pytest.mark.parametrize(
    "test_values, expected_p_value", [(9, 0.05), (-9, 0.95), (0, 0.65)]
)
def test_null_to_p_float_1_tailed_upper_tailed(
    test_values, expected_p_value, null
):
    """Test null_to_p with single float input, upper-tailed."""
    assert math.isclose(
        _utils.null_to_p(test_values, null, alternative="larger"),
        expected_p_value,
    )


@pytest.mark.parametrize(
    "test_values, expected_p_value",
    [
        (0, 0.95),
        (9, 0.2),
        (10, 0.05),
        (
            20,
            0.05,
        ),  # Still 0.05 because minimum valid p-value is 1 / len(null)
    ],
)
def test_null_to_p_float_2_tailed(test_values, expected_p_value, null):
    """Test null_to_p with single float input, two-sided."""

    result = _utils.null_to_p(test_values, null, alternative="two-sided")
    assert result == _utils.null_to_p(
        test_values * -1, null, alternative="two-sided"
    )
    assert math.isclose(result, expected_p_value)


def test_null_to_p_float_error(null):
    """Check that an invalid alternative parameter raises an error."""
    with pytest.raises(
        ValueError, match='Argument "alternative" must be one of'
    ):
        _utils.null_to_p(9, null, alternative="raise")


@pytest.mark.parametrize(
    "alternative, expected_p_value",
    [("two-sided", 1 / 10000), ("smaller", 1 - 1 / 10000)],
)
def test_null_to_p_float_with_extreme_values(
    alternative, expected_p_value, rng
):
    """Test that the 1 / len(null) bound is preserved with extreme values."""
    null = rng.normal(size=10000)
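    # An observed value of 20 lies far outside a standard normal null of
    # size 10000, so the p-value should hit the attainable bound of
    # 1 / 10000 (or its complement for alternative="smaller").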

    result = _utils.null_to_p(20, null, alternative=alternative)
    assert math.isclose(
        result,
        expected_p_value,
    )


def test_null_to_p_array(rng):
    """Test null_to_p with 1d array input."""
    N = 10000
    nulldist = rng.normal(size=N)
    t = np.sort(rng.normal(size=N))
    p = np.sort(_utils.null_to_p(t, nulldist))

    assert p.shape == (N,)
    assert (p < 1).all()
    assert (p > 0).all()

    # Resulting distribution should be roughly uniform
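    # (a Uniform(0, 1) variable has mean 1/2 and variance 1/12)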

    assert np.abs(p.mean() - 0.5) < 0.02
    assert np.abs(p.var() - 1 / 12) < 0.02


@pytest.fixture
def _arr4d():
    _arr4d = np.zeros((10, 10, 10, 1))
    _arr4d[:2, :2, :2, 0] = 5  # 8-voxel cluster, high intensity
    _arr4d[7:, 7:, 7:, 0] = 1  # 27-voxel cluster, low intensity
    _arr4d[6, 6, 6, 0] = 1  # corner touching second cluster
    _arr4d[6, 6, 8, 0] = 1  # edge touching second cluster
    _arr4d[3:5, 3:5, 3:5, 0] = -10  # negative cluster, very high intensity
    _arr4d[5:6, 3:5, 3:5, 0] = 1  # cluster touching negative one
    return _arr4d


@pytest.mark.parametrize(
    "bin_struct, two_sided_test, true_size, true_mass",
    [
        (
            generate_binary_structure(3, 1),
            False,
            27,
            39.992,
        ),  # One-sided test: largest cluster doesn't have highest mass
        (
            generate_binary_structure(3, 1),
            True,
            27,
            79.992,
        ),  # Two-sided test where negative cluster has higher mass
        (
            generate_binary_structure(3, 2),
            True,
            28,
            79.992,
        ),  # Two-sided test with edge connectivity
        # should include edge-connected single voxel cluster
        (
            generate_binary_structure(3, 3),
            True,
            29,
            79.992,
        ),  # Two-sided test with corner connectivity
        # should include corner-connected single voxel cluster
    ],
)
def test_calculate_cluster_measures(
    _arr4d, bin_struct, two_sided_test, true_size, true_mass
):
    """Test calculate_cluster_measures.

    true_mass : (8 vox * 5 intensity) - (8 vox * 0.001 thresh)
    """

    test_size, test_mass = _utils.calculate_cluster_measures(
        _arr4d,
        threshold=0.001,
        bin_struct=bin_struct,
        two_sided_test=two_sided_test,
    )

    assert test_size[0] == true_size
    assert test_mass[0] == true_mass


def test_calculate_cluster_measures_on_empty_array():
    """Check that an empty array has 0 mass and size."""
    test_size, test_mass = _utils.calculate_cluster_measures(
        np.zeros((10, 10, 10, 1)),
        threshold=0.001,
        bin_struct=generate_binary_structure(3, 1),
        two_sided_test=True,
    )

    true_size = 0
    true_mass = 0
    assert test_size[0] == true_size
    assert test_mass[0] == true_mass


def test_t_score_with_covars_and_normalized_design_nocovar(rng):
    """Test t-score computation without covariates."""
    # Normalized data
    n_samples = 50

    # generate data
    var1 = np.ones((n_samples, 1)) / np.sqrt(n_samples)
    var2 = rng.standard_normal((n_samples, 1))
    var2 = var2 / np.sqrt(np.sum(var2**2, 0))  # normalize

    # compute t-scores with nilearn routine
    t_val_own = _utils.t_score_with_covars_and_normalized_design(var1, var2)

    # compute t-scores with linalg or statsmodels
    t_val_alt = get_tvalue_with_alternative_library(var1, var2)
    assert_array_almost_equal(t_val_own, t_val_alt)


def test_t_score_with_covars_and_normalized_design_withcovar(rng):
    """Test t-score computation with covariates."""
    # Normalized data
    n_samples = 50

    # generate data
    var1 = np.ones((n_samples, 1)) / np.sqrt(n_samples)  # normalized
    var2 = rng.standard_normal((n_samples, 1))
    var2 = var2 / np.sqrt(np.sum(var2**2, 0))  # normalize
    covars = np.eye(n_samples, 3)  # covars is orthogonal
    covars[3] = -1  # covars is orthogonal to var1
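    # Setting row 3 to -1 gives each column a single +1 and a single -1, so
    # every column sums to zero and is orthogonal to the constant var1.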

    covars = _utils.orthonormalize_matrix(covars)

    # nilearn t-score
    own_score = _utils.t_score_with_covars_and_normalized_design(
        var1,
        var2,
        covars,
    )

    # compute t-scores with linalg or statsmodels
    ref_score = get_tvalue_with_alternative_library(var1, var2, covars)
    assert_array_almost_equal(own_score, ref_score)


@pytest.mark.parametrize("two_sided_test", [True, False])
@pytest.mark.parametrize(
    "dh",
    [
        1 / 49,
        0.1,
        0.9,
        "auto",
    ],
)
@pytest.mark.parametrize(
    "arr3d",
    [
        np.ones((10, 11), dtype="float"),
        _rng().random((10, 11), dtype="float"),
        _rng().normal(size=(10, 11)),
    ],
)
def test_return_score_threshs(arr3d, two_sided_test, dh):
    """Check the range of thresholds to test.

    Also test for robustness to nan values.
    """
    arr3d[0, 0] = np.nan

    score_threshs = _utils._return_score_threshs(
        arr3d, dh=dh, two_sided_test=two_sided_test
    )

    max_score = (
        np.nanmax(np.abs(arr3d)) if two_sided_test else np.nanmax(arr3d)
    )
    assert (score_threshs <= max_score).all()

    assert len(score_threshs) >= 10


def test_warning_n_steps_return_score_threshs():
    """Check warnings when the number of TFCE steps is out of range."""
    arr3d = np.ones((10, 11), dtype="float")
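    # With an all-ones array the maximum score is 1, so dh=0.9 allows fewer
    # than the minimum of 10 steps, while dh=0.0001 would give 10000 steps,
    # more than the apparent cap of 1000 enforced below.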

    with pytest.warns(UserWarning, match="Setting it to 10"):
        score_threshs = _utils._return_score_threshs(
            arr3d, dh=0.9, two_sided_test=False
        )
    assert len(score_threshs) == 10

    with pytest.warns(UserWarning, match="Setting it to 1000"):
        score_threshs = _utils._return_score_threshs(
            arr3d, dh=0.0001, two_sided_test=False
        )
    assert len(score_threshs) == 1000