Coverage for nilearn/glm/tests/test_contrasts.py: 0%
152 statements

import numpy as np
import pytest
import scipy.stats as st
from numpy.testing import assert_almost_equal
from sklearn.datasets import make_regression
from sklearn.linear_model import LinearRegression

from nilearn.glm.contrasts import (
    Contrast,
    _compute_fixed_effects_params,
    compute_contrast,
    compute_fixed_effect_contrast,
    expression_to_contrast_vector,
)
from nilearn.glm.first_level import run_glm


@pytest.mark.parametrize(
    "expression, design_columns, expected",
    [
        (
            "face / 10 + (window - face) * 2 - house",
            ["a", "face", "xy_z", "house", "window"],
            [0.0, -1.9, 0.0, -1.0, 2.0],
        ),
        (
            "xy_z",
            ["a", "face", "xy_z", "house", "window"],
            [0.0, 0.0, 1.0, 0.0, 0.0],
        ),
        ("a - b", ["a", "b", "a - b"], [0.0, 0.0, 1.0]),
        ("column_1", ["column_1"], [1.0]),
    ],
)
def test_expression_to_contrast_vector(expression, design_columns, expected):
    contrast = expression_to_contrast_vector(
        expression=expression, design_columns=design_columns
    )
    assert np.allclose(contrast, expected)
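

# Illustrative sketch (not one of the original tests): a worked instance of
# the first parametrization above. In "face / 10 + (window - face) * 2 - house"
# the terms collect to face * (1 / 10 - 2) = -1.9, window * 2 and house * -1,
# which is why the expected vector over ["a", "face", "xy_z", "house", "window"]
# is [0.0, -1.9, 0.0, -1.0, 2.0]. The helper name is hypothetical and the
# leading underscore keeps pytest from collecting it.
def _example_expression_to_contrast_vector():
    return expression_to_contrast_vector(
        expression="face / 10 + (window - face) * 2 - house",
        design_columns=["a", "face", "xy_z", "house", "window"],
    )  # expected to be close to [0.0, -1.9, 0.0, -1.0, 2.0]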


def test_expression_to_contrast_vector_error():
    with pytest.raises(ValueError, match="invalid python identifiers"):
        expression_to_contrast_vector(
            expression="0-1", design_columns=["0", "1"]
        )


@pytest.fixture
def set_up_glm():
    """Return a factory that fits a GLM on random data.

    The factory returns the labels and results from run_glm together with
    the number of regressors q.
    """

    def _set_up_glm(rng, noise_model, bins=100):
        n, p, q = 100, 80, 10
        X, Y = (
            rng.standard_normal(size=(p, q)),
            rng.standard_normal(size=(p, n)),
        )
        labels, results = run_glm(Y, X, noise_model, bins=bins)
        return labels, results, q

    return _set_up_glm


def test_deprecation_contrast_type(rng, set_up_glm):
    """Throw a deprecation warning when contrast_type is passed."""
    labels, results, q = set_up_glm(rng, "ar1")
    con_val = np.eye(q)[0]

    with pytest.deprecated_call(match="0.13.0"):
        compute_contrast(
            labels=labels,
            regression_result=results,
            con_val=con_val,
            contrast_type="t",
        )


def test_t_contrast(rng, set_up_glm):
    """Check that t-contrast z-scores on noise data are roughly N(0, 1)."""
    labels, results, q = set_up_glm(rng, "ar1")
    con_val = np.eye(q)[0]

    z_vals = compute_contrast(labels, results, con_val).z_score()

    assert_almost_equal(z_vals.mean(), 0, 0)
    assert_almost_equal(z_vals.std(), 1, 0)


@pytest.mark.parametrize("model", ["ols", "ar1"])
def test_f_contrast(rng, set_up_glm, model):
    labels, results, q = set_up_glm(rng, model)
    for con_val in [np.eye(q)[0], np.eye(q)[:3]]:
        z_vals = compute_contrast(
            labels, results, con_val, stat_type="F"
        ).z_score()

        assert_almost_equal(z_vals.mean(), 0, 0)
        assert_almost_equal(z_vals.std(), 1, 0)


def test_t_contrast_add(set_up_glm, rng):
    labels, results, q = set_up_glm(rng, "ols")
    c1, c2 = np.eye(q)[0], np.eye(q)[1]

    con = compute_contrast(labels, results, c1) + compute_contrast(
        labels, results, c2
    )

    z_vals = con.z_score()

    assert_almost_equal(z_vals.mean(), 0, 0)
    assert_almost_equal(z_vals.std(), 1, 0)


def test_fixed_effect_contrast(set_up_glm, rng):
    labels, results, q = set_up_glm(rng, "ols")
    c1, c2 = np.eye(q)[0], np.eye(q)[1]

    con = compute_fixed_effect_contrast(
        [labels, labels], [results, results], [c1, c2]
    )

    z_vals = con.z_score()

    assert_almost_equal(z_vals.mean(), 0, 0)
    assert_almost_equal(z_vals.std(), 1, 0)


def test_fixed_effect_contrast_nonzero_effect():
    """Check that the fixed-effect estimate matches the OLS coefficient,
    both for a single run and for three identical runs.
    """
    X, y = make_regression(n_features=5, n_samples=20, random_state=0)
    y = y[:, None]
    labels, results = run_glm(y, X, "ols")
    coef = LinearRegression(fit_intercept=False).fit(X, y).coef_
    for i in range(X.shape[1]):
        contrast = np.zeros(X.shape[1])
        contrast[i] = 1.0
        fixed_effect = compute_fixed_effect_contrast(
            [labels],
            [results],
            [contrast],
        )

        assert_almost_equal(fixed_effect.effect_size(), coef.ravel()[i])

        fixed_effect = compute_fixed_effect_contrast(
            [labels] * 3, [results] * 3, [contrast] * 3
        )

        assert_almost_equal(fixed_effect.effect_size(), coef.ravel()[i])


def test_f_contrast_add(set_up_glm, rng):
    labels, results, q = set_up_glm(rng, "ar1")
    c1, c2 = np.eye(q)[:2], np.eye(q)[2:4]

    con = compute_contrast(labels, results, c1) + compute_contrast(
        labels, results, c2
    )

    z_vals = con.z_score()

    assert_almost_equal(z_vals.mean(), 0, 0)
    assert_almost_equal(z_vals.std(), 1, 0)

    # now test with a dependent contrast: adding a contrast to itself
    # should double the effect, the variance, and the statistic
    con1 = compute_contrast(labels, results, c1)
    con2 = compute_contrast(labels, results, c1) + compute_contrast(
        labels, results, c1
    )

    assert_almost_equal(con1.effect * 2, con2.effect)
    assert_almost_equal(con1.variance * 2, con2.variance)
    assert_almost_equal(con1.stat() * 2, con2.stat())
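

# A sketch of why the dependent-contrast assertions above hold, assuming the F
# statistic is (up to a constant) the quadratic form effect.T @ variance^-1 @
# effect: adding a contrast to itself doubles both effect and variance, and
# scaling both by 2 scales that quadratic form by a factor 4 / 2 = 2.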


def test_contrast_mul(set_up_glm, rng):
    labels, results, q = set_up_glm(rng, "ar1")
    for c1 in [np.eye(q)[0], np.eye(q)[:3]]:
        con1 = compute_contrast(labels, results, c1)
        con2 = con1 * 2
        assert_almost_equal(con1.effect * 2, con2.effect)
        assert_almost_equal(con1.z_score(), con2.z_score())
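

# A sketch of why the z-scores above match, assuming scalar multiplication
# scales the effect by the factor and the variance by the factor squared:
# the statistic (effect / sqrt(variance) for a t contrast, the corresponding
# quadratic form for an F contrast) is then left unchanged.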


def test_contrast_values(set_up_glm, rng):
    # Note: this check is circular (it compares compute_contrast against the
    # same underlying Tcontrast / Fcontrast code) and should be removed.
    labels, results, q = set_up_glm(rng, "ar1", bins=1)

    # t test
    cval = np.eye(q)[0]
    con = compute_contrast(labels, results, cval)
    t_ref = next(iter(results.values())).Tcontrast(cval).t

    assert_almost_equal(np.ravel(con.stat()), t_ref)

    # F test
    cval = np.eye(q)[:3]
    con = compute_contrast(labels, results, cval)
    F_ref = next(iter(results.values())).Fcontrast(cval).F

    # Note: the values are not strictly equal;
    # this seems to be related to a bug in Mahalanobis.
    assert_almost_equal(np.ravel(con.stat()), F_ref, 3)


def test_low_level_fixed_effects(rng):
    p = 100
    # X1 is an effect estimate and V1 its variance for "run 1"
    X1, V1 = rng.standard_normal(p), np.ones(p)
    # same thing for a "run 2"
    X2, V2 = 2 * X1, 4 * V1
    # compute the fixed-effects estimate Xf, its variance Vf,
    # and the corresponding t statistic tf
    Xf, Vf, tf, zf = _compute_fixed_effects_params(
        [X1, X2], [V1, V2], dofs=[100, 100], precision_weighted=False
    )
    # check that the values are correct
    assert_almost_equal(Xf, 1.5 * X1)
    assert_almost_equal(Vf, 1.25 * V1)
    assert_almost_equal(tf, (Xf / np.sqrt(Vf)).ravel())
    # the z score converts the t statistic (200 dofs) to a standard normal
    # deviate with the same upper-tail probability
    assert_almost_equal(zf, st.norm.isf(st.t.sf(tf, 200)))

    # same thing, but now with precision weighting
    Xw, Vw, _, _ = _compute_fixed_effects_params(
        [X1, X2], [V1, V2], dofs=[200, 200], precision_weighted=True
    )
    assert_almost_equal(Xw, 1.2 * X1)
    assert_almost_equal(Vw, 0.8 * V1)

    # F test
    XX1 = np.vstack((X1, X1))
    XX2 = np.vstack((X2, X2))

    Xw, Vw, *_ = _compute_fixed_effects_params(
        [XX1, XX2], [V1, V2], dofs=[200, 200], precision_weighted=False
    )
    assert_almost_equal(Xw, 1.5 * XX1)
    assert_almost_equal(Vw, 1.25 * V1)

    # check with 2D input
    Xw, Vw, *_ = _compute_fixed_effects_params(
        [X1[:, np.newaxis], X2[:, np.newaxis]],
        [V1, V2],
        dofs=[200, 200],
        precision_weighted=False,
    )
    assert_almost_equal(Xw, 1.5 * X1[:, np.newaxis])
    assert_almost_equal(Vw, 1.25 * V1)
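

# Worked arithmetic behind the expected values above (a sketch, not an extra
# test): with X2 = 2 * X1 and V2 = 4 * V1, the unweighted fixed effect is the
# plain mean (X1 + 2 * X1) / 2 = 1.5 * X1 with variance (V1 + 4 * V1) / 2**2 =
# 1.25 * V1. With precision weighting the weights are 1 / V1 = 1 and
# 1 / V2 = 0.25, giving Xw = (X1 + 0.25 * 2 * X1) / 1.25 = 1.2 * X1 and
# Vw = 1 / 1.25 = 0.8 * V1.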


def test_one_minus_pvalue():
    effect = np.ones((1, 3))
    variance = effect[0]

    contrast = Contrast(effect, variance, stat_type="t")

    assert np.allclose(contrast.one_minus_pvalue(), 0.84, 1)
    assert np.allclose(contrast.stat_, 1.0, 1)
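

# Worked values for the test above (a sketch): with unit effect and unit
# variance the t statistic is 1 / sqrt(1) = 1, and one minus the p-value is
# then roughly the standard normal CDF at 1, i.e. about 0.84.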


def test_deprecation_contrast_type_attribute():
    effect = np.ones((1, 3))
    variance = effect[0]

    with pytest.deprecated_call(match="0.13.0"):
        contrast = Contrast(effect, variance, contrast_type="t")

    with pytest.deprecated_call(match="0.13.0"):
        contrast.contrast_type  # noqa: B018


@pytest.mark.parametrize(
    "effect, variance, match",
    [
        (
            np.ones((3, 1, 1)),
            np.ones(1),
            "Effect array should have 1 or 2 dimensions",
        ),
        (
            np.ones((1, 3)),
            np.ones((1, 1)),
            "Variance array should have 1 dimension",
        ),
    ],
)
def test_improper_contrast_inputs(effect, variance, match):
    with pytest.raises(ValueError, match=match):
        Contrast(effect, variance, stat_type="t")


def test_automatic_t2f_conversion():
    effect = np.ones((5, 3))
    variance = np.ones(5)
    contrast = Contrast(effect, variance, stat_type="t")
    assert contrast.stat_type == "F"
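

# Descriptive note (an interpretation of the test above): the effect has more
# than one row, so it presumably cannot be summarized by a single t statistic
# per voxel; the test documents that such a "t" contrast is converted to "F".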


def test_invalid_contrast_type():
    effect = np.ones((1, 3))
    variance = np.ones(1)
    with pytest.raises(ValueError, match="is not a valid stat_type."):
        Contrast(effect, variance, stat_type="foo")


def test_contrast_padding(rng):
    n, p, q = 100, 80, 10
    X, Y = rng.standard_normal(size=(p, q)), rng.standard_normal(size=(p, n))
    labels, results = run_glm(Y, X, "ar1")

    con_val = [1, 1]

    with pytest.warns(
        UserWarning, match="The rest of the contrast was padded with zeros."
    ):
        compute_contrast(labels, results, con_val).z_score()

    con_val = np.eye(q)[:3, :3]
    compute_contrast(labels, results, con_val, stat_type="F").z_score()
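

# Worked detail for the padding test (a sketch): the design has q = 10 columns,
# so the 2-element contrast [1, 1] is padded with 8 trailing zeros (with a
# warning), and each row of the 3 x 3 F contrast is likewise padded out to the
# 10 design columns.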