Coverage for nilearn/glm/_utils.py: 10%
84 statements
coverage.py v7.9.1, created at 2025-06-16 12:39 +0200
1"""Misc utilities for the library."""
3from warnings import warn
5import numpy as np
6import scipy.linalg as spl
7from scipy.linalg.lapack import get_lapack_funcs
8from scipy.stats import norm
10from nilearn._utils.logger import find_stack_level

def z_score(pvalue, one_minus_pvalue=None):
    """Return the z-score(s) corresponding to certain p-value(s) and, \
    optionally, one_minus_pvalue(s) provided as inputs.

    Parameters
    ----------
    pvalue : float or 1-d array shape=(n_pvalues,)
        P-values computed using the survival function.

    one_minus_pvalue : float or 1-d array shape=(n_one_minus_pvalues,), \
        optional
        Expected to be the value returned by
        nilearn/glm/contrasts.py::one_minus_pvalue,
        which computes the p-value using the cumulative distribution function,
        with n_one_minus_pvalues = n_pvalues.

    Returns
    -------
    z_scores : 1-d array shape=(n_z_scores,), with n_z_scores = n_pvalues

    """
    pvalue = np.clip(pvalue, 1.0e-300, 1.0 - 1.0e-16)
    z_scores_sf = norm.isf(pvalue)

    if one_minus_pvalue is not None:
        one_minus_pvalue = np.clip(one_minus_pvalue, 1.0e-300, 1.0 - 1.0e-16)
        z_scores_cdf = norm.ppf(one_minus_pvalue)
        z_scores = np.empty(pvalue.size)
        # Prefer the CDF-based estimate where the SF-based z-score is
        # negative, since it is the more precise of the two in that regime.
        use_cdf = z_scores_sf < 0
        use_sf = np.logical_not(use_cdf)
        z_scores[np.atleast_1d(use_cdf)] = z_scores_cdf[use_cdf]
        z_scores[np.atleast_1d(use_sf)] = z_scores_sf[use_sf]
    else:
        z_scores = z_scores_sf
    return z_scores
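
# Illustrative usage sketch, not part of the original module. It round-trips
# a few known z-scores through scipy's survival function; the helper name
# `_example_z_score` is purely hypothetical.
def _example_z_score():
    p = norm.sf([1.0, 2.0, 3.0])  # p-values of z = 1, 2, 3
    z = z_score(p)  # recovers approximately [1., 2., 3.]
    # Passing one_minus_pvalue keeps precision when the survival function
    # underflows for very large z-scores.
    z_precise = z_score(p, one_minus_pvalue=norm.cdf([1.0, 2.0, 3.0]))
    return z, z_precise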

def multiple_fast_inverse(a):
    """Compute the inverse of a set of square matrices.

    Parameters
    ----------
    a : array_like of shape (n_samples, n_dim, n_dim)
        Set of square matrices to be inverted. `a` is changed in place.

    Returns
    -------
    a : ndarray
        The inverses of the input matrices, computed in place.

    Raises
    ------
    LinAlgError :
        If `a` is singular.

    ValueError :
        If `a` does not have shape (n_samples, n_dim, n_dim).

    Notes
    -----
    This function is borrowed from scipy.linalg.inv,
    but with some customizations for speed-up.

    """
    if a.shape[1] != a.shape[2]:
        raise ValueError("a must have shape (n_samples, n_dim, n_dim)")

    a1, n = a[0], a.shape[0]
    getrf, getri, getri_lwork = get_lapack_funcs(
        ("getrf", "getri", "getri_lwork"), (a1,)
    )
    for i in range(n):
        # LU-factorize each matrix in place, then invert it from its LU
        # factors.
        if (
            getrf.module_name[:7] == "clapack"
            and getri.module_name[:7] != "clapack"
        ):
            # ATLAS 3.2.1 has getrf but not getri.
            lu, piv, info = getrf(
                np.transpose(a[i]), rowmajor=0, overwrite_a=True
            )
            a[i] = np.transpose(lu)
        else:
            a[i], piv, info = getrf(a[i], overwrite_a=True)
        if info == 0:
            if getri.module_name[:7] == "flapack":
                lwork, _ = getri_lwork(a1.shape[0])
                # XXX: the following line fixes curious SEGFAULT when
                # benchmarking 500x500 matrix inverse. This seems to
                # be a bug in LAPACK ?getri routine because if lwork is
                # minimal (when using lwork[0] instead of lwork[1]) then
                # all tests pass. Further investigation is required if
                # more such SEGFAULTs occur.
                lwork = int(1.01 * lwork.real)
                a[i], _ = getri(a[i], piv, lwork=lwork, overwrite_lu=1)
            else:  # clapack
                a[i], _ = getri(a[i], piv, overwrite_lu=1)
        else:
            raise ValueError("Matrix LU decomposition failed")
    return a
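
# Illustrative usage sketch, not part of the original module. It inverts a
# stack of well-conditioned SPD matrices and checks the result against
# numpy's reference inverse; `_example_multiple_fast_inverse` is hypothetical.
def _example_multiple_fast_inverse():
    rng = np.random.default_rng(0)
    a = rng.standard_normal((5, 4, 4))
    a = a @ np.transpose(a, (0, 2, 1)) + 4 * np.eye(4)  # SPD, invertible
    expected = np.linalg.inv(a)
    # Pass a copy because the input stack is overwritten in place.
    inv = multiple_fast_inverse(a.copy())
    return np.allclose(inv, expected)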

def multiple_mahalanobis(effect, covariance):
    """Return the squared Mahalanobis distance for a given set of samples.

    Parameters
    ----------
    effect : array of shape (n_features, n_samples)
        Each column represents a vector to be evaluated.

    covariance : array of shape (n_features, n_features, n_samples)
        Corresponding covariance models stacked along the last axis.

    Returns
    -------
    sqd : array of shape (n_samples,)
        The squared distances (one per sample).

    """
    # check size
    if effect.ndim == 1:
        effect = effect[:, np.newaxis]
    if covariance.ndim == 2:
        covariance = covariance[:, :, np.newaxis]
    if effect.shape[0] != covariance.shape[0]:
        raise ValueError("Inconsistent shape for effect and covariance")
    if covariance.shape[0] != covariance.shape[1]:
        raise ValueError("Inconsistent shape for covariance")

    # transpose and make contiguous for the sake of speed
    Xt, Kt = np.ascontiguousarray(effect.T), np.ascontiguousarray(covariance.T)

    # compute the inverse of the covariances
    Kt = multiple_fast_inverse(Kt)

    # derive the squared Mahalanobis distances
    sqd = np.sum(np.sum(Xt[:, :, np.newaxis] * Xt[:, np.newaxis] * Kt, 1), 1)
    return sqd
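
# Illustrative usage sketch, not part of the original module. With identity
# covariances the squared Mahalanobis distance reduces to the squared
# Euclidean norm of each column; `_example_multiple_mahalanobis` is
# hypothetical.
def _example_multiple_mahalanobis():
    rng = np.random.default_rng(0)
    effect = rng.standard_normal((3, 10))  # (n_features, n_samples)
    covariance = np.repeat(np.eye(3)[:, :, np.newaxis], 10, axis=2)
    sqd = multiple_mahalanobis(effect, covariance)
    return np.allclose(sqd, np.sum(effect**2, axis=0))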

def full_rank(X, cmax=1e15):
    """Compute the condition number of X; if it is larger than cmax, \
    return a regularized matrix with a condition number smaller than cmax.

    Parameters
    ----------
    X : array of shape (nrows, ncols)
        Input array.

    cmax : float, default=1e15
        Tolerance for condition number.

    Returns
    -------
    X : array of shape (nrows, ncols)
        Output array.

    cond : float
        Actual condition number.

    """
    U, s, V = spl.svd(X, full_matrices=False)
    smax, smin = s.max(), s.min()
    cond = smax / smin
    if cond < cmax:
        return X, cond

    warn(
        "Matrix is singular at working precision, regularizing...",
        stacklevel=find_stack_level(),
    )
    # Shift every singular value by lda so that the condition number of the
    # regularized matrix equals cmax exactly.
    lda = (smax - cmax * smin) / (cmax - 1)
    X = np.dot(U, np.dot(np.diag(s + lda), V))
    return X, cmax
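
# Illustrative usage sketch, not part of the original module. A design matrix
# with a duplicated column is singular at working precision, so full_rank
# regularizes it and reports cmax as the condition number;
# `_example_full_rank` is hypothetical.
def _example_full_rank():
    rng = np.random.default_rng(0)
    X = rng.standard_normal((20, 5))
    X[:, 4] = X[:, 3]  # duplicate column -> huge condition number
    X_reg, cond = full_rank(X, cmax=1e15)  # emits the "singular" warning
    return X_reg, cond  # cond == 1e15 after regularization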

def positive_reciprocal(X):
    """Return element-wise reciprocal of array, setting `X` <= 0 to 0.

    Return the reciprocal of an array, setting all entries less than or
    equal to 0 to 0. Therefore, it presumes that X should be positive in
    general.

    Parameters
    ----------
    X : array-like

    Returns
    -------
    rX : array
        Array of same shape as `X`, dtype float, with values set to
        1/X where X > 0, 0 otherwise.

    """
    X = np.asarray(X)
    return np.where(X <= 0, 0, 1.0 / X)
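
# Illustrative usage sketch, not part of the original module. Entries that
# are zero or negative map to 0, strictly positive entries to their
# reciprocal (numpy may warn about the 0 division that np.where discards);
# `_example_positive_reciprocal` is hypothetical.
def _example_positive_reciprocal():
    x = np.array([-2.0, 0.0, 0.5, 4.0])
    return positive_reciprocal(x)  # array([0., 0., 2., 0.25])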

def pad_contrast(con_val, theta, stat_type):
    """Pad contrast with zeros if necessary.

    If the contrast is shorter than the number of parameters,
    it is padded with zeros.

    If the contrast is longer than the number of parameters,
    a ValueError is raised.

    Parameters
    ----------
    con_val : numpy.ndarray of shape (p) or (n, p)
        Where p = number of regressors
        with a value explicitly passed by the user.
        p must be <= P,
        where P is the total number of regressors in the design matrix.

    theta : numpy.ndarray with shape (P, m)
        theta of RegressionResults instances
        where P is the total number of regressors in the design matrix.

    stat_type : {'t', 'F'}, optional
        Type of the :term:`contrast`.
    """
    n_cols = con_val.shape[0] if con_val.ndim == 1 else con_val.shape[1]
    if n_cols > theta.shape[0]:
        if stat_type == "t":
            raise ValueError(
                f"t contrasts should be of length P={theta.shape[0]}, "
                f"but it has length {n_cols}."
            )
        if stat_type == "F":
            raise ValueError(
                f"F contrasts should have {theta.shape[0]} columns, "
                f"but it has {n_cols}."
            )

    pad = False
    if n_cols < theta.shape[0]:
        pad = True
        if stat_type == "t":
            warn(
                f"t contrasts should be of length P={theta.shape[0]}, "
                f"but it has length {n_cols}. "
                "The rest of the contrast was padded with zeros.",
                category=UserWarning,
                stacklevel=find_stack_level(),
            )
        if stat_type == "F":
            warn(
                f"F contrasts should have {theta.shape[0]} columns, "
                f"but it has only {n_cols}. "
                "The rest of the contrast was padded with zeros.",
                category=UserWarning,
                stacklevel=find_stack_level(),
            )

    if pad:
        # A t contrast (or a single-row F contrast) gets a single row of
        # zeros; a multi-row F contrast is padded row by row.
        if stat_type == "t" or (stat_type == "F" and con_val.shape[0] == 1):
            padding = np.zeros((1, theta.shape[0] - n_cols))
        elif stat_type == "F":
            padding = np.zeros((con_val.shape[0], theta.shape[0] - n_cols))
        con_val = np.hstack((con_val, padding))

    return con_val
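
# Illustrative usage sketch, not part of the original module. A 2-D (1, p)
# t-contrast shorter than the P regressors of a toy theta is padded with
# zeros (a UserWarning about the padding is expected);
# `_example_pad_contrast` is hypothetical.
def _example_pad_contrast():
    theta = np.zeros((4, 1))  # P = 4 regressors, one output column
    con_val = np.array([[1.0, -1.0]])  # covers only the first two regressors
    padded = pad_contrast(con_val, theta, stat_type="t")
    return padded  # array([[ 1., -1.,  0.,  0.]])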