Coverage for nilearn/maskers/multi_nifti_labels_masker.py: 28% (51 statements)


1"""Transformer for computing ROI signals of multiple 4D images.""" 

2 

3import itertools 

4 

5from joblib import Parallel, delayed 

6from sklearn.utils.estimator_checks import check_is_fitted 

7 

8from nilearn._utils import ( 

9 fill_doc, 

10) 

11from nilearn._utils.niimg_conversions import iter_check_niimg 

12from nilearn._utils.tags import SKLEARN_LT_1_6 

13from nilearn.maskers.base_masker import prepare_confounds_multimaskers 

14from nilearn.maskers.nifti_labels_masker import NiftiLabelsMasker 

15from nilearn.typing import NiimgLike 


@fill_doc
class MultiNiftiLabelsMasker(NiftiLabelsMasker):
    """Class for extracting data from multiple Niimg-like objects \
    using labels of non-overlapping brain regions.

    MultiNiftiLabelsMasker is useful when data from non-overlapping volumes
    and from different subjects should be extracted (contrary to
    :class:`nilearn.maskers.NiftiLabelsMasker`).

    For more details on the definitions of labels in Nilearn,
    see the :ref:`region` section.

    Parameters
    ----------
    labels_img : Niimg-like object or None, default=None
        See :ref:`extracting_data`.
        Region definitions, as one image of labels.

    labels : :obj:`list` of :obj:`str`, optional
        Full labels corresponding to the labels image. This is used
        to improve reporting quality if provided.

        .. warning::
            The labels must be consistent with the label
            values provided through `labels_img`.

    %(masker_lut)s

    background_label : :obj:`int` or :obj:`float`, default=0
        Label used in labels_img to represent background.

        .. warning::

            This value must be consistent with the label values
            and the image provided.

    mask_img : Niimg-like object, optional
        See :ref:`extracting_data`.
        Mask to apply to regions before extracting signals.

    %(smoothing_fwhm)s

    %(standardize_maskers)s

    %(standardize_confounds)s

    high_variance_confounds : :obj:`bool`, default=False
        If True, high variance confounds are computed on provided image with
        :func:`nilearn.image.high_variance_confounds` and default parameters
        and regressed out.

    %(detrend)s

    %(low_pass)s

    %(high_pass)s

    %(t_r)s

    %(dtype)s

    resampling_target : {"data", "labels", None}, default="data"
        Defines which image determines the final shape/size:

        - "data" means the atlas is resampled to the
          shape of the data if needed
        - "labels" means the mask_img and images provided to fit() are
          resampled to the shape and affine of labels_img
        - None means no resampling: if shapes and affines do not match, a
          ValueError is raised

    %(memory)s

    %(memory_level1)s

    %(n_jobs)s

    %(verbose0)s

    %(strategy)s

    %(keep_masked_labels)s

    reports : :obj:`bool`, default=True
        If set to True, data is saved in order to produce a report.

    %(clean_args)s

    %(masker_kwargs)s

    Attributes
    ----------
    %(nifti_mask_img_)s

    labels_img_ : :obj:`nibabel.nifti1.Nifti1Image`
        The labels image.

    See Also
    --------
    nilearn.maskers.NiftiMasker
    nilearn.maskers.NiftiLabelsMasker

    """
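    # Illustrative usage, a minimal sketch: the Harvard-Oxford fetcher is used
    # only as an example atlas input, and the file names are placeholders, not
    # part of this module.
    #
    #     from nilearn.datasets import fetch_atlas_harvard_oxford
    #     from nilearn.maskers import MultiNiftiLabelsMasker
    #
    #     atlas = fetch_atlas_harvard_oxford("cort-maxprob-thr25-2mm")
    #     masker = MultiNiftiLabelsMasker(labels_img=atlas.maps, n_jobs=2)
    #     signals = masker.fit_transform(
    #         ["sub-01_task-rest_bold.nii.gz", "sub-02_task-rest_bold.nii.gz"]
    #     )
    #     # signals is a list with one (n_timepoints, n_regions) array
    #     # per input image.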

    def __init__(
        self,
        labels_img=None,
        labels=None,
        lut=None,
        background_label=0,
        mask_img=None,
        smoothing_fwhm=None,
        standardize=False,
        standardize_confounds=True,
        high_variance_confounds=False,
        detrend=False,
        low_pass=None,
        high_pass=None,
        t_r=None,
        dtype=None,
        resampling_target="data",
        memory=None,
        memory_level=1,
        verbose=0,
        strategy="mean",
        keep_masked_labels=True,
        reports=True,
        n_jobs=1,
        clean_args=None,
        **kwargs,
    ):
        self.n_jobs = n_jobs
        super().__init__(
            labels_img,
            labels=labels,
            lut=lut,
            background_label=background_label,
            mask_img=mask_img,
            smoothing_fwhm=smoothing_fwhm,
            standardize=standardize,
            standardize_confounds=standardize_confounds,
            high_variance_confounds=high_variance_confounds,
            low_pass=low_pass,
            high_pass=high_pass,
            detrend=detrend,
            t_r=t_r,
            dtype=dtype,
            resampling_target=resampling_target,
            memory=memory,
            memory_level=memory_level,
            verbose=verbose,
            strategy=strategy,
            reports=reports,
            clean_args=clean_args,
            keep_masked_labels=keep_masked_labels,
            **kwargs,
        )

    def __sklearn_tags__(self):
        """Return estimator tags.

        See the sklearn documentation for more details on tags
        https://scikit-learn.org/1.6/developers/develop.html#estimator-tags
        """
        # TODO: get rid of this if block when bumping sklearn_version > 1.5
        if SKLEARN_LT_1_6:
            from nilearn._utils.tags import tags

            return tags(masker=True, multi_masker=True)

        from nilearn._utils.tags import InputTags

        tags = super().__sklearn_tags__()
        tags.input_tags = InputTags(masker=True, multi_masker=True)
        return tags

    @fill_doc
    def transform_imgs(
        self, imgs_list, confounds=None, n_jobs=1, sample_mask=None
    ):
        """Extract signals from a list of 4D niimgs.

        Parameters
        ----------
        %(imgs)s
            Images to process.

        %(confounds_multi)s

        %(n_jobs)s

        %(sample_mask_multi)s

        Returns
        -------
        %(signals_transform_imgs_multi_nifti)s

        """
        check_is_fitted(self)

        # We handle the resampling of labels separately because the affine of
        # the labels image should not impact the extraction of the signal.

        niimg_iter = iter_check_niimg(
            imgs_list,
            ensure_ndim=None,
            atleast_4d=False,
            memory=self.memory,
            memory_level=self.memory_level,
        )

        confounds = prepare_confounds_multimaskers(self, imgs_list, confounds)

        if sample_mask is None:
            sample_mask = itertools.repeat(None, len(imgs_list))
        elif len(sample_mask) != len(imgs_list):
            raise ValueError(
                f"number of sample_mask ({len(sample_mask)}) unequal to "
                f"number of images ({len(imgs_list)})."
            )

        func = self._cache(self.transform_single_imgs)

        region_signals = Parallel(n_jobs=n_jobs)(
            delayed(func)(imgs=imgs, confounds=cfs, sample_mask=sms)
            for imgs, cfs, sms in zip(niimg_iter, confounds, sample_mask)
        )
        return region_signals
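    # Hypothetical sketch of calling transform_imgs directly on an already
    # fitted masker: `run_a`/`run_b` and the index ranges are placeholder
    # names, and each sample_mask entry keeps only the listed volumes of the
    # matching image while the images are processed in parallel.
    #
    #     import numpy as np
    #
    #     signals = masker.transform_imgs(
    #         [run_a, run_b],
    #         sample_mask=[np.arange(10, 200), np.arange(0, 180)],
    #         n_jobs=2,
    #     )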

    @fill_doc
    def transform(self, imgs, confounds=None, sample_mask=None):
        """Apply mask, spatial and temporal preprocessing.

        Parameters
        ----------
        imgs : Niimg-like object, or a :obj:`list` of Niimg-like objects
            See :ref:`extracting_data`.
            Data to be preprocessed.

        %(confounds_multi)s

        %(sample_mask_multi)s

        Returns
        -------
        %(signals_transform_multi_nifti)s

        """
        check_is_fitted(self)

        if not (confounds is None or isinstance(confounds, list)):
            raise TypeError(
                "'confounds' must be None or a list. "
                f"Got {confounds.__class__.__name__}."
            )
        if not (sample_mask is None or isinstance(sample_mask, list)):
            raise TypeError(
                "'sample_mask' must be None or a list. "
                f"Got {sample_mask.__class__.__name__}."
            )
        if isinstance(imgs, NiimgLike):
            if isinstance(confounds, list):
                confounds = confounds[0]
            if isinstance(sample_mask, list):
                sample_mask = sample_mask[0]
            return super().transform(
                imgs, confounds=confounds, sample_mask=sample_mask
            )

        return self.transform_imgs(
            imgs,
            confounds=confounds,
            sample_mask=sample_mask,
            n_jobs=self.n_jobs,
        )
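    # Sketch of the per-image confounds convention (placeholder names):
    # `confounds` is a list with one entry per image, and when a single
    # Niimg-like object is passed the first entry of that list is used.
    #
    #     import numpy as np
    #
    #     conf_a = np.random.randn(200, 6)  # e.g. motion parameters, run A
    #     conf_b = np.random.randn(180, 6)  # e.g. motion parameters, run B
    #
    #     signals = masker.transform([run_a, run_b], confounds=[conf_a, conf_b])
    #     signal_a = masker.transform(run_a, confounds=[conf_a])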

    @fill_doc
    def fit_transform(self, imgs, y=None, confounds=None, sample_mask=None):
        """Fit to data, then transform it.

        Parameters
        ----------
        imgs : Niimg-like object, or a :obj:`list` of Niimg-like objects
            See :ref:`extracting_data`.
            Data to be preprocessed.

        y : None
            This parameter is unused. It is solely included for scikit-learn
            compatibility.

        %(confounds_multi)s

        %(sample_mask_multi)s

            .. versionadded:: 0.8.0

        Returns
        -------
        %(signals_transform_multi_nifti)s
        """
        return self.fit(imgs, y=y).transform(
            imgs, confounds=confounds, sample_mask=sample_mask
        )