Coverage for nilearn/_utils/docs.py: 94%

158 statements  

coverage.py v7.9.1, created at 2025-06-20 10:58 +0200

1"""Functions related to the documentation. 

2 

3docdict contains the standard documentation entries 

4used across Nilearn. 

5 

6Entries are listed in alphabetical order. 

7 

8source: Eric Larson and MNE-python team. 

9https://github.com/mne-tools/mne-python/blob/main/mne/utils/docs.py 

10""" 

11 

12# sourcery skip: merge-dict-assign 

13 

14import sys 

15 

16############################################################################## 

17# 

18# Parameters definitions 

19# 

20# Standard documentation entries 

21# 

22# Entries are listed in alphabetical order. 

23# 

24docdict = {} 

25 

26############################################################################## 

27# 

28# Parameters definitions 

29# 

30 

31# annotate 

32docdict["annotate"] = """ 

33annotate : :obj:`bool`, default=True 

34 If `annotate` is `True`, positions and left/right annotation 

35 are added to the plot. 

36""" 

37 

38# avg_method 

39docdict["avg_method"] = """ 

40avg_method : {"mean", "median", "min", "max", custom function, None}, \ 

41 default=None 

42 How to average vertex values to derive the face value: 

43 

44 - ``"mean"``: results in smooth boundaries 

45 

46 - ``"median"``: results in sharp boundaries 

47 

48 - ``"min"`` or ``"max"``: for sparse matrices 

49 

50 - `custom function`: You can also pass a custom function 

51 which will be executed through :func:`numpy.apply_along_axis`.

52 Here is an example of a custom function: 

53 

54 .. code-block:: python 

55 

56 def custom_function(vertices): 

57 return vertices[0] * vertices[1] * vertices[2] 

58 

59""" 

60 

61# ax 

62docdict["ax"] = """ 

63ax : :class:`~matplotlib.axes.Axes` 

64 The matplotlib axes in which the plots will be drawn. 

65""" 

66 

67# axes 

68docdict["axes"] = """ 

69axes : :class:`matplotlib.axes.Axes`, or :obj:`tuple` of 4 \

70:obj:`float`: (xmin, ymin, width, height), default=None

71 The axes, or the coordinates, in matplotlib figure space, 

72 of the axes used to display the plot. 

73 If `None`, the complete figure is used. 

74""" 

75 

76# bg_map 

77docdict["bg_map"] = """ 

78bg_map : :obj:`str` or :obj:`pathlib.Path` or \ 

79 :class:`numpy.ndarray` \ 

80 or :obj:`~nilearn.surface.SurfaceImage` or None,\ 

81 default=None 

82 Background image to be plotted on the :term:`mesh` 

83 underneath the surf_data in grayscale, 

84 most likely a sulcal depth map for realistic shading. 

85 If the map contains values outside [0, 1], 

86 it will be rescaled such that all values are in [0, 1]. 

87 Otherwise, it will not be modified. 

88 If a :obj:`str` or :obj:`pathlib.Path` is passed, 

89 it should be loadable to a :class:`numpy.ndarray` 

90 by :func:`~nilearn.surface.load_surf_data`. 

91 If a :class:`numpy.ndarray` is passed, 

92 it should have a shape `(n_vertices, )`,

93 with ``n_vertices`` matching that of the underlying mesh 

94 used for plotting. 

95""" 

96 

97# bg_img 

98docdict["bg_img"] = """ 

99bg_img : Niimg-like object, optional 

100 See :ref:`extracting_data`. 

101 The background image to plot on top of. 

102""" 

103 

104# bg_on_data 

105docdict["bg_on_data"] = r""" 

106bg_on_data : :obj:`bool`, default=False 

107 If `True` and a `bg_map` is specified, 

108 the `surf_data` data is multiplied by the background image, 

109 so that e.g. sulcal depth is jointly visible with `surf_data`. 

110 Otherwise, the background image will only be visible 

111 where there is no surface data 

112 (either because `surf_data` contains `nan`\s 

113 or because it was thresholded).

114 

115 .. note:: 

116 

117 This non-uniformly changes the surf_data values according 

118 to e.g. the sulcal depth.

119 

120""" 

121 

122# black_bg 

123docdict["black_bg"] = """ 

124black_bg : :obj:`bool`, or "auto", optional 

125 If `True`, the background of the image is set to be black. 

126 If you wish to save figures with a black background, 

127 you will need to pass `facecolor="k", edgecolor="k"` 

128 to :func:`matplotlib.pyplot.savefig`. 

129""" 

130 

131# border_size 

132docdict["border_size"] = """ 

133border_size : :obj:`int`, optional 

134 The size, in :term:`voxel`, of the border used on the side of

135 the image to determine the value of the background. 

136""" 

137 

138# cbar_tick_format 

139docdict["cbar_tick_format"] = """ 

140cbar_tick_format : :obj:`str`, optional 

141 Controls how to format the tick labels of the colorbar. 

142 Ex: use "%%.2g" to display using scientific notation. 

143""" 

144 

145# classifier_options 

146svc = "Linear support vector classifier" 

147logistic = "Logistic regression" 

148rc = "Ridge classifier" 

149dc = "Dummy classifier with stratified strategy" 

150 

151docdict["classifier_options"] = f""" 

152 

153 - ``"svc"``: :class:`{svc} <sklearn.svm.LinearSVC>` with L2 penalty. 

154 

155 .. code-block:: python 

156 

157 svc = LinearSVC(penalty="l2", max_iter=1e4) 

158 

159 - ``"svc_l2"``: :class:`{svc} <sklearn.svm.LinearSVC>` with L2 penalty. 

160 

161 .. note:: 

162 

163 Same as option `svc`. 

164 

165 - ``"svc_l1"``: :class:`{svc} <sklearn.svm.LinearSVC>` with L1 penalty. 

166 

167 .. code-block:: python 

168 

169 svc_l1 = LinearSVC(penalty="l1", dual=False, max_iter=1e4) 

170 

171 - ``"logistic"``: \ 

172 :class:`{logistic} <sklearn.linear_model.LogisticRegressionCV>` \ 

173 with L2 penalty. 

174 

175 .. code-block:: python 

176 

177 logistic = LogisticRegressionCV(penalty="l2", solver="liblinear") 

178 

179 - ``"logistic_l1"``: \ 

180 :class:`{logistic} <sklearn.linear_model.LogisticRegressionCV>` \ 

181 with L1 penalty. 

182 

183 .. code-block:: python 

184 

185 logistic_l1 = LogisticRegressionCV(penalty="l1", solver="liblinear") 

186 

187 - ``"logistic_l2"``: \ 

188 :class:`{logistic} <sklearn.linear_model.LogisticRegressionCV>` \ 

189 with L2 penalty 

190 

191 .. note:: 

192 

193 Same as option `logistic`. 

194 

195 - ``"ridge_classifier"``: \ 

196 :class:`{rc} <sklearn.linear_model.RidgeClassifierCV>`. 

197 

198 .. code-block:: python 

199 

200 ridge_classifier = RidgeClassifierCV() 

201 

202 - ``"dummy_classifier"``: :class:`{dc} <sklearn.dummy.DummyClassifier>`. 

203 

204 .. code-block:: python 

205 

206 dummy = DummyClassifier(strategy="stratified", random_state=0) 

207 

208""" 

209 

210# clean_args 

211docdict["clean_args"] = """ 

212clean_args : :obj:`dict` or None, default=None 

213 Keyword arguments to be passed 

214 to :func:`~nilearn.signal.clean` 

215 called within the masker. 

216 Within :func:`~nilearn.signal.clean`, 

217 kwargs prefixed with ``'butterworth__'`` 

218 will be passed to the Butterworth filter. 

219""" 
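# Editorial sketch: a minimal, hypothetical illustration of the ``clean_args``
# pattern documented above. The exact argument names accepted by
# :func:`nilearn.signal.clean` and by the Butterworth filter (via the
# ``butterworth__`` prefix) are assumptions made for illustration; check the
# function signatures before reusing them.
#
#   clean_args = {"filter": "butterworth", "butterworth__order": 5}
#   masker = NiftiMasker(clean_args=clean_args)  # hypothetical masker usage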

220 

221# cmap 

222docdict["cmap"] = """ 

223cmap : :class:`matplotlib.colors.Colormap`, or :obj:`str`, optional 

224 The colormap to use. 

225 Either a string which is a name of a matplotlib colormap, 

226 or a matplotlib colormap object. 

227""" 

228 

229# cmap or lut 

230docdict["cmap_lut"] = """ 

231cmap : :class:`matplotlib.colors.Colormap`, or :obj:`str`, \ 

232 or :class:`pandas.DataFrame`, optional 

233 The colormap to use. 

234 Either a string which is a name of a matplotlib colormap, 

235 or a matplotlib colormap object, 

236 or a BIDS compliant 

237 `look-up table <https://bids-specification.readthedocs.io/en/latest/derivatives/imaging.html#common-image-derived-labels>`_ 

238 passed as a pandas dataframe. 

239 If the look up table does not contain a ``color`` column, 

240 then the default colormap of this function will be used. 

241""" 

242 

243# colorbar 

244docdict["colorbar"] = """ 

245colorbar : :obj:`bool`, optional 

246 If `True`, display a colorbar on the right of the plots. 

247""" 

248 

249# connected 

250docdict["connected"] = """ 

251connected : :obj:`bool`, optional 

252 If connected is `True`, only the largest connected component is kept.

253""" 

254 

255# confounds 

256docdict["confounds"] = """ 

257confounds : :class:`numpy.ndarray`, :obj:`str`, :class:`pathlib.Path`, \ 

258 :class:`pandas.DataFrame` \ 

259 or :obj:`list` of confounds timeseries, default=None 

260 This parameter is passed to :func:`nilearn.signal.clean`. 

261 Please see the related documentation for details. 

262 shape: (number of scans, number of confounds) 

263""" 

264docdict["confounds_multi"] = """ 

265confounds : :obj:`list` of confounds, default=None 

266 List of confounds (arrays, dataframes, 

267 str or path of files loadable into an array). 

268 As confounds are passed to :func:`nilearn.signal.clean`, 

269 please see the related documentation for details about accepted types. 

270 Must be of same length as ``imgs``.

271""" 

272 

273# cut_coords 

274docdict["cut_coords"] = """ 

275cut_coords : None, a :obj:`tuple` of :obj:`float`, or :obj:`int`, optional 

276 The MNI coordinates of the point where the cut is performed. 

277 

278 - If `display_mode` is `'ortho'` or `'tiled'`, this should 

279 be a 3-tuple: `(x, y, z)` 

280 

281 - If `display_mode` is `"x"`, `"y"`, or `"z"`, these are

282 the coordinates of each cut in the corresponding direction. 

283 

284 - If `None` is given, the cuts are calculated automatically. 

285 

286 - If `display_mode` is 'mosaic', and the number of cuts is the same 

287 for all directions, `cut_coords` can be specified as an integer. 

288 It can also be a length 3 :obj:`tuple` 

289 specifying the number of cuts for 

290 every direction if these are different. 

291 

292 .. note:: 

293 

294 If `display_mode` is "x", "y" or "z", 

295 `cut_coords` can be an integer, 

296 in which case it specifies the number of cuts to perform. 

297 

298""" 
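# Editorial sketch: illustrative (hypothetical) ``cut_coords`` values for the
# cases described above; the coordinates themselves are arbitrary.
#
#   cut_coords=(0, -52, 18)  # with display_mode="ortho": one (x, y, z) point
#   cut_coords=5             # with display_mode="z": five axial cuts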

299 

300# darkness 

301docdict["darkness"] = """ 

302darkness : :obj:`float` between 0 and 1, optional 

303 Specifies the darkness of the background image:

304 

305 - `1` indicates that the original values of the background are used 

306 

307 - `0.5` indicates that the background values 

308 are reduced by half before being applied. 

309 

310""" 

311 

312# data_dir 

313docdict["data_dir"] = """ 

314data_dir : :obj:`pathlib.Path` or :obj:`str` or None, optional 

315 Path where data should be downloaded. 

316 By default, files are downloaded in a ``nilearn_data`` folder 

317 in the home directory of the user. 

318 See also ``nilearn.datasets.utils.get_data_dirs``. 

319""" 

320 

321# detrend 

322docdict["detrend"] = """ 

323detrend : :obj:`bool`, optional 

324 Whether to detrend signals or not. 

325""" 

326 

327# dimming factor 

328docdict["dim"] = """ 

329dim : :obj:`float`, or "auto", optional 

330 Dimming factor applied to background image. 

331 By default, automatic heuristics are applied 

332 based upon the background image intensity. 

333 Accepted float values, where a typical span is between -2 and 2 

334 (-2 = increase contrast; 2 = decrease contrast), 

335 but larger values can be used for a more pronounced effect. 

336 `0` means no dimming. 

337""" 

338 

339# display_mode 

340docdict["display_mode"] = """ 

341display_mode : {"ortho", "tiled", "mosaic", "x", \ 

342"y", "z", "yx", "xz", "yz"}, default="ortho" 

343 Choose the direction of the cuts: 

344 

345 - ``"x"``: sagittal 

346 - ``"y"``: coronal 

347 - ``"z"``: axial 

348 - ``"ortho"``: three cuts are performed in orthogonal directions 

349 - ``"tiled"``: three cuts are performed and arranged in a 2x2 grid 

350 - ``"mosaic"``: three cuts are performed along 

351 multiple rows and columns 

352 

353""" 

354 

355# draw_cross 

356docdict["draw_cross"] = """ 

357draw_cross : :obj:`bool`, default=True 

358 If `draw_cross` is `True`, a cross is drawn on the plot 

359 to indicate the cut position. 

360""" 

361 

362# dtype 

363docdict["dtype"] = """ 

364dtype : dtype like, "auto" or None, default=None 

365 Data type toward which the data should be converted. 

366 If "auto", the data will be converted to int32 

367 if dtype is discrete and float32 if it is continuous. 

368""" 

369 

370# extractor / extract_type 

371docdict["extractor"] = """ 

372extractor : {"local_regions", "connected_components"}, default="local_regions" 

373 This option can take two values: 

374 

375 - ``"connected_components"``: each component/region in the image 

376 is extracted automatically by labeling each region based 

377 upon the presence of unique features in their respective regions. 

378 

379 - ``"local_regions"``: each component/region is extracted 

380 based on their maximum peak value to define a seed marker 

381 and then using the random walker segmentation algorithm

382 on these markers for region separation. 

383 

384""" 

385docdict["extract_type"] = docdict["extractor"].replace( 

386 "extractor", "extract_type" 

387) 

388 

389# figure 

390docdict["figure"] = """ 

391figure : :obj:`int`, or :class:`matplotlib.figure.Figure`, or None, optional 

392 Matplotlib figure used or its number. 

393 If `None` is given, a new figure is created. 

394""" 

395 

396# first_level_contrast

397docdict["first_level_contrast"] = """ 

398first_level_contrast : :obj:`str` or :class:`numpy.ndarray` of \ 

399 shape (n_col) with respect to \ 

400 :class:`~nilearn.glm.first_level.FirstLevelModel` \ 

401 or None, default=None 

402 

403 When the model is a :class:`~nilearn.glm.second_level.SecondLevelModel`: 

404 

405 - in case a :obj:`list` of 

406 :class:`~nilearn.glm.first_level.FirstLevelModel` was provided 

407 as ``second_level_input``, 

408 we have to provide a :term:`contrast` 

409 to apply to the first level models 

410 to get the corresponding list of images desired, 

411 that would be tested at the second level, 

412 - in case a :class:`~pandas.DataFrame` was provided 

413 as ``second_level_input`` this is the map name to extract 

414 from the :class:`~pandas.DataFrame` ``map_name`` column. 

415 (it has to be a 't' contrast). 

416 

417 This parameter is ignored for all other cases. 

418""" 

419 

420# fwhm 

421docdict["fwhm"] = """ 

422fwhm : scalar, :class:`numpy.ndarray`, or :obj:`tuple`, or :obj:`list`,\ 

423or 'fast' or None, optional 

424 Smoothing strength, as a :term:`full-width at half maximum<FWHM>`, 

425 in millimeters: 

426 

427 - If a nonzero scalar is given, width is identical in all 3 directions. 

428 

429 - If a :class:`numpy.ndarray`, :obj:`tuple`, or :obj:`list` is given, 

430 it must have 3 elements, giving the :term:`FWHM` along each axis. 

431 If any of the elements is `0` or `None`,

432 smoothing is not performed along that axis.

433

434 - If `fwhm="fast"`, a fast smoothing will be performed with a filter

435 [0.2, 1, 0.2] in each direction and a normalization to preserve the 

436 local average value. 

437 

438 - If `fwhm` is `None`, no filtering is performed 

439 (useful when just removal of non-finite values is needed). 

440 

441 .. note:: 

442 

443 In corner cases, `fwhm` is simply set to `None`

444 when `fwhm` is specified as `fwhm=0`. 

445 

446""" 

447 

448# groups 

449docdict["groups"] = """ 

450groups : None, default=None 

451 Group labels for the samples used 

452 while splitting the dataset into train/test set. 

453 

454 Note that this parameter must be specified in some scikit-learn 

455 cross-validation generators to calculate the number of splits, 

456 for example sklearn.model_selection.LeaveOneGroupOut or 

457 sklearn.model_selection.LeavePGroupsOut. 

458 

459 For more details see 

460 https://scikit-learn.org/stable/modules/cross_validation.html#cross-validation-iterators-for-grouped-data 

461""" 

462 

463# hemi 

464docdict["hemi"] = """ 

465hemi : {"left", "right", "both"}, default="left" 

466 Hemisphere to display. 

467""" 

468 

469# high_pass 

470docdict["high_pass"] = """ 

471high_pass : :obj:`float` or :obj:`int` or None, default=None 

472 High cutoff frequency in Hertz. 

473 If specified, signals below this frequency will be filtered out. 

474""" 

475 

476# hrf_model 

477docdict["hrf_model"] = """ 

478hrf_model : :obj:`str`, function, :obj:`list` of functions, or None 

479 This parameter defines the :term:`HRF` model to be used. 

480 It can be a string if you are passing the name of a model 

481 implemented in Nilearn. 

482 Valid names are: 

483 

484 - ``"spm"``: 

485 This is the :term:`HRF` model used in :term:`SPM`. 

486 See :func:`~nilearn.glm.first_level.spm_hrf`. 

487 

488 - ``"spm + derivative"``: 

489 SPM model plus its time derivative. 

490 This gives 2 regressors. 

491 See :func:`~nilearn.glm.first_level.spm_hrf`, and 

492 :func:`~nilearn.glm.first_level.spm_time_derivative`. 

493 

494 - ``"spm + derivative + dispersion"``: 

495 Same as above plus dispersion derivative. 

496 This gives 3 regressors. 

497 See :func:`~nilearn.glm.first_level.spm_hrf`, 

498 :func:`~nilearn.glm.first_level.spm_time_derivative`, 

499 and :func:`~nilearn.glm.first_level.spm_dispersion_derivative`. 

500 

501 - ``"glover"``: 

502 This corresponds to the Glover :term:`HRF`. 

503 See :func:`~nilearn.glm.first_level.glover_hrf`. 

504 

505 - ``"glover + derivative"``: 

506 The Glover :term:`HRF` + time derivative. 

507 This gives 2 regressors. 

508 See :func:`~nilearn.glm.first_level.glover_hrf`, and 

509 :func:`~nilearn.glm.first_level.glover_time_derivative`. 

510 

511 - ``"glover + derivative + dispersion"``:

512 Same as above plus dispersion derivative. 

513 This gives 3 regressors. 

514 See :func:`~nilearn.glm.first_level.glover_hrf`, 

515 :func:`~nilearn.glm.first_level.glover_time_derivative`, and 

516 :func:`~nilearn.glm.first_level.glover_dispersion_derivative`. 

517 

518 - ``"fir"``: 

519 Finite impulse response basis. 

520 This is a set of delayed dirac models. 

521 

522 It can also be a custom model. 

523 In this case, a function should be provided for each regressor. 

524 Each function should behave as the other models implemented within Nilearn. 

525 That is, it should take both ``t_r`` and ``oversampling`` as inputs 

526 and return a sampled numpy array of appropriate shape.

527 

528 .. note:: 

529 

530 It is expected that ``"spm"`` standard and ``"glover"`` models 

531 would not yield large differences in most cases. 

532 

533 .. note:: 

534 

535 In case of ``"glover"`` and ``"spm"`` models, 

536 the derived regressors are orthogonalized 

537 with respect to the main one. 

538 

539""" 
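# Editorial sketch: a hedged example of a custom HRF model matching the
# contract described above (a callable taking ``t_r`` and ``oversampling``
# and returning a sampled array). The toy gamma-like shape below is for
# illustration only, not a validated HRF.
#
#   import numpy as np
#
#   def custom_hrf(t_r, oversampling=50):
#       t = np.arange(0, 32, t_r / oversampling)
#       return t * np.exp(-t)
#
#   hrf_model = [custom_hrf]  # one function per regressor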

540 

541# img 

542docdict["img"] = """ 

543img : Niimg-like object 

544 See :ref:`extracting_data`. 

545""" 

546 

547# imgs 

548docdict["imgs"] = """ 

549imgs : :obj:`list` of Niimg-like objects 

550 See :ref:`extracting_data`. 

551""" 

552 

553# keep_masked_labels 

554docdict["keep_masked_labels"] = """ 

555keep_masked_labels : :obj:`bool`, default=True 

556 When a mask is supplied through the "mask_img" parameter, some 

557 atlas regions may lie entirely outside of the brain mask, resulting 

558 in empty time series for those regions. 

559 If True, the masked atlas with these empty labels will be retained 

560 in the output, resulting in corresponding time series containing 

561 zeros only. If False, the empty labels will be removed from the 

562 output, ensuring no empty time series are present. 

563 

564 .. deprecated:: 0.10.2 

565 

566 The 'True' option for ``keep_masked_labels`` is deprecated. 

567 The default value will change to 'False' in 0.13, 

568 and the ``keep_masked_labels`` parameter will be removed in 0.15. 

569 

570""" 

571 

572# keep_masked_maps 

573docdict["keep_masked_maps"] = """ 

574keep_masked_maps : :obj:`bool`, optional 

575 If True, masked atlas with invalid maps (maps that contain only 

576 zeros after applying the mask) will be retained in the output, resulting 

577 in corresponding time series containing zeros only. If False, the 

578 invalid maps will be removed from the trimmed atlas, resulting in 

579 no empty time series in the output. 

580 

581 .. deprecated:: 0.10.2 

582 

583 The 'True' option for ``keep_masked_maps`` is deprecated. 

584 The default value will change to 'False' in 0.13, 

585 and the ``keep_masked_maps`` parameter will be removed in 0.15. 

586 

587""" 

588 

589# linewidth 

590docdict["linewidths"] = """ 

591linewidths : :obj:`float`, optional 

592 Set the boundary thickness of the contours. 

593 Only has an effect when `view_type="contours"`.

594""" 

595 

596# low_pass 

597docdict["low_pass"] = """ 

598low_pass : :obj:`float` or :obj:`int` or None, default=None 

599 Low cutoff frequency in Hertz. 

600 If specified, signals above this frequency will be filtered out. 

601 If `None`, no low-pass filtering will be performed. 

602""" 

603 

604# lower_cutoff 

605docdict["lower_cutoff"] = """ 

606lower_cutoff : :obj:`float`, optional 

607 Lower fraction of the histogram to be discarded. 

608""" 

609 

610# masker_lut 

611docdict["masker_lut"] = """lut : :obj:`pandas.DataFrame` or :obj:`str` \ 

612 or :obj:`pathlib.Path` to a TSV file or None, default=None 

613 Mutually exclusive with ``labels``. 

614 Acts as a look up table (lut)

615 with at least columns 'index' and 'name'. 

616 Formatted according to 'dseg.tsv' format from 

617 `BIDS <https://bids-specification.readthedocs.io/en/latest/derivatives/imaging.html#common-image-derived-labels>`_.""" 

618 

619 

620# mask_strategy 

621docdict["mask_strategy"] = """ 

622mask_strategy : {"background", "epi", "whole-brain-template",\ 

623"gm-template", "wm-template"}, optional 

624 The strategy used to compute the mask: 

625 

626 - ``"background"``: Use this option if your images present 

627 a clear homogeneous background. Uses 

628 :func:`nilearn.masking.compute_background_mask` under the hood. 

629 

630 - ``"epi"``: Use this option if your images are raw EPI images. Uses 

631 :func:`nilearn.masking.compute_epi_mask`. 

632 

633 - ``"whole-brain-template"``: This will extract the whole-brain 

634 part of your data by resampling the MNI152 brain mask for 

635 your data's field of view. Uses 

636 :func:`nilearn.masking.compute_brain_mask` with 

637 ``mask_type="whole-brain"``. 

638 

639 .. note:: 

640 

641 This option is equivalent to the previous 'template' option 

642 which is now deprecated. 

643 

644 - ``"gm-template"``: This will extract the gray matter part of your 

645 data by resampling the corresponding MNI152 template for your 

646 data's field of view. Uses 

647 :func:`nilearn.masking.compute_brain_mask` with ``mask_type="gm"``. 

648 

649 .. versionadded:: 0.8.1 

650 

651 - ``"wm-template"``: This will extract the white matter part of your 

652 data by resampling the corresponding MNI152 template for your 

653 data's field of view. Uses 

654 :func:`nilearn.masking.compute_brain_mask` with ``mask_type="wm"``. 

655 

656 .. versionadded:: 0.8.1 

657""" 
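# Editorial sketch: illustrative use of ``mask_strategy`` with a masker that
# exposes it (assumed here to be :class:`~nilearn.maskers.NiftiMasker`):
#
#   masker = NiftiMasker(mask_strategy="epi")  # for raw EPI inputs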

658 

659# mask_type 

660docdict["mask_type"] = """ 

661mask_type : {"whole-brain", "gm", "wm"}, default="whole-brain" 

662 Type of mask to be computed: 

663 

664 - ``"whole-brain"``: Computes the whole-brain mask. 

665 - ``"gm"``: Computes the grey-matter mask. 

666 - ``"wm"``: Computes the white-matter mask. 

667 

668""" 

669 

670# kwargs for Maskers 

671docdict["masker_kwargs"] = """ 

672kwargs : dict 

673 Keyword arguments to be passed to functions called within the masker. 

674 Kwargs prefixed with `'clean__'` will be passed to 

675 :func:`~nilearn.signal.clean`. 

676 Within :func:`~nilearn.signal.clean`, kwargs prefixed with 

677 `'butterworth__'` will be passed to the Butterworth filter 

678 (i.e., `clean__butterworth__`). 

679 

680 .. deprecated:: 0.11.2dev 

681 

682 .. admonition:: Use ``clean_args`` instead! 

683 :class: important 

684 

685 It is recommended to pass parameters to use for data cleaning 

686 via :obj:`dict` to the ``clean_args`` parameter. 

687 

688 Passing parameters via "kwargs" is mutually exclusive 

689 with passing cleaning parameters via ``clean_args``. 

690""" 

691 

692# memory 

693docdict["memory"] = """ 

694memory : None, instance of :class:`joblib.Memory`, :obj:`str`, or \ 

695:class:`pathlib.Path` 

696 Used to cache the masking process. 

697 By default, no caching is done. 

698 If a :obj:`str` is given, it is the path to the caching directory. 

699""" 

700 

701# memory_level 

702memory_level = """ 

703memory_level : :obj:`int`, default={} 

704 Rough estimator of the amount of memory used by caching. 

705 Higher value means more memory for caching. 

706 Zero means no caching. 

707""" 

708docdict["memory_level"] = memory_level.format(0) 

709docdict["memory_level1"] = memory_level.format(1) 

710 

711# n_jobs 

712n_jobs = """ 

713n_jobs : :obj:`int`, default={} 

714 The number of CPUs to use to do the computation. 

715 `-1` means 'all CPUs'. 

716""" 

717docdict["n_jobs"] = n_jobs.format("1") 

718docdict["n_jobs_all"] = n_jobs.format("-1") 

719 

720# n_perm

721docdict["n_perm"] = """ 

722n_perm : :obj:`int`, default=10000 

723 Number of permutations to perform. 

724 Permutations are costly, but the more that are performed,

725 the more precise the p-value estimation.

726""" 

727 

728# opening 

729docdict["opening"] = """ 

730opening : :obj:`bool` or :obj:`int`, optional 

731 This parameter determines whether a morphological 

732 :term:`opening<Opening>` is performed, to keep only large structures. 

733 This step is useful to remove parts of the skull that might have been 

734 included. `opening` can be: 

735 

736 - A :obj:`bool` : If `False`, no :term:`opening<Opening>` is performed. 

737 If `True`, it is equivalent to `opening=1`. 

738 

739 - An :obj:`int` `n`: The :term:`opening<Opening>` is performed via `n` 

740 :term:`erosions<Erosion>` (see :func:`scipy.ndimage.binary_erosion`). 

741 The largest connected component is then estimated 

742 if `connected` is set to `True`, 

743 and 2`n` :term:`dilation<Dilation>` operations are performed 

744 (see :func:`scipy.ndimage.binary_dilation`) 

745 followed by `n` :term:`erosions<Erosion>`. 

746 This corresponds to 1 :term:`opening<Opening>` operation 

747 of order `n` followed by a :term:`closing<Closing>` operator 

748 of order `n`. 

749 

750 .. note:: 

751 

752 Turning off :term:`opening<Opening>` (`opening=False`) will also 

753 prevent any smoothing applied to the image during the mask computation. 

754 

755""" 

756 

757# output_file 

758docdict["output_file"] = """ 

759output_file : :obj:`str` or :obj:`pathlib.Path` or None, optional 

760 The name of an image file to export the plot to. 

761 Valid extensions are .png, .pdf, .svg. 

762 If `output_file` is not `None`, the plot is saved to a file, 

763 and the display is closed. 

764""" 

765 

766# radiological 

767docdict["radiological"] = """ 

768radiological : :obj:`bool`, default=False 

769 Invert the x axis and the R/L labels to plot sections as a radiological view.

770 If False (default), the left hemisphere is on the left of a coronal image.

771 If True, the left hemisphere is on the right.

772""" 

773 

774# random_state 

775docdict["random_state"] = """ 

776random_state : :obj:`int` or np.random.RandomState, optional 

777 Pseudo-random number generator state used for random sampling. 

778""" 

779 

780# regressor_options 

781docdict["regressor_options"] = """ 

782 

783 - ``ridge``: \ 

784 :class:`Ridge regression <sklearn.linear_model.RidgeCV>`.

785 

786 .. code-block:: python 

787 

788 ridge = RidgeCV() 

789 

790 - ``ridge_regressor``: \ 

791 :class:`Ridge regression <sklearn.linear_model.RidgeCV>`.

792 

793 .. note:: 

794 

795 Same option as `ridge`. 

796 

797 - ``svr``: :class:`Support vector regression <sklearn.svm.SVR>`.

798 

799 .. code-block:: python 

800 

801 svr = SVR(kernel="linear", max_iter=1e4) 

802 

803 - ``lasso``: \ 

804 :class:`Lasso regression <sklearn.linear_model.LassoCV>`.

805 

806 .. code-block:: python 

807 

808 lasso = LassoCV() 

809 

810 - ``lasso_regressor``: \ 

811 :class:`Lasso regression <sklearn.linear_model.LassoCV>`.

812 

813 .. note:: 

814 

815 Same option as `lasso`. 

816 

817 - ``dummy_regressor``: \ 

818 :class:`Dummy regressor <sklearn.dummy.DummyRegressor>`.

819 

820 .. code-block:: python 

821 

822 dummy = DummyRegressor(strategy="mean") 

823 

824""" 

825 

826# resampling_interpolation 

827docdict["resampling_interpolation"] = """ 

828resampling_interpolation : :obj:`str`, optional 

829 Interpolation to use when resampling the image to 

830 the destination space. Can be: 

831 

832 - ``"continuous"``: use 3rd-order spline interpolation 

833 - ``"nearest"``: use nearest-neighbor mapping. 

834 

835 .. note:: 

836 

837 ``"nearest"`` is faster but can be noisier in some cases. 

838 

839""" 

840 

841# resolution template 

842docdict["resolution"] = """ 

843resolution : :obj:`int` or None, default=None 

844 Resolution in millimeters. 

845 If resolution is different from 1, 

846 the template is re-sampled with the specified resolution. 

847 Defaults to ``1`` if ``None`` is passed.

848""" 

849 

850# resume 

851docdict["resume"] = """ 

852resume : :obj:`bool`, default=True 

853 Whether to resume download of a partly-downloaded file. 

854""" 

855 

856# sample_mask 

857docdict["sample_mask"] = """ 

858sample_mask : Any type compatible with numpy-array indexing, default=None 

859 ``shape = (total number of scans - number of scans removed)`` 

860 for explicit index (for example, ``sample_mask=np.asarray([1, 2, 4])``), 

861 or ``shape = (number of scans)`` for binary mask 

862 (for example, 

863 ``sample_mask=np.asarray([False, True, True, False, True])``). 

864 Masks the images along the last dimension to perform scrubbing: 

865 for example to remove volumes with high motion 

866 and/or non-steady-state volumes. 

867 This parameter is passed to :func:`nilearn.signal.clean`. 

868""" 

869docdict["sample_mask_multi"] = """ 

870sample_mask : :obj:`list` of sample_mask, default=None 

871 List of sample_mask (any type compatible with numpy-array indexing) 

872 to use for scrubbing outliers. 

873 Must be of same length as ``imgs``. 

874 ``shape = (total number of scans - number of scans removed)`` 

875 for explicit index (for example, ``sample_mask=np.asarray([1, 2, 4])``), 

876 or ``shape = (number of scans)`` for binary mask 

877 (for example, 

878 ``sample_mask=np.asarray([False, True, True, False, True])``). 

879 Masks the images along the last dimension to perform scrubbing: 

880 for example to remove volumes with high motion 

881 and/or non-steady-state volumes. 

882 This parameter is passed to :func:`nilearn.signal.clean`. 

883""" 

884 

885# second_level_contrast 

886docdict["second_level_contrast"] = """ 

887second_level_contrast : :obj:`str` or :class:`numpy.ndarray` of shape\ 

888(n_col), optional 

889 Where `n_col` is the number of columns of the design matrix. 

890 The string can be a formula compatible with :meth:`pandas.DataFrame.eval`. 

891 Basically one can use the name of the conditions as they appear 

892 in the design matrix of the fitted model, combined with the operators ``+`` and ``-``,

893 and with numbers using the operators ``+``, ``-``, ``*`` and ``/``.

894 The default `None` is accepted if the design matrix has a single column, 

895 in which case the only possible contrast array((1)) is applied; 

896 when the design matrix has multiple columns, an error is raised. 

897""" 
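# Editorial sketch: hypothetical ``second_level_contrast`` values, assuming a
# design matrix with columns named "age" and "sex" and ``import numpy as np``;
# the column names are placeholders, not real data.
#
#   second_level_contrast = "age - sex"        # formula string
#   second_level_contrast = np.array([1, -1])  # explicit contrast vector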

898 

899# second_level_confounds 

900docdict["second_level_confounds"] = """ 

901confounds : :obj:`pandas.DataFrame` or None, default=None 

902 Must contain a ``subject_label`` column. 

903 All other columns are considered as confounds and included in the model. 

904 If ``design_matrix`` is provided then this argument is ignored. 

905 The resulting second level design matrix uses the same column names 

906 as in the given :class:`~pandas.DataFrame` for confounds. 

907 At least two columns are expected, ``subject_label`` 

908 and at least one confound. 

909""" 

910 

911# second_level_design_matrix

912docdict["second_level_design_matrix"] = """ 

913design_matrix : :obj:`pandas.DataFrame`, :obj:`str` or \ 

914 :obj:`pathlib.Path` to a CSV or TSV file, \

915 or None, default=None 

916 Design matrix to fit the :term:`GLM`. 

917 The number of rows in the design matrix 

918 must agree with the number of maps 

919 derived from ``second_level_input``. 

920 Ensure that the order of maps given by a ``second_level_input`` 

921 list of Niimgs matches the order of the rows in the design matrix. 

922""" 

923 

924# second_level_input 

925docdict["second_level_input"] = """ 

926second_level_input : :obj:`list` of \ 

927 :class:`~nilearn.glm.first_level.FirstLevelModel` objects or \ 

928 :class:`pandas.DataFrame` or \ 

929 :obj:`list` of 3D Niimg-like objects or \ 

930 4D Niimg-like objects or \ 

931 :obj:`list` of :class:`~nilearn.surface.SurfaceImage` objects or \ 

932 :obj:`pandas.Series` of Niimg-like objects. 

933 

934 - Giving :class:`~nilearn.glm.first_level.FirstLevelModel` objects 

935 will allow you to easily compute the second level contrast of arbitrary first

936 level contrasts thanks to the `first_level_contrast` argument of 

937 :meth:`~nilearn.glm.first_level.FirstLevelModel.compute_contrast`. 

938 Effect size images will be computed for each model 

939 to contrast at the second level. 

940 

941 - If a :class:`~pandas.DataFrame`, then it has to contain 

942 `subject_label`, `map_name` and `effects_map_path`. 

943 It can contain multiple maps that would be selected 

944 during contrast estimation with the argument `first_level_contrast` 

945 of :meth:`~nilearn.glm.first_level.FirstLevelModel.compute_contrast`. 

946 The :class:`~pandas.DataFrame` will be sorted 

947 based on the `subject_label` column to avoid order inconsistencies 

948 when extracting the maps. 

949 So the rows of the automatically computed design matrix, 

950 if not provided, will correspond to the sorted `subject_label` column. 

951 

952 - If a :obj:`list` of Niimg-like objects 

953 or :class:`~nilearn.surface.SurfaceImage` objects 

954 then this is taken literally as Y for the model fit 

955 and `design_matrix` must be provided. 

956 

957""" 

958 

959# second_level_mask_img 

960docdict["second_level_mask_img"] = """ 

961mask_img : Niimg-like, :obj:`~nilearn.maskers.NiftiMasker` or\ 

962 :obj:`~nilearn.maskers.MultiNiftiMasker` or\ 

963 :obj:`~nilearn.maskers.SurfaceMasker` object or None,\ 

964 default=None 

965 Mask to be used on data. 

966 If an instance of masker is passed, 

967 then its mask will be used. 

968 If no mask is given, 

969 it will be computed automatically 

970 by a :class:`~nilearn.maskers.NiftiMasker`, 

971 or a :obj:`~nilearn.maskers.SurfaceMasker` 

972 (depending on the type passed at fit time) 

973 with default parameters. 

974 Automatic mask computation assumes first level imgs 

975 have already been masked. 

976""" 

977docdict["second_level_mask"] = docdict["second_level_mask_img"].replace( 

978 "mask_img :", "mask :" 

979) 

980 

981# signals for inverse transform 

982docdict["signals_inv_transform"] = """ 

983signals : 1D/2D :obj:`numpy.ndarray` 

984 Extracted signal. 

985 If a 1D array is provided, 

986 then the shape should be (number of elements,). 

987 If a 2D array is provided, 

988 then the shape should be (number of scans, number of elements). 

989""" 

990docdict["region_signals_inv_transform"] = docdict["signals_inv_transform"] 

991docdict["x_inv_transform"] = docdict["signals_inv_transform"] 

992 

993 

994# smoothing_fwhm 

995docdict["smoothing_fwhm"] = """ 

996smoothing_fwhm : :obj:`float` or :obj:`int` or None, optional

997 If `smoothing_fwhm` is not `None`, 

998 it gives the :term:`full-width at half maximum<FWHM>` in millimeters 

999 of the spatial smoothing to apply to the signal. 

1000""" 

1001 

1002# standardize 

1003standardize = """ 

1004standardize : :obj:`bool`, default={} 

1005 If `standardize` is `True`, the data are centered and normed: 

1006 their mean is put to 0 and their variance is put to 1 

1007 in the time dimension. 

1008""" 

1009docdict["standardize"] = standardize.format("True") 

1010docdict["standardize_false"] = standardize.format("False") 

1011 

1012# standardize as used within maskers module 

1013docdict["standardize_maskers"] = """ 

1014standardize : {'zscore_sample', 'zscore', 'psc', True, False}, default=False 

1015 Strategy to standardize the signal: 

1016 

1017 - ``'zscore_sample'``: The signal is z-scored. Timeseries are shifted 

1018 to zero mean and scaled to unit variance. Uses sample std. 

1019 

1020 - ``'zscore'``: The signal is z-scored. Timeseries are shifted 

1021 to zero mean and scaled to unit variance. Uses the population std,

1022 by calling :obj:`numpy.std` with its default ``ddof=0``.

1023 

1024 - ``'psc'``: Timeseries are shifted to zero mean value and scaled 

1025 to percent signal change (as compared to original mean signal). 

1026 

1027 - ``True``: The signal is z-scored (same as option `zscore`). 

1028 Timeseries are shifted to zero mean and scaled to unit variance. 

1029 

1030 - ``False``: Do not standardize the data. 

1031 

1032""" 

1033 

1034# standardize_confounds 

1035docdict["standardize_confounds"] = """ 

1036standardize_confounds : :obj:`bool`, default=True 

1037 If set to `True`, the confounds are z-scored: 

1038 their mean is put to 0 and their variance to 1 in the time dimension. 

1039""" 

1040 

1041# strategy

1042docdict["strategy"] = """ 

1043strategy : :obj:`str`, default="mean" 

1044 The name of a valid function to reduce the region with. 

1045 Must be one of: sum, mean, median, minimum, maximum, variance, 

1046 standard_deviation. 

1047""" 

1048 

1049# surf_mesh 

1050docdict["surf_mesh"] = """ 

1051surf_mesh : :obj:`str` or :obj:`list` of two :class:`numpy.ndarray` \ 

1052 or a :obj:`~nilearn.surface.InMemoryMesh`, or a \ 

1053 :obj:`~nilearn.surface.PolyMesh`, or None, default=None 

1054 Surface :term:`mesh` geometry, can be a file (valid formats are .gii or 

1055 Freesurfer specific files such as .orig, .pial, .sphere, .white, 

1056 .inflated) or a list of two Numpy arrays, the first containing the 

1057 x-y-z coordinates of the :term:`mesh` :term:`vertices<vertex>`, the 

1058 second containing the indices (into coords) of the :term:`mesh` 

1059 :term:`faces`, or a :obj:`~nilearn.surface.InMemoryMesh` object with 

1060 "coordinates" and "faces" attributes, or a 

1061 :obj:`~nilearn.surface.PolyMesh` object, or None. 

1062""" 

1063 

1064# symmetric_cbar 

1065docdict["symmetric_cbar"] = """ 

1066symmetric_cbar : :obj:`bool`, or "auto", default="auto" 

1067 Specifies whether the colorbar and colormap should range from `-vmax` to 

1068 `vmax` (or from `vmin` to `-vmin` if `-vmin` is greater than `vmax`) or 

1069 from `vmin` to `vmax`. 

1070 Setting to `"auto"` (the default) will select the former if either 

1071 `vmin` or `vmax` is `None` and the image has both positive and negative 

1072 values. 

1073""" 

1074 

1075# t_r 

1076docdict["t_r"] = """ 

1077t_r : :obj:`float` or :obj:`int` or None, default=None 

1078 :term:`Repetition time<TR>`, in seconds (sampling period). 

1079 Set to `None` if not provided. 

1080""" 

1081 

1082# target_affine 

1083docdict["target_affine"] = """ 

1084target_affine : :class:`numpy.ndarray` or None, default=None 

1085 If specified, the image is resampled corresponding to this new affine. 

1086 `target_affine` can be a 3x3 or a 4x4 matrix. 

1087""" 

1088 

1089# target_shape 

1090docdict["target_shape"] = """ 

1091target_shape : :obj:`tuple` or :obj:`list` or None, default=None 

1092 If specified, the image will be resized to match this new shape. 

1093 `len(target_shape)` must be equal to 3. 

1094 

1095 .. note:: 

1096 

1097 If `target_shape` is specified, a `target_affine` of shape 

1098 `(4, 4)` must also be given. 

1099 

1100""" 

1101 

1102# threshold 

1103docdict["tfce"] = """ 

1104tfce : :obj:`bool`, default=False 

1105 Whether to calculate :term:`TFCE` 

1106 as part of the permutation procedure or not. 

1107 The TFCE calculation is implemented 

1108 as described in :footcite:t:`Smith2009a`. 

1109 

1110 .. note:: 

1111 

1112 The number of thresholds used in the TFCE procedure 

1113 will be set between 10 and 1000.

1114 

1115 .. versionadded:: 0.11.2dev 

1116 

1117 .. warning:: 

1118 

1119 Performing TFCE-based inference 

1120 will increase the computation time 

1121 of the permutation procedure considerably. 

1122 The permutations may take multiple hours, 

1123 depending on how many permutations 

1124 are requested and how many jobs are performed in parallel. 

1125""" 

1126 

1127# threshold 

1128docdict["threshold"] = """ 

1129threshold : :obj:`int` or :obj:`float`, None, or 'auto', optional 

1130 If `None` is given, the image is not thresholded. 

1131 If a number is given, it must be non-negative. The specified value is used to

1132 threshold the image: values below the threshold (in absolute value) are

1133 plotted as transparent.

1134 If "auto" is given, the threshold is set to the 80th percentile

1135 of the absolute value of the image data.

1136""" 

1137 

1138# title 

1139docdict["title"] = """ 

1140title : :obj:`str`, or None, default=None 

1141 The title displayed on the figure. 

1142""" 

1143 

1144# transparency 

1145docdict["transparency"] = """ 

1146transparency : :obj:`float` between 0 and 1, \ 

1147 or a Niimg-Like object, \ 

1148 or None, \ 

1149 default=None

1150 Value to be passed as the alpha value to :func:`~matplotlib.pyplot.imshow`.

1151 If ``None`` is passed, it will be set to 1.

1152 If an image is passed, voxel-wise alpha blending will be applied, 

1153 by relying on the absolute value of ``transparency`` at each voxel. 

1154 

1155 .. versionadded:: 0.11.2 

1156""" 

1157 

1158# transparency 

1159docdict["transparency_range"] = """ 

1160transparency_range : :obj:`tuple` or :obj:`list` of 2 non-negative numbers, \ 

1161 or None, \ 

1162 default=None

1163 When an image is passed to ``transparency``, 

1164 this determines the range of values in the image 

1165 to use for transparency (alpha blending). 

1166 For example with ``transparency_range = [1.96, 3]``, 

1167 any voxel / vertex (:math:`v_i`): 

1168 

1169 - with a value between -1.96 and 1.96,

1170 would be fully transparent (alpha = 0), 

1171 - with a value less than -3 or greater than 3, 

1172 would be fully opaque (alpha = 1), 

1173 - with a value in the intervals ``[-3.0, -1.96]`` or ``[1.96, 3.0]``, 

1174 would have an alpha_i value 

1175 scaled linearly between 0 and 1 : 

1176 :math:`alpha_i = (\\lvert v_i \\lvert - 1.96) / (3.0 - 1.96)`. 

1177 

1178 This parameter will be ignored 

1179 unless an image is passed as ``transparency``. 

1180 The first number must be non-negative and less than the second one.

1181 If ``None`` is passed,

1182 this will be set to ``[0, max(abs(transparency))]``. 

1183 

1184 .. versionadded:: 0.11.2 

1185""" 

1186 

1187# upper_cutoff 

1188docdict["upper_cutoff"] = """ 

1189upper_cutoff : :obj:`float`, optional 

1190 Upper fraction of the histogram to be discarded. 

1191""" 

1192 

1193# two_sided_test 

1194docdict["two_sided_test"] = """ 

1195two_sided_test : :obj:`bool`, default=False 

1196 

1197 - If ``True``, performs an unsigned t-test. 

1198 Both positive and negative effects are considered; the null 

1199 hypothesis is that the effect is zero. 

1200 - If ``False``, only positive effects are considered as relevant. 

1201 The null hypothesis is that the effect is zero or negative. 

1202""" 

1203 

1204# url 

1205docdict["url"] = """ 

1206url : :obj:`str` or None, default=None 

1207 URL of file to download. 

1208 Override download URL. 

1209 Used for testing only (or if you set up a mirror of the data).

1210""" 

1211 

1212# verbose 

1213verbose = """ 

1214verbose : :obj:`int`, default={} 

1215 Verbosity level (`0` means no message). 

1216""" 

1217docdict["verbose"] = verbose.format(1) 

1218docdict["verbose0"] = verbose.format(0) 

1219 

1220# view 

1221docdict["view"] = """ 

1222view : :obj:`str`, or a pair of :obj:`float` or :obj:`int`, default="lateral"\ 

1223 if `hemi` is "left" or "right", or "dorsal" if `hemi` is "both"

1224 If a string and `hemi` is "left" or "right", it must be in

1225 {"lateral", "medial", "dorsal", "ventral", "anterior", "posterior"}. 

1226 If `hemi` is "both", must be in {"left", "right", "dorsal", "ventral", 

1227 "anterior", "posterior"}. 

1228 If a sequence, must be a pair (elev, azim) of :obj:`float` or :obj:`int` 

1229 angles in degrees that will manually set a custom view. 

1230 E.g., view=[270.0, 90] or view=(0, -180.0). 

1231 View of the surface that is rendered. 

1232""" 

1233 

1234# vmax 

1235docdict["vmax"] = """ 

1236vmax : :obj:`float` or :obj:`int` or None, optional

1237 Upper bound of the colormap. The values above vmax are masked. 

1238 If `None`, the max of the image is used. 

1239 Passed to :func:`matplotlib.pyplot.imshow`. 

1240""" 

1241 

1242# vmin 

1243docdict["vmin"] = """ 

1244vmin : :obj:`float` or :obj:`int` or None, optional

1245 Lower bound of the colormap. The values below vmin are masked. 

1246 If `None`, the min of the image is used. 

1247 Passed to :func:`matplotlib.pyplot.imshow`. 

1248""" 

1249 

1250# y 

1251docdict["y_dummy"] = """ 

1252y : None 

1253 This parameter is unused. 

1254 It is solely included for scikit-learn compatibility. 

1255""" 

1256 

1257 

1258############################################################################## 

1259# 

1260# Other values definitions: return values, attributes... 

1261# 

1262 

1263# atlas_type 

1264docdict["atlas_type"] = """'atlas_type' : :obj:`str` 

1265 Type of atlas. 

1266 See :term:`Probabilistic atlas` and :term:`Deterministic atlas`.""" 

1267 

1268docdict["base_decomposition_attributes"] = """ 

1269 Attributes 

1270 ---------- 

1271 mask_img_ : Niimg-like object or :obj:`~nilearn.surface.SurfaceImage` 

1272 See :ref:`extracting_data`. 

1273 The mask of the data. 

1274 If no mask was given at masker creation:

1275 

1276 - for Nifti images, this contains the automatically computed mask

1277 via the selected ``mask_strategy``. 

1278 

1279 - for SurfaceImage objects, this mask encompasses all vertices of 

1280 the input images. 

1281 """ 

1282 

1283docdict["multi_pca_attributes"] = """ 

1284 masker_ : :obj:`~nilearn.maskers.MultiNiftiMasker` or \ 

1285 :obj:`~nilearn.maskers.SurfaceMasker` 

1286 Masker used to filter and mask data as first step. 

1287 If :obj:`~nilearn.maskers.MultiNiftiMasker` 

1288 or :obj:`~nilearn.maskers.SurfaceMasker` is given in 

1289 ``mask`` parameter, this is a copy of it. 

1290 Otherwise, a masker is created using the value of ``mask`` and 

1291 other NiftiMasker/SurfaceMasker 

1292 related parameters as initialization. 

1293 

1294 components_ : 2D numpy array (n_components x n-voxels or n-vertices) 

1295 Array of masked extracted components. 

1296 

1297 .. note:: 

1298 

1299 Use attribute ``components_img_`` 

1300 rather than manually unmasking 

1301 ``components_`` with ``masker_`` attribute. 

1302 

1303 components_img_ : 4D Nifti image \ 

1304 or 2D :obj:`~nilearn.surface.SurfaceImage` 

1305 The image giving the extracted components. 

1306 Each 3D Nifti image or 1D SurfaceImage is a component. 

1307 

1308 .. versionadded:: 0.4.1 

1309 

1310 variance_ : numpy array (n_components,) 

1311 The amount of variance explained 

1312 by each of the selected components. 

1313 """ 

1314 

1315docdict["base_decoder_fit_attributes"] = """ 

1316 Attributes 

1317 ---------- 

1318 masker_ : instance of NiftiMasker, MultiNiftiMasker, or SurfaceMasker 

1319 The masker used to mask the data. 

1320 

1321 mask_img_ : Nifti1Image or :obj:`~nilearn.surface.SurfaceImage` 

1322 Mask computed by the masker object. 

1323 

1324 classes_ : numpy.ndarray 

1325 Classes to predict. For classification only. 

1326 

1327 screening_percentile_ : :obj:`float` 

1328 Screening percentile corrected according to volume of mask, 

1329 relative to the volume of standard brain. 

1330 

1331 coef_ : numpy.ndarray, shape=(n_classes, n_features) 

1332 Contains the mean of the model's weight vectors across

1333 folds for each class. Returns None for Dummy estimators.

1334 

1335 coef_img_ : :obj:`dict` of Nifti1Image 

1336 Dictionary containing ``coef_`` with class names as keys, 

1337 and ``coef_`` transformed in Nifti1Images as values. 

1338 In the case of a regression, 

1339 it contains a single Nifti1Image at the key 'beta'. 

1340 Ignored if Dummy estimators are provided. 

1341 

1342 intercept_ : ndarray, shape (n_classes,)

1343 Intercept (also known as bias) added to the decision function. 

1344 Ignored if Dummy estimators are provided. 

1345 

1346 cv_ : :obj:`list` of pairs of lists 

1347 List of the (n_folds,) folds. 

1348 For the corresponding fold, 

1349 each pair is composed of two lists of indices, 

1350 one for the train samples and one for the test samples. 

1351 

1352 std_coef_ : numpy.ndarray, shape=(n_classes, n_features) 

1353 Contains the standard deviation of the model's weight vectors across

1354 folds for each class.

1355 Note that folds are not independent, 

1356 see 

1357 https://scikit-learn.org/stable/modules/cross_validation.html#cross-validation-iterators-for-grouped-data 

1358 Ignored if Dummy estimators are provided. 

1359 

1360 std_coef_img_ : :obj:`dict` of Nifti1Image 

1361 Dictionary containing `std_coef_` with class names as keys, 

1362 and `coef_` transformed in Nifti1Image as values. 

1363 In the case of a regression, 

1364 it contains a single Nifti1Image at the key 'beta'. 

1365 Ignored if Dummy estimators are provided. 

1366 

1367 cv_params_ : :obj:`dict` of :obj:`list` 

1368 Best point in the parameter grid for each tested fold 

1369 in the inner cross validation loop. 

1370 The grid is empty 

1371 when Dummy estimators are provided. 

1372 

1373 .. note:: 

1374 

1375 If the estimator used its built-in cross-validation, 

1376 this will include an additional key 

1377 for the single best value estimated 

1378 by the built-in cross-validation 

1379 ('best_C' for LogisticRegressionCV 

1380 and 'best_alpha' for RidgeCV/RidgeClassifierCV/LassoCV), 

1381 in addition to the input list of values. 

1382 

1383 scorer_ : function 

1384 Scorer function used on the held out data to choose the best 

1385 parameters for the model. 

1386 

1387 cv_scores_ : :obj:`dict`, (classes, n_folds) 

1388 Scores (misclassification) for each parameter, and on each fold.

1389

1390 n_outputs_ : :obj:`int`

1391 Number of outputs (column-wise).

1392 

1393 dummy_output_ : ndarray, shape=(n_classes, 2) \ 

1394 or shape=(1, 1) for regression 

1395 Contains dummy estimator attributes after class predictions 

1396 using strategies of :class:`sklearn.dummy.DummyClassifier` 

1397 (class_prior) 

1398 and :class:`sklearn.dummy.DummyRegressor` (constant) 

1399 from scikit-learn. 

1400 This attribute is necessary for estimating class predictions 

1401 after fit. 

1402 Returns None if non-dummy estimators are provided. 

1403""" 

1404 

1405# dataset description 

1406docdict["description"] = """'description' : :obj:`str` 

1407 Description of the dataset.""" 

1408 

1409# fsaverage options 

1410docdict["fsaverage_options"] = """ 

1411 

1412 - ``"fsaverage3"``: the low-resolution fsaverage3 mesh (642 nodes) 

1413 - ``"fsaverage4"``: the low-resolution fsaverage4 mesh (2562 nodes) 

1414 - ``"fsaverage5"``: the low-resolution fsaverage5 mesh (10242 nodes) 

1415 - ``"fsaverage6"``: the medium-resolution fsaverage6 mesh (40962 nodes) 

1416 - ``"fsaverage7"``: same as `"fsaverage"` 

1417 - ``"fsaverage"``: the high-resolution fsaverage mesh (163842 nodes) 

1418 

1419 .. note:: 

1420 

1421 The high-resolution fsaverage will result in more computation 

1422 time and memory usage.

1423 

1424""" 

1425 

1426# image returned Nifti maskers by inverse_transform 

1427docdict["img_inv_transform_nifti"] = """img : :obj:`nibabel.nifti1.Nifti1Image` 

1428 Transformed image in brain space. 

1429 Output shape for : 

1430 

1431 - 1D array : 3D :obj:`nibabel.nifti1.Nifti1Image` will be returned. 

1432 - 2D array : 4D :obj:`nibabel.nifti1.Nifti1Image` will be returned. 

1433 

1434 See :ref:`extracting_data`. 

1435 """ 

1436# image returned surface maskers by inverse_transform 

1437docdict[ 

1438 "img_inv_transform_surface" 

1439] = """img : :obj:`~nilearn.surface.SurfaceImage` 

1440 Signal for each vertex projected on the mesh. 

1441 Output shape for : 

1442 

1443 - 1D array : 1D :obj:`~nilearn.surface.SurfaceImage` will be returned. 

1444 - 2D array : 2D :obj:`~nilearn.surface.SurfaceImage` will be returned. 

1445 

1446 See :ref:`extracting_data`. 

1447 """ 

1448 

1449# atlas labels 

1450docdict["labels"] = """'labels' : :obj:`list` of :obj:`str` 

1451 List of the names of the regions.""" 

1452 

1453# mask_img_ for most nifti maskers 

1454docdict[ 

1455 "nifti_mask_img_" 

1456] = """mask_img_ : A 3D binary :obj:`nibabel.nifti1.Nifti1Image` or None. 

1457 The mask of the data. 

1458 If no ``mask_img`` was passed at masker construction, 

1459 then ``mask_img_`` is ``None``, otherwise 

1460 it is the resulting binarized version of ``mask_img``,

1461 where each voxel is ``True`` if all values across samples

1462 (for example across timepoints) are finite and different from 0."""

1463 

1464# look up table 

1465docdict["lut"] = """lut : :obj:`pandas.DataFrame` 

1466 Acts as a look up table (lut)

1467 with at least columns 'index' and 'name'. 

1468 Formatted according to 'dseg.tsv' format from 

1469 `BIDS <https://bids-specification.readthedocs.io/en/latest/derivatives/imaging.html#common-image-derived-labels>`_.""" 

1470 

1471# signals returned Nifti maskers by transform, fit_transform... 

1472docdict["signals_transform_nifti"] = """signals : :obj:`numpy.ndarray` 

1473 Signal for each :term:`voxel`. 

1474 Output shape for : 

1475 

1476 - 3D images: (number of elements,) array 

1477 - 4D images: (number of scans, number of elements) array 

1478 """ 

1479# signals returned Multi Nifti maskers by transform, fit_transform...

1480docdict[ 

1481 "signals_transform_multi_nifti" 

1482] = """signals : :obj:`list` of :obj:`numpy.ndarray` or :obj:`numpy.ndarray` 

1483 Signal for each :term:`voxel`. 

1484 Output shape for : 

1485 

1486 - 3D images: (number of elements,) array 

1487 - 4D images: (number of scans, number of elements) array 

1488 - list of 3D images: list of (number of elements,) array 

1489 - list of 4D images: list of (number of scans, number of elements) 

1490 array 

1491 """ 

1492# signals returned Multi Nifti maskers by transform, fit_transform...

1493docdict[ 

1494 "signals_transform_imgs_multi_nifti" 

1495] = """signals : :obj:`list` of :obj:`numpy.ndarray` 

1496 Signal for each :term:`voxel`. 

1497 Output shape for : 

1498 

1499 - list of 3D images: list of (number of elements,) array 

1500 - list of 4D images: list of (number of scans, number of elements) 

1501 array 

1502 """ 

1503# signals returned surface maskers by transform, fit_transform... 

1504docdict["signals_transform_surface"] = """signals : :obj:`numpy.ndarray` 

1505 Signal for each element. 

1506 Output shape for : 

1507 

1508 - 1D images: (number of elements,) array 

1509 - 2D images: (number of scans, number of elements) array 

1510 """ 

1511 

1512# template 

1513docdict["template"] = """'template' : :obj:`str` 

1514 The standardized space of analysis 

1515 in which the atlas results are provided. 

1516 When known it should be a valid template name 

1517 taken from the spaces described in 

1518 `the BIDS specification <https://bids-specification.readthedocs.io/en/latest/appendices/coordinate-systems.html#image-based-coordinate-systems>`_.""" 

1519 

1520 

1521# templateflow 

1522docdict["templateflow"] = """ 

1523 

1524.. admonition:: Nilearn MNI template 

1525 :class: important 

1526 

1527 The Nilearn template is asymmetrical ICBM152 2009, release a. 

1528 

1529 The default template of :term:`fMRIPrep` is the asymmetrical ICBM152 2009, 

1530 release c (MNI152NLin2009cAsym).

1531 

1532 If you wish to use the exact same release as :term:`fMRIPrep`, 

1533 please refer to `TemplateFlow <https://www.templateflow.org>`_. 

1534 

1535""" 

1536 

1537############################################################################## 

1538 

1539docdict_indented: dict[int, dict[str, str]] = {} 

1540 

1541 

1542def _indentcount_lines(lines): 

1543 """Minimum indent for all lines in line list. 

1544 

1545 >>> lines = [" one", " two", " three"] 

1546 >>> _indentcount_lines(lines) 

1547 1 

1548 >>> lines = [] 

1549 >>> _indentcount_lines(lines) 

1550 0 

1551 >>> lines = [" one"] 

1552 >>> _indentcount_lines(lines) 

1553 1 

1554 >>> _indentcount_lines([" "]) 

1555 0 

1556 

1557 """ 

1558 indentno = sys.maxsize 

1559 for line in lines: 

1560 stripped = line.lstrip() 

1561 if stripped: 

1562 indentno = min(indentno, len(line) - len(stripped)) 

1563 if indentno == sys.maxsize:  # coverage: branch 1563 ↛ 1564 never taken (condition never true)

1564 return 0 

1565 return indentno 

1566 

1567 

1568def fill_doc(f): 

1569 """Fill a docstring with docdict entries. 

1570 

1571 Parameters 

1572 ---------- 

1573 f : callable 

1574 The function to fill the docstring of. Will be modified in place. 

1575 

1576 Returns 

1577 ------- 

1578 f : callable 

1579 The function, potentially with an updated `__doc__`. 

1580 

1581 """ 

1582 docstring = f.__doc__ 

1583 if not docstring:  # coverage: branch 1583 ↛ 1584 never taken (condition never true)

1584 return f 

1585 lines = docstring.splitlines() 

1586 # Find the minimum indent of the main docstring, after first line 

1587 icount = 0 if len(lines) < 2 else _indentcount_lines(lines[1:]) 

1588 # Insert this indent to dictionary docstrings 

1589 try: 

1590 indented = docdict_indented[icount] 

1591 except KeyError: 

1592 indent = " " * icount 

1593 docdict_indented[icount] = indented = {} 

1594 for name, dstr in docdict.items(): 

1595 lines = dstr.splitlines() 

1596 try: 

1597 newlines = [lines[0]] + [indent + line for line in lines[1:]] 

1598 indented[name] = "\n".join(newlines) 

1599 except IndexError: 

1600 indented[name] = dstr 

1601 try: 

1602 f.__doc__ = docstring % indented 

1603 except (TypeError, ValueError, KeyError) as exp: 

1604 funcname = f.__name__ 

1605 funcname = docstring.split("\n")[0] if funcname is None else funcname 

1606 raise RuntimeError(f"Error documenting {funcname}:\n{exp!s}") 

1607 return f
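

# Editorial sketch (not part of the Nilearn API): shows how ``fill_doc`` and
# ``docdict`` fit together. A decorated function leaves ``%(key)s``
# placeholders in its docstring; ``fill_doc`` substitutes the matching
# ``docdict`` entries, re-indented to the docstring's own indentation.
# The ``example_plot`` function below is hypothetical and exists only for
# this demonstration; run the module directly to see the filled docstring.
if __name__ == "__main__":

    @fill_doc
    def example_plot(img, annotate=True, colorbar=False):
        """Plot an image (demo only).

        Parameters
        ----------
        %(img)s

        %(annotate)s

        %(colorbar)s
        """

    print(example_plot.__doc__)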