1"""Tests for the nilearn.interfaces.bids submodule."""
3import json
4from pathlib import Path
6import numpy as np
7import pandas as pd
8import pytest
10from nilearn._utils.data_gen import (
11 add_metadata_to_bids_dataset,
12 create_fake_bids_dataset,
13 generate_fake_fmri_data_and_design,
14)
15from nilearn._utils.helpers import is_matplotlib_installed
16from nilearn.glm.first_level import FirstLevelModel, first_level_from_bids
17from nilearn.glm.second_level import SecondLevelModel
18from nilearn.interfaces.bids import (
19 get_bids_files,
20 parse_bids_filename,
21 save_glm_to_bids,
22)
23from nilearn.interfaces.bids.query import (
24 _get_metadata_from_bids,
25 infer_repetition_time_from_dataset,
26 infer_slice_timing_start_time_from_dataset,
27)
28from nilearn.maskers import NiftiMasker


def test_get_metadata_from_bids(tmp_path):
    """Ensure that metadata is correctly extracted from BIDS JSON files.

    Throw a warning when the field is not found.
    Throw a warning when there is no JSON file.
    """
    json_file = tmp_path / "sub-01_task-main_bold.json"
    json_files = [json_file]

    with json_file.open("w") as f:
        json.dump({"RepetitionTime": 2.0}, f)
    value = _get_metadata_from_bids(
        field="RepetitionTime", json_files=json_files
    )
    assert value == 2.0

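    # The JSON file now lacks the requested field: a warning is expected
    # and, presumably, no usable value is returned (only the "no JSON file
    # at all" case below asserts None explicitly).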
    with json_file.open("w") as f:
        json.dump({"foo": 2.0}, f)
    with pytest.warns(UserWarning, match="'RepetitionTime' not found"):
        value = _get_metadata_from_bids(
            field="RepetitionTime", json_files=json_files
        )

    json_files = []
    with pytest.warns(UserWarning, match="No .*json found in BIDS"):
        value = _get_metadata_from_bids(
            field="RepetitionTime", json_files=json_files
        )
    assert value is None


def test_infer_repetition_time_from_dataset(tmp_path):
    """Test inferring repetition time from the BIDS dataset.

    When using create_fake_bids_dataset the value is 1.5 secs by default
    in the raw dataset.
    When using add_metadata_to_bids_dataset the value is 2.0 secs.
    """
    bids_path = create_fake_bids_dataset(
        base_dir=tmp_path, n_sub=1, n_ses=1, tasks=["main"], n_runs=[1]
    )

    t_r = infer_repetition_time_from_dataset(
        bids_path=tmp_path / bids_path, filters=[("task", "main")]
    )

    expected_t_r = 1.5
    assert t_r == expected_t_r

    expected_t_r = 2.0
    add_metadata_to_bids_dataset(
        bids_path=tmp_path / bids_path,
        metadata={"RepetitionTime": expected_t_r},
    )

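    # add_metadata_to_bids_dataset appears to write its JSON under the
    # derivatives folder by default, hence the derivatives path queried
    # below.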
    t_r = infer_repetition_time_from_dataset(
        bids_path=tmp_path / bids_path / "derivatives",
        filters=[("task", "main"), ("run", "01")],
    )

    assert t_r == expected_t_r


def test_infer_slice_timing_start_time_from_dataset(tmp_path):
    """Test inferring slice timing start time from the BIDS dataset.

    create_fake_bids_dataset does not add slice timing information
    by default so the value returned will be None.

    If the metadata is added to the BIDS dataset,
    then this value should be returned.
    """
    bids_path = create_fake_bids_dataset(
        base_dir=tmp_path, n_sub=1, n_ses=1, tasks=["main"], n_runs=[1]
    )

    StartTime = infer_slice_timing_start_time_from_dataset(
        bids_path=tmp_path / bids_path / "derivatives",
        filters=[("task", "main")],
    )

    expected_StartTime = None
    assert StartTime is expected_StartTime

    expected_StartTime = 1.0
    add_metadata_to_bids_dataset(
        bids_path=tmp_path / bids_path,
        metadata={"StartTime": expected_StartTime},
    )

    StartTime = infer_slice_timing_start_time_from_dataset(
        bids_path=tmp_path / bids_path / "derivatives",
        filters=[("task", "main")],
    )

    assert StartTime == expected_StartTime


def _rm_all_json_files_from_bids_dataset(bids_path):
    """Remove all JSON files and check that get_bids_files finds none."""
    for x in bids_path.glob("**/*.json"):
        x.unlink()

    selection = get_bids_files(bids_path, file_type="json", sub_folder=True)

    assert selection == []

    selection = get_bids_files(bids_path, file_type="json", sub_folder=False)

    assert selection == []


def test_get_bids_files_inheritance_principle_root_folder(tmp_path):
    """Check that JSON files in the root folder of a dataset are found.

    See https://bids-specification.readthedocs.io/en/latest/common-principles.html#the-inheritance-principle
    """
    bids_path = create_fake_bids_dataset(
        base_dir=tmp_path, n_sub=1, n_ses=1, tasks=["main"], n_runs=[1]
    )

    _rm_all_json_files_from_bids_dataset(bids_path)

    # add json file to root of dataset
    json_file = "task-main_bold.json"
    json_file = add_metadata_to_bids_dataset(
        bids_path=bids_path,
        metadata={"RepetitionTime": 1.5},
        json_file=json_file,
    )
    assert json_file.exists()

    # make sure that get_bids_files finds the json file
    # but only when looking in root of dataset
    selection = get_bids_files(
        bids_path,
        file_tag="bold",
        file_type="json",
        filters=[("task", "main")],
        sub_folder=True,
    )
    assert selection == []

    selection = get_bids_files(
        bids_path,
        file_tag="bold",
        file_type="json",
        filters=[("task", "main")],
        sub_folder=False,
    )

    assert selection != []
    assert selection[0] == str(json_file)


@pytest.mark.xfail(
    reason=(
        "get_bids_files does not find json files"
        " that are directly in the subject folder of a dataset."
    ),
    strict=True,
)
@pytest.mark.parametrize(
    "json_file",
    [
        "sub-01/sub-01_task-main_bold.json",
        "sub-01/ses-01/sub-01_ses-01_task-main_bold.json",
    ],
)
def test_get_bids_files_inheritance_principle_sub_folder(tmp_path, json_file):
    """Check that JSON files in a subject or session folder are found.

    See https://bids-specification.readthedocs.io/en/latest/common-principles.html#the-inheritance-principle
    """
    bids_path = create_fake_bids_dataset(
        base_dir=tmp_path, n_sub=1, n_ses=1, tasks=["main"], n_runs=[1]
    )

    _rm_all_json_files_from_bids_dataset(bids_path)

    new_json_file = add_metadata_to_bids_dataset(
        bids_path=bids_path,
        metadata={"RepetitionTime": 1.5},
        json_file=json_file,
    )
    assert new_json_file.exists()

    # make sure that get_bids_files finds the json file
    # but only when NOT looking in root of dataset
    selection = get_bids_files(
        bids_path,
        file_tag="bold",
        file_type="json",
        filters=[("task", "main")],
        sub_folder=False,
    )
    assert selection == []

    selection = get_bids_files(
        bids_path,
        file_tag="bold",
        file_type="json",
        filters=[("task", "main")],
        sub_folder=True,
    )
    assert selection != []
    assert selection[0] == str(new_json_file)


@pytest.mark.parametrize(
    "params, files_per_subject",
    [
        # files in total related to subject images.
        # Top-level files like README are not included.
        ({}, 19),
        # bold files expected: .nii.gz and .json files
        ({"file_tag": "bold"}, 12),
        # files that are nii.gz: bold and T1w files.
        ({"file_type": "nii.gz"}, 7),
        # There are only n_sub files in anat folders. One T1w per subject.
        ({"modality_folder": "anat"}, 1),
        # files corresponding to run 1 of session 2 of main task.
        # n_sub bold.nii.gz and n_sub bold.json files.
        (
            {
                "file_tag": "bold",
                "filters": [("task", "main"), ("run", "01"), ("ses", "02")],
            },
            2,
        ),
    ],
)
def test_get_bids_files(tmp_path, params, files_per_subject):
    """Check that the proper number of files is returned.

    For each possible option of file selection
    we check that we recover the appropriate number of files,
    as included in the fake bids dataset.
    """
    n_sub = 2

    bids_path = create_fake_bids_dataset(
        base_dir=tmp_path,
        n_sub=n_sub,
        n_ses=2,
        tasks=["localizer", "main"],
        n_runs=[1, 2],
    )

    selection = get_bids_files(bids_path, **params)

    assert len(selection) == files_per_subject * n_sub

    # files corresponding to subject 01
    selection = get_bids_files(bids_path, sub_label="01")

    assert len(selection) == 19

    # Get top-level folder files. Only 1 in this case: the README file.
    selection = get_bids_files(bids_path, sub_folder=False)

    assert len(selection) == 1


def test_get_bids_files_fmriprep(tmp_path):
    """Check that the proper number of files is returned for fmriprep."""
    n_sub = 2

    bids_path = create_fake_bids_dataset(
        base_dir=tmp_path,
        n_sub=n_sub,
        n_ses=2,
        tasks=["localizer", "main"],
        n_runs=[1, 2],
        confounds_tag="desc-confounds_timeseries",
    )

    # confounds files, testing the `fmriprep` >= 20.2 naming
    selection = get_bids_files(
        bids_path / "derivatives",
        file_tag="desc-confounds_timeseries",
    )
    assert len(selection) == 12 * n_sub

    bids_path = create_fake_bids_dataset(
        base_dir=tmp_path,
        n_sub=n_sub,
        n_ses=2,
        tasks=["localizer", "main"],
        n_runs=[1, 2],
        confounds_tag="desc-confounds_regressors",
    )

    # confounds files, testing the `fmriprep` < 20.2 naming
    selection = get_bids_files(
        bids_path / "derivatives",
        file_tag="desc-confounds_regressors",
    )

    assert len(selection) == 12 * n_sub


def test_get_bids_files_no_space_entity(tmp_path):
    """Passing an empty string for a label ignores files with that label.

    - remove the space entity only from subject 01
    - check that only files from the appropriate subject are returned
      when passing ("space", "T1w") or ("space", "")
    """
    n_sub = 2

    bids_path = create_fake_bids_dataset(
        base_dir=tmp_path,
        n_sub=n_sub,
        n_ses=2,
        tasks=["main"],
        n_runs=[2],
    )

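    # Strip the space entity from all of sub-01's derivative images so
    # that, e.g., a (hypothetical) sub-01_..._space-T1w_bold.nii.gz
    # becomes sub-01_..._bold.nii.gz.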
    for file in (bids_path / "derivatives" / "sub-01").glob(
        "**/*_space-*.nii.gz"
    ):
        stem = [
            entity
            for entity in file.stem.split("_")
            if not entity.startswith("space")
        ]
        file.replace(file.with_stem("_".join(stem)))

    selection = get_bids_files(
        bids_path / "derivatives",
        file_tag="bold",
        file_type="nii.gz",
        filters=[("space", "T1w")],
    )

    assert selection
    assert all("sub-01" not in file for file in selection)

    selection = get_bids_files(
        bids_path / "derivatives",
        file_tag="bold",
        file_type="nii.gz",
        filters=[("space", "")],
    )

    assert selection
    assert all("sub-02" not in file for file in selection)


def test_parse_bids_filename():
    """Check that a typical BIDS file is properly parsed."""
    fields = ["sub", "ses", "task", "lolo"]
    labels = ["01", "01", "langloc", "lala"]
    file_name = "sub-01_ses-01_task-langloc_lolo-lala_bold.nii.gz"

    file_path = Path("dataset", "sub-01", "ses-01", "func", file_name)

    with pytest.deprecated_call(
        match="a dictionary that uses BIDS terms as keys"
    ):
        file_dict = parse_bids_filename(file_path, legacy=True)

    for fidx, field in enumerate(fields):
        assert file_dict[field] == labels[fidx]
    assert file_dict["file_type"] == "nii.gz"
    assert file_dict["file_tag"] == "bold"
    assert file_dict["file_path"] == file_path
    assert file_dict["file_basename"] == file_name
    assert file_dict["file_fields"] == fields

    file_dict = parse_bids_filename(file_path, legacy=False)
    assert file_dict["extension"] == "nii.gz"
    assert file_dict["suffix"] == "bold"
    assert file_dict["file_path"] == file_path
    assert file_dict["file_basename"] == file_name
    entities = {field: labels[fidx] for fidx, field in enumerate(fields)}
    assert file_dict["entities"] == entities


@pytest.mark.timeout(0)
@pytest.mark.parametrize(
    "prefix", ["sub-01_ses-01_task-nback", "sub-01_task-nback", "task-nback"]
)
def test_save_glm_to_bids(tmp_path_factory, prefix):
    """Test that save_glm_to_bids saves the appropriate files.

    This test reuses code from
    nilearn.glm.tests.test_first_level.test_high_level_glm_one_session.
    """
    tmpdir = tmp_path_factory.mktemp("test_save_glm_results")

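    # save_glm_to_bids maps each contrast name onto the BIDS "contrast"
    # entity: judging by the expected filenames below, "effects of
    # interest" becomes the camelCase label "effectsOfInterest".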
    EXPECTED_FILENAMES = [
        "contrast-effectsOfInterest_stat-F_statmap.nii.gz",
        "contrast-effectsOfInterest_stat-effect_statmap.nii.gz",
        "contrast-effectsOfInterest_stat-p_statmap.nii.gz",
        "contrast-effectsOfInterest_stat-variance_statmap.nii.gz",
        "contrast-effectsOfInterest_stat-z_statmap.nii.gz",
        "contrast-effectsOfInterest_clusters.tsv",
        "contrast-effectsOfInterest_clusters.json",
        "design.tsv",
        "design.json",
        "stat-errorts_statmap.nii.gz",
        "stat-rsquared_statmap.nii.gz",
        "statmap.json",
        "mask.nii.gz",
        "report.html",
    ]

    if is_matplotlib_installed():
        EXPECTED_FILENAMES.extend(
            [
                "design.png",
                "contrast-effectsOfInterest_design.png",
            ]
        )

    shapes, rk = [(7, 8, 9, 15)], 3
    _, fmri_data, design_matrices = generate_fake_fmri_data_and_design(
        shapes,
        rk,
    )

    single_run_model = FirstLevelModel(
        mask_img=None,
        minimize_memory=False,
    ).fit(fmri_data[0], design_matrices=design_matrices[0])

    contrasts = {"effects of interest": np.eye(rk)}
    contrast_types = {"effects of interest": "F"}
    save_glm_to_bids(
        model=single_run_model,
        contrasts=contrasts,
        contrast_types=contrast_types,
        out_dir=tmpdir,
        prefix=prefix,
    )

    assert (tmpdir / "dataset_description.json").exists()

    sub_prefix = prefix.split("_")[0] if prefix.startswith("sub-") else ""

    for fname in EXPECTED_FILENAMES:
        assert (tmpdir / sub_prefix / f"{prefix}_{fname}").exists()


@pytest.mark.timeout(0)
def test_save_glm_to_bids_serialize_affine(tmp_path):
    """Test that affines are turned into a serializable type.

    Regression test for https://github.com/nilearn/nilearn/issues/4324.
    """
    shapes, rk = [(7, 8, 9, 15)], 3
    mask, fmri_data, design_matrices = generate_fake_fmri_data_and_design(
        shapes,
        rk,
    )

    target_affine = mask.affine

    single_run_model = FirstLevelModel(
        target_affine=target_affine,
        minimize_memory=False,
    ).fit(
        fmri_data[0],
        design_matrices=design_matrices[0],
    )

    save_glm_to_bids(
        model=single_run_model,
        contrasts={"effects of interest": np.eye(rk)},
        contrast_types={"effects of interest": "F"},
        out_dir=tmp_path,
        prefix="sub-01_ses-01_task-nback",
    )


@pytest.fixture
def n_cols_design_matrix():
    """Return the expected number of columns in the design matrix."""
    return 3


@pytest.fixture
def two_runs_model(n_cols_design_matrix):
    """Create a model fitted on two runs of data."""
    shapes, rk = [(7, 8, 9, 10), (7, 8, 9, 10)], n_cols_design_matrix
    mask, fmri_data, design_matrices = generate_fake_fmri_data_and_design(
        shapes,
        rk,
    )
    # Rename two conditions in design matrices
    mapper = {
        design_matrices[0].columns[0]: "AAA",
        design_matrices[0].columns[1]: "BBB",
    }
    design_matrices[0] = design_matrices[0].rename(columns=mapper)
    mapper = {
        design_matrices[1].columns[0]: "AAA",
        design_matrices[1].columns[1]: "BBB",
    }
    design_matrices[1] = design_matrices[1].rename(columns=mapper)
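    # Renaming the first two conditions identically in both runs lets the
    # tests below define string contrasts such as "AAA - BBB" that resolve
    # in every run.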

    masker = NiftiMasker(mask)
    masker.fit()

    return FirstLevelModel(mask_img=None, minimize_memory=False).fit(
        fmri_data, design_matrices=design_matrices
    )


def test_save_glm_to_bids_errors(
    tmp_path_factory, two_runs_model, n_cols_design_matrix
):
    """Test errors of save_glm_to_bids."""
    tmpdir = tmp_path_factory.mktemp("test_save_glm_to_bids_errors")

    # Contrast names must be strings
    contrasts = {5: np.eye(n_cols_design_matrix)}
    with pytest.raises(ValueError, match="contrast names must be strings"):
        save_glm_to_bids(
            model=two_runs_model,
            contrasts=contrasts,
            out_dir=tmpdir,
            prefix="sub-01",
        )

    # Contrast definitions must be strings, numpy arrays, or lists
    contrasts = {"effects of interest": 5}
    with pytest.raises(
        ValueError, match="contrast definitions must be strings or array_likes"
    ):
        save_glm_to_bids(
            model=two_runs_model,
            contrasts=contrasts,
            out_dir=tmpdir,
            prefix="sub-01",
        )

    with pytest.raises(
        ValueError, match="Extra key-word arguments must be one of"
    ):
        save_glm_to_bids(
            model=two_runs_model,
            contrasts=["AAA - BBB"],
            out_dir=tmpdir,
            prefix="sub-01",
            foo="bar",
        )


@pytest.mark.timeout(0)
@pytest.mark.parametrize(
    "prefix", ["sub-01_ses-01_task-nback", "sub-01_task-nback_", 1]
)
@pytest.mark.parametrize("contrasts", [["AAA - BBB"], "AAA - BBB"])
def test_save_glm_to_bids_contrast_definitions(
    tmp_path_factory, two_runs_model, contrasts, prefix
):
    """Test that save_glm_to_bids operates on different contrast definitions \
    as expected.

    - Test string-based contrasts and undefined contrast types

    This test reuses code from
    nilearn.glm.tests.test_first_level.test_high_level_glm_one_session.
    """
    tmpdir = tmp_path_factory.mktemp(
        "test_save_glm_to_bids_contrast_definitions"
    )

    EXPECTED_FILENAME_ENDINGS = [
        "contrast-aaaMinusBbb_stat-effect_statmap.nii.gz",
        "contrast-aaaMinusBbb_stat-p_statmap.nii.gz",
        "contrast-aaaMinusBbb_stat-t_statmap.nii.gz",
        "contrast-aaaMinusBbb_stat-variance_statmap.nii.gz",
        "contrast-aaaMinusBbb_stat-z_statmap.nii.gz",
        "contrast-aaaMinusBbb_clusters.tsv",
        "contrast-aaaMinusBbb_clusters.json",
        "run-1_design.tsv",
        "run-1_design.json",
        "run-1_stat-errorts_statmap.nii.gz",
        "run-1_stat-rsquared_statmap.nii.gz",
        "run-2_design.tsv",
        "run-2_design.json",
        "run-2_stat-errorts_statmap.nii.gz",
        "run-2_stat-rsquared_statmap.nii.gz",
        "statmap.json",
        "mask.nii.gz",
        "report.html",
    ]
    if is_matplotlib_installed():
        EXPECTED_FILENAME_ENDINGS.extend(
            [
                "run-1_contrast-aaaMinusBbb_design.png",
                "run-1_design.png",
                "run-2_contrast-aaaMinusBbb_design.png",
                "run-2_design.png",
            ]
        )

    save_glm_to_bids(
        model=two_runs_model,
        contrasts=contrasts,
        contrast_types=None,
        out_dir=tmpdir,
        prefix=prefix,
    )

    assert (tmpdir / "dataset_description.json").exists()

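    # Mirror save_glm_to_bids's apparent prefix handling: non-string
    # prefixes are ignored and a trailing underscore is appended if missing.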
    if not isinstance(prefix, str):
        prefix = ""

    if prefix and not prefix.endswith("_"):
        prefix = f"{prefix}_"

    sub_prefix = prefix.split("_")[0] if prefix.startswith("sub-") else ""

    for fname in EXPECTED_FILENAME_ENDINGS:
        assert (tmpdir / sub_prefix / f"{prefix}{fname}").exists()


@pytest.mark.timeout(0)
@pytest.mark.parametrize("prefix", ["task-nback"])
def test_save_glm_to_bids_second_level(tmp_path_factory, prefix):
    """Test save_glm_to_bids on a SecondLevelModel.

    This test reuses code from
    nilearn.glm.tests.test_second_level.test_high_level_glm_with_paths.
    """
    tmpdir = tmp_path_factory.mktemp("test_save_glm_to_bids_second_level")

    EXPECTED_FILENAMES = [
        "contrast-effectsOfInterest_stat-F_statmap.nii.gz",
        "contrast-effectsOfInterest_stat-effect_statmap.nii.gz",
        "contrast-effectsOfInterest_stat-p_statmap.nii.gz",
        "contrast-effectsOfInterest_stat-variance_statmap.nii.gz",
        "contrast-effectsOfInterest_stat-z_statmap.nii.gz",
        "contrast-effectsOfInterest_clusters.tsv",
        "contrast-effectsOfInterest_clusters.json",
        "design.tsv",
        "stat-errorts_statmap.nii.gz",
        "stat-rsquared_statmap.nii.gz",
        "statmap.json",
        "mask.nii.gz",
        "report.html",
    ]
    if is_matplotlib_installed():
        EXPECTED_FILENAMES.extend(
            [
                "design.png",
                "contrast-effectsOfInterest_design.png",
            ]
        )

    shapes = ((3, 3, 3, 1),)
    rk = 3
    mask, fmri_data, _ = generate_fake_fmri_data_and_design(
        shapes,
        rk,
    )
    fmri_data = fmri_data[0]

    # Ordinary Least Squares case
    model = SecondLevelModel(mask_img=mask, minimize_memory=False)

    # fit model
    Y = [fmri_data] * 2
    X = pd.DataFrame([[1]] * 2, columns=["intercept"])
    model = model.fit(Y, design_matrix=X)

    contrasts = {
        "effects of interest": np.eye(len(model.design_matrix_.columns))[0],
    }
    contrast_types = {"effects of interest": "F"}

    save_glm_to_bids(
        model=model,
        contrasts=contrasts,
        contrast_types=contrast_types,
        out_dir=tmpdir,
        prefix=prefix,
    )

    assert (tmpdir / "dataset_description.json").exists()

    for fname in EXPECTED_FILENAMES:
        assert (tmpdir / "group" / f"{prefix}_{fname}").exists()


@pytest.mark.timeout(0)
def test_save_glm_to_bids_glm_report_no_contrast(two_runs_model, tmp_path):
    """Run generate_report with no contrasts after save_glm_to_bids.

    generate_report tries to rely on some of the generated output:
    if no contrasts are passed to generate_report,
    it falls back on the content of the model.

    The report should contain the proper contrast names,
    not filenames and not bytes.
    """
    contrasts = {"BBB-AAA": "BBB-AAA"}
    contrast_types = {"BBB-AAA": "t"}
    model = save_glm_to_bids(
        model=two_runs_model,
        contrasts=contrasts,
        contrast_types=contrast_types,
        out_dir=tmp_path,
    )

    assert model._reporting_data.get("filenames", None) is not None

    EXPECTED_FILENAMES = [
        "run-1_design.png",
        "run-1_corrdesign.png",
        "run-1_contrast-bbbMinusAaa_design.png",
    ]

    with (tmp_path / "report.html").open("r") as f:
        content = f.read()
    assert "BBB-AAA" in content
    for file in EXPECTED_FILENAMES:
        assert file in content

    report = model.generate_report()

    assert "BBB-AAA" in str(report)
    for file in EXPECTED_FILENAMES:
        assert file in str(report)


@pytest.mark.timeout(0)
def test_save_glm_to_bids_glm_report_new_contrast(two_runs_model, tmp_path):
    """Run generate_report after save_glm_to_bids with different contrasts.

    generate_report tries to rely on some of the generated output,
    but if different contrasts are requested
    then it has to do some extra contrast computation.
    """
    contrasts = {"BBB-AAA": "BBB-AAA"}
    contrast_types = {"BBB-AAA": "t"}
    model = save_glm_to_bids(
        model=two_runs_model,
        contrasts=contrasts,
        contrast_types=contrast_types,
        out_dir=tmp_path,
    )

    EXPECTED_FILENAMES = [
        "run-1_design.png",
        "run-1_corrdesign.png",
        "run-1_contrast-bbbMinusAaa_design.png",
    ]

    # check the content of a new report
    report = model.generate_report(contrasts=["AAA-BBB"])

    assert "AAA-BBB" in str(report)
    assert "BBB-AAA" not in str(report)
    for file in EXPECTED_FILENAMES:
        assert file not in str(report)


@pytest.mark.timeout(0)
def test_save_glm_to_bids_infer_filenames(tmp_path):
    """Check that output filenames can be inferred from BIDS input."""
    n_sub = 1

    bids_path = create_fake_bids_dataset(
        base_dir=tmp_path,
        n_sub=n_sub,
        n_ses=2,
        tasks=["main"],
        n_runs=[2],
        n_voxels=20,
    )

    models, imgs, events, _ = first_level_from_bids(
        dataset_path=bids_path,
        task_label="main",
        space_label="MNI",
        img_filters=[("desc", "preproc")],
        slice_time_ref=0.0,  # set to 0.0 to avoid warnings
    )

    model = models[0]
    run_imgs = imgs[0]
    events = events[0]

    model.minimize_memory = False
    model.fit(run_imgs=run_imgs, events=events)

    # 2 sessions with 2 runs each
    assert len(model._reporting_data["run_imgs"]) == 4

    model = save_glm_to_bids(
        model=model, out_dir=tmp_path / "output", contrasts=["c0"]
    )

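    # Per the expected filenames below: run-level outputs (design matrices,
    # r-squared maps) keep their ses-/run- entities, while contrast-level
    # outputs are computed across runs and drop them.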
    EXPECTED_FILENAME_ENDINGS = [
        "sub-01_task-main_space-MNI_contrast-c0_stat-z_statmap.nii.gz",
        "sub-01_task-main_space-MNI_contrast-c0_clusters.tsv",
        "sub-01_task-main_space-MNI_contrast-c0_clusters.json",
        "sub-01_ses-01_task-main_run-01_space-MNI_stat-rsquared_statmap.nii.gz",
        "sub-01_ses-02_task-main_run-02_space-MNI_design.tsv",
        "sub-01_ses-01_task-main_run-02_space-MNI_design.json",
        # mask is common to all sessions and runs
        "sub-01_task-main_space-MNI_mask.nii.gz",
    ]
    if is_matplotlib_installed():
        EXPECTED_FILENAME_ENDINGS.extend(
            [
                "sub-01_ses-02_task-main_run-01_space-MNI_design.png",
                "sub-01_ses-02_task-main_run-01_space-MNI_corrdesign.png",
                "sub-01_ses-01_task-main_run-02_space-MNI_contrast-c0_design.png",
            ]
        )

    for fname in EXPECTED_FILENAME_ENDINGS:
        assert (tmp_path / "output" / "sub-01" / fname).exists()

    with (
        tmp_path
        / "output"
        / "sub-01"
        / "sub-01_task-main_space-MNI_contrast-c0_clusters.json"
    ).open("r") as f:
        metadata = json.load(f)

    for key in [
        "Height control",
        "Threshold (computed)",
        "Cluster size threshold (voxels)",
        "Minimum distance (mm)",
    ]:
        assert key in metadata


@pytest.mark.timeout(0)
@pytest.mark.parametrize("prefix", ["", "sub-01", "foo_"])
def test_save_glm_to_bids_infer_filenames_override(tmp_path, prefix):
    """Check that output filenames are not inferred when a prefix is passed."""
    n_sub = 1

    bids_path = create_fake_bids_dataset(
        base_dir=tmp_path,
        n_sub=n_sub,
        n_ses=1,
        tasks=["main"],
        n_runs=[1],
        n_voxels=20,
    )

    models, imgs, events, _ = first_level_from_bids(
        dataset_path=bids_path,
        task_label="main",
        space_label="MNI",
        img_filters=[("desc", "preproc")],
        slice_time_ref=0.0,  # set to 0.0 to avoid warnings
    )

    model = models[0]
    run_imgs = imgs[0]
    events = events[0]

    model.minimize_memory = False
    model.fit(run_imgs=run_imgs, events=events)

    model = save_glm_to_bids(
        model=model,
        out_dir=tmp_path / "output",
        contrasts=["c0"],
        prefix=prefix,
    )

    EXPECTED_FILENAME_ENDINGS = [
        "mask.nii.gz",
        "contrast-c0_stat-z_statmap.nii.gz",
        "contrast-c0_clusters.tsv",
        "contrast-c0_clusters.json",
        "stat-rsquared_statmap.nii.gz",
        "design.tsv",
        "design.json",
    ]

    if prefix != "" and not prefix.endswith("_"):
        prefix += "_"

    sub_prefix = prefix.split("_")[0] if prefix.startswith("sub-") else ""

    for fname in EXPECTED_FILENAME_ENDINGS:
        assert (tmp_path / "output" / sub_prefix / f"{prefix}{fname}").exists()