Coverage for nilearn/datasets/tests/test_testing.py: 0% (142 statements)
coverage.py v7.9.1, created at 2025-06-16 12:32 +0200
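"""Tests for the helpers in nilearn.datasets.tests._testing."""
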
import re
import tarfile
import zipfile
from pathlib import Path

import pytest
import requests

from nilearn import image
from nilearn._utils.data_gen import generate_fake_fmri
from nilearn.datasets.tests import _testing
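

# When several registered patterns match a URL, the most recently
# registered matching pattern wins; non-matching patterns are ignored.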
def test_sender_key_order(request_mocker):
    request_mocker.url_mapping["*message.txt"] = "message"
    resp = requests.get("https://example.org/message.txt")

    assert resp.text == "message"

    request_mocker.url_mapping["*.txt"] = "new message"
    resp = requests.get("https://example.org/message.txt")

    assert resp.text == "new message"

    request_mocker.url_mapping["*.csv"] = "other message"

    resp = requests.get("https://example.org/message.txt")

    assert resp.text == "new message"
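

# The mocked example URLs return zip and tar.gz archives whose extracted
# layout matches expected_contents, with empty data files.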
def test_loading_from_archive_contents(tmp_path):
    expected_contents = sorted(
        [
            Path("README.txt"),
            Path("data"),
            Path("data", "img.nii.gz"),
            Path("data", "labels.csv"),
        ]
    )
    resp = requests.get("https://example.org/example_zip")
    file_path = tmp_path / "archive.zip"
    file_path.write_bytes(resp.content)
    zip_extract_dir = tmp_path / "extract_zip"
    zip_extract_dir.mkdir()

    with zipfile.ZipFile(str(file_path)) as zipf:
        assert sorted(map(Path, zipf.namelist())) == expected_contents
        zipf.extractall(str(zip_extract_dir))

    labels_file = zip_extract_dir / "data" / "labels.csv"

    assert labels_file.read_bytes() == b""

    for url_end in ["_default_format", "_tar_gz"]:
        resp = requests.get(f"https://example.org/example{url_end}")
        file_path = tmp_path / "archive.tar.gz"
        file_path.write_bytes(resp.content)
        tar_extract_dir = tmp_path / f"extract_tar{url_end}"
        tar_extract_dir.mkdir()

        with tarfile.open(str(file_path)) as tarf:
            assert sorted(map(Path, tarf.getnames())) == [
                Path(),
                *expected_contents,
            ]
            tarf.extractall(str(tar_extract_dir))

        labels_file = tar_extract_dir / "data" / "labels.csv"

        assert labels_file.read_bytes() == b""
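

# Compiled regex keys can map to replacement templates, to callables
# receiving the match and the request, or to a callable returning a
# status code.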
def test_sender_regex(request_mocker):
    url = "https://example.org/info?key=value&name=nilearn"
    pattern = re.compile(
        r".*example.org/(?P<section>.*)\?.*name=(?P<name>[^&]+)"
    )
    request_mocker.url_mapping[pattern] = r"in \g<section>: hello \2"
    resp = requests.get(url)

    assert resp.text == "in info: hello nilearn"

    def f(match, request):
        return f"name: {match.group('name')}, url: {request.url}"

    request_mocker.url_mapping[pattern] = f
    resp = requests.get(url)

    assert resp.text == f"name: nilearn, url: {url}"

    def g(match, request):  # noqa: ARG001
        return 403

    request_mocker.url_mapping[pattern] = g
    resp = requests.get(url)
    with pytest.raises(requests.HTTPError, match="Error"):
        resp.raise_for_status()
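

# Integer values are interpreted as HTTP status codes: 200 gives an "OK"
# body, 403 gives "ERROR" and makes raise_for_status fail.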
def test_sender_status(request_mocker):
    request_mocker.url_mapping["*good"] = 200
    request_mocker.url_mapping["*forbidden"] = 403
    resp = requests.get("https://example.org/good")

    assert resp.status_code == 200
    assert resp.text == "OK"

    resp.raise_for_status()
    resp = requests.get("https://example.org/forbidden")

    assert resp.status_code == 403
    assert resp.text == "ERROR"
    with pytest.raises(requests.HTTPError, match="Error"):
        resp.raise_for_status()
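

# Mapping a URL to an exception instance makes the request raise it.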
class _MyError(Exception):
    pass


def test_sender_exception(request_mocker):
    request_mocker.url_mapping["*bad"] = _MyError("abc")
    with pytest.raises(_MyError, match="abc"):
        requests.get("ftp:example.org/bad")
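

# Mapping a URL to a Nifti image serializes the image as the response
# content, so it can be written to disk and reloaded.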
def test_sender_img(request_mocker, tmp_path):
    request_mocker.url_mapping["*"] = generate_fake_fmri()[0]
    resp = requests.get("ftp:example.org/download")
    file_path = tmp_path / "img.nii.gz"
    file_path.write_bytes(resp.content)
    img = image.load_img(str(file_path))

    assert img.shape == (10, 11, 12, 17)
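

# Response objects (either pre-built instances or returned by a callable)
# are used as-is, so custom json() results and headers are preserved.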
class _MyResponse(_testing.Response):
    def json(self):
        return '{"count": 1}'


def test_sender_response(request_mocker):
    request_mocker.url_mapping["*example.org/a"] = _MyResponse("", "")

    def f(match, request):  # noqa: ARG001
        resp = _testing.Response(b"hello", request.url)
        resp.headers["cookie"] = "abc"
        return resp

    request_mocker.url_mapping["*example.org/b"] = f
    resp = requests.get("https://example.org/a")

    assert resp.json() == '{"count": 1}'

    resp = requests.get("https://example.org/b")

    assert resp.headers["cookie"] == "abc"
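

# A str value is returned verbatim as the response body, while a Path
# value is read and its file contents are returned.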
def test_sender_path(request_mocker, tmp_path):
    file_path = tmp_path / "readme.txt"
    with file_path.open("w") as f:
        f.write("hello")
    request_mocker.url_mapping["*path"] = str(file_path)
    request_mocker.url_mapping["*content"] = file_path

    resp = requests.get("https://example.org/path")

    assert resp.text == str(file_path)

    resp = requests.get("https://example.org/content")

    assert resp.text == "hello"
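

# Unsupported mapping values (here a float) raise a TypeError.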
def test_sender_bad_input(request_mocker):
    request_mocker.url_mapping["*"] = 2.5
    with pytest.raises(TypeError):
        requests.get("https://example.org")
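

# dict_to_archive builds a tar.gz (or zip) archive from a mapping of
# archive paths to contents (directories, strings, images, bytes);
# list_to_archive builds an archive containing just the given paths.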
def test_dict_to_archive(tmp_path):
    subdir = tmp_path / "tmp"
    subdir.mkdir()
    (subdir / "labels.csv").touch()
    (subdir / "img.nii.gz").touch()
    archive_spec = {
        "empty_data": subdir,
        "empty_data_path.txt": str(subdir),
        Path("data", "labels.csv"): "a,b,c",
        Path("data", "img.nii.gz"): generate_fake_fmri()[0],
        Path("a", "b", "c"): (100).to_bytes(
            length=1, byteorder="big", signed=False
        ),
    }
    targz = _testing.dict_to_archive(archive_spec)
    extract_dir = tmp_path / "extract"
    extract_dir.mkdir()
    archive_path = tmp_path / "archive"
    with archive_path.open("wb") as f:
        f.write(targz)
    with tarfile.open(str(archive_path)) as tarf:
        tarf.extractall(str(extract_dir))
    img = image.load_img(str(extract_dir / "data" / "img.nii.gz"))

    assert img.shape == (10, 11, 12, 17)

    with (extract_dir / "a" / "b" / "c").open("rb") as f:
        assert int.from_bytes(f.read(), byteorder="big", signed=False) == 100
    with (extract_dir / "empty_data" / "labels.csv").open() as f:
        assert f.read() == ""

    zip_archive = _testing.dict_to_archive(
        {"readme.txt": "hello", "archive": targz}, "zip"
    )
    with archive_path.open("wb") as f:
        f.write(zip_archive)

    with (
        zipfile.ZipFile(str(archive_path)) as zipf,
        zipf.open("archive", "r") as f,
    ):
        assert f.read() == targz

    from_list = _testing.list_to_archive(archive_spec.keys())
    with archive_path.open("wb") as f:
        f.write(from_list)

    with tarfile.open(str(archive_path)) as tarf:
        assert sorted(map(Path, tarf.getnames())) == sorted(
            [
                *list(map(Path, archive_spec.keys())),
                Path(),
                Path("a"),
                Path("a", "b"),
                Path("data"),
            ]
        )