Coverage for nilearn/datasets/tests/test_testing.py: 0%

142 statements  

coverage.py v7.9.1, created at 2025-06-16 12:32 +0200

import re
import tarfile
import zipfile
from pathlib import Path

import pytest
import requests

from nilearn import image
from nilearn._utils.data_gen import generate_fake_fmri
from nilearn.datasets.tests import _testing

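# Note (assumption, not shown in this file): `request_mocker` is presumably a pytest
# fixture supplied by the test suite's conftest; it intercepts HTTP requests made
# through `requests` and answers them from the patterns registered in `url_mapping`.


# Checks that when several registered patterns match a URL, the most recently added
# matching pattern wins, and that non-matching patterns ("*.csv") are ignored.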
def test_sender_key_order(request_mocker):
    request_mocker.url_mapping["*message.txt"] = "message"
    resp = requests.get("https://example.org/message.txt")

    assert resp.text == "message"

    request_mocker.url_mapping["*.txt"] = "new message"
    resp = requests.get("https://example.org/message.txt")

    assert resp.text == "new message"

    request_mocker.url_mapping["*.csv"] = "other message"

    resp = requests.get("https://example.org/message.txt")

    assert resp.text == "new message"

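# Checks that the mocked "example_zip", "example_default_format" and "example_tar_gz"
# URLs return archives with the expected file listing, and that the bundled
# labels.csv is empty.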
def test_loading_from_archive_contents(tmp_path):
    expected_contents = sorted(
        [
            Path("README.txt"),
            Path("data"),
            Path("data", "img.nii.gz"),
            Path("data", "labels.csv"),
        ]
    )
    resp = requests.get("https://example.org/example_zip")
    file_path = tmp_path / "archive.zip"
    file_path.write_bytes(resp.content)
    zip_extract_dir = tmp_path / "extract_zip"
    zip_extract_dir.mkdir()

    with zipfile.ZipFile(str(file_path)) as zipf:
        assert sorted(map(Path, zipf.namelist())) == expected_contents
        zipf.extractall(str(zip_extract_dir))

    labels_file = zip_extract_dir / "data" / "labels.csv"

    assert labels_file.read_bytes() == b""

    for url_end in ["_default_format", "_tar_gz"]:
        resp = requests.get(f"https://example.org/example{url_end}")
        file_path = tmp_path / "archive.tar.gz"
        file_path.write_bytes(resp.content)
        tar_extract_dir = tmp_path / f"extract_tar{url_end}"
        tar_extract_dir.mkdir()

        with tarfile.open(str(file_path)) as tarf:
            assert sorted(map(Path, tarf.getnames())) == [
                Path(),
                *expected_contents,
            ]
            tarf.extractall(str(tar_extract_dir))

        labels_file = tar_extract_dir / "data" / "labels.csv"

        assert labels_file.read_bytes() == b""

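# Checks that url_mapping keys can be compiled regexes, and that values can be
# replacement templates (with backreferences), callables taking (match, request),
# or bare status codes.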
def test_sender_regex(request_mocker):
    url = "https://example.org/info?key=value&name=nilearn"
    pattern = re.compile(
        r".*example.org/(?P<section>.*)\?.*name=(?P<name>[^&]+)"
    )
    request_mocker.url_mapping[pattern] = r"in \g<section>: hello \2"
    resp = requests.get(url)

    assert resp.text == "in info: hello nilearn"

    def f(match, request):
        return f"name: {match.group('name')}, url: {request.url}"

    request_mocker.url_mapping[pattern] = f
    resp = requests.get(url)

    assert resp.text == f"name: nilearn, url: {url}"

    def g(match, request):  # noqa: ARG001
        return 403

    request_mocker.url_mapping[pattern] = g
    resp = requests.get(url)
    with pytest.raises(requests.HTTPError, match="Error"):
        resp.raise_for_status()

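# Checks that integer values are served as HTTP status codes ("OK" body on success,
# "ERROR" on failure), so raise_for_status() behaves as expected.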
def test_sender_status(request_mocker):
    request_mocker.url_mapping["*good"] = 200
    request_mocker.url_mapping["*forbidden"] = 403
    resp = requests.get("https://example.org/good")

    assert resp.status_code == 200
    assert resp.text == "OK"

    resp.raise_for_status()
    resp = requests.get("https://example.org/forbidden")

    assert resp.status_code == 403
    assert resp.text == "ERROR"
    with pytest.raises(requests.HTTPError, match="Error"):
        resp.raise_for_status()

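# Checks that mapping a URL pattern to an exception instance makes the mocked
# request raise that exception.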
class _MyError(Exception):
    pass


def test_sender_exception(request_mocker):
    request_mocker.url_mapping["*bad"] = _MyError("abc")
    with pytest.raises(_MyError, match="abc"):
        requests.get("ftp:example.org/bad")

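# Checks that a Nifti image used as a mapping value is served as downloadable
# image bytes that can be written to disk and reloaded.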
def test_sender_img(request_mocker, tmp_path):
    request_mocker.url_mapping["*"] = generate_fake_fmri()[0]
    resp = requests.get("ftp:example.org/download")
    file_path = tmp_path / "img.nii.gz"
    file_path.write_bytes(resp.content)
    img = image.load_img(str(file_path))

    assert img.shape == (10, 11, 12, 17)

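# Checks that values can be _testing.Response instances (or callables returning
# them), which allows overriding methods such as json() or customizing headers.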
class _MyResponse(_testing.Response):
    def json(self):
        return '{"count": 1}'


def test_sender_response(request_mocker):
    request_mocker.url_mapping["*example.org/a"] = _MyResponse("", "")

    def f(match, request):  # noqa: ARG001
        resp = _testing.Response(b"hello", request.url)
        resp.headers["cookie"] = "abc"
        return resp

    request_mocker.url_mapping["*example.org/b"] = f
    resp = requests.get("https://example.org/a")

    assert resp.json() == '{"count": 1}'

    resp = requests.get("https://example.org/b")

    assert resp.headers["cookie"] == "abc"

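# Checks that a str value is returned verbatim as the response body, whereas a
# pathlib.Path value is read and its file contents are returned.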
def test_sender_path(request_mocker, tmp_path):
    file_path = tmp_path / "readme.txt"
    with file_path.open("w") as f:
        f.write("hello")
    request_mocker.url_mapping["*path"] = str(file_path)
    request_mocker.url_mapping["*content"] = file_path

    resp = requests.get("https://example.org/path")

    assert resp.text == str(file_path)

    resp = requests.get("https://example.org/content")

    assert resp.text == "hello"

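# Checks that an unsupported mapping value (here a float) raises a TypeError when
# the URL is requested.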
def test_sender_bad_input(request_mocker):
    request_mocker.url_mapping["*"] = 2.5
    with pytest.raises(TypeError):
        requests.get("https://example.org")

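# Checks that dict_to_archive builds a tar.gz (or zip) archive in memory from a
# mapping of archive paths to contents (str, bytes, images, or existing
# files/directories), and that list_to_archive does the same from an iterable of
# paths with empty contents.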
def test_dict_to_archive(tmp_path):
    subdir = tmp_path / "tmp"
    subdir.mkdir()
    (subdir / "labels.csv").touch()
    (subdir / "img.nii.gz").touch()
    archive_spec = {
        "empty_data": subdir,
        "empty_data_path.txt": str(subdir),
        Path("data", "labels.csv"): "a,b,c",
        Path("data", "img.nii.gz"): generate_fake_fmri()[0],
        Path("a", "b", "c"): (100).to_bytes(
            length=1, byteorder="big", signed=False
        ),
    }
    targz = _testing.dict_to_archive(archive_spec)
    extract_dir = tmp_path / "extract"
    extract_dir.mkdir()
    archive_path = tmp_path / "archive"
    with archive_path.open("wb") as f:
        f.write(targz)
    with tarfile.open(str(archive_path)) as tarf:
        tarf.extractall(str(extract_dir))
    img = image.load_img(str(extract_dir / "data" / "img.nii.gz"))

    assert img.shape == (10, 11, 12, 17)
    with (extract_dir / "a" / "b" / "c").open("rb") as f:
        assert int.from_bytes(f.read(), byteorder="big", signed=False) == 100
    with (extract_dir / "empty_data" / "labels.csv").open() as f:
        assert f.read() == ""

    zip_archive = _testing.dict_to_archive(
        {"readme.txt": "hello", "archive": targz}, "zip"
    )
    with archive_path.open("wb") as f:
        f.write(zip_archive)

    with (
        zipfile.ZipFile(str(archive_path)) as zipf,
        zipf.open("archive", "r") as f,
    ):
        assert f.read() == targz

    from_list = _testing.list_to_archive(archive_spec.keys())
    with archive_path.open("wb") as f:
        f.write(from_list)

    with tarfile.open(str(archive_path)) as tarf:
        assert sorted(map(Path, tarf.getnames())) == sorted(
            [
                *list(map(Path, archive_spec.keys())),
                Path(),
                Path("a"),
                Path("a", "b"),
                Path("data"),
            ]
        )