Coverage for nilearn/decoding/tests/test_objective_functions.py: 0% (38 statements)
1"""Test module for functions related cost functions (including penalties)."""

import numpy as np
import pytest
from numpy.testing import assert_almost_equal, assert_array_equal
from scipy.optimize import check_grad

from nilearn.decoding._objective_functions import (
    divergence_id,
    gradient_id,
    logistic_loss,
    logistic_loss_grad,
)

L1_RATIO = [0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0]


@pytest.mark.parametrize("ndim", range(1, 5))
@pytest.mark.parametrize("l1_ratio", L1_RATIO)
@pytest.mark.parametrize("size", [3, 4, 5])
def test_grad_div_adjoint_arbitrary_ndim(rng, ndim, l1_ratio, size):
    # Check the adjoint relation <D x, y> = <x, D^T y> for random x and y.
    # Since divergence_id is minus the adjoint of gradient_id, this reads
    # <gradient_id(x), y> = -<x, divergence_id(y)>.
    shape = tuple([size] * ndim)
    x = rng.normal(size=shape)
    y = rng.normal(size=[ndim + 1, *shape])

    assert_almost_equal(
        np.sum(gradient_id(x, l1_ratio=l1_ratio) * y),
        -np.sum(x * divergence_id(y, l1_ratio=l1_ratio)),
    )
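

# A minimal sketch of the adjoint identity exercised above, reduced to plain
# 1D forward differences so the sign convention is visible. The helpers below
# are hypothetical illustrations, not nilearn APIs: for a forward-difference
# operator D with a zero trailing boundary, the matching backward-difference
# "divergence" is -D^T, so <D x, y> == -<x, div y>.
def _forward_diff_sketch(x):
    # (D x)[i] = x[i + 1] - x[i], with an implicit zero past the boundary.
    out = np.zeros_like(x, dtype=float)
    out[:-1] = x[1:] - x[:-1]
    return out


def _backward_div_sketch(y):
    # div = -D^T: (div y)[0] = y[0], (div y)[i] = y[i] - y[i - 1] in the
    # middle, and (div y)[n - 1] = -y[n - 2].
    out = np.empty_like(y, dtype=float)
    out[0] = y[0]
    out[1:-1] = y[1:-1] - y[:-2]
    out[-1] = -y[-2]
    return out


def test_forward_diff_adjoint_sketch(rng):
    # Same <D x, y> == -<x, div y> check as above, for the 1D sketch pair.
    x, y = rng.normal(size=10), rng.normal(size=10)
    assert_almost_equal(
        np.sum(_forward_diff_sketch(x) * y),
        -np.sum(x * _backward_div_sketch(y)),
    )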


@pytest.mark.parametrize("l1_ratio", L1_RATIO)
@pytest.mark.parametrize("size", [1, 2, 10])
def test_1d_gradient_id(l1_ratio, size):
    img = np.arange(size)

    gid = gradient_id(img, l1_ratio=l1_ratio)

    assert_array_equal(gid.shape, [img.ndim + 1, *img.shape])
    assert_array_equal(l1_ratio * img, gid[-1])
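

# A sketch of the operator layout the assertions above pin down: the first
# img.ndim slices hold forward differences along each axis and the trailing
# slice holds the l1_ratio-scaled image itself. This is reconstructed from
# the tested properties, not nilearn's actual implementation; in particular
# the (1 - l1_ratio) scaling of the spatial part is an assumption about the
# TV-l1 convention.
def _gradient_id_sketch(img, l1_ratio):
    img = np.asarray(img, dtype=float)
    out = np.zeros((img.ndim + 1, *img.shape))
    for axis in range(img.ndim):
        # Forward difference along `axis`, zero at the trailing boundary.
        trim = tuple(
            slice(0, -1) if a == axis else slice(None)
            for a in range(img.ndim)
        )
        out[axis][trim] = np.diff(img, axis=axis)
    out[:-1] *= 1.0 - l1_ratio  # assumed scaling of the gradient part
    out[-1] = l1_ratio * img  # the "id" part asserted by the tests above
    return out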


@pytest.mark.parametrize("l1_ratio", L1_RATIO)
def test_2d_gradient_id(l1_ratio):
    img = np.array([[1, 3], [4, 2]])

    gid = gradient_id(img, l1_ratio)

    assert_array_equal(gid.shape, [img.ndim + 1, *img.shape])
    assert_array_equal(l1_ratio * img, gid[-1])


@pytest.mark.parametrize("l1_ratio", L1_RATIO)
def test_3d_gradient_id(l1_ratio):
    img = np.array([[[1, 3], [4, 2]], [[1, 0], [3, 4]]])

    gid = gradient_id(img, l1_ratio)

    assert_array_equal(gid.shape, [img.ndim + 1, *img.shape])
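

# The matching divergence sketch: minus the adjoint of the hypothetical
# _gradient_id_sketch above, i.e. backward differences accumulated per axis
# (assuming every image dimension has size >= 2), scaled by the assumed
# (1 - l1_ratio) factor, minus the identity part. With this pairing,
# <grad x, y> == -<x, div y> holds, which is exactly what
# test_grad_div_adjoint_arbitrary_ndim checks for the real operators.
def _divergence_id_sketch(grad, l1_ratio):
    ndim = grad.shape[0] - 1
    out = np.zeros(grad.shape[1:])
    for axis in range(ndim):
        # Views with `axis` moved up front; writes to `res` go through `out`.
        y = np.moveaxis(grad[axis], axis, 0)
        res = np.moveaxis(out, axis, 0)
        res[0] += y[0]
        res[1:-1] += y[1:-1] - y[:-2]
        res[-1] -= y[-2]
    out *= 1.0 - l1_ratio
    out -= l1_ratio * grad[-1]
    return out


def test_sketch_grad_div_adjoint(rng):
    # The sketch pair satisfies the same identity as the real operators.
    x = rng.normal(size=(4, 4, 4))
    y = rng.normal(size=(4, 4, 4, 4))
    assert_almost_equal(
        np.sum(_gradient_id_sketch(x, 0.3) * y),
        -np.sum(x * _divergence_id_sketch(y, 0.3)),
    )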


def test_logistic_loss_derivative(rng, n_samples=4, n_features=10, decimal=5):
    X = rng.standard_normal((n_samples, n_features))
    y = rng.standard_normal(n_samples)
    w = rng.standard_normal(n_features + 1)

    assert_almost_equal(
        check_grad(
            lambda w: logistic_loss(X, y, w),
            lambda w: logistic_loss_grad(X, y, w),
            w,
        ),
        0.0,
        decimal=decimal,
    )
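

# A hedged sketch of the loss/gradient pair that check_grad compares above:
# a logistic loss with the intercept folded into the last coordinate of w,
#     L(w) = sum_i log(1 + exp(-y_i * (x_i . w[:-1] + w[-1]))),
# whose derivative in z_i = x_i . w[:-1] + w[-1] is -y_i * sigmoid(-y_i z_i).
# Treat these as illustrations of the expected formulas, not nilearn's exact
# implementation.
def _logistic_loss_sketch(X, y, w):
    z = X.dot(w[:-1]) + w[-1]
    # np.logaddexp(0, t) evaluates log(1 + exp(t)) without overflow.
    return np.sum(np.logaddexp(0.0, -y * z))


def _logistic_loss_grad_sketch(X, y, w):
    z = X.dot(w[:-1]) + w[-1]
    dz = -y / (1.0 + np.exp(y * z))  # -y_i * sigmoid(-y_i * z_i)
    return np.append(X.T.dot(dz), dz.sum())


def test_logistic_loss_sketch_consistency(rng):
    # The sketch pair passes the same finite-difference check as the real one.
    X = rng.standard_normal((4, 10))
    y = rng.standard_normal(4)
    w = rng.standard_normal(11)
    assert_almost_equal(
        check_grad(
            lambda w_: _logistic_loss_sketch(X, y, w_),
            lambda w_: _logistic_loss_grad_sketch(X, y, w_),
            w,
        ),
        0.0,
        decimal=5,
    )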