Coverage for C:\Users\t590r\Documents\GitHub\suppy\suppy\feasibility\_bands\_ams_algorithms.py: 75% (174 statements). coverage.py v7.6.4, created at 2025-02-05 10:12 +0100.
from abc import ABC
from typing import List

import numpy as np
import numpy.typing as npt

try:
    import cupy as cp

    NO_GPU = False
except ImportError:
    NO_GPU = True
    cp = np

from suppy.feasibility._linear_algorithms import HyperslabFeasibility
from suppy.utils import LinearMapping


class HyperslabAMSAlgorithm(HyperslabFeasibility, ABC):
    """
    The HyperslabAMSAlgorithm class is used to find a feasible solution to a
    set of linear inequalities.

    Parameters
    ----------
    A : npt.NDArray
        The matrix representing the coefficients of the linear inequalities.
    lb : npt.NDArray
        The lower bounds for the inequalities.
    ub : npt.NDArray
        The upper bounds for the inequalities.
    algorithmic_relaxation : npt.NDArray or float, optional
        The relaxation parameter for the algorithm, by default 1.
    relaxation : float, optional
        The relaxation parameter for the feasibility problem, by default 1.
    proximity_flag : bool, optional
        A flag indicating whether to use proximity in the algorithm, by default True.
    """

    def __init__(
        self,
        A: npt.NDArray,
        lb: npt.NDArray,
        ub: npt.NDArray,
        algorithmic_relaxation: npt.NDArray | float = 1,
        relaxation: float = 1,
        proximity_flag: bool = True,
    ):
        super().__init__(A, lb, ub, algorithmic_relaxation, relaxation, proximity_flag)
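

# Illustrative sketch (not part of the original module): the feasibility problem
# addressed by these classes is to find x with lb <= A @ x <= ub componentwise.
# The helper below is a hypothetical, minimal NumPy check of a candidate point,
# shown only to make that target condition concrete.
def _is_feasible_sketch(A: npt.NDArray, lb: npt.NDArray, ub: npt.NDArray,
                        x: npt.NDArray, tol: float = 1e-10) -> bool:
    """Return True if x satisfies lb <= A @ x <= ub up to tolerance `tol`."""
    p = np.asarray(A, dtype=float) @ x
    return bool(np.all(p >= lb - tol) and np.all(p <= ub + tol))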


class SequentialAMSHyperslab(HyperslabAMSAlgorithm):
    """
    SequentialAMSHyperslab class for sequentially applying the AMS algorithm
    on hyperslabs.

    Parameters
    ----------
    A : npt.NDArray
        The matrix A used in the AMS algorithm.
    lb : npt.NDArray
        The lower bounds for the constraints.
    ub : npt.NDArray
        The upper bounds for the constraints.
    algorithmic_relaxation : npt.NDArray or float, optional
        The relaxation parameter for the algorithm, by default 1.
    relaxation : float, optional
        The relaxation parameter, by default 1.
    cs : None or List[int], optional
        The list of indices for the constraints, by default None.
    proximity_flag : bool, optional
        Flag to indicate if proximity should be considered, by default True.
    """

    def __init__(
        self,
        A: npt.NDArray,
        lb: npt.NDArray,
        ub: npt.NDArray,
        algorithmic_relaxation: npt.NDArray | float = 1,
        relaxation: float = 1,
        cs: None | List[int] = None,
        proximity_flag: bool = True,
    ):
        super().__init__(A, lb, ub, algorithmic_relaxation, relaxation, proximity_flag)
        xp = cp if self._use_gpu else np
        if cs is None:
            self.cs = xp.arange(self.A.shape[0])
        else:
            self.cs = cs

    def _project(self, x: npt.NDArray) -> npt.NDArray:
        """
        Projects the input array `x` onto the feasible region defined by the
        constraints.

        Parameters
        ----------
        x : npt.NDArray
            The input array to be projected.

        Returns
        -------
        npt.NDArray
            The projected array.
        """
        for i in self.cs:
            p_i = self.single_map(x, i)
            (res_li, res_ui) = self.bounds.single_residual(p_i, i)  # returns floats
            # check if constraints are violated
            # weights should be 1s!
            if res_ui < 0:
                self.A.update_step(
                    x, self.algorithmic_relaxation * self.inverse_row_norm[i] * res_ui, i
                )
            elif res_li < 0:
                self.A.update_step(
                    x, -1 * self.algorithmic_relaxation * self.inverse_row_norm[i] * res_li, i
                )
        return x
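

# Illustrative sketch (not part of the original module): a plain-NumPy version of
# one sequential AMS sweep in the spirit of SequentialAMSHyperslab._project above,
# assuming a dense matrix A and hyperslab bounds lb <= A @ x <= ub. The helper name
# and the explicit row-norm computation are hypothetical stand-ins for suppy's
# LinearMapping machinery.
def _sequential_ams_sweep_sketch(A, lb, ub, x, relaxation=1.0):
    """One relaxed sequential sweep over all hyperslabs lb[i] <= A[i] @ x <= ub[i]."""
    A = np.asarray(A, dtype=float)
    inverse_row_norm = 1.0 / np.einsum("ij,ij->i", A, A)  # 1 / ||a_i||^2
    for i in range(A.shape[0]):
        p_i = A[i] @ x
        res_li, res_ui = p_i - lb[i], ub[i] - p_i  # negative residual => violated
        if res_ui < 0:
            # above the upper bound: res_ui < 0, so this step moves x against A[i]
            x = x + relaxation * inverse_row_norm[i] * res_ui * A[i]
        elif res_li < 0:
            # below the lower bound: step along A[i] to raise A[i] @ x
            x = x - relaxation * inverse_row_norm[i] * res_li * A[i]
    return x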


class SequentialWeightedAMSHyperslab(SequentialAMSHyperslab):
    """
    Sequential AMS algorithm on hyperslabs with per-constraint weights and
    weight decay.

    Parameters
    ----------
    A : npt.NDArray
        The constraint matrix.
    lb : npt.NDArray
        The lower bounds of the constraints.
    ub : npt.NDArray
        The upper bounds of the constraints.
    weights : None, list of float, or npt.NDArray, optional
        The weights assigned to each constraint. If None, default weights are
        used.
    algorithmic_relaxation : npt.NDArray or float, optional
        The relaxation parameter for the algorithm. Default is 1.
    relaxation : float, optional
        The relaxation parameter for the algorithm. Default is 1.
    weight_decay : float, optional
        Parameter that determines the rate at which the weights are reduced
        after each phase (weights * weight_decay). Default is 1.
    cs : None or list of int, optional
        The indices of the constraints to be considered. Default is None.
    proximity_flag : bool, optional
        Flag to indicate if proximity should be considered. Default is True.

    Attributes
    ----------
    weights : npt.NDArray
        The weights assigned to each constraint.
    weight_decay : float
        Decay rate for the weights.
    temp_weight_decay : float
        Running decay factor applied to the relaxation. Starts at 1 and is
        multiplied by `weight_decay` after each projection sweep.
    """

    def __init__(
        self,
        A: npt.NDArray,
        lb: npt.NDArray,
        ub: npt.NDArray,
        weights: None | List[float] | npt.NDArray = None,
        algorithmic_relaxation: npt.NDArray | float = 1,
        relaxation: float = 1,
        weight_decay: float = 1,
        cs: None | List[int] = None,
        proximity_flag: bool = True,
    ):
        super().__init__(A, lb, ub, algorithmic_relaxation, relaxation, cs, proximity_flag)
        xp = cp if self._use_gpu else np
        self.weight_decay = weight_decay  # decay rate
        self.temp_weight_decay = 1  # running decay factor, starts at 1

        if weights is None:
            self.weights = xp.ones(self.A.shape[0])
        elif xp.abs(weights.sum() - 1) > 1e-10:
            print("Weights do not add up to 1! Renormalizing to 1...")
            self.weights = weights / weights.sum()
        else:
            self.weights = weights

    def _project(self, x: npt.NDArray) -> npt.NDArray:
        """
        Projects the input array `x` onto a feasible region defined by the
        constraints.

        Parameters
        ----------
        x : npt.NDArray
            The input array to be projected.

        Returns
        -------
        npt.NDArray
            The projected array.

        Notes
        -----
        This method iteratively adjusts the input array `x` based on the constraints
        defined in `self.cs`. For each constraint, it computes the projection and
        checks if the constraint is violated. If it is, the array `x` is updated
        using a weighted relaxation factor. The running decay factor is multiplied
        by `weight_decay` after each full sweep.
        """
        weighted_relaxation = self.algorithmic_relaxation * self.temp_weight_decay

        for i in self.cs:
            p_i = self.single_map(x, i)
            (res_li, res_ui) = self.bounds.single_residual(p_i, i)  # returns floats
            # check if constraints are violated
            if res_ui < 0:
                self.A.update_step(
                    x, weighted_relaxation * self.weights[i] * self.inverse_row_norm[i] * res_ui, i
                )
            elif res_li < 0:
                self.A.update_step(
                    x,
                    -1 * weighted_relaxation * self.weights[i] * self.inverse_row_norm[i] * res_li,
                    i,
                )

        self.temp_weight_decay *= self.weight_decay
        return x
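

# Illustrative sketch (not part of the original module): the weighted sequential
# sweep scales each constraint's step by its weight, and the effective relaxation
# shrinks geometrically by `weight_decay` after every full sweep. The helper name
# is hypothetical; the update mirrors the method above in plain NumPy.
def _weighted_sequential_ams_sketch(A, lb, ub, x, weights, relaxation=1.0,
                                    weight_decay=1.0, n_sweeps=10):
    """Run `n_sweeps` weighted sequential AMS sweeps with geometric decay."""
    A = np.asarray(A, dtype=float)
    inverse_row_norm = 1.0 / np.einsum("ij,ij->i", A, A)
    decay = 1.0  # corresponds to temp_weight_decay above
    for _ in range(n_sweeps):
        step = relaxation * decay
        for i in range(A.shape[0]):
            p_i = A[i] @ x
            res_li, res_ui = p_i - lb[i], ub[i] - p_i
            if res_ui < 0:
                x = x + step * weights[i] * inverse_row_norm[i] * res_ui * A[i]
            elif res_li < 0:
                x = x - step * weights[i] * inverse_row_norm[i] * res_li * A[i]
        decay *= weight_decay  # reduce the effective relaxation after each sweep
    return x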


class SimultaneousAMSHyperslab(HyperslabAMSAlgorithm):
    """
    SimultaneousAMSHyperslab class for simultaneous application of the AMS
    algorithm on hyperslabs.

    Parameters
    ----------
    A : npt.NDArray
        The matrix representing the constraints.
    lb : npt.NDArray
        The lower bounds for the constraints.
    ub : npt.NDArray
        The upper bounds for the constraints.
    algorithmic_relaxation : npt.NDArray or float, optional
        The relaxation parameter for the algorithm, by default 1.
    relaxation : float, optional
        The relaxation parameter for the projections, by default 1.
    weights : None or List[float], optional
        The weights for the constraints, by default None.
    proximity_flag : bool, optional
        Flag to indicate if proximity calculations should be performed, by default True.
    """

    def __init__(
        self,
        A: npt.NDArray,
        lb: npt.NDArray,
        ub: npt.NDArray,
        algorithmic_relaxation: npt.NDArray | float = 1,
        relaxation: float = 1,
        weights: None | List[float] = None,
        proximity_flag: bool = True,
    ):
        super().__init__(A, lb, ub, algorithmic_relaxation, relaxation, proximity_flag)

        xp = cp if self._use_gpu else np

        if weights is None:
            self.weights = xp.ones(self.A.shape[0]) / self.A.shape[0]
        elif xp.abs(weights.sum() - 1) > 1e-10:
            print("Weights do not add up to 1! Renormalizing to 1...")
            self.weights = weights / weights.sum()
        else:
            self.weights = weights

    def _project(self, x):
        # simultaneous projection
        p = self.map(x)
        (res_l, res_u) = self.bounds.residual(p)
        d_idx = res_u < 0
        c_idx = res_l < 0
        x += self.algorithmic_relaxation * (
            (self.weights * self.inverse_row_norm)[d_idx] * res_u[d_idx] @ self.A[d_idx, :]
            - (self.weights * self.inverse_row_norm)[c_idx] * res_l[c_idx] @ self.A[c_idx, :]
        )

        return x

    def _proximity(self, x: npt.NDArray, proximity_measures: List[str]) -> List[float]:
        p = self.map(x)
        # residuals are positive if constraints are met
        (res_l, res_u) = self.bounds.residual(p)
        res_u[res_u > 0] = 0
        res_l[res_l > 0] = 0
        res = -res_u - res_l
        measures = []
        for measure in proximity_measures:
            if isinstance(measure, tuple):
                if measure[0] == "p_norm":
                    measures.append(self.weights @ (res ** measure[1]))
                else:
                    raise ValueError("Invalid proximity measure")
            elif isinstance(measure, str) and measure == "max_norm":
                measures.append(res.max())
            else:
                raise ValueError("Invalid proximity measure")
        return measures
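

# Illustrative sketches (not part of the original module): the simultaneous step
# updates x once per sweep using a weighted sum of the corrections for all violated
# constraints, and the "p_norm" proximity measure is the weighted sum of violation
# residuals raised to the given power. Both helpers are hypothetical plain-NumPy
# stand-ins for the methods above.
def _simultaneous_ams_step_sketch(A, lb, ub, x, weights, relaxation=1.0):
    """One weighted simultaneous AMS step for lb <= A @ x <= ub."""
    A = np.asarray(A, dtype=float)
    inverse_row_norm = 1.0 / np.einsum("ij,ij->i", A, A)
    p = A @ x
    res_l, res_u = p - lb, ub - p
    d_idx, c_idx = res_u < 0, res_l < 0
    w = weights * inverse_row_norm
    x = x + relaxation * (
        w[d_idx] * res_u[d_idx] @ A[d_idx, :] - w[c_idx] * res_l[c_idx] @ A[c_idx, :]
    )
    return x


def _weighted_p_norm_proximity_sketch(A, lb, ub, x, weights, p_exp=2):
    """Weighted p-norm proximity: sum_i w_i * violation_i**p_exp (0 if feasible)."""
    p = np.asarray(A, dtype=float) @ x
    res_l, res_u = np.minimum(p - lb, 0), np.minimum(ub - p, 0)
    violation = -res_u - res_l  # per-constraint violation magnitude
    return float(weights @ (violation ** p_exp))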


class ExtrapolatedLandweber(SimultaneousAMSHyperslab):
    """
    Simultaneous AMS variant with an extrapolated (Landweber-type) step size
    computed per iteration and stored in `self.sigmas`.
    """

    def __init__(
        self, A, lb, ub, algorithmic_relaxation=1, relaxation=1, weights=None, proximity_flag=True
    ):
        super().__init__(A, lb, ub, algorithmic_relaxation, relaxation, weights, proximity_flag)
        self.a_i = self.A.row_norm(2, 2)
        self.weight_norm = self.weights / self.a_i
        self.sigmas = []

    def _project(self, x):
        xp = cp if self._use_gpu else np
        p = self.map(x)
        (res_l, res_u) = self.bounds.residual(p)
        d_idx = res_u < 0
        c_idx = res_l < 0
        if not (xp.any(d_idx) or xp.any(c_idx)):
            # no constraint is violated: nothing to do
            self.sigmas.append(0)
            return x
        t_u = self.weight_norm[d_idx] * res_u[d_idx]  # D*(Ax-b)+
        t_l = self.weight_norm[c_idx] * res_l[c_idx]
        t_u_2 = t_u @ self.A[d_idx, :]
        t_l_2 = t_l @ self.A[c_idx, :]

        # extrapolated step size along the combined correction direction
        sig = ((res_l[c_idx] @ t_l) + (res_u[d_idx] @ t_u)) / (
            (t_u_2 - t_l_2) @ (t_u_2 - t_l_2)
        )
        self.sigmas.append(sig)
        x += sig * (t_u_2 - t_l_2)

        return x
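

# Illustrative sketch (not part of the original module): the extrapolated step
# scales the simultaneous correction direction d = t_u_2 - t_l_2 by
# sigma = (sum_i w_i * violation_i^2 / ||a_i||^2) / ||d||^2 instead of a fixed
# relaxation. The plain-NumPy helper below mirrors _project above under the
# assumption that `weights / row_norm_squared` plays the role of `weight_norm`.
def _extrapolated_step_sketch(A, lb, ub, x, weights):
    """One extrapolated (Landweber-type) simultaneous step; returns (x, sigma)."""
    A = np.asarray(A, dtype=float)
    row_norm_sq = np.einsum("ij,ij->i", A, A)
    weight_norm = weights / row_norm_sq
    p = A @ x
    res_l, res_u = p - lb, ub - p
    d_idx, c_idx = res_u < 0, res_l < 0
    if not (np.any(d_idx) or np.any(c_idx)):
        return x, 0.0  # already feasible, no step
    t_u = weight_norm[d_idx] * res_u[d_idx]
    t_l = weight_norm[c_idx] * res_l[c_idx]
    direction = t_u @ A[d_idx, :] - t_l @ A[c_idx, :]
    sigma = (res_u[d_idx] @ t_u + res_l[c_idx] @ t_l) / (direction @ direction)
    return x + sigma * direction, float(sigma)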


class BlockIterativeAMSHyperslab(HyperslabAMSAlgorithm):
    """
    Block Iterative AMS Algorithm for hyperslabs.

    Parameters
    ----------
    A : npt.NDArray
        The matrix representing the linear constraints.
    lb : npt.NDArray
        The lower bounds for the constraints.
    ub : npt.NDArray
        The upper bounds for the constraints.
    weights : List[List[float]] or List[npt.NDArray]
        A list of lists or arrays representing the weights for each block.
        Each list/array should sum to 1.
    algorithmic_relaxation : npt.NDArray or float, optional
        The relaxation parameter for the algorithm, by default 1.
    relaxation : float, optional
        The relaxation parameter for the constraints, by default 1.
    proximity_flag : bool, optional
        A flag indicating whether to use proximity measures, by default True.

    Raises
    ------
    ValueError
        If any of the weight lists do not sum to 1.
    """

    def __init__(
        self,
        A: npt.NDArray,
        lb: npt.NDArray,
        ub: npt.NDArray,
        weights: List[List[float]] | List[npt.NDArray],
        algorithmic_relaxation: npt.NDArray | float = 1,
        relaxation: float = 1,
        proximity_flag: bool = True,
    ):
        super().__init__(A, lb, ub, algorithmic_relaxation, relaxation, proximity_flag)

        xp = cp if self._use_gpu else np

        # check that weights is a list of lists that each add up to 1
        for el in weights:
            if xp.abs(xp.sum(el) - 1) > 1e-10:
                raise ValueError("Weights do not add up to 1!")

        self.weights = []
        self.block_idxs = [
            xp.where(xp.array(el) > 0)[0] for el in weights
        ]  # indices of the constraints with non-zero weight in each block

        # assemble a list of general weights
        self.total_weights = xp.zeros_like(weights[0])
        for el in weights:
            el = xp.asarray(el)
            self.weights.append(el[xp.array(el) > 0])  # keep only the non-zero weights
            self.total_weights += el / len(weights)

    def _project(self, x):
        # simultaneous projection within each block
        xp = cp if self._use_gpu else np

        for el, block_idx in zip(self.weights, self.block_idxs):  # block indices and their weights
            p = self.indexed_map(x, block_idx)
            (res_l, res_u) = self.bounds.indexed_residual(p, block_idx)
            d_idx = res_u < 0
            c_idx = res_l < 0
            full_d_idx = block_idx[d_idx]
            full_c_idx = block_idx[c_idx]

            x += self.algorithmic_relaxation * (
                self.inverse_row_norm[full_d_idx]
                * el[d_idx]
                * res_u[d_idx]
                @ self.A[full_d_idx, :]
                - self.inverse_row_norm[full_c_idx]
                * el[c_idx]
                * res_l[c_idx]
                @ self.A[full_c_idx, :]
            )

        return x

    def _proximity(self, x: npt.NDArray, proximity_measures: List[str]) -> List[float]:
        p = self.map(x)
        # residuals are positive if constraints are met
        (res_l, res_u) = self.bounds.residual(p)
        res_u[res_u > 0] = 0
        res_l[res_l > 0] = 0
        res = -res_u - res_l
        measures = []
        for measure in proximity_measures:
            if isinstance(measure, tuple):
                if measure[0] == "p_norm":
                    measures.append(self.total_weights @ (res ** measure[1]))
                else:
                    raise ValueError("Invalid proximity measure")
            elif isinstance(measure, str) and measure == "max_norm":
                measures.append(res.max())
            else:
                raise ValueError("Invalid proximity measure")
        return measures
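

# Illustrative sketch (not part of the original module): block-iterative AMS
# cycles through blocks of constraints and applies one weighted simultaneous step
# per block. Blocks are given here as index arrays with matching weight arrays;
# the parameter names are hypothetical stand-ins for `block_idxs` and `weights`.
def _block_iterative_ams_sweep_sketch(A, lb, ub, x, block_idxs, block_weights,
                                      relaxation=1.0):
    """One sweep over all blocks; each block gets a weighted simultaneous step."""
    A = np.asarray(A, dtype=float)
    inverse_row_norm = 1.0 / np.einsum("ij,ij->i", A, A)
    for idx, w in zip(block_idxs, block_weights):
        idx = np.asarray(idx)
        w = np.asarray(w, dtype=float)
        p = A[idx, :] @ x
        res_l, res_u = p - lb[idx], ub[idx] - p
        d_idx, c_idx = res_u < 0, res_l < 0
        full_d, full_c = idx[d_idx], idx[c_idx]
        x = x + relaxation * (
            (w[d_idx] * inverse_row_norm[full_d] * res_u[d_idx]) @ A[full_d, :]
            - (w[c_idx] * inverse_row_norm[full_c] * res_l[c_idx]) @ A[full_c, :]
        )
    return x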


class StringAveragedAMSHyperslab(HyperslabAMSAlgorithm):
    """
    StringAveragedAMSHyperslab is a string averaged implementation of the
    AMS algorithm.

    Parameters
    ----------
    A : npt.NDArray
        The matrix A used in the algorithm.
    lb : npt.NDArray
        The lower bounds for the variables.
    ub : npt.NDArray
        The upper bounds for the variables.
    strings : List[List[int]]
        A list of lists, where each inner list represents a string of indices.
    algorithmic_relaxation : npt.NDArray or float, optional
        The relaxation parameter for the algorithm, by default 1.
    relaxation : float, optional
        The relaxation parameter for the projection, by default 1.
    weights : None or List[float], optional
        The weights for each string, by default None. If None, equal weights are assigned.
    proximity_flag : bool, optional
        A flag indicating whether to use proximity, by default True.
    """

    def __init__(
        self,
        A: npt.NDArray,
        lb: npt.NDArray,
        ub: npt.NDArray,
        strings: List[List[int]],
        algorithmic_relaxation: npt.NDArray | float = 1,
        relaxation: float = 1,
        weights: None | List[float] = None,
        proximity_flag: bool = True,
    ):
        super().__init__(A, lb, ub, algorithmic_relaxation, relaxation, proximity_flag)
        xp = cp if self._use_gpu else np
        self.strings = strings
        if weights is None:
            self.weights = xp.ones(len(strings)) / len(strings)
        else:
            if len(weights) != len(self.strings):
                raise ValueError("The number of weights must be equal to the number of strings.")
            self.weights = weights

    def _project(self, x):
        # string averaged projection
        x_c = x.copy()  # keep a copy of the starting point shared by all strings
        x -= x  # reset x to zero in place; the weighted string results are accumulated below
        for string, weight in zip(self.strings, self.weights):
            x_s = x_c.copy()  # each string starts from the same initial point
            for i in string:
                p_i = self.single_map(x_s, i)
                (res_li, res_ui) = self.bounds.single_residual(p_i, i)
                if res_ui < 0:
                    self.A.update_step(
                        x_s, self.algorithmic_relaxation * self.inverse_row_norm[i] * res_ui, i
                    )
                elif res_li < 0:
                    self.A.update_step(
                        x_s,
                        -1 * self.algorithmic_relaxation * self.inverse_row_norm[i] * res_li,
                        i,
                    )
            x += weight * x_s
        return x
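

# Illustrative sketch (not part of the original module): string averaging runs a
# sequential AMS pass along each "string" of constraint indices starting from the
# same point, then combines the per-string results as a weighted average. The
# helper below is a hypothetical plain-NumPy stand-in for _project above.
def _string_averaged_ams_step_sketch(A, lb, ub, x, strings, weights, relaxation=1.0):
    """One string-averaged AMS step: weighted average of per-string sequential sweeps."""
    A = np.asarray(A, dtype=float)
    inverse_row_norm = 1.0 / np.einsum("ij,ij->i", A, A)
    x_out = np.zeros_like(x, dtype=float)
    for string, weight in zip(strings, weights):
        x_s = x.copy()  # every string starts from the same point
        for i in string:
            p_i = A[i] @ x_s
            res_li, res_ui = p_i - lb[i], ub[i] - p_i
            if res_ui < 0:
                x_s = x_s + relaxation * inverse_row_norm[i] * res_ui * A[i]
            elif res_li < 0:
                x_s = x_s - relaxation * inverse_row_norm[i] * res_li * A[i]
        x_out += weight * x_s
    return x_out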