diff --git a/experiment/power_iteration.py b/experiment/power_iteration.py
index 96e67f5..63b9299 100644
--- a/experiment/power_iteration.py
+++ b/experiment/power_iteration.py
@@ -17,8 +17,18 @@ def norm_l1(x):
 
 def power_iteration(A, x, options):
     """Power iteration method
-
-    x: assuming not zero.
+
+    Performs the power iteration algorithm to find the largest eigenvalue and corresponding eigenvector of the input matrix A.
+
+    Args:
+        A (numpy.ndarray): The input square matrix.
+        x (numpy.ndarray): The initial vector, assumed to be non-zero.
+        options (Options): An Options object containing the maximum number of iterations and the tolerance for convergence.
+
+    Returns:
+        numpy.ndarray: The eigenvector corresponding to the largest eigenvalue.
+        float: The largest eigenvalue.
+        int: The number of iterations performed.
     """
     x /= np.sqrt(np.sum(x**2))
     for niter in range(options.max_iters):
@@ -32,8 +42,18 @@ def power_iteration(A, x, options):
 
 def power_iteration4(A, x, options):
     """Power iteration method
-
-    x: assuming not zero.
+
+    Performs the power iteration algorithm to find the largest eigenvalue and corresponding eigenvector of the input matrix A.
+
+    Args:
+        A (numpy.ndarray): The input square matrix.
+        x (numpy.ndarray): The initial vector, assumed to be non-zero.
+        options (Options): An Options object containing the maximum number of iterations and the tolerance for convergence.
+
+    Returns:
+        numpy.ndarray: The eigenvector corresponding to the largest eigenvalue.
+        float: The largest eigenvalue.
+        int: The number of iterations performed.
     """
     x /= norm_l1(x)
     for niter in range(options.max_iters):
@@ -49,8 +69,18 @@ def power_iteration4(A, x, options):
 
 def power_iteration2(A, x, options):
     """Power iteration method
-
-    x: assuming not zero.
+
+    Performs the power iteration algorithm to find the largest eigenvalue and corresponding eigenvector of the input matrix A.
+
+    Args:
+        A (numpy.ndarray): The input square matrix.
+        x (numpy.ndarray): The initial vector, assumed to be non-zero.
+        options (Options): An Options object containing the maximum number of iterations and the tolerance for convergence.
+
+    Returns:
+        numpy.ndarray: The eigenvector corresponding to the largest eigenvalue.
+        float: The largest eigenvalue.
+        int: The number of iterations performed.
     """
     x /= np.sqrt(np.sum(x**2))
     new = A @ x
@@ -68,8 +98,18 @@ def power_iteration2(A, x, options):
 
 def power_iteration3(A, x, options):
     """Power iteration method
-
-    x: assuming not zero.
+
+    Performs the power iteration algorithm to find the largest eigenvalue and corresponding eigenvector of the input matrix A.
+
+    Args:
+        A (numpy.ndarray): The input square matrix.
+        x (numpy.ndarray): The initial vector, assumed to be non-zero.
+        options (Options): An Options object containing the maximum number of iterations and the tolerance for convergence.
+
+    Returns:
+        numpy.ndarray: The eigenvector corresponding to the largest eigenvalue.
+        float: The largest eigenvalue.
+        int: The number of iterations performed.
     """
     new = A @ x
     dot = x @ x
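For illustration, here is a minimal, self-contained sketch of the power-iteration scheme that the new docstrings above describe. It is not the code in experiment/power_iteration.py; the max_iters and tolerance parameters merely stand in for the Options object mentioned in the docstrings, and power_iteration_sketch is a hypothetical name.

    # Illustrative sketch only; not the experiment/power_iteration.py implementation.
    import numpy as np

    def power_iteration_sketch(A, x, max_iters=1000, tolerance=1e-8):
        x = x / np.sqrt(x @ x)          # normalize the (non-zero) starting vector
        eig = 0.0
        for niter in range(max_iters):
            new = A @ x
            eig_new = x @ new           # Rayleigh-quotient estimate of the dominant eigenvalue
            x = new / np.sqrt(new @ new)
            if abs(eig_new - eig) < tolerance:
                return x, eig_new, niter
            eig = eig_new
        return x, eig, max_iters

    # Example: dominant eigenpair of a small symmetric matrix.
    A = np.array([[4.0, 1.0], [1.0, 3.0]])
    v, lam, n = power_iteration_sketch(A, np.array([1.0, 1.0]))
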
diff --git a/src/ellalgo/conjugate_gradient.py b/src/ellalgo/conjugate_gradient.py
index 20badf5..d93bc81 100644
--- a/src/ellalgo/conjugate_gradient.py
+++ b/src/ellalgo/conjugate_gradient.py
@@ -5,23 +5,22 @@
 
 def conjugate_gradient(A, b, x0=None, tol=1e-5, max_iter=1000):
     """
-    Solve Ax = b using the Conjugate Gradient method.
-
-    Parameters:
-    A : 2D numpy array
-        The coefficient matrix (must be symmetric and positive definite)
-    b : 1D numpy array
-        The right-hand side vector
-    x0 : 1D numpy array, optional
-        Initial guess for the solution (default is zero vector)
-    tol : float, optional
-        Tolerance for convergence (default is 1e-5)
-    max_iter : int, optional
-        Maximum number of iterations (default is 1000)
-
+    Solve the linear system Ax = b using the Conjugate Gradient method.
+
+    The Conjugate Gradient method is an iterative algorithm for solving symmetric positive definite linear systems. It is particularly efficient for large, sparse systems.
+
+    Args:
+        A (numpy.ndarray): The coefficient matrix (must be symmetric and positive definite).
+        b (numpy.ndarray): The right-hand side vector.
+        x0 (numpy.ndarray, optional): Initial guess for the solution (default is zero vector).
+        tol (float, optional): Tolerance for convergence (default is 1e-5).
+        max_iter (int, optional): Maximum number of iterations (default is 1000).
+
     Returns:
-    x : 1D numpy array
-        The solution vector
+        numpy.ndarray: The solution vector.
+
+    Raises:
+        ValueError: If the Conjugate Gradient method does not converge after the maximum number of iterations.
     """
     n = len(b)
     if x0 is None:
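The rewritten docstring now promises a ValueError when the iteration limit is hit. Below is a rough sketch of the standard conjugate-gradient recurrence with that contract; it is an illustration of the method, not the implementation in src/ellalgo/conjugate_gradient.py, and conjugate_gradient_sketch is a hypothetical name.

    # Illustrative sketch only; textbook conjugate gradient for symmetric positive definite A.
    import numpy as np

    def conjugate_gradient_sketch(A, b, x0=None, tol=1e-5, max_iter=1000):
        x = np.zeros_like(b, dtype=float) if x0 is None else np.asarray(x0, dtype=float)
        r = b - A @ x                       # residual
        p = r.copy()                        # initial search direction
        rs_old = r @ r
        if np.sqrt(rs_old) < tol:
            return x
        for _ in range(max_iter):
            Ap = A @ p
            alpha = rs_old / (p @ Ap)       # exact step length along p
            x = x + alpha * p
            r = r - alpha * Ap
            rs_new = r @ r
            if np.sqrt(rs_new) < tol:
                return x
            p = r + (rs_new / rs_old) * p   # next A-conjugate search direction
            rs_old = rs_new
        raise ValueError("Conjugate Gradient did not converge")
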
diff --git a/src/ellalgo/cutting_plane.py b/src/ellalgo/cutting_plane.py
index e7a19af..9131d43 100644
--- a/src/ellalgo/cutting_plane.py
+++ b/src/ellalgo/cutting_plane.py
@@ -28,10 +28,10 @@ def cutting_plane_feas(
     Description:
         A function f(x) is *convex* if there always exist a g(x)
-        such that f(z) >= f(x) + g(x)' * (z - x), forall z, x in dom f.
+        such that f(z) >= f(x) + g(x)^T * (z - x), forall z, x in dom f.
         Note that dom f does not need to be a convex set in our definition.
-        The affine function g' (x - xc) + beta is called a cutting-plane,
-        or a ``cut'' for short.
+        The affine function g^T (x - xc) + beta is called a cutting-plane,
+        or a "cut" for short.
 
         This algorithm solves the following feasibility problem:
 
             find x
@@ -90,7 +90,7 @@ def cutting_plane_feas(
 
     for niter in range(options.max_iters):
         cut = omega.assess_feas(space.xc())  # query the oracle at space.xc()
-        if cut is None:  # feasible sol'n obtained
+        if cut is None:  # feasible solution obtained
             return space.xc(), niter
         status = space.update_bias_cut(cut)  # update space
         if status != CutStatus.Success or space.tsq() < options.tolerance:
@@ -270,7 +270,7 @@ def bsearch(
         if tau < options.tolerance:
             return upper, niter
         gamma = T(lower + tau)
-        if omega.assess_bs(gamma):  # feasible sol'n obtained
+        if omega.assess_bs(gamma):  # feasible solution obtained
             upper = gamma
         else:
             lower = gamma
diff --git a/src/ellalgo/oracles/ldlt_mgr.py b/src/ellalgo/oracles/ldlt_mgr.py
index 3e52750..98b8cb3 100644
--- a/src/ellalgo/oracles/ldlt_mgr.py
+++ b/src/ellalgo/oracles/ldlt_mgr.py
@@ -21,7 +21,7 @@ class LDLTMgr:
     - Cholesky-Banachiewicz style, row-based
     - Lazy evaluation
     - A matrix A in R^{m x m} is positive definite
-      iff v' A v > 0 for all v in R^n.
+      iff v^T A v > 0 for all v in R^n.
     - O(p^3) per iteration, independent of ndim
 
     Examples:
@@ -56,7 +56,7 @@ def factorize(self, mat: np.ndarray) -> bool:
         If $A$ is positive definite, then $p$ is zero.
         If it is not, then $p$ is a positive integer,
         such that $v = R^{-1} e_p$ is a certificate vector
-        to make $v'*A[:p,:p]*v < 0$
+        to make $v^T*A[:p,:p]*v < 0$
 
         :param A: A is a numpy array representing a symmetric matrix
         :type A: np.ndarray
@@ -170,7 +170,7 @@ def witness(self) -> float:
         The function "witness" provides evidence that a matrix is not
         symmetric positive definite.
             (square-root-free version)
-           evidence: v' A v = -ep
+           evidence: v^T A v = -ep
 
         Raises:
             AssertionError: $A$ indeeds a spd matrix
@@ -226,7 +226,7 @@ def sym_quad(self, mat: np.ndarray):
         return wit.dot(mat[start:ndim, start:ndim] @ wit)
 
     def sqrt(self) -> np.ndarray:
-        """Return upper triangular matrix R where A = R' * R
+        """Return upper triangular matrix R where A = R^T * R
 
         Raises:
             AssertionError: [description]
diff --git a/src/ellalgo/oracles/lmi_oracle.py b/src/ellalgo/oracles/lmi_oracle.py
index a3787e6..284b134 100644
--- a/src/ellalgo/oracles/lmi_oracle.py
+++ b/src/ellalgo/oracles/lmi_oracle.py
@@ -20,10 +20,10 @@ class LMIOracle(OracleFeas):
 
     def __init__(self, mat_f, mat_b):
         """
-        The function initializes a new LMI oracle object with given arguments.
-
-        :param mat_f: A list of numpy arrays.
-        :param mat_b: mat_b is a numpy array
+        Initializes a new LMIOracle object with the given matrix arguments.
+
+        :param mat_f: A list of numpy arrays representing the matrix F.
+        :param mat_b: A numpy array representing the matrix B.
         """
         self.mat_f = mat_f
         self.mat_f0 = mat_b
diff --git a/src/ellalgo/oracles/profit_oracle.py b/src/ellalgo/oracles/profit_oracle.py
index d078890..cfb8fed 100644
--- a/src/ellalgo/oracles/profit_oracle.py
+++ b/src/ellalgo/oracles/profit_oracle.py
@@ -150,16 +150,6 @@ class ProfitRbOracle(OracleOptim):
 
     This example is taken from [Aliabadi and Salahi, 2013]:
 
-        max     p'(A x1^α' x2^β') - v1'*x1 - v2'*x2
-        s.t.    x1 ≤ k'
-
-    where:
-        α' = α ± e1
-        β' = β ± e2
-        p' = p ± e3
-        k' = k ± e4
-        v' = v ± e5
-
     See also:
         ProfitOracle
     """
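Returning to the ldlt_mgr.py changes above: the factorize docstring states that a failed factorization yields an index p and a certificate vector v with v^T A[:p,:p] v < 0. Below is a rough, self-contained sketch of that square-root-free LDL^T idea; it stops at the first non-positive pivot and builds a witness for the leading p x p block. It is only an illustration of the technique, not LDLTMgr's row-based, lazily evaluated implementation, and ldlt_with_witness is a hypothetical name.

    # Illustrative sketch only; not the LDLTMgr implementation.
    import numpy as np

    def ldlt_with_witness(A):
        """Return (0, None) if A is positive definite, else (p, v) with v^T A[:p, :p] v <= 0."""
        n = A.shape[0]
        L = np.eye(n)       # unit lower-triangular factor in A = L D L^T
        d = np.zeros(n)     # diagonal of D
        for i in range(n):
            for j in range(i + 1):
                s = A[i, j] - (L[i, :j] * d[:j]) @ L[j, :j]
                if j == i:
                    d[i] = s                # pivot for the (i+1)-th leading block
                else:
                    L[i, j] = s / d[j]
            if d[i] <= 0.0:                 # leading (i+1) x (i+1) block is not positive definite
                p = i + 1
                # Solve L[:p, :p]^T v = e_p; then v^T A[:p, :p] v equals the failed pivot d[i].
                v = np.linalg.solve(L[:p, :p].T, np.eye(p)[:, -1])
                return p, v
        return 0, None

    # Example: [[1, 2], [2, 1]] is indefinite, so p == 2 and v^T A v < 0.
    p, v = ldlt_with_witness(np.array([[1.0, 2.0], [2.0, 1.0]]))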