Commit
Fixed bug caused by change in scipy from using tol to rtol in v1.12 for CG.
jlparkI committed Jul 11, 2024
1 parent 5b4606f commit bbd87d5
Showing 5 changed files with 3 additions and 232 deletions.
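
For context, a minimal sketch (not part of this commit) of the breaking change the commit message describes: SciPy 1.12 deprecated the `tol` keyword of `scipy.sparse.linalg.cg` in favour of `rtol` (with removal slated for SciPy 1.14), so callers passing `tol=` break on newer SciPy. The `compatible_cg` helper below is hypothetical and shows one version-agnostic workaround.

```python
# Hypothetical compatibility shim, not part of this commit: pick whichever
# tolerance keyword the installed scipy.sparse.linalg.cg accepts.
import inspect

import numpy as np
from scipy.sparse.linalg import cg


def compatible_cg(A, b, cg_tol=1e-5, max_iter=500, M=None):
    """Run scipy's CG, passing cg_tol as rtol (scipy >= 1.12) or tol (older)."""
    kwarg = "rtol" if "rtol" in inspect.signature(cg).parameters else "tol"
    return cg(A, b, M=M, maxiter=max_iter, atol=0.0, **{kwarg: cg_tol})


# Quick check on a small symmetric positive-definite system.
A = np.array([[4.0, 1.0], [1.0, 3.0]])
b = np.array([1.0, 2.0])
x, info = compatible_cg(A, b)
assert info == 0  # info == 0 means CG converged
```
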
2 changes: 1 addition & 1 deletion CMakeLists.txt
@@ -34,7 +34,7 @@ if (CMAKE_CUDA_COMPILER)
set(CMAKE_CUDA_STANDARD_REQUIRED ON)
add_compile_definitions(USE_CUDA)

-# set(CMAKE_CUDA_ARCHITECTURES 52)
+#set(CMAKE_CUDA_ARCHITECTURES 52)
project(${SKBUILD_PROJECT_NAME} LANGUAGES CXX CUDA)
else()
message(STATUS "No CUDA")
57 changes: 0 additions & 57 deletions xGPR/cg_toolkit/cg_linear_operators.py

This file was deleted.

58 changes: 0 additions & 58 deletions xGPR/cg_toolkit/cuda_cg_linear_operators.py

This file was deleted.

110 changes: 0 additions & 110 deletions xGPR/fitting_toolkit/cg_fitting_toolkit.py
@@ -2,20 +2,16 @@
selected) using CG, either using our internal routine or Scipy / Cupy's."""
import warnings
import numpy as np
-from scipy.sparse.linalg import cg as CPU_CG
from ..cg_toolkit.cg_tools import CPU_ConjugateGrad

try:
    import cupy as cp
-    from cupyx.scipy.sparse.linalg import cg as Cuda_CG
-    from ..cg_toolkit.cuda_cg_linear_operators import Cuda_CGLinearOperator
    from ..cg_toolkit.cg_tools import GPU_ConjugateGrad
except:
    pass

from ..scoring_toolkit.exact_nmll_calcs import calc_zty

-from ..cg_toolkit.cg_linear_operators import CPU_CGLinearOperator



@@ -68,109 +64,3 @@ def cg_fit_lib_internal(kernel, dataset, cg_tol = 1e-4, max_iter = 500,
    if verbose:
        print(f"CG iterations: {n_iter}")
    return weights, n_iter, losses
-
-
-
-def cg_fit_lib_ext(kernel, dataset, cg_tol = 1e-5, max_iter = 500,
-        preconditioner = None, verbose = True):
-    """Calculates the weights when fitting the model using
-    preconditioned CG. Good scaling but slower for small
-    numbers of random features. Uses the CG implementation
-    in Scipy and Cupy instead of the internal implementation
-    (we've found these to provide equivalent results,
-    but it is good to be able to use either, also the
-    internal implementation can keep track of loss values
-    for diagnostics.)
-    Args:
-        kernel: A valid kernel object that can generate random features.
-        dataset: Either OnlineDataset or OfflineDataset.
-        cg_tol (float): The threshold below which cg is deemed to have
-            converged. Defaults to 1e-5.
-        max_iter (int): The maximum number of iterations before
-            CG is deemed to have failed to converge.
-        preconditioner: Either None or a valid Preconditioner (e.g.
-            CudaRandomizedPreconditioner, CPURandomizedPreconditioner
-            etc). If None, no preconditioning is used. Otherwise,
-            the preconditioner is used for CG. The preconditioner
-            can be built by calling self.build_preconditioner
-            with appropriate arguments.
-        verbose (bool): If True, print regular updates.
-    Returns:
-        weights: A cupy or numpy array of shape (M) for M
-            random features.
-        n_iter (int): The number of CG iterations.
-        losses (list): The loss on each iteration; for diagnostic
-            purposes.
-    """
-    z_trans_y, _ = calc_zty(dataset, kernel)
-    if kernel.device == "cuda":
-        cg_operator = Cuda_CGLinearOperator(dataset, kernel,
-                verbose)
-        weights, convergence = Cuda_CG(A = cg_operator, b = z_trans_y,
-                M = preconditioner, tol = cg_tol, atol = 0, maxiter = max_iter)
-    else:
-        cg_operator = CPU_CGLinearOperator(dataset, kernel,
-                verbose)
-        weights, convergence = CPU_CG(A = cg_operator, b = z_trans_y,
-                M = preconditioner, tol = cg_tol, atol = 0, maxiter = max_iter)
-
-
-    if convergence != 0:
-        warnings.warn("Conjugate gradients failed to converge! Try refitting "
-                "the model with updated settings.")
-
-    return weights, cg_operator.n_iter, []
-
-
-
-def cg_fit_lib_discriminant(kernel, dataset, x_mean, targets, cg_tol = 1e-5,
-        max_iter = 500, preconditioner = None, verbose = True):
-    """Calculates the weights when fitting the model using
-    preconditioned CG for a discriminant-type classifier. Good scaling
-    but slower for small numbers of random features.
-    Args:
-        kernel: A valid kernel object that can generate random features.
-        dataset: Either OnlineDataset or OfflineDataset.
-        x_mean (ndarray): The mean of the data.
-        targets (ndarray): The class-specific means.
-        cg_tol (float): The threshold below which cg is deemed to have
-            converged. Defaults to 1e-5.
-        max_iter (int): The maximum number of iterations before
-            CG is deemed to have failed to converge.
-        preconditioner: Either None or a valid Preconditioner (e.g.
-            CudaRandomizedPreconditioner, CPURandomizedPreconditioner
-            etc). If None, no preconditioning is used. Otherwise,
-            the preconditioner is used for CG. The preconditioner
-            can be built by calling self.build_preconditioner
-            with appropriate arguments.
-        verbose (bool): If True, print regular updates.
-    Returns:
-        weights: A cupy or numpy array of shape (M) for M
-            random features.
-        n_iter (int): The number of CG iterations.
-        losses (list): The loss on each iteration; for diagnostic
-            purposes.
-    """
-    if kernel.device == "cuda":
-        cg_operator = GPU_ConjugateGrad()
-        resid = cp.zeros((kernel.get_num_rffs(), 2, targets.shape[1]))
-    else:
-        cg_operator = CPU_ConjugateGrad()
-        resid = np.zeros((kernel.get_num_rffs(), 2, targets.shape[1]))
-
-    resid[:,0,:] = targets
-
-    weights, converged, n_iter, losses = cg_operator.fit(dataset, kernel,
-            preconditioner, resid, max_iter, cg_tol, verbose,
-            nmll_settings = False)
-    if not converged:
-        warnings.warn("Conjugate gradients failed to converge! Try refitting "
-                "the model with updated settings.")
-
-    if verbose:
-        print(f"CG iterations: {n_iter}")
-    return weights, n_iter, losses
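
For reference, the deleted external path handed Scipy/Cupy's CG a linear operator wrapping the implicit design matrix. The sketch below is hypothetical (it is not the deleted CPU_CGLinearOperator, whose source is not shown here); it only illustrates how a scipy LinearOperator supplies the matrix-vector product CG needs, using the rtol keyword required by SciPy >= 1.12.

```python
# Hypothetical illustration, not the deleted code: scipy's cg needs only a
# matvec, which a LinearOperator can compute implicitly from random features.
import numpy as np
from scipy.sparse.linalg import LinearOperator, cg


def make_normal_eq_operator(z_features, lam):
    """Operator computing v -> (Z^T Z + lam * I) v without forming Z^T Z."""
    m = z_features.shape[1]

    def matvec(v):
        return z_features.T @ (z_features @ v) + lam * v

    return LinearOperator((m, m), matvec=matvec)


rng = np.random.default_rng(0)
Z = rng.standard_normal((200, 32))   # stand-in for a random-feature matrix
y = rng.standard_normal(200)
op = make_normal_eq_operator(Z, lam=1e-3)
# rtol= requires SciPy >= 1.12; older versions used tol= instead.
weights, info = cg(op, Z.T @ y, rtol=1e-5, atol=0.0, maxiter=500)
```
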
8 changes: 2 additions & 6 deletions xGPR/xgp_regression.py
@@ -22,7 +22,7 @@
from .preconditioners.inter_device_preconditioners import InterDevicePreconditioner
from .preconditioners.rand_nys_preconditioners import CPU_RandNysPreconditioner

-from .fitting_toolkit.cg_fitting_toolkit import cg_fit_lib_ext, cg_fit_lib_internal
+from .fitting_toolkit.cg_fitting_toolkit import cg_fit_lib_internal
from .fitting_toolkit.exact_fitting_toolkit import calc_weights_exact, calc_variance_exact

from .scoring_toolkit.approximate_nmll_calcs import estimate_logdet
@@ -495,11 +495,7 @@ def fit(self, dataset, preconditioner = None,
                min_rank = min_rank, max_rank = max_rank,
                ratio_target = autoselect_target_ratio,
                always_use_srht2 = always_use_srht2)
-        if run_diagnostics:
-            self.weights, n_iter, losses = cg_fit_lib_internal(self.kernel, dataset, tol,
-                    max_iter, preconditioner, self.verbose)
-        else:
-            self.weights, n_iter, losses = cg_fit_lib_ext(self.kernel, dataset, tol,
+        self.weights, n_iter, losses = cg_fit_lib_internal(self.kernel, dataset, tol,
                max_iter, preconditioner, self.verbose)

        else: