Fix Issue #357 for scipy >=1.14.0 #366

Open: wants to merge 3 commits into base: master
8 changes: 8 additions & 0 deletions .gitignore
@@ -31,6 +31,14 @@ coverage.xml
ehthumbs.db
Thumbs.db

+# virtual environments #
+########################
+.env
+.venv/
+.conda/
+.mamba/
+.python-version

# Directories
#############
dist
6 changes: 3 additions & 3 deletions pygam/pygam.py
@@ -703,7 +703,7 @@ def _initial_estimate(self, y, modelmat):

# solve the linear problem
return np.linalg.solve(
-load_diagonal(modelmat.T.dot(modelmat).A), modelmat.T.dot(y_)
+load_diagonal(modelmat.T.dot(modelmat).toarray()), modelmat.T.dot(y_)
)

# not sure if this is faster...
@@ -780,7 +780,7 @@ def _pirls(self, X, Y, weights):
self._on_loop_start(vars())

WB = W.dot(modelmat[mask, :]) # common matrix product
-Q, R = np.linalg.qr(WB.A)
+Q, R = np.linalg.qr(WB.toarray())

if not np.isfinite(Q).all() or not np.isfinite(R).all():
raise ValueError(
@@ -1401,7 +1401,7 @@ def _get_quantiles(
idxs = self.terms.get_coef_indices(term)
cov = self.statistics_['cov'][idxs][:, idxs]

-var = (modelmat.dot(cov) * modelmat.A).sum(axis=1)
+var = (modelmat.dot(cov) * modelmat.toarray()).sum(axis=1)
if prediction:
var += self.distribution.scale

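For context, a minimal sketch (not part of this PR) of the pattern all of these hunks follow: SciPy >= 1.14 no longer exposes the .A dense-view shorthand on the sparse matrices pygam builds, so densification goes through .toarray(), which behaves the same on older SciPy releases.

import numpy as np
import scipy as sp
import scipy.sparse  # makes sp.sparse available, mirroring pygam's imports

modelmat = sp.sparse.csc_matrix(np.eye(3))  # stand-in for pygam's model matrix
dense = modelmat.T.dot(modelmat).toarray()  # portable across SciPy versions
# modelmat.T.dot(modelmat).A                # raises AttributeError on SciPy >= 1.14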
2 changes: 1 addition & 1 deletion pygam/terms.py
@@ -1500,7 +1500,7 @@ def _build_marginal_constraints(self, i, coef, constraint_lam, constraint_l2):
)

# now enter it into the composite
-composite_C[tuple(np.meshgrid(slice_, slice_))] = slice_C.A
+composite_C[tuple(np.meshgrid(slice_, slice_))] = slice_C.toarray()

return sp.sparse.csc_matrix(composite_C)

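A minimal sketch (not part of this PR) of the meshgrid indexing in the hunk above, which writes a dense marginal-constraint block into the larger composite matrix; the only behavioral change here is slice_C.A becoming slice_C.toarray().

import numpy as np
import scipy as sp
import scipy.sparse

composite_C = np.zeros((5, 5))
slice_ = np.array([1, 2, 3])              # coefficient indices of one marginal term
slice_C = sp.sparse.eye(3, format='csc')  # stand-in for the marginal constraint block
composite_C[tuple(np.meshgrid(slice_, slice_))] = slice_C.toarray()
# rows/columns 1-3 of composite_C now hold the block; meshgrid's default 'xy'
# indexing writes its transpose, which is invisible for this symmetric stand-in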
18 changes: 9 additions & 9 deletions pygam/tests/test_penalties.py
@@ -23,13 +23,13 @@ def test_single_spline_penalty():
monotonic_ and convexity_ should be 0.
"""
coef = np.array(1.0)
-assert np.alltrue(derivative(1, coef).A == 0.0)
-assert np.alltrue(l2(1, coef).A == 1.0)
-assert np.alltrue(monotonic_inc(1, coef).A == 0.0)
-assert np.alltrue(monotonic_dec(1, coef).A == 0.0)
-assert np.alltrue(convex(1, coef).A == 0.0)
-assert np.alltrue(concave(1, coef).A == 0.0)
-assert np.alltrue(none(1, coef).A == 0.0)
+assert np.all(derivative(1, coef).toarray() == 0.0)
+assert np.all(l2(1, coef).toarray() == 1.0)
+assert np.all(monotonic_inc(1, coef).toarray() == 0.0)
+assert np.all(monotonic_dec(1, coef).toarray() == 0.0)
+assert np.all(convex(1, coef).toarray() == 0.0)
+assert np.all(concave(1, coef).toarray() == 0.0)
+assert np.all(none(1, coef).toarray() == 0.0)


def test_wrap_penalty():
@@ -43,12 +43,12 @@ def test_wrap_penalty():

fit_linear = True
p = wrap_penalty(none, fit_linear, linear_penalty=linear_penalty)
-P = p(n, coef).A
+P = p(n, coef).toarray()
assert P.sum() == linear_penalty

fit_linear = False
p = wrap_penalty(none, fit_linear, linear_penalty=linear_penalty)
-P = p(n, coef).A
+P = p(n, coef).toarray()
assert P.sum() == 0.0


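Alongside the sparse-API change, these tests drop np.alltrue, which was deprecated in NumPy 1.25 and removed in NumPy 2.0; np.all is the drop-in replacement. A minimal sketch (not part of this PR):

import numpy as np

penalty = np.zeros((3, 3))     # stand-in for a densified penalty matrix
assert np.all(penalty == 0.0)  # previously: np.alltrue(penalty == 0.0)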
8 changes: 4 additions & 4 deletions pygam/tests/test_terms.py
@@ -315,10 +315,10 @@ def der1(*args, **kwargs):

# check all the dimensions
for i in range(3):
-P = term._build_marginal_penalties(i).A
+P = term._build_marginal_penalties(i).toarray()
C = term._build_marginal_constraints(
i, -np.arange(term.n_coefs), constraint_lam=1, constraint_l2=0
-).A
+).toarray()

assert (P == C).all()

@@ -362,11 +362,11 @@ def test_compose_penalties(self):
term = SplineTerm(feature=0, penalties=['auto', 'none'])

# penalties should be equivalent
-assert (term.build_penalties() == base_term.build_penalties()).A.all()
+assert (term.build_penalties() == base_term.build_penalties()).toarray().all()

# multitple penalties should be additive, not multiplicative,
# so 'none' penalty should have no effect
-assert np.abs(term.build_penalties().A).sum() > 0
+assert np.abs(term.build_penalties().toarray()).sum() > 0

def test_compose_constraints(self, hepatitis_X_y):
"""we should be able to compose penalties
8 changes: 4 additions & 4 deletions pygam/utils.py
@@ -64,7 +64,7 @@ def cholesky(A, sparse=True, verbose=True): # noqa: F811

if sparse:
return L.T # upper triangular factorization
-return L.T.A # upper triangular factorization
+return L.T.toarray() # upper triangular factorization

else:
msg = (
@@ -78,7 +78,7 @@ def cholesky(A, sparse=True, verbose=True): # noqa: F811
warnings.warn(msg)

if sp.sparse.issparse(A):
-A = A.A
+A = A.toarray()

try:
L = sp.linalg.cholesky(A, lower=False)
@@ -951,10 +951,10 @@ def tensor_product(a, b, reshape=True):
raise ValueError('both arguments must have the same number of samples')

if sp.sparse.issparse(a):
-a = a.A
+a = a.toarray()

if sp.sparse.issparse(b):
-b = b.A
+b = b.toarray()

tensor = a[..., :, None] * b[..., None, :]

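A minimal sketch (not part of this PR) of the guard used in tensor_product: an argument is densified only when it is sparse, again via toarray(). The densify helper below is hypothetical; utils.py performs the check inline.

import numpy as np
import scipy as sp
import scipy.sparse

def densify(x):
    # hypothetical helper for illustration only
    return x.toarray() if sp.sparse.issparse(x) else np.asarray(x)

a = densify(sp.sparse.random(4, 3, density=0.5, format='csr'))
b = densify(np.ones((4, 2)))
tensor = a[..., :, None] * b[..., None, :]  # row-wise outer products, shape (4, 3, 2)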