Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fixing Laplacian operator for vector fields #380

Merged
Merged 5 commits on Nov 18, 2024
Merged
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
53 changes: 35 additions & 18 deletions pina/operators.py
Original file line number Diff line number Diff line change
Expand Up @@ -176,6 +176,36 @@ def laplacian(output_, input_, components=None, d=None, method="std"):
:return: The tensor containing the result of the Laplacian operator.
:rtype: LabelTensor
"""

gc031298 marked this conversation as resolved.
Show resolved Hide resolved
def scalar_laplace(output_, input_, components, d):
    """
    Compute the Laplace operator for a single scalar output component by
    summing the unmixed second derivatives: first the gradient of the
    selected component is taken, then each entry of that gradient is
    differentiated again with respect to its own variable, and the
    diagonal terms are accumulated.

    :param LabelTensor output_: the output tensor onto which computing the
        Laplacian. It has to be a column tensor (a single scalar
        component).
    :param LabelTensor input_: the input tensor with respect to which
        computing the Laplacian.
    :param list(str) components: the name of the output variables to
        calculate the Laplacian for. It should be a subset of the output
        labels. If None, all the output variables are considered.
    :param list(str) d: the name of the input variables on which the
        Laplacian is computed. d should be a subset of the input labels.
        If None, all the input variables are considered. Default is None.

    :raises RuntimeError: a vectorial function is passed
        (presumably raised by ``grad``; not raised directly here —
        TODO confirm against the ``grad`` implementation).
    :return: The tensor containing the result of the Laplacian operator.
    :rtype: LabelTensor
    """

    # First derivatives of the selected component w.r.t. each variable in d.
    grad_output = grad(output_, input_, components=components, d=d)
    # Accumulator for the sum of second derivatives; one column per sample.
    result = torch.zeros(output_.shape[0], 1, device=output_.device)

    # For each first-derivative label, differentiate again and keep only the
    # diagonal term (d^2/dx_i^2), accumulating into the single result column.
    for i, label in enumerate(grad_output.labels):
        gg = grad(grad_output, input_, d=d, components=[label])
        # super(torch.Tensor, ...) bypasses LabelTensor.__getitem__ so that
        # plain torch.Tensor row indexing is used on the transposed tensor
        # (selecting the i-th derivative), avoiding label bookkeeping.
        result[:, 0] += super(torch.Tensor, gg.T).__getitem__(i)

    return result

if d is None:
d = input_.labels

gc031298 marked this conversation as resolved.
Show resolved Hide resolved
Expand All @@ -190,33 +220,20 @@ def laplacian(output_, input_, components=None, d=None, method="std"):
# TODO fix
# grad_output = grad(output_, input_, components, d)
# result = div(grad_output, input_, d=d)
elif method == "std":

elif method == "std":
if len(components) == 1:
grad_output = grad(output_, input_, components=components, d=d)
result = torch.zeros(output_.shape[0], 1, device=output_.device)
for i, label in enumerate(grad_output.labels):
gg = grad(grad_output, input_, d=d, components=[label])
result[:, 0] += super(torch.Tensor, gg.T).__getitem__(
i
) # TODO improve
result = scalar_laplace(output_, input_, components, d)
labels = [f"dd{components[0]}"]

else:
result = torch.empty(
gc031298 marked this conversation as resolved.
Show resolved Hide resolved
input_.shape[0], len(components), device=output_.device
)
labels = [None] * len(components)
for idx, (ci, di) in enumerate(zip(components, d)):

if not isinstance(ci, list):
ci = [ci]
if not isinstance(di, list):
di = [di]

grad_output = grad(output_, input_, components=ci, d=di)
result[:, idx] = grad(grad_output, input_, d=di).flatten()
labels[idx] = f"dd{ci}dd{di}"
for idx, c in enumerate(components):
result[:, idx] = scalar_laplace(output_, input_, c, d).flatten()
labels[idx] = f"dd{c}"

result = result.as_subclass(LabelTensor)
result.labels = labels
Expand Down
Loading