
Commit

Adding logging features to fork
StillerPatrick committed Jul 29, 2021
1 parent 4b7fcae commit 81e6641
Showing 25 changed files with 1,633 additions and 103 deletions.
85 changes: 69 additions & 16 deletions PINNFramework/BoundaryCondition.py
@@ -4,9 +4,8 @@


 class BoundaryCondition(LossTerm):
-    def __init__(self, name, dataset, norm='L2', weight=1.):
-        self.name = name
-        super(BoundaryCondition, self).__init__(dataset, norm, weight)
+    def __init__(self, dataset, name, norm='L2', weight=1.):
+        super(BoundaryCondition, self).__init__(dataset, name, norm, weight)

     def __call__(self, *args, **kwargs):
         raise NotImplementedError("The call function of the Boundary Condition has to be implemented")
@@ -18,7 +17,7 @@ class DirichletBC(BoundaryCondition):
     """

     def __init__(self, func, dataset, name, norm='L2',weight=1.):
-        super(DirichletBC, self).__init__(name, dataset, norm, weight)
+        super(DirichletBC, self).__init__(dataset, name, norm, weight)
         self.func = func

     def __call__(self, x, model):
@@ -29,19 +28,37 @@ def __call__(self, x, model):
 class NeumannBC(BoundaryCondition):
     """
     Neumann boundary conditions: dy/dn(x) = func(x).
+    With dy/dn(x) = <∇y, n>
     """

-    def __init__(self, func, dataset, input_dimension, output_dimension, name, norm='L2',weight=1.):
-        super(NeumannBC, self).__init__(name, dataset, norm, weight)
+    def __init__(self, func, dataset, normal_vector, begin, end, output_dimension, name, norm='L2', weight=1.):
+        """
+        Args:
+            func: scalar but vectorized function f(x)
+            normal_vector: normal vector for the face
+            name: identifier of the boundary condition
+            weight: weighting of the boundary condition
+            begin: defines the beginning of the spatial variables in x
+            end: defines the end of the spatial variables in x
+            output_dimension: defines on which dimension of the output the boundary condition is performed
+        """
+        super(NeumannBC, self).__init__(dataset, name, norm, weight)
         self.func = func
-        self.input_dimension = input_dimension
+        self.normal_vector = normal_vector
+        self.begin = begin
+        self.end = end
         self.output_dimension = output_dimension

     def __call__(self, x, model):
-        grads = ones(x.shape, device=model.device)
-        y = model(x)[:, self.output_dimension]
+        x.requires_grad = True
+        y = model(x)
+        y = y[:, self.output_dimension]
+        grads = ones(y.shape, device=y.device)
         grad_y = grad(y, x, create_graph=True, grad_outputs=grads)[0]
-        y_dn = grad_y[:, self.input_dimension]
+        grad_y = grad_y[:,self.begin:self.end]
+        self.normal_vector.to(y.device)  # move normal vector to the correct device
+        y_dn = grad_y @ self.normal_vector
         return self.weight * self.norm(y_dn, self.func(x))


@@ -50,17 +67,34 @@ class RobinBC(BoundaryCondition):
     Robin boundary conditions: dy/dn(x) = func(x, y).
     """

-    def __init__(self, func, dataset, input_dimension, output_dimension, name, norm='L2', weight=1.):
-        super(RobinBC, self).__init__(name, dataset, norm, weight)
+    def __init__(self, func, dataset, normal_vector, begin, end, output_dimension, name, norm='L2', weight=1.):
+        """
+        Args:
+            func: scalar but vectorized function f(x, y)
+            normal_vector: normal vector for the face
+            name: identifier of the boundary condition
+            weight: weighting of the boundary condition
+            begin: defines the beginning of the spatial variables in x
+            end: defines the end of the spatial variables in x
+            output_dimension: defines on which dimension of the output the boundary condition is performed
+        """
+
+        super(RobinBC, self).__init__(dataset, name, norm, weight)
         self.func = func
-        self.input_dimension = input_dimension
+        self.begin = begin
+        self.end = end
+        self.normal_vector = normal_vector
         self.output_dimension = output_dimension

     def __call__(self, x, y, model):
-        y = model(x)[:, self.output_dimension]
+        x.requires_grad = True
+        y = model(x)
+        y = y[:, self.output_dimension]
         grads = ones(y.shape, device=y.device)
         grad_y = grad(y, x, create_graph=True, grad_outputs=grads)[0]
-        y_dn = grad_y[:, self.input_dimension]
+        grad_y = grad_y[:, self.begin:self.end]
+        self.normal_vector.to(y.device)  # move normal vector to the correct device
+        y_dn = grad_y @ self.normal_vector
         return self.weight * self.norm(y_dn, self.func(x, y))


@@ -70,7 +104,7 @@ class PeriodicBC(BoundaryCondition):
     """

     def __init__(self, dataset, output_dimension, name, degree=None, input_dimension=None, norm='L2', weight=1.):
-        super(PeriodicBC, self).__init__(name, dataset, norm, weight)
+        super(PeriodicBC, self).__init__(dataset, name, norm, weight)
         if degree is not None and input_dimension is None:
             raise ValueError("If the degree of the boundary condition is defined the input dimension for the "
                              "derivative has to be defined too ")
@@ -95,3 +129,22 @@ def __call__(self, x_lb, x_ub, model):

         else:
             raise NotImplementedError("Periodic Boundary Condition for a higher degree than one is not supported")
+
+
+class TimeDerivativeBC(BoundaryCondition):
+    """
+    For hyperbolic systems it may be necessary to initialize the time derivative. This boundary condition
+    initializes the time derivative in a data-driven way.
+    """
+    def __init__(self, dataset, name, norm='L2', weight=1):
+        super(TimeDerivativeBC, self).__init__(dataset, name, norm, weight)
+
+    def __call__(self, x, dt_y, model):
+        x.requires_grad = True
+        pred = model(x)
+        grads = ones(pred.shape, device=pred.device)
+        pred_dt = grad(pred, x, create_graph=True, grad_outputs=grads)[0][:, -1]
+        pred_dt = pred_dt.reshape(-1,1)
+        return self.weight * self.norm(pred_dt, dt_y)
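
The Neumann and Robin conditions above replace the old scalar input_dimension with an explicit normal_vector plus a begin/end slice selecting the spatial coordinates of the network input. A minimal construction sketch, assuming a model with inputs (x, y, t), a boundary face with outward normal in +x, and purely illustrative datasets and names (none of them come from this commit):

import torch
from torch import nn
from torch.utils.data import TensorDataset
from PINNFramework.BoundaryCondition import NeumannBC, TimeDerivativeBC

model = nn.Linear(3, 1)                               # stand-in for a PINN taking (x, y, t)
boundary_dataset = TensorDataset(torch.rand(128, 3))  # hypothetical boundary samples
ic_dataset = TensorDataset(torch.rand(64, 3), torch.rand(64, 1))

neumann = NeumannBC(
    func=lambda x: torch.zeros(x.shape[0]),           # homogeneous Neumann data
    dataset=boundary_dataset,
    normal_vector=torch.tensor([1.0, 0.0]),           # outward normal of the face
    begin=0, end=2,                                    # columns 0:2 of the input are spatial
    output_dimension=0,
    name="neumann_x",
)

dt_ic = TimeDerivativeBC(ic_dataset, name="dt_initial")  # fits d(pred)/dt to stored dt_y values

When called as neumann(x, model), the condition differentiates the selected output with respect to x, slices the spatial part of the gradient via begin:end, and projects it onto normal_vector before applying the norm and weight.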

4 changes: 2 additions & 2 deletions PINNFramework/HPMLoss.py
@@ -1,7 +1,7 @@
 from .PDELoss import PDELoss

 class HPMLoss(PDELoss):
-    def __init__(self, dataset, hpm_input, hpm_model, norm='L2', weight=1.):
+    def __init__(self, dataset, name, hpm_input, hpm_model, norm='L2', weight=1.):
         """
         Constructor of the HPM loss
@@ -13,7 +13,7 @@ def __init__(self, dataset, hpm_input, hpm_model, norm='L2', weight=1.):
             norm: Norm used for calculation PDE loss
             weight: Weighting for the loss term
         """
-        super(HPMLoss, self).__init__(dataset, None, norm, weight)
+        super(HPMLoss, self).__init__(dataset, None, name, norm='L2', weight=1.)
         self.hpm_input = hpm_input
         self.hpm_model = hpm_model

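For reference, the reordered constructor is used as below; the derivative helper and the small hpm_model are placeholders chosen for illustration, since only the constructor is visible in this excerpt:

import torch
from torch import nn
from torch.utils.data import TensorDataset
from PINNFramework.HPMLoss import HPMLoss

pde_points = TensorDataset(torch.rand(256, 2))   # hypothetical (x, t) residual points

def hpm_input(x, model):
    # placeholder: would normally assemble x, u and derivatives of u for the HPM network
    return model(x)

hpm_model = nn.Sequential(nn.Linear(1, 16), nn.Tanh(), nn.Linear(16, 1))

hpm_loss = HPMLoss(pde_points, "hpm", hpm_input, hpm_model)
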
6 changes: 3 additions & 3 deletions PINNFramework/InitalCondition.py
@@ -4,16 +4,16 @@


 class InitialCondition(LossTerm):
-    def __init__(self, dataset, norm='L2', weight=1.):
+    def __init__(self, dataset, name, norm='L2', weight=1.):
         """
-        Constructor for the Intial condition
+        Constructor for the Initial condition
         Args:
             dataset (torch.utils.Dataset): dataset that provides the residual points
             norm: Norm used for calculation PDE loss
             weight: Weighting for the loss term
         """
-        super(InitialCondition, self).__init__(dataset, norm, weight)
+        super(InitialCondition, self).__init__(dataset, name, norm, weight)

     def __call__(self, x: Tensor, model: Module, gt_y: Tensor):
         """
24 changes: 22 additions & 2 deletions PINNFramework/JoinedDataset.py
@@ -19,19 +19,39 @@ def min_length(datasets):
                 minimum = length
         return minimum

-    def __init__(self, datasets):
+    @staticmethod
+    def max_length(datasets):
+        """
+        Calculates the maximum dataset length of a list of datasets
+        datasets (Map): Map of datasets to be concatenated
+        """
+        maximum = -1 * float("inf")
+        for key in datasets.keys():
+            length = len(datasets[key])
+            if length > maximum:
+                maximum = length
+        return maximum
+
+    def __init__(self, datasets, mode='min'):
         super(JoinedDataset, self).__init__()
         self.datasets = datasets
+        self.mode = mode

     def __len__(self):
-        return self.min_length(self.datasets)
+        if self.mode =='min':
+            return self.min_length(self.datasets)
+        if self.mode =='max':
+            return self.max_length(self.datasets)

     def __getitem__(self, idx):
+        if idx < 0:
+            if -idx > len(self):
+                raise ValueError("absolute value of index should not exceed dataset length")
         combined_item = {}
         for key in self.datasets.keys():
+            if self.mode == 'max':
+                idx = idx % len(self.datasets[key])
             item = self.datasets[key][idx]
             combined_item[key] = item
         return combined_item
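
The new mode parameter decides whether iteration stops at the shortest dataset ('min', the previous behaviour) or cycles the shorter ones up to the length of the longest ('max'). A small sketch with made-up data:

import torch
from torch.utils.data import TensorDataset
from PINNFramework.JoinedDataset import JoinedDataset

initial = TensorDataset(torch.rand(100, 2))
boundary = TensorDataset(torch.rand(400, 2))

joined_min = JoinedDataset({"initial": initial, "boundary": boundary})              # len() == 100
joined_max = JoinedDataset({"initial": initial, "boundary": boundary}, mode='max')  # len() == 400

# In 'max' mode the index wraps per dataset via idx % len(dataset):
sample = joined_max[250]   # {"initial": initial[50], "boundary": boundary[250]}
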
45 changes: 45 additions & 0 deletions PINNFramework/Logger_Interface.py
@@ -0,0 +1,45 @@
+from abc import ABC, abstractmethod
+
+
+class LoggerInterface(ABC):
+
+    @abstractmethod
+    def log_scalar(self, scalar, name, epoch):
+        """
+        Method that defines how scalars are logged
+        Args:
+            scalar: scalar to be logged
+            name: name of the scalar
+            epoch: epoch in the training loop
+        """
+        pass
+
+    @abstractmethod
+    def log_image(self, image, name, epoch):
+        """
+        Method that defines how images are logged
+        Args:
+            image: image to be logged
+            name: name of the image
+            epoch: epoch in the training loop
+        """
+        pass
+
+    @abstractmethod
+    def log_histogram(self, histogram, name, epoch):
+        """
+        Method that defines how histograms are logged
+        Args:
+            histogram: histogram to be logged
+            name: name of the histogram
+            epoch: epoch in the training loop
+        """
+        pass
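
LoggerInterface only fixes the method names; the backends presumably live in the concrete loggers added elsewhere in this commit and not shown in this excerpt. As an illustration of how the interface is meant to be implemented, a sketch backed by TensorBoard's SummaryWriter:

from torch.utils.tensorboard import SummaryWriter
from PINNFramework.Logger_Interface import LoggerInterface


class TensorBoardLogger(LoggerInterface):
    """Illustrative logger; the commit's own logger implementations may differ."""

    def __init__(self, log_dir="runs/pinn"):
        self.writer = SummaryWriter(log_dir)

    def log_scalar(self, scalar, name, epoch):
        self.writer.add_scalar(name, scalar, epoch)

    def log_image(self, image, name, epoch):
        # SummaryWriter expects a CHW tensor (or HWC with dataformats='HWC')
        self.writer.add_image(name, image, epoch)

    def log_histogram(self, histogram, name, epoch):
        self.writer.add_histogram(name, histogram, epoch)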


4 changes: 2 additions & 2 deletions PINNFramework/LossTerm.py
@@ -5,13 +5,12 @@ class LossTerm:
     """
     Defines the main structure of a loss term
     """
-    def __init__(self, dataset, norm='L2', weight=1.):
+    def __init__(self, dataset, name, norm='L2', weight=1.):
         """
         Constructor of a loss term
         Args:
             dataset (torch.utils.Dataset): dataset that provides the residual points
-            pde (function): function that represents residual of the PDE
             norm: Norm used for calculation PDE loss
             weight: Weighting for the loss term
         """
@@ -24,4 +23,5 @@ def __init__(self, dataset, norm='L2', weight=1.):
             # Case for self implemented norms
             self.norm = norm
         self.dataset = dataset
+        self.name = name
         self.weight = weight
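
The name attribute added here is what connects the loss terms to the logging interface: each weighted term can now be reported under its own tag. A hedged sketch of that wiring, reusing the TensorBoardLogger sketched above and assuming InitialCondition.__call__ returns the weighted norm between prediction and gt_y (the commit's actual training loop is not part of this excerpt):

import torch
from torch.utils.data import TensorDataset
from PINNFramework.InitalCondition import InitialCondition   # module name as spelled in the repo

x0 = torch.rand(64, 2)                                       # made-up initial-condition points
u0 = torch.sin(x0[:, :1])
ic = InitialCondition(TensorDataset(x0, u0), name="initial_condition")

model = torch.nn.Linear(2, 1)                                # stand-in for a PINN
ic_loss = ic(x0, model, u0)

logger = TensorBoardLogger()
logger.log_scalar(ic_loss.item(), ic.name, 0)                # logged under the term's name
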
5 changes: 2 additions & 3 deletions PINNFramework/PDELoss.py
@@ -1,12 +1,11 @@
-import torch
 from torch import Tensor as Tensor
 from torch.nn import Module as Module
 from torch.nn import MSELoss, L1Loss
 from .LossTerm import LossTerm


 class PDELoss(LossTerm):
-    def __init__(self, dataset, pde, norm='L2', weight=1.):
+    def __init__(self, dataset, pde, name, norm='L2', weight=1.):
         """
         Constructor of the PDE Loss
@@ -16,7 +15,7 @@ def __init__(self, dataset, pde, norm='L2', weight=1.):
             norm: Norm used for calculation PDE loss
             weight: Weighting for the loss term
         """
-        super(PDELoss, self).__init__(dataset, norm, weight)
+        super(PDELoss, self).__init__(dataset, name, norm, weight)
         self.dataset = dataset
         self.pde = pde

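The PDE residual term follows the same pattern as the other named loss terms. A sketch with a hypothetical 1D heat-equation residual; the exact signature that PDELoss.__call__ expects for pde is not visible in this excerpt, so the (x, u) convention below is an assumption:

import torch
from torch.utils.data import TensorDataset
from PINNFramework.PDELoss import PDELoss

def heat_residual(x, u):
    # u is the network output at x = (x, t), with the graph kept for autograd
    du = torch.autograd.grad(u, x, grad_outputs=torch.ones_like(u), create_graph=True)[0]
    u_x, u_t = du[:, :1], du[:, 1:]
    du_x = torch.autograd.grad(u_x, x, grad_outputs=torch.ones_like(u_x), create_graph=True)[0]
    return u_t - 0.1 * du_x[:, :1]                 # u_t - alpha * u_xx with alpha = 0.1

pde_points = TensorDataset(torch.rand(512, 2))     # made-up (x, t) collocation points
pde_loss = PDELoss(pde_points, heat_residual, name="pde")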