diff --git a/pytorch/config.yaml b/pytorch/config.yaml
index c103e26..2e24416 100644
--- a/pytorch/config.yaml
+++ b/pytorch/config.yaml
@@ -34,8 +34,8 @@ modelParam:
   divL1Lambda: 0
   # MSE of long term divergence
   # If > 0, implements the Long Term divergence concept from FluidNet
-  divLongTermLambda: 0
-  # Time step: default simulation timestep.
+  divLongTermLambda: 1
+  # Time step: default simulation timestep, used only when long-term divergence is active.
   dt: 0.1
   # buoyancyScale : Buoyancy forces scale
   # gravityScale : Gravity forces scale
@@ -96,7 +96,7 @@ modelParam:
 numWorkers: 3
 # If true, dataset is preprocessed and programs exists.
 # Preprocessing is automatic if no previous preproc is detected on current dataset.
-preprocOnly: false
+preprocOriginalFluidNetDataOnly: false
 # printTraining : Debug options for training.
 # Prints or shows validation dataset and compares net
 # output to GT.
diff --git a/pytorch/fluid_net_train.py b/pytorch/fluid_net_train.py
index 62ec90e..78ffe76 100644
--- a/pytorch/fluid_net_train.py
+++ b/pytorch/fluid_net_train.py
@@ -96,7 +96,7 @@
 conf['shuffleTraining'] = not arguments.noShuffle
 
 # Preprocessing dataset message (will exit after preproc)
-if (conf['preprocOnly']):
+if (conf['preprocOriginalFluidNetDataOnly']):
     print('Running preprocessing only')
 
 resume = False
@@ -106,7 +106,7 @@
 tr = lib.FluidNetDataset(conf, 'tr', save_dt=4, resume=resume)
 te = lib.FluidNetDataset(conf, 'te', save_dt=4, resume=resume)
 
-if (conf['preprocOnly']):
+if (conf['preprocOriginalFluidNetDataOnly']):
     sys.exit()
 
 # We create two conf dicts, general params and model params.