Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Backbones #14

Open
wants to merge 27 commits into
base: master
Choose a base branch
from
Open
Changes from all commits
Commits
Show all changes
27 commits
Select commit Hold shift + click to select a range
d459cd9
add code for pointnet++ and kpconv
humanpose1 Jul 29, 2021
1597c9d
KPConv internal modules
humanpose1 Jul 29, 2021
f15c417
pointnet++
humanpose1 Jul 29, 2021
1708bf1
add collate function
humanpose1 Jul 30, 2021
1ee6dd6
add data file
humanpose1 Aug 3, 2021
6e46522
add data file
humanpose1 Aug 3, 2021
f121bfc
add test
humanpose1 Aug 3, 2021
c1810ec
solve test
humanpose1 Aug 3, 2021
307b841
add test for batches
humanpose1 Aug 3, 2021
ac65ddb
start writing tests
humanpose1 Aug 3, 2021
11454fa
add utils convolutionformat
humanpose1 Aug 4, 2021
c8f700e
add test hydra run with a script runner
humanpose1 Aug 4, 2021
83f6b46
add a fixture, as in lightning-transformers
humanpose1 Aug 4, 2021
e9be4f8
move test
humanpose1 Aug 4, 2021
6c12148
test datasets
humanpose1 Aug 5, 2021
239e20e
debug tests
humanpose1 Aug 5, 2021
23eaa33
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Aug 5, 2021
2bec877
test kpconv
humanpose1 Aug 5, 2021
b95366b
add the conf files and the tests of the api
humanpose1 Aug 5, 2021
d169f31
tests pass for kpconv
humanpose1 Aug 5, 2021
c09890b
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Aug 5, 2021
a30c9bc
remove collate_fn
humanpose1 Aug 17, 2021
9fd7e00
remove useless class in config
humanpose1 Aug 17, 2021
16b4db5
add custom non linearity for minkowski and torchsparse
humanpose1 Aug 19, 2021
edba40c
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Aug 19, 2021
c4bda7b
change the modules for sparseConv3d
humanpose1 Aug 19, 2021
44a12d8
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Aug 19, 2021
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion conf/dataset/default.yaml
Original file line number Diff line number Diff line change
@@ -4,6 +4,7 @@ cfg:
batch_size: ${training.batch_size}
num_workers: ${training.num_workers}
dataroot: data
conv_type: ${model.model.conv_type}

common_transform:
aug_transform:
@@ -13,4 +14,4 @@ cfg:
test_transform: "${dataset.cfg.val_transform}"
train_transform:
- "${dataset.cfg.aug_transform}"
- "${dataset.cfg.common_transform}"
- "${dataset.cfg.common_transform}"
1 change: 1 addition & 0 deletions conf/model/segmentation/default.yaml
Original file line number Diff line number Diff line change
@@ -13,3 +13,4 @@ model:
backbone:
input_nc: ${dataset.cfg.feature_dimension}
architecture: unet
conv_type: null
85 changes: 85 additions & 0 deletions conf/model/segmentation/kpconv/KPFCNN.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,85 @@
# @package model
defaults:
- /model/segmentation/default

model:
conv_type: "PARTIAL_DENSE"
backbone:
_target_: torch_points3d.applications.kpconv.KPConv
config:
define_constants:
in_grid_size: 0.02
in_feat: 64
bn_momentum: 0.2
max_neighbors: 25
down_conv:
down_conv_nn:
[
[[FEAT + 1, in_feat], [in_feat, 2*in_feat]],
[[2*in_feat, 2*in_feat], [2*in_feat, 4*in_feat]],
[[4*in_feat, 4*in_feat], [4*in_feat, 8*in_feat]],
[[8*in_feat, 8*in_feat], [8*in_feat, 16*in_feat]],
[[16*in_feat, 16*in_feat], [16*in_feat, 32*in_feat]],
]
grid_size:
[
[in_grid_size, in_grid_size],
[2*in_grid_size, 2*in_grid_size],
[4*in_grid_size, 4*in_grid_size],
[8*in_grid_size, 8*in_grid_size],
[16*in_grid_size, 16*in_grid_size],
]
prev_grid_size:
[
[in_grid_size, in_grid_size],
[in_grid_size, 2*in_grid_size],
[2*in_grid_size, 4*in_grid_size],
[4*in_grid_size, 8*in_grid_size],
[8*in_grid_size, 16*in_grid_size],
]
block_names:
[
["SimpleBlock", "ResnetBBlock"],
["ResnetBBlock", "ResnetBBlock"],
["ResnetBBlock", "ResnetBBlock"],
["ResnetBBlock", "ResnetBBlock"],
["ResnetBBlock", "ResnetBBlock"],
]
has_bottleneck:
[
[False, True],
[True, True],
[True, True],
[True, True],
[True, True],
]
deformable:
[
[False, False],
[False, False],
[False, False],
[False, False],
[False, False],
]
max_num_neighbors:
[[max_neighbors,max_neighbors], [max_neighbors, max_neighbors], [max_neighbors, max_neighbors], [max_neighbors, max_neighbors], [max_neighbors, max_neighbors]]
module_name: KPDualBlock
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Eventually we can redo this to use hydra instantiation for blocks and remove some of the model factory reflection code

up_conv:
module_name: FPModule_PD
up_conv_nn:
[
[32*in_feat + 16*in_feat, 8*in_feat],
[8*in_feat + 8*in_feat, 4*in_feat],
[4*in_feat + 4*in_feat, 2*in_feat],
[2*in_feat + 2*in_feat, in_feat],
]
skip: True
up_k: [1,1,1,1]
bn_momentum:
[
bn_momentum,
bn_momentum,
bn_momentum,
bn_momentum,
bn_momentum,
]
40 changes: 40 additions & 0 deletions conf/model/segmentation/pointnet2/pointnet2_largemsg.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
# @package model
defaults:
- /model/segmentation/default

model:
conv_type: "DENSE"
backbone:
_target_: torch_points3d.applications.pointnet2.PointNet2
config:
down_conv:
module_name: PointNetMSGDown
npoint: [1024, 256, 64, 16]
radii: [[0.05, 0.1], [0.1, 0.2], [0.2, 0.4], [0.4, 0.8]]
nsamples: [[16, 32], [16, 32], [16, 32], [16, 32]]
down_conv_nn:
[
[[FEAT+3, 16, 16, 32], [FEAT+3, 32, 32, 64]],
[[32 + 64+3, 64, 64, 128], [32 + 64+3, 64, 96, 128]],
[
[128 + 128+3, 128, 196, 256],
[128 + 128+3, 128, 196, 256],
],
[
[256 + 256+3, 256, 256, 512],
[256 + 256+3, 256, 384, 512],
],
]
up_conv:
module_name: DenseFPModule
up_conv_nn:
[
[512 + 512 + 256 + 256, 512, 512],
[512 + 128 + 128, 512, 512],
[512 + 64 + 32, 256, 256],
[256 + FEAT, 128, 128],
]
skip: True
mlp_cls:
nn: [128, 128]
dropout: 0.5
4 changes: 2 additions & 2 deletions conf/model/segmentation/sparseconv3d/Res16UNet34.yaml
Original file line number Diff line number Diff line change
@@ -2,9 +2,9 @@
defaults:
- /model/segmentation/ResUNet32

model:
model:
backbone:
down_conv:
N: [ 0, 2, 3, 4, 6 ]
up_conv:
N: [ 1, 1, 1, 1, 1 ]
N: [ 1, 1, 1, 1, 1 ]
4 changes: 2 additions & 2 deletions conf/model/segmentation/sparseconv3d/ResUNet32.yaml
Original file line number Diff line number Diff line change
@@ -2,11 +2,11 @@
defaults:
- /model/segmentation/default

model:
model:
conv_type: "SPARSE"
backbone:
_target_: torch_points3d.applications.sparseconv3d.SparseConv3d
backend: torchsparse

config:
define_constants:
in_feat: 32
1 change: 1 addition & 0 deletions test/.#test_hydra.py
52 changes: 52 additions & 0 deletions test/conftest.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
from typing import List
import os
import os.path as osp
import pytest

from hydra import compose, initialize
from hydra.test_utils.test_utils import find_parent_dir_containing

from torch_points3d.trainer import LitTrainer
from torch_points3d.core.instantiator import HydraInstantiator


class ScriptRunner:
    """Test helper that runs a training job end-to-end through the Hydra compose API."""

    @staticmethod
    def find_hydra_conf_dir(config_dir: str = "conf") -> str:
        """
        Util function to find the hydra config directory from the main repository for testing.
        Args:
            config_dir: Name of config directory.
        Returns: Relative config path
        """
        parent_dir = find_parent_dir_containing(config_dir)
        relative_conf_dir = osp.relpath(parent_dir, os.path.dirname(__file__))
        return osp.join(relative_conf_dir, config_dir)

    def train(self, cmd_args: List[str]) -> None:
        """Compose the `config` tree with the given Hydra overrides and run training once."""
        relative_conf_dir = self.find_hydra_conf_dir()
        with initialize(config_path=relative_conf_dir, job_name="test_app"):
            cfg = compose(config_name="config", overrides=cmd_args)
            instantiator = HydraInstantiator()
            trainer = LitTrainer(
                instantiator,
                dataset=cfg.get("dataset"),
                trainer=cfg.get("trainer"),
                model=cfg.get("model"))
            trainer.train()

    @staticmethod
    def _build_overrides(dataset: str, model: str, num_workers: int, fast_dev_run: int) -> List[str]:
        """Build the Hydra override list for a one-epoch smoke-test run."""
        # NOTE(review): assumes the trainer config exposes a `fast_dev_run` key
        # (Lightning-style trainer configs normally do) — confirm against conf/trainer.
        return [
            f"model={model}",
            f"dataset={dataset}",
            "trainer.max_epochs=1",
            f"trainer.fast_dev_run={fast_dev_run}",
            f"training.num_workers={num_workers}",
        ]

    def hf_train(self, dataset: str, model: str, num_workers: int = 0, fast_dev_run: int = 1):
        """Run a short training smoke test for `model` on `dataset`.

        Bug fix: `num_workers` and `fast_dev_run` were previously accepted but
        ignored — the overrides were hard-coded (`training.num_workers=1`).
        They are now forwarded to the composed config.
        """
        self.train(self._build_overrides(dataset, model, num_workers, fast_dev_run))


@pytest.fixture(scope="session")
def script_runner() -> ScriptRunner:
    """Session-scoped fixture: one shared ScriptRunner for all tests in the run."""
    runner = ScriptRunner()
    return runner
Loading