feat(point_cloud): Adds point cloud example an

cornerfarmer committed Jan 25, 2024
1 parent 61f7092 commit 1aa9a0b
Showing 8 changed files with 193 additions and 15 deletions.
2 changes: 1 addition & 1 deletion blenderproc/api/object/__init__.py
@@ -6,5 +6,5 @@
from blenderproc.python.object.PhysicsSimulation import simulate_physics_and_fix_final_poses, simulate_physics
from blenderproc.python.types.MeshObjectUtility import get_all_mesh_objects, convert_to_meshes, \
create_from_blender_mesh, create_with_empty_mesh, create_primitive, disable_all_rigid_bodies, \
create_bvh_tree_multi_objects, compute_poi, scene_ray_cast
create_bvh_tree_multi_objects, compute_poi, scene_ray_cast, create_from_point_cloud
from blenderproc.python.types.EntityUtility import create_empty, delete_multiple, convert_to_entities
22 changes: 14 additions & 8 deletions blenderproc/python/camera/CameraProjection.py
@@ -11,11 +11,11 @@
from blenderproc.python.camera.CameraUtility import get_camera_pose, get_intrinsics_as_K_matrix


def depth_via_raytracing(resolution_x: int, resolution_y: int, bvh_tree: BVHTree, frame: Optional[int] = None, return_dist: bool = False) -> np.ndarray:
""" Computes a depth images using raytracing
def depth_via_raytracing(bvh_tree: BVHTree, frame: Optional[int] = None, return_dist: bool = False) -> np.ndarray:
""" Computes a depth images using raytracing.
All pixel that correspond to rays which do not hit any object are set to inf.
:param resolution_x: The desired width of the depth image.
:param resolution_y: The desired height of the depth image.
:param bvh_tree: The BVH tree to use for raytracing.
:param frame: The frame number whose assigned camera pose should be used. If None is given, the current frame
is used.
@@ -27,6 +27,8 @@ def depth_via_raytracing(resolution_x: int, resolution_y: int, bvh_tree: BVHTree
cam = cam_ob.data

cam2world_matrix = cam_ob.matrix_world
resolution_x = bpy.context.scene.render.resolution_x
resolution_y = bpy.context.scene.render.resolution_y

# Get position of the corners of the near plane
frame = cam.view_frame(scene=bpy.context.scene)
@@ -58,13 +60,14 @@ def depth_via_raytracing(resolution_x: int, resolution_y: int, bvh_tree: BVHTree
depth = dist2depth(dists)
return depth

def unproject_points(points_2d: np.ndarray, depth: np.ndarray, frame: Optional[int] = None) -> np.ndarray:
def unproject_points(points_2d: np.ndarray, depth: np.ndarray, frame: Optional[int] = None, depth_cut_off: float = 1e6) -> np.ndarray:
""" Unproject 2D points into 3D
:param points_2d: An array of N 2D points with shape [N, 2].
:param depth: A list of depth values corresponding to each 2D point, shape [N].
:param frame: The frame number whose assigned camera pose should be used. If None is given, the current frame
is used.
:param depth_cut_off: All points that correspond to depth values greater than this threshold will be set to NaN.
:return: The unprojected 3D points with shape [N, 3].
"""
# Get extrinsics and intrinsics
@@ -85,6 +88,8 @@ def unproject_points(points_2d: np.ndarray, depth: np.ndarray, frame: Optional[i
points_cam = np.concatenate((points_cam, np.ones_like(points[:, :1])), -1)
points_world = (cam2world @ points_cam.T).T

points_world[depth > depth_cut_off, :] = np.nan

return points_world[:, :3]


@@ -115,19 +120,20 @@ def project_points(points: np.ndarray, frame: Optional[int] = None) -> np.ndarra
points_2d[..., 1] = (bpy.context.scene.render.resolution_y - 1) - points_2d[..., 1]
return points_2d

def pointcloud_from_depth(depth: np.ndarray, frame: Optional[int] = None) -> np.ndarray:
def pointcloud_from_depth(depth: np.ndarray, frame: Optional[int] = None, depth_cut_off: float = 1e6) -> np.ndarray:
""" Compute a point cloud from a given depth image.
:param depth: The depth image with shape [H, W].
:param frame: The frame number whose assigned camera pose should be used. If None is given, the current frame
is used.
:param depth_cut_off: All points that correspond to depth values greater than this threshold will be set to NaN.
:return: The point cloud with shape [H, W, 3]
"""
"""
# Generate 2D coordinates of all pixels in the given image.
y = np.arange(depth.shape[0])
x = np.arange(depth.shape[1])
points = np.stack(np.meshgrid(x, y), -1).astype(np.float32)
# Unproject the 2D points
return unproject_points(points.reshape(-1, 2), depth.flatten(), frame).reshape(depth.shape[0], depth.shape[1], 3)
return unproject_points(points.reshape(-1, 2), depth.flatten(), frame, depth_cut_off).reshape(depth.shape[0], depth.shape[1], 3)


59 changes: 59 additions & 0 deletions blenderproc/python/types/MeshObjectUtility.py
@@ -498,6 +498,14 @@ def add_modifier(self, name: str, **kwargs):
for key, value in kwargs.items():
setattr(modifier, key, value)

def add_geometry_nodes(self):
""" Adds a new geometry nodes modifier to the object.
"""
# Create the new modifier
bpy.ops.node.new_geometry_nodes_modifier({"object": self.blender_obj})
modifier = self.blender_obj.modifiers[-1]
return modifier.node_group

def mesh_as_trimesh(self) -> Trimesh:
""" Returns a trimesh.Trimesh instance of the MeshObject.
@@ -545,6 +553,57 @@ def create_with_empty_mesh(object_name: str, mesh_name: str = None) -> "MeshObje
mesh_name = object_name
return create_from_blender_mesh(bpy.data.meshes.new(mesh_name), object_name)

def create_from_point_cloud(points: np.ndarray, object_name: str, add_geometry_nodes_visualization: bool = False) -> "MeshObject":
""" Create a mesh from a point cloud.
The mesh's vertices are filled with the points from the given point cloud.
:param points: The points of the point cloud. Should have shape [N, 3].
:param object_name: The name of the new object.
:param add_geometry_nodes_visualization: If True, a geometry nodes modifier is added,
which instances a sphere on every point. In this way,
the point cloud will appear in renderings.
:return: The new Mesh object.
"""
point_cloud = create_with_empty_mesh(object_name)

# Go into mesh edit mode
point_cloud.edit_mode()
bm = point_cloud.mesh_as_bmesh()

# Add a vertex for each point
for p, point in enumerate(points):
if not np.isnan(point).any():
bm.verts.new(point)

# Persist the changes
point_cloud.update_from_bmesh(bm)
point_cloud.object_mode()

# If desired, add geometry nodes that add an icosphere instance to every point
if add_geometry_nodes_visualization:
geometry_nodes = point_cloud.add_geometry_nodes()
# Collect all nodes
input_node = Utility.get_the_one_node_with_type(geometry_nodes.nodes, "NodeGroupInput")
output_node = Utility.get_the_one_node_with_type(geometry_nodes.nodes, "NodeGroupOutput")
instances_node = geometry_nodes.nodes.new("GeometryNodeInstanceOnPoints")
sphere_node = geometry_nodes.nodes.new("GeometryNodeMeshIcoSphere")
material_node = geometry_nodes.nodes.new("GeometryNodeSetMaterial")

mat = point_cloud.new_material("point_cloud_mat")
mat.set_principled_shader_value("Base Color", [1, 0, 0, 1])

# Link them
sphere_node.inputs["Radius"].default_value = 0.015
material_node.inputs["Material"].default_value = mat.blender_obj
geometry_nodes.links.new(input_node.outputs["Geometry"], instances_node.inputs["Points"])
geometry_nodes.links.new(sphere_node.outputs["Mesh"], instances_node.inputs["Instance"])
geometry_nodes.links.new(instances_node.outputs["Instances"], material_node.inputs["Geometry"])
geometry_nodes.links.new(material_node.outputs["Geometry"], output_node.inputs["Geometry"])


return point_cloud


def create_primitive(shape: str, **kwargs) -> "MeshObject":
""" Creates a new primitive mesh object.
59 changes: 59 additions & 0 deletions examples/advanced/point_clouds/README.md
@@ -0,0 +1,59 @@
# Point clouds

<p align="center">
<img src="../../../images/point_clouds_0.jpg" alt="Front readme image" width=375>
<img src="../../../images/point_clouds_1.jpg" alt="Front readme image" width=375>
</p>

In this example we demonstrate how to compute a depth map with raytracing and how to unproject it into a point cloud.
The point cloud is computed from the view of the second camera pose (right image) and is visualized using red dots.

## Usage

Execute in the BlenderProc main directory:

```
blenderproc run examples/advanced/point_clouds/main.py examples/resources/camera_positions examples/resources/scene.obj examples/basics/basic/output
```

* `examples/resources/camera_positions`: text file with parameters of camera positions.
* `examples/resources/scene.obj`: path to the object file with the basic scene.
* `examples/basics/basic/output`: path to the output directory.

## Visualization

Visualize the generated data:

```
blenderproc vis hdf5 examples/advanced/point_clouds/output/0.hdf5
```

## Implementation

First, we compute a depth image via raytracing, using the camera pose assigned to keyframe #1.

```
depth = bproc.camera.depth_via_raytracing(bvh_tree, 1)
```
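`depth_via_raytracing` reads the image resolution from the current render settings and marks pixels whose rays hit no object with `inf`. The BVH tree is built once from the loaded objects, as in `main.py` below; a minimal sketch of the required setup:

```
# objs are the objects previously loaded via bproc.loader.load_obj(...)
bproc.camera.set_resolution(128, 128)
# Build a BVH tree over the loaded objects for fast ray casting
bvh_tree = bproc.object.create_bvh_tree_multi_objects(objs)
```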

Alternatively, the renderer can be used to produce the depth image:

```
data = bproc.renderer.render()
depth = data["depth"][1]
```
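Note that the two approaches encode missed rays differently: the raytraced depth uses `inf`, while the rendered depth stores a large finite placeholder value. If the rendered depth should behave like the raytraced one, these values can be mapped to `inf` as well; a small sketch using the `65504` cut-off from the updated test:

```
depth = data["depth"][1]
# Treat the renderer's far-plane placeholder values as "no hit"
depth[depth >= 65504] = np.inf
```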

We now unproject the depth image again, resulting in a point cloud.
The point cloud contains one point per pixel.

```
points = bproc.camera.pointcloud_from_depth(depth, 1)
points = points.reshape(-1, 3)
```
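Rays that do not hit any object carry `inf` depth and are unprojected to `NaN` points (the same happens for depth values above `depth_cut_off`). `create_from_point_cloud` skips such points automatically, but they can also be filtered out beforehand, e.g.:

```
# Drop NaN points, i.e. pixels whose rays did not hit any geometry
points = points[~np.isnan(points).any(axis=1)]
```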

To visualize the point cloud, we create a mesh whose vertices are set to the points of the point cloud.
To make the points visible in the final rendering, we add a geometry nodes modifier which instances a sphere mesh on each point.

```
point_cloud = bproc.object.create_from_point_cloud(points, "point_cloud", add_geometry_nodes_visualization=True)
```
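Finally, as shown in `main.py` below, the render resolution is raised again before rendering, and the result, now containing the red sphere instances, is written to an .hdf5 container:

```
bproc.camera.set_resolution(512, 512)
data = bproc.renderer.render()
bproc.writer.write_hdf5(args.output_dir, data)
```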
51 changes: 51 additions & 0 deletions examples/advanced/point_clouds/main.py
@@ -0,0 +1,51 @@
import blenderproc as bproc
import argparse
import numpy as np

from blenderproc.python.utility.Utility import Utility

parser = argparse.ArgumentParser()
parser.add_argument('camera', nargs='?', default="examples/resources/camera_positions", help="Path to the camera file")
parser.add_argument('scene', nargs='?', default="examples/resources/scene.obj", help="Path to the scene file")
parser.add_argument('output_dir', nargs='?', default="examples/advanced/point_clouds/output", help="Path to where the final files will be saved ")
args = parser.parse_args()

bproc.init()

# load the objects into the scene
objs = bproc.loader.load_obj(args.scene)

# define a light and set its location and energy level
light = bproc.types.Light()
light.set_type("POINT")
light.set_location([5, -5, 5])
light.set_energy(1000)

# define the camera resolution
bproc.camera.set_resolution(128, 128)
bvh_tree = bproc.object.create_bvh_tree_multi_objects(objs)
# read the camera positions file and convert into homogeneous camera-world transformation
with open(args.camera, "r") as f:
for line in f.readlines():
line = [float(x) for x in line.split()]
position, euler_rotation = line[:3], line[3:6]
matrix_world = bproc.math.build_transformation_mat(position, euler_rotation)
bproc.camera.add_camera_pose(matrix_world)

# Compute a depth image from the view of the second camera pose
depth = bproc.camera.depth_via_raytracing(bvh_tree, 1)

# Unproject the depth image again to get a point cloud
points = bproc.camera.pointcloud_from_depth(depth, 1)
points = points.reshape(-1, 3)

# Visualize the point cloud as a mesh
point_cloud = bproc.object.create_from_point_cloud(points, "point_cloud", add_geometry_nodes_visualization=True)

# render the whole pipeline
bproc.camera.set_resolution(512, 512)
data = bproc.renderer.render()

# write the data to a .hdf5 container
bproc.writer.write_hdf5(args.output_dir, data)

Binary file added images/point_clouds_0.png
Binary file added images/point_clouds_1.png
15 changes: 9 additions & 6 deletions tests/testCameraProjection.py
@@ -21,7 +21,7 @@ def test_unproject_project(self):

bvh_tree = bproc.object.create_bvh_tree_multi_objects(objs)

depth = bproc.camera.depth_via_raytracing(640, 480, bvh_tree)
depth = bproc.camera.depth_via_raytracing(bvh_tree)
pc = bproc.camera.pointcloud_from_depth(depth)

pixels = bproc.camera.project_points(pc.reshape(-1, 3)).reshape(480, 640, 2)
@@ -40,6 +40,7 @@ def test_depth_via_raytracing(self):
resource_folder = os.path.join("examples", "resources")
objs = bproc.loader.load_obj(os.path.join(resource_folder, "scene.obj"))


cam2world_matrix = np.array([
[1.0, 0.0, 0.0, 0.0],
[0.0, 0.2674988806247711, -0.9635581970214844, -13.741],
@@ -51,17 +52,19 @@

bvh_tree = bproc.object.create_bvh_tree_multi_objects(objs)

depth = bproc.camera.depth_via_raytracing(640, 480, bvh_tree)
depth = bproc.camera.depth_via_raytracing(bvh_tree)

bproc.renderer.enable_depth_output(activate_antialiasing=False)
data = bproc.renderer.render()
data["depth"][0][data["depth"][0] == 65504] = np.inf
print(depth[0, :10], data["depth"][0][0, :10])
print(depth[-1, :10], data["depth"][0][-1, :10])
data["depth"][0][data["depth"][0] >= 65504] = np.inf

np.testing.assert_almost_equal(depth, data["depth"][0], decimal=1)
diff = np.abs(depth[~np.isinf(depth)] - data["depth"][0][~np.isinf(depth)])

self.assertTrue(np.median(diff) < 1e-4)
self.assertTrue((diff < 1e-4).mean() > 0.99)

if __name__ == '__main__':
bproc.init()
#test = UnitTestCheckCameraProjection()
#test.test_depth_via_raytracing()
unittest.main()
