Skip to content

Commit

Permalink
Switch the launch argument order
Browse files Browse the repository at this point in the history
  • Loading branch information
ksimpson-work committed Dec 19, 2024
1 parent 33b7366 commit 07311af
Show file tree
Hide file tree
Showing 5 changed files with 8 additions and 8 deletions.
6 changes: 3 additions & 3 deletions cuda_core/cuda/core/experimental/_launcher.py
Original file line number Diff line number Diff line change
Expand Up @@ -107,17 +107,17 @@ def _cast_to_3_tuple(self, cfg):
raise ValueError


def launch(kernel, config, *kernel_args):
def launch(config, kernel, *kernel_args):
"""Launches a :obj:`~_module.Kernel`
object with launch-time configuration.
Parameters
----------
kernel : :obj:`~_module.Kernel`
Kernel to launch.
config : :obj:`~_launcher.LaunchConfig`
Launch configuration, with options as provided by the
:obj:`~_launcher.LaunchConfig` dataclass.
kernel : :obj:`~_module.Kernel`
Kernel to launch.
*kernel_args : Any
Variable length argument list that is provided to the
launching kernel.
Expand Down
4 changes: 2 additions & 2 deletions cuda_core/examples/saxpy.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@
ker_args = (a, x.data.ptr, y.data.ptr, out.data.ptr, size)

# launch kernel on stream s
launch(ker, config, *ker_args)
launch(config, ker, *ker_args)
s.sync()

# check result
Expand Down Expand Up @@ -90,7 +90,7 @@
ker_args = (a, x.data.ptr, y.data.ptr, buf, size)

# launch kernel on stream s
launch(ker, config, *ker_args)
launch(config, ker, *ker_args)
s.sync()

# check result
Expand Down
2 changes: 1 addition & 1 deletion cuda_core/examples/strided_memory_view.py
Original file line number Diff line number Diff line change
Expand Up @@ -127,7 +127,7 @@ def my_func(arr, work_stream):
block = 256
grid = (size + block - 1) // block
config = LaunchConfig(grid=grid, block=block, stream=work_stream)
launch(gpu_ker, config, view.ptr, np.uint64(size))
launch(config, gpu_ker, view.ptr, np.uint64(size))
# Here we're being conservative and synchronize over our work stream,
# assuming we do not know the data stream; if we know then we could
# just order the data stream after the work stream here, e.g.
Expand Down
2 changes: 1 addition & 1 deletion cuda_core/examples/thread_block_cluster.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@
config = LaunchConfig(grid=grid, cluster=cluster, block=block, stream=dev.default_stream)

# launch kernel on the default stream
launch(ker, config)
launch(config, ker)
dev.sync()

print("done!")
2 changes: 1 addition & 1 deletion cuda_core/examples/vector_add.py
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,7 @@
config = LaunchConfig(grid=grid, block=block, stream=s)

# launch kernel on stream s
launch(ker, config, a.data.ptr, b.data.ptr, c.data.ptr, cp.uint64(size))
launch(config, ker, a.data.ptr, b.data.ptr, c.data.ptr, cp.uint64(size))
s.sync()

# check result
Expand Down

0 comments on commit 07311af

Please sign in to comment.