Revert "Move functional collectives to the right namespace (pytorch#9…
Browse files Browse the repository at this point in the history
…7793)"

This reverts commit 184bfbc.

Reverted pytorch#97793 on behalf of https://github.com/atalman because it breaks internal builds.
pytorchmergebot committed Mar 31, 2023
1 parent fa1a8b9 commit f4f1a5b
Showing 11 changed files with 12 additions and 108 deletions.
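
In practical terms, this revert moves the functional-collective bindings generated from native_functions.yaml back from the planned torch._C._dist namespace to torch._C._nn and deletes the codegen plumbing for a dedicated dist module. A minimal sketch of how to observe that, assuming a PyTorch build that includes this commit (internal attributes, not a public API):

import torch

# After this revert the collectives are generated with `python_module: nn`,
# so their C bindings should show up under torch._C._nn, not torch._C._dist.
for name in ("all_reduce", "all_gather_into_tensor", "reduce_scatter_tensor", "wait_tensor"):
    print(name, hasattr(torch._C._nn, name))
print("has _dist:", hasattr(torch._C, "_dist"))  # expected False once python_dist_functions.* are gone
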
12 changes: 8 additions & 4 deletions aten/src/ATen/native/native_functions.yaml
@@ -14718,25 +14718,29 @@

 # Collectives
 - func: all_reduce(Tensor self, str reduceOp, str tag, int[] ranks, int group_size) -> Tensor
-  python_module: dist
+  # This should be changed to distributed but it requires changes all over the place to work
+  python_module: nn
   dispatch:
     CompositeExplicitAutograd: all_reduce
   variants: function

 - func: all_gather_into_tensor(Tensor shard, str tag, int[] ranks, int group_size) -> Tensor
-  python_module: dist
+  # This should be changed to distributed but it requires changes all over the place to work
+  python_module: nn
   dispatch:
     CompositeExplicitAutograd: all_gather_into_tensor
   variants: function

 - func: reduce_scatter_tensor(Tensor input, str reduceOp, int scatter_dim, str tag, int[] ranks, int group_size) -> Tensor
-  python_module: dist
+  # This should be changed to distributed but it requires changes all over the place to work
+  python_module: nn
   dispatch:
     CompositeExplicitAutograd: reduce_scatter_tensor
   variants: function

 - func: wait_tensor(Tensor self) -> Tensor
-  python_module: dist
+  # This should be changed to distributed but it requires changes all over the place to work
+  python_module: nn
   dispatch:
     CompositeExplicitAutograd: wait_tensor

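For context, the python_module key tells the autograd codegen which generated file, and therefore which torch._C submodule, receives the Python binding: nn routes these ops into python_nn_functions.cpp as torch._C._nn.*, while dist would have routed them into the now-deleted python_dist_functions.cpp as torch._C._dist.*. Below is a hedged sketch of calling the raw binding per the all_reduce schema above; this is an internal API, shown only to make the schema concrete, and the tag/ranks/group_size triple must describe an initialized process group:

import torch

def raw_all_reduce(t: torch.Tensor, tag: str, ranks: list, group_size: int) -> torch.Tensor:
    # Schema: all_reduce(Tensor self, str reduceOp, str tag, int[] ranks, int group_size) -> Tensor
    # With `python_module: nn`, the codegen exposes the binding as torch._C._nn.all_reduce.
    out = torch._C._nn.all_reduce(t, "sum", tag, ranks, group_size)  # type: ignore[attr-defined]
    # wait_tensor blocks on CPU / synchronizes streams on CUDA until the collective completes.
    return torch._C._nn.wait_tensor(out)  # type: ignore[attr-defined]
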
1 change: 0 additions & 1 deletion build.bzl
@@ -260,7 +260,6 @@ _GENERATED_AUTOGRAD_PYTHON_CPP = [
     "torch/csrc/autograd/generated/python_nested_functions.cpp",
     "torch/csrc/autograd/generated/python_fft_functions.cpp",
     "torch/csrc/autograd/generated/python_linalg_functions.cpp",
-    "torch/csrc/autograd/generated/python_dist_functions.cpp",
     "torch/csrc/autograd/generated/python_return_types.cpp",
     "torch/csrc/autograd/generated/python_enum_tag.cpp",
     "torch/csrc/autograd/generated/python_sparse_functions.cpp",
1 change: 0 additions & 1 deletion build_variables.bzl
@@ -930,7 +930,6 @@ def glob_libtorch_python_sources(gencode_pattern = ":generate-code[{}]"):
         "torch/csrc/autograd/generated/python_nn_functions.cpp",
         "torch/csrc/autograd/generated/python_fft_functions.cpp",
         "torch/csrc/autograd/generated/python_linalg_functions.cpp",
-        "torch/csrc/autograd/generated/python_dist_functions.cpp",
         "torch/csrc/autograd/generated/python_enum_tag.cpp",
         "torch/csrc/autograd/generated/python_return_types.cpp",
         "torch/csrc/autograd/generated/python_sparse_functions.cpp",
1 change: 0 additions & 1 deletion caffe2/CMakeLists.txt
@@ -394,7 +394,6 @@ set(GENERATED_CXX_PYTHON
     "${TORCH_SRC_DIR}/csrc/autograd/generated/python_nested_functions.cpp"
     "${TORCH_SRC_DIR}/csrc/autograd/generated/python_sparse_functions.cpp"
     "${TORCH_SRC_DIR}/csrc/autograd/generated/python_special_functions.cpp"
-    "${TORCH_SRC_DIR}/csrc/autograd/generated/python_dist_functions.cpp"
     "${TORCH_SRC_DIR}/csrc/autograd/generated/python_return_types.cpp"
     "${TORCH_SRC_DIR}/csrc/autograd/generated/python_enum_tag.cpp"
     )
1 change: 0 additions & 1 deletion pt_template_srcs.bzl
@@ -149,7 +149,6 @@ def get_generate_code_bin_outs():
         "autograd/generated/python_return_types.cpp": ["autograd/generated/python_return_types.cpp"],
         "autograd/generated/python_sparse_functions.cpp": ["autograd/generated/python_sparse_functions.cpp"],
         "autograd/generated/python_special_functions.cpp": ["autograd/generated/python_special_functions.cpp"],
-        "autograd/generated/python_dist_functions.cpp": ["autograd/generated/python_dist_functions.cpp"],
         "autograd/generated/python_torch_functions_0.cpp": ["autograd/generated/python_torch_functions_0.cpp"],
         "autograd/generated/python_torch_functions_1.cpp": ["autograd/generated/python_torch_functions_1.cpp"],
         "autograd/generated/python_torch_functions_2.cpp": ["autograd/generated/python_torch_functions_2.cpp"],
1 change: 0 additions & 1 deletion tools/BUCK.bzl
@@ -135,7 +135,6 @@ def define_tools_targets(
         "autograd/templates/python_return_types.cpp",
         "autograd/templates/python_sparse_functions.cpp",
         "autograd/templates/python_special_functions.cpp",
-        "autograd/templates/python_dist_functions.cpp",
         "autograd/templates/python_torch_functions.cpp",
         "autograd/templates/python_variable_methods.cpp",
         "autograd/templates/variable_factories.h",
14 changes: 0 additions & 14 deletions tools/autograd/gen_python_functions.py
@@ -239,10 +239,6 @@ def is_py_special_function(f: NativeFunction) -> bool:
     return f.python_module == "special"


-def is_py_dist_function(f: NativeFunction) -> bool:
-    return f.python_module == "dist"
-
-
 # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
 #
 # Main Function

@@ -349,15 +345,6 @@ def gen(
         symint=symint,
     )

-    create_python_bindings(
-        fm,
-        functions,
-        is_py_dist_function,
-        "torch.distributed.functional",
-        "python_dist_functions.cpp",
-        method=False,
-    )
-
     # Currently, we only use `functions` to generate `return_types` bindings.
     # All methods which return namedtuple have function variant at this point.
     # If any method only operator with namedtuple is added in the future,

@@ -915,7 +902,6 @@ def gen_has_torch_function_check(
             "torch.nested": "THPNestedVariableFunctionsModule",
             "torch.sparse": "THPSparseVariableFunctionsModule",
             "torch.special": "THPSpecialVariableFunctionsModule",
-            "torch.distributed.functional": "THPDistVariableFunctionsModule",
         }[module]
         if module
         else "THPVariableClass"
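
The deletions above follow the existing pattern in gen_python_functions.py: a per-module predicate selects which NativeFunctions get rendered into a given generated .cpp file and exposed under a given Python module. A simplified, hypothetical sketch of that routing (not the actual torchgen code; names mirror the diff):

def select_for_module(functions, predicate):
    # A binding group is just "the native functions whose python_module matches".
    return [f for f in functions if predicate(f)]

# The removed is_py_dist_function tested `f.python_module == "dist"` and was paired with the
# "torch.distributed.functional" module name and the python_dist_functions.cpp template.
# After the revert, the collectives fall back into the existing `nn` group.
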
68 changes: 0 additions & 68 deletions tools/autograd/templates/python_dist_functions.cpp

This file was deleted.

2 changes: 0 additions & 2 deletions torch/csrc/Module.cpp
@@ -41,7 +41,6 @@
 #include <torch/csrc/TypeInfo.h>
 #include <torch/csrc/api/include/torch/python/init.h>
 #include <torch/csrc/autograd/python_cpp_function.h>
-#include <torch/csrc/autograd/python_dist_functions.h>
 #include <torch/csrc/autograd/python_enum_tag.h>
 #include <torch/csrc/autograd/python_fft_functions.h>
 #include <torch/csrc/autograd/python_function.h>

@@ -1329,7 +1328,6 @@ PyObject* initModule() {
   torch::autograd::initNestedFunctions(module);
   torch::autograd::initSparseFunctions(module);
   torch::autograd::initSpecialFunctions(module);
-  torch::autograd::initDistFunctions(module);
   torch::autograd::init_legacy_variable(module);
   torch::profiler::initPythonBindings(module);
   torch::python::init_bindings(module);
9 changes: 0 additions & 9 deletions torch/csrc/autograd/python_dist_functions.h

This file was deleted.

10 changes: 4 additions & 6 deletions torch/distributed/_functional_collectives.py
@@ -105,7 +105,7 @@ class AsyncCollectiveTensor(torch.Tensor):
     Use it inside functional collective pytorch wrappers like the following:
     def functional_collective(self, group, tag):
         tag, rankset, group_size = _expand_group(group, tag)
-        tensor = torch._C._dist.{collective}(self, tag, rankset, group_size)
+        tensor = torch._C._nn.{collective}(self, tag, rankset, group_size)
         res = AsyncCollectiveTensor(tensor)
         _register_wrapper_tensor(res, tensor)
         return res

@@ -254,7 +254,7 @@ def wait_tensor(tensor):
     Waiting follows device semantics, which means blocking on CPU and synchronizing streams on CUDA.
     """
-    return torch._C._dist.wait_tensor(tensor)  # type: ignore[attr-defined]
+    return torch._C._nn.wait_tensor(tensor)  # type: ignore[attr-defined]

@@ -275,7 +275,7 @@ def all_reduce(self: torch.Tensor, reduceOp: str, group: RANK_TYPES, tag: str = ""):
     that information and perform collective algebraic optimization. Use other forms of input for that.
     """
     tag, rankset, group_size = _expand_group(group, tag)
-    tensor = torch._C._dist.all_reduce(self, reduceOp, tag, rankset, group_size)  # type: ignore[attr-defined]
+    tensor = torch._C._nn.all_reduce(self, reduceOp, tag, rankset, group_size)  # type: ignore[attr-defined]
     res = AsyncCollectiveTensor(tensor)
     _register_wrapper_tensor(res, tensor)
     return res

@@ -307,9 +307,7 @@ def reduce_scatter_tensor(
     assert (
         self.size(0) % group_size == 0
     ), f"input dimension 0 ({self.size(0)} must be a multiple of group_size {group_size}"
-    tensor = torch._C._dist.reduce_scatter_tensor(  # type: ignore[attr-defined]
-        self, reduceOp, scatter_dim, tag, rankset, group_size
-    )
+    tensor = torch._C._nn.reduce_scatter_tensor(self, reduceOp, scatter_dim, tag, rankset, group_size)  # type: ignore[attr-defined]
     res = AsyncCollectiveTensor(tensor)
     _register_wrapper_tensor(res, tensor)
     return res
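
A hedged usage sketch of the wrappers touched above; it assumes an initialized process group (for example under torchrun with more than one rank) and that, after this revert, the wrappers dispatch to the torch._C._nn bindings:

import torch
import torch.distributed as dist
import torch.distributed._functional_collectives as fc

def demo() -> torch.Tensor:
    # The gloo backend is an assumption for a CPU-only illustration.
    if not dist.is_initialized():
        dist.init_process_group(backend="gloo")
    t = torch.ones(4)
    out = fc.all_reduce(t, "sum", dist.group.WORLD)  # returns an AsyncCollectiveTensor wrapper
    out = fc.wait_tensor(out)                        # explicit wait, mirroring wait_tensor above
    return out
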
