[BE] [1/3] Rewrite `super()` calls in caffe2 and benchmarks (pytorch#94587)

Rewrite Python built-in class `super()` calls to the zero-argument form. Only non-semantic changes are applied.
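For illustration, the typical mechanical change looks like the following (a sketch only; `MyModule` is a hypothetical class, not one of the touched files):

```diff
class MyModule(nn.Module):
    def __init__(self, hidden_size):
-       super(MyModule, self).__init__()
+       super().__init__()
        self.linear = nn.Linear(hidden_size, hidden_size)
```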

This is part of a three-PR stack:

- pytorch#94587
- pytorch#94588
- pytorch#94592

Also, methods whose body consists only of a `super()` call are removed:

```diff
class MyModule(nn.Module):
-   def __init__(self):
-       super().__init__()
-
    def forward(self, ...):
        ...
```

Cases where the rewrite would change semantics are kept unchanged, e.g.:

https://github.com/pytorch/pytorch/blob/f152a79be9612b824e1672b8f8cb88a414ce4c12/caffe2/python/net_printer.py#L184-L190

https://github.com/pytorch/pytorch/blob/f152a79be9612b824e1672b8f8cb88a414ce4c12/test/test_jit_fuser_te.py#L2628-L2635
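For context (a generic sketch, not the exact code at the links above): zero-argument `super()` relies on the implicit `__class__` cell that Python creates only for functions defined lexically inside a `class` body, so a call site like the following must keep the explicit two-argument form:

```python
class Base:
    def greet(self):
        return "base"


def make_greet(cls):
    # This function is defined outside any class body, so it has no implicit
    # __class__ cell; a zero-argument super() here would raise
    # "RuntimeError: super(): __class__ cell not found".
    def greet(self):
        return "derived+" + super(cls, self).greet()
    return greet


class Derived(Base):
    pass


# The method is attached after class creation, so the explicit arguments
# to super() are required and must not be rewritten.
Derived.greet = make_greet(Derived)
print(Derived().greet())  # prints "derived+base"
```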

Pull Request resolved: pytorch#94587
Approved by: https://github.com/ezyang
XuehaiPan authored and pytorchmergebot committed Feb 11, 2023
1 parent aa6f0ac commit 8d45f55
Showing 97 changed files with 207 additions and 239 deletions.
18 changes: 9 additions & 9 deletions .circleci/cimodel/data/binary_build_data.py
@@ -57,7 +57,7 @@ def get_processor_arch_name(gpu_version):

class TopLevelNode(ConfigNode):
    def __init__(self, node_name, config_tree_data, smoke):
-       super(TopLevelNode, self).__init__(None, node_name)
+       super().__init__(None, node_name)

        self.config_tree_data = config_tree_data
        self.props["smoke"] = smoke
@@ -68,7 +68,7 @@ def get_children(self):

class OSConfigNode(ConfigNode):
    def __init__(self, parent, os_name, gpu_versions, py_tree):
-       super(OSConfigNode, self).__init__(parent, os_name)
+       super().__init__(parent, os_name)

        self.py_tree = py_tree
        self.props["os_name"] = os_name
@@ -80,7 +80,7 @@ def get_children(self):

class PackageFormatConfigNode(ConfigNode):
    def __init__(self, parent, package_format, python_versions):
-       super(PackageFormatConfigNode, self).__init__(parent, package_format)
+       super().__init__(parent, package_format)

        self.props["python_versions"] = python_versions
        self.props["package_format"] = package_format
@@ -97,7 +97,7 @@ def get_children(self):

class LinuxGccConfigNode(ConfigNode):
    def __init__(self, parent, gcc_config_variant):
-       super(LinuxGccConfigNode, self).__init__(parent, "GCC_CONFIG_VARIANT=" + str(gcc_config_variant))
+       super().__init__(parent, "GCC_CONFIG_VARIANT=" + str(gcc_config_variant))

        self.props["gcc_config_variant"] = gcc_config_variant

@@ -122,7 +122,7 @@ def get_children(self):

class WindowsLibtorchConfigNode(ConfigNode):
    def __init__(self, parent, libtorch_config_variant):
-       super(WindowsLibtorchConfigNode, self).__init__(parent, "LIBTORCH_CONFIG_VARIANT=" + str(libtorch_config_variant))
+       super().__init__(parent, "LIBTORCH_CONFIG_VARIANT=" + str(libtorch_config_variant))

        self.props["libtorch_config_variant"] = libtorch_config_variant

@@ -132,7 +132,7 @@ def get_children(self):

class ArchConfigNode(ConfigNode):
    def __init__(self, parent, gpu):
-       super(ArchConfigNode, self).__init__(parent, get_processor_arch_name(gpu))
+       super().__init__(parent, get_processor_arch_name(gpu))

        self.props["gpu"] = gpu

@@ -142,7 +142,7 @@ def get_children(self):

class PyVersionConfigNode(ConfigNode):
    def __init__(self, parent, pyver):
-       super(PyVersionConfigNode, self).__init__(parent, pyver)
+       super().__init__(parent, pyver)

        self.props["pyver"] = pyver

@@ -158,14 +158,14 @@ def get_children(self):

class LinkingVariantConfigNode(ConfigNode):
    def __init__(self, parent, linking_variant):
-       super(LinkingVariantConfigNode, self).__init__(parent, linking_variant)
+       super().__init__(parent, linking_variant)

    def get_children(self):
        return [DependencyInclusionConfigNode(self, v) for v in DEPS_INCLUSION_DIMENSIONS]


class DependencyInclusionConfigNode(ConfigNode):
    def __init__(self, parent, deps_variant):
-       super(DependencyInclusionConfigNode, self).__init__(parent, deps_variant)
+       super().__init__(parent, deps_variant)

        self.props["libtorch_variant"] = "-".join([self.parent.get_label(), self.get_label()])
4 changes: 2 additions & 2 deletions .circleci/cimodel/data/pytorch_build_data.py
@@ -12,7 +12,7 @@ def get_major_pyver(dotted_version):

class TreeConfigNode(ConfigNode):
    def __init__(self, parent, node_name, subtree):
-       super(TreeConfigNode, self).__init__(parent, self.modify_label(node_name))
+       super().__init__(parent, self.modify_label(node_name))
        self.subtree = subtree
        self.init2(node_name)

@@ -28,7 +28,7 @@ def get_children(self):

class TopLevelNode(TreeConfigNode):
    def __init__(self, node_name, subtree):
-       super(TopLevelNode, self).__init__(None, node_name, subtree)
+       super().__init__(None, node_name, subtree)

    # noinspection PyMethodMayBeStatic
    def child_constructor(self):
3 changes: 0 additions & 3 deletions android/pytorch_android/generate_test_torchscripts.py
@@ -15,9 +15,6 @@ def scriptAndSave(module, fileName):
    print('=' * 80)

class Test(torch.jit.ScriptModule):
-   def __init__(self):
-       super(Test, self).__init__()
-
    @torch.jit.script_method
    def forward(self, input):
        return None
2 changes: 1 addition & 1 deletion benchmarks/distributed/ddp/benchmark.py
@@ -173,7 +173,7 @@ def generate_target(self):

class TorchvisionBenchmark(Benchmark):
    def __init__(self, device, distributed_backend, bucket_size, model):
-       super(TorchvisionBenchmark, self).__init__(
+       super().__init__(
            device,
            distributed_backend,
            bucket_size,
4 changes: 2 additions & 2 deletions benchmarks/distributed/pipeline/pipe.py
@@ -43,7 +43,7 @@ def forward(self, src):

class PositionalEncodingLayer(nn.Module):
    def __init__(self, d_model, dropout=0.1, max_len=5000):
-       super(PositionalEncodingLayer, self).__init__()
+       super().__init__()
        self.dropout = nn.Dropout(p=dropout)

        pe = torch.zeros(max_len, d_model)
@@ -99,7 +99,7 @@ def __init__(self, ntokens, ninp, nhead, nhid, dropout, initrange, ndecoder):
            layers.append(TransformerDecoderLayer(ninp, nhead, nhid, dropout))

        layers.append(LinearLayer(ninp, ntokens, initrange))
-       super(TransformerLMSequential, self).__init__(*layers)
+       super().__init__(*layers)


def make_model(args, device, ntokens):
2 changes: 1 addition & 1 deletion benchmarks/distributed/rpc/rl/agent.py
@@ -22,7 +22,7 @@ def __init__(self, in_features, nlayers, out_features):
            nlayers (int): Number of layers in the model
            out_features (int): Number of features the model outputs
        """
-       super(Policy, self).__init__()
+       super().__init__()

        self.model = nn.Sequential(
            nn.Flatten(1, -1),
6 changes: 3 additions & 3 deletions benchmarks/dynamo/dist_util.py
@@ -38,7 +38,7 @@ def cleanup():

class CustomLinear(torch.nn.Module):
    def __init__(self, a, b):
-       super(CustomLinear, self).__init__()
+       super().__init__()
        self.weight = nn.Parameter(torch.randn(a, b))

    def forward(self, x):
@@ -47,7 +47,7 @@ def forward(self, x):

class MyModule(torch.nn.Module):
    def __init__(self, a, b):
-       super(MyModule, self).__init__()
+       super().__init__()
        self.net = nn.Sequential(
            nn.Linear(a, b),
            nn.ReLU(),
@@ -59,7 +59,7 @@ def forward(self, x):

class ToyModel(nn.Module):
    def __init__(self):
-       super(ToyModel, self).__init__()
+       super().__init__()
        self.net = nn.Sequential(
            *[nn.Linear(10, 10000), nn.ReLU()]
            + [nn.Linear(10000, 10000), nn.ReLU()]
2 changes: 1 addition & 1 deletion benchmarks/dynamo/huggingface.py
@@ -364,7 +364,7 @@ def rand_int_tensor(device, low, high, shape):

class HuggingfaceRunner(BenchmarkRunner):
    def __init__(self):
-       super(HuggingfaceRunner, self).__init__()
+       super().__init__()
        self.suite_name = "huggingface"

    def load_model(
2 changes: 1 addition & 1 deletion benchmarks/dynamo/timm_models.py
@@ -169,7 +169,7 @@ def populate_family(models):

class TimmRunnner(BenchmarkRunner):
    def __init__(self):
-       super(TimmRunnner, self).__init__()
+       super().__init__()
        self.suite_name = "timm_models"

    def load_model(
2 changes: 1 addition & 1 deletion benchmarks/dynamo/torchbench.py
@@ -195,7 +195,7 @@ def setup_torchbench_cwd():

class TorchBenchmarkRunner(BenchmarkRunner):
    def __init__(self):
-       super(TorchBenchmarkRunner, self).__init__()
+       super().__init__()
        self.suite_name = "torchbench"
        self.optimizer = None

18 changes: 9 additions & 9 deletions benchmarks/fastrnns/custom_lstms.py
@@ -92,7 +92,7 @@ def reverse(lst: List[Tensor]) -> List[Tensor]:

class LSTMCell(jit.ScriptModule):
    def __init__(self, input_size, hidden_size):
-       super(LSTMCell, self).__init__()
+       super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.weight_ih = Parameter(torch.randn(4 * hidden_size, input_size))
@@ -120,7 +120,7 @@ def forward(self, input: Tensor, state: Tuple[Tensor, Tensor]) -> Tuple[Tensor,

class LayerNorm(jit.ScriptModule):
    def __init__(self, normalized_shape):
-       super(LayerNorm, self).__init__()
+       super().__init__()
        if isinstance(normalized_shape, numbers.Integral):
            normalized_shape = (normalized_shape,)
        normalized_shape = torch.Size(normalized_shape)
@@ -146,7 +146,7 @@ def forward(self, input):

class LayerNormLSTMCell(jit.ScriptModule):
    def __init__(self, input_size, hidden_size, decompose_layernorm=False):
-       super(LayerNormLSTMCell, self).__init__()
+       super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.weight_ih = Parameter(torch.randn(4 * hidden_size, input_size))
@@ -183,7 +183,7 @@ def forward(self, input: Tensor, state: Tuple[Tensor, Tensor]) -> Tuple[Tensor,

class LSTMLayer(jit.ScriptModule):
    def __init__(self, cell, *cell_args):
-       super(LSTMLayer, self).__init__()
+       super().__init__()
        self.cell = cell(*cell_args)

    @jit.script_method
@@ -198,7 +198,7 @@ def forward(self, input: Tensor, state: Tuple[Tensor, Tensor]) -> Tuple[Tensor,

class ReverseLSTMLayer(jit.ScriptModule):
    def __init__(self, cell, *cell_args):
-       super(ReverseLSTMLayer, self).__init__()
+       super().__init__()
        self.cell = cell(*cell_args)

    @jit.script_method
@@ -215,7 +215,7 @@ class BidirLSTMLayer(jit.ScriptModule):
    __constants__ = ['directions']

    def __init__(self, cell, *cell_args):
-       super(BidirLSTMLayer, self).__init__()
+       super().__init__()
        self.directions = nn.ModuleList([
            LSTMLayer(cell, *cell_args),
            ReverseLSTMLayer(cell, *cell_args),
@@ -247,7 +247,7 @@ class StackedLSTM(jit.ScriptModule):
    __constants__ = ['layers']  # Necessary for iterating through self.layers

    def __init__(self, num_layers, layer, first_layer_args, other_layer_args):
-       super(StackedLSTM, self).__init__()
+       super().__init__()
        self.layers = init_stacked_lstm(num_layers, layer, first_layer_args,
                                        other_layer_args)

@@ -274,7 +274,7 @@ class StackedLSTM2(jit.ScriptModule):
    __constants__ = ['layers']  # Necessary for iterating through self.layers

    def __init__(self, num_layers, layer, first_layer_args, other_layer_args):
-       super(StackedLSTM2, self).__init__()
+       super().__init__()
        self.layers = init_stacked_lstm(num_layers, layer, first_layer_args,
                                        other_layer_args)

@@ -299,7 +299,7 @@ class StackedLSTMWithDropout(jit.ScriptModule):
    __constants__ = ['layers', 'num_layers']

    def __init__(self, num_layers, layer, first_layer_args, other_layer_args):
-       super(StackedLSTMWithDropout, self).__init__()
+       super().__init__()
        self.layers = init_stacked_lstm(num_layers, layer, first_layer_args,
                                        other_layer_args)
        # Introduces a Dropout layer on the outputs of each LSTM layer except
2 changes: 1 addition & 1 deletion benchmarks/framework_overhead_benchmark/SimpleAddModule.py
@@ -9,7 +9,7 @@ def add_tensors_loop(x, y):

class SimpleAddModule(torch.nn.Module):
    def __init__(self, add_op):
-       super(SimpleAddModule, self).__init__()
+       super().__init__()
        self.add_op = add_op

    def forward(self, x, y):
22 changes: 11 additions & 11 deletions benchmarks/functional_autograd_benchmark/torchaudio_models.py
@@ -27,7 +27,7 @@ class Wav2Letter(nn.Module):
    def __init__(self, num_classes: int = 40,
                 input_type: str = "waveform",
                 num_features: int = 1) -> None:
-       super(Wav2Letter, self).__init__()
+       super().__init__()

        acoustic_num_features = 250 if input_type == "waveform" else num_features
        acoustic_model = nn.Sequential(
@@ -85,7 +85,7 @@ def __init__(self, module):
        Allows handling of variable sequence lengths and minibatch sizes.
        :param module: Module to apply input to.
        """
-       super(SequenceWise, self).__init__()
+       super().__init__()
        self.module = module

    def forward(self, x):
@@ -110,7 +110,7 @@ def __init__(self, seq_module):
        Input needs to be in the shape of (BxCxDxT)
        :param seq_module: The sequential module containing the conv stack.
        """
-       super(MaskConv, self).__init__()
+       super().__init__()
        self.seq_module = seq_module

    def forward(self, x, lengths):
@@ -142,7 +142,7 @@ def forward(self, input_):

class BatchRNN(nn.Module):
    def __init__(self, input_size, hidden_size, rnn_type=nn.LSTM, bidirectional=False, batch_norm=True):
-       super(BatchRNN, self).__init__()
+       super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.bidirectional = bidirectional
@@ -170,7 +170,7 @@ class Lookahead(nn.Module):
    # input shape - sequence, batch, feature - TxNxH
    # output shape - same as input
    def __init__(self, n_features, context):
-       super(Lookahead, self).__init__()
+       super().__init__()
        assert context > 0
        self.context = context
        self.n_features = n_features
@@ -193,7 +193,7 @@ def __repr__(self):
class DeepSpeech(nn.Module):
    def __init__(self, rnn_type, labels, rnn_hidden_size, nb_layers, audio_conf,
                 bidirectional, context=20):
-       super(DeepSpeech, self).__init__()
+       super().__init__()

        self.hidden_size = rnn_hidden_size
        self.hidden_layers = nb_layers
@@ -298,7 +298,7 @@ class PositionalEncoding(nn.Module):
    """

    def __init__(self, d_model, dropout=0.1, max_len=5000):
-       super(PositionalEncoding, self).__init__()
+       super().__init__()
        self.dropout = nn.Dropout(p=dropout)

        pe = torch.zeros(max_len, d_model)
@@ -327,7 +327,7 @@ class TransformerModel(nn.Module):
    """Container module with an encoder, a recurrent or transformer module, and a decoder."""

    def __init__(self, ntoken, ninp, nhead, nhid, nlayers, dropout=0.5):
-       super(TransformerModel, self).__init__()
+       super().__init__()
        try:
            from torch.nn import TransformerEncoder, TransformerEncoderLayer
        except Exception as e:
@@ -392,7 +392,7 @@ def __init__(self, nhead, in_proj_container, attention_layer, out_proj):
            >>> print(attn_output.shape)
            >>> torch.Size([21, 64, 10])
        """
-       super(MultiheadAttentionContainer, self).__init__()
+       super().__init__()
        self.nhead = nhead
        self.in_proj_container = in_proj_container
        self.attention_layer = attention_layer
@@ -456,7 +456,7 @@ def __init__(self, dropout=0.0):
            >>> print(attn_output.shape, attn_weights.shape)
            torch.Size([256, 21, 3]) torch.Size([256, 21, 21])
        """
-       super(ScaledDotProduct, self).__init__()
+       super().__init__()
        self.dropout = dropout

    def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor,
@@ -532,7 +532,7 @@ def __init__(self, query_proj, key_proj, value_proj):
            value_proj: a proj layer for value.
        """

-       super(InProjContainer, self).__init__()
+       super().__init__()
        self.query_proj = query_proj
        self.key_proj = key_proj
        self.value_proj = value_proj