From 3173fd9a7159bdf681be33e40722352c2d01ac4c Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Mon, 27 Jun 2022 17:43:51 -0600 Subject: [PATCH 01/30] Sphinx doc improvements --- captum/optim/_core/output_hook.py | 13 +- captum/optim/_param/image/transforms.py | 152 +++++++++++---------- captum/optim/_utils/circuits.py | 27 ++-- captum/optim/_utils/image/atlas.py | 45 +++--- captum/optim/_utils/image/common.py | 82 ++++++----- captum/optim/_utils/reducer.py | 31 +++-- captum/optim/models/_image/inception_v1.py | 84 ++++++------ 7 files changed, 244 insertions(+), 190 deletions(-) diff --git a/captum/optim/_core/output_hook.py b/captum/optim/_core/output_hook.py index 4903155e74..85438fdfc6 100644 --- a/captum/optim/_core/output_hook.py +++ b/captum/optim/_core/output_hook.py @@ -101,11 +101,11 @@ def __init__(self, model: nn.Module, targets: Iterable[nn.Module]) -> None: """ Args: - model (nn.Module): The reference to PyTorch model instance. - targets (nn.Module or list of nn.Module): The target layers to + model (nn.Module): The reference to PyTorch model instance. + targets (nn.Module or list of nn.Module): The target layers to collect activations from. """ - super(ActivationFetcher, self).__init__() + super().__init__() self.model = model self.layers = ModuleOutputsHook(targets) @@ -113,12 +113,13 @@ def __call__(self, input_t: TupleOfTensorsOrTensorType) -> ModuleOutputMapping: """ Args: - input_t (tensor or tuple of tensors, optional): The input to use + input_t (tensor or tuple of tensors, optional): The input to use with the specified model. Returns: - activations_dict: An dict containing the collected activations. The keys - for the returned dictionary are the target layers. + activations_dict (ModuleOutputMapping): An dict containing the collected + activations. The keys for the returned dictionary are the target + layers. """ try: diff --git a/captum/optim/_param/image/transforms.py b/captum/optim/_param/image/transforms.py index 4ec8762637..f02fd5e3a4 100644 --- a/captum/optim/_param/image/transforms.py +++ b/captum/optim/_param/image/transforms.py @@ -22,7 +22,7 @@ def __init__(self, background: Optional[torch.Tensor] = None) -> None: background (tensor, optional): An NCHW image tensor to be used as the Alpha channel's background. - Default: None + Default: ``None`` """ super().__init__() self.background = background @@ -143,12 +143,12 @@ def _forward(self, x: torch.Tensor, inverse: bool = False) -> torch.Tensor: """ Args: - x (torch.tensor): A CHW or NCHW RGB or RGBA image tensor. - inverse (bool, optional): Whether to recorrelate or decorrelate colors. - Default: False. + x (torch.tensor): A CHW or NCHW RGB or RGBA image tensor. + inverse (bool, optional): Whether to recorrelate or decorrelate colors. + Default: ``False`` Returns: - chw (torch.tensor): A tensor with it's colors recorrelated or + chw (torch.tensor): A tensor with it's colors recorrelated or decorrelated. """ @@ -197,12 +197,12 @@ def _forward_without_named_dims( Args: - x (torch.tensor): A CHW pr NCHW RGB or RGBA image tensor. - inverse (bool, optional): Whether to recorrelate or decorrelate colors. - Default: False. + x (torch.tensor): A CHW pr NCHW RGB or RGBA image tensor. + inverse (bool, optional): Whether to recorrelate or decorrelate colors. + Default: ``False`` Returns: - chw (torch.tensor): A tensor with it's colors recorrelated or + chw (torch.tensor): A tensor with it's colors recorrelated or decorrelated. 
""" @@ -244,12 +244,12 @@ def forward(self, x: torch.Tensor, inverse: bool = False) -> torch.Tensor: Args: - x (torch.tensor): A CHW or NCHW RGB or RGBA image tensor. - inverse (bool, optional): Whether to recorrelate or decorrelate colors. - Default: False. + x (torch.tensor): A CHW or NCHW RGB or RGBA image tensor. + inverse (bool, optional): Whether to recorrelate or decorrelate colors. + Default: ``False`` Returns: - chw (torch.tensor): A tensor with it's colors recorrelated or + chw (torch.tensor): A tensor with it's colors recorrelated or decorrelated. """ if torch.jit.is_scripting(): @@ -291,18 +291,20 @@ def __init__( pixels_from_edges (bool, optional): Whether to treat crop size values as the number of pixels from the tensor's edge, or an exact shape in the center. - Default: False + Default: ``False`` offset_left (bool, optional): If the cropped away sides are not equal in size, offset center by +1 to the left and/or top. - This parameter is only valid when `pixels_from_edges` is False. - Default: False - padding_mode (optional, str): One of "constant", "reflect", "replicate" - or "circular". This parameter is only used if the crop size is larger - than the image size. - Default: "constant" - padding_value (float, optional): fill value for "constant" padding. This - parameter is only used if the crop size is larger than the image size. - Default: 0.0 + This parameter is only valid when ``pixels_from_edges`` is + ``False``. + Default: ``False`` + padding_mode (optional, str): One of ``"constant"``, ``"reflect"``, + ``"replicate"``, or ``"circular"``. This parameter is only used if the + crop size is larger than the image size. + Default: ``"constant"`` + padding_value (float, optional): fill value for ``"constant"`` padding. + This parameter is only used if the crop size is larger than the image + size. + Default: ``0.0`` """ super().__init__() if not hasattr(size, "__iter__"): @@ -360,23 +362,25 @@ def center_crop( Args: - input (tensor): A CHW or NCHW image tensor to center crop. + input (tensor): A CHW or NCHW image tensor to center crop. size (int, sequence, int): Number of pixels to center crop away. pixels_from_edges (bool, optional): Whether to treat crop size values as the number of pixels from the tensor's edge, or an exact shape in the center. - Default: False + Default: ``False`` offset_left (bool, optional): If the cropped away sides are not equal in size, offset center by +1 to the left and/or top. - This parameter is only valid when `pixels_from_edges` is False. - Default: False - padding_mode (optional, str): One of "constant", "reflect", "replicate" or - "circular". This parameter is only used if the crop size is larger than - the image size. - Default: "constant" - padding_value (float, optional): fill value for "constant" padding. This - parameter is only used if the crop size is larger than the image size. - Default: 0.0 + This parameter is only valid when ``pixels_from_edges`` is + ``False``. + Default: ``False`` + padding_mode (optional, str): One of ``"constant"``, ``"reflect"``, + ``"replicate"``, or ``"circular"``. This parameter is only used if the crop + size is larger than the image size. + Default: ``"constant"`` + padding_value (float, optional): fill value for ``"constant"`` padding. + This parameter is only used if the crop size is larger than the image + size. + Default: ``0.0`` Returns: **tensor**: A center cropped *tensor*. 
@@ -460,19 +464,20 @@ def __init__( scale (float, sequence, or torch.distribution): Sequence of rescaling values to randomly select from, or a torch.distributions instance. mode (str, optional): Interpolation mode to use. See documentation of - F.interpolate for more details. One of; "bilinear", "nearest", "area", - or "bicubic". - Default: "bilinear" + ``F.interpolate`` for more details. One of; ``"bilinear"``, + ``"nearest"``, ``"area"``, or ``"bicubic"``. + Default: ``"bilinear"`` align_corners (bool, optional): Whether or not to align corners. See - documentation of F.interpolate for more details. - Default: False + documentation of ``F.interpolate`` for more details. + Default: ``False`` recompute_scale_factor (bool, optional): Whether or not to recompute the - scale factor See documentation of F.interpolate for more details. - Default: False + scale factor See documentation of ``F.interpolate`` for more details. + Default: ``False`` antialias (bool, optional): Whether or not use to anti-aliasing. This - feature is currently only available for "bilinear" and "bicubic" - modes. See documentation of F.interpolate for more details. - Default: False + feature is currently only available for ``"bilinear"`` and + ``"bicubic"`` modes. See documentation of ``F.interpolate`` for more + details. + Default: ``False`` """ super().__init__() assert mode not in ["linear", "trilinear"] @@ -593,16 +598,17 @@ def __init__( scale (float, sequence, or torch.distribution): Sequence of rescaling values to randomly select from, or a torch.distributions instance. mode (str, optional): Interpolation mode to use. See documentation of - F.grid_sample for more details. One of; "bilinear", "nearest", or - "bicubic". - Default: "bilinear" + ``F.grid_sample`` for more details. One of; ``"bilinear"``, + ``"nearest"``, or ``"bicubic"``. + Default: ``"bilinear"`` padding_mode (str, optional): Padding mode for values that fall outside of - the grid. See documentation of F.grid_sample for more details. One of; - "zeros", "border", or "reflection". - Default: "zeros" + the grid. See documentation of ``F.grid_sample`` for more details. One + of; ``"zeros"``, ``"border"``, or ``"reflection"``. + Default: ``"zeros"`` align_corners (bool, optional): Whether or not to align corners. See - documentation of F.affine_grid & F.grid_sample for more details. - Default: False + documentation of ``F.affine_grid`` & ``F.grid_sample`` for more + details. + Default: ``False`` """ super().__init__() if isinstance(scale, torch.distributions.distribution.Distribution): @@ -772,19 +778,20 @@ def __init__( """ Args: - degrees (float, sequence, or torch.distribution): Tuple of degrees values - to randomly select from, or a torch.distributions instance. + degrees (float, sequence, or torch.distribution): Tuple or list of degrees + values to randomly select from, or a ``torch.distributions`` instance. mode (str, optional): Interpolation mode to use. See documentation of - F.grid_sample for more details. One of; "bilinear", "nearest", or - "bicubic". - Default: "bilinear" + F.grid_sample for more details. One of; ``"bilinear"``, ``"nearest"``, + or ``"bicubic"``. + Default: ``"bilinear"`` padding_mode (str, optional): Padding mode for values that fall outside of the grid. See documentation of F.grid_sample for more details. One of; - "zeros", "border", or "reflection". - Default: "zeros" + ``"zeros"``, ``"border"``, or ``"reflection"``. + Default: ``"zeros"`` align_corners (bool, optional): Whether or not to align corners. 
See - documentation of F.affine_grid & F.grid_sample for more details. - Default: False + documentation of ``F.affine_grid`` & ``F.grid_sample`` for more + details. + Default: ``False`` """ super().__init__() if isinstance(degrees, torch.distributions.distribution.Distribution): @@ -1117,7 +1124,8 @@ def __init__(self, warp: bool = False) -> None: Args: warp (bool, optional): Whether or not to make the resulting RGB colors more - distict from each other. Default is set to False. + distict from each other. + Default: ``False`` """ super().__init__() self.warp = warp @@ -1238,26 +1246,28 @@ def __init__( Args: padding_transform (nn.Module, optional): A padding module instance. No - padding will be applied before transforms if set to None. - Default: nn.ConstantPad2d(2, value=0.5) + padding will be applied before transforms if set to ``None``. + Default: ``nn.ConstantPad2d(2, value=0.5)`` translate (int or list of int, optional): The max horizontal and vertical translation to use for each jitter transform. - Default: [4] * 10 + Default: ``[4] * 10`` scale (float, sequence, or torch.distribution, optional): Sequence of rescaling values to randomly select from, or a torch.distributions - instance. If set to None, no rescaling transform will be used. - Default: A set of optimal values. + instance. If set to ``None``, no rescaling transform will be used. + Default: ``[0.995**n for n in range(-5, 80)] + [0.998**n for n in 2 * + list(range(20, 40))]`` degrees (float, sequence, or torch.distribution, optional): Sequence of degrees to randomly select from, or a torch.distributions - instance. If set to None, no rotation transform will be used. - Default: A set of optimal values. + instance. If set to ``None``, no rotation transform will be used. + Default: ``list(range(-20, 20)) + list(range(-10, 10)) + + list(range(-5, 5)) + 5 * [0]`` final_translate (int, optional): The max horizontal and vertical translation to use for the final jitter transform on fractional pixels. - Default: 2 + Default: ``2`` crop_or_pad_output (bool, optional): Whether or not to crop or pad the transformed output so that it is the same shape as the input. - Default: False + Default: ``False`` """ super().__init__() self.padding_transform = padding_transform diff --git a/captum/optim/_utils/circuits.py b/captum/optim/_utils/circuits.py index 9c84d16247..d2731a9bf9 100644 --- a/captum/optim/_utils/circuits.py +++ b/captum/optim/_utils/circuits.py @@ -24,20 +24,25 @@ def extract_expanded_weights( See: https://distill.pub/2020/circuits/visualizing-weights/ Args: - model (nn.Module): The reference to PyTorch model instance. - target1 (nn.module): The starting target layer. Must be below the layer - specified for target2. - target2 (nn.Module): The end target layer. Must be above the layer - specified for target1. - crop_shape (int or tuple of ints, optional): Specify the exact output size - to crop out. - model_input (tensor or tuple of tensors, optional): The input to use + + model (nn.Module): The reference to PyTorch model instance. + target1 (nn.module): The starting target layer. Must be below the layer + specified for ``target2``. + target2 (nn.Module): The end target layer. Must be above the layer + specified for ``target1``. + crop_shape (int or tuple of ints, optional): Specify the exact output size + to crop out. Set to ``None`` for no cropping. + Default: ``None`` + model_input (tensor or tuple of tensors, optional): The input to use with the specified model. 
- crop_func (Callable, optional): Specify a function to crop away the padding + Default: ``torch.zeros(1, 3, 224, 224)`` + crop_func (Callable, optional): Specify a function to crop away the padding from the output weights. + Default: ``center_crop`` + Returns: - *tensor*: A tensor containing the expanded weights in the form of: - (target2 output channels, target1 output channels, height, width) + *tensor* (torch.Tensor): A tensor containing the expanded weights in the form + of: (target2 output channels, target1 output channels, height, width) """ if isinstance(model_input, torch.Tensor): model_input = model_input.to(next(model.parameters()).device) diff --git a/captum/optim/_utils/image/atlas.py b/captum/optim/_utils/image/atlas.py index 5954a3a471..cab464596b 100644 --- a/captum/optim/_utils/image/atlas.py +++ b/captum/optim/_utils/image/atlas.py @@ -18,13 +18,13 @@ def normalize_grid( with a shape of: [n_points, n_axes]. min_percentile (float, optional): The minimum percentile to use when normalizing the tensor. Value must be in the range [0, 1]. - Default: 0.01 + Default: ``0.01`` max_percentile (float, optional): The maximum percentile to use when normalizing the tensor. Value must be in the range [0, 1]. - Default: 0.99 + Default: ``0.99`` relative_margin (float, optional): The relative margin to use when normalizing the tensor. - Default: 0.1 + Default: ``0.1`` Returns: normalized_grid (torch.tensor): A normalized xy coordinate grid tensor. @@ -71,21 +71,26 @@ def calc_grid_indices( Each cell in the above example would contain a list of indices inside a tensor for that particular cell, like this: - indices = [ - [tensor([0, 5]), tensor([1]), tensor([2, 3])], - [tensor([]), tensor([4]), tensor([])], - [tensor([6, 7, 8]), tensor([]), tensor([])], - ] + + :: + + indices = [ + [tensor([0, 5]), tensor([1]), tensor([2, 3])], + [tensor([]), tensor([4]), tensor([])], + [tensor([6, 7, 8]), tensor([]), tensor([])], + ] Args: + xy_grid (torch.tensor): The xy coordinate grid activation samples, with a shape of: [n_points, 2]. grid_size (Tuple[int, int]): The grid_size of grid cells to use. The grid_size variable should be in the format of: [width, height]. x_extent (Tuple[float, float], optional): The x axis range to use. - Default: (0.0, 1.0) + Default: ``(0.0, 1.0)`` y_extent (Tuple[float, float], optional): The y axis range to use. - Default: (0.0, 1.0) + Default: ``(0.0, 1.0)`` + Returns: indices (list of list of torch.Tensors): List of lists of grid indices stored inside tensors to use. Each 1D tensor of indices has a size of: @@ -134,11 +139,11 @@ def compute_avg_cell_samples( 0 to n_indices. raw_samples (torch.tensor): Raw unmodified activation or attribution samples, with a shape of: [n_samples, n_channels]. - grid_size (Tuple[int, int]): The grid_size of grid cells to use. The grid_size - variable should be in the format of: [width, height]. + grid_size (Tuple[int, int]): The grid_size of grid cells to use. The + ``grid_size`` variable should be in the format of: [width, height]. min_density (int, optional): The minimum number of points for a cell to be counted. - Default: 8 + Default: ``8`` Returns: cell_vecs (torch.tensor): A tensor containing all the direction vectors that @@ -186,18 +191,18 @@ def create_atlas_vectors( of: [n_points, 2]. raw_activations (torch.tensor): Raw unmodified activation samples, with a shape of: [n_samples, n_channels]. - grid_size (Tuple[int, int]): The size of grid cells to use. The grid_size + grid_size (Tuple[int, int]): The size of grid cells to use. 
The ``grid_size`` variable should be in the format of: [width, height]. min_density (int, optional): The minimum number of points for a cell to be counted. - Default: 8 + Default: ``8`` normalize (bool, optional): Whether or not to remove outliers from an xy coordinate grid tensor, and rescale it to [0, 1]. - Default: True + Default: ``True`` x_extent (Tuple[float, float], optional): The x axis range to use. - Default: (0.0, 1.0) + Default: ``(0.0, 1.0)`` y_extent (Tuple[float, float], optional): The y axis range to use. - Default: (0.0, 1.0) + Default: ``(0.0, 1.0)`` Returns: grid_vecs (torch.tensor): A tensor containing all the direction vectors that @@ -243,8 +248,8 @@ def create_atlas( grid_size (Tuple[int, int]): The size of grid cells to use. The grid_size variable should be in the format of: [width, height]. base_tensor (Callable, optional): What to use for the atlas base tensor. Basic - choices are: torch.ones or torch.zeros. - Default: torch.ones + choices are: ``torch.ones`` or ``torch.zeros``. + Default: ``torch.ones`` Returns: atlas_canvas (torch.tensor): The full activation atlas visualization, with a diff --git a/captum/optim/_utils/image/common.py b/captum/optim/_utils/image/common.py index f1cdc5f477..4ea2ff8516 100644 --- a/captum/optim/_utils/image/common.py +++ b/captum/optim/_utils/image/common.py @@ -27,13 +27,13 @@ def make_grid_image( tiles (torch.Tensor or list of torch.Tensor): A stack of NCHW image tensors or a list of NCHW image tensors to create a grid from. - nrow (int, optional): The number of rows to use for the grid image. - Default: 4 + images_per_row (int, optional): The number of rows to use for the grid image. + Default: ``4`` padding (int, optional): The amount of padding between images in the grid images. - padding: 2 + padding: ``2`` pad_value (float, optional): The value to use for the padding. - Default: 0.0 + Default: ``0.0`` Returns: grid_img (torch.Tensor): The full NCHW grid image. @@ -70,7 +70,7 @@ def make_grid_image( def show( x: torch.Tensor, - figsize: Optional[Tuple[int, int]] = None, + figsize: Optional[Tuple[int, int]] = (10, 10), scale: float = 255.0, images_per_row: Optional[int] = None, padding: int = 2, @@ -84,17 +84,20 @@ def show( x (torch.Tensor): The tensor you want to display as an image. figsize (Tuple[int, int], optional): height & width to use for displaying the image figure. - scale (float): Value to multiply the input tensor by so that + Default: ``(10, 10)`` + scale (float, optional): Value to multiply the input tensor by so that it's value range is [0-255] for display. + Default: ``255.0`` images_per_row (int, optional): The number of images per row to use for the - grid image. Default is set to None for no grid image creation. - Default: None + grid image. Default is set to ``None`` for no grid image creation. + Default: ``None`` padding (int, optional): The amount of padding between images in the grid - images. This parameter only has an effect if nrow is not None. - Default: 2 + images. This parameter only has an effect if ``images_per_row`` is not + ``None``. + Default: ``2`` pad_value (float, optional): The value to use for the padding. This parameter - only has an effect if nrow is not None. - Default: 0.0 + only has an effect if ``images_per_row`` is not ``None``. + Default: ``0.0`` """ if x.dim() not in [3, 4]: @@ -133,18 +136,20 @@ def save_tensor_as_image( filename (str): The filename to use when saving the image. 
scale (float, optional): Value to multiply the input tensor by so that it's value range is [0-255] for saving. + Default: ``255.0`` mode (str, optional): A PIL / Pillow supported colorspace. Default is set to None for automatic RGB / RGBA detection and usage. - Default: None + Default: ``None`` images_per_row (int, optional): The number of images per row to use for the grid image. Default is set to None for no grid image creation. - Default: None + Default: ``None`` padding (int, optional): The amount of padding between images in the grid - images. This parameter only has an effect if `nrow` is not None. - Default: 2 + images. This parameter only has an effect if ``images_per_row`` is not + ``None``. + Default: ``2`` pad_value (float, optional): The value to use for the padding. This parameter - only has an effect if `nrow` is not None. - Default: 0.0 + only has an effect if ``images_per_row`` is not ``None``. + Default: ``0.0`` """ if x.dim() not in [3, 4]: @@ -170,14 +175,14 @@ def get_neuron_pos( """ Args: - H (int) The height - W (int) The width + H (int): The h position to use. + W (int): The w position to use. x (int, optional): Optionally specify and exact x location of the neuron. If - set to None, then the center x location will be used. - Default: None + set to ``None``, then the center x location will be used. + Default: ``None`` y (int, optional): Optionally specify and exact y location of the neuron. If - set to None, then the center y location will be used. - Default: None + set to ``None``, then the center y location will be used. + Default: ``None`` Return: Tuple[_x, _y] (Tuple[int, int]): The x and y dimensions of the neuron. @@ -208,17 +213,22 @@ def _dot_cossim( a specified dimension. Args: + x (torch.Tensor): The tensor that you wish to compute the cosine similarity for in relation to tensor y. y (torch.Tensor): The tensor that you wish to compute the cosine similarity for in relation to tensor x. cossim_pow (float, optional): The desired cosine similarity power to use. + Default: ``0.0`` dim (int, optional): The target dimension for computing cosine similarity. + Default: ``1`` eps (float, optional): If cossim_pow is greater than zero, the desired epsilon value to use for cosine similarity calculations. + Default: ``1e-8`` + Returns: tensor (torch.Tensor): Dot cosine similarity between x and y, along the - specified dim. + specified dim. """ dot = torch.sum(x * y, dim) @@ -241,13 +251,16 @@ def hue_to_rgb( ) -> torch.Tensor: """ Create an RGB unit vector based on a hue of the input angle. + Args: + angle (float): The hue angle to create an RGB color for. device (torch.device, optional): The device to create the angle color tensor on. - Default: torch.device("cpu") + Default: ``torch.device("cpu")`` warp (bool, optional): Whether or not to make colors more distinguishable. - Default: True + Default: ``True`` + Returns: color_vec (torch.Tensor): A color vector. """ @@ -288,11 +301,12 @@ def nchannels_to_rgb( Args: - x (torch.Tensor): NCHW image tensor to transform into RGB image. - warp (bool, optional): Whether or not to make colors more distinguishable. - Default: True + x (torch.Tensor): NCHW image tensor to transform into RGB image. + warp (bool, optional): Whether or not to make colors more distinguishable. + Default: ``True`` eps (float, optional): An optional epsilon value. - Default: 1e-4 + Default: ``1e-4`` + Returns: tensor (torch.Tensor): An NCHW RGB image tensor. """ @@ -326,10 +340,12 @@ def weights_to_heatmap_2d( no excitation or inhibition. 
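+
+    Example (a minimal sketch, assuming the module is imported as ``opt`` as in
+    the other examples in this patch; the input weights here are random)::
+
+        >>> weight = torch.randn(128, 128)
+        >>> # Scale the weights to the expected [-1, 1] range before
+        >>> # rendering them as a heatmap
+        >>> heatmap = opt.weights_to_heatmap_2d(weight / weight.abs().max())
+        >>> opt.show(heatmap)
+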
Args: - weight (torch.Tensor): A 2d tensor to create the heatmap from. - colors (list of str): A list of 5 strings containing hex triplet + + weight (torch.Tensor): A 2d tensor to create the heatmap from. + colors (list of str, optional): A list of 5 strings containing hex triplet (six digit), three-byte hexadecimal color values to use for coloring the heatmap. + Default: ``["0571b0", "92c5de", "f7f7f7", "f4a582", "ca0020"]`` Returns: color_tensor (torch.Tensor): A weight heatmap. diff --git a/captum/optim/_utils/reducer.py b/captum/optim/_utils/reducer.py index 2696d003d6..9393681759 100644 --- a/captum/optim/_utils/reducer.py +++ b/captum/optim/_utils/reducer.py @@ -22,12 +22,14 @@ class ChannelReducer: See here for more information: https://distill.pub/2018/building-blocks/ Args: - n_components (int, optional): The number of channels to reduce the target + + n_components (int, optional): The number of channels to reduce the target dimension to. - reduction_alg (str or callable, optional): The desired dimensionality - reduction algorithm to use. The default reduction_alg is set to NMF from - sklearn, which requires users to put inputs on CPU before passing them to - fit_transform. + reduction_alg (str or callable, optional): The desired dimensionality + reduction algorithm to use. The default ``reduction_alg`` is set to NMF + from sklearn, which requires users to put inputs on CPU before passing them + to ``fit_transform``. + Default: ``NMF`` **kwargs (optional): Arbitrary keyword arguments used by the specified reduction_alg. """ @@ -71,11 +73,15 @@ def fit_transform( ) -> torch.Tensor: """ Perform dimensionality reduction on an input tensor. + Args: - tensor (tensor): A tensor to perform dimensionality reduction on. - swap_2nd_and_last_dims (bool, optional): If true, input channels are + + tensor (tensor): A tensor to perform dimensionality reduction on. + swap_2nd_and_last_dims (bool, optional): If ``True``, input channels are expected to be in the second dimension unless the input tensor has a - shape of CHW. Default is set to True. + shape of CHW. + Default: ``True``. + Returns: *tensor*: A tensor with one of it's dimensions reduced. """ @@ -131,10 +137,13 @@ def posneg(x: torch.Tensor, dim: int = 0) -> torch.Tensor: NMF with regular NMF. Args: - x (tensor): A tensor to make positive. - dim (int, optional): The dimension to concatinate the two tensor halves at. + + x (tensor): A tensor to make positive. + dim (int, optional): The dimension to concatinate the two tensor halves at. + Default: ``0`` + Returns: - tensor (torch.tensor): A positive tensor for one-sided dimensionality + tensor (torch.tensor): A positive tensor for one-sided dimensionality reduction. """ diff --git a/captum/optim/models/_image/inception_v1.py b/captum/optim/models/_image/inception_v1.py index d24c87d42d..0f49ae5253 100644 --- a/captum/optim/models/_image/inception_v1.py +++ b/captum/optim/models/_image/inception_v1.py @@ -17,36 +17,43 @@ def googlenet( r"""GoogLeNet (also known as Inception v1 & Inception 5h) model architecture from `"Going Deeper with Convolutions" `_. + Example:: + + >>> model = opt.models.googlenet(pretrained=True) + >>> output = model(torch.zeros(1, 3, 224, 224)) + Args: - pretrained (bool, optional): If True, returns a model pre-trained on ImageNet. - Default: False - progress (bool, optional): If True, displays a progress bar of the download to - stderr - Default: True + pretrained (bool, optional): If ``True``, returns a model pre-trained on + ImageNet. 
+ Default: ``False`` + progress (bool, optional): If ``True``, displays a progress bar of the download + to stderr. + Default: ``True`` model_path (str, optional): Optional path for InceptionV1 model file. - Default: None - replace_relus_with_redirectedrelu (bool, optional): If True, return pretrained - model with Redirected ReLU in place of ReLU layers. - Default: *True* when pretrained is True otherwise *False* - use_linear_modules_only (bool, optional): If True, return pretrained + Default: ``None`` + replace_relus_with_redirectedrelu (bool, optional): If ``True``, return + pretrained model with Redirected ReLU in place of ReLU layers. + Default: *``True``* when pretrained is True otherwise *``False``* + use_linear_modules_only (bool, optional): If ``True``, return pretrained model with all nonlinear layers replaced with linear equivalents. - Default: False - aux_logits (bool, optional): If True, adds two auxiliary branches that can + Default: ``False`` + aux_logits (bool, optional): If ``True``, adds two auxiliary branches that can improve training. - Default: False + Default: ``False`` out_features (int, optional): Number of output features in the model used for training. - Default: 1008 - transform_input (bool, optional): If True, preprocesses the input according to - the method with which it was trained on ImageNet. - Default: False - bgr_transform (bool, optional): If True and transform_input is True, perform an - RGB to BGR transform in the internal preprocessing. - Default: False + Default: ``1008`` + transform_input (bool, optional): If ``True``, preprocesses the input according + to the method with which it was trained on ImageNet. + Default: ``False`` + bgr_transform (bool, optional): If ``True`` and ``transform_input`` is + ``True``, perform an RGB to BGR transform in the internal + preprocessing. + Default: ``False`` Returns: - **InceptionV1** (InceptionV1): An Inception5h model. + **model** (InceptionV1): An Inception5h model instance. """ if pretrained: @@ -93,24 +100,25 @@ def __init__( """ Args: - replace_relus_with_redirectedrelu (bool, optional): If True, return + replace_relus_with_redirectedrelu (bool, optional): If ``True``, return pretrained model with Redirected ReLU in place of ReLU layers. - Default: False - use_linear_modules_only (bool, optional): If True, return pretrained + Default: ``False`` + use_linear_modules_only (bool, optional): If ``True``, return pretrained model with all nonlinear layers replaced with linear equivalents. - Default: False + Default: ``False`` aux_logits (bool, optional): If True, adds two auxiliary branches that can improve training. - Default: False + Default: ``False`` out_features (int, optional): Number of output features in the model used for training. - Default: 1008 - transform_input (bool, optional): If True, preprocesses the input according - to the method with which it was trained on ImageNet. - Default: False - bgr_transform (bool, optional): If True and transform_input is True, - perform an RGB to BGR transform in the internal preprocessing. - Default: False + Default: ``1008`` + transform_input (bool, optional): If ``True``, preprocesses the input + according to the method with which it was trained on ImageNet. + Default: ``False`` + bgr_transform (bool, optional): If ``True`` and ``transform_input`` is + ``True``, perform an RGB to BGR transform in the internal + preprocessing. 
+ Default: ``False`` """ super().__init__() self.aux_logits = aux_logits @@ -293,10 +301,10 @@ def __init__( pool_proj (int, optional): activ (type of nn.Module, optional): The nn.Module class type to use for activation layers. - Default: nn.ReLU + Default: ``nn.ReLU`` p_layer (type of nn.Module, optional): The nn.Module class type to use for pooling layers. - Default: nn.MaxPool2d + Default: ``nn.MaxPool2d`` """ super().__init__() self.conv_1x1 = nn.Conv2d( @@ -390,13 +398,13 @@ def __init__( in_channels (int, optional): The number of input channels to use for the auxiliary branch. - Default: 508 + Default: ``508`` out_features (int, optional): The number of output features to use for the auxiliary branch. - Default: 1008 + Default: ``1008`` activ (type of nn.Module, optional): The nn.Module class type to use for activation layers. - Default: nn.ReLU + Default: ``nn.ReLU`` """ super().__init__() self.avg_pool = nn.AdaptiveAvgPool2d((4, 4)) From fdba4e2ed77a84bf37d111875bc21b7db0ddbf64 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Tue, 28 Jun 2022 19:29:58 -0600 Subject: [PATCH 02/30] Add function links to atlas docs --- captum/optim/_utils/image/atlas.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/captum/optim/_utils/image/atlas.py b/captum/optim/_utils/image/atlas.py index cab464596b..d53cac84dc 100644 --- a/captum/optim/_utils/image/atlas.py +++ b/captum/optim/_utils/image/atlas.py @@ -56,8 +56,8 @@ def calc_grid_indices( This function draws a 2D grid across the irregular grid of points, and then groups point indices based on the grid cell they fall within. The grid cells are then filled with 1D tensors that have anywhere from 0 to n_indices values in them. The - sets of grid indices can then be used with the compute_avg_cell_samples function - to create atlas grid cell direction vectors. + sets of grid indices can then be used with the :func:`compute_avg_cell_samples` + function to create atlas grid cell direction vectors. Indices are stored for grid cells in an xy matrix, where the outer lists represent x positions and the inner lists represent y positions. Each grid cell is filled @@ -84,8 +84,8 @@ def calc_grid_indices( xy_grid (torch.tensor): The xy coordinate grid activation samples, with a shape of: [n_points, 2]. - grid_size (Tuple[int, int]): The grid_size of grid cells to use. The grid_size - variable should be in the format of: [width, height]. + grid_size (Tuple[int, int]): The grid_size of grid cells to use. The + ``grid_size`` variable should be in the format of: [width, height]. x_extent (Tuple[float, float], optional): The x axis range to use. Default: ``(0.0, 1.0)`` y_extent (Tuple[float, float], optional): The y axis range to use. @@ -126,8 +126,8 @@ def compute_avg_cell_samples( """ Create direction vectors for sets of activation samples, attribution samples, and grid indices. Grid cells without the minimum number of points as specified by - min_density will be ignored. The calc_grid_indices function can be used to produce - the values required for the grid_indices variable. + ``min_density`` will be ignored. The :func:`calc_grid_indices` function can be used + to produce the values required for the ``grid_indices`` variable. Carter, et al., "Activation Atlas", Distill, 2019. https://distill.pub/2019/activation-atlas/ @@ -139,8 +139,8 @@ def compute_avg_cell_samples( 0 to n_indices. raw_samples (torch.tensor): Raw unmodified activation or attribution samples, with a shape of: [n_samples, n_channels]. 
- grid_size (Tuple[int, int]): The grid_size of grid cells to use. The - ``grid_size`` variable should be in the format of: [width, height]. + grid_size (Tuple[int, int]): The size of grid cells to use. The ``grid_size`` + variable should be in the format of: [width, height]. min_density (int, optional): The minimum number of points for a cell to be counted. Default: ``8`` @@ -179,8 +179,8 @@ def create_atlas_vectors( ) -> Tuple[torch.Tensor, List[Tuple[int, int, int]]]: """ Create direction vectors by splitting an irregular grid of activation samples into - cells. Grid cells without the minimum number of points as specified by min_density - will be ignored. + cells. Grid cells without the minimum number of points as specified by + ``min_density`` will be ignored. Carter, et al., "Activation Atlas", Distill, 2019. https://distill.pub/2019/activation-atlas/ @@ -245,7 +245,7 @@ def create_atlas( coords (list of Tuple[int, int] or list of Tuple[int, int, int]): A list of coordinates to use for the atlas image tensors. The first 2 values in each coordinate list should be: [x, y, ...]. - grid_size (Tuple[int, int]): The size of grid cells to use. The grid_size + grid_size (Tuple[int, int]): The size of grid cells to use. The ``grid_size`` variable should be in the format of: [width, height]. base_tensor (Callable, optional): What to use for the atlas base tensor. Basic choices are: ``torch.ones`` or ``torch.zeros``. From 76836b93e21bfd24d2c3714e45876068488546dc Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Wed, 29 Jun 2022 08:42:13 -0600 Subject: [PATCH 03/30] Improve ToRGB docs --- captum/optim/_param/image/transforms.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/captum/optim/_param/image/transforms.py b/captum/optim/_param/image/transforms.py index f02fd5e3a4..5bc2157711 100644 --- a/captum/optim/_param/image/transforms.py +++ b/captum/optim/_param/image/transforms.py @@ -71,13 +71,26 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: class ToRGB(nn.Module): """Transforms arbitrary channels to RGB. We use this to ensure our image parametrization itself can be decorrelated. So this goes between - the image parametrization and the normalization/sigmoid step. + the image parametrization and the normalization / sigmoid step, like in + :class:`captum.optim.images.NaturalImage`. + We offer two precalculated transforms: Karhunen-Loève (KLT) and I1I2I3. KLT corresponds to the empirically measured channel correlations on imagenet. I1I2I3 corresponds to an approximation for natural images from Ohta et al.[0] + If you wish to calculate your own KLT transform matrix on a custom dataset, + then please see :func:`captum.optim.dataset.dataset_klt_matrix` for an example + of how to do so. + [0] Y. Ohta, T. Kanade, and T. Sakai, "Color information for region segmentation," Computer Graphics and Image Processing, vol. 13, no. 3, pp. 
222–241, 1980 https://www.sciencedirect.com/science/article/pii/0146664X80900477 + + Example:: + + >>> to_rgb = opt.transforms.ToRGB() + >>> x = torch.randn(1, 3, 224, 224) + >>> decorrelated_colors = to_rgb(x, inverse=True) + >>> recorrelated_colors = to_rgb(decorrelated_colors) """ @staticmethod From bb0984f879a88bb56684ce42c3c3df04d02a9561 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Wed, 29 Jun 2022 16:30:42 -0600 Subject: [PATCH 04/30] Add more doc refs --- captum/optim/_param/image/transforms.py | 100 +++++++++++++++--------- 1 file changed, 62 insertions(+), 38 deletions(-) diff --git a/captum/optim/_param/image/transforms.py b/captum/optim/_param/image/transforms.py index 5bc2157711..8d92d53841 100644 --- a/captum/optim/_param/image/transforms.py +++ b/captum/optim/_param/image/transforms.py @@ -276,6 +276,8 @@ class CenterCrop(torch.nn.Module): """ Center crop a specified amount from a tensor. If input are smaller than the specified crop size, padding will be applied. + + See :func:`.center_crop` for the functional version of this transform. """ __constants__ = [ @@ -372,6 +374,8 @@ def center_crop( """ Center crop a specified amount from a tensor. If input are smaller than the specified crop size, padding will be applied. + + This function is the functional version of: :class:`.CenterCrop`. Args: @@ -396,7 +400,7 @@ def center_crop( Default: ``0.0`` Returns: - **tensor**: A center cropped *tensor*. + **tensor** (torch.Tensor): A center cropped *tensor*. """ assert input.dim() == 3 or input.dim() == 4 @@ -450,7 +454,8 @@ def center_crop( class RandomScale(nn.Module): """ - Apply random rescaling on a NCHW tensor using the F.interpolate function. + Apply random rescaling on a NCHW tensor using the + :func:`torch.nn.functional.interpolate` function. """ __constants__ = [ @@ -475,21 +480,25 @@ def __init__( Args: scale (float, sequence, or torch.distribution): Sequence of rescaling - values to randomly select from, or a torch.distributions instance. + values to randomly select from, or a :mod:`torch.distributions` + instance. mode (str, optional): Interpolation mode to use. See documentation of - ``F.interpolate`` for more details. One of; ``"bilinear"``, - ``"nearest"``, ``"area"``, or ``"bicubic"``. + :func:`torch.nn.functional.interpolate` for more details. One of; + ``"bilinear"``, ``"nearest"``, ``"nearest-exact"``, ``"area"``, or + ``"bicubic"``. Default: ``"bilinear"`` align_corners (bool, optional): Whether or not to align corners. See - documentation of ``F.interpolate`` for more details. + documentation of :func:`torch.nn.functional.interpolate` for more + details. Default: ``False`` recompute_scale_factor (bool, optional): Whether or not to recompute the - scale factor See documentation of ``F.interpolate`` for more details. + scale factor See documentation of + :func:`torch.nn.functional.interpolate` for more details. Default: ``False`` antialias (bool, optional): Whether or not use to anti-aliasing. This feature is currently only available for ``"bilinear"`` and - ``"bicubic"`` modes. See documentation of ``F.interpolate`` for more - details. + ``"bicubic"`` modes. See documentation of + :func:`torch.nn.functional.interpolate` for more details. Default: ``False`` """ super().__init__() @@ -580,11 +589,11 @@ class RandomScaleAffine(nn.Module): """ Apply random rescaling on a NCHW tensor. 
- This random scaling transform utilizes F.affine_grid & F.grid_sample, and as a - result has two key differences to the default RandomScale transforms This - transform either shrinks an image while adding a background, or center crops image - and then resizes it to a larger size. This means that the output image shape is the - same shape as the input image. + This random scaling transform utilizes :func:`torch.nn.functional.affine_grid` + & :func:`torch.nn.functional.grid_sample`, and as a result has two key differences + to the default RandomScale transforms This transform either shrinks an image while + adding a background, or center crops image and then resizes it to a larger size. + This means that the output image shape is the same shape as the input image. In constrast to RandomScaleAffine, the default RandomScale transform simply resizes the input image using F.interpolate. @@ -609,18 +618,20 @@ def __init__( Args: scale (float, sequence, or torch.distribution): Sequence of rescaling - values to randomly select from, or a torch.distributions instance. + values to randomly select from, or a :mod:`torch.distributions` + instance. mode (str, optional): Interpolation mode to use. See documentation of - ``F.grid_sample`` for more details. One of; ``"bilinear"``, - ``"nearest"``, or ``"bicubic"``. + :func:`torch.nn.functional.grid_sample` for more details. One of; + ``"bilinear"``, ``"nearest"``, or ``"bicubic"``. Default: ``"bilinear"`` padding_mode (str, optional): Padding mode for values that fall outside of - the grid. See documentation of ``F.grid_sample`` for more details. One - of; ``"zeros"``, ``"border"``, or ``"reflection"``. + the grid. See documentation of :func:`torch.nn.functional.grid_sample` + for more details. One of; ``"zeros"``, ``"border"``, or + ``"reflection"``. Default: ``"zeros"`` align_corners (bool, optional): Whether or not to align corners. See - documentation of ``F.affine_grid`` & ``F.grid_sample`` for more - details. + documentation of :func:`torch.nn.functional.affine_grid` & + :func:`torch.nn.functional.grid_sample` for more details. Default: ``False`` """ super().__init__() @@ -769,8 +780,7 @@ def forward(self, input: torch.Tensor) -> torch.Tensor: class RandomRotation(nn.Module): """ - Apply random rotation transforms on a NCHW tensor, using a sequence of degrees or - torch.distributions instance. + Apply random rotation transforms on a NCHW tensor. """ __constants__ = [ @@ -792,18 +802,20 @@ def __init__( Args: degrees (float, sequence, or torch.distribution): Tuple or list of degrees - values to randomly select from, or a ``torch.distributions`` instance. + values to randomly select from, or a :mod:`torch.distributions` + instance. mode (str, optional): Interpolation mode to use. See documentation of - F.grid_sample for more details. One of; ``"bilinear"``, ``"nearest"``, - or ``"bicubic"``. + :func:`torch.nn.functional.grid_sample` for more details. One of; + ``"bilinear"``, ``"nearest"``, or ``"bicubic"``. Default: ``"bilinear"`` padding_mode (str, optional): Padding mode for values that fall outside of - the grid. See documentation of F.grid_sample for more details. One of; - ``"zeros"``, ``"border"``, or ``"reflection"``. + the grid. See documentation of :func:`torch.nn.functional.grid_sample` + for more details. One of; ``"zeros"``, ``"border"``, or + ``"reflection"``. Default: ``"zeros"`` align_corners (bool, optional): Whether or not to align corners. See - documentation of ``F.affine_grid`` & ``F.grid_sample`` for more - details. 
+ documentation of :func:`torch.nn.functional.affine_grid` & + :func:`torch.nn.functional.grid_sample` for more details. Default: ``False`` """ super().__init__() @@ -1229,7 +1241,9 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: class TransformationRobustness(nn.Module): """ - This transform combines the standard transforms together for ease of use. + This transform combines the standard transforms (:class:`.RandomSpatialJitter`, + :class:`.RandomScale` & :class:`.RandomRotation`) together for ease of + use. Multiple jitter transforms can be used to create roughly gaussian distribution of jitter. @@ -1262,21 +1276,23 @@ def __init__( padding will be applied before transforms if set to ``None``. Default: ``nn.ConstantPad2d(2, value=0.5)`` translate (int or list of int, optional): The max horizontal and vertical - translation to use for each jitter transform. + translation to use for each :class:`.RandomSpatialJitter` transform. Default: ``[4] * 10`` scale (float, sequence, or torch.distribution, optional): Sequence of - rescaling values to randomly select from, or a torch.distributions - instance. If set to ``None``, no rescaling transform will be used. + rescaling values to randomly select from, or a + :mod:`torch.distributions` instance. If set to ``None``, no + :class:`.RandomScale` transform will be used. Default: ``[0.995**n for n in range(-5, 80)] + [0.998**n for n in 2 * list(range(20, 40))]`` degrees (float, sequence, or torch.distribution, optional): Sequence of - degrees to randomly select from, or a torch.distributions - instance. If set to ``None``, no rotation transform will be used. + degrees to randomly select from, or a :mod:`torch.distributions` + instance. If set to ``None``, no :class:`.RandomRotation` transform + will be used. Default: ``list(range(-20, 20)) + list(range(-10, 10)) + list(range(-5, 5)) + 5 * [0]`` final_translate (int, optional): The max horizontal and vertical - translation to use for the final jitter transform on fractional - pixels. + translation to use for the final :class:`.RandomSpatialJitter` + transform on fractional pixels. Default: ``2`` crop_or_pad_output (bool, optional): Whether or not to crop or pad the transformed output so that it is the same shape as the input. @@ -1303,6 +1319,14 @@ def __init__( self.crop_or_pad_output = crop_or_pad_output def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + Args: + + x (torch.Tensor): An NCHW tensor. + + Returns: + x (torch.Tensor): A transformed NCHW tensor. + """ assert x.dim() == 4 crop_size = x.shape[2:] From e01478c6e65f9bf7f098eac6d3a845c2df9c9308 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Wed, 29 Jun 2022 16:47:20 -0600 Subject: [PATCH 05/30] Fix lint error --- captum/optim/_param/image/transforms.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/captum/optim/_param/image/transforms.py b/captum/optim/_param/image/transforms.py index 8d92d53841..a0bdc7ff7d 100644 --- a/captum/optim/_param/image/transforms.py +++ b/captum/optim/_param/image/transforms.py @@ -374,7 +374,7 @@ def center_crop( """ Center crop a specified amount from a tensor. If input are smaller than the specified crop size, padding will be applied. - + This function is the functional version of: :class:`.CenterCrop`. 
Args: From ef4858732ae5fc26c8150ce7a6e878ef25a46683 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Fri, 1 Jul 2022 11:27:53 -0600 Subject: [PATCH 06/30] Improve ToRGB docs --- captum/optim/_param/image/transforms.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/captum/optim/_param/image/transforms.py b/captum/optim/_param/image/transforms.py index a0bdc7ff7d..cfaea376ff 100644 --- a/captum/optim/_param/image/transforms.py +++ b/captum/optim/_param/image/transforms.py @@ -77,9 +77,11 @@ class ToRGB(nn.Module): We offer two precalculated transforms: Karhunen-Loève (KLT) and I1I2I3. KLT corresponds to the empirically measured channel correlations on imagenet. I1I2I3 corresponds to an approximation for natural images from Ohta et al.[0] - If you wish to calculate your own KLT transform matrix on a custom dataset, - then please see :func:`captum.optim.dataset.dataset_klt_matrix` for an example - of how to do so. + + While the default transform matrices should work for the vast majority of use + cases, you can also use your own 3x3 transform matrix. If you wish to calculate + your own KLT transform matrix on a custom dataset, then please see + :func:`captum.optim.dataset.dataset_klt_matrix` for an example of how to do so. [0] Y. Ohta, T. Kanade, and T. Sakai, "Color information for region segmentation," Computer Graphics and Image Processing, vol. 13, no. 3, pp. 222–241, 1980 From 3ab53ae230f9f4622cbd4e12ffd4acdcde914225 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Fri, 1 Jul 2022 18:47:54 -0600 Subject: [PATCH 07/30] Improve docs for Sphinx (#550) * Update common.py * Update reducer.py * Update __init__.py --- captum/optim/__init__.py | 2 +- captum/optim/_utils/image/common.py | 4 ++-- captum/optim/_utils/reducer.py | 10 +++++----- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/captum/optim/__init__.py b/captum/optim/__init__.py index 9177d5c62c..d2c61cc70f 100644 --- a/captum/optim/__init__.py +++ b/captum/optim/__init__.py @@ -1,6 +1,6 @@ """optim submodule.""" -from captum.optim import models +from captum.optim import models # noqa: F401 from captum.optim._core import loss, optimization # noqa: F401 from captum.optim._core.optimization import InputOptimization # noqa: F401 from captum.optim._param.image import images, transforms # noqa: F401 diff --git a/captum/optim/_utils/image/common.py b/captum/optim/_utils/image/common.py index 4ea2ff8516..03e20d26fe 100644 --- a/captum/optim/_utils/image/common.py +++ b/captum/optim/_utils/image/common.py @@ -70,7 +70,7 @@ def make_grid_image( def show( x: torch.Tensor, - figsize: Optional[Tuple[int, int]] = (10, 10), + figsize: Optional[Tuple[int, int]] = (8, 8), scale: float = 255.0, images_per_row: Optional[int] = None, padding: int = 2, @@ -84,7 +84,7 @@ def show( x (torch.Tensor): The tensor you want to display as an image. figsize (Tuple[int, int], optional): height & width to use for displaying the image figure. - Default: ``(10, 10)`` + Default: ``(8, 8)`` scale (float, optional): Value to multiply the input tensor by so that it's value range is [0-255] for display. Default: ``255.0`` diff --git a/captum/optim/_utils/reducer.py b/captum/optim/_utils/reducer.py index 9393681759..be1fd2ebce 100644 --- a/captum/optim/_utils/reducer.py +++ b/captum/optim/_utils/reducer.py @@ -28,7 +28,7 @@ class ChannelReducer: reduction_alg (str or callable, optional): The desired dimensionality reduction algorithm to use. 
The default ``reduction_alg`` is set to NMF from sklearn, which requires users to put inputs on CPU before passing them - to ``fit_transform``. + to :func:`ChannelReducer.fit_transform`. Default: ``NMF`` **kwargs (optional): Arbitrary keyword arguments used by the specified reduction_alg. @@ -76,14 +76,14 @@ def fit_transform( Args: - tensor (tensor): A tensor to perform dimensionality reduction on. + tensor (torch.Tensor): A tensor to perform dimensionality reduction on. swap_2nd_and_last_dims (bool, optional): If ``True``, input channels are expected to be in the second dimension unless the input tensor has a shape of CHW. Default: ``True``. Returns: - *tensor*: A tensor with one of it's dimensions reduced. + tensor: A tensor with one of it's dimensions reduced. """ if x.dim() == 3 and swap_2nd_and_last_dims: @@ -138,12 +138,12 @@ def posneg(x: torch.Tensor, dim: int = 0) -> torch.Tensor: Args: - x (tensor): A tensor to make positive. + x (torch.Tensor): A tensor to make positive. dim (int, optional): The dimension to concatinate the two tensor halves at. Default: ``0`` Returns: - tensor (torch.tensor): A positive tensor for one-sided dimensionality + tensor (torch.Tensor): A positive tensor for one-sided dimensionality reduction. """ From 57ea9512fdda56b1a71459376f1c245fc7dd2de1 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Mon, 4 Jul 2022 18:21:50 -0600 Subject: [PATCH 08/30] More doc improvements --- captum/optim/_param/image/transforms.py | 56 +++++++++++++++---------- captum/optim/_utils/circuits.py | 18 ++++++++ captum/optim/_utils/image/atlas.py | 30 ++++++------- captum/optim/_utils/image/common.py | 2 +- captum/optim/_utils/reducer.py | 13 +++++- 5 files changed, 79 insertions(+), 40 deletions(-) diff --git a/captum/optim/_param/image/transforms.py b/captum/optim/_param/image/transforms.py index cfaea376ff..87c00d97a7 100644 --- a/captum/optim/_param/image/transforms.py +++ b/captum/optim/_param/image/transforms.py @@ -20,7 +20,7 @@ def __init__(self, background: Optional[torch.Tensor] = None) -> None: """ Args: - background (tensor, optional): An NCHW image tensor to be used as the + background (tensor, optional): An NCHW image tensor to be used as the Alpha channel's background. Default: ``None`` """ @@ -36,7 +36,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: x (torch.Tensor): RGBA image tensor to blend into an RGB image tensor. Returns: - **blended** (torch.Tensor): RGB image tensor. + blended (torch.Tensor): RGB image tensor. """ assert x.dim() == 4 assert x.size(1) == 4 @@ -60,7 +60,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: x (torch.Tensor): RGBA image tensor. Returns: - **rgb** (torch.Tensor): RGB image tensor without the alpha channel. + rgb (torch.Tensor): RGB image tensor without the alpha channel. """ assert x.dim() == 4 assert x.size(1) == 4 @@ -101,7 +101,7 @@ def klt_transform() -> torch.Tensor: Karhunen-Loève transform (KLT) measured on ImageNet Returns: - **transform** (torch.Tensor): A Karhunen-Loève transform (KLT) measured on + transform (torch.Tensor): A Karhunen-Loève transform (KLT) measured on the ImageNet dataset. """ # Handle older versions of PyTorch @@ -120,7 +120,7 @@ def klt_transform() -> torch.Tensor: def i1i2i3_transform() -> torch.Tensor: """ Returns: - **transform** (torch.Tensor): An approximation of natural colors transform + transform (torch.Tensor): An approximation of natural colors transform (i1i2i3). 
""" i1i2i3_matrix = [ @@ -134,7 +134,7 @@ def __init__(self, transform: Union[str, torch.Tensor] = "klt") -> None: """ Args: - transform (str or tensor): Either a string for one of the precalculated + transform (str or tensor): Either a string for one of the precalculated transform matrices, or a 3x3 matrix for the 3 RGB channels of input tensors. """ @@ -352,7 +352,7 @@ def forward(self, input: torch.Tensor) -> torch.Tensor: input (torch.Tensor): Input to center crop. Returns: - **tensor** (torch.Tensor): A center cropped *tensor*. + tensor (torch.Tensor): A center cropped NCHW tensor. """ return center_crop( @@ -402,7 +402,7 @@ def center_crop( Default: ``0.0`` Returns: - **tensor** (torch.Tensor): A center cropped *tensor*. + tensor (torch.Tensor): A center cropped NCHW tensor. """ assert input.dim() == 3 or input.dim() == 4 @@ -537,7 +537,7 @@ def _scale_tensor(self, x: torch.Tensor, scale: float) -> torch.Tensor: scale (float): The amount to scale the NCHW image by. Returns: - **x** (torch.Tensor): A scaled NCHW image tensor. + x (torch.Tensor): A scaled NCHW image tensor. """ if self._has_antialias: x = F.interpolate( @@ -567,7 +567,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: x (torch.Tensor): NCHW image tensor to randomly scale. Returns: - **x** (torch.Tensor): A randomly scaled NCHW image *tensor*. + x (torch.Tensor): A randomly scaled NCHW image tensor. """ assert x.dim() == 4 if self._is_distribution: @@ -669,7 +669,7 @@ def _get_scale_mat( m (float): The scale value to use. Returns: - **scale_mat** (torch.Tensor): A scale matrix. + scale_mat (torch.Tensor): A scale matrix. """ scale_mat = torch.tensor( [[m, 0.0, 0.0], [0.0, m, 0.0]], device=device, dtype=dtype @@ -686,7 +686,7 @@ def _scale_tensor(self, x: torch.Tensor, scale: float) -> torch.Tensor: scale (float): The amount to scale the NCHW image by. Returns: - **x** (torch.Tensor): A scaled NCHW image tensor. + x (torch.Tensor): A scaled NCHW image tensor. """ scale_matrix = self._get_scale_mat(scale, x.device, x.dtype)[None, ...].repeat( x.shape[0], 1, 1 @@ -710,7 +710,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: x (torch.Tensor): NCHW image tensor to randomly scale. Returns: - **x** (torch.Tensor): A randomly scaled NCHW image *tensor*. + x (torch.Tensor): A randomly scaled NCHW image tensor. """ assert x.dim() == 4 if self._is_distribution: @@ -768,7 +768,7 @@ def forward(self, input: torch.Tensor) -> torch.Tensor: input (torch.Tensor): Input to randomly translate. Returns: - **tensor** (torch.Tensor): A randomly translated *tensor*. + tensor (torch.Tensor): A randomly translated NCHW tensor. """ insets = torch.randint( high=self.pad_range, @@ -854,7 +854,7 @@ def _get_rot_mat( theta (float): The rotation value in degrees. Returns: - **rot_mat** (torch.Tensor): A rotation matrix. + rot_mat (torch.Tensor): A rotation matrix. """ theta = theta * math.pi / 180.0 rot_mat = torch.tensor( @@ -877,7 +877,7 @@ def _rotate_tensor(self, x: torch.Tensor, theta: float) -> torch.Tensor: theta (float): The amount to rotate the NCHW image, in degrees. Returns: - **x** (torch.Tensor): A rotated NCHW image tensor. + x (torch.Tensor): A rotated NCHW image tensor. """ rot_matrix = self._get_rot_mat(theta, x.device, x.dtype)[None, ...].repeat( x.shape[0], 1, 1 @@ -901,7 +901,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: x (torch.Tensor): NCHW image tensor to randomly rotate. Returns: - **x** (torch.Tensor): A randomly rotated NCHW image *tensor*. + x (torch.Tensor): A randomly rotated NCHW image tensor. 
""" assert x.dim() == 4 if self._is_distribution: @@ -933,7 +933,7 @@ def __init__(self, multiplier: float = 1.0) -> None: """ Args: - multiplier (float, optional): A float value used to scale the input. + multiplier (float, optional): A float value used to scale the input. """ super().__init__() self.multiplier = multiplier @@ -947,7 +947,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: x (torch.Tensor): Input to scale values of. Returns: - **tensor** (torch.Tensor): tensor with it's values scaled. + tensor (torch.Tensor): tensor with it's values scaled. """ return x * self.multiplier @@ -966,7 +966,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: x (torch.Tensor): RGB image tensor to convert to BGR. Returns: - **BGR tensor** (torch.Tensor): A BGR tensor. + BGR tensor (torch.Tensor): A BGR tensor. """ assert x.dim() == 4 assert x.size(1) == 3 @@ -1104,7 +1104,7 @@ def forward( x (torch.Tensor): Input to apply symmetric padding on. Returns: - **tensor** (torch.Tensor): Padded tensor. + tensor (torch.Tensor): Padded tensor. """ ctx.padding = padding x_device = x.device @@ -1127,7 +1127,7 @@ def backward( grad_output (torch.Tensor): Input to remove symmetric padding from. Returns: - **grad_input** (torch.Tensor): Unpadded tensor. + grad_input (torch.Tensor): Unpadded tensor. """ grad_input = grad_output.clone() B, C, H, W = grad_input.size() @@ -1166,7 +1166,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: x (torch.Tensor): Input to reduce channel dimensions on. Returns: - **3 channel RGB tensor** (torch.Tensor): RGB image tensor. + x (torch.Tensor): A 3 channel RGB image tensor. """ assert x.dim() == 4 return nchannels_to_rgb(x, self.warp) @@ -1216,6 +1216,16 @@ def _center_crop(self, x: torch.Tensor) -> torch.Tensor: ] def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + Randomly crop an NCHW image tensor. + + Args: + + x (torch.Tensor): The NCHW image tensor to randomly crop. + + Returns + x (torch.Tensor): The randomly cropped NCHW image tensor. + """ assert x.dim() == 4 hs = int(math.ceil((x.shape[2] - self.crop_size[0]) / 2.0)) ws = int(math.ceil((x.shape[3] - self.crop_size[1]) / 2.0)) diff --git a/captum/optim/_utils/circuits.py b/captum/optim/_utils/circuits.py index d2731a9bf9..6710f7c96a 100644 --- a/captum/optim/_utils/circuits.py +++ b/captum/optim/_utils/circuits.py @@ -20,6 +20,24 @@ def extract_expanded_weights( literally adjacent in a neural network, or where the weights aren’t directly represented in a single weight tensor. + Example:: + + >>> # Load InceptionV1 model with nonlinear layers replaced by + >>> # their linear equivalents + >>> linear_model = opt.models.googlenet( + >>> pretrained=True, use_linear_modules_only=True + >>> ).eval() + >>> # Extract weight interactions between target layers + >>> W_3a_3b = opt.circuits.extract_expanded_weights( + >>> linear_model, linear_model.mixed3a, linear_model.mixed3b, 5 + >>> ) + >>> # Display results for channel 147 of mixed3a and channel 379 of + >>> # mixed3b, in human readable format + >>> W_3a_3b_hm = opt.weights_to_heatmap_2d( + >>> W_3a_3b[379, 147, ...] / W_3a_3b[379, ...].max() + >>> ) + >>> opt.show(W_3a_3b_hm) + Voss, et al., "Visualizing Weights", Distill, 2021. 
See: https://distill.pub/2020/circuits/visualizing-weights/ diff --git a/captum/optim/_utils/image/atlas.py b/captum/optim/_utils/image/atlas.py index d53cac84dc..492aa403d3 100644 --- a/captum/optim/_utils/image/atlas.py +++ b/captum/optim/_utils/image/atlas.py @@ -146,13 +146,14 @@ def compute_avg_cell_samples( Default: ``8`` Returns: - cell_vecs (torch.tensor): A tensor containing all the direction vectors that - were created, stacked along the batch dimension with a shape of: - [n_vecs, n_channels]. - cell_coords (list of Tuple[int, int, int]): List of coordinates for grid - spatial positions of each direction vector, and the number of samples used - for the cell. The list for each cell is in the format of: - [x_coord, y_coord, number_of_samples_used]. + cell_vecs_and_cell_coords: A 2 element tuple of: ``(cell_vecs, cell_coords)``. + - cell_vecs (torch.tensor): A tensor containing all the direction vectors + that were created, stacked along the batch dimension with a shape of: + [n_vecs, n_channels]. + - cell_coords (list of Tuple[int, int, int]): List of coordinates for grid + spatial positions of each direction vector, and the number of samples + used for the cell. The list for each cell is in the format of: + [x_coord, y_coord, number_of_samples_used]. """ assert raw_samples.dim() == 2 @@ -205,13 +206,14 @@ def create_atlas_vectors( Default: ``(0.0, 1.0)`` Returns: - grid_vecs (torch.tensor): A tensor containing all the direction vectors that - were created, stacked along the batch dimension, with a shape of: - [n_vecs, n_channels]. - cell_coords (list of Tuple[int, int, int]): List of coordinates for grid - spatial positions of each direction vector, and the number of samples used - for the cell. The list for each cell is in the format of: - [x_coord, y_coord, number_of_samples_used]. + grid_vecs_and_cell_coords: A 2 element tuple of: ``(grid_vecs, cell_coords)``. + - grid_vecs (torch.tensor): A tensor containing all the direction vectors + that were created, stacked along the batch dimension, with a shape + of: [n_vecs, n_channels]. + - cell_coords (list of Tuple[int, int, int]): List of coordinates for grid + spatial positions of each direction vector, and the number of samples + used for the cell. The list for each cell is in the format of: + [x_coord, y_coord, number_of_samples_used]. """ assert xy_grid.dim() == 2 and xy_grid.size(1) == 2 diff --git a/captum/optim/_utils/image/common.py b/captum/optim/_utils/image/common.py index 03e20d26fe..fa9d936050 100644 --- a/captum/optim/_utils/image/common.py +++ b/captum/optim/_utils/image/common.py @@ -348,7 +348,7 @@ def weights_to_heatmap_2d( Default: ``["0571b0", "92c5de", "f7f7f7", "f4a582", "ca0020"]`` Returns: - color_tensor (torch.Tensor): A weight heatmap. + color_tensor (torch.Tensor): A weight heatmap. 
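+
+    Example (a small sketch mirroring the ``extract_expanded_weights``
+    example earlier in this patch; ``opt`` is assumed to be the imported
+    ``captum.optim`` module)::
+
+        >>> weight = torch.randn(128, 128)
+        >>> # Normalize the values before coloring, as in the circuits example
+        >>> heatmap = opt.weights_to_heatmap_2d(weight / weight.abs().max())
+        >>> opt.show(heatmap)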
""" assert weight.dim() == 2 diff --git a/captum/optim/_utils/reducer.py b/captum/optim/_utils/reducer.py index be1fd2ebce..812bb91086 100644 --- a/captum/optim/_utils/reducer.py +++ b/captum/optim/_utils/reducer.py @@ -21,6 +21,14 @@ class ChannelReducer: See here for more information: https://distill.pub/2018/building-blocks/ + Example:: + + >>> reducer = opt.reducer.ChannelReducer(2, "NMF") + >>> x = torch.randn(1, 8, 128, 128).abs() + >>> output = reducer.fit_transform(x) + >>> print(output.shape) + torch.Size([1, 2, 128, 128]) + Args: n_components (int, optional): The number of channels to reduce the target @@ -30,7 +38,7 @@ class ChannelReducer: from sklearn, which requires users to put inputs on CPU before passing them to :func:`ChannelReducer.fit_transform`. Default: ``NMF`` - **kwargs (optional): Arbitrary keyword arguments used by the specified + **kwargs (any, optional): Arbitrary keyword arguments used by the specified reduction_alg. """ @@ -72,7 +80,8 @@ def fit_transform( self, x: torch.Tensor, swap_2nd_and_last_dims: bool = True ) -> torch.Tensor: """ - Perform dimensionality reduction on an input tensor. + Perform dimensionality reduction on an input tensor using the specified + ``reduction_alg``'s ``.fit_transform`` function. Args: From 3c6c24d90c31d670326574ecb081ef92104d718f Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Mon, 4 Jul 2022 19:01:31 -0600 Subject: [PATCH 09/30] Improve docs (#554) * Update common.py * Update inception_v1.py --- captum/optim/_utils/image/common.py | 4 ++-- captum/optim/models/_image/inception_v1.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/captum/optim/_utils/image/common.py b/captum/optim/_utils/image/common.py index fa9d936050..80bc7b831a 100644 --- a/captum/optim/_utils/image/common.py +++ b/captum/optim/_utils/image/common.py @@ -70,7 +70,7 @@ def make_grid_image( def show( x: torch.Tensor, - figsize: Optional[Tuple[int, int]] = (8, 8), + figsize: Optional[Tuple[int, int]] = None, scale: float = 255.0, images_per_row: Optional[int] = None, padding: int = 2, @@ -84,7 +84,7 @@ def show( x (torch.Tensor): The tensor you want to display as an image. figsize (Tuple[int, int], optional): height & width to use for displaying the image figure. - Default: ``(8, 8)`` + Default: ``None`` scale (float, optional): Value to multiply the input tensor by so that it's value range is [0-255] for display. Default: ``255.0`` diff --git a/captum/optim/models/_image/inception_v1.py b/captum/optim/models/_image/inception_v1.py index 0f49ae5253..ab7d050bd5 100644 --- a/captum/optim/models/_image/inception_v1.py +++ b/captum/optim/models/_image/inception_v1.py @@ -53,7 +53,7 @@ def googlenet( Default: ``False`` Returns: - **model** (InceptionV1): An Inception5h model instance. + model (InceptionV1): An Inception5h model instance. """ if pretrained: From 07c9e601e4a4bc557dff93955f3d6290c7b54d23 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Wed, 6 Jul 2022 14:44:31 -0600 Subject: [PATCH 10/30] Add missing InceptionV1 InceptionModule docs --- captum/optim/models/_image/inception_v1.py | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/captum/optim/models/_image/inception_v1.py b/captum/optim/models/_image/inception_v1.py index ab7d050bd5..e5e38cbed8 100644 --- a/captum/optim/models/_image/inception_v1.py +++ b/captum/optim/models/_image/inception_v1.py @@ -291,14 +291,20 @@ def __init__( """ Args: - in_channels (int, optional): The number of input channels to use for the - inception module. 
- c1x1 (int, optional): - c3x3reduce (int, optional): - c3x3 (int, optional): - c5x5reduce (int, optional): - c5x5 (int, optional): - pool_proj (int, optional): + in_channels (int): The number of input channels to use for the first + layers of the inception module branches. + c1x1 (int): The number of output channels to use for the first layer in + the c1x1 branch. + c3x3reduce (int): The number of output channels to use for the first layer + in the c3x3 branch. + c3x3 (int): The number of output channels to use for the second layer in + the c3x3 branch. + c5x5reduce (int): The number of output channels to use for the first layer + in the c5x5 branch. + c5x5 (int): The number of output channels to use for the second layer in + the c5x5 branch. + pool_proj (int): The number of output channels to use for the second layer + in the pool branch. activ (type of nn.Module, optional): The nn.Module class type to use for activation layers. Default: ``nn.ReLU`` From 953780e743dab1443997771366014da31c733827 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Wed, 6 Jul 2022 15:51:22 -0600 Subject: [PATCH 11/30] Fix TransformationRobustness doc formatting & add missing RedirectedReLU forward docs --- captum/optim/_param/image/transforms.py | 22 +++++++++++++--------- captum/optim/_utils/circuits.py | 2 +- captum/optim/models/_common.py | 8 ++++++++ 3 files changed, 22 insertions(+), 10 deletions(-) diff --git a/captum/optim/_param/image/transforms.py b/captum/optim/_param/image/transforms.py index 87c00d97a7..5f3e4030ad 100644 --- a/captum/optim/_param/image/transforms.py +++ b/captum/optim/_param/image/transforms.py @@ -1251,6 +1251,16 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: return self._center_crop(x) +# Define TransformationRobustness defaults externally for easier Sphinx docs formatting +_TR_TRANSLATE: List[int] = [4] * 10 +_TR_SCALE: List[float] = [0.995**n for n in range(-5, 80)] + [ + 0.998**n for n in 2 * list(range(20, 40)) +] +_TR_DEGREES: List[int] = ( + list(range(-20, 20)) + list(range(-10, 10)) + list(range(-5, 5)) + 5 * [0] +) + + class TransformationRobustness(nn.Module): """ This transform combines the standard transforms (:class:`.RandomSpatialJitter`, @@ -1269,15 +1279,9 @@ class TransformationRobustness(nn.Module): def __init__( self, padding_transform: Optional[nn.Module] = nn.ConstantPad2d(2, value=0.5), - translate: Optional[Union[int, List[int]]] = [4] * 10, - scale: Optional[NumSeqOrTensorOrProbDistType] = [ - 0.995**n for n in range(-5, 80) - ] - + [0.998**n for n in 2 * list(range(20, 40))], - degrees: Optional[NumSeqOrTensorOrProbDistType] = list(range(-20, 20)) - + list(range(-10, 10)) - + list(range(-5, 5)) - + 5 * [0], + translate: Optional[Union[int, List[int]]] = _TR_TRANSLATE, + scale: Optional[NumSeqOrTensorOrProbDistType] = _TR_SCALE, + degrees: Optional[NumSeqOrTensorOrProbDistType] = _TR_DEGREES, final_translate: Optional[int] = 2, crop_or_pad_output: bool = False, ) -> None: diff --git a/captum/optim/_utils/circuits.py b/captum/optim/_utils/circuits.py index 6710f7c96a..ac5d04070e 100644 --- a/captum/optim/_utils/circuits.py +++ b/captum/optim/_utils/circuits.py @@ -44,7 +44,7 @@ def extract_expanded_weights( Args: model (nn.Module): The reference to PyTorch model instance. - target1 (nn.module): The starting target layer. Must be below the layer + target1 (nn.Module): The starting target layer. Must be below the layer specified for ``target2``. target2 (nn.Module): The end target layer. Must be above the layer specified for ``target1``. 
diff --git a/captum/optim/models/_common.py b/captum/optim/models/_common.py
index e65e281217..f739e63741 100644
--- a/captum/optim/models/_common.py
+++ b/captum/optim/models/_common.py
@@ -68,6 +68,14 @@ class RedirectedReluLayer(nn.Module):

    @torch.jit.ignore
    def forward(self, input: torch.Tensor) -> torch.Tensor:
+        """
+        Args:
+
+            input (torch.Tensor): A tensor to pass through RedirectedReLU.
+
+        Returns:
+            output (torch.Tensor): The output of RedirectedReLU.
+        """
        return RedirectedReLU.apply(input)

From 533312894778ed2a88c3bfdb192f8f84cc8cf913 Mon Sep 17 00:00:00 2001
From: ProGamerGov
Date: Sun, 10 Jul 2022 09:54:48 -0600
Subject: [PATCH 12/30] http -> https

---
 captum/optim/models/_image/inception_v1.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/captum/optim/models/_image/inception_v1.py b/captum/optim/models/_image/inception_v1.py
index e5e38cbed8..52818573f9 100644
--- a/captum/optim/models/_image/inception_v1.py
+++ b/captum/optim/models/_image/inception_v1.py
@@ -15,7 +15,7 @@ def googlenet(
    **kwargs: Any,
 ) -> "InceptionV1":
    r"""GoogLeNet (also known as Inception v1 & Inception 5h) model architecture from
-    `"Going Deeper with Convolutions" <http://arxiv.org/abs/1409.4842>`_.
+    `"Going Deeper with Convolutions" <https://arxiv.org/abs/1409.4842>`_.

    Example::

From 44af560ee495c6fbd11797503d37dd96f984f940 Mon Sep 17 00:00:00 2001
From: ProGamerGov
Date: Mon, 11 Jul 2022 14:31:10 -0600
Subject: [PATCH 13/30] Improve ChannelReducer docs

---
 captum/optim/_utils/reducer.py | 52 ++++++++++++++++++++++++++++++----
 1 file changed, 47 insertions(+), 5 deletions(-)

diff --git a/captum/optim/_utils/reducer.py b/captum/optim/_utils/reducer.py
index 812bb91086..89ed96315c 100644
--- a/captum/optim/_utils/reducer.py
+++ b/captum/optim/_utils/reducer.py
@@ -16,10 +16,11 @@ class ChannelReducer:
    """
-    Dimensionality reduction for the channel dimension of an input tensor.
-    Olah, et al., "The Building Blocks of Interpretability", Distill, 2018.
-
-    See here for more information: https://distill.pub/2018/building-blocks/
+    The ChannelReducer class is a wrapper for PyTorch and NumPy based dimensonality
+    reduction algorithms, like those from ``sklearn.decomposition`` (ex: NMF, PCA),
+    ``sklearn.manifold`` (ex: TSNE), UMAP, and other libraries. This class handles
+    things like reshaping, algorithm search by name (for scikit-learn only), and
+    PyTorch tensor conversions to and from NumPy arrays.

    Example::

        >>> reducer = opt.reducer.ChannelReducer(2, "NMF")
        >>> x = torch.randn(1, 8, 128, 128).abs()
        >>> output = reducer.fit_transform(x)
        >>> print(output.shape)
        torch.Size([1, 2, 128, 128])

+        >>> # reduction_alg attributes are easily accessible
+        >>> print(reducer.components.shape)
+        torch.Size([2, 8])
+
+    Dimensionality reduction for the channel dimension of an input tensor.
+    Olah, et al., "The Building Blocks of Interpretability", Distill, 2018.
+
+    See here for more information: https://distill.pub/2018/building-blocks/
+
+
    Args:

        n_components (int, optional): The number of channels to reduce the target
            dimension to.
        reduction_alg (str or callable, optional): The desired dimensionality
            reduction algorithm to use. The default ``reduction_alg`` is set to NMF
            from sklearn, which requires users to put inputs on CPU before passing them
            to :func:`ChannelReducer.fit_transform`.
            Default: ``NMF``
-        **kwargs (optional): Arbitrary keyword arguments used by the specified
+        **kwargs (any, optional): Arbitrary keyword arguments used by the specified
            reduction_alg.
    """

        self._reducer = reduction_alg(n_components=n_components, **kwargs)

    def _get_reduction_algo_instance(self, name: str) -> Union[None, Callable]:
+        """
+        Search through a library for a ``reduction_alg`` matching the provided str
+        name.
+
+        Args:
+
+            name (str): The name of the reduction_alg to search for.
+
+        Returns:
+            reduction_alg (callable or None): The ``reduction_alg`` if it was found,
+                otherwise None.
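+
+        Example (an illustrative sketch of the name-based lookup through the
+        public constructor; PCA ships with ``sklearn.decomposition``, and
+        ``opt`` is assumed to be the imported ``captum.optim`` module)::
+
+            >>> # The "PCA" string is resolved to sklearn.decomposition.PCA
+            >>> reducer = opt.reducer.ChannelReducer(2, "PCA")
+            >>> output = reducer.fit_transform(torch.randn(1, 8, 128, 128))
+            >>> print(output.shape)
+            torch.Size([1, 2, 128, 128])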
+ """ if hasattr(sklearn.decomposition, name): obj = sklearn.decomposition.__getattribute__(name) if issubclass(obj, BaseEstimator): return obj + elif hasattr(sklearn.manifold, name): + obj = sklearn.manifold.__getattribute__(name) + if issubclass(obj, BaseEstimator): + return obj return None @classmethod def _apply_flat(cls, func: Callable, x: torch.Tensor) -> torch.Tensor: + """ + Flatten inputs, run them through the reduction_alg, and then reshape them back + to their original size using the resized dimension. + + Args: + + cls (ChannelReducer): The ``ChannelReducer`` class being used. + func (callable): The ``reduction_alg`` transform function being used. + x (torch.Tensor): The tensor being transformed and reduced. + + Returns: + x (torch.Tensor): A transformed tensor. + """ orig_shape = x.shape try: return func(x.reshape([-1, x.shape[-1]])).reshape( @@ -88,7 +128,9 @@ def fit_transform( tensor (torch.Tensor): A tensor to perform dimensionality reduction on. swap_2nd_and_last_dims (bool, optional): If ``True``, input channels are expected to be in the second dimension unless the input tensor has a - shape of CHW. + shape of CHW. When reducing the channel dimension, this parameter + should be set to ``True`` unless you are already using the channels + last format. Default: ``True``. Returns: From acebbd8b99d91d68f39f3f4d7f03c2ab0bb7eaa4 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Fri, 15 Jul 2022 13:55:12 -0600 Subject: [PATCH 14/30] Fix tensor type hints --- captum/optim/_param/image/transforms.py | 22 +++++++++++----------- captum/optim/_utils/image/atlas.py | 24 ++++++++++++------------ captum/optim/_utils/reducer.py | 4 ++-- captum/optim/models/_common.py | 7 ++++++- 4 files changed, 31 insertions(+), 26 deletions(-) diff --git a/captum/optim/_param/image/transforms.py b/captum/optim/_param/image/transforms.py index 5f3e4030ad..2356a7f17e 100644 --- a/captum/optim/_param/image/transforms.py +++ b/captum/optim/_param/image/transforms.py @@ -20,7 +20,7 @@ def __init__(self, background: Optional[torch.Tensor] = None) -> None: """ Args: - background (tensor, optional): An NCHW image tensor to be used as the + background (torch.Tensor, optional): An NCHW image tensor to be used as the Alpha channel's background. Default: ``None`` """ @@ -134,9 +134,9 @@ def __init__(self, transform: Union[str, torch.Tensor] = "klt") -> None: """ Args: - transform (str or tensor): Either a string for one of the precalculated - transform matrices, or a 3x3 matrix for the 3 RGB channels of input - tensors. + transform (str or torch.Tensor): Either a string for one of the + precalculated transform matrices, or a 3x3 matrix for the 3 RGB + channels of input tensors. """ super().__init__() assert isinstance(transform, str) or torch.is_tensor(transform) @@ -158,12 +158,12 @@ def _forward(self, x: torch.Tensor, inverse: bool = False) -> torch.Tensor: """ Args: - x (torch.tensor): A CHW or NCHW RGB or RGBA image tensor. + x (torch.Tensor): A CHW or NCHW RGB or RGBA image tensor. inverse (bool, optional): Whether to recorrelate or decorrelate colors. Default: ``False`` Returns: - chw (torch.tensor): A tensor with it's colors recorrelated or + chw (torch.Tensor): A tensor with it's colors recorrelated or decorrelated. """ @@ -212,12 +212,12 @@ def _forward_without_named_dims( Args: - x (torch.tensor): A CHW pr NCHW RGB or RGBA image tensor. + x (torch.Tensor): A CHW pr NCHW RGB or RGBA image tensor. inverse (bool, optional): Whether to recorrelate or decorrelate colors. 
                Default: ``False``

        Returns:
-            chw (torch.tensor): A tensor with it's colors recorrelated or
+            chw (torch.Tensor): A tensor with its colors recorrelated or
                decorrelated.
        """

@@ -259,12 +259,12 @@ def forward(self, x: torch.Tensor, inverse: bool = False) -> torch.Tensor:

        Args:

-            x (torch.tensor): A CHW or NCHW RGB or RGBA image tensor.
+            x (torch.Tensor): A CHW or NCHW RGB or RGBA image tensor.
            inverse (bool, optional): Whether to recorrelate or decorrelate colors.
                Default: ``False``

        Returns:
-            chw (torch.tensor): A tensor with it's colors recorrelated or
+            chw (torch.Tensor): A tensor with its colors recorrelated or
                decorrelated.
        """
        if torch.jit.is_scripting():
@@ -381,7 +381,7 @@ def center_crop(

    Args:

-        input (tensor): A CHW or NCHW image tensor to center crop.
+        input (torch.Tensor): A CHW or NCHW image tensor to center crop.
        size (int, sequence, int): Number of pixels to center crop away.
        pixels_from_edges (bool, optional): Whether to treat crop size values
            as the number of pixels from the tensor's edge, or an
diff --git a/captum/optim/_utils/image/atlas.py b/captum/optim/_utils/image/atlas.py
index 492aa403d3..23aa00496a 100644
--- a/captum/optim/_utils/image/atlas.py
+++ b/captum/optim/_utils/image/atlas.py
@@ -14,7 +14,7 @@ def normalize_grid(

    Args:

-        xy_grid (torch.tensor): The xy coordinate grid tensor to normalize,
+        xy_grid (torch.Tensor): The xy coordinate grid tensor to normalize,
            with a shape of: [n_points, n_axes].
        min_percentile (float, optional): The minimum percentile to use when
            normalizing the tensor. Value must be in the range [0, 1].
            Default: ``0.01``
        max_percentile (float, optional): The maximum percentile to use when
            normalizing the tensor. Value must be in the range [0, 1].
            Default: ``0.99``
        relative_margin (float, optional): The relative margin to use when
            normalizing the tensor.
            Default: ``0.1``

    Returns:
-        normalized_grid (torch.tensor): A normalized xy coordinate grid tensor.
+        normalized_grid (torch.Tensor): A normalized xy coordinate grid tensor.
    """

    assert xy_grid.dim() == 2
@@ -82,7 +82,7 @@ def calc_grid_indices(

    Args:

-        xy_grid (torch.tensor): The xy coordinate grid activation samples, with a shape
+        xy_grid (torch.Tensor): The xy coordinate grid activation samples, with a shape
            of: [n_points, 2].
        grid_size (Tuple[int, int]): The grid_size of grid cells to use. The
            ``grid_size`` variable should be in the format of: [width, height].
        x_extent (Tuple[float, float], optional): The x axis range to use.
            Default: ``(0.0, 1.0)``
        y_extent (Tuple[float, float], optional): The y axis range to use.
            Default: ``(0.0, 1.0)``

    Returns:
-        indices (list of list of torch.Tensors): List of lists of grid indices
+        indices (list of list of torch.Tensor): List of lists of grid indices
            stored inside tensors to use. Each 1D tensor of indices has a size of:
            0 to n_indices.
    """
@@ -134,10 +134,10 @@ def compute_avg_cell_samples(

    Args:

-    grid_indices (list of list of torch.tensor): List of lists of grid indices
+    grid_indices (list of list of torch.Tensor): List of lists of grid indices
        stored inside tensors to use. Each 1D tensor of indices has a size of:
        0 to n_indices.
-    raw_samples (torch.tensor): Raw unmodified activation or attribution samples,
+    raw_samples (torch.Tensor): Raw unmodified activation or attribution samples,
        with a shape of: [n_samples, n_channels].
    grid_size (Tuple[int, int]): The size of grid cells to use. The ``grid_size``
        variable should be in the format of: [width, height].
    min_density (int, optional): The minimum number of points for a cell to be
        counted.
        Default: ``8``

    Returns:
        cell_vecs_and_cell_coords: A 2 element tuple of: ``(cell_vecs, cell_coords)``.
-        - cell_vecs (torch.tensor): A tensor containing all the direction vectors
+        - cell_vecs (torch.Tensor): A tensor containing all the direction vectors
            that were created, stacked along the batch dimension with a shape of:
            [n_vecs, n_channels].
        - cell_coords (list of Tuple[int, int, int]): List of coordinates for grid
            spatial positions of each direction vector, and the number of samples
            used for the cell. The list for each cell is in the format of:
            [x_coord, y_coord, number_of_samples_used].
    """

    assert raw_samples.dim() == 2
@@ -188,9 +188,9 @@ def create_atlas_vectors(

    Args:

-        xy_grid (torch.tensor): The xy coordinate grid activation samples, with a shape
+        xy_grid (torch.Tensor): The xy coordinate grid activation samples, with a shape
            of: [n_points, 2].
-        raw_activations (torch.tensor): Raw unmodified activation samples, with a shape
+        raw_activations (torch.Tensor): Raw unmodified activation samples, with a shape
            of: [n_samples, n_channels].
        grid_size (Tuple[int, int]): The size of grid cells to use. The ``grid_size``
            variable should be in the format of: [width, height].
@@ -207,7 +207,7 @@ def create_atlas_vectors(

    Returns:
        grid_vecs_and_cell_coords: A 2 element tuple of: ``(grid_vecs, cell_coords)``.
-        - grid_vecs (torch.tensor): A tensor containing all the direction vectors
+        - grid_vecs (torch.Tensor): A tensor containing all the direction vectors
            that were created, stacked along the batch dimension, with a shape
            of: [n_vecs, n_channels].
        - cell_coords (list of Tuple[int, int, int]): List of coordinates for grid
@@ -242,7 +242,7 @@ def create_atlas(

    Args:

-        cells (list of torch.tensor or torch.tensor): A list or stack of NCHW image
+        cells (list of torch.Tensor or torch.Tensor): A list or stack of NCHW image
            tensors made with atlas direction vectors.
        coords (list of Tuple[int, int] or list of Tuple[int, int, int]): A list of
            coordinates to use for the atlas image tensors. The first 2 values in each
            coordinate list should be: [x, y, ...].
        grid_size (Tuple[int, int]): The size of grid cells to use. The ``grid_size``
            variable should be in the format of: [width, height].
        base_tensor (Callable, optional): What to use for the atlas base tensor. Basic
            choices are: ``torch.ones`` or ``torch.zeros``.
            Default: ``torch.ones``

    Returns:
-        atlas_canvas (torch.tensor): The full activation atlas visualization, with a
+        atlas_canvas (torch.Tensor): The full activation atlas visualization, with a
            shape of NCHW.
    """
diff --git a/captum/optim/_utils/reducer.py b/captum/optim/_utils/reducer.py
index 89ed96315c..c1aed81c53 100644
--- a/captum/optim/_utils/reducer.py
+++ b/captum/optim/_utils/reducer.py
@@ -125,7 +125,7 @@ def fit_transform(

        Args:

-            tensor (torch.Tensor): A tensor to perform dimensionality reduction on.
+            x (torch.Tensor): A tensor to perform dimensionality reduction on.
            swap_2nd_and_last_dims (bool, optional): If ``True``, input channels are
                expected to be in the second dimension unless the input tensor has a
                shape of CHW. When reducing the channel dimension, this parameter
                should be set to ``True`` unless you are already using the channels
                last format.
                Default: ``True``.

        Returns:
-            tensor: A tensor with one of it's dimensions reduced.
+            x (torch.Tensor): A tensor with one of its dimensions reduced.
        """

        if x.dim() == 3 and swap_2nd_and_last_dims:
diff --git a/captum/optim/models/_common.py b/captum/optim/models/_common.py
index f739e63741..49a1154fe7 100644
--- a/captum/optim/models/_common.py
+++ b/captum/optim/models/_common.py
@@ -215,7 +215,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Args:

-            x (torch.tensor): The input tensor to apply 2D convolution to.
+            x (torch.Tensor): The input tensor to apply 2D convolution to.

        Returns:
            x (torch.Tensor): The input tensor after the 2D convolution was applied.
@@ -283,6 +283,7 @@ class SkipLayer(torch.nn.Module):
    https://pytorch.org/docs/stable/generated/torch.nn.Identity.html

    Args:
+        args (Any): Any argument. Arguments will be safely ignored.
        kwargs (Any): Any keyword argument. Arguments will be safely ignored.
    """
@@ -295,9 +296,11 @@ def forward(
    ) -> Union[torch.Tensor, Tuple[torch.Tensor]]:
        """
        Args:
+            x (torch.Tensor or tuple of torch.Tensor): The input tensor or tensors.
            args (Any): Any argument. Arguments will be safely ignored.
            kwargs (Any): Any keyword argument.
Arguments will be safely ignored. + Returns: x (torch.Tensor or tuple of torch.Tensor): The unmodified input tensor or tensors. @@ -314,7 +317,9 @@ def skip_layers( with layers that do nothing. This is useful for removing the nonlinear ReLU layers when creating expanded weights. + Args: + model (nn.Module): A PyTorch model instance. layers (nn.Module or list of nn.Module): The layer class type to replace in the model. From 61e18e42926b5077eeac45b7715765961ad6a0e3 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Sat, 16 Jul 2022 09:30:30 -0600 Subject: [PATCH 15/30] Add missing return docs to get_model_layers --- captum/optim/models/_common.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/captum/optim/models/_common.py b/captum/optim/models/_common.py index 49a1154fe7..68ff3942b1 100644 --- a/captum/optim/models/_common.py +++ b/captum/optim/models/_common.py @@ -16,6 +16,9 @@ def get_model_layers(model: nn.Module) -> List[str]: Args: model (nn.Module): A PyTorch model or module instance to collect layers from. + + Returns: + model_layers (list of str): A list of hookable layers in the model. """ layers = [] From 6dbfc3d5e42d6a03279cae3c5bee6aabd9e3baf0 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Sat, 16 Jul 2022 10:56:14 -0600 Subject: [PATCH 16/30] Fix doc parameter type formatting --- captum/optim/_param/image/transforms.py | 6 +++--- captum/optim/_utils/circuits.py | 4 ++-- captum/optim/_utils/image/atlas.py | 4 ++-- captum/optim/_utils/image/common.py | 4 ++-- captum/optim/_utils/reducer.py | 9 ++++----- 5 files changed, 13 insertions(+), 14 deletions(-) diff --git a/captum/optim/_param/image/transforms.py b/captum/optim/_param/image/transforms.py index 2356a7f17e..4e548fb8fa 100644 --- a/captum/optim/_param/image/transforms.py +++ b/captum/optim/_param/image/transforms.py @@ -314,7 +314,7 @@ def __init__( This parameter is only valid when ``pixels_from_edges`` is ``False``. Default: ``False`` - padding_mode (optional, str): One of ``"constant"``, ``"reflect"``, + padding_mode (str, optional): One of ``"constant"``, ``"reflect"``, ``"replicate"``, or ``"circular"``. This parameter is only used if the crop size is larger than the image size. Default: ``"constant"`` @@ -392,7 +392,7 @@ def center_crop( This parameter is only valid when ``pixels_from_edges`` is ``False``. Default: ``False`` - padding_mode (optional, str): One of ``"constant"``, ``"reflect"``, + padding_mode (str, optional): One of ``"constant"``, ``"reflect"``, ``"replicate"``, or ``"circular"``. This parameter is only used if the crop size is larger than the image size. Default: ``"constant"`` @@ -1291,7 +1291,7 @@ def __init__( padding_transform (nn.Module, optional): A padding module instance. No padding will be applied before transforms if set to ``None``. Default: ``nn.ConstantPad2d(2, value=0.5)`` - translate (int or list of int, optional): The max horizontal and vertical + translate (int or List[int], optional): The max horizontal and vertical translation to use for each :class:`.RandomSpatialJitter` transform. Default: ``[4] * 10`` scale (float, sequence, or torch.distribution, optional): Sequence of diff --git a/captum/optim/_utils/circuits.py b/captum/optim/_utils/circuits.py index ac5d04070e..ecfe078361 100644 --- a/captum/optim/_utils/circuits.py +++ b/captum/optim/_utils/circuits.py @@ -48,10 +48,10 @@ def extract_expanded_weights( specified for ``target2``. target2 (nn.Module): The end target layer. Must be above the layer specified for ``target1``. 
- crop_shape (int or tuple of ints, optional): Specify the exact output size + crop_shape (int or Tuple[int], optional): Specify the exact output size to crop out. Set to ``None`` for no cropping. Default: ``None`` - model_input (tensor or tuple of tensors, optional): The input to use + model_input (torch.Tensor or Tuple[torch.Tensor], optional): The input to use with the specified model. Default: ``torch.zeros(1, 3, 224, 224)`` crop_func (Callable, optional): Specify a function to crop away the padding diff --git a/captum/optim/_utils/image/atlas.py b/captum/optim/_utils/image/atlas.py index 23aa00496a..36fac7e0e9 100644 --- a/captum/optim/_utils/image/atlas.py +++ b/captum/optim/_utils/image/atlas.py @@ -242,9 +242,9 @@ def create_atlas( Args: - cells (list of torch.Tensor or torch.Tensor): A list or stack of NCHW image + cells (List[torch.Tensor] or torch.Tensor): A list or stack of NCHW image tensors made with atlas direction vectors. - coords (list of Tuple[int, int] or list of Tuple[int, int, int]): A list of + coords (List[Tuple[int, int]] or List[Tuple[int, int, int]]): A list of coordinates to use for the atlas image tensors. The first 2 values in each coordinate list should be: [x, y, ...]. grid_size (Tuple[int, int]): The size of grid cells to use. The ``grid_size`` diff --git a/captum/optim/_utils/image/common.py b/captum/optim/_utils/image/common.py index 80bc7b831a..7262432066 100644 --- a/captum/optim/_utils/image/common.py +++ b/captum/optim/_utils/image/common.py @@ -25,7 +25,7 @@ def make_grid_image( Args: - tiles (torch.Tensor or list of torch.Tensor): A stack of NCHW image tensors or + tiles (torch.Tensor or List[torch.Tensor]): A stack of NCHW image tensors or a list of NCHW image tensors to create a grid from. images_per_row (int, optional): The number of rows to use for the grid image. Default: ``4`` @@ -342,7 +342,7 @@ def weights_to_heatmap_2d( Args: weight (torch.Tensor): A 2d tensor to create the heatmap from. - colors (list of str, optional): A list of 5 strings containing hex triplet + colors (List[str], optional): A list of 5 strings containing hex triplet (six digit), three-byte hexadecimal color values to use for coloring the heatmap. Default: ``["0571b0", "92c5de", "f7f7f7", "f4a582", "ca0020"]`` diff --git a/captum/optim/_utils/reducer.py b/captum/optim/_utils/reducer.py index c1aed81c53..0f39071ff0 100644 --- a/captum/optim/_utils/reducer.py +++ b/captum/optim/_utils/reducer.py @@ -44,12 +44,12 @@ class ChannelReducer: n_components (int, optional): The number of channels to reduce the target dimension to. - reduction_alg (str or callable, optional): The desired dimensionality + reduction_alg (str or Callable, optional): The desired dimensionality reduction algorithm to use. The default ``reduction_alg`` is set to NMF from sklearn, which requires users to put inputs on CPU before passing them to :func:`ChannelReducer.fit_transform`. Default: ``NMF`` - **kwargs (any, optional): Arbitrary keyword arguments used by the specified + **kwargs (Any, optional): Arbitrary keyword arguments used by the specified reduction_alg. """ @@ -77,7 +77,7 @@ def _get_reduction_algo_instance(self, name: str) -> Union[None, Callable]: name (str): The name of the reduction_alg to search for. Returns: - reduction_alg (callable or None): The ``reduction_alg`` if it was found, + reduction_alg (Callable or None): The ``reduction_alg`` if it was found, otherwise None. 
""" if hasattr(sklearn.decomposition, name): @@ -98,8 +98,7 @@ def _apply_flat(cls, func: Callable, x: torch.Tensor) -> torch.Tensor: Args: - cls (ChannelReducer): The ``ChannelReducer`` class being used. - func (callable): The ``reduction_alg`` transform function being used. + func (Callable): The ``reduction_alg`` transform function being used. x (torch.Tensor): The tensor being transformed and reduced. Returns: From 82ca2429c702a23fcf64b9aa3f7ae7cca5c90eb1 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Sat, 16 Jul 2022 14:14:37 -0600 Subject: [PATCH 17/30] Fix duplicated circuits type hint --- captum/optim/_utils/circuits.py | 8 ++++---- captum/optim/models/_common.py | 3 --- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/captum/optim/_utils/circuits.py b/captum/optim/_utils/circuits.py index ecfe078361..9074cc1adb 100644 --- a/captum/optim/_utils/circuits.py +++ b/captum/optim/_utils/circuits.py @@ -11,7 +11,7 @@ def extract_expanded_weights( model: nn.Module, target1: nn.Module, target2: nn.Module, - crop_shape: Optional[Union[Tuple[int, int], IntSeqOrIntType]] = None, + crop_shape: Optional[IntSeqOrIntType] = None, model_input: TupleOfTensorsOrTensorType = torch.zeros(1, 3, 224, 224), crop_func: Optional[Callable] = center_crop, ) -> torch.Tensor: @@ -48,10 +48,10 @@ def extract_expanded_weights( specified for ``target2``. target2 (nn.Module): The end target layer. Must be above the layer specified for ``target1``. - crop_shape (int or Tuple[int], optional): Specify the exact output size - to crop out. Set to ``None`` for no cropping. + crop_shape (int or List[int] or tuple of int, optional): Specify the exact + output size to crop out. Set to ``None`` for no cropping. Default: ``None`` - model_input (torch.Tensor or Tuple[torch.Tensor], optional): The input to use + model_input (torch.Tensor or tuple of torch.Tensor, optional): The input to use with the specified model. Default: ``torch.zeros(1, 3, 224, 224)`` crop_func (Callable, optional): Specify a function to crop away the padding diff --git a/captum/optim/models/_common.py b/captum/optim/models/_common.py index 68ff3942b1..49a1154fe7 100644 --- a/captum/optim/models/_common.py +++ b/captum/optim/models/_common.py @@ -16,9 +16,6 @@ def get_model_layers(model: nn.Module) -> List[str]: Args: model (nn.Module): A PyTorch model or module instance to collect layers from. - - Returns: - model_layers (list of str): A list of hookable layers in the model. 
""" layers = [] From 95ed9f9b16735871cba86c34740e560d8e64e7f0 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Sat, 16 Jul 2022 14:19:27 -0600 Subject: [PATCH 18/30] Remove unused type hints --- captum/optim/_utils/circuits.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/captum/optim/_utils/circuits.py b/captum/optim/_utils/circuits.py index 9074cc1adb..56362cb6bc 100644 --- a/captum/optim/_utils/circuits.py +++ b/captum/optim/_utils/circuits.py @@ -1,4 +1,4 @@ -from typing import Callable, Optional, Tuple, Union +from typing import Callable, Optional import torch import torch.nn as nn From 8e77eb70c22dfa97b6286462022595bdb1a21276 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Sat, 16 Jul 2022 14:20:17 -0600 Subject: [PATCH 19/30] Doc fix --- captum/optim/_utils/circuits.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/captum/optim/_utils/circuits.py b/captum/optim/_utils/circuits.py index 56362cb6bc..14b85e2f8b 100644 --- a/captum/optim/_utils/circuits.py +++ b/captum/optim/_utils/circuits.py @@ -59,7 +59,7 @@ def extract_expanded_weights( Default: ``center_crop`` Returns: - *tensor* (torch.Tensor): A tensor containing the expanded weights in the form + tensor (torch.Tensor): A tensor containing the expanded weights in the form of: (target2 output channels, target1 output channels, height, width) """ if isinstance(model_input, torch.Tensor): From d6f0defcecab934bfd592233c032eb357948b5f2 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Sun, 17 Jul 2022 12:17:09 -0600 Subject: [PATCH 20/30] Add function aliases to docs --- captum/optim/_utils/image/common.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/captum/optim/_utils/image/common.py b/captum/optim/_utils/image/common.py index 7262432066..97ac88c07e 100644 --- a/captum/optim/_utils/image/common.py +++ b/captum/optim/_utils/image/common.py @@ -79,6 +79,8 @@ def show( """ Show CHW & NCHW tensors as an image. + Alias: ``captum.optim.images.show`` + Args: x (torch.Tensor): The tensor you want to display as an image. @@ -130,6 +132,8 @@ def save_tensor_as_image( """ Save RGB & RGBA image tensors with a shape of CHW or NCHW as images. + Alias: ``captum.optim.images.save_tensor_as_image`` + Args: x (torch.Tensor): The tensor you want to save as an image. From 910c38d1f74f051047edc5e8b51ac48601258e04 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Mon, 18 Jul 2022 15:07:36 -0600 Subject: [PATCH 21/30] Improve docstring type formatting --- captum/optim/_param/image/transforms.py | 4 +- captum/optim/_utils/circuits.py | 4 +- captum/optim/_utils/image/atlas.py | 49 ++++++++++++---------- captum/optim/_utils/image/common.py | 10 ++--- captum/optim/_utils/reducer.py | 6 +-- captum/optim/models/_image/inception_v1.py | 10 ++--- 6 files changed, 45 insertions(+), 38 deletions(-) diff --git a/captum/optim/_param/image/transforms.py b/captum/optim/_param/image/transforms.py index 4e548fb8fa..332d700a9f 100644 --- a/captum/optim/_param/image/transforms.py +++ b/captum/optim/_param/image/transforms.py @@ -314,7 +314,7 @@ def __init__( This parameter is only valid when ``pixels_from_edges`` is ``False``. Default: ``False`` - padding_mode (str, optional): One of ``"constant"``, ``"reflect"``, + padding_mode (str, optional): One of: ``"constant"``, ``"reflect"``, ``"replicate"``, or ``"circular"``. This parameter is only used if the crop size is larger than the image size. Default: ``"constant"`` @@ -392,7 +392,7 @@ def center_crop( This parameter is only valid when ``pixels_from_edges`` is ``False``. 
Default: ``False`` - padding_mode (str, optional): One of ``"constant"``, ``"reflect"``, + padding_mode (str, optional): One of: ``"constant"``, ``"reflect"``, ``"replicate"``, or ``"circular"``. This parameter is only used if the crop size is larger than the image size. Default: ``"constant"`` diff --git a/captum/optim/_utils/circuits.py b/captum/optim/_utils/circuits.py index 14b85e2f8b..56211aa6a5 100644 --- a/captum/optim/_utils/circuits.py +++ b/captum/optim/_utils/circuits.py @@ -48,13 +48,13 @@ def extract_expanded_weights( specified for ``target2``. target2 (nn.Module): The end target layer. Must be above the layer specified for ``target1``. - crop_shape (int or List[int] or tuple of int, optional): Specify the exact + crop_shape (int, list of int, or tuple of int, optional): Specify the exact output size to crop out. Set to ``None`` for no cropping. Default: ``None`` model_input (torch.Tensor or tuple of torch.Tensor, optional): The input to use with the specified model. Default: ``torch.zeros(1, 3, 224, 224)`` - crop_func (Callable, optional): Specify a function to crop away the padding + crop_func (callable, optional): Specify a function to crop away the padding from the output weights. Default: ``center_crop`` diff --git a/captum/optim/_utils/image/atlas.py b/captum/optim/_utils/image/atlas.py index 36fac7e0e9..45753d0d33 100644 --- a/captum/optim/_utils/image/atlas.py +++ b/captum/optim/_utils/image/atlas.py @@ -84,11 +84,14 @@ def calc_grid_indices( xy_grid (torch.Tensor): The xy coordinate grid activation samples, with a shape of: [n_points, 2]. - grid_size (Tuple[int, int]): The grid_size of grid cells to use. The - ``grid_size`` variable should be in the format of: [width, height]. - x_extent (Tuple[float, float], optional): The x axis range to use. + grid_size (tuple of int): The number of grid cells to use across the height + and width dimensions. The ``grid_size`` variable should be in the format + of: [width, height]. + x_extent (tuple of float, optional): The x axis range to use, in the format + of: (min, max). Default: ``(0.0, 1.0)`` - y_extent (Tuple[float, float], optional): The y axis range to use. + y_extent (tuple of float, optional): The y axis range to use, in the format + of: (min, max). Default: ``(0.0, 1.0)`` Returns: @@ -139,8 +142,9 @@ def compute_avg_cell_samples( 0 to n_indices. raw_samples (torch.Tensor): Raw unmodified activation or attribution samples, with a shape of: [n_samples, n_channels]. - grid_size (Tuple[int, int]): The size of grid cells to use. The ``grid_size`` - variable should be in the format of: [width, height]. + grid_size (tuple of int): The number of grid cells to use across the height + and width dimensions. The ``grid_size`` variable should be in the format + of: [width, height]. min_density (int, optional): The minimum number of points for a cell to be counted. Default: ``8`` @@ -150,7 +154,7 @@ def compute_avg_cell_samples( - cell_vecs (torch.Tensor): A tensor containing all the direction vectors that were created, stacked along the batch dimension with a shape of: [n_vecs, n_channels]. - - cell_coords (list of Tuple[int, int, int]): List of coordinates for grid + - cell_coords (list of tuple of int): List of coordinates for grid spatial positions of each direction vector, and the number of samples used for the cell. The list for each cell is in the format of: [x_coord, y_coord, number_of_samples_used]. @@ -192,17 +196,20 @@ def create_atlas_vectors( of: [n_points, 2]. 
raw_activations (torch.Tensor): Raw unmodified activation samples, with a shape of: [n_samples, n_channels]. - grid_size (Tuple[int, int]): The size of grid cells to use. The ``grid_size`` - variable should be in the format of: [width, height]. + grid_size (tuple of int): The number of grid cells to use across the height + and width dimensions. The ``grid_size`` variable should be in the format + of: [width, height]. min_density (int, optional): The minimum number of points for a cell to be counted. Default: ``8`` normalize (bool, optional): Whether or not to remove outliers from an xy coordinate grid tensor, and rescale it to [0, 1]. Default: ``True`` - x_extent (Tuple[float, float], optional): The x axis range to use. + x_extent (tuple of float, optional): The x axis range to use, in the format + of: (min, max). Default: ``(0.0, 1.0)`` - y_extent (Tuple[float, float], optional): The y axis range to use. + y_extent (tuple of float, optional): The y axis range to use, in the format + of: (min, max). Default: ``(0.0, 1.0)`` Returns: @@ -210,7 +217,7 @@ def create_atlas_vectors( - grid_vecs (torch.Tensor): A tensor containing all the direction vectors that were created, stacked along the batch dimension, with a shape of: [n_vecs, n_channels]. - - cell_coords (list of Tuple[int, int, int]): List of coordinates for grid + - cell_coords (list of tuple of int): List of coordinates for grid spatial positions of each direction vector, and the number of samples used for the cell. The list for each cell is in the format of: [x_coord, y_coord, number_of_samples_used]. @@ -242,16 +249,16 @@ def create_atlas( Args: - cells (List[torch.Tensor] or torch.Tensor): A list or stack of NCHW image + cells (list of torch.Tensor or torch.Tensor): A list or stack of NCHW image tensors made with atlas direction vectors. - coords (List[Tuple[int, int]] or List[Tuple[int, int, int]]): A list of - coordinates to use for the atlas image tensors. The first 2 values in each - coordinate list should be: [x, y, ...]. - grid_size (Tuple[int, int]): The size of grid cells to use. The ``grid_size`` - variable should be in the format of: [width, height]. - base_tensor (Callable, optional): What to use for the atlas base tensor. Basic - choices are: ``torch.ones`` or ``torch.zeros``. - Default: ``torch.ones`` + coords (list of tuple of int): A list of coordinates to use for the atlas image + tensors. The first 2 values in each coordinate list should be: [x, y, ...]. + grid_size (tuple of int): The number of grid cells to use across the height + and width dimensions. The ``grid_size`` variable should be in the format + of: [width, height]. + base_tensor (callable, optional): What to use for the atlas base tensor. Basic + choices are: :class:`torch.ones` or :class:`torch.zeros`. + Default: :class:`torch.ones` Returns: atlas_canvas (torch.Tensor): The full activation atlas visualization, with a diff --git a/captum/optim/_utils/image/common.py b/captum/optim/_utils/image/common.py index 97ac88c07e..a7ae89f405 100644 --- a/captum/optim/_utils/image/common.py +++ b/captum/optim/_utils/image/common.py @@ -25,7 +25,7 @@ def make_grid_image( Args: - tiles (torch.Tensor or List[torch.Tensor]): A stack of NCHW image tensors or + tiles (torch.Tensor or list of torch.Tensor): A stack of NCHW image tensors or a list of NCHW image tensors to create a grid from. images_per_row (int, optional): The number of rows to use for the grid image. 
            Default: ``4``
@@ -84,9 +84,9 @@ def show(
        x (torch.Tensor): The tensor you want to display as an image.
-        figsize (Tuple[int, int], optional): height & width to use
-            for displaying the image figure.
-            Default: ``None``
+        figsize (tuple of int, optional): The height & width to use for displaying the
+            ``ImageTensor`` figure, in the format of: (height, width).
+            Default: ``None``
        scale (float, optional): Value to multiply the input tensor by so that
            its value range is [0-255] for display.
            Default: ``255.0``
@@ -346,7 +346,7 @@ def weights_to_heatmap_2d(
    Args:

        weight (torch.Tensor): A 2d tensor to create the heatmap from.
-        colors (List[str], optional): A list of 5 strings containing hex triplet
+        colors (list of str, optional): A list of 5 strings containing hex triplet
            (six digit), three-byte hexadecimal color values to use for coloring
            the heatmap.
            Default: ``["0571b0", "92c5de", "f7f7f7", "f4a582", "ca0020"]``
diff --git a/captum/optim/_utils/reducer.py b/captum/optim/_utils/reducer.py
index 0f39071ff0..aa9a22d920 100644
--- a/captum/optim/_utils/reducer.py
+++ b/captum/optim/_utils/reducer.py
@@ -44,12 +44,12 @@ class ChannelReducer:

        n_components (int, optional): The number of channels to reduce the target
            dimension to.
-        reduction_alg (str or callable, optional): The desired dimensionality
+        reduction_alg (str or Callable, optional): The desired dimensionality
            reduction algorithm to use. The default ``reduction_alg`` is set to NMF
            from sklearn, which requires users to put inputs on CPU before passing them
            to :func:`ChannelReducer.fit_transform`.
            Default: ``NMF``
-        **kwargs (any, optional): Arbitrary keyword arguments used by the specified
+        **kwargs (Any, optional): Arbitrary keyword arguments used by the specified
            reduction_alg.
    """
-            reduction_alg (callable or None): The ``reduction_alg`` if it was found,
+            reduction_alg (Callable or None): The ``reduction_alg`` if it was found,
                otherwise None.
-            func (callable): The ``reduction_alg`` transform function being used.
+            func (Callable): The ``reduction_alg`` transform function being used.
            x (torch.Tensor): The tensor being transformed and reduced.
diff --git a/captum/optim/models/_image/inception_v1.py b/captum/optim/models/_image/inception_v1.py
index 52818573f9..e0660d6f93 100644
--- a/captum/optim/models/_image/inception_v1.py
+++ b/captum/optim/models/_image/inception_v1.py
@@ -33,7 +33,7 @@ def googlenet(
        model_path (str, optional): Optional path for InceptionV1 model file.
            Default: ``None``
        replace_relus_with_redirectedrelu (bool, optional): If ``True``, return
-            pretrained model with Redirected ReLU in place of ReLU layers.
+            pretrained model with :class:`.RedirectedReLU` in place of ReLU layers.
            Default: ``True`` when ``pretrained`` is ``True``, otherwise ``False``
        use_linear_modules_only (bool, optional): If ``True``, return pretrained
            model with all nonlinear layers replaced with linear equivalents.
            Default: ``False``
@@ -101,7 +101,7 @@ def __init__(
        Args:

        replace_relus_with_redirectedrelu (bool, optional): If ``True``, return
-            pretrained model with Redirected ReLU in place of ReLU layers.
+            pretrained model with :class:`.RedirectedReLU` in place of ReLU layers.
            Default: ``False``
        use_linear_modules_only (bool, optional): If ``True``, return pretrained
            model with all nonlinear layers replaced with linear equivalents.
@@ -307,10 +307,10 @@ def __init__(
            in the pool branch.
activ (type of nn.Module, optional): The nn.Module class type to use for activation layers. - Default: ``nn.ReLU`` + Default: :class:`torch.nn.ReLU` p_layer (type of nn.Module, optional): The nn.Module class type to use for pooling layers. - Default: ``nn.MaxPool2d`` + Default: :class:`torch.nn.MaxPool2d` """ super().__init__() self.conv_1x1 = nn.Conv2d( @@ -410,7 +410,7 @@ def __init__( Default: ``1008`` activ (type of nn.Module, optional): The nn.Module class type to use for activation layers. - Default: ``nn.ReLU`` + Default: :class:`nn.ReLU` """ super().__init__() self.avg_pool = nn.AdaptiveAvgPool2d((4, 4)) From 876d737c82cad6e2d54ab1f6311de76520815d58 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Mon, 18 Jul 2022 17:48:30 -0600 Subject: [PATCH 22/30] :class: -> :func: --- captum/optim/_utils/image/atlas.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/captum/optim/_utils/image/atlas.py b/captum/optim/_utils/image/atlas.py index 45753d0d33..bcb35028df 100644 --- a/captum/optim/_utils/image/atlas.py +++ b/captum/optim/_utils/image/atlas.py @@ -257,8 +257,8 @@ def create_atlas( and width dimensions. The ``grid_size`` variable should be in the format of: [width, height]. base_tensor (callable, optional): What to use for the atlas base tensor. Basic - choices are: :class:`torch.ones` or :class:`torch.zeros`. - Default: :class:`torch.ones` + choices are: :func:`torch.ones` or :func:`torch.zeros`. + Default: :func:`torch.ones` Returns: atlas_canvas (torch.Tensor): The full activation atlas visualization, with a From 8cbca6d3be63e114d01a4a049e5c05c607b953cd Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Tue, 19 Jul 2022 13:55:33 -0600 Subject: [PATCH 23/30] Fix accidental indent --- captum/optim/_utils/image/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/captum/optim/_utils/image/common.py b/captum/optim/_utils/image/common.py index a7ae89f405..40a7f075b5 100644 --- a/captum/optim/_utils/image/common.py +++ b/captum/optim/_utils/image/common.py @@ -86,7 +86,7 @@ def show( x (torch.Tensor): The tensor you want to display as an image. figsize (tuple of int, optional): The height & width to use for displaying the ``ImageTensor`` figure, in the format of: (height, width). - Default: ``None`` + Default: ``None`` scale (float, optional): Value to multiply the input tensor by so that it's value range is [0-255] for display. Default: ``255.0`` From 485481d920baaf2fef31d78055f0b6c0f9663d22 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Wed, 20 Jul 2022 14:09:47 -0600 Subject: [PATCH 24/30] Improve doc types for ActivationFetcher --- captum/optim/_core/output_hook.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/captum/optim/_core/output_hook.py b/captum/optim/_core/output_hook.py index 85438fdfc6..90e91eb66c 100644 --- a/captum/optim/_core/output_hook.py +++ b/captum/optim/_core/output_hook.py @@ -113,7 +113,7 @@ def __call__(self, input_t: TupleOfTensorsOrTensorType) -> ModuleOutputMapping: """ Args: - input_t (tensor or tuple of tensors, optional): The input to use + input_t (torch.Tensor or tuple of torch.Tensor, optional): The input to use with the specified model. 
Returns: From 0fa87de379d495f519760aeb905215fa45079dd0 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Thu, 21 Jul 2022 11:12:36 -0600 Subject: [PATCH 25/30] Add hyperlink ref to circuits argument --- captum/optim/_utils/circuits.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/captum/optim/_utils/circuits.py b/captum/optim/_utils/circuits.py index 56211aa6a5..ff79254930 100644 --- a/captum/optim/_utils/circuits.py +++ b/captum/optim/_utils/circuits.py @@ -56,7 +56,7 @@ def extract_expanded_weights( Default: ``torch.zeros(1, 3, 224, 224)`` crop_func (callable, optional): Specify a function to crop away the padding from the output weights. - Default: ``center_crop`` + Default: :func:`.center_crop` Returns: tensor (torch.Tensor): A tensor containing the expanded weights in the form From 01c59d242e392d25727e5ad1f6b016e6508a57dd Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Fri, 22 Jul 2022 09:37:27 -0600 Subject: [PATCH 26/30] Improve reducer docs --- captum/optim/_utils/reducer.py | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/captum/optim/_utils/reducer.py b/captum/optim/_utils/reducer.py index aa9a22d920..7733c57a70 100644 --- a/captum/optim/_utils/reducer.py +++ b/captum/optim/_utils/reducer.py @@ -39,18 +39,24 @@ class ChannelReducer: See here for more information: https://distill.pub/2018/building-blocks/ + Some of the possible algorithm choices: + + * https://scikit-learn.org/stable/modules/classes.html#module-sklearn.decomposition + * https://scikit-learn.org/stable/modules/classes.html#module-sklearn.manifold + * https://umap-learn.readthedocs.io/en/latest/ Args: n_components (int, optional): The number of channels to reduce the target dimension to. - reduction_alg (str or callable, optional): The desired dimensionality + reduction_alg (str or Callable, optional): The desired dimensionality reduction algorithm to use. The default ``reduction_alg`` is set to NMF from sklearn, which requires users to put inputs on CPU before passing them - to :func:`ChannelReducer.fit_transform`. + to :func:`ChannelReducer.fit_transform`. Name strings are only supported + for ``sklearn.decomposition`` & ``sklearn.manifold`` class names. Default: ``NMF`` **kwargs (Any, optional): Arbitrary keyword arguments used by the specified - reduction_alg. + ``reduction_alg``. """ def __init__( @@ -77,7 +83,7 @@ def _get_reduction_algo_instance(self, name: str) -> Union[None, Callable]: name (str): The name of the reduction_alg to search for. Returns: - reduction_alg (callable or None): The ``reduction_alg`` if it was found, + reduction_alg (Callable or None): The ``reduction_alg`` if it was found, otherwise None. """ if hasattr(sklearn.decomposition, name): @@ -98,7 +104,7 @@ def _apply_flat(cls, func: Callable, x: torch.Tensor) -> torch.Tensor: Args: - func (callable): The ``reduction_alg`` transform function being used. + func (Callable): The ``reduction_alg`` transform function being used. x (torch.Tensor): The tensor being transformed and reduced. Returns: @@ -186,6 +192,9 @@ def posneg(x: torch.Tensor, dim: int = 0) -> torch.Tensor: Hack that makes a matrix positive by concatination in order to simulate one-sided NMF with regular NMF. + Voss, et al., "Visualizing Weights", Distill, 2021. + See: https://distill.pub/2020/circuits/visualizing-weights/ + Args: x (torch.Tensor): A tensor to make positive. 
From d45634755d5e2f858182dbc0a7df9b7ea637e347 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Wed, 27 Jul 2022 09:38:42 -0600 Subject: [PATCH 27/30] Fix spelling --- captum/optim/_core/output_hook.py | 2 +- captum/optim/_param/image/transforms.py | 2 +- captum/optim/_utils/image/atlas.py | 2 +- captum/optim/_utils/reducer.py | 6 +++--- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/captum/optim/_core/output_hook.py b/captum/optim/_core/output_hook.py index 90e91eb66c..d7bd8affd4 100644 --- a/captum/optim/_core/output_hook.py +++ b/captum/optim/_core/output_hook.py @@ -117,7 +117,7 @@ def __call__(self, input_t: TupleOfTensorsOrTensorType) -> ModuleOutputMapping: with the specified model. Returns: - activations_dict (ModuleOutputMapping): An dict containing the collected + activations_dict (ModuleOutputMapping): A dict containing the collected activations. The keys for the returned dictionary are the target layers. """ diff --git a/captum/optim/_param/image/transforms.py b/captum/optim/_param/image/transforms.py index 332d700a9f..f8e399026e 100644 --- a/captum/optim/_param/image/transforms.py +++ b/captum/optim/_param/image/transforms.py @@ -1009,7 +1009,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: class GaussianSmoothing(nn.Module): """ Apply gaussian smoothing on a - 1d, 2d or 3d tensor. Filtering is performed seperately for each channel + 1d, 2d or 3d tensor. Filtering is performed separately for each channel in the input using a depthwise convolution. """ diff --git a/captum/optim/_utils/image/atlas.py b/captum/optim/_utils/image/atlas.py index bcb35028df..016b0c67fc 100644 --- a/captum/optim/_utils/image/atlas.py +++ b/captum/optim/_utils/image/atlas.py @@ -276,7 +276,7 @@ def create_atlas( # cell_b -> number of images # cell_c -> image channel - # cell_h -> image hight + # cell_h -> image height # cell_w -> image width cell_b, cell_c, cell_h, cell_w = cells[0].shape atlas_canvas = base_tensor( diff --git a/captum/optim/_utils/reducer.py b/captum/optim/_utils/reducer.py index 7733c57a70..59b8813147 100644 --- a/captum/optim/_utils/reducer.py +++ b/captum/optim/_utils/reducer.py @@ -16,7 +16,7 @@ class ChannelReducer: """ - The ChannelReducer class is a wrapper for PyTorch and NumPy based dimensonality + The ChannelReducer class is a wrapper for PyTorch and NumPy based dimensionality reduction algorithms, like those from ``sklearn.decomposition`` (ex: NMF, PCA), ``sklearn.manifold`` (ex: TSNE), UMAP, and other libraries. This class handles things like reshaping, algorithm search by name (for scikit-learn only), and @@ -189,7 +189,7 @@ def __dir__(self) -> List: def posneg(x: torch.Tensor, dim: int = 0) -> torch.Tensor: """ - Hack that makes a matrix positive by concatination in order to simulate one-sided + Hack that makes a matrix positive by concatenation in order to simulate one-sided NMF with regular NMF. Voss, et al., "Visualizing Weights", Distill, 2021. @@ -198,7 +198,7 @@ def posneg(x: torch.Tensor, dim: int = 0) -> torch.Tensor: Args: x (torch.Tensor): A tensor to make positive. - dim (int, optional): The dimension to concatinate the two tensor halves at. + dim (int, optional): The dimension to concatenate the two tensor halves at. 
Default: ``0`` Returns: From e7042437f9ed2a2d30546aec2cc6729ce42c3cec Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Wed, 27 Jul 2022 14:30:19 -0600 Subject: [PATCH 28/30] Fix lint error --- captum/optim/_utils/reducer.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/captum/optim/_utils/reducer.py b/captum/optim/_utils/reducer.py index 59b8813147..85f15f7bf3 100644 --- a/captum/optim/_utils/reducer.py +++ b/captum/optim/_utils/reducer.py @@ -41,9 +41,9 @@ class ChannelReducer: Some of the possible algorithm choices: - * https://scikit-learn.org/stable/modules/classes.html#module-sklearn.decomposition - * https://scikit-learn.org/stable/modules/classes.html#module-sklearn.manifold - * https://umap-learn.readthedocs.io/en/latest/ + * https://scikit-learn.org/stable/modules/classes.html#module-sklearn.decomposition + * https://scikit-learn.org/stable/modules/classes.html#module-sklearn.manifold + * https://umap-learn.readthedocs.io/en/latest/ Args: From 330f0090e8f2fdd63d4d8bbc331bdda0bae5557a Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Thu, 28 Jul 2022 12:19:55 -0600 Subject: [PATCH 29/30] Fix docstring types --- captum/optim/_utils/circuits.py | 2 +- captum/optim/_utils/image/atlas.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/captum/optim/_utils/circuits.py b/captum/optim/_utils/circuits.py index ff79254930..d82d049fca 100644 --- a/captum/optim/_utils/circuits.py +++ b/captum/optim/_utils/circuits.py @@ -54,7 +54,7 @@ def extract_expanded_weights( model_input (torch.Tensor or tuple of torch.Tensor, optional): The input to use with the specified model. Default: ``torch.zeros(1, 3, 224, 224)`` - crop_func (callable, optional): Specify a function to crop away the padding + crop_func (Callable, optional): Specify a function to crop away the padding from the output weights. Default: :func:`.center_crop` diff --git a/captum/optim/_utils/image/atlas.py b/captum/optim/_utils/image/atlas.py index 016b0c67fc..3e616fd55c 100644 --- a/captum/optim/_utils/image/atlas.py +++ b/captum/optim/_utils/image/atlas.py @@ -256,7 +256,7 @@ def create_atlas( grid_size (tuple of int): The number of grid cells to use across the height and width dimensions. The ``grid_size`` variable should be in the format of: [width, height]. - base_tensor (callable, optional): What to use for the atlas base tensor. Basic + base_tensor (Callable, optional): What to use for the atlas base tensor. Basic choices are: :func:`torch.ones` or :func:`torch.zeros`. Default: :func:`torch.ones` From 1f0420bc6e9856926f314c586f39d4000e69ed1e Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Thu, 4 Aug 2022 08:40:51 -0600 Subject: [PATCH 30/30] Update transforms.py --- captum/optim/_param/image/transforms.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/captum/optim/_param/image/transforms.py b/captum/optim/_param/image/transforms.py index f8e399026e..e76500050d 100644 --- a/captum/optim/_param/image/transforms.py +++ b/captum/optim/_param/image/transforms.py @@ -93,6 +93,9 @@ class ToRGB(nn.Module): >>> x = torch.randn(1, 3, 224, 224) >>> decorrelated_colors = to_rgb(x, inverse=True) >>> recorrelated_colors = to_rgb(decorrelated_colors) + + .. note:: The ``ToRGB`` transform is included by default inside + :class:`.NaturalImage`. """ @staticmethod
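
As a complement to the new note, the documented ``ToRGB`` round trip can be
checked end to end with a few extra lines. This is a minimal sketch assuming
the public import path matches the file edited here and that the default
constructor arguments are acceptable; the inverse is only expected to hold up
to floating point error.

    import torch

    # Import path assumed from captum/optim/_param/image/transforms.py above.
    from captum.optim._param.image.transforms import ToRGB

    to_rgb = ToRGB()  # default color correlation matrix assumed

    x = torch.randn(1, 3, 224, 224)
    decorrelated = to_rgb(x, inverse=True)  # RGB -> decorrelated color space
    recorrelated = to_rgb(decorrelated)     # decorrelated -> RGB

    # The forward and inverse transforms should undo each other, so the
    # residual below should be near zero.
    print((x - recorrelated).abs().max())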