Skip to content

Commit 37555ab

Browse files
vivekmig authored and facebook-github-bot committed
Adding isort (#310)
Summary: This applies isort to alphabetize import ordering. We also add an isort check to CircleCI to ensure that import ordering is maintained in future PRs. This required adding an isort config that is compatible with black, which was found here: https://github.com/psf/black This also adds a type: ignore for an issue caused by merging Insights type fixes with type hints for common. Pull Request resolved: #310 Reviewed By: orionr Differential Revision: D20206324 Pulled By: vivekmig fbshipit-source-id: 48cce82d60a9a6f7384211e7aae2ffb4c0c85772
1 parent e0402b8 commit 37555ab

File tree

87 files changed

+458
-462
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

87 files changed

+458
-462
lines changed

.circleci/config.yml

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -38,6 +38,13 @@ commands:
3838
name: "Lint with black"
3939
command: black --check --diff .
4040

41+
isort:
42+
description: "Check import order with isort"
43+
steps:
44+
- run:
45+
name: "Check import order with isort"
46+
command: isort --check-only
47+
4148
mypy_check:
4249
description: "Static type checking with mypy"
4350
steps:
@@ -132,6 +139,7 @@ jobs:
132139
args: "-n -f"
133140
- lint_flake8
134141
- lint_black
142+
- isort
135143
- mypy_check
136144
- unit_tests
137145
- sphinx
@@ -145,6 +153,7 @@ jobs:
145153
args: "-f"
146154
- lint_flake8
147155
- lint_black
156+
- isort
148157
- mypy_check
149158
- unit_tests
150159
- sphinx
@@ -176,6 +185,7 @@ jobs:
176185
args: "-n"
177186
- lint_flake8
178187
- lint_black
188+
- isort
179189
- mypy_check
180190
- unit_tests
181191
- sphinx
@@ -200,6 +210,7 @@ jobs:
200210
args: "-n -f -d"
201211
- lint_flake8
202212
- lint_black
213+
- isort
203214
- unit_tests
204215
- sphinx
205216
- configure_github_bot

.isort.cfg

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
[settings]
2+
multi_line_output=3
3+
include_trailing_comma=True
4+
force_grid_wrap=0
5+
use_parentheses=True
6+
line_length=88
7+
known_third_party=pytext,torchvision,bs4

CONTRIBUTING.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -35,7 +35,7 @@ pip using `pip install isort`, and run locally by calling
3535
```bash
3636
isort
3737
```
38-
from the repository root. No additional configuration should be needed.
38+
from the repository root. Configuration for isort is located in .isort.cfg.
3939

4040
We feel strongly that having a consistent code style is extremely important, so
4141
CircleCI will fail on your PR if it does not adhere to the black or flake8 formatting style or isort import ordering.

captum/attr/__init__.py

Lines changed: 28 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -1,51 +1,52 @@
11
#!/usr/bin/env python3
22

3-
from ._core.integrated_gradients import IntegratedGradients # noqa
43
from ._core.deep_lift import DeepLift, DeepLiftShap # noqa
5-
from ._core.input_x_gradient import InputXGradient # noqa
6-
from ._core.saliency import Saliency # noqa
7-
from ._core.noise_tunnel import NoiseTunnel # noqa
8-
from ._core.gradient_shap import GradientShap # noqa
9-
from ._core.guided_backprop_deconvnet import GuidedBackprop, Deconvolution # noqa
10-
from ._core.guided_grad_cam import GuidedGradCam # noqa
114
from ._core.feature_ablation import FeatureAblation # noqa
125
from ._core.feature_permutation import FeaturePermutation # noqa
13-
from ._core.occlusion import Occlusion # noqa
14-
from ._core.shapley_value import ShapleyValueSampling, ShapleyValues # noqa
15-
from ._core.layer.layer_conductance import LayerConductance # noqa
16-
from ._core.layer.layer_gradient_x_activation import LayerGradientXActivation # noqa
17-
from ._core.layer.layer_activation import LayerActivation # noqa
18-
from ._core.layer.internal_influence import InternalInfluence # noqa
6+
from ._core.gradient_shap import GradientShap # noqa
7+
from ._core.guided_backprop_deconvnet import Deconvolution # noqa
8+
from ._core.guided_backprop_deconvnet import GuidedBackprop
9+
from ._core.guided_grad_cam import GuidedGradCam # noqa
10+
from ._core.input_x_gradient import InputXGradient # noqa
11+
from ._core.integrated_gradients import IntegratedGradients # noqa
1912
from ._core.layer.grad_cam import LayerGradCam # noqa
20-
from ._core.layer.layer_deep_lift import LayerDeepLift, LayerDeepLiftShap # noqa
13+
from ._core.layer.internal_influence import InternalInfluence # noqa
14+
from ._core.layer.layer_activation import LayerActivation # noqa
15+
from ._core.layer.layer_conductance import LayerConductance # noqa
16+
from ._core.layer.layer_deep_lift import LayerDeepLift # noqa
17+
from ._core.layer.layer_deep_lift import LayerDeepLiftShap
2118
from ._core.layer.layer_gradient_shap import LayerGradientShap # noqa
19+
from ._core.layer.layer_gradient_x_activation import LayerGradientXActivation # noqa
2220
from ._core.layer.layer_integrated_gradients import LayerIntegratedGradients # noqa
23-
from ._core.neuron.neuron_feature_ablation import NeuronFeatureAblation # noqa
2421
from ._core.neuron.neuron_conductance import NeuronConductance # noqa
22+
from ._core.neuron.neuron_deep_lift import NeuronDeepLift # noqa
23+
from ._core.neuron.neuron_deep_lift import NeuronDeepLiftShap
24+
from ._core.neuron.neuron_feature_ablation import NeuronFeatureAblation # noqa
2525
from ._core.neuron.neuron_gradient import NeuronGradient # noqa
26-
from ._core.neuron.neuron_integrated_gradients import NeuronIntegratedGradients # noqa
27-
from ._core.neuron.neuron_deep_lift import NeuronDeepLift, NeuronDeepLiftShap # noqa
2826
from ._core.neuron.neuron_gradient_shap import NeuronGradientShap # noqa
29-
from ._core.neuron.neuron_guided_backprop_deconvnet import (
27+
from ._core.neuron.neuron_guided_backprop_deconvnet import ( # noqa
3028
NeuronDeconvolution,
3129
NeuronGuidedBackprop,
32-
) # noqa
33-
30+
)
31+
from ._core.neuron.neuron_integrated_gradients import NeuronIntegratedGradients # noqa
32+
from ._core.noise_tunnel import NoiseTunnel # noqa
33+
from ._core.occlusion import Occlusion # noqa
34+
from ._core.saliency import Saliency # noqa
35+
from ._core.shapley_value import ShapleyValues, ShapleyValueSampling # noqa
36+
from ._models.base import InterpretableEmbeddingBase # noqa
3437
from ._models.base import (
35-
InterpretableEmbeddingBase,
3638
TokenReferenceBase,
3739
configure_interpretable_embedding_layer,
3840
remove_interpretable_embedding_layer,
39-
) # noqa
41+
)
42+
from ._utils import visualization # noqa
4043
from ._utils.attribution import Attribution # noqa
4144
from ._utils.attribution import GradientAttribution # noqa
42-
from ._utils.attribution import PerturbationAttribution # noqa
4345
from ._utils.attribution import LayerAttribution # noqa
4446
from ._utils.attribution import NeuronAttribution # noqa
45-
from ._utils import visualization # noqa
46-
from ._utils.summarizer import Summarizer, CommonSummarizer
47-
from ._utils.stat import Mean, StdDev, MSE, Var, Min, Max, Sum, Count
48-
47+
from ._utils.attribution import PerturbationAttribution # noqa
48+
from ._utils.stat import MSE, Count, Max, Mean, Min, StdDev, Sum, Var
49+
from ._utils.summarizer import CommonSummarizer, Summarizer
4950

5051
__all__ = [
5152
"Attribution",

captum/attr/_core/deep_lift.py

Lines changed: 16 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -1,41 +1,40 @@
11
#!/usr/bin/env python3
22
import typing
3-
from typing import Tuple, Union, Any, List, Callable, cast
4-
53
import warnings
4+
from typing import Any, Callable, List, Tuple, Union, cast
5+
6+
import numpy as np
67
import torch
78
import torch.nn as nn
89
import torch.nn.functional as F
910
from torch import Tensor
1011
from torch.nn import Module
1112
from torch.utils.hooks import RemovableHandle
1213

13-
import numpy as np
14-
14+
from .._utils.attribution import GradientAttribution
1515
from .._utils.common import (
16-
_is_tuple,
17-
_format_input,
16+
ExpansionTypes,
17+
_call_custom_attribution_func,
18+
_compute_conv_delta_and_format_attrs,
19+
_expand_additional_forward_args,
20+
_expand_target,
21+
_format_additional_forward_args,
22+
_format_attributions,
1823
_format_baseline,
1924
_format_callable_baseline,
20-
_format_attributions,
25+
_format_input,
2126
_format_tensor_into_tuples,
22-
_format_additional_forward_args,
27+
_is_tuple,
2328
_run_forward,
24-
_validate_input,
25-
_expand_target,
26-
_expand_additional_forward_args,
2729
_tensorize_baseline,
28-
_call_custom_attribution_func,
29-
_compute_conv_delta_and_format_attrs,
30-
ExpansionTypes,
30+
_validate_input,
3131
)
32-
from .._utils.attribution import GradientAttribution
3332
from .._utils.gradient import apply_gradient_requirements, undo_gradient_requirements
3433
from .._utils.typing import (
35-
TensorOrTupleOfTensorsGeneric,
34+
BaselineType,
3635
Literal,
3736
TargetType,
38-
BaselineType,
37+
TensorOrTupleOfTensorsGeneric,
3938
)
4039

4140

captum/attr/_core/feature_ablation.py

Lines changed: 7 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1,24 +1,23 @@
11
#!/usr/bin/env python3
22

3-
import torch
3+
from typing import Any, Callable, Tuple, Union, cast
44

5+
import torch
56
from torch import Tensor, dtype
67

7-
from typing import Any, Callable, Tuple, Union, cast
8-
8+
from .._utils.attribution import PerturbationAttribution
99
from .._utils.common import (
10+
_expand_additional_forward_args,
11+
_expand_target,
1012
_find_output_mode_and_verify,
13+
_format_additional_forward_args,
1114
_format_attributions,
1215
_format_input,
1316
_format_input_baseline,
1417
_is_tuple,
1518
_run_forward,
16-
_expand_additional_forward_args,
17-
_expand_target,
18-
_format_additional_forward_args,
1919
)
20-
from .._utils.attribution import PerturbationAttribution
21-
from .._utils.typing import TensorOrTupleOfTensorsGeneric, TargetType, BaselineType
20+
from .._utils.typing import BaselineType, TargetType, TensorOrTupleOfTensorsGeneric
2221

2322

2423
class FeatureAblation(PerturbationAttribution):

captum/attr/_core/feature_permutation.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,8 +4,8 @@
44
import torch
55
from torch import Tensor
66

7-
from .feature_ablation import FeatureAblation
87
from .._utils.typing import TargetType, TensorOrTupleOfTensorsGeneric
8+
from .feature_ablation import FeatureAblation
99

1010

1111
def _permute_feature(x: Tensor, feature_mask: Tensor) -> Tensor:

captum/attr/_core/gradient_shap.py

Lines changed: 10 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -1,26 +1,25 @@
11
#!/usr/bin/env python3
2-
import torch
2+
import typing
3+
from typing import Any, Callable, Tuple, Union
34

45
import numpy as np
6+
import torch
57

68
from .._utils.attribution import GradientAttribution
79
from .._utils.common import (
8-
_is_tuple,
9-
_format_input_baseline,
10-
_format_callable_baseline,
1110
_compute_conv_delta_and_format_attrs,
11+
_format_callable_baseline,
12+
_format_input_baseline,
13+
_is_tuple,
1214
)
13-
14-
from .noise_tunnel import NoiseTunnel
15-
from typing import Any, Callable, Tuple, Union
1615
from .._utils.typing import (
17-
Tensor,
18-
TensorOrTupleOfTensorsGeneric,
16+
BaselineType,
1917
Literal,
2018
TargetType,
21-
BaselineType,
19+
Tensor,
20+
TensorOrTupleOfTensorsGeneric,
2221
)
23-
import typing
22+
from .noise_tunnel import NoiseTunnel
2423

2524

2625
class GradientShap(GradientAttribution):

captum/attr/_core/guided_backprop_deconvnet.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,15 +1,15 @@
11
#!/usr/bin/env python3
22
import warnings
3+
from typing import Any, List, Tuple, Union
4+
35
import torch
46
import torch.nn.functional as F
5-
from typing import Any, List, Union, Tuple
6-
77
from torch import Tensor
88
from torch.nn import Module
99
from torch.utils.hooks import RemovableHandle
1010

1111
from .._utils.attribution import GradientAttribution
12-
from .._utils.common import _format_input, _format_attributions, _is_tuple
12+
from .._utils.common import _format_attributions, _format_input, _is_tuple
1313
from .._utils.gradient import apply_gradient_requirements, undo_gradient_requirements
1414
from .._utils.typing import TargetType, TensorOrTupleOfTensorsGeneric
1515

captum/attr/_core/guided_grad_cam.py

Lines changed: 8 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1,17 +1,16 @@
11
#!/usr/bin/env python3
2-
import torch
32
import warnings
3+
from typing import Any, List, Union
44

5-
from .._utils.attribution import GradientAttribution, LayerAttribution
6-
from .._utils.common import _format_input, _format_attributions, _is_tuple
7-
8-
from .layer.grad_cam import LayerGradCam
9-
from .guided_backprop_deconvnet import GuidedBackprop
10-
11-
from torch.nn import Module
5+
import torch
126
from torch import Tensor
13-
from typing import Any, List, Union
7+
from torch.nn import Module
8+
9+
from .._utils.attribution import GradientAttribution, LayerAttribution
10+
from .._utils.common import _format_attributions, _format_input, _is_tuple
1411
from .._utils.typing import TargetType, TensorOrTupleOfTensorsGeneric
12+
from .guided_backprop_deconvnet import GuidedBackprop
13+
from .layer.grad_cam import LayerGradCam
1514

1615

1716
class GuidedGradCam(GradientAttribution):

0 commit comments

Comments
 (0)