From fd73fb9ef7596f416690daf6c31dd3ae7ca7cda4 Mon Sep 17 00:00:00 2001 From: Gregory Comer Date: Thu, 24 Jul 2025 21:06:04 -0700 Subject: [PATCH 1/5] Update [ghstack-poisoned] --- backends/test/suite/operators/test_conv1d.py | 89 +++++++++++++++++ backends/test/suite/operators/test_conv2d.py | 96 +++++++++++++++++++ backends/test/suite/operators/test_conv3d.py | 95 ++++++++++++++++++ .../suite/operators/test_convtranspose1d.py | 89 +++++++++++++++++ .../suite/operators/test_convtranspose2d.py | 96 +++++++++++++++++++ .../suite/operators/test_convtranspose3d.py | 95 ++++++++++++++++++ 6 files changed, 560 insertions(+) create mode 100644 backends/test/suite/operators/test_conv1d.py create mode 100644 backends/test/suite/operators/test_conv2d.py create mode 100644 backends/test/suite/operators/test_conv3d.py create mode 100644 backends/test/suite/operators/test_convtranspose1d.py create mode 100644 backends/test/suite/operators/test_convtranspose2d.py create mode 100644 backends/test/suite/operators/test_convtranspose3d.py diff --git a/backends/test/suite/operators/test_conv1d.py b/backends/test/suite/operators/test_conv1d.py new file mode 100644 index 00000000000..1efd7685c18 --- /dev/null +++ b/backends/test/suite/operators/test_conv1d.py @@ -0,0 +1,89 @@ +# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. + +# pyre-strict + +from typing import Callable + +import torch + +from executorch.backends.test.compliance_suite import ( + dtype_test, + operator_test, + OperatorTest, +) + +class Model(torch.nn.Module): + def __init__( + self, + in_channels=3, + out_channels=6, + kernel_size=3, + stride=1, + padding=0, + dilation=1, + groups=1, + bias=True, + padding_mode="zeros", + ): + super().__init__() + self.conv = torch.nn.Conv1d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + bias=bias, + padding_mode=padding_mode, + ) + + def forward(self, x): + return self.conv(x) + +@operator_test +class TestConv1d(OperatorTest): + @dtype_test + def test_conv1d_dtype(self, dtype, tester_factory: Callable) -> None: + # Input shape: (batch_size, in_channels, length) + self._test_op(Model().to(dtype), ((torch.rand(2, 3, 10) * 10).to(dtype),), tester_factory) + + def test_conv1d_basic(self, tester_factory: Callable) -> None: + # Basic test with default parameters + self._test_op(Model(), (torch.randn(2, 3, 10),), tester_factory) + + def test_conv1d_kernel_size(self, tester_factory: Callable) -> None: + # Test with different kernel sizes + self._test_op(Model(kernel_size=1), (torch.randn(2, 3, 10),), tester_factory) + self._test_op(Model(kernel_size=5), (torch.randn(2, 3, 10),), tester_factory) + + def test_conv1d_stride(self, tester_factory: Callable) -> None: + # Test with different stride values + self._test_op(Model(stride=2), (torch.randn(2, 3, 10),), tester_factory) + + def test_conv1d_padding(self, tester_factory: Callable) -> None: + # Test with different padding values + self._test_op(Model(padding=1), (torch.randn(2, 3, 10),), tester_factory) + self._test_op(Model(padding=2), (torch.randn(2, 3, 10),), tester_factory) + + def test_conv1d_dilation(self, tester_factory: Callable) -> None: + # Test with different dilation values + self._test_op(Model(dilation=2), (torch.randn(2, 3, 10),), tester_factory) + + def test_conv1d_groups(self, tester_factory: Callable) -> None: + # Test with groups=3 (in_channels must be divisible by groups) + self._test_op(Model(in_channels=6, 
out_channels=6, groups=3), (torch.randn(2, 6, 10),), tester_factory) + + def test_conv1d_no_bias(self, tester_factory: Callable) -> None: + # Test without bias + self._test_op(Model(bias=False), (torch.randn(2, 3, 10),), tester_factory) + + def test_conv1d_padding_modes(self, tester_factory: Callable) -> None: + # Test different padding modes + for mode in ["zeros", "reflect", "replicate", "circular"]: + self._test_op(Model(padding=1, padding_mode=mode), (torch.randn(2, 3, 10),), tester_factory) + + def test_conv1d_channels(self, tester_factory: Callable) -> None: + # Test with different channel configurations + self._test_op(Model(in_channels=1, out_channels=1), (torch.randn(2, 1, 10),), tester_factory) + self._test_op(Model(in_channels=5, out_channels=10), (torch.randn(2, 5, 10),), tester_factory) diff --git a/backends/test/suite/operators/test_conv2d.py b/backends/test/suite/operators/test_conv2d.py new file mode 100644 index 00000000000..40b3b9dc24b --- /dev/null +++ b/backends/test/suite/operators/test_conv2d.py @@ -0,0 +1,96 @@ +# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. + +# pyre-strict + +from typing import Callable, Union, Tuple + +import torch + +from executorch.backends.test.compliance_suite import ( + dtype_test, + operator_test, + OperatorTest, +) + +class Model(torch.nn.Module): + def __init__( + self, + in_channels=3, + out_channels=6, + kernel_size=3, + stride=1, + padding=0, + dilation=1, + groups=1, + bias=True, + padding_mode="zeros", + ): + super().__init__() + self.conv = torch.nn.Conv2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + bias=bias, + padding_mode=padding_mode, + ) + + def forward(self, x): + return self.conv(x) + +@operator_test +class TestConv2d(OperatorTest): + @dtype_test + def test_conv2d_dtype(self, dtype, tester_factory: Callable) -> None: + # Input shape: (batch_size, in_channels, height, width) + self._test_op(Model().to(dtype), ((torch.rand(2, 3, 8, 8) * 10).to(dtype),), tester_factory) + + def test_conv2d_basic(self, tester_factory: Callable) -> None: + # Basic test with default parameters + self._test_op(Model(), (torch.randn(2, 3, 8, 8),), tester_factory) + + def test_conv2d_kernel_size(self, tester_factory: Callable) -> None: + # Test with different kernel sizes + self._test_op(Model(kernel_size=1), (torch.randn(2, 3, 8, 8),), tester_factory) + self._test_op(Model(kernel_size=5), (torch.randn(2, 3, 8, 8),), tester_factory) + self._test_op(Model(kernel_size=(3, 5)), (torch.randn(2, 3, 8, 8),), tester_factory) + + def test_conv2d_stride(self, tester_factory: Callable) -> None: + # Test with different stride values + self._test_op(Model(stride=2), (torch.randn(2, 3, 8, 8),), tester_factory) + self._test_op(Model(stride=(2, 1)), (torch.randn(2, 3, 8, 8),), tester_factory) + + def test_conv2d_padding(self, tester_factory: Callable) -> None: + # Test with different padding values + self._test_op(Model(padding=1), (torch.randn(2, 3, 8, 8),), tester_factory) + self._test_op(Model(padding=(1, 2)), (torch.randn(2, 3, 8, 8),), tester_factory) + + def test_conv2d_dilation(self, tester_factory: Callable) -> None: + # Test with different dilation values + self._test_op(Model(dilation=2), (torch.randn(2, 3, 8, 8),), tester_factory) + self._test_op(Model(dilation=(2, 1)), (torch.randn(2, 3, 8, 8),), tester_factory) + + def test_conv2d_groups(self, tester_factory: Callable) -> None: + # Test with groups=3 (in_channels 
must be divisible by groups) + self._test_op(Model(in_channels=6, out_channels=6, groups=3), (torch.randn(2, 6, 8, 8),), tester_factory) + + def test_conv2d_no_bias(self, tester_factory: Callable) -> None: + # Test without bias + self._test_op(Model(bias=False), (torch.randn(2, 3, 8, 8),), tester_factory) + + def test_conv2d_padding_modes(self, tester_factory: Callable) -> None: + # Test different padding modes + for mode in ["zeros", "reflect", "replicate", "circular"]: + self._test_op(Model(padding=1, padding_mode=mode), (torch.randn(2, 3, 8, 8),), tester_factory) + + def test_conv2d_channels(self, tester_factory: Callable) -> None: + # Test with different channel configurations + self._test_op(Model(in_channels=1, out_channels=1), (torch.randn(2, 1, 8, 8),), tester_factory) + self._test_op(Model(in_channels=5, out_channels=10), (torch.randn(2, 5, 8, 8),), tester_factory) + + def test_conv2d_different_spatial_dims(self, tester_factory: Callable) -> None: + # Test with different height and width + self._test_op(Model(), (torch.randn(2, 3, 10, 8),), tester_factory) diff --git a/backends/test/suite/operators/test_conv3d.py b/backends/test/suite/operators/test_conv3d.py new file mode 100644 index 00000000000..baade4df10e --- /dev/null +++ b/backends/test/suite/operators/test_conv3d.py @@ -0,0 +1,95 @@ +# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. + +# pyre-strict + +from typing import Callable, Union, Tuple + +import torch + +from executorch.backends.test.compliance_suite import ( + dtype_test, + operator_test, + OperatorTest, +) + +class Model(torch.nn.Module): + def __init__( + self, + in_channels=3, + out_channels=6, + kernel_size=3, + stride=1, + padding=0, + dilation=1, + groups=1, + bias=True, + padding_mode="zeros", + ): + super().__init__() + self.conv = torch.nn.Conv3d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + bias=bias, + padding_mode=padding_mode, + ) + + def forward(self, x): + return self.conv(x) + +@operator_test +class TestConv3d(OperatorTest): + @dtype_test + def test_conv3d_dtype(self, dtype, tester_factory: Callable) -> None: + # Input shape: (batch_size, in_channels, depth, height, width) + self._test_op(Model().to(dtype), ((torch.rand(2, 3, 4, 4, 4) * 10).to(dtype),), tester_factory) + + def test_conv3d_basic(self, tester_factory: Callable) -> None: + # Basic test with default parameters + self._test_op(Model(), (torch.randn(2, 3, 4, 4, 4),), tester_factory) + + def test_conv3d_kernel_size(self, tester_factory: Callable) -> None: + # Test with different kernel sizes + self._test_op(Model(kernel_size=1), (torch.randn(2, 3, 4, 4, 4),), tester_factory) + self._test_op(Model(kernel_size=(1, 3, 3)), (torch.randn(2, 3, 4, 4, 4),), tester_factory) + + def test_conv3d_stride(self, tester_factory: Callable) -> None: + # Test with different stride values + self._test_op(Model(stride=2), (torch.randn(2, 3, 6, 6, 6),), tester_factory) + self._test_op(Model(stride=(1, 2, 2)), (torch.randn(2, 3, 4, 6, 6),), tester_factory) + + def test_conv3d_padding(self, tester_factory: Callable) -> None: + # Test with different padding values + self._test_op(Model(padding=1), (torch.randn(2, 3, 4, 4, 4),), tester_factory) + self._test_op(Model(padding=(0, 1, 1)), (torch.randn(2, 3, 4, 4, 4),), tester_factory) + + def test_conv3d_dilation(self, tester_factory: Callable) -> None: + # Test with different dilation values + self._test_op(Model(dilation=2), 
(torch.randn(2, 3, 6, 6, 6),), tester_factory) + self._test_op(Model(dilation=(1, 2, 2)), (torch.randn(2, 3, 4, 6, 6),), tester_factory) + + def test_conv3d_groups(self, tester_factory: Callable) -> None: + # Test with groups=3 (in_channels must be divisible by groups) + self._test_op(Model(in_channels=6, out_channels=6, groups=3), (torch.randn(2, 6, 4, 4, 4),), tester_factory) + + def test_conv3d_no_bias(self, tester_factory: Callable) -> None: + # Test without bias + self._test_op(Model(bias=False), (torch.randn(2, 3, 4, 4, 4),), tester_factory) + + def test_conv3d_padding_modes(self, tester_factory: Callable) -> None: + # Test different padding modes + for mode in ["zeros", "reflect", "replicate", "circular"]: + self._test_op(Model(padding=1, padding_mode=mode), (torch.randn(2, 3, 4, 4, 4),), tester_factory) + + def test_conv3d_channels(self, tester_factory: Callable) -> None: + # Test with different channel configurations + self._test_op(Model(in_channels=1, out_channels=1), (torch.randn(2, 1, 4, 4, 4),), tester_factory) + self._test_op(Model(in_channels=5, out_channels=10), (torch.randn(2, 5, 4, 4, 4),), tester_factory) + + def test_conv3d_different_spatial_dims(self, tester_factory: Callable) -> None: + # Test with different depth, height, and width + self._test_op(Model(), (torch.randn(2, 3, 3, 4, 5),), tester_factory) diff --git a/backends/test/suite/operators/test_convtranspose1d.py b/backends/test/suite/operators/test_convtranspose1d.py new file mode 100644 index 00000000000..d93e542de4a --- /dev/null +++ b/backends/test/suite/operators/test_convtranspose1d.py @@ -0,0 +1,89 @@ +# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. + +# pyre-strict + +from typing import Callable, Union, Tuple + +import torch + +from executorch.backends.test.compliance_suite import ( + dtype_test, + operator_test, + OperatorTest, +) + +class Model(torch.nn.Module): + def __init__( + self, + in_channels=3, + out_channels=6, + kernel_size=3, + stride=1, + padding=0, + output_padding=0, + dilation=1, + groups=1, + bias=True, + ): + super().__init__() + self.conv_transpose = torch.nn.ConvTranspose1d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + output_padding=output_padding, + dilation=dilation, + groups=groups, + bias=bias, + ) + + def forward(self, x): + return self.conv_transpose(x) + +@operator_test +class TestConvTranspose1d(OperatorTest): + @dtype_test + def test_convtranspose1d_dtype(self, dtype, tester_factory: Callable) -> None: + # Input shape: (batch_size, in_channels, length) + self._test_op(Model().to(dtype), ((torch.rand(2, 3, 10) * 10).to(dtype),), tester_factory) + + def test_convtranspose1d_basic(self, tester_factory: Callable) -> None: + # Basic test with default parameters + self._test_op(Model(), (torch.randn(2, 3, 10),), tester_factory) + + def test_convtranspose1d_kernel_size(self, tester_factory: Callable) -> None: + # Test with different kernel sizes + self._test_op(Model(kernel_size=1), (torch.randn(2, 3, 10),), tester_factory) + self._test_op(Model(kernel_size=5), (torch.randn(2, 3, 10),), tester_factory) + + def test_convtranspose1d_stride(self, tester_factory: Callable) -> None: + # Test with different stride values + self._test_op(Model(stride=2), (torch.randn(2, 3, 10),), tester_factory) + + def test_convtranspose1d_padding(self, tester_factory: Callable) -> None: + # Test with different padding values + self._test_op(Model(padding=1), (torch.randn(2, 3, 10),), tester_factory) + 
self._test_op(Model(padding=2), (torch.randn(2, 3, 10),), tester_factory) + + def test_convtranspose1d_output_padding(self, tester_factory: Callable) -> None: + # Test with different output_padding values (requires stride > 1) + self._test_op(Model(stride=2, output_padding=1), (torch.randn(2, 3, 10),), tester_factory) + + def test_convtranspose1d_dilation(self, tester_factory: Callable) -> None: + # Test with different dilation values + self._test_op(Model(dilation=2), (torch.randn(2, 3, 10),), tester_factory) + + def test_convtranspose1d_groups(self, tester_factory: Callable) -> None: + # Test with groups=3 (in_channels and out_channels must be divisible by groups) + self._test_op(Model(in_channels=6, out_channels=6, groups=3), (torch.randn(2, 6, 10),), tester_factory) + + def test_convtranspose1d_no_bias(self, tester_factory: Callable) -> None: + # Test without bias + self._test_op(Model(bias=False), (torch.randn(2, 3, 10),), tester_factory) + + def test_convtranspose1d_channels(self, tester_factory: Callable) -> None: + # Test with different channel configurations + self._test_op(Model(in_channels=1, out_channels=1), (torch.randn(2, 1, 10),), tester_factory) + self._test_op(Model(in_channels=5, out_channels=10), (torch.randn(2, 5, 10),), tester_factory) + \ No newline at end of file diff --git a/backends/test/suite/operators/test_convtranspose2d.py b/backends/test/suite/operators/test_convtranspose2d.py new file mode 100644 index 00000000000..b5a4dfb784c --- /dev/null +++ b/backends/test/suite/operators/test_convtranspose2d.py @@ -0,0 +1,96 @@ +# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. + +# pyre-strict + +from typing import Callable, Union, Tuple + +import torch + +from executorch.backends.test.compliance_suite import ( + dtype_test, + operator_test, + OperatorTest, +) + +class Model(torch.nn.Module): + def __init__( + self, + in_channels=3, + out_channels=6, + kernel_size=3, + stride=1, + padding=0, + output_padding=0, + dilation=1, + groups=1, + bias=True, + ): + super().__init__() + self.conv_transpose = torch.nn.ConvTranspose2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + output_padding=output_padding, + dilation=dilation, + groups=groups, + bias=bias, + ) + + def forward(self, x): + return self.conv_transpose(x) + +@operator_test +class TestConvTranspose2d(OperatorTest): + @dtype_test + def test_convtranspose2d_dtype(self, dtype, tester_factory: Callable) -> None: + # Input shape: (batch_size, in_channels, height, width) + self._test_op(Model().to(dtype), ((torch.rand(2, 3, 8, 8) * 10).to(dtype),), tester_factory) + + def test_convtranspose2d_basic(self, tester_factory: Callable) -> None: + # Basic test with default parameters + self._test_op(Model(), (torch.randn(2, 3, 8, 8),), tester_factory) + + def test_convtranspose2d_kernel_size(self, tester_factory: Callable) -> None: + # Test with different kernel sizes + self._test_op(Model(kernel_size=1), (torch.randn(2, 3, 8, 8),), tester_factory) + self._test_op(Model(kernel_size=5), (torch.randn(2, 3, 8, 8),), tester_factory) + self._test_op(Model(kernel_size=(3, 5)), (torch.randn(2, 3, 8, 8),), tester_factory) + + def test_convtranspose2d_stride(self, tester_factory: Callable) -> None: + # Test with different stride values + self._test_op(Model(stride=2), (torch.randn(2, 3, 8, 8),), tester_factory) + self._test_op(Model(stride=(2, 1)), (torch.randn(2, 3, 8, 8),), tester_factory) + + def test_convtranspose2d_padding(self, 
tester_factory: Callable) -> None: + # Test with different padding values + self._test_op(Model(padding=1), (torch.randn(2, 3, 8, 8),), tester_factory) + self._test_op(Model(padding=(1, 2)), (torch.randn(2, 3, 8, 8),), tester_factory) + + def test_convtranspose2d_output_padding(self, tester_factory: Callable) -> None: + # Test with different output_padding values (requires stride > 1) + self._test_op(Model(stride=2, output_padding=1), (torch.randn(2, 3, 8, 8),), tester_factory) + self._test_op(Model(stride=(2, 2), output_padding=(1, 0)), (torch.randn(2, 3, 8, 8),), tester_factory) + + def test_convtranspose2d_dilation(self, tester_factory: Callable) -> None: + # Test with different dilation values + self._test_op(Model(dilation=2), (torch.randn(2, 3, 8, 8),), tester_factory) + self._test_op(Model(dilation=(2, 1)), (torch.randn(2, 3, 8, 8),), tester_factory) + + def test_convtranspose2d_groups(self, tester_factory: Callable) -> None: + # Test with groups=3 (in_channels and out_channels must be divisible by groups) + self._test_op(Model(in_channels=6, out_channels=6, groups=3), (torch.randn(2, 6, 8, 8),), tester_factory) + + def test_convtranspose2d_no_bias(self, tester_factory: Callable) -> None: + # Test without bias + self._test_op(Model(bias=False), (torch.randn(2, 3, 8, 8),), tester_factory) + + def test_convtranspose2d_channels(self, tester_factory: Callable) -> None: + # Test with different channel configurations + self._test_op(Model(in_channels=1, out_channels=1), (torch.randn(2, 1, 8, 8),), tester_factory) + self._test_op(Model(in_channels=5, out_channels=10), (torch.randn(2, 5, 8, 8),), tester_factory) + + def test_convtranspose2d_different_spatial_dims(self, tester_factory: Callable) -> None: + # Test with different height and width + self._test_op(Model(), (torch.randn(2, 3, 10, 8),), tester_factory) diff --git a/backends/test/suite/operators/test_convtranspose3d.py b/backends/test/suite/operators/test_convtranspose3d.py new file mode 100644 index 00000000000..00612725016 --- /dev/null +++ b/backends/test/suite/operators/test_convtranspose3d.py @@ -0,0 +1,95 @@ +# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. 
+ +# pyre-strict + +from typing import Callable, Union, Tuple + +import torch + +from executorch.backends.test.compliance_suite import ( + dtype_test, + operator_test, + OperatorTest, +) + +class Model(torch.nn.Module): + def __init__( + self, + in_channels=3, + out_channels=6, + kernel_size=3, + stride=1, + padding=0, + output_padding=0, + dilation=1, + groups=1, + bias=True, + ): + super().__init__() + self.conv_transpose = torch.nn.ConvTranspose3d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + output_padding=output_padding, + dilation=dilation, + groups=groups, + bias=bias, + ) + + def forward(self, x): + return self.conv_transpose(x) + +@operator_test +class TestConvTranspose3d(OperatorTest): + @dtype_test + def test_convtranspose3d_dtype(self, dtype, tester_factory: Callable) -> None: + # Input shape: (batch_size, in_channels, depth, height, width) + self._test_op(Model().to(dtype), ((torch.rand(2, 3, 4, 4, 4) * 10).to(dtype),), tester_factory) + + def test_convtranspose3d_basic(self, tester_factory: Callable) -> None: + # Basic test with default parameters + self._test_op(Model(), (torch.randn(2, 3, 4, 4, 4),), tester_factory) + + def test_convtranspose3d_kernel_size(self, tester_factory: Callable) -> None: + # Test with different kernel sizes + self._test_op(Model(kernel_size=1), (torch.randn(2, 3, 4, 4, 4),), tester_factory) + self._test_op(Model(kernel_size=(1, 3, 3)), (torch.randn(2, 3, 4, 4, 4),), tester_factory) + + def test_convtranspose3d_stride(self, tester_factory: Callable) -> None: + # Test with different stride values + self._test_op(Model(stride=2), (torch.randn(2, 3, 4, 4, 4),), tester_factory) + self._test_op(Model(stride=(1, 2, 2)), (torch.randn(2, 3, 4, 4, 4),), tester_factory) + + def test_convtranspose3d_padding(self, tester_factory: Callable) -> None: + # Test with different padding values + self._test_op(Model(padding=1), (torch.randn(2, 3, 4, 4, 4),), tester_factory) + self._test_op(Model(padding=(0, 1, 1)), (torch.randn(2, 3, 4, 4, 4),), tester_factory) + + def test_convtranspose3d_output_padding(self, tester_factory: Callable) -> None: + # Test with different output_padding values (requires stride > 1) + self._test_op(Model(stride=2, output_padding=1), (torch.randn(2, 3, 4, 4, 4),), tester_factory) + self._test_op(Model(stride=(2, 2, 2), output_padding=(1, 0, 1)), (torch.randn(2, 3, 4, 4, 4),), tester_factory) + + def test_convtranspose3d_dilation(self, tester_factory: Callable) -> None: + # Test with different dilation values + self._test_op(Model(dilation=2), (torch.randn(2, 3, 4, 4, 4),), tester_factory) + self._test_op(Model(dilation=(1, 2, 2)), (torch.randn(2, 3, 4, 4, 4),), tester_factory) + + def test_convtranspose3d_groups(self, tester_factory: Callable) -> None: + # Test with groups=3 (in_channels and out_channels must be divisible by groups) + self._test_op(Model(in_channels=6, out_channels=6, groups=3), (torch.randn(2, 6, 4, 4, 4),), tester_factory) + + def test_convtranspose3d_no_bias(self, tester_factory: Callable) -> None: + # Test without bias + self._test_op(Model(bias=False), (torch.randn(2, 3, 4, 4, 4),), tester_factory) + + def test_convtranspose3d_channels(self, tester_factory: Callable) -> None: + # Test with different channel configurations + self._test_op(Model(in_channels=1, out_channels=1), (torch.randn(2, 1, 4, 4, 4),), tester_factory) + self._test_op(Model(in_channels=5, out_channels=10), (torch.randn(2, 5, 4, 4, 4),), tester_factory) + + def 
test_convtranspose3d_different_spatial_dims(self, tester_factory: Callable) -> None: + # Test with different depth, height, and width + self._test_op(Model(), (torch.randn(2, 3, 3, 4, 5),), tester_factory) From 2de680e2f76771845ae9073e67579e71c4a3eff7 Mon Sep 17 00:00:00 2001 From: Gregory Comer Date: Thu, 24 Jul 2025 21:06:08 -0700 Subject: [PATCH 2/5] Update [ghstack-poisoned] --- backends/test/suite/operators/test_linear.py | 77 ++++++++++++++++++++ 1 file changed, 77 insertions(+) create mode 100644 backends/test/suite/operators/test_linear.py diff --git a/backends/test/suite/operators/test_linear.py b/backends/test/suite/operators/test_linear.py new file mode 100644 index 00000000000..ca0f428884a --- /dev/null +++ b/backends/test/suite/operators/test_linear.py @@ -0,0 +1,77 @@ +# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. + +# pyre-strict + +from typing import Callable + +import torch + +from executorch.backends.test.compliance_suite import ( + dtype_test, + operator_test, + OperatorTest, +) + +class Model(torch.nn.Module): + def __init__( + self, + in_features=10, + out_features=5, + bias=True, + ): + super().__init__() + self.linear = torch.nn.Linear( + in_features=in_features, + out_features=out_features, + bias=bias, + ) + + def forward(self, x): + return self.linear(x) + +@operator_test +class TestLinear(OperatorTest): + @dtype_test + def test_linear_dtype(self, dtype, tester_factory: Callable) -> None: + # Input shape: (batch_size, in_features) + model = Model().to(dtype) + self._test_op(model, ((torch.rand(2, 10) * 10).to(dtype),), tester_factory) + + @dtype_test + def test_linear_no_bias_dtype(self, dtype, tester_factory: Callable) -> None: + # Input shape: (batch_size, in_features) + model = Model(bias=False).to(dtype) + self._test_op(model, ((torch.rand(2, 10) * 10).to(dtype),), tester_factory) + + def test_linear_basic(self, tester_factory: Callable) -> None: + # Basic test with default parameters + self._test_op(Model(), (torch.randn(2, 10),), tester_factory) + + def test_linear_feature_sizes(self, tester_factory: Callable) -> None: + # Test with different input and output feature sizes + self._test_op(Model(in_features=5, out_features=3), (torch.randn(2, 5),), tester_factory) + self._test_op(Model(in_features=20, out_features=10), (torch.randn(2, 20),), tester_factory) + self._test_op(Model(in_features=100, out_features=1), (torch.randn(2, 100),), tester_factory) + self._test_op(Model(in_features=1, out_features=100), (torch.randn(2, 1),), tester_factory) + + def test_linear_no_bias(self, tester_factory: Callable) -> None: + # Test without bias + self._test_op(Model(bias=False), (torch.randn(2, 10),), tester_factory) + self._test_op(Model(in_features=20, out_features=15, bias=False), (torch.randn(2, 20),), tester_factory) + + def test_linear_batch_sizes(self, tester_factory: Callable) -> None: + # Test with different batch sizes + self._test_op(Model(), (torch.randn(1, 10),), tester_factory) + self._test_op(Model(), (torch.randn(5, 10),), tester_factory) + self._test_op(Model(), (torch.randn(100, 10),), tester_factory) + + def test_linear_unbatched(self, tester_factory: Callable) -> None: + # Test with unbatched input (just features) + self._test_op(Model(), (torch.randn(10),), tester_factory) + + def test_linear_multi_dim_input(self, tester_factory: Callable) -> None: + # Test with multi-dimensional input + # For multi-dimensional inputs, the linear transformation is applied to the last dimension + self._test_op(Model(), (torch.randn(3, 4, 
10),), tester_factory) + self._test_op(Model(), (torch.randn(2, 3, 4, 10),), tester_factory) + \ No newline at end of file From 8b1136665b56752551bf3b83c98ae0988f257d70 Mon Sep 17 00:00:00 2001 From: Gregory Comer Date: Thu, 24 Jul 2025 21:06:13 -0700 Subject: [PATCH 3/5] Update [ghstack-poisoned] --- .../test/suite/operators/test_embedding.py | 68 ++++++++++++ .../suite/operators/test_embedding_bag.py | 100 ++++++++++++++++++ 2 files changed, 168 insertions(+) create mode 100644 backends/test/suite/operators/test_embedding.py create mode 100644 backends/test/suite/operators/test_embedding_bag.py diff --git a/backends/test/suite/operators/test_embedding.py b/backends/test/suite/operators/test_embedding.py new file mode 100644 index 00000000000..102881981db --- /dev/null +++ b/backends/test/suite/operators/test_embedding.py @@ -0,0 +1,68 @@ +# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. + +# pyre-strict + +from typing import Callable, Optional + +import torch + +from executorch.backends.test.compliance_suite import ( + dtype_test, + operator_test, + OperatorTest, +) + +class Model(torch.nn.Module): + def __init__( + self, + num_embeddings=10, + embedding_dim=5, + padding_idx: Optional[int] = None, + norm_type: float = 2.0, + ): + super().__init__() + self.embedding = torch.nn.Embedding( + num_embeddings=num_embeddings, + embedding_dim=embedding_dim, + padding_idx=padding_idx, + norm_type=norm_type, + ) + + def forward(self, x): + return self.embedding(x) + +@operator_test +class TestEmbedding(OperatorTest): + @dtype_test + def test_embedding_dtype(self, dtype, tester_factory: Callable) -> None: + # Input shape: (batch_size, seq_length) + # Note: Input indices should be of type Long (int64) + model = Model().to(dtype) + self._test_op(model, (torch.randint(0, 10, (2, 8), dtype=torch.long),), tester_factory, use_random_test_inputs=False) + + def test_embedding_basic(self, tester_factory: Callable) -> None: + # Basic test with default parameters + self._test_op(Model(), (torch.randint(0, 10, (2, 8), dtype=torch.long),), tester_factory, use_random_test_inputs=False) + + def test_embedding_sizes(self, tester_factory: Callable) -> None: + # Test with different dictionary sizes and embedding dimensions + self._test_op(Model(num_embeddings=5, embedding_dim=3), + (torch.randint(0, 5, (2, 8), dtype=torch.long),), tester_factory, use_random_test_inputs=False) + self._test_op(Model(num_embeddings=100, embedding_dim=10), + (torch.randint(0, 100, (2, 8), dtype=torch.long),), tester_factory, use_random_test_inputs=False) + self._test_op(Model(num_embeddings=1000, embedding_dim=50), + (torch.randint(0, 1000, (2, 4), dtype=torch.long),), tester_factory, use_random_test_inputs=False) + + def test_embedding_padding_idx(self, tester_factory: Callable) -> None: + # Test with padding_idx + self._test_op(Model(padding_idx=0), + (torch.randint(0, 10, (2, 8), dtype=torch.long),), tester_factory, use_random_test_inputs=False) + self._test_op(Model(padding_idx=5), + (torch.randint(0, 10, (2, 8), dtype=torch.long),), tester_factory, use_random_test_inputs=False) + + def test_embedding_input_shapes(self, tester_factory: Callable) -> None: + # Test with different input shapes + self._test_op(Model(), (torch.randint(0, 10, (5,), dtype=torch.long),), tester_factory, use_random_test_inputs=False) # 1D input + self._test_op(Model(), (torch.randint(0, 10, (2, 8), dtype=torch.long),), tester_factory, use_random_test_inputs=False) # 2D input + self._test_op(Model(), (torch.randint(0, 10, (2, 3, 4), 
dtype=torch.long),), tester_factory, use_random_test_inputs=False) # 3D input + \ No newline at end of file diff --git a/backends/test/suite/operators/test_embedding_bag.py b/backends/test/suite/operators/test_embedding_bag.py new file mode 100644 index 00000000000..67ba8a2849f --- /dev/null +++ b/backends/test/suite/operators/test_embedding_bag.py @@ -0,0 +1,100 @@ +# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. + +# pyre-strict + +from typing import Callable, Optional + +import torch + +from executorch.backends.test.compliance_suite import ( + dtype_test, + operator_test, + OperatorTest, +) + +class Model(torch.nn.Module): + def __init__( + self, + num_embeddings=10, + embedding_dim=5, + mode='mean', + padding_idx: Optional[int] = None, + norm_type: float = 2.0, + include_last_offset: bool = False, + ): + super().__init__() + self.embedding_bag = torch.nn.EmbeddingBag( + num_embeddings=num_embeddings, + embedding_dim=embedding_dim, + mode=mode, + padding_idx=padding_idx, + norm_type=norm_type, + include_last_offset=include_last_offset, + ) + + def forward(self, x, offsets=None): + return self.embedding_bag(x, offsets) + +@operator_test +class TestEmbeddingBag(OperatorTest): + @dtype_test + def test_embedding_bag_dtype(self, dtype, tester_factory: Callable) -> None: + # Input: indices and offsets + # Note: Input indices should be of type Long (int64) + model = Model().to(dtype) + indices = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9], dtype=torch.long) + offsets = torch.tensor([0, 4], dtype=torch.long) # 2 bags + self._test_op(model, (indices, offsets), tester_factory, use_random_test_inputs=False) + + def test_embedding_bag_basic(self, tester_factory: Callable) -> None: + # Basic test with default parameters + indices = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9], dtype=torch.long) + offsets = torch.tensor([0, 4], dtype=torch.long) # 2 bags + self._test_op(Model(), (indices, offsets), tester_factory, use_random_test_inputs=False) + + def test_embedding_bag_sizes(self, tester_factory: Callable) -> None: + # Test with different dictionary sizes and embedding dimensions + indices = torch.tensor([1, 2, 3, 1], dtype=torch.long) + offsets = torch.tensor([0, 2], dtype=torch.long) + + self._test_op(Model(num_embeddings=5, embedding_dim=3), + (indices, offsets), tester_factory, use_random_test_inputs=False) + + indices = torch.tensor([5, 20, 10, 43, 7], dtype=torch.long) + offsets = torch.tensor([0, 2, 4], dtype=torch.long) + self._test_op(Model(num_embeddings=50, embedding_dim=10), + (indices, offsets), tester_factory, use_random_test_inputs=False) + + indices = torch.tensor([100, 200, 300, 400], dtype=torch.long) + offsets = torch.tensor([0, 2], dtype=torch.long) + self._test_op(Model(num_embeddings=500, embedding_dim=20), + (indices, offsets), tester_factory, use_random_test_inputs=False) + + def test_embedding_bag_modes(self, tester_factory: Callable) -> None: + # Test with different modes (sum, mean, max) + indices = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9], dtype=torch.long) + offsets = torch.tensor([0, 4], dtype=torch.long) + + self._test_op(Model(mode='sum'), (indices, offsets), tester_factory, use_random_test_inputs=False) + self._test_op(Model(mode='mean'), (indices, offsets), tester_factory, use_random_test_inputs=False) + self._test_op(Model(mode='max'), (indices, offsets), tester_factory, use_random_test_inputs=False) + + def test_embedding_bag_padding_idx(self, tester_factory: Callable) -> None: + # Test with padding_idx + indices = torch.tensor([0, 1, 2, 0, 3, 0, 4], 
dtype=torch.long) + offsets = torch.tensor([0, 3, 6], dtype=torch.long) + + self._test_op(Model(padding_idx=0), (indices, offsets), tester_factory, use_random_test_inputs=False) + + indices = torch.tensor([1, 5, 2, 5, 3, 5, 4], dtype=torch.long) + offsets = torch.tensor([0, 3, 6], dtype=torch.long) + + self._test_op(Model(padding_idx=5), (indices, offsets), tester_factory, use_random_test_inputs=False) + + def test_embedding_bag_include_last_offset(self, tester_factory: Callable) -> None: + # Test with include_last_offset + indices = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9], dtype=torch.long) + offsets = torch.tensor([0, 4], dtype=torch.long) + + self._test_op(Model(include_last_offset=True), (indices, offsets), tester_factory, use_random_test_inputs=False) + \ No newline at end of file From ca1b8874e519f620dbda9abfa35925c363473916 Mon Sep 17 00:00:00 2001 From: Gregory Comer Date: Thu, 24 Jul 2025 21:06:17 -0700 Subject: [PATCH 4/5] Update [ghstack-poisoned] --- .../test/suite/operators/test_masked_fill.py | 141 ++++++++++++++++++ backends/test/suite/operators/test_permute.py | 73 +++++++++ .../test/suite/operators/test_transpose.py | 100 +++++++++++++ 3 files changed, 314 insertions(+) create mode 100644 backends/test/suite/operators/test_masked_fill.py create mode 100644 backends/test/suite/operators/test_permute.py create mode 100644 backends/test/suite/operators/test_transpose.py diff --git a/backends/test/suite/operators/test_masked_fill.py b/backends/test/suite/operators/test_masked_fill.py new file mode 100644 index 00000000000..44e9a16c218 --- /dev/null +++ b/backends/test/suite/operators/test_masked_fill.py @@ -0,0 +1,141 @@ +# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. + +# pyre-strict + +from typing import Callable, List, Union + +import torch + +from executorch.backends.test.compliance_suite import ( + dtype_test, + operator_test, + OperatorTest, +) + +class MaskedFillModel(torch.nn.Module): + def __init__(self, value: Union[float, int]): + super().__init__() + self.value = value + + def forward(self, x, mask): + return x.masked_fill(mask, self.value) + +@operator_test +class TestMaskedFill(OperatorTest): + @dtype_test + def test_masked_fill_dtype(self, dtype, tester_factory: Callable) -> None: + # Test with different dtypes + model = MaskedFillModel(value=0.0) + self._test_op( + model, + ( + torch.rand(3, 4).to(dtype), + torch.tensor([[True, False, True, False], [False, True, False, True], [True, True, False, False]]), + ), + tester_factory + ) + + def test_masked_fill_basic(self, tester_factory: Callable) -> None: + # Basic test with default parameters + # Fill with 0.0 where mask is True + self._test_op( + MaskedFillModel(value=0.0), + ( + torch.randn(3, 4), + torch.tensor([[True, False, True, False], [False, True, False, True], [True, True, False, False]]), + ), + tester_factory + ) + + def test_masked_fill_different_values(self, tester_factory: Callable) -> None: + # Test with different fill values + + # Fill with a positive value + self._test_op( + MaskedFillModel(value=5.0), + ( + torch.randn(3, 4), + torch.tensor([[True, False, True, False], [False, True, False, True], [True, True, False, False]]), + ), + tester_factory + ) + + # Fill with a negative value + self._test_op( + MaskedFillModel(value=-5.0), + ( + torch.randn(3, 4), + torch.tensor([[True, False, True, False], [False, True, False, True], [True, True, False, False]]), + ), + tester_factory + ) + + # Fill with an integer value + self._test_op( + MaskedFillModel(value=1), + ( + 
torch.randn(3, 4), + torch.tensor([[True, False, True, False], [False, True, False, True], [True, True, False, False]]), + ), + tester_factory + ) + + def test_masked_fill_different_shapes(self, tester_factory: Callable) -> None: + # Test with tensors of different shapes + + # 1D tensor + self._test_op( + MaskedFillModel(value=0.0), + ( + torch.randn(5), + torch.tensor([True, False, True, False, True]), + ), + tester_factory + ) + + # 3D tensor + self._test_op( + MaskedFillModel(value=0.0), + ( + torch.randn(2, 3, 4), + torch.tensor([ + [[True, False, True, False], [False, True, False, True], [True, True, False, False]], + [[False, False, True, True], [True, False, True, False], [False, True, False, True]] + ]), + ), + tester_factory + ) + + def test_masked_fill_all_true(self, tester_factory: Callable) -> None: + # Test with all mask values set to True + self._test_op( + MaskedFillModel(value=0.0), + ( + torch.randn(3, 4), + torch.ones(3, 4, dtype=torch.bool), + ), + tester_factory + ) + + def test_masked_fill_all_false(self, tester_factory: Callable) -> None: + # Test with all mask values set to False + self._test_op( + MaskedFillModel(value=0.0), + ( + torch.randn(3, 4), + torch.zeros(3, 4, dtype=torch.bool), + ), + tester_factory + ) + + def test_masked_fill_broadcast(self, tester_factory: Callable) -> None: + # Test with broadcasting mask + # A 1D mask can be broadcast to a 2D tensor + self._test_op( + MaskedFillModel(value=0.0), + ( + torch.randn(3, 4), + torch.tensor([True, False, True, False]), + ), + tester_factory + ) diff --git a/backends/test/suite/operators/test_permute.py b/backends/test/suite/operators/test_permute.py new file mode 100644 index 00000000000..dc0106a0797 --- /dev/null +++ b/backends/test/suite/operators/test_permute.py @@ -0,0 +1,73 @@ +# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. 
+ +# pyre-strict + +from typing import Callable, List + +import torch + +from executorch.backends.test.compliance_suite import ( + dtype_test, + operator_test, + OperatorTest, +) + +class PermuteModel(torch.nn.Module): + def __init__(self, dims: List[int]): + super().__init__() + self.dims = dims + + def forward(self, x): + return x.permute(self.dims) + +@operator_test +class TestPermute(OperatorTest): + @dtype_test + def test_permute_dtype(self, dtype, tester_factory: Callable) -> None: + # Test with different dtypes + model = PermuteModel(dims=[1, 0]) + self._test_op(model, (torch.rand(3, 4).to(dtype),), tester_factory) + + def test_permute_basic(self, tester_factory: Callable) -> None: + # Basic test with default parameters + # Permute a 2D tensor from [3, 4] to [4, 3] + self._test_op(PermuteModel(dims=[1, 0]), (torch.randn(3, 4),), tester_factory) + + def test_permute_3d(self, tester_factory: Callable) -> None: + # Test permuting a 3D tensor + + # Permute from [2, 3, 4] to [4, 2, 3] + self._test_op(PermuteModel(dims=[2, 0, 1]), (torch.randn(2, 3, 4),), tester_factory) + + # Permute from [2, 3, 4] to [3, 4, 2] + self._test_op(PermuteModel(dims=[1, 2, 0]), (torch.randn(2, 3, 4),), tester_factory) + + # Permute from [2, 3, 4] to [2, 4, 3] + self._test_op(PermuteModel(dims=[0, 2, 1]), (torch.randn(2, 3, 4),), tester_factory) + + def test_permute_4d(self, tester_factory: Callable) -> None: + # Test permuting a 4D tensor + + # Permute from [2, 3, 4, 5] to [5, 4, 3, 2] + self._test_op(PermuteModel(dims=[3, 2, 1, 0]), (torch.randn(2, 3, 4, 5),), tester_factory) + + # Permute from [2, 3, 4, 5] to [2, 4, 3, 5] + self._test_op(PermuteModel(dims=[0, 2, 1, 3]), (torch.randn(2, 3, 4, 5),), tester_factory) + + def test_permute_identity(self, tester_factory: Callable) -> None: + # Test identity permutation (no change) + + # 2D tensor + self._test_op(PermuteModel(dims=[0, 1]), (torch.randn(3, 4),), tester_factory) + + # 3D tensor + self._test_op(PermuteModel(dims=[0, 1, 2]), (torch.randn(2, 3, 4),), tester_factory) + + def test_permute_different_shapes(self, tester_factory: Callable) -> None: + # Test with tensors of different shapes + + # 1D tensor (no permutation possible) + self._test_op(PermuteModel(dims=[0]), (torch.randn(5),), tester_factory) + + # 5D tensor + self._test_op(PermuteModel(dims=[4, 3, 2, 1, 0]), (torch.randn(2, 3, 4, 5, 6),), tester_factory) diff --git a/backends/test/suite/operators/test_transpose.py b/backends/test/suite/operators/test_transpose.py new file mode 100644 index 00000000000..c0a02b517f0 --- /dev/null +++ b/backends/test/suite/operators/test_transpose.py @@ -0,0 +1,100 @@ +# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. 
+ +# pyre-strict + +from typing import Callable, List + +import torch + +from executorch.backends.test.compliance_suite import ( + dtype_test, + operator_test, + OperatorTest, +) + +class TransposeModel(torch.nn.Module): + def __init__(self, dim0: int, dim1: int): + super().__init__() + self.dim0 = dim0 + self.dim1 = dim1 + + def forward(self, x): + return torch.transpose(x, self.dim0, self.dim1) + +@operator_test +class TestTranspose(OperatorTest): + @dtype_test + def test_transpose_dtype(self, dtype, tester_factory: Callable) -> None: + # Test with different dtypes + model = TransposeModel(dim0=0, dim1=1) + self._test_op(model, (torch.rand(3, 4).to(dtype),), tester_factory) + + def test_transpose_basic(self, tester_factory: Callable) -> None: + # Basic test with default parameters + # Transpose a 2D tensor from [3, 4] to [4, 3] + self._test_op(TransposeModel(dim0=0, dim1=1), (torch.randn(3, 4),), tester_factory) + + def test_transpose_3d(self, tester_factory: Callable) -> None: + # Test transposing a 3D tensor + + # Transpose dimensions 0 and 1 + # From [2, 3, 4] to [3, 2, 4] + self._test_op(TransposeModel(dim0=0, dim1=1), (torch.randn(2, 3, 4),), tester_factory) + + # Transpose dimensions 0 and 2 + # From [2, 3, 4] to [4, 3, 2] + self._test_op(TransposeModel(dim0=0, dim1=2), (torch.randn(2, 3, 4),), tester_factory) + + # Transpose dimensions 1 and 2 + # From [2, 3, 4] to [2, 4, 3] + self._test_op(TransposeModel(dim0=1, dim1=2), (torch.randn(2, 3, 4),), tester_factory) + + def test_transpose_4d(self, tester_factory: Callable) -> None: + # Test transposing a 4D tensor + + # Transpose dimensions 0 and 3 + # From [2, 3, 4, 5] to [5, 3, 4, 2] + self._test_op(TransposeModel(dim0=0, dim1=3), (torch.randn(2, 3, 4, 5),), tester_factory) + + # Transpose dimensions 1 and 2 + # From [2, 3, 4, 5] to [2, 4, 3, 5] + self._test_op(TransposeModel(dim0=1, dim1=2), (torch.randn(2, 3, 4, 5),), tester_factory) + + def test_transpose_identity(self, tester_factory: Callable) -> None: + # Test identity transpose (same dimension, no change) + + # 2D tensor + self._test_op(TransposeModel(dim0=0, dim1=0), (torch.randn(3, 4),), tester_factory) + self._test_op(TransposeModel(dim0=1, dim1=1), (torch.randn(3, 4),), tester_factory) + + # 3D tensor + self._test_op(TransposeModel(dim0=0, dim1=0), (torch.randn(2, 3, 4),), tester_factory) + self._test_op(TransposeModel(dim0=1, dim1=1), (torch.randn(2, 3, 4),), tester_factory) + self._test_op(TransposeModel(dim0=2, dim1=2), (torch.randn(2, 3, 4),), tester_factory) + + def test_transpose_negative_dims(self, tester_factory: Callable) -> None: + # Test with negative dimensions (counting from the end) + + # 3D tensor + # Transpose dimensions -3 and -1 (equivalent to 0 and 2) + # From [2, 3, 4] to [4, 3, 2] + self._test_op(TransposeModel(dim0=-3, dim1=-1), (torch.randn(2, 3, 4),), tester_factory) + + # Transpose dimensions -2 and -1 (equivalent to 1 and 2) + # From [2, 3, 4] to [2, 4, 3] + self._test_op(TransposeModel(dim0=-2, dim1=-1), (torch.randn(2, 3, 4),), tester_factory) + + def test_transpose_different_shapes(self, tester_factory: Callable) -> None: + # Test with tensors of different shapes + + # 2D tensor + self._test_op(TransposeModel(dim0=0, dim1=1), (torch.randn(3, 4),), tester_factory) + + # 3D tensor + self._test_op(TransposeModel(dim0=0, dim1=2), (torch.randn(2, 3, 4),), tester_factory) + + # 4D tensor + self._test_op(TransposeModel(dim0=1, dim1=3), (torch.randn(2, 3, 4, 5),), tester_factory) + + # 5D tensor + self._test_op(TransposeModel(dim0=0, dim1=4), 
(torch.randn(2, 3, 4, 5, 6),), tester_factory) From 6acef0f6bb0765b434faeb1a277e8ae9160c3e4f Mon Sep 17 00:00:00 2001 From: Gregory James Comer Date: Fri, 25 Jul 2025 21:30:20 -0700 Subject: [PATCH 5/5] Update [ghstack-poisoned] --- backends/test/suite/operators/test_conv1d.py | 144 ++++++++----- backends/test/suite/operators/test_conv2d.py | 182 +++++++++++------ backends/test/suite/operators/test_conv3d.py | 176 ++++++++++------ .../suite/operators/test_convtranspose1d.py | 156 +++++++++----- .../suite/operators/test_convtranspose2d.py | 190 ++++++++++++------ .../suite/operators/test_convtranspose3d.py | 184 +++++++++++------ 6 files changed, 692 insertions(+), 340 deletions(-) diff --git a/backends/test/suite/operators/test_conv1d.py b/backends/test/suite/operators/test_conv1d.py index 1efd7685c18..6f1b840861b 100644 --- a/backends/test/suite/operators/test_conv1d.py +++ b/backends/test/suite/operators/test_conv1d.py @@ -1,17 +1,22 @@ -# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. -# pyre-strict +# pyre-unsafe -from typing import Callable import torch +from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.compliance_suite import ( +from executorch.backends.test.suite.operators import ( dtype_test, operator_test, OperatorTest, ) + class Model(torch.nn.Module): def __init__( self, @@ -37,53 +42,96 @@ def __init__( bias=bias, padding_mode=padding_mode, ) - + def forward(self, x): return self.conv(x) + @operator_test -class TestConv1d(OperatorTest): +class Conv1d(OperatorTest): @dtype_test - def test_conv1d_dtype(self, dtype, tester_factory: Callable) -> None: - # Input shape: (batch_size, in_channels, length) - self._test_op(Model().to(dtype), ((torch.rand(2, 3, 10) * 10).to(dtype),), tester_factory) - - def test_conv1d_basic(self, tester_factory: Callable) -> None: - # Basic test with default parameters - self._test_op(Model(), (torch.randn(2, 3, 10),), tester_factory) - - def test_conv1d_kernel_size(self, tester_factory: Callable) -> None: - # Test with different kernel sizes - self._test_op(Model(kernel_size=1), (torch.randn(2, 3, 10),), tester_factory) - self._test_op(Model(kernel_size=5), (torch.randn(2, 3, 10),), tester_factory) - - def test_conv1d_stride(self, tester_factory: Callable) -> None: - # Test with different stride values - self._test_op(Model(stride=2), (torch.randn(2, 3, 10),), tester_factory) - - def test_conv1d_padding(self, tester_factory: Callable) -> None: - # Test with different padding values - self._test_op(Model(padding=1), (torch.randn(2, 3, 10),), tester_factory) - self._test_op(Model(padding=2), (torch.randn(2, 3, 10),), tester_factory) - - def test_conv1d_dilation(self, tester_factory: Callable) -> None: - # Test with different dilation values - self._test_op(Model(dilation=2), (torch.randn(2, 3, 10),), tester_factory) - - def test_conv1d_groups(self, tester_factory: Callable) -> None: - # Test with groups=3 (in_channels must be divisible by groups) - self._test_op(Model(in_channels=6, out_channels=6, groups=3), (torch.randn(2, 6, 10),), tester_factory) - - def test_conv1d_no_bias(self, tester_factory: Callable) -> None: - # Test without bias - self._test_op(Model(bias=False), (torch.randn(2, 3, 10),), tester_factory) - - def test_conv1d_padding_modes(self, tester_factory: Callable) -> 
None: - # Test different padding modes + def test_conv1d_dtype(self, flow: TestFlow, dtype) -> None: + self._test_op( + Model().to(dtype), + ((torch.rand(2, 3, 10) * 10).to(dtype),), + flow, + ) + + def test_conv1d_basic(self, flow: TestFlow) -> None: + self._test_op( + Model(), + (torch.randn(2, 3, 10),), + flow, + ) + + def test_conv1d_kernel_size(self, flow: TestFlow) -> None: + self._test_op( + Model(kernel_size=1), + (torch.randn(2, 3, 10),), + flow, + ) + self._test_op( + Model(kernel_size=5), + (torch.randn(2, 3, 10),), + flow, + ) + + def test_conv1d_stride(self, flow: TestFlow) -> None: + self._test_op( + Model(stride=2), + (torch.randn(2, 3, 10),), + flow, + ) + + def test_conv1d_padding(self, flow: TestFlow) -> None: + self._test_op( + Model(padding=1), + (torch.randn(2, 3, 10),), + flow, + ) + self._test_op( + Model(padding=2), + (torch.randn(2, 3, 10),), + flow, + ) + + def test_conv1d_dilation(self, flow: TestFlow) -> None: + self._test_op( + Model(dilation=2), + (torch.randn(2, 3, 10),), + flow, + ) + + def test_conv1d_groups(self, flow: TestFlow) -> None: + self._test_op( + Model(in_channels=6, out_channels=6, groups=3), + (torch.randn(2, 6, 10),), + flow, + ) + + def test_conv1d_no_bias(self, flow: TestFlow) -> None: + self._test_op( + Model(bias=False), + (torch.randn(2, 3, 10),), + flow, + ) + + def test_conv1d_padding_modes(self, flow: TestFlow) -> None: for mode in ["zeros", "reflect", "replicate", "circular"]: - self._test_op(Model(padding=1, padding_mode=mode), (torch.randn(2, 3, 10),), tester_factory) - - def test_conv1d_channels(self, tester_factory: Callable) -> None: - # Test with different channel configurations - self._test_op(Model(in_channels=1, out_channels=1), (torch.randn(2, 1, 10),), tester_factory) - self._test_op(Model(in_channels=5, out_channels=10), (torch.randn(2, 5, 10),), tester_factory) + self._test_op( + Model(padding=1, padding_mode=mode), + (torch.randn(2, 3, 10),), + flow, + ) + + def test_conv1d_channels(self, flow: TestFlow) -> None: + self._test_op( + Model(in_channels=1, out_channels=1), + (torch.randn(2, 1, 10),), + flow, + ) + self._test_op( + Model(in_channels=5, out_channels=10), + (torch.randn(2, 5, 10),), + flow, + ) diff --git a/backends/test/suite/operators/test_conv2d.py b/backends/test/suite/operators/test_conv2d.py index 40b3b9dc24b..2a7bae01faa 100644 --- a/backends/test/suite/operators/test_conv2d.py +++ b/backends/test/suite/operators/test_conv2d.py @@ -1,26 +1,32 @@ -# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
-# pyre-strict +# pyre-unsafe -from typing import Callable, Union, Tuple +from typing import Tuple, Union import torch +from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.compliance_suite import ( +from executorch.backends.test.suite.operators import ( dtype_test, operator_test, OperatorTest, ) + class Model(torch.nn.Module): def __init__( self, in_channels=3, out_channels=6, - kernel_size=3, - stride=1, - padding=0, - dilation=1, + kernel_size: Union[int, Tuple[int, int]] = 3, + stride: Union[int, Tuple[int, int]] = 1, + padding: Union[int, Tuple[int, int]] = 0, + dilation: Union[int, Tuple[int, int]] = 1, groups=1, bias=True, padding_mode="zeros", @@ -37,60 +43,118 @@ def __init__( bias=bias, padding_mode=padding_mode, ) - + def forward(self, x): return self.conv(x) + @operator_test -class TestConv2d(OperatorTest): +class Conv2d(OperatorTest): @dtype_test - def test_conv2d_dtype(self, dtype, tester_factory: Callable) -> None: - # Input shape: (batch_size, in_channels, height, width) - self._test_op(Model().to(dtype), ((torch.rand(2, 3, 8, 8) * 10).to(dtype),), tester_factory) - - def test_conv2d_basic(self, tester_factory: Callable) -> None: - # Basic test with default parameters - self._test_op(Model(), (torch.randn(2, 3, 8, 8),), tester_factory) - - def test_conv2d_kernel_size(self, tester_factory: Callable) -> None: - # Test with different kernel sizes - self._test_op(Model(kernel_size=1), (torch.randn(2, 3, 8, 8),), tester_factory) - self._test_op(Model(kernel_size=5), (torch.randn(2, 3, 8, 8),), tester_factory) - self._test_op(Model(kernel_size=(3, 5)), (torch.randn(2, 3, 8, 8),), tester_factory) - - def test_conv2d_stride(self, tester_factory: Callable) -> None: - # Test with different stride values - self._test_op(Model(stride=2), (torch.randn(2, 3, 8, 8),), tester_factory) - self._test_op(Model(stride=(2, 1)), (torch.randn(2, 3, 8, 8),), tester_factory) - - def test_conv2d_padding(self, tester_factory: Callable) -> None: - # Test with different padding values - self._test_op(Model(padding=1), (torch.randn(2, 3, 8, 8),), tester_factory) - self._test_op(Model(padding=(1, 2)), (torch.randn(2, 3, 8, 8),), tester_factory) - - def test_conv2d_dilation(self, tester_factory: Callable) -> None: - # Test with different dilation values - self._test_op(Model(dilation=2), (torch.randn(2, 3, 8, 8),), tester_factory) - self._test_op(Model(dilation=(2, 1)), (torch.randn(2, 3, 8, 8),), tester_factory) - - def test_conv2d_groups(self, tester_factory: Callable) -> None: - # Test with groups=3 (in_channels must be divisible by groups) - self._test_op(Model(in_channels=6, out_channels=6, groups=3), (torch.randn(2, 6, 8, 8),), tester_factory) - - def test_conv2d_no_bias(self, tester_factory: Callable) -> None: - # Test without bias - self._test_op(Model(bias=False), (torch.randn(2, 3, 8, 8),), tester_factory) - - def test_conv2d_padding_modes(self, tester_factory: Callable) -> None: - # Test different padding modes + def test_conv2d_dtype(self, flow: TestFlow, dtype) -> None: + self._test_op( + Model().to(dtype), + ((torch.rand(2, 3, 8, 8) * 10).to(dtype),), + flow, + ) + + def test_conv2d_basic(self, flow: TestFlow) -> None: + self._test_op( + Model(), + (torch.randn(2, 3, 8, 8),), + flow, + ) + + def test_conv2d_kernel_size(self, flow: TestFlow) -> None: + self._test_op( + Model(kernel_size=1), + (torch.randn(2, 3, 8, 8),), + flow, + ) + self._test_op( + Model(kernel_size=5), + (torch.randn(2, 3, 8, 8),), + flow, + ) + self._test_op( + Model(kernel_size=(3, 5)), 
+ (torch.randn(2, 3, 8, 8),), + flow, + ) + + def test_conv2d_stride(self, flow: TestFlow) -> None: + self._test_op( + Model(stride=2), + (torch.randn(2, 3, 8, 8),), + flow, + ) + self._test_op( + Model(stride=(2, 1)), + (torch.randn(2, 3, 8, 8),), + flow, + ) + + def test_conv2d_padding(self, flow: TestFlow) -> None: + self._test_op( + Model(padding=1), + (torch.randn(2, 3, 8, 8),), + flow, + ) + self._test_op( + Model(padding=(1, 2)), + (torch.randn(2, 3, 8, 8),), + flow, + ) + + def test_conv2d_dilation(self, flow: TestFlow) -> None: + self._test_op( + Model(dilation=2), + (torch.randn(2, 3, 8, 8),), + flow, + ) + self._test_op( + Model(dilation=(2, 1)), + (torch.randn(2, 3, 8, 8),), + flow, + ) + + def test_conv2d_groups(self, flow: TestFlow) -> None: + self._test_op( + Model(in_channels=6, out_channels=6, groups=3), + (torch.randn(2, 6, 8, 8),), + flow, + ) + + def test_conv2d_no_bias(self, flow: TestFlow) -> None: + self._test_op( + Model(bias=False), + (torch.randn(2, 3, 8, 8),), + flow, + ) + + def test_conv2d_padding_modes(self, flow: TestFlow) -> None: for mode in ["zeros", "reflect", "replicate", "circular"]: - self._test_op(Model(padding=1, padding_mode=mode), (torch.randn(2, 3, 8, 8),), tester_factory) - - def test_conv2d_channels(self, tester_factory: Callable) -> None: - # Test with different channel configurations - self._test_op(Model(in_channels=1, out_channels=1), (torch.randn(2, 1, 8, 8),), tester_factory) - self._test_op(Model(in_channels=5, out_channels=10), (torch.randn(2, 5, 8, 8),), tester_factory) - - def test_conv2d_different_spatial_dims(self, tester_factory: Callable) -> None: - # Test with different height and width - self._test_op(Model(), (torch.randn(2, 3, 10, 8),), tester_factory) + self._test_op( + Model(padding=1, padding_mode=mode), + (torch.randn(2, 3, 8, 8),), + flow, + ) + + def test_conv2d_channels(self, flow: TestFlow) -> None: + self._test_op( + Model(in_channels=1, out_channels=1), + (torch.randn(2, 1, 8, 8),), + flow, + ) + self._test_op( + Model(in_channels=5, out_channels=10), + (torch.randn(2, 5, 8, 8),), + flow, + ) + + def test_conv2d_different_spatial_dims(self, flow: TestFlow) -> None: + self._test_op( + Model(), + (torch.randn(2, 3, 10, 8),), + flow, + ) diff --git a/backends/test/suite/operators/test_conv3d.py b/backends/test/suite/operators/test_conv3d.py index baade4df10e..276ee20734e 100644 --- a/backends/test/suite/operators/test_conv3d.py +++ b/backends/test/suite/operators/test_conv3d.py @@ -1,26 +1,32 @@ -# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
-# pyre-strict +# pyre-unsafe -from typing import Callable, Union, Tuple +from typing import Tuple, Union import torch +from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.compliance_suite import ( +from executorch.backends.test.suite.operators import ( dtype_test, operator_test, OperatorTest, ) + class Model(torch.nn.Module): def __init__( self, in_channels=3, out_channels=6, - kernel_size=3, - stride=1, - padding=0, - dilation=1, + kernel_size: Union[int, Tuple[int, int, int]] = 3, + stride: Union[int, Tuple[int, int, int]] = 1, + padding: Union[int, Tuple[int, int, int]] = 0, + dilation: Union[int, Tuple[int, int, int]] = 1, groups=1, bias=True, padding_mode="zeros", @@ -37,59 +43,113 @@ def __init__( bias=bias, padding_mode=padding_mode, ) - + def forward(self, x): return self.conv(x) + @operator_test -class TestConv3d(OperatorTest): +class Conv3d(OperatorTest): @dtype_test - def test_conv3d_dtype(self, dtype, tester_factory: Callable) -> None: - # Input shape: (batch_size, in_channels, depth, height, width) - self._test_op(Model().to(dtype), ((torch.rand(2, 3, 4, 4, 4) * 10).to(dtype),), tester_factory) - - def test_conv3d_basic(self, tester_factory: Callable) -> None: - # Basic test with default parameters - self._test_op(Model(), (torch.randn(2, 3, 4, 4, 4),), tester_factory) - - def test_conv3d_kernel_size(self, tester_factory: Callable) -> None: - # Test with different kernel sizes - self._test_op(Model(kernel_size=1), (torch.randn(2, 3, 4, 4, 4),), tester_factory) - self._test_op(Model(kernel_size=(1, 3, 3)), (torch.randn(2, 3, 4, 4, 4),), tester_factory) - - def test_conv3d_stride(self, tester_factory: Callable) -> None: - # Test with different stride values - self._test_op(Model(stride=2), (torch.randn(2, 3, 6, 6, 6),), tester_factory) - self._test_op(Model(stride=(1, 2, 2)), (torch.randn(2, 3, 4, 6, 6),), tester_factory) - - def test_conv3d_padding(self, tester_factory: Callable) -> None: - # Test with different padding values - self._test_op(Model(padding=1), (torch.randn(2, 3, 4, 4, 4),), tester_factory) - self._test_op(Model(padding=(0, 1, 1)), (torch.randn(2, 3, 4, 4, 4),), tester_factory) - - def test_conv3d_dilation(self, tester_factory: Callable) -> None: - # Test with different dilation values - self._test_op(Model(dilation=2), (torch.randn(2, 3, 6, 6, 6),), tester_factory) - self._test_op(Model(dilation=(1, 2, 2)), (torch.randn(2, 3, 4, 6, 6),), tester_factory) - - def test_conv3d_groups(self, tester_factory: Callable) -> None: - # Test with groups=3 (in_channels must be divisible by groups) - self._test_op(Model(in_channels=6, out_channels=6, groups=3), (torch.randn(2, 6, 4, 4, 4),), tester_factory) - - def test_conv3d_no_bias(self, tester_factory: Callable) -> None: - # Test without bias - self._test_op(Model(bias=False), (torch.randn(2, 3, 4, 4, 4),), tester_factory) - - def test_conv3d_padding_modes(self, tester_factory: Callable) -> None: - # Test different padding modes + def test_conv3d_dtype(self, flow: TestFlow, dtype) -> None: + self._test_op( + Model().to(dtype), + ((torch.rand(2, 3, 4, 4, 4) * 10).to(dtype),), + flow, + ) + + def test_conv3d_basic(self, flow: TestFlow) -> None: + self._test_op( + Model(), + (torch.randn(2, 3, 4, 4, 4),), + flow, + ) + + def test_conv3d_kernel_size(self, flow: TestFlow) -> None: + self._test_op( + Model(kernel_size=1), + (torch.randn(2, 3, 4, 4, 4),), + flow, + ) + self._test_op( + Model(kernel_size=(1, 3, 3)), + (torch.randn(2, 3, 4, 4, 4),), + flow, + ) + + def test_conv3d_stride(self, 
flow: TestFlow) -> None: + self._test_op( + Model(stride=2), + (torch.randn(2, 3, 6, 6, 6),), + flow, + ) + self._test_op( + Model(stride=(1, 2, 2)), + (torch.randn(2, 3, 4, 6, 6),), + flow, + ) + + def test_conv3d_padding(self, flow: TestFlow) -> None: + self._test_op( + Model(padding=1), + (torch.randn(2, 3, 4, 4, 4),), + flow, + ) + self._test_op( + Model(padding=(0, 1, 1)), + (torch.randn(2, 3, 4, 4, 4),), + flow, + ) + + def test_conv3d_dilation(self, flow: TestFlow) -> None: + self._test_op( + Model(dilation=2), + (torch.randn(2, 3, 6, 6, 6),), + flow, + ) + self._test_op( + Model(dilation=(1, 2, 2)), + (torch.randn(2, 3, 4, 6, 6),), + flow, + ) + + def test_conv3d_groups(self, flow: TestFlow) -> None: + self._test_op( + Model(in_channels=6, out_channels=6, groups=3), + (torch.randn(2, 6, 4, 4, 4),), + flow, + ) + + def test_conv3d_no_bias(self, flow: TestFlow) -> None: + self._test_op( + Model(bias=False), + (torch.randn(2, 3, 4, 4, 4),), + flow, + ) + + def test_conv3d_padding_modes(self, flow: TestFlow) -> None: for mode in ["zeros", "reflect", "replicate", "circular"]: - self._test_op(Model(padding=1, padding_mode=mode), (torch.randn(2, 3, 4, 4, 4),), tester_factory) - - def test_conv3d_channels(self, tester_factory: Callable) -> None: - # Test with different channel configurations - self._test_op(Model(in_channels=1, out_channels=1), (torch.randn(2, 1, 4, 4, 4),), tester_factory) - self._test_op(Model(in_channels=5, out_channels=10), (torch.randn(2, 5, 4, 4, 4),), tester_factory) - - def test_conv3d_different_spatial_dims(self, tester_factory: Callable) -> None: - # Test with different depth, height, and width - self._test_op(Model(), (torch.randn(2, 3, 3, 4, 5),), tester_factory) + self._test_op( + Model(padding=1, padding_mode=mode), + (torch.randn(2, 3, 4, 4, 4),), + flow, + ) + + def test_conv3d_channels(self, flow: TestFlow) -> None: + self._test_op( + Model(in_channels=1, out_channels=1), + (torch.randn(2, 1, 4, 4, 4),), + flow, + ) + self._test_op( + Model(in_channels=5, out_channels=10), + (torch.randn(2, 5, 4, 4, 4),), + flow, + ) + + def test_conv3d_different_spatial_dims(self, flow: TestFlow) -> None: + self._test_op( + Model(), + (torch.randn(2, 3, 3, 4, 5),), + flow, + ) diff --git a/backends/test/suite/operators/test_convtranspose1d.py b/backends/test/suite/operators/test_convtranspose1d.py index d93e542de4a..b2fe3040225 100644 --- a/backends/test/suite/operators/test_convtranspose1d.py +++ b/backends/test/suite/operators/test_convtranspose1d.py @@ -1,27 +1,33 @@ -# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
-# pyre-strict +# pyre-unsafe -from typing import Callable, Union, Tuple +from typing import Tuple, Union import torch +from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.compliance_suite import ( +from executorch.backends.test.suite.operators import ( dtype_test, operator_test, OperatorTest, ) + class Model(torch.nn.Module): def __init__( self, in_channels=3, out_channels=6, - kernel_size=3, - stride=1, - padding=0, - output_padding=0, - dilation=1, + kernel_size: Union[int, Tuple[int]] = 3, + stride: Union[int, Tuple[int]] = 1, + padding: Union[int, Tuple[int]] = 0, + output_padding: Union[int, Tuple[int]] = 0, + dilation: Union[int, Tuple[int]] = 1, groups=1, bias=True, ): @@ -37,53 +43,95 @@ def __init__( groups=groups, bias=bias, ) - + def forward(self, x): return self.conv_transpose(x) + @operator_test -class TestConvTranspose1d(OperatorTest): +class ConvTranspose1d(OperatorTest): @dtype_test - def test_convtranspose1d_dtype(self, dtype, tester_factory: Callable) -> None: - # Input shape: (batch_size, in_channels, length) - self._test_op(Model().to(dtype), ((torch.rand(2, 3, 10) * 10).to(dtype),), tester_factory) - - def test_convtranspose1d_basic(self, tester_factory: Callable) -> None: - # Basic test with default parameters - self._test_op(Model(), (torch.randn(2, 3, 10),), tester_factory) - - def test_convtranspose1d_kernel_size(self, tester_factory: Callable) -> None: - # Test with different kernel sizes - self._test_op(Model(kernel_size=1), (torch.randn(2, 3, 10),), tester_factory) - self._test_op(Model(kernel_size=5), (torch.randn(2, 3, 10),), tester_factory) - - def test_convtranspose1d_stride(self, tester_factory: Callable) -> None: - # Test with different stride values - self._test_op(Model(stride=2), (torch.randn(2, 3, 10),), tester_factory) - - def test_convtranspose1d_padding(self, tester_factory: Callable) -> None: - # Test with different padding values - self._test_op(Model(padding=1), (torch.randn(2, 3, 10),), tester_factory) - self._test_op(Model(padding=2), (torch.randn(2, 3, 10),), tester_factory) - - def test_convtranspose1d_output_padding(self, tester_factory: Callable) -> None: - # Test with different output_padding values (requires stride > 1) - self._test_op(Model(stride=2, output_padding=1), (torch.randn(2, 3, 10),), tester_factory) - - def test_convtranspose1d_dilation(self, tester_factory: Callable) -> None: - # Test with different dilation values - self._test_op(Model(dilation=2), (torch.randn(2, 3, 10),), tester_factory) - - def test_convtranspose1d_groups(self, tester_factory: Callable) -> None: - # Test with groups=3 (in_channels and out_channels must be divisible by groups) - self._test_op(Model(in_channels=6, out_channels=6, groups=3), (torch.randn(2, 6, 10),), tester_factory) - - def test_convtranspose1d_no_bias(self, tester_factory: Callable) -> None: - # Test without bias - self._test_op(Model(bias=False), (torch.randn(2, 3, 10),), tester_factory) - - def test_convtranspose1d_channels(self, tester_factory: Callable) -> None: - # Test with different channel configurations - self._test_op(Model(in_channels=1, out_channels=1), (torch.randn(2, 1, 10),), tester_factory) - self._test_op(Model(in_channels=5, out_channels=10), (torch.randn(2, 5, 10),), tester_factory) - \ No newline at end of file + def test_convtranspose1d_dtype(self, flow: TestFlow, dtype) -> None: + self._test_op( + Model().to(dtype), + ((torch.rand(2, 3, 10) * 10).to(dtype),), + flow, + ) + + def test_convtranspose1d_basic(self, flow: TestFlow) -> 
None: + self._test_op( + Model(), + (torch.randn(2, 3, 10),), + flow, + ) + + def test_convtranspose1d_kernel_size(self, flow: TestFlow) -> None: + self._test_op( + Model(kernel_size=1), + (torch.randn(2, 3, 10),), + flow, + ) + self._test_op( + Model(kernel_size=5), + (torch.randn(2, 3, 10),), + flow, + ) + + def test_convtranspose1d_stride(self, flow: TestFlow) -> None: + self._test_op( + Model(stride=2), + (torch.randn(2, 3, 10),), + flow, + ) + + def test_convtranspose1d_padding(self, flow: TestFlow) -> None: + self._test_op( + Model(padding=1), + (torch.randn(2, 3, 10),), + flow, + ) + self._test_op( + Model(padding=2), + (torch.randn(2, 3, 10),), + flow, + ) + + def test_convtranspose1d_output_padding(self, flow: TestFlow) -> None: + self._test_op( + Model(stride=2, output_padding=1), + (torch.randn(2, 3, 10),), + flow, + ) + + def test_convtranspose1d_dilation(self, flow: TestFlow) -> None: + self._test_op( + Model(dilation=2), + (torch.randn(2, 3, 10),), + flow, + ) + + def test_convtranspose1d_groups(self, flow: TestFlow) -> None: + self._test_op( + Model(in_channels=6, out_channels=6, groups=3), + (torch.randn(2, 6, 10),), + flow, + ) + + def test_convtranspose1d_no_bias(self, flow: TestFlow) -> None: + self._test_op( + Model(bias=False), + (torch.randn(2, 3, 10),), + flow, + ) + + def test_convtranspose1d_channels(self, flow: TestFlow) -> None: + self._test_op( + Model(in_channels=1, out_channels=1), + (torch.randn(2, 1, 10),), + flow, + ) + self._test_op( + Model(in_channels=5, out_channels=10), + (torch.randn(2, 5, 10),), + flow, + ) diff --git a/backends/test/suite/operators/test_convtranspose2d.py b/backends/test/suite/operators/test_convtranspose2d.py index b5a4dfb784c..13ad272645f 100644 --- a/backends/test/suite/operators/test_convtranspose2d.py +++ b/backends/test/suite/operators/test_convtranspose2d.py @@ -1,27 +1,33 @@ -# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
-# pyre-strict +# pyre-unsafe -from typing import Callable, Union, Tuple +from typing import Tuple, Union import torch +from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.compliance_suite import ( +from executorch.backends.test.suite.operators import ( dtype_test, operator_test, OperatorTest, ) + class Model(torch.nn.Module): def __init__( self, in_channels=3, out_channels=6, - kernel_size=3, - stride=1, - padding=0, - output_padding=0, - dilation=1, + kernel_size: Union[int, Tuple[int, int]] = 3, + stride: Union[int, Tuple[int, int]] = 1, + padding: Union[int, Tuple[int, int]] = 0, + output_padding: Union[int, Tuple[int, int]] = 0, + dilation: Union[int, Tuple[int, int]] = 1, groups=1, bias=True, ): @@ -37,60 +43,122 @@ def __init__( groups=groups, bias=bias, ) - + def forward(self, x): return self.conv_transpose(x) + @operator_test -class TestConvTranspose2d(OperatorTest): +class ConvTranspose2d(OperatorTest): @dtype_test - def test_convtranspose2d_dtype(self, dtype, tester_factory: Callable) -> None: - # Input shape: (batch_size, in_channels, height, width) - self._test_op(Model().to(dtype), ((torch.rand(2, 3, 8, 8) * 10).to(dtype),), tester_factory) - - def test_convtranspose2d_basic(self, tester_factory: Callable) -> None: - # Basic test with default parameters - self._test_op(Model(), (torch.randn(2, 3, 8, 8),), tester_factory) - - def test_convtranspose2d_kernel_size(self, tester_factory: Callable) -> None: - # Test with different kernel sizes - self._test_op(Model(kernel_size=1), (torch.randn(2, 3, 8, 8),), tester_factory) - self._test_op(Model(kernel_size=5), (torch.randn(2, 3, 8, 8),), tester_factory) - self._test_op(Model(kernel_size=(3, 5)), (torch.randn(2, 3, 8, 8),), tester_factory) - - def test_convtranspose2d_stride(self, tester_factory: Callable) -> None: - # Test with different stride values - self._test_op(Model(stride=2), (torch.randn(2, 3, 8, 8),), tester_factory) - self._test_op(Model(stride=(2, 1)), (torch.randn(2, 3, 8, 8),), tester_factory) - - def test_convtranspose2d_padding(self, tester_factory: Callable) -> None: - # Test with different padding values - self._test_op(Model(padding=1), (torch.randn(2, 3, 8, 8),), tester_factory) - self._test_op(Model(padding=(1, 2)), (torch.randn(2, 3, 8, 8),), tester_factory) - - def test_convtranspose2d_output_padding(self, tester_factory: Callable) -> None: - # Test with different output_padding values (requires stride > 1) - self._test_op(Model(stride=2, output_padding=1), (torch.randn(2, 3, 8, 8),), tester_factory) - self._test_op(Model(stride=(2, 2), output_padding=(1, 0)), (torch.randn(2, 3, 8, 8),), tester_factory) - - def test_convtranspose2d_dilation(self, tester_factory: Callable) -> None: - # Test with different dilation values - self._test_op(Model(dilation=2), (torch.randn(2, 3, 8, 8),), tester_factory) - self._test_op(Model(dilation=(2, 1)), (torch.randn(2, 3, 8, 8),), tester_factory) - - def test_convtranspose2d_groups(self, tester_factory: Callable) -> None: - # Test with groups=3 (in_channels and out_channels must be divisible by groups) - self._test_op(Model(in_channels=6, out_channels=6, groups=3), (torch.randn(2, 6, 8, 8),), tester_factory) - - def test_convtranspose2d_no_bias(self, tester_factory: Callable) -> None: - # Test without bias - self._test_op(Model(bias=False), (torch.randn(2, 3, 8, 8),), tester_factory) - - def test_convtranspose2d_channels(self, tester_factory: Callable) -> None: - # Test with different channel configurations - 
self._test_op(Model(in_channels=1, out_channels=1), (torch.randn(2, 1, 8, 8),), tester_factory) - self._test_op(Model(in_channels=5, out_channels=10), (torch.randn(2, 5, 8, 8),), tester_factory) - - def test_convtranspose2d_different_spatial_dims(self, tester_factory: Callable) -> None: - # Test with different height and width - self._test_op(Model(), (torch.randn(2, 3, 10, 8),), tester_factory) + def test_convtranspose2d_dtype(self, flow: TestFlow, dtype) -> None: + self._test_op( + Model().to(dtype), + ((torch.rand(2, 3, 8, 8) * 10).to(dtype),), + flow, + ) + + def test_convtranspose2d_basic(self, flow: TestFlow) -> None: + self._test_op( + Model(), + (torch.randn(2, 3, 8, 8),), + flow, + ) + + def test_convtranspose2d_kernel_size(self, flow: TestFlow) -> None: + self._test_op( + Model(kernel_size=1), + (torch.randn(2, 3, 8, 8),), + flow, + ) + self._test_op( + Model(kernel_size=5), + (torch.randn(2, 3, 8, 8),), + flow, + ) + self._test_op( + Model(kernel_size=(3, 5)), + (torch.randn(2, 3, 8, 8),), + flow, + ) + + def test_convtranspose2d_stride(self, flow: TestFlow) -> None: + self._test_op( + Model(stride=2), + (torch.randn(2, 3, 8, 8),), + flow, + ) + self._test_op( + Model(stride=(2, 1)), + (torch.randn(2, 3, 8, 8),), + flow, + ) + + def test_convtranspose2d_padding(self, flow: TestFlow) -> None: + self._test_op( + Model(padding=1), + (torch.randn(2, 3, 8, 8),), + flow, + ) + self._test_op( + Model(padding=(1, 2)), + (torch.randn(2, 3, 8, 8),), + flow, + ) + + def test_convtranspose2d_output_padding(self, flow: TestFlow) -> None: + self._test_op( + Model(stride=2, output_padding=1), + (torch.randn(2, 3, 8, 8),), + flow, + ) + self._test_op( + Model(stride=(2, 2), output_padding=(1, 0)), + (torch.randn(2, 3, 8, 8),), + flow, + ) + + def test_convtranspose2d_dilation(self, flow: TestFlow) -> None: + self._test_op( + Model(dilation=2), + (torch.randn(2, 3, 8, 8),), + flow, + ) + self._test_op( + Model(dilation=(2, 1)), + (torch.randn(2, 3, 8, 8),), + flow, + ) + + def test_convtranspose2d_groups(self, flow: TestFlow) -> None: + self._test_op( + Model(in_channels=6, out_channels=6, groups=3), + (torch.randn(2, 6, 8, 8),), + flow, + ) + + def test_convtranspose2d_no_bias(self, flow: TestFlow) -> None: + self._test_op( + Model(bias=False), + (torch.randn(2, 3, 8, 8),), + flow, + ) + + def test_convtranspose2d_channels(self, flow: TestFlow) -> None: + self._test_op( + Model(in_channels=1, out_channels=1), + (torch.randn(2, 1, 8, 8),), + flow, + ) + self._test_op( + Model(in_channels=5, out_channels=10), + (torch.randn(2, 5, 8, 8),), + flow, + ) + + def test_convtranspose2d_different_spatial_dims(self, flow: TestFlow) -> None: + self._test_op( + Model(), + (torch.randn(2, 3, 10, 8),), + flow, + ) diff --git a/backends/test/suite/operators/test_convtranspose3d.py b/backends/test/suite/operators/test_convtranspose3d.py index 00612725016..6cedc5b31cd 100644 --- a/backends/test/suite/operators/test_convtranspose3d.py +++ b/backends/test/suite/operators/test_convtranspose3d.py @@ -1,27 +1,33 @@ -# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
-# pyre-strict +# pyre-unsafe -from typing import Callable, Union, Tuple +from typing import Tuple, Union import torch +from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.compliance_suite import ( +from executorch.backends.test.suite.operators import ( dtype_test, operator_test, OperatorTest, ) + class Model(torch.nn.Module): def __init__( self, in_channels=3, out_channels=6, - kernel_size=3, - stride=1, - padding=0, - output_padding=0, - dilation=1, + kernel_size: Union[int, Tuple[int, int, int]] = 3, + stride: Union[int, Tuple[int, int, int]] = 1, + padding: Union[int, Tuple[int, int, int]] = 0, + output_padding: Union[int, Tuple[int, int, int]] = 0, + dilation: Union[int, Tuple[int, int, int]] = 1, groups=1, bias=True, ): @@ -37,59 +43,117 @@ def __init__( groups=groups, bias=bias, ) - + def forward(self, x): return self.conv_transpose(x) + @operator_test -class TestConvTranspose3d(OperatorTest): +class ConvTranspose3d(OperatorTest): @dtype_test - def test_convtranspose3d_dtype(self, dtype, tester_factory: Callable) -> None: - # Input shape: (batch_size, in_channels, depth, height, width) - self._test_op(Model().to(dtype), ((torch.rand(2, 3, 4, 4, 4) * 10).to(dtype),), tester_factory) - - def test_convtranspose3d_basic(self, tester_factory: Callable) -> None: - # Basic test with default parameters - self._test_op(Model(), (torch.randn(2, 3, 4, 4, 4),), tester_factory) - - def test_convtranspose3d_kernel_size(self, tester_factory: Callable) -> None: - # Test with different kernel sizes - self._test_op(Model(kernel_size=1), (torch.randn(2, 3, 4, 4, 4),), tester_factory) - self._test_op(Model(kernel_size=(1, 3, 3)), (torch.randn(2, 3, 4, 4, 4),), tester_factory) - - def test_convtranspose3d_stride(self, tester_factory: Callable) -> None: - # Test with different stride values - self._test_op(Model(stride=2), (torch.randn(2, 3, 4, 4, 4),), tester_factory) - self._test_op(Model(stride=(1, 2, 2)), (torch.randn(2, 3, 4, 4, 4),), tester_factory) - - def test_convtranspose3d_padding(self, tester_factory: Callable) -> None: - # Test with different padding values - self._test_op(Model(padding=1), (torch.randn(2, 3, 4, 4, 4),), tester_factory) - self._test_op(Model(padding=(0, 1, 1)), (torch.randn(2, 3, 4, 4, 4),), tester_factory) - - def test_convtranspose3d_output_padding(self, tester_factory: Callable) -> None: - # Test with different output_padding values (requires stride > 1) - self._test_op(Model(stride=2, output_padding=1), (torch.randn(2, 3, 4, 4, 4),), tester_factory) - self._test_op(Model(stride=(2, 2, 2), output_padding=(1, 0, 1)), (torch.randn(2, 3, 4, 4, 4),), tester_factory) - - def test_convtranspose3d_dilation(self, tester_factory: Callable) -> None: - # Test with different dilation values - self._test_op(Model(dilation=2), (torch.randn(2, 3, 4, 4, 4),), tester_factory) - self._test_op(Model(dilation=(1, 2, 2)), (torch.randn(2, 3, 4, 4, 4),), tester_factory) - - def test_convtranspose3d_groups(self, tester_factory: Callable) -> None: - # Test with groups=3 (in_channels and out_channels must be divisible by groups) - self._test_op(Model(in_channels=6, out_channels=6, groups=3), (torch.randn(2, 6, 4, 4, 4),), tester_factory) - - def test_convtranspose3d_no_bias(self, tester_factory: Callable) -> None: - # Test without bias - self._test_op(Model(bias=False), (torch.randn(2, 3, 4, 4, 4),), tester_factory) - - def test_convtranspose3d_channels(self, tester_factory: Callable) -> None: - # Test with different channel configurations - 
self._test_op(Model(in_channels=1, out_channels=1), (torch.randn(2, 1, 4, 4, 4),), tester_factory) - self._test_op(Model(in_channels=5, out_channels=10), (torch.randn(2, 5, 4, 4, 4),), tester_factory) - - def test_convtranspose3d_different_spatial_dims(self, tester_factory: Callable) -> None: - # Test with different depth, height, and width - self._test_op(Model(), (torch.randn(2, 3, 3, 4, 5),), tester_factory) + def test_convtranspose3d_dtype(self, flow: TestFlow, dtype) -> None: + self._test_op( + Model().to(dtype), + ((torch.rand(2, 3, 4, 4, 4) * 10).to(dtype),), + flow, + ) + + def test_convtranspose3d_basic(self, flow: TestFlow) -> None: + self._test_op( + Model(), + (torch.randn(2, 3, 4, 4, 4),), + flow, + ) + + def test_convtranspose3d_kernel_size(self, flow: TestFlow) -> None: + self._test_op( + Model(kernel_size=1), + (torch.randn(2, 3, 4, 4, 4),), + flow, + ) + self._test_op( + Model(kernel_size=(1, 3, 3)), + (torch.randn(2, 3, 4, 4, 4),), + flow, + ) + + def test_convtranspose3d_stride(self, flow: TestFlow) -> None: + self._test_op( + Model(stride=2), + (torch.randn(2, 3, 4, 4, 4),), + flow, + ) + self._test_op( + Model(stride=(1, 2, 2)), + (torch.randn(2, 3, 4, 4, 4),), + flow, + ) + + def test_convtranspose3d_padding(self, flow: TestFlow) -> None: + self._test_op( + Model(padding=1), + (torch.randn(2, 3, 4, 4, 4),), + flow, + ) + self._test_op( + Model(padding=(0, 1, 1)), + (torch.randn(2, 3, 4, 4, 4),), + flow, + ) + + def test_convtranspose3d_output_padding(self, flow: TestFlow) -> None: + self._test_op( + Model(stride=2, output_padding=1), + (torch.randn(2, 3, 4, 4, 4),), + flow, + ) + self._test_op( + Model(stride=(2, 2, 2), output_padding=(1, 0, 1)), + (torch.randn(2, 3, 4, 4, 4),), + flow, + ) + + def test_convtranspose3d_dilation(self, flow: TestFlow) -> None: + self._test_op( + Model(dilation=2), + (torch.randn(2, 3, 4, 4, 4),), + flow, + ) + self._test_op( + Model(dilation=(1, 2, 2)), + (torch.randn(2, 3, 4, 4, 4),), + flow, + ) + + def test_convtranspose3d_groups(self, flow: TestFlow) -> None: + self._test_op( + Model(in_channels=6, out_channels=6, groups=3), + (torch.randn(2, 6, 4, 4, 4),), + flow, + ) + + def test_convtranspose3d_no_bias(self, flow: TestFlow) -> None: + self._test_op( + Model(bias=False), + (torch.randn(2, 3, 4, 4, 4),), + flow, + ) + + def test_convtranspose3d_channels(self, flow: TestFlow) -> None: + self._test_op( + Model(in_channels=1, out_channels=1), + (torch.randn(2, 1, 4, 4, 4),), + flow, + ) + self._test_op( + Model(in_channels=5, out_channels=10), + (torch.randn(2, 5, 4, 4, 4),), + flow, + ) + + def test_convtranspose3d_different_spatial_dims(self, flow: TestFlow) -> None: + self._test_op( + Model(), + (torch.randn(2, 3, 3, 4, 5),), + flow, + )
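
Note on the spatial sizes used in the tests above: each input extent is chosen so the convolution output stays non-empty for the largest effective kernel a test exercises, i.e. dilation * (kernel_size - 1) + 1. The sketch below is illustrative only and is not part of the patch; it uses plain torch.nn (no TestFlow, OperatorTest, or _test_op), and the helper name conv_out_len is hypothetical.

    # Minimal sketch, assuming standard torch.nn.Conv*d shape semantics.
    # Output length formula from the torch.nn.Conv2d documentation:
    #   L_out = floor((L_in + 2*padding - dilation*(kernel - 1) - 1) / stride) + 1
    import torch

    def conv_out_len(l_in: int, kernel: int, stride: int = 1,
                     padding: int = 0, dilation: int = 1) -> int:
        # Apply the documented output-size formula for one spatial dimension.
        return (l_in + 2 * padding - dilation * (kernel - 1) - 1) // stride + 1

    # Largest cases exercised above: kernel_size=5 on 8x8 inputs (Conv2d),
    # and dilation=2 with kernel_size=3 on 6x6x6 inputs (Conv3d).
    assert conv_out_len(8, kernel=5) == 4
    assert conv_out_len(6, kernel=3, dilation=2) == 2

    # Cross-check the 2D case against the real module.
    y = torch.nn.Conv2d(3, 6, kernel_size=5)(torch.randn(2, 3, 8, 8))
    assert y.shape == (2, 6, 4, 4)

This arithmetic is why the 3D stride and dilation tests feed 6x6x6 (or mixed 4/6) inputs rather than the 4x4x4 tensors used elsewhere.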