From fd73fb9ef7596f416690daf6c31dd3ae7ca7cda4 Mon Sep 17 00:00:00 2001 From: Gregory Comer Date: Thu, 24 Jul 2025 21:06:04 -0700 Subject: [PATCH 1/9] Update [ghstack-poisoned] --- backends/test/suite/operators/test_conv1d.py | 89 +++++++++++++++++ backends/test/suite/operators/test_conv2d.py | 96 +++++++++++++++++++ backends/test/suite/operators/test_conv3d.py | 95 ++++++++++++++++++ .../suite/operators/test_convtranspose1d.py | 89 +++++++++++++++++ .../suite/operators/test_convtranspose2d.py | 96 +++++++++++++++++++ .../suite/operators/test_convtranspose3d.py | 95 ++++++++++++++++++ 6 files changed, 560 insertions(+) create mode 100644 backends/test/suite/operators/test_conv1d.py create mode 100644 backends/test/suite/operators/test_conv2d.py create mode 100644 backends/test/suite/operators/test_conv3d.py create mode 100644 backends/test/suite/operators/test_convtranspose1d.py create mode 100644 backends/test/suite/operators/test_convtranspose2d.py create mode 100644 backends/test/suite/operators/test_convtranspose3d.py diff --git a/backends/test/suite/operators/test_conv1d.py b/backends/test/suite/operators/test_conv1d.py new file mode 100644 index 00000000000..1efd7685c18 --- /dev/null +++ b/backends/test/suite/operators/test_conv1d.py @@ -0,0 +1,89 @@ +# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. 
+ +# pyre-strict + +from typing import Callable + +import torch + +from executorch.backends.test.compliance_suite import ( + dtype_test, + operator_test, + OperatorTest, +) + +class Model(torch.nn.Module): + def __init__( + self, + in_channels=3, + out_channels=6, + kernel_size=3, + stride=1, + padding=0, + dilation=1, + groups=1, + bias=True, + padding_mode="zeros", + ): + super().__init__() + self.conv = torch.nn.Conv1d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + bias=bias, + padding_mode=padding_mode, + ) + + def forward(self, x): + return self.conv(x) + +@operator_test +class TestConv1d(OperatorTest): + @dtype_test + def test_conv1d_dtype(self, dtype, tester_factory: Callable) -> None: + # Input shape: (batch_size, in_channels, length) + self._test_op(Model().to(dtype), ((torch.rand(2, 3, 10) * 10).to(dtype),), tester_factory) + + def test_conv1d_basic(self, tester_factory: Callable) -> None: + # Basic test with default parameters + self._test_op(Model(), (torch.randn(2, 3, 10),), tester_factory) + + def test_conv1d_kernel_size(self, tester_factory: Callable) -> None: + # Test with different kernel sizes + self._test_op(Model(kernel_size=1), (torch.randn(2, 3, 10),), tester_factory) + self._test_op(Model(kernel_size=5), (torch.randn(2, 3, 10),), tester_factory) + + def test_conv1d_stride(self, tester_factory: Callable) -> None: + # Test with different stride values + self._test_op(Model(stride=2), (torch.randn(2, 3, 10),), tester_factory) + + def test_conv1d_padding(self, tester_factory: Callable) -> None: + # Test with different padding values + self._test_op(Model(padding=1), (torch.randn(2, 3, 10),), tester_factory) + self._test_op(Model(padding=2), (torch.randn(2, 3, 10),), tester_factory) + + def test_conv1d_dilation(self, tester_factory: Callable) -> None: + # Test with different dilation values + self._test_op(Model(dilation=2), 
(torch.randn(2, 3, 10),), tester_factory) + + def test_conv1d_groups(self, tester_factory: Callable) -> None: + # Test with groups=3 (in_channels must be divisible by groups) + self._test_op(Model(in_channels=6, out_channels=6, groups=3), (torch.randn(2, 6, 10),), tester_factory) + + def test_conv1d_no_bias(self, tester_factory: Callable) -> None: + # Test without bias + self._test_op(Model(bias=False), (torch.randn(2, 3, 10),), tester_factory) + + def test_conv1d_padding_modes(self, tester_factory: Callable) -> None: + # Test different padding modes + for mode in ["zeros", "reflect", "replicate", "circular"]: + self._test_op(Model(padding=1, padding_mode=mode), (torch.randn(2, 3, 10),), tester_factory) + + def test_conv1d_channels(self, tester_factory: Callable) -> None: + # Test with different channel configurations + self._test_op(Model(in_channels=1, out_channels=1), (torch.randn(2, 1, 10),), tester_factory) + self._test_op(Model(in_channels=5, out_channels=10), (torch.randn(2, 5, 10),), tester_factory) diff --git a/backends/test/suite/operators/test_conv2d.py b/backends/test/suite/operators/test_conv2d.py new file mode 100644 index 00000000000..40b3b9dc24b --- /dev/null +++ b/backends/test/suite/operators/test_conv2d.py @@ -0,0 +1,96 @@ +# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. 
+ +# pyre-strict + +from typing import Callable, Union, Tuple + +import torch + +from executorch.backends.test.compliance_suite import ( + dtype_test, + operator_test, + OperatorTest, +) + +class Model(torch.nn.Module): + def __init__( + self, + in_channels=3, + out_channels=6, + kernel_size=3, + stride=1, + padding=0, + dilation=1, + groups=1, + bias=True, + padding_mode="zeros", + ): + super().__init__() + self.conv = torch.nn.Conv2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + bias=bias, + padding_mode=padding_mode, + ) + + def forward(self, x): + return self.conv(x) + +@operator_test +class TestConv2d(OperatorTest): + @dtype_test + def test_conv2d_dtype(self, dtype, tester_factory: Callable) -> None: + # Input shape: (batch_size, in_channels, height, width) + self._test_op(Model().to(dtype), ((torch.rand(2, 3, 8, 8) * 10).to(dtype),), tester_factory) + + def test_conv2d_basic(self, tester_factory: Callable) -> None: + # Basic test with default parameters + self._test_op(Model(), (torch.randn(2, 3, 8, 8),), tester_factory) + + def test_conv2d_kernel_size(self, tester_factory: Callable) -> None: + # Test with different kernel sizes + self._test_op(Model(kernel_size=1), (torch.randn(2, 3, 8, 8),), tester_factory) + self._test_op(Model(kernel_size=5), (torch.randn(2, 3, 8, 8),), tester_factory) + self._test_op(Model(kernel_size=(3, 5)), (torch.randn(2, 3, 8, 8),), tester_factory) + + def test_conv2d_stride(self, tester_factory: Callable) -> None: + # Test with different stride values + self._test_op(Model(stride=2), (torch.randn(2, 3, 8, 8),), tester_factory) + self._test_op(Model(stride=(2, 1)), (torch.randn(2, 3, 8, 8),), tester_factory) + + def test_conv2d_padding(self, tester_factory: Callable) -> None: + # Test with different padding values + self._test_op(Model(padding=1), (torch.randn(2, 3, 8, 8),), tester_factory) + 
self._test_op(Model(padding=(1, 2)), (torch.randn(2, 3, 8, 8),), tester_factory) + + def test_conv2d_dilation(self, tester_factory: Callable) -> None: + # Test with different dilation values + self._test_op(Model(dilation=2), (torch.randn(2, 3, 8, 8),), tester_factory) + self._test_op(Model(dilation=(2, 1)), (torch.randn(2, 3, 8, 8),), tester_factory) + + def test_conv2d_groups(self, tester_factory: Callable) -> None: + # Test with groups=3 (in_channels must be divisible by groups) + self._test_op(Model(in_channels=6, out_channels=6, groups=3), (torch.randn(2, 6, 8, 8),), tester_factory) + + def test_conv2d_no_bias(self, tester_factory: Callable) -> None: + # Test without bias + self._test_op(Model(bias=False), (torch.randn(2, 3, 8, 8),), tester_factory) + + def test_conv2d_padding_modes(self, tester_factory: Callable) -> None: + # Test different padding modes + for mode in ["zeros", "reflect", "replicate", "circular"]: + self._test_op(Model(padding=1, padding_mode=mode), (torch.randn(2, 3, 8, 8),), tester_factory) + + def test_conv2d_channels(self, tester_factory: Callable) -> None: + # Test with different channel configurations + self._test_op(Model(in_channels=1, out_channels=1), (torch.randn(2, 1, 8, 8),), tester_factory) + self._test_op(Model(in_channels=5, out_channels=10), (torch.randn(2, 5, 8, 8),), tester_factory) + + def test_conv2d_different_spatial_dims(self, tester_factory: Callable) -> None: + # Test with different height and width + self._test_op(Model(), (torch.randn(2, 3, 10, 8),), tester_factory) diff --git a/backends/test/suite/operators/test_conv3d.py b/backends/test/suite/operators/test_conv3d.py new file mode 100644 index 00000000000..baade4df10e --- /dev/null +++ b/backends/test/suite/operators/test_conv3d.py @@ -0,0 +1,95 @@ +# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. 
+ +# pyre-strict + +from typing import Callable, Union, Tuple + +import torch + +from executorch.backends.test.compliance_suite import ( + dtype_test, + operator_test, + OperatorTest, +) + +class Model(torch.nn.Module): + def __init__( + self, + in_channels=3, + out_channels=6, + kernel_size=3, + stride=1, + padding=0, + dilation=1, + groups=1, + bias=True, + padding_mode="zeros", + ): + super().__init__() + self.conv = torch.nn.Conv3d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + bias=bias, + padding_mode=padding_mode, + ) + + def forward(self, x): + return self.conv(x) + +@operator_test +class TestConv3d(OperatorTest): + @dtype_test + def test_conv3d_dtype(self, dtype, tester_factory: Callable) -> None: + # Input shape: (batch_size, in_channels, depth, height, width) + self._test_op(Model().to(dtype), ((torch.rand(2, 3, 4, 4, 4) * 10).to(dtype),), tester_factory) + + def test_conv3d_basic(self, tester_factory: Callable) -> None: + # Basic test with default parameters + self._test_op(Model(), (torch.randn(2, 3, 4, 4, 4),), tester_factory) + + def test_conv3d_kernel_size(self, tester_factory: Callable) -> None: + # Test with different kernel sizes + self._test_op(Model(kernel_size=1), (torch.randn(2, 3, 4, 4, 4),), tester_factory) + self._test_op(Model(kernel_size=(1, 3, 3)), (torch.randn(2, 3, 4, 4, 4),), tester_factory) + + def test_conv3d_stride(self, tester_factory: Callable) -> None: + # Test with different stride values + self._test_op(Model(stride=2), (torch.randn(2, 3, 6, 6, 6),), tester_factory) + self._test_op(Model(stride=(1, 2, 2)), (torch.randn(2, 3, 4, 6, 6),), tester_factory) + + def test_conv3d_padding(self, tester_factory: Callable) -> None: + # Test with different padding values + self._test_op(Model(padding=1), (torch.randn(2, 3, 4, 4, 4),), tester_factory) + self._test_op(Model(padding=(0, 1, 1)), (torch.randn(2, 3, 4, 4, 4),), 
tester_factory) + + def test_conv3d_dilation(self, tester_factory: Callable) -> None: + # Test with different dilation values + self._test_op(Model(dilation=2), (torch.randn(2, 3, 6, 6, 6),), tester_factory) + self._test_op(Model(dilation=(1, 2, 2)), (torch.randn(2, 3, 4, 6, 6),), tester_factory) + + def test_conv3d_groups(self, tester_factory: Callable) -> None: + # Test with groups=3 (in_channels must be divisible by groups) + self._test_op(Model(in_channels=6, out_channels=6, groups=3), (torch.randn(2, 6, 4, 4, 4),), tester_factory) + + def test_conv3d_no_bias(self, tester_factory: Callable) -> None: + # Test without bias + self._test_op(Model(bias=False), (torch.randn(2, 3, 4, 4, 4),), tester_factory) + + def test_conv3d_padding_modes(self, tester_factory: Callable) -> None: + # Test different padding modes + for mode in ["zeros", "reflect", "replicate", "circular"]: + self._test_op(Model(padding=1, padding_mode=mode), (torch.randn(2, 3, 4, 4, 4),), tester_factory) + + def test_conv3d_channels(self, tester_factory: Callable) -> None: + # Test with different channel configurations + self._test_op(Model(in_channels=1, out_channels=1), (torch.randn(2, 1, 4, 4, 4),), tester_factory) + self._test_op(Model(in_channels=5, out_channels=10), (torch.randn(2, 5, 4, 4, 4),), tester_factory) + + def test_conv3d_different_spatial_dims(self, tester_factory: Callable) -> None: + # Test with different depth, height, and width + self._test_op(Model(), (torch.randn(2, 3, 3, 4, 5),), tester_factory) diff --git a/backends/test/suite/operators/test_convtranspose1d.py b/backends/test/suite/operators/test_convtranspose1d.py new file mode 100644 index 00000000000..d93e542de4a --- /dev/null +++ b/backends/test/suite/operators/test_convtranspose1d.py @@ -0,0 +1,89 @@ +# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. 
+ +# pyre-strict + +from typing import Callable, Union, Tuple + +import torch + +from executorch.backends.test.compliance_suite import ( + dtype_test, + operator_test, + OperatorTest, +) + +class Model(torch.nn.Module): + def __init__( + self, + in_channels=3, + out_channels=6, + kernel_size=3, + stride=1, + padding=0, + output_padding=0, + dilation=1, + groups=1, + bias=True, + ): + super().__init__() + self.conv_transpose = torch.nn.ConvTranspose1d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + output_padding=output_padding, + dilation=dilation, + groups=groups, + bias=bias, + ) + + def forward(self, x): + return self.conv_transpose(x) + +@operator_test +class TestConvTranspose1d(OperatorTest): + @dtype_test + def test_convtranspose1d_dtype(self, dtype, tester_factory: Callable) -> None: + # Input shape: (batch_size, in_channels, length) + self._test_op(Model().to(dtype), ((torch.rand(2, 3, 10) * 10).to(dtype),), tester_factory) + + def test_convtranspose1d_basic(self, tester_factory: Callable) -> None: + # Basic test with default parameters + self._test_op(Model(), (torch.randn(2, 3, 10),), tester_factory) + + def test_convtranspose1d_kernel_size(self, tester_factory: Callable) -> None: + # Test with different kernel sizes + self._test_op(Model(kernel_size=1), (torch.randn(2, 3, 10),), tester_factory) + self._test_op(Model(kernel_size=5), (torch.randn(2, 3, 10),), tester_factory) + + def test_convtranspose1d_stride(self, tester_factory: Callable) -> None: + # Test with different stride values + self._test_op(Model(stride=2), (torch.randn(2, 3, 10),), tester_factory) + + def test_convtranspose1d_padding(self, tester_factory: Callable) -> None: + # Test with different padding values + self._test_op(Model(padding=1), (torch.randn(2, 3, 10),), tester_factory) + self._test_op(Model(padding=2), (torch.randn(2, 3, 10),), tester_factory) + + def test_convtranspose1d_output_padding(self, 
tester_factory: Callable) -> None: + # Test with different output_padding values (requires stride > 1) + self._test_op(Model(stride=2, output_padding=1), (torch.randn(2, 3, 10),), tester_factory) + + def test_convtranspose1d_dilation(self, tester_factory: Callable) -> None: + # Test with different dilation values + self._test_op(Model(dilation=2), (torch.randn(2, 3, 10),), tester_factory) + + def test_convtranspose1d_groups(self, tester_factory: Callable) -> None: + # Test with groups=3 (in_channels and out_channels must be divisible by groups) + self._test_op(Model(in_channels=6, out_channels=6, groups=3), (torch.randn(2, 6, 10),), tester_factory) + + def test_convtranspose1d_no_bias(self, tester_factory: Callable) -> None: + # Test without bias + self._test_op(Model(bias=False), (torch.randn(2, 3, 10),), tester_factory) + + def test_convtranspose1d_channels(self, tester_factory: Callable) -> None: + # Test with different channel configurations + self._test_op(Model(in_channels=1, out_channels=1), (torch.randn(2, 1, 10),), tester_factory) + self._test_op(Model(in_channels=5, out_channels=10), (torch.randn(2, 5, 10),), tester_factory) + \ No newline at end of file diff --git a/backends/test/suite/operators/test_convtranspose2d.py b/backends/test/suite/operators/test_convtranspose2d.py new file mode 100644 index 00000000000..b5a4dfb784c --- /dev/null +++ b/backends/test/suite/operators/test_convtranspose2d.py @@ -0,0 +1,96 @@ +# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. 
+ +# pyre-strict + +from typing import Callable, Union, Tuple + +import torch + +from executorch.backends.test.compliance_suite import ( + dtype_test, + operator_test, + OperatorTest, +) + +class Model(torch.nn.Module): + def __init__( + self, + in_channels=3, + out_channels=6, + kernel_size=3, + stride=1, + padding=0, + output_padding=0, + dilation=1, + groups=1, + bias=True, + ): + super().__init__() + self.conv_transpose = torch.nn.ConvTranspose2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + output_padding=output_padding, + dilation=dilation, + groups=groups, + bias=bias, + ) + + def forward(self, x): + return self.conv_transpose(x) + +@operator_test +class TestConvTranspose2d(OperatorTest): + @dtype_test + def test_convtranspose2d_dtype(self, dtype, tester_factory: Callable) -> None: + # Input shape: (batch_size, in_channels, height, width) + self._test_op(Model().to(dtype), ((torch.rand(2, 3, 8, 8) * 10).to(dtype),), tester_factory) + + def test_convtranspose2d_basic(self, tester_factory: Callable) -> None: + # Basic test with default parameters + self._test_op(Model(), (torch.randn(2, 3, 8, 8),), tester_factory) + + def test_convtranspose2d_kernel_size(self, tester_factory: Callable) -> None: + # Test with different kernel sizes + self._test_op(Model(kernel_size=1), (torch.randn(2, 3, 8, 8),), tester_factory) + self._test_op(Model(kernel_size=5), (torch.randn(2, 3, 8, 8),), tester_factory) + self._test_op(Model(kernel_size=(3, 5)), (torch.randn(2, 3, 8, 8),), tester_factory) + + def test_convtranspose2d_stride(self, tester_factory: Callable) -> None: + # Test with different stride values + self._test_op(Model(stride=2), (torch.randn(2, 3, 8, 8),), tester_factory) + self._test_op(Model(stride=(2, 1)), (torch.randn(2, 3, 8, 8),), tester_factory) + + def test_convtranspose2d_padding(self, tester_factory: Callable) -> None: + # Test with different padding values + 
self._test_op(Model(padding=1), (torch.randn(2, 3, 8, 8),), tester_factory) + self._test_op(Model(padding=(1, 2)), (torch.randn(2, 3, 8, 8),), tester_factory) + + def test_convtranspose2d_output_padding(self, tester_factory: Callable) -> None: + # Test with different output_padding values (requires stride > 1) + self._test_op(Model(stride=2, output_padding=1), (torch.randn(2, 3, 8, 8),), tester_factory) + self._test_op(Model(stride=(2, 2), output_padding=(1, 0)), (torch.randn(2, 3, 8, 8),), tester_factory) + + def test_convtranspose2d_dilation(self, tester_factory: Callable) -> None: + # Test with different dilation values + self._test_op(Model(dilation=2), (torch.randn(2, 3, 8, 8),), tester_factory) + self._test_op(Model(dilation=(2, 1)), (torch.randn(2, 3, 8, 8),), tester_factory) + + def test_convtranspose2d_groups(self, tester_factory: Callable) -> None: + # Test with groups=3 (in_channels and out_channels must be divisible by groups) + self._test_op(Model(in_channels=6, out_channels=6, groups=3), (torch.randn(2, 6, 8, 8),), tester_factory) + + def test_convtranspose2d_no_bias(self, tester_factory: Callable) -> None: + # Test without bias + self._test_op(Model(bias=False), (torch.randn(2, 3, 8, 8),), tester_factory) + + def test_convtranspose2d_channels(self, tester_factory: Callable) -> None: + # Test with different channel configurations + self._test_op(Model(in_channels=1, out_channels=1), (torch.randn(2, 1, 8, 8),), tester_factory) + self._test_op(Model(in_channels=5, out_channels=10), (torch.randn(2, 5, 8, 8),), tester_factory) + + def test_convtranspose2d_different_spatial_dims(self, tester_factory: Callable) -> None: + # Test with different height and width + self._test_op(Model(), (torch.randn(2, 3, 10, 8),), tester_factory) diff --git a/backends/test/suite/operators/test_convtranspose3d.py b/backends/test/suite/operators/test_convtranspose3d.py new file mode 100644 index 00000000000..00612725016 --- /dev/null +++ 
b/backends/test/suite/operators/test_convtranspose3d.py @@ -0,0 +1,95 @@ +# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. + +# pyre-strict + +from typing import Callable, Union, Tuple + +import torch + +from executorch.backends.test.compliance_suite import ( + dtype_test, + operator_test, + OperatorTest, +) + +class Model(torch.nn.Module): + def __init__( + self, + in_channels=3, + out_channels=6, + kernel_size=3, + stride=1, + padding=0, + output_padding=0, + dilation=1, + groups=1, + bias=True, + ): + super().__init__() + self.conv_transpose = torch.nn.ConvTranspose3d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + output_padding=output_padding, + dilation=dilation, + groups=groups, + bias=bias, + ) + + def forward(self, x): + return self.conv_transpose(x) + +@operator_test +class TestConvTranspose3d(OperatorTest): + @dtype_test + def test_convtranspose3d_dtype(self, dtype, tester_factory: Callable) -> None: + # Input shape: (batch_size, in_channels, depth, height, width) + self._test_op(Model().to(dtype), ((torch.rand(2, 3, 4, 4, 4) * 10).to(dtype),), tester_factory) + + def test_convtranspose3d_basic(self, tester_factory: Callable) -> None: + # Basic test with default parameters + self._test_op(Model(), (torch.randn(2, 3, 4, 4, 4),), tester_factory) + + def test_convtranspose3d_kernel_size(self, tester_factory: Callable) -> None: + # Test with different kernel sizes + self._test_op(Model(kernel_size=1), (torch.randn(2, 3, 4, 4, 4),), tester_factory) + self._test_op(Model(kernel_size=(1, 3, 3)), (torch.randn(2, 3, 4, 4, 4),), tester_factory) + + def test_convtranspose3d_stride(self, tester_factory: Callable) -> None: + # Test with different stride values + self._test_op(Model(stride=2), (torch.randn(2, 3, 4, 4, 4),), tester_factory) + self._test_op(Model(stride=(1, 2, 2)), (torch.randn(2, 3, 4, 4, 4),), tester_factory) + + def test_convtranspose3d_padding(self, 
tester_factory: Callable) -> None: + # Test with different padding values + self._test_op(Model(padding=1), (torch.randn(2, 3, 4, 4, 4),), tester_factory) + self._test_op(Model(padding=(0, 1, 1)), (torch.randn(2, 3, 4, 4, 4),), tester_factory) + + def test_convtranspose3d_output_padding(self, tester_factory: Callable) -> None: + # Test with different output_padding values (requires stride > 1) + self._test_op(Model(stride=2, output_padding=1), (torch.randn(2, 3, 4, 4, 4),), tester_factory) + self._test_op(Model(stride=(2, 2, 2), output_padding=(1, 0, 1)), (torch.randn(2, 3, 4, 4, 4),), tester_factory) + + def test_convtranspose3d_dilation(self, tester_factory: Callable) -> None: + # Test with different dilation values + self._test_op(Model(dilation=2), (torch.randn(2, 3, 4, 4, 4),), tester_factory) + self._test_op(Model(dilation=(1, 2, 2)), (torch.randn(2, 3, 4, 4, 4),), tester_factory) + + def test_convtranspose3d_groups(self, tester_factory: Callable) -> None: + # Test with groups=3 (in_channels and out_channels must be divisible by groups) + self._test_op(Model(in_channels=6, out_channels=6, groups=3), (torch.randn(2, 6, 4, 4, 4),), tester_factory) + + def test_convtranspose3d_no_bias(self, tester_factory: Callable) -> None: + # Test without bias + self._test_op(Model(bias=False), (torch.randn(2, 3, 4, 4, 4),), tester_factory) + + def test_convtranspose3d_channels(self, tester_factory: Callable) -> None: + # Test with different channel configurations + self._test_op(Model(in_channels=1, out_channels=1), (torch.randn(2, 1, 4, 4, 4),), tester_factory) + self._test_op(Model(in_channels=5, out_channels=10), (torch.randn(2, 5, 4, 4, 4),), tester_factory) + + def test_convtranspose3d_different_spatial_dims(self, tester_factory: Callable) -> None: + # Test with different depth, height, and width + self._test_op(Model(), (torch.randn(2, 3, 3, 4, 5),), tester_factory) From 2de680e2f76771845ae9073e67579e71c4a3eff7 Mon Sep 17 00:00:00 2001 From: Gregory Comer Date: Thu, 24 
Jul 2025 21:06:08 -0700 Subject: [PATCH 2/9] Update [ghstack-poisoned] --- backends/test/suite/operators/test_linear.py | 77 ++++++++++++++++++++ 1 file changed, 77 insertions(+) create mode 100644 backends/test/suite/operators/test_linear.py diff --git a/backends/test/suite/operators/test_linear.py b/backends/test/suite/operators/test_linear.py new file mode 100644 index 00000000000..ca0f428884a --- /dev/null +++ b/backends/test/suite/operators/test_linear.py @@ -0,0 +1,77 @@ +# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. + +# pyre-strict + +from typing import Callable + +import torch + +from executorch.backends.test.compliance_suite import ( + dtype_test, + operator_test, + OperatorTest, +) + +class Model(torch.nn.Module): + def __init__( + self, + in_features=10, + out_features=5, + bias=True, + ): + super().__init__() + self.linear = torch.nn.Linear( + in_features=in_features, + out_features=out_features, + bias=bias, + ) + + def forward(self, x): + return self.linear(x) + +@operator_test +class TestLinear(OperatorTest): + @dtype_test + def test_linear_dtype(self, dtype, tester_factory: Callable) -> None: + # Input shape: (batch_size, in_features) + model = Model().to(dtype) + self._test_op(model, ((torch.rand(2, 10) * 10).to(dtype),), tester_factory) + + @dtype_test + def test_linear_no_bias_dtype(self, dtype, tester_factory: Callable) -> None: + # Input shape: (batch_size, in_features) + model = Model(bias=False).to(dtype) + self._test_op(model, ((torch.rand(2, 10) * 10).to(dtype),), tester_factory) + + def test_linear_basic(self, tester_factory: Callable) -> None: + # Basic test with default parameters + self._test_op(Model(), (torch.randn(2, 10),), tester_factory) + + def test_linear_feature_sizes(self, tester_factory: Callable) -> None: + # Test with different input and output feature sizes + self._test_op(Model(in_features=5, out_features=3), (torch.randn(2, 5),), tester_factory) + self._test_op(Model(in_features=20, 
out_features=10), (torch.randn(2, 20),), tester_factory) + self._test_op(Model(in_features=100, out_features=1), (torch.randn(2, 100),), tester_factory) + self._test_op(Model(in_features=1, out_features=100), (torch.randn(2, 1),), tester_factory) + + def test_linear_no_bias(self, tester_factory: Callable) -> None: + # Test without bias + self._test_op(Model(bias=False), (torch.randn(2, 10),), tester_factory) + self._test_op(Model(in_features=20, out_features=15, bias=False), (torch.randn(2, 20),), tester_factory) + + def test_linear_batch_sizes(self, tester_factory: Callable) -> None: + # Test with different batch sizes + self._test_op(Model(), (torch.randn(1, 10),), tester_factory) + self._test_op(Model(), (torch.randn(5, 10),), tester_factory) + self._test_op(Model(), (torch.randn(100, 10),), tester_factory) + + def test_linear_unbatched(self, tester_factory: Callable) -> None: + # Test with unbatched input (just features) + self._test_op(Model(), (torch.randn(10),), tester_factory) + + def test_linear_multi_dim_input(self, tester_factory: Callable) -> None: + # Test with multi-dimensional input + # For multi-dimensional inputs, the linear transformation is applied to the last dimension + self._test_op(Model(), (torch.randn(3, 4, 10),), tester_factory) + self._test_op(Model(), (torch.randn(2, 3, 4, 10),), tester_factory) + \ No newline at end of file From 8b1136665b56752551bf3b83c98ae0988f257d70 Mon Sep 17 00:00:00 2001 From: Gregory Comer Date: Thu, 24 Jul 2025 21:06:13 -0700 Subject: [PATCH 3/9] Update [ghstack-poisoned] --- .../test/suite/operators/test_embedding.py | 68 ++++++++++++ .../suite/operators/test_embedding_bag.py | 100 ++++++++++++++++++ 2 files changed, 168 insertions(+) create mode 100644 backends/test/suite/operators/test_embedding.py create mode 100644 backends/test/suite/operators/test_embedding_bag.py diff --git a/backends/test/suite/operators/test_embedding.py b/backends/test/suite/operators/test_embedding.py new file mode 100644 index 
00000000000..102881981db --- /dev/null +++ b/backends/test/suite/operators/test_embedding.py @@ -0,0 +1,68 @@ +# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. + +# pyre-strict + +from typing import Callable, Optional + +import torch + +from executorch.backends.test.compliance_suite import ( + dtype_test, + operator_test, + OperatorTest, +) + +class Model(torch.nn.Module): + def __init__( + self, + num_embeddings=10, + embedding_dim=5, + padding_idx: Optional[int] = None, + norm_type: float = 2.0, + ): + super().__init__() + self.embedding = torch.nn.Embedding( + num_embeddings=num_embeddings, + embedding_dim=embedding_dim, + padding_idx=padding_idx, + norm_type=norm_type, + ) + + def forward(self, x): + return self.embedding(x) + +@operator_test +class TestEmbedding(OperatorTest): + @dtype_test + def test_embedding_dtype(self, dtype, tester_factory: Callable) -> None: + # Input shape: (batch_size, seq_length) + # Note: Input indices should be of type Long (int64) + model = Model().to(dtype) + self._test_op(model, (torch.randint(0, 10, (2, 8), dtype=torch.long),), tester_factory, use_random_test_inputs=False) + + def test_embedding_basic(self, tester_factory: Callable) -> None: + # Basic test with default parameters + self._test_op(Model(), (torch.randint(0, 10, (2, 8), dtype=torch.long),), tester_factory, use_random_test_inputs=False) + + def test_embedding_sizes(self, tester_factory: Callable) -> None: + # Test with different dictionary sizes and embedding dimensions + self._test_op(Model(num_embeddings=5, embedding_dim=3), + (torch.randint(0, 5, (2, 8), dtype=torch.long),), tester_factory, use_random_test_inputs=False) + self._test_op(Model(num_embeddings=100, embedding_dim=10), + (torch.randint(0, 100, (2, 8), dtype=torch.long),), tester_factory, use_random_test_inputs=False) + self._test_op(Model(num_embeddings=1000, embedding_dim=50), + (torch.randint(0, 1000, (2, 4), dtype=torch.long),), tester_factory, use_random_test_inputs=False) + 
+ def test_embedding_padding_idx(self, tester_factory: Callable) -> None: + # Test with padding_idx + self._test_op(Model(padding_idx=0), + (torch.randint(0, 10, (2, 8), dtype=torch.long),), tester_factory, use_random_test_inputs=False) + self._test_op(Model(padding_idx=5), + (torch.randint(0, 10, (2, 8), dtype=torch.long),), tester_factory, use_random_test_inputs=False) + + def test_embedding_input_shapes(self, tester_factory: Callable) -> None: + # Test with different input shapes + self._test_op(Model(), (torch.randint(0, 10, (5,), dtype=torch.long),), tester_factory, use_random_test_inputs=False) # 1D input + self._test_op(Model(), (torch.randint(0, 10, (2, 8), dtype=torch.long),), tester_factory, use_random_test_inputs=False) # 2D input + self._test_op(Model(), (torch.randint(0, 10, (2, 3, 4), dtype=torch.long),), tester_factory, use_random_test_inputs=False) # 3D input + \ No newline at end of file diff --git a/backends/test/suite/operators/test_embedding_bag.py b/backends/test/suite/operators/test_embedding_bag.py new file mode 100644 index 00000000000..67ba8a2849f --- /dev/null +++ b/backends/test/suite/operators/test_embedding_bag.py @@ -0,0 +1,100 @@ +# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. 
# pyre-strict

from typing import Callable, Optional

import torch

from executorch.backends.test.compliance_suite import (
    dtype_test,
    operator_test,
    OperatorTest,
)


class Model(torch.nn.Module):
    """Thin wrapper around torch.nn.EmbeddingBag with a configurable setup."""

    def __init__(
        self,
        num_embeddings=10,
        embedding_dim=5,
        mode='mean',
        padding_idx: Optional[int] = None,
        norm_type: float = 2.0,
        include_last_offset: bool = False,
    ):
        super().__init__()
        self.embedding_bag = torch.nn.EmbeddingBag(
            num_embeddings=num_embeddings,
            embedding_dim=embedding_dim,
            mode=mode,
            padding_idx=padding_idx,
            norm_type=norm_type,
            include_last_offset=include_last_offset,
        )

    def forward(self, x, offsets=None):
        # x: 1-D Long tensor of lookup indices; offsets: 1-D Long tensor of bag starts.
        return self.embedding_bag(x, offsets)


def _bag_inputs():
    # Canonical (indices, offsets) pair: eight lookups split into two bags of four.
    indices = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9], dtype=torch.long)
    offsets = torch.tensor([0, 4], dtype=torch.long)
    return indices, offsets


@operator_test
class TestEmbeddingBag(OperatorTest):
    @dtype_test
    def test_embedding_bag_dtype(self, dtype, tester_factory: Callable) -> None:
        # Indices must stay int64; only the weight table follows `dtype`.
        model = Model().to(dtype)
        self._test_op(model, _bag_inputs(), tester_factory, use_random_test_inputs=False)

    def test_embedding_bag_basic(self, tester_factory: Callable) -> None:
        # Default construction: mean-mode bag over two groups of four indices.
        self._test_op(Model(), _bag_inputs(), tester_factory, use_random_test_inputs=False)

    def test_embedding_bag_sizes(self, tester_factory: Callable) -> None:
        # Sweep table sizes and embedding widths with in-range index sets.
        cases = [
            (5, 3, [1, 2, 3, 1], [0, 2]),
            (50, 10, [5, 20, 10, 43, 7], [0, 2, 4]),
            (500, 20, [100, 200, 300, 400], [0, 2]),
        ]
        for num_embeddings, embedding_dim, idx, off in cases:
            self._test_op(
                Model(num_embeddings=num_embeddings, embedding_dim=embedding_dim),
                (
                    torch.tensor(idx, dtype=torch.long),
                    torch.tensor(off, dtype=torch.long),
                ),
                tester_factory,
                use_random_test_inputs=False,
            )

    def test_embedding_bag_modes(self, tester_factory: Callable) -> None:
        # All three reduction modes over the same pair of bags.
        for mode in ('sum', 'mean', 'max'):
            self._test_op(Model(mode=mode), _bag_inputs(), tester_factory, use_random_test_inputs=False)

    def test_embedding_bag_padding_idx(self, tester_factory: Callable) -> None:
        # Entries equal to padding_idx contribute nothing to their bag's reduction.
        offsets = torch.tensor([0, 3, 6], dtype=torch.long)
        self._test_op(
            Model(padding_idx=0),
            (torch.tensor([0, 1, 2, 0, 3, 0, 4], dtype=torch.long), offsets),
            tester_factory,
            use_random_test_inputs=False,
        )
        self._test_op(
            Model(padding_idx=5),
            (torch.tensor([1, 5, 2, 5, 3, 5, 4], dtype=torch.long), offsets),
            tester_factory,
            use_random_test_inputs=False,
        )

    def test_embedding_bag_include_last_offset(self, tester_factory: Callable) -> None:
        # With include_last_offset=True the final offset marks the end of the last bag.
        self._test_op(
            Model(include_last_offset=True),
            _bag_inputs(),
            tester_factory,
            use_random_test_inputs=False,
        )
# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary.

# pyre-strict

from typing import Callable, List, Union

import torch

from executorch.backends.test.compliance_suite import (
    dtype_test,
    operator_test,
    OperatorTest,
)

# Shared boolean masks reused across the masked_fill cases below.
_MASK_2D = torch.tensor(
    [
        [True, False, True, False],
        [False, True, False, True],
        [True, True, False, False],
    ]
)
_MASK_3D = torch.tensor(
    [
        [[True, False, True, False], [False, True, False, True], [True, True, False, False]],
        [[False, False, True, True], [True, False, True, False], [False, True, False, True]],
    ]
)


class MaskedFillModel(torch.nn.Module):
    """Applies Tensor.masked_fill with a fixed fill value."""

    def __init__(self, value: Union[float, int]):
        super().__init__()
        self.value = value

    def forward(self, x, mask):
        return x.masked_fill(mask, self.value)


@operator_test
class TestMaskedFill(OperatorTest):
    @dtype_test
    def test_masked_fill_dtype(self, dtype, tester_factory: Callable) -> None:
        # Fill value 0.0 against every supported input dtype.
        self._test_op(
            MaskedFillModel(value=0.0),
            (torch.rand(3, 4).to(dtype), _MASK_2D),
            tester_factory,
        )

    def test_masked_fill_basic(self, tester_factory: Callable) -> None:
        # Zero out the positions where the mask is True.
        self._test_op(
            MaskedFillModel(value=0.0),
            (torch.randn(3, 4), _MASK_2D),
            tester_factory,
        )

    def test_masked_fill_different_values(self, tester_factory: Callable) -> None:
        # Positive, negative, and integer fill values on the same mask.
        for fill in (5.0, -5.0, 1):
            self._test_op(
                MaskedFillModel(value=fill),
                (torch.randn(3, 4), _MASK_2D),
                tester_factory,
            )

    def test_masked_fill_different_shapes(self, tester_factory: Callable) -> None:
        # 1-D input.
        self._test_op(
            MaskedFillModel(value=0.0),
            (torch.randn(5), torch.tensor([True, False, True, False, True])),
            tester_factory,
        )
        # 3-D input.
        self._test_op(
            MaskedFillModel(value=0.0),
            (torch.randn(2, 3, 4), _MASK_3D),
            tester_factory,
        )

    def test_masked_fill_all_true(self, tester_factory: Callable) -> None:
        # Every element is replaced with the fill value.
        self._test_op(
            MaskedFillModel(value=0.0),
            (torch.randn(3, 4), torch.ones(3, 4, dtype=torch.bool)),
            tester_factory,
        )

    def test_masked_fill_all_false(self, tester_factory: Callable) -> None:
        # Nothing is replaced; the output equals the input.
        self._test_op(
            MaskedFillModel(value=0.0),
            (torch.randn(3, 4), torch.zeros(3, 4, dtype=torch.bool)),
            tester_factory,
        )

    def test_masked_fill_broadcast(self, tester_factory: Callable) -> None:
        # A [4] mask broadcasts across the rows of a [3, 4] input.
        self._test_op(
            MaskedFillModel(value=0.0),
            (torch.randn(3, 4), torch.tensor([True, False, True, False])),
            tester_factory,
        )


# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary.

# pyre-strict

from typing import Callable, List

import torch

from executorch.backends.test.compliance_suite import (
    dtype_test,
    operator_test,
    OperatorTest,
)


class PermuteModel(torch.nn.Module):
    """Applies Tensor.permute with a fixed dimension ordering."""

    def __init__(self, dims: List[int]):
        super().__init__()
        self.dims = dims

    def forward(self, x):
        return x.permute(self.dims)


@operator_test
class TestPermute(OperatorTest):
    @dtype_test
    def test_permute_dtype(self, dtype, tester_factory: Callable) -> None:
        # Simple 2-D swap, exercised per dtype.
        self._test_op(PermuteModel(dims=[1, 0]), (torch.rand(3, 4).to(dtype),), tester_factory)

    def test_permute_basic(self, tester_factory: Callable) -> None:
        # [3, 4] -> [4, 3].
        self._test_op(PermuteModel(dims=[1, 0]), (torch.randn(3, 4),), tester_factory)

    def test_permute_3d(self, tester_factory: Callable) -> None:
        # Several reorderings of a [2, 3, 4] tensor:
        # [2,0,1] -> [4,2,3], [1,2,0] -> [3,4,2], [0,2,1] -> [2,4,3].
        for order in ([2, 0, 1], [1, 2, 0], [0, 2, 1]):
            self._test_op(PermuteModel(dims=order), (torch.randn(2, 3, 4),), tester_factory)

    def test_permute_4d(self, tester_factory: Callable) -> None:
        # Full reversal and an inner-dim swap on a [2, 3, 4, 5] tensor.
        for order in ([3, 2, 1, 0], [0, 2, 1, 3]):
            self._test_op(PermuteModel(dims=order), (torch.randn(2, 3, 4, 5),), tester_factory)

    def test_permute_identity(self, tester_factory: Callable) -> None:
        # Identity orderings leave the tensor unchanged.
        self._test_op(PermuteModel(dims=[0, 1]), (torch.randn(3, 4),), tester_factory)
        self._test_op(PermuteModel(dims=[0, 1, 2]), (torch.randn(2, 3, 4),), tester_factory)

    def test_permute_different_shapes(self, tester_factory: Callable) -> None:
        # 1-D (only the trivial permutation exists) and a 5-D full reversal.
        self._test_op(PermuteModel(dims=[0]), (torch.randn(5),), tester_factory)
        self._test_op(PermuteModel(dims=[4, 3, 2, 1, 0]), (torch.randn(2, 3, 4, 5, 6),), tester_factory)


# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary.
# pyre-strict

from typing import Callable, List

import torch

from executorch.backends.test.compliance_suite import (
    dtype_test,
    operator_test,
    OperatorTest,
)


class TransposeModel(torch.nn.Module):
    """Swaps two fixed dimensions via torch.transpose."""

    def __init__(self, dim0: int, dim1: int):
        super().__init__()
        self.dim0 = dim0
        self.dim1 = dim1

    def forward(self, x):
        return torch.transpose(x, self.dim0, self.dim1)


@operator_test
class TestTranspose(OperatorTest):
    @dtype_test
    def test_transpose_dtype(self, dtype, tester_factory: Callable) -> None:
        # 2-D row/column swap per dtype.
        self._test_op(TransposeModel(dim0=0, dim1=1), (torch.rand(3, 4).to(dtype),), tester_factory)

    def test_transpose_basic(self, tester_factory: Callable) -> None:
        # [3, 4] -> [4, 3].
        self._test_op(TransposeModel(dim0=0, dim1=1), (torch.randn(3, 4),), tester_factory)

    def test_transpose_3d(self, tester_factory: Callable) -> None:
        # Every axis pair of a [2, 3, 4] tensor.
        for d0, d1 in ((0, 1), (0, 2), (1, 2)):
            self._test_op(TransposeModel(dim0=d0, dim1=d1), (torch.randn(2, 3, 4),), tester_factory)

    def test_transpose_4d(self, tester_factory: Callable) -> None:
        # Outer swap (0,3) and inner swap (1,2) on a [2, 3, 4, 5] tensor.
        for d0, d1 in ((0, 3), (1, 2)):
            self._test_op(TransposeModel(dim0=d0, dim1=d1), (torch.randn(2, 3, 4, 5),), tester_factory)

    def test_transpose_identity(self, tester_factory: Callable) -> None:
        # dim0 == dim1 is a no-op.
        for d in (0, 1):
            self._test_op(TransposeModel(dim0=d, dim1=d), (torch.randn(3, 4),), tester_factory)
        for d in (0, 1, 2):
            self._test_op(TransposeModel(dim0=d, dim1=d), (torch.randn(2, 3, 4),), tester_factory)

    def test_transpose_negative_dims(self, tester_factory: Callable) -> None:
        # Negative indices count from the end: (-3,-1) == (0,2) and (-2,-1) == (1,2).
        self._test_op(TransposeModel(dim0=-3, dim1=-1), (torch.randn(2, 3, 4),), tester_factory)
        self._test_op(TransposeModel(dim0=-2, dim1=-1), (torch.randn(2, 3, 4),), tester_factory)

    def test_transpose_different_shapes(self, tester_factory: Callable) -> None:
        # Ranks 2 through 5.
        self._test_op(TransposeModel(dim0=0, dim1=1), (torch.randn(3, 4),), tester_factory)
        self._test_op(TransposeModel(dim0=0, dim1=2), (torch.randn(2, 3, 4),), tester_factory)
        self._test_op(TransposeModel(dim0=1, dim1=3), (torch.randn(2, 3, 4, 5),), tester_factory)
        self._test_op(TransposeModel(dim0=0, dim1=4), (torch.randn(2, 3, 4, 5, 6),), tester_factory)


# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary.
# pyre-strict

from typing import Callable, List

import torch

from executorch.backends.test.compliance_suite import (
    dtype_test,
    operator_test,
    OperatorTest,
)


class CatModel(torch.nn.Module):
    """Concatenates its three inputs along a fixed dimension."""

    def __init__(self, dim: int = 0):
        super().__init__()
        self.dim = dim

    def forward(self, x1, x2, x3):
        return torch.cat([x1, x2, x3], dim=self.dim)


@operator_test
class TestCat(OperatorTest):
    @dtype_test
    def test_cat_dtype(self, dtype, tester_factory: Callable) -> None:
        # Dim-0 concat of [2,3], [3,3], [4,3] per dtype.
        self._test_op(
            CatModel(),
            (
                torch.rand(2, 3).to(dtype),
                torch.rand(3, 3).to(dtype),
                torch.rand(4, 3).to(dtype),
            ),
            tester_factory,
        )

    def test_cat_basic(self, tester_factory: Callable) -> None:
        # [2,3] + [3,3] + [4,3] along dim 0 -> [9, 3].
        self._test_op(
            CatModel(),
            (torch.randn(2, 3), torch.randn(3, 3), torch.randn(4, 3)),
            tester_factory,
        )

    def test_cat_dimensions(self, tester_factory: Callable) -> None:
        # dim 0 -> [9, 3].
        self._test_op(
            CatModel(dim=0),
            (torch.randn(2, 3), torch.randn(3, 3), torch.randn(4, 3)),
            tester_factory,
        )
        # dim 1 -> [3, 9].
        self._test_op(
            CatModel(dim=1),
            (torch.randn(3, 2), torch.randn(3, 3), torch.randn(3, 4)),
            tester_factory,
        )
        # dim 2 -> [2, 3, 6].
        self._test_op(
            CatModel(dim=2),
            (torch.randn(2, 3, 1), torch.randn(2, 3, 2), torch.randn(2, 3, 3)),
            tester_factory,
        )

    def test_cat_negative_dim(self, tester_factory: Callable) -> None:
        # dim=-1 on 2-D inputs behaves as dim 1 -> [3, 9].
        self._test_op(
            CatModel(dim=-1),
            (torch.randn(3, 2), torch.randn(3, 3), torch.randn(3, 4)),
            tester_factory,
        )
        # dim=-2 on 2-D inputs behaves as dim 0 -> [9, 3].
        self._test_op(
            CatModel(dim=-2),
            (torch.randn(2, 3), torch.randn(3, 3), torch.randn(4, 3)),
            tester_factory,
        )

    def test_cat_different_shapes(self, tester_factory: Callable) -> None:
        # 1-D: [2] + [3] + [4] -> [9].
        self._test_op(
            CatModel(),
            (torch.randn(2), torch.randn(3), torch.randn(4)),
            tester_factory,
        )
        # 3-D along each axis.
        self._test_op(
            CatModel(dim=0),
            (torch.randn(1, 3, 4), torch.randn(2, 3, 4), torch.randn(3, 3, 4)),
            tester_factory,
        )
        self._test_op(
            CatModel(dim=1),
            (torch.randn(2, 1, 4), torch.randn(2, 2, 4), torch.randn(2, 3, 4)),
            tester_factory,
        )
        self._test_op(
            CatModel(dim=2),
            (torch.randn(2, 3, 1), torch.randn(2, 3, 2), torch.randn(2, 3, 3)),
            tester_factory,
        )

    def test_cat_same_shapes(self, tester_factory: Callable) -> None:
        # Three identical [2, 3] inputs -> [6, 3].
        self._test_op(
            CatModel(),
            (torch.randn(2, 3), torch.randn(2, 3), torch.randn(2, 3)),
            tester_factory,
        )


# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary.

# pyre-strict

from typing import Callable, List

import torch

from executorch.backends.test.compliance_suite import (
    dtype_test,
    operator_test,
    OperatorTest,
)


class ExpandModel(torch.nn.Module):
    """Applies Tensor.expand with a fixed target shape."""

    def __init__(self, shape: List[int]):
        super().__init__()
        self.shape = shape

    def forward(self, x):
        return x.expand(self.shape)


@operator_test
class TestExpand(OperatorTest):
    @dtype_test
    def test_expand_dtype(self, dtype, tester_factory: Callable) -> None:
        # [1, 5] -> [3, 5] per dtype.
        self._test_op(ExpandModel(shape=[3, 5]), (torch.rand(1, 5).to(dtype),), tester_factory)

    def test_expand_basic(self, tester_factory: Callable) -> None:
        # Broadcast the singleton leading dim: [1, 5] -> [3, 5].
        self._test_op(ExpandModel(shape=[3, 5]), (torch.randn(1, 5),), tester_factory)

    def test_expand_dimensions(self, tester_factory: Callable) -> None:
        # Expand the leading dim.
        self._test_op(ExpandModel(shape=[3, 5]), (torch.randn(1, 5),), tester_factory)
        # Expand both dims at once.
        self._test_op(ExpandModel(shape=[3, 4]), (torch.randn(1, 1),), tester_factory)
        # Prepend a new dim.
        self._test_op(ExpandModel(shape=[2, 1, 5]), (torch.randn(1, 5),), tester_factory)
        # Expand a singleton middle dim.
        self._test_op(ExpandModel(shape=[3, 2, 5]), (torch.randn(3, 1, 5),), tester_factory)
        # Expand a singleton trailing dim.
        self._test_op(ExpandModel(shape=[3, 5, 2]), (torch.randn(3, 5, 1),), tester_factory)

    def test_expand_keep_original_size(self, tester_factory: Callable) -> None:
        # -1 keeps the existing size of that dimension.
        self._test_op(ExpandModel(shape=[3, -1]), (torch.randn(1, 5),), tester_factory)
        self._test_op(ExpandModel(shape=[-1, 5]), (torch.randn(2, 1),), tester_factory)
        self._test_op(ExpandModel(shape=[-1, 4, -1]), (torch.randn(2, 1, 3),), tester_factory)

    def test_expand_singleton_dimensions(self, tester_factory: Callable) -> None:
        # [1] -> [5].
        self._test_op(ExpandModel(shape=[5]), (torch.randn(1),), tester_factory)
        # [1, 1] -> [3, 4].
        self._test_op(ExpandModel(shape=[3, 4]), (torch.randn(1, 1),), tester_factory)
        # [5] -> [3, 5] by prepending a dim.
        self._test_op(ExpandModel(shape=[3, 5]), (torch.randn(5),), tester_factory)


# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary.
# pyre-strict

from typing import Callable, List

import torch

from executorch.backends.test.compliance_suite import (
    dtype_test,
    operator_test,
    OperatorTest,
)


class ReshapeModel(torch.nn.Module):
    """Applies torch.reshape with a fixed target shape."""

    def __init__(self, shape: List[int]):
        super().__init__()
        self.shape = shape

    def forward(self, x):
        return torch.reshape(x, self.shape)


@operator_test
class TestReshape(OperatorTest):
    @dtype_test
    def test_reshape_dtype(self, dtype, tester_factory: Callable) -> None:
        # [15] -> [3, 5] per dtype.
        self._test_op(ReshapeModel(shape=[3, 5]), (torch.rand(15).to(dtype),), tester_factory)

    def test_reshape_basic(self, tester_factory: Callable) -> None:
        # [15] -> [3, 5].
        self._test_op(ReshapeModel(shape=[3, 5]), (torch.randn(15),), tester_factory)

    def test_reshape_dimensions(self, tester_factory: Callable) -> None:
        # Rank changes in both directions.
        self._test_op(ReshapeModel(shape=[3, 5]), (torch.randn(15),), tester_factory)  # 1D -> 2D
        self._test_op(ReshapeModel(shape=[20]), (torch.randn(4, 5),), tester_factory)  # 2D -> 1D
        self._test_op(ReshapeModel(shape=[2, 2, 5]), (torch.randn(4, 5),), tester_factory)  # 2D -> 3D
        self._test_op(ReshapeModel(shape=[6, 4]), (torch.randn(3, 2, 4),), tester_factory)  # 3D -> 2D

    def test_reshape_inferred_dimension(self, tester_factory: Callable) -> None:
        # -1 lets reshape infer one dimension from the element count.
        self._test_op(ReshapeModel(shape=[3, -1]), (torch.randn(15),), tester_factory)
        self._test_op(ReshapeModel(shape=[-1, 5]), (torch.randn(15),), tester_factory)
        self._test_op(ReshapeModel(shape=[2, -1, 3]), (torch.randn(24),), tester_factory)


# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary.

# pyre-strict

from typing import Callable, List

import torch

from executorch.backends.test.compliance_suite import (
    dtype_test,
    operator_test,
    OperatorTest,
)


class SelectModel(torch.nn.Module):
    """Applies torch.select with a fixed dim and index."""

    def __init__(self, dim: int, index: int):
        super().__init__()
        self.dim = dim
        self.index = index

    def forward(self, x):
        return torch.select(x, dim=self.dim, index=self.index)


@operator_test
class TestSelect(OperatorTest):
    @dtype_test
    def test_select_dtype(self, dtype, tester_factory: Callable) -> None:
        # First slice along dim 0 of a [3, 4, 5] tensor, per dtype.
        self._test_op(SelectModel(dim=0, index=0), (torch.rand(3, 4, 5).to(dtype),), tester_factory)

    def test_select_basic(self, tester_factory: Callable) -> None:
        # [3, 4, 5] -> [4, 5].
        self._test_op(SelectModel(dim=0, index=0), (torch.randn(3, 4, 5),), tester_factory)

    def test_select_dimensions(self, tester_factory: Callable) -> None:
        # Select once along each axis of a [3, 4, 5] tensor:
        # dim 0 -> [4,5], dim 1 -> [3,5], dim 2 -> [3,4].
        for d, i in ((0, 1), (1, 2), (2, 3)):
            self._test_op(SelectModel(dim=d, index=i), (torch.randn(3, 4, 5),), tester_factory)

    def test_select_negative_dim(self, tester_factory: Callable) -> None:
        # Negative dims count back from the last axis.
        for d, i in ((-1, 2), (-2, 1), (-3, 0)):
            self._test_op(SelectModel(dim=d, index=i), (torch.randn(3, 4, 5),), tester_factory)

    def test_select_different_shapes(self, tester_factory: Callable) -> None:
        # [3, 4] -> [4].
        self._test_op(SelectModel(dim=0, index=1), (torch.randn(3, 4),), tester_factory)
        # [2, 3, 4, 5] -> [2, 4, 5].
        self._test_op(SelectModel(dim=1, index=1), (torch.randn(2, 3, 4, 5),), tester_factory)

    def test_select_edge_indices(self, tester_factory: Callable) -> None:
        # First and last valid index along dim 0 (size 3).
        self._test_op(SelectModel(dim=0, index=0), (torch.randn(3, 4, 5),), tester_factory)
        self._test_op(SelectModel(dim=0, index=2), (torch.randn(3, 4, 5),), tester_factory)


# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary.
# pyre-strict

from typing import Callable, List, Union

import torch

from executorch.backends.test.compliance_suite import (
    dtype_test,
    operator_test,
    OperatorTest,
)


class SliceSimple(torch.nn.Module):
    """Indexes the input with a single fixed integer index (x[index])."""

    def __init__(self, index=1):
        super().__init__()
        self.index = index

    def forward(self, x):
        return x[self.index]


class SliceRange(torch.nn.Module):
    """Takes a fixed range slice along the leading dimension (x[1:3])."""

    def forward(self, x):
        return x[1:3]


@operator_test
class TestSlice(OperatorTest):
    @dtype_test
    def test_slice_simple_dtype(self, dtype, tester_factory: Callable) -> None:
        # x[1] on a [2, 3, 4] input, per dtype.
        self._test_op(SliceSimple().to(dtype), ((torch.rand(2, 3, 4)).to(dtype),), tester_factory)

    def test_slice_range(self, tester_factory: Callable) -> None:
        # x[1:3] on a [2, 5, 4] input.
        # Fix: the inputs tuple was double-wrapped as ((tensor,),), which would
        # pass a *tuple* (not a tensor) into the model's forward. Pass (tensor,)
        # like every other operator test in this suite.
        self._test_op(SliceRange(), (torch.rand(2, 5, 4),), tester_factory)


# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary.
# pyre-strict

from typing import Callable, List, Union

import torch

from executorch.backends.test.compliance_suite import (
    dtype_test,
    operator_test,
    OperatorTest,
)


class SplitSizeModel(torch.nn.Module):
    """torch.split with a fixed chunk size along a fixed dimension."""

    def __init__(self, split_size: int, dim: int = 0):
        super().__init__()
        self.split_size = split_size
        self.dim = dim

    def forward(self, x):
        return torch.split(x, self.split_size, dim=self.dim)


class SplitSectionsModel(torch.nn.Module):
    """torch.split with an explicit list of section lengths."""

    def __init__(self, sections: List[int], dim: int = 0):
        super().__init__()
        self.sections = sections
        self.dim = dim

    def forward(self, x):
        return torch.split(x, self.sections, dim=self.dim)


@operator_test
class TestSplit(OperatorTest):
    @dtype_test
    def test_split_dtype(self, dtype, tester_factory: Callable) -> None:
        # Even dim-0 split of a [6, 4] tensor, per dtype.
        self._test_op(SplitSizeModel(split_size=2), (torch.rand(6, 4).to(dtype),), tester_factory)

    def test_split_size_basic(self, tester_factory: Callable) -> None:
        # [6, 4] into three [2, 4] chunks.
        self._test_op(SplitSizeModel(split_size=2), (torch.randn(6, 4),), tester_factory)

    def test_split_size_dimensions(self, tester_factory: Callable) -> None:
        # Chunk size 2 along dims 0, 1, and 2.
        self._test_op(SplitSizeModel(split_size=2, dim=0), (torch.randn(6, 4),), tester_factory)
        self._test_op(SplitSizeModel(split_size=2, dim=1), (torch.randn(4, 6),), tester_factory)
        self._test_op(SplitSizeModel(split_size=2, dim=2), (torch.randn(3, 4, 6),), tester_factory)

    def test_split_size_uneven(self, tester_factory: Callable) -> None:
        # 7 is not divisible by 3: the final chunk is smaller ([3, 3, 1]).
        self._test_op(SplitSizeModel(split_size=3), (torch.randn(7, 4),), tester_factory)
        # Same along dim 1: chunks of [4, 3], [4, 3], [4, 1].
        self._test_op(SplitSizeModel(split_size=3, dim=1), (torch.randn(4, 7),), tester_factory)

    def test_split_sections_basic(self, tester_factory: Callable) -> None:
        # Explicit section lengths [2, 3, 1] along dim 0.
        self._test_op(SplitSectionsModel(sections=[2, 3, 1]), (torch.randn(6, 4),), tester_factory)

    def test_split_sections_dimensions(self, tester_factory: Callable) -> None:
        # Sections [2, 3, 1] along dims 0, 1, and 2.
        self._test_op(SplitSectionsModel(sections=[2, 3, 1], dim=0), (torch.randn(6, 4),), tester_factory)
        self._test_op(SplitSectionsModel(sections=[2, 3, 1], dim=1), (torch.randn(4, 6),), tester_factory)
        self._test_op(SplitSectionsModel(sections=[2, 3, 1], dim=2), (torch.randn(3, 4, 6),), tester_factory)

    def test_split_negative_dim(self, tester_factory: Callable) -> None:
        # Negative dims count from the end of the shape.
        self._test_op(SplitSizeModel(split_size=2, dim=-1), (torch.randn(4, 6),), tester_factory)
        self._test_op(SplitSizeModel(split_size=2, dim=-2), (torch.randn(4, 6),), tester_factory)
        self._test_op(SplitSectionsModel(sections=[2, 3, 1], dim=-1), (torch.randn(4, 6),), tester_factory)


# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary.
+ +# pyre-strict + +from typing import Callable, List, Optional, Tuple, Union + +import torch + +from executorch.backends.test.compliance_suite import ( + dtype_test, + operator_test, + OperatorTest, +) + +class SqueezeModel(torch.nn.Module): + def __init__(self): + super().__init__() + + def forward(self, x): + return torch.squeeze(x) + +class SqueezeDimModel(torch.nn.Module): + def __init__(self, dim): + super().__init__() + self.dim = dim + + def forward(self, x): + return torch.squeeze(x, dim=self.dim) + +@operator_test +class TestSqueeze(OperatorTest): + @dtype_test + def test_squeeze_dtype(self, dtype, tester_factory: Callable) -> None: + # Test with different dtypes + model = SqueezeModel() + self._test_op(model, (torch.rand(1, 3, 1, 5).to(dtype),), tester_factory) + + def test_squeeze_basic(self, tester_factory: Callable) -> None: + # Basic test with default parameters (remove all dimensions of size 1) + self._test_op(SqueezeModel(), (torch.randn(1, 3, 1, 5),), tester_factory) + + def test_squeeze_specific_dimension(self, tester_factory: Callable) -> None: + # Test squeezing specific dimensions + + # Squeeze first dimension (size 1) + self._test_op(SqueezeDimModel(dim=0), (torch.randn(1, 3, 5),), tester_factory) + + # Squeeze middle dimension (size 1) + self._test_op(SqueezeDimModel(dim=2), (torch.randn(3, 4, 1, 5),), tester_factory) + + # Squeeze last dimension (size 1) + self._test_op(SqueezeDimModel(dim=-1), (torch.randn(3, 4, 5, 1),), tester_factory) + + def test_squeeze_no_effect(self, tester_factory: Callable) -> None: + # Test cases where squeeze has no effect + + # Dimension specified is not size 1 + self._test_op(SqueezeDimModel(dim=1), (torch.randn(3, 4, 5),), tester_factory) + + # No dimensions of size 1 + self._test_op(SqueezeModel(), (torch.randn(3, 4, 5),), tester_factory) + + def test_squeeze_multiple_dims(self, tester_factory: Callable) -> None: + # Test squeezing multiple dimensions of size 1 + + # Multiple dimensions of size 1 (all removed 
with default parameters) + self._test_op(SqueezeModel(), (torch.randn(1, 3, 1, 5, 1),), tester_factory) + + self._test_op(SqueezeDimModel(dim=(0, 1)), (torch.randn(1, 1, 1),), tester_factory) diff --git a/backends/test/suite/operators/test_stack.py b/backends/test/suite/operators/test_stack.py new file mode 100644 index 00000000000..17e58c2ea44 --- /dev/null +++ b/backends/test/suite/operators/test_stack.py @@ -0,0 +1,146 @@ +# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. + +# pyre-strict + +from typing import Callable, List + +import torch + +from executorch.backends.test.compliance_suite import ( + dtype_test, + operator_test, + OperatorTest, +) + +class StackModel(torch.nn.Module): + def __init__(self, dim: int = 0): + super().__init__() + self.dim = dim + + def forward(self, x1, x2, x3): + return torch.stack([x1, x2, x3], dim=self.dim) + +@operator_test +class TestStack(OperatorTest): + @dtype_test + def test_stack_dtype(self, dtype, tester_factory: Callable) -> None: + # Test with different dtypes + model = StackModel() + self._test_op( + model, + ( + torch.rand(3, 4).to(dtype), + torch.rand(3, 4).to(dtype), + torch.rand(3, 4).to(dtype), + ), + tester_factory + ) + + def test_stack_basic(self, tester_factory: Callable) -> None: + # Basic test with default parameters + # Stack 3 tensors of shape [3, 4] along dimension 0 + # Result will be of shape [3, 3, 4] + self._test_op( + StackModel(), + ( + torch.randn(3, 4), + torch.randn(3, 4), + torch.randn(3, 4), + ), + tester_factory + ) + + def test_stack_dimensions(self, tester_factory: Callable) -> None: + # Test stacking along different dimensions + + # Stack along dimension 0 (default) + # Result will be of shape [3, 3, 4] + self._test_op( + StackModel(dim=0), + ( + torch.randn(3, 4), + torch.randn(3, 4), + torch.randn(3, 4), + ), + tester_factory + ) + + # Stack along dimension 1 + # Result will be of shape [3, 3, 4] + self._test_op( + StackModel(dim=1), + ( + torch.randn(3, 4), + 
torch.randn(3, 4), + torch.randn(3, 4), + ), + tester_factory + ) + + # Stack along dimension 2 + # Result will be of shape [3, 4, 3] + self._test_op( + StackModel(dim=2), + ( + torch.randn(3, 4), + torch.randn(3, 4), + torch.randn(3, 4), + ), + tester_factory + ) + + def test_stack_negative_dim(self, tester_factory: Callable) -> None: + # Test with negative dimensions (counting from the end) + + # Stack along the last dimension (dim=-1) + # For tensors of shape [3, 4], this is equivalent to dim=2 + # Result will be of shape [3, 4, 3] + self._test_op( + StackModel(dim=-1), + ( + torch.randn(3, 4), + torch.randn(3, 4), + torch.randn(3, 4), + ), + tester_factory + ) + + # Stack along the second-to-last dimension (dim=-2) + # For tensors of shape [3, 4], this is equivalent to dim=1 + # Result will be of shape [3, 3, 4] + self._test_op( + StackModel(dim=-2), + ( + torch.randn(3, 4), + torch.randn(3, 4), + torch.randn(3, 4), + ), + tester_factory + ) + + def test_stack_different_shapes(self, tester_factory: Callable) -> None: + # Test with tensors of different shapes + + # Stack 1D tensors + # Result will be of shape [3, 5] + self._test_op( + StackModel(), + ( + torch.randn(5), + torch.randn(5), + torch.randn(5), + ), + tester_factory + ) + + # Stack 3D tensors + # Result will be of shape [3, 2, 3, 4] + self._test_op( + StackModel(), + ( + torch.randn(2, 3, 4), + torch.randn(2, 3, 4), + torch.randn(2, 3, 4), + ), + tester_factory + ) diff --git a/backends/test/suite/operators/test_unsqueeze.py b/backends/test/suite/operators/test_unsqueeze.py new file mode 100644 index 00000000000..34c58f689a3 --- /dev/null +++ b/backends/test/suite/operators/test_unsqueeze.py @@ -0,0 +1,71 @@ +# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. 
+ +# pyre-strict + +from typing import Callable, List + +import torch + +from executorch.backends.test.compliance_suite import ( + dtype_test, + operator_test, + OperatorTest, +) + +class UnsqueezeModel(torch.nn.Module): + def __init__(self, dim: int): + super().__init__() + self.dim = dim + + def forward(self, x): + return torch.unsqueeze(x, self.dim) + +@operator_test +class TestUnsqueeze(OperatorTest): + @dtype_test + def test_unsqueeze_dtype(self, dtype, tester_factory: Callable) -> None: + # Test with different dtypes + model = UnsqueezeModel(dim=1) + self._test_op(model, (torch.rand(3, 5).to(dtype),), tester_factory) + + def test_unsqueeze_basic(self, tester_factory: Callable) -> None: + # Basic test with default parameters + # Add dimension at position 1 + self._test_op(UnsqueezeModel(dim=1), (torch.randn(3, 5),), tester_factory) + + def test_unsqueeze_positions(self, tester_factory: Callable) -> None: + # Test unsqueezing at different positions + + # Unsqueeze at the beginning (dim=0) + self._test_op(UnsqueezeModel(dim=0), (torch.randn(3, 5),), tester_factory) + + # Unsqueeze in the middle (dim=1) + self._test_op(UnsqueezeModel(dim=1), (torch.randn(3, 5),), tester_factory) + + # Unsqueeze at the end (dim=2) + self._test_op(UnsqueezeModel(dim=2), (torch.randn(3, 5),), tester_factory) + + def test_unsqueeze_negative_dim(self, tester_factory: Callable) -> None: + # Test with negative dimensions (counting from the end) + + # Unsqueeze at the end (dim=-1) + self._test_op(UnsqueezeModel(dim=-1), (torch.randn(3, 5),), tester_factory) + + # Unsqueeze at the second-to-last position (dim=-2) + self._test_op(UnsqueezeModel(dim=-2), (torch.randn(3, 5),), tester_factory) + + # Unsqueeze at the beginning (dim=-3) + self._test_op(UnsqueezeModel(dim=-3), (torch.randn(3, 5),), tester_factory) + + def test_unsqueeze_different_shapes(self, tester_factory: Callable) -> None: + # Test with tensors of different shapes + + # 1D tensor + self._test_op(UnsqueezeModel(dim=0), 
(torch.randn(5),), tester_factory) + self._test_op(UnsqueezeModel(dim=1), (torch.randn(5),), tester_factory) + + # 3D tensor + self._test_op(UnsqueezeModel(dim=0), (torch.randn(3, 4, 5),), tester_factory) + self._test_op(UnsqueezeModel(dim=2), (torch.randn(3, 4, 5),), tester_factory) + self._test_op(UnsqueezeModel(dim=3), (torch.randn(3, 4, 5),), tester_factory) + \ No newline at end of file diff --git a/backends/test/suite/operators/test_view.py b/backends/test/suite/operators/test_view.py new file mode 100644 index 00000000000..189f90f6793 --- /dev/null +++ b/backends/test/suite/operators/test_view.py @@ -0,0 +1,62 @@ +# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. + +# pyre-strict + +from typing import Callable, List + +import torch + +from executorch.backends.test.compliance_suite import ( + dtype_test, + operator_test, + OperatorTest, +) + +class ViewModel(torch.nn.Module): + def __init__(self, shape: List[int]): + super().__init__() + self.shape = shape + + def forward(self, x): + return x.view(self.shape) + +@operator_test +class TestView(OperatorTest): + @dtype_test + def test_view_dtype(self, dtype, tester_factory: Callable) -> None: + # Test with different dtypes + model = ViewModel(shape=[3, 5]) + self._test_op(model, (torch.rand(15).to(dtype),), tester_factory) + + def test_view_basic(self, tester_factory: Callable) -> None: + # Basic test with default parameters + # View from [15] to [3, 5] + self._test_op(ViewModel(shape=[3, 5]), (torch.randn(15),), tester_factory) + + def test_view_dimensions(self, tester_factory: Callable) -> None: + # Test viewing to different dimensions + + # View from 1D to 2D + self._test_op(ViewModel(shape=[3, 5]), (torch.randn(15),), tester_factory) + + # View from 2D to 1D + self._test_op(ViewModel(shape=[20]), (torch.randn(4, 5),), tester_factory) + + # View from 2D to 3D + self._test_op(ViewModel(shape=[2, 2, 5]), (torch.randn(4, 5),), tester_factory) + + # View from 3D to 2D + 
self._test_op(ViewModel(shape=[6, 4]), (torch.randn(3, 2, 4),), tester_factory) + + def test_view_inferred_dimension(self, tester_factory: Callable) -> None: + # Test with inferred dimension (-1) + + # Infer the last dimension + self._test_op(ViewModel(shape=[3, -1]), (torch.randn(15),), tester_factory) + + # Infer the first dimension + self._test_op(ViewModel(shape=[-1, 5]), (torch.randn(15),), tester_factory) + + # Infer the middle dimension + self._test_op(ViewModel(shape=[2, -1, 3]), (torch.randn(24),), tester_factory) + \ No newline at end of file From 2adbf6157bbcd6b03b82904d63f217d6032bd7a0 Mon Sep 17 00:00:00 2001 From: Gregory Comer Date: Thu, 24 Jul 2025 21:06:25 -0700 Subject: [PATCH 6/9] Update [ghstack-poisoned] --- .../test/suite/operators/test_index_put.py | 115 ++++++++++++++++++ .../test/suite/operators/test_index_select.py | 104 ++++++++++++++++ 2 files changed, 219 insertions(+) create mode 100644 backends/test/suite/operators/test_index_put.py create mode 100644 backends/test/suite/operators/test_index_select.py diff --git a/backends/test/suite/operators/test_index_put.py b/backends/test/suite/operators/test_index_put.py new file mode 100644 index 00000000000..a4ec3bcbbeb --- /dev/null +++ b/backends/test/suite/operators/test_index_put.py @@ -0,0 +1,115 @@ +# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. 
+ +# pyre-strict + +from typing import Callable, List, Tuple + +import torch + +from executorch.backends.test.compliance_suite import ( + dtype_test, + operator_test, + OperatorTest, +) + +class IndexPutModel(torch.nn.Module): + def __init__(self, accumulate=False): + super().__init__() + self.accumulate = accumulate + + def forward(self, x, indices, values): + # Clone the input to avoid modifying it in-place + result = x.clone() + # Apply index_put_ and return the modified tensor + result.index_put_(indices, values, self.accumulate) + return result + +@operator_test +class TestIndexPut(OperatorTest): + @dtype_test + def test_index_put_dtype(self, dtype, tester_factory: Callable) -> None: + # Test with different dtypes + indices = (torch.tensor([0, 2]),) + values = torch.tensor([10.0, 20.0]).to(dtype) + model = IndexPutModel() + self._test_op(model, ((torch.rand(5, 2) * 100).to(dtype), indices, values), tester_factory, use_random_test_inputs=False) + + def test_index_put_basic(self, tester_factory: Callable) -> None: + # Basic test with default parameters + indices = (torch.tensor([0, 2]),) + values = torch.tensor([10.0, 20.0]) + self._test_op(IndexPutModel(), (torch.randn(5, 2), indices, values), tester_factory, use_random_test_inputs=False) + + def test_index_put_accumulate(self, tester_factory: Callable) -> None: + # Test with accumulate=True and accumulate=False + + # Without accumulation (replace values) + indices = (torch.tensor([0, 2]),) + values = torch.tensor([10.0, 20.0]) + self._test_op(IndexPutModel(accumulate=False), + (torch.ones(5, 2), indices, values), tester_factory, use_random_test_inputs=False) + + # With accumulation (add values) + indices = (torch.tensor([0, 2]),) + values = torch.tensor([10.0, 20.0]) + self._test_op(IndexPutModel(accumulate=True), + (torch.ones(5, 2), indices, values), tester_factory, use_random_test_inputs=False) + + def test_index_put_shapes(self, tester_factory: Callable) -> None: + # Test with different tensor shapes + + # 
1D tensor + indices = (torch.tensor([0, 2]),) + values = torch.tensor([10.0, 20.0]) + self._test_op(IndexPutModel(), + (torch.randn(5), indices, values), tester_factory, use_random_test_inputs=False) + + # 2D tensor + indices = (torch.tensor([0, 2]), torch.tensor([1, 1])) + values = torch.tensor([10.0, 20.0]) + self._test_op(IndexPutModel(), + (torch.randn(5, 2), indices, values), tester_factory, use_random_test_inputs=False) + + # 3D tensor + indices = (torch.tensor([0, 2]), torch.tensor([1, 1]), torch.tensor([0, 1])) + values = torch.tensor([10.0, 20.0]) + self._test_op(IndexPutModel(), + (torch.randn(5, 3, 2), indices, values), tester_factory, use_random_test_inputs=False) + + # 4D tensor + indices = (torch.tensor([0, 2]), torch.tensor([1, 1]), + torch.tensor([0, 1]), torch.tensor([2, 3])) + values = torch.tensor([10.0,]) + self._test_op(IndexPutModel(), + (torch.randn(5, 3, 2, 4), indices, values), tester_factory, use_random_test_inputs=False) + + def test_index_put_indices(self, tester_factory: Callable) -> None: + # Test with different index patterns + + # Single index + indices = (torch.tensor([2]),) + values = torch.tensor([10.0]) + self._test_op(IndexPutModel(), + (torch.randn(5, 2), indices, values), tester_factory, use_random_test_inputs=False) + + # Multiple indices + indices = (torch.tensor([0, 2, 4]),) + values = torch.tensor([10.0, 20.0, 30.0]) + self._test_op(IndexPutModel(), + (torch.randn(5, 3), indices, values), tester_factory, use_random_test_inputs=False) + + # Repeated indices with accumulate=True (values add up) + indices = (torch.tensor([1, 1, 3, 3]),) + values = torch.tensor([10.0, 20.0, 30.0, 40.0]) + self._test_op(IndexPutModel(accumulate=True), + (torch.randn(5), indices, values), tester_factory, use_random_test_inputs=False) + + def test_index_put_edge_cases(self, tester_factory: Callable) -> None: + # Test edge cases + + # Put values in all positions + indices = (torch.tensor([0, 1, 2, 3, 4]),) + values = torch.tensor([10.0, 20.0, 
30.0, 40.0, 50.0]) + self._test_op(IndexPutModel(), + (torch.randn(5, 5), indices, values), tester_factory, use_random_test_inputs=False) + \ No newline at end of file diff --git a/backends/test/suite/operators/test_index_select.py b/backends/test/suite/operators/test_index_select.py new file mode 100644 index 00000000000..eb4da2d11e8 --- /dev/null +++ b/backends/test/suite/operators/test_index_select.py @@ -0,0 +1,104 @@ +# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. + +# pyre-strict + +from typing import Callable + +import torch + +from executorch.backends.test.compliance_suite import ( + dtype_test, + operator_test, + OperatorTest, +) + +class IndexSelectModel(torch.nn.Module): + def __init__(self, dim=0): + super().__init__() + self.dim = dim + + def forward(self, x, indices): + return torch.index_select(x, self.dim, indices) + +@operator_test +class TestIndexSelect(OperatorTest): + @dtype_test + def test_index_select_dtype(self, dtype, tester_factory: Callable) -> None: + # Test with different dtypes + indices = torch.tensor([0, 2], dtype=torch.int64) + model = IndexSelectModel(dim=0) + self._test_op(model, ((torch.rand(5, 3) * 100).to(dtype), indices), tester_factory, use_random_test_inputs=False) + + def test_index_select_basic(self, tester_factory: Callable) -> None: + # Basic test with default parameters + indices = torch.tensor([0, 2], dtype=torch.int64) + self._test_op(IndexSelectModel(dim=0), (torch.randn(5, 3), indices), tester_factory, use_random_test_inputs=False) + + def test_index_select_dimensions(self, tester_factory: Callable) -> None: + # Test selecting along different dimensions + + # Select along dim 0 + indices = torch.tensor([0, 2], dtype=torch.int64) + self._test_op(IndexSelectModel(dim=0), (torch.randn(5, 3), indices), tester_factory, use_random_test_inputs=False) + + # Select along dim 1 + indices = torch.tensor([0, 1], dtype=torch.int64) + self._test_op(IndexSelectModel(dim=1), (torch.randn(5, 3), indices), 
tester_factory, use_random_test_inputs=False) + + # Select along dim 2 in a 3D tensor + indices = torch.tensor([0, 2], dtype=torch.int64) + self._test_op(IndexSelectModel(dim=2), (torch.randn(3, 4, 5), indices), tester_factory, use_random_test_inputs=False) + + def test_index_select_shapes(self, tester_factory: Callable) -> None: + # Test with different tensor shapes + indices = torch.tensor([0, 1], dtype=torch.int64) + + # 1D tensor + self._test_op(IndexSelectModel(dim=0), (torch.randn(5), indices), tester_factory, use_random_test_inputs=False) + + # 2D tensor + self._test_op(IndexSelectModel(dim=0), (torch.randn(5, 3), indices), tester_factory, use_random_test_inputs=False) + + # 3D tensor + self._test_op(IndexSelectModel(dim=0), (torch.randn(5, 3, 2), indices), tester_factory, use_random_test_inputs=False) + + # 4D tensor + self._test_op(IndexSelectModel(dim=0), (torch.randn(5, 3, 2, 4), indices), tester_factory, use_random_test_inputs=False) + + def test_index_select_indices(self, tester_factory: Callable) -> None: + # Test with different index patterns + + # Single index + indices = torch.tensor([2], dtype=torch.int64) + self._test_op(IndexSelectModel(dim=0), (torch.randn(5, 3), indices), tester_factory, use_random_test_inputs=False) + + # Multiple indices + indices = torch.tensor([0, 2, 4], dtype=torch.int64) + self._test_op(IndexSelectModel(dim=0), (torch.randn(5, 3), indices), tester_factory, use_random_test_inputs=False) + + # Repeated indices + indices = torch.tensor([1, 1, 3, 3], dtype=torch.int64) + self._test_op(IndexSelectModel(dim=0), (torch.randn(5, 3), indices), tester_factory, use_random_test_inputs=False) + + # Reversed indices + indices = torch.tensor([4, 3, 2, 1, 0], dtype=torch.int64) + self._test_op(IndexSelectModel(dim=0), (torch.randn(5, 3), indices), tester_factory, use_random_test_inputs=False) + + def test_index_select_edge_cases(self, tester_factory: Callable) -> None: + # Test edge cases + + # Select all indices + indices = 
torch.tensor([0, 1, 2, 3, 4], dtype=torch.int64) + self._test_op(IndexSelectModel(dim=0), (torch.randn(5, 3), indices), tester_factory, use_random_test_inputs=False) + + # Select from a dimension with size 1 + indices = torch.tensor([0], dtype=torch.int64) + self._test_op(IndexSelectModel(dim=0), (torch.randn(1, 3), indices), tester_factory, use_random_test_inputs=False) + + # Select from a tensor with all zeros + indices = torch.tensor([0, 1], dtype=torch.int64) + self._test_op(IndexSelectModel(dim=0), (torch.zeros(5, 3), indices), tester_factory, use_random_test_inputs=False) + + # Select from a tensor with all ones + indices = torch.tensor([0, 1], dtype=torch.int64) + self._test_op(IndexSelectModel(dim=0), (torch.ones(5, 3), indices), tester_factory, use_random_test_inputs=False) From 709f39b51e8fe7d792e151899fd647226838172b Mon Sep 17 00:00:00 2001 From: Gregory Comer Date: Thu, 24 Jul 2025 21:06:29 -0700 Subject: [PATCH 7/9] Update [ghstack-poisoned] --- backends/test/suite/operators/test_amax.py | 196 +++++++++++++++ backends/test/suite/operators/test_amin.py | 196 +++++++++++++++ backends/test/suite/operators/test_argmax.py | 156 ++++++++++++ backends/test/suite/operators/test_argmin.py | 156 ++++++++++++ backends/test/suite/operators/test_mean.py | 223 ++++++++++++++++ backends/test/suite/operators/test_median.py | 251 +++++++++++++++++++ 6 files changed, 1178 insertions(+) create mode 100644 backends/test/suite/operators/test_amax.py create mode 100644 backends/test/suite/operators/test_amin.py create mode 100644 backends/test/suite/operators/test_argmax.py create mode 100644 backends/test/suite/operators/test_argmin.py create mode 100644 backends/test/suite/operators/test_mean.py create mode 100644 backends/test/suite/operators/test_median.py diff --git a/backends/test/suite/operators/test_amax.py b/backends/test/suite/operators/test_amax.py new file mode 100644 index 00000000000..6488746f83d --- /dev/null +++ b/backends/test/suite/operators/test_amax.py 
@@ -0,0 +1,196 @@ +# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. + +# pyre-strict + +from typing import Callable, List, Optional, Tuple, Union + +import torch + +from executorch.backends.test.compliance_suite import ( + dtype_test, + operator_test, + OperatorTest, +) + +class AmaxModel(torch.nn.Module): + def __init__( + self, + dim: Optional[Union[int, Tuple[int, ...], List[int]]] = None, + keepdim: bool = False + ): + super().__init__() + self.dim = dim + self.keepdim = keepdim + + def forward(self, x): + return torch.amax(x, dim=self.dim, keepdim=self.keepdim) + +@operator_test +class TestAmax(OperatorTest): + @dtype_test + def test_amax_dtype(self, dtype, tester_factory: Callable) -> None: + # Test with different dtypes + model = AmaxModel().to(dtype) + self._test_op(model, (torch.rand(10, 10).to(dtype),), tester_factory) + + def test_amax_basic(self, tester_factory: Callable) -> None: + # Basic test with default parameters (global reduction) + self._test_op(AmaxModel(), (torch.randn(10, 10),), tester_factory) + + def test_amax_dim(self, tester_factory: Callable) -> None: + # Test with different dimensions + + # 2D tensor, dim=0 + self._test_op(AmaxModel(dim=0), (torch.randn(5, 10),), tester_factory) + + # 2D tensor, dim=1 + self._test_op(AmaxModel(dim=1), (torch.randn(5, 10),), tester_factory) + + # 3D tensor, dim=0 + self._test_op(AmaxModel(dim=0), (torch.randn(3, 4, 5),), tester_factory) + + # 3D tensor, dim=1 + self._test_op(AmaxModel(dim=1), (torch.randn(3, 4, 5),), tester_factory) + + # 3D tensor, dim=2 + self._test_op(AmaxModel(dim=2), (torch.randn(3, 4, 5),), tester_factory) + + # 4D tensor, dim=1 + self._test_op(AmaxModel(dim=1), (torch.randn(2, 3, 4, 5),), tester_factory) + + # Negative dim (last dimension) + self._test_op(AmaxModel(dim=-1), (torch.randn(3, 4, 5),), tester_factory) + + # Negative dim (second-to-last dimension) + self._test_op(AmaxModel(dim=-2), (torch.randn(3, 4, 5),), tester_factory) + + def 
test_amax_multi_dim(self, tester_factory: Callable) -> None: + # Test with multiple dimensions + + # 3D tensor, dim=(0, 1) + self._test_op(AmaxModel(dim=(0, 1)), (torch.randn(3, 4, 5),), tester_factory) + + # 3D tensor, dim=(0, 2) + self._test_op(AmaxModel(dim=(0, 2)), (torch.randn(3, 4, 5),), tester_factory) + + # 3D tensor, dim=(1, 2) + self._test_op(AmaxModel(dim=(1, 2)), (torch.randn(3, 4, 5),), tester_factory) + + # 4D tensor, dim=(1, 3) + self._test_op(AmaxModel(dim=(1, 3)), (torch.randn(2, 3, 4, 5),), tester_factory) + + # 4D tensor, dim=(0, 2) + self._test_op(AmaxModel(dim=(0, 2)), (torch.randn(2, 3, 4, 5),), tester_factory) + + # 4D tensor, dim=(-1, -3) + self._test_op(AmaxModel(dim=(-1, -3)), (torch.randn(2, 3, 4, 5),), tester_factory) + + # 4D tensor, all dimensions + self._test_op(AmaxModel(dim=(0, 1, 2, 3)), (torch.randn(2, 3, 4, 5),), tester_factory) + + def test_amax_keepdim(self, tester_factory: Callable) -> None: + # Test with keepdim=True + + # 2D tensor, dim=0, keepdim=True + self._test_op(AmaxModel(dim=0, keepdim=True), (torch.randn(5, 10),), tester_factory) + + # 2D tensor, dim=1, keepdim=True + self._test_op(AmaxModel(dim=1, keepdim=True), (torch.randn(5, 10),), tester_factory) + + # 3D tensor, dim=1, keepdim=True + self._test_op(AmaxModel(dim=1, keepdim=True), (torch.randn(3, 4, 5),), tester_factory) + + # 4D tensor, dim=2, keepdim=True + self._test_op(AmaxModel(dim=2, keepdim=True), (torch.randn(2, 3, 4, 5),), tester_factory) + + # Multiple dimensions with keepdim=True + self._test_op(AmaxModel(dim=(1, 2), keepdim=True), (torch.randn(3, 4, 5),), tester_factory) + + def test_amax_shapes(self, tester_factory: Callable) -> None: + # Test with different tensor shapes + + # 1D tensor + self._test_op(AmaxModel(), (torch.randn(20),), tester_factory) + self._test_op(AmaxModel(dim=0), (torch.randn(20),), tester_factory) + + # 2D tensor + self._test_op(AmaxModel(), (torch.randn(5, 10),), tester_factory) + + # 3D tensor + self._test_op(AmaxModel(), 
(torch.randn(3, 4, 5),), tester_factory) + + # 4D tensor + self._test_op(AmaxModel(), (torch.randn(2, 3, 4, 5),), tester_factory) + + # 5D tensor + self._test_op(AmaxModel(), (torch.randn(2, 2, 3, 4, 5),), tester_factory) + + def test_amax_values(self, tester_factory: Callable) -> None: + # Test with different value patterns + + # Tensor with clear maximum + x = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) + self._test_op(AmaxModel(), (x,), tester_factory) + self._test_op(AmaxModel(dim=0), (x,), tester_factory) + self._test_op(AmaxModel(dim=1), (x,), tester_factory) + + # Tensor with duplicate maximum values + x = torch.tensor([[3.0, 2.0, 3.0], [6.0, 6.0, 5.0]]) + self._test_op(AmaxModel(), (x,), tester_factory) + self._test_op(AmaxModel(dim=0), (x,), tester_factory) + self._test_op(AmaxModel(dim=1), (x,), tester_factory) + + # Tensor with negative values + x = torch.tensor([[-3.0, -2.0, -1.0], [-6.0, -5.0, -4.0]]) + self._test_op(AmaxModel(), (x,), tester_factory) + self._test_op(AmaxModel(dim=0), (x,), tester_factory) + self._test_op(AmaxModel(dim=1), (x,), tester_factory) + + # Tensor with mixed positive and negative values + x = torch.tensor([[-3.0, 2.0, -1.0], [6.0, -5.0, 4.0]]) + self._test_op(AmaxModel(), (x,), tester_factory) + self._test_op(AmaxModel(dim=0), (x,), tester_factory) + self._test_op(AmaxModel(dim=1), (x,), tester_factory) + + def test_amax_edge_cases(self, tester_factory: Callable) -> None: + # Test edge cases + + # Tensor with all same values + x = torch.ones(3, 4) + self._test_op(AmaxModel(), (x,), tester_factory) + self._test_op(AmaxModel(dim=0), (x,), tester_factory) + self._test_op(AmaxModel(dim=1), (x,), tester_factory) + + # Zero tensor + x = torch.zeros(3, 4) + self._test_op(AmaxModel(), (x,), tester_factory) + self._test_op(AmaxModel(dim=0), (x,), tester_factory) + self._test_op(AmaxModel(dim=1), (x,), tester_factory) + + # Tensor with infinity + x = torch.tensor([[1.0, float('inf'), 3.0], [4.0, 5.0, float('inf')]]) + 
self._test_op(AmaxModel(), (x,), tester_factory) + self._test_op(AmaxModel(dim=0), (x,), tester_factory) + self._test_op(AmaxModel(dim=1), (x,), tester_factory) + + # Tensor with negative infinity + x = torch.tensor([[1.0, float('-inf'), 3.0], [4.0, 5.0, float('-inf')]]) + self._test_op(AmaxModel(), (x,), tester_factory) + self._test_op(AmaxModel(dim=0), (x,), tester_factory) + self._test_op(AmaxModel(dim=1), (x,), tester_factory) + + # Tensor with NaN (NaN should be propagated) + x = torch.tensor([[1.0, float('nan'), 3.0], [4.0, 5.0, float('nan')]]) + self._test_op(AmaxModel(), (x,), tester_factory) + self._test_op(AmaxModel(dim=0), (x,), tester_factory) + self._test_op(AmaxModel(dim=1), (x,), tester_factory) + + # Single element tensor + x = torch.tensor([5.0]) + self._test_op(AmaxModel(), (x,), tester_factory) + self._test_op(AmaxModel(dim=0), (x,), tester_factory) + + def test_amax_scalar(self, tester_factory: Callable) -> None: + # Test with scalar input (1-element tensor) + self._test_op(AmaxModel(), (torch.tensor([5.0]),), tester_factory) + self._test_op(AmaxModel(dim=0), (torch.tensor([5.0]),), tester_factory) diff --git a/backends/test/suite/operators/test_amin.py b/backends/test/suite/operators/test_amin.py new file mode 100644 index 00000000000..599e53d0dad --- /dev/null +++ b/backends/test/suite/operators/test_amin.py @@ -0,0 +1,196 @@ +# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. 
+ +# pyre-strict + +from typing import Callable, List, Optional, Tuple, Union + +import torch + +from executorch.backends.test.compliance_suite import ( + dtype_test, + operator_test, + OperatorTest, +) + +class AminModel(torch.nn.Module): + def __init__( + self, + dim: Optional[Union[int, Tuple[int, ...], List[int]]] = None, + keepdim: bool = False + ): + super().__init__() + self.dim = dim + self.keepdim = keepdim + + def forward(self, x): + return torch.amin(x, dim=self.dim, keepdim=self.keepdim) + +@operator_test +class TestAmin(OperatorTest): + @dtype_test + def test_amin_dtype(self, dtype, tester_factory: Callable) -> None: + # Test with different dtypes + model = AminModel().to(dtype) + self._test_op(model, (torch.rand(10, 10).to(dtype),), tester_factory) + + def test_amin_basic(self, tester_factory: Callable) -> None: + # Basic test with default parameters (global reduction) + self._test_op(AminModel(), (torch.randn(10, 10),), tester_factory) + + def test_amin_dim(self, tester_factory: Callable) -> None: + # Test with different dimensions + + # 2D tensor, dim=0 + self._test_op(AminModel(dim=0), (torch.randn(5, 10),), tester_factory) + + # 2D tensor, dim=1 + self._test_op(AminModel(dim=1), (torch.randn(5, 10),), tester_factory) + + # 3D tensor, dim=0 + self._test_op(AminModel(dim=0), (torch.randn(3, 4, 5),), tester_factory) + + # 3D tensor, dim=1 + self._test_op(AminModel(dim=1), (torch.randn(3, 4, 5),), tester_factory) + + # 3D tensor, dim=2 + self._test_op(AminModel(dim=2), (torch.randn(3, 4, 5),), tester_factory) + + # 4D tensor, dim=1 + self._test_op(AminModel(dim=1), (torch.randn(2, 3, 4, 5),), tester_factory) + + # Negative dim (last dimension) + self._test_op(AminModel(dim=-1), (torch.randn(3, 4, 5),), tester_factory) + + # Negative dim (second-to-last dimension) + self._test_op(AminModel(dim=-2), (torch.randn(3, 4, 5),), tester_factory) + + def test_amin_multi_dim(self, tester_factory: Callable) -> None: + # Test with multiple dimensions + + # 3D 
tensor, dim=(0, 1) + self._test_op(AminModel(dim=(0, 1)), (torch.randn(3, 4, 5),), tester_factory) + + # 3D tensor, dim=(0, 2) + self._test_op(AminModel(dim=(0, 2)), (torch.randn(3, 4, 5),), tester_factory) + + # 3D tensor, dim=(1, 2) + self._test_op(AminModel(dim=(1, 2)), (torch.randn(3, 4, 5),), tester_factory) + + # 4D tensor, dim=(1, 3) + self._test_op(AminModel(dim=(1, 3)), (torch.randn(2, 3, 4, 5),), tester_factory) + + # 4D tensor, dim=(0, 2) + self._test_op(AminModel(dim=(0, 2)), (torch.randn(2, 3, 4, 5),), tester_factory) + + # 4D tensor, dim=(-1, -3) + self._test_op(AminModel(dim=(-1, -3)), (torch.randn(2, 3, 4, 5),), tester_factory) + + # 4D tensor, all dimensions + self._test_op(AminModel(dim=(0, 1, 2, 3)), (torch.randn(2, 3, 4, 5),), tester_factory) + + def test_amin_keepdim(self, tester_factory: Callable) -> None: + # Test with keepdim=True + + # 2D tensor, dim=0, keepdim=True + self._test_op(AminModel(dim=0, keepdim=True), (torch.randn(5, 10),), tester_factory) + + # 2D tensor, dim=1, keepdim=True + self._test_op(AminModel(dim=1, keepdim=True), (torch.randn(5, 10),), tester_factory) + + # 3D tensor, dim=1, keepdim=True + self._test_op(AminModel(dim=1, keepdim=True), (torch.randn(3, 4, 5),), tester_factory) + + # 4D tensor, dim=2, keepdim=True + self._test_op(AminModel(dim=2, keepdim=True), (torch.randn(2, 3, 4, 5),), tester_factory) + + # Multiple dimensions with keepdim=True + self._test_op(AminModel(dim=(1, 2), keepdim=True), (torch.randn(3, 4, 5),), tester_factory) + + def test_amin_shapes(self, tester_factory: Callable) -> None: + # Test with different tensor shapes + + # 1D tensor + self._test_op(AminModel(), (torch.randn(20),), tester_factory) + self._test_op(AminModel(dim=0), (torch.randn(20),), tester_factory) + + # 2D tensor + self._test_op(AminModel(), (torch.randn(5, 10),), tester_factory) + + # 3D tensor + self._test_op(AminModel(), (torch.randn(3, 4, 5),), tester_factory) + + # 4D tensor + self._test_op(AminModel(), (torch.randn(2, 3, 4, 
5),), tester_factory) + + # 5D tensor + self._test_op(AminModel(), (torch.randn(2, 2, 3, 4, 5),), tester_factory) + + def test_amin_values(self, tester_factory: Callable) -> None: + # Test with different value patterns + + # Tensor with clear minimum + x = torch.tensor([[6.0, 5.0, 4.0], [3.0, 2.0, 1.0]]) + self._test_op(AminModel(), (x,), tester_factory) + self._test_op(AminModel(dim=0), (x,), tester_factory) + self._test_op(AminModel(dim=1), (x,), tester_factory) + + # Tensor with duplicate minimum values + x = torch.tensor([[3.0, 2.0, 2.0], [1.0, 1.0, 5.0]]) + self._test_op(AminModel(), (x,), tester_factory) + self._test_op(AminModel(dim=0), (x,), tester_factory) + self._test_op(AminModel(dim=1), (x,), tester_factory) + + # Tensor with negative values + x = torch.tensor([[-3.0, -2.0, -1.0], [-6.0, -5.0, -4.0]]) + self._test_op(AminModel(), (x,), tester_factory) + self._test_op(AminModel(dim=0), (x,), tester_factory) + self._test_op(AminModel(dim=1), (x,), tester_factory) + + # Tensor with mixed positive and negative values + x = torch.tensor([[-3.0, 2.0, -1.0], [6.0, -5.0, 4.0]]) + self._test_op(AminModel(), (x,), tester_factory) + self._test_op(AminModel(dim=0), (x,), tester_factory) + self._test_op(AminModel(dim=1), (x,), tester_factory) + + def test_amin_edge_cases(self, tester_factory: Callable) -> None: + # Test edge cases + + # Tensor with all same values + x = torch.ones(3, 4) + self._test_op(AminModel(), (x,), tester_factory) + self._test_op(AminModel(dim=0), (x,), tester_factory) + self._test_op(AminModel(dim=1), (x,), tester_factory) + + # Zero tensor + x = torch.zeros(3, 4) + self._test_op(AminModel(), (x,), tester_factory) + self._test_op(AminModel(dim=0), (x,), tester_factory) + self._test_op(AminModel(dim=1), (x,), tester_factory) + + # Tensor with infinity + x = torch.tensor([[1.0, float('inf'), 3.0], [4.0, 5.0, float('inf')]]) + self._test_op(AminModel(), (x,), tester_factory) + self._test_op(AminModel(dim=0), (x,), tester_factory) + 
self._test_op(AminModel(dim=1), (x,), tester_factory) + + # Tensor with negative infinity + x = torch.tensor([[1.0, float('-inf'), 3.0], [4.0, 5.0, float('-inf')]]) + self._test_op(AminModel(), (x,), tester_factory) + self._test_op(AminModel(dim=0), (x,), tester_factory) + self._test_op(AminModel(dim=1), (x,), tester_factory) + + # Tensor with NaN (NaN should be propagated) + x = torch.tensor([[1.0, float('nan'), 3.0], [4.0, 5.0, float('nan')]]) + self._test_op(AminModel(), (x,), tester_factory) + self._test_op(AminModel(dim=0), (x,), tester_factory) + self._test_op(AminModel(dim=1), (x,), tester_factory) + + # Single element tensor + x = torch.tensor([5.0]) + self._test_op(AminModel(), (x,), tester_factory) + self._test_op(AminModel(dim=0), (x,), tester_factory) + + def test_amin_scalar(self, tester_factory: Callable) -> None: + # Test with scalar input (1-element tensor) + self._test_op(AminModel(), (torch.tensor([5.0]),), tester_factory) + self._test_op(AminModel(dim=0), (torch.tensor([5.0]),), tester_factory) diff --git a/backends/test/suite/operators/test_argmax.py b/backends/test/suite/operators/test_argmax.py new file mode 100644 index 00000000000..db5b406f87e --- /dev/null +++ b/backends/test/suite/operators/test_argmax.py @@ -0,0 +1,156 @@ +# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. 
# pyre-strict

from typing import Callable, Optional

import torch

from executorch.backends.test.compliance_suite import (
    dtype_test,
    operator_test,
    OperatorTest,
)


class ArgmaxModel(torch.nn.Module):
    """Wraps torch.argmax so the operator can be lowered and run by the suite."""

    def __init__(self, dim: Optional[int] = None, keepdim: bool = False):
        super().__init__()
        # Reduction dimension; None means argmax over the flattened input.
        self.dim = dim
        # When True, the reduced dimension is retained with size 1.
        self.keepdim = keepdim

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return torch.argmax(x, dim=self.dim, keepdim=self.keepdim)


@operator_test
class TestArgmax(OperatorTest):
    """Backend compliance tests for torch.argmax."""

    @dtype_test
    def test_argmax_dtype(self, dtype, tester_factory: Callable) -> None:
        # Test with different dtypes
        model = ArgmaxModel().to(dtype)
        self._test_op(model, (torch.rand(10, 10).to(dtype),), tester_factory)

    def test_argmax_basic(self, tester_factory: Callable) -> None:
        # Basic test with default parameters (flattened tensor)
        self._test_op(ArgmaxModel(), (torch.randn(10, 10),), tester_factory)

    def test_argmax_dim(self, tester_factory: Callable) -> None:
        # Test with different dimensions

        # 2D tensor, dim=0
        self._test_op(ArgmaxModel(dim=0), (torch.randn(5, 10),), tester_factory)

        # 2D tensor, dim=1
        self._test_op(ArgmaxModel(dim=1), (torch.randn(5, 10),), tester_factory)

        # 3D tensor, dim=0
        self._test_op(ArgmaxModel(dim=0), (torch.randn(3, 4, 5),), tester_factory)

        # 3D tensor, dim=1
        self._test_op(ArgmaxModel(dim=1), (torch.randn(3, 4, 5),), tester_factory)

        # 3D tensor, dim=2
        self._test_op(ArgmaxModel(dim=2), (torch.randn(3, 4, 5),), tester_factory)

        # 4D tensor, dim=1
        self._test_op(ArgmaxModel(dim=1), (torch.randn(2, 3, 4, 5),), tester_factory)

        # Negative dim (last dimension)
        self._test_op(ArgmaxModel(dim=-1), (torch.randn(3, 4, 5),), tester_factory)

        # Negative dim (second-to-last dimension)
        self._test_op(ArgmaxModel(dim=-2), (torch.randn(3, 4, 5),), tester_factory)

    def test_argmax_keepdim(self, tester_factory: Callable) -> None:
        # Test with keepdim=True

        # 2D tensor, dim=0, keepdim=True
        self._test_op(ArgmaxModel(dim=0, keepdim=True), (torch.randn(5, 10),), tester_factory)

        # 2D tensor, dim=1, keepdim=True
        self._test_op(ArgmaxModel(dim=1, keepdim=True), (torch.randn(5, 10),), tester_factory)

        # 3D tensor, dim=1, keepdim=True
        self._test_op(ArgmaxModel(dim=1, keepdim=True), (torch.randn(3, 4, 5),), tester_factory)

        # 4D tensor, dim=2, keepdim=True
        self._test_op(ArgmaxModel(dim=2, keepdim=True), (torch.randn(2, 3, 4, 5),), tester_factory)

    def test_argmax_shapes(self, tester_factory: Callable) -> None:
        # Test with different tensor shapes

        # 1D tensor
        self._test_op(ArgmaxModel(), (torch.randn(20),), tester_factory)

        # 2D tensor
        self._test_op(ArgmaxModel(), (torch.randn(5, 10),), tester_factory)

        # 3D tensor
        self._test_op(ArgmaxModel(), (torch.randn(3, 4, 5),), tester_factory)

        # 4D tensor
        self._test_op(ArgmaxModel(), (torch.randn(2, 3, 4, 5),), tester_factory)

        # 5D tensor
        self._test_op(ArgmaxModel(), (torch.randn(2, 2, 3, 4, 5),), tester_factory)

    def test_argmax_values(self, tester_factory: Callable) -> None:
        # Test with different value patterns

        # Tensor with clear maximum
        x = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
        self._test_op(ArgmaxModel(), (x,), tester_factory)
        self._test_op(ArgmaxModel(dim=0), (x,), tester_factory)
        self._test_op(ArgmaxModel(dim=1), (x,), tester_factory)

        # Tensor with duplicate maximum values (should return first occurrence)
        x = torch.tensor([[3.0, 2.0, 3.0], [6.0, 6.0, 5.0]])
        self._test_op(ArgmaxModel(), (x,), tester_factory)
        self._test_op(ArgmaxModel(dim=0), (x,), tester_factory)
        self._test_op(ArgmaxModel(dim=1), (x,), tester_factory)

        # Tensor with negative values
        x = torch.tensor([[-3.0, -2.0, -1.0], [-6.0, -5.0, -4.0]])
        self._test_op(ArgmaxModel(), (x,), tester_factory)
        self._test_op(ArgmaxModel(dim=0), (x,), tester_factory)
        self._test_op(ArgmaxModel(dim=1), (x,), tester_factory)

        # Tensor with mixed positive and negative values
        x = torch.tensor([[-3.0, 2.0, -1.0], [6.0, -5.0, 4.0]])
        self._test_op(ArgmaxModel(), (x,), tester_factory)
        self._test_op(ArgmaxModel(dim=0), (x,), tester_factory)
        self._test_op(ArgmaxModel(dim=1), (x,), tester_factory)

    def test_argmax_edge_cases(self, tester_factory: Callable) -> None:
        # Test edge cases

        # Tensor with all same values
        x = torch.ones(3, 4)
        self._test_op(ArgmaxModel(), (x,), tester_factory)
        self._test_op(ArgmaxModel(dim=0), (x,), tester_factory)
        self._test_op(ArgmaxModel(dim=1), (x,), tester_factory)

        # Zero tensor
        x = torch.zeros(3, 4)
        self._test_op(ArgmaxModel(), (x,), tester_factory)
        self._test_op(ArgmaxModel(dim=0), (x,), tester_factory)
        self._test_op(ArgmaxModel(dim=1), (x,), tester_factory)

        # Tensor with infinity
        x = torch.tensor([[1.0, float('inf'), 3.0], [4.0, 5.0, float('inf')]])
        self._test_op(ArgmaxModel(), (x,), tester_factory)
        self._test_op(ArgmaxModel(dim=0), (x,), tester_factory)
        self._test_op(ArgmaxModel(dim=1), (x,), tester_factory)

        # Tensor with NaN: torch.argmax treats NaN as the maximal value, so
        # the index of a NaN element is returned (NaN propagates; it is NOT
        # ignored in the comparison).
        x = torch.tensor([[1.0, float('nan'), 3.0], [4.0, 5.0, float('nan')]])
        self._test_op(ArgmaxModel(), (x,), tester_factory)
        self._test_op(ArgmaxModel(dim=0), (x,), tester_factory)
        self._test_op(ArgmaxModel(dim=1), (x,), tester_factory)

        # Single element tensor
        x = torch.tensor([5.0])
        self._test_op(ArgmaxModel(), (x,), tester_factory)

    def test_argmax_scalar(self, tester_factory: Callable) -> None:
        # Test with scalar input (1-element tensor)
        self._test_op(ArgmaxModel(), (torch.tensor([5.0]),), tester_factory)


# --- patch scaffolding for the next file in the original diff (preserved) ---
# diff --git a/backends/test/suite/operators/test_argmin.py b/backends/test/suite/operators/test_argmin.py
# new file mode 100644
# index 00000000000..3fe1e45d643
# --- /dev/null
# +++ b/backends/test/suite/operators/test_argmin.py
# @@ -0,0 +1,156 @@
# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary.
# pyre-strict

from typing import Callable, Optional

import torch

from executorch.backends.test.compliance_suite import (
    dtype_test,
    operator_test,
    OperatorTest,
)


class ArgminModel(torch.nn.Module):
    """Wraps torch.argmin so the operator can be lowered and run by the suite."""

    def __init__(self, dim: Optional[int] = None, keepdim: bool = False):
        super().__init__()
        # Reduction dimension; None means argmin over the flattened input.
        self.dim = dim
        # When True, the reduced dimension is retained with size 1.
        self.keepdim = keepdim

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return torch.argmin(x, dim=self.dim, keepdim=self.keepdim)


@operator_test
class TestArgmin(OperatorTest):
    """Backend compliance tests for torch.argmin."""

    @dtype_test
    def test_argmin_dtype(self, dtype, tester_factory: Callable) -> None:
        # Test with different dtypes
        model = ArgminModel().to(dtype)
        self._test_op(model, (torch.rand(10, 10).to(dtype),), tester_factory)

    def test_argmin_basic(self, tester_factory: Callable) -> None:
        # Basic test with default parameters (flattened tensor)
        self._test_op(ArgminModel(), (torch.randn(10, 10),), tester_factory)

    def test_argmin_dim(self, tester_factory: Callable) -> None:
        # Test with different dimensions

        # 2D tensor, dim=0
        self._test_op(ArgminModel(dim=0), (torch.randn(5, 10),), tester_factory)

        # 2D tensor, dim=1
        self._test_op(ArgminModel(dim=1), (torch.randn(5, 10),), tester_factory)

        # 3D tensor, dim=0
        self._test_op(ArgminModel(dim=0), (torch.randn(3, 4, 5),), tester_factory)

        # 3D tensor, dim=1
        self._test_op(ArgminModel(dim=1), (torch.randn(3, 4, 5),), tester_factory)

        # 3D tensor, dim=2
        self._test_op(ArgminModel(dim=2), (torch.randn(3, 4, 5),), tester_factory)

        # 4D tensor, dim=1
        self._test_op(ArgminModel(dim=1), (torch.randn(2, 3, 4, 5),), tester_factory)

        # Negative dim (last dimension)
        self._test_op(ArgminModel(dim=-1), (torch.randn(3, 4, 5),), tester_factory)

        # Negative dim (second-to-last dimension)
        self._test_op(ArgminModel(dim=-2), (torch.randn(3, 4, 5),), tester_factory)

    def test_argmin_keepdim(self, tester_factory: Callable) -> None:
        # Test with keepdim=True

        # 2D tensor, dim=0, keepdim=True
        self._test_op(ArgminModel(dim=0, keepdim=True), (torch.randn(5, 10),), tester_factory)

        # 2D tensor, dim=1, keepdim=True
        self._test_op(ArgminModel(dim=1, keepdim=True), (torch.randn(5, 10),), tester_factory)

        # 3D tensor, dim=1, keepdim=True
        self._test_op(ArgminModel(dim=1, keepdim=True), (torch.randn(3, 4, 5),), tester_factory)

        # 4D tensor, dim=2, keepdim=True
        self._test_op(ArgminModel(dim=2, keepdim=True), (torch.randn(2, 3, 4, 5),), tester_factory)

    def test_argmin_shapes(self, tester_factory: Callable) -> None:
        # Test with different tensor shapes

        # 1D tensor
        self._test_op(ArgminModel(), (torch.randn(20),), tester_factory)

        # 2D tensor
        self._test_op(ArgminModel(), (torch.randn(5, 10),), tester_factory)

        # 3D tensor
        self._test_op(ArgminModel(), (torch.randn(3, 4, 5),), tester_factory)

        # 4D tensor
        self._test_op(ArgminModel(), (torch.randn(2, 3, 4, 5),), tester_factory)

        # 5D tensor
        self._test_op(ArgminModel(), (torch.randn(2, 2, 3, 4, 5),), tester_factory)

    def test_argmin_values(self, tester_factory: Callable) -> None:
        # Test with different value patterns

        # Tensor with clear minimum
        x = torch.tensor([[6.0, 5.0, 4.0], [3.0, 2.0, 1.0]])
        self._test_op(ArgminModel(), (x,), tester_factory)
        self._test_op(ArgminModel(dim=0), (x,), tester_factory)
        self._test_op(ArgminModel(dim=1), (x,), tester_factory)

        # Tensor with duplicate minimum values (should return first occurrence)
        x = torch.tensor([[3.0, 2.0, 2.0], [1.0, 1.0, 5.0]])
        self._test_op(ArgminModel(), (x,), tester_factory)
        self._test_op(ArgminModel(dim=0), (x,), tester_factory)
        self._test_op(ArgminModel(dim=1), (x,), tester_factory)

        # Tensor with negative values
        x = torch.tensor([[-1.0, -2.0, -3.0], [-4.0, -5.0, -6.0]])
        self._test_op(ArgminModel(), (x,), tester_factory)
        self._test_op(ArgminModel(dim=0), (x,), tester_factory)
        self._test_op(ArgminModel(dim=1), (x,), tester_factory)

        # Tensor with mixed positive and negative values
        x = torch.tensor([[-3.0, 2.0, -1.0], [6.0, -5.0, 4.0]])
        self._test_op(ArgminModel(), (x,), tester_factory)
        self._test_op(ArgminModel(dim=0), (x,), tester_factory)
        self._test_op(ArgminModel(dim=1), (x,), tester_factory)

    def test_argmin_edge_cases(self, tester_factory: Callable) -> None:
        # Test edge cases

        # Tensor with all same values
        x = torch.ones(3, 4)
        self._test_op(ArgminModel(), (x,), tester_factory)
        self._test_op(ArgminModel(dim=0), (x,), tester_factory)
        self._test_op(ArgminModel(dim=1), (x,), tester_factory)

        # Zero tensor
        x = torch.zeros(3, 4)
        self._test_op(ArgminModel(), (x,), tester_factory)
        self._test_op(ArgminModel(dim=0), (x,), tester_factory)
        self._test_op(ArgminModel(dim=1), (x,), tester_factory)

        # Tensor with negative infinity (the minimal value)
        x = torch.tensor([[1.0, float('-inf'), 3.0], [4.0, 5.0, float('-inf')]])
        self._test_op(ArgminModel(), (x,), tester_factory)
        self._test_op(ArgminModel(dim=0), (x,), tester_factory)
        self._test_op(ArgminModel(dim=1), (x,), tester_factory)

        # Tensor with NaN: torch.argmin treats NaN as the minimal value, so
        # the index of a NaN element is returned (NaN propagates; it is NOT
        # ignored in the comparison).
        x = torch.tensor([[1.0, float('nan'), 3.0], [4.0, 5.0, float('nan')]])
        self._test_op(ArgminModel(), (x,), tester_factory)
        self._test_op(ArgminModel(dim=0), (x,), tester_factory)
        self._test_op(ArgminModel(dim=1), (x,), tester_factory)

        # Single element tensor
        x = torch.tensor([5.0])
        self._test_op(ArgminModel(), (x,), tester_factory)

    def test_argmin_scalar(self, tester_factory: Callable) -> None:
        # Test with scalar input (1-element tensor)
        self._test_op(ArgminModel(), (torch.tensor([5.0]),), tester_factory)


# --- patch scaffolding for the next file in the original diff (preserved) ---
# diff --git a/backends/test/suite/operators/test_mean.py b/backends/test/suite/operators/test_mean.py
# new file mode 100644
# index 00000000000..776f2f0bbd5
# --- /dev/null
# +++ b/backends/test/suite/operators/test_mean.py
# @@ -0,0 +1,223 @@
# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary.
# pyre-strict

from typing import Callable, List, Optional, Tuple, Union

import torch

from executorch.backends.test.compliance_suite import (
    dtype_test,
    operator_test,
    OperatorTest,
)


class MeanModel(torch.nn.Module):
    """Wraps torch.mean so the reduction can be lowered and run by the suite."""

    def __init__(
        self,
        dim: Optional[Union[int, Tuple[int, ...], List[int]]] = None,
        keepdim: bool = False,
        dtype: Optional[torch.dtype] = None,
    ):
        super().__init__()
        # Dimension(s) to reduce over; None reduces over all dimensions.
        self.dim = dim
        # When True, reduced dimensions are retained with size 1.
        self.keepdim = keepdim
        # Optional output dtype; required for integral inputs, since
        # torch.mean cannot infer a floating output dtype from them.
        self.dtype = dtype

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return torch.mean(x, dim=self.dim, keepdim=self.keepdim, dtype=self.dtype)


@operator_test
class TestMean(OperatorTest):
    """Backend compliance tests for torch.mean."""

    @dtype_test
    def test_mean_dtype(self, dtype, tester_factory: Callable) -> None:
        # Test with different dtypes
        model = MeanModel().to(dtype)
        self._test_op(model, (torch.rand(10, 10).to(dtype),), tester_factory)

    def test_mean_basic(self, tester_factory: Callable) -> None:
        # Basic test with default parameters (global reduction)
        self._test_op(MeanModel(), (torch.randn(10, 10),), tester_factory)

    def test_mean_dim(self, tester_factory: Callable) -> None:
        # Test with different dimensions

        # 2D tensor, dim=0
        self._test_op(MeanModel(dim=0), (torch.randn(5, 10),), tester_factory)

        # 2D tensor, dim=1
        self._test_op(MeanModel(dim=1), (torch.randn(5, 10),), tester_factory)

        # 3D tensor, dim=0
        self._test_op(MeanModel(dim=0), (torch.randn(3, 4, 5),), tester_factory)

        # 3D tensor, dim=1
        self._test_op(MeanModel(dim=1), (torch.randn(3, 4, 5),), tester_factory)

        # 3D tensor, dim=2
        self._test_op(MeanModel(dim=2), (torch.randn(3, 4, 5),), tester_factory)

        # 4D tensor, dim=1
        self._test_op(MeanModel(dim=1), (torch.randn(2, 3, 4, 5),), tester_factory)

        # Negative dim (last dimension)
        self._test_op(MeanModel(dim=-1), (torch.randn(3, 4, 5),), tester_factory)

        # Negative dim (second-to-last dimension)
        self._test_op(MeanModel(dim=-2), (torch.randn(3, 4, 5),), tester_factory)

    def test_mean_multi_dim(self, tester_factory: Callable) -> None:
        # Test with multiple dimensions

        # 3D tensor, dim=(0, 1)
        self._test_op(MeanModel(dim=(0, 1)), (torch.randn(3, 4, 5),), tester_factory)

        # 3D tensor, dim=(0, 2)
        self._test_op(MeanModel(dim=(0, 2)), (torch.randn(3, 4, 5),), tester_factory)

        # 3D tensor, dim=(1, 2)
        self._test_op(MeanModel(dim=(1, 2)), (torch.randn(3, 4, 5),), tester_factory)

        # 4D tensor, dim=(1, 3)
        self._test_op(MeanModel(dim=(1, 3)), (torch.randn(2, 3, 4, 5),), tester_factory)

        # 4D tensor, dim=(0, 2)
        self._test_op(MeanModel(dim=(0, 2)), (torch.randn(2, 3, 4, 5),), tester_factory)

        # 4D tensor, dim=(-1, -3)
        self._test_op(MeanModel(dim=(-1, -3)), (torch.randn(2, 3, 4, 5),), tester_factory)

        # 4D tensor, all dimensions
        self._test_op(MeanModel(dim=(0, 1, 2, 3)), (torch.randn(2, 3, 4, 5),), tester_factory)

    def test_mean_keepdim(self, tester_factory: Callable) -> None:
        # Test with keepdim=True

        # 2D tensor, dim=0, keepdim=True
        self._test_op(MeanModel(dim=0, keepdim=True), (torch.randn(5, 10),), tester_factory)

        # 2D tensor, dim=1, keepdim=True
        self._test_op(MeanModel(dim=1, keepdim=True), (torch.randn(5, 10),), tester_factory)

        # 3D tensor, dim=1, keepdim=True
        self._test_op(MeanModel(dim=1, keepdim=True), (torch.randn(3, 4, 5),), tester_factory)

        # 4D tensor, dim=2, keepdim=True
        self._test_op(MeanModel(dim=2, keepdim=True), (torch.randn(2, 3, 4, 5),), tester_factory)

        # Multiple dimensions with keepdim=True
        self._test_op(MeanModel(dim=(1, 2), keepdim=True), (torch.randn(3, 4, 5),), tester_factory)

    def test_mean_output_dtype(self, tester_factory: Callable) -> None:
        # Test with explicit output dtype

        # Integer input with float output
        self._test_op(MeanModel(dtype=torch.float32), (torch.randint(0, 10, (5, 10)),), tester_factory)

        # Float input with specified float output
        self._test_op(MeanModel(dtype=torch.float64), (torch.randn(5, 10),), tester_factory)

        # With dimension reduction and dtype
        self._test_op(MeanModel(dim=1, dtype=torch.float64), (torch.randn(5, 10),), tester_factory)

    def test_mean_shapes(self, tester_factory: Callable) -> None:
        # Test with different tensor shapes

        # 1D tensor
        self._test_op(MeanModel(), (torch.randn(20),), tester_factory)
        self._test_op(MeanModel(dim=0), (torch.randn(20),), tester_factory)

        # 2D tensor
        self._test_op(MeanModel(), (torch.randn(5, 10),), tester_factory)

        # 3D tensor
        self._test_op(MeanModel(), (torch.randn(3, 4, 5),), tester_factory)

        # 4D tensor
        self._test_op(MeanModel(), (torch.randn(2, 3, 4, 5),), tester_factory)

        # 5D tensor
        self._test_op(MeanModel(), (torch.randn(2, 2, 3, 4, 5),), tester_factory)

    def test_mean_values(self, tester_factory: Callable) -> None:
        # Test with different value patterns

        # Tensor with integer sequence
        x = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
        self._test_op(MeanModel(), (x,), tester_factory)
        self._test_op(MeanModel(dim=0), (x,), tester_factory)
        self._test_op(MeanModel(dim=1), (x,), tester_factory)

        # Tensor with duplicate values
        x = torch.tensor([[3.0, 3.0, 3.0], [6.0, 6.0, 6.0]])
        self._test_op(MeanModel(), (x,), tester_factory)
        self._test_op(MeanModel(dim=0), (x,), tester_factory)
        self._test_op(MeanModel(dim=1), (x,), tester_factory)

        # Tensor with negative values
        x = torch.tensor([[-3.0, -2.0, -1.0], [-6.0, -5.0, -4.0]])
        self._test_op(MeanModel(), (x,), tester_factory)
        self._test_op(MeanModel(dim=0), (x,), tester_factory)
        self._test_op(MeanModel(dim=1), (x,), tester_factory)

        # Tensor with mixed positive and negative values
        x = torch.tensor([[-3.0, 2.0, -1.0], [6.0, -5.0, 4.0]])
        self._test_op(MeanModel(), (x,), tester_factory)
        self._test_op(MeanModel(dim=0), (x,), tester_factory)
        self._test_op(MeanModel(dim=1), (x,), tester_factory)

        # Tensor with fractional values
        x = torch.tensor([[0.5, 1.5, 2.5], [3.5, 4.5, 5.5]])
        self._test_op(MeanModel(), (x,), tester_factory)
        self._test_op(MeanModel(dim=0), (x,), tester_factory)
        self._test_op(MeanModel(dim=1), (x,), tester_factory)

    def test_mean_edge_cases(self, tester_factory: Callable) -> None:
        # Test edge cases

        # Tensor with all same values
        x = torch.ones(3, 4)
        self._test_op(MeanModel(), (x,), tester_factory)
        self._test_op(MeanModel(dim=0), (x,), tester_factory)
        self._test_op(MeanModel(dim=1), (x,), tester_factory)

        # Zero tensor
        x = torch.zeros(3, 4)
        self._test_op(MeanModel(), (x,), tester_factory)
        self._test_op(MeanModel(dim=0), (x,), tester_factory)
        self._test_op(MeanModel(dim=1), (x,), tester_factory)

        # Tensor with infinity
        x = torch.tensor([[1.0, float('inf'), 3.0], [4.0, 5.0, float('inf')]])
        self._test_op(MeanModel(), (x,), tester_factory)
        self._test_op(MeanModel(dim=0), (x,), tester_factory)
        self._test_op(MeanModel(dim=1), (x,), tester_factory)

        # Tensor with negative infinity
        x = torch.tensor([[1.0, float('-inf'), 3.0], [4.0, 5.0, float('-inf')]])
        self._test_op(MeanModel(), (x,), tester_factory)
        self._test_op(MeanModel(dim=0), (x,), tester_factory)
        self._test_op(MeanModel(dim=1), (x,), tester_factory)

        # Tensor with NaN (NaN should be propagated)
        x = torch.tensor([[1.0, float('nan'), 3.0], [4.0, 5.0, float('nan')]])
        self._test_op(MeanModel(), (x,), tester_factory)
        self._test_op(MeanModel(dim=0), (x,), tester_factory)
        self._test_op(MeanModel(dim=1), (x,), tester_factory)

        # Single element tensor
        x = torch.tensor([5.0])
        self._test_op(MeanModel(), (x,), tester_factory)
        self._test_op(MeanModel(dim=0), (x,), tester_factory)

    def test_mean_scalar(self, tester_factory: Callable) -> None:
        # Test with scalar input (1-element tensor)
        self._test_op(MeanModel(), (torch.tensor([5.0]),), tester_factory)
        self._test_op(MeanModel(dim=0), (torch.tensor([5.0]),), tester_factory)

    def test_mean_integer_division(self, tester_factory: Callable) -> None:
        # Test with integer tensors. torch.mean raises on integral inputs
        # unless a floating-point output dtype is supplied explicitly
        # ("could not infer output dtype"), so dtype=torch.float32 is passed.
        x = torch.tensor([[1, 2, 3], [4, 5, 6]])
        self._test_op(MeanModel(dtype=torch.float32), (x,), tester_factory)
        self._test_op(MeanModel(dim=0, dtype=torch.float32), (x,), tester_factory)
        self._test_op(MeanModel(dim=1, dtype=torch.float32), (x,), tester_factory)


# --- patch scaffolding for the next file in the original diff (preserved) ---
# diff --git a/backends/test/suite/operators/test_median.py b/backends/test/suite/operators/test_median.py
# new file mode 100644
# index 00000000000..422a2e5df57
# --- /dev/null
# +++ b/backends/test/suite/operators/test_median.py
# @@ -0,0 +1,251 @@
# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary.


class MedianModel(torch.nn.Module):
    """Wraps torch.median.

    With a dim, returns the (values, indices) tuple from the dim overload.
    With dim=None, returns the global median value only (torch.median has no
    dim=None overload, so the global form must be called without a dim).
    """

    def __init__(
        self,
        dim: Optional[int] = None,
        keepdim: bool = False,
    ):
        super().__init__()
        self.dim = dim
        self.keepdim = keepdim

    def forward(self, x):
        if self.dim is None:
            # Passing dim=None to the dim overload raises a TypeError;
            # fall back to the global reduction.
            return torch.median(x)
        return torch.median(x, dim=self.dim, keepdim=self.keepdim)


class MedianValueOnlyModel(torch.nn.Module):
    """Model that returns only the median values (not indices) when dim is specified."""

    def __init__(
        self,
        dim: Optional[int] = None,
        keepdim: bool = False,
    ):
        super().__init__()
        self.dim = dim
        self.keepdim = keepdim

    def forward(self, x):
        if self.dim is not None:
            return torch.median(x, dim=self.dim, keepdim=self.keepdim)[0]
        else:
            return torch.median(x)


@operator_test
class TestMedian(OperatorTest):
    """Backend compliance tests for torch.median."""

    @dtype_test
    def test_median_dtype(self, dtype, tester_factory: Callable) -> None:
        # Test with different dtypes (global reduction)
        model = MedianValueOnlyModel().to(dtype)
        self._test_op(model, (torch.rand(10, 10).to(dtype),), tester_factory)

    def test_median_basic(self, tester_factory: Callable) -> None:
        # Basic test with default parameters (global reduction)
        self._test_op(MedianValueOnlyModel(), (torch.randn(10, 10),), tester_factory)

    def test_median_dim(self, tester_factory: Callable) -> None:
        # Test with different dimensions (values only)

        # 2D tensor, dim=0
        self._test_op(MedianValueOnlyModel(dim=0), (torch.randn(5, 10),), tester_factory)

        # 2D tensor, dim=1
        self._test_op(MedianValueOnlyModel(dim=1), (torch.randn(5, 10),), tester_factory)

        # 3D tensor, dim=0
        self._test_op(MedianValueOnlyModel(dim=0), (torch.randn(3, 4, 5),), tester_factory)

        # 3D tensor, dim=1
        self._test_op(MedianValueOnlyModel(dim=1), (torch.randn(3, 4, 5),), tester_factory)

        # 3D tensor, dim=2
        self._test_op(MedianValueOnlyModel(dim=2), (torch.randn(3, 4, 5),), tester_factory)

        # 4D tensor, dim=1
        self._test_op(MedianValueOnlyModel(dim=1), (torch.randn(2, 3, 4, 5),), tester_factory)

        # Negative dim (last dimension)
        self._test_op(MedianValueOnlyModel(dim=-1), (torch.randn(3, 4, 5),), tester_factory)

        # Negative dim (second-to-last dimension)
        self._test_op(MedianValueOnlyModel(dim=-2), (torch.randn(3, 4, 5),), tester_factory)

    def test_median_with_indices(self, tester_factory: Callable) -> None:
        # Test with different dimensions (values and indices)

        # 2D tensor, dim=0
        self._test_op(MedianModel(dim=0), (torch.randn(5, 10),), tester_factory)

        # 2D tensor, dim=1
        self._test_op(MedianModel(dim=1), (torch.randn(5, 10),), tester_factory)

        # 3D tensor, dim=0
        self._test_op(MedianModel(dim=0), (torch.randn(3, 4, 5),), tester_factory)

        # 3D tensor, dim=1
        self._test_op(MedianModel(dim=1), (torch.randn(3, 4, 5),), tester_factory)

        # 3D tensor, dim=2
        self._test_op(MedianModel(dim=2), (torch.randn(3, 4, 5),), tester_factory)

        # 4D tensor, dim=1
        self._test_op(MedianModel(dim=1), (torch.randn(2, 3, 4, 5),), tester_factory)

        # Negative dim (last dimension)
        self._test_op(MedianModel(dim=-1), (torch.randn(3, 4, 5),), tester_factory)

        # Negative dim (second-to-last dimension)
        self._test_op(MedianModel(dim=-2), (torch.randn(3, 4, 5),), tester_factory)

    def test_median_keepdim(self, tester_factory: Callable) -> None:
        # Test with keepdim=True (values only)

        # 2D tensor, dim=0, keepdim=True
        self._test_op(MedianValueOnlyModel(dim=0, keepdim=True), (torch.randn(5, 10),), tester_factory)

        # 2D tensor, dim=1, keepdim=True
        self._test_op(MedianValueOnlyModel(dim=1, keepdim=True), (torch.randn(5, 10),), tester_factory)

        # 3D tensor, dim=1, keepdim=True
        self._test_op(MedianValueOnlyModel(dim=1, keepdim=True), (torch.randn(3, 4, 5),), tester_factory)

        # 4D tensor, dim=2, keepdim=True
        self._test_op(MedianValueOnlyModel(dim=2, keepdim=True), (torch.randn(2, 3, 4, 5),), tester_factory)

    def test_median_keepdim_with_indices(self, tester_factory: Callable) -> None:
        # Test with keepdim=True (values and indices)

        # 2D tensor, dim=0, keepdim=True
        self._test_op(MedianModel(dim=0, keepdim=True), (torch.randn(5, 10),), tester_factory)

        # 2D tensor, dim=1, keepdim=True
        self._test_op(MedianModel(dim=1, keepdim=True), (torch.randn(5, 10),), tester_factory)

        # 3D tensor, dim=1, keepdim=True
        self._test_op(MedianModel(dim=1, keepdim=True), (torch.randn(3, 4, 5),), tester_factory)

        # 4D tensor, dim=2, keepdim=True
        self._test_op(MedianModel(dim=2, keepdim=True), (torch.randn(2, 3, 4, 5),), tester_factory)

    def test_median_shapes(self, tester_factory: Callable) -> None:
        # Test with different tensor shapes (global reduction)

        # 1D tensor
        self._test_op(MedianValueOnlyModel(), (torch.randn(20),), tester_factory)

        # 2D tensor
        self._test_op(MedianValueOnlyModel(), (torch.randn(5, 10),), tester_factory)

        # 3D tensor
        self._test_op(MedianValueOnlyModel(), (torch.randn(3, 4, 5),), tester_factory)

        # 4D tensor
        self._test_op(MedianValueOnlyModel(), (torch.randn(2, 3, 4, 5),), tester_factory)

        # 5D tensor
        self._test_op(MedianValueOnlyModel(), (torch.randn(2, 2, 3, 4, 5),), tester_factory)

    def test_median_values(self, tester_factory: Callable) -> None:
        # Test with different value patterns (global reduction)

        # Tensor with sequential values
        x = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
        self._test_op(MedianValueOnlyModel(), (x,), tester_factory)

        # Tensor with odd number of elements (clear median)
        x = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0])
        self._test_op(MedianValueOnlyModel(), (x,), tester_factory)

        # Tensor with even number of elements. Unlike numpy, torch.median
        # returns the LOWER of the two middle values, not their average.
        x = torch.tensor([1.0, 2.0, 3.0, 4.0])
        self._test_op(MedianValueOnlyModel(), (x,), tester_factory)

        # Tensor with duplicate values
        x = torch.tensor([[3.0, 3.0, 3.0], [6.0, 6.0, 6.0]])
        self._test_op(MedianValueOnlyModel(), (x,), tester_factory)

        # Tensor with negative values
        x = torch.tensor([[-3.0, -2.0, -1.0], [-6.0, -5.0, -4.0]])
        self._test_op(MedianValueOnlyModel(), (x,), tester_factory)

        # Tensor with mixed positive and negative values
        x = torch.tensor([[-3.0, 2.0, -1.0], [6.0, -5.0, 4.0]])
        self._test_op(MedianValueOnlyModel(), (x,), tester_factory)

        # Tensor with fractional values
        x = torch.tensor([[0.5, 1.5, 2.5], [3.5, 4.5, 5.5]])
        self._test_op(MedianValueOnlyModel(), (x,), tester_factory)

    def test_median_dim_values(self, tester_factory: Callable) -> None:
        # Test with different value patterns (dimension reduction)

        # Tensor with sequential values
        x = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
        self._test_op(MedianValueOnlyModel(dim=0), (x,), tester_factory)
        self._test_op(MedianValueOnlyModel(dim=1), (x,), tester_factory)

        # Tensor with odd number of elements in dimension
        x = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]])
        self._test_op(MedianValueOnlyModel(dim=0), (x,), tester_factory)

        # Tensor with even number of elements in dimension
        x = torch.tensor([[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]])
        self._test_op(MedianValueOnlyModel(dim=1), (x,), tester_factory)

        # Tensor with unsorted values
        x = torch.tensor([[3.0, 1.0, 2.0], [6.0, 4.0, 5.0]])
        self._test_op(MedianValueOnlyModel(dim=1), (x,), tester_factory)

    def test_median_edge_cases(self, tester_factory: Callable) -> None:
        # Test edge cases

        # Tensor with all same values
        x = torch.ones(3, 4)
        self._test_op(MedianValueOnlyModel(), (x,), tester_factory)
        self._test_op(MedianValueOnlyModel(dim=0), (x,), tester_factory)
        self._test_op(MedianValueOnlyModel(dim=1), (x,), tester_factory)

        # Zero tensor
        x = torch.zeros(3, 4)
        self._test_op(MedianValueOnlyModel(), (x,), tester_factory)
        self._test_op(MedianValueOnlyModel(dim=0), (x,), tester_factory)
        self._test_op(MedianValueOnlyModel(dim=1), (x,), tester_factory)

        # Tensor with infinity
        x = torch.tensor([[1.0, float('inf'), 3.0], [4.0, 5.0, float('inf')]])
        self._test_op(MedianValueOnlyModel(), (x,), tester_factory)
        self._test_op(MedianValueOnlyModel(dim=0), (x,), tester_factory)
        self._test_op(MedianValueOnlyModel(dim=1), (x,), tester_factory)

        # Tensor with negative infinity
        x = torch.tensor([[1.0, float('-inf'), 3.0], [4.0, 5.0, float('-inf')]])
        self._test_op(MedianValueOnlyModel(), (x,), tester_factory)
        self._test_op(MedianValueOnlyModel(dim=0), (x,), tester_factory)
        self._test_op(MedianValueOnlyModel(dim=1), (x,), tester_factory)

        # Tensor with NaN (NaN should be propagated)
        x = torch.tensor([[1.0, float('nan'), 3.0], [4.0, 5.0, float('nan')]])
        self._test_op(MedianValueOnlyModel(), (x,), tester_factory)
        self._test_op(MedianValueOnlyModel(dim=0), (x,), tester_factory)
        self._test_op(MedianValueOnlyModel(dim=1), (x,), tester_factory)

        # Single element tensor
        x = torch.tensor([5.0])
        self._test_op(MedianValueOnlyModel(), (x,), tester_factory)
        self._test_op(MedianValueOnlyModel(dim=0), (x,), tester_factory)

    def test_median_scalar(self, tester_factory: Callable) -> None:
        # Test with scalar input (1-element tensor)
        self._test_op(MedianValueOnlyModel(), (torch.tensor([5.0]),), tester_factory)
        self._test_op(MedianValueOnlyModel(dim=0), (torch.tensor([5.0]),), tester_factory)
109961faba81b941a6a8e381d21ec372b8d475e5 Mon Sep 17 00:00:00 2001 From: Gregory Comer Date: Thu, 24 Jul 2025 21:06:34 -0700 Subject: [PATCH 8/9] Update [ghstack-poisoned] --- .../test/suite/operators/test_floor_divide.py | 227 ++++++++++++++++++ backends/test/suite/operators/test_neg.py | 99 ++++++++ backends/test/suite/operators/test_pow.py | 171 +++++++++++++ backends/test/suite/operators/test_round.py | 164 +++++++++++++ backends/test/suite/operators/test_rsqrt.py | 104 ++++++++ backends/test/suite/operators/test_sqrt.py | 103 ++++++++ backends/test/suite/operators/test_square.py | 116 +++++++++ backends/test/suite/operators/test_trunc.py | 109 +++++++++ 8 files changed, 1093 insertions(+) create mode 100644 backends/test/suite/operators/test_floor_divide.py create mode 100644 backends/test/suite/operators/test_neg.py create mode 100644 backends/test/suite/operators/test_pow.py create mode 100644 backends/test/suite/operators/test_round.py create mode 100644 backends/test/suite/operators/test_rsqrt.py create mode 100644 backends/test/suite/operators/test_sqrt.py create mode 100644 backends/test/suite/operators/test_square.py create mode 100644 backends/test/suite/operators/test_trunc.py diff --git a/backends/test/suite/operators/test_floor_divide.py b/backends/test/suite/operators/test_floor_divide.py new file mode 100644 index 00000000000..a0d128a1b14 --- /dev/null +++ b/backends/test/suite/operators/test_floor_divide.py @@ -0,0 +1,227 @@ +# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. 
+ +# pyre-strict + +from typing import Callable + +import torch + +from executorch.backends.test.compliance_suite import ( + dtype_test, + operator_test, + OperatorTest, +) + +class FloorDivideModel(torch.nn.Module): + def __init__(self): + super().__init__() + + def forward(self, x, y): + return torch.floor_divide(x, y) + +@operator_test +class TestFloorDivide(OperatorTest): + @dtype_test + def test_floor_divide_dtype(self, dtype, tester_factory: Callable) -> None: + # Test with different dtypes + model = FloorDivideModel().to(dtype) + # Use values that won't cause division by zero + x = torch.randint(-100, 100, (10, 10)).to(dtype) + y = torch.full_like(x, 2) # Divisor of 2 + self._test_op(model, (x, y), tester_factory, use_random_test_inputs=False) + + def test_floor_divide_basic(self, tester_factory: Callable) -> None: + # Basic test with default parameters + # Input: tensor with integer values, divisor: constant tensor + x = torch.randint(-100, 100, (10, 10)) + y = torch.full_like(x, 2).clone() # Divisor of 2 + self._test_op(FloorDivideModel(), (x, y), tester_factory, use_random_test_inputs=False) + + def test_floor_divide_scalar_divisors(self, tester_factory: Callable) -> None: + # Test with different scalar divisors as tensors + + # Positive divisor + x = torch.randint(-100, 100, (10, 10)) + y = torch.full_like(x, 3) # Divisor of 3 + self._test_op(FloorDivideModel(), (x, y), tester_factory, use_random_test_inputs=False) + + # Negative divisor + x = torch.randint(-100, 100, (10, 10)) + y = torch.full_like(x, -2) # Divisor of -2 + self._test_op(FloorDivideModel(), (x, y), tester_factory, use_random_test_inputs=False) + + # Fractional divisor + x = torch.randint(-100, 100, (10, 10)).float() + y = torch.full_like(x, 2.5) # Divisor of 2.5 + self._test_op(FloorDivideModel(), (x, y), tester_factory, use_random_test_inputs=False) + + # Large divisor + x = torch.randint(-1000, 1000, (10, 10)) + y = torch.full_like(x, 100) # Divisor of 100 + 
self._test_op(FloorDivideModel(), (x, y), tester_factory, use_random_test_inputs=False) + + # Small divisor + x = torch.randint(-100, 100, (10, 10)).float() + y = torch.full_like(x, 0.5) # Divisor of 0.5 + self._test_op(FloorDivideModel(), (x, y), tester_factory, use_random_test_inputs=False) + + def test_floor_divide_tensor_divisors(self, tester_factory: Callable) -> None: + # Test with tensor divisors + + # Constant divisor tensor + x = torch.randint(-100, 100, (10, 10)) + y = torch.full_like(x, 2) # All elements are 2 + self._test_op(FloorDivideModel(), (x, y), tester_factory, use_random_test_inputs=False) + + # Random divisor tensor (non-zero) + x = torch.randint(-100, 100, (10, 10)) + y = torch.randint(1, 10, (10, 10)) # Positive divisors + self._test_op(FloorDivideModel(), (x, y), tester_factory, use_random_test_inputs=False) + + # Mixed positive and negative divisors + x = torch.randint(-100, 100, (10, 10)) + y = torch.randint(-10, 10, (10, 10)) + # Replace zeros to avoid division by zero + y[y == 0] = 1 + self._test_op(FloorDivideModel(), (x, y), tester_factory, use_random_test_inputs=False) + + # Broadcasting: scalar dividend, tensor divisor + x = torch.tensor([10]) + y = torch.arange(1, 5) # [1, 2, 3, 4] + self._test_op(FloorDivideModel(), (x, y), tester_factory, use_random_test_inputs=False) + + # Broadcasting: tensor dividend, scalar divisor + x = torch.arange(-10, 10) + y = torch.tensor([2]) + self._test_op(FloorDivideModel(), (x, y), tester_factory, use_random_test_inputs=False) + + def test_floor_divide_shapes(self, tester_factory: Callable) -> None: + # Test with different tensor shapes + model = FloorDivideModel() + + # 1D tensor + x = torch.randint(-100, 100, (20,)) + y = torch.full_like(x, 2) # Divisor of 2 + self._test_op(model, (x, y), tester_factory, use_random_test_inputs=False) + + # 2D tensor + x = torch.randint(-100, 100, (5, 10)) + y = torch.full_like(x, 2) # Divisor of 2 + self._test_op(model, (x, y), tester_factory, 
use_random_test_inputs=False) + + # 3D tensor + x = torch.randint(-100, 100, (3, 4, 5)) + y = torch.full_like(x, 2) # Divisor of 2 + self._test_op(model, (x, y), tester_factory, use_random_test_inputs=False) + + # 4D tensor + x = torch.randint(-100, 100, (2, 3, 4, 5)) + y = torch.full_like(x, 2) # Divisor of 2 + self._test_op(model, (x, y), tester_factory, use_random_test_inputs=False) + + # 5D tensor + x = torch.randint(-100, 100, (2, 2, 3, 4, 5)) + y = torch.full_like(x, 2) # Divisor of 2 + self._test_op(model, (x, y), tester_factory, use_random_test_inputs=False) + + def test_floor_divide_values(self, tester_factory: Callable) -> None: + # Test with different value ranges + model = FloorDivideModel() + + # Test with specific dividend values + x = torch.tensor([-7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7]) + + # Divide by 2 + y = torch.tensor([2]).expand_as(x).clone() + self._test_op(model, (x, y), tester_factory, use_random_test_inputs=False) + + # Divide by -2 + y = torch.tensor([-2]).expand_as(x).clone() + self._test_op(model, (x, y), tester_factory, use_random_test_inputs=False) + + # Divide by 3 + y = torch.tensor([3]).expand_as(x).clone() + self._test_op(model, (x, y), tester_factory, use_random_test_inputs=False) + + # Divide by -3 + y = torch.tensor([-3]).expand_as(x).clone() + self._test_op(model, (x, y), tester_factory, use_random_test_inputs=False) + + # Test with floating point values + x = torch.tensor([-3.8, -3.5, -3.2, -0.8, -0.5, -0.2, 0.0, 0.2, 0.5, 0.8, 3.2, 3.5, 3.8]) + + # Divide by 2.0 + y = torch.tensor([2.0]).expand_as(x).clone() + self._test_op(model, (x, y), tester_factory, use_random_test_inputs=False) + + # Divide by -2.0 + y = torch.tensor([-2.0]).expand_as(x).clone() + self._test_op(model, (x, y), tester_factory, use_random_test_inputs=False) + + def test_floor_divide_edge_cases(self, tester_factory: Callable) -> None: + # Test edge cases + model = FloorDivideModel() + + # Zero dividend + x = torch.zeros(10) + y = 
torch.full_like(x, 2) + self._test_op(model, (x, y), tester_factory, use_random_test_inputs=False) + + # Division that results in exact integers + x = torch.tensor([0, 2, 4, 6, 8, 10]) + y = torch.full_like(x, 2) + self._test_op(model, (x, y), tester_factory, use_random_test_inputs=False) + + # Division with remainder + x = torch.tensor([1, 3, 5, 7, 9]) + y = torch.full_like(x, 2) + self._test_op(model, (x, y), tester_factory, use_random_test_inputs=False) + + # Tensor with infinity + x = torch.tensor([float('inf'), float('-inf'), 10.0, -10.0]) + y = torch.full_like(x, 2) + self._test_op(model, (x, y), tester_factory, use_random_test_inputs=False) + + # Tensor with NaN + x = torch.tensor([float('nan'), 10.0, -10.0]) + y = torch.full_like(x, 2) + self._test_op(model, (x, y), tester_factory, use_random_test_inputs=False) + + # Very large values + x = torch.tensor([1e10, -1e10]) + y = torch.full_like(x, 3) + self._test_op(model, (x, y), tester_factory, use_random_test_inputs=False) + + # Very small values + x = torch.tensor([1e-10, -1e-10]) + y = torch.full_like(x, 2) + self._test_op(model, (x, y), tester_factory, use_random_test_inputs=False) + + def test_floor_divide_scalar(self, tester_factory: Callable) -> None: + # Test with scalar input (1-element tensor) + model = FloorDivideModel() + + # Positive dividend, positive divisor + x = torch.tensor([7]) + y = torch.tensor([2]) + self._test_op(model, (x, y), tester_factory, use_random_test_inputs=False) + + # Negative dividend, positive divisor + x = torch.tensor([-7]) + y = torch.tensor([2]) + self._test_op(model, (x, y), tester_factory, use_random_test_inputs=False) + + # Zero dividend + x = torch.tensor([0]) + y = torch.tensor([2]) + self._test_op(model, (x, y), tester_factory, use_random_test_inputs=False) + + # Positive dividend, negative divisor + x = torch.tensor([7]) + y = torch.tensor([-2]) + self._test_op(model, (x, y), tester_factory, use_random_test_inputs=False) + + # Negative dividend, negative divisor + 
x = torch.tensor([-7]) + y = torch.tensor([-2]) + self._test_op(model, (x, y), tester_factory, use_random_test_inputs=False) diff --git a/backends/test/suite/operators/test_neg.py b/backends/test/suite/operators/test_neg.py new file mode 100644 index 00000000000..2c3f0f0aeef --- /dev/null +++ b/backends/test/suite/operators/test_neg.py @@ -0,0 +1,99 @@ +# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. + +# pyre-strict + +from typing import Callable + +import torch + +from executorch.backends.test.compliance_suite import ( + dtype_test, + operator_test, + OperatorTest, +) + +class NegModel(torch.nn.Module): + def __init__(self): + super().__init__() + + def forward(self, x): + return torch.neg(x) + +@operator_test +class TestNeg(OperatorTest): + @dtype_test + def test_neg_dtype(self, dtype, tester_factory: Callable) -> None: + # Test with different dtypes + model = NegModel().to(dtype) + self._test_op(model, (torch.rand(10, 10).to(dtype) * 2 - 1,), tester_factory) + + def test_neg_basic(self, tester_factory: Callable) -> None: + # Basic test with default parameters + # Input: tensor with positive and negative values + self._test_op(NegModel(), (torch.randn(10, 10),), tester_factory) + + def test_neg_shapes(self, tester_factory: Callable) -> None: + # Test with different tensor shapes + + # 1D tensor + self._test_op(NegModel(), (torch.randn(20),), tester_factory) + + # 2D tensor + self._test_op(NegModel(), (torch.randn(5, 10),), tester_factory) + + # 3D tensor + self._test_op(NegModel(), (torch.randn(3, 4, 5),), tester_factory) + + # 4D tensor + self._test_op(NegModel(), (torch.randn(2, 3, 4, 5),), tester_factory) + + # 5D tensor + self._test_op(NegModel(), (torch.randn(2, 2, 3, 4, 5),), tester_factory) + + def test_neg_values(self, tester_factory: Callable) -> None: + # Test with different value ranges + + # Small values + self._test_op(NegModel(), (torch.randn(10, 10) * 0.01,), tester_factory) + + # Large values + self._test_op(NegModel(), 
(torch.randn(10, 10) * 1000,), tester_factory) + + # Mixed positive and negative values + self._test_op(NegModel(), (torch.randn(10, 10) * 10,), tester_factory) + + # All positive values + self._test_op(NegModel(), (torch.rand(10, 10) * 10,), tester_factory) + + # All negative values + self._test_op(NegModel(), (torch.rand(10, 10) * -10,), tester_factory) + + # Values close to zero + self._test_op(NegModel(), (torch.randn(10, 10) * 1e-5,), tester_factory) + + # Test double negation (should return to original values) + x = torch.randn(10, 10) + model = NegModel() + double_neg_model = torch.nn.Sequential(model, model) + self._test_op(double_neg_model, (x,), tester_factory) + + def test_neg_edge_cases(self, tester_factory: Callable) -> None: + # Test edge cases + + # Tensor with infinity + x = torch.tensor([float('inf'), float('-inf'), 1.0, -1.0]) + self._test_op(NegModel(), (x,), tester_factory) + + # Tensor with NaN + x = torch.tensor([float('nan'), 1.0, -1.0]) + self._test_op(NegModel(), (x,), tester_factory) + + # Tensor with specific values + x = torch.tensor([-10.0, -1.0, 0.0, 1.0, 10.0]) + self._test_op(NegModel(), (x,), tester_factory) + + def test_neg_scalar(self, tester_factory: Callable) -> None: + # Test with scalar input (1-element tensor) + self._test_op(NegModel(), (torch.tensor([-5.0]),), tester_factory) + self._test_op(NegModel(), (torch.tensor([5.0]),), tester_factory) + self._test_op(NegModel(), (torch.tensor([0.0]),), tester_factory) diff --git a/backends/test/suite/operators/test_pow.py b/backends/test/suite/operators/test_pow.py new file mode 100644 index 00000000000..075c3643af9 --- /dev/null +++ b/backends/test/suite/operators/test_pow.py @@ -0,0 +1,171 @@ +# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. 
+ +# pyre-strict + +from typing import Callable + +import torch + +from executorch.backends.test.compliance_suite import ( + dtype_test, + operator_test, + OperatorTest, +) + +class PowModel(torch.nn.Module): + def __init__(self, exponent=None): + super().__init__() + self.exponent = exponent + + def forward(self, x): + if self.exponent is not None: + return torch.pow(x, self.exponent) + return torch.pow(x, 2) # Default to squaring if no exponent provided + +class PowTensorModel(torch.nn.Module): + def __init__(self): + super().__init__() + + def forward(self, x, y): + return torch.pow(x, y) + +@operator_test +class TestPow(OperatorTest): + @dtype_test + def test_pow_dtype(self, dtype, tester_factory: Callable) -> None: + # Test with different dtypes + model = PowModel(2).to(dtype) + # Use positive values to avoid complex results with fractional powers + self._test_op(model, (torch.rand(10, 10).to(dtype) + 0.1,), tester_factory) + + def test_pow_basic(self, tester_factory: Callable) -> None: + # Basic test with default parameters (squaring) + self._test_op(PowModel(), (torch.rand(10, 10) + 0.1,), tester_factory) + + def test_pow_scalar_exponents(self, tester_factory: Callable) -> None: + # Test with different scalar exponents + + # Power of 0 (should return 1 for all inputs) + self._test_op(PowModel(0), (torch.rand(10, 10) + 0.1,), tester_factory) + + # Power of 1 (should return the input unchanged) + self._test_op(PowModel(1), (torch.rand(10, 10) + 0.1,), tester_factory) + + # Power of 2 (squaring) + self._test_op(PowModel(2), (torch.rand(10, 10) + 0.1,), tester_factory) + + # Power of 3 (cubing) + self._test_op(PowModel(3), (torch.rand(10, 10) + 0.1,), tester_factory) + + # Negative power (-1, reciprocal) + self._test_op(PowModel(-1), (torch.rand(10, 10) + 0.1,), tester_factory) + + # Fractional power (square root) + self._test_op(PowModel(0.5), (torch.rand(10, 10) + 0.1,), tester_factory) + + # Large power + self._test_op(PowModel(10), (torch.rand(10, 10) * 0.5 
+ 0.5,), tester_factory) + + def test_pow_tensor_exponents(self, tester_factory: Callable) -> None: + # Test with tensor exponents + + # Constant exponent tensor + x = torch.rand(10, 10) + 0.1 + y = torch.full_like(x, 2.0) # All elements are 2.0 + self._test_op(PowTensorModel(), (x, y), tester_factory) + + # Random exponent tensor (positive values) + x = torch.rand(10, 10) + 0.1 + y = torch.rand(10, 10) * 3 # Random values between 0 and 3 + self._test_op(PowTensorModel(), (x, y), tester_factory) + + # Mixed positive and negative exponents + x = torch.rand(10, 10) + 0.1 + y = torch.randn(10, 10) # Random values with both positive and negative + self._test_op(PowTensorModel(), (x, y), tester_factory) + + # Broadcasting: scalar base, tensor exponent + x = torch.tensor([2.0]) + y = torch.arange(1, 5).float() # [1, 2, 3, 4] + self._test_op(PowTensorModel(), (x, y), tester_factory) + + # Broadcasting: tensor base, scalar exponent + x = torch.arange(1, 5).float() # [1, 2, 3, 4] + y = torch.tensor([2.0]) + self._test_op(PowTensorModel(), (x, y), tester_factory) + + def test_pow_shapes(self, tester_factory: Callable) -> None: + # Test with different tensor shapes + model = PowModel(2) # Square the input + + # 1D tensor + self._test_op(model, (torch.rand(20) + 0.1,), tester_factory) + + # 2D tensor + self._test_op(model, (torch.rand(5, 10) + 0.1,), tester_factory) + + # 3D tensor + self._test_op(model, (torch.rand(3, 4, 5) + 0.1,), tester_factory) + + # 4D tensor + self._test_op(model, (torch.rand(2, 3, 4, 5) + 0.1,), tester_factory) + + # 5D tensor + self._test_op(model, (torch.rand(2, 2, 3, 4, 5) + 0.1,), tester_factory) + + def test_pow_values(self, tester_factory: Callable) -> None: + # Test with different value ranges + model = PowModel(2) # Square the input + + # Small values + self._test_op(model, (torch.rand(10, 10) * 0.01 + 0.01,), tester_factory) + + # Values around 1 + self._test_op(model, (torch.rand(10, 10) * 0.2 + 0.9,), tester_factory) + + # Medium values + 
self._test_op(model, (torch.rand(10, 10) * 10 + 0.1,), tester_factory) + + # Large values (use smaller exponent to avoid overflow) + model = PowModel(0.5) # Square root to avoid overflow + self._test_op(model, (torch.rand(10, 10) * 1000 + 0.1,), tester_factory) + + def test_pow_edge_cases(self, tester_factory: Callable) -> None: + # Test edge cases + + # 0^0 = 1 (by convention) + x = torch.zeros(1) + y = torch.zeros(1) + self._test_op(PowTensorModel(), (x, y), tester_factory) + + # x^0 = 1 for any x + x = torch.randn(10) + y = torch.zeros(10) + self._test_op(PowTensorModel(), (x, y), tester_factory) + + # 0^y = 0 for y > 0 + x = torch.zeros(5) + y = torch.arange(1, 6).float() + self._test_op(PowTensorModel(), (x, y), tester_factory) + + # 1^y = 1 for any y + x = torch.ones(10) + y = torch.randn(10) + self._test_op(PowTensorModel(), (x, y), tester_factory) + + # Tensor with infinity + x = torch.tensor([float('inf'), 2.0, 3.0]) + y = torch.tensor([2.0, 2.0, 2.0]) + self._test_op(PowTensorModel(), (x, y), tester_factory) + + # Tensor with NaN + x = torch.tensor([float('nan'), 2.0, 3.0]) + y = torch.tensor([2.0, 2.0, 2.0]) + self._test_op(PowTensorModel(), (x, y), tester_factory) + + def test_pow_scalar(self, tester_factory: Callable) -> None: + # Test with scalar input (1-element tensor) + model = PowModel(2) # Square the input + self._test_op(model, (torch.tensor([2.0]),), tester_factory) + self._test_op(model, (torch.tensor([0.5]),), tester_factory) + self._test_op(model, (torch.tensor([0.0]),), tester_factory) diff --git a/backends/test/suite/operators/test_round.py b/backends/test/suite/operators/test_round.py new file mode 100644 index 00000000000..1af1b433773 --- /dev/null +++ b/backends/test/suite/operators/test_round.py @@ -0,0 +1,164 @@ +# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. 
+ +# pyre-strict + +from typing import Callable + +import torch + +from executorch.backends.test.compliance_suite import ( + dtype_test, + operator_test, + OperatorTest, +) + +class RoundModel(torch.nn.Module): + def __init__(self, decimals=None): + super().__init__() + self.decimals = decimals + + def forward(self, x): + if self.decimals is not None: + return torch.round(x, decimals=self.decimals) + return torch.round(x) + +@operator_test +class TestRound(OperatorTest): + @dtype_test + def test_round_dtype(self, dtype, tester_factory: Callable) -> None: + # Test with different dtypes + model = RoundModel().to(dtype) + self._test_op(model, (torch.rand(10, 10).to(dtype) * 10 - 5,), tester_factory) + + def test_round_basic(self, tester_factory: Callable) -> None: + # Basic test with default parameters + # Input: tensor with fractional values + self._test_op(RoundModel(), (torch.randn(10, 10) * 5,), tester_factory) + + def test_round_shapes(self, tester_factory: Callable) -> None: + # Test with different tensor shapes + + # 1D tensor + self._test_op(RoundModel(), (torch.randn(20) * 5,), tester_factory) + + # 2D tensor + self._test_op(RoundModel(), (torch.randn(5, 10) * 5,), tester_factory) + + # 3D tensor + self._test_op(RoundModel(), (torch.randn(3, 4, 5) * 5,), tester_factory) + + # 4D tensor + self._test_op(RoundModel(), (torch.randn(2, 3, 4, 5) * 5,), tester_factory) + + # 5D tensor + self._test_op(RoundModel(), (torch.randn(2, 2, 3, 4, 5) * 5,), tester_factory) + + def test_round_values(self, tester_factory: Callable) -> None: + # Test with different value ranges + + # Small fractional values + self._test_op(RoundModel(), (torch.randn(10, 10) * 0.1,), tester_factory) + + # Medium fractional values + self._test_op(RoundModel(), (torch.randn(10, 10) * 5,), tester_factory) + + # Large fractional values + self._test_op(RoundModel(), (torch.randn(10, 10) * 1000,), tester_factory) + + # Mixed positive and negative values + self._test_op(RoundModel(), (torch.randn(10, 
10) * 10,), tester_factory) + + # Values with specific fractional parts + x = torch.arange(-5, 5, 0.5) # [-5.0, -4.5, -4.0, ..., 4.0, 4.5] + self._test_op(RoundModel(), (x,), tester_factory) + + def test_round_edge_cases(self, tester_factory: Callable) -> None: + # Test edge cases + + # Integer values (should remain unchanged) + self._test_op(RoundModel(), (torch.arange(-5, 6).float(),), tester_factory) + + # Values exactly halfway between integers (should round to even) + x = torch.tensor([-2.5, -1.5, -0.5, 0.5, 1.5, 2.5]) + self._test_op(RoundModel(), (x,), tester_factory) + + # Values slightly above and below halfway + x = torch.tensor([-2.51, -2.49, -1.51, -1.49, -0.51, -0.49, 0.49, 0.51, 1.49, 1.51, 2.49, 2.51]) + self._test_op(RoundModel(), (x,), tester_factory) + + # Zero tensor + self._test_op(RoundModel(), (torch.zeros(10, 10),), tester_factory) + + # Tensor with infinity + x = torch.tensor([float('inf'), float('-inf'), 1.4, -1.4]) + self._test_op(RoundModel(), (x,), tester_factory) + + # Tensor with NaN + x = torch.tensor([float('nan'), 1.4, -1.4]) + self._test_op(RoundModel(), (x,), tester_factory) + + # Very large values (where fractional part becomes insignificant) + x = torch.tensor([1e10, 1e10 + 0.4, 1e10 + 0.6]) + self._test_op(RoundModel(), (x,), tester_factory) + + # Very small values close to zero + x = torch.tensor([-0.1, -0.01, -0.001, 0.001, 0.01, 0.1]) + self._test_op(RoundModel(), (x,), tester_factory) + + def test_round_scalar(self, tester_factory: Callable) -> None: + # Test with scalar input (1-element tensor) + self._test_op(RoundModel(), (torch.tensor([1.4]),), tester_factory) + self._test_op(RoundModel(), (torch.tensor([1.5]),), tester_factory) + self._test_op(RoundModel(), (torch.tensor([1.6]),), tester_factory) + self._test_op(RoundModel(), (torch.tensor([-1.4]),), tester_factory) + self._test_op(RoundModel(), (torch.tensor([-1.5]),), tester_factory) + self._test_op(RoundModel(), (torch.tensor([-1.6]),), tester_factory) + 
self._test_op(RoundModel(), (torch.tensor([0.0]),), tester_factory) + + def test_round_decimals(self, tester_factory: Callable) -> None: + # Test with different decimal places + + # Round to 1 decimal place + x = torch.tensor([1.44, 1.45, 1.46, -1.44, -1.45, -1.46]) + self._test_op(RoundModel(decimals=1), (x,), tester_factory) + + # Round to 2 decimal places + x = torch.tensor([1.444, 1.445, 1.446, -1.444, -1.445, -1.446]) + self._test_op(RoundModel(decimals=2), (x,), tester_factory) + + # Round to negative decimal places (tens) + x = torch.tensor([14.4, 15.5, 16.6, -14.4, -15.5, -16.6]) + self._test_op(RoundModel(decimals=-1), (x,), tester_factory) + + # Round to negative decimal places (hundreds) + x = torch.tensor([144.4, 155.5, 166.6, -144.4, -155.5, -166.6]) + self._test_op(RoundModel(decimals=-2), (x,), tester_factory) + + def test_round_decimals_edge_cases(self, tester_factory: Callable) -> None: + # Test edge cases with decimal places + + # Very small values with positive decimals + x = torch.tensor([0.0001, 0.00015, 0.0002, -0.0001, -0.00015, -0.0002]) + self._test_op(RoundModel(decimals=4), (x,), tester_factory) + + # Very large values with negative decimals + x = torch.tensor([12345.6, 12350.0, 12354.9, -12345.6, -12350.0, -12354.9]) + self._test_op(RoundModel(decimals=-2), (x,), tester_factory) + + # Zero with various decimal places + x = torch.zeros(5) + self._test_op(RoundModel(decimals=2), (x,), tester_factory) + self._test_op(RoundModel(decimals=-2), (x,), tester_factory) + + # Infinity and NaN with various decimal places + x = torch.tensor([float('inf'), float('-inf'), float('nan')]) + self._test_op(RoundModel(decimals=2), (x,), tester_factory) + self._test_op(RoundModel(decimals=-2), (x,), tester_factory) + + # Values exactly at the rounding threshold for different decimal places + x = torch.tensor([0.05, 0.15, 0.25, 0.35, 0.45, 0.55, 0.65, 0.75, 0.85, 0.95]) + self._test_op(RoundModel(decimals=1), (x,), tester_factory) + + # Negative values 
exactly at the rounding threshold + x = torch.tensor([-0.05, -0.15, -0.25, -0.35, -0.45, -0.55, -0.65, -0.75, -0.85, -0.95]) + self._test_op(RoundModel(decimals=1), (x,), tester_factory) diff --git a/backends/test/suite/operators/test_rsqrt.py b/backends/test/suite/operators/test_rsqrt.py new file mode 100644 index 00000000000..029ff85c6b9 --- /dev/null +++ b/backends/test/suite/operators/test_rsqrt.py @@ -0,0 +1,104 @@ +# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. + +# pyre-strict + +from typing import Callable + +import torch + +from executorch.backends.test.compliance_suite import ( + dtype_test, + operator_test, + OperatorTest, +) + +class RsqrtModel(torch.nn.Module): + def __init__(self): + super().__init__() + + def forward(self, x): + return torch.rsqrt(x) + +@operator_test +class TestRsqrt(OperatorTest): + @dtype_test + def test_rsqrt_dtype(self, dtype, tester_factory: Callable) -> None: + # Test with different dtypes + model = RsqrtModel().to(dtype) + # Use positive values only for rsqrt to avoid division by zero + self._test_op(model, (torch.rand(10, 10).to(dtype) + 0.01,), tester_factory) + + def test_rsqrt_basic(self, tester_factory: Callable) -> None: + # Basic test with default parameters + # Input: tensor with positive values + self._test_op(RsqrtModel(), (torch.rand(10, 10) * 10 + 0.01,), tester_factory) + + def test_rsqrt_shapes(self, tester_factory: Callable) -> None: + # Test with different tensor shapes + + # 1D tensor + self._test_op(RsqrtModel(), (torch.rand(20) + 0.01,), tester_factory) + + # 2D tensor + self._test_op(RsqrtModel(), (torch.rand(5, 10) + 0.01,), tester_factory) + + # 3D tensor + self._test_op(RsqrtModel(), (torch.rand(3, 4, 5) + 0.01,), tester_factory) + + # 4D tensor + self._test_op(RsqrtModel(), (torch.rand(2, 3, 4, 5) + 0.01,), tester_factory) + + # 5D tensor + self._test_op(RsqrtModel(), (torch.rand(2, 2, 3, 4, 5) + 0.01,), tester_factory) + + def test_rsqrt_values(self, tester_factory: Callable) 
-> None: + # Test with different value ranges + + # Small values (rsqrt of small values gives large results) + self._test_op(RsqrtModel(), (torch.rand(10, 10) * 0.01 + 0.01,), tester_factory) + + # Values around 1 (rsqrt(1) = 1) + self._test_op(RsqrtModel(), (torch.rand(10, 10) * 0.2 + 0.9,), tester_factory) + + # Perfect squares + x = torch.tensor([1.0, 4.0, 9.0, 16.0, 25.0, 36.0, 49.0, 64.0, 81.0, 100.0]) + self._test_op(RsqrtModel(), (x,), tester_factory) + + # Medium values + self._test_op(RsqrtModel(), (torch.rand(10, 10) * 10 + 0.01,), tester_factory) + + # Large values (rsqrt of large values gives small results) + self._test_op(RsqrtModel(), (torch.rand(10, 10) * 1000 + 0.01,), tester_factory) + + # Very large values + self._test_op(RsqrtModel(), (torch.rand(5, 5) * 1e10 + 0.01,), tester_factory) + + def test_rsqrt_edge_cases(self, tester_factory: Callable) -> None: + # Test edge cases + + # Tensor with specific values + x = torch.tensor([1.0, 2.0, 4.0, 0.25, 0.5, 0.01]) + self._test_op(RsqrtModel(), (x,), tester_factory) + + # Tensor with infinity + x = torch.tensor([float('inf'), 1.0, 4.0]) + self._test_op(RsqrtModel(), (x,), tester_factory) + + # Tensor with NaN + x = torch.tensor([float('nan'), 1.0, 4.0]) + self._test_op(RsqrtModel(), (x,), tester_factory) + + # Values very close to zero (but not zero) + x = torch.tensor([1e-5, 1e-10, 1e-15]) + self._test_op(RsqrtModel(), (x,), tester_factory) + + # Values where rsqrt(x) = 1/sqrt(x) has a simple result + x = torch.tensor([1.0, 4.0, 9.0, 16.0]) # rsqrt gives [1.0, 0.5, 0.33..., 0.25] + self._test_op(RsqrtModel(), (x,), tester_factory) + + def test_rsqrt_scalar(self, tester_factory: Callable) -> None: + # Test with scalar input (1-element tensor) + self._test_op(RsqrtModel(), (torch.tensor([1.0]),), tester_factory) + self._test_op(RsqrtModel(), (torch.tensor([4.0]),), tester_factory) + self._test_op(RsqrtModel(), (torch.tensor([0.25]),), tester_factory) + self._test_op(RsqrtModel(), 
(torch.tensor([100.0]),), tester_factory) diff --git a/backends/test/suite/operators/test_sqrt.py b/backends/test/suite/operators/test_sqrt.py new file mode 100644 index 00000000000..70b65a0e348 --- /dev/null +++ b/backends/test/suite/operators/test_sqrt.py @@ -0,0 +1,103 @@ +# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. + +# pyre-strict + +from typing import Callable + +import torch + +from executorch.backends.test.compliance_suite import ( + dtype_test, + operator_test, + OperatorTest, +) + +class SqrtModel(torch.nn.Module): + def __init__(self): + super().__init__() + + def forward(self, x): + return torch.sqrt(x) + +@operator_test +class TestSqrt(OperatorTest): + @dtype_test + def test_sqrt_dtype(self, dtype, tester_factory: Callable) -> None: + # Test with different dtypes + model = SqrtModel().to(dtype) + # Use non-negative values only for sqrt + self._test_op(model, (torch.rand(10, 10).to(dtype),), tester_factory) + + def test_sqrt_basic(self, tester_factory: Callable) -> None: + # Basic test with default parameters + # Input: tensor with non-negative values + self._test_op(SqrtModel(), (torch.rand(10, 10) * 10,), tester_factory) + + def test_sqrt_shapes(self, tester_factory: Callable) -> None: + # Test with different tensor shapes + + # 1D tensor + self._test_op(SqrtModel(), (torch.rand(20),), tester_factory) + + # 2D tensor + self._test_op(SqrtModel(), (torch.rand(5, 10),), tester_factory) + + # 3D tensor + self._test_op(SqrtModel(), (torch.rand(3, 4, 5),), tester_factory) + + # 4D tensor + self._test_op(SqrtModel(), (torch.rand(2, 3, 4, 5),), tester_factory) + + # 5D tensor + self._test_op(SqrtModel(), (torch.rand(2, 2, 3, 4, 5),), tester_factory) + + def test_sqrt_values(self, tester_factory: Callable) -> None: + # Test with different value ranges + + # Small values close to zero + self._test_op(SqrtModel(), (torch.rand(10, 10) * 0.01,), tester_factory) + + # Values around 1 + self._test_op(SqrtModel(), (torch.rand(10, 10) * 
0.2 + 0.9,), tester_factory) + + # Perfect squares + x = torch.tensor([0.0, 1.0, 4.0, 9.0, 16.0, 25.0, 36.0, 49.0, 64.0, 81.0, 100.0]) + self._test_op(SqrtModel(), (x,), tester_factory) + + # Medium values + self._test_op(SqrtModel(), (torch.rand(10, 10) * 10,), tester_factory) + + # Large values + self._test_op(SqrtModel(), (torch.rand(10, 10) * 1000,), tester_factory) + + # Very large values + self._test_op(SqrtModel(), (torch.rand(5, 5) * 1e10,), tester_factory) + + def test_sqrt_edge_cases(self, tester_factory: Callable) -> None: + # Test edge cases + + # Zero tensor + self._test_op(SqrtModel(), (torch.zeros(10, 10),), tester_factory) + + # Tensor with specific values + x = torch.tensor([0.0, 1.0, 2.0, 4.0, 0.25, 0.5, 0.01]) + self._test_op(SqrtModel(), (x,), tester_factory) + + # Tensor with infinity + x = torch.tensor([float('inf'), 1.0, 4.0]) + self._test_op(SqrtModel(), (x,), tester_factory) + + # Tensor with NaN + x = torch.tensor([float('nan'), 1.0, 4.0]) + self._test_op(SqrtModel(), (x,), tester_factory) + + # Values very close to zero + x = torch.tensor([1e-10, 1e-20, 1e-30]) + self._test_op(SqrtModel(), (x,), tester_factory) + + def test_sqrt_scalar(self, tester_factory: Callable) -> None: + # Test with scalar input (1-element tensor) + self._test_op(SqrtModel(), (torch.tensor([0.0]),), tester_factory) + self._test_op(SqrtModel(), (torch.tensor([1.0]),), tester_factory) + self._test_op(SqrtModel(), (torch.tensor([4.0]),), tester_factory) + self._test_op(SqrtModel(), (torch.tensor([0.25]),), tester_factory) diff --git a/backends/test/suite/operators/test_square.py b/backends/test/suite/operators/test_square.py new file mode 100644 index 00000000000..d904497e4aa --- /dev/null +++ b/backends/test/suite/operators/test_square.py @@ -0,0 +1,116 @@ +# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. 
+ +# pyre-strict + +from typing import Callable + +import torch + +from executorch.backends.test.compliance_suite import ( + dtype_test, + operator_test, + OperatorTest, +) + +class SquareModel(torch.nn.Module): + def __init__(self): + super().__init__() + + def forward(self, x): + return torch.square(x) + +@operator_test +class TestSquare(OperatorTest): + @dtype_test + def test_square_dtype(self, dtype, tester_factory: Callable) -> None: + # Test with different dtypes + model = SquareModel().to(dtype) + self._test_op(model, (torch.rand(10, 10).to(dtype) * 2 - 1,), tester_factory) + + def test_square_basic(self, tester_factory: Callable) -> None: + # Basic test with default parameters + # Input: tensor with positive and negative values + self._test_op(SquareModel(), (torch.randn(10, 10),), tester_factory) + + def test_square_shapes(self, tester_factory: Callable) -> None: + # Test with different tensor shapes + + # 1D tensor + self._test_op(SquareModel(), (torch.randn(20),), tester_factory) + + # 2D tensor + self._test_op(SquareModel(), (torch.randn(5, 10),), tester_factory) + + # 3D tensor + self._test_op(SquareModel(), (torch.randn(3, 4, 5),), tester_factory) + + # 4D tensor + self._test_op(SquareModel(), (torch.randn(2, 3, 4, 5),), tester_factory) + + # 5D tensor + self._test_op(SquareModel(), (torch.randn(2, 2, 3, 4, 5),), tester_factory) + + def test_square_values(self, tester_factory: Callable) -> None: + # Test with different value ranges + + # Small values + self._test_op(SquareModel(), (torch.randn(10, 10) * 0.01,), tester_factory) + + # Values around 1 + self._test_op(SquareModel(), (torch.randn(10, 10) * 0.2 + 0.9,), tester_factory) + + # Medium values + self._test_op(SquareModel(), (torch.randn(10, 10) * 10,), tester_factory) + + # Large values (be careful with overflow) + self._test_op(SquareModel(), (torch.randn(10, 10) * 100,), tester_factory) + + # Mixed positive and negative values + self._test_op(SquareModel(), (torch.randn(10, 10) * 5,), 
tester_factory) + + # All positive values + self._test_op(SquareModel(), (torch.rand(10, 10) * 5,), tester_factory) + + # All negative values + self._test_op(SquareModel(), (torch.rand(10, 10) * -5,), tester_factory) + + # Values close to zero + self._test_op(SquareModel(), (torch.randn(10, 10) * 1e-5,), tester_factory) + + # Integer values + x = torch.arange(-5, 6).float() # [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5] + self._test_op(SquareModel(), (x,), tester_factory) + + def test_square_edge_cases(self, tester_factory: Callable) -> None: + # Test edge cases + + # Zero tensor + self._test_op(SquareModel(), (torch.zeros(10, 10),), tester_factory) + + # Tensor with specific values + x = torch.tensor([-2.0, -1.0, -0.5, 0.0, 0.5, 1.0, 2.0]) + self._test_op(SquareModel(), (x,), tester_factory) + + # Tensor with infinity + x = torch.tensor([float('inf'), float('-inf'), 1.0, -1.0]) + self._test_op(SquareModel(), (x,), tester_factory) + + # Tensor with NaN + x = torch.tensor([float('nan'), 1.0, -1.0]) + self._test_op(SquareModel(), (x,), tester_factory) + + # Very large values (close to overflow for some dtypes) + x = torch.tensor([1e10, -1e10]) + self._test_op(SquareModel(), (x,), tester_factory) + + # Very small values (close to underflow) + x = torch.tensor([1e-10, -1e-10]) + self._test_op(SquareModel(), (x,), tester_factory) + + def test_square_scalar(self, tester_factory: Callable) -> None: + # Test with scalar input (1-element tensor) + self._test_op(SquareModel(), (torch.tensor([-5.0]),), tester_factory) + self._test_op(SquareModel(), (torch.tensor([5.0]),), tester_factory) + self._test_op(SquareModel(), (torch.tensor([0.0]),), tester_factory) + self._test_op(SquareModel(), (torch.tensor([0.5]),), tester_factory) + self._test_op(SquareModel(), (torch.tensor([-0.5]),), tester_factory) diff --git a/backends/test/suite/operators/test_trunc.py b/backends/test/suite/operators/test_trunc.py new file mode 100644 index 00000000000..be1967afc1f --- /dev/null +++ 
b/backends/test/suite/operators/test_trunc.py @@ -0,0 +1,109 @@ +# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. + +# pyre-strict + +from typing import Callable + +import torch + +from executorch.backends.test.compliance_suite import ( + dtype_test, + operator_test, + OperatorTest, +) + +class TruncModel(torch.nn.Module): + def __init__(self): + super().__init__() + + def forward(self, x): + return torch.trunc(x) + +@operator_test +class TestTrunc(OperatorTest): + @dtype_test + def test_trunc_dtype(self, dtype, tester_factory: Callable) -> None: + # Test with different dtypes + model = TruncModel().to(dtype) + self._test_op(model, (torch.rand(10, 10).to(dtype) * 10 - 5,), tester_factory) + + def test_trunc_basic(self, tester_factory: Callable) -> None: + # Basic test with default parameters + # Input: tensor with fractional values + self._test_op(TruncModel(), (torch.randn(10, 10) * 5,), tester_factory) + + def test_trunc_shapes(self, tester_factory: Callable) -> None: + # Test with different tensor shapes + + # 1D tensor + self._test_op(TruncModel(), (torch.randn(20) * 5,), tester_factory) + + # 2D tensor + self._test_op(TruncModel(), (torch.randn(5, 10) * 5,), tester_factory) + + # 3D tensor + self._test_op(TruncModel(), (torch.randn(3, 4, 5) * 5,), tester_factory) + + # 4D tensor + self._test_op(TruncModel(), (torch.randn(2, 3, 4, 5) * 5,), tester_factory) + + # 5D tensor + self._test_op(TruncModel(), (torch.randn(2, 2, 3, 4, 5) * 5,), tester_factory) + + def test_trunc_values(self, tester_factory: Callable) -> None: + # Test with different value ranges + + # Small fractional values + self._test_op(TruncModel(), (torch.randn(10, 10) * 0.1,), tester_factory) + + # Medium fractional values + self._test_op(TruncModel(), (torch.randn(10, 10) * 5,), tester_factory) + + # Large fractional values + self._test_op(TruncModel(), (torch.randn(10, 10) * 1000,), tester_factory) + + # Mixed positive and negative values + self._test_op(TruncModel(), 
(torch.randn(10, 10) * 10,), tester_factory) + + # Values with specific fractional parts + x = torch.arange(-5, 5, 0.5) # [-5.0, -4.5, -4.0, ..., 4.0, 4.5] + self._test_op(TruncModel(), (x,), tester_factory) + + def test_trunc_edge_cases(self, tester_factory: Callable) -> None: + # Test edge cases + + # Integer values (should remain unchanged) + self._test_op(TruncModel(), (torch.arange(-5, 6).float(),), tester_factory) + + # Values with different fractional parts + x = torch.tensor([-2.9, -2.5, -2.1, -0.9, -0.5, -0.1, 0.0, 0.1, 0.5, 0.9, 2.1, 2.5, 2.9]) + self._test_op(TruncModel(), (x,), tester_factory) + + # Zero tensor + self._test_op(TruncModel(), (torch.zeros(10, 10),), tester_factory) + + # Tensor with infinity + x = torch.tensor([float('inf'), float('-inf'), 1.4, -1.4]) + self._test_op(TruncModel(), (x,), tester_factory) + + # Tensor with NaN + x = torch.tensor([float('nan'), 1.4, -1.4]) + self._test_op(TruncModel(), (x,), tester_factory) + + # Very large values (where fractional part becomes insignificant) + x = torch.tensor([1e10, 1e10 + 0.4, 1e10 + 0.6]) + self._test_op(TruncModel(), (x,), tester_factory) + + # Very small values close to zero + x = torch.tensor([-0.1, -0.01, -0.001, 0.001, 0.01, 0.1]) + self._test_op(TruncModel(), (x,), tester_factory) + + def test_trunc_scalar(self, tester_factory: Callable) -> None: + # Test with scalar input (1-element tensor) + self._test_op(TruncModel(), (torch.tensor([1.4]),), tester_factory) + self._test_op(TruncModel(), (torch.tensor([1.5]),), tester_factory) + self._test_op(TruncModel(), (torch.tensor([1.6]),), tester_factory) + self._test_op(TruncModel(), (torch.tensor([-1.4]),), tester_factory) + self._test_op(TruncModel(), (torch.tensor([-1.5]),), tester_factory) + self._test_op(TruncModel(), (torch.tensor([-1.6]),), tester_factory) + self._test_op(TruncModel(), (torch.tensor([0.0]),), tester_factory) From 6acef0f6bb0765b434faeb1a277e8ae9160c3e4f Mon Sep 17 00:00:00 2001 From: Gregory James Comer Date: Fri, 
25 Jul 2025 21:30:20 -0700 Subject: [PATCH 9/9] Update [ghstack-poisoned] --- backends/test/suite/operators/test_conv1d.py | 144 ++++++++----- backends/test/suite/operators/test_conv2d.py | 182 +++++++++++------ backends/test/suite/operators/test_conv3d.py | 176 ++++++++++------ .../suite/operators/test_convtranspose1d.py | 156 +++++++++----- .../suite/operators/test_convtranspose2d.py | 190 ++++++++++++------ .../suite/operators/test_convtranspose3d.py | 184 +++++++++++------ 6 files changed, 692 insertions(+), 340 deletions(-) diff --git a/backends/test/suite/operators/test_conv1d.py b/backends/test/suite/operators/test_conv1d.py index 1efd7685c18..6f1b840861b 100644 --- a/backends/test/suite/operators/test_conv1d.py +++ b/backends/test/suite/operators/test_conv1d.py @@ -1,17 +1,22 @@ -# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
-# pyre-strict +# pyre-unsafe -from typing import Callable import torch +from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.compliance_suite import ( +from executorch.backends.test.suite.operators import ( dtype_test, operator_test, OperatorTest, ) + class Model(torch.nn.Module): def __init__( self, @@ -37,53 +42,96 @@ def __init__( bias=bias, padding_mode=padding_mode, ) - + def forward(self, x): return self.conv(x) + @operator_test -class TestConv1d(OperatorTest): +class Conv1d(OperatorTest): @dtype_test - def test_conv1d_dtype(self, dtype, tester_factory: Callable) -> None: - # Input shape: (batch_size, in_channels, length) - self._test_op(Model().to(dtype), ((torch.rand(2, 3, 10) * 10).to(dtype),), tester_factory) - - def test_conv1d_basic(self, tester_factory: Callable) -> None: - # Basic test with default parameters - self._test_op(Model(), (torch.randn(2, 3, 10),), tester_factory) - - def test_conv1d_kernel_size(self, tester_factory: Callable) -> None: - # Test with different kernel sizes - self._test_op(Model(kernel_size=1), (torch.randn(2, 3, 10),), tester_factory) - self._test_op(Model(kernel_size=5), (torch.randn(2, 3, 10),), tester_factory) - - def test_conv1d_stride(self, tester_factory: Callable) -> None: - # Test with different stride values - self._test_op(Model(stride=2), (torch.randn(2, 3, 10),), tester_factory) - - def test_conv1d_padding(self, tester_factory: Callable) -> None: - # Test with different padding values - self._test_op(Model(padding=1), (torch.randn(2, 3, 10),), tester_factory) - self._test_op(Model(padding=2), (torch.randn(2, 3, 10),), tester_factory) - - def test_conv1d_dilation(self, tester_factory: Callable) -> None: - # Test with different dilation values - self._test_op(Model(dilation=2), (torch.randn(2, 3, 10),), tester_factory) - - def test_conv1d_groups(self, tester_factory: Callable) -> None: - # Test with groups=3 (in_channels must be divisible by groups) - 
self._test_op(Model(in_channels=6, out_channels=6, groups=3), (torch.randn(2, 6, 10),), tester_factory) - - def test_conv1d_no_bias(self, tester_factory: Callable) -> None: - # Test without bias - self._test_op(Model(bias=False), (torch.randn(2, 3, 10),), tester_factory) - - def test_conv1d_padding_modes(self, tester_factory: Callable) -> None: - # Test different padding modes + def test_conv1d_dtype(self, flow: TestFlow, dtype) -> None: + self._test_op( + Model().to(dtype), + ((torch.rand(2, 3, 10) * 10).to(dtype),), + flow, + ) + + def test_conv1d_basic(self, flow: TestFlow) -> None: + self._test_op( + Model(), + (torch.randn(2, 3, 10),), + flow, + ) + + def test_conv1d_kernel_size(self, flow: TestFlow) -> None: + self._test_op( + Model(kernel_size=1), + (torch.randn(2, 3, 10),), + flow, + ) + self._test_op( + Model(kernel_size=5), + (torch.randn(2, 3, 10),), + flow, + ) + + def test_conv1d_stride(self, flow: TestFlow) -> None: + self._test_op( + Model(stride=2), + (torch.randn(2, 3, 10),), + flow, + ) + + def test_conv1d_padding(self, flow: TestFlow) -> None: + self._test_op( + Model(padding=1), + (torch.randn(2, 3, 10),), + flow, + ) + self._test_op( + Model(padding=2), + (torch.randn(2, 3, 10),), + flow, + ) + + def test_conv1d_dilation(self, flow: TestFlow) -> None: + self._test_op( + Model(dilation=2), + (torch.randn(2, 3, 10),), + flow, + ) + + def test_conv1d_groups(self, flow: TestFlow) -> None: + self._test_op( + Model(in_channels=6, out_channels=6, groups=3), + (torch.randn(2, 6, 10),), + flow, + ) + + def test_conv1d_no_bias(self, flow: TestFlow) -> None: + self._test_op( + Model(bias=False), + (torch.randn(2, 3, 10),), + flow, + ) + + def test_conv1d_padding_modes(self, flow: TestFlow) -> None: for mode in ["zeros", "reflect", "replicate", "circular"]: - self._test_op(Model(padding=1, padding_mode=mode), (torch.randn(2, 3, 10),), tester_factory) - - def test_conv1d_channels(self, tester_factory: Callable) -> None: - # Test with different channel 
configurations - self._test_op(Model(in_channels=1, out_channels=1), (torch.randn(2, 1, 10),), tester_factory) - self._test_op(Model(in_channels=5, out_channels=10), (torch.randn(2, 5, 10),), tester_factory) + self._test_op( + Model(padding=1, padding_mode=mode), + (torch.randn(2, 3, 10),), + flow, + ) + + def test_conv1d_channels(self, flow: TestFlow) -> None: + self._test_op( + Model(in_channels=1, out_channels=1), + (torch.randn(2, 1, 10),), + flow, + ) + self._test_op( + Model(in_channels=5, out_channels=10), + (torch.randn(2, 5, 10),), + flow, + ) diff --git a/backends/test/suite/operators/test_conv2d.py b/backends/test/suite/operators/test_conv2d.py index 40b3b9dc24b..2a7bae01faa 100644 --- a/backends/test/suite/operators/test_conv2d.py +++ b/backends/test/suite/operators/test_conv2d.py @@ -1,26 +1,32 @@ -# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
-# pyre-strict +# pyre-unsafe -from typing import Callable, Union, Tuple +from typing import Tuple, Union import torch +from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.compliance_suite import ( +from executorch.backends.test.suite.operators import ( dtype_test, operator_test, OperatorTest, ) + class Model(torch.nn.Module): def __init__( self, in_channels=3, out_channels=6, - kernel_size=3, - stride=1, - padding=0, - dilation=1, + kernel_size: Union[int, Tuple[int, int]] = 3, + stride: Union[int, Tuple[int, int]] = 1, + padding: Union[int, Tuple[int, int]] = 0, + dilation: Union[int, Tuple[int, int]] = 1, groups=1, bias=True, padding_mode="zeros", @@ -37,60 +43,118 @@ def __init__( bias=bias, padding_mode=padding_mode, ) - + def forward(self, x): return self.conv(x) + @operator_test -class TestConv2d(OperatorTest): +class Conv2d(OperatorTest): @dtype_test - def test_conv2d_dtype(self, dtype, tester_factory: Callable) -> None: - # Input shape: (batch_size, in_channels, height, width) - self._test_op(Model().to(dtype), ((torch.rand(2, 3, 8, 8) * 10).to(dtype),), tester_factory) - - def test_conv2d_basic(self, tester_factory: Callable) -> None: - # Basic test with default parameters - self._test_op(Model(), (torch.randn(2, 3, 8, 8),), tester_factory) - - def test_conv2d_kernel_size(self, tester_factory: Callable) -> None: - # Test with different kernel sizes - self._test_op(Model(kernel_size=1), (torch.randn(2, 3, 8, 8),), tester_factory) - self._test_op(Model(kernel_size=5), (torch.randn(2, 3, 8, 8),), tester_factory) - self._test_op(Model(kernel_size=(3, 5)), (torch.randn(2, 3, 8, 8),), tester_factory) - - def test_conv2d_stride(self, tester_factory: Callable) -> None: - # Test with different stride values - self._test_op(Model(stride=2), (torch.randn(2, 3, 8, 8),), tester_factory) - self._test_op(Model(stride=(2, 1)), (torch.randn(2, 3, 8, 8),), tester_factory) - - def test_conv2d_padding(self, tester_factory: Callable) -> 
None: - # Test with different padding values - self._test_op(Model(padding=1), (torch.randn(2, 3, 8, 8),), tester_factory) - self._test_op(Model(padding=(1, 2)), (torch.randn(2, 3, 8, 8),), tester_factory) - - def test_conv2d_dilation(self, tester_factory: Callable) -> None: - # Test with different dilation values - self._test_op(Model(dilation=2), (torch.randn(2, 3, 8, 8),), tester_factory) - self._test_op(Model(dilation=(2, 1)), (torch.randn(2, 3, 8, 8),), tester_factory) - - def test_conv2d_groups(self, tester_factory: Callable) -> None: - # Test with groups=3 (in_channels must be divisible by groups) - self._test_op(Model(in_channels=6, out_channels=6, groups=3), (torch.randn(2, 6, 8, 8),), tester_factory) - - def test_conv2d_no_bias(self, tester_factory: Callable) -> None: - # Test without bias - self._test_op(Model(bias=False), (torch.randn(2, 3, 8, 8),), tester_factory) - - def test_conv2d_padding_modes(self, tester_factory: Callable) -> None: - # Test different padding modes + def test_conv2d_dtype(self, flow: TestFlow, dtype) -> None: + self._test_op( + Model().to(dtype), + ((torch.rand(2, 3, 8, 8) * 10).to(dtype),), + flow, + ) + + def test_conv2d_basic(self, flow: TestFlow) -> None: + self._test_op( + Model(), + (torch.randn(2, 3, 8, 8),), + flow, + ) + + def test_conv2d_kernel_size(self, flow: TestFlow) -> None: + self._test_op( + Model(kernel_size=1), + (torch.randn(2, 3, 8, 8),), + flow, + ) + self._test_op( + Model(kernel_size=5), + (torch.randn(2, 3, 8, 8),), + flow, + ) + self._test_op( + Model(kernel_size=(3, 5)), + (torch.randn(2, 3, 8, 8),), + flow, + ) + + def test_conv2d_stride(self, flow: TestFlow) -> None: + self._test_op( + Model(stride=2), + (torch.randn(2, 3, 8, 8),), + flow, + ) + self._test_op( + Model(stride=(2, 1)), + (torch.randn(2, 3, 8, 8),), + flow, + ) + + def test_conv2d_padding(self, flow: TestFlow) -> None: + self._test_op( + Model(padding=1), + (torch.randn(2, 3, 8, 8),), + flow, + ) + self._test_op( + Model(padding=(1, 2)), 
+ (torch.randn(2, 3, 8, 8),), + flow, + ) + + def test_conv2d_dilation(self, flow: TestFlow) -> None: + self._test_op( + Model(dilation=2), + (torch.randn(2, 3, 8, 8),), + flow, + ) + self._test_op( + Model(dilation=(2, 1)), + (torch.randn(2, 3, 8, 8),), + flow, + ) + + def test_conv2d_groups(self, flow: TestFlow) -> None: + self._test_op( + Model(in_channels=6, out_channels=6, groups=3), + (torch.randn(2, 6, 8, 8),), + flow, + ) + + def test_conv2d_no_bias(self, flow: TestFlow) -> None: + self._test_op( + Model(bias=False), + (torch.randn(2, 3, 8, 8),), + flow, + ) + + def test_conv2d_padding_modes(self, flow: TestFlow) -> None: for mode in ["zeros", "reflect", "replicate", "circular"]: - self._test_op(Model(padding=1, padding_mode=mode), (torch.randn(2, 3, 8, 8),), tester_factory) - - def test_conv2d_channels(self, tester_factory: Callable) -> None: - # Test with different channel configurations - self._test_op(Model(in_channels=1, out_channels=1), (torch.randn(2, 1, 8, 8),), tester_factory) - self._test_op(Model(in_channels=5, out_channels=10), (torch.randn(2, 5, 8, 8),), tester_factory) - - def test_conv2d_different_spatial_dims(self, tester_factory: Callable) -> None: - # Test with different height and width - self._test_op(Model(), (torch.randn(2, 3, 10, 8),), tester_factory) + self._test_op( + Model(padding=1, padding_mode=mode), + (torch.randn(2, 3, 8, 8),), + flow, + ) + + def test_conv2d_channels(self, flow: TestFlow) -> None: + self._test_op( + Model(in_channels=1, out_channels=1), + (torch.randn(2, 1, 8, 8),), + flow, + ) + self._test_op( + Model(in_channels=5, out_channels=10), + (torch.randn(2, 5, 8, 8),), + flow, + ) + + def test_conv2d_different_spatial_dims(self, flow: TestFlow) -> None: + self._test_op( + Model(), + (torch.randn(2, 3, 10, 8),), + flow, + ) diff --git a/backends/test/suite/operators/test_conv3d.py b/backends/test/suite/operators/test_conv3d.py index baade4df10e..276ee20734e 100644 --- a/backends/test/suite/operators/test_conv3d.py 
+++ b/backends/test/suite/operators/test_conv3d.py @@ -1,26 +1,32 @@ -# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. -# pyre-strict +# pyre-unsafe -from typing import Callable, Union, Tuple +from typing import Tuple, Union import torch +from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.compliance_suite import ( +from executorch.backends.test.suite.operators import ( dtype_test, operator_test, OperatorTest, ) + class Model(torch.nn.Module): def __init__( self, in_channels=3, out_channels=6, - kernel_size=3, - stride=1, - padding=0, - dilation=1, + kernel_size: Union[int, Tuple[int, int, int]] = 3, + stride: Union[int, Tuple[int, int, int]] = 1, + padding: Union[int, Tuple[int, int, int]] = 0, + dilation: Union[int, Tuple[int, int, int]] = 1, groups=1, bias=True, padding_mode="zeros", @@ -37,59 +43,113 @@ def __init__( bias=bias, padding_mode=padding_mode, ) - + def forward(self, x): return self.conv(x) + @operator_test -class TestConv3d(OperatorTest): +class Conv3d(OperatorTest): @dtype_test - def test_conv3d_dtype(self, dtype, tester_factory: Callable) -> None: - # Input shape: (batch_size, in_channels, depth, height, width) - self._test_op(Model().to(dtype), ((torch.rand(2, 3, 4, 4, 4) * 10).to(dtype),), tester_factory) - - def test_conv3d_basic(self, tester_factory: Callable) -> None: - # Basic test with default parameters - self._test_op(Model(), (torch.randn(2, 3, 4, 4, 4),), tester_factory) - - def test_conv3d_kernel_size(self, tester_factory: Callable) -> None: - # Test with different kernel sizes - self._test_op(Model(kernel_size=1), (torch.randn(2, 3, 4, 4, 4),), tester_factory) - self._test_op(Model(kernel_size=(1, 3, 3)), (torch.randn(2, 3, 4, 4, 4),), tester_factory) - - def 
test_conv3d_stride(self, tester_factory: Callable) -> None: - # Test with different stride values - self._test_op(Model(stride=2), (torch.randn(2, 3, 6, 6, 6),), tester_factory) - self._test_op(Model(stride=(1, 2, 2)), (torch.randn(2, 3, 4, 6, 6),), tester_factory) - - def test_conv3d_padding(self, tester_factory: Callable) -> None: - # Test with different padding values - self._test_op(Model(padding=1), (torch.randn(2, 3, 4, 4, 4),), tester_factory) - self._test_op(Model(padding=(0, 1, 1)), (torch.randn(2, 3, 4, 4, 4),), tester_factory) - - def test_conv3d_dilation(self, tester_factory: Callable) -> None: - # Test with different dilation values - self._test_op(Model(dilation=2), (torch.randn(2, 3, 6, 6, 6),), tester_factory) - self._test_op(Model(dilation=(1, 2, 2)), (torch.randn(2, 3, 4, 6, 6),), tester_factory) - - def test_conv3d_groups(self, tester_factory: Callable) -> None: - # Test with groups=3 (in_channels must be divisible by groups) - self._test_op(Model(in_channels=6, out_channels=6, groups=3), (torch.randn(2, 6, 4, 4, 4),), tester_factory) - - def test_conv3d_no_bias(self, tester_factory: Callable) -> None: - # Test without bias - self._test_op(Model(bias=False), (torch.randn(2, 3, 4, 4, 4),), tester_factory) - - def test_conv3d_padding_modes(self, tester_factory: Callable) -> None: - # Test different padding modes + def test_conv3d_dtype(self, flow: TestFlow, dtype) -> None: + self._test_op( + Model().to(dtype), + ((torch.rand(2, 3, 4, 4, 4) * 10).to(dtype),), + flow, + ) + + def test_conv3d_basic(self, flow: TestFlow) -> None: + self._test_op( + Model(), + (torch.randn(2, 3, 4, 4, 4),), + flow, + ) + + def test_conv3d_kernel_size(self, flow: TestFlow) -> None: + self._test_op( + Model(kernel_size=1), + (torch.randn(2, 3, 4, 4, 4),), + flow, + ) + self._test_op( + Model(kernel_size=(1, 3, 3)), + (torch.randn(2, 3, 4, 4, 4),), + flow, + ) + + def test_conv3d_stride(self, flow: TestFlow) -> None: + self._test_op( + Model(stride=2), + (torch.randn(2, 3, 
6, 6, 6),), + flow, + ) + self._test_op( + Model(stride=(1, 2, 2)), + (torch.randn(2, 3, 4, 6, 6),), + flow, + ) + + def test_conv3d_padding(self, flow: TestFlow) -> None: + self._test_op( + Model(padding=1), + (torch.randn(2, 3, 4, 4, 4),), + flow, + ) + self._test_op( + Model(padding=(0, 1, 1)), + (torch.randn(2, 3, 4, 4, 4),), + flow, + ) + + def test_conv3d_dilation(self, flow: TestFlow) -> None: + self._test_op( + Model(dilation=2), + (torch.randn(2, 3, 6, 6, 6),), + flow, + ) + self._test_op( + Model(dilation=(1, 2, 2)), + (torch.randn(2, 3, 4, 6, 6),), + flow, + ) + + def test_conv3d_groups(self, flow: TestFlow) -> None: + self._test_op( + Model(in_channels=6, out_channels=6, groups=3), + (torch.randn(2, 6, 4, 4, 4),), + flow, + ) + + def test_conv3d_no_bias(self, flow: TestFlow) -> None: + self._test_op( + Model(bias=False), + (torch.randn(2, 3, 4, 4, 4),), + flow, + ) + + def test_conv3d_padding_modes(self, flow: TestFlow) -> None: for mode in ["zeros", "reflect", "replicate", "circular"]: - self._test_op(Model(padding=1, padding_mode=mode), (torch.randn(2, 3, 4, 4, 4),), tester_factory) - - def test_conv3d_channels(self, tester_factory: Callable) -> None: - # Test with different channel configurations - self._test_op(Model(in_channels=1, out_channels=1), (torch.randn(2, 1, 4, 4, 4),), tester_factory) - self._test_op(Model(in_channels=5, out_channels=10), (torch.randn(2, 5, 4, 4, 4),), tester_factory) - - def test_conv3d_different_spatial_dims(self, tester_factory: Callable) -> None: - # Test with different depth, height, and width - self._test_op(Model(), (torch.randn(2, 3, 3, 4, 5),), tester_factory) + self._test_op( + Model(padding=1, padding_mode=mode), + (torch.randn(2, 3, 4, 4, 4),), + flow, + ) + + def test_conv3d_channels(self, flow: TestFlow) -> None: + self._test_op( + Model(in_channels=1, out_channels=1), + (torch.randn(2, 1, 4, 4, 4),), + flow, + ) + self._test_op( + Model(in_channels=5, out_channels=10), + (torch.randn(2, 5, 4, 4, 4),), + 
flow, + ) + + def test_conv3d_different_spatial_dims(self, flow: TestFlow) -> None: + self._test_op( + Model(), + (torch.randn(2, 3, 3, 4, 5),), + flow, + ) diff --git a/backends/test/suite/operators/test_convtranspose1d.py b/backends/test/suite/operators/test_convtranspose1d.py index d93e542de4a..b2fe3040225 100644 --- a/backends/test/suite/operators/test_convtranspose1d.py +++ b/backends/test/suite/operators/test_convtranspose1d.py @@ -1,27 +1,33 @@ -# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. -# pyre-strict +# pyre-unsafe -from typing import Callable, Union, Tuple +from typing import Tuple, Union import torch +from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.compliance_suite import ( +from executorch.backends.test.suite.operators import ( dtype_test, operator_test, OperatorTest, ) + class Model(torch.nn.Module): def __init__( self, in_channels=3, out_channels=6, - kernel_size=3, - stride=1, - padding=0, - output_padding=0, - dilation=1, + kernel_size: Union[int, Tuple[int]] = 3, + stride: Union[int, Tuple[int]] = 1, + padding: Union[int, Tuple[int]] = 0, + output_padding: Union[int, Tuple[int]] = 0, + dilation: Union[int, Tuple[int]] = 1, groups=1, bias=True, ): @@ -37,53 +43,95 @@ def __init__( groups=groups, bias=bias, ) - + def forward(self, x): return self.conv_transpose(x) + @operator_test -class TestConvTranspose1d(OperatorTest): +class ConvTranspose1d(OperatorTest): @dtype_test - def test_convtranspose1d_dtype(self, dtype, tester_factory: Callable) -> None: - # Input shape: (batch_size, in_channels, length) - self._test_op(Model().to(dtype), ((torch.rand(2, 3, 10) * 10).to(dtype),), tester_factory) - - def test_convtranspose1d_basic(self, tester_factory: Callable) -> None: - # 
Basic test with default parameters - self._test_op(Model(), (torch.randn(2, 3, 10),), tester_factory) - - def test_convtranspose1d_kernel_size(self, tester_factory: Callable) -> None: - # Test with different kernel sizes - self._test_op(Model(kernel_size=1), (torch.randn(2, 3, 10),), tester_factory) - self._test_op(Model(kernel_size=5), (torch.randn(2, 3, 10),), tester_factory) - - def test_convtranspose1d_stride(self, tester_factory: Callable) -> None: - # Test with different stride values - self._test_op(Model(stride=2), (torch.randn(2, 3, 10),), tester_factory) - - def test_convtranspose1d_padding(self, tester_factory: Callable) -> None: - # Test with different padding values - self._test_op(Model(padding=1), (torch.randn(2, 3, 10),), tester_factory) - self._test_op(Model(padding=2), (torch.randn(2, 3, 10),), tester_factory) - - def test_convtranspose1d_output_padding(self, tester_factory: Callable) -> None: - # Test with different output_padding values (requires stride > 1) - self._test_op(Model(stride=2, output_padding=1), (torch.randn(2, 3, 10),), tester_factory) - - def test_convtranspose1d_dilation(self, tester_factory: Callable) -> None: - # Test with different dilation values - self._test_op(Model(dilation=2), (torch.randn(2, 3, 10),), tester_factory) - - def test_convtranspose1d_groups(self, tester_factory: Callable) -> None: - # Test with groups=3 (in_channels and out_channels must be divisible by groups) - self._test_op(Model(in_channels=6, out_channels=6, groups=3), (torch.randn(2, 6, 10),), tester_factory) - - def test_convtranspose1d_no_bias(self, tester_factory: Callable) -> None: - # Test without bias - self._test_op(Model(bias=False), (torch.randn(2, 3, 10),), tester_factory) - - def test_convtranspose1d_channels(self, tester_factory: Callable) -> None: - # Test with different channel configurations - self._test_op(Model(in_channels=1, out_channels=1), (torch.randn(2, 1, 10),), tester_factory) - self._test_op(Model(in_channels=5, 
out_channels=10), (torch.randn(2, 5, 10),), tester_factory) - \ No newline at end of file + def test_convtranspose1d_dtype(self, flow: TestFlow, dtype) -> None: + self._test_op( + Model().to(dtype), + ((torch.rand(2, 3, 10) * 10).to(dtype),), + flow, + ) + + def test_convtranspose1d_basic(self, flow: TestFlow) -> None: + self._test_op( + Model(), + (torch.randn(2, 3, 10),), + flow, + ) + + def test_convtranspose1d_kernel_size(self, flow: TestFlow) -> None: + self._test_op( + Model(kernel_size=1), + (torch.randn(2, 3, 10),), + flow, + ) + self._test_op( + Model(kernel_size=5), + (torch.randn(2, 3, 10),), + flow, + ) + + def test_convtranspose1d_stride(self, flow: TestFlow) -> None: + self._test_op( + Model(stride=2), + (torch.randn(2, 3, 10),), + flow, + ) + + def test_convtranspose1d_padding(self, flow: TestFlow) -> None: + self._test_op( + Model(padding=1), + (torch.randn(2, 3, 10),), + flow, + ) + self._test_op( + Model(padding=2), + (torch.randn(2, 3, 10),), + flow, + ) + + def test_convtranspose1d_output_padding(self, flow: TestFlow) -> None: + self._test_op( + Model(stride=2, output_padding=1), + (torch.randn(2, 3, 10),), + flow, + ) + + def test_convtranspose1d_dilation(self, flow: TestFlow) -> None: + self._test_op( + Model(dilation=2), + (torch.randn(2, 3, 10),), + flow, + ) + + def test_convtranspose1d_groups(self, flow: TestFlow) -> None: + self._test_op( + Model(in_channels=6, out_channels=6, groups=3), + (torch.randn(2, 6, 10),), + flow, + ) + + def test_convtranspose1d_no_bias(self, flow: TestFlow) -> None: + self._test_op( + Model(bias=False), + (torch.randn(2, 3, 10),), + flow, + ) + + def test_convtranspose1d_channels(self, flow: TestFlow) -> None: + self._test_op( + Model(in_channels=1, out_channels=1), + (torch.randn(2, 1, 10),), + flow, + ) + self._test_op( + Model(in_channels=5, out_channels=10), + (torch.randn(2, 5, 10),), + flow, + ) diff --git a/backends/test/suite/operators/test_convtranspose2d.py 
b/backends/test/suite/operators/test_convtranspose2d.py index b5a4dfb784c..13ad272645f 100644 --- a/backends/test/suite/operators/test_convtranspose2d.py +++ b/backends/test/suite/operators/test_convtranspose2d.py @@ -1,27 +1,33 @@ -# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. -# pyre-strict +# pyre-unsafe -from typing import Callable, Union, Tuple +from typing import Tuple, Union import torch +from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.compliance_suite import ( +from executorch.backends.test.suite.operators import ( dtype_test, operator_test, OperatorTest, ) + class Model(torch.nn.Module): def __init__( self, in_channels=3, out_channels=6, - kernel_size=3, - stride=1, - padding=0, - output_padding=0, - dilation=1, + kernel_size: Union[int, Tuple[int, int]] = 3, + stride: Union[int, Tuple[int, int]] = 1, + padding: Union[int, Tuple[int, int]] = 0, + output_padding: Union[int, Tuple[int, int]] = 0, + dilation: Union[int, Tuple[int, int]] = 1, groups=1, bias=True, ): @@ -37,60 +43,122 @@ def __init__( groups=groups, bias=bias, ) - + def forward(self, x): return self.conv_transpose(x) + @operator_test -class TestConvTranspose2d(OperatorTest): +class ConvTranspose2d(OperatorTest): @dtype_test - def test_convtranspose2d_dtype(self, dtype, tester_factory: Callable) -> None: - # Input shape: (batch_size, in_channels, height, width) - self._test_op(Model().to(dtype), ((torch.rand(2, 3, 8, 8) * 10).to(dtype),), tester_factory) - - def test_convtranspose2d_basic(self, tester_factory: Callable) -> None: - # Basic test with default parameters - self._test_op(Model(), (torch.randn(2, 3, 8, 8),), tester_factory) - - def test_convtranspose2d_kernel_size(self, tester_factory: Callable) -> None: - # 
Test with different kernel sizes - self._test_op(Model(kernel_size=1), (torch.randn(2, 3, 8, 8),), tester_factory) - self._test_op(Model(kernel_size=5), (torch.randn(2, 3, 8, 8),), tester_factory) - self._test_op(Model(kernel_size=(3, 5)), (torch.randn(2, 3, 8, 8),), tester_factory) - - def test_convtranspose2d_stride(self, tester_factory: Callable) -> None: - # Test with different stride values - self._test_op(Model(stride=2), (torch.randn(2, 3, 8, 8),), tester_factory) - self._test_op(Model(stride=(2, 1)), (torch.randn(2, 3, 8, 8),), tester_factory) - - def test_convtranspose2d_padding(self, tester_factory: Callable) -> None: - # Test with different padding values - self._test_op(Model(padding=1), (torch.randn(2, 3, 8, 8),), tester_factory) - self._test_op(Model(padding=(1, 2)), (torch.randn(2, 3, 8, 8),), tester_factory) - - def test_convtranspose2d_output_padding(self, tester_factory: Callable) -> None: - # Test with different output_padding values (requires stride > 1) - self._test_op(Model(stride=2, output_padding=1), (torch.randn(2, 3, 8, 8),), tester_factory) - self._test_op(Model(stride=(2, 2), output_padding=(1, 0)), (torch.randn(2, 3, 8, 8),), tester_factory) - - def test_convtranspose2d_dilation(self, tester_factory: Callable) -> None: - # Test with different dilation values - self._test_op(Model(dilation=2), (torch.randn(2, 3, 8, 8),), tester_factory) - self._test_op(Model(dilation=(2, 1)), (torch.randn(2, 3, 8, 8),), tester_factory) - - def test_convtranspose2d_groups(self, tester_factory: Callable) -> None: - # Test with groups=3 (in_channels and out_channels must be divisible by groups) - self._test_op(Model(in_channels=6, out_channels=6, groups=3), (torch.randn(2, 6, 8, 8),), tester_factory) - - def test_convtranspose2d_no_bias(self, tester_factory: Callable) -> None: - # Test without bias - self._test_op(Model(bias=False), (torch.randn(2, 3, 8, 8),), tester_factory) - - def test_convtranspose2d_channels(self, tester_factory: Callable) -> None: - # 
Test with different channel configurations - self._test_op(Model(in_channels=1, out_channels=1), (torch.randn(2, 1, 8, 8),), tester_factory) - self._test_op(Model(in_channels=5, out_channels=10), (torch.randn(2, 5, 8, 8),), tester_factory) - - def test_convtranspose2d_different_spatial_dims(self, tester_factory: Callable) -> None: - # Test with different height and width - self._test_op(Model(), (torch.randn(2, 3, 10, 8),), tester_factory) + def test_convtranspose2d_dtype(self, flow: TestFlow, dtype) -> None: + self._test_op( + Model().to(dtype), + ((torch.rand(2, 3, 8, 8) * 10).to(dtype),), + flow, + ) + + def test_convtranspose2d_basic(self, flow: TestFlow) -> None: + self._test_op( + Model(), + (torch.randn(2, 3, 8, 8),), + flow, + ) + + def test_convtranspose2d_kernel_size(self, flow: TestFlow) -> None: + self._test_op( + Model(kernel_size=1), + (torch.randn(2, 3, 8, 8),), + flow, + ) + self._test_op( + Model(kernel_size=5), + (torch.randn(2, 3, 8, 8),), + flow, + ) + self._test_op( + Model(kernel_size=(3, 5)), + (torch.randn(2, 3, 8, 8),), + flow, + ) + + def test_convtranspose2d_stride(self, flow: TestFlow) -> None: + self._test_op( + Model(stride=2), + (torch.randn(2, 3, 8, 8),), + flow, + ) + self._test_op( + Model(stride=(2, 1)), + (torch.randn(2, 3, 8, 8),), + flow, + ) + + def test_convtranspose2d_padding(self, flow: TestFlow) -> None: + self._test_op( + Model(padding=1), + (torch.randn(2, 3, 8, 8),), + flow, + ) + self._test_op( + Model(padding=(1, 2)), + (torch.randn(2, 3, 8, 8),), + flow, + ) + + def test_convtranspose2d_output_padding(self, flow: TestFlow) -> None: + self._test_op( + Model(stride=2, output_padding=1), + (torch.randn(2, 3, 8, 8),), + flow, + ) + self._test_op( + Model(stride=(2, 2), output_padding=(1, 0)), + (torch.randn(2, 3, 8, 8),), + flow, + ) + + def test_convtranspose2d_dilation(self, flow: TestFlow) -> None: + self._test_op( + Model(dilation=2), + (torch.randn(2, 3, 8, 8),), + flow, + ) + self._test_op( + Model(dilation=(2, 
1)), + (torch.randn(2, 3, 8, 8),), + flow, + ) + + def test_convtranspose2d_groups(self, flow: TestFlow) -> None: + self._test_op( + Model(in_channels=6, out_channels=6, groups=3), + (torch.randn(2, 6, 8, 8),), + flow, + ) + + def test_convtranspose2d_no_bias(self, flow: TestFlow) -> None: + self._test_op( + Model(bias=False), + (torch.randn(2, 3, 8, 8),), + flow, + ) + + def test_convtranspose2d_channels(self, flow: TestFlow) -> None: + self._test_op( + Model(in_channels=1, out_channels=1), + (torch.randn(2, 1, 8, 8),), + flow, + ) + self._test_op( + Model(in_channels=5, out_channels=10), + (torch.randn(2, 5, 8, 8),), + flow, + ) + + def test_convtranspose2d_different_spatial_dims(self, flow: TestFlow) -> None: + self._test_op( + Model(), + (torch.randn(2, 3, 10, 8),), + flow, + ) diff --git a/backends/test/suite/operators/test_convtranspose3d.py b/backends/test/suite/operators/test_convtranspose3d.py index 00612725016..6cedc5b31cd 100644 --- a/backends/test/suite/operators/test_convtranspose3d.py +++ b/backends/test/suite/operators/test_convtranspose3d.py @@ -1,27 +1,33 @@ -# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
-# pyre-strict +# pyre-unsafe -from typing import Callable, Union, Tuple +from typing import Tuple, Union import torch +from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.compliance_suite import ( +from executorch.backends.test.suite.operators import ( dtype_test, operator_test, OperatorTest, ) + class Model(torch.nn.Module): def __init__( self, in_channels=3, out_channels=6, - kernel_size=3, - stride=1, - padding=0, - output_padding=0, - dilation=1, + kernel_size: Union[int, Tuple[int, int, int]] = 3, + stride: Union[int, Tuple[int, int, int]] = 1, + padding: Union[int, Tuple[int, int, int]] = 0, + output_padding: Union[int, Tuple[int, int, int]] = 0, + dilation: Union[int, Tuple[int, int, int]] = 1, groups=1, bias=True, ): @@ -37,59 +43,117 @@ def __init__( groups=groups, bias=bias, ) - + def forward(self, x): return self.conv_transpose(x) + @operator_test -class TestConvTranspose3d(OperatorTest): +class ConvTranspose3d(OperatorTest): @dtype_test - def test_convtranspose3d_dtype(self, dtype, tester_factory: Callable) -> None: - # Input shape: (batch_size, in_channels, depth, height, width) - self._test_op(Model().to(dtype), ((torch.rand(2, 3, 4, 4, 4) * 10).to(dtype),), tester_factory) - - def test_convtranspose3d_basic(self, tester_factory: Callable) -> None: - # Basic test with default parameters - self._test_op(Model(), (torch.randn(2, 3, 4, 4, 4),), tester_factory) - - def test_convtranspose3d_kernel_size(self, tester_factory: Callable) -> None: - # Test with different kernel sizes - self._test_op(Model(kernel_size=1), (torch.randn(2, 3, 4, 4, 4),), tester_factory) - self._test_op(Model(kernel_size=(1, 3, 3)), (torch.randn(2, 3, 4, 4, 4),), tester_factory) - - def test_convtranspose3d_stride(self, tester_factory: Callable) -> None: - # Test with different stride values - self._test_op(Model(stride=2), (torch.randn(2, 3, 4, 4, 4),), tester_factory) - self._test_op(Model(stride=(1, 2, 2)), (torch.randn(2, 3, 4, 4, 4),), 
tester_factory) - - def test_convtranspose3d_padding(self, tester_factory: Callable) -> None: - # Test with different padding values - self._test_op(Model(padding=1), (torch.randn(2, 3, 4, 4, 4),), tester_factory) - self._test_op(Model(padding=(0, 1, 1)), (torch.randn(2, 3, 4, 4, 4),), tester_factory) - - def test_convtranspose3d_output_padding(self, tester_factory: Callable) -> None: - # Test with different output_padding values (requires stride > 1) - self._test_op(Model(stride=2, output_padding=1), (torch.randn(2, 3, 4, 4, 4),), tester_factory) - self._test_op(Model(stride=(2, 2, 2), output_padding=(1, 0, 1)), (torch.randn(2, 3, 4, 4, 4),), tester_factory) - - def test_convtranspose3d_dilation(self, tester_factory: Callable) -> None: - # Test with different dilation values - self._test_op(Model(dilation=2), (torch.randn(2, 3, 4, 4, 4),), tester_factory) - self._test_op(Model(dilation=(1, 2, 2)), (torch.randn(2, 3, 4, 4, 4),), tester_factory) - - def test_convtranspose3d_groups(self, tester_factory: Callable) -> None: - # Test with groups=3 (in_channels and out_channels must be divisible by groups) - self._test_op(Model(in_channels=6, out_channels=6, groups=3), (torch.randn(2, 6, 4, 4, 4),), tester_factory) - - def test_convtranspose3d_no_bias(self, tester_factory: Callable) -> None: - # Test without bias - self._test_op(Model(bias=False), (torch.randn(2, 3, 4, 4, 4),), tester_factory) - - def test_convtranspose3d_channels(self, tester_factory: Callable) -> None: - # Test with different channel configurations - self._test_op(Model(in_channels=1, out_channels=1), (torch.randn(2, 1, 4, 4, 4),), tester_factory) - self._test_op(Model(in_channels=5, out_channels=10), (torch.randn(2, 5, 4, 4, 4),), tester_factory) - - def test_convtranspose3d_different_spatial_dims(self, tester_factory: Callable) -> None: - # Test with different depth, height, and width - self._test_op(Model(), (torch.randn(2, 3, 3, 4, 5),), tester_factory) + def test_convtranspose3d_dtype(self, flow: 
TestFlow, dtype) -> None: + self._test_op( + Model().to(dtype), + ((torch.rand(2, 3, 4, 4, 4) * 10).to(dtype),), + flow, + ) + + def test_convtranspose3d_basic(self, flow: TestFlow) -> None: + self._test_op( + Model(), + (torch.randn(2, 3, 4, 4, 4),), + flow, + ) + + def test_convtranspose3d_kernel_size(self, flow: TestFlow) -> None: + self._test_op( + Model(kernel_size=1), + (torch.randn(2, 3, 4, 4, 4),), + flow, + ) + self._test_op( + Model(kernel_size=(1, 3, 3)), + (torch.randn(2, 3, 4, 4, 4),), + flow, + ) + + def test_convtranspose3d_stride(self, flow: TestFlow) -> None: + self._test_op( + Model(stride=2), + (torch.randn(2, 3, 4, 4, 4),), + flow, + ) + self._test_op( + Model(stride=(1, 2, 2)), + (torch.randn(2, 3, 4, 4, 4),), + flow, + ) + + def test_convtranspose3d_padding(self, flow: TestFlow) -> None: + self._test_op( + Model(padding=1), + (torch.randn(2, 3, 4, 4, 4),), + flow, + ) + self._test_op( + Model(padding=(0, 1, 1)), + (torch.randn(2, 3, 4, 4, 4),), + flow, + ) + + def test_convtranspose3d_output_padding(self, flow: TestFlow) -> None: + self._test_op( + Model(stride=2, output_padding=1), + (torch.randn(2, 3, 4, 4, 4),), + flow, + ) + self._test_op( + Model(stride=(2, 2, 2), output_padding=(1, 0, 1)), + (torch.randn(2, 3, 4, 4, 4),), + flow, + ) + + def test_convtranspose3d_dilation(self, flow: TestFlow) -> None: + self._test_op( + Model(dilation=2), + (torch.randn(2, 3, 4, 4, 4),), + flow, + ) + self._test_op( + Model(dilation=(1, 2, 2)), + (torch.randn(2, 3, 4, 4, 4),), + flow, + ) + + def test_convtranspose3d_groups(self, flow: TestFlow) -> None: + self._test_op( + Model(in_channels=6, out_channels=6, groups=3), + (torch.randn(2, 6, 4, 4, 4),), + flow, + ) + + def test_convtranspose3d_no_bias(self, flow: TestFlow) -> None: + self._test_op( + Model(bias=False), + (torch.randn(2, 3, 4, 4, 4),), + flow, + ) + + def test_convtranspose3d_channels(self, flow: TestFlow) -> None: + self._test_op( + Model(in_channels=1, out_channels=1), + (torch.randn(2, 
1, 4, 4, 4),), + flow, + ) + self._test_op( + Model(in_channels=5, out_channels=10), + (torch.randn(2, 5, 4, 4, 4),), + flow, + ) + + def test_convtranspose3d_different_spatial_dims(self, flow: TestFlow) -> None: + self._test_op( + Model(), + (torch.randn(2, 3, 3, 4, 5),), + flow, + )