|
7 | 7 | import unittest
|
8 | 8 |
|
9 | 9 | import torch
|
| 10 | +from executorch.backends.xnnpack.partition.xnnpack_partitioner import XnnpackPartitioner |
10 | 11 | from executorch.backends.xnnpack.test.tester import Tester
|
| 12 | +from executorch.exir import to_edge_transform_and_lower |
| 13 | +from torch.export import export |
11 | 14 |
|
12 | 15 |
|
13 | 16 | class TestStaticConstantPad(unittest.TestCase):
|
@@ -125,6 +128,45 @@ def _test_static_constant_pad_functional(self, inputs):
|
125 | 128 | .run_method_and_compare_outputs()
|
126 | 129 | )
|
127 | 130 |
|
class NegativePadModel(torch.nn.Module):
    """Minimal module whose single op is a 2D constant pad with a
    negative entry in its padding tuple.

    The padding is (left, right, top, bottom) = (0, 0, -2, 2): the
    negative top value crops two rows off the top, and two zero rows
    are appended at the bottom, so the overall height is unchanged.
    """

    def __init__(self):
        super().__init__()
        # Negative padding (top = -2) is the case under test: XNNPACK
        # does not support it, so this op must not be delegated.
        self.pad = torch.nn.ConstantPad2d(padding=(0, 0, -2, 2), value=0.0)

    def forward(self, input):
        # Single-op forward: just the constant pad.
        return self.pad(input)
| 139 | + |
def test_negative_pad_model_with_ints(self):
    """Lowering a model with negative padding and an integer input.

    XNNPACK does not support negative padding dimensions, so the
    partitioner must leave the pad op un-delegated and lowering must
    still succeed via the portable (PyTorch) fallback implementation.
    """
    input_tensor = torch.tensor([[4], [5], [6]])
    model = self.NegativePadModel()
    model.eval()
    model.to("cpu")

    exported_model = export(model, (input_tensor,))

    executorch_program = to_edge_transform_and_lower(
        exported_model, partitioner=[XnnpackPartitioner()]
    ).to_executorch()

    # to_executorch() never returns None, so assertIsNotNone alone is
    # vacuous; also require a non-empty serialized flatbuffer to prove
    # lowering actually produced a runnable program.
    self.assertIsNotNone(executorch_program)
    self.assertGreater(len(executorch_program.buffer), 0)
| 154 | + |
def test_negative_pad_model_with_floats(self):
    """Lowering a model with negative padding and a float input.

    XNNPACK does not support negative padding dimensions, so the
    partitioner must decline the pad op (no delegation) and lowering
    must still succeed via the portable (PyTorch) fallback — it should
    not crash or produce an invalid program.
    """
    input_tensor = torch.tensor([[4.0], [5.0], [6.0]])
    model = self.NegativePadModel()
    model.eval()
    model.to("cpu")

    exported_model = export(model, (input_tensor,))

    executorch_program = to_edge_transform_and_lower(
        exported_model, partitioner=[XnnpackPartitioner()]
    ).to_executorch()

    # to_executorch() never returns None, so assertIsNotNone alone is
    # vacuous; also require a non-empty serialized flatbuffer to prove
    # lowering actually produced a runnable program.
    self.assertIsNotNone(executorch_program)
    self.assertGreater(len(executorch_program.buffer), 0)
| 169 | + |
128 | 170 | def test_fp16_static_constant_pad_functional(self):
|
129 | 171 | inputs = (
|
130 | 172 | torch.randn(size=(5, 4, 3, 2)).to(torch.float16),
|
|
0 commit comments