Skip to content

[Backend Tester] Add permute, transpose, and masked_fill tests #12850

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 18 commits into from
Jul 29, 2025
5 changes: 4 additions & 1 deletion backends/test/suite/operators/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -133,7 +133,9 @@ def _create_test_for_backend(


class OperatorTest(unittest.TestCase):
def _test_op(self, model, inputs, flow: TestFlow):
def _test_op(
self, model, inputs, flow: TestFlow, generate_random_test_inputs: bool = True
):
context = get_active_test_context()

# This should be set in the wrapped test. See _make_wrapped_test above.
Expand All @@ -145,6 +147,7 @@ def _test_op(self, model, inputs, flow: TestFlow):
flow,
context.test_name,
context.params,
generate_random_test_inputs=generate_random_test_inputs,
)

log_test_summary(run_summary)
Expand Down
89 changes: 89 additions & 0 deletions backends/test/suite/operators/test_embedding.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,89 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe

import torch
from executorch.backends.test.suite.flow import TestFlow

from executorch.backends.test.suite.operators import (
dtype_test,
operator_test,
OperatorTest,
)


class Model(torch.nn.Module):
    """Thin wrapper around ``torch.nn.Embedding`` used by the operator tests.

    Args:
        num_embeddings: Size of the lookup table (number of valid indices).
        embedding_dim: Width of each embedding vector.
    """

    def __init__(self, num_embeddings=100, embedding_dim=50):
        super().__init__()
        # Lookup table mapping integer indices to dense vectors.
        self.embedding = torch.nn.Embedding(num_embeddings, embedding_dim)

    def forward(self, x):
        """Return the embedding rows selected by the index tensor ``x``."""
        return self.embedding(x)


@operator_test
class Embedding(OperatorTest):
    # generate_random_test_inputs=False is passed on every call: the tester's
    # random input generation cannot guarantee that index inputs stay within
    # the valid range of the embedding table, so the fixed inputs built here
    # are reused as-is.

    @dtype_test
    def test_embedding_dtype(self, flow: TestFlow, dtype) -> None:
        # Exercise the default-sized table under each supported dtype.
        indices = torch.randint(0, 10, (2, 8), dtype=torch.long)
        self._test_op(
            Model().to(dtype),
            (indices,),
            flow,
            generate_random_test_inputs=False,
        )

    def test_embedding_sizes(self, flow: TestFlow) -> None:
        # (num_embeddings, embedding_dim, index tensor shape) combinations.
        for num_embeddings, embedding_dim, shape in (
            (5, 3, (2, 8)),
            (100, 10, (2, 8)),
            (1000, 50, (2, 4)),
        ):
            self._test_op(
                Model(num_embeddings=num_embeddings, embedding_dim=embedding_dim),
                (torch.randint(0, num_embeddings, shape, dtype=torch.long),),
                flow,
                generate_random_test_inputs=False,
            )

    def test_embedding_batch_dim(self, flow: TestFlow) -> None:
        # Index tensors of rank 1, 2, and 3 against the default table.
        for shape in ((5,), (2, 8), (2, 3, 4)):
            self._test_op(
                Model(),
                (torch.randint(0, 100, shape, dtype=torch.long),),
                flow,
                generate_random_test_inputs=False,
            )
118 changes: 118 additions & 0 deletions backends/test/suite/operators/test_embedding_bag.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,118 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe

import torch
from executorch.backends.test.suite.flow import TestFlow

from executorch.backends.test.suite.operators import (
dtype_test,
operator_test,
OperatorTest,
)


class Model(torch.nn.Module):
    """Thin wrapper around ``torch.nn.EmbeddingBag`` used by the operator tests.

    Args:
        num_embeddings: Size of the lookup table (number of valid indices).
        embedding_dim: Width of each embedding vector.
        mode: Reduction applied per bag ("sum", "mean", or "max").
        include_last_offset: Whether the final offset marks the end of the
            last bag rather than the start of a new one.
    """

    def __init__(
        self,
        num_embeddings=10,
        embedding_dim=5,
        mode="mean",
        include_last_offset: bool = False,
    ):
        super().__init__()
        self.embedding_bag = torch.nn.EmbeddingBag(
            num_embeddings,
            embedding_dim,
            mode=mode,
            include_last_offset=include_last_offset,
        )

    def forward(self, x, offsets=None):
        """Reduce the embeddings of ``x`` per bag delimited by ``offsets``."""
        return self.embedding_bag(x, offsets)


@operator_test
class EmbeddingBag(OperatorTest):
    # generate_random_test_inputs=False is passed on every call: the tester's
    # random input generation cannot guarantee that index inputs stay within
    # the valid range of the embedding table, so the fixed inputs built here
    # are reused as-is.

    @dtype_test
    def test_embedding_bag_dtype(self, flow: TestFlow, dtype) -> None:
        # Two bags of four indices each, default-sized table, per dtype.
        indices = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9], dtype=torch.long)
        offsets = torch.tensor([0, 4], dtype=torch.long)
        self._test_op(
            Model().to(dtype),
            (indices, offsets),
            flow,
            generate_random_test_inputs=False,
        )

    def test_embedding_bag_sizes(self, flow: TestFlow) -> None:
        # (num_embeddings, embedding_dim, indices, offsets) combinations.
        for num_embeddings, embedding_dim, index_values, offset_values in (
            (5, 3, [1, 2, 3, 1], [0, 2]),
            (50, 10, [5, 20, 10, 43, 7], [0, 2, 4]),
            (500, 20, [100, 200, 300, 400], [0, 2]),
        ):
            self._test_op(
                Model(num_embeddings=num_embeddings, embedding_dim=embedding_dim),
                (
                    torch.tensor(index_values, dtype=torch.long),
                    torch.tensor(offset_values, dtype=torch.long),
                ),
                flow,
                generate_random_test_inputs=False,
            )

    def test_embedding_bag_modes(self, flow: TestFlow) -> None:
        # Same input bags under each supported reduction mode.
        indices = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9], dtype=torch.long)
        offsets = torch.tensor([0, 4], dtype=torch.long)
        for mode in ("sum", "mean", "max"):
            self._test_op(
                Model(mode=mode),
                (indices, offsets),
                flow,
                generate_random_test_inputs=False,
            )

    def test_embedding_bag_include_last_offset(self, flow: TestFlow) -> None:
        # The trailing offset entry marks the end of the last bag.
        indices = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9], dtype=torch.long)
        offsets = torch.tensor([0, 4], dtype=torch.long)
        self._test_op(
            Model(include_last_offset=True),
            (indices, offsets),
            flow,
            generate_random_test_inputs=False,
        )
101 changes: 101 additions & 0 deletions backends/test/suite/operators/test_masked_fill.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,101 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe

from typing import Union

import torch
from executorch.backends.test.suite.flow import TestFlow

from executorch.backends.test.suite.operators import (
dtype_test,
operator_test,
OperatorTest,
)


class MaskedFillModel(torch.nn.Module):
    """Applies ``Tensor.masked_fill`` with a fixed scalar fill value.

    Args:
        value: Scalar written into every position where the mask is True.
    """

    def __init__(self, value: Union[float, int]):
        super().__init__()
        # Stored once at construction; reused for every forward pass.
        self.value = value

    def forward(self, x, mask):
        """Return a copy of ``x`` with masked positions replaced by the value."""
        return x.masked_fill(mask, self.value)


@operator_test
class MaskedFill(OperatorTest):
    @dtype_test
    def test_masked_fill_dtype(self, flow: TestFlow, dtype) -> None:
        # Zero-fill a random boolean mask over a 16x32 input, per dtype.
        mask = torch.randint(0, 2, (16, 32), dtype=torch.bool)
        self._test_op(
            MaskedFillModel(value=0.0),
            (torch.rand(16, 32).to(dtype), mask),
            flow,
        )

    def test_masked_fill_different_values(self, flow: TestFlow) -> None:
        # Positive float, negative float, and an integer fill value.
        mask = torch.randint(0, 2, (16, 32), dtype=torch.bool)
        for fill_value in (5.0, -5.0, 1):
            self._test_op(
                MaskedFillModel(value=fill_value),
                (torch.randn(16, 32), mask),
                flow,
            )

    def test_masked_fill_different_shapes(self, flow: TestFlow) -> None:
        # Rank-1 and rank-3 inputs with matching mask shapes.
        for shape in ((512,), (4, 8, 16)):
            self._test_op(
                MaskedFillModel(value=0.0),
                (
                    torch.randn(shape),
                    torch.randint(0, 2, shape, dtype=torch.bool),
                ),
                flow,
            )

    def test_masked_fill_broadcast(self, flow: TestFlow) -> None:
        # The rank-1 mask broadcasts across the leading dimension of x.
        self._test_op(
            MaskedFillModel(value=0.0),
            (
                torch.randn(16, 32),
                torch.randint(0, 2, (32,), dtype=torch.bool),
            ),
            flow,
        )
Loading
Loading