
Commit 37a91e0

simplify and improve crop testing for cvcuda
1 parent 18922e3 commit 37a91e0

1 file changed: +44 -28 lines changed

test/test_transforms_v2.py

Lines changed: 44 additions & 28 deletions
@@ -3537,10 +3537,26 @@ def test_functional_signature(self, kernel, input_type):
         check_functional_kernel_signature_match(F.crop, kernel=kernel, input_type=input_type)

     @pytest.mark.parametrize("kwargs", CORRECTNESS_CROP_KWARGS)
-    def test_functional_image_correctness(self, kwargs):
-        image = make_image(self.INPUT_SIZE, dtype=torch.uint8, device="cpu")
+    @pytest.mark.parametrize(
+        "make_input",
+        [
+            make_image,
+            pytest.param(
+                make_image_cvcuda, marks=pytest.mark.skipif(not CVCUDA_AVAILABLE, reason="test requires CVCUDA")
+            ),
+        ],
+    )
+    def test_functional_image_correctness(self, kwargs, make_input):
+        image = make_input(self.INPUT_SIZE, dtype=torch.uint8, device="cpu")

         actual = F.crop(image, **kwargs)
+
+        if make_input == make_image_cvcuda:
+            actual = F.cvcuda_to_tensor(actual).to(device="cpu")
+            actual = actual.squeeze(0)
+            image = F.cvcuda_to_tensor(image).to(device="cpu")
+            image = image.squeeze(0)
+
         expected = F.to_image(F.crop(F.to_pil_image(image), **kwargs))

         assert_equal(actual, expected)
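The conversion block added above is what lets a CV-CUDA output be compared against the PIL-based reference: the result is moved back to a CPU torch tensor and the leading batch dimension is dropped. The same block reappears in the RandomCrop test further down, so it could plausibly be factored into a shared helper. The sketch below is hypothetical (the helper name is made up) and uses only the F.cvcuda_to_tensor call already present in this diff.

# Hypothetical helper, not part of this commit: it mirrors the conversion
# block added above, using only the F.cvcuda_to_tensor call from this diff.
from torchvision.transforms.v2 import functional as F


def cvcuda_to_cpu_image(cvcuda_image):
    # Move the CV-CUDA result back to a CPU torch tensor and drop the leading
    # batch dimension added by make_image_cvcuda, so it can be compared
    # directly against the single-image reference.
    tensor = F.cvcuda_to_tensor(cvcuda_image).to(device="cpu")
    return tensor.squeeze(0)

With such a helper, each if make_input == make_image_cvcuda: block would shrink to one call per tensor, e.g. actual = cvcuda_to_cpu_image(actual).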
@@ -3628,7 +3644,16 @@ def test_transform_pad_if_needed(self):
         padding_mode=["constant", "edge", "reflect", "symmetric"],
     )
     @pytest.mark.parametrize("seed", list(range(5)))
-    def test_transform_image_correctness(self, param, value, seed):
+    @pytest.mark.parametrize(
+        "make_input",
+        [
+            make_image,
+            pytest.param(
+                make_image_cvcuda, marks=pytest.mark.skipif(not CVCUDA_AVAILABLE, reason="test requires CVCUDA")
+            ),
+        ],
+    )
+    def test_transform_image_correctness(self, param, value, seed, make_input):
         kwargs = {param: value}
         if param != "size":
             # 1. size is required
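This hunk only touches the test's parametrization and uses the same idiom as above: pytest.param wraps the CV-CUDA case with a skipif mark so it is skipped cleanly when CV-CUDA is not installed, instead of keeping a separate CUDA-only test. A self-contained sketch of that idiom, with hypothetical names standing in for CVCUDA_AVAILABLE, make_image, and make_image_cvcuda:

# Standalone sketch of the pytest.param + skipif idiom used in both tests.
# OPTIONAL_BACKEND_AVAILABLE and the make_* helpers are hypothetical stand-ins.
import pytest

OPTIONAL_BACKEND_AVAILABLE = False


def make_plain_input():
    return [1, 2, 3]


def make_backend_input():
    # pretend this one needs the optional backend
    return [1, 2, 3]


@pytest.mark.parametrize(
    "make_input",
    [
        make_plain_input,
        pytest.param(
            make_backend_input,
            marks=pytest.mark.skipif(not OPTIONAL_BACKEND_AVAILABLE, reason="test requires the optional backend"),
        ),
    ],
)
def test_sum(make_input):
    # the plain case always runs; the backend case is skipped unless available
    assert sum(make_input()) == 6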
@@ -3639,41 +3664,32 @@ def test_transform_image_correctness(self, param, value, seed):

         transform = transforms.RandomCrop(pad_if_needed=True, **kwargs)

-        image = make_image(self.INPUT_SIZE)
+        will_pad = False
+        if kwargs["size"][0] > self.INPUT_SIZE[0] or kwargs["size"][1] > self.INPUT_SIZE[1]:
+            will_pad = True
+
+        image = make_input(self.INPUT_SIZE)

         with freeze_rng_state():
             torch.manual_seed(seed)
             actual = transform(image)

             torch.manual_seed(seed)
-            expected = F.to_image(transform(F.to_pil_image(image)))

-            assert_equal(actual, expected)
+            if make_input == make_image_cvcuda:
+                actual = F.cvcuda_to_tensor(actual).to(device="cpu")
+                actual = actual.squeeze(0)
+                image = F.cvcuda_to_tensor(image).to(device="cpu")
+                image = image.squeeze(0)

-    @pytest.mark.skipif(not CVCUDA_AVAILABLE, reason="test requires CVCUDA")
-    @pytest.mark.parametrize("size", [(10, 5), (25, 15), (25, 5), (10, 15), (10, 10)])
-    @pytest.mark.parametrize("seed", list(range(5)))
-    def test_transform_cvcuda_correctness(self, size, seed):
-        pad_if_needed = False
-        if size[0] > self.INPUT_SIZE[0] or size[1] > self.INPUT_SIZE[1]:
-            pad_if_needed = True
-        transform = transforms.RandomCrop(size, pad_if_needed=pad_if_needed)
-
-        image = make_image(size=self.INPUT_SIZE, batch_dims=(1,), device="cuda")
-        cv_image = F.to_cvcuda_tensor(image)
-
-        with freeze_rng_state():
-            torch.manual_seed(seed)
-            actual = transform(cv_image)
-
-            torch.manual_seed(seed)
-            expected = transform(image)
+            expected = F.to_image(transform(F.to_pil_image(image)))

-        if not pad_if_needed:
-            torch.testing.assert_close(F.cvcuda_to_tensor(actual), expected, rtol=0, atol=0)
+        if make_input == make_image_cvcuda and will_pad:
+            # when padding is applied, CV-CUDA will always fill with zeros
+            # cannot use assert_equal since it will fail unless random is all zeros
+            torch.testing.assert_close(actual, expected, rtol=0, atol=get_max_value(image.dtype))
         else:
-            # if padding is requied, CV-CUDA will always fill with zeros
-            torch.testing.assert_close(F.cvcuda_to_tensor(actual), expected, rtol=0, atol=get_max_value(image.dtype))
+            assert_equal(actual, expected)

     def _reference_crop_bounding_boxes(self, bounding_boxes, *, top, left, height, width):
         affine_matrix = np.array(
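One detail worth spelling out in the new comparison logic: for the uint8 images these tests build, get_max_value(image.dtype) is 255, so with rtol=0 the padded branch tolerates any per-pixel difference and effectively only checks shape, dtype, and device. That is intentional, since CV-CUDA always zero-fills the padded region while the reference may fill it with other values. A toy illustration with made-up values (not taken from the test suite):

# Toy illustration of the padded-branch comparison: a dtype-wide atol lets the
# zero-filled CV-CUDA padding pass against a differently filled reference.
import torch

expected = torch.tensor([[11, 12], [13, 14]], dtype=torch.uint8)  # reference padding values
actual = torch.tensor([[0, 0], [13, 14]], dtype=torch.uint8)      # zero-filled padding

torch.testing.assert_close(actual, expected, rtol=0, atol=255)  # passes: every diff <= 255
# torch.testing.assert_close(actual, expected, rtol=0, atol=0)  # would raise an AssertionError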
