Commit ba5c114

Bypass failures on XPU
1 parent 0a0aedd commit ba5c114

2 files changed: +4 -2 lines changed


test/test_examples.py

Lines changed: 3 additions & 1 deletion
@@ -320,7 +320,7 @@ def test_low_mem_dropout(self):
         size = 8192
         seed = 123
         seed2 = 456
-        x = torch.randn(size=(size,)).cuda()
+        x = torch.randn(size=(size,)).to(device=DEVICE)
 
         _, out_fwd = code_and_output(
             low_mem_dropout,
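
This hunk swaps a hard-coded `.cuda()` call for the suite's `DEVICE` constant, so the dropout test allocates its input on whichever accelerator the suite targets instead of requiring an NVIDIA GPU. A minimal sketch of the pattern, with the device-resolution logic as an assumption (the suite's actual `DEVICE` may be derived differently):

```python
import torch

# Hypothetical DEVICE resolution; the test suite's real constant
# may be computed elsewhere.
if torch.cuda.is_available():
    DEVICE = torch.device("cuda")
elif hasattr(torch, "xpu") and torch.xpu.is_available():
    DEVICE = torch.device("xpu")
else:
    DEVICE = torch.device("cpu")

# Device-agnostic allocation: runs on CUDA, XPU, or CPU, whereas
# .cuda() hard-codes NVIDIA GPUs and fails on XPU-only builds.
x = torch.randn(size=(8192,)).to(device=DEVICE)
```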
@@ -503,6 +503,7 @@ def test_attention_pointer(self):
             )
         )
 
+    @skipIfXPU("failure on XPU")
     def test_attention_block_pointer(self):
         args = (
             torch.randn(2, 32, 1024, 64, dtype=torch.float16, device=DEVICE),
@@ -697,6 +698,7 @@ def test_segment_reduction(self):
             )
         )
 
+    @skipIfXPU("failure on XPU")
     def test_attention_persistent_interleaved_l2_grouping(self):
         """Test attention with persistent interleaved execution and L2 grouping for optimal performance."""
         args = (
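
Both attention tests above are now decorated with `@skipIfXPU(...)`, matching the commit message: rather than fixing the underlying XPU failures, the affected tests are bypassed on that backend. The decorator's definition is not part of this diff; a minimal sketch of how such a skip helper could be implemented, assuming XPU detection via `torch.xpu`:

```python
import functools
import unittest

import torch


def skipIfXPU(reason: str):
    """Skip the decorated test when running on an Intel XPU.

    Hypothetical implementation; the suite's real helper may detect
    the XPU backend differently.
    """
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            if hasattr(torch, "xpu") and torch.xpu.is_available():
                raise unittest.SkipTest(reason)
            return fn(*args, **kwargs)
        return wrapper
    return decorator
```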

test/test_tensor_descriptor.py

Lines changed: 1 addition & 1 deletion
@@ -298,7 +298,7 @@ def jsd_forward_kernel(
         log_p = torch.randn(batch, vocab, device=DEVICE).log_softmax(dim=-1)
 
         code, (loss, _) = code_and_output(jsd_forward_kernel, (log_q, log_p))
-        torch.cuda.synchronize()
+        torch.accelerator.synchronize()
 
         from examples.jsd import TorchJSDBaseline
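
The synchronization call is likewise made backend-neutral: `torch.cuda.synchronize()` only waits on CUDA work, while `torch.accelerator.synchronize()` (available in recent PyTorch releases) dispatches to whichever accelerator backend is active, including XPU. A short sketch of the generic API:

```python
import torch

if torch.accelerator.is_available():
    # Returns the active backend's device, e.g. device(type='cuda')
    # or device(type='xpu').
    acc = torch.accelerator.current_accelerator()
    y = torch.randn(1024, device=acc).sum()
    # Wait for all queued work on the current accelerator, regardless of
    # backend: the device-agnostic analogue of torch.cuda.synchronize().
    torch.accelerator.synchronize()
```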

0 commit comments

Comments
 (0)