#!POPCORN leaderboard matmul_py
import triton
import triton.language as tl
import torch
from task import input_t, output_t


@triton.jit
def matmul_kernel(
    # Pointers to matrices
    a_ptr, b_ptr, c_ptr,
    # Matrix dimensions
    M, N, K,
    # The stride variables represent how much to increase the ptr by when moving by 1
    # element in a particular dimension. E.g. `stride_am` is how much to increase `a_ptr`
    # by to get the element one row down (A has M rows).
    stride_am, stride_ak,
    stride_bk, stride_bn,
    stride_cm, stride_cn,
    # Meta-parameters
    BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,
    GROUP_SIZE_M: tl.constexpr,
):
    """Kernel for computing the matmul C = A x B.
    A has shape (M, K), B has shape (K, N), and C has shape (M, N).
    """
    # -----------------------------------------------------------
    # Map program ids `pid` to the block of C it should compute.
    # This is done in a grouped ordering to promote L2 cache hit rates.
    # See the `L2 Cache Optimizations` section of the Triton matmul tutorial for details.
    pid = tl.program_id(axis=0)
    num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
    num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
    num_pid_in_group = GROUP_SIZE_M * num_pid_n
    group_id = pid // num_pid_in_group
    first_pid_m = group_id * GROUP_SIZE_M
    group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
    pid_m = first_pid_m + (pid % group_size_m)
    pid_n = (pid % num_pid_in_group) // group_size_m
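    # Worked example of the grouped ordering (illustrative values only): assuming
    # GROUP_SIZE_M = 2, num_pid_n = 3, and num_pid_m >= 2, pids 0..5 map to
    # (pid_m, pid_n) = (0,0), (1,0), (0,1), (1,1), (0,2), (1,2). Consecutive programs
    # therefore revisit the same few rows of A and columns of B, which is what
    # improves L2 reuse.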

    # ----------------------------------------------------------
    # Create pointers for the first blocks of A and B.
    # We will advance these pointers as we move in the K direction
    # and accumulate.
    # `a_ptrs` is a block of [BLOCK_SIZE_M, BLOCK_SIZE_K] pointers
    # `b_ptrs` is a block of [BLOCK_SIZE_K, BLOCK_SIZE_N] pointers
    # See the `Pointer Arithmetic` section of the Triton matmul tutorial for details.
    offs_am = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M
    offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N
    offs_k = tl.arange(0, BLOCK_SIZE_K)
    a_ptrs = a_ptr + (offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak)
    b_ptrs = b_ptr + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn)
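    # Pointer arithmetic in concrete terms: since `triton_matmul` below passes
    # contiguous row-major tensors, stride_am == K and stride_ak == 1, so
    # a_ptrs[i, j] points at a_ptr + (pid_m * BLOCK_SIZE_M + i) * K + j, i.e.
    # row pid_m * BLOCK_SIZE_M + i, column j of A (modulo the `% M` wrap that
    # keeps boundary rows in bounds).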

    # -----------------------------------------------------------
    # Iterate to compute a block of the C matrix.
    # We accumulate into a `[BLOCK_SIZE_M, BLOCK_SIZE_N]` block
    # of fp32 values for higher precision.
    # `accumulator` is cast back to the output dtype after the loop.
    accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
    for k in range(0, tl.cdiv(K, BLOCK_SIZE_K)):
        # Load the next block of A and B, generating a mask over the K dimension.
        # Out-of-bounds elements are set to 0.
        a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0)
        b = tl.load(b_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0)
        # We accumulate along the K dimension.
        accumulator += tl.dot(a, b)
        # Advance the pointers to the next K block.
        a_ptrs += BLOCK_SIZE_K * stride_ak
        b_ptrs += BLOCK_SIZE_K * stride_bk
    # You can fuse arbitrary activation functions here
    # while the accumulator is still in FP32!
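    # For instance, a ReLU epilogue could be fused at this point; it is left
    # commented out because this submission computes a plain matmul:
    # accumulator = tl.maximum(accumulator, 0.0)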
    c = accumulator.to(c_ptr.dtype.element_ty)

    # -----------------------------------------------------------
    # Write back the block of the output matrix C with masks.
    offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
    offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
    c_ptrs = c_ptr + stride_cm * offs_cm[:, None] + stride_cn * offs_cn[None, :]
    c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < N)
    tl.store(c_ptrs, c, mask=c_mask)


def triton_matmul(a, b):
    """Compute C = A @ B using the Triton kernel above."""
    # Check constraints.
    assert a.shape[1] == b.shape[0], "Incompatible dimensions"
    assert a.is_contiguous(), "Matrix A must be contiguous"
    assert b.is_contiguous(), "Matrix B must be contiguous"
    M, K = a.shape
    _, N = b.shape
    # Allocate output.
    c = torch.empty((M, N), device=a.device, dtype=a.dtype)
    # 1D launch grid: each program computes one [BLOCK_SIZE_M, BLOCK_SIZE_N] tile of C.
    grid = lambda META: (triton.cdiv(M, META['BLOCK_SIZE_M']) * triton.cdiv(N, META['BLOCK_SIZE_N']), )
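    # For example (illustrative sizes only): with M = N = 1024 and the 128x128
    # blocks configured below, the grid is cdiv(1024, 128) * cdiv(1024, 128) = 64 programs.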
    matmul_kernel[grid](
        a, b, c,
        M, N, K,
        a.stride(0), a.stride(1),
        b.stride(0), b.stride(1),
        c.stride(0), c.stride(1),
        BLOCK_SIZE_M=128, BLOCK_SIZE_N=128, BLOCK_SIZE_K=32,
        GROUP_SIZE_M=8,
    )
    return c


def custom_kernel(data: input_t) -> output_t:
    a, b = data
    # Convert to torch tensors if they aren't already.
    if not isinstance(a, torch.Tensor):
        a = torch.tensor(a, dtype=torch.float16).cuda()
    if not isinstance(b, torch.Tensor):
        b = torch.tensor(b, dtype=torch.float16).cuda()

    # Ensure tensors are on the GPU and contiguous.
    if not a.is_cuda:
        a = a.cuda()
    if not b.is_cuda:
        b = b.cuda()

    a = a.contiguous()
    b = b.contiguous()

    # Use our custom Triton matmul.
    result = triton_matmul(a, b)

    # `result` is already in the expected output format.
    return result
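

# Minimal local smoke test (a sketch, not part of the submission path). It assumes
# `input_t` is an (A, B) tuple of fp16 CUDA tensors, matching how `custom_kernel`
# unpacks `data` above, and that a CUDA device with Triton support is available.
# Non-multiple-of-128 sizes are used on purpose to exercise the boundary masks.
if __name__ == "__main__":
    a = torch.randn((500, 300), dtype=torch.float16, device="cuda")
    b = torch.randn((300, 250), dtype=torch.float16, device="cuda")
    c = custom_kernel((a, b))
    ref = torch.matmul(a, b)
    # fp16 inputs accumulated in fp32 should track the torch reference closely.
    assert torch.allclose(c, ref, rtol=1e-2, atol=1e-2), "Triton result diverges from torch.matmul"
    print("smoke test passed:", c.shape)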