119 changes: 119 additions & 0 deletions SPECS/pytorch/CVE-2025-3001.patch
@@ -0,0 +1,119 @@
From 55c933d182200c684639f1e77a60eae196e31d84 Mon Sep 17 00:00:00 2001
From: Yuxingwang-intel <yuxing.wang@intel.com>
Date: Fri, 19 Dec 2025 10:20:47 +0000
Subject: [PATCH] Fix segmentation fault caused by invalid gate weight size in
lstm_cell (#168348)

This PR adds parameter checks for LSTM weights to fix https://github.com/pytorch/pytorch/issues/149626
Pull Request resolved: https://github.com/pytorch/pytorch/pull/168348
Approved by: https://github.com/jiayisunx, https://github.com/mingfeima, https://github.com/albanD, https://github.com/cyyever

Signed-off-by: Azure Linux Security Servicing Account <azurelinux-security@microsoft.com>
Upstream-reference: https://github.com/pytorch/pytorch/commit/999d94b5ede5f4ec111ba7dd144129e2c2725b03.patch
---
aten/src/ATen/native/RNN.cpp | 15 ++++++++++++++-
test/test_nn.py | 33 +++++++++++++++++++++++++++++++++
2 files changed, 47 insertions(+), 1 deletion(-)

diff --git a/aten/src/ATen/native/RNN.cpp b/aten/src/ATen/native/RNN.cpp
index 015e7797..4b3b2f89 100644
--- a/aten/src/ATen/native/RNN.cpp
+++ b/aten/src/ATen/native/RNN.cpp
@@ -689,6 +689,15 @@ void check_rnn_cell_forward_hidden(const Tensor& input, const Tensor& hx, c10::S
"hidden", hidden_label, " has inconsistent hidden_size: got ", hx.sym_size(1), ", expected ", hidden_size);
}

+template<int64_t gate_count>
+inline void check_rnn_cell_forward_weights(const Tensor& w_ih, const Tensor& w_hh, const c10::SymInt& hidden_size){
+ TORCH_CHECK(w_ih.size(0) == gate_count * hidden_size, "weight_ih first dim must be ", gate_count, " * hidden_size = ",
+ gate_count * hidden_size, ", but got ", w_ih.size(0));
+ TORCH_CHECK(w_hh.size(0) == gate_count * hidden_size, "weight_hh first dim must be ", gate_count, " * hidden_size = ",
+ gate_count * hidden_size, ", but got ", w_hh.size(0));
+}
+
+
template<typename hidden_type_tmpl, typename cell_params_tmpl>
struct Cell {
using hidden_type = hidden_type_tmpl;
@@ -1536,8 +1545,9 @@ std::tuple<Tensor, Tensor> lstm_cell(
  const Tensor& b_hh = c10::value_or_else(b_hh_opt, [] {return Tensor();});

  TORCH_CHECK(hx.size() == 2, "lstm_cell expects two hidden states");
-  check_rnn_cell_forward_input(input, w_ih.sym_size(1));
  auto hidden_size = w_hh.sym_size(1);
+  check_rnn_cell_forward_input(input, w_ih.sym_size(1));
+  check_rnn_cell_forward_weights<4>(w_ih, w_hh, hidden_size);
  check_rnn_cell_forward_hidden(input, hx[0], hidden_size, 0);
  check_rnn_cell_forward_hidden(input, hx[1], std::move(hidden_size), 1);
  static at::Tensor undefined;
@@ -1651,6 +1661,7 @@ Tensor gru_cell(

  check_rnn_cell_forward_input(input, w_ih.size(1));
  check_rnn_cell_forward_hidden(input, hx, w_hh.size(1), 0);
+  check_rnn_cell_forward_weights<3>(w_ih, w_hh, w_hh.size(1));
  static at::Tensor undefined;
  return GRUCell<CellParams>{}(input, hx, CellParams{w_ih, w_hh, b_ih, b_hh, undefined});
}
@@ -1664,6 +1675,7 @@ Tensor rnn_tanh_cell(
  const Tensor& b_hh = c10::value_or_else(b_hh_opt, [] {return Tensor();});

  static at::Tensor undefined;
+  check_rnn_cell_forward_weights<1>(w_ih, w_hh, w_hh.size(1));
  check_rnn_cell_forward_input(input, w_ih.size(1));
  check_rnn_cell_forward_hidden(input, hx, w_hh.size(1), 0);
  return SimpleCell<tanh_f, CellParams>{}(input, hx, CellParams{w_ih, w_hh, b_ih, b_hh, undefined});
@@ -1678,6 +1690,7 @@ Tensor rnn_relu_cell(
  const Tensor& b_hh = c10::value_or_else(b_hh_opt, [] {return Tensor();});

  static at::Tensor undefined;
+  check_rnn_cell_forward_weights<1>(w_ih, w_hh, w_hh.size(1));
  check_rnn_cell_forward_input(input, w_ih.size(1));
  check_rnn_cell_forward_hidden(input, hx, w_hh.size(1), 0);
  return SimpleCell<relu_f, CellParams>{}(input, hx, CellParams{w_ih, w_hh, b_ih, b_hh, undefined});
diff --git a/test/test_nn.py b/test/test_nn.py
index 08200e9d..ea69f97d 100644
--- a/test/test_nn.py
+++ b/test/test_nn.py
@@ -7335,6 +7335,39 @@ tensor(..., device='meta', size=(1,), requires_grad=True)""")
with self.assertRaises(RuntimeError):
res = arg_class(*arg_4)

+    def test_rnn_cell_gate_weights_size(self):
+        def test_rnn_cell(cell_fn, gate_count):
+            input_size = 8
+            hidden_size = 16
+            x = torch.randn(4, input_size)
+            hx = torch.randn(4, hidden_size)
+            cx = torch.randn(4, hidden_size)
+
+            w_ih_invalid = torch.randn((gate_count * hidden_size) + 1, 8)
+            w_ih = torch.randn(gate_count * hidden_size, 8)
+            w_hh_invalid = torch.randn((gate_count * hidden_size) + 1, 16)
+            w_hh = torch.randn(gate_count * hidden_size, 16)
+            b_ih = torch.randn(gate_count * hidden_size)
+            b_hh = torch.randn(gate_count * hidden_size)
+
+            if cell_fn is torch.lstm_cell:
+                state = (hx, cx)
+            else:
+                state = hx
+
+            with self.assertRaisesRegex(RuntimeError, "weight_ih"):
+                cell_fn(x, state, w_ih_invalid, w_hh, b_ih, b_hh)
+
+            with self.assertRaisesRegex(RuntimeError, "weight_hh"):
+                cell_fn(x, state, w_ih, w_hh_invalid, b_ih, b_hh)
+        for cell_fn, gate_count in [
+            (torch.lstm_cell, 4),
+            (torch.gru_cell, 3),
+            (torch.rnn_relu_cell, 1),
+            (torch.rnn_tanh_cell, 1),
+        ]:
+            test_rnn_cell(cell_fn, gate_count)
+
class TestFusionEval(TestCase):
    @set_default_dtype(torch.double)
    @given(X=hu.tensor(shapes=((5, 3, 5, 5),), dtype=np.double),
--
2.45.4
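
For context on what the backported checks change in practice, here is a minimal standalone sketch (not part of the patch) that mirrors the added test: with the checks in place, an lstm_cell call whose weight_ih does not have 4 * hidden_size rows is rejected up front with a RuntimeError, where an unpatched 2.2.2 build could instead read past the gate weights and segfault (CVE-2025-3001).

```python
# Minimal repro sketch (not part of the patch); mirrors the added test in test_nn.py.
import torch

input_size, hidden_size, batch = 8, 16, 4
x = torch.randn(batch, input_size)
hx = torch.randn(batch, hidden_size)
cx = torch.randn(batch, hidden_size)

# weight_ih must have 4 * hidden_size rows (one block per LSTM gate);
# the extra row below makes its first dimension invalid.
w_ih_bad = torch.randn(4 * hidden_size + 1, input_size)
w_hh = torch.randn(4 * hidden_size, hidden_size)
b_ih = torch.randn(4 * hidden_size)
b_hh = torch.randn(4 * hidden_size)

try:
    torch.lstm_cell(x, (hx, cx), w_ih_bad, w_hh, b_ih, b_hh)
except RuntimeError as err:
    # Patched build: "weight_ih first dim must be 4 * hidden_size = 64, but got 65"
    print("rejected:", err)
```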

6 changes: 5 additions & 1 deletion SPECS/pytorch/pytorch.spec
@@ -2,7 +2,7 @@
Summary: Tensors and Dynamic neural networks in Python with strong GPU acceleration.
Name: pytorch
Version: 2.2.2
-Release: 9%{?dist}
+Release: 10%{?dist}
License: BSD-3-Clause
Vendor: Microsoft Corporation
Distribution: Azure Linux
@@ -35,6 +35,7 @@ Patch10: CVE-2025-2953.patch
Patch11: CVE-2025-55552.patch
Patch12: CVE-2025-55560.patch
Patch13: CVE-2025-46152.patch
+Patch14: CVE-2025-3001.patch

%description
PyTorch is a Python package that provides two high-level features:
@@ -96,6 +97,9 @@ cp -arf docs %{buildroot}/%{_pkgdocdir}
%{_docdir}/*

%changelog
+* Thu Dec 25 2025 Azure Linux Security Servicing Account <azurelinux-security@microsoft.com> - 2.2.2-10
+- Patch for CVE-2025-3001
+
* Thu Dec 04 2025 Azure Linux Security Servicing Account <azurelinux-security@microsoft.com> - 2.2.2-9
- Patch for CVE-2025-55560 & CVE-2025-46152
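
The %prep section of the spec is collapsed in this view, so how Patch14 gets applied is not shown; assuming the package applies its numbered patches in the usual way, registering the patch plus the release bump above is the whole change. As a hypothetical post-build smoke test (not part of this PR), one could confirm the rebuilt package rejects invalid gate weights for all four cell ops, using the same gate counts (4 for LSTM, 3 for GRU, 1 for vanilla RNN) as the patch's test:

```python
# Hypothetical smoke test against the rebuilt package (not part of this PR).
import torch

cells = [
    (torch.lstm_cell, 4),      # input/forget/cell/output gates
    (torch.gru_cell, 3),       # reset/update/new gates
    (torch.rnn_tanh_cell, 1),
    (torch.rnn_relu_cell, 1),
]

input_size, hidden_size, batch = 8, 16, 4
x = torch.randn(batch, input_size)
hx = torch.randn(batch, hidden_size)

for cell_fn, gates in cells:
    state = (hx, hx.clone()) if cell_fn is torch.lstm_cell else hx
    w_ih_bad = torch.randn(gates * hidden_size + 1, input_size)  # one row too many
    w_hh = torch.randn(gates * hidden_size, hidden_size)
    b = torch.randn(gates * hidden_size)
    try:
        cell_fn(x, state, w_ih_bad, w_hh, b, b)
        raise AssertionError(f"{cell_fn.__name__} accepted an invalid weight_ih")
    except RuntimeError:
        print(f"{cell_fn.__name__}: invalid weight_ih rejected as expected")
```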
