
Commit 8b5c04f
idpf: convert control queue mutex to a spinlock
jira VULN-136700
cve CVE-2025-38392
commit-author Ahmed Zaki <ahmed.zaki@intel.com>
commit b2beb5b

With VIRTCHNL2_CAP_MACFILTER enabled, the following warning is generated
on module load:

[  324.701677] BUG: sleeping function called from invalid context at kernel/locking/mutex.c:578
[  324.701684] in_atomic(): 1, irqs_disabled(): 0, non_block: 0, pid: 1582, name: NetworkManager
[  324.701689] preempt_count: 201, expected: 0
[  324.701693] RCU nest depth: 0, expected: 0
[  324.701697] 2 locks held by NetworkManager/1582:
[  324.701702]  #0: ffffffff9f7be770 (rtnl_mutex){....}-{3:3}, at: rtnl_newlink+0x791/0x21e0
[  324.701730]  #1: ff1100216c380368 (_xmit_ETHER){....}-{2:2}, at: __dev_open+0x3f0/0x870
[  324.701749] Preemption disabled at:
[  324.701752] [<ffffffff9cd23b9d>] __dev_open+0x3dd/0x870
[  324.701765] CPU: 30 UID: 0 PID: 1582 Comm: NetworkManager Not tainted 6.15.0-rc5+ #2 PREEMPT(voluntary)
[  324.701771] Hardware name: Intel Corporation M50FCP2SBSTD/M50FCP2SBSTD, BIOS SE5C741.86B.01.01.0001.2211140926 11/14/2022
[  324.701774] Call Trace:
[  324.701777]  <TASK>
[  324.701779]  dump_stack_lvl+0x5d/0x80
[  324.701788]  ? __dev_open+0x3dd/0x870
[  324.701793]  __might_resched.cold+0x1ef/0x23d
<..>
[  324.701818]  __mutex_lock+0x113/0x1b80
<..>
[  324.701917]  idpf_ctlq_clean_sq+0xad/0x4b0 [idpf]
[  324.701935]  ? kasan_save_track+0x14/0x30
[  324.701941]  idpf_mb_clean+0x143/0x380 [idpf]
<..>
[  324.701991]  idpf_send_mb_msg+0x111/0x720 [idpf]
[  324.702009]  idpf_vc_xn_exec+0x4cc/0x990 [idpf]
[  324.702021]  ? rcu_is_watching+0x12/0xc0
[  324.702035]  idpf_add_del_mac_filters+0x3ed/0xb50 [idpf]
<..>
[  324.702122]  __hw_addr_sync_dev+0x1cf/0x300
[  324.702126]  ? find_held_lock+0x32/0x90
[  324.702134]  idpf_set_rx_mode+0x317/0x390 [idpf]
[  324.702152]  __dev_open+0x3f8/0x870
[  324.702159]  ? __pfx___dev_open+0x10/0x10
[  324.702174]  __dev_change_flags+0x443/0x650
<..>
[  324.702208]  netif_change_flags+0x80/0x160
[  324.702218]  do_setlink.isra.0+0x16a0/0x3960
<..>
[  324.702349]  rtnl_newlink+0x12fd/0x21e0

The sequence is as follows:

rtnl_newlink()->
  __dev_change_flags()->
    __dev_open()->
      dev_set_rx_mode() ->            # disables BH and grabs "dev->addr_list_lock"
        idpf_set_rx_mode() ->         # proceed only if VIRTCHNL2_CAP_MACFILTER is ON
          __dev_uc_sync() ->
            idpf_add_mac_filter ->
              idpf_add_del_mac_filters ->
                idpf_send_mb_msg() ->
                  idpf_mb_clean() ->
                    idpf_ctlq_clean_sq()   # mutex_lock(cq_lock)

Fix by converting cq_lock to a spinlock. All operations under the new
lock are safe except freeing the DMA memory, which may use vunmap(). Fix
that by requesting contiguous physical memory for the DMA mapping.

Fixes: a251eee ("idpf: add SRIOV support and other ndo_ops")
Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
Signed-off-by: Ahmed Zaki <ahmed.zaki@intel.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Tested-by: Samuel Salin <Samuel.salin@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
(cherry picked from commit b2beb5b)
Signed-off-by: Roxana Nicolescu <rnicolescu@ciq.com>
1 parent a22d387 commit 8b5c04f
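
For context on the splat above: dev_set_rx_mode() runs under netif_addr_lock_bh(), so everything below it executes with bottom halves disabled and the preemption count raised. In that state a mutex, which may sleep, is illegal, while a spinlock, which busy-waits, is fine. A minimal sketch of the rule, using hypothetical demo_* names rather than driver code:

#include <linux/mutex.h>
#include <linux/spinlock.h>

static DEFINE_MUTEX(demo_mutex);
static DEFINE_SPINLOCK(demo_spinlock);

/* Called with BH disabled (e.g. under netif_addr_lock_bh()):
 * sleeping primitives here trigger the "sleeping function called
 * from invalid context" splat shown in the trace above.
 */
static void demo_atomic_path(void)
{
	/* mutex_lock(&demo_mutex);  <- would splat: mutex_lock() may sleep */

	spin_lock(&demo_spinlock);  /* safe: spinlocks never sleep */
	/* ... touch the control queue ring ... */
	spin_unlock(&demo_spinlock);
}

The trade-off is that everything done under the spinlock must itself be non-sleeping, which is what forces the DMA allocation change in idpf_lib.c below.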

File tree

3 files changed (+20, -17 lines)

drivers/net/ethernet/intel/idpf/idpf_controlq.c
11 additions, 12 deletions

@@ -96,16 +96,15 @@ static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
  */
 static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
 {
-	mutex_lock(&cq->cq_lock);
+	spin_lock(&cq->cq_lock);
 
 	/* free ring buffers and the ring itself */
 	idpf_ctlq_dealloc_ring_res(hw, cq);
 
 	/* Set ring_size to 0 to indicate uninitialized queue */
 	cq->ring_size = 0;
 
-	mutex_unlock(&cq->cq_lock);
-	mutex_destroy(&cq->cq_lock);
+	spin_unlock(&cq->cq_lock);
 }
 
 /**
@@ -173,7 +172,7 @@ int idpf_ctlq_add(struct idpf_hw *hw,
 
 	idpf_ctlq_init_regs(hw, cq, is_rxq);
 
-	mutex_init(&cq->cq_lock);
+	spin_lock_init(&cq->cq_lock);
 
 	list_add(&cq->cq_list, &hw->cq_list_head);
 
@@ -272,7 +271,7 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
 	int err = 0;
 	int i;
 
-	mutex_lock(&cq->cq_lock);
+	spin_lock(&cq->cq_lock);
 
 	/* Ensure there are enough descriptors to send all messages */
 	num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
@@ -332,7 +331,7 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
 	wr32(hw, cq->reg.tail, cq->next_to_use);
 
 err_unlock:
-	mutex_unlock(&cq->cq_lock);
+	spin_unlock(&cq->cq_lock);
 
 	return err;
 }
@@ -364,7 +363,7 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
 	if (*clean_count > cq->ring_size)
 		return -EBADR;
 
-	mutex_lock(&cq->cq_lock);
+	spin_lock(&cq->cq_lock);
 
 	ntc = cq->next_to_clean;
 
@@ -394,7 +393,7 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
 
 	cq->next_to_clean = ntc;
 
-	mutex_unlock(&cq->cq_lock);
+	spin_unlock(&cq->cq_lock);
 
 	/* Return number of descriptors actually cleaned */
 	*clean_count = i;
@@ -432,7 +431,7 @@ int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
 	if (*buff_count > 0)
 		buffs_avail = true;
 
-	mutex_lock(&cq->cq_lock);
+	spin_lock(&cq->cq_lock);
 
 	if (tbp >= cq->ring_size)
 		tbp = 0;
@@ -519,7 +518,7 @@ int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
 		wr32(hw, cq->reg.tail, cq->next_to_post);
 	}
 
-	mutex_unlock(&cq->cq_lock);
+	spin_unlock(&cq->cq_lock);
 
 	/* return the number of buffers that were not posted */
 	*buff_count = *buff_count - i;
@@ -552,7 +551,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
 		return -EBADR;
 
 	/* take the lock before we start messing with the ring */
-	mutex_lock(&cq->cq_lock);
+	spin_lock(&cq->cq_lock);
 
 	ntc = cq->next_to_clean;
 
@@ -611,7 +610,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
 
 	cq->next_to_clean = ntc;
 
-	mutex_unlock(&cq->cq_lock);
+	spin_unlock(&cq->cq_lock);
 
 	*num_q_msg = i;
 	if (*num_q_msg == 0)

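A side note on the first hunk above: unlike mutexes, spinlocks have no teardown primitive, which is why the mutex_destroy() call in idpf_ctlq_shutdown() is dropped rather than replaced one-for-one. A before/after sketch of the conversion pattern, using a hypothetical demo_ctx rather than driver code:

#include <linux/spinlock.h>

struct demo_ctx {
	spinlock_t lock;		/* was: struct mutex lock; */
};

static void demo_init(struct demo_ctx *ctx)
{
	spin_lock_init(&ctx->lock);	/* was: mutex_init(&ctx->lock); */
}

static void demo_shutdown(struct demo_ctx *ctx)
{
	spin_lock(&ctx->lock);
	/* ... release resources that must not race with other users ... */
	spin_unlock(&ctx->lock);
	/* no counterpart to mutex_destroy() is needed for spinlocks */
}
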
drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
1 addition, 1 deletion

@@ -94,7 +94,7 @@ struct idpf_ctlq_info {
 
 	enum idpf_ctlq_type cq_type;
 	int q_id;
-	struct mutex cq_lock;	/* control queue lock */
+	spinlock_t cq_lock;	/* control queue lock */
 	/* used for interrupt processing */
 	u16 next_to_use;
 	u16 next_to_clean;

drivers/net/ethernet/intel/idpf/idpf_lib.c
8 additions, 4 deletions

@@ -2329,8 +2329,12 @@ void *idpf_alloc_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem, u64 size)
 	struct idpf_adapter *adapter = hw->back;
 	size_t sz = ALIGN(size, 4096);
 
-	mem->va = dma_alloc_coherent(&adapter->pdev->dev, sz,
-				     &mem->pa, GFP_KERNEL);
+	/* The control queue resources are freed under a spinlock; contiguous
+	 * pages avoid IOMMU remapping and the use of vmap (and vunmap in the
+	 * dma_free_*() path).
+	 */
+	mem->va = dma_alloc_attrs(&adapter->pdev->dev, sz, &mem->pa,
+				  GFP_KERNEL, DMA_ATTR_FORCE_CONTIGUOUS);
 	mem->size = sz;
 
 	return mem->va;
@@ -2345,8 +2349,8 @@ void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem)
 {
 	struct idpf_adapter *adapter = hw->back;
 
-	dma_free_coherent(&adapter->pdev->dev, mem->size,
-			  mem->va, mem->pa);
+	dma_free_attrs(&adapter->pdev->dev, mem->size,
+		       mem->va, mem->pa, DMA_ATTR_FORCE_CONTIGUOUS);
 	mem->size = 0;
 	mem->va = NULL;
 	mem->pa = 0;

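Why the allocator change is needed: with an IOMMU present, dma_alloc_coherent() may assemble the buffer from scattered pages stitched together with vmap(), and dma_free_coherent() then tears that mapping down with vunmap(), which can sleep. Freeing under the new spinlock would be just as illegal as the old mutex path was under addr_list_lock. DMA_ATTR_FORCE_CONTIGUOUS requests physically contiguous backing so the free path never needs vunmap(). A minimal sketch of the pattern, using a hypothetical demo_buf rather than the driver's idpf_dma_mem:

#include <linux/dma-mapping.h>

struct demo_buf {
	void *va;
	dma_addr_t pa;
	size_t size;
};

static int demo_alloc(struct device *dev, struct demo_buf *buf, size_t size)
{
	/* Contiguous backing keeps vunmap() out of the free path, so,
	 * per the commit's reasoning, demo_free() below stays safe to
	 * call under a spinlock.
	 */
	buf->va = dma_alloc_attrs(dev, size, &buf->pa, GFP_KERNEL,
				  DMA_ATTR_FORCE_CONTIGUOUS);
	if (!buf->va)
		return -ENOMEM;
	buf->size = size;
	return 0;
}

static void demo_free(struct device *dev, struct demo_buf *buf)
{
	/* The attrs must match those passed at allocation time */
	dma_free_attrs(dev, buf->size, buf->va, buf->pa,
		       DMA_ATTR_FORCE_CONTIGUOUS);
	buf->va = NULL;
}

Note that the allocation itself still uses GFP_KERNEL and may sleep; only the free path needs to be atomic-safe here, since idpf allocates the rings outside the locked region.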