Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
23 changes: 11 additions & 12 deletions drivers/net/ethernet/intel/idpf/idpf_controlq.c
Original file line number Diff line number Diff line change
Expand Up @@ -96,16 +96,15 @@ static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
*/
static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
	/* Serialize against concurrent send/recv/clean users of this queue.
	 * The diff paste had duplicated the pre-conversion mutex_lock()/
	 * mutex_unlock()/mutex_destroy() lines alongside the spinlock calls;
	 * only the spinlock form belongs in the merged code.
	 */
	spin_lock(&cq->cq_lock);

	/* free ring buffers and the ring itself */
	idpf_ctlq_dealloc_ring_res(hw, cq);

	/* Set ring_size to 0 to indicate uninitialized queue */
	cq->ring_size = 0;

	spin_unlock(&cq->cq_lock);
}

/**
Expand Down Expand Up @@ -173,7 +172,7 @@ int idpf_ctlq_add(struct idpf_hw *hw,

idpf_ctlq_init_regs(hw, cq, is_rxq);

mutex_init(&cq->cq_lock);
spin_lock_init(&cq->cq_lock);

list_add(&cq->cq_list, &hw->cq_list_head);

Expand Down Expand Up @@ -272,7 +271,7 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
int err = 0;
int i;

mutex_lock(&cq->cq_lock);
spin_lock(&cq->cq_lock);

/* Ensure there are enough descriptors to send all messages */
num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
Expand Down Expand Up @@ -332,7 +331,7 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
wr32(hw, cq->reg.tail, cq->next_to_use);

err_unlock:
mutex_unlock(&cq->cq_lock);
spin_unlock(&cq->cq_lock);

return err;
}
Expand Down Expand Up @@ -364,7 +363,7 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
if (*clean_count > cq->ring_size)
return -EBADR;

mutex_lock(&cq->cq_lock);
spin_lock(&cq->cq_lock);

ntc = cq->next_to_clean;

Expand Down Expand Up @@ -394,7 +393,7 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,

cq->next_to_clean = ntc;

mutex_unlock(&cq->cq_lock);
spin_unlock(&cq->cq_lock);

/* Return number of descriptors actually cleaned */
*clean_count = i;
Expand Down Expand Up @@ -432,7 +431,7 @@ int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
if (*buff_count > 0)
buffs_avail = true;

mutex_lock(&cq->cq_lock);
spin_lock(&cq->cq_lock);

if (tbp >= cq->ring_size)
tbp = 0;
Expand Down Expand Up @@ -519,7 +518,7 @@ int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
wr32(hw, cq->reg.tail, cq->next_to_post);
}

mutex_unlock(&cq->cq_lock);
spin_unlock(&cq->cq_lock);

/* return the number of buffers that were not posted */
*buff_count = *buff_count - i;
Expand Down Expand Up @@ -552,7 +551,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
return -EBADR;

/* take the lock before we start messing with the ring */
mutex_lock(&cq->cq_lock);
spin_lock(&cq->cq_lock);

ntc = cq->next_to_clean;

Expand Down Expand Up @@ -611,7 +610,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,

cq->next_to_clean = ntc;

mutex_unlock(&cq->cq_lock);
spin_unlock(&cq->cq_lock);

*num_q_msg = i;
if (*num_q_msg == 0)
Expand Down
2 changes: 1 addition & 1 deletion drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
Original file line number Diff line number Diff line change
Expand Up @@ -94,7 +94,7 @@ struct idpf_ctlq_info {

enum idpf_ctlq_type cq_type;
int q_id;
struct mutex cq_lock; /* control queue lock */
spinlock_t cq_lock; /* control queue lock */
/* used for interrupt processing */
u16 next_to_use;
u16 next_to_clean;
Expand Down
12 changes: 8 additions & 4 deletions drivers/net/ethernet/intel/idpf/idpf_lib.c
Original file line number Diff line number Diff line change
Expand Up @@ -2329,8 +2329,12 @@ void *idpf_alloc_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem, u64 size)
struct idpf_adapter *adapter = hw->back;
size_t sz = ALIGN(size, 4096);

mem->va = dma_alloc_coherent(&adapter->pdev->dev, sz,
&mem->pa, GFP_KERNEL);
/* The control queue resources are freed under a spinlock; contiguous
 * pages avoid IOMMU remapping and the use of vmap() (and vunmap() in
 * the dma_free_*() path).
 */
mem->va = dma_alloc_attrs(&adapter->pdev->dev, sz, &mem->pa,
GFP_KERNEL, DMA_ATTR_FORCE_CONTIGUOUS);
mem->size = sz;

return mem->va;
Expand All @@ -2345,8 +2349,8 @@ void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem)
{
struct idpf_adapter *adapter = hw->back;

dma_free_coherent(&adapter->pdev->dev, mem->size,
mem->va, mem->pa);
dma_free_attrs(&adapter->pdev->dev, mem->size,
mem->va, mem->pa, DMA_ATTR_FORCE_CONTIGUOUS);
mem->size = 0;
mem->va = NULL;
mem->pa = 0;
Expand Down