61 changes: 35 additions & 26 deletions drivers/firmware/arm_scpi.c
@@ -815,7 +815,7 @@ static int scpi_init_versions(struct scpi_drvinfo *info)
 		info->firmware_version = le32_to_cpu(caps.platform_version);
 	}
 	/* Ignore error if not implemented */
-	if (scpi_info->is_legacy && ret == -EOPNOTSUPP)
+	if (info->is_legacy && ret == -EOPNOTSUPP)
 		return 0;
 
 	return ret;
@@ -913,33 +913,34 @@ static int scpi_probe(struct platform_device *pdev)
 	struct resource res;
 	struct device *dev = &pdev->dev;
 	struct device_node *np = dev->of_node;
+	struct scpi_drvinfo *scpi_drvinfo;
 
-	scpi_info = devm_kzalloc(dev, sizeof(*scpi_info), GFP_KERNEL);
-	if (!scpi_info)
+	scpi_drvinfo = devm_kzalloc(dev, sizeof(*scpi_drvinfo), GFP_KERNEL);
+	if (!scpi_drvinfo)
 		return -ENOMEM;
 
 	if (of_match_device(legacy_scpi_of_match, &pdev->dev))
-		scpi_info->is_legacy = true;
+		scpi_drvinfo->is_legacy = true;
 
 	count = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
 	if (count < 0) {
 		dev_err(dev, "no mboxes property in '%pOF'\n", np);
 		return -ENODEV;
 	}
 
-	scpi_info->channels = devm_kcalloc(dev, count, sizeof(struct scpi_chan),
-					   GFP_KERNEL);
-	if (!scpi_info->channels)
+	scpi_drvinfo->channels =
+		devm_kcalloc(dev, count, sizeof(struct scpi_chan), GFP_KERNEL);
+	if (!scpi_drvinfo->channels)
 		return -ENOMEM;
 
-	ret = devm_add_action(dev, scpi_free_channels, scpi_info);
+	ret = devm_add_action(dev, scpi_free_channels, scpi_drvinfo);
 	if (ret)
 		return ret;
 
-	for (; scpi_info->num_chans < count; scpi_info->num_chans++) {
+	for (; scpi_drvinfo->num_chans < count; scpi_drvinfo->num_chans++) {
 		resource_size_t size;
-		int idx = scpi_info->num_chans;
-		struct scpi_chan *pchan = scpi_info->channels + idx;
+		int idx = scpi_drvinfo->num_chans;
+		struct scpi_chan *pchan = scpi_drvinfo->channels + idx;
 		struct mbox_client *cl = &pchan->cl;
 		struct device_node *shmem = of_parse_phandle(np, "shmem", idx);
 
@@ -986,45 +987,53 @@ static int scpi_probe(struct platform_device *pdev)
 		return ret;
 	}
 
-	scpi_info->commands = scpi_std_commands;
+	scpi_drvinfo->commands = scpi_std_commands;
 
-	platform_set_drvdata(pdev, scpi_info);
+	platform_set_drvdata(pdev, scpi_drvinfo);
 
-	if (scpi_info->is_legacy) {
+	if (scpi_drvinfo->is_legacy) {
 		/* Replace with legacy variants */
 		scpi_ops.clk_set_val = legacy_scpi_clk_set_val;
-		scpi_info->commands = scpi_legacy_commands;
+		scpi_drvinfo->commands = scpi_legacy_commands;
 
 		/* Fill priority bitmap */
 		for (idx = 0; idx < ARRAY_SIZE(legacy_hpriority_cmds); idx++)
 			set_bit(legacy_hpriority_cmds[idx],
-				scpi_info->cmd_priority);
+				scpi_drvinfo->cmd_priority);
 	}
 
-	ret = scpi_init_versions(scpi_info);
+	scpi_info = scpi_drvinfo;
+
+	ret = scpi_init_versions(scpi_drvinfo);
 	if (ret) {
 		dev_err(dev, "incorrect or no SCP firmware found\n");
+		scpi_info = NULL;
 		return ret;
 	}
 
-	if (scpi_info->is_legacy && !scpi_info->protocol_version &&
-	    !scpi_info->firmware_version)
+	if (scpi_drvinfo->is_legacy && !scpi_drvinfo->protocol_version &&
+	    !scpi_drvinfo->firmware_version)
 		dev_info(dev, "SCP Protocol legacy pre-1.0 firmware\n");
 	else
 		dev_info(dev, "SCP Protocol %lu.%lu Firmware %lu.%lu.%lu version\n",
 			 FIELD_GET(PROTO_REV_MAJOR_MASK,
-				   scpi_info->protocol_version),
+				   scpi_drvinfo->protocol_version),
 			 FIELD_GET(PROTO_REV_MINOR_MASK,
-				   scpi_info->protocol_version),
+				   scpi_drvinfo->protocol_version),
 			 FIELD_GET(FW_REV_MAJOR_MASK,
-				   scpi_info->firmware_version),
+				   scpi_drvinfo->firmware_version),
 			 FIELD_GET(FW_REV_MINOR_MASK,
-				   scpi_info->firmware_version),
+				   scpi_drvinfo->firmware_version),
 			 FIELD_GET(FW_REV_PATCH_MASK,
-				   scpi_info->firmware_version));
-	scpi_info->scpi_ops = &scpi_ops;
+				   scpi_drvinfo->firmware_version));
+
+	scpi_drvinfo->scpi_ops = &scpi_ops;
 
-	return devm_of_platform_populate(dev);
+	ret = devm_of_platform_populate(dev);
+	if (ret)
+		scpi_info = NULL;
+
+	return ret;
 }
 
 static const struct of_device_id scpi_of_match[] = {
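
The net effect of this hunk pair is that the file-scope `scpi_info` (handed out to consumers via `get_scpi_ops()`) is only assigned after allocation has succeeded, and is reset to NULL on every later failure path, so a failed probe can no longer leave a dangling pointer to devm-freed memory. A minimal sketch of that publish-on-success pattern, with hypothetical names (`my_info`, `my_global`, `my_setup`), not the driver's own code:

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct my_info {
	bool ready;
};

static struct my_info *my_global;	/* what a get-ops style accessor returns */

static int my_setup(struct my_info *info)
{
	info->ready = true;
	return 0;
}

static int my_probe(struct platform_device *pdev)
{
	struct my_info *info;
	int ret;

	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;		/* my_global was never published */

	my_global = info;		/* publish only once allocation succeeded */

	ret = my_setup(info);
	if (ret) {
		my_global = NULL;	/* unpublish; devm frees info on failure */
		return ret;
	}

	return 0;
}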
7 changes: 4 additions & 3 deletions drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1070,10 +1070,11 @@ int i40e_pf_reset(struct i40e_hw *hw)
 void i40e_clear_hw(struct i40e_hw *hw)
 {
 	u32 num_queues, base_queue;
-	u32 num_pf_int;
-	u32 num_vf_int;
+	s32 num_pf_int;
+	s32 num_vf_int;
 	u32 num_vfs;
-	u32 i, j;
+	s32 i;
+	u32 j;
 	u32 val;
 	u32 eol = 0x7ff;
 
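
The type changes matter because `i40e_clear_hw()` later derives loop bounds by subtracting constants from these register-derived counts (e.g. `num_pf_int - 2`); with `u32`, a device reporting a count smaller than the subtrahend makes the bound wrap to ~4 billion, and the loop then writes MMIO registers far out of range. A self-contained demonstration of the wraparound (illustrative names, not the driver's):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t n_u = 1;	/* e.g. a small interrupt count read from a register */
	int32_t n_s = 1;

	/* unsigned: 1 - 2 wraps to 4294967295, so "i < n - 2" spins ~4G times */
	printf("u32: n - 2 = %u\n", n_u - 2);

	/* signed: 1 - 2 is -1, so the loop below never executes */
	printf("s32: n - 2 = %d\n", n_s - 2);

	for (int32_t i = 0; i < n_s - 2; i++)
		;	/* body skipped, as intended */

	return 0;
}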
23 changes: 11 additions & 12 deletions drivers/net/ethernet/intel/idpf/idpf_controlq.c
@@ -96,16 +96,15 @@ static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
  */
 static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
 {
-	mutex_lock(&cq->cq_lock);
+	spin_lock(&cq->cq_lock);
 
 	/* free ring buffers and the ring itself */
 	idpf_ctlq_dealloc_ring_res(hw, cq);
 
 	/* Set ring_size to 0 to indicate uninitialized queue */
 	cq->ring_size = 0;
 
-	mutex_unlock(&cq->cq_lock);
-	mutex_destroy(&cq->cq_lock);
+	spin_unlock(&cq->cq_lock);
 }
 
 /**
@@ -173,7 +172,7 @@ int idpf_ctlq_add(struct idpf_hw *hw,
 
 	idpf_ctlq_init_regs(hw, cq, is_rxq);
 
-	mutex_init(&cq->cq_lock);
+	spin_lock_init(&cq->cq_lock);
 
 	list_add(&cq->cq_list, &hw->cq_list_head);
 
@@ -272,7 +271,7 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
 	int err = 0;
 	int i;
 
-	mutex_lock(&cq->cq_lock);
+	spin_lock(&cq->cq_lock);
 
 	/* Ensure there are enough descriptors to send all messages */
 	num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
@@ -332,7 +331,7 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
 	wr32(hw, cq->reg.tail, cq->next_to_use);
 
 err_unlock:
-	mutex_unlock(&cq->cq_lock);
+	spin_unlock(&cq->cq_lock);
 
 	return err;
 }
@@ -364,7 +363,7 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
 	if (*clean_count > cq->ring_size)
 		return -EBADR;
 
-	mutex_lock(&cq->cq_lock);
+	spin_lock(&cq->cq_lock);
 
 	ntc = cq->next_to_clean;
 
@@ -394,7 +393,7 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
 
 	cq->next_to_clean = ntc;
 
-	mutex_unlock(&cq->cq_lock);
+	spin_unlock(&cq->cq_lock);
 
 	/* Return number of descriptors actually cleaned */
 	*clean_count = i;
@@ -432,7 +431,7 @@ int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
 	if (*buff_count > 0)
 		buffs_avail = true;
 
-	mutex_lock(&cq->cq_lock);
+	spin_lock(&cq->cq_lock);
 
 	if (tbp >= cq->ring_size)
 		tbp = 0;
@@ -519,7 +518,7 @@ int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
 		wr32(hw, cq->reg.tail, cq->next_to_post);
 	}
 
-	mutex_unlock(&cq->cq_lock);
+	spin_unlock(&cq->cq_lock);
 
 	/* return the number of buffers that were not posted */
 	*buff_count = *buff_count - i;
@@ -552,7 +551,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
 		return -EBADR;
 
 	/* take the lock before we start messing with the ring */
-	mutex_lock(&cq->cq_lock);
+	spin_lock(&cq->cq_lock);
 
 	ntc = cq->next_to_clean;
 
@@ -611,7 +610,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
 
 	cq->next_to_clean = ntc;
 
-	mutex_unlock(&cq->cq_lock);
+	spin_unlock(&cq->cq_lock);
 
 	*num_q_msg = i;
 	if (*num_q_msg == 0)
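
A mutex can sleep and therefore must not be taken where blocking is forbidden; a spinlock busy-waits and can be. The conversion also explains the dropped `mutex_destroy()` in `idpf_ctlq_shutdown()`: spinlocks have no destroy counterpart. A hedged sketch of the swap on a hypothetical `demo_queue`, not the idpf structures:

#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_queue {
	spinlock_t lock;	/* was: struct mutex lock */
	u16 ring_size;
};

static void demo_queue_init(struct demo_queue *q)
{
	spin_lock_init(&q->lock);	/* was: mutex_init(&q->lock) */
}

static void demo_queue_shutdown(struct demo_queue *q)
{
	spin_lock(&q->lock);		/* was: mutex_lock() */
	q->ring_size = 0;
	spin_unlock(&q->lock);		/* was: mutex_unlock() + mutex_destroy() */
}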
2 changes: 1 addition & 1 deletion drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
@@ -94,7 +94,7 @@ struct idpf_ctlq_info {
 
 	enum idpf_ctlq_type cq_type;
 	int q_id;
-	struct mutex cq_lock;	/* control queue lock */
+	spinlock_t cq_lock;	/* control queue lock */
 	/* used for interrupt processing */
 	u16 next_to_use;
 	u16 next_to_clean;
12 changes: 8 additions & 4 deletions drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -2329,8 +2329,12 @@ void *idpf_alloc_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem, u64 size)
 	struct idpf_adapter *adapter = hw->back;
 	size_t sz = ALIGN(size, 4096);
 
-	mem->va = dma_alloc_coherent(&adapter->pdev->dev, sz,
-				     &mem->pa, GFP_KERNEL);
+	/* The control queue resources are freed under a spinlock; contiguous
+	 * pages avoid IOMMU remapping and the use of vmap() (and vunmap() in
+	 * the dma_free_*() path).
+	 */
+	mem->va = dma_alloc_attrs(&adapter->pdev->dev, sz, &mem->pa,
+				  GFP_KERNEL, DMA_ATTR_FORCE_CONTIGUOUS);
 	mem->size = sz;
 
 	return mem->va;
@@ -2345,8 +2349,8 @@ void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem)
 {
 	struct idpf_adapter *adapter = hw->back;
 
-	dma_free_coherent(&adapter->pdev->dev, mem->size,
-			  mem->va, mem->pa);
+	dma_free_attrs(&adapter->pdev->dev, mem->size,
+		       mem->va, mem->pa, DMA_ATTR_FORCE_CONTIGUOUS);
 	mem->size = 0;
 	mem->va = NULL;
 	mem->pa = 0;
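
`DMA_ATTR_FORCE_CONTIGUOUS` asks the DMA layer for physically contiguous pages, so the buffer is never built with `vmap()` and the matching free never needs `vunmap()`, which can sleep and therefore must not run under the spinlock the control queue now holds while freeing. A minimal sketch of the paired calls (everything except the `dma_*` API is hypothetical):

#include <linux/dma-mapping.h>

static void *demo_dma_alloc(struct device *dev, size_t size, dma_addr_t *pa)
{
	/* attrs on alloc and free must match */
	return dma_alloc_attrs(dev, ALIGN(size, 4096), pa, GFP_KERNEL,
			       DMA_ATTR_FORCE_CONTIGUOUS);
}

static void demo_dma_free(struct device *dev, size_t size, void *va,
			  dma_addr_t pa)
{
	dma_free_attrs(dev, size, va, pa, DMA_ATTR_FORCE_CONTIGUOUS);
}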
7 changes: 7 additions & 0 deletions drivers/net/usb/smsc75xx.c
@@ -2199,6 +2199,13 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 		size = (rx_cmd_a & RX_CMD_A_LEN) - RXW_PADDING;
 		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
 
+		if (unlikely(size > skb->len)) {
+			netif_dbg(dev, rx_err, dev->net,
+				  "size err rx_cmd_a=0x%08x\n",
+				  rx_cmd_a);
+			return 0;
+		}
+
 		if (unlikely(rx_cmd_a & RX_CMD_A_RED)) {
 			netif_dbg(dev, rx_err, dev->net,
 				  "Error rx_cmd_a=0x%08x\n", rx_cmd_a);
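
The added branch applies a general rule for parsing device-supplied headers: a length field must be validated against the bytes actually received before it is trusted, otherwise a buggy or malicious device can induce an out-of-bounds read. A self-contained illustration (the mask and names are hypothetical, not the SMSC register layout):

#include <stddef.h>
#include <stdint.h>

/* returns 1 if the frame may be processed, 0 to drop it */
static int demo_check_frame(const uint8_t *buf, size_t received, uint32_t hdr)
{
	size_t claimed = hdr & 0x3fff;	/* length field from the device */

	if (claimed > received)
		return 0;	/* header claims more data than arrived: drop */

	/* safe to touch at most 'claimed' bytes of buf from here on */
	(void)buf;
	return 1;
}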
15 changes: 13 additions & 2 deletions include/net/netfilter/nf_conntrack.h
@@ -300,8 +300,19 @@ static inline bool nf_ct_is_expired(const struct nf_conn *ct)
 /* use after obtaining a reference count */
 static inline bool nf_ct_should_gc(const struct nf_conn *ct)
 {
-	return nf_ct_is_expired(ct) && nf_ct_is_confirmed(ct) &&
-	       !nf_ct_is_dying(ct);
+	if (!nf_ct_is_confirmed(ct))
+		return false;
+
+	/* load ct->timeout after is_confirmed() test.
+	 * Pairs with __nf_conntrack_confirm() which:
+	 * 1. Increases ct->timeout value
+	 * 2. Inserts ct into rcu hlist
+	 * 3. Sets the confirmed bit
+	 * 4. Unlocks the hlist lock
+	 */
+	smp_acquire__after_ctrl_dep();
+
+	return nf_ct_is_expired(ct) && !nf_ct_is_dying(ct);
 }
 
 #define NF_CT_DAY	(86400 * HZ)
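
The barrier works because the confirmed-bit load plus the early return forms a control dependency, which by itself orders later stores but not later loads; `smp_acquire__after_ctrl_dep()` upgrades the branch to an acquire so the subsequent `ct->timeout` read in `nf_ct_is_expired()` cannot be satisfied ahead of the flag test. A hedged sketch of the idiom with hypothetical names:

#include <linux/bitops.h>
#include <linux/jiffies.h>

#define DEMO_CONFIRMED_BIT 0

struct demo_entry {
	unsigned long status;
	unsigned long timeout;
};

static bool demo_should_gc(const struct demo_entry *e)
{
	if (!test_bit(DEMO_CONFIRMED_BIT, &e->status))
		return false;		/* load + branch: control dependency */

	/* Order the timeout load after the status load; the control
	 * dependency alone would only order later stores.
	 */
	smp_acquire__after_ctrl_dep();

	return time_after(jiffies, READ_ONCE(e->timeout));
}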
26 changes: 20 additions & 6 deletions net/netfilter/nf_conntrack_core.c
@@ -1087,6 +1087,12 @@ static int nf_ct_resolve_clash_harder(struct sk_buff *skb, u32 repl_idx)
 
 	hlist_nulls_add_head_rcu(&loser_ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
 				 &nf_conntrack_hash[repl_idx]);
+	/* confirmed bit must be set after hlist add, not before:
+	 * loser_ct can still be visible to other cpu due to
+	 * SLAB_TYPESAFE_BY_RCU.
+	 */
+	smp_mb__before_atomic();
+	set_bit(IPS_CONFIRMED_BIT, &loser_ct->status);
 
 	NF_CT_STAT_INC(net, clash_resolve);
 	return NF_ACCEPT;
@@ -1224,8 +1230,6 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 	 * user context, else we insert an already 'dead' hash, blocking
 	 * further use of that particular connection -JM.
 	 */
-	ct->status |= IPS_CONFIRMED;
-
 	if (unlikely(nf_ct_is_dying(ct))) {
 		NF_CT_STAT_INC(net, insert_failed);
 		goto dying;
@@ -1257,19 +1261,29 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 		}
 	}
 
-	/* Timer relative to confirmation time, not original
+	/* Timeout is relative to confirmation time, not original
 	   setting time, otherwise we'd get timer wrap in
 	   weird delay cases. */
 	ct->timeout += nfct_time_stamp;
 
 	__nf_conntrack_insert_prepare(ct);
 
 	/* Since the lookup is lockless, hash insertion must be done after
-	 * starting the timer and setting the CONFIRMED bit. The RCU barriers
-	 * guarantee that no other CPU can find the conntrack before the above
-	 * stores are visible.
+	 * setting ct->timeout. The RCU barriers guarantee that no other CPU
+	 * can find the conntrack before the above stores are visible.
 	 */
 	__nf_conntrack_hash_insert(ct, hash, reply_hash);
+
+	/* IPS_CONFIRMED unset means 'ct not (yet) in hash', conntrack lookups
+	 * skip entries that lack this bit. This happens when a CPU is looking
+	 * at a stale entry that is being recycled due to SLAB_TYPESAFE_BY_RCU
+	 * or when another CPU encounters this entry right after the insertion
+	 * but before the set-confirm-bit below. This bit must not be set until
+	 * after __nf_conntrack_hash_insert().
+	 */
+	smp_mb__before_atomic();
+	set_bit(IPS_CONFIRMED_BIT, &ct->status);
+
 	nf_conntrack_double_unlock(hash, reply_hash);
 	local_bh_enable();
 
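
Both hunks enforce one publication order: initialise the entry, insert it into the lockless hash, and only then set IPS_CONFIRMED, with `smp_mb__before_atomic()` making the earlier stores visible before the non-barriered `set_bit()`. A reader that tests the flag first (as `nf_ct_should_gc()` above now does) can therefore never act on a half-initialised or recycled entry. A hedged sketch with hypothetical names:

#include <linux/rculist_nulls.h>

#define DEMO_CONFIRMED_BIT 0

struct demo_entry {
	struct hlist_nulls_node node;
	unsigned long status;
	unsigned long timeout;
};

static void demo_publish(struct demo_entry *e, struct hlist_nulls_head *head)
{
	e->timeout = jiffies + 30 * HZ;			/* 1. initialise fields */
	hlist_nulls_add_head_rcu(&e->node, head);	/* 2. insert            */

	/* set_bit() implies no barrier; order steps 1-2 before step 3 */
	smp_mb__before_atomic();
	set_bit(DEMO_CONFIRMED_BIT, &e->status);	/* 3. publish the flag  */
}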
14 changes: 10 additions & 4 deletions net/sched/sch_hfsc.c
@@ -1649,10 +1649,16 @@ hfsc_dequeue(struct Qdisc *sch)
 		if (cl->qdisc->q.qlen != 0) {
 			/* update ed */
 			next_len = qdisc_peek_len(cl->qdisc);
-			if (realtime)
-				update_ed(cl, next_len);
-			else
-				update_d(cl, next_len);
+			/* Check queue length again since some qdisc implementations
+			 * (e.g., netem/codel) might empty the queue during the peek
+			 * operation.
+			 */
+			if (cl->qdisc->q.qlen != 0) {
+				if (realtime)
+					update_ed(cl, next_len);
+				else
+					update_d(cl, next_len);
+			}
 		} else {
 			/* the class becomes passive */
 			eltree_remove(cl);
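
`qdisc_peek_len()` is not guaranteed to be side-effect free: children whose `->peek()` is the dequeue-and-cache helper actually run their dequeue path, and qdiscs such as netem or codel can drop packets there, leaving the queue empty (as the diff's own comment notes). Re-checking `q.qlen` before `update_ed()`/`update_d()` avoids scheduling the class against a packet that no longer exists. A self-contained sketch of the re-check pattern (all names hypothetical, not the hfsc code):

#include <stdbool.h>

struct demo_queue {
	int qlen;
};

/* a "peek" that, like netem's, may drop entries and shrink the queue */
int demo_peek_len(struct demo_queue *q);

static bool demo_schedule_next(struct demo_queue *q)
{
	int next_len;

	if (q->qlen == 0)
		return false;

	next_len = demo_peek_len(q);

	if (q->qlen == 0)	/* re-check: the peek may have emptied q */
		return false;

	return next_len > 0;
}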