61 changes: 35 additions & 26 deletions drivers/firmware/arm_scpi.c
@@ -815,7 +815,7 @@ static int scpi_init_versions(struct scpi_drvinfo *info)
 		info->firmware_version = le32_to_cpu(caps.platform_version);
 	}
 	/* Ignore error if not implemented */
-	if (scpi_info->is_legacy && ret == -EOPNOTSUPP)
+	if (info->is_legacy && ret == -EOPNOTSUPP)
 		return 0;
 
 	return ret;
@@ -913,33 +913,34 @@ static int scpi_probe(struct platform_device *pdev)
 	struct resource res;
 	struct device *dev = &pdev->dev;
 	struct device_node *np = dev->of_node;
+	struct scpi_drvinfo *scpi_drvinfo;
 
-	scpi_info = devm_kzalloc(dev, sizeof(*scpi_info), GFP_KERNEL);
-	if (!scpi_info)
+	scpi_drvinfo = devm_kzalloc(dev, sizeof(*scpi_drvinfo), GFP_KERNEL);
+	if (!scpi_drvinfo)
 		return -ENOMEM;
 
 	if (of_match_device(legacy_scpi_of_match, &pdev->dev))
-		scpi_info->is_legacy = true;
+		scpi_drvinfo->is_legacy = true;
 
 	count = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
 	if (count < 0) {
 		dev_err(dev, "no mboxes property in '%pOF'\n", np);
 		return -ENODEV;
 	}
 
-	scpi_info->channels = devm_kcalloc(dev, count, sizeof(struct scpi_chan),
-					   GFP_KERNEL);
-	if (!scpi_info->channels)
+	scpi_drvinfo->channels =
+		devm_kcalloc(dev, count, sizeof(struct scpi_chan), GFP_KERNEL);
+	if (!scpi_drvinfo->channels)
 		return -ENOMEM;
 
-	ret = devm_add_action(dev, scpi_free_channels, scpi_info);
+	ret = devm_add_action(dev, scpi_free_channels, scpi_drvinfo);
 	if (ret)
 		return ret;
 
-	for (; scpi_info->num_chans < count; scpi_info->num_chans++) {
+	for (; scpi_drvinfo->num_chans < count; scpi_drvinfo->num_chans++) {
 		resource_size_t size;
-		int idx = scpi_info->num_chans;
-		struct scpi_chan *pchan = scpi_info->channels + idx;
+		int idx = scpi_drvinfo->num_chans;
+		struct scpi_chan *pchan = scpi_drvinfo->channels + idx;
 		struct mbox_client *cl = &pchan->cl;
 		struct device_node *shmem = of_parse_phandle(np, "shmem", idx);
 
@@ -986,45 +987,53 @@ static int scpi_probe(struct platform_device *pdev)
 			return ret;
 	}
 
-	scpi_info->commands = scpi_std_commands;
+	scpi_drvinfo->commands = scpi_std_commands;
 
-	platform_set_drvdata(pdev, scpi_info);
+	platform_set_drvdata(pdev, scpi_drvinfo);
 
-	if (scpi_info->is_legacy) {
+	if (scpi_drvinfo->is_legacy) {
 		/* Replace with legacy variants */
 		scpi_ops.clk_set_val = legacy_scpi_clk_set_val;
-		scpi_info->commands = scpi_legacy_commands;
+		scpi_drvinfo->commands = scpi_legacy_commands;
 
 		/* Fill priority bitmap */
 		for (idx = 0; idx < ARRAY_SIZE(legacy_hpriority_cmds); idx++)
 			set_bit(legacy_hpriority_cmds[idx],
-				scpi_info->cmd_priority);
+				scpi_drvinfo->cmd_priority);
 	}
 
-	ret = scpi_init_versions(scpi_info);
+	scpi_info = scpi_drvinfo;
+
+	ret = scpi_init_versions(scpi_drvinfo);
 	if (ret) {
 		dev_err(dev, "incorrect or no SCP firmware found\n");
+		scpi_info = NULL;
 		return ret;
 	}
 
-	if (scpi_info->is_legacy && !scpi_info->protocol_version &&
-	    !scpi_info->firmware_version)
+	if (scpi_drvinfo->is_legacy && !scpi_drvinfo->protocol_version &&
+	    !scpi_drvinfo->firmware_version)
 		dev_info(dev, "SCP Protocol legacy pre-1.0 firmware\n");
 	else
 		dev_info(dev, "SCP Protocol %lu.%lu Firmware %lu.%lu.%lu version\n",
 			 FIELD_GET(PROTO_REV_MAJOR_MASK,
-				   scpi_info->protocol_version),
+				   scpi_drvinfo->protocol_version),
 			 FIELD_GET(PROTO_REV_MINOR_MASK,
-				   scpi_info->protocol_version),
+				   scpi_drvinfo->protocol_version),
 			 FIELD_GET(FW_REV_MAJOR_MASK,
-				   scpi_info->firmware_version),
+				   scpi_drvinfo->firmware_version),
 			 FIELD_GET(FW_REV_MINOR_MASK,
-				   scpi_info->firmware_version),
+				   scpi_drvinfo->firmware_version),
 			 FIELD_GET(FW_REV_PATCH_MASK,
-				   scpi_info->firmware_version));
-	scpi_info->scpi_ops = &scpi_ops;
+				   scpi_drvinfo->firmware_version));
+
+	scpi_drvinfo->scpi_ops = &scpi_ops;
 
-	return devm_of_platform_populate(dev);
+	ret = devm_of_platform_populate(dev);
+	if (ret)
+		scpi_info = NULL;
+
+	return ret;
 }
 
 static const struct of_device_id scpi_of_match[] = {
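
Note on the change above: probe now builds a local scpi_drvinfo and only publishes it through the scpi_info global at the point scpi_init_versions() needs it, clearing the global again on any failure so scpi_ops users can never observe a half-initialized or about-to-be-freed object. A minimal standalone sketch of that publish/unpublish pattern (illustrative names, not the driver code):

#include <stddef.h>

struct drvinfo { int ready; };

static struct drvinfo *global_info;	/* consumed by get_ops()-style helpers */

static int init_versions(struct drvinfo *info)
{
	return info->ready ? 0 : -1;	/* stand-in for the firmware queries */
}

static int probe(struct drvinfo *fresh)
{
	global_info = fresh;		/* publish: init_versions() needs it */
	if (init_versions(fresh)) {
		global_info = NULL;	/* unpublish on failure so later
					 * lookups cannot reach a stale or
					 * freed object */
		return -1;
	}
	return 0;
}

int main(void)
{
	struct drvinfo bad = { .ready = 0 }, good = { .ready = 1 };

	probe(&bad);	/* global_info ends up NULL again  */
	probe(&good);	/* global_info stays published     */
	return global_info == &good ? 0 : 1;
}
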
7 changes: 4 additions & 3 deletions drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1210,10 +1210,11 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
 void i40e_clear_hw(struct i40e_hw *hw)
 {
 	u32 num_queues, base_queue;
-	u32 num_pf_int;
-	u32 num_vf_int;
+	s32 num_pf_int;
+	s32 num_vf_int;
 	u32 num_vfs;
-	u32 i, j;
+	s32 i;
+	u32 j;
 	u32 val;
 	u32 eol = 0x7ff;
 
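
Why the signedness matters: i40e_clear_hw() bounds its register-clearing loops with expressions like num_pf_int - 2, and with u32 arithmetic that bound wraps to a huge value whenever the device reports fewer than two vectors, walking the loop far out of range. A two-line standalone demonstration of the wrap (hypothetical values, not driver code):

#include <stdio.h>

int main(void)
{
	unsigned int num_u = 1;	/* device reported fewer than 2 vectors */
	int num_s = 1;

	printf("u32 bound: %u\n", num_u - 2);	/* 4294967295: i < bound runs ~4e9 times */
	printf("s32 bound: %d\n", num_s - 2);	/* -1: loop body never runs */
	return 0;
}
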
6 changes: 6 additions & 0 deletions drivers/usb/dwc3/gadget.c
@@ -4300,6 +4300,12 @@ static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
 	if (!count)
 		return IRQ_NONE;
 
+	if (count > evt->length) {
+		dev_err_ratelimited(dwc->dev, "invalid count(%u) > evt->length(%u)\n",
+				    count, evt->length);
+		return IRQ_NONE;
+	}
+
 	evt->count = count;
 	evt->flags |= DWC3_EVENT_PENDING;
 
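
The added check treats the count read back from the controller as untrusted input: a hardware glitch or corrupted register could report more pending event bytes than the event buffer holds, and the handler would then index past the buffer. A hedged sketch of the same clamp outside the driver (hypothetical names and types, not dwc3 code):

#include <stdint.h>
#include <stdio.h>

struct evt_buf {
	uint32_t length;	/* bytes allocated for the event buffer */
};

/* Returns the number of bytes safe to consume, 0 if the count is bogus
 * (mirroring the IRQ_NONE bail-out above).
 */
static uint32_t evt_sanitize_count(const struct evt_buf *evt, uint32_t hw_count)
{
	if (hw_count == 0 || hw_count > evt->length)
		return 0;
	return hw_count;
}

int main(void)
{
	struct evt_buf evt = { .length = 4096 };

	printf("%u\n", evt_sanitize_count(&evt, 64));	/* 64: plausible   */
	printf("%u\n", evt_sanitize_count(&evt, 70000));/* 0: out of range */
	return 0;
}
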
87 changes: 86 additions & 1 deletion fs/namespace.c
@@ -2287,6 +2287,19 @@ static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp)
 	return attach_recursive_mnt(mnt, p, mp, false);
 }
 
+static int may_change_propagation(const struct mount *m)
+{
+	struct mnt_namespace *ns = m->mnt_ns;
+
+	// it must be mounted in some namespace
+	if (IS_ERR_OR_NULL(ns)) // is_mounted()
+		return -EINVAL;
+	// and the caller must be admin in userns of that namespace
+	if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN))
+		return -EPERM;
+	return 0;
+}
+
 /*
  * Sanity check the flags to change_mnt_propagation.
  */
@@ -2323,6 +2336,10 @@ static int do_change_type(struct path *path, int ms_flags)
 		return -EINVAL;
 
 	namespace_lock();
+	err = may_change_propagation(mnt);
+	if (err)
+		goto out_unlock;
+
 	if (type == MS_SHARED) {
 		err = invent_group_ids(mnt, recurse);
 		if (err)
@@ -2695,6 +2712,71 @@ static bool check_for_nsfs_mounts(struct mount *subtree)
 	return ret;
 }
 
+static int do_set_group(struct path *from_path, struct path *to_path)
+{
+	struct mount *from, *to;
+	int err;
+
+	from = real_mount(from_path->mnt);
+	to = real_mount(to_path->mnt);
+
+	namespace_lock();
+
+	err = may_change_propagation(from);
+	if (err)
+		goto out;
+	err = may_change_propagation(to);
+	if (err)
+		goto out;
+
+	err = -EINVAL;
+	/* To and From paths should be mount roots */
+	if (from_path->dentry != from_path->mnt->mnt_root)
+		goto out;
+	if (to_path->dentry != to_path->mnt->mnt_root)
+		goto out;
+
+	/* Setting sharing groups is only allowed across same superblock */
+	if (from->mnt.mnt_sb != to->mnt.mnt_sb)
+		goto out;
+
+	/* From mount root should be wider than To mount root */
+	if (!is_subdir(to->mnt.mnt_root, from->mnt.mnt_root))
+		goto out;
+
+	/* From mount should not have locked children in place of To's root */
+	if (has_locked_children(from, to->mnt.mnt_root))
+		goto out;
+
+	/* Setting sharing groups is only allowed on private mounts */
+	if (IS_MNT_SHARED(to) || IS_MNT_SLAVE(to))
+		goto out;
+
+	/* From should not be private */
+	if (!IS_MNT_SHARED(from) && !IS_MNT_SLAVE(from))
+		goto out;
+
+	if (IS_MNT_SLAVE(from)) {
+		struct mount *m = from->mnt_master;
+
+		list_add(&to->mnt_slave, &from->mnt_slave);
+		to->mnt_master = m;
+	}
+
+	if (IS_MNT_SHARED(from)) {
+		to->mnt_group_id = from->mnt_group_id;
+		list_add(&to->mnt_share, &from->mnt_share);
+		lock_mount_hash();
+		set_mnt_shared(to);
+		unlock_mount_hash();
+	}
+
+	err = 0;
+out:
+	namespace_unlock();
+	return err;
+}
+
 static int do_move_mount(struct path *old_path, struct path *new_path)
 {
 	struct mnt_namespace *ns;
@@ -3679,7 +3761,10 @@ SYSCALL_DEFINE5(move_mount,
 	if (ret < 0)
 		goto out_to;
 
-	ret = do_move_mount(&from_path, &to_path);
+	if (flags & MOVE_MOUNT_SET_GROUP)
+		ret = do_set_group(&from_path, &to_path);
+	else
+		ret = do_move_mount(&from_path, &to_path);
 
 out_to:
 	path_put(&to_path);
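
With do_set_group() wired into the syscall, userspace can attach a private mount to an existing peer group without moving it. A hedged usage sketch (paths are illustrative; per may_change_propagation() above, the caller needs CAP_SYS_ADMIN over both mounts' namespaces, and this assumes a libc new enough to define SYS_move_mount):

#include <fcntl.h>		/* AT_FDCWD */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef MOVE_MOUNT_SET_GROUP
#define MOVE_MOUNT_SET_GROUP 0x00000100	/* from include/uapi/linux/mount.h */
#endif

int main(void)
{
	/* Make the private mount /mnt/b a member of /mnt/a's peer group. */
	long ret = syscall(SYS_move_mount, AT_FDCWD, "/mnt/a",
			   AT_FDCWD, "/mnt/b", MOVE_MOUNT_SET_GROUP);

	if (ret < 0) {
		perror("move_mount(MOVE_MOUNT_SET_GROUP)");
		return 1;
	}
	return 0;
}
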
15 changes: 13 additions & 2 deletions include/net/netfilter/nf_conntrack.h
@@ -300,8 +300,19 @@ static inline bool nf_ct_is_expired(const struct nf_conn *ct)
 /* use after obtaining a reference count */
 static inline bool nf_ct_should_gc(const struct nf_conn *ct)
 {
-	return nf_ct_is_expired(ct) && nf_ct_is_confirmed(ct) &&
-	       !nf_ct_is_dying(ct);
+	if (!nf_ct_is_confirmed(ct))
+		return false;
+
+	/* load ct->timeout after is_confirmed() test.
+	 * Pairs with __nf_conntrack_confirm() which:
+	 * 1. Increases ct->timeout value
+	 * 2. Inserts ct into rcu hlist
+	 * 3. Sets the confirmed bit
+	 * 4. Unlocks the hlist lock
+	 */
+	smp_acquire__after_ctrl_dep();
+
+	return nf_ct_is_expired(ct) && !nf_ct_is_dying(ct);
 }
 
 #define NF_CT_DAY	(86400 * HZ)
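
The subtlety: nf_ct_is_confirmed() is a plain bit test, so without smp_acquire__after_ctrl_dep() the CPU would be free to load ct->timeout before observing the confirmed bit, defeating the writer's ordering listed in the new comment. A standalone C11 analogue of the reader side of that pairing (illustrative only; the kernel primitive upgrades a control dependency, which plain C11 atomics spell as an acquire load):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct entry {
	unsigned long timeout;	/* plain data, valid only once published    */
	atomic_bool confirmed;	/* publication flag, set last by the writer */
};

static bool should_gc(struct entry *e, unsigned long now)
{
	/* acquire load: everything the writer stored before setting the
	 * flag (here, timeout) is visible once this returns true
	 */
	if (!atomic_load_explicit(&e->confirmed, memory_order_acquire))
		return false;

	return now > e->timeout;	/* ordered after the flag test */
}

int main(void)
{
	static struct entry e;	/* zero-initialized: unconfirmed */

	e.timeout = 100;	/* writer: data first...         */
	atomic_store_explicit(&e.confirmed, true, memory_order_release);
				/* ...flag last                  */
	printf("%d\n", should_gc(&e, 200));	/* 1: expired and published */
	return 0;
}
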
3 changes: 2 additions & 1 deletion include/uapi/linux/mount.h
@@ -73,7 +73,8 @@
 #define MOVE_MOUNT_T_SYMLINKS		0x00000010 /* Follow symlinks on to path */
 #define MOVE_MOUNT_T_AUTOMOUNTS		0x00000020 /* Follow automounts on to path */
 #define MOVE_MOUNT_T_EMPTY_PATH		0x00000040 /* Empty to path permitted */
-#define MOVE_MOUNT__MASK		0x00000077
+#define MOVE_MOUNT_SET_GROUP		0x00000100 /* Set sharing group instead */
+#define MOVE_MOUNT__MASK		0x00000177
 
 /*
  * fsopen() flags.
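
A quick arithmetic check on the new mask: 0x177 is exactly the OR of all seven MOVE_MOUNT_* flags, the new 0x100 bit included (the three MOVE_MOUNT_F_* values, 0x01/0x02/0x04, sit just above this hunk in the header):

#include <assert.h>

int main(void)
{
	unsigned int mask =
		0x00000001 | 0x00000002 | 0x00000004 |	/* MOVE_MOUNT_F_*       */
		0x00000010 | 0x00000020 | 0x00000040 |	/* MOVE_MOUNT_T_*       */
		0x00000100;				/* MOVE_MOUNT_SET_GROUP */

	assert(mask == 0x00000177);	/* MOVE_MOUNT__MASK */
	return 0;
}
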
16 changes: 8 additions & 8 deletions net/core/skbuff.c
@@ -5391,18 +5391,18 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
 	if (skb_cloned(to))
 		return false;
 
-	/* In general, avoid mixing slab allocated and page_pool allocated
-	 * pages within the same SKB. However when @to is not pp_recycle and
-	 * @from is cloned, we can transition frag pages from page_pool to
-	 * reference counted.
-	 *
-	 * On the other hand, don't allow coalescing two pp_recycle SKBs if
-	 * @from is cloned, in case the SKB is using page_pool fragment
+	/* In general, avoid mixing page_pool and non-page_pool allocated
+	 * pages within the same SKB. Additionally avoid dealing with clones
+	 * with page_pool pages, in case the SKB is using page_pool fragment
 	 * references (PP_FLAG_PAGE_FRAG). Since we only take full page
 	 * references for cloned SKBs at the moment that would result in
 	 * inconsistent reference counts.
+	 * In theory we could take full references if @from is cloned and
+	 * !@to->pp_recycle but its tricky (due to potential race with
+	 * the clone disappearing) and rare, so not worth dealing with.
 	 */
-	if (to->pp_recycle != (from->pp_recycle && !skb_cloned(from)))
+	if (to->pp_recycle != from->pp_recycle ||
+	    (from->pp_recycle && skb_cloned(from)))
 		return false;
 
 	if (len <= skb_tailroom(to)) {
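
Spelled out, the rewritten condition refuses to coalesce in exactly two cases: mixed page_pool/non-page_pool pages, and a page_pool @from that is cloned (its clones may hold PP_FLAG_PAGE_FRAG fragment references while coalescing only takes full-page references). A hedged restatement as a standalone predicate (plain bools, not skb code):

#include <stdbool.h>
#include <stdio.h>

/* Returns true when skb_try_coalesce() must give up, per the logic above. */
static bool pp_refuse_coalesce(bool to_pp, bool from_pp, bool from_cloned)
{
	if (to_pp != from_pp)		/* mixed page origins in one SKB */
		return true;
	if (from_pp && from_cloned)	/* clone may hold fragment refs  */
		return true;
	return false;
}

int main(void)
{
	/* the old expression allowed this case; it is the one the fix closes */
	printf("%d\n", pp_refuse_coalesce(false, true, true));	/* 1: refused */
	return 0;
}
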
26 changes: 20 additions & 6 deletions net/netfilter/nf_conntrack_core.c
@@ -1083,6 +1083,12 @@ static int nf_ct_resolve_clash_harder(struct sk_buff *skb, u32 repl_idx)
 
 	hlist_nulls_add_head_rcu(&loser_ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
 				 &nf_conntrack_hash[repl_idx]);
+	/* confirmed bit must be set after hlist add, not before:
+	 * loser_ct can still be visible to other cpu due to
+	 * SLAB_TYPESAFE_BY_RCU.
+	 */
+	smp_mb__before_atomic();
+	set_bit(IPS_CONFIRMED_BIT, &loser_ct->status);
 
 	NF_CT_STAT_INC(net, clash_resolve);
 	return NF_ACCEPT;
@@ -1220,8 +1226,6 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 	 * user context, else we insert an already 'dead' hash, blocking
 	 * further use of that particular connection -JM.
 	 */
-	ct->status |= IPS_CONFIRMED;
-
 	if (unlikely(nf_ct_is_dying(ct))) {
 		NF_CT_STAT_INC(net, insert_failed);
 		goto dying;
@@ -1253,19 +1257,29 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 		}
 	}
 
-	/* Timer relative to confirmation time, not original
+	/* Timeout is relative to confirmation time, not original
 	   setting time, otherwise we'd get timer wrap in
 	   weird delay cases. */
 	ct->timeout += nfct_time_stamp;
 
 	__nf_conntrack_insert_prepare(ct);
 
 	/* Since the lookup is lockless, hash insertion must be done after
-	 * starting the timer and setting the CONFIRMED bit. The RCU barriers
-	 * guarantee that no other CPU can find the conntrack before the above
-	 * stores are visible.
+	 * setting ct->timeout. The RCU barriers guarantee that no other CPU
+	 * can find the conntrack before the above stores are visible.
 	 */
 	__nf_conntrack_hash_insert(ct, hash, reply_hash);
 
+	/* IPS_CONFIRMED unset means 'ct not (yet) in hash', conntrack lookups
+	 * skip entries that lack this bit. This happens when a CPU is looking
+	 * at a stale entry that is being recycled due to SLAB_TYPESAFE_BY_RCU
+	 * or when another CPU encounters this entry right after the insertion
+	 * but before the set-confirm-bit below. This bit must not be set until
+	 * after __nf_conntrack_hash_insert().
+	 */
+	smp_mb__before_atomic();
+	set_bit(IPS_CONFIRMED_BIT, &ct->status);
+
 	nf_conntrack_double_unlock(hash, reply_hash);
 	local_bh_enable();
 
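
Both set_bit() sites move publication after the hash insert because of SLAB_TYPESAFE_BY_RCU: an RCU walker can land on a conntrack that was freed and recycled, so the confirmed bit doubles as the "this entry is really in the hash" marker that lookups key off. A compilable sketch of that revalidation idea, with hypothetical names and none of the real refcounting (the reader-side barrier half of the pairing is sketched after the nf_conntrack.h hunk above):

#include <stddef.h>
#include <stdio.h>

struct obj {
	unsigned long key;
	unsigned long status;	/* CONFIRMED set only after hash insert */
};

#define CONFIRMED 1UL

/* candidate came from an RCU hash walk and may have been recycled */
static struct obj *revalidate(struct obj *candidate, unsigned long key)
{
	if (!(candidate->status & CONFIRMED))
		return NULL;	/* not (or no longer) published: skip */
	if (candidate->key != key)
		return NULL;	/* recycled for another flow: skip    */
	return candidate;
}

int main(void)
{
	struct obj fresh = { .key = 42, .status = 0 };	/* pre-set_bit state */

	printf("%p\n", (void *)revalidate(&fresh, 42));	/* NULL: skipped */
	fresh.status |= CONFIRMED;			/* published     */
	printf("%p\n", (void *)revalidate(&fresh, 42));	/* now found     */
	return 0;
}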