diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 002e0cf025f5..b9aac3dd5125 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -123,6 +123,7 @@ config ARM
select RTC_LIB
select SET_FS
select SYS_SUPPORTS_APM_EMULATION
+ select HAVE_LIVEPATCH_WO_FTRACE
# Above selects are sorted alphabetically; please add new ones
# according to that. Thanks.
help
@@ -2050,3 +2051,5 @@ source "arch/arm/crypto/Kconfig"
endif
source "arch/arm/Kconfig.assembler"
+
+source "kernel/livepatch/Kconfig"
diff --git a/arch/arm/include/asm/livepatch.h b/arch/arm/include/asm/livepatch.h
new file mode 100644
index 000000000000..216078d8c2b0
--- /dev/null
+++ b/arch/arm/include/asm/livepatch.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * livepatch.h - arm-specific Kernel Live Patching Core
+ *
+ * Copyright (C) 2018 Huawei Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ASM_ARM_LIVEPATCH_H
+#define _ASM_ARM_LIVEPATCH_H
+
+#include
+
+struct klp_patch;
+struct klp_func;
+
+/* kernel livepatch instruction barrier */
+#define klp_smp_isb() isb()
+
+int arch_klp_patch_func(struct klp_func *func);
+void arch_klp_unpatch_func(struct klp_func *func);
+
+#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY
+int klp_check_calltrace(struct klp_patch *patch, int enable);
+#endif
+
+#endif /* _ASM_ARM_LIVEPATCH_H */
diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h
index 4b0df09cbe67..ac3df84b935c 100644
--- a/arch/arm/include/asm/module.h
+++ b/arch/arm/include/asm/module.h
@@ -20,7 +20,7 @@ enum {
#endif
struct mod_plt_sec {
- struct elf32_shdr *plt;
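+	/* index of the PLT section in the module's section headers */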
+ int plt_shndx;
int plt_count;
};
@@ -35,7 +35,8 @@ struct mod_arch_specific {
};
struct module;
-u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val);
+u32 get_module_plt(struct module *mod, Elf32_Shdr *sechdrs,
+ unsigned long loc, Elf32_Addr val);
#ifdef CONFIG_THUMB2_KERNEL
#define HAVE_ARCH_KALLSYMS_SYMBOL_VALUE
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 536b6b979f63..23b41d0f7539 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -135,6 +135,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp *,
#define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */
#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
#define TIF_SECCOMP 7 /* seccomp syscall filtering active */
+#define TIF_PATCH_PENDING 8 /* pending live patching update */
#define TIF_USING_IWMMXT 17
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
@@ -149,6 +150,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp *,
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
#define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
+#define _TIF_PATCH_PENDING (1 << TIF_PATCH_PENDING)
/* Checks for any syscall work in entry-common.S */
#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 89e5d864e923..0cb0fec0bf97 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -55,6 +55,7 @@ obj-$(CONFIG_ARM_ARCH_TIMER) += arch_timer.o
obj-$(CONFIG_FUNCTION_TRACER) += entry-ftrace.o
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o insn.o patch.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o insn.o patch.o
+obj-$(CONFIG_LIVEPATCH) += livepatch.o insn.o patch.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o insn.o patch.o
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
# Main staffs in KPROBES are in arch/arm/probes/ .
diff --git a/arch/arm/kernel/livepatch.c b/arch/arm/kernel/livepatch.c
new file mode 100644
index 000000000000..221275714899
--- /dev/null
+++ b/arch/arm/kernel/livepatch.c
@@ -0,0 +1,557 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * livepatch.c - arm-specific Kernel Live Patching Core
+ *
+ * Copyright (C) 2018 Huawei Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifdef ARM_INSN_SIZE
+#error "ARM_INSN_SIZE have been redefined, please check"
+#else
+#define ARM_INSN_SIZE 4
+#endif
+
+#ifdef CONFIG_ARM_MODULE_PLTS
+#define LJMP_INSN_SIZE 3
+#define MAX_SIZE_TO_CHECK (LJMP_INSN_SIZE * ARM_INSN_SIZE)
+#define CHECK_JUMP_RANGE LJMP_INSN_SIZE
+
+#else
+#define MAX_SIZE_TO_CHECK ARM_INSN_SIZE
+#define CHECK_JUMP_RANGE 1
+#endif
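+
+/*
+ * MAX_SIZE_TO_CHECK covers the bytes of the old function that may be
+ * overwritten by the patch: with CONFIG_ARM_MODULE_PLTS a long jump of
+ * LJMP_INSN_SIZE words (ldr pc, [pc]; nop; literal target address) may be
+ * used, otherwise a single branch instruction is enough.
+ */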
+
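+/*
+ * One node per patched function: keeps the stack of applied klp_funcs and
+ * the original instruction(s) saved for restoring on unpatch.
+ */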
+struct klp_func_node {
+ struct list_head node;
+ struct list_head func_stack;
+ void *old_func;
+#ifdef CONFIG_ARM_MODULE_PLTS
+ u32 old_insns[LJMP_INSN_SIZE];
+#else
+ u32 old_insn;
+#endif
+};
+
+static LIST_HEAD(klp_func_list);
+
+static struct klp_func_node *klp_find_func_node(void *old_func)
+{
+ struct klp_func_node *func_node;
+
+ list_for_each_entry(func_node, &klp_func_list, node) {
+ if (func_node->old_func == old_func)
+ return func_node;
+ }
+
+ return NULL;
+}
+
+#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY
+/*
+ * The instruction set on arm is A32.
+ * The instruction of BL is xxxx1011xxxxxxxxxxxxxxxxxxxxxxxx, where the first
+ * four bits cannot be 1111.
+ * The instruction of BLX(immediate) is 1111101xxxxxxxxxxxxxxxxxxxxxxxxx.
+ * The instruction of BLX(register) is xxxx00010010xxxxxxxxxxxx0011xxxx, where
+ * the first four bits cannot be 1111.
+ */
+static bool is_jump_insn(u32 insn)
+{
+ if (((insn & 0x0f000000) == 0x0b000000) &&
+ ((insn & 0xf0000000) != 0xf0000000))
+ return true;
+ if ((insn & 0xfe000000) == 0xfa000000)
+ return true;
+ if (((insn & 0x0ff000f0) == 0x01200030) &&
+ ((insn & 0xf0000000) != 0xf0000000))
+ return true;
+ return false;
+}
+
+struct klp_func_list {
+ struct klp_func_list *next;
+ unsigned long func_addr;
+ unsigned long func_size;
+ const char *func_name;
+ int force;
+};
+
+struct walk_stackframe_args {
+ int enable;
+ struct klp_func_list *check_funcs;
+ int ret;
+};
+
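+/*
+ * For KLP_STACK_OPTIMIZE functions only the first MAX_SIZE_TO_CHECK bytes
+ * (the area actually overwritten by the jump) are checked on the stacks;
+ * otherwise the whole old function is checked.
+ */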
+static inline unsigned long klp_size_to_check(unsigned long func_size,
+ int force)
+{
+ unsigned long size = func_size;
+
+ if (force == KLP_STACK_OPTIMIZE && size > MAX_SIZE_TO_CHECK)
+ size = MAX_SIZE_TO_CHECK;
+ return size;
+}
+
+static inline int klp_compare_address(unsigned long pc, unsigned long func_addr,
+ const char *func_name, unsigned long check_size)
+{
+ if (pc >= func_addr && pc < func_addr + check_size) {
+ pr_err("func %s is in use!\n", func_name);
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static bool check_jump_insn(unsigned long func_addr)
+{
+ unsigned long i;
+ u32 *insn = (u32*)func_addr;
+
+ for (i = 0; i < CHECK_JUMP_RANGE; i++) {
+ if (is_jump_insn(*insn)) {
+ return true;
+ }
+ insn++;
+ }
+ return false;
+}
+
+static int add_func_to_list(struct klp_func_list **funcs, struct klp_func_list **func,
+ unsigned long func_addr, unsigned long func_size, const char *func_name,
+ int force)
+{
+ if (*func == NULL) {
+ *funcs = (struct klp_func_list*)kzalloc(sizeof(**funcs), GFP_ATOMIC);
+ if (!(*funcs))
+ return -ENOMEM;
+ *func = *funcs;
+ } else {
+ (*func)->next = (struct klp_func_list*)kzalloc(sizeof(**funcs),
+ GFP_ATOMIC);
+ if (!(*func)->next)
+ return -ENOMEM;
+ *func = (*func)->next;
+ }
+ (*func)->func_addr = func_addr;
+ (*func)->func_size = func_size;
+ (*func)->func_name = func_name;
+ (*func)->force = force;
+ (*func)->next = NULL;
+ return 0;
+}
+
+static int klp_check_activeness_func(struct klp_patch *patch, int enable,
+ struct klp_func_list **check_funcs)
+{
+ int ret;
+ struct klp_object *obj;
+ struct klp_func_node *func_node;
+ struct klp_func *func;
+ unsigned long func_addr, func_size;
+ struct klp_func_list *pcheck = NULL;
+
+ for (obj = patch->objs; obj->funcs; obj++) {
+ for (func = obj->funcs; func->old_name; func++) {
+ if (enable) {
+ if (func->force == KLP_ENFORCEMENT)
+ continue;
+ /*
+ * When enabling, check the currently
+ * active functions.
+ */
+ func_node = klp_find_func_node(func->old_func);
+ if (!func_node ||
+ list_empty(&func_node->func_stack)) {
+ /*
+ * Not patched yet on this function
+ * [ the original one ]
+ */
+ func_addr = (unsigned long)func->old_func;
+ func_size = func->old_size;
+ } else {
+ /*
+ * Previously patched function
+ * [ the active one ]
+ */
+ struct klp_func *prev;
+
+ prev = list_first_or_null_rcu(
+ &func_node->func_stack,
+ struct klp_func, stack_node);
+ func_addr = (unsigned long)prev->new_func;
+ func_size = prev->new_size;
+ }
+ /*
+ * When preemption is disabled and the
+ * replacement area does not contain a jump
+ * instruction, the migration thread is
+ * scheduled to run stop machine only after the
+ * execution of instructions to be replaced is
+ * complete.
+ */
+ if (IS_ENABLED(CONFIG_PREEMPTION) ||
+ (func->force == KLP_NORMAL_FORCE) ||
+ check_jump_insn(func_addr)) {
+ ret = add_func_to_list(check_funcs, &pcheck,
+ func_addr, func_size,
+ func->old_name, func->force);
+ if (ret)
+ return ret;
+ }
+ } else {
+ /*
+ * When disabling, check both the previously
+ * patched function and the function itself
+ * which is to be unpatched.
+ */
+ func_node = klp_find_func_node(func->old_func);
+ if (!func_node)
+ return -EINVAL;
+#ifdef CONFIG_PREEMPTION
+ /*
+ * No scheduling point in the replacement
+ * instructions. Therefore, when preemption is
+ * not enabled, atomic execution is performed
+ * and these instructions will not appear on
+ * the stack.
+ */
+ if (list_is_singular(&func_node->func_stack)) {
+ func_addr = (unsigned long)func->old_func;
+ func_size = func->old_size;
+ } else {
+ struct klp_func *prev;
+
+ prev = list_first_or_null_rcu(
+ &func_node->func_stack,
+ struct klp_func, stack_node);
+ func_addr = (unsigned long)prev->new_func;
+ func_size = prev->new_size;
+ }
+ ret = add_func_to_list(check_funcs, &pcheck,
+ func_addr, func_size,
+ func->old_name, 0);
+ if (ret)
+ return ret;
+#endif
+ func_addr = (unsigned long)func->new_func;
+ func_size = func->new_size;
+ ret = add_func_to_list(check_funcs, &pcheck,
+ func_addr, func_size,
+ func->old_name, 0);
+ if (ret)
+ return ret;
+ }
+ }
+ }
+ return 0;
+}
+
+static bool check_func_list(struct klp_func_list *funcs, int *ret, unsigned long pc)
+{
+ while (funcs != NULL) {
+ *ret = klp_compare_address(pc, funcs->func_addr, funcs->func_name,
+ klp_size_to_check(funcs->func_size, funcs->force));
+ if (*ret) {
+ return true;
+ }
+ funcs = funcs->next;
+ }
+ return false;
+}
+
+static int klp_check_jump_func(struct stackframe *frame, void *data)
+{
+ struct walk_stackframe_args *args = data;
+ struct klp_func_list *check_funcs = args->check_funcs;
+
+ return check_func_list(check_funcs, &args->ret, frame->pc);
+}
+
+static void free_list(struct klp_func_list **funcs)
+{
+ struct klp_func_list *p;
+
+ while (*funcs != NULL) {
+ p = *funcs;
+ *funcs = (*funcs)->next;
+ kfree(p);
+ }
+}
+
+int klp_check_calltrace(struct klp_patch *patch, int enable)
+{
+ struct task_struct *g, *t;
+ struct stackframe frame;
+ int ret = 0;
+ struct klp_func_list *check_funcs = NULL;
+ struct walk_stackframe_args args = {
+ .ret = 0
+ };
+
+ ret = klp_check_activeness_func(patch, enable, &check_funcs);
+ if (ret)
+ goto out;
+ args.check_funcs = check_funcs;
+
+ for_each_process_thread(g, t) {
+ if (t == current) {
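+			/*
+			 * The current task is running this function, so
+			 * backtrace from our own live frame rather than the
+			 * stale saved thread context.
+			 */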
+ frame.fp = (unsigned long)__builtin_frame_address(0);
+ frame.sp = current_stack_pointer;
+ frame.lr = (unsigned long)__builtin_return_address(0);
+ frame.pc = (unsigned long)klp_check_calltrace;
+ } else if (strncmp(t->comm, "migration/", 10) == 0) {
+ /*
+ * current task on another CPU
+ * We call this in stop_machine, so the current
+ * task of every other CPU is the migration thread;
+ * just compare task_comm here, because we can't
+ * get cpu_curr(task_cpu(t)). This assumes that no
+ * other thread will pretend to be a stopper via
+ * task_comm.
+ */
+ continue;
+ } else {
+ frame.fp = thread_saved_fp(t);
+ frame.sp = thread_saved_sp(t);
+ frame.lr = 0; /* recovered from the stack */
+ frame.pc = thread_saved_pc(t);
+ }
+ if (check_funcs != NULL) {
+ walk_stackframe(&frame, klp_check_jump_func, &args);
+ if (args.ret) {
+ ret = args.ret;
+ pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm);
+ show_stack(t, NULL, KERN_INFO);
+ goto out;
+ }
+ }
+ }
+
+out:
+ free_list(&check_funcs);
+ return ret;
+}
+#endif
+
+static inline bool offset_in_range(unsigned long pc, unsigned long addr,
+ long range)
+{
+ long offset = addr - pc;
+
+ return (offset >= -range && offset < range);
+}
+
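+/* Read one A32 instruction without faulting on a bad address. */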
+long arm_insn_read(void *addr, u32 *insnp)
+{
+ long ret;
+ u32 val;
+
+ ret = copy_from_kernel_nofault(&val, addr, ARM_INSN_SIZE);
+ if (!ret)
+ *insnp = le32_to_cpu(val);
+
+ return ret;
+}
+
+int arch_klp_patch_func(struct klp_func *func)
+{
+ struct klp_func_node *func_node;
+ unsigned long pc, new_addr;
+ u32 insn;
+ long ret;
+#ifdef CONFIG_ARM_MODULE_PLTS
+ int i;
+ u32 insns[LJMP_INSN_SIZE];
+#endif
+
+ func_node = klp_find_func_node(func->old_func);
+ if (!func_node) {
+ func_node = func->func_node;
+ if (!func_node)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&func_node->func_stack);
+ func_node->old_func = func->old_func;
+#ifdef CONFIG_ARM_MODULE_PLTS
+ for (i = 0; i < LJMP_INSN_SIZE; i++) {
+ ret = arm_insn_read((u32 *)func->old_func + i,
+ &func_node->old_insns[i]);
+ if (ret)
+ break;
+ }
+#else
+ ret = arm_insn_read(func->old_func, &func_node->old_insn);
+#endif
+ if (ret) {
+ return -EPERM;
+ }
+ list_add_rcu(&func_node->node, &klp_func_list);
+ }
+
+ list_add_rcu(&func->stack_node, &func_node->func_stack);
+
+ pc = (unsigned long)func->old_func;
+ new_addr = (unsigned long)func->new_func;
+
+#ifdef CONFIG_ARM_MODULE_PLTS
+ if (!offset_in_range(pc, new_addr, SZ_32M)) {
+ /*
+ * [0] LDR PC, [PC+8]
+ * [4] nop
+ * [8] new_addr_to_jump
+ */
+ insns[0] = __opcode_to_mem_arm(0xe59ff000);
+ insns[1] = __opcode_to_mem_arm(0xe320f000);
+ insns[2] = new_addr;
+
+ for (i = 0; i < LJMP_INSN_SIZE; i++)
+ __patch_text(((u32 *)pc) + i, insns[i]);
+
+ } else {
+ insn = arm_gen_branch(pc, new_addr);
+ __patch_text((void *)pc, insn);
+ }
+#else
+ insn = arm_gen_branch(pc, new_addr);
+ __patch_text((void *)pc, insn);
+#endif
+
+ return 0;
+}
+
+void arch_klp_unpatch_func(struct klp_func *func)
+{
+ struct klp_func_node *func_node;
+ struct klp_func *next_func;
+ unsigned long pc, new_addr;
+ u32 insn;
+#ifdef CONFIG_ARM_MODULE_PLTS
+ int i;
+ u32 insns[LJMP_INSN_SIZE];
+#endif
+
+ func_node = klp_find_func_node(func->old_func);
+ pc = (unsigned long)func_node->old_func;
+ if (list_is_singular(&func_node->func_stack)) {
+#ifdef CONFIG_ARM_MODULE_PLTS
+ for (i = 0; i < LJMP_INSN_SIZE; i++) {
+ insns[i] = func_node->old_insns[i];
+ __patch_text(((u32 *)pc) + i, insns[i]);
+ }
+#else
+ insn = func_node->old_insn;
+ __patch_text((void *)pc, insn);
+#endif
+ list_del_rcu(&func->stack_node);
+ list_del_rcu(&func_node->node);
+ } else {
+ list_del_rcu(&func->stack_node);
+ next_func = list_first_or_null_rcu(&func_node->func_stack,
+ struct klp_func, stack_node);
+
+ new_addr = (unsigned long)next_func->new_func;
+#ifdef CONFIG_ARM_MODULE_PLTS
+ if (!offset_in_range(pc, new_addr, SZ_32M)) {
+ /*
+ * [0] LDR PC, [PC+8]
+ * [4] nop
+ * [8] new_addr_to_jump
+ */
+ insns[0] = __opcode_to_mem_arm(0xe59ff000);
+ insns[1] = __opcode_to_mem_arm(0xe320f000);
+ insns[2] = new_addr;
+
+ for (i = 0; i < LJMP_INSN_SIZE; i++)
+ __patch_text(((u32 *)pc) + i, insns[i]);
+
+ } else {
+ insn = arm_gen_branch(pc, new_addr);
+ __patch_text((void *)pc, insn);
+ }
+#else
+ insn = arm_gen_branch(pc, new_addr);
+ __patch_text((void *)pc, insn);
+#endif
+ }
+}
+
+#ifdef CONFIG_ARM_MODULE_PLTS
+/* return 0 if the func can be patched */
+int arch_klp_func_can_patch(struct klp_func *func)
+{
+ unsigned long pc = (unsigned long)func->old_func;
+ unsigned long new_addr = (unsigned long)func->new_func;
+ unsigned long old_size = func->old_size;
+
+ if (!old_size)
+ return -EINVAL;
+
+ if (!offset_in_range(pc, new_addr, SZ_32M) &&
+ (old_size < LJMP_INSN_SIZE * ARM_INSN_SIZE)) {
+ pr_err("func %s size less than limit\n", func->old_name);
+ return -EPERM;
+ }
+ return 0;
+}
+#else
+int arch_klp_func_can_patch(struct klp_func *func)
+{
+ return 0;
+}
+#endif /* #ifdef CONFIG_ARM_MODULE_PLTS */
+
+void arch_klp_mem_prepare(struct klp_patch *patch)
+{
+ struct klp_object *obj;
+ struct klp_func *func;
+
+ klp_for_each_object(patch, obj) {
+ klp_for_each_func(obj, func) {
+ func->func_node = kzalloc(sizeof(struct klp_func_node),
+ GFP_ATOMIC);
+ }
+ }
+}
+
+void arch_klp_mem_recycle(struct klp_patch *patch)
+{
+ struct klp_object *obj;
+ struct klp_func *func;
+ struct klp_func_node *func_node;
+
+ klp_for_each_object(patch, obj) {
+ klp_for_each_func(obj, func) {
+ func_node = func->func_node;
+ if (func_node && list_is_singular(&func_node->func_stack)) {
+ kfree(func_node);
+ func->func_node = NULL;
+ }
+ }
+ }
+}
diff --git a/arch/arm/kernel/module-plts.c b/arch/arm/kernel/module-plts.c
index 6e626abaefc5..1dbdf2726505 100644
--- a/arch/arm/kernel/module-plts.c
+++ b/arch/arm/kernel/module-plts.c
@@ -34,12 +34,14 @@ static bool in_init(const struct module *mod, unsigned long loc)
return loc - (u32)mod->init_layout.base < mod->init_layout.size;
}
-u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
+u32 get_module_plt(struct module *mod, Elf32_Shdr *sechdrs,
+ unsigned long loc, Elf32_Addr val)
{
struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
&mod->arch.init;
- struct plt_entries *plt = (struct plt_entries *)pltsec->plt->sh_addr;
+ struct plt_entries *plt =
+ (struct plt_entries *)sechdrs[pltsec->plt_shndx].sh_addr;
int idx = 0;
/*
@@ -60,7 +62,8 @@ u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
}
pltsec->plt_count++;
- BUG_ON(pltsec->plt_count * PLT_ENT_SIZE > pltsec->plt->sh_size);
+ BUG_ON(pltsec->plt_count * PLT_ENT_SIZE >
+ sechdrs[pltsec->plt_shndx].sh_size);
if (!idx)
/* Populate a new set of entries */
@@ -193,21 +196,23 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
unsigned long init_plts = 0;
Elf32_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;
Elf32_Sym *syms = NULL;
+ Elf32_Shdr *core_pltsec, *init_pltsec;
+ int i = 0;
/*
* To store the PLTs, we expand the .text section for core module code
* and for initialization code.
*/
- for (s = sechdrs; s < sechdrs_end; ++s) {
+ for (s = sechdrs; s < sechdrs_end; ++s, ++i) {
if (strcmp(".plt", secstrings + s->sh_name) == 0)
- mod->arch.core.plt = s;
+ mod->arch.core.plt_shndx = i;
else if (strcmp(".init.plt", secstrings + s->sh_name) == 0)
- mod->arch.init.plt = s;
+ mod->arch.init.plt_shndx = i;
else if (s->sh_type == SHT_SYMTAB)
syms = (Elf32_Sym *)s->sh_addr;
}
- if (!mod->arch.core.plt || !mod->arch.init.plt) {
+ if (!mod->arch.core.plt_shndx || !mod->arch.init.plt_shndx) {
pr_err("%s: module PLT section(s) missing\n", mod->name);
return -ENOEXEC;
}
@@ -239,21 +244,23 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
numrels, s->sh_info);
}
- mod->arch.core.plt->sh_type = SHT_NOBITS;
- mod->arch.core.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
- mod->arch.core.plt->sh_addralign = L1_CACHE_BYTES;
- mod->arch.core.plt->sh_size = round_up(core_plts * PLT_ENT_SIZE,
- sizeof(struct plt_entries));
+ core_pltsec = sechdrs + mod->arch.core.plt_shndx;
+ core_pltsec->sh_type = SHT_NOBITS;
+ core_pltsec->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
+ core_pltsec->sh_addralign = L1_CACHE_BYTES;
+ core_pltsec->sh_size = round_up(core_plts * PLT_ENT_SIZE,
+ sizeof(struct plt_entries));
mod->arch.core.plt_count = 0;
- mod->arch.init.plt->sh_type = SHT_NOBITS;
- mod->arch.init.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
- mod->arch.init.plt->sh_addralign = L1_CACHE_BYTES;
- mod->arch.init.plt->sh_size = round_up(init_plts * PLT_ENT_SIZE,
- sizeof(struct plt_entries));
+ init_pltsec = sechdrs + mod->arch.init.plt_shndx;
+ init_pltsec->sh_type = SHT_NOBITS;
+ init_pltsec->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
+ init_pltsec->sh_addralign = L1_CACHE_BYTES;
+ init_pltsec->sh_size = round_up(init_plts * PLT_ENT_SIZE,
+ sizeof(struct plt_entries));
mod->arch.init.plt_count = 0;
pr_debug("%s: plt=%x, init.plt=%x\n", __func__,
- mod->arch.core.plt->sh_size, mod->arch.init.plt->sh_size);
+ core_pltsec->sh_size, init_pltsec->sh_size);
return 0;
}
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index e15444b25ca0..30b938e7ea8d 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -142,7 +142,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
if (IS_ENABLED(CONFIG_ARM_MODULE_PLTS) &&
(offset <= (s32)0xfe000000 ||
offset >= (s32)0x02000000))
- offset = get_module_plt(module, loc,
+ offset = get_module_plt(module, sechdrs, loc,
offset + loc + 8)
- loc - 8;
@@ -255,7 +255,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
if (IS_ENABLED(CONFIG_ARM_MODULE_PLTS) &&
(offset <= (s32)0xff000000 ||
offset >= (s32)0x01000000))
- offset = get_module_plt(module, loc,
+ offset = get_module_plt(module, sechdrs, loc,
offset + loc + 4)
- loc - 4;
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 4500f203110a..673bfe201648 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -202,6 +202,7 @@ config ARM64
select SWIOTLB
select SYSCTL_EXCEPTION_TRACE
select THREAD_INFO_IN_TASK
+ select HAVE_LIVEPATCH_WO_FTRACE
help
ARM 64-bit (AArch64) Linux support.
@@ -347,6 +348,8 @@ config KASAN_SHADOW_OFFSET
source "arch/arm64/Kconfig.platforms"
+source "kernel/livepatch/Kconfig"
+
menu "Kernel Features"
menu "ARM errata workarounds via the alternatives framework"
diff --git a/arch/arm64/configs/altra_5.10_defconfig b/arch/arm64/configs/altra_5.10_defconfig
index 6ee777373e32..78adb428c82a 100644
--- a/arch/arm64/configs/altra_5.10_defconfig
+++ b/arch/arm64/configs/altra_5.10_defconfig
@@ -48,6 +48,7 @@ CONFIG_ARCH_THUNDER=y
CONFIG_ARCH_THUNDER2=y
CONFIG_ARCH_VEXPRESS=y
CONFIG_ARCH_XGENE=y
+CONFIG_LIVEPATCH=y
# CONFIG_SOCIONEXT_SYNQUACER_PREITS is not set
CONFIG_ARM64_64K_PAGES=y
CONFIG_ARM64_VA_BITS_52=y
diff --git a/arch/arm64/include/asm/livepatch.h b/arch/arm64/include/asm/livepatch.h
new file mode 100644
index 000000000000..d85991fff647
--- /dev/null
+++ b/arch/arm64/include/asm/livepatch.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2014-2019, Huawei.
+ * Author: Li Bin
+ * Author: Cheng Jian
+ *
+ * livepatch.h - arm64-specific Kernel Live Patching Core
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ASM_ARM64_LIVEPATCH_H
+#define _ASM_ARM64_LIVEPATCH_H
+
+#include
+#include
+
+#ifdef CONFIG_LIVEPATCH
+
+struct klp_patch;
+struct klp_func;
+
+#define klp_smp_isb() isb()
+
+static inline int klp_check_compiler_support(void)
+{
+ return 0;
+}
+
+int arch_klp_patch_func(struct klp_func *func);
+void arch_klp_unpatch_func(struct klp_func *func);
+#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY
+int klp_check_calltrace(struct klp_patch *patch, int enable);
+#endif
+#else
+#error Live patching support is disabled; check CONFIG_LIVEPATCH
+#endif
+
+#endif /* _ASM_ARM64_LIVEPATCH_H */
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 1fbab854a51b..caed3a53d518 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -82,6 +82,7 @@ void arch_release_task_struct(struct task_struct *tsk);
#define TIF_SVE_VL_INHERIT 24 /* Inherit sve_vl_onexec across exec */
#define TIF_SSBD 25 /* Wants SSB mitigation */
#define TIF_TAGGED_ADDR 26 /* Allow tagged user addresses */
+#define TIF_PATCH_PENDING 27 /* pending live patching update */
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
@@ -98,6 +99,7 @@ void arch_release_task_struct(struct task_struct *tsk);
#define _TIF_32BIT (1 << TIF_32BIT)
#define _TIF_SVE (1 << TIF_SVE)
#define _TIF_MTE_ASYNC_FAULT (1 << TIF_MTE_ASYNC_FAULT)
+#define _TIF_PATCH_PENDING (1 << TIF_PATCH_PENDING)
#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
_TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index bb00caeeae24..697027f3a80a 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_CPU_PM) += sleep.o suspend.o
obj-$(CONFIG_CPU_IDLE) += cpuidle.o
+obj-$(CONFIG_LIVEPATCH) += livepatch.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_EFI) += efi.o efi-entry.stub.o \
diff --git a/arch/arm64/kernel/livepatch.c b/arch/arm64/kernel/livepatch.c
new file mode 100644
index 000000000000..e60841e975c6
--- /dev/null
+++ b/arch/arm64/kernel/livepatch.c
@@ -0,0 +1,558 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * livepatch.c - arm64-specific Kernel Live Patching Core
+ *
+ * Copyright (C) 2014 Li Bin
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define LJMP_INSN_SIZE 4
+
+#ifdef CONFIG_ARM64_MODULE_PLTS
+#define MAX_SIZE_TO_CHECK (LJMP_INSN_SIZE * sizeof(u32))
+#define CHECK_JUMP_RANGE LJMP_INSN_SIZE
+
+static inline bool offset_in_range(unsigned long pc, unsigned long addr,
+ long range)
+{
+ long offset = addr - pc;
+
+ return (offset >= -range && offset < range);
+}
+
+#else
+#define MAX_SIZE_TO_CHECK sizeof(u32)
+#define CHECK_JUMP_RANGE 1
+#endif
+
+struct klp_func_node {
+ struct list_head node;
+ struct list_head func_stack;
+ unsigned long old_addr;
+#ifdef CONFIG_ARM64_MODULE_PLTS
+ u32 old_insns[LJMP_INSN_SIZE];
+#else
+ u32 old_insn;
+#endif
+};
+
+static LIST_HEAD(klp_func_list);
+
+static struct klp_func_node *klp_find_func_node(unsigned long old_addr)
+{
+ struct klp_func_node *func_node;
+
+ list_for_each_entry(func_node, &klp_func_list, node) {
+ if (func_node->old_addr == old_addr)
+ return func_node;
+ }
+
+ return NULL;
+}
+
+#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY
+/*
+ * The instruction set on arm64 is A64.
+ * The instruction of BLR is 1101011000111111000000xxxxx00000.
+ * The instruction of BL is 100101xxxxxxxxxxxxxxxxxxxxxxxxxx.
+ * The instructions of BLRAA/BLRAB (and their Z variants) are 1101011x0011111100001xxxxxxxxxxx.
+ */
+#define is_jump_insn(insn) (((le32_to_cpu(insn) & 0xfffffc1f) == 0xd63f0000) || \
+ ((le32_to_cpu(insn) & 0xfc000000) == 0x94000000) || \
+ ((le32_to_cpu(insn) & 0xfefff800) == 0xd63f0800))
+
+struct klp_func_list {
+ struct klp_func_list *next;
+ unsigned long func_addr;
+ unsigned long func_size;
+ const char *func_name;
+ int force;
+};
+
+struct walk_stackframe_args {
+ int enable;
+ struct klp_func_list *check_funcs;
+ int ret;
+};
+
+static inline unsigned long klp_size_to_check(unsigned long func_size,
+ int force)
+{
+ unsigned long size = func_size;
+
+ if (force == KLP_STACK_OPTIMIZE && size > MAX_SIZE_TO_CHECK)
+ size = MAX_SIZE_TO_CHECK;
+ return size;
+}
+
+static inline int klp_compare_address(unsigned long pc, unsigned long func_addr,
+ const char *func_name, unsigned long check_size)
+{
+ if (pc >= func_addr && pc < func_addr + check_size) {
+ pr_err("func %s is in use!\n", func_name);
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static bool check_jump_insn(unsigned long func_addr)
+{
+ unsigned long i;
+ u32 *insn = (u32*)func_addr;
+
+ for (i = 0; i < CHECK_JUMP_RANGE; i++) {
+ if (is_jump_insn(*insn)) {
+ return true;
+ }
+ insn++;
+ }
+ return false;
+}
+
+static int add_func_to_list(struct klp_func_list **funcs, struct klp_func_list **func,
+ unsigned long func_addr, unsigned long func_size, const char *func_name,
+ int force)
+{
+ if (*func == NULL) {
+ *funcs = (struct klp_func_list *)kzalloc(sizeof(**funcs), GFP_ATOMIC);
+ if (!(*funcs))
+ return -ENOMEM;
+ *func = *funcs;
+ } else {
+ (*func)->next = (struct klp_func_list *)kzalloc(sizeof(**funcs),
+ GFP_ATOMIC);
+ if (!(*func)->next)
+ return -ENOMEM;
+ *func = (*func)->next;
+ }
+ (*func)->func_addr = func_addr;
+ (*func)->func_size = func_size;
+ (*func)->func_name = func_name;
+ (*func)->force = force;
+ (*func)->next = NULL;
+ return 0;
+}
+
+static int klp_check_activeness_func(struct klp_patch *patch, int enable,
+ struct klp_func_list **check_funcs)
+{
+ int ret;
+ struct klp_object *obj;
+ struct klp_func *func;
+ unsigned long func_addr, func_size;
+ struct klp_func_node *func_node;
+ struct klp_func_list *pcheck = NULL;
+
+ for (obj = patch->objs; obj->funcs; obj++) {
+ for (func = obj->funcs; func->old_name; func++) {
+ if (enable) {
+ if (func->force == KLP_ENFORCEMENT)
+ continue;
+ /*
+ * When enabling, check the currently
+ * active functions.
+ */
+ func_node = klp_find_func_node((unsigned long)func->old_func);
+ if (!func_node ||
+ list_empty(&func_node->func_stack)) {
+ func_addr = (unsigned long)func->old_func;
+ func_size = func->old_size;
+ } else {
+ /*
+ * Previously patched function
+ * [the active one]
+ */
+ struct klp_func *prev;
+
+ prev = list_first_or_null_rcu(
+ &func_node->func_stack,
+ struct klp_func, stack_node);
+ func_addr = (unsigned long)prev->new_func;
+ func_size = prev->new_size;
+ }
+ /*
+ * When preemption is disabled and the
+ * replacement area does not contain a jump
+ * instruction, the migration thread is
+ * scheduled to run stop machine only after the
+ * execution of instructions to be replaced is
+ * complete.
+ */
+ if (IS_ENABLED(CONFIG_PREEMPTION) ||
+ (func->force == KLP_NORMAL_FORCE) ||
+ check_jump_insn(func_addr)) {
+ ret = add_func_to_list(check_funcs, &pcheck,
+ func_addr, func_size,
+ func->old_name, func->force);
+ if (ret)
+ return ret;
+ }
+ } else {
+ /*
+ * When disabling, check both the previously
+ * patched function and the function itself
+ * which is to be unpatched.
+ */
+ func_node = klp_find_func_node((unsigned long)func->old_func);
+ if (!func_node) {
+ return -EINVAL;
+ }
+#ifdef CONFIG_PREEMPTION
+ /*
+ * No scheduling point in the replacement
+ * instructions. Therefore, when preemption is
+ * not enabled, atomic execution is performed
+ * and these instructions will not appear on
+ * the stack.
+ */
+ if (list_is_singular(&func_node->func_stack)) {
+ func_addr = (unsigned long)func->old_func;
+ func_size = func->old_size;
+ } else {
+ struct klp_func *prev;
+
+ prev = list_first_or_null_rcu(
+ &func_node->func_stack,
+ struct klp_func, stack_node);
+ func_addr = (unsigned long)prev->new_func;
+ func_size = prev->new_size;
+ }
+ ret = add_func_to_list(check_funcs, &pcheck,
+ func_addr, func_size,
+ func->old_name, 0);
+ if (ret)
+ return ret;
+#endif
+
+ func_addr = (unsigned long)func->new_func;
+ func_size = func->new_size;
+ ret = add_func_to_list(check_funcs, &pcheck,
+ func_addr, func_size,
+ func->old_name, 0);
+ if (ret)
+ return ret;
+ }
+ }
+ }
+ return 0;
+}
+
+static bool check_func_list(struct klp_func_list *funcs, int *ret, unsigned long pc)
+{
+ while (funcs != NULL) {
+ *ret = klp_compare_address(pc, funcs->func_addr, funcs->func_name,
+ klp_size_to_check(funcs->func_size, funcs->force));
+ if (*ret) {
+ return false;
+ }
+ funcs = funcs->next;
+ }
+ return true;
+}
+
+static bool klp_check_jump_func(void *data, unsigned long pc)
+{
+ struct walk_stackframe_args *args = data;
+ struct klp_func_list *check_funcs = args->check_funcs;
+
+ return check_func_list(check_funcs, &args->ret, pc);
+}
+
+static void free_list(struct klp_func_list **funcs)
+{
+ struct klp_func_list *p;
+
+ while (*funcs != NULL) {
+ p = *funcs;
+ *funcs = (*funcs)->next;
+ kfree(p);
+ }
+}
+
+int klp_check_calltrace(struct klp_patch *patch, int enable)
+{
+ struct task_struct *g, *t;
+ struct stackframe frame;
+ int ret = 0;
+ struct klp_func_list *check_funcs = NULL;
+ struct walk_stackframe_args args = {
+ .enable = enable,
+ .ret = 0
+ };
+
+ ret = klp_check_activeness_func(patch, enable, &check_funcs);
+ if (ret)
+ goto out;
+ args.check_funcs = check_funcs;
+
+ for_each_process_thread(g, t) {
+ /*
+		 * Handle the current task on each CPU carefully: we must not
+		 * use the saved FP and PC when backtracing the current task.
+		 * It is difficult to backtrace the current task of other CPUs
+		 * here, but fortunately all CPUs stay in this function, so
+		 * their backtraces are very similar.
+ */
+ if (t == current) {
+ /* current on this CPU */
+ frame.fp = (unsigned long)__builtin_frame_address(0);
+ frame.pc = (unsigned long)klp_check_calltrace;
+ } else if (strncmp(t->comm, "migration/", 10) == 0) {
+ /*
+ * current task on another CPU
+ * We call this in stop_machine, so the current
+ * task of every other CPU is the migration thread;
+ * just compare task_comm here, because we can't
+ * get cpu_curr(task_cpu(t)). This assumes that no
+ * other thread will pretend to be a stopper via
+ * task_comm.
+ */
+ continue;
+ } else {
+ frame.fp = thread_saved_fp(t);
+ frame.pc = thread_saved_pc(t);
+ }
+ if (check_funcs != NULL) {
+ start_backtrace(&frame, frame.fp, frame.pc);
+ walk_stackframe(t, &frame, klp_check_jump_func, &args);
+ if (args.ret) {
+ ret = args.ret;
+ pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm);
+ show_stack(t, NULL, KERN_INFO);
+ goto out;
+ }
+ }
+ }
+
+out:
+ free_list(&check_funcs);
+ return ret;
+}
+#endif
+
+int arch_klp_patch_func(struct klp_func *func)
+{
+ struct klp_func_node *func_node;
+ unsigned long pc, new_addr;
+ u32 insn;
+ u32 memory_flag = 0;
+#ifdef CONFIG_ARM64_MODULE_PLTS
+ int i;
+ u32 insns[LJMP_INSN_SIZE];
+#endif
+ int ret = 0;
+
+ func_node = klp_find_func_node((unsigned long)func->old_func);
+ if (!func_node) {
+ func_node = func->func_node;
+ if (!func_node)
+ return -ENOMEM;
+ memory_flag = 1;
+
+ INIT_LIST_HEAD(&func_node->func_stack);
+ func_node->old_addr = (unsigned long)func->old_func;
+
+#ifdef CONFIG_ARM64_MODULE_PLTS
+ for (i = 0; i < LJMP_INSN_SIZE; i++) {
+ ret = aarch64_insn_read(((u32 *)func->old_func) + i,
+ &func_node->old_insns[i]);
+ if (ret)
+ break;
+ }
+#else
+ ret = aarch64_insn_read((void *)func->old_func,
+ &func_node->old_insn);
+#endif
+ if (ret) {
+ return -EPERM;
+ }
+
+ list_add_rcu(&func_node->node, &klp_func_list);
+ }
+
+ list_add_rcu(&func->stack_node, &func_node->func_stack);
+
+ pc = (unsigned long)func->old_func;
+ new_addr = (unsigned long)func->new_func;
+
+#ifdef CONFIG_ARM64_MODULE_PLTS
+ if (offset_in_range(pc, new_addr, SZ_128M)) {
+ insn = aarch64_insn_gen_branch_imm(pc, new_addr,
+ AARCH64_INSN_BRANCH_NOLINK);
+ if (aarch64_insn_patch_text_nosync((void *)pc, insn))
+ goto ERR_OUT;
+ } else {
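+		/*
+		 * Out of B-immediate range; build a long jump through x16:
+		 *   movn x16, #(~addr & 0xffff)
+		 *   movk x16, #((addr >> 16) & 0xffff), lsl #16
+		 *   movk x16, #((addr >> 32) & 0xffff), lsl #32
+		 *   br   x16
+		 * The top 16 bits of x16 stay set, which is fine for kernel
+		 * addresses.
+		 */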
+ insns[0] = cpu_to_le32(0x92800010 | (((~new_addr) & 0xffff)) << 5);
+ insns[1] = cpu_to_le32(0xf2a00010 | (((new_addr >> 16) & 0xffff)) << 5);
+ insns[2] = cpu_to_le32(0xf2c00010 | (((new_addr >> 32) & 0xffff)) << 5);
+ insns[3] = cpu_to_le32(0xd61f0200);
+ for (i = 0; i < LJMP_INSN_SIZE; i++) {
+ if (aarch64_insn_patch_text_nosync(((u32 *)pc) + i, insns[i]))
+ goto ERR_OUT;
+ }
+ }
+#else
+ insn = aarch64_insn_gen_branch_imm(pc, new_addr,
+ AARCH64_INSN_BRANCH_NOLINK);
+
+ if (aarch64_insn_patch_text_nosync((void *)pc, insn))
+ goto ERR_OUT;
+#endif
+ return 0;
+
+ERR_OUT:
+ list_del_rcu(&func->stack_node);
+ if (memory_flag) {
+ list_del_rcu(&func_node->node);
+ }
+
+ return -EPERM;
+}
+
+void arch_klp_unpatch_func(struct klp_func *func)
+{
+ struct klp_func_node *func_node;
+ struct klp_func *next_func;
+ unsigned long pc, new_addr;
+ u32 insn;
+#ifdef CONFIG_ARM64_MODULE_PLTS
+ int i;
+ u32 insns[LJMP_INSN_SIZE];
+#endif
+ func_node = klp_find_func_node((unsigned long)func->old_func);
+ if (WARN_ON(!func_node))
+ return;
+
+ pc = func_node->old_addr;
+ if (list_is_singular(&func_node->func_stack)) {
+#ifdef CONFIG_ARM64_MODULE_PLTS
+ for (i = 0; i < LJMP_INSN_SIZE; i++)
+ insns[i] = func_node->old_insns[i];
+#else
+ insn = func_node->old_insn;
+#endif
+ list_del_rcu(&func->stack_node);
+ list_del_rcu(&func_node->node);
+
+#ifdef CONFIG_ARM64_MODULE_PLTS
+ for (i = 0; i < LJMP_INSN_SIZE; i++) {
+ aarch64_insn_patch_text_nosync(((u32 *)pc) + i,
+ insns[i]);
+ }
+#else
+ aarch64_insn_patch_text_nosync((void *)pc, insn);
+#endif
+ } else {
+ list_del_rcu(&func->stack_node);
+ next_func = list_first_or_null_rcu(&func_node->func_stack,
+ struct klp_func, stack_node);
+ if (WARN_ON(!next_func))
+ return;
+
+ new_addr = (unsigned long)next_func->new_func;
+#ifdef CONFIG_ARM64_MODULE_PLTS
+ if (offset_in_range(pc, new_addr, SZ_128M)) {
+ insn = aarch64_insn_gen_branch_imm(pc, new_addr,
+ AARCH64_INSN_BRANCH_NOLINK);
+
+ aarch64_insn_patch_text_nosync((void *)pc, insn);
+ } else {
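+		/*
+		 * Out of B-immediate range; rebuild the long jump through
+		 * x16 (movn/movk/movk/br), as in arch_klp_patch_func().
+		 */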
+ insns[0] = cpu_to_le32(0x92800010 | (((~new_addr) & 0xffff)) << 5);
+ insns[1] = cpu_to_le32(0xf2a00010 | (((new_addr >> 16) & 0xffff)) << 5);
+ insns[2] = cpu_to_le32(0xf2c00010 | (((new_addr >> 32) & 0xffff)) << 5);
+ insns[3] = cpu_to_le32(0xd61f0200);
+ for (i = 0; i < LJMP_INSN_SIZE; i++)
+ aarch64_insn_patch_text_nosync(((u32 *)pc) + i,
+ insns[i]);
+ }
+#else
+ insn = aarch64_insn_gen_branch_imm(pc, new_addr,
+ AARCH64_INSN_BRANCH_NOLINK);
+
+ aarch64_insn_patch_text_nosync((void *)pc, insn);
+#endif
+ }
+}
+
+#ifdef CONFIG_ARM64_MODULE_PLTS
+/* return 0 if the func can be patched */
+int arch_klp_func_can_patch(struct klp_func *func)
+{
+ unsigned long pc = (unsigned long)func->old_func;
+ unsigned long new_addr = (unsigned long)func->new_func;
+ unsigned long old_size = func->old_size;
+
+ if ((long)old_size <= 0)
+ return -EINVAL;
+
+ if (!offset_in_range(pc, new_addr, SZ_128M) &&
+ (old_size < LJMP_INSN_SIZE * sizeof(u32))) {
+ pr_err("func %s size less than limit\n", func->old_name);
+ return -EPERM;
+ }
+ return 0;
+}
+#else
+int arch_klp_func_can_patch(struct klp_func *func)
+{
+ return 0;
+}
+#endif
+
+void arch_klp_mem_prepare(struct klp_patch *patch)
+{
+ struct klp_object *obj;
+ struct klp_func *func;
+
+ klp_for_each_object(patch, obj) {
+ klp_for_each_func(obj, func) {
+ func->func_node = kzalloc(sizeof(struct klp_func_node),
+ GFP_ATOMIC);
+ }
+ }
+}
+
+void arch_klp_mem_recycle(struct klp_patch *patch)
+{
+ struct klp_object *obj;
+ struct klp_func *func;
+ struct klp_func_node *func_node;
+
+ klp_for_each_object(patch, obj) {
+ klp_for_each_func(obj, func) {
+ func_node = func->func_node;
+ if (func_node && list_is_singular(&func_node->func_stack)) {
+ kfree(func_node);
+ func->func_node = NULL;
+ }
+ }
+ }
+}
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 31ed8083571f..8725c9d1f851 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -219,7 +219,8 @@ config PPC
select HAVE_KPROBES_ON_FTRACE
select HAVE_KRETPROBES
select HAVE_LD_DEAD_CODE_DATA_ELIMINATION
- select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
+ select HAVE_LIVEPATCH_FTRACE if HAVE_DYNAMIC_FTRACE_WITH_REGS
+ select HAVE_LIVEPATCH_WO_FTRACE if (PPC64 && CPU_BIG_ENDIAN) || PPC32
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI if PERF_EVENTS || (PPC64 && PPC_BOOK3S)
select HAVE_HARDLOCKUP_DETECTOR_ARCH if (PPC64 && PPC_BOOK3S)
diff --git a/arch/powerpc/include/asm/livepatch.h b/arch/powerpc/include/asm/livepatch.h
index 4a3d5d25fed5..5e5161ceac13 100644
--- a/arch/powerpc/include/asm/livepatch.h
+++ b/arch/powerpc/include/asm/livepatch.h
@@ -12,6 +12,7 @@
#include
#ifdef CONFIG_LIVEPATCH
+#ifdef CONFIG_LIVEPATCH_FTRACE
static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
regs->nip = ip;
@@ -27,6 +28,68 @@ static inline unsigned long klp_get_ftrace_location(unsigned long faddr)
return ftrace_location_range(faddr, faddr + 16);
}
+#elif defined(CONFIG_LIVEPATCH_WO_FTRACE)
+struct klp_func;
+
+/* kernel livepatch instruction barrier */
+#define klp_smp_isb() __smp_lwsync()
+
+int arch_klp_patch_func(struct klp_func *func);
+void arch_klp_unpatch_func(struct klp_func *func);
+
+#ifdef CONFIG_PPC64
+/*
+ * Use the livepatch stub to jump to the trampoline.
+ * It is similar to the module stub, but does not
+ * need to save and load R2.
+ * struct ppc64_klp_bstub_entry
+ */
+struct ppc64_klp_bstub_entry {
+ u32 jump[5];
+ u32 magic;
+ /* address for livepatch trampoline */
+ u64 trampoline;
+};
+
+#ifdef PPC64_ELF_ABI_v1
+struct ppc64_klp_btramp_entry {
+ u32 jump[18];
+ u32 magic;
+ union {
+ struct ppc64_opd_entry funcdata;
+ unsigned long saved_entry[2];
+ };
+};
+#endif /* PPC64_ELF_ABI_v1 */
+
+#define PPC64_INSN_SIZE 4
+#define LJMP_INSN_SIZE (sizeof(struct ppc64_klp_bstub_entry) / PPC64_INSN_SIZE)
+
+/* STUB_MAGIC 0x73747562 "stub" */
+#define BRANCH_STUB_MAGIC 0x73747563 /* stub + 1 */
+#define BRANCH_TRAMPOLINE_MAGIC 0x73747564 /* stub + 2 */
+
+extern void livepatch_branch_stub(void);
+extern void livepatch_branch_stub_end(void);
+
+#ifdef PPC64_ELF_ABI_v1
+extern void livepatch_branch_trampoline(void);
+extern void livepatch_branch_trampoline_end(void);
+#endif /* PPC64_ELF_ABI_v1 */
+
+int livepatch_create_branch(unsigned long pc,
+ unsigned long trampoline,
+ unsigned long addr,
+ struct module *me);
+#endif /* CONFIG_PPC64 */
+
+#endif /* CONFIG_LIVEPATCH_FTRACE */
+
+#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY
+struct klp_patch;
+int klp_check_calltrace(struct klp_patch *patch, int enable);
+#endif
+
static inline void klp_init_thread_info(struct task_struct *p)
{
/* + 1 to account for STACK_END_MAGIC */
diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h
index 857d9ff24295..3d9febabdcdc 100644
--- a/arch/powerpc/include/asm/module.h
+++ b/arch/powerpc/include/asm/module.h
@@ -33,6 +33,9 @@ struct mod_arch_specific {
/* For module function descriptor dereference */
unsigned long start_opd;
unsigned long end_opd;
+#ifdef CONFIG_LIVEPATCH_WO_FTRACE
+ unsigned long toc;
+#endif
#else /* powerpc64 */
/* Indices of PLT sections within module. */
unsigned int core_plt_section;
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index fe2ef598e2ea..cbe9dbfaf005 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -94,6 +94,7 @@ obj-$(CONFIG_44x) += cpu_setup_44x.o
obj-$(CONFIG_PPC_FSL_BOOK3E) += cpu_setup_fsl_booke.o
obj-$(CONFIG_PPC_DOORBELL) += dbell.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
+obj-$(CONFIG_LIVEPATCH_WO_FTRACE) += livepatch_$(BITS).o
extra-$(CONFIG_PPC64) := head_64.o
extra-$(CONFIG_PPC_BOOK3S_32) := head_book3s_32.o
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 2831b0aa92b1..71ff3a4f10a6 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -997,3 +997,76 @@ _GLOBAL(enter_prom)
ld r0,16(r1)
mtlr r0
blr
+
+#ifdef CONFIG_LIVEPATCH_WO_FTRACE
+ /*
+ * Livepatch function branch stub.
+ * see struct ppc64_klp_bstub_entry
+ * used to jump to the livepatch trampoline
+ */
+_GLOBAL(livepatch_branch_stub)
+ addis r11,r2, 0 /* <high> */
+ addi r11,r11, 0 /* <low> */
+ ld r12,24(r11)
+ mtctr r12
+ bctr
+_GLOBAL(livepatch_branch_stub_end)
+ nop /* for magic */
+
+#ifdef PPC64_ELF_ABI_v1
+ /*
+ * This function runs in the livepatch context, between two functions.
+ * As such it can only clobber registers which are volatile and used in
+ * function linkage.
+ *
+ * We get here when a function A, calls another function B, but B has
+ * been live patched with a new function C.
+ *
+ * On entry:
+ * - we have no stack frame and can not allocate one
+ * - LR points back to the original caller (in A)
+ * - CTR used to hold the new NIP for call
+ * - r0, r11 & r12 are free
+ * -- r11 points back to the bstub data which stores the func descr:
+ * ---- 0(r11) : new function address (saved_entry)
+ * ---- 8(r11) : new R2 (TOC) for the new function
+ * -- tag livepatch stack with r11
+ * -- save temporary variables with r12
+ */
+_GLOBAL(livepatch_branch_trampoline)
+ mflr r0
+ std r0, 16(r1)
+ std r2, 24(r1)
+ stdu r1, -STACK_FRAME_OVERHEAD(r1)
+
+ /* Load func descr address to R11 */
+ lis r11, 0 /* saved_entry@highest */
+ ori r11,r11,0 /* saved_entry@higher */
+ rldicr r11,r11,32,31
+ oris r11,r11,0 /* saved_entry@high */
+ ori r11,r11,0 /* saved_entry@low */
+
+ /* Call NEW_FUNC */
+ ld r12, 0(r11) /* load new func address to R12 */
+#ifdef PPC64_ELF_ABI_v1
+ ld r2, 8(r11) /* set up new R2 */
+#endif
+ mtctr r12 /* load R12(new func address) to CTR */
+ bctrl /* call new func */
+
+ /*
+ * Now we are returning from the patched function to the original
+ * caller A. We are free to use r11, r12 and we can use r2 until we
+ * restore it.
+ */
+ addi r1, r1, STACK_FRAME_OVERHEAD
+ ld r2, 24(r1)
+ ld r0, 16(r1)
+ mtlr r0
+
+ /* Return to original caller of live patched function */
+ blr
+_GLOBAL(livepatch_branch_trampoline_end)
+ nop
+#endif
+#endif /* CONFIG_LIVEPATCH_WO_FTRACE */
diff --git a/arch/powerpc/kernel/livepatch_32.c b/arch/powerpc/kernel/livepatch_32.c
new file mode 100644
index 000000000000..13f3200bf52f
--- /dev/null
+++ b/arch/powerpc/kernel/livepatch_32.c
@@ -0,0 +1,570 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * livepatch.c - powerpc-specific Kernel Live Patching Core
+ *
+ * Copyright (C) 2018 Huawei Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#if defined (CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY) || \
+ defined (CONFIG_LIVEPATCH_WO_FTRACE)
+#define LJMP_INSN_SIZE 4
+#define MAX_SIZE_TO_CHECK (LJMP_INSN_SIZE * sizeof(u32))
+#define CHECK_JUMP_RANGE LJMP_INSN_SIZE
+
+struct klp_func_node {
+ struct list_head node;
+ struct list_head func_stack;
+ void *old_func;
+ u32 old_insns[LJMP_INSN_SIZE];
+};
+
+static LIST_HEAD(klp_func_list);
+
+static struct klp_func_node *klp_find_func_node(void *old_func)
+{
+ struct klp_func_node *func_node;
+
+ list_for_each_entry(func_node, &klp_func_list, node) {
+ if (func_node->old_func == old_func)
+ return func_node;
+ }
+
+ return NULL;
+}
+#endif
+
+#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY
+/*
+ * The instruction set on ppc32 is RISC.
+ * The instructions of BL and BLA are 010010xxxxxxxxxxxxxxxxxxxxxxxxx1.
+ * The instructions of BCL and BCLA are 010000xxxxxxxxxxxxxxxxxxxxxxxxx1.
+ * The instruction of BCCTRL is 010011xxxxxxxxxx0000010000100001.
+ * The instruction of BCLRL is 010011xxxxxxxxxx0000000000100001.
+ */
+static bool is_jump_insn(u32 insn)
+{
+ u32 tmp1 = (insn & 0xfc000001);
+ u32 tmp2 = (insn & 0xfc00ffff);
+
+ if ((tmp1 == 0x48000001) || (tmp1 == 0x40000001) ||
+ (tmp2 == 0x4c000421) || (tmp2 == 0x4c000021))
+ return true;
+ return false;
+}
+
+struct klp_func_list {
+ struct klp_func_list *next;
+ unsigned long func_addr;
+ unsigned long func_size;
+ const char *func_name;
+ int force;
+};
+
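+/* Minimal stack-walking cursor used by klp_walk_stackframe() below. */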
+struct stackframe {
+ unsigned long sp;
+ unsigned long pc;
+};
+
+struct walk_stackframe_args {
+ int enable;
+ struct klp_func_list *check_funcs;
+ int ret;
+};
+
+static inline unsigned long klp_size_to_check(unsigned long func_size,
+ int force)
+{
+ unsigned long size = func_size;
+
+ if (force == KLP_STACK_OPTIMIZE && size > MAX_SIZE_TO_CHECK)
+ size = MAX_SIZE_TO_CHECK;
+ return size;
+}
+
+static inline int klp_compare_address(unsigned long pc, unsigned long func_addr,
+ const char *func_name, unsigned long check_size)
+{
+ if (pc >= func_addr && pc < func_addr + check_size) {
+ pr_err("func %s is in use!\n", func_name);
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static bool check_jump_insn(unsigned long func_addr)
+{
+ unsigned long i;
+ u32 *insn = (u32*)func_addr;
+
+ for (i = 0; i < CHECK_JUMP_RANGE; i++) {
+ if (is_jump_insn(*insn)) {
+ return true;
+ }
+ insn++;
+ }
+ return false;
+}
+
+static int add_func_to_list(struct klp_func_list **funcs, struct klp_func_list **func,
+ unsigned long func_addr, unsigned long func_size, const char *func_name,
+ int force)
+{
+ if (*func == NULL) {
+ *funcs = (struct klp_func_list*)kzalloc(sizeof(**funcs), GFP_ATOMIC);
+ if (!(*funcs))
+ return -ENOMEM;
+ *func = *funcs;
+ } else {
+ (*func)->next = (struct klp_func_list*)kzalloc(sizeof(**funcs),
+ GFP_ATOMIC);
+ if (!(*func)->next)
+ return -ENOMEM;
+ *func = (*func)->next;
+ }
+ (*func)->func_addr = func_addr;
+ (*func)->func_size = func_size;
+ (*func)->func_name = func_name;
+ (*func)->force = force;
+ (*func)->next = NULL;
+ return 0;
+}
+
+static int klp_check_activeness_func(struct klp_patch *patch, int enable,
+ struct klp_func_list **check_funcs)
+{
+ int ret;
+ struct klp_object *obj;
+ struct klp_func *func;
+ unsigned long func_addr, func_size;
+ struct klp_func_node *func_node;
+ struct klp_func_list *pcheck = NULL;
+
+ for (obj = patch->objs; obj->funcs; obj++) {
+ for (func = obj->funcs; func->old_name; func++) {
+ if (enable) {
+ if (func->force == KLP_ENFORCEMENT)
+ continue;
+ /*
+ * When enabling, check the currently
+ * active functions.
+ */
+ func_node = klp_find_func_node(func->old_func);
+ if (!func_node ||
+ list_empty(&func_node->func_stack)) {
+ /*
+ * Not patched yet on this function
+ * [ the original one ]
+ */
+ func_addr = (unsigned long)func->old_func;
+ func_size = func->old_size;
+ } else {
+ /*
+ * Previously patched function
+ * [ the active one ]
+ */
+ struct klp_func *prev;
+
+ prev = list_first_or_null_rcu(
+ &func_node->func_stack,
+ struct klp_func, stack_node);
+ func_addr = (unsigned long)prev->new_func;
+ func_size = prev->new_size;
+ }
+ /*
+ * When preemption is disabled and the
+ * replacement area does not contain a jump
+ * instruction, the migration thread is
+ * scheduled to run stop machine only after the
+ * execution of instructions to be replaced is
+ * complete.
+ */
+ if (IS_ENABLED(CONFIG_PREEMPTION) ||
+ (func->force == KLP_NORMAL_FORCE) ||
+ check_jump_insn(func_addr)) {
+ ret = add_func_to_list(check_funcs, &pcheck,
+ func_addr, func_size,
+ func->old_name, func->force);
+ if (ret)
+ return ret;
+ }
+ } else {
+ /*
+ * When disabling, check both the previously
+ * patched function and the function itself
+ * which is to be unpatched.
+ */
+ func_node = klp_find_func_node(func->old_func);
+ if (!func_node)
+ return -EINVAL;
+#ifdef CONFIG_PREEMPTION
+ /*
+ * No scheduling point in the replacement
+ * instructions. Therefore, when preemption is
+ * not enabled, atomic execution is performed
+ * and these instructions will not appear on
+ * the stack.
+ */
+ if (list_is_singular(&func_node->func_stack)) {
+ func_addr = (unsigned long)func->old_func;
+ func_size = func->old_size;
+ } else {
+ struct klp_func *prev;
+
+ prev = list_first_or_null_rcu(
+ &func_node->func_stack,
+ struct klp_func, stack_node);
+ func_addr = (unsigned long)prev->new_func;
+ func_size = prev->new_size;
+ }
+ ret = add_func_to_list(check_funcs, &pcheck, func_addr,
+ func_size, func->old_name, 0);
+ if (ret)
+ return ret;
+#endif
+ func_addr = (unsigned long)func->new_func;
+ func_size = func->new_size;
+ ret = add_func_to_list(check_funcs, &pcheck, func_addr,
+ func_size, func->old_name, 0);
+ if (ret)
+ return ret;
+ }
+ }
+ }
+ return 0;
+}
+
+static int unwind_frame(struct task_struct *tsk, struct stackframe *frame)
+{
+ unsigned long *stack;
+
+ if (!validate_sp(frame->sp, tsk, STACK_FRAME_OVERHEAD))
+ return -1;
+
+ stack = (unsigned long *)frame->sp;
+ frame->sp = stack[0];
+ frame->pc = stack[STACK_FRAME_LR_SAVE];
+ return 0;
+}
+
+void notrace klp_walk_stackframe(struct stackframe *frame,
+ int (*fn)(struct stackframe *, void *),
+ struct task_struct *tsk, void *data)
+{
+ while (1) {
+ int ret;
+
+ if (fn(frame, data))
+ break;
+ ret = unwind_frame(tsk, frame);
+ if (ret < 0)
+ break;
+ }
+}
+
+static bool check_func_list(struct klp_func_list *funcs, int *ret, unsigned long pc)
+{
+ while (funcs != NULL) {
+ *ret = klp_compare_address(pc, funcs->func_addr, funcs->func_name,
+ klp_size_to_check(funcs->func_size, funcs->force));
+ if (*ret) {
+ return false;
+ }
+ funcs = funcs->next;
+ }
+ return true;
+}
+
+static int klp_check_jump_func(struct stackframe *frame, void *data)
+{
+ struct walk_stackframe_args *args = data;
+ struct klp_func_list *check_funcs = args->check_funcs;
+
+ if (!check_func_list(check_funcs, &args->ret, frame->pc)) {
+ return args->ret;
+ }
+ return 0;
+}
+
+static void free_list(struct klp_func_list **funcs)
+{
+ struct klp_func_list *p;
+
+ while (*funcs != NULL) {
+ p = *funcs;
+ *funcs = (*funcs)->next;
+ kfree(p);
+ }
+}
+
+int klp_check_calltrace(struct klp_patch *patch, int enable)
+{
+ struct task_struct *g, *t;
+ struct stackframe frame;
+ unsigned long *stack;
+ int ret = 0;
+ struct klp_func_list *check_funcs = NULL;
+ struct walk_stackframe_args args = {
+ .ret = 0
+ };
+
+ ret = klp_check_activeness_func(patch, enable, &check_funcs);
+ if (ret)
+ goto out;
+ args.check_funcs = check_funcs;
+
+ for_each_process_thread(g, t) {
+ if (t == current) {
+ /*
+			 * Handle the current task on each CPU carefully: we
+			 * must not use the saved thread context when
+			 * backtracing the current task. It is difficult to
+			 * backtrace the current task of other CPUs here, but
+			 * fortunately all CPUs stay in this function, so their
+			 * backtraces are very similar.
+ */
+ stack = (unsigned long *)current_stack_pointer;
+ } else if (strncmp(t->comm, "migration/", 10) == 0) {
+ /*
+ * current task on another CPU
+ * We call this in stop_machine, so the current
+ * task of every other CPU is the migration thread;
+ * just compare task_comm here, because we can't
+ * get cpu_curr(task_cpu(t)). This assumes that no
+ * other thread will pretend to be a stopper via
+ * task_comm.
+ */
+ continue;
+ } else {
+ /*
+ * Skip the first frame since it does not contain lr
+ * at the normal position, and the nip is stored in
+ * the lr position of the second frame.
+ * See arch/powerpc/kernel/entry_32.S _switch .
+ */
+ unsigned long s = *(unsigned long *)t->thread.ksp;
+
+ if (!validate_sp(s, t, STACK_FRAME_OVERHEAD))
+ continue;
+ stack = (unsigned long *)s;
+ }
+
+ frame.sp = (unsigned long)stack;
+ frame.pc = stack[STACK_FRAME_LR_SAVE];
+ if (check_funcs != NULL) {
+ klp_walk_stackframe(&frame, klp_check_jump_func, t, &args);
+ if (args.ret) {
+ ret = args.ret;
+ pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm);
+ show_stack(t, NULL, KERN_INFO);
+ goto out;
+ }
+ }
+ }
+
+out:
+ free_list(&check_funcs);
+ return ret;
+}
+#endif
+
+#ifdef CONFIG_LIVEPATCH_WO_FTRACE
+static inline bool offset_in_range(unsigned long pc, unsigned long addr,
+ long range)
+{
+ long offset = addr - pc;
+
+ return (offset >= -range && offset < range);
+}
+
+int arch_klp_patch_func(struct klp_func *func)
+{
+ struct klp_func_node *func_node;
+ unsigned long pc, new_addr;
+ long ret;
+ int memory_flag = 0;
+ int i;
+ u32 insns[LJMP_INSN_SIZE];
+
+ func_node = klp_find_func_node(func->old_func);
+ if (!func_node) {
+ func_node = func->func_node;
+ if (!func_node)
+ return -ENOMEM;
+
+ memory_flag = 1;
+ INIT_LIST_HEAD(&func_node->func_stack);
+ func_node->old_func = func->old_func;
+ for (i = 0; i < LJMP_INSN_SIZE; i++) {
+			ret = copy_from_kernel_nofault(&func_node->old_insns[i],
+				((u32 *)func->old_func) + i, sizeof(u32));
+ if (ret) {
+ return -EPERM;
+ }
+ }
+
+ list_add_rcu(&func_node->node, &klp_func_list);
+ }
+
+ list_add_rcu(&func->stack_node, &func_node->func_stack);
+
+ pc = (unsigned long)func->old_func;
+ new_addr = (unsigned long)func->new_func;
+ if (offset_in_range(pc, new_addr, SZ_32M)) {
+ struct ppc_inst instr;
+
+ create_branch(&instr, (struct ppc_inst *)pc, new_addr, 0);
+ if (patch_instruction((struct ppc_inst *)pc, instr))
+ goto ERR_OUT;
+ } else {
+ /*
+ * lis r12,sym@ha
+ * addi r12,r12,sym@l
+ * mtctr r12
+ * bctr
+ */
+ insns[0] = 0x3d800000 + ((new_addr + 0x8000) >> 16);
+ insns[1] = 0x398c0000 + (new_addr & 0xffff);
+ insns[2] = 0x7d8903a6;
+ insns[3] = 0x4e800420;
+
+ for (i = 0; i < LJMP_INSN_SIZE; i++) {
+ ret = patch_instruction((struct ppc_inst *)(((u32 *)pc) + i),
+ ppc_inst(insns[i]));
+ if (ret)
+ goto ERR_OUT;
+ }
+ }
+
+ return 0;
+
+ERR_OUT:
+ list_del_rcu(&func->stack_node);
+ if (memory_flag) {
+ list_del_rcu(&func_node->node);
+ }
+
+ return -EPERM;
+}
+
+void arch_klp_unpatch_func(struct klp_func *func)
+{
+ struct klp_func_node *func_node;
+ struct klp_func *next_func;
+ unsigned long pc, new_addr;
+ u32 insns[LJMP_INSN_SIZE];
+ int i;
+
+ func_node = klp_find_func_node(func->old_func);
+ pc = (unsigned long)func_node->old_func;
+ if (list_is_singular(&func_node->func_stack)) {
+ for (i = 0; i < LJMP_INSN_SIZE; i++)
+ insns[i] = func_node->old_insns[i];
+
+ list_del_rcu(&func->stack_node);
+ list_del_rcu(&func_node->node);
+
+ for (i = 0; i < LJMP_INSN_SIZE; i++)
+ patch_instruction((struct ppc_inst *)(((u32 *)pc) + i),
+ ppc_inst(insns[i]));
+ } else {
+ list_del_rcu(&func->stack_node);
+ next_func = list_first_or_null_rcu(&func_node->func_stack,
+ struct klp_func, stack_node);
+
+ new_addr = (unsigned long)next_func->new_func;
+ if (offset_in_range(pc, new_addr, SZ_32M)) {
+ struct ppc_inst instr;
+
+ create_branch(&instr, (struct ppc_inst *)pc, new_addr, 0);
+ patch_instruction((struct ppc_inst *)pc, instr);
+ } else {
+ /*
+ * lis r12,sym@ha
+ * addi r12,r12,sym@l
+ * mtctr r12
+ * bctr
+ */
+ insns[0] = 0x3d800000 + ((new_addr + 0x8000) >> 16);
+ insns[1] = 0x398c0000 + (new_addr & 0xffff);
+ insns[2] = 0x7d8903a6;
+ insns[3] = 0x4e800420;
+
+ for (i = 0; i < LJMP_INSN_SIZE; i++)
+ patch_instruction((struct ppc_inst *)(((u32 *)pc) + i),
+ ppc_inst(insns[i]));
+ }
+ }
+}
+
+/* return 0 if the func can be patched */
+int arch_klp_func_can_patch(struct klp_func *func)
+{
+ unsigned long pc = (unsigned long)func->old_func;
+ unsigned long new_addr = (unsigned long)func->new_func;
+ unsigned long old_size = func->old_size;
+
+ if (!old_size)
+ return -EINVAL;
+
+ if (!offset_in_range(pc, new_addr, SZ_32M) &&
+ (old_size < LJMP_INSN_SIZE * sizeof(u32))) {
+ pr_err("func %s size less than limit\n", func->old_name);
+ return -EPERM;
+ }
+ return 0;
+}
+
+void arch_klp_mem_prepare(struct klp_patch *patch)
+{
+ struct klp_object *obj;
+ struct klp_func *func;
+
+ klp_for_each_object(patch, obj) {
+ klp_for_each_func(obj, func) {
+ func->func_node = kzalloc(sizeof(struct klp_func_node),
+ GFP_ATOMIC);
+ }
+ }
+}
+
+void arch_klp_mem_recycle(struct klp_patch *patch)
+{
+ struct klp_object *obj;
+ struct klp_func *func;
+ struct klp_func_node *func_node;
+
+ klp_for_each_object(patch, obj) {
+ klp_for_each_func(obj, func) {
+ func_node = func->func_node;
+ if (func_node && list_is_singular(&func_node->func_stack)) {
+ kfree(func_node);
+ func->func_node = NULL;
+ }
+ }
+ }
+}
+#endif
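
The 32-bit arch_klp_patch_func() above either plants a single relative branch (when the new function lies within +/-32 MB of the old one) or rewrites the prologue with the four-instruction lis/addi/mtctr/bctr sequence. The following standalone user-space sketch is illustrative only (it is not part of the patch; the addresses are made up) and shows how those four instruction words are derived and how the range check picks between the two paths.

#include <stdint.h>
#include <stdio.h>

#define LJMP_INSN_SIZE	4
#define SZ_32M		(32 * 1024 * 1024)

/* mirrors offset_in_range() in the file above */
static int offset_in_range(uint32_t pc, uint32_t addr, int32_t range)
{
	int32_t offset = (int32_t)(addr - pc);

	return offset >= -range && offset < range;
}

int main(void)
{
	uint32_t pc = 0xc0100000u, new_addr = 0xe0a00004u;	/* deliberately far apart */
	uint32_t insns[LJMP_INSN_SIZE];
	int i;

	if (offset_in_range(pc, new_addr, SZ_32M)) {
		printf("near branch: a single relative branch is enough\n");
		return 0;
	}

	insns[0] = 0x3d800000 + ((new_addr + 0x8000) >> 16);	/* lis  r12,sym@ha */
	insns[1] = 0x398c0000 + (new_addr & 0xffff);		/* addi r12,r12,sym@l */
	insns[2] = 0x7d8903a6;					/* mtctr r12 */
	insns[3] = 0x4e800420;					/* bctr */

	for (i = 0; i < LJMP_INSN_SIZE; i++)
		printf("insn[%d] = 0x%08x\n", i, insns[i]);
	return 0;
}

The "+ 0x8000" in the @ha half accounts for the sign extension that addi applies to the low 16 bits, which is exactly what the encodings in the patch rely on.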
diff --git a/arch/powerpc/kernel/livepatch_64.c b/arch/powerpc/kernel/livepatch_64.c
new file mode 100644
index 000000000000..77fbb5603137
--- /dev/null
+++ b/arch/powerpc/kernel/livepatch_64.c
@@ -0,0 +1,636 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * livepatch.c - powerpc-specific Kernel Live Patching Core
+ *
+ * Copyright (C) 2018 Huawei Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+
+#if defined(CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY) || \
+ defined(CONFIG_LIVEPATCH_WO_FTRACE)
+#define MAX_SIZE_TO_CHECK (LJMP_INSN_SIZE * sizeof(u32))
+#define CHECK_JUMP_RANGE LJMP_INSN_SIZE
+
+struct klp_func_node {
+ struct list_head node;
+ struct list_head func_stack;
+ void *old_func;
+ u32 old_insns[LJMP_INSN_SIZE];
+#ifdef PPC64_ELF_ABI_v1
+ struct ppc64_klp_btramp_entry trampoline;
+#else
+ unsigned long trampoline;
+#endif
+};
+
+static LIST_HEAD(klp_func_list);
+
+static struct klp_func_node *klp_find_func_node(void *old_func)
+{
+ struct klp_func_node *func_node;
+
+ list_for_each_entry(func_node, &klp_func_list, node) {
+ if (func_node->old_func == old_func)
+ return func_node;
+ }
+
+ return NULL;
+}
+#endif
+
+#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY
+/*
+ * The instruction set on ppc64 is RISC.
+ * The instructions of BL and BLA are 010010xxxxxxxxxxxxxxxxxxxxxxxxx1.
+ * The instructions of BCL and BCLA are 010000xxxxxxxxxxxxxxxxxxxxxxxxx1.
+ * The instruction of BCCTRL is 010011xxxxxxxxxx0000010000100001.
+ * The instruction of BCLRL is 010011xxxxxxxxxx0000000000100001.
+ */
+static bool is_jump_insn(u32 insn)
+{
+ u32 tmp1 = (insn & 0xfc000001);
+ u32 tmp2 = (insn & 0xfc00ffff);
+
+ if (tmp1 == 0x48000001 || tmp1 == 0x40000001 ||
+ tmp2 == 0x4c000421 || tmp2 == 0x4c000021)
+ return true;
+ return false;
+}
+
+struct klp_func_list {
+ struct klp_func_list *next;
+ unsigned long func_addr;
+ unsigned long func_size;
+ const char *func_name;
+ int force;
+};
+
+struct stackframe {
+ unsigned long sp;
+ unsigned long pc;
+ unsigned long nip;
+};
+
+struct walk_stackframe_args {
+ int enable;
+ struct klp_func_list *check_funcs;
+ int ret;
+};
+
+static inline unsigned long klp_size_to_check(unsigned long func_size,
+ int force)
+{
+ unsigned long size = func_size;
+
+ if (force == KLP_STACK_OPTIMIZE && size > MAX_SIZE_TO_CHECK)
+ size = MAX_SIZE_TO_CHECK;
+ return size;
+}
+
+static inline int klp_compare_address(unsigned long pc, unsigned long func_addr,
+ const char *func_name, unsigned long check_size)
+{
+ if (pc >= func_addr && pc < func_addr + check_size) {
+ pr_err("func %s is in use!\n", func_name);
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static bool check_jump_insn(unsigned long func_addr)
+{
+ unsigned long i;
+ u32 *insn = (u32*)func_addr;
+
+ for (i = 0; i < CHECK_JUMP_RANGE; i++) {
+ if (is_jump_insn(*insn)) {
+ return true;
+ }
+ insn++;
+ }
+ return false;
+}
+
+static int add_func_to_list(struct klp_func_list **funcs, struct klp_func_list **func,
+ unsigned long func_addr, unsigned long func_size, const char *func_name,
+ int force)
+{
+ if (*func == NULL) {
+ *funcs = (struct klp_func_list*)kzalloc(sizeof(**funcs), GFP_ATOMIC);
+ if (!(*funcs))
+ return -ENOMEM;
+ *func = *funcs;
+ } else {
+ (*func)->next = (struct klp_func_list*)kzalloc(sizeof(**funcs),
+ GFP_ATOMIC);
+ if (!(*func)->next)
+ return -ENOMEM;
+ *func = (*func)->next;
+ }
+ (*func)->func_addr = func_addr;
+ (*func)->func_size = func_size;
+ (*func)->func_name = func_name;
+ (*func)->force = force;
+ (*func)->next = NULL;
+ return 0;
+}
+
+static int klp_check_activeness_func(struct klp_patch *patch, int enable,
+ struct klp_func_list **check_funcs)
+{
+ int ret;
+ struct klp_object *obj;
+ struct klp_func *func;
+ unsigned long func_addr, func_size;
+ struct klp_func_node *func_node = NULL;
+ struct klp_func_list *pcheck = NULL;
+
+ for (obj = patch->objs; obj->funcs; obj++) {
+ for (func = obj->funcs; func->old_name; func++) {
+ func_node = klp_find_func_node(func->old_func);
+
+ /* Check func address in stack */
+ if (enable) {
+ if (func->force == KLP_ENFORCEMENT)
+ continue;
+				/*
+				 * When enabling, check the currently
+				 * active functions.
+				 */
+ if (!func_node ||
+ list_empty(&func_node->func_stack)) {
+					/*
+					 * Not patched on this function
+					 * [ the original one ]
+					 */
+ func_addr = (unsigned long)func->old_func;
+ func_size = func->old_size;
+ } else {
+ /*
+ * Previously patched function
+ * [ the active one ]
+ */
+ struct klp_func *prev;
+
+ prev = list_first_or_null_rcu(
+ &func_node->func_stack,
+ struct klp_func, stack_node);
+ func_addr = ppc_function_entry(
+ (void *)prev->new_func);
+ func_size = prev->new_size;
+ }
+				/*
+				 * When preemption is disabled and the
+				 * replacement area does not contain a jump
+				 * instruction, the migration thread is
+				 * scheduled to run stop machine only after
+				 * the execution of the instructions to be
+				 * replaced is complete.
+				 */
+				if (IS_ENABLED(CONFIG_PREEMPTION) ||
+ (func->force == KLP_NORMAL_FORCE) ||
+ check_jump_insn(func_addr)) {
+ ret = add_func_to_list(check_funcs, &pcheck,
+ func_addr, func_size,
+ func->old_name, func->force);
+ if (ret)
+ return ret;
+ }
+ } else {
+				/*
+				 * When disabling, check the function itself
+				 * which is to be unpatched.
+				 */
+ func_addr = ppc_function_entry(
+ (void *)func->new_func);
+ func_size = func->new_size;
+ ret = add_func_to_list(check_funcs, &pcheck, func_addr,
+ func_size, func->old_name, 0);
+ if (ret)
+ return ret;
+ }
+
+#ifdef PPC64_ELF_ABI_v1
+ /*
+			 * Check the trampoline on the stack.
+			 * new_func call chain:
+			 *   old_func
+			 *   -=> trampoline
+			 *   -=> new_func
+			 * so we should check every function in the call chain.
+ */
+ if (func_addr != (unsigned long)func->old_func) {
+#ifdef CONFIG_PREEMPTION
+ /*
+ * No scheduling point in the replacement
+ * instructions. Therefore, when preemption is
+ * not enabled, atomic execution is performed
+ * and these instructions will not appear on
+ * the stack.
+ */
+ func_addr = (unsigned long)func->old_func;
+ func_size = func->old_size;
+ ret = add_func_to_list(check_funcs, &pcheck, func_addr,
+ func_size, "OLD_FUNC", 0);
+ if (ret)
+ return ret;
+#endif
+
+ if (func_node == NULL ||
+ func_node->trampoline.magic != BRANCH_TRAMPOLINE_MAGIC)
+ continue;
+
+ func_addr = (unsigned long)&func_node->trampoline;
+ func_size = sizeof(struct ppc64_klp_btramp_entry);
+ ret = add_func_to_list(check_funcs, &pcheck, func_addr,
+ func_size, "trampoline", 0);
+ if (ret)
+ return ret;
+ }
+#endif
+ }
+ }
+ return 0;
+}
+
+static int unwind_frame(struct task_struct *tsk, struct stackframe *frame)
+{
+
+ unsigned long *stack;
+
+ if (!validate_sp(frame->sp, tsk, STACK_FRAME_OVERHEAD))
+ return -1;
+
+ if (frame->nip != 0)
+ frame->nip = 0;
+
+ stack = (unsigned long *)frame->sp;
+
+ /*
+ * When switching to the exception stack,
+ * we save the NIP in pt_regs
+ *
+ * See if this is an exception frame.
+ * We look for the "regshere" marker in the current frame.
+ */
+ if (validate_sp(frame->sp, tsk, STACK_INT_FRAME_SIZE)
+ && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
+ struct pt_regs *regs = (struct pt_regs *)
+ (frame->sp + STACK_FRAME_OVERHEAD);
+ frame->nip = regs->nip;
+		pr_debug("--- interrupt: task = %d/%s, trap %lx at NIP=0x%lx/%pS, LR=0x%lx/%pS\n",
+ tsk->pid, tsk->comm, regs->trap,
+ regs->nip, (void *)regs->nip,
+ regs->link, (void *)regs->link);
+ }
+
+ frame->sp = stack[0];
+ frame->pc = stack[STACK_FRAME_LR_SAVE];
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	/*
+	 * IMHO these tests do not belong in
+	 * arch-dependent code, they are generic.
+	 */
+	frame->pc = ftrace_graph_ret_addr(tsk, &ftrace_idx, frame->pc, stack);
+#endif
+
+ return 0;
+}
+
+static void notrace klp_walk_stackframe(struct stackframe *frame,
+ int (*fn)(struct stackframe *, void *),
+ struct task_struct *tsk, void *data)
+{
+ while (1) {
+ int ret;
+
+ if (fn(frame, data))
+ break;
+ ret = unwind_frame(tsk, frame);
+ if (ret < 0)
+ break;
+ }
+}
+
+static bool check_func_list(struct klp_func_list *funcs, int *ret, unsigned long pc)
+{
+ while (funcs != NULL) {
+ *ret = klp_compare_address(pc, funcs->func_addr, funcs->func_name,
+ klp_size_to_check(funcs->func_size, funcs->force));
+ if (*ret) {
+ return false;
+ }
+ funcs = funcs->next;
+ }
+ return true;
+}
+
+static int klp_check_jump_func(struct stackframe *frame, void *data)
+{
+ struct walk_stackframe_args *args = data;
+ struct klp_func_list *check_funcs = args->check_funcs;
+
+ if (!check_func_list(check_funcs, &args->ret, frame->pc)) {
+ return args->ret;
+ }
+ return 0;
+}
+
+static void free_list(struct klp_func_list **funcs)
+{
+ struct klp_func_list *p;
+
+ while (*funcs != NULL) {
+ p = *funcs;
+ *funcs = (*funcs)->next;
+ kfree(p);
+ }
+}
+
+int klp_check_calltrace(struct klp_patch *patch, int enable)
+{
+ struct task_struct *g, *t;
+ struct stackframe frame;
+ unsigned long *stack;
+ int ret = 0;
+ struct klp_func_list *check_funcs = NULL;
+ struct walk_stackframe_args args;
+
+ ret = klp_check_activeness_func(patch, enable, &check_funcs);
+ if (ret)
+ goto out;
+ args.check_funcs = check_funcs;
+ args.ret = 0;
+
+ for_each_process_thread(g, t) {
+ if (t == current) {
+ /*
+			 * Handle the current task carefully on
+			 * each CPU: we should not use the saved
+			 * FP and PC when backtracing it. It is
+			 * difficult to backtrace the current
+			 * tasks of other CPUs here, but fortunately
+			 * all CPUs stay in this function, so their
+			 * backtraces are very similar.
+ */
+ stack = (unsigned long *)current_stack_pointer;
+ } else if (strncmp(t->comm, "migration/", 10) == 0) {
+ /*
+			 * The current task on another CPU.
+			 * We call this in stop_machine, so the current
+			 * task of every other CPU is its migration
+			 * thread; just compare task_comm here because
+			 * we cannot get cpu_curr(task_cpu(t)). This
+			 * assumes that no other thread will pretend to
+			 * be a stopper via task_comm.
+ */
+ continue;
+ } else {
+ /*
+			 * Skip the first frame since it does not contain lr
+			 * at the normal position and nip is stored in the lr
+			 * position in the second frame.
+			 * See arch/powerpc/kernel/entry_64.S _switch.
+ */
+ unsigned long s = *(unsigned long *)t->thread.ksp;
+
+ if (!validate_sp(s, t, STACK_FRAME_OVERHEAD))
+ continue;
+ stack = (unsigned long *)s;
+ }
+
+ frame.sp = (unsigned long)stack;
+ frame.pc = stack[STACK_FRAME_LR_SAVE];
+ frame.nip = 0;
+ if (check_funcs != NULL) {
+ klp_walk_stackframe(&frame, klp_check_jump_func, t, &args);
+ if (args.ret) {
+ ret = args.ret;
+ pr_debug("%s FAILED when %s\n", __func__,
+ enable ? "enabling" : "disabling");
+ pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm);
+ show_stack(t, NULL, KERN_INFO);
+ goto out;
+ }
+ }
+ }
+
+out:
+ free_list(&check_funcs);
+ return ret;
+}
+#endif
+
+#ifdef CONFIG_LIVEPATCH_WO_FTRACE
+int arch_klp_patch_func(struct klp_func *func)
+{
+ struct klp_func_node *func_node;
+ unsigned long pc, new_addr;
+ int i;
+ int memory_flag = 0;
+ long ret;
+
+ func_node = klp_find_func_node(func->old_func);
+ if (!func_node) {
+ func_node = func->func_node;
+ if (!func_node)
+ return -ENOMEM;
+
+ memory_flag = 1;
+ INIT_LIST_HEAD(&func_node->func_stack);
+ func_node->old_func = func->old_func;
+ for (i = 0; i < LJMP_INSN_SIZE; i++) {
+ ret = copy_from_kernel_nofault(&func_node->old_insns[i],
+ ((u32 *)func->old_func) + i, 4);
+ if (ret) {
+ return -EPERM;
+ }
+ }
+ list_add_rcu(&func_node->node, &klp_func_list);
+ }
+
+ list_add_rcu(&func->stack_node, &func_node->func_stack);
+
+ pc = (unsigned long)func->old_func;
+ new_addr = (unsigned long)func->new_func;
+
+ ret = livepatch_create_branch(pc, (unsigned long)&func_node->trampoline,
+ new_addr, func->old_mod);
+ if (ret)
+ goto ERR_OUT;
+ flush_icache_range((unsigned long)pc,
+ (unsigned long)pc + LJMP_INSN_SIZE * PPC64_INSN_SIZE);
+
+ pr_debug("[%s %d] old = 0x%lx/0x%lx/%pS, new = 0x%lx/0x%lx/%pS\n",
+ __func__, __LINE__,
+ pc, ppc_function_entry((void *)pc), (void *)pc,
+ new_addr, ppc_function_entry((void *)new_addr),
+ (void *)ppc_function_entry((void *)new_addr));
+
+ return 0;
+
+ERR_OUT:
+ list_del_rcu(&func->stack_node);
+ if (memory_flag) {
+ list_del_rcu(&func_node->node);
+ }
+
+ return -EPERM;
+}
+
+void arch_klp_unpatch_func(struct klp_func *func)
+{
+ struct klp_func_node *func_node;
+ struct klp_func *next_func;
+ unsigned long pc, new_addr;
+ u32 insns[LJMP_INSN_SIZE];
+ int i;
+
+ func_node = klp_find_func_node(func->old_func);
+ pc = (unsigned long)func_node->old_func;
+ if (list_is_singular(&func_node->func_stack)) {
+ for (i = 0; i < LJMP_INSN_SIZE; i++)
+ insns[i] = func_node->old_insns[i];
+
+ list_del_rcu(&func->stack_node);
+ list_del_rcu(&func_node->node);
+
+ for (i = 0; i < LJMP_INSN_SIZE; i++)
+ patch_instruction((struct ppc_inst *)((u32 *)pc + i),
+ ppc_inst(insns[i]));
+
+ pr_debug("[%s %d] restore insns at 0x%lx\n", __func__, __LINE__, pc);
+ } else {
+ list_del_rcu(&func->stack_node);
+ next_func = list_first_or_null_rcu(&func_node->func_stack,
+ struct klp_func, stack_node);
+ new_addr = (unsigned long)next_func->new_func;
+
+ livepatch_create_branch(pc, (unsigned long)&func_node->trampoline,
+ new_addr, func->old_mod);
+
+ pr_debug("[%s %d] old = 0x%lx/0x%lx/%pS, new = 0x%lx/0x%lx/%pS\n",
+ __func__, __LINE__,
+ pc, ppc_function_entry((void *)pc), (void *)pc,
+ new_addr, ppc_function_entry((void *)new_addr),
+ (void *)ppc_function_entry((void *)new_addr));
+
+ }
+
+ flush_icache_range((unsigned long)pc,
+ (unsigned long)pc + LJMP_INSN_SIZE * PPC64_INSN_SIZE);
+}
+
+/* return 0 if the func can be patched */
+int arch_klp_func_can_patch(struct klp_func *func)
+{
+ unsigned long old_size = func->old_size;
+
+ if (!old_size)
+ return -EINVAL;
+
+ if (old_size < LJMP_INSN_SIZE * sizeof(u32)) {
+ pr_err("func %s size less than limit\n", func->old_name);
+ return -EPERM;
+ }
+ return 0;
+}
+
+int arch_klp_init_func(struct klp_object *obj, struct klp_func *func)
+{
+#ifdef PPC64_ELF_ABI_v1
+ unsigned long new_addr = (unsigned long)func->new_func;
+
+ /*
+	 * An ABI v1 address is the address of an OPD entry, which
+	 * contains the address of the function. An ABI v2 address
+	 * is simply the address of the function itself.
+	 *
+	 * The function descriptor lives in the data section, so if
+	 * new_addr is in the code segment we treat it as a function
+	 * address; if it is not in the code segment, we treat it as
+	 * a function descriptor.
+ */
+ if (!is_module_text_address(new_addr)) {
+ new_addr = (unsigned long)ppc_function_entry((void *)new_addr);
+ if (!kallsyms_lookup_size_offset((unsigned long)new_addr,
+ &func->new_size, NULL))
+ return -ENOENT;
+ }
+
+ func->this_mod = __module_text_address(new_addr);
+ if (!func->this_mod)
+ return -EINVAL;
+
+ func->new_func_descr.entry = new_addr;
+ func->new_func_descr.toc = func->this_mod->arch.toc;
+
+ func->new_func = (void *)&func->new_func_descr;
+#endif
+
+ if (obj->name)
+ func->old_mod = obj->mod;
+ else
+ func->old_mod = NULL;
+
+ return 0;
+}
+
+void arch_klp_mem_prepare(struct klp_patch *patch)
+{
+ struct klp_object *obj;
+ struct klp_func *func;
+
+ klp_for_each_object(patch, obj) {
+ klp_for_each_func(obj, func) {
+ func->func_node = module_alloc(sizeof(struct klp_func_node));
+ }
+ }
+}
+
+void arch_klp_mem_recycle(struct klp_patch *patch)
+{
+ struct klp_object *obj;
+ struct klp_func *func;
+ struct klp_func_node *func_node;
+
+ klp_for_each_object(patch, obj) {
+ klp_for_each_func(obj, func) {
+ func_node = func->func_node;
+ if (func_node && list_is_singular(&func_node->func_stack)) {
+ module_memfree(func_node);
+ func->func_node = NULL;
+ }
+ }
+ }
+}
+#endif
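
The core of the stop-machine consistency check above is klp_compare_address(): the (un)patch operation is refused whenever any task's stack holds a return address inside one of the collected functions, and klp_size_to_check() shrinks the checked window to the few instructions actually being rewritten when a function was added with KLP_STACK_OPTIMIZE. A minimal user-space sketch of that comparison logic follows; the addresses and the exit values printed are illustrative only, not kernel API.

#include <errno.h>
#include <stdio.h>

#define KLP_STACK_OPTIMIZE	2
#define MAX_SIZE_TO_CHECK	(4 * 4)		/* LJMP_INSN_SIZE * sizeof(u32) */

static unsigned long klp_size_to_check(unsigned long func_size, int force)
{
	if (force == KLP_STACK_OPTIMIZE && func_size > MAX_SIZE_TO_CHECK)
		return MAX_SIZE_TO_CHECK;
	return func_size;
}

static int klp_compare_address(unsigned long pc, unsigned long func_addr,
			       unsigned long check_size)
{
	if (pc >= func_addr && pc < func_addr + check_size)
		return -EBUSY;		/* a task is still running here */
	return 0;
}

int main(void)
{
	unsigned long func_addr = 0xc0000000000d2300UL;
	unsigned long func_size = 0x200;
	unsigned long size = klp_size_to_check(func_size, KLP_STACK_OPTIMIZE);

	/* return address inside the rewritten prologue: patching must fail */
	printf("%d\n", klp_compare_address(func_addr + 0x8, func_addr, size));
	/* return address beyond the rewritten bytes: safe to proceed */
	printf("%d\n", klp_compare_address(func_addr + 0x100, func_addr, size));
	return 0;
}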
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index ae2b188365b1..7a143ab7d433 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -738,6 +738,9 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
}
}
+#ifdef CONFIG_LIVEPATCH_WO_FTRACE
+ me->arch.toc = my_r2(sechdrs, me);
+#endif
return 0;
}
@@ -799,3 +802,107 @@ int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sechdrs)
return 0;
}
#endif
+
+#ifdef CONFIG_LIVEPATCH_WO_FTRACE
+#include
+#include
+
+#define PPC_LIVEPATCH_BITMASK(v, n) (((v) >> (n)) & 0xffff)
+#define PPC_LIVEPATCH_HIGHEST(v) PPC_LIVEPATCH_BITMASK(v, 48)
+#define PPC_LIVEPATCH_HIGHER(v) PPC_LIVEPATCH_BITMASK(v, 32)
+#define PPC_LIVEPATCH_HIGH(v) PPC_LIVEPATCH_BITMASK(v, 16)
+#define PPC_LIVEPATCH_LOW(v) PPC_LIVEPATCH_BITMASK(v, 0)
+
+/*
+ * Patch the jump stub to reference the trampoline
+ * without saving the old R2 or loading the new R2.
+ */
+static int livepatch_create_bstub(struct ppc64_klp_bstub_entry *entry,
+ unsigned long addr,
+ struct module *me)
+{
+ long reladdr;
+ unsigned long my_r2;
+ unsigned long stub_start, stub_end, stub_size;
+
+ /* Stub uses address relative to r2. */
+ my_r2 = me ? me->arch.toc : kernel_toc_addr();
+ reladdr = (unsigned long)entry - my_r2;
+ if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
+ pr_err("%s: Address %p of jump stub out of range of %p.\n",
+ me ? me->name : "kernel",
+ (void *)reladdr, (void *)my_r2);
+ return 0;
+ }
+
+ if (entry->magic != BRANCH_STUB_MAGIC) {
+ stub_start = ppc_function_entry((void *)livepatch_branch_stub);
+ stub_end = ppc_function_entry((void *)livepatch_branch_stub_end);
+ stub_size = stub_end - stub_start;
+ memcpy(entry->jump, (u32 *)stub_start, stub_size);
+
+ entry->jump[0] |= PPC_HA(reladdr);
+ entry->jump[1] |= PPC_LO(reladdr);
+ entry->magic = BRANCH_STUB_MAGIC;
+ }
+ entry->trampoline = addr;
+
+ pr_debug("Create livepatch branch stub 0x%px with reladdr 0x%lx r2 0x%lx to trampoline 0x%lx\n",
+ (void *)entry, reladdr, my_r2, addr);
+
+ return 1;
+}
+
+#ifdef PPC64_ELF_ABI_v1
+static void livepatch_create_btramp(struct ppc64_klp_btramp_entry *entry,
+ unsigned long addr,
+ struct module *me)
+{
+ unsigned long reladdr, tramp_start, tramp_end, tramp_size;
+
+ tramp_start = ppc_function_entry((void *)livepatch_branch_trampoline);
+ tramp_end = ppc_function_entry((void *)livepatch_branch_trampoline_end);
+ tramp_size = tramp_end - tramp_start;
+
+ if (entry->magic != BRANCH_TRAMPOLINE_MAGIC) {
+ reladdr = (unsigned long)entry->saved_entry;
+
+ memcpy(entry->jump, (u32 *)tramp_start, tramp_size);
+
+ entry->jump[4] |= PPC_LIVEPATCH_HIGHEST(reladdr);
+ entry->jump[5] |= PPC_LIVEPATCH_HIGHER(reladdr);
+ entry->jump[7] |= PPC_LIVEPATCH_HIGH(reladdr);
+ entry->jump[8] |= PPC_LIVEPATCH_LOW(reladdr);
+
+ entry->magic = BRANCH_TRAMPOLINE_MAGIC;
+ }
+ entry->funcdata = func_desc(addr);
+
+ flush_icache_range((unsigned long)entry, (unsigned long)entry + tramp_size);
+
+ pr_debug("Create livepatch trampoline 0x%px+%lu/0x%lx to 0x%lx/0x%lx/%pS\n",
+ (void *)entry, tramp_size, (unsigned long)entry->saved_entry,
+ addr, ppc_function_entry((void *)addr),
+ (void *)ppc_function_entry((void *)addr));
+}
+#endif
+
+int livepatch_create_branch(unsigned long pc,
+ unsigned long trampoline,
+ unsigned long addr,
+ struct module *me)
+{
+#ifdef PPC64_ELF_ABI_v1
+ /* Create trampoline to addr(new func) */
+ livepatch_create_btramp((struct ppc64_klp_btramp_entry *)trampoline, addr, me);
+#else
+ trampoline = addr;
+#endif
+
+ /* Create stub to trampoline */
+ if (!livepatch_create_bstub((struct ppc64_klp_bstub_entry *)pc, trampoline, me))
+ return -EINVAL;
+
+ return 0;
+}
+#endif
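
livepatch_create_btramp() above patches the 64-bit address of the saved entry into the trampoline as four 16-bit immediates (highest/higher/high/low). The standalone sketch below is only an illustration of that split and of how the trampoline's instruction sequence conceptually reassembles the address; the names and the sample address are made up.

#include <stdint.h>
#include <stdio.h>

#define BITMASK(v, n)	(((v) >> (n)) & 0xffff)	/* same idea as PPC_LIVEPATCH_BITMASK */

int main(void)
{
	uint64_t addr = 0xc000000001a2b3c4ULL;

	uint64_t highest = BITMASK(addr, 48);
	uint64_t higher  = BITMASK(addr, 32);
	uint64_t high    = BITMASK(addr, 16);
	uint64_t low     = BITMASK(addr, 0);

	/* the trampoline rebuilds the address piece by piece */
	uint64_t rebuilt = (highest << 48) | (higher << 32) | (high << 16) | low;

	printf("match: %d\n", rebuilt == addr);	/* prints "match: 1" */
	return 0;
}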
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 4a2a12be04c9..64a31b499c72 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -165,7 +165,7 @@ config S390
select HAVE_KPROBES_ON_FTRACE
select HAVE_KRETPROBES
select HAVE_KVM
- select HAVE_LIVEPATCH
+ select HAVE_LIVEPATCH_FTRACE
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_MEMBLOCK_PHYS_MAP
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 6ae09ea192dc..c27991db1fa5 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -198,7 +198,8 @@ config X86
select HAVE_FUNCTION_ERROR_INJECTION
select HAVE_KRETPROBES
select HAVE_KVM
- select HAVE_LIVEPATCH if X86_64
+ select HAVE_LIVEPATCH_FTRACE if X86_64
+ select HAVE_LIVEPATCH_WO_FTRACE if X86_64
select HAVE_MIXED_BREAKPOINTS_REGS
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_MOVE_PMD
diff --git a/arch/x86/include/asm/livepatch.h b/arch/x86/include/asm/livepatch.h
index 1fde1ab6559e..dd51bb6c1816 100644
--- a/arch/x86/include/asm/livepatch.h
+++ b/arch/x86/include/asm/livepatch.h
@@ -12,9 +12,22 @@
#include
#include
+struct klp_patch;
+struct klp_func;
+
+#ifdef CONFIG_LIVEPATCH_FTRACE
static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
regs->ip = ip;
}
+#else /* CONFIG_LIVEPATCH_WO_FTRACE */
+#define klp_smp_isb()
+int arch_klp_patch_func(struct klp_func *func);
+void arch_klp_unpatch_func(struct klp_func *func);
+#endif
+
+#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY
+int klp_check_calltrace(struct klp_patch *patch, int enable);
+#endif
#endif /* _ASM_X86_LIVEPATCH_H */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 68608bd892c0..6bbdb50abbf3 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -99,6 +99,7 @@ obj-$(CONFIG_X86_MPPARSE) += mpparse.o
obj-y += apic/
obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
+obj-$(CONFIG_LIVEPATCH) += livepatch.o
obj-$(CONFIG_FUNCTION_TRACER) += ftrace_$(BITS).o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
diff --git a/arch/x86/kernel/livepatch.c b/arch/x86/kernel/livepatch.c
new file mode 100644
index 000000000000..386d224d5890
--- /dev/null
+++ b/arch/x86/kernel/livepatch.c
@@ -0,0 +1,506 @@
+/*
+ * livepatch.c - x86-specific Kernel Live Patching Core
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+#if defined (CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY) || \
+ defined (CONFIG_LIVEPATCH_WO_FTRACE)
+#define JMP_E9_INSN_SIZE 5
+
+struct klp_func_node {
+ struct list_head node;
+ struct list_head func_stack;
+ void *old_func;
+ unsigned char old_code[JMP_E9_INSN_SIZE];
+};
+
+static LIST_HEAD(klp_func_list);
+
+static struct klp_func_node *klp_find_func_node(void *old_func)
+{
+ struct klp_func_node *func_node;
+
+ list_for_each_entry(func_node, &klp_func_list, node) {
+ if (func_node->old_func == old_func)
+ return func_node;
+ }
+
+ return NULL;
+}
+#endif
+
+#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY
+/*
+ * The instruction set on x86 is CISC.
+ * The call instructions within the same segment are 11101000 (direct),
+ * 11111111 (register indirect) and 11111111 (memory indirect).
+ * The call instructions to another segment are 10011010 (direct) and
+ * 11111111 (indirect).
+ */
+static bool is_jump_insn(u8 *insn)
+{
+ if ((insn[0] == 0xE8) || (insn[0] == 0x9a))
+ return true;
+ else if ((insn[0] == 0xFF) && ((insn[1] & 0x30) == 0x10))
+ return true;
+ return false;
+}
+
+struct klp_func_list {
+ struct klp_func_list *next;
+ unsigned long func_addr;
+ unsigned long func_size;
+ const char *func_name;
+ int force;
+};
+
+static inline unsigned long klp_size_to_check(unsigned long func_size,
+ int force)
+{
+ unsigned long size = func_size;
+
+ if (force == KLP_STACK_OPTIMIZE && size > JMP_E9_INSN_SIZE)
+ size = JMP_E9_INSN_SIZE;
+ return size;
+}
+
+static inline int klp_compare_address(unsigned long stack_addr,
+ unsigned long func_addr, const char *func_name,
+ unsigned long check_size)
+{
+ if (stack_addr >= func_addr && stack_addr < func_addr + check_size) {
+ pr_err("func %s is in use!\n", func_name);
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static bool check_jump_insn(unsigned long func_addr)
+{
+ int len = JMP_E9_INSN_SIZE;
+ struct insn insn;
+ u8 *addr = (u8*)func_addr;
+
+ do {
+ if (is_jump_insn(addr))
+ return true;
+ insn_init(&insn, addr, MAX_INSN_SIZE, 1);
+ insn_get_length(&insn);
+ if (!insn.length || !insn_complete(&insn))
+ return true;
+ len -= insn.length;
+ addr += insn.length;
+ } while (len > 0);
+
+ return false;
+}
+
+static int add_func_to_list(struct klp_func_list **funcs, struct klp_func_list **func,
+ unsigned long func_addr, unsigned long func_size, const char *func_name,
+ int force)
+{
+ if (*func == NULL) {
+ *funcs = (struct klp_func_list*)kzalloc(sizeof(**funcs), GFP_ATOMIC);
+ if (!(*funcs))
+ return -ENOMEM;
+ *func = *funcs;
+ } else {
+ (*func)->next = (struct klp_func_list*)kzalloc(sizeof(**funcs),
+ GFP_ATOMIC);
+ if (!(*func)->next)
+ return -ENOMEM;
+ *func = (*func)->next;
+ }
+ (*func)->func_addr = func_addr;
+ (*func)->func_size = func_size;
+ (*func)->func_name = func_name;
+ (*func)->force = force;
+ (*func)->next = NULL;
+ return 0;
+}
+
+static int klp_check_activeness_func(struct klp_patch *patch, int enable,
+ struct klp_func_list **check_funcs)
+{
+ int ret;
+ struct klp_object *obj;
+ struct klp_func *func;
+ unsigned long func_addr, func_size;
+ struct klp_func_node *func_node = NULL;
+ struct klp_func_list *pcheck = NULL;
+
+ for (obj = patch->objs; obj->funcs; obj++) {
+ for (func = obj->funcs; func->old_name; func++) {
+ func_node = klp_find_func_node(func->old_func);
+
+ /* Check func address in stack */
+ if (enable) {
+ if (func->force == KLP_ENFORCEMENT)
+ continue;
+ /*
+				 * When enabling, check the currently
+				 * active functions.
+ */
+ if (!func_node ||
+ list_empty(&func_node->func_stack)) {
+ func_addr = (unsigned long)func->old_func;
+ func_size = func->old_size;
+ } else {
+ /*
+ * Previously patched function
+ * [the active one]
+ */
+ struct klp_func *prev;
+
+ prev = list_first_or_null_rcu(
+ &func_node->func_stack,
+ struct klp_func, stack_node);
+ func_addr = (unsigned long)prev->new_func;
+ func_size = prev->new_size;
+ }
+ /*
+				 * When preemption is disabled and the
+				 * replacement area does not contain a jump
+				 * instruction, the migration thread is
+				 * scheduled to run stop machine only after
+				 * the execution of the instructions to be
+				 * replaced is complete.
+ */
+ if (IS_ENABLED(CONFIG_PREEMPTION) ||
+ (func->force == KLP_NORMAL_FORCE) ||
+ check_jump_insn(func_addr)) {
+ ret = add_func_to_list(check_funcs, &pcheck,
+ func_addr, func_size,
+ func->old_name, func->force);
+ if (ret)
+ return ret;
+ }
+ } else {
+ /*
+				 * When disabling, check the function
+				 * itself which is to be unpatched.
+ */
+ if (!func_node)
+ return -EINVAL;
+#ifdef CONFIG_PREEMPTION
+ /*
+ * No scheduling point in the replacement
+ * instructions. Therefore, when preemption is
+ * not enabled, atomic execution is performed
+ * and these instructions will not appear on
+ * the stack.
+ */
+ if (list_is_singular(&func_node->func_stack)) {
+ func_addr = (unsigned long)func->old_func;
+ func_size = func->old_size;
+ } else {
+ struct klp_func *prev;
+
+ prev = list_first_or_null_rcu(
+ &func_node->func_stack,
+ struct klp_func, stack_node);
+ func_addr = (unsigned long)prev->new_func;
+ func_size = prev->new_size;
+ }
+ ret = add_func_to_list(check_funcs, &pcheck, func_addr,
+ func_size, func->old_name, 0);
+ if (ret)
+ return ret;
+#endif
+
+ func_addr = (unsigned long)func->new_func;
+ func_size = func->new_size;
+ ret = add_func_to_list(check_funcs, &pcheck, func_addr,
+ func_size, func->old_name, 0);
+ if (ret)
+ return ret;
+ }
+ }
+ }
+ return 0;
+}
+
+static void klp_print_stack_trace(void *trace_ptr, int trace_len)
+{
+ int i;
+#ifdef CONFIG_ARCH_STACKWALK
+ unsigned long *trace = trace_ptr;
+#else
+ struct stack_trace *trace = trace_ptr;
+#endif
+
+ pr_err("Call Trace:\n");
+#ifdef CONFIG_ARCH_STACKWALK
+ for (i = 0; i < trace_len; i++) {
+ pr_err("[<%pK>] %pS\n",
+ (void *)trace[i],
+ (void *)trace[i]);
+ }
+#else
+ for (i = 0; i < trace->nr_entries; i++) {
+ pr_err("[<%pK>] %pS\n",
+ (void *)trace->entries[i],
+ (void *)trace->entries[i]);
+ }
+#endif
+
+}
+
+#ifdef MAX_STACK_ENTRIES
+#undef MAX_STACK_ENTRIES
+#endif
+#define MAX_STACK_ENTRIES 100
+
+static bool check_func_list(struct klp_func_list *funcs, int *ret, unsigned long pc)
+{
+ while (funcs != NULL) {
+ *ret = klp_compare_address(pc, funcs->func_addr, funcs->func_name,
+ klp_size_to_check(funcs->func_size, funcs->force));
+ if (*ret) {
+ return false;
+ }
+ funcs = funcs->next;
+ }
+ return true;
+}
+
+static int klp_check_stack(void *trace_ptr, int trace_len,
+ struct klp_func_list *check_funcs)
+{
+#ifdef CONFIG_ARCH_STACKWALK
+ unsigned long *trace = trace_ptr;
+#else
+ struct stack_trace *trace = trace_ptr;
+#endif
+ unsigned long address;
+ int i, ret;
+
+#ifdef CONFIG_ARCH_STACKWALK
+ for (i = 0; i < trace_len; i++) {
+ address = trace[i];
+#else
+ for (i = 0; i < trace->nr_entries; i++) {
+ address = trace->entries[i];
+#endif
+ if (!check_func_list(check_funcs, &ret, address)) {
+#ifdef CONFIG_ARCH_STACKWALK
+ klp_print_stack_trace(trace_ptr, trace_len);
+#else
+ klp_print_stack_trace(trace_ptr, 0);
+#endif
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void free_list(struct klp_func_list **funcs)
+{
+ struct klp_func_list *p;
+
+ while (*funcs != NULL) {
+ p = *funcs;
+ *funcs = (*funcs)->next;
+ kfree(p);
+ }
+}
+
+int klp_check_calltrace(struct klp_patch *patch, int enable)
+{
+ struct task_struct *g, *t;
+ int ret = 0;
+ struct klp_func_list *check_funcs = NULL;
+ static unsigned long trace_entries[MAX_STACK_ENTRIES];
+#ifdef CONFIG_ARCH_STACKWALK
+ int trace_len;
+#else
+ struct stack_trace trace;
+#endif
+
+ ret = klp_check_activeness_func(patch, enable, &check_funcs);
+ if (ret)
+ goto out;
+ for_each_process_thread(g, t) {
+ if (!strncmp(t->comm, "migration/", 10))
+ continue;
+
+#ifdef CONFIG_ARCH_STACKWALK
+ ret = stack_trace_save_tsk_reliable(t, trace_entries, MAX_STACK_ENTRIES);
+ if (ret < 0)
+ goto out;
+ trace_len = ret;
+ ret = 0;
+#else
+ trace.skip = 0;
+ trace.nr_entries = 0;
+ trace.max_entries = MAX_STACK_ENTRIES;
+ trace.entries = trace_entries;
+ ret = save_stack_trace_tsk_reliable(t, &trace);
+#endif
+ WARN_ON_ONCE(ret == -ENOSYS);
+ if (ret) {
+ pr_info("%s: %s:%d has an unreliable stack\n",
+ __func__, t->comm, t->pid);
+ goto out;
+ }
+#ifdef CONFIG_ARCH_STACKWALK
+ ret = klp_check_stack(trace_entries, trace_len, check_funcs);
+#else
+ ret = klp_check_stack(&trace, 0, check_funcs);
+#endif
+ if (ret)
+ goto out;
+ }
+
+out:
+ free_list(&check_funcs);
+ return ret;
+}
+#endif
+
+#ifdef CONFIG_LIVEPATCH_WO_FTRACE
+static void *klp_jmp_code(unsigned long ip, unsigned long addr)
+{
+ return text_gen_insn(JMP32_INSN_OPCODE, (void *)ip, (void *)addr);
+}
+
+static void *klp_old_code(unsigned char *code)
+{
+ static unsigned char old_code[JMP_E9_INSN_SIZE];
+
+	memcpy(old_code, code, JMP_E9_INSN_SIZE);
+ return old_code;
+}
+
+void arch_klp_code_modify_prepare(void)
+ __acquires(&text_mutex)
+{
+ mutex_lock(&text_mutex);
+}
+
+void arch_klp_code_modify_post_process(void)
+ __releases(&text_mutex)
+{
+ text_poke_sync();
+ mutex_unlock(&text_mutex);
+}
+
+int arch_klp_patch_func(struct klp_func *func)
+{
+ struct klp_func_node *func_node;
+ unsigned long ip, new_addr;
+ void *new;
+ long ret;
+
+ func_node = klp_find_func_node(func->old_func);
+ ip = (unsigned long)func->old_func;
+ if (!func_node) {
+ func_node = func->func_node;
+ if (!func_node)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&func_node->func_stack);
+ func_node->old_func = func->old_func;
+ ret = copy_from_kernel_nofault(func_node->old_code,
+ (void *)ip, JMP_E9_INSN_SIZE);
+ if (ret) {
+ return -EPERM;
+ }
+ list_add_rcu(&func_node->node, &klp_func_list);
+ }
+
+ list_add_rcu(&func->stack_node, &func_node->func_stack);
+
+ new_addr = (unsigned long)func->new_func;
+ /* replace the text with the new text */
+ new = klp_jmp_code(ip, new_addr);
+ text_poke((void *)ip, new, JMP_E9_INSN_SIZE);
+
+ return 0;
+}
+
+void arch_klp_unpatch_func(struct klp_func *func)
+{
+ struct klp_func_node *func_node;
+ struct klp_func *next_func;
+ unsigned long ip, new_addr;
+ void *new;
+
+ func_node = klp_find_func_node(func->old_func);
+ ip = (unsigned long)func_node->old_func;
+ if (list_is_singular(&func_node->func_stack)) {
+ list_del_rcu(&func->stack_node);
+ list_del_rcu(&func_node->node);
+ new = klp_old_code(func_node->old_code);
+ } else {
+ list_del_rcu(&func->stack_node);
+ next_func = list_first_or_null_rcu(&func_node->func_stack,
+ struct klp_func, stack_node);
+
+ new_addr = (unsigned long)next_func->new_func;
+ new = klp_jmp_code(ip, new_addr);
+ }
+
+ /* replace the text with the new text */
+ text_poke((void *)ip, new, JMP_E9_INSN_SIZE);
+}
+
+void arch_klp_mem_prepare(struct klp_patch *patch)
+{
+ struct klp_object *obj;
+ struct klp_func *func;
+
+ klp_for_each_object(patch, obj) {
+ klp_for_each_func(obj, func) {
+ func->func_node = kzalloc(sizeof(struct klp_func_node),
+ GFP_ATOMIC);
+ }
+ }
+}
+
+void arch_klp_mem_recycle(struct klp_patch *patch)
+{
+ struct klp_object *obj;
+ struct klp_func *func;
+ struct klp_func_node *func_node;
+
+ klp_for_each_object(patch, obj) {
+ klp_for_each_func(obj, func) {
+ func_node = func->func_node;
+ if (func_node && list_is_singular(&func_node->func_stack)) {
+ kfree(func_node);
+ func->func_node = NULL;
+ }
+ }
+ }
+}
+#endif
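
On x86 the whole patch operation boils down to the 5-byte "jmp rel32" that klp_jmp_code() obtains from text_gen_insn(): opcode 0xE9 followed by a displacement relative to the end of the instruction. The sketch below is a hedged standalone illustration of that encoding, not the kernel helper itself; the addresses are made up.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define JMP_E9_INSN_SIZE 5

static void gen_jmp_e9(uint8_t buf[JMP_E9_INSN_SIZE], uint64_t ip, uint64_t target)
{
	/* displacement is relative to the first byte after the jmp */
	int32_t rel = (int32_t)(target - (ip + JMP_E9_INSN_SIZE));

	buf[0] = 0xE9;
	memcpy(&buf[1], &rel, sizeof(rel));	/* little-endian rel32 */
}

int main(void)
{
	uint8_t buf[JMP_E9_INSN_SIZE];
	int i;

	gen_jmp_e9(buf, 0xffffffff81000000ULL, 0xffffffffc0000010ULL);
	for (i = 0; i < JMP_E9_INSN_SIZE; i++)
		printf("%02x ", buf[i]);
	printf("\n");
	return 0;
}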
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 32809624d422..7e1dce5670fc 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -229,6 +229,7 @@ extern void static_key_enable(struct static_key *key);
extern void static_key_disable(struct static_key *key);
extern void static_key_enable_cpuslocked(struct static_key *key);
extern void static_key_disable_cpuslocked(struct static_key *key);
+extern int jump_label_register(struct module *mod);
/*
* We should be using ATOMIC_INIT() for initializing .enabled, but
@@ -301,6 +302,11 @@ static inline int jump_label_apply_nops(struct module *mod)
return 0;
}
+static inline int jump_label_register(struct module *mod)
+{
+ return 0;
+}
+
static inline void static_key_enable(struct static_key *key)
{
STATIC_KEY_CHECK_USE(key);
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 2614247a9781..229216646d50 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -23,6 +23,10 @@
#define KLP_UNPATCHED 0
#define KLP_PATCHED 1
+#define KLP_NORMAL_FORCE 0
+#define KLP_ENFORCEMENT 1
+#define KLP_STACK_OPTIMIZE 2
+
/**
* struct klp_func - function structure for live patching
* @old_name: name of the function to be patched
@@ -66,6 +70,7 @@ struct klp_func {
* in kallsyms for the given object is used.
*/
unsigned long old_sympos;
+ int force;
/* internal */
void *old_func;
@@ -75,11 +80,27 @@ struct klp_func {
unsigned long old_size, new_size;
bool nop;
bool patched;
+#ifdef CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY
bool transition;
+#endif
+#if defined(CONFIG_LIVEPATCH_WO_FTRACE) && defined(CONFIG_PPC64)
+ struct module *old_mod;
+#ifdef PPC64_ELF_ABI_v1
+ struct module *this_mod;
+ func_descr_t new_func_descr;
+#endif
+#endif
+ void *func_node;
};
struct klp_object;
+#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY
+struct klp_hook {
+ void (*hook)(void);
+};
+#endif
+
/**
* struct klp_callbacks - pre/post live-(un)patch callback structure
* @pre_patch: executed before code patching
@@ -119,6 +140,10 @@ struct klp_object {
/* external */
const char *name;
struct klp_func *funcs;
+#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY
+ struct klp_hook *hooks_load;
+ struct klp_hook *hooks_unload;
+#endif
struct klp_callbacks callbacks;
/* internal */
@@ -193,8 +218,19 @@ struct klp_patch {
#define klp_for_each_func(obj, func) \
list_for_each_entry(func, &obj->func_list, node)
+#ifdef CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY
int klp_enable_patch(struct klp_patch *);
+#elif defined(CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY)
+int klp_register_patch(struct klp_patch *patch);
+int klp_unregister_patch(struct klp_patch *patch);
+#endif
+
+int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
+ const char *shstrtab, const char *strtab,
+ unsigned int symindex, unsigned int secindex,
+ const char *objname);
+#ifdef CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY
/* Called from the module loader during module coming/going states */
int klp_module_coming(struct module *mod);
void klp_module_going(struct module *mod);
@@ -231,10 +267,20 @@ void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor);
struct klp_state *klp_get_state(struct klp_patch *patch, unsigned long id);
struct klp_state *klp_get_prev_state(unsigned long id);
-int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
- const char *shstrtab, const char *strtab,
- unsigned int symindex, unsigned int secindex,
- const char *objname);
+#else /* !CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY */
+
+static inline int klp_module_coming(struct module *mod) { return 0; }
+static inline void klp_module_going(struct module *mod) {}
+static inline bool klp_patch_pending(struct task_struct *task) { return false; }
+static inline void klp_update_patch_state(struct task_struct *task) {}
+static inline void klp_copy_process(struct task_struct *child) {}
+static inline bool klp_have_reliable_stack(void) { return true; }
+
+#ifndef klp_smp_isb
+#define klp_smp_isb()
+#endif
+
+#endif /* CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY */
#else /* !CONFIG_LIVEPATCH */
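
For the stop-machine variant, the interface added above (klp_register_patch()/klp_unregister_patch(), struct klp_hook and the per-function force field) is consumed by patch modules roughly as in the following sketch. This is only an illustration of the intended usage under CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY: the target symbol, the replacement body and the hook are placeholders, not a tested patch.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/seq_file.h>
#include <linux/livepatch.h>

/* placeholder replacement for the assumed target function */
static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%s\n", "this has been live patched");
	return 0;
}

static void demo_load_hook(void)
{
	pr_info("sample livepatch: object loaded\n");
}

static struct klp_hook demo_hooks_load[] = {
	{ .hook = demo_load_hook },
	{ }
};

static struct klp_func funcs[] = {
	{
		.old_name = "cmdline_proc_show",
		.new_func = livepatch_cmdline_proc_show,
		/* only the rewritten prologue is checked on task stacks */
		.force = KLP_STACK_OPTIMIZE,
	},
	{ }
};

static struct klp_object objs[] = {
	{
		/* .name = NULL means vmlinux */
		.funcs = funcs,
		.hooks_load = demo_hooks_load,
	},
	{ }
};

static struct klp_patch patch = {
	.mod = THIS_MODULE,
	.objs = objs,
};

static int __init demo_patch_init(void)
{
	return klp_register_patch(&patch);
}

static void __exit demo_patch_exit(void)
{
	klp_unregister_patch(&patch);
}

module_init(demo_patch_init);
module_exit(demo_patch_exit);
MODULE_LICENSE("GPL");
MODULE_INFO(livepatch, "Y");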
diff --git a/include/linux/module.h b/include/linux/module.h
index 6264617bab4d..d7c6792d705b 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -350,6 +350,12 @@ struct mod_kallsyms {
};
#ifdef CONFIG_LIVEPATCH
+enum MODULE_KLP_REL_STATE {
+ MODULE_KLP_REL_NONE = 0,
+ MODULE_KLP_REL_UNDO,
+ MODULE_KLP_REL_DONE,
+};
+
struct klp_modinfo {
Elf_Ehdr hdr;
Elf_Shdr *sechdrs;
@@ -510,6 +516,19 @@ struct module {
/* Elf information */
struct klp_modinfo *klp_info;
+ /*
+	 * Livepatch must relocate the jump_label keys via
+	 * klp_apply_section_relocs(), so jump_label_apply_nops() and
+	 * jump_label_add_module() have to be done later, after the
+	 * livepatch relocation has finished.
+	 *
+	 * For a normal module:
+	 *   always MODULE_KLP_REL_DONE.
+	 * For a livepatch module:
+	 *   initialized as MODULE_KLP_REL_UNDO,
+	 *   set to MODULE_KLP_REL_DONE once relocation is completed.
+ */
+ enum MODULE_KLP_REL_STATE klp_rel_state;
#endif
#ifdef CONFIG_MODULE_UNLOAD
@@ -680,11 +699,28 @@ static inline bool is_livepatch_module(struct module *mod)
{
return mod->klp;
}
+
+static inline void set_mod_klp_rel_state(struct module *mod,
+ enum MODULE_KLP_REL_STATE state)
+{
+ mod->klp_rel_state = state;
+}
+
+static inline bool mod_klp_rel_completed(struct module *mod)
+{
+ return mod->klp_rel_state == MODULE_KLP_REL_NONE ||
+ mod->klp_rel_state == MODULE_KLP_REL_DONE;
+}
#else /* !CONFIG_LIVEPATCH */
static inline bool is_livepatch_module(struct module *mod)
{
return false;
}
+
+static inline bool mod_klp_rel_completed(struct module *mod)
+{
+ return true;
+}
#endif /* CONFIG_LIVEPATCH */
bool is_module_sig_enforced(void);
@@ -851,6 +887,14 @@ extern int module_sysfs_initialized;
#define __MODULE_STRING(x) __stringify(x)
+#ifdef CONFIG_STRICT_MODULE_RWX
+extern void module_enable_ro(const struct module *mod, bool after_init);
+extern void module_disable_ro(const struct module *mod);
+#else
+static inline void module_enable_ro(const struct module *mod, bool after_init) { }
+static inline void module_disable_ro(const struct module *mod) { }
+#endif
+
#ifdef CONFIG_GENERIC_BUG
void module_bug_finalize(const Elf_Ehdr *, const Elf_Shdr *,
struct module *);
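
The klp_rel_state comment above describes an ordering constraint rather than an API, so the following is only a hedged sketch of the intended sequence (the real sequencing lives in the module loader and the livepatch core, and the helper name here is hypothetical): a livepatch module stays in MODULE_KLP_REL_UNDO while its klp relocation sections are unapplied, which makes jump_label_apply_nops()/jump_label_add_module() back off; once relocation is done the state is flipped and the deferred jump label work is replayed through jump_label_register().

/* hypothetical flow, kernel context assumed */
static int example_livepatch_module_flow(struct module *mod)
{
	/* livepatch module: jump label handling must wait */
	set_mod_klp_rel_state(mod, MODULE_KLP_REL_UNDO);

	/* ... klp_apply_section_relocs() runs for each klp rela section ... */

	/* relocations finished: allow and replay the jump label work */
	set_mod_klp_rel_state(mod, MODULE_KLP_REL_DONE);
	return jump_label_register(mod);
}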
diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
index 4fa67a8b2265..2d835b7dc918 100644
--- a/include/linux/moduleloader.h
+++ b/include/linux/moduleloader.h
@@ -96,6 +96,8 @@ void module_arch_cleanup(struct module *mod);
/* Any cleanup before freeing mod->module_init */
void module_arch_freeing_init(struct module *mod);
+void flush_module_icache(const struct module *mod);
+
#if defined(CONFIG_KASAN) && !defined(CONFIG_KASAN_VMALLOC)
#include
#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index a0c325664190..7dd54db7c27e 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -607,6 +607,9 @@ void jump_label_apply_nops(struct module *mod)
struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
struct jump_entry *iter;
+ if (unlikely(!mod_klp_rel_completed(mod)))
+ return;
+
/* if the module doesn't have jump label entries, just return */
if (iter_start == iter_stop)
return;
@@ -626,6 +629,9 @@ static int jump_label_add_module(struct module *mod)
struct static_key *key = NULL;
struct static_key_mod *jlm, *jlm2;
+ if (unlikely(!mod_klp_rel_completed(mod)))
+ return 0;
+
/* if the module doesn't have jump label entries, just return */
if (iter_start == iter_stop)
return 0;
@@ -763,6 +769,16 @@ static struct notifier_block jump_label_module_nb = {
.priority = 1, /* higher than tracepoints */
};
+int jump_label_register(struct module *mod)
+{
+ int ret;
+
+ ret = jump_label_module_notify(&jump_label_module_nb,
+ MODULE_STATE_COMING, mod);
+
+ return notifier_to_errno(ret);
+}
+
static __init int jump_label_init_module(void)
{
return register_module_notifier(&jump_label_module_nb);
diff --git a/kernel/livepatch/Kconfig b/kernel/livepatch/Kconfig
index 54102deb50ba..297ca41c695e 100644
--- a/kernel/livepatch/Kconfig
+++ b/kernel/livepatch/Kconfig
@@ -1,20 +1,102 @@
# SPDX-License-Identifier: GPL-2.0-only
-config HAVE_LIVEPATCH
+config HAVE_LIVEPATCH_FTRACE
bool
help
- Arch supports kernel live patching
+ Arch supports kernel live patching based on ftrace
+
+config HAVE_LIVEPATCH_WO_FTRACE
+ bool
+ help
+ Arch supports kernel live patching without ftrace
+
+if HAVE_LIVEPATCH_FTRACE || HAVE_LIVEPATCH_WO_FTRACE
+menu "Enable Livepatch"
+
config LIVEPATCH
bool "Kernel Live Patching"
- depends on DYNAMIC_FTRACE_WITH_REGS
depends on MODULES
depends on SYSFS
depends on KALLSYMS_ALL
- depends on HAVE_LIVEPATCH
+ depends on HAVE_LIVEPATCH_FTRACE || HAVE_LIVEPATCH_WO_FTRACE
depends on !TRIM_UNUSED_KSYMS
+ depends on DEBUG_INFO
+ default n
help
Say Y here if you want to support kernel live patching.
This option has no runtime impact until a kernel "patch"
module uses the interface provided by this option to register
a patch, causing calls to patched functions to be redirected
to new function code contained in the patch module.
+
+choice
+ prompt "live patching method"
+ depends on LIVEPATCH
+ help
+ Live patching implementation method configuration.
+
+config LIVEPATCH_FTRACE
+ bool "based on ftrace"
+ depends on HAVE_LIVEPATCH_FTRACE
+ depends on DYNAMIC_FTRACE_WITH_REGS
+ select LIVEPATCH_PER_TASK_CONSISTENCY
+ help
+ Supports kernel live patching based on ftrace
+
+config LIVEPATCH_WO_FTRACE
+ bool "without ftrace"
+ depends on HAVE_LIVEPATCH_WO_FTRACE
+ select LIVEPATCH_STOP_MACHINE_CONSISTENCY
+ help
+ Supports kernel live patching without ftrace
+
+endchoice
+
+choice
+ prompt "live patching consistency model"
+ depends on LIVEPATCH
+ default LIVEPATCH_PER_TASK_CONSISTENCY if LIVEPATCH_FTRACE
+ default LIVEPATCH_STOP_MACHINE_CONSISTENCY if LIVEPATCH_WO_FTRACE
+ help
+ Livepatch consistency model configuration.
+
+config LIVEPATCH_PER_TASK_CONSISTENCY
+ bool "per task consistency"
+ depends on LIVEPATCH_FTRACE
+ help
+	  Use the basic per-task consistency model.
+ It's a hybrid of kGraft and kpatch:
+ uses kGraft's per-task consistency and syscall
+ barrier switching combined with kpatch's stack
+ trace switching. There are also a number of
+ fallback options which make it quite flexible.
+
+config LIVEPATCH_STOP_MACHINE_CONSISTENCY
+ bool "stop machine consistency"
+ depends on LIVEPATCH_WO_FTRACE
+ help
+	  Use the stop-machine consistency model:
+	  stop_machine() switching combined with kpatch's stack
+	  trace checking.
+
+endchoice
+
+config LIVEPATCH_STACK
+ bool "Enforcing the patch stacking principle"
+ depends on LIVEPATCH_FTRACE || LIVEPATCH_WO_FTRACE
+ default y
+ help
+ Say N here if you want to remove the patch stacking principle.
+
+config LIVEPATCH_RESTRICT_KPROBE
+	bool "Enforce the livepatch and kprobe restriction"
+ depends on LIVEPATCH_WO_FTRACE
+ depends on KPROBES
+ default y
+ help
+	  Livepatch without ftrace and kprobes conflict with each other.
+	  We should not patch functions that are registered with a kprobe,
+	  and vice versa.
+	  Say Y here if you want this check.
+endmenu
+endif
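
With the Kconfig structure above, choosing the ftrace-less method on an architecture that selects HAVE_LIVEPATCH_WO_FTRACE would typically resolve to a configuration fragment like the following (illustrative only; the defaults shown follow the definitions above):

CONFIG_HAVE_LIVEPATCH_WO_FTRACE=y
CONFIG_LIVEPATCH=y
CONFIG_LIVEPATCH_WO_FTRACE=y
CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY=y
CONFIG_LIVEPATCH_STACK=y
CONFIG_LIVEPATCH_RESTRICT_KPROBE=y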
diff --git a/kernel/livepatch/Makefile b/kernel/livepatch/Makefile
index cf03d4bdfc66..4130bb7d5417 100644
--- a/kernel/livepatch/Makefile
+++ b/kernel/livepatch/Makefile
@@ -1,4 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_LIVEPATCH) += livepatch.o
+obj-$(CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY) += transition.o
+obj-$(CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY) += shadow.o
+obj-$(CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY) += state.o
-livepatch-objs := core.o patch.o shadow.o state.o transition.o
+livepatch-objs := core.o patch.o
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index f76fdb925532..1fde6ba196a4 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -22,8 +22,17 @@
#include
#include "core.h"
#include "patch.h"
+#include
+#include
+#ifdef CONFIG_LIVEPATCH_RESTRICT_KPROBE
+#include
+#endif
+#if defined(CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY)
#include "state.h"
#include "transition.h"
+#elif defined(CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY)
+#include
+#endif
/*
* klp_mutex is a coarse lock which serializes access to klp data. All
@@ -44,18 +53,59 @@ LIST_HEAD(klp_patches);
static struct kobject *klp_root_kobj;
+#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY
+struct patch_data {
+ struct klp_patch *patch;
+ atomic_t cpu_count;
+};
+#endif
+
+#ifdef CONFIG_LIVEPATCH_RESTRICT_KPROBE
+/*
+ * Check whether a function has been registered with kprobes before it is
+ * patched. We cannot patch such a function until the kprobes are unregistered.
+ */
+struct kprobe *klp_check_patch_kprobed(struct klp_patch *patch)
+{
+ struct klp_object *obj;
+ struct klp_func *func;
+ struct kprobe *kp;
+ int i;
+
+ klp_for_each_object(patch, obj) {
+ klp_for_each_func(obj, func) {
+ for (i = 0; i < func->old_size; i++) {
+ kp = get_kprobe(func->old_func + i);
+ if (kp) {
+ pr_err("func %s has been probed, (un)patch failed\n",
+ func->old_name);
+ return kp;
+ }
+ }
+ }
+ }
+
+ return NULL;
+}
+#else
+static inline struct kprobe *klp_check_patch_kprobed(struct klp_patch *patch)
+{
+ return NULL;
+}
+#endif /* CONFIG_LIVEPATCH_RESTRICT_KPROBE */
+
static bool klp_is_module(struct klp_object *obj)
{
return obj->name;
}
/* sets obj->mod if object is not vmlinux and module is found */
-static void klp_find_object_module(struct klp_object *obj)
+static int klp_find_object_module(struct klp_object *obj)
{
struct module *mod;
if (!klp_is_module(obj))
- return;
+ return 0;
mutex_lock(&module_mutex);
/*
@@ -63,6 +113,7 @@ static void klp_find_object_module(struct klp_object *obj)
* we do not take a reference here. The patches are removed by
* klp_module_going() instead.
*/
+
mod = find_module(obj->name);
/*
* Do not mess work of klp_module_coming() and klp_module_going().
@@ -71,10 +122,26 @@ static void klp_find_object_module(struct klp_object *obj)
* until mod->exit() finishes. This is especially important for
* patches that modify semantic of the functions.
*/
+#ifdef CONFIG_LIVEPATCH_FTRACE
if (mod && mod->klp_alive)
obj->mod = mod;
+#else
+ if (!mod) {
+ pr_err("module '%s' not loaded\n", obj->name);
+ mutex_unlock(&module_mutex);
+		return -ENOPKG; /* the module it depends on is not loaded */
+ }
+
+ if (mod->state == MODULE_STATE_COMING || !try_module_get(mod)) {
+ mutex_unlock(&module_mutex);
+ return -EINVAL;
+ }
+
+ obj->mod = mod;
+#endif
mutex_unlock(&module_mutex);
+ return 0;
}
static bool klp_initialized(void)
@@ -191,14 +258,18 @@ static int klp_find_object_symbol(const char *objname, const char *name,
return -EINVAL;
}
-static int klp_resolve_symbols(Elf64_Shdr *sechdrs, const char *strtab,
+static int klp_resolve_symbols(Elf_Shdr *sechdrs, const char *strtab,
unsigned int symndx, Elf_Shdr *relasec,
const char *sec_objname)
{
int i, cnt, ret;
char sym_objname[MODULE_NAME_LEN];
char sym_name[KSYM_NAME_LEN];
+#ifdef CONFIG_MODULES_USE_ELF_RELA
Elf_Rela *relas;
+#else
+ Elf_Rel *relas;
+#endif
Elf_Sym *sym;
unsigned long sympos, addr;
bool sym_vmlinux;
@@ -216,10 +287,14 @@ static int klp_resolve_symbols(Elf64_Shdr *sechdrs, const char *strtab,
*/
BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 128);
+#ifdef CONFIG_MODULES_USE_ELF_RELA
relas = (Elf_Rela *) relasec->sh_addr;
+#else
+ relas = (Elf_Rel *) relasec->sh_addr;
+#endif
/* For each rela in this klp relocation section */
- for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
- sym = (Elf64_Sym *)sechdrs[symndx].sh_addr + ELF_R_SYM(relas[i].r_info);
+ for (i = 0; i < relasec->sh_size / sizeof(*relas); i++) {
+ sym = (Elf_Sym *)sechdrs[symndx].sh_addr + ELF_R_SYM(relas[i].r_info);
if (sym->st_shndx != SHN_LIVEPATCH) {
pr_err("symbol %s is not marked as a livepatch symbol\n",
strtab + sym->st_name);
@@ -314,7 +389,11 @@ int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
if (ret)
return ret;
+#ifdef CONFIG_MODULES_USE_ELF_RELA
return apply_relocate_add(sechdrs, strtab, symndx, secndx, pmod);
+#else
+ return apply_relocate(sechdrs, strtab, symndx, secndx, pmod);
+#endif
}
/*
@@ -330,6 +409,7 @@ int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
*/
static int __klp_disable_patch(struct klp_patch *patch);
+#ifdef CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY
static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
@@ -373,6 +453,64 @@ static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
return count;
}
+#elif defined(CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY)
+
+static bool klp_is_patch_registered(struct klp_patch *patch)
+{
+ struct klp_patch *mypatch;
+
+ list_for_each_entry(mypatch, &klp_patches, list)
+ if (mypatch == patch)
+ return true;
+
+ return false;
+}
+
+static int __klp_enable_patch(struct klp_patch *patch);
+static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct klp_patch *patch;
+ int ret;
+ bool enabled;
+
+ ret = kstrtobool(buf, &enabled);
+ if (ret)
+ return ret;
+
+ patch = container_of(kobj, struct klp_patch, kobj);
+
+ mutex_lock(&klp_mutex);
+
+ if (!klp_is_patch_registered(patch)) {
+ /*
+ * Module with the patch could either disappear meanwhile or is
+ * not properly initialized yet.
+ */
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (patch->enabled == enabled) {
+ /* already in requested state */
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (enabled)
+ ret = __klp_enable_patch(patch);
+ else
+ ret = __klp_disable_patch(patch);
+
+out:
+ mutex_unlock(&klp_mutex);
+
+ if (ret)
+ return ret;
+ return count;
+}
+#endif
+
static ssize_t enabled_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
@@ -382,6 +520,7 @@ static ssize_t enabled_show(struct kobject *kobj,
return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->enabled);
}
+#ifdef CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY
static ssize_t transition_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
@@ -420,18 +559,60 @@ static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
return count;
}
+#endif /* #ifdef CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY */
static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
+#ifdef CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY
static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);
+#endif /* #ifdef CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY */
+
static struct attribute *klp_patch_attrs[] = {
&enabled_kobj_attr.attr,
+#ifdef CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY
&transition_kobj_attr.attr,
&force_kobj_attr.attr,
+#endif /* #ifdef CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY */
NULL
};
ATTRIBUTE_GROUPS(klp_patch);
+static int state_show(struct seq_file *m, void *v)
+{
+ struct klp_patch *patch;
+ char *state;
+ int index = 0;
+
+ seq_printf(m, "%-5s\t%-26s\t%-8s\n", "Index", "Patch", "State");
+ seq_puts(m, "-----------------------------------------------\n");
+ mutex_lock(&klp_mutex);
+ list_for_each_entry(patch, &klp_patches, list) {
+ if (patch->enabled)
+ state = "enabled";
+ else
+ state = "disabled";
+
+ seq_printf(m, "%-5d\t%-26s\t%-8s\n", ++index,
+ patch->mod->name, state);
+ }
+ mutex_unlock(&klp_mutex);
+ seq_puts(m, "-----------------------------------------------\n");
+
+ return 0;
+}
+
+static int klp_state_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, state_show, NULL);
+}
+
+static const struct proc_ops proc_klpstate_operations = {
+ .proc_open = klp_state_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+};
+
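With the format strings used by state_show() above, reading the new /proc/livepatch/state file yields output along these lines (the patch name below is just an example):

	Index	Patch                     	State
	-----------------------------------------------
	1    	livepatch_sample          	enabled
	-----------------------------------------------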
static void klp_free_object_dynamic(struct klp_object *obj)
{
kfree(obj->name);
@@ -607,6 +788,7 @@ static void __klp_free_funcs(struct klp_object *obj, bool nops_only)
}
}
+#ifdef CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY
/* Clean up when a patched object is unloaded */
static void klp_free_object_loaded(struct klp_object *obj)
{
@@ -621,12 +803,17 @@ static void klp_free_object_loaded(struct klp_object *obj)
func->new_func = NULL;
}
}
+#endif /* #ifdef CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY */
static void __klp_free_objects(struct klp_patch *patch, bool nops_only)
{
struct klp_object *obj, *tmp_obj;
klp_for_each_object_safe(patch, obj, tmp_obj) {
+#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY
+ if (klp_is_module(obj))
+ module_put(obj->mod);
+#endif
__klp_free_funcs(obj, nops_only);
if (nops_only && !obj->dynamic)
@@ -642,10 +829,12 @@ static void klp_free_objects(struct klp_patch *patch)
__klp_free_objects(patch, false);
}
+#ifdef CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY
static void klp_free_objects_dynamic(struct klp_patch *patch)
{
__klp_free_objects(patch, true);
}
+#endif
/*
* This function implements the free operations that can be called safely
@@ -662,6 +851,34 @@ static void klp_free_patch_start(struct klp_patch *patch)
klp_free_objects(patch);
}
+#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY
+static inline int klp_load_hook(struct klp_object *obj)
+{
+ struct klp_hook *hook;
+
+ if (!obj->hooks_load)
+ return 0;
+
+ for (hook = obj->hooks_load; hook->hook; hook++)
+ (*hook->hook)();
+
+ return 0;
+}
+
+static inline int klp_unload_hook(struct klp_object *obj)
+{
+ struct klp_hook *hook;
+
+ if (!obj->hooks_unload)
+ return 0;
+
+ for (hook = obj->hooks_unload; hook->hook; hook++)
+ (*hook->hook)();
+
+ return 0;
+}
+#endif
+
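Both helpers walk a table until they reach an entry whose ->hook is NULL. The struct klp_hook itself is declared in the livepatch headers rather than in this hunk; the sketch below is only the shape those loops (and the samples/livepatch usage further down) imply, not a copy of the real declaration:

	/* Assumed layout: one bare function pointer per entry, with an
	 * all-zero terminator ending the table. */
	struct klp_hook {
		void (*hook)(void);
	};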
/*
* This function implements the free part that must be called outside
* klp_mutex.
@@ -716,8 +933,24 @@ void klp_free_replaced_patches_async(struct klp_patch *new_patch)
}
}
+#ifdef CONFIG_LIVEPATCH_WO_FTRACE
+int __weak arch_klp_func_can_patch(struct klp_func *func)
+{
+ return 0;
+}
+
+int __weak arch_klp_init_func(struct klp_object *obj, struct klp_func *func)
+{
+ return 0;
+}
+#endif
+
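These weak stubs give architectures a veto and a preparation step for each function before it is patched. Purely as an illustration of the intent (the constant name and the exact check are assumptions, not the real arch code), an override for the ftrace-less model would typically verify that the target function is long enough to hold the jump stub arch_klp_patch_func() will write:

	/* Illustrative only; not an actual architecture implementation. */
	#define KLP_JUMP_STUB_SIZE	12	/* hypothetical stub size in bytes */

	int arch_klp_func_can_patch(struct klp_func *func)
	{
		if (!func->old_size)
			return -EINVAL;
		if (func->old_size < KLP_JUMP_STUB_SIZE) {
			pr_err("function %s is too short to patch\n",
			       func->old_name);
			return -EPERM;
		}
		return 0;
	}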
static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
+#ifdef CONFIG_LIVEPATCH_WO_FTRACE
+ int ret;
+#endif
+
if (!func->old_name)
return -EINVAL;
@@ -733,7 +966,25 @@ static int klp_init_func(struct klp_object *obj, struct klp_func *func)
INIT_LIST_HEAD(&func->stack_node);
func->patched = false;
+
+#ifdef CONFIG_LIVEPATCH_WO_FTRACE
+#ifdef CONFIG_PPC64
+ if (klp_is_module(obj))
+ func->old_mod = obj->mod;
+ else
+ func->old_mod = NULL;
+#endif
+ ret = arch_klp_func_can_patch(func);
+ if (ret)
+ return ret;
+
+ ret = arch_klp_init_func(obj, func);
+ if (ret)
+ return ret;
+#endif
+#ifdef CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY
func->transition = false;
+#endif
/* The format for the sysfs directory is <patch>/<obj>/<func>,sympos where sympos
* is the nth occurrence of this symbol in kallsyms for the patched
@@ -775,6 +1026,7 @@ static int klp_init_object_loaded(struct klp_patch *patch,
struct klp_func *func;
int ret;
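+ /*
+ * The relocations below write into the patch module's text, so drop
+ * its read-only protection for the duration of the writes.
+ */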
+ module_disable_ro(patch->mod);
if (klp_is_module(obj)) {
/*
* Only write module-specific relocations here
@@ -783,9 +1035,12 @@ static int klp_init_object_loaded(struct klp_patch *patch,
* itself.
*/
ret = klp_apply_object_relocs(patch, obj);
- if (ret)
+ if (ret) {
+ module_enable_ro(patch->mod, true);
return ret;
+ }
}
+ module_enable_ro(patch->mod, true);
klp_for_each_func(obj, func) {
ret = klp_find_object_symbol(obj->name, func->old_name,
@@ -829,22 +1084,43 @@ static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
obj->patched = false;
obj->mod = NULL;
- klp_find_object_module(obj);
+ ret = klp_find_object_module(obj);
+ if (ret)
+ return ret;
name = klp_is_module(obj) ? obj->name : "vmlinux";
ret = kobject_add(&obj->kobj, &patch->kobj, "%s", name);
if (ret)
- return ret;
+ goto out;
+
+ /*
+ * For livepatch without ftrace, we need to modify the first N
+ * instructions of the to-be-patched function, so we must check that
+ * the function is long enough to allow this modification.
+ *
+ * The check hook added in klp_init_func() uses old_size internally,
+ * so klp_init_object_loaded() must be called first to fill in the
+ * klp_func struct.
+ */
+ if (klp_is_object_loaded(obj)) {
+ ret = klp_init_object_loaded(patch, obj);
+ if (ret)
+ goto out;
+ }
klp_for_each_func(obj, func) {
ret = klp_init_func(obj, func);
if (ret)
- return ret;
+ goto out;
}
- if (klp_is_object_loaded(obj))
- ret = klp_init_object_loaded(patch, obj);
+ return 0;
+out:
+#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY
+ if (klp_is_module(obj))
+ module_put(obj->mod);
+#endif
return ret;
}
@@ -890,12 +1166,33 @@ static int klp_init_patch_early(struct klp_patch *patch)
}
}
+ /*
+ * For the stop_machine model, module_get() and module_put() are
+ * needed only once each, in enable_patch() and disable_patch()
+ * respectively.
+ */
+#ifdef CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY
if (!try_module_get(patch->mod))
return -ENODEV;
+#endif
return 0;
}
+#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY
+static void klp_free_objects_mod_limited(struct klp_patch *patch,
+ struct klp_object *limit)
+{
+ struct klp_object *obj, *tmp_obj;
+
+ klp_for_each_object_safe(patch, obj, tmp_obj) {
+ if (limit == obj)
+ break;
+ if (klp_is_module(obj))
+ module_put(obj->mod);
+ }
+}
+#endif
+
static int klp_init_patch(struct klp_patch *patch)
{
struct klp_object *obj;
@@ -914,14 +1211,36 @@ static int klp_init_patch(struct klp_patch *patch)
klp_for_each_object(patch, obj) {
ret = klp_init_object(patch, obj);
if (ret)
- return ret;
+ goto out;
}
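+ /*
+ * All object relocations have been applied above, so flush the patch
+ * module's icache, mark its klp relocations as done, and wire up its
+ * jump labels now that the text is final.
+ */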
+ flush_module_icache(patch->mod);
+ set_mod_klp_rel_state(patch->mod, MODULE_KLP_REL_DONE);
+ module_disable_ro(patch->mod);
+ jump_label_apply_nops(patch->mod);
+ ret = jump_label_register(patch->mod);
+ if (ret) {
+ module_enable_ro(patch->mod, true);
+ goto out;
+ }
+ module_enable_ro(patch->mod, true);
+
+#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY
+ klp_for_each_object(patch, obj)
+ klp_load_hook(obj);
+#endif
+
list_add_tail(&patch->list, &klp_patches);
return 0;
+out:
+#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY
+ klp_free_objects_mod_limited(patch, obj);
+#endif
+ return ret;
}
+#ifdef CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY
static int __klp_disable_patch(struct klp_patch *patch)
{
struct klp_object *obj;
@@ -953,7 +1272,107 @@ static int __klp_disable_patch(struct klp_patch *patch)
return 0;
}
+#elif defined(CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY)
+int __weak klp_check_calltrace(struct klp_patch *patch, int enable)
+{
+ return 0;
+}
+
+/*
+ * This function is called from stop_machine() context.
+ */
+static int disable_patch(struct klp_patch *patch)
+{
+ pr_notice("disabling patch '%s'\n", patch->mod->name);
+
+ klp_unpatch_objects(patch);
+ patch->enabled = false;
+ module_put(patch->mod);
+ return 0;
+}
+
+int klp_try_disable_patch(void *data)
+{
+ int ret = 0;
+ struct patch_data *pd = (struct patch_data *)data;
+
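+ /*
+ * Only the first CPU to get here does the real work: it runs the
+ * kprobe and stack-trace checks and then unpatches. Every other CPU
+ * parks in the else branch below until it has finished.
+ */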
+ if (atomic_inc_return(&pd->cpu_count) == 1) {
+ struct klp_patch *patch = pd->patch;
+
+ if (klp_check_patch_kprobed(patch)) {
+ atomic_inc(&pd->cpu_count);
+ return -EINVAL;
+ }
+ ret = klp_check_calltrace(patch, 0);
+ if (ret) {
+ atomic_inc(&pd->cpu_count);
+ return ret;
+ }
+ ret = disable_patch(patch);
+ if (ret) {
+ atomic_inc(&pd->cpu_count);
+ return ret;
+ }
+ atomic_inc(&pd->cpu_count);
+ } else {
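+ /*
+ * Wait for the working CPU: it bumps cpu_count once more when it is
+ * done, pushing the count past num_online_cpus(). The isb that
+ * follows makes this CPU refetch the modified instructions.
+ */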
+ while (atomic_read(&pd->cpu_count) <= num_online_cpus())
+ cpu_relax();
+
+ klp_smp_isb();
+ }
+
+ return ret;
+}
+
+void __weak arch_klp_code_modify_prepare(void)
+{
+}
+
+void __weak arch_klp_code_modify_post_process(void)
+{
+}
+
+void __weak arch_klp_mem_prepare(struct klp_patch *patch)
+{
+}
+
+void __weak arch_klp_mem_recycle(struct klp_patch *patch)
+{
+}
+
+static int __klp_disable_patch(struct klp_patch *patch)
+{
+ int ret;
+ struct patch_data patch_data = {
+ .patch = patch,
+ .cpu_count = ATOMIC_INIT(0),
+ };
+
+ if (WARN_ON(!patch->enabled))
+ return -EINVAL;
+
+#ifdef CONFIG_LIVEPATCH_STACK
+ /* enforce stacking: only the last enabled patch can be disabled */
+ if (!list_is_last(&patch->list, &klp_patches) &&
+ list_next_entry(patch, list)->enabled) {
+ pr_err("only the last enabled patch can be disabled\n");
+ return -EBUSY;
+ }
+#endif
+
+ arch_klp_code_modify_prepare();
+ ret = stop_machine(klp_try_disable_patch, &patch_data, cpu_online_mask);
+ arch_klp_code_modify_post_process();
+ if (ret)
+ return ret;
+
+ arch_klp_mem_recycle(patch);
+ return 0;
+}
+#endif /* if defined(CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY) */
+
+#ifdef CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY
static int __klp_enable_patch(struct klp_patch *patch)
{
struct klp_object *obj;
@@ -1081,6 +1500,217 @@ int klp_enable_patch(struct klp_patch *patch)
}
EXPORT_SYMBOL_GPL(klp_enable_patch);
+#elif defined(CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY)
+/*
+ * This function is called from stop_machine() context.
+ */
+static int enable_patch(struct klp_patch *patch)
+{
+ struct klp_object *obj;
+ int ret;
+
+ pr_notice_once("tainting kernel with TAINT_LIVEPATCH\n");
+ add_taint(TAINT_LIVEPATCH, LOCKDEP_STILL_OK);
+
+ if (!try_module_get(patch->mod))
+ return -ENODEV;
+
+ patch->enabled = true;
+
+ pr_notice("enabling patch '%s'\n", patch->mod->name);
+
+ klp_for_each_object(patch, obj) {
+ if (!klp_is_object_loaded(obj))
+ continue;
+
+ ret = klp_patch_object(obj);
+ if (ret) {
+ pr_warn("failed to patch object '%s'\n",
+ klp_is_module(obj) ? obj->name : "vmlinux");
+ goto disable;
+ }
+ }
+
+ return 0;
+
+disable:
+ disable_patch(patch);
+ return ret;
+}
+
+int klp_try_enable_patch(void *data)
+{
+ int ret = 0;
+ struct patch_data *pd = (struct patch_data *)data;
+
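+ /*
+ * Same rendezvous as klp_try_disable_patch(): the first CPU performs
+ * the checks and enable_patch(), the rest spin until it has finished.
+ */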
+ if (atomic_inc_return(&pd->cpu_count) == 1) {
+ struct klp_patch *patch = pd->patch;
+
+ if (klp_check_patch_kprobed(patch)) {
+ atomic_inc(&pd->cpu_count);
+ return -EINVAL;
+ }
+
+ ret = klp_check_calltrace(patch, 1);
+ if (ret) {
+ atomic_inc(&pd->cpu_count);
+ return ret;
+ }
+ ret = enable_patch(patch);
+ if (ret) {
+ atomic_inc(&pd->cpu_count);
+ return ret;
+ }
+ atomic_inc(&pd->cpu_count);
+ } else {
+ while (atomic_read(&pd->cpu_count) <= num_online_cpus())
+ cpu_relax();
+
+ klp_smp_isb();
+ }
+
+ return ret;
+}
+
+static int __klp_enable_patch(struct klp_patch *patch)
+{
+ int ret;
+ struct patch_data patch_data = {
+ .patch = patch,
+ .cpu_count = ATOMIC_INIT(0),
+ };
+
+ if (WARN_ON(patch->enabled))
+ return -EINVAL;
+
+#ifdef CONFIG_LIVEPATCH_STACK
+ /* enforce stacking: only the first disabled patch can be enabled */
+ if (patch->list.prev != &klp_patches &&
+ !list_prev_entry(patch, list)->enabled) {
+ pr_err("only the first disabled patch can be enabled\n");
+ return -EBUSY;
+ }
+#endif
+
+ arch_klp_code_modify_prepare();
+ arch_klp_mem_prepare(patch);
+ ret = stop_machine(klp_try_enable_patch, &patch_data, cpu_online_mask);
+ arch_klp_code_modify_post_process();
+ if (ret) {
+ arch_klp_mem_recycle(patch);
+ return ret;
+ }
+
+#ifndef CONFIG_LIVEPATCH_STACK
+ /* move the enabled patch to the list tail */
+ list_del(&patch->list);
+ list_add_tail(&patch->list, &klp_patches);
+#endif
+
+ return 0;
+}
+
+/**
+ * klp_register_patch() - registers a patch
+ * @patch: Patch to be registered
+ *
+ * Initializes the data structure associated with the patch and
+ * creates the sysfs interface.
+ *
+ * Return: 0 on success, otherwise error
+ */
+int klp_register_patch(struct klp_patch *patch)
+{
+ int ret;
+
+ if (!patch || !patch->mod)
+ return -EINVAL;
+
+ if (!is_livepatch_module(patch->mod)) {
+ pr_err("module %s is not marked as a livepatch module\n",
+ patch->mod->name);
+ return -EINVAL;
+ }
+
+ if (!klp_initialized())
+ return -ENODEV;
+
+ mutex_lock(&klp_mutex);
+
+ if (klp_is_patch_registered(patch)) {
+ mutex_unlock(&klp_mutex);
+ return -EINVAL;
+ }
+
+ ret = klp_init_patch_early(patch);
+ if (ret) {
+ mutex_unlock(&klp_mutex);
+ return ret;
+ }
+
+ ret = klp_init_patch(patch);
+ if (ret)
+ goto err;
+
+ mutex_unlock(&klp_mutex);
+
+ return 0;
+
+err:
+ klp_free_patch_start(patch);
+
+ mutex_unlock(&klp_mutex);
+
+ kobject_put(&patch->kobj);
+ wait_for_completion(&patch->finish);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(klp_register_patch);
+
+/**
+ * klp_unregister_patch() - unregisters a patch
+ * @patch: Disabled patch to be unregistered
+ *
+ * Frees the data structures and removes the sysfs interface.
+ *
+ * Return: 0 on success, otherwise error
+ */
+int klp_unregister_patch(struct klp_patch *patch)
+{
+ int ret = 0;
+ struct klp_object *obj;
+
+ mutex_lock(&klp_mutex);
+
+ if (!klp_is_patch_registered(patch)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (patch->enabled) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ klp_for_each_object(patch, obj)
+ klp_unload_hook(obj);
+
+ klp_free_patch_start(patch);
+
+ mutex_unlock(&klp_mutex);
+
+ kobject_put(&patch->kobj);
+ wait_for_completion(&patch->finish);
+
+ return 0;
+out:
+ mutex_unlock(&klp_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(klp_unregister_patch);
+
+#endif /* #ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY */
/*
* This function unpatches objects from the replaced livepatches.
*
@@ -1109,6 +1739,7 @@ void klp_unpatch_replaced_patches(struct klp_patch *new_patch)
}
}
+#ifdef CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY
/*
* This function removes the dynamically allocated 'nop' functions.
*
@@ -1264,14 +1895,31 @@ void klp_module_going(struct module *mod)
mutex_unlock(&klp_mutex);
}
+#endif /* ifdef CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY */
static int __init klp_init(void)
{
+ struct proc_dir_entry *root_klp_dir, *res;
+
+ root_klp_dir = proc_mkdir("livepatch", NULL);
+ if (!root_klp_dir)
+ goto error_out;
+
+ res = proc_create("livepatch/state", 0, NULL,
+ &proc_klpstate_operations);
+ if (!res)
+ goto error_remove;
+
klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
if (!klp_root_kobj)
- return -ENOMEM;
+ goto error_remove;
return 0;
+
+error_remove:
+ remove_proc_entry("livepatch", NULL);
+error_out:
+ return -ENOMEM;
}
module_init(klp_init);
diff --git a/kernel/livepatch/core.h b/kernel/livepatch/core.h
index 38209c7361b6..9bcd139eb7d6 100644
--- a/kernel/livepatch/core.h
+++ b/kernel/livepatch/core.h
@@ -23,6 +23,7 @@ static inline bool klp_is_object_loaded(struct klp_object *obj)
return !obj->name || obj->mod;
}
+#ifdef CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY
static inline int klp_pre_patch_callback(struct klp_object *obj)
{
int ret = 0;
@@ -55,5 +56,5 @@ static inline void klp_post_unpatch_callback(struct klp_object *obj)
obj->callbacks.post_unpatch_enabled = false;
}
-
+#endif /* CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY */
#endif /* _LIVEPATCH_CORE_H */
diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c
index b552cf2d85f8..28e0de4edd72 100644
--- a/kernel/livepatch/patch.c
+++ b/kernel/livepatch/patch.c
@@ -20,6 +20,7 @@
#include "patch.h"
#include "transition.h"
+#ifdef CONFIG_LIVEPATCH_FTRACE
static LIST_HEAD(klp_ops);
struct klp_ops *klp_find_ops(void *old_func)
@@ -78,7 +79,11 @@ static void notrace klp_ftrace_handler(unsigned long ip,
*/
smp_rmb();
+#ifdef CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY
if (unlikely(func->transition)) {
+#else
+ {
+#endif
/*
* Enforce the order of the func->transition and
@@ -235,6 +240,47 @@ static int klp_patch_func(struct klp_func *func)
return ret;
}
+#else /* !CONFIG_LIVEPATCH_FTRACE, i.e. CONFIG_LIVEPATCH_WO_FTRACE */
+
+void __weak arch_klp_unpatch_func(struct klp_func *func)
+{
+}
+
+int __weak arch_klp_patch_func(struct klp_func *func)
+{
+ return -ENOSYS;
+}
+
+static void klp_unpatch_func(struct klp_func *func)
+{
+ if (WARN_ON(!func->patched))
+ return;
+ if (WARN_ON(!func->old_func))
+ return;
+
+ arch_klp_unpatch_func(func);
+
+ func->patched = false;
+}
+
+static inline int klp_patch_func(struct klp_func *func)
+{
+ int ret = 0;
+
+ if (WARN_ON(!func->old_func))
+ return -EINVAL;
+
+ if (WARN_ON(func->patched))
+ return -EINVAL;
+
+ ret = arch_klp_patch_func(func);
+ if (!ret)
+ func->patched = true;
+
+ return ret;
+}
+#endif /* CONFIG_LIVEPATCH_FTRACE */
+
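In the ftrace-less model the actual code modification is entirely the architecture's responsibility; the generic klp_patch_func() above only does the bookkeeping. A heavily hedged sketch of the contract an arch_klp_patch_func() implementation is expected to meet (the helper names are hypothetical, not real kernel APIs):

	/*
	 * Illustrative contract only; not the arm/arm64/ppc implementation.
	 * The arch code must remember the original instructions at
	 * func->old_func (so arch_klp_unpatch_func() can restore them) and
	 * replace them with an unconditional jump to func->new_func,
	 * flushing the instruction cache afterwards.
	 */
	int arch_klp_patch_func(struct klp_func *func)
	{
		int ret;

		ret = save_original_insns(func->old_func);	/* hypothetical */
		if (ret)
			return ret;
		return write_jump(func->old_func, func->new_func); /* hypothetical */
	}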
static void __klp_unpatch_object(struct klp_object *obj, bool nops_only)
{
struct klp_func *func;
diff --git a/kernel/livepatch/patch.h b/kernel/livepatch/patch.h
index d5f2fbe373e0..c9cde47f7e97 100644
--- a/kernel/livepatch/patch.h
+++ b/kernel/livepatch/patch.h
@@ -22,7 +22,9 @@
struct klp_ops {
struct list_head node;
struct list_head func_stack;
+#ifdef CONFIG_LIVEPATCH_FTRACE
struct ftrace_ops fops;
+#endif
};
struct klp_ops *klp_find_ops(void *old_func);
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index f6310f848f34..b5ef759ad510 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -194,7 +194,9 @@ static int klp_check_stack_func(struct klp_func *func, unsigned long *entries,
unsigned int nr_entries)
{
unsigned long func_addr, func_size, address;
+#ifdef CONFIG_LIVEPATCH_FTRACE
struct klp_ops *ops;
+#endif
int i;
for (i = 0; i < nr_entries; i++) {
@@ -208,6 +210,7 @@ static int klp_check_stack_func(struct klp_func *func, unsigned long *entries,
func_addr = (unsigned long)func->new_func;
func_size = func->new_size;
} else {
+#ifdef CONFIG_LIVEPATCH_FTRACE
/*
* Check for the to-be-patched function
* (the previous func).
@@ -226,6 +229,10 @@ static int klp_check_stack_func(struct klp_func *func, unsigned long *entries,
func_addr = (unsigned long)prev->new_func;
func_size = prev->new_size;
}
+#else
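+ /*
+ * Without the ftrace-based func_stack there is no chain of previously
+ * patched versions to inspect; the original function is the only
+ * other code that may still be live on a task's stack.
+ */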
+ func_addr = (unsigned long)func->old_func;
+ func_size = func->old_size;
+#endif
}
if (address >= func_addr && address < func_addr + func_size)
diff --git a/kernel/module.c b/kernel/module.c
index 908d46abe165..d196649baf01 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2063,7 +2063,20 @@ static void frob_writable_data(const struct module_layout *layout,
(layout->size - layout->ro_after_init_size) >> PAGE_SHIFT);
}
-static void module_enable_ro(const struct module *mod, bool after_init)
+/* livepatching wants to disable read-only so it can frob the module. */
+void module_disable_ro(const struct module *mod)
+{
+ if (!rodata_enabled)
+ return;
+
+ frob_text(&mod->core_layout, set_memory_rw);
+ frob_rodata(&mod->core_layout, set_memory_rw);
+ frob_ro_after_init(&mod->core_layout, set_memory_rw);
+ frob_text(&mod->init_layout, set_memory_rw);
+ frob_rodata(&mod->init_layout, set_memory_rw);
+}
+
+void module_enable_ro(const struct module *mod, bool after_init)
{
if (!rodata_enabled)
return;
@@ -2108,7 +2121,6 @@ static int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
#else /* !CONFIG_STRICT_MODULE_RWX */
static void module_enable_nx(const struct module *mod) { }
-static void module_enable_ro(const struct module *mod, bool after_init) {}
static int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
char *secstrings, struct module *mod)
{
@@ -3089,7 +3101,10 @@ static int check_modinfo_livepatch(struct module *mod, struct load_info *info)
add_taint_module(mod, TAINT_LIVEPATCH, LOCKDEP_STILL_OK);
pr_notice_once("%s: tainting kernel with TAINT_LIVEPATCH\n",
mod->name);
- }
+
+ set_mod_klp_rel_state(mod, MODULE_KLP_REL_UNDO);
+ } else
+ set_mod_klp_rel_state(mod, MODULE_KLP_REL_NONE);
return 0;
}
@@ -3504,7 +3519,7 @@ static int check_module_license_and_versions(struct module *mod)
return 0;
}
-static void flush_module_icache(const struct module *mod)
+void flush_module_icache(const struct module *mod)
{
/*
* Flush the instruction cache, since we've played with text.
diff --git a/samples/livepatch/Makefile b/samples/livepatch/Makefile
index 9f853eeb6140..1e384d50c73f 100644
--- a/samples/livepatch/Makefile
+++ b/samples/livepatch/Makefile
@@ -1,8 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-sample.o
+ifeq ($(CONFIG_LIVEPATCH_FTRACE), y)
obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-shadow-mod.o
obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-shadow-fix1.o
obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-shadow-fix2.o
obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-callbacks-demo.o
obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-callbacks-mod.o
obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-callbacks-busymod.o
+endif
diff --git a/samples/livepatch/livepatch-sample.c b/samples/livepatch/livepatch-sample.c
index cd76d7ebe598..915f717586b7 100644
--- a/samples/livepatch/livepatch-sample.c
+++ b/samples/livepatch/livepatch-sample.c
@@ -10,6 +10,9 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/livepatch.h>
+#ifdef CONFIG_PPC64
+#include <asm/code-patching.h>	/* ppc_function_entry() */
+#endif
/*
* This (dumb) live patch overrides the function that prints the
@@ -30,6 +33,31 @@
*/
#include <linux/seq_file.h>
+
+#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY
+void load_hook(void)
+{
+ pr_info("load_hook\n");
+}
+
+void unload_hook(void)
+{
+ pr_info("unload_hook\n");
+}
+
+static struct klp_hook hooks_load[] = {
+ {
+ .hook = load_hook
+ }, { }
+};
+
+static struct klp_hook hooks_unload[] = {
+ {
+ .hook = unload_hook
+ }, { }
+};
+#endif
+
static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
{
seq_printf(m, "%s\n", "this has been live patched");
@@ -38,7 +66,11 @@ static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
static struct klp_func funcs[] = {
{
+#ifdef CONFIG_PPC64
+ .old_name = ".cmdline_proc_show",
+#else
.old_name = "cmdline_proc_show",
+#endif
.new_func = livepatch_cmdline_proc_show,
}, { }
};
@@ -47,6 +79,10 @@ static struct klp_object objs[] = {
{
/* name being NULL means vmlinux */
.funcs = funcs,
+#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY
+ .hooks_load = hooks_load,
+ .hooks_unload = hooks_unload,
+#endif
}, { }
};
@@ -57,11 +93,23 @@ static struct klp_patch patch = {
static int livepatch_init(void)
{
+#ifdef CONFIG_PPC64
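+ /*
+ * On powerpc64 a function symbol may name a function descriptor
+ * rather than the code itself; resolve the real entry point of the
+ * replacement function before the patch is registered.
+ */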
+ patch.objs[0].funcs[0].new_func =
+ (void *)ppc_function_entry((void *)livepatch_cmdline_proc_show);
+#endif
+
+#ifdef CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY
return klp_enable_patch(&patch);
+#elif defined(CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY)
+ return klp_register_patch(&patch);
+#endif
}
static void livepatch_exit(void)
{
+#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY
+ WARN_ON(klp_unregister_patch(&patch));
+#endif
}
module_init(livepatch_init);