
Commit 7285ad2

pvts-mat authored and PlaidCat committed
x86/mm: Randomize per-cpu entry area
jira VULN-3958
cve CVE-2023-0597
commit-author Peter Zijlstra <peterz@infradead.org>
commit 97e3d26

upstream-diff |

1. Ignored the changes in `arch/x86/kernel/hw_breakpoint.c'. The modified
   function `within_cpu_entry()' doesn't exist in the `ciqlts8_6' revision.
   The conflict could have been resolved by cherry-picking 24ae0c9, d390e6d
   and 97417cb outright, but that would introduce dead code: the
   `within_area()' and `within_cpu_entry()' functions.
2. Moved the `arch/x86/include/asm/pgtable_areas.h' changes to
   `arch/x86/include/asm/cpu_entry_area.h'. This was necessary because
   commit 186525b, which factored the relevant #defines out of
   `cpu_entry_area.h' into `pgtable_areas.h', is missing from the
   `ciqlts8_6' history. It was decided not to backport that commit as a
   prerequisite, since it is too extensive and makes changes unrelated to
   this patch.
3. Made a small adaptation of the changes relating to the `cea_offset()'
   definitions in `arch/x86/mm/cpu_entry_area.c', necessary because commit
   dc4e002 is missing from the `ciqlts8_6' history. It was too
   functionality-intrusive to backport as a prerequisite for the automatic
   resolution of this single conflict.

Seth found that the CPU-entry-area, the piece of per-cpu data that is
mapped into the userspace page-tables for kPTI, is not subject to any
randomization, irrespective of kASLR settings.

On x86_64 a whole P4D (512 GB) of virtual address space is reserved for
this structure, which is plenty large enough to randomize things a
little.

As such, use a straightforward randomization scheme that avoids
duplicates to spread the existing CPUs over the available space.

[ bp: Fix le build. ]

Reported-by: Seth Jenkins <sethjenkins@google.com>
Reviewed-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
(cherry picked from commit 97e3d26)
Signed-off-by: Marcin Wcisło <marcin.wcislo@conclusive.pl>
1 parent 836c9da commit 7285ad2
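
To see the scheme in isolation: below is a minimal userspace C model of the duplicate-avoiding placement loop the patch adds (an illustration, not code from the commit). Each CPU draws a random slot and retries until the slot collides with no previously placed CPU. NCPUS and NSLOTS are made-up stand-ins for the kernel's nr_cpu_ids and max_cea, and rand() stands in for prandom_u32_max().

/*
 * Hypothetical userspace model of init_cea_offsets(): draw a random
 * slot per CPU, retry on collision with any already-placed CPU.
 */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define NCPUS  8        /* stand-in for nr_cpu_ids */
#define NSLOTS 1024     /* stand-in for max_cea */

static unsigned int cea_slot[NCPUS];

int main(void)
{
        srand((unsigned int)time(NULL));

        for (int i = 0; i < NCPUS; i++) {
                unsigned int cea;
again:
                cea = (unsigned int)rand() % NSLOTS;
                /* quadratic duplicate scan, as in the kernel's
                 * "O(sodding terrible)" loop */
                for (int j = 0; j < i; j++)
                        if (cea_slot[j] == cea)
                                goto again;
                cea_slot[i] = cea;
                printf("cpu %d -> slot %u\n", i, cea);
        }
        return 0;
}

With few CPUs and millions of slots the retry loop terminates almost immediately, and the quadratic duplicate scan runs only once at boot, which is why the kernel tolerates it.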

File tree

arch/x86/include/asm/cpu_entry_area.h
arch/x86/mm/cpu_entry_area.c

2 files changed: +52, -8 lines


arch/x86/include/asm/cpu_entry_area.h

Lines changed: 8 additions & 6 deletions
@@ -107,10 +107,6 @@ struct cpu_entry_area {
 };
 
 #define CPU_ENTRY_AREA_SIZE		(sizeof(struct cpu_entry_area))
-#define CPU_ENTRY_AREA_ARRAY_SIZE	(CPU_ENTRY_AREA_SIZE * NR_CPUS)
-
-/* Total size includes the readonly IDT mapping page as well: */
-#define CPU_ENTRY_AREA_TOTAL_SIZE	(CPU_ENTRY_AREA_ARRAY_SIZE + PAGE_SIZE)
 
 DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
 DECLARE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks);
@@ -124,8 +120,14 @@ extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags);
 
 #define CPU_ENTRY_AREA_RO_IDT_VADDR	((void *)CPU_ENTRY_AREA_RO_IDT)
 
-#define CPU_ENTRY_AREA_MAP_SIZE			\
-	(CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_ARRAY_SIZE - CPU_ENTRY_AREA_BASE)
+#ifdef CONFIG_X86_32
+#define CPU_ENTRY_AREA_MAP_SIZE		(CPU_ENTRY_AREA_PER_CPU +	\
+					 (CPU_ENTRY_AREA_SIZE * NR_CPUS) - \
+					 CPU_ENTRY_AREA_BASE)
+#else
+#define CPU_ENTRY_AREA_MAP_SIZE		P4D_SIZE
+#endif
+
 
 extern struct cpu_entry_area *get_cpu_entry_area(int cpu);
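For a sense of how much room the P4D-sized map gives the x86_64 randomization, a back-of-the-envelope sketch follows. The 0x3b000-byte per-CPU area is an assumed ballpark figure; the real CPU_ENTRY_AREA_SIZE depends on the kernel configuration.

/*
 * Rough slot count for x86_64: P4D_SIZE is 512 GiB with 4-level
 * paging; one page is reserved for the readonly IDT mapping.
 * cea_size is an ASSUMED ballpark for sizeof(struct cpu_entry_area).
 */
#include <stdio.h>

int main(void)
{
        unsigned long long p4d_size  = 1ULL << 39;      /* 512 GiB */
        unsigned long long page_size = 4096;
        unsigned long long cea_size  = 0x3b000;         /* assumption */
        unsigned long long max_cea   = (p4d_size - page_size) / cea_size;

        printf("max_cea ~= %llu slots\n", max_cea);     /* ~2.2 million */
        return 0;
}

Roughly two million candidate slots per CPU is what makes randomizing the placement worthwhile, and also why the duplicate-avoiding retry loop almost never has to retry.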

arch/x86/mm/cpu_entry_area.c

Lines changed: 44 additions & 2 deletions
@@ -4,6 +4,7 @@
 #include <linux/percpu.h>
 #include <linux/kallsyms.h>
 #include <linux/kcore.h>
+#include <linux/prandom.h>
 
 #include <asm/cpu_entry_area.h>
 #include <asm/pgtable.h>
@@ -16,12 +17,52 @@ static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage)
 #ifdef CONFIG_X86_64
 static DEFINE_PER_CPU_PAGE_ALIGNED(struct exception_stacks, exception_stacks);
 DEFINE_PER_CPU(struct cea_exception_stacks*, cea_exception_stacks);
+
+static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, _cea_offset);
+
+static __always_inline unsigned int cea_offset(unsigned int cpu)
+{
+	return per_cpu(_cea_offset, cpu);
+}
+
+static __init void init_cea_offsets(void)
+{
+	unsigned int max_cea;
+	unsigned int i, j;
+
+	max_cea = (CPU_ENTRY_AREA_MAP_SIZE - PAGE_SIZE) / CPU_ENTRY_AREA_SIZE;
+
+	/* O(sodding terrible) */
+	for_each_possible_cpu(i) {
+		unsigned int cea;
+
+again:
+		cea = prandom_u32_max(max_cea);
+
+		for_each_possible_cpu(j) {
+			if (cea_offset(j) == cea)
+				goto again;
+
+			if (i == j)
+				break;
+		}
+
+		per_cpu(_cea_offset, i) = cea;
+	}
+}
+#else /* !X86_64 */
+
+static __always_inline unsigned int cea_offset(unsigned int cpu)
+{
+	return cpu;
+}
+static inline void init_cea_offsets(void) { }
 #endif
 
 /* Is called from entry code, so must be noinstr */
 noinstr struct cpu_entry_area *get_cpu_entry_area(int cpu)
 {
-	unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;
+	unsigned long va = CPU_ENTRY_AREA_PER_CPU + cea_offset(cpu) * CPU_ENTRY_AREA_SIZE;
 	BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);
 
 	return (struct cpu_entry_area *) va;
@@ -194,7 +235,6 @@ static __init void setup_cpu_entry_area_ptes(void)
 
 	/* The +1 is for the readonly IDT: */
 	BUILD_BUG_ON((CPU_ENTRY_AREA_PAGES+1)*PAGE_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
-	BUILD_BUG_ON(CPU_ENTRY_AREA_TOTAL_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
 	BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);
 
 	start = CPU_ENTRY_AREA_BASE;
@@ -210,6 +250,8 @@ void __init setup_cpu_entry_areas(void)
 {
 	unsigned int cpu;
 
+	init_cea_offsets();
+
 	setup_cpu_entry_area_ptes();
 
 	for_each_possible_cpu(cpu)
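
The bounded draw relies on prandom_u32_max(), which in kernels of this era reduces a full-range 32-bit random value into [0, range) with a multiply-and-shift rather than a modulo. The sketch below is a userspace model of that bounding technique, with rand() standing in for the kernel PRNG.

/*
 * Multiply-shift bounding: the high 32 bits of r * range fall in
 * [0, range). Avoids the division implied by '%'; like modulo it is
 * slightly biased when range does not divide 2^32, which is fine for
 * this use.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint32_t bounded(uint32_t r, uint32_t range)
{
        return (uint32_t)(((uint64_t)r * range) >> 32);
}

int main(void)
{
        /* rand() is a stand-in; combine two calls for 32 bits of state */
        uint32_t r = ((uint32_t)rand() << 16) ^ (uint32_t)rand();

        printf("%u\n", bounded(r, 1000));       /* always in [0, 1000) */
        return 0;
}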
