Skip to content

Commit a59652d

Browse files
committed
[libcpu-riscv]: [support SMP]: Support the SMP feature under the Smart framework/environment
By defining a custom .percpu section and controlling the MMU page table entries, different cores can access different physical memory through the same virtual address (percpu_hartid), thereby storing their respective hartids. If the satp register is not used for address translation, the hartid is stored directly in the satp register instead. Signed-off-by: Mengchen Teng <teng_mengchen@163.com>
1 parent bf917a1 commit a59652d

File tree

11 files changed

+497
-85
lines changed

11 files changed

+497
-85
lines changed

bsp/qemu-virt64-riscv/driver/board.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -15,14 +15,14 @@
1515

1616
extern unsigned int __bss_start;
1717
extern unsigned int __bss_end;
18-
18+
extern unsigned int _end;
1919
#ifndef RT_USING_SMART
2020
#define KERNEL_VADDR_START 0x0
2121
#endif
2222

2323
#define VIRT64_SBI_MEMSZ (0x200000)
2424

25-
#define RT_HW_HEAP_BEGIN ((void *)&__bss_end)
25+
#define RT_HW_HEAP_BEGIN ((void *)&_end)
2626
#define RT_HW_HEAP_END ((void *)(RT_HW_HEAP_BEGIN + 64 * 1024 * 1024))
2727
#define RT_HW_PAGE_START RT_HW_HEAP_END
2828
#define RT_HW_PAGE_END ((void *)(KERNEL_VADDR_START + (256 * 1024 * 1024 - VIRT64_SBI_MEMSZ)))

bsp/qemu-virt64-riscv/link.lds

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -145,6 +145,24 @@ SECTIONS
145145
__bss_end = .;
146146
} > SRAM
147147

148+
.percpu (NOLOAD) :
149+
{
150+
/* Align for MMU early map */
151+
. = ALIGN(1<<(12+9));
152+
PROVIDE(__percpu_start = .);
153+
154+
*(.percpu)
155+
156+
/* Align for MMU early map */
157+
. = ALIGN(1<<(12+9));
158+
159+
PROVIDE(__percpu_end = .);
160+
161+
/* Clone the area */
162+
. = __percpu_end + (__percpu_end - __percpu_start) * (RT_CPUS_NR - 1);
163+
PROVIDE(__percpu_real_end = .);
164+
} > SRAM
165+
148166
_end = .;
149167

150168
/* Stabs debugging sections. */

bsp/qemu-virt64-riscv/link_smart.lds

Lines changed: 22 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@
99
*/
1010

1111
INCLUDE "link_stacksize.lds"
12+
INCLUDE "link_cpus.lds"
1213

1314
OUTPUT_ARCH( "riscv" )
1415

@@ -122,12 +123,9 @@ SECTIONS
122123
{
123124
. = ALIGN(64);
124125
__stack_start__ = .;
125-
126-
. += __STACKSIZE__;
127-
__stack_cpu0 = .;
128-
129-
. += __STACKSIZE__;
130-
__stack_cpu1 = .;
126+
/* Dynamically allocate stack areas according to RT_CPUS_NR */
127+
. += (__STACKSIZE__ * RT_CPUS_NR);
128+
__stack_end__ = .;
131129
} > SRAM
132130

133131
.sbss :
@@ -147,6 +145,24 @@ SECTIONS
147145
*(COMMON)
148146
__bss_end = .;
149147
} > SRAM
148+
149+
.percpu (NOLOAD) :
150+
{
151+
/* Align for MMU early map */
152+
. = ALIGN(1<<(12+9));
153+
PROVIDE(__percpu_start = .);
154+
155+
*(.percpu)
156+
157+
/* Align for MMU early map */
158+
. = ALIGN(1<<(12+9));
159+
160+
PROVIDE(__percpu_end = .);
161+
162+
/* Clone the area */
163+
. = __percpu_end + (__percpu_end - __percpu_start) * (RT_CPUS_NR - 1);
164+
PROVIDE(__percpu_real_end = .);
165+
} > SRAM
150166

151167
_end = .;
152168

libcpu/risc-v/common64/context_gcc.S

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -93,9 +93,12 @@ rt_hw_context_switch_to:
9393
call rt_thread_self
9494
mv s1, a0
9595

96+
#ifndef RT_USING_SMP
97+
// if RT_USING_SMP is enabled, this is handled by rt_cpus_lock_status_restore.
9698
#ifdef RT_USING_SMART
97-
call lwp_aspace_switch
99+
call lwp_aspace_switch
98100
#endif
101+
#endif
99102

100103
RESTORE_CONTEXT
101104
sret
@@ -134,9 +137,12 @@ rt_hw_context_switch:
134137
call rt_thread_self
135138
mv s1, a0
136139

140+
#ifndef RT_USING_SMP
141+
// if RT_USING_SMP is enabled, this is handled by rt_cpus_lock_status_restore.
137142
#ifdef RT_USING_SMART
138-
call lwp_aspace_switch
143+
call lwp_aspace_switch
139144
#endif
145+
#endif
140146

141147
RESTORE_CONTEXT
142148
sret

libcpu/risc-v/common64/cpuport.c

Lines changed: 56 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,10 @@
1818
#include <sbi.h>
1919
#include <encoding.h>
2020

21+
#ifdef ARCH_MM_MMU
22+
#include "mmu.h"
23+
#endif
24+
2125
#ifdef RT_USING_SMP
2226
#include "tick.h"
2327
#include "interrupt.h"
@@ -54,6 +58,10 @@ volatile rt_ubase_t rt_interrupt_to_thread = 0;
5458
*/
5559
volatile rt_ubase_t rt_thread_switch_interrupt_flag = 0;
5660

61+
#ifdef ARCH_MM_MMU
62+
static rt_ubase_t *percpu_hartid;
63+
#endif
64+
5765
void *_rt_hw_stack_init(rt_ubase_t *sp, rt_ubase_t ra, rt_ubase_t sstatus)
5866
{
5967
rt_hw_switch_frame_t frame = (rt_hw_switch_frame_t)((rt_ubase_t)sp - sizeof(struct rt_hw_switch_frame));
@@ -71,10 +79,19 @@ int rt_hw_cpu_id(void)
7179
#ifndef RT_USING_SMP
7280
return 0;
7381
#else
74-
/* Currently, the hartid is stored in the satp register. */
75-
rt_ubase_t hart_id;
76-
asm volatile("csrr %0, satp" : "=r"(hart_id));
77-
return hart_id;
82+
if (rt_kmem_pvoff() != 0)
83+
{
84+
return *percpu_hartid;
85+
}
86+
else
87+
{
88+
// if the MMU is not enabled or pvoff == 0, read the hartid from the satp register
89+
rt_ubase_t hartid;
90+
asm volatile("csrr %0, satp" : "=r"(hartid));
91+
return hartid;
92+
}
93+
94+
7895
#endif /* RT_USING_SMP */
7996
}
8097

@@ -170,11 +187,19 @@ void rt_hw_secondary_cpu_up(void)
170187
rt_uint64_t entry_pa;
171188
int hart, ret;
172189

173-
/* translate kernel virtual _start to physical address.
174-
* TODO: Virtual-to-physical translation is not needed here
175-
* because &_start is already a physical address on this platform.
176-
*/
190+
/* translate kernel virtual _start to physical address. */
191+
#ifdef ARCH_MM_MMU
192+
if (rt_kmem_pvoff() != 0)
193+
{
194+
entry_pa = (rt_uint64_t)rt_kmem_v2p(&_start);
195+
}
196+
else
197+
{
198+
entry_pa = (rt_uint64_t)&_start;
199+
}
200+
#else
177201
entry_pa = (rt_uint64_t)&_start;
202+
#endif /* ARCH_MM_MMU */
178203

179204
for (hart = 0; hart < RT_CPUS_NR; hart++)
180205
{
@@ -191,8 +216,31 @@ void rt_hw_secondary_cpu_up(void)
191216
}
192217
}
193218

219+
#ifdef ARCH_MM_MMU
220+
void rt_hw_percpu_hartid_init(rt_ubase_t *percpu_ptr, rt_ubase_t hartid)
221+
{
222+
rt_ubase_t *percpu_hartid_paddr;
223+
rt_size_t percpu_size = (rt_size_t)((rt_ubase_t)&__percpu_end - (rt_ubase_t)&__percpu_start);
224+
225+
percpu_hartid = percpu_ptr;
226+
227+
// from virtual address to physical address
228+
percpu_ptr = (rt_ubase_t *)((rt_ubase_t)percpu_ptr + (rt_ubase_t)rt_kmem_pvoff());
229+
percpu_hartid_paddr = percpu_ptr;
230+
231+
232+
/* Save to the real area */
233+
*(rt_ubase_t *)((void *)percpu_hartid_paddr + hartid * percpu_size) = hartid;
234+
}
235+
#endif /* ARCH_MM_MMU */
236+
194237
void secondary_cpu_entry(void)
195238
{
239+
240+
#ifdef RT_USING_SMART
241+
/* switch to kernel address space */
242+
rt_hw_aspace_switch(&rt_kernel_space);
243+
#endif
196244
/* The PLIC peripheral interrupts are currently handled by the boot_hart. */
197245
/* Enable the Supervisor-Timer bit in SIE */
198246
rt_hw_tick_init();

libcpu/risc-v/common64/cpuport.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -43,6 +43,10 @@ rt_inline void rt_hw_isb(void)
4343
__asm__ volatile(OPC_FENCE_I:::"memory");
4444
}
4545

46+
#ifdef ARCH_MM_MMU
47+
void rt_hw_percpu_hartid_init(rt_ubase_t *percpu_ptr, rt_ubase_t hartid);
48+
#endif
49+
4650
#endif
4751

4852
#endif

libcpu/risc-v/common64/encoding.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -176,6 +176,8 @@
176176
#define PTE_A 0x040 // Accessed
177177
#define PTE_D 0x080 // Dirty
178178
#define PTE_SOFT 0x300 // Reserved for Software
179+
#define PTE_ATTR_RW (PTE_R | PTE_W)
180+
#define PTE_ATTR_RWX (PTE_ATTR_RW | PTE_X)
179181

180182
#define PTE_PPN_SHIFT 10
181183

0 commit comments

Comments
 (0)