From 279364e274f55e98126938fd5332fff03cb2857e Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Wed, 17 Sep 2025 15:30:25 +0800 Subject: [PATCH 01/26] Add sched_t to kcb for O(1) scheduler support Previously, the scheduler performed a linear search through the global task list (kcb->tasks) to find the next TASK_READY task. This approach limited scalability, as the number of search iterations grew with the number of tasks, resulting in higher scheduling latency. To support an O(1) scheduler and improve extensibility, a sched_t structure is introduced and integrated into kcb. The new structure contains: - ready_queues: Holds all runnable tasks, including TASK_RUNNING and TASK_READY. The scheduler selects tasks directly from these queues. - ready_bitmap: Records the state of each ready queue. Using the bitmap, the scheduler can locate the highest-priority runnable task in O(1) time complexity. - rr_cursors: Round-robin cursors that track the next task node in each ready queue. Each priority level maintains its own RR cursor. The top-priority cursor is assigned to kcb->task_current, which is advanced circularly after each scheduling cycle. - hart_id: Identifies the scheduler instance per hart (0 for single-hart configurations). - task_idle: The system idle task, executed when no runnable tasks exist (the field itself is added to sched_t later in this series). In the current design, kcb binds only one sched_t instance (hart0) for single-hart systems, but this structure can be extended for multi-hart scheduling in the future. --- include/sys/task.h | 18 ++++++++++++++++++ kernel/task.c | 9 +++++++++ 2 files changed, 27 insertions(+) diff --git a/include/sys/task.h b/include/sys/task.h index 33d0b60..4199add 100644 --- a/include/sys/task.h +++ b/include/sys/task.h @@ -84,6 +84,21 @@ typedef struct tcb { void *rt_prio; /* Opaque pointer for custom real-time scheduler hook */ } tcb_t; +/* Per-hart scheduler state */ +typedef struct sched { + uint8_t ready_bitmap; /* 8-bit priority bitmap */ + list_t + *ready_queues[TASK_PRIORITY_LEVELS]; /* Separate queue per priority */ + uint16_t queue_counts[TASK_PRIORITY_LEVELS]; /* O(1) size tracking */ + + /* Weighted Round-Robin State per Priority Level */ + list_node_t *rr_cursors[TASK_PRIORITY_LEVELS]; /* Round-robin position */ + + /* Hart-Specific Data */ + uint8_t hart_id; /* RISC-V hart identifier */ + +} sched_t; + /* Kernel Control Block (KCB) * * Singleton structure holding global kernel state, including task lists, @@ -104,6 +119,9 @@ typedef struct { /* Timer Management */ list_t *timer_list; /* List of active software timers */ volatile uint32_t ticks; /* Global system tick, incremented by timer */ + + /* Per-hart scheduler management */ + sched_t *harts; } kcb_t; /* Global pointer to the singleton Kernel Control Block */ diff --git a/kernel/task.c b/kernel/task.c index 59ffdae..e007cb2 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -15,6 +15,14 @@ static int32_t noop_rtsched(void); void _timer_tick_handler(void); +/* Hart scheduler */ +static sched_t hart0 = { + .ready_bitmap = 0, + .ready_queues = {NULL}, + .rr_cursors = {NULL}, + .hart_id = 0, +}; + /* Kernel-wide control block (KCB) */ static kcb_t kernel_state = { .tasks = NULL, @@ -25,6 +33,7 @@ static kcb_t kernel_state = { .task_count = 0, .ticks = 0, .preemptive = true, /* Default to preemptive mode */ + .harts = &hart0, /* Initial hart */ }; kcb_t *kcb = &kernel_state; From 56c1926202216318cd4b83ed6612f1ae7e22963c Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Wed, 22 Oct 2025 21:52:13 +0800 Subject: [PATCH 02/26] Add list_unlink() for safe node
removal from ready queue Previously, the list operation for removal was limited to list_remove(), which immediately freed the list node during the function call. When removing a running task (TASK_RUNNING), the list node in the ready queue must not be freed because kcb->task_current shares the same node. This change introduces list_unlink(), which detaches the node from the list without freeing it. The unlinked node is returned to the caller, allowing safe reuse and improving flexibility in dequeue operations. This API will be applied in sched_dequeue_task() for safely removing tasks from ready queues. --- include/lib/list.h | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/include/lib/list.h b/include/lib/list.h index 298e6c8..9f41866 100644 --- a/include/lib/list.h +++ b/include/lib/list.h @@ -134,6 +134,25 @@ static inline void *list_remove(list_t *list, list_node_t *target) return data; } +/* Unlink a node from a list without freeing it */ +static inline void list_unlink(list_t *list, list_node_t *target) +{ + if (unlikely(!list || !target || list_is_empty(list))) + return; + + list_node_t *prev = list->head; + while (prev->next != list->tail && prev->next != target) + prev = prev->next; + + if (unlikely(prev->next != target)) + return; /* node not found */ + + prev->next = target->next; + target->next = NULL; + list->length--; + return; +} + /* Iteration */ /* Callback should return non-NULL to stop early, NULL to continue */ From 6bd43f11dd5e8067a5d9916a45e16422e8bf1689 Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Fri, 24 Oct 2025 12:21:31 +0800 Subject: [PATCH 03/26] Add three macros for ready queue bitmap operations When a task is enqueued into or dequeued from the ready queue, the bitmap that indicates the ready queue state should be updated. These three macros can be used in mo_task_dequeue() and mo_task_enqueue() APIs to improve readability and maintain consistency. --- kernel/task.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/kernel/task.c b/kernel/task.c index e007cb2..351238b 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -37,6 +37,11 @@ static kcb_t kernel_state = { }; kcb_t *kcb = &kernel_state; +/* Bitmap operations */ +#define BITMAP_CHECK(prio) (kcb->harts->ready_bitmap & (1U << (prio))) +#define BITMAP_SET(prio) (kcb->harts->ready_bitmap |= (1U << (prio))) +#define BITMAP_CLEAN(prio) (kcb->harts->ready_bitmap &= ~(1U << (prio))) + /* timer work management for reduced latency */ static volatile uint32_t timer_work_pending = 0; /* timer work types */ static volatile uint32_t timer_work_generation = 0; /* counter for coalescing */ From 75b4eeb50213db2ceae27f88e5c5c9289ddb0575 Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Fri, 24 Oct 2025 12:39:29 +0800 Subject: [PATCH 04/26] Refactor sched_enqueue_task() for O(1) scheduler support Previously, sched_enqueue_task() only changed the task state without inserting the task into a ready queue. As a result, the scheduler could not select the enqueued task for execution. This change pushes the task into the appropriate ready queue using list_pushback(), and initializes related attributes such as the ready bitmap and RR cursor. The ready queue for the corresponding task priority is initialized on this enqueue path and is never released afterward. With this updated API, tasks can be enqueued into the ready queue and selected by the cursor-based O(1) scheduler.
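As a quick illustration of the invariant these macros maintain (a bit is set exactly while its queue is non-empty), here is a minimal host-side simulation; it is illustration only, with ready_bitmap and queue_counts standing in for the kcb->harts fields:

#include <stdint.h>
#include <stdio.h>

#define TASK_PRIORITY_LEVELS 8
#define BITMAP_CHECK(prio) (ready_bitmap & (1U << (prio)))
#define BITMAP_SET(prio) (ready_bitmap |= (1U << (prio)))
#define BITMAP_CLEAN(prio) (ready_bitmap &= ~(1U << (prio)))

static uint8_t ready_bitmap;
static uint16_t queue_counts[TASK_PRIORITY_LEVELS];

static void enqueue_level(uint8_t prio)
{
    queue_counts[prio]++; /* O(1) size tracking */
    BITMAP_SET(prio);     /* queue is now known to be non-empty */
}

static void dequeue_level(uint8_t prio)
{
    if (!--queue_counts[prio])
        BITMAP_CLEAN(prio); /* last task left: mark the queue empty */
}

int main(void)
{
    enqueue_level(3);
    enqueue_level(3);
    dequeue_level(3);
    printf("level 3 ready? %d\n", !!BITMAP_CHECK(3)); /* 1: one task remains */
    dequeue_level(3);
    printf("level 3 ready? %d\n", !!BITMAP_CHECK(3)); /* 0: queue now empty */
    return 0;
}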
--- kernel/task.c | 32 ++++++++++++++++++++++++++++---- 1 file changed, 28 insertions(+), 4 deletions(-) diff --git a/kernel/task.c b/kernel/task.c index 351238b..7e41d1b 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -82,7 +82,7 @@ static const uint8_t priority_timeslices[TASK_PRIORITY_LEVELS] = { TASK_TIMESLICE_IDLE /* Priority 7: Idle */ }; -/* Mark task as ready (state-based) */ +/* Enqueue task into ready queue */ static void sched_enqueue_task(tcb_t *task); /* Utility and Validation Functions */ @@ -349,17 +349,41 @@ void _yield(void) __attribute__((weak, alias("yield"))); * practical performance with strong guarantees for fairness and reliability. */ -/* Add task to ready state - simple state-based approach */ +/* Enqueue task into ready queue */ static void sched_enqueue_task(tcb_t *task) { if (unlikely(!task)) return; + uint8_t prio_level = task->prio_level; + /* Ensure task has appropriate time slice for its priority */ - task->time_slice = get_priority_timeslice(task->prio_level); + task->time_slice = get_priority_timeslice(prio_level); task->state = TASK_READY; - /* Task selection is handled directly through the master task list */ + list_t **rq = &kcb->harts->ready_queues[prio_level]; + list_node_t **cursor = &kcb->harts->rr_cursors[prio_level]; + + if (!*rq) + *rq = list_create(); + + list_node_t *rq_node = list_pushback(*rq, task); + if (unlikely(!rq_node)) + return; + + /* Update task count in ready queue */ + kcb->harts->queue_counts[prio_level]++; + + /* Set up the first rr_cursor */ + if (!*cursor) + *cursor = rq_node; + + /* Advance cursor to the new node when it points at the running task */ + if (*cursor == kcb->task_current) + *cursor = rq_node; + + BITMAP_SET(task->prio_level); + return; } /* Remove task from ready queues - state-based approach for compatibility */ From 398c4c0987dc2e7fc2de82b9d6a145f9a3c5ec08 Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Fri, 24 Oct 2025 13:02:44 +0800 Subject: [PATCH 05/26] Implement sched_dequeue_task() to dequeue task from ready queue Previously, mo_task_dequeue() was only a stub and returned immediately without performing any operation. As a result, tasks remained in the ready queue after being dequeued, leading to potential scheduler inconsistencies. This change implements the full dequeue process: - Searches for the task node in the ready queue by task ID. - Maintains RR cursor consistency: the RR cursor should always point to a valid task node in the ready queue. When removing a task node, the cursor is advanced circularly to the next node. - Unlinks the task node using list_unlink(), which removes the node from the ready queue without freeing it. list_unlink() is used instead of list_remove() to avoid accidentally freeing kcb->task_current when the current running task is dequeued. - Updates and checks queue_counts: if the ready queue becomes empty, the RR cursor is set to NULL and the bitmap is cleared until a new task is enqueued.
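One consequence of the design above is worth spelling out: sched_dequeue_task() returns the node unlinked but not freed, so its lifetime belongs to the caller. A sketch of the caller-side pattern that later patches in this series (mo_task_suspend(), mo_task_cancel()) adopt:

list_node_t *rq_node = sched_dequeue_task(task); /* NULL if task not queued */
/* ... update task state while still inside the critical section ... */
free(rq_node); /* free(NULL) is a no-op, so no extra check is needed */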
--- kernel/task.c | 32 +++++++++++++++++++++++++------- 1 file changed, 25 insertions(+), 7 deletions(-) diff --git a/kernel/task.c b/kernel/task.c index 7e41d1b..1adb2bc 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -386,16 +386,34 @@ static void sched_enqueue_task(tcb_t *task) return; } -/* Remove task from ready queues - state-based approach for compatibility */ -void sched_dequeue_task(tcb_t *task) +/* Remove task from ready queue; return removed ready queue node */ +static list_node_t *sched_dequeue_task(tcb_t *task) { if (unlikely(!task)) - return; + return NULL; - /* For tasks that need to be removed from ready state (suspended/cancelled), - * we rely on the state change. The scheduler will skip non-ready tasks - * when it encounters them during the round-robin traversal. - */ + uint8_t prio_level = task->prio_level; + + /* A task leaving the ready/running state must also be removed from the + * corresponding ready queue. */ + list_t *rq = kcb->harts->ready_queues[prio_level]; + list_node_t *rq_node = list_foreach(rq, idcmp, (void *) (size_t) task->id); + list_node_t **cursor = &kcb->harts->rr_cursors[prio_level]; + if (!rq_node) + return NULL; + + /* Safely move cursor to next task node. */ + if (rq_node == *cursor) + *cursor = list_cnext(rq, *cursor); + + list_unlink(rq, rq_node); + + /* Update task count in ready queue */ + if (!--kcb->harts->queue_counts[prio_level]) { + *cursor = NULL; + BITMAP_CLEAN(task->prio_level); + } + return rq_node; } /* Handle time slice expiration for current task */ From 102a9358900ce674d347110bbb2ef79b3a2fa0f1 Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Wed, 22 Oct 2025 22:07:58 +0800 Subject: [PATCH 06/26] Refactor mo_task_spawn() for O(1) scheduler support Previously, mo_task_spawn() only created a task and appended it to the global task list (kcb->tasks), assigning the first task directly from the global list node. This change adds a call to sched_enqueue_task() within the critical section to enqueue the task into the ready queue and safely initialize its scheduling attributes. The first task assignment is now aligned with the RR cursor mechanism to ensure consistency with the O(1) scheduler. --- kernel/task.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/kernel/task.c b/kernel/task.c index 1adb2bc..56064d1 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -671,8 +671,12 @@ int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req) tcb->id = kcb->next_tid++; kcb->task_count++; /* Cached count of active tasks for quick access */ - if (!kcb->task_current) - kcb->task_current = node; + /* Push node to ready queue */ + sched_enqueue_task(tcb); + if (!kcb->task_current) { + kcb->task_current = kcb->harts->rr_cursors[tcb->prio_level]; + tcb->state = TASK_RUNNING; + } CRITICAL_LEAVE(); @@ -686,7 +690,6 @@ int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req) /* Add to cache and mark ready */ cache_task(tcb->id, tcb); - sched_enqueue_task(tcb); return tcb->id; } From 8f231a48b6e8253d985119818dff2306e46bb978 Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Wed, 22 Oct 2025 22:08:38 +0800 Subject: [PATCH 07/26] Refactor scheduler to RR cursor-based O(1) design Previously, the scheduler iterated through the global task list (kcb->tasks) to find the next TASK_READY task, resulting in O(N) selection time. This approach limited scalability and caused inconsistent task rotation under heavy load. The new scheduling process: 1. Check the ready bitmap and find the highest priority level. 2. 
Select the RR cursor node from the corresponding ready queue. 3. Advance the selected cursor node circularly. Why RR cursor instead of pop/enqueue rotation: - Fewer operations on the ready queue: compared to the pop/enqueue approach, which requires two function calls per switch, the RR cursor method only advances one pointer per scheduling cycle. - Cache-friendly: always accesses the same cursor node, improving cache locality on hot paths. - Cycle-deterministic: RR cursor design allows deterministic task rotation and enables potential future extensions such as cycle accounting or fairness-based algorithms. This change introduces a fully O(1) scheduler design based on per-priority ready queues and round-robin (RR) cursors. Each ready queue maintains its own cursor, allowing the scheduler to select the next runnable task in constant time. --- include/sys/task.h | 2 -- kernel/task.c | 61 ++++++++++++++++++++++++---------------------- 2 files changed, 32 insertions(+), 31 deletions(-) diff --git a/include/sys/task.h b/include/sys/task.h index 4199add..9bb676e 100644 --- a/include/sys/task.h +++ b/include/sys/task.h @@ -128,8 +128,6 @@ typedef struct { extern kcb_t *kcb; /* System Configuration Constants */ -#define SCHED_IMAX \ - 500 /* Safety limit for scheduler iterations to prevent livelock */ #define MIN_TASK_STACK_SIZE \ 256 /* Minimum stack size to prevent stack overflow */ #define TASK_CACHE_SIZE \ diff --git a/kernel/task.c b/kernel/task.c index 56064d1..4b56dd8 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -450,20 +450,20 @@ void sched_wakeup_task(tcb_t *task) } } -/* Efficient Round-Robin Task Selection with O(n) Complexity +/* Efficient Round-Robin Task Selection (Cursor-Based, O(1) Complexity) * - * Selects the next ready task using circular traversal of the master task list. + * Selects the next ready task by advancing the per-priority round-robin + * cursor (rr_cursor) circularly using the list API list_cnext(). * - * Complexity: O(n) where n = number of tasks - * - Best case: O(1) when next task in sequence is ready - * - Worst case: O(n) when only one task is ready and it's the last checked - * - Typical case: O(k) where k << n (number of non-ready tasks to skip) + * Complexity: O(1) + * - Always constant-time selection, regardless of total task count. + * - No need to traverse the task list. * * Performance characteristics: - * - Excellent for small-to-medium task counts (< 50 tasks) - * - Simple and reliable implementation - * - Good cache locality due to sequential list traversal - * - Priority-aware time slice allocation + * - Ideal for systems with frequent context switches or many tasks. + * - Excellent cache locality: only touches nodes in the active ready queue. + * - Priority-aware: highest non-empty ready queue is chosen via bitmap lookup. + * - Each priority level maintains its own rr_cursor to ensure fair rotation. 
*/ uint16_t sched_select_next_task(void) { @@ -476,31 +476,34 @@ uint16_t sched_select_next_task(void) if (current_task->state == TASK_RUNNING) current_task->state = TASK_READY; - /* Round-robin search: find next ready task in the master task list */ - list_node_t *start_node = kcb->task_current; - list_node_t *node = start_node; - int iterations = 0; /* Safety counter to prevent infinite loops */ + /* Read the ready bitmap */ + uint32_t bitmap = kcb->harts->ready_bitmap; + if (unlikely(!bitmap)) + panic(ERR_NO_TASKS); - do { - /* Move to next task (circular) */ - node = list_cnext(kcb->tasks, node); - if (!node || !node->data) - continue; + /* Find top priority ready queue */ + int top_prio_level = 0; + for (; !(bitmap & 1U); top_prio_level++, bitmap >>= 1) + ; - tcb_t *task = node->data; + list_node_t **cursor = &kcb->harts->rr_cursors[top_prio_level]; + list_t *rq = kcb->harts->ready_queues[top_prio_level]; + if (unlikely(!rq || !*cursor)) + panic(ERR_NO_TASKS); - /* Skip non-ready tasks */ - if (task->state != TASK_READY) - continue; + /* Update next task with top priority cursor */ + kcb->task_current = *cursor; - /* Found a ready task */ - kcb->task_current = node; - task->state = TASK_RUNNING; - task->time_slice = get_priority_timeslice(task->prio_level); + /* Advance top priority cursor to next task node */ + *cursor = list_cnext(rq, *cursor); - return task->id; + /* Update new task properties */ + tcb_t *new_task = kcb->task_current->data; + new_task->time_slice = get_priority_timeslice(new_task->prio_level); + new_task->state = TASK_RUNNING; - } while (node != start_node && ++iterations < SCHED_IMAX); + if (kcb->task_current) + return new_task->id; /* No ready tasks found - this should not happen in normal operation */ panic(ERR_NO_TASKS); From b3ef921be04a88c1d57b0f4759605a7bff1121bc Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Sun, 19 Oct 2025 13:59:40 +0800 Subject: [PATCH 08/26] Add ready queue dequeue path in mo_task_suspend() Previously, mo_task_suspend() only changed the task state to TASK_SUSPENDED without removing the task from the ready queue. As a result, suspended tasks could still be selected by the scheduler, leading to incorrect task switching and inconsistent queue states. This change adds a dequeue operation to remove the corresponding task node from its ready queue before marking it as suspended. Additionally, the condition to detect the currently running task has been updated: the scheduler now compares the TCB pointer (kcb->task_current->data == task) instead of the list node (kcb->task_current == node), since kcb->task_current now stores a ready queue node rather than a global task list node. If the suspended task is currently running, the CPU will yield after the task is suspended to allow the scheduler to select the next runnable task. This ensures that suspended tasks are no longer visible to the scheduler until they are resumed. 
--- kernel/task.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/kernel/task.c b/kernel/task.c index 4b56dd8..8bad8cb 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -784,8 +784,15 @@ int32_t mo_task_suspend(uint16_t id) return ERR_TASK_CANT_SUSPEND; } + /* Remove task node from ready queue if task is in ready queue + * (TASK_RUNNING/TASK_READY). */ + if (task->state == TASK_READY || task->state == TASK_RUNNING) { + list_node_t *rq_node = sched_dequeue_task(task); + free(rq_node); + } + task->state = TASK_SUSPENDED; - bool is_current = (kcb->task_current == node); + bool is_current = (kcb->task_current->data == task); CRITICAL_LEAVE(); From 90c9e6a97e88ff76cc24a036f6d2c1a23d1c2617 Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Fri, 24 Oct 2025 11:35:00 +0800 Subject: [PATCH 09/26] Add ready queue dequeue path in mo_task_cancel() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Previously, mo_task_cancel() only removed the task node from the global task list (kcb->tasks) but did not remove it from the ready queue. As a result, the scheduler could still select a canceled task that remained in the ready queue. Additionally, the same node could be freed twice, because list_remove() had already freed it, leading to a double-free issue. This change adds a call to sched_dequeue_task() to remove the task from the ready queue, ensuring that once a task is canceled, it will no longer appear in the scheduler’s selection path. This also prevents memory corruption caused by double-freeing list nodes. --- kernel/task.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/kernel/task.c b/kernel/task.c index 8bad8cb..5e7bd68 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -727,12 +727,17 @@ int32_t mo_task_cancel(uint16_t id) } } + /* Remove from ready queue */ + if (tcb->state == TASK_READY) { + list_node_t *rq_node = sched_dequeue_task(tcb); + free(rq_node); + } + CRITICAL_LEAVE(); /* Free memory outside critical section */ free(tcb->stack); free(tcb); - free(node); return ERR_OK; } From a75e34496a3560c17f932988284216d11bee4a42 Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Fri, 24 Oct 2025 11:47:29 +0800 Subject: [PATCH 10/26] Add sched_enqueue_task() in mo_task_resume() Previously, mo_task_resume() only changed the resumed task's state to TASK_READY, but didn't enqueue it into the ready queue. As a result, the scheduler could not select the resumed task for execution. This change adds sched_enqueue_task() to insert the resumed task into the appropriate ready queue and update the ready bitmap, ensuring the resumed task becomes schedulable again. --- kernel/task.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/kernel/task.c b/kernel/task.c index 5e7bd68..a4e207b 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -824,9 +824,8 @@ int32_t mo_task_resume(uint16_t id) CRITICAL_LEAVE(); return ERR_TASK_CANT_RESUME; } - - /* mark as ready - scheduler will find it */ - task->state = TASK_READY; + /* Enqueue resumed task into ready queue */ + sched_enqueue_task(task); CRITICAL_LEAVE(); return ERR_OK; From 7099067eb739c0759095aae79bd43ad1e853ae3a Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Sun, 19 Oct 2025 16:06:24 +0800 Subject: [PATCH 11/26] Add ready queue enqueue path in mo_task_wakeup() Previously, mo_task_wakeup() only changed the task state to TASK_READY without enqueuing the task back into the ready queue. 
As a result, a woken-up task could remain invisible to the scheduler and never be selected for execution. This change adds a call to sched_enqueue_task() to insert the task into the appropriate ready queue based on its priority level. The ready bitmap, task counts of each ready queue, and RR cursor are updated accordingly to maintain scheduler consistency. With this update, tasks transitioned from a blocked or suspended state can be properly scheduled for execution once they are woken up. --- kernel/task.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/kernel/task.c b/kernel/task.c index a4e207b..135b584 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -434,20 +434,16 @@ void sched_tick_current_task(void) } } -/* Task wakeup - simple state transition approach */ +/* Task wakeup and enqueue into ready queue */ void sched_wakeup_task(tcb_t *task) { if (unlikely(!task)) return; - /* Mark task as ready - scheduler will find it during round-robin traversal + /* Enqueue task into ready queue for scheduler selection by rr_cursor. */ - if (task->state != TASK_READY) { - task->state = TASK_READY; - /* Ensure task has time slice */ - if (task->time_slice == 0) - task->time_slice = get_priority_timeslice(task->prio_level); - } + if (task->state != TASK_READY && task->state != TASK_RUNNING) + sched_enqueue_task(task); } From f614c820b9aa9faf37234397e988d2f24d2f6d13 Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Tue, 21 Oct 2025 16:50:12 +0800 Subject: [PATCH 12/26] Add sched_migrate_task() helper This commit introduces a new API, sched_migrate_task(), which enables migration of a task between ready queues of different priority levels. The function safely removes the task from its current ready queue and enqueues it into the target queue, updating the corresponding RR cursor and ready bitmap to maintain scheduler consistency. This helper will be used in mo_task_priority() and other task management routines that adjust task priority dynamically. Future improvement: The current enqueue path allocates a new list node for each task insertion based on its TCB pointer. In the future, this can be optimized by directly transferring or reusing the existing list node between ready queues, eliminating the need for additional malloc() and free() operations during priority migrations. 
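To make the future-improvement note concrete, a node-reuse migration could look roughly like the sketch below. It is hypothetical: list_pushback_node() is an assumed helper that relinks an existing node (no such helper exists in this series), and the target queue is assumed to already exist; sched_enqueue_task() shows the lazy-create path.

/* Hypothetical sketch: migrate by relinking the existing node instead of
 * free() + malloc(). list_pushback_node() is NOT part of this series. */
static void sched_migrate_task_reuse(tcb_t *task, int16_t priority)
{
    list_node_t *rq_node = sched_dequeue_task(task); /* unlinked, not freed */
    if (!rq_node)
        return;

    task->prio = priority;
    task->prio_level = extract_priority_level(priority);

    /* Relink the same node, avoiding the allocator round-trip */
    list_pushback_node(kcb->harts->ready_queues[task->prio_level], rq_node);
    kcb->harts->queue_counts[task->prio_level]++;
    BITMAP_SET(task->prio_level);
}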
--- kernel/task.c | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/kernel/task.c b/kernel/task.c index 135b584..79166e4 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -416,6 +416,28 @@ static list_node_t *sched_dequeue_task(tcb_t *task) return rq_node; } +/* Migrate a task from its original priority's ready queue to a new one */ +static void sched_migrate_task(tcb_t *task, int16_t priority) +{ + if (unlikely(!task || !is_valid_priority(priority))) + return; + + if (task->prio == priority) + return; + + /* Unlink task node from its original ready queue */ + list_node_t *rq_node = sched_dequeue_task(task); + free(rq_node); + + /* Update new properties */ + task->prio = priority; + task->prio_level = extract_priority_level(priority); + + /* Enqueue into new priority ready queue */ + sched_enqueue_task(task); + return; +} + /* Handle time slice expiration for current task */ void sched_tick_current_task(void) { From d520a97a83c2fc18f67d3c3bc10e3e10562f12b4 Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Wed, 22 Oct 2025 13:23:40 +0800 Subject: [PATCH 13/26] Use sched_migrate_task() in mo_task_priority() This change refactors the priority update process in mo_task_priority() to include early-return checks and proper task migration handling. - Early-return conditions: * Prevent modification of the idle task. * Disallow assigning TASK_PRIO_IDLE to non-idle tasks. The idle task is created by idle_task_init() during system startup and must retain its fixed priority. - Task migration: If the priority-changed task resides in a ready queue (TASK_READY or TASK_RUNNING), sched_migrate_task() is called to move it to the queue corresponding to the new priority. - Running task behavior: When the current running task changes its own priority, it yields the CPU so the scheduler can dispatch the next highest-priority task. --- kernel/task.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/kernel/task.c b/kernel/task.c index 79166e4..7dfb985 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -867,12 +867,22 @@ int32_t mo_task_priority(uint16_t id, uint16_t priority) return ERR_TASK_NOT_FOUND; } + bool is_current = (kcb->task_current->data == task); + + /* Migrate task to the new priority's ready queue */ + if (task->state == TASK_RUNNING || task->state == TASK_READY) + sched_migrate_task(task, priority); + /* Update priority and level */ task->prio = priority; task->prio_level = extract_priority_level(priority); task->time_slice = get_priority_timeslice(task->prio_level); CRITICAL_LEAVE(); + + if (is_current) + mo_task_yield(); + return ERR_OK; } From 28961b8ff2b644e4a0ee99c31ad62b67e090fd82 Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Tue, 21 Oct 2025 16:33:35 +0800 Subject: [PATCH 14/26] Add idle task and initialization API MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This commit introduces the system idle task and its initialization API (idle_task_init()). The idle task serves as the default execution context when no other runnable tasks exist in the system. The sched_idle() function supports both preemptive and cooperative modes. In sched_t, a list node named task_idle is added to record the idle task sentinel. The idle task never enters any ready queue and its priority level cannot be changed. When idle_task_init() is called, the idle task is initialized as the first execution context. This eliminates the need for additional APIs in main() to set up the initial high-priority task during system launch. 
This design allows task priorities to be adjusted safely during app_main(), while keeping the scheduler’s entry point consistent. --- include/sys/task.h | 18 ++++++++++-- kernel/task.c | 72 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 88 insertions(+), 2 deletions(-) diff --git a/include/sys/task.h b/include/sys/task.h index 9bb676e..76f2fd6 100644 --- a/include/sys/task.h +++ b/include/sys/task.h @@ -95,8 +95,8 @@ typedef struct sched { list_node_t *rr_cursors[TASK_PRIORITY_LEVELS]; /* Round-robin position */ /* Hart-Specific Data */ - uint8_t hart_id; /* RISC-V hart identifier */ - + uint8_t hart_id;        /* RISC-V hart identifier */ + list_node_t *task_idle; /* Idle task */ } sched_t; /* Kernel Control Block (KCB) @@ -303,3 +303,17 @@ void _sched_block(queue_t *wait_q); * Returns 'true' to enable preemptive scheduling, or 'false' for cooperative */ int32_t app_main(void); + +/* Initialize the idle task + * + * This function statically creates and initializes the idle task structure. + * It should be called once during system startup. + * + * The idle task is a permanent system task that runs when no other + * ready tasks exist. It is never enqueued into any ready queue and + * cannot be suspended, canceled, or have its priority modified. + * + * Only one idle task exists per hart. Its priority is fixed to the + * lowest level and its time slice is zero. + */ +void idle_task_init(void); \ No newline at end of file diff --git a/kernel/task.c b/kernel/task.c index 7dfb985..003fbe5 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -21,6 +21,7 @@ static sched_t hart0 = { .ready_bitmap = 0, .ready_queues = {NULL}, .rr_cursors = {NULL}, .hart_id = 0, + .task_idle = NULL, }; /* Kernel-wide control block (KCB) */ @@ -468,6 +469,20 @@ void sched_wakeup_task(tcb_t *task) sched_enqueue_task(task); } +/* System idle task; executed when no ready tasks exist in any ready queue */ +static void sched_idle(void) +{ + if (!kcb->preemptive) + /* Cooperative mode idle */ + while (1) + mo_task_yield(); + + /* Preemptive mode idle */ + while (1) + mo_task_wfi(); +} + + /* Efficient Round-Robin Task Selection (Cursor-Based, O(1) Complexity) * * Selects the next ready task by advancing the per-priority round-robin @@ -632,6 +647,63 @@ static bool init_task_stack(tcb_t *tcb, size_t stack_size) return true; } +/* Initialize idle task */ +void idle_task_init(void) +{ + /* Ensure proper alignment */ + size_t stack_size = DEFAULT_STACK_SIZE; + stack_size = (stack_size + 0xF) & ~0xFU; + + /* Allocate and initialize TCB */ + tcb_t *idle = malloc(sizeof(tcb_t)); + if (!idle) + panic(ERR_TCB_ALLOC); + + idle->entry = &sched_idle; + idle->delay = 0; + idle->rt_prio = NULL; + idle->state = TASK_READY; + idle->flags = 0; + + /* Set idle task priority */ + idle->prio = TASK_PRIO_IDLE; + idle->prio_level = 0; + idle->time_slice = 0; + + /* Set idle task id and task count */ + idle->id = kcb->next_tid++; + kcb->task_count++; + + /* Initialize stack */ + if (!init_task_stack(idle, stack_size)) { + free(idle); + panic(ERR_STACK_ALLOC); + } + + /* Allocate and initialize idle task node */ + list_node_t *idle_task_node = malloc(sizeof(list_node_t)); + if (!idle_task_node) { + free(idle->stack); + free(idle); + panic(ERR_STACK_ALLOC); + } + idle_task_node->data = idle; + idle_task_node->next = NULL; + kcb->harts->task_idle = idle_task_node; + + /* Initialize idle task execution context */ + hal_context_init(&idle->context, (size_t) idle->stack, stack_size, + (size_t) &sched_idle); + + printf("idle id %u: entry=%p stack=%p size=%u\n", idle->id, 
&sched_idle, idle->stack, (unsigned int) stack_size); if (!kcb->task_current) kcb->task_current = kcb->harts->task_idle; return; } /* Task Management API */ int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req) From ec07239fef2622dcb021a16a1fa16aece3ef9898 Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Fri, 24 Oct 2025 00:42:35 +0800 Subject: [PATCH 15/26] Add sched_switch_to_idle() helper When all ready queues are empty, the scheduler should switch to idle mode and wait for incoming interrupts. This commit introduces a dedicated helper to handle that transition, centralizing the logic and improving readability of the scheduler path to idle. --- kernel/task.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/kernel/task.c b/kernel/task.c index 003fbe5..0b67156 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -482,6 +482,14 @@ static void sched_idle(void) mo_task_wfi(); } +/* Switch to the idle task and return its TCB */ +static inline tcb_t *sched_switch_to_idle(void) +{ + kcb->task_current = kcb->harts->task_idle; + tcb_t *idle = kcb->harts->task_idle->data; + idle->state = TASK_RUNNING; + return idle; +} /* Efficient Round-Robin Task Selection (Cursor-Based, O(1) Complexity) * From 3474f1ad2e8a45c3c9ac29a7727c3274a877e7f7 Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Fri, 24 Oct 2025 01:00:02 +0800 Subject: [PATCH 16/26] Add sched_switch_to_idle() helper in the scheduler Previously, when all ready queues were empty, the scheduler would trigger a kernel panic. This condition should instead transition into the idle task rather than panic. The new sched_switch_to_idle() helper centralizes this logic, making the path to idle clearer and more readable. --- kernel/task.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/task.c b/kernel/task.c index 0b67156..a0cfb7e 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -520,7 +520,7 @@ uint16_t sched_select_next_task(void) /* Read the ready bitmap */ uint32_t bitmap = kcb->harts->ready_bitmap; if (unlikely(!bitmap)) - panic(ERR_NO_TASKS); + return sched_switch_to_idle()->id; /* Find top priority ready queue */ From 8291d5af929df074cb5dc3df5b061ee90b6c959a Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Fri, 24 Oct 2025 02:06:08 +0800 Subject: [PATCH 17/26] Add idle_task_init() call in main() The idle task is now initialized in main() during system startup. This ensures that the scheduler always has a valid execution context before any user or application tasks are created. Initializing the idle task early guarantees a safe fallback path when no runnable tasks exist and keeps the scheduler entry point consistent. --- kernel/main.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/kernel/main.c b/kernel/main.c index efa46ff..7b63df3 100644 --- a/kernel/main.c +++ b/kernel/main.c @@ -23,6 +23,9 @@ int32_t main(void) printf("Heap initialized, %u bytes available\n", (unsigned int) (size_t) &_heap_size); + /* Initialize idle task */ + idle_task_init(); + /* Call the application's main entry point to create initial tasks. 
*/ kcb->preemptive = (bool) app_main(); printf("Scheduler mode: %s\n", From 41df6587dc14dfcb9a88b3fcd8642b41640f5afd Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Fri, 24 Oct 2025 02:04:47 +0800 Subject: [PATCH 18/26] Refactor launch sequence in main() for scheduler initialization This change sets up the scheduler state during system startup by assigning kcb->task_current to kcb->harts->task_idle and dispatching to the idle task as the first execution context. This commit also keeps the scheduling entry path consistent between startup and runtime. --- kernel/main.c | 24 +++++++++--------------- 1 file changed, 9 insertions(+), 15 deletions(-) diff --git a/kernel/main.c b/kernel/main.c index 7b63df3..3baccd5 100644 --- a/kernel/main.c +++ b/kernel/main.c @@ -23,35 +23,29 @@ int32_t main(void) printf("Heap initialized, %u bytes available\n", (unsigned int) (size_t) &_heap_size); - /* Initialize idle task */ + /* Initialize the first current task as idle sentinel node. + * This ensures a valid entry point before any real task runs. + */ idle_task_init(); + kcb->task_current = kcb->harts->task_idle; /* Call the application's main entry point to create initial tasks. */ kcb->preemptive = (bool) app_main(); printf("Scheduler mode: %s\n", kcb->preemptive ? "Preemptive" : "Cooperative"); - /* Verify that the application created at least one task. * If 'kcb->task_current' is still NULL, it means mo_task_spawn was never * successfully called. */ - if (!kcb->task_current) - panic(ERR_NO_TASKS); /* Save the kernel's context. This is a formality to establish a base * execution context before launching the first real task. */ setjmp(kcb->context); - /* Launch the first task. - * 'kcb->task_current' was set by the first call to mo_task_spawn. - * This function transfers control and does not return. + /* Launch the first task (idle task), then the scheduler will select the highest + * priority task. This function transfers control and does not return. */ - tcb_t *first_task = kcb->task_current->data; - if (!first_task) - panic(ERR_NO_TASKS); + tcb_t *idle = kcb->task_current->data; + idle->state = TASK_RUNNING; - hal_dispatch_init(first_task->context); + hal_dispatch_init(idle->context); /* This line should be unreachable. */ panic(ERR_UNKNOWN); From e04881d9bb13a22ccd03bafc728abca071be6eb1 Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Fri, 24 Oct 2025 02:09:33 +0800 Subject: [PATCH 19/26] Remove first-task binding from task initialization Previously, both mo_task_spawn() and idle_task_init() implicitly bound their created tasks to kcb->task_current as the first execution context. This behavior caused ambiguity with the scheduler, which is now responsible for determining the active task during system startup. This change removes the initial binding logic from both functions, allowing the startup process (main()) to explicitly assign kcb->task_current (typically to the idle task) during launch. This ensures a single, centralized initialization flow and improves the separation between task creation and scheduling control. 
--- kernel/task.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/kernel/task.c b/kernel/task.c index a0cfb7e..758b9c8 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -706,9 +706,6 @@ void idle_task_init(void) printf("idle id %u: entry=%p stack=%p size=%u\n", idle->id, &sched_idle, idle->stack, (unsigned int) stack_size); - if (!kcb->task_current) - kcb->task_current = kcb->harts->task_idle; - return; } @@ -774,10 +771,6 @@ int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req) /* Push node to ready queue */ sched_enqueue_task(tcb); - if (!kcb->task_current) { - kcb->task_current = kcb->harts->rr_cursors[tcb->prio_level]; - tcb->state = TASK_RUNNING; - } CRITICAL_LEAVE(); From 809653207eb815fd02bd1147615450d476ee54fd Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Sun, 26 Oct 2025 15:04:46 +0800 Subject: [PATCH 20/26] Add De Bruijn LUT for future O(1) priority selection Prepare for O(1) bitmap index lookup by adding a 32-entry De Bruijn sequence table. The table will be used in later commits to replace iterative bit scanning. No functional change in this patch. --- kernel/task.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/kernel/task.c b/kernel/task.c index 758b9c8..4048634 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -337,6 +337,11 @@ void panic(int32_t ecode) hal_panic(); } +/* RISC-V optimized priority finding using De Bruijn sequence */ +static const uint8_t debruijn_lut[32] = { + 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8, + 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9}; + /* Weak aliases for context switching functions. */ void dispatch(void); void yield(void); From 05ebd3854f37eb1fe04cd11dee9d821b980e33fb Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Sun, 26 Oct 2025 15:17:41 +0800 Subject: [PATCH 21/26] Implement De Bruijn-based top priority helper Implement the helper function that uses a De Bruijn multiply-and-LUT approach to compute the index of the least-significant set bit in O(1) time complexity. This helper is not yet wired into the scheduler logic; integration will follow in a later commit. No functional change in this patch. --- kernel/task.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/kernel/task.c b/kernel/task.c index 4048634..ea01450 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -342,6 +342,18 @@ static const uint8_t debruijn_lut[32] = { 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8, 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9}; +/* O(1) priority selection optimized for RISC-V */ +static inline uint8_t find_highest_ready_priority(uint32_t bitmap) +{ + /* Isolate rightmost set bit (highest priority) */ + uint32_t isolated = bitmap & (-bitmap); + + /* De Bruijn multiplication for O(1) bit position finding */ + uint32_t hash = (isolated * 0x077CB531U) >> 27; + + return debruijn_lut[hash & 0x1F]; +} + /* Weak aliases for context switching functions. */ void dispatch(void); void yield(void); From 589b0e7ec544c8af505e980aee78a4996eb7ffc7 Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Sun, 26 Oct 2025 15:22:56 +0800 Subject: [PATCH 22/26] Use De Bruijn-based top priority helper in scheduler Replace the iterative bitmap scanning with the De Bruijn multiply+LUT method via the new helper. This change makes top-priority selection constant-time and deterministic. 
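The arithmetic is easy to verify in isolation. The standalone program below mirrors the patch's table and multiplier (nothing else is assumed) and checks a few bitmaps; since priority level 0 is the highest, the least-significant set bit names the queue to serve:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static const uint8_t debruijn_lut[32] = {
    0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
    31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9};

static inline uint8_t find_highest_ready_priority(uint32_t bitmap)
{
    uint32_t isolated = bitmap & (-bitmap); /* keep only the lowest set bit */
    uint32_t hash = (isolated * 0x077CB531U) >> 27;
    return debruijn_lut[hash & 0x1F];
}

int main(void)
{
    assert(find_highest_ready_priority(0x01) == 0); /* only level 0 ready */
    assert(find_highest_ready_priority(0x28) == 3); /* levels 3 and 5 -> 3 */
    assert(find_highest_ready_priority(0x80) == 7); /* idle level only */
    printf("De Bruijn LSB lookup verified\n");
    return 0;
}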
--- kernel/task.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/kernel/task.c b/kernel/task.c index ea01450..40494f5 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -540,9 +540,7 @@ uint16_t sched_select_next_task(void) return sched_switch_to_idle()->id; /* Find top priority ready queue */ - int top_prio_level = 0; - for (; !(bitmap & 1U); top_prio_level++, bitmap >>= 1) - ; + uint8_t top_prio_level = find_highest_ready_priority(bitmap); list_node_t **cursor = &kcb->harts->rr_cursors[top_prio_level]; list_t *rq = kcb->harts->ready_queues[top_prio_level]; From 98fd426f231bd4da212db7001cae0ebc79fd6972 Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Wed, 5 Nov 2025 09:29:23 +0800 Subject: [PATCH 23/26] Add dequeuing ready queue path in _sched_block() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Previously, _sched_block() only enqueued the task into the wait queue and set its state to TASK_BLOCKED. In the new scheduler design (ready-queue–based), a blocked task must also be removed from its priority's ready queue to prevent it from being selected by the scheduler. This change adds the missing dequeue path for the corresponding ready queue, ensuring behavior consistency. --- kernel/task.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/kernel/task.c b/kernel/task.c index 40494f5..9d085f2 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -1055,6 +1055,8 @@ void _sched_block(queue_t *wait_q) tcb_t *self = kcb->task_current->data; + sched_dequeue_task(self); + if (queue_enqueue(wait_q, self) != 0) panic(ERR_SEM_OPERATION); From 1abd962cc0f07349c3a573dfbf663a5a45d9e496 Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Wed, 5 Nov 2025 10:20:09 +0800 Subject: [PATCH 24/26] Make sched_wakeup_task() globally visible Previously, sched_wakeup_task() was limited to internal use within the scheduler module. This change makes it globally visible so that it can be reused in semaphore.c for task wake-up operations. --- include/sys/task.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/include/sys/task.h b/include/sys/task.h index 76f2fd6..63c2786 100644 --- a/include/sys/task.h +++ b/include/sys/task.h @@ -316,4 +316,7 @@ int32_t app_main(void); * Only one idle task exists per hart. Its priority is fixed to the * lowest level and its time slice is zero. */ -void idle_task_init(void); \ No newline at end of file +void idle_task_init(void); + +/* Wake up and enqueue task into ready queue */ +void sched_wakeup_task(tcb_t *); \ No newline at end of file From 1c68d20f7a9ef4fe907bcce3d2f2ece6da8465c6 Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Wed, 5 Nov 2025 10:25:06 +0800 Subject: [PATCH 25/26] Add sched_wakeup_task() in mo_sem_signal() Previously, mo_sem_signal() only changed the awakened task state to TASK_READY when a semaphore signal was triggered. In the new scheduler design, which selects runnable tasks from ready queues, the awakened task must also be enqueued for scheduling. This change invokes sched_wakeup_task() to perform the enqueue operation, ensuring the awakened task is properly inserted into the ready queue. 
--- kernel/semaphore.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/semaphore.c b/kernel/semaphore.c index 640e372..fbc3271 100644 --- a/kernel/semaphore.c +++ b/kernel/semaphore.c @@ -176,7 +176,7 @@ void mo_sem_signal(sem_t *s) if (likely(awakened_task)) { /* Validate awakened task state consistency */ if (likely(awakened_task->state == TASK_BLOCKED)) { - awakened_task->state = TASK_READY; + sched_wakeup_task(awakened_task); should_yield = true; } else { /* Task state inconsistency - this should not happen */ From 334390191e1c6550380a6a7f0492ca408494b971 Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Thu, 6 Nov 2025 17:59:54 +0800 Subject: [PATCH 26/26] Add dequeuing ready queue path in mo_task_delay() Previously, mo_task_delay() only changed the task state to TASK_BLOCKED and updated the delay ticks. However, it left the task node in its ready queue, so the scheduler could still select a delayed task. This change dequeues the task from its ready queue before blocking, and frees the unlinked node outside the critical section. --- kernel/task.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/kernel/task.c b/kernel/task.c index 9d085f2..7f7143c 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -868,11 +868,13 @@ void mo_task_delay(uint16_t ticks) tcb_t *self = kcb->task_current->data; - /* Set delay and blocked state - scheduler will skip blocked tasks */ + /* Set delay and blocked state, dequeue from ready queue */ + list_node_t *rq_node = sched_dequeue_task(self); self->delay = ticks; self->state = TASK_BLOCKED; NOSCHED_LEAVE(); + free(rq_node); mo_task_yield(); }