diff --git a/app/boards/intel_adsp_ace15_mtpm.conf b/app/boards/intel_adsp_ace15_mtpm.conf index b93a408ee8f5..88530f83d869 100644 --- a/app/boards/intel_adsp_ace15_mtpm.conf +++ b/app/boards/intel_adsp_ace15_mtpm.conf @@ -48,6 +48,7 @@ CONFIG_SOF_TELEMETRY_IO_PERFORMANCE_MEASUREMENTS=y CONFIG_SOF_TELEMETRY_PERFORMANCE_MEASUREMENTS=y CONFIG_ZEPHYR_NATIVE_DRIVERS=y CONFIG_ZEPHYR_DP_SCHEDULER=y +CONFIG_ZEPHYR_TWB_SCHEDULER=y CONFIG_COLD_STORE_EXECUTE_DRAM=y # SOF / loadable modules @@ -70,6 +71,9 @@ CONFIG_LLEXT_STORAGE_WRITABLE=y CONFIG_MODULES=y CONFIG_TIMING_FUNCTIONS=y CONFIG_WATCHDOG=y +CONFIG_TIMESLICE_PER_THREAD=y +CONFIG_THREAD_RUNTIME_STATS=y +CONFIG_SCHED_THREAD_USAGE=y # Zephyr / device drivers CONFIG_CLOCK_CONTROL=y diff --git a/src/Kconfig b/src/Kconfig index 46f30b287450..a9df53c787d5 100644 --- a/src/Kconfig +++ b/src/Kconfig @@ -15,6 +15,7 @@ rsource "probe/Kconfig" rsource "samples/Kconfig" rsource "schedule/Kconfig" +rsource "schedule/Kconfig.threads_prio" rsource "ipc/Kconfig" diff --git a/src/idc/zephyr_idc.c b/src/idc/zephyr_idc.c index 664d09a260ca..a0b2840f4ca2 100644 --- a/src/idc/zephyr_idc.c +++ b/src/idc/zephyr_idc.c @@ -133,7 +133,7 @@ int idc_send_msg(struct idc_msg *msg, uint32_t mode) idc_send_memcpy_err = memcpy_s(msg_cp, sizeof(*msg_cp), msg, sizeof(*msg)); assert(!idc_send_memcpy_err); /* Same priority as the IPC thread which is an EDF task and under Zephyr */ - work->priority = EDF_ZEPHYR_PRIORITY; + work->priority = CONFIG_EDF_THREAD_PRIORITY; work->deadline = 0; work->handler = idc_handler; work->sync = mode == IDC_BLOCKING; diff --git a/src/include/sof/ipc/common.h b/src/include/sof/ipc/common.h index 43ad023da1ef..562739ffb5c9 100644 --- a/src/include/sof/ipc/common.h +++ b/src/include/sof/ipc/common.h @@ -68,7 +68,11 @@ struct ipc { struct list_item comp_list; /* list of component devices */ /* processing task */ +#if CONFIG_TWB_IPC_TASK + struct task *ipc_task; +#else struct task ipc_task; +#endif #ifdef 
CONFIG_SOF_TELEMETRY_IO_PERFORMANCE_MEASUREMENTS /* io performance measurement */ diff --git a/src/include/sof/schedule/edf_schedule.h b/src/include/sof/schedule/edf_schedule.h index 013183eb01cd..aeb10d5b57c3 100644 --- a/src/include/sof/schedule/edf_schedule.h +++ b/src/include/sof/schedule/edf_schedule.h @@ -13,8 +13,6 @@ #include #include -#define EDF_ZEPHYR_PRIORITY 1 - #define edf_sch_set_pdata(task, data) \ (task->priv_data = data) diff --git a/src/include/sof/schedule/schedule.h b/src/include/sof/schedule/schedule.h index ab110997cfcb..bbdcbbecf3b4 100644 --- a/src/include/sof/schedule/schedule.h +++ b/src/include/sof/schedule/schedule.h @@ -38,6 +38,10 @@ enum { * and will be unified with SOF_SCHEDULE_EDF for Zephyr builds * current implementation of Zephyr based EDF is depreciated now */ + SOF_SCHEDULE_TWB, /**< Tasks With Budget scheduler based on Zephyr peemptive threads + * for each SOF task that has pre-allocated MCPS budget + * renewed with every system tick. + */ SOF_SCHEDULE_COUNT /**< indicates number of scheduler types */ }; diff --git a/src/include/sof/schedule/twb_schedule.h b/src/include/sof/schedule/twb_schedule.h new file mode 100644 index 000000000000..be7c06d0706f --- /dev/null +++ b/src/include/sof/schedule/twb_schedule.h @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ +/* + * Copyright(c) 2023 Intel Corporation. All rights reserved. + * + * Author: Adrian Bonislawski + */ + +#ifndef __SOF_SCHEDULE_TWB_SCHEDULE_H__ +#define __SOF_SCHEDULE_TWB_SCHEDULE_H__ + +#include +#include + +/** + * @brief Task With Budget (TWB) Scheduler + * + * TWB scheduler is a scheduler that creates a separate preemptible Zephyr thread + * for each SOF task that has a pre-allocated MCPS budget renewed with every system tick. + * The TWB scheduler assigns either MEDIUM_PRIORITY or LOW_PRIORITY to the task thread + * based on the budget left in the current system tick. 
+ * It allows for opportunistic execution if there is no other ready task + * with a higher priority while the budget is already spent. + * + * Examples of tasks with budget include IPC Task and IDC Task. + * + * The TWB scheduler has two key parameters assigned: + * - cycles granted: the budget per system tick + * - cycles consumed: the number of cycles consumed in a given system tick for task execution + * + * The number of cycles consumed is reset to 0 at the beginning of each system tick, + * renewing the TWB budget. + * When the number of cycles consumed exceeds the cycles granted, + * the task is switched from MEDIUM to LOW priority. + * When the task with budget thread is created, the MPP Scheduling is responsible + * for setting the thread time slice equal to the task budget, along with + * setting a callback on time slice timeout. + * Thread time slicing guarantees that the Zephyr scheduler will interrupt execution + * when the budget is spent, + * so the MPP Scheduling timeout callback can re-evaluate the task priority. + * + * If there is a budget left in some system tick + * (i.e., the task spent less time or started executing close to the system tick + * that preempts execution), it is reset and not carried over to the next tick. 
+ * + * More info: + * https://thesofproject.github.io/latest/architectures/firmware/sof-zephyr/mpp_layer/mpp_scheduling.html + */ + +/** + * \brief default static stack size for each TWB thread + */ +#define ZEPHYR_TWB_STACK_SIZE 8192 + +/** + * \brief max budget limit + */ +#define ZEPHYR_TWB_BUDGET_MAX (CONFIG_SYS_CLOCK_TICKS_PER_SEC / 1000) + +#define SYS_TICKS_TO_HW_CYCLES(x) (x * CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC / CONFIG_SYS_CLOCK_TICKS_PER_SEC) +#define HW_CYCLES_TO_SYS_TICKS(x) (x * CONFIG_SYS_CLOCK_TICKS_PER_SEC / CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC) + +/** + * \brief Init the Tasks with Budget scheduler + */ +int scheduler_twb_init(void); + +/** + * \brief initialize a TWB task and add it to scheduling + * It must be called on core the task is declared to run on + * + * \param[out] task pointer, pointer to allocated task structure will be return + * \param[in] uid pointer to UUID of the task + * \param[in] ops pointer to task functions + * \param[in] data pointer to the task data + * \param[in] core CPU the thread should run on + * \param[in] name zephyr thread name + * \param[in] stack_size size of stack for a zephyr thread + * \param[in] thread_priority priority of the zephyr thread + * \param[in] cycles_granted cycles budget for the zephyr thread + */ +int scheduler_twb_task_init(struct task **task, + const struct sof_uuid_entry *uid, + const struct task_ops *ops, + void *data, + int32_t core, + const char *name, + size_t stack_size, + int32_t thread_priority, + uint32_t cycles_granted); + +#endif /* __SOF_SCHEDULE_TWB_SCHEDULE_H__ */ diff --git a/src/ipc/ipc-common.c b/src/ipc/ipc-common.c index 244a218d5be8..8f72ac1ba91b 100644 --- a/src/ipc/ipc-common.c +++ b/src/ipc/ipc-common.c @@ -273,7 +273,11 @@ static void ipc_work_handler(struct k_work *work) void ipc_schedule_process(struct ipc *ipc) { +#if CONFIG_TWB_IPC_TASK + schedule_task(ipc->ipc_task, 0, IPC_PERIOD_USEC); +#else schedule_task(&ipc->ipc_task, 0, IPC_PERIOD_USEC); +#endif } int 
ipc_init(struct sof *sof) diff --git a/src/ipc/ipc-zephyr.c b/src/ipc/ipc-zephyr.c index a410e03123d6..fc2eac10166c 100644 --- a/src/ipc/ipc-zephyr.c +++ b/src/ipc/ipc-zephyr.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include #include @@ -160,9 +161,14 @@ static int ipc_device_resume_handler(const struct device *dev, void *arg) intel_adsp_ipc_set_message_handler(INTEL_ADSP_IPC_HOST_DEV, message_handler, ipc); /* schedule task */ +#if CONFIG_TWB_IPC_TASK + scheduler_twb_task_init(&ipc->ipc_task, SOF_UUID(zipc_task_uuid), + &ipc_task_ops, ipc, 0, "IPC", ZEPHYR_TWB_STACK_SIZE, + CONFIG_TWB_THREAD_MEDIUM_PRIORITY, ZEPHYR_TWB_BUDGET_MAX / 2); +#else schedule_task_init_edf(&ipc->ipc_task, SOF_UUID(zipc_task_uuid), &ipc_task_ops, ipc, 0, 0); - +#endif return 0; } #endif /* CONFIG_PM_DEVICE */ @@ -278,9 +284,14 @@ int platform_ipc_init(struct ipc *ipc) ipc_set_drvdata(ipc, NULL); /* schedule task */ +#if CONFIG_TWB_IPC_TASK + scheduler_twb_task_init(&ipc->ipc_task, SOF_UUID(zipc_task_uuid), + &ipc_task_ops, ipc, 0, "IPC", ZEPHYR_TWB_STACK_SIZE, + CONFIG_TWB_THREAD_MEDIUM_PRIORITY, ZEPHYR_TWB_BUDGET_MAX / 2); +#else schedule_task_init_edf(&ipc->ipc_task, SOF_UUID(zipc_task_uuid), &ipc_task_ops, ipc, 0, 0); - +#endif /* configure interrupt - work is done internally by Zephyr API */ /* attach handlers */ diff --git a/src/platform/intel/ace/platform.c b/src/platform/intel/ace/platform.c index 9e88861d9a97..cc79e92ca9c9 100644 --- a/src/platform/intel/ace/platform.c +++ b/src/platform/intel/ace/platform.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -115,6 +116,12 @@ int platform_init(struct sof *sof) return ret; #endif /* CONFIG_ZEPHYR_DP_SCHEDULER */ +#if CONFIG_ZEPHYR_TWB_SCHEDULER + ret = scheduler_twb_init(); + if (ret < 0) + return ret; +#endif + /* init the system agent */ trace_point(TRACE_BOOT_PLATFORM_AGENT); sa_init(sof, CONFIG_SYSTICK_PERIOD); diff --git a/src/schedule/Kconfig b/src/schedule/Kconfig 
index fc917c252809..99ca2861f650 100644 --- a/src/schedule/Kconfig +++ b/src/schedule/Kconfig @@ -51,3 +51,22 @@ config SCHEDULE_LL_NO_RESCHEDULE_TASK scheduler_ops::reschedule_task will set to NULL instead, tasks with the attempt to reschedule (e.g. DMA trace works) will be relinguished directly and return no error. + +config ZEPHYR_TWB_SCHEDULER + bool "use Zephyr thread based TWB scheduler" + default n + depends on ZEPHYR_SOF_MODULE + depends on TIMESLICE_PER_THREAD + depends on THREAD_RUNTIME_STATS + depends on SCHED_THREAD_USAGE + help + Enable Tasks with Budget preemptive scheduler based on + Zephyr preemptive threads for each SOF task that has pre-allocated + MCPS budget renewed with every system tick. + +config TWB_IPC_TASK + bool "use TWB scheduler for IPC task" + default n + depends on ZEPHYR_TWB_SCHEDULER + help + Switch IPC task to TWB scheduler. diff --git a/src/schedule/Kconfig.threads_prio b/src/schedule/Kconfig.threads_prio new file mode 100644 index 000000000000..a7d51c666b63 --- /dev/null +++ b/src/schedule/Kconfig.threads_prio @@ -0,0 +1,41 @@ +# SPDX-License-Identifier: BSD-3-Clause + +config LL_THREAD_PRIORITY + int "LL thread cooperative high priority" + default -16 + help + LL thread configured priority in the system. + Should be in cooperative range: + -NUM_COOP_PRIORITIES to -1 + +config TWB_THREAD_MEDIUM_PRIORITY + int "TWB thread preemptible medium priority" + default 1 + help + TWB thread configured priority in the system. + Should be in preemptible range: + 0 to NUM_PREEMPT_PRIORITIES-1 + +config TWB_THREAD_LOW_PRIORITY + int "TWB thread preemptible low priority" + default 12 + help + TWB thread configured priority in the system. + Should be in preemptible range: + 0 to NUM_PREEMPT_PRIORITIES-1 + +config DP_THREAD_PRIORITY + int "DP thread preemptible low priority" + default 12 + help + DP thread configured priority in the system. 
+ Should be in preemptible range: + 0 to NUM_PREEMPT_PRIORITIES-1 + +config EDF_THREAD_PRIORITY + int "EDF thread preemptible low priority" + default 1 + help + EDF thread configured priority in the system. + Should be in preemptible range: + 0 to NUM_PREEMPT_PRIORITIES-1 diff --git a/src/schedule/zephyr_dma_domain.c b/src/schedule/zephyr_dma_domain.c index 05e95a0dad0e..3f95da7a2476 100644 --- a/src/schedule/zephyr_dma_domain.c +++ b/src/schedule/zephyr_dma_domain.c @@ -450,7 +450,7 @@ static int zephyr_dma_domain_register(struct ll_schedule_domain *domain, dt, NULL, NULL, - -CONFIG_NUM_COOP_PRIORITIES, + CONFIG_LL_THREAD_PRIORITY, 0, K_FOREVER); diff --git a/src/schedule/zephyr_domain.c b/src/schedule/zephyr_domain.c index bcb7a93b2498..0c052f611ca4 100644 --- a/src/schedule/zephyr_domain.c +++ b/src/schedule/zephyr_domain.c @@ -195,7 +195,7 @@ static int zephyr_domain_register(struct ll_schedule_domain *domain, ll_sched_stack[core], ZEPHYR_LL_STACK_SIZE, zephyr_domain_thread_fn, zephyr_domain, NULL, NULL, - -CONFIG_NUM_COOP_PRIORITIES, 0, K_FOREVER); + CONFIG_LL_THREAD_PRIORITY, 0, K_FOREVER); k_thread_cpu_mask_clear(thread); k_thread_cpu_mask_enable(thread, core); diff --git a/src/schedule/zephyr_dp_schedule.c b/src/schedule/zephyr_dp_schedule.c index 68f3fdc37e70..8d976925ec07 100644 --- a/src/schedule/zephyr_dp_schedule.c +++ b/src/schedule/zephyr_dp_schedule.c @@ -27,11 +27,6 @@ SOF_DEFINE_REG_UUID(dp_sched); DECLARE_TR_CTX(dp_tr, SOF_UUID(dp_sched_uuid), LOG_LEVEL_INFO); -/** - * \brief a priority of the DP threads in the system. 
- */ -#define ZEPHYR_DP_THREAD_PRIORITY (CONFIG_NUM_PREEMPT_PRIORITIES - 2) - struct scheduler_dp_data { struct list_item tasks; /* list of active dp tasks */ struct task ll_tick_src; /* LL task - source of DP tick */ @@ -399,7 +394,7 @@ static int scheduler_dp_task_shedule(void *data, struct task *task, uint64_t sta /* create a zephyr thread for the task */ pdata->thread_id = k_thread_create(&pdata->thread, (__sparse_force void *)pdata->p_stack, pdata->stack_size, dp_thread_fn, task, NULL, NULL, - ZEPHYR_DP_THREAD_PRIORITY, K_USER, K_FOREVER); + CONFIG_DP_THREAD_PRIORITY, K_USER, K_FOREVER); /* pin the thread to specific core */ ret = k_thread_cpu_pin(pdata->thread_id, task->core); diff --git a/src/schedule/zephyr_twb_schedule.c b/src/schedule/zephyr_twb_schedule.c new file mode 100644 index 000000000000..38a95f6c3b0f --- /dev/null +++ b/src/schedule/zephyr_twb_schedule.c @@ -0,0 +1,499 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* + * Copyright(c) 2024 Intel Corporation. All rights reserved. 
+ * + * Author: Adrian Bonislawski + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +LOG_MODULE_REGISTER(twb_schedule, CONFIG_SOF_LOG_LEVEL); +SOF_DEFINE_REG_UUID(twb_sched); + +DECLARE_TR_CTX(twb_tr, SOF_UUID(twb_sched_uuid), LOG_LEVEL_INFO); + +struct scheduler_twb_data { + struct list_item tasks; /* list of active TWB tasks */ + struct task ll_tick_src; /* LL task - source of TWB tick */ +}; + +struct task_twb_data { + k_tid_t thread_id; /* zephyr thread ID */ + k_thread_stack_t __sparse_cache *p_stack; /* pointer to thread stack */ + struct k_sem sem; /* thread semaphore */ + int32_t thread_prio; /* thread default priority */ + uint32_t cycles_granted; /* cycles budget for the task */ + uint32_t cycles_consumed; /* cycles consumed by the task */ + uint64_t cycles_ref; /* reference cycles for the task */ +}; + +/* Single CPU-wide lock + * as each per-core instance if TWB-scheduler has separate structures, it is enough to + * use irq_lock instead of cross-core spinlocks + */ +static inline unsigned int scheduler_twb_lock(void) +{ + return irq_lock(); +} + +static inline void scheduler_twb_unlock(unsigned int key) +{ + irq_unlock(key); +} + +/** + * @brief Callback function for the TWB scheduler task. + * + * This function is called when the TWB scheduler task runs out of budget. + * It lowers the priority of the thread and sets the time slice to 0. + * + * @param thread Pointer to the thread structure. + * @param data Pointer to additional data (not used). + */ +static void scheduler_twb_task_cb(struct k_thread *thread, void *data) +{ + tr_dbg(&twb_tr, "TWB task %p out of budget, lowering priority", data); + + k_thread_priority_set(thread, CONFIG_TWB_THREAD_LOW_PRIORITY); + k_thread_time_slice_set(thread, 0, &scheduler_twb_task_cb, data); +} + +/** + * @brief Executes the LL tick of the TWB scheduler. 
+ * + * This function is responsible for executing the LL tick of the TWB scheduler. + * It iterates through the list of tasks and performs the necessary operations + * based on the task's state. + * If a task is in the QUEUED state, it is transitioned to the RUNNING state + * and its associated thread is resumed. + * If a task is in the RUNNING state, its thread's priority and time slice + * are set based on the task's budget. + * The function also retrieves the runtime statistics of the thread and + * updates the task's reference cycle count. + * If a task is in the CANCEL or COMPLETED state, it is removed from the list. + * + * @param data Pointer to the scheduler_twb_data structure. + * @return The state of the task after the LL tick execution. + * If no task requires further execution, SOF_TASK_STATE_COMPLETED is returned. + * Otherwise, SOF_TASK_STATE_RESCHEDULE is returned. + */ +static enum task_state scheduler_twb_ll_tick(void *data) +{ + struct scheduler_twb_data *twb_sch = data; + k_thread_runtime_stats_t rt_stats_thread; + struct list_item *tlist, *tmp; + struct task_twb_data *pdata; + struct task *curr_task; + unsigned int lock_key; + bool keep_ll_tick_src = false; + + lock_key = scheduler_twb_lock(); + + /* Iterate through the list of tasks */ + list_for_item_safe(tlist, tmp, &twb_sch->tasks) { + curr_task = container_of(tlist, struct task, list); + pdata = curr_task->priv_data; + + /* Reset consumed cycles */ + pdata->cycles_consumed = 0; + + switch (curr_task->state) { + case SOF_TASK_STATE_QUEUED: + curr_task->state = SOF_TASK_STATE_RUNNING; + k_sem_give(&pdata->sem); + COMPILER_FALLTHROUGH; + case SOF_TASK_STATE_RUNNING: + if (pdata->cycles_granted) { + /* Reset thread's priority and time slice based on task's budget */ + k_thread_priority_set(pdata->thread_id, pdata->thread_prio); + k_thread_time_slice_set(pdata->thread_id, pdata->cycles_granted, + &scheduler_twb_task_cb, curr_task); + + /* Retrieve runtime statistics of the thread & update ref 
cycle */ + k_thread_runtime_stats_get(pdata->thread_id, &rt_stats_thread); + pdata->cycles_ref = rt_stats_thread.execution_cycles; + } + keep_ll_tick_src = true; + break; + case SOF_TASK_STATE_CANCEL: + case SOF_TASK_STATE_COMPLETED: + /* Finally remove task from the list */ + list_item_del(&curr_task->list); + break; + default: + break; + } + } + + scheduler_twb_unlock(lock_key); + + if (!keep_ll_tick_src) + return SOF_TASK_STATE_COMPLETED; + + return SOF_TASK_STATE_RESCHEDULE; +} + +/** + * @brief Thread function for a TWB task. + * + * This function is responsible for executing the TWB task in a loop. + * It checks the state of the task, runs the task if it is in the running state, + * and handles different task states such as rescheduling, cancellation, and completion. + * It also updates the runtime statistics of the thread and suspends the + * thread if the task is not in the running state. + * + * @param p1 Pointer to the task structure. + * @param p2 Unused parameter. + * @param p3 Unused parameter. 
+ */ +static void twb_thread_fn(void *p1, void *p2, void *p3) +{ + struct task *task = p1; + struct task *ll_tick_src = p2; + (void)p3; + struct task_twb_data *pdata = task->priv_data; + k_thread_runtime_stats_t rt_stats_thread; + enum task_state state; + unsigned int lock_key; + + while (1) { + if (ll_tick_src->state == SOF_TASK_STATE_INIT || ll_tick_src->state == SOF_TASK_STATE_FREE) + schedule_task(ll_tick_src, 0, 0); + + if (task->state == SOF_TASK_STATE_RUNNING) { + state = task_run(task); + } else { + state = task->state; /* to avoid undefined variable warning */ + } + + lock_key = scheduler_twb_lock(); + /* + * check if task is still running, may have been canceled by external call + * if not, set the state returned by run procedure + */ + if (task->state == SOF_TASK_STATE_RUNNING) { + if (pdata->cycles_granted) { + k_thread_runtime_stats_get(pdata->thread_id, &rt_stats_thread); + pdata->cycles_consumed += rt_stats_thread.execution_cycles - pdata->cycles_ref; + pdata->cycles_ref = rt_stats_thread.execution_cycles; + } + switch (state) { + case SOF_TASK_STATE_RESCHEDULE: + /* mark to reschedule, schedule time is already calculated */ + task->state = SOF_TASK_STATE_QUEUED; + break; + case SOF_TASK_STATE_CANCEL: + task->state = SOF_TASK_STATE_CANCEL; + break; + case SOF_TASK_STATE_COMPLETED: + break; + + default: + /* illegal state, serious defect, won't happen */ + scheduler_twb_unlock(lock_key); + k_panic(); + } + } + + scheduler_twb_unlock(lock_key); + + if (state == SOF_TASK_STATE_COMPLETED) { + task->state = SOF_TASK_STATE_COMPLETED; + task_complete(task); + } + + if (state != SOF_TASK_STATE_RUNNING) + k_sem_take(&pdata->sem, K_FOREVER); + }; + /* never be here */ +} + +/** + * Schedule a task in the TWB scheduler. + * + * This function adds a task to the TWB scheduler list, + * recalculate budget and starts the thread associated with the task. + * If there are no TWB tasks scheduled yet, it also runs the LL tick source task. 
+ * + * @param data Pointer to the TWB scheduler data. + * @param task Pointer to the task to be scheduled. + * @param start The start time of the task. + * @param period The period of the task. + * @return 0 on success, or a negative error code on failure. + */ +static int scheduler_twb_task_shedule(void *data, struct task *task, uint64_t start, + uint64_t period) +{ + struct scheduler_twb_data *twb_sch = (struct scheduler_twb_data *)data; + struct task_twb_data *pdata = task->priv_data; + struct list_item *tlist; + struct task *task_iter; + unsigned int lock_key; + bool list_prepend = true; + bool thread_started = true; + uint32_t budget_left = 0; + + lock_key = scheduler_twb_lock(); + + if (task->state != SOF_TASK_STATE_INIT && + task->state != SOF_TASK_STATE_CANCEL && + task->state != SOF_TASK_STATE_COMPLETED) { + scheduler_twb_unlock(lock_key); + return -EINVAL; + } else if (task->state == SOF_TASK_STATE_INIT) { + thread_started = false; + } + + /* add a task to TWB scheduler list */ + task->state = SOF_TASK_STATE_RUNNING; + + /* if there's no TWB tasks scheduled yet, run ll tick source task */ + if (list_is_empty(&twb_sch->tasks)) { + if (!k_is_in_isr()) + schedule_task(&twb_sch->ll_tick_src, 0, 0); + } else { + list_for_item(tlist, &twb_sch->tasks) { + task_iter = container_of(tlist, struct task, list); + if (task == task_iter) { + list_prepend = false; + break; + } + } + } + + if (list_prepend) + list_item_prepend(&task->list, &twb_sch->tasks); + + /* If the task has a cycles budget, calculate the budget_left and set the thread priority */ + if (pdata->cycles_granted) { + if (pdata->cycles_consumed < SYS_TICKS_TO_HW_CYCLES(pdata->cycles_granted)) { + budget_left = HW_CYCLES_TO_SYS_TICKS(SYS_TICKS_TO_HW_CYCLES(pdata->cycles_granted) - pdata->cycles_consumed); + k_thread_priority_set(pdata->thread_id, pdata->thread_prio); + } else { + k_thread_priority_set(pdata->thread_id, CONFIG_TWB_THREAD_LOW_PRIORITY); + } + k_thread_time_slice_set(pdata->thread_id, 
budget_left, &scheduler_twb_task_cb, task); + } + + tr_dbg(&twb_tr, "TWB task %p scheduled with budget %d/%d", task, budget_left, pdata->cycles_granted); + + /* start the thread */ + if (!thread_started) + k_thread_start(pdata->thread_id); + else + k_sem_give(&pdata->sem); + + scheduler_twb_unlock(lock_key); + + return 0; +} + +static int scheduler_twb_task_cancel(void *data, struct task *task) +{ + struct task_twb_data *task_pdata = task->priv_data; + k_thread_runtime_stats_t rt_stats_thread; + unsigned int lock_key; + + lock_key = scheduler_twb_lock(); + + if (task_pdata->cycles_granted) { + /* Get the stats and update the consumed cycles */ + k_thread_runtime_stats_get(task_pdata->thread_id, &rt_stats_thread); + task_pdata->cycles_consumed += rt_stats_thread.execution_cycles - task_pdata->cycles_ref; + task_pdata->cycles_ref = rt_stats_thread.execution_cycles; + } + + /* Set the task state to CANCEL */ + task->state = SOF_TASK_STATE_CANCEL; + + scheduler_twb_unlock(lock_key); + + return 0; +} + +static int scheduler_twb_task_free(void *data, struct task *task) +{ + struct task_twb_data *pdata = task->priv_data; + + scheduler_twb_task_cancel(data, task); + + list_item_del(&task->list); + + /* abort the execution of the thread */ + k_thread_abort(pdata->thread_id); + + /* free task stack */ + rfree((__sparse_force void *)pdata->p_stack); + + /* all other memory has been allocated as a single malloc, will be freed later by caller */ + return 0; +} + +static struct scheduler_ops schedule_twb_ops = { + .schedule_task = scheduler_twb_task_shedule, + .schedule_task_cancel = scheduler_twb_task_cancel, + .schedule_task_free = scheduler_twb_task_free, +}; + +int scheduler_twb_init(void) +{ + struct scheduler_twb_data *twb_sch = rzalloc(SOF_MEM_ZONE_SYS_RUNTIME, 0, SOF_MEM_CAPS_RAM, + sizeof(struct scheduler_twb_data)); + int ret; + + if (!twb_sch) + return -ENOMEM; + + list_init(&twb_sch->tasks); + + scheduler_init(SOF_SCHEDULE_TWB, &schedule_twb_ops, twb_sch); + + /* 
init src of TWB tick */ + ret = schedule_task_init_ll(&twb_sch->ll_tick_src, + SOF_UUID(twb_sched_uuid), + SOF_SCHEDULE_LL_TIMER, + 0, scheduler_twb_ll_tick, twb_sch, + cpu_get_id(), 0); + + return ret; +} + +int scheduler_twb_task_init(struct task **task, + const struct sof_uuid_entry *uid, + const struct task_ops *ops, + void *data, + int32_t core, + const char *name, + size_t stack_size, + int32_t thread_priority, + uint32_t cycles_granted) +{ + struct scheduler_twb_data *twb_sch; + void __sparse_cache *p_stack = NULL; + k_tid_t thread_id = NULL; + int ret; + + /* memory allocation helper structure */ + struct { + struct task task; + struct task_twb_data pdata; + struct k_thread thread; + } *task_memory; + + twb_sch = scheduler_get_data(SOF_SCHEDULE_TWB); + if (!twb_sch) { + tr_err(&twb_tr, "scheduler_twb_task_init(): TWB not initialized"); + return -EINVAL; + } + + /* must be called on the same core the task will be binded to */ + assert(cpu_get_id() == core); + + if (thread_priority < 0) { + tr_err(&twb_tr, "scheduler_twb_task_init(): non preemptible priority"); + return -EINVAL; + } + + /* + * allocate memory + * to avoid multiple malloc operations allocate all required memory as a single structure + * and return pointer to task_memory->task + * As the structure contains zephyr kernel specific data, it must be located in + * shared, non cached memory + */ + task_memory = rzalloc(SOF_MEM_ZONE_RUNTIME_SHARED, 0, SOF_MEM_CAPS_RAM, + sizeof(*task_memory)); + if (!task_memory) { + tr_err(&twb_tr, "scheduler_twb_task_init(): memory alloc failed"); + return -ENOMEM; + } + + /* allocate stack - must be aligned and cached so a separate alloc */ + stack_size = Z_KERNEL_STACK_SIZE_ADJUST(stack_size); + p_stack = (__sparse_force void __sparse_cache *) + rballoc_align(0, SOF_MEM_CAPS_RAM, stack_size, Z_KERNEL_STACK_OBJ_ALIGN); + if (!p_stack) { + tr_err(&twb_tr, "scheduler_twb_task_init(): stack alloc failed"); + ret = -ENOMEM; + goto err; + } + + /* create a zephyr 
thread for the task */ + thread_id = k_thread_create(&task_memory->thread, (__sparse_force void *)p_stack, + stack_size, twb_thread_fn, &task_memory->task, &twb_sch->ll_tick_src, NULL, + thread_priority, K_USER, K_FOREVER); + if (!thread_id) { + ret = -EFAULT; + tr_err(&twb_tr, "scheduler_twb_task_init(): zephyr thread create failed"); + goto err; + } + + /* pin the thread to specific core */ + ret = k_thread_cpu_pin(thread_id, core); + if (ret < 0) { + ret = -EFAULT; + tr_err(&twb_tr, "scheduler_twb_task_init(): zephyr task pin to core %d failed", core); + goto err; + } + + /* set the thread name */ + if (name) { + ret = k_thread_name_set(thread_id, name); + if (ret < 0) + tr_warn(&twb_tr, "scheduler_twb_task_init(): failed to set thread name"); + } + + /* internal SOF task init */ + ret = schedule_task_init(&task_memory->task, uid, SOF_SCHEDULE_TWB, thread_priority, + ops->run, data, core, 0); + if (ret < 0) { + tr_err(&twb_tr, "scheduler_twb_task_init(): schedule_task_init failed"); + goto err; + } + + /* unlimited mcps budget */ + if (cycles_granted >= ZEPHYR_TWB_BUDGET_MAX) + cycles_granted = 0; + + /* initialize other task structures */ + task_memory->task.ops.complete = ops->complete; + task_memory->task.ops.get_deadline = ops->get_deadline; + + /* success, fill the structures */ + task_memory->task.priv_data = &task_memory->pdata; + task_memory->pdata.thread_id = thread_id; + task_memory->pdata.p_stack = p_stack; + task_memory->pdata.thread_prio = thread_priority; + task_memory->pdata.cycles_granted = cycles_granted; + task_memory->pdata.cycles_consumed = 0; + task_memory->pdata.cycles_ref = 0; + *task = &task_memory->task; + + k_sem_init(&task_memory->pdata.sem, 0, 10); + + tr_dbg(&twb_tr, "TWB task %p initialized: thread: %p, core: %d, prio: %d, budget: %d", + task, thread_id, core, thread_priority, cycles_granted); + + return 0; +err: + /* cleanup - free all allocated resources */ + if (thread_id) + k_thread_abort(thread_id); + + rfree((__sparse_force 
void *)p_stack); + rfree(task_memory); + return ret; +} diff --git a/uuid-registry.txt b/uuid-registry.txt index 4d77005ee1a7..56e946940c72 100644 --- a/uuid-registry.txt +++ b/uuid-registry.txt @@ -155,6 +155,7 @@ dd511749-d9fa-455c-b3a713585693f1af tdfb 37c196ae-3532-4282-8a78dd9d50cc7123 testbench 08aeb4ff-7f68-4c71-86c3c842b4262898 tester 04e3f894-2c5c-4f2e-8dc1694eeaab53fa tone +e93326d8-0d14-4bf0-bcb9e063d3d80136 twb_sched 42f8060c-832f-4dbf-b24751e961997b34 up_down_mixer b77e677e-5ff4-4188-af14fba8bdbf8682 volume 8a171323-94a3-4e1d-afe9fe5dbaa4c393 volume4 diff --git a/zephyr/CMakeLists.txt b/zephyr/CMakeLists.txt index 2d83911c884e..b8cf93855fa1 100644 --- a/zephyr/CMakeLists.txt +++ b/zephyr/CMakeLists.txt @@ -274,6 +274,10 @@ if (CONFIG_SOC_SERIES_INTEL_ADSP_ACE) ${SOF_SRC_PATH}/schedule/zephyr_dp_schedule.c ) + zephyr_library_sources_ifdef(CONFIG_ZEPHYR_TWB_SCHEDULER + ${SOF_SRC_PATH}/schedule/zephyr_twb_schedule.c + ) + # Sources for virtual heap management zephyr_library_sources( lib/regions_mm.c diff --git a/zephyr/edf_schedule.c b/zephyr/edf_schedule.c index 6588af5ea05e..8aca007137e7 100644 --- a/zephyr/edf_schedule.c +++ b/zephyr/edf_schedule.c @@ -107,7 +107,7 @@ int scheduler_init_edf(void) k_work_queue_start(&edf_workq, edf_workq_stack, K_THREAD_STACK_SIZEOF(edf_workq_stack), - EDF_ZEPHYR_PRIORITY, NULL); + CONFIG_EDF_THREAD_PRIORITY, NULL); k_thread_suspend(thread);