 #include <sof/audio/component.h>
 #include <sof/audio/module_adapter/module/generic.h>
 #include <rtos/task.h>
+#include <rtos/userspace_helper.h>
 #include <stdint.h>
 #include <sof/schedule/dp_schedule.h>
 #include <sof/schedule/ll_schedule.h>
 #include <rtos/interrupt.h>
 #include <zephyr/kernel.h>
 #include <zephyr/sys_clock.h>
+#include <zephyr/sys/sem.h>
+#include <zephyr/sys/mutex.h>
 #include <sof/lib/notifier.h>
 #include <ipc4/base_fw.h>
 
@@ -34,20 +37,58 @@ struct scheduler_dp_data {
 
 struct task_dp_pdata {
 	k_tid_t thread_id;		/* zephyr thread ID */
-	struct k_thread thread;		/* memory space for a thread */
+	struct k_thread *thread;	/* pointer to the kernel's thread object */
+	struct k_thread thread_struct;	/* thread object for kernel threads */
 	uint32_t deadline_clock_ticks;	/* dp module deadline in Zephyr ticks */
 	k_thread_stack_t __sparse_cache *p_stack;	/* pointer to thread stack */
 	size_t stack_size;		/* size of the stack in bytes */
-	struct k_sem sem;		/* semaphore for task scheduling */
+	struct k_sem *sem;		/* pointer to the semaphore for task scheduling */
+	struct k_sem sem_struct;	/* semaphore for task scheduling for kernel threads */
 	struct processing_module *mod;	/* the module to be scheduled */
 	uint32_t ll_cycles_to_start;	/* current number of LL cycles till delayed start */
 };
 
+#ifdef CONFIG_USERSPACE
+/* Single CPU-wide lock, one instance per core.
+ * irq_lock() is not available to USERSPACE (non-privileged) threads,
+ * so a semaphore is used to guard the critical section instead.
+ */
+#define DP_LOCK_INIT(i, _) Z_SEM_INITIALIZER(dp_lock[i], 1, 1)
+#define DP_LOCK_INIT_LIST LISTIFY(CONFIG_MP_MAX_NUM_CPUS, DP_LOCK_INIT, (,))
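+/* LISTIFY() expands to one initializer per core; with two cores, e.g.:
+ *   Z_SEM_INITIALIZER(dp_lock[0], 1, 1), Z_SEM_INITIALIZER(dp_lock[1], 1, 1)
+ * Every per-core semaphore starts available with a maximum count of 1,
+ * i.e. it behaves as a binary lock.
+ */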
+
+/* User threads don't need direct access to this array; it is reached from
+ * kernel space via syscalls. The array must be placed in a special section
+ * so that the gen_kobject_list.py script recognizes the semaphores as
+ * initialized kernel objects.
+ */
+static
+STRUCT_SECTION_ITERABLE_ARRAY(k_sem, dp_lock, CONFIG_MP_MAX_NUM_CPUS) = { DP_LOCK_INIT_LIST };
+
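+/* Mirrors the irq_lock()/irq_unlock() contract used below: scheduler_dp_lock()
+ * returns a "key" (here simply the core index) which the caller passes back to
+ * scheduler_dp_unlock(), so both implementations share one calling convention.
+ */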
+static inline unsigned int scheduler_dp_lock(uint16_t core)
+{
+	k_sem_take(&dp_lock[core], K_FOREVER);
+	return core;
+}
+
+static inline void scheduler_dp_unlock(unsigned int key)
+{
+	k_sem_give(&dp_lock[key]);
+}
+
+static inline void scheduler_dp_grant(k_tid_t thread_id, uint16_t core)
+{
+	k_thread_access_grant(thread_id, &dp_lock[core]);
+}
+
+#else /* CONFIG_USERSPACE */
+
+static inline void scheduler_dp_grant(k_tid_t thread_id, uint16_t core)
+{
+}
+
 /* Single CPU-wide lock
  * as each per-core instance of dp-scheduler has separate structures, it is enough to
  * use irq_lock instead of cross-core spinlocks
  */
-static inline unsigned int scheduler_dp_lock(void)
+static inline unsigned int scheduler_dp_lock(uint16_t core)
 {
 	return irq_lock();
 }
@@ -56,6 +97,7 @@ static inline void scheduler_dp_unlock(unsigned int key)
 {
 	irq_unlock(key);
 }
+#endif /* CONFIG_USERSPACE */
 
 /* dummy LL task - to start LL on secondary cores */
 static enum task_state scheduler_dp_ll_tick_dummy(void *data)
@@ -226,7 +268,7 @@ void scheduler_dp_ll_tick(void *receiver_data, enum notify_id event_type, void *
 	unsigned int lock_key;
 	struct scheduler_dp_data *dp_sch = scheduler_get_data(SOF_SCHEDULE_DP);
 
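+	/* Take the current core's lock: each core runs its own dp-scheduler
+	 * instance with separate structures, so the local core's semaphore
+	 * (or masked local interrupts) is sufficient.
+	 */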
-	lock_key = scheduler_dp_lock();
+	lock_key = scheduler_dp_lock(cpu_get_id());
 	list_for_item(tlist, &dp_sch->tasks) {
 		curr_task = container_of(tlist, struct task, list);
 		pdata = curr_task->priv_data;
@@ -256,7 +298,7 @@ void scheduler_dp_ll_tick(void *receiver_data, enum notify_id event_type, void *
 
 			/* trigger the task */
 			curr_task->state = SOF_TASK_STATE_RUNNING;
-			k_sem_give(&pdata->sem);
+			k_sem_give(pdata->sem);
 		}
 	}
 }
@@ -271,7 +313,7 @@ static int scheduler_dp_task_cancel(void *data, struct task *task)
 
 
 	/* this is an async cancel - mark the task as canceled and remove it from scheduling */
-	lock_key = scheduler_dp_lock();
+	lock_key = scheduler_dp_lock(cpu_get_id());
 
 	task->state = SOF_TASK_STATE_CANCEL;
 	list_item_del(&task->list);
@@ -281,7 +323,7 @@ static int scheduler_dp_task_cancel(void *data, struct task *task)
 		schedule_task_cancel(&dp_sch->ll_tick_src);
 
 	/* if the task is waiting on a semaphore - let it run and self-terminate */
-	k_sem_give(&pdata->sem);
+	k_sem_give(pdata->sem);
 	scheduler_dp_unlock(lock_key);
 
 	/* wait till the task has finished, if there was any task created */
@@ -294,6 +336,7 @@ static int scheduler_dp_task_cancel(void *data, struct task *task)
 static int scheduler_dp_task_free(void *data, struct task *task)
 {
 	struct task_dp_pdata *pdata = task->priv_data;
+	int ret;
 
 	scheduler_dp_task_cancel(data, task);
 
@@ -305,12 +348,19 @@ static int scheduler_dp_task_free(void *data, struct task *task)
 		pdata->thread_id = NULL;
 	}
 
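+	/* Semaphore/thread objects for K_USER tasks were taken from the kernel
+	 * object pool with k_object_alloc(), so return them there. Statically
+	 * embedded objects are freed together with the enclosing allocation.
+	 */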
+#ifdef CONFIG_USERSPACE
+	if (pdata->sem != &pdata->sem_struct)
+		k_object_free(pdata->sem);
+	if (pdata->thread != &pdata->thread_struct)
+		k_object_free(pdata->thread);
+#endif
+
 	/* free task stack */
-	rfree((__sparse_force void *)pdata->p_stack);
+	ret = user_stack_free((__sparse_force void *)pdata->p_stack);
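+	/* user_stack_free()/user_stack_allocate() come from
+	 * <rtos/userspace_helper.h> and presumably select the proper allocator
+	 * for kernel vs. user (K_USER) stacks, which is why the status is now
+	 * propagated instead of calling rfree() unconditionally.
+	 */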
 	pdata->p_stack = NULL;
 
 	/* all other memory was allocated as a single malloc and will be freed later by the caller */
-	return 0;
+	return ret;
 }
 
 /* Thread function called in component context, on target core */
@@ -329,14 +379,14 @@ static void dp_thread_fn(void *p1, void *p2, void *p3)
 	 * the thread is started immediately after creation, it will stop on the semaphore.
 	 * The semaphore will be released once the task is ready to process
 	 */
-	k_sem_take(&task_pdata->sem, K_FOREVER);
+	k_sem_take(task_pdata->sem, K_FOREVER);
 
 	if (task->state == SOF_TASK_STATE_RUNNING)
 		state = task_run(task);
 	else
 		state = task->state;	/* to avoid undefined variable warning */
 
-	lock_key = scheduler_dp_lock();
+	lock_key = scheduler_dp_lock(task->core);
 	/*
 	 * check if task is still running, may have been canceled by external call
 	 * if not, set the state returned by run procedure
@@ -382,7 +432,7 @@ static int scheduler_dp_task_shedule(void *data, struct task *task, uint64_t sta
 	uint64_t deadline_clock_ticks;
 	int ret;
 
-	lock_key = scheduler_dp_lock();
+	lock_key = scheduler_dp_lock(cpu_get_id());
 
 	if (task->state != SOF_TASK_STATE_INIT &&
 	    task->state != SOF_TASK_STATE_CANCEL &&
@@ -392,19 +442,36 @@ static int scheduler_dp_task_shedule(void *data, struct task *task, uint64_t sta
 	}
 
 	/* create a zephyr thread for the task */
-	pdata->thread_id = k_thread_create(&pdata->thread, (__sparse_force void *)pdata->p_stack,
+	pdata->thread_id = k_thread_create(pdata->thread, (__sparse_force void *)pdata->p_stack,
 					   pdata->stack_size, dp_thread_fn, task, NULL, NULL,
-					   CONFIG_DP_THREAD_PRIORITY, K_USER, K_FOREVER);
+					   CONFIG_DP_THREAD_PRIORITY, task->flags, K_FOREVER);
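+	/* task->flags now carries the thread options (e.g. K_USER) given to
+	 * scheduler_dp_task_init(); presumably schedule_task_init() stores its
+	 * `options` argument there.
+	 */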
+	if (!pdata->thread_id) {
+		tr_err(&dp_tr, "DP thread creation failed");
+		scheduler_dp_unlock(lock_key);
+		return -ECHILD;
+	}
 
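+	/* Grant object permissions before starting the thread: a user thread
+	 * may only take/give kernel objects it has been granted access to.
+	 */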
+	k_thread_access_grant(pdata->thread_id, pdata->sem);
+	scheduler_dp_grant(pdata->thread_id, cpu_get_id());
+
 	/* pin the thread to specific core */
 	ret = k_thread_cpu_pin(pdata->thread_id, task->core);
 	if (ret < 0) {
-		tr_err(&dp_tr, "zephyr_dp_task_init(): zephyr task pin to core failed");
+		tr_err(&dp_tr, "zephyr task pin to core failed");
 		goto err;
 	}
 
-	/* start the thread, it should immediately stop at a semaphore, so clean it */
-	k_sem_reset(&pdata->sem);
+#ifdef CONFIG_USERSPACE
+	if (task->flags & K_USER) {
+		ret = user_memory_init_shared(pdata->thread_id, pdata->mod);
+		if (ret < 0) {
+			tr_err(&dp_tr, "user_memory_init_shared() failed");
+			goto err;
+		}
+	}
+#endif /* CONFIG_USERSPACE */
+
+	/* start the thread; it will immediately block on the semaphore, which
+	 * is initialized as taken (count 0, limit 1)
+	 */
+	k_sem_init(pdata->sem, 0, 1);
 	k_thread_start(pdata->thread_id);
 
 	/* if there are no DP tasks scheduled yet, run the ll tick source task */
@@ -474,7 +541,8 @@ int scheduler_dp_task_init(struct task **task,
 			   const struct task_ops *ops,
 			   struct processing_module *mod,
 			   uint16_t core,
-			   size_t stack_size)
+			   size_t stack_size,
+			   uint32_t options)
 {
 	void __sparse_cache *p_stack = NULL;
 
@@ -496,17 +564,15 @@ int scheduler_dp_task_init(struct task **task,
 	 * As the structure contains zephyr kernel specific data, it must be located in
 	 * shared, non cached memory
 	 */
-	task_memory = rzalloc(SOF_MEM_FLAG_KERNEL | SOF_MEM_FLAG_COHERENT,
-			      sizeof(*task_memory));
+	task_memory = rzalloc(SOF_MEM_FLAG_KERNEL | SOF_MEM_FLAG_COHERENT |
+			      SOF_MEM_FLAG_USER_SHARED, sizeof(*task_memory));
 	if (!task_memory) {
 		tr_err(&dp_tr, "zephyr_dp_task_init(): memory alloc failed");
 		return -ENOMEM;
 	}
 
 	/* allocate stack - must be aligned and cached so a separate alloc */
-	stack_size = Z_KERNEL_STACK_SIZE_ADJUST(stack_size);
-	p_stack = (__sparse_force void __sparse_cache *)
-		rballoc_align(SOF_MEM_FLAG_KERNEL, stack_size, Z_KERNEL_STACK_OBJ_ALIGN);
+	p_stack = user_stack_allocate(stack_size, options);
 	if (!p_stack) {
 		tr_err(&dp_tr, "zephyr_dp_task_init(): stack alloc failed");
 		ret = -ENOMEM;
@@ -515,33 +581,57 @@ int scheduler_dp_task_init(struct task **task,
 
 	/* internal SOF task init */
 	ret = schedule_task_init(&task_memory->task, uid, SOF_SCHEDULE_DP, 0, ops->run,
-				 mod, core, 0);
+				 mod, core, options);
 	if (ret < 0) {
 		tr_err(&dp_tr, "zephyr_dp_task_init(): schedule_task_init failed");
 		goto err;
 	}
 
+	/* Point at the embedded k_sem/k_thread objects used for kernel-thread
+	 * synchronization; for K_USER threads these pointers are overwritten
+	 * below with dynamically allocated objects.
+	 */
+	task_memory->pdata.sem = &task_memory->pdata.sem_struct;
+	task_memory->pdata.thread = &task_memory->pdata.thread_struct;
+
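+	/* A K_USER task cannot use the objects embedded in task_memory: heap
+	 * memory is not part of the kernel object table, and user threads may
+	 * only be granted kernel objects the kernel can validate. Allocate
+	 * tracked objects from the kernel object pool instead.
+	 */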
+#ifdef CONFIG_USERSPACE
+	if (options & K_USER) {
+		task_memory->pdata.sem = k_object_alloc(K_OBJ_SEM);
+		if (!task_memory->pdata.sem) {
+			tr_err(&dp_tr, "Semaphore object allocation failed");
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		task_memory->pdata.thread = k_object_alloc(K_OBJ_THREAD);
+		if (!task_memory->pdata.thread) {
+			tr_err(&dp_tr, "Thread object allocation failed");
+			ret = -ENOMEM;
+			goto err;
+		}
+	}
+#endif /* CONFIG_USERSPACE */
+
 	/* initialize other task structures */
 	task_memory->task.ops.complete = ops->complete;
 	task_memory->task.ops.get_deadline = ops->get_deadline;
 	task_memory->task.state = SOF_TASK_STATE_INIT;
 	task_memory->task.core = core;
 
-	/* initialize semaprhore */
-	k_sem_init(&task_memory->pdata.sem, 0, 1);
-
 	/* success, fill the structures */
 	task_memory->task.priv_data = &task_memory->pdata;
 	task_memory->pdata.p_stack = p_stack;
 	task_memory->pdata.stack_size = stack_size;
 	task_memory->pdata.mod = mod;
 	*task = &task_memory->task;
 
-
 	return 0;
 err:
 	/* cleanup - free all allocated resources */
-	rfree((__sparse_force void *)p_stack);
+	if (user_stack_free((__sparse_force void *)p_stack))
+		tr_err(&dp_tr, "user_stack_free failed!");
+
+	/* k_object_free() looks the pointer up in the dynamic-object list, so
+	 * it is safe to pass a pointer that was never dynamically allocated
+	 */
+	k_object_free(task_memory->pdata.sem);
+	k_object_free(task_memory->pdata.thread);
 	rfree(task_memory);
 	return ret;
 }
@@ -554,7 +644,7 @@ void scheduler_get_task_info_dp(struct scheduler_props *scheduler_props, uint32_
 	struct scheduler_dp_data *dp_sch =
 		(struct scheduler_dp_data *)scheduler_get_data(SOF_SCHEDULE_DP);
 
-	lock_key = scheduler_dp_lock();
+	lock_key = scheduler_dp_lock(cpu_get_id());
 	scheduler_get_task_info(scheduler_props, data_off_size, &dp_sch->tasks);
 	scheduler_dp_unlock(lock_key);
 }