1414#include <ipc4/base_fw.h>
1515#include <ipc4/error_status.h>
1616#include <ipc4/logging.h>
17+ #include <zephyr/kernel.h>
1718#if !CONFIG_LIBRARY
1819#include <zephyr/logging/log_backend.h>
1920#include <zephyr/logging/log.h>
@@ -51,7 +52,7 @@ LOG_MODULE_REGISTER(mtrace, CONFIG_SOF_LOG_LEVEL);
5152 */
#define IPC4_MTRACE_AGING_TIMER_MIN_MS 100

/* Uptime (ms, k_uptime_get()) when the last buffer-status notification
 * was sent; used to rate-limit notifications to mtrace_aging_timer.
 */
static uint64_t mtrace_notify_last_sent;
@@ -63,7 +64,10 @@ struct task mtrace_task;
6364
6465#define MTRACE_IPC_CORE PLATFORM_PRIMARY_CORE_ID
6566
66- static void mtrace_log_hook (size_t written , size_t space_left )
67+ static struct k_mutex log_mutex ;
68+ static struct k_work_delayable log_work ;
69+
70+ static void mtrace_log_hook_unlocked (size_t written , size_t space_left )
6771{
6872 uint64_t delta ;
6973
@@ -74,8 +78,10 @@ static void mtrace_log_hook(size_t written, size_t space_left)
7478 * figure out a safe way to wake up the mtrace task
7579 * from another core.
7680 */
77- if (arch_proc_id () != MTRACE_IPC_CORE )
81+ if (arch_proc_id () != MTRACE_IPC_CORE ) {
82+ k_mutex_unlock (& log_mutex );
7883 return ;
84+ }
7985
8086 delta = k_uptime_get () - mtrace_notify_last_sent ;
8187
@@ -84,22 +90,30 @@ static void mtrace_log_hook(size_t written, size_t space_left)
8490 ipc_send_buffer_status_notify ();
8591 mtrace_notify_last_sent = k_uptime_get ();
8692 mtrace_bytes_pending = 0 ;
93+ } else {
94+ k_work_schedule_for_queue (& ipc_get ()-> ipc_send_wq , & log_work ,
95+ K_MSEC (mtrace_aging_timer - delta ));
8796 }
8897}
8998
90- static enum task_state mtrace_task_run ( void * data )
99+ static void mtrace_log_hook ( size_t written , size_t space_left )
91100{
92- if ( k_uptime_get () - mtrace_notify_last_sent >= mtrace_aging_timer &&
93- mtrace_bytes_pending )
94- mtrace_log_hook ( 0 , 0 );
101+ k_mutex_lock ( & log_mutex , K_FOREVER );
102+
103+ mtrace_log_hook_unlocked ( written , space_left );
95104
96- /* task will be re-run based on mtrace_task_deadline() */
97- return SOF_TASK_STATE_RESCHEDULE ;
105+ k_mutex_unlock (& log_mutex );
98106}
99107
100- static uint64_t mtrace_task_deadline ( void * data )
108+ static void log_work_handler ( struct k_work * work )
101109{
102- return k_uptime_ticks () + k_ms_to_ticks_ceil64 (mtrace_aging_timer );
110+ k_mutex_lock (& log_mutex , K_FOREVER );
111+
112+ if (k_uptime_get () - mtrace_notify_last_sent >= mtrace_aging_timer &&
113+ mtrace_bytes_pending )
114+ mtrace_log_hook_unlocked (0 , 0 );
115+
116+ k_mutex_unlock (& log_mutex );
103117}
104118
105119int ipc4_logging_enable_logs (bool first_block ,
@@ -109,11 +123,6 @@ int ipc4_logging_enable_logs(bool first_block,
109123{
110124 const struct log_backend * log_backend = log_backend_adsp_mtrace_get ();
111125 const struct ipc4_log_state_info * log_state ;
112- struct task_ops ops = {
113- .run = mtrace_task_run ,
114- .get_deadline = mtrace_task_deadline ,
115- .complete = NULL ,
116- };
117126
118127 if (!(first_block && last_block )) {
119128 LOG_ERR ("log_state data is expected to be sent as one chunk" );
@@ -125,6 +134,9 @@ int ipc4_logging_enable_logs(bool first_block,
125134 return - EINVAL ;
126135 }
127136
137+ k_mutex_init (& log_mutex );
138+ k_work_init_delayable (& log_work , log_work_handler );
139+
128140 dcache_invalidate_region ((__sparse_force void __sparse_cache * )data , data_offset_or_size );
129141
130142 /*
@@ -144,14 +156,9 @@ int ipc4_logging_enable_logs(bool first_block,
144156 LOG_WRN ("Too small aging timer value, limiting to %u\n" ,
145157 mtrace_aging_timer );
146158 }
147-
148- schedule_task_init_edf (& mtrace_task , SOF_UUID (mtrace_task_uuid ),
149- & ops , NULL , MTRACE_IPC_CORE , 0 );
150- schedule_task (& mtrace_task , mtrace_aging_timer * 1000 , 0 );
151159 } else {
152160 adsp_mtrace_log_init (NULL );
153161 log_backend_deactivate (log_backend );
154- schedule_task_cancel (& mtrace_task );
155162 }
156163
157164 return 0 ;
@@ -208,9 +215,7 @@ int ipc4_logging_enable_logs(bool first_block,
208215
209216int ipc4_logging_shutdown (void )
210217{
211- struct ipc4_log_state_info log_state = { 0 };
212-
213- /* log_state.enable set to 0 above */
218+ struct ipc4_log_state_info log_state = { .enable = 0 , };
214219
215220 return ipc4_logging_enable_logs (true, true, sizeof (log_state ), (char * )& log_state );
216221}