 #include <ipc4/base_fw.h>
 #include <ipc4/error_status.h>
 #include <ipc4/logging.h>
+#include <rtos/kernel.h>
 #if !CONFIG_LIBRARY
 #include <zephyr/logging/log_backend.h>
 #include <zephyr/logging/log.h>
@@ -51,19 +52,18 @@ LOG_MODULE_REGISTER(mtrace, CONFIG_SOF_LOG_LEVEL);
  */
 #define IPC4_MTRACE_AGING_TIMER_MIN_MS 100
 
-SOF_DEFINE_REG_UUID(mtrace_task);
-
 static uint64_t mtrace_notify_last_sent;
 
 static uint32_t mtrace_bytes_pending;
 
 static uint32_t mtrace_aging_timer = IPC4_MTRACE_NOTIFY_AGING_TIMER_MS;
 
-struct task mtrace_task;
-
 #define MTRACE_IPC_CORE PLATFORM_PRIMARY_CORE_ID
 
-static void mtrace_log_hook(size_t written, size_t space_left)
+static struct k_mutex log_mutex;
+static struct k_work_delayable log_work;
+
+static void mtrace_log_hook_unlocked(size_t written, size_t space_left)
 {
 	uint64_t delta;
 
@@ -84,22 +84,30 @@ static void mtrace_log_hook(size_t written, size_t space_left)
 		ipc_send_buffer_status_notify();
 		mtrace_notify_last_sent = k_uptime_get();
 		mtrace_bytes_pending = 0;
+	} else {
+		k_work_schedule_for_queue(&ipc_get()->ipc_send_wq, &log_work,
+					  K_MSEC(mtrace_aging_timer - delta));
 	}
 }
 
-static enum task_state mtrace_task_run(void *data)
+static void mtrace_log_hook(size_t written, size_t space_left)
 {
-	if (k_uptime_get() - mtrace_notify_last_sent >= mtrace_aging_timer &&
-	    mtrace_bytes_pending)
-		mtrace_log_hook(0, 0);
+	k_mutex_lock(&log_mutex, K_FOREVER);
+
+	mtrace_log_hook_unlocked(written, space_left);
 
-	/* task will be re-run based on mtrace_task_deadline() */
-	return SOF_TASK_STATE_RESCHEDULE;
+	k_mutex_unlock(&log_mutex);
 }
 
-static uint64_t mtrace_task_deadline(void *data)
+static void log_work_handler(struct k_work *work)
 {
-	return k_uptime_ticks() + k_ms_to_ticks_ceil64(mtrace_aging_timer);
+	k_mutex_lock(&log_mutex, K_FOREVER);
+
+	if (k_uptime_get() - mtrace_notify_last_sent >= mtrace_aging_timer &&
+	    mtrace_bytes_pending)
+		mtrace_log_hook_unlocked(0, 0);
+
+	k_mutex_unlock(&log_mutex);
 }
 
 int ipc4_logging_enable_logs(bool first_block,
@@ -109,11 +117,6 @@ int ipc4_logging_enable_logs(bool first_block,
109117{
110118 const struct log_backend * log_backend = log_backend_adsp_mtrace_get ();
111119 const struct ipc4_log_state_info * log_state ;
112- struct task_ops ops = {
113- .run = mtrace_task_run ,
114- .get_deadline = mtrace_task_deadline ,
115- .complete = NULL ,
116- };
117120
118121 if (!(first_block && last_block )) {
119122 LOG_ERR ("log_state data is expected to be sent as one chunk" );
@@ -125,6 +128,9 @@ int ipc4_logging_enable_logs(bool first_block,
 		return -EINVAL;
 	}
 
+	k_mutex_init(&log_mutex);
+	k_work_init_delayable(&log_work, log_work_handler);
+
 	dcache_invalidate_region((__sparse_force void __sparse_cache *)data, data_offset_or_size);
 
 	/*
@@ -144,14 +150,9 @@ int ipc4_logging_enable_logs(bool first_block,
 		LOG_WRN("Too small aging timer value, limiting to %u\n",
 			mtrace_aging_timer);
 	}
-
-		schedule_task_init_edf(&mtrace_task, SOF_UUID(mtrace_task_uuid),
-				       &ops, NULL, MTRACE_IPC_CORE, 0);
-		schedule_task(&mtrace_task, mtrace_aging_timer * 1000, 0);
 	} else {
 		adsp_mtrace_log_init(NULL);
 		log_backend_deactivate(log_backend);
-		schedule_task_cancel(&mtrace_task);
 	}
 
 	return 0;
@@ -208,9 +209,7 @@ int ipc4_logging_enable_logs(bool first_block,
 
 int ipc4_logging_shutdown(void)
 {
-	struct ipc4_log_state_info log_state = { 0 };
-
-	/* log_state.enable set to 0 above */
+	struct ipc4_log_state_info log_state = { .enable = 0, };
 
 	return ipc4_logging_enable_logs(true, true, sizeof(log_state), (char *)&log_state);
 }
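
The hunks above replace SOF's EDF-scheduled mtrace_task with a Zephyr delayable work item queued on the IPC send work queue (ipc_get()->ipc_send_wq), with a k_mutex serializing the logging hook against the deferred flush. Below is a minimal, self-contained sketch of the same aging-timer pattern, assuming only the stock Zephyr kernel API; it uses the default system work queue rather than a dedicated one, and every notify_* identifier plus AGING_MS is illustrative, not taken from this commit.

#include <zephyr/kernel.h>
#include <zephyr/sys/printk.h>

#define AGING_MS 100

static struct k_mutex notify_lock;
static struct k_work_delayable notify_work;
static int64_t last_sent_ms;
static uint32_t pending_bytes;

/* Caller must hold notify_lock. */
static void notify_flush_unlocked(uint32_t written)
{
	int64_t delta = k_uptime_get() - last_sent_ms;

	pending_bytes += written;

	if (delta >= AGING_MS) {
		/* A real backend would raise its host notification here. */
		printk("flush %u bytes\n", pending_bytes);
		last_sent_ms = k_uptime_get();
		pending_bytes = 0;
	} else {
		/*
		 * Arm the aging timer for the remainder of the period.
		 * k_work_schedule() only acts on an idle work item, so
		 * repeated calls do not keep pushing the deadline out.
		 */
		k_work_schedule(&notify_work, K_MSEC(AGING_MS - delta));
	}
}

static void notify_work_handler(struct k_work *work)
{
	k_mutex_lock(&notify_lock, K_FOREVER);
	if (pending_bytes)
		notify_flush_unlocked(0);
	k_mutex_unlock(&notify_lock);
}

/* Producer-side hook; mutexes require thread context, as in the commit. */
void notify_produce(uint32_t written)
{
	k_mutex_lock(&notify_lock, K_FOREVER);
	notify_flush_unlocked(written);
	k_mutex_unlock(&notify_lock);
}

void notify_init(void)
{
	k_mutex_init(&notify_lock);
	k_work_init_delayable(&notify_work, notify_work_handler);
}

One property worth noting: k_work_schedule() and the k_work_schedule_for_queue() variant used in the commit arm a delayable work item only if it is idle; if a flush is already pending, further hook calls leave the existing deadline untouched (unlike k_work_reschedule()), so a steady stream of log writes cannot postpone the aging flush indefinitely.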