From 5994c576330c6505068bca96f680f10b887fc4a6 Mon Sep 17 00:00:00 2001
From: Serhiy Katsyuba
Date: Mon, 3 Mar 2025 16:24:35 +0100
Subject: [PATCH 1/6] kpb: Fix timestamp type

period_copy_start is a timestamp measured in wall-clock cycles. It
should be a 64-bit unsigned value.

Signed-off-by: Serhiy Katsyuba
---
 src/audio/kpb.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/audio/kpb.c b/src/audio/kpb.c
index 064b3acddb45..059c2f16bdb7 100644
--- a/src/audio/kpb.c
+++ b/src/audio/kpb.c
@@ -1733,7 +1733,7 @@ static enum task_state kpb_draining_task(void *arg)
 	uint64_t current_time;
 	size_t period_bytes = 0;
 	size_t period_bytes_limit = draining_data->pb_limit;
-	size_t period_copy_start;
+	uint64_t period_copy_start;
 	size_t time_taken;
 	size_t *rt_stream_update = &draining_data->buffered_while_draining;
 	struct comp_data *kpb = comp_get_drvdata(draining_data->dev);

From f9bdae2ad6ac1ce863cfdc1e4f30c6438d969215 Mon Sep 17 00:00:00 2001
From: Serhiy Katsyuba
Date: Mon, 3 Mar 2025 16:31:36 +0100
Subject: [PATCH 2/6] kpb: Simplify math

Given: x = a + b - (a - c), reduce to: x = c + b

Signed-off-by: Serhiy Katsyuba
---
 src/audio/kpb.c | 10 ++--------
 1 file changed, 2 insertions(+), 8 deletions(-)

diff --git a/src/audio/kpb.c b/src/audio/kpb.c
index 059c2f16bdb7..749f8ed709d9 100644
--- a/src/audio/kpb.c
+++ b/src/audio/kpb.c
@@ -1730,11 +1730,9 @@ static enum task_state kpb_draining_task(void *arg)
 	uint64_t draining_time_ms;
 	uint64_t drain_interval = draining_data->drain_interval;
 	uint64_t next_copy_time = 0;
-	uint64_t current_time;
 	size_t period_bytes = 0;
 	size_t period_bytes_limit = draining_data->pb_limit;
 	uint64_t period_copy_start;
-	size_t time_taken;
 	size_t *rt_stream_update = &draining_data->buffered_while_draining;
 	struct comp_data *kpb = comp_get_drvdata(draining_data->dev);
 	bool sync_mode_on = draining_data->sync_mode_on;
@@ -1836,12 +1834,8 @@ static enum task_state kpb_draining_task(void *arg)
 			comp_copy(comp_buffer_get_sink_component(sink));
 		}
 
-		if (sync_mode_on && period_bytes >= period_bytes_limit) {
-			current_time = sof_cycle_get_64();
-			time_taken = current_time - period_copy_start;
-			next_copy_time = current_time + drain_interval -
-					 time_taken;
-		}
+		if (sync_mode_on && period_bytes >= period_bytes_limit)
+			next_copy_time = period_copy_start + drain_interval;
 
 		if (drain_req == 0) {
 			/* We have finished draining of requested data however

From 935ac41602c147853d24fc0cada612b104064bdc Mon Sep 17 00:00:00 2001
From: Serhiy Katsyuba
Date: Mon, 3 Mar 2025 15:40:48 +0100
Subject: [PATCH 3/6] kpb: Simplify 'if-else' logic

Replace the verbose 'if-else' with simpler code using the MIN() macro.

Signed-off-by: Serhiy Katsyuba
---
 src/audio/kpb.c | 25 ++++++-------------------
 1 file changed, 6 insertions(+), 19 deletions(-)

diff --git a/src/audio/kpb.c b/src/audio/kpb.c
index 749f8ed709d9..5bcc1e5a422a 100644
--- a/src/audio/kpb.c
+++ b/src/audio/kpb.c
@@ -1721,9 +1721,8 @@ static enum task_state kpb_draining_task(void *arg)
 	struct history_buffer *buff = draining_data->hb;
 	size_t drain_req = draining_data->drain_req;
 	size_t sample_width = draining_data->sample_width;
-	size_t size_to_read;
+	size_t avail;
 	size_t size_to_copy;
-	bool move_buffer = false;
 	uint32_t drained = 0;
 	uint64_t draining_time_start;
 	uint64_t draining_time_end;
@@ -1791,21 +1790,9 @@ static enum task_state kpb_draining_task(void *arg)
 			period_copy_start = sof_cycle_get_64();
 		}
 
-		size_to_read = (uintptr_t)buff->end_addr - (uintptr_t)buff->r_ptr;
-
-		if (size_to_read > audio_stream_get_free_bytes(&sink->stream)) {
-			if (audio_stream_get_free_bytes(&sink->stream) >= drain_req)
-				size_to_copy = drain_req;
-			else
-				size_to_copy = audio_stream_get_free_bytes(&sink->stream);
-		} else {
-			if (size_to_read > drain_req) {
-				size_to_copy = drain_req;
-			} else {
-				size_to_copy = size_to_read;
-				move_buffer = true;
-			}
-		}
+		avail = (uintptr_t)buff->end_addr - (uintptr_t)buff->r_ptr;
+		size_to_copy = MIN(avail,
+				   MIN(drain_req, audio_stream_get_free_bytes(&sink->stream)));
 
 		kpb_drain_samples(buff->r_ptr, &sink->stream, size_to_copy,
 				  sample_width);
@@ -1817,10 +1804,10 @@ static enum task_state kpb_draining_task(void *arg)
 		kpb->hd.free += MIN(kpb->hd.buffer_size - kpb->hd.free,
 				    size_to_copy);
 
-		if (move_buffer) {
+		/* no data left in the current buffer -- switch to the next buffer */
+		if (size_to_copy == avail) {
 			buff->r_ptr = buff->start_addr;
 			buff = buff->next;
-			move_buffer = false;
 		}
 
 		if (size_to_copy) {

From d0eace8bfa7f3f2c30ee3f428cfd8c4eb5075386 Mon Sep 17 00:00:00 2001
From: Serhiy Katsyuba
Date: Thu, 6 Mar 2025 16:52:59 +0100
Subject: [PATCH 4/6] kpb: Fix crash after a reset

The kpb->host_sink is NULL after a reset or unbind. The host could
send a reset IPC while the draining task is still running, which
results in a crash.

Signed-off-by: Serhiy Katsyuba
---
 src/audio/kpb.c | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/src/audio/kpb.c b/src/audio/kpb.c
index 5bcc1e5a422a..7ed1118a8528 100644
--- a/src/audio/kpb.c
+++ b/src/audio/kpb.c
@@ -1850,9 +1850,15 @@ static enum task_state kpb_draining_task(void *arg)
 out:
 	draining_time_end = sof_cycle_get_64();
 
-	/* Reset host-sink copy mode back to its pre-draining value */
-	comp_set_attribute(comp_buffer_get_sink_component(kpb->host_sink), COMP_ATTR_COPY_TYPE,
-			   &kpb->draining_task_data.copy_type);
+	/* Reset host-sink copy mode back to its pre-draining value.
+	 * kpb->host_sink is NULL after a reset or unbind.
+	 */
+	if (kpb->host_sink)
+		comp_set_attribute(comp_buffer_get_sink_component(kpb->host_sink),
+				   COMP_ATTR_COPY_TYPE,
+				   &kpb->draining_task_data.copy_type);
+	else
+		comp_cl_err(&comp_kpb, "Failed to restore host copy mode!");
 
 	draining_time_ms = k_cyc_to_ms_near64(draining_time_end - draining_time_start);
 	if (draining_time_ms <= UINT_MAX)

From 3718be8ed55a066596d144e4ed60aaca8d60b4c7 Mon Sep 17 00:00:00 2001
From: Serhiy Katsyuba
Date: Fri, 7 Mar 2025 16:35:10 +0100
Subject: [PATCH 5/6] kpb: Avoid long seizure of EDF scheduler thread.

The draining task could run for quite a long time (e.g., a few
seconds). It is implemented as an EDF scheduler task.
While working, it blocks other EDF scheduler tasks: the IPC processing
task and the mtrace logging task. The fix replaces the while loop in
the draining task with a return of SOF_TASK_STATE_RESCHEDULE, so other
EDF scheduler tasks can have CPU time.

Signed-off-by: Serhiy Katsyuba
---
 src/audio/kpb.c             | 135 +++++++++++++++++++++---------------
 src/include/sof/audio/kpb.h |   5 ++
 2 files changed, 84 insertions(+), 56 deletions(-)

diff --git a/src/audio/kpb.c b/src/audio/kpb.c
index 7ed1118a8528..7745db2553d8 100644
--- a/src/audio/kpb.c
+++ b/src/audio/kpb.c
@@ -160,7 +160,19 @@ static void devicelist_reset(struct device_list *devlist, bool remove_items);
 
 static uint64_t kpb_task_deadline(void *data)
 {
+#ifndef __ZEPHYR__
 	return SOF_TASK_DEADLINE_ALMOST_IDLE;
+#else
+	struct draining_data *dd = (struct draining_data *)data;
+	uint64_t now;
+
+	if (dd->next_copy_time == 0)
+		return 0; /* run immediately */
+
+	now = sof_cycle_get_64();
+	return dd->next_copy_time > now ?
+		k_uptime_ticks() + k_cyc_to_ticks_near64(dd->next_copy_time - now) : 0;
+#endif
 }
 
 #if CONFIG_AMS
@@ -1684,9 +1696,13 @@ static void kpb_init_draining(struct comp_dev *dev, struct kpb_client *cli)
 		kpb->draining_task_data.sink = kpb->host_sink;
 		kpb->draining_task_data.hb = buff;
 		kpb->draining_task_data.drain_req = drain_req;
+		kpb->draining_task_data.drained = 0;
 		kpb->draining_task_data.sample_width = sample_width;
 		kpb->draining_task_data.drain_interval = drain_interval;
+		kpb->draining_task_data.period_copy_start = 0;
 		kpb->draining_task_data.pb_limit = period_bytes_limit;
+		kpb->draining_task_data.period_bytes = 0;
+		kpb->draining_task_data.next_copy_time = 0;
 		kpb->draining_task_data.dev = dev;
 		kpb->draining_task_data.sync_mode_on = kpb->sync_draining_mode;
 
@@ -1701,6 +1717,16 @@ static void kpb_init_draining(struct comp_dev *dev, struct kpb_client *cli)
 		/* Pause selector copy. */
 		comp_buffer_get_sink_component(kpb->sel_sink)->state = COMP_STATE_PAUSED;
 
+		if (!pm_runtime_is_active(PM_RUNTIME_DSP, PLATFORM_PRIMARY_CORE_ID))
+			pm_runtime_disable(PM_RUNTIME_DSP, PLATFORM_PRIMARY_CORE_ID);
+
+		comp_info(dev, "Scheduling draining task");
+
+		/* Change KPB internal state to DRAINING */
+		kpb_change_state(kpb, KPB_STATE_DRAINING);
+
+		kpb->draining_task_data.draining_time_start = sof_cycle_get_64();
+
 		/* Schedule draining task */
 		schedule_task(&kpb->draining_task, 0, 0);
 	}
@@ -1719,23 +1745,16 @@ static enum task_state kpb_draining_task(void *arg)
 	struct draining_data *draining_data = (struct draining_data *)arg;
 	struct comp_buffer *sink = draining_data->sink;
 	struct history_buffer *buff = draining_data->hb;
-	size_t drain_req = draining_data->drain_req;
 	size_t sample_width = draining_data->sample_width;
 	size_t avail;
 	size_t size_to_copy;
-	uint32_t drained = 0;
-	uint64_t draining_time_start;
 	uint64_t draining_time_end;
 	uint64_t draining_time_ms;
 	uint64_t drain_interval = draining_data->drain_interval;
-	uint64_t next_copy_time = 0;
-	size_t period_bytes = 0;
 	size_t period_bytes_limit = draining_data->pb_limit;
-	uint64_t period_copy_start;
 	size_t *rt_stream_update = &draining_data->buffered_while_draining;
 	struct comp_data *kpb = comp_get_drvdata(draining_data->dev);
 	bool sync_mode_on = draining_data->sync_mode_on;
-	bool pm_is_active;
 
 	/*
 	 * WORKAROUND: The code below accesses KPB sink buffer and calls comp_copy() on
@@ -1748,66 +1767,53 @@ static enum task_state kpb_draining_task(void *arg)
 	k_sched_lock();
 #endif
 
-	comp_cl_info(&comp_kpb, "kpb_draining_task(), start.");
-
-	pm_is_active = pm_runtime_is_active(PM_RUNTIME_DSP, PLATFORM_PRIMARY_CORE_ID);
-
-	if (!pm_is_active)
-		pm_runtime_disable(PM_RUNTIME_DSP, PLATFORM_PRIMARY_CORE_ID);
-
-	/* Change KPB internal state to DRAINING */
-	kpb_change_state(kpb, KPB_STATE_DRAINING);
+	comp_cl_dbg(&comp_kpb, "kpb_draining_task()");
 
-	draining_time_start = sof_cycle_get_64();
-	period_copy_start = draining_time_start;
+	/* Have we received reset request? */
+	if (kpb->state == KPB_STATE_RESETTING) {
+		kpb_change_state(kpb, KPB_STATE_RESET_FINISHING);
+		kpb_reset(draining_data->dev);
+		draining_data->drain_req = 0;
+		goto out;
+	}
 
-	while (drain_req > 0) {
-		/*
-		 * Draining task usually runs for quite a lot of time (could be few seconds).
-		 * LL should not be blocked for such a long time.
+	if (draining_data->drain_req > 0) {
+		/* Are we ready to drain further or host still need some time
+		 * to read the data already provided?
 		 */
+		if (sync_mode_on && draining_data->next_copy_time > sof_cycle_get_64()) {
+			/* Restore original EDF thread priority */
#ifdef __ZEPHYR__
-		k_sched_unlock();
-		k_yield();
-		k_sched_lock();
+			k_sched_unlock();
#endif
-
-		/* Have we received reset request? */
-		if (kpb->state == KPB_STATE_RESETTING) {
-			kpb_change_state(kpb, KPB_STATE_RESET_FINISHING);
-			kpb_reset(draining_data->dev);
-			goto out;
+			return SOF_TASK_STATE_RESCHEDULE;
 		}
-
-		/* Are we ready to drain further or host still need some time
-		 * to read the data already provided? */
-		if (sync_mode_on &&
-		    next_copy_time > sof_cycle_get_64()) {
-			period_bytes = 0;
-			period_copy_start = sof_cycle_get_64();
-			continue;
-		} else if (next_copy_time == 0) {
-			period_copy_start = sof_cycle_get_64();
+
+		if (draining_data->period_copy_start == 0) {
+			/* starting new draining period */
+			draining_data->period_copy_start = sof_cycle_get_64();
+			draining_data->period_bytes = 0;
 		}
 
 		avail = (uintptr_t)buff->end_addr - (uintptr_t)buff->r_ptr;
 		size_to_copy = MIN(avail,
-				   MIN(drain_req, audio_stream_get_free_bytes(&sink->stream)));
+				   MIN(draining_data->drain_req,
+				       audio_stream_get_free_bytes(&sink->stream)));
 
 		kpb_drain_samples(buff->r_ptr, &sink->stream, size_to_copy,
 				  sample_width);
 
 		buff->r_ptr = (char *)buff->r_ptr + (uint32_t)size_to_copy;
-		drain_req -= size_to_copy;
-		drained += size_to_copy;
-		period_bytes += size_to_copy;
+		draining_data->drain_req -= size_to_copy;
+		draining_data->drained += size_to_copy;
+		draining_data->period_bytes += size_to_copy;
 		kpb->hd.free += MIN(kpb->hd.buffer_size - kpb->hd.free,
 				    size_to_copy);
 
 		/* no data left in the current buffer -- switch to the next buffer */
 		if (size_to_copy == avail) {
 			buff->r_ptr = buff->start_addr;
-			buff = buff->next;
+			draining_data->hb = buff->next;
 		}
 
 		if (size_to_copy) {
@@ -1821,10 +1827,15 @@ static enum task_state kpb_draining_task(void *arg)
 			comp_copy(comp_buffer_get_sink_component(sink));
 		}
 
-		if (sync_mode_on && period_bytes >= period_bytes_limit)
-			next_copy_time = period_copy_start + drain_interval;
+		if (sync_mode_on && draining_data->period_bytes >= period_bytes_limit) {
+			draining_data->next_copy_time = draining_data->period_copy_start +
+							drain_interval;
+			draining_data->period_copy_start = 0;
+		} else {
+			draining_data->next_copy_time = 0;
+		}
 
-		if (drain_req == 0) {
+		if (draining_data->drain_req == 0) {
 			/* We have finished draining of requested data however
 			 * while we were draining real time stream could provided
 			 * new data which needs to be copy to host.
@@ -1832,9 +1843,9 @@ static enum task_state kpb_draining_task(void *arg)
 			comp_cl_info(&comp_kpb, "kpb: update drain_req by %zu",
 				     *rt_stream_update);
 			kpb_lock(kpb);
-			drain_req += *rt_stream_update;
+			draining_data->drain_req += *rt_stream_update;
 			*rt_stream_update = 0;
-			if (!drain_req && kpb->state == KPB_STATE_DRAINING) {
+			if (!draining_data->drain_req && kpb->state == KPB_STATE_DRAINING) {
 				/* Draining is done. Now switch KPB to copy real time
 				 * stream to client's sink. This state is called
 				 * "draining on demand"
@@ -1848,6 +1859,17 @@ static enum task_state kpb_draining_task(void *arg)
 	}
 
 out:
+	if (draining_data->drain_req > 0) {
+		/* Restore original EDF thread priority */
+#ifdef __ZEPHYR__
+		k_sched_unlock();
+#endif
+
+		/* continue draining on next task iteration */
+		return SOF_TASK_STATE_RESCHEDULE;
+	}
+
+	/* finished draining */
 	draining_time_end = sof_cycle_get_64();
 
 	/* Reset host-sink copy mode back to its pre-draining value.
 	 * kpb->host_sink is NULL after a reset or unbind.
 	 */
 	if (kpb->host_sink)
 		comp_set_attribute(comp_buffer_get_sink_component(kpb->host_sink),
 				   COMP_ATTR_COPY_TYPE,
 				   &kpb->draining_task_data.copy_type);
 	else
 		comp_cl_err(&comp_kpb, "Failed to restore host copy mode!");
 
-	draining_time_ms = k_cyc_to_ms_near64(draining_time_end - draining_time_start);
+	draining_time_ms = k_cyc_to_ms_near64(draining_time_end -
+					      draining_data->draining_time_start);
 	if (draining_time_ms <= UINT_MAX)
-		comp_cl_info(&comp_kpb, "KPB: kpb_draining_task(), done. %u drained in %u ms",
-			     drained, (unsigned int)draining_time_ms);
+		comp_cl_info(&comp_kpb, "KPB: kpb_draining_task(), done. %zu drained in %u ms",
+			     draining_data->drained, (unsigned int)draining_time_ms);
 	else
-		comp_cl_info(&comp_kpb, "KPB: kpb_draining_task(), done. %u drained in > %u ms",
-			     drained, UINT_MAX);
+		comp_cl_info(&comp_kpb, "KPB: kpb_draining_task(), done. %zu drained in > %u ms",
+			     draining_data->drained, UINT_MAX);
 
 	/* Restore original EDF thread priority */
#ifdef __ZEPHYR__
diff --git a/src/include/sof/audio/kpb.h b/src/include/sof/audio/kpb.h
index 7cf786796ed8..3cbd6a290091 100644
--- a/src/include/sof/audio/kpb.h
+++ b/src/include/sof/audio/kpb.h
@@ -146,11 +146,16 @@ struct draining_data {
 	struct comp_buffer *sink;
 	struct history_buffer *hb;
 	size_t drain_req;
+	size_t drained;
 	uint8_t is_draining_active;
 	size_t sample_width;
 	size_t buffered_while_draining;
+	uint64_t draining_time_start;
 	size_t drain_interval;
+	uint64_t period_copy_start;
 	size_t pb_limit; /**< Period bytes limit */
+	size_t period_bytes;
+	uint64_t next_copy_time;
 	struct comp_dev *dev;
 	bool sync_mode_on;
 	enum comp_copy_type copy_type;

From 32400af09ae8efe14d3991b8b80fcba824d08c0d Mon Sep 17 00:00:00 2001
From: Serhiy Katsyuba
Date: Fri, 7 Mar 2025 18:59:26 +0100
Subject: [PATCH 6/6] kpb: Fix drain interval calculation

The previous drain interval calculation ignored the actual DSP load by
LL and only worked for light LL loads (high DSP clock). The drain
interval was not appropriate for slow FPGA, where LL takes half of the
DSP time. This update introduces code that dynamically readjusts the
drain interval and does not depend on LL load (works on low clock,
FPGA).

Signed-off-by: Serhiy Katsyuba
---
 src/audio/kpb.c             | 54 +++++++++++++++++++++++++++++++++++--
 src/include/sof/audio/kpb.h |  3 +++
 2 files changed, 55 insertions(+), 2 deletions(-)

diff --git a/src/audio/kpb.c b/src/audio/kpb.c
index 7745db2553d8..61f85c34bca3 100644
--- a/src/audio/kpb.c
+++ b/src/audio/kpb.c
@@ -1705,6 +1705,9 @@ static void kpb_init_draining(struct comp_dev *dev, struct kpb_client *cli)
 		kpb->draining_task_data.next_copy_time = 0;
 		kpb->draining_task_data.dev = dev;
 		kpb->draining_task_data.sync_mode_on = kpb->sync_draining_mode;
+		kpb->draining_task_data.task_iteration = 0;
+		kpb->draining_task_data.prev_adjustment_time = 0;
+		kpb->draining_task_data.prev_adjustment_drained = 0;
 
 		/* save current sink copy type */
 		comp_get_attribute(comp_buffer_get_sink_component(kpb->host_sink),
@@ -1732,6 +1735,52 @@ static void kpb_init_draining(struct comp_dev *dev, struct kpb_client *cli)
 	}
 }
 
+static void adjust_drain_interval(struct comp_data *kpb, struct draining_data *dd)
+{
+	uint64_t now; /* timestamp in wall-clock cycles */
+
+	/* readjust drain_interval every 32 task iterations */
+	if (dd->task_iteration++ % 32)
+		return;
+
+	now = sof_cycle_get_64();
+
+	if (dd->prev_adjustment_time) {
+		size_t drained;
+		size_t elapsed;
+		size_t actual_pace, optimal_pace;
+		size_t pipeline_period;
+
+		drained = dd->drained - dd->prev_adjustment_drained;
+		elapsed = now - dd->prev_adjustment_time;
+		assert(elapsed);
+		/* average drained bytes per second */
+		actual_pace = (size_t)k_ms_to_cyc_ceil64(1000) / elapsed * drained;
+
+		pipeline_period = KPB_SAMPLES_PER_MS *
+			(KPB_SAMPLE_CONTAINER_SIZE(dd->sample_width) / 8) * kpb->config.channels;
+		/* desired draining pace in bytes per second */
+		optimal_pace = pipeline_period * KPB_DRAIN_NUM_OF_PPL_PERIODS_AT_ONCE * 1000;
+
+		/* just in case to prevent div by 0 if draining is stuck (e.g. because of host) */
+		if (actual_pace) {
+			if (actual_pace < optimal_pace) {
+				dd->drain_interval /= optimal_pace / actual_pace;
+				dd->drain_interval -= dd->drain_interval / 8;
+			} else if (actual_pace > optimal_pace) {
+				dd->drain_interval *= actual_pace / optimal_pace;
+				dd->drain_interval += dd->drain_interval / 8;
+			}
+			/* the above algorithm will get stuck if the drain_interval is below 8 */
+			if (dd->drain_interval < 8)
+				dd->drain_interval = 8;
+		}
+	}
+
+	dd->prev_adjustment_time = now;
+	dd->prev_adjustment_drained = dd->drained;
+}
+
 /**
  * \brief Draining task.
  *
@@ -1750,7 +1799,6 @@ static enum task_state kpb_draining_task(void *arg)
 	size_t size_to_copy;
 	uint64_t draining_time_end;
 	uint64_t draining_time_ms;
-	uint64_t drain_interval = draining_data->drain_interval;
 	size_t period_bytes_limit = draining_data->pb_limit;
 	size_t *rt_stream_update = &draining_data->buffered_while_draining;
 	struct comp_data *kpb = comp_get_drvdata(draining_data->dev);
@@ -1777,6 +1825,8 @@ static enum task_state kpb_draining_task(void *arg)
 		goto out;
 	}
 
+	adjust_drain_interval(kpb, draining_data);
+
 	if (draining_data->drain_req > 0) {
 		/* Are we ready to drain further or host still need some time
 		 * to read the data already provided?
@@ -1829,7 +1879,7 @@ static enum task_state kpb_draining_task(void *arg)
 		if (sync_mode_on && draining_data->period_bytes >= period_bytes_limit) {
 			draining_data->next_copy_time = draining_data->period_copy_start +
-							drain_interval;
+							draining_data->drain_interval;
 			draining_data->period_copy_start = 0;
 		} else {
 			draining_data->next_copy_time = 0;
 		}
diff --git a/src/include/sof/audio/kpb.h b/src/include/sof/audio/kpb.h
index 3cbd6a290091..fce40f1638a1 100644
--- a/src/include/sof/audio/kpb.h
+++ b/src/include/sof/audio/kpb.h
@@ -159,6 +159,9 @@ struct draining_data {
 	struct comp_dev *dev;
 	bool sync_mode_on;
 	enum comp_copy_type copy_type;
+	size_t task_iteration;
+	uint64_t prev_adjustment_time;
+	size_t prev_adjustment_drained;
 };
 
 struct history_data {