diff --git a/Kconfig.sof b/Kconfig.sof index 8a2cf6a1c833..44fc5eda1150 100644 --- a/Kconfig.sof +++ b/Kconfig.sof @@ -1,110 +1,10 @@ # SPDX-License-Identifier: BSD-3-Clause -config XT_WAITI_DELAY - bool - default n - help - LX6 Xtensa platforms may require additional delay to flush loads - and stores before entering WAITI. config HOST_PTABLE bool default n -config XT_HAVE_RESET_VECTOR_ROM - bool - default n - help - Select if your platform has the reset vector - in ROM. - -config XT_IRQ_MAP - bool - default n - -config DMA_GW - bool - default n - -config MEM_WND - bool - default n - -config INTEL_IOMUX - bool - default n - -config DMA_HW_LLI - bool - default n - help - Hardware linked list is DMA feature, which allows - to automatically reload the next programmed linked list - item from memory without stopping the transfer. Without - it the transfer stops after every lli read and FW needs - to manually setup the next transfer. - - Any platforms with hardware linked list support - should set this. - -config DMA_SUSPEND_DRAIN - bool - default n - help - Some platforms cannot just simple disable DMA - channel during the transfer, because it will - hang the whole DMA controller. Instead we can - suspend the channel and drain the FIFO in order - to stop the channel as soon as possible. - - Any platforms without the ability to disable - the DMA channel right away should set this. - -config DMA_FIFO_PARTITION - bool - default n - help - Some platforms require to manually set DMA - FIFO partitions before starting any transfer. - - Any platforms without automatic FIFO partitions - should set this. - -config XT_INTERRUPT_LEVEL_1 - bool - default n - help - Select if the platform supports any interrupts of level 1. - Disabling this option allows for less memory consumption. - -config XT_INTERRUPT_LEVEL_2 - bool - default n - help - Select if the platform supports any interrupts of level 2. - Disabling this option allows for less memory consumption. 
- -config XT_INTERRUPT_LEVEL_3 - bool - default n - help - Select if the platform supports any interrupts of level 3. - Disabling this option allows for less memory consumption. - -config XT_INTERRUPT_LEVEL_4 - bool - default n - help - Select if the platform supports any interrupts of level 4. - Disabling this option allows for less memory consumption. - -config XT_INTERRUPT_LEVEL_5 - bool - default n - help - Select if the platform supports any interrupts of level 5. - Disabling this option allows for less memory consumption. - config COMPILER_WORKAROUND_CACHE_ATTR bool default n @@ -148,6 +48,8 @@ config FAST_GET counting. Source is src/lib/fast-get.c. The option should be selected on platforms, where __cold_rodata is supported. +rsource "Kconfig.xtos" + rsource "src/Kconfig" if ZEPHYR_SOF_MODULE diff --git a/Kconfig.xtos b/Kconfig.xtos new file mode 100644 index 000000000000..2aa6b991fe7a --- /dev/null +++ b/Kconfig.xtos @@ -0,0 +1,97 @@ +# SPDX-License-Identifier: BSD-3-Clause + +# This file contains build options needed to support +# legacy driver code that still partially relies on XTOS +# definitions, even when used with Zephyr RTOS. +# +# When all platforms have moved over to native Zephyr +# drivers, this file can be removed +
+config XT_WAITI_DELAY + bool + default n + help + LX6 Xtensa platforms may require additional delay to flush loads + and stores before entering WAITI. + +config XT_HAVE_RESET_VECTOR_ROM + bool + default n + help + Select if your platform has the reset vector + in ROM. + +config XT_IRQ_MAP + bool + default n + +config DMA_HW_LLI + bool + default n + help + Hardware linked list is a DMA feature, which allows + to automatically reload the next programmed linked list + item from memory without stopping the transfer. Without + it the transfer stops after every lli read and FW needs + to manually set up the next transfer. + + Any platforms with hardware linked list support + should set this. 
+ +config DMA_SUSPEND_DRAIN + bool + default n + help + Some platforms cannot simply disable the DMA + channel during the transfer, because it will + hang the whole DMA controller. Instead we can + suspend the channel and drain the FIFO in order + to stop the channel as soon as possible. + + Any platforms without the ability to disable + the DMA channel right away should set this. + +config DMA_FIFO_PARTITION + bool + default n + help + Some platforms require manually setting DMA + FIFO partitions before starting any transfer. + + Any platforms without automatic FIFO partitions + should set this. + +config XT_INTERRUPT_LEVEL_1 + bool + default n + help + Select if the platform supports any interrupts of level 1. + Disabling this option allows for less memory consumption. + +config XT_INTERRUPT_LEVEL_2 + bool + default n + help + Select if the platform supports any interrupts of level 2. + Disabling this option allows for less memory consumption. + +config XT_INTERRUPT_LEVEL_3 + bool + default n + help + Select if the platform supports any interrupts of level 3. + Disabling this option allows for less memory consumption. + +config XT_INTERRUPT_LEVEL_4 + bool + default n + help + Select if the platform supports any interrupts of level 4. + Disabling this option allows for less memory consumption. + +config XT_INTERRUPT_LEVEL_5 + bool + default n + help + Select if the platform supports any interrupts of level 5. + Disabling this option allows for less memory consumption. 
diff --git a/src/include/sof/trace/dma-trace.h b/src/include/sof/trace/dma-trace.h index 93148ec0ecac..dbc9e666da47 100644 --- a/src/include/sof/trace/dma-trace.h +++ b/src/include/sof/trace/dma-trace.h @@ -30,9 +30,6 @@ struct dma_trace_buf { struct dma_trace_data { struct dma_sg_config config; struct dma_trace_buf dmatb; -#if CONFIG_DMA_GW - struct dma_sg_config gw_config; -#endif struct dma_copy dc; struct sof_ipc_dma_trace_posn posn; struct ipc_msg *msg; diff --git a/src/ipc/dma-copy.c b/src/ipc/dma-copy.c index 5c45484c614d..3e8834474e82 100644 --- a/src/ipc/dma-copy.c +++ b/src/ipc/dma-copy.c @@ -23,7 +23,6 @@ SOF_DEFINE_REG_UUID(dma_copy); DECLARE_TR_CTX(dmacpy_tr, SOF_UUID(dma_copy_uuid), LOG_LEVEL_INFO); -#if !CONFIG_DMA_GW static struct dma_sg_elem *sg_get_elem_at(struct dma_sg_config *host_sg, int32_t *offset) { @@ -49,43 +48,11 @@ static struct dma_sg_elem *sg_get_elem_at(struct dma_sg_config *host_sg, tr_err(&dmacpy_tr, "host offset in beyond end of SG buffer"); return NULL; } -#endif /* Copy DSP memory to host memory. * Copies DSP memory to host in a single PAGE_SIZE or smaller block. Does not * waits/sleeps and can be used in IRQ context. 
*/ -#if CONFIG_DMA_GW - -int dma_copy_to_host(struct dma_copy *dc, struct dma_sg_config *host_sg, - int32_t host_offset, void *local_ptr, int32_t size) -{ - int ret; - - /* tell gateway to copy */ - ret = dma_copy_legacy(dc->chan, size, DMA_COPY_BLOCKING); - if (ret < 0) - return ret; - - /* bytes copied */ - return size; -} - -int dma_copy_to_host_nowait(struct dma_copy *dc, struct dma_sg_config *host_sg, - int32_t host_offset, void *local_ptr, int32_t size) -{ - int ret; - - /* tell gateway to copy */ - ret = dma_copy_legacy(dc->chan, size, 0); - if (ret < 0) - return ret; - - /* bytes copied */ - return size; -} - -#else /* CONFIG_DMA_GW */ static int dma_copy_to_host_flags(struct dma_copy *dc, struct dma_sg_config *host_sg, @@ -153,8 +120,6 @@ int dma_copy_to_host_nowait(struct dma_copy *dc, struct dma_sg_config *host_sg, DMA_COPY_ONE_SHOT); } -#endif /* CONFIG_DMA_GW */ - int dma_copy_new(struct dma_copy *dc) { uint32_t dir, cap, dev; @@ -169,14 +134,12 @@ int dma_copy_new(struct dma_copy *dc) return -ENODEV; } -#if !CONFIG_DMA_GW /* get DMA channel from DMAC0 */ dc->chan = dma_channel_get_legacy(dc->dmac, CONFIG_TRACE_CHANNEL); if (!dc->chan) { tr_err(&dmacpy_tr, "dc->chan is NULL"); return -ENODEV; } -#endif return 0; } diff --git a/src/trace/dma-trace.c b/src/trace/dma-trace.c index f30007a89b6b..8779d71582ab 100644 --- a/src/trace/dma-trace.c +++ b/src/trace/dma-trace.c @@ -249,11 +249,6 @@ static void dma_trace_buffer_free(struct dma_trace_data *d) static int dma_trace_buffer_init(struct dma_trace_data *d) { -#if CONFIG_DMA_GW - struct dma_sg_config *config = &d->gw_config; - uint32_t elem_size, elem_addr, elem_num; - int ret; -#endif struct dma_trace_buf *buffer = &d->dmatb; void *buf; k_spinlock_key_t key; @@ -309,30 +304,6 @@ static int dma_trace_buffer_init(struct dma_trace_data *d) k_spin_unlock(&d->lock, key); -#if CONFIG_DMA_GW - /* size of every trace record */ - elem_size = sizeof(uint64_t) * 2; - - /* Initialize address of local elem */ - 
elem_addr = (uint32_t)buffer->addr; - - /* the number of elem list */ - elem_num = DMA_TRACE_LOCAL_SIZE / elem_size; - - config->direction = DMA_DIR_LMEM_TO_HMEM; - config->src_width = sizeof(uint32_t); - config->dest_width = sizeof(uint32_t); - config->cyclic = 0; - - ret = dma_sg_alloc(&config->elem_array, SOF_MEM_FLAG_USER, - config->direction, elem_num, elem_size, - elem_addr, 0); - if (ret < 0) { - dma_trace_buffer_free(d); - return ret; - } -#endif - #ifdef __ZEPHYR__ #define ZEPHYR_VER_OPT " zephyr:" STRINGIFY(BUILD_VERSION) #else @@ -361,77 +332,6 @@ static int dma_trace_buffer_init(struct dma_trace_data *d) return 0; } -#if CONFIG_DMA_GW - -static int dma_trace_start(struct dma_trace_data *d) -{ - int err = 0; - - /* DMA Controller initialization is platform-specific */ - if (!d || !d->dc.dmac) { - mtrace_printf(LOG_LEVEL_ERROR, - "dma_trace_start failed: no DMAC!"); - return -ENODEV; - } - - if (d->dc.chan) { - /* We already have DMA channel for dtrace, stop it */ - mtrace_printf(LOG_LEVEL_WARNING, - "dma_trace_start(): DMA reconfiguration (active stream_tag: %u)", - d->active_stream_tag); - - schedule_task_cancel(&d->dmat_work); - err = dma_stop_legacy(d->dc.chan); - if (err < 0) { - mtrace_printf(LOG_LEVEL_ERROR, - "dma_trace_start(): DMA channel failed to stop"); - } else if (d->active_stream_tag != d->stream_tag) { - /* Re-request a channel if different tag is provided */ - mtrace_printf(LOG_LEVEL_WARNING, - "dma_trace_start(): stream_tag change from %u to %u", - d->active_stream_tag, d->stream_tag); - - dma_channel_put_legacy(d->dc.chan); - d->dc.chan = NULL; - err = dma_copy_set_stream_tag(&d->dc, d->stream_tag); - } - } else { - err = dma_copy_set_stream_tag(&d->dc, d->stream_tag); - } - - if (err < 0) - return err; - - /* Reset host buffer information as host is re-configuring dtrace */ - d->posn.host_offset = 0; - - d->active_stream_tag = d->stream_tag; - - err = dma_set_config_legacy(d->dc.chan, &d->gw_config); - if (err < 0) { - 
mtrace_printf(LOG_LEVEL_ERROR, "dma_set_config() failed: %d", err); - goto error; - } - - err = dma_start_legacy(d->dc.chan); - if (err == 0) - return 0; - -error: - dma_channel_put_legacy(d->dc.chan); - d->dc.chan = NULL; - - return err; -} - -static int dma_trace_get_avail_data(struct dma_trace_data *d, - struct dma_trace_buf *buffer, - int avail) -{ - /* align data to HD-DMA burst size */ - return ALIGN_DOWN(avail, d->dma_copy_align); -} -#else static int dma_trace_get_avail_data(struct dma_trace_data *d, struct dma_trace_buf *buffer, int avail) @@ -464,8 +364,6 @@ static int dma_trace_get_avail_data(struct dma_trace_data *d, return size; } -#endif /* CONFIG_DMA_GW */ - /** Invoked remotely by SOF_IPC_TRACE_DMA_PARAMS* Depends on * dma_trace_init_complete() */ @@ -478,16 +376,6 @@ int dma_trace_enable(struct dma_trace_data *d) if (err < 0) return err; -#if CONFIG_DMA_GW - /* - * GW DMA need finish DMA config and start before - * host driver trigger start DMA - */ - err = dma_trace_start(d); - if (err < 0) - goto out; -#endif - /* validate DMA context */ if (!d->dc.dmac || !d->dc.chan) { tr_err_atomic(&dt_tr, "dma_trace_enable(): not valid");