diff --git a/src/audio/host-zephyr.c b/src/audio/host-zephyr.c index 6bf1fcf78217..b0e7c80915c5 100644 --- a/src/audio/host-zephyr.c +++ b/src/audio/host-zephyr.c @@ -86,7 +86,7 @@ static int host_dma_set_config_and_copy(struct host_data *hd, struct comp_dev *d local_elem->size = bytes; /* reconfigure transfer */ - ret = dma_config(hd->chan->dma->z_dev, hd->chan->index, &hd->z_config); + ret = sof_dma_config(hd->chan->dma, hd->chan->index, &hd->z_config); if (ret < 0) { comp_err(dev, "dma_config() failed, ret = %d", ret); @@ -95,7 +95,7 @@ static int host_dma_set_config_and_copy(struct host_data *hd, struct comp_dev *d cb(dev, bytes); - ret = dma_reload(hd->chan->dma->z_dev, hd->chan->index, 0, 0, bytes); + ret = sof_dma_reload(hd->chan->dma, hd->chan->index, bytes); if (ret < 0) { comp_err(dev, "dma_copy() failed, ret = %d", ret); @@ -225,7 +225,7 @@ static int host_copy_one_shot(struct host_data *hd, struct comp_dev *dev, copy_c hd->z_config.head_block->block_size = local_elem->size; /* reconfigure transfer */ - ret = dma_config(hd->chan->dma->z_dev, hd->chan->index, &hd->z_config); + ret = sof_dma_config(hd->chan->dma, hd->chan->index, &hd->z_config); if (ret < 0) { comp_err(dev, "dma_config() failed, ret = %u", ret); return ret; @@ -233,7 +233,7 @@ static int host_copy_one_shot(struct host_data *hd, struct comp_dev *dev, copy_c cb(dev, copy_bytes); - ret = dma_reload(hd->chan->dma->z_dev, hd->chan->index, 0, 0, copy_bytes); + ret = sof_dma_reload(hd->chan->dma, hd->chan->index, copy_bytes); if (ret < 0) comp_err(dev, "dma_copy() failed, ret = %u", ret); @@ -367,7 +367,7 @@ static void host_dma_cb(struct comp_dev *dev, size_t bytes) /* get status from dma and check for xrun */ static int host_get_status(struct comp_dev *dev, struct host_data *hd, struct dma_status *stat) { - int ret = dma_get_status(hd->chan->dma->z_dev, hd->chan->index, stat); + int ret = sof_dma_get_status(hd->chan->dma, hd->chan->index, stat); #if CONFIG_XRUN_NOTIFICATIONS_ENABLE if (ret == 
-EPIPE && !hd->xrun_notification_sent) { struct ipc_msg *notify = ipc_notification_pool_get(IPC4_RESOURCE_EVENT_SIZE); @@ -562,8 +562,8 @@ static int host_copy_normal(struct host_data *hd, struct comp_dev *dev, copy_cal if (!copy_bytes) { if (hd->partial_size != 0) { if (stream_sync(hd, dev)) { - ret = dma_reload(hd->chan->dma->z_dev, hd->chan->index, 0, 0, - hd->partial_size); + ret = sof_dma_reload(hd->chan->dma, hd->chan->index, + hd->partial_size); if (ret < 0) comp_err(dev, "dma_reload() failed, ret = %u", ret); @@ -589,8 +589,8 @@ static int host_copy_normal(struct host_data *hd, struct comp_dev *dev, copy_cal hd->dma_buffer_size - hd->partial_size <= (2 + threshold) * hd->period_bytes) { if (stream_sync(hd, dev)) { - ret = dma_reload(hd->chan->dma->z_dev, hd->chan->index, 0, 0, - hd->partial_size); + ret = sof_dma_reload(hd->chan->dma, hd->chan->index, + hd->partial_size); if (ret < 0) comp_err(dev, "dma_reload() failed, ret = %u", ret); @@ -665,14 +665,14 @@ int host_common_trigger(struct host_data *hd, struct comp_dev *dev, int cmd) switch (cmd) { case COMP_TRIGGER_START: hd->partial_size = 0; - ret = dma_start(hd->chan->dma->z_dev, hd->chan->index); + ret = sof_dma_start(hd->chan->dma, hd->chan->index); if (ret < 0) comp_err(dev, "dma_start() failed, ret = %u", ret); break; case COMP_TRIGGER_STOP: case COMP_TRIGGER_XRUN: - ret = dma_stop(hd->chan->dma->z_dev, hd->chan->index); + ret = sof_dma_stop(hd->chan->dma, hd->chan->index); if (ret < 0) comp_err(dev, "dma stop failed: %d", ret); @@ -875,8 +875,8 @@ int host_common_params(struct host_data *hd, struct comp_dev *dev, hd->cont_update_posn = params->cont_update_posn; /* retrieve DMA buffer address alignment */ - err = dma_get_attribute(hd->dma->z_dev, DMA_ATTR_BUFFER_ADDRESS_ALIGNMENT, - &addr_align); + err = sof_dma_get_attribute(hd->dma, DMA_ATTR_BUFFER_ADDRESS_ALIGNMENT, + &addr_align); if (err < 0) { comp_err(dev, "could not get dma buffer address alignment, err = %d", err); @@ -884,7 +884,7 @@ int 
host_common_params(struct host_data *hd, struct comp_dev *dev, } /* retrieve DMA buffer size alignment */ - err = dma_get_attribute(hd->dma->z_dev, DMA_ATTR_BUFFER_SIZE_ALIGNMENT, &align); + err = sof_dma_get_attribute(hd->dma, DMA_ATTR_BUFFER_SIZE_ALIGNMENT, &align); if (err < 0 || !align) { comp_err(dev, "could not get valid dma buffer alignment, err = %d, align = %u", err, align); @@ -999,7 +999,7 @@ int host_common_params(struct host_data *hd, struct comp_dev *dev, /* get DMA channel from DMAC * note: stream_tag is ignored by dw-dma */ - channel = dma_request_channel(hd->dma->z_dev, &hda_chan); + channel = sof_dma_request_channel(hd->dma, hda_chan); if (channel < 0) { comp_err(dev, "requested channel %d is busy", hda_chan); return -ENODEV; @@ -1061,14 +1061,14 @@ int host_common_params(struct host_data *hd, struct comp_dev *dev, break; } - err = dma_config(hd->chan->dma->z_dev, hd->chan->index, dma_cfg); + err = sof_dma_config(hd->chan->dma, hd->chan->index, dma_cfg); if (err < 0) { comp_err(dev, "dma_config() failed"); goto err_free_block_cfg; } - err = dma_get_attribute(hd->dma->z_dev, DMA_ATTR_COPY_ALIGNMENT, - &hd->dma_copy_align); + err = sof_dma_get_attribute(hd->dma, DMA_ATTR_COPY_ALIGNMENT, + &hd->dma_copy_align); if (err < 0) { comp_err(dev, "dma_get_attribute() failed"); @@ -1091,7 +1091,7 @@ int host_common_params(struct host_data *hd, struct comp_dev *dev, dma_cfg->head_block = NULL; rfree(dma_block_cfg); err_release_channel: - dma_release_channel(hd->dma->z_dev, hd->chan->index); + sof_dma_release_channel(hd->dma, hd->chan->index); hd->chan = NULL; return err; @@ -1151,8 +1151,8 @@ static int host_position(struct comp_dev *dev, void host_common_reset(struct host_data *hd, uint16_t state) { if (hd->chan) { - dma_stop(hd->chan->dma->z_dev, hd->chan->index); - dma_release_channel(hd->dma->z_dev, hd->chan->index); + sof_dma_stop(hd->chan->dma, hd->chan->index); + sof_dma_release_channel(hd->dma, hd->chan->index); hd->chan = NULL; } diff --git 
a/src/ipc/ipc-common.c b/src/ipc/ipc-common.c index d8ee7dcfc140..10f241784625 100644 --- a/src/ipc/ipc-common.c +++ b/src/ipc/ipc-common.c @@ -294,6 +294,11 @@ __cold int ipc_init(struct sof *sof) tr_dbg(&ipc_tr, "entry"); +#if CONFIG_SOF_BOOT_TEST_STANDALONE + LOG_INF("SOF_BOOT_TEST_STANDALONE, disabling IPC."); + return 0; +#endif + /* init ipc data */ sof->ipc = rzalloc(SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT, sizeof(*sof->ipc)); if (!sof->ipc) { diff --git a/src/lib/dma.c b/src/lib/dma.c index d24f730276b1..dd0eea7977d7 100644 --- a/src/lib/dma.c +++ b/src/lib/dma.c @@ -36,7 +36,7 @@ DECLARE_TR_CTX(dma_tr, SOF_UUID(dma_uuid), LOG_LEVEL_INFO); #if CONFIG_ZEPHYR_NATIVE_DRIVERS static int dma_init(struct sof_dma *dma); -struct sof_dma *sof_dma_get(uint32_t dir, uint32_t cap, uint32_t dev, uint32_t flags) +struct sof_dma *z_impl_sof_dma_get(uint32_t dir, uint32_t cap, uint32_t dev, uint32_t flags) { const struct dma_info *info = dma_info_get(); int users, ret = 0; @@ -129,7 +129,7 @@ struct sof_dma *sof_dma_get(uint32_t dir, uint32_t cap, uint32_t dev, uint32_t f return !ret ? dmin : NULL; } -void sof_dma_put(struct sof_dma *dma) +void z_impl_sof_dma_put(struct sof_dma *dma) { k_spinlock_key_t key; @@ -168,8 +168,8 @@ static int dma_init(struct sof_dma *dma) return 0; } -EXPORT_SYMBOL(sof_dma_get); -EXPORT_SYMBOL(sof_dma_put); +EXPORT_SYMBOL(z_impl_sof_dma_get); +EXPORT_SYMBOL(z_impl_sof_dma_put); #else struct dma *dma_get(uint32_t dir, uint32_t cap, uint32_t dev, uint32_t flags) { diff --git a/zephyr/CMakeLists.txt b/zephyr/CMakeLists.txt index 4ef481d8c52d..b3e010ffa58e 100644 --- a/zephyr/CMakeLists.txt +++ b/zephyr/CMakeLists.txt @@ -490,6 +490,12 @@ if(NOT DEFINED PLATFORM) endif() zephyr_include_directories(${SOF_PLATFORM_PATH}/${PLATFORM}/include) +zephyr_library_sources_ifdef(CONFIG_USERSPACE + syscall/sof_dma.c +) + +zephyr_syscall_header(include/sof/lib/sof_dma.h) + # Mandatory Files used on all platforms. 
# Commented files will be added/removed as integration dictates. zephyr_library_sources( diff --git a/zephyr/Kconfig b/zephyr/Kconfig index 20a3b6f173a2..8eab25d00812 100644 --- a/zephyr/Kconfig +++ b/zephyr/Kconfig @@ -17,6 +17,12 @@ config SOF_USERSPACE processing mode as userspace code and data. This feature is WIP and is not yet ready for production, for developers only. +config SOF_USERSPACE_INTERFACE_DMA + bool "Enable SOF DMA interface to userspace threads" + depends on USERSPACE + help + Allow user-space threads to use the SOF DMA interface. + config SOF_ZEPHYR_HEAP_CACHED bool "Cached Zephyr heap for SOF memory non-shared zones" default y if CAVS || ACE @@ -178,6 +184,15 @@ config SOF_BOOT_TEST initialized. After that SOF will continue running and be usable as usual. +config SOF_BOOT_TEST_STANDALONE + bool "enable tests at boot time that are run instead of SOF main" + select SOF_BOOT_TEST + select ZTEST + help + Run extended set of tests at boot that can use IPC and interfere + with system state. Normal IPC handling of the SOF application + is disabled to allow more complex tests to run. + config SOF_ZEPHYR_NO_SOF_CLOCK bool help diff --git a/zephyr/include/sof/lib/dma.h b/zephyr/include/sof/lib/dma.h index 3c7f56d7b5f5..b13f3c25221b 100644 --- a/zephyr/include/sof/lib/dma.h +++ b/zephyr/include/sof/lib/dma.h @@ -266,22 +266,11 @@ typedef int (*dma_process_func)(const struct audio_stream *source, */ int dmac_init(struct sof *sof); -/** - * \brief API to request a platform DMAC. - * - * Users can request DMAC based on dev type, copy direction, capabilities - * and access privilege. - * For exclusive access, ret DMAC with no channels draining. - * For shared access, ret DMAC with the least number of channels draining. - */ -struct sof_dma *sof_dma_get(uint32_t dir, uint32_t caps, uint32_t dev, uint32_t flags); - -/** - * \brief API to release a platform DMAC. - * - * @param[in] dma DMAC to relese. 
+/* + * Need to use sof_dma.h to avoid "syscalls/dma.h" name conflict + * with Zephyr autogenerated headers for syscall support. */ -void sof_dma_put(struct sof_dma *dma); +#include "sof_dma.h" #ifndef CONFIG_ZEPHYR_NATIVE_DRIVERS #include "dma-legacy.h" diff --git a/zephyr/include/sof/lib/sof_dma.h b/zephyr/include/sof/lib/sof_dma.h new file mode 100644 index 000000000000..74b7d4289083 --- /dev/null +++ b/zephyr/include/sof/lib/sof_dma.h @@ -0,0 +1,169 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright (c) 2025, Intel Corporation. + */ + +/* + * Need to use sof_dma.h to avoid "syscalls/dma.h" name conflict + * with Zephyr autogenerated headers for syscall support. + */ + +#ifndef SOF_DMA_H +#define SOF_DMA_H + +/** + * \brief API to request a platform DMAC. + * + * Users can request DMAC based on dev type, copy direction, capabilities + * and access privilege. + * For exclusive access, ret DMAC with no channels draining. + * For shared access, ret DMAC with the least number of channels draining. + */ +__syscall struct sof_dma *sof_dma_get(uint32_t dir, uint32_t caps, uint32_t dev, uint32_t flags); + +struct sof_dma *z_impl_sof_dma_get(uint32_t dir, uint32_t cap, uint32_t dev, uint32_t flags); + + +/** + * \brief API to release a platform DMAC. + * + * @param[in] dma DMAC to release. 
+ */ +__syscall void sof_dma_put(struct sof_dma *dma); + +void z_impl_sof_dma_put(struct sof_dma *dma); + +__syscall int sof_dma_get_attribute(struct sof_dma *dma, uint32_t type, uint32_t *value); + +__syscall int sof_dma_request_channel(struct sof_dma *dma, uint32_t stream_tag); + +__syscall void sof_dma_release_channel(struct sof_dma *dma, + uint32_t channel); + +__syscall int sof_dma_config(struct sof_dma *dma, uint32_t channel, + struct dma_config *config); + +__syscall int sof_dma_start(struct sof_dma *dma, uint32_t channel); + +__syscall int sof_dma_stop(struct sof_dma *dma, uint32_t channel); + +__syscall int sof_dma_get_status(struct sof_dma *dma, uint32_t channel, struct dma_status *stat); + +__syscall int sof_dma_reload(struct sof_dma *dma, uint32_t channel, size_t size); + +static inline int z_impl_sof_dma_get_attribute(struct sof_dma *dma, uint32_t type, uint32_t *value) +{ + return dma_get_attribute(dma->z_dev, type, value); +} + +static inline int z_impl_sof_dma_request_channel(struct sof_dma *dma, uint32_t stream_tag) +{ + return dma_request_channel(dma->z_dev, &stream_tag); +} + +static inline void z_impl_sof_dma_release_channel(struct sof_dma *dma, + uint32_t channel) +{ + dma_release_channel(dma->z_dev, channel); +} + +static inline int z_impl_sof_dma_config(struct sof_dma *dma, uint32_t channel, + struct dma_config *config) +{ + return dma_config(dma->z_dev, channel, config); +} + + +static inline int z_impl_sof_dma_start(struct sof_dma *dma, uint32_t channel) +{ + return dma_start(dma->z_dev, channel); +} + +static inline int z_impl_sof_dma_stop(struct sof_dma *dma, uint32_t channel) +{ + return dma_stop(dma->z_dev, channel); +} + +static inline int z_impl_sof_dma_get_status(struct sof_dma *dma, uint32_t channel, + struct dma_status *stat) +{ + return dma_get_status(dma->z_dev, channel, stat); +} + +static inline int z_impl_sof_dma_reload(struct sof_dma *dma, uint32_t channel, size_t size) +{ + return dma_reload(dma->z_dev, channel, 0, 0, size); 
+} + +#ifdef CONFIG_SOF_USERSPACE_INTERFACE_DMA + +/* include definitions from generated file */ +#include <zephyr/syscalls/sof_dma.h> + +#else /* !CONFIG_SOF_USERSPACE_INTERFACE_DMA */ + +/* + * SOF-specific mechanism to allow building SOF with user-space + * support enabled in Zephyr, but not including all syscall + * interfaces in the SOF binary. The downside is we cannot + * use the zephyr/syscalls/sof_dma.h boilerplate that is autogenerated + * but instead need a manual wrapper that is below here. + * + * This can be removed if DMA is used in all SOF user-space builds. + */ + +static inline struct sof_dma *sof_dma_get(uint32_t dir, uint32_t caps, uint32_t dev, uint32_t flags) +{ + return z_impl_sof_dma_get(dir, caps, dev, flags); +} + +static inline void sof_dma_put(struct sof_dma *dma) +{ + return z_impl_sof_dma_put(dma); +} + +static inline int sof_dma_get_attribute(struct sof_dma *dma, uint32_t type, uint32_t *value) +{ + return z_impl_sof_dma_get_attribute(dma, type, value); +} + +static inline int sof_dma_request_channel(struct sof_dma *dma, uint32_t stream_tag) +{ + return z_impl_sof_dma_request_channel(dma, stream_tag); +} + +static inline void sof_dma_release_channel(struct sof_dma *dma, + uint32_t channel) +{ + return z_impl_sof_dma_release_channel(dma, channel); +} + +static inline int sof_dma_config(struct sof_dma *dma, uint32_t channel, + struct dma_config *config) +{ + return z_impl_sof_dma_config(dma, channel, config); +} + +static inline int sof_dma_start(struct sof_dma *dma, uint32_t channel) +{ + return z_impl_sof_dma_start(dma, channel); +} + +static inline int sof_dma_stop(struct sof_dma *dma, uint32_t channel) +{ + return z_impl_sof_dma_stop(dma, channel); +} + +static inline int sof_dma_get_status(struct sof_dma *dma, uint32_t channel, struct dma_status *stat) +{ + return z_impl_sof_dma_get_status(dma, channel, stat); +} + +static inline int sof_dma_reload(struct sof_dma *dma, uint32_t channel, size_t size) +{ + return z_impl_sof_dma_reload(dma, channel, size); +} + 
+#endif /* CONFIG_SOF_USERSPACE_INTERFACE_DMA */ + +#endif diff --git a/zephyr/syscall/sof_dma.c b/zephyr/syscall/sof_dma.c new file mode 100644 index 000000000000..84fd2f24aee4 --- /dev/null +++ b/zephyr/syscall/sof_dma.c @@ -0,0 +1,238 @@ +// SPDX-License-Identifier: BSD-3-Clause +// +// Copyright(c) 2025 Intel Corporation. + +#include +#include +#include + +#ifdef CONFIG_SOF_USERSPACE_INTERFACE_DMA + +static inline bool sof_dma_has_access(struct sof_dma *dma) +{ + /* + * use the Zephyr dma.h device handle to check calling + * thread has access to it + */ + return k_object_is_valid(dma->z_dev, K_OBJ_DRIVER_DMA); +} + +static inline bool sof_dma_is_valid(struct sof_dma *dma) +{ + const struct dma_info *info = dma_info_get(); + uintptr_t offset = (uintptr_t)dma - (uintptr_t)info->dma_array; + struct sof_dma *array_end = info->dma_array + info->num_dmas; + + if (!info->num_dmas) + return false; + + /* + * The 'dma' pointer is not trusted, so we must first ensure it + * points to a valid "struct sof_dma *" kernel object. + */ + if (dma < info->dma_array || dma >= array_end || offset % sizeof(struct sof_dma)) + return false; + + return sof_dma_has_access(dma); +} + +static inline struct sof_dma *z_vrfy_sof_dma_get(uint32_t dir, uint32_t cap, + uint32_t dev, uint32_t flags) +{ + struct sof_dma *dma = z_impl_sof_dma_get(dir, cap, dev, flags); + + /* + * note: Usually validation is done first, but here + * z_impl_sof_dma_get() is first called with unvalidated input + * arguments on purpose. This is done to reuse the existing SOF + * code to track DMA kernel objects. When called from + * user-space, we use existing functionality to look up the + * kernel object, but add an extra layer to check for access + * permission. 
+ */ + if (dma) { + if (sof_dma_has_access(dma)) + return dma; + + /* no access, release reference on error */ + z_impl_sof_dma_put(dma); + } + + return NULL; +} +#include <zephyr/syscalls/sof_dma_get_mrsh.c> + +static inline void z_vrfy_sof_dma_put(struct sof_dma *dma) +{ + K_OOPS(!sof_dma_is_valid(dma)); + + z_impl_sof_dma_put(dma); +} +#include <zephyr/syscalls/sof_dma_put_mrsh.c> + +static inline int z_vrfy_sof_dma_get_attribute(struct sof_dma *dma, uint32_t type, uint32_t *value) +{ + K_OOPS(!sof_dma_is_valid(dma)); + K_OOPS(K_SYSCALL_MEMORY_WRITE(value, sizeof(*value))); + + return z_impl_sof_dma_get_attribute(dma, type, value); +} +#include <zephyr/syscalls/sof_dma_get_attribute_mrsh.c> + +static inline int z_vrfy_sof_dma_request_channel(struct sof_dma *dma, uint32_t stream_tag) +{ + K_OOPS(!sof_dma_is_valid(dma)); + + return z_impl_sof_dma_request_channel(dma, stream_tag); +} +#include <zephyr/syscalls/sof_dma_request_channel_mrsh.c> + +static inline void z_vrfy_sof_dma_release_channel(struct sof_dma *dma, + uint32_t channel) +{ + K_OOPS(!sof_dma_is_valid(dma)); + + return z_impl_sof_dma_release_channel(dma, channel); +} +#include <zephyr/syscalls/sof_dma_release_channel_mrsh.c> + +/** + * Creates a deep copy of the DMA transfer blocks in kernel address space, + * based on the DMA config description given as argument. + * + * All pointers in 'cfg' are validated for access permission, and if + * ok, contents is copied to a kernel side object. 
+ * + * @arg cfg kernel object for DMA configuration that contains + * user-space pointers to DMA transfer objects + * @return array of kernel DMA block/transfer config objects + */ +static inline struct dma_block_config *deep_copy_dma_blk_cfg_list(struct dma_config *cfg) +{ + struct dma_block_config *kern_cfg = + rmalloc(0, sizeof(*kern_cfg) * cfg->block_count); + struct dma_block_config *kern_prev = NULL, *kern_next, *user_next; + int i = 0; + + if (!kern_cfg) + return NULL; + + for (user_next = cfg->head_block, kern_next = kern_cfg; + user_next; + user_next = user_next->next_block, kern_next++) { + if (++i > cfg->block_count) + goto err; + + if (k_usermode_from_copy(kern_next, user_next, sizeof(*kern_next))) + goto err; + + /* check access permissions for DMA src/dest memory */ + switch (cfg->channel_direction) { + case MEMORY_TO_MEMORY: + if (K_SYSCALL_MEMORY_WRITE((void *)kern_next->dest_address, + kern_next->block_size)) + goto err; + COMPILER_FALLTHROUGH; + case MEMORY_TO_PERIPHERAL: + case MEMORY_TO_HOST: + if (K_SYSCALL_MEMORY_READ((void *)kern_next->source_address, + kern_next->block_size)) + goto err; + break; + case PERIPHERAL_TO_MEMORY: + case HOST_TO_MEMORY: + if (K_SYSCALL_MEMORY_WRITE((void *)kern_next->dest_address, + kern_next->block_size)) + goto err; + break; + default: + goto err; + } + + if (kern_prev) + kern_prev->next_block = kern_next; + + kern_prev = kern_next; + } + + /* set transfer list to point to first kernel transfer config object */ + cfg->head_block = kern_cfg; + + return kern_cfg; + +err: + /* do not call K_OOPS until kernel memory is freed */ + rfree(kern_cfg); + return NULL; +} + +static inline int z_vrfy_sof_dma_config(struct sof_dma *dma, uint32_t channel, + struct dma_config *config) +{ + struct dma_block_config *blk_cfgs; + struct dma_config kern_cfg, user_cfg; + int ret; + + K_OOPS(!sof_dma_is_valid(dma)); + K_OOPS(k_usermode_from_copy(&user_cfg, config, sizeof(user_cfg))); + + /* use only DMA config attributes that are 
safe to use from user-space */ + kern_cfg.dma_slot = user_cfg.dma_slot; + kern_cfg.channel_direction = user_cfg.channel_direction; + kern_cfg.cyclic = user_cfg.cyclic; + kern_cfg.source_data_size = user_cfg.source_data_size; + kern_cfg.dest_data_size = user_cfg.dest_data_size; + kern_cfg.source_burst_length = user_cfg.source_burst_length; + kern_cfg.dest_burst_length = user_cfg.dest_burst_length; + kern_cfg.block_count = user_cfg.block_count; + kern_cfg.head_block = user_cfg.head_block; + + /* validate and copy transfer blocks to kernel */ + blk_cfgs = deep_copy_dma_blk_cfg_list(&kern_cfg); + K_OOPS(blk_cfgs == NULL); + + /* TODO: add checks for peripheral/host FIFO access? */ + + ret = z_impl_sof_dma_config(dma, channel, &kern_cfg); + + rfree(blk_cfgs); + + return ret; +} +#include <zephyr/syscalls/sof_dma_config_mrsh.c> + +static inline int z_vrfy_sof_dma_start(struct sof_dma *dma, uint32_t channel) +{ + K_OOPS(!sof_dma_is_valid(dma)); + + return z_impl_sof_dma_start(dma, channel); +} +#include <zephyr/syscalls/sof_dma_start_mrsh.c> + +static inline int z_vrfy_sof_dma_stop(struct sof_dma *dma, uint32_t channel) +{ + K_OOPS(!sof_dma_is_valid(dma)); + + return z_impl_sof_dma_stop(dma, channel); +} +#include <zephyr/syscalls/sof_dma_stop_mrsh.c> + +static inline int z_vrfy_sof_dma_get_status(struct sof_dma *dma, uint32_t channel, + struct dma_status *stat) +{ + K_OOPS(!sof_dma_is_valid(dma)); + K_OOPS(K_SYSCALL_MEMORY_WRITE(stat, sizeof(*stat))); + + return z_impl_sof_dma_get_status(dma, channel, stat); +} +#include <zephyr/syscalls/sof_dma_get_status_mrsh.c> + +static inline int z_vrfy_sof_dma_reload(struct sof_dma *dma, uint32_t channel, size_t size) +{ + K_OOPS(!sof_dma_is_valid(dma)); + + return z_impl_sof_dma_reload(dma, channel, size); +} +#include <zephyr/syscalls/sof_dma_reload_mrsh.c> + +#endif /* CONFIG_SOF_USERSPACE_INTERFACE_DMA */ diff --git a/zephyr/test/CMakeLists.txt b/zephyr/test/CMakeLists.txt index 0577021cbbbb..767d0984d1c5 100644 --- a/zephyr/test/CMakeLists.txt +++ b/zephyr/test/CMakeLists.txt @@ -3,3 +3,9 @@ if (CONFIG_SOF_BOOT_TEST) vmh.c ) endif() + +if (CONFIG_SOF_BOOT_TEST_STANDALONE) + if (CONFIG_DT_HAS_INTEL_ADSP_HDA_HOST_IN_ENABLED 
AND CONFIG_SOF_USERSPACE_INTERFACE_DMA) + zephyr_library_sources(userspace/test_intel_hda_dma.c) + endif() +endif() diff --git a/zephyr/test/userspace/README.md b/zephyr/test/userspace/README.md new file mode 100644 index 000000000000..55807eecb329 --- /dev/null +++ b/zephyr/test/userspace/README.md @@ -0,0 +1,23 @@ +intel_hda_dma test +------------------ + +This is a standalone test to exercise the Intel HDA DMA host interface +from a userspace Zephyr thread. +Build with ("ptl" example): + +./scripts/xtensa-build-zephyr.py --cmake-args=-DCONFIG_SOF_BOOT_TEST_STANDALONE=y \ + --cmake-args=-DCONFIG_SOF_USERSPACE_INTERFACE_DMA=y \ + -o app/overlays/ptl/userspace_overlay.conf -o app/winconsole_overlay.conf ptl + +Running test: +- Copy resulting firmware (sof-ptl.ri) to device under test. +- Boot and run the test with cavstool.py: + sudo ./cavstool.py sof-ptl.ri +- Test results printed to cavstool.py + +References to related assets in Zephyr codebase: +- cavstool.py + - zephyr/soc/intel/intel_adsp/tools/cavstool.py +- HD DMA tests in Zephyr + - zephyr/tests/boards/intel_adsp/hda/ + - larger set in kernel space, using DMA interface directly without SOF dependencies diff --git a/zephyr/test/userspace/test_intel_hda_dma.c b/zephyr/test/userspace/test_intel_hda_dma.c new file mode 100644 index 000000000000..4b5a3e80cdf2 --- /dev/null +++ b/zephyr/test/userspace/test_intel_hda_dma.c @@ -0,0 +1,241 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* + * Copyright(c) 2025 Intel Corporation. + */ + +/* + * Test case for user-space use of the SOF DMA interface. The tests + * transfer data from DSP to host using the host HD DMA instance. + * The test uses the cavstool.py infrastructure to perform host side + * programming of the HDA DMA, and to verify the transferred data. + * + * This test is based on the Zephyr kernel tests for Intel HD DMA + * driver (zephyr/tests/boards/intel_adsp/hda/) written by Tom + * Burdick. This test performs only subset of flows. 
Driver testing + * should primarily done with the Zephyr kernel tests and this test + * is solely to test the added syscall layer added in SOF. + */ + +#include + +#include +#include +#include +#include + +LOG_MODULE_DECLARE(sof_boot_test, LOG_LEVEL_DBG); + +#define USER_STACKSIZE 2048 +#define TEST_BUF_SIZE 256 +#define TEST_CHANNEL 0 +#define HD_DMA_BUF_ALIGN 128 + +static struct k_thread user_thread; +static K_THREAD_STACK_DEFINE(user_stack, USER_STACKSIZE); + +K_SEM_DEFINE(ipc_sem_wake_user, 0, 1); +K_SEM_DEFINE(ipc_sem_wake_kernel, 0, 1); + +static void intel_hda_dma_user(void *p1, void *p2, void *p3) +{ + uint8_t data_buf[TEST_BUF_SIZE] __aligned(HD_DMA_BUF_ALIGN); + struct dma_block_config dma_block_cfg; + struct dma_config config; + struct dma_status stat; + struct sof_dma *dma; + uint32_t addr_align; + int err, channel; + + zassert_true(k_is_user_context(), "isn't user"); + + LOG_INF("SOF thread %s (%s)", + k_is_user_context() ? "UserSpace!" : "privileged mode.", + CONFIG_BOARD_TARGET); + + /* + * note: this gets a pointer to kernel memory this thread + * cannot access + */ + dma = sof_dma_get(SOF_DMA_DIR_LMEM_TO_HMEM, 0, SOF_DMA_DEV_HOST, SOF_DMA_ACCESS_SHARED); + + k_sem_take(&ipc_sem_wake_user, K_FOREVER); + LOG_INF("configure DMA channel"); + + channel = sof_dma_request_channel(dma, TEST_CHANNEL); + LOG_INF("sof_dma_request_channel: ret %d", channel); + + err = sof_dma_get_attribute(dma, DMA_ATTR_BUFFER_ADDRESS_ALIGNMENT, + &addr_align); + zassert_equal(err, 0); + zassert_true(addr_align == HD_DMA_BUF_ALIGN); + + /* set up a DMA transfer */ + memset(&dma_block_cfg, 0, sizeof(dma_block_cfg)); + dma_block_cfg.dest_address = 0; /* host fifo */ + dma_block_cfg.source_address = (uintptr_t)data_buf; + dma_block_cfg.block_size = sizeof(data_buf); + + /* + * fill data ramp, this payload is expected by host test + * harness + */ + for (uint32_t i = 0; i < TEST_BUF_SIZE; i++) { + data_buf[i] = i & 0xff; + } + sys_cache_data_flush_range(data_buf, 
sizeof(data_buf)); + + memset(&config, 0, sizeof(config)); + config.channel_direction = MEMORY_TO_HOST; + config.block_count = 1; + config.head_block = &dma_block_cfg; + + err = sof_dma_config(dma, channel, &config); + zassert_equal(err, 0); + LOG_INF("sof_dma_config: success"); + + err = sof_dma_start(dma, channel); + zassert_equal(err, 0); + LOG_INF("sof_dma_start: ch %d", channel); + + k_sem_give(&ipc_sem_wake_kernel); + LOG_INF("setup ready, waiting for kernel to configure host-side of the test"); + k_sem_take(&ipc_sem_wake_user, K_FOREVER); + LOG_INF("start DMA test and transfer data"); + + err = sof_dma_get_status(dma, channel, &stat); + zassert_equal(err, 0); + LOG_INF("sof_dma_get_status start: pend %u free %u", + stat.pending_length, stat.free); + + err = sof_dma_reload(dma, channel, sizeof(data_buf)); + zassert_equal(err, 0); + + for (int i = 0; stat.pending_length < TEST_BUF_SIZE; i++) { + err = sof_dma_get_status(dma, channel, &stat); + zassert_equal(err, 0); + LOG_INF("sof_dma_get_status %d: pend %u free %u", i, + stat.pending_length, stat.free); + + zassert_true(i < 100, "DMA transfer completes in 100usec"); + + /* let DMA transfer complete */ + k_sleep(K_USEC(1)); + } + + err = sof_dma_get_status(dma, channel, &stat); + zassert_equal(err, 0); + LOG_INF("sof_dma_get_status end: pend %u free %u", + stat.pending_length, stat.free); + + LOG_INF("transfer done, asking host to validate output"); + k_sem_give(&ipc_sem_wake_kernel); + k_sem_take(&ipc_sem_wake_user, K_FOREVER); + LOG_INF("test done, cleaning up resources"); + + err = sof_dma_stop(dma, channel); + zassert_equal(err, 0); + + sof_dma_release_channel(dma, channel); + + sof_dma_put(dma); + + LOG_INF("DMA stopped and resources freed"); + + k_sem_give(&ipc_sem_wake_kernel); +} + +#define IPC_TIMEOUT K_MSEC(1500) +#define DMA_BUF_SIZE 256 + +#define ALIGNMENT DMA_BUF_ADDR_ALIGNMENT(DT_NODELABEL(hda_host_in)) +static __aligned(ALIGNMENT) uint8_t dma_buf[DMA_BUF_SIZE]; + +#include +#include 
<../../../../zephyr/tests/boards/intel_adsp/hda/src/tests.h> + +static int msg_validate_res; + +static bool ipc_message(const struct device *dev, void *arg, + uint32_t data, uint32_t ext_data) +{ + LOG_DBG("HDA message received, data %u, ext_data %u", data, ext_data); + msg_validate_res = ext_data; + return true; +} + +static void intel_hda_dma_kernel(void) +{ + const struct device *dma; + + LOG_INF("run %s with buffer at address %p, size %d", + __func__, (void *)dma_buf, DMA_BUF_SIZE); + + intel_adsp_ipc_set_message_handler(INTEL_ADSP_IPC_HOST_DEV, ipc_message, NULL); + + k_thread_create(&user_thread, user_stack, USER_STACKSIZE, + intel_hda_dma_user, NULL, NULL, NULL, + -1, K_USER, K_FOREVER); + + k_thread_access_grant(&user_thread, &ipc_sem_wake_user); + k_thread_access_grant(&user_thread, &ipc_sem_wake_kernel); + + dma = DEVICE_DT_GET(DT_NODELABEL(hda_host_in)); + k_thread_access_grant(&user_thread, dma); + + hda_ipc_msg(INTEL_ADSP_IPC_HOST_DEV, IPCCMD_HDA_RESET, + TEST_CHANNEL, IPC_TIMEOUT); + + hda_ipc_msg(INTEL_ADSP_IPC_HOST_DEV, IPCCMD_HDA_CONFIG, + TEST_CHANNEL | (DMA_BUF_SIZE << 8), IPC_TIMEOUT); + + k_thread_start(&user_thread); + + LOG_INF("user started, waiting for it to be ready"); + + k_sem_give(&ipc_sem_wake_user); + k_sem_take(&ipc_sem_wake_kernel, K_FOREVER); + + LOG_INF("user ready, starting HDA test"); + + hda_ipc_msg(INTEL_ADSP_IPC_HOST_DEV, IPCCMD_HDA_START, TEST_CHANNEL, IPC_TIMEOUT); + + k_sem_give(&ipc_sem_wake_user); + k_sem_take(&ipc_sem_wake_kernel, K_FOREVER); + + LOG_INF("transfer done, validating results"); + + hda_ipc_msg(INTEL_ADSP_IPC_HOST_DEV, IPCCMD_HDA_VALIDATE, TEST_CHANNEL, + IPC_TIMEOUT); + + hda_dump_regs(HOST_OUT, HDA_REGBLOCK_SIZE, TEST_CHANNEL, "host reset"); + + k_sem_give(&ipc_sem_wake_user); + k_sem_take(&ipc_sem_wake_kernel, K_FOREVER); + + LOG_INF("test done, terminate user thread"); + + k_thread_join(&user_thread, K_FOREVER); + + zassert_true(msg_validate_res == 1, "DMA transferred data invalid payload"); +} + 
+ZTEST(userspace_intel_hda_dma, dma_mem_to_host) +{ + intel_hda_dma_kernel(); + + ztest_test_pass(); +} + +ZTEST_SUITE(userspace_intel_hda_dma, NULL, NULL, NULL, NULL, NULL); + +/** + * SOF main has booted up and IPC handling is stopped. + * Run test suites with ztest_run_all. + */ +static int run_tests(void) +{ + ztest_run_all(NULL, false, 1, 1); + return 0; +} + +SYS_INIT(run_tests, APPLICATION, 99);