From 53ee8c1b03935c335ff9a0df2c91cfd4355eac10 Mon Sep 17 00:00:00 2001 From: Serhiy Katsyuba Date: Fri, 17 Oct 2025 10:59:25 +0200 Subject: [PATCH] zephyr: Protect Zephyr heap metadata from corruption Zephyr stores heap metadata just before each allocated chunk. This change ensures metadata for each chunk is stored in its own separate cache line. So if invalidate/writeback is mistakenly called for non-cached memory, metadata of a neighboring chunk does not get corrupted. We already have such size alignment constraints implemented for cached allocations; this change adds the same size alignment for non-cached allocations. This is a workaround for potential problems caused by invalidate/writeback calls for non-cached memory, which are wrong and should never happen in well-written code. However, such problems could be easily introduced and are quite hard to debug. The trade-off -- we waste an additional cache line for each chunk's metadata for non-cached allocations (as we already do for cached allocations). Signed-off-by: Serhiy Katsyuba --- zephyr/lib/alloc.c | 33 ++++++++++++++------------------- 1 file changed, 14 insertions(+), 19 deletions(-) diff --git a/zephyr/lib/alloc.c b/zephyr/lib/alloc.c index 17d8d1a20af5..475fa09374a4 100644 --- a/zephyr/lib/alloc.c +++ b/zephyr/lib/alloc.c @@ -376,6 +376,20 @@ static void *heap_alloc_aligned(struct k_heap *h, size_t min_align, size_t bytes struct sys_memory_stats stats; #endif + /* + * Zephyr sys_heap stores metadata at start of each + * heap allocation. To ensure no allocated cached buffer + * overlaps the same cacheline with the metadata chunk, + * align both allocation start and size of allocation + * to cacheline. As cached and non-cached allocations are + * mixed, same rules need to be followed for both types of + * allocations. 
+ */ +#ifdef CONFIG_SOF_ZEPHYR_HEAP_CACHED + min_align = MAX(PLATFORM_DCACHE_ALIGN, min_align); + bytes = ALIGN_UP(bytes, PLATFORM_DCACHE_ALIGN); +#endif + key = k_spin_lock(&h->lock); ret = sys_heap_aligned_alloc(&h->heap, min_align, bytes); k_spin_unlock(&h->lock, key); @@ -394,20 +408,6 @@ static void __sparse_cache *heap_alloc_aligned_cached(struct k_heap *h, { void __sparse_cache *ptr; - /* - * Zephyr sys_heap stores metadata at start of each - * heap allocation. To ensure no allocated cached buffer - * overlaps the same cacheline with the metadata chunk, - * align both allocation start and size of allocation - * to cacheline. As cached and non-cached allocations are - * mixed, same rules need to be followed for both type of - * allocations. - */ -#ifdef CONFIG_SOF_ZEPHYR_HEAP_CACHED - min_align = MAX(PLATFORM_DCACHE_ALIGN, min_align); - bytes = ALIGN_UP(bytes, min_align); -#endif - ptr = (__sparse_force void __sparse_cache *)heap_alloc_aligned(h, min_align, bytes); #ifdef CONFIG_SOF_ZEPHYR_HEAP_CACHED @@ -470,11 +470,6 @@ void *rmalloc_align(uint32_t flags, size_t bytes, uint32_t alignment) if (!(flags & SOF_MEM_FLAG_COHERENT)) { ptr = (__sparse_force void *)heap_alloc_aligned_cached(heap, alignment, bytes); } else { - /* - * XTOS alloc implementation has used dcache alignment, - * so SOF application code is expecting this behaviour. - */ - alignment = MAX(PLATFORM_DCACHE_ALIGN, alignment); ptr = heap_alloc_aligned(heap, alignment, bytes); }