From e8cdb15e19905f225a196c333e95c317ccc50a0a Mon Sep 17 00:00:00 2001 From: skb666 Date: Thu, 25 Dec 2025 21:36:23 +0800 Subject: [PATCH 1/3] [Feature] Support pvPortRealloc --- examples/coverity/FreeRTOSConfig.h | 1 + .../template_configuration/FreeRTOSConfig.h | 5 + include/FreeRTOS.h | 8 + include/portable.h | 4 + portable/MemMang/heap_4.c | 238 +++++++++++++++++ portable/MemMang/heap_5.c | 241 ++++++++++++++++++ 6 files changed, 497 insertions(+) diff --git a/examples/coverity/FreeRTOSConfig.h b/examples/coverity/FreeRTOSConfig.h index 5feaa40de41..04398324cc7 100644 --- a/examples/coverity/FreeRTOSConfig.h +++ b/examples/coverity/FreeRTOSConfig.h @@ -72,6 +72,7 @@ #define configSUPPORT_STATIC_ALLOCATION 1 #define configSUPPORT_DYNAMIC_ALLOCATION 1 +#define configSUPPORT_HEAP_REALLOC 0 #define configTOTAL_HEAP_SIZE 4096U #define configAPPLICATION_ALLOCATED_HEAP 1 #define configSTACK_ALLOCATION_FROM_SEPARATE_HEAP 0 diff --git a/examples/template_configuration/FreeRTOSConfig.h b/examples/template_configuration/FreeRTOSConfig.h index 4866356849a..d1b44fd43f7 100644 --- a/examples/template_configuration/FreeRTOSConfig.h +++ b/examples/template_configuration/FreeRTOSConfig.h @@ -283,6 +283,11 @@ * https://www.freertos.org/Static_Vs_Dynamic_Memory_Allocation.html. */ #define configSUPPORT_DYNAMIC_ALLOCATION 1 +/* Set configSUPPORT_HEAP_REALLOC to 1 to include FreeRTOS API functions + * that support reallocating memory blocks in the build. Set to 0 to exclude + * realloc support from the build. Defaults to 0 if left undefined. */ +#define configSUPPORT_HEAP_REALLOC 0 + /* Sets the total size of the FreeRTOS heap, in bytes, when heap_1.c, heap_2.c * or heap_4.c are included in the build. This value is defaulted to 4096 bytes * but it must be tailored to each application. Note the heap will appear in diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index 63e2feb519b..de73ece933b 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -2840,6 +2840,14 @@ #define configSUPPORT_DYNAMIC_ALLOCATION 1 #endif +#ifndef configSUPPORT_HEAP_REALLOC + #define configSUPPORT_HEAP_REALLOC 0 +#endif + +#if ( ( configSUPPORT_HEAP_REALLOC > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION != 1 ) ) + #error configSUPPORT_HEAP_REALLOC cannot be used without dynamic allocation, but configSUPPORT_HEAP_REALLOC is not set to 1. +#endif + #if ( ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION != 1 ) ) #error configUSE_STATS_FORMATTING_FUNCTIONS cannot be used without dynamic allocation, but configSUPPORT_DYNAMIC_ALLOCATION is not set to 1. 
#endif diff --git a/include/portable.h b/include/portable.h index 68e11e79311..57e4859d1fe 100644 --- a/include/portable.h +++ b/include/portable.h @@ -189,6 +189,10 @@ void vPortGetHeapStats( HeapStats_t * pxHeapStats ); void * pvPortMalloc( size_t xWantedSize ) PRIVILEGED_FUNCTION; void * pvPortCalloc( size_t xNum, size_t xSize ) PRIVILEGED_FUNCTION; +#if ( configSUPPORT_HEAP_REALLOC == 1 ) +void *pvPortRealloc( void *pv, + size_t xWantedSize ) PRIVILEGED_FUNCTION; +#endif void vPortFree( void * pv ) PRIVILEGED_FUNCTION; void vPortInitialiseBlocks( void ) PRIVILEGED_FUNCTION; size_t xPortGetFreeHeapSize( void ) PRIVILEGED_FUNCTION; diff --git a/portable/MemMang/heap_4.c b/portable/MemMang/heap_4.c index 50af15dfb07..0ecb1d3d037 100644 --- a/portable/MemMang/heap_4.c +++ b/portable/MemMang/heap_4.c @@ -410,6 +410,244 @@ void vPortFree( void * pv ) } /*-----------------------------------------------------------*/ +#if ( configSUPPORT_HEAP_REALLOC == 1 ) +/* + * pvPortRealloc - Reallocate memory block size + * + * Description: Resize an allocated memory block, attempting to expand or shrink + * the block in place. If in-place resize is not possible, allocate a new block + * and copy the data. + * + * Parameters: + * pv - Pointer to the previously allocated memory block + * xWantedSize - New requested size of user data (in bytes) + * + * Return Value: + * On success: Pointer to the new memory block (may be the same as original) + * On failure: NULL + * + * Behavior: + * - When pv is NULL, equivalent to pvPortMalloc(xWantedSize) + * - When xWantedSize is 0, equivalent to vPortFree(pv) + * - Resize strategy: + * 1. If new size <= original size, attempt to shrink the block + * 2. If new size > original size, attempt to expand by merging with adjacent free block + * 3. 
If in-place resize fails, allocate new block and copy data + */ +void *pvPortRealloc( void *pv, + size_t xWantedSize ) +{ + BlockLink_t *pxBlock; + BlockLink_t *pxPreviousBlock; + BlockLink_t *pxNewBlockLink; + BlockLink_t *pxAdjacentFreeBlock; + void *pvReturn = NULL; + size_t xOriginalSize; + size_t xNewBlockSize; + size_t xAdditionalRequiredSize; + size_t xCopySize; + + /* Handle NULL pointer case - equivalent to malloc */ + if( pv == NULL ) + { + return pvPortMalloc( xWantedSize ); + } + + /* Handle zero size case - equivalent to free */ + if( xWantedSize == 0 ) + { + vPortFree( pv ); + return NULL; + } + + /* Calculate new block size with overhead (header size and alignment) */ + xNewBlockSize = xWantedSize; + if( heapADD_WILL_OVERFLOW( xNewBlockSize, xHeapStructSize ) == 0 ) + { + xNewBlockSize += xHeapStructSize; + if( ( xNewBlockSize & portBYTE_ALIGNMENT_MASK ) != 0x00 ) + { + xAdditionalRequiredSize = portBYTE_ALIGNMENT - ( xNewBlockSize & portBYTE_ALIGNMENT_MASK ); + if( heapADD_WILL_OVERFLOW( xNewBlockSize, xAdditionalRequiredSize ) == 0 ) + { + xNewBlockSize += xAdditionalRequiredSize; + } + else + { + return NULL; + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + return NULL; + } + + /* Get the block header from the user pointer and validate it */ + pxBlock = ( BlockLink_t * )( ( uint8_t * )pv - xHeapStructSize ); + heapVALIDATE_BLOCK_POINTER( pxBlock ); + if( ( heapBLOCK_IS_ALLOCATED( pxBlock ) == 0 ) || ( pxBlock->pxNextFreeBlock != heapPROTECT_BLOCK_POINTER( NULL ) ) ) + { + return NULL; + } + + /* Calculate the original block size (without the allocated bit) + * Check if there's enough free memory to expand the block */ + xOriginalSize = pxBlock->xBlockSize & ~heapBLOCK_ALLOCATED_BITMASK; + if( ( xOriginalSize < xNewBlockSize ) && ( xFreeBytesRemaining < ( xNewBlockSize - xOriginalSize ) ) ) + { + /* Not enough memory to expand the block */ + return NULL; + } + + /* Calculate the amount of user data to copy (excluding the block header). + * The user data size is the block size minus the header size. + * Limit the copy size to the requested size to avoid copying too much data. 
*/ + configASSERT( heapSUBTRACT_WILL_UNDERFLOW( xOriginalSize, xHeapStructSize ) == 0 ); + xCopySize = xOriginalSize - xHeapStructSize; + if( xWantedSize < xCopySize ) + { + xCopySize = xWantedSize; + } + + /* Enter critical section - protect heap structure from concurrent access */ + vTaskSuspendAll(); + { + /* Case 1: Shrink the block (new size is smaller than or equal to original) + * Check if the remaining space is large enough to create a separate free block */ + if( xNewBlockSize <= xOriginalSize ) + { + /* Create a new free block from the remaining space */ + if( ( xOriginalSize - xNewBlockSize ) > heapMINIMUM_BLOCK_SIZE ) + { + pxNewBlockLink = ( BlockLink_t * )( ( ( uint8_t * )pxBlock ) + xNewBlockSize ); + configASSERT( ( ( ( size_t )pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 ); + pxNewBlockLink->xBlockSize = xOriginalSize - xNewBlockSize; + xFreeBytesRemaining += pxNewBlockLink->xBlockSize; + prvInsertBlockIntoFreeList( pxNewBlockLink ); + heapFREE_BLOCK( pxBlock ); + pxBlock->xBlockSize = xNewBlockSize; + heapALLOCATE_BLOCK( pxBlock ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + pvReturn = pv; + } + else + { + /* Case 2: Try to expand by merging with next free block */ + pxAdjacentFreeBlock = ( BlockLink_t * )( ( ( uint8_t * )pxBlock ) + xOriginalSize ); + configASSERT( ( ( ( size_t )pxAdjacentFreeBlock ) & portBYTE_ALIGNMENT_MASK ) == 0 ); + + /* Traverse the free list to find if the adjacent block is actually free. + * The free list is ordered by address, so we can search efficiently.*/ + pxPreviousBlock = &xStart; + while( ( pxPreviousBlock->pxNextFreeBlock != heapPROTECT_BLOCK_POINTER( pxAdjacentFreeBlock ) ) && + ( pxPreviousBlock->pxNextFreeBlock != heapPROTECT_BLOCK_POINTER( NULL ) ) ) + { + pxPreviousBlock = heapPROTECT_BLOCK_POINTER( pxPreviousBlock->pxNextFreeBlock ); + heapVALIDATE_BLOCK_POINTER( pxPreviousBlock ); + } + + if( pxPreviousBlock->pxNextFreeBlock == heapPROTECT_BLOCK_POINTER( pxAdjacentFreeBlock ) ) + { + configASSERT( heapBLOCK_SIZE_IS_VALID( pxAdjacentFreeBlock->xBlockSize ) ); + if( !heapADD_WILL_OVERFLOW( xOriginalSize, pxAdjacentFreeBlock->xBlockSize ) ) + { + /* Found a suitable adjacent free block that can provide enough space. */ + if( ( xOriginalSize + pxAdjacentFreeBlock->xBlockSize ) >= xNewBlockSize ) + { + /* Remove the adjacent free block from the free list and merge it with the allocated block. */ + pxPreviousBlock->pxNextFreeBlock = pxAdjacentFreeBlock->pxNextFreeBlock; + xFreeBytesRemaining -= pxAdjacentFreeBlock->xBlockSize; + heapFREE_BLOCK( pxBlock ); + pxBlock->xBlockSize = xOriginalSize + pxAdjacentFreeBlock->xBlockSize; + + /* If the merged block is larger than needed, split the excess space + * into a new free block. 
*/ + if( ( pxBlock->xBlockSize - xNewBlockSize ) > heapMINIMUM_BLOCK_SIZE ) + { + pxNewBlockLink = ( BlockLink_t * )( ( ( uint8_t * )pxBlock ) + xNewBlockSize ); + configASSERT( ( ( ( size_t )pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 ); + pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xNewBlockSize; + xFreeBytesRemaining += pxNewBlockLink->xBlockSize; + prvInsertBlockIntoFreeList( pxNewBlockLink ); + pxBlock->xBlockSize = xNewBlockSize; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + heapALLOCATE_BLOCK( pxBlock ); + pvReturn = pv; + + /* Update minimum free size statistic if memory was consumed */ + if( xFreeBytesRemaining < xMinimumEverFreeBytesRemaining ) + { + xMinimumEverFreeBytesRemaining = xFreeBytesRemaining; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + /* Exit critical section - heap structure modification complete */ + ( void ) xTaskResumeAll(); + + /* Case 3: If in-place resize failed, allocate a new block and move the data. + * This is more expensive as it involves: + * 1. Allocating a new block with the requested size + * 2. Copying the user data from the old block to the new block + * 3. Freeing the old block + * Note: Statistics are updated by the called functions (malloc and free). */ + if( pvReturn == NULL ) + { + pvReturn = pvPortMalloc( xWantedSize ); + if( pvReturn != NULL ) + { + /* Copy user data from old block to new block (up to the smaller of old or new size) */ + ( void )memcpy( pvReturn, pv, xCopySize ); + vPortFree( pv ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + configASSERT( ( ( ( size_t )pvReturn ) & ( size_t )portBYTE_ALIGNMENT_MASK ) == 0 ); + return pvReturn; +} +#endif /* if ( configSUPPORT_HEAP_REALLOC == 1 ) */ +/*-----------------------------------------------------------*/ + size_t xPortGetFreeHeapSize( void ) { return xFreeBytesRemaining; diff --git a/portable/MemMang/heap_5.c b/portable/MemMang/heap_5.c index bf321304f81..ea2dcc1dbf6 100644 --- a/portable/MemMang/heap_5.c +++ b/portable/MemMang/heap_5.c @@ -445,6 +445,244 @@ void vPortFree( void * pv ) } /*-----------------------------------------------------------*/ +#if ( configSUPPORT_HEAP_REALLOC == 1 ) +/* + * pvPortRealloc - Reallocate memory block size + * + * Description: Resize an allocated memory block, attempting to expand or shrink + * the block in place. If in-place resize is not possible, allocate a new block + * and copy the data. + * + * Parameters: + * pv - Pointer to the previously allocated memory block + * xWantedSize - New requested size of user data (in bytes) + * + * Return Value: + * On success: Pointer to the new memory block (may be the same as original) + * On failure: NULL + * + * Behavior: + * - When pv is NULL, equivalent to pvPortMalloc(xWantedSize) + * - When xWantedSize is 0, equivalent to vPortFree(pv) + * - Resize strategy: + * 1. If new size <= original size, attempt to shrink the block + * 2. If new size > original size, attempt to expand by merging with adjacent free block + * 3. 
If in-place resize fails, allocate new block and copy data + */ +void *pvPortRealloc( void *pv, + size_t xWantedSize ) +{ + BlockLink_t *pxBlock; + BlockLink_t *pxPreviousBlock; + BlockLink_t *pxNewBlockLink; + BlockLink_t *pxAdjacentFreeBlock; + void *pvReturn = NULL; + size_t xOriginalSize; + size_t xNewBlockSize; + size_t xAdditionalRequiredSize; + size_t xCopySize; + + /* Handle NULL pointer case - equivalent to malloc */ + if( pv == NULL ) + { + return pvPortMalloc( xWantedSize ); + } + + /* Handle zero size case - equivalent to free */ + if( xWantedSize == 0 ) + { + vPortFree( pv ); + return NULL; + } + + /* Calculate new block size with overhead (header size and alignment) */ + xNewBlockSize = xWantedSize; + if( heapADD_WILL_OVERFLOW( xNewBlockSize, xHeapStructSize ) == 0 ) + { + xNewBlockSize += xHeapStructSize; + if( ( xNewBlockSize & portBYTE_ALIGNMENT_MASK ) != 0x00 ) + { + xAdditionalRequiredSize = portBYTE_ALIGNMENT - ( xNewBlockSize & portBYTE_ALIGNMENT_MASK ); + if( heapADD_WILL_OVERFLOW( xNewBlockSize, xAdditionalRequiredSize ) == 0 ) + { + xNewBlockSize += xAdditionalRequiredSize; + } + else + { + return NULL; + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + return NULL; + } + + /* Get the block header from the user pointer and validate it */ + pxBlock = ( BlockLink_t * )( ( uint8_t * )pv - xHeapStructSize ); + heapVALIDATE_BLOCK_POINTER( pxBlock ); + if( ( heapBLOCK_IS_ALLOCATED( pxBlock ) == 0 ) || ( pxBlock->pxNextFreeBlock != heapPROTECT_BLOCK_POINTER( NULL ) ) ) + { + return NULL; + } + + /* Calculate the original block size (without the allocated bit) + * Check if there's enough free memory to expand the block */ + xOriginalSize = pxBlock->xBlockSize & ~heapBLOCK_ALLOCATED_BITMASK; + if( ( xOriginalSize < xNewBlockSize ) && ( xFreeBytesRemaining < ( xNewBlockSize - xOriginalSize ) ) ) + { + /* Not enough memory to expand the block */ + return NULL; + } + + /* Calculate the amount of user data to copy (excluding the block header). + * The user data size is the block size minus the header size. + * Limit the copy size to the requested size to avoid copying too much data. 
*/ + configASSERT( heapSUBTRACT_WILL_UNDERFLOW( xOriginalSize, xHeapStructSize ) == 0 ); + xCopySize = xOriginalSize - xHeapStructSize; + if( xWantedSize < xCopySize ) + { + xCopySize = xWantedSize; + } + + /* Enter critical section - protect heap structure from concurrent access */ + vTaskSuspendAll(); + { + /* Case 1: Shrink the block (new size is smaller than or equal to original) + * Check if the remaining space is large enough to create a separate free block */ + if( xNewBlockSize <= xOriginalSize ) + { + /* Create a new free block from the remaining space */ + if( ( xOriginalSize - xNewBlockSize ) > heapMINIMUM_BLOCK_SIZE ) + { + pxNewBlockLink = ( BlockLink_t * )( ( ( uint8_t * )pxBlock ) + xNewBlockSize ); + configASSERT( ( ( ( size_t )pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 ); + pxNewBlockLink->xBlockSize = xOriginalSize - xNewBlockSize; + xFreeBytesRemaining += pxNewBlockLink->xBlockSize; + prvInsertBlockIntoFreeList( pxNewBlockLink ); + heapFREE_BLOCK( pxBlock ); + pxBlock->xBlockSize = xNewBlockSize; + heapALLOCATE_BLOCK( pxBlock ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + pvReturn = pv; + } + else + { + /* Case 2: Try to expand by merging with next free block */ + pxAdjacentFreeBlock = ( BlockLink_t * )( ( ( uint8_t * )pxBlock ) + xOriginalSize ); + configASSERT( ( ( ( size_t )pxAdjacentFreeBlock ) & portBYTE_ALIGNMENT_MASK ) == 0 ); + + /* Traverse the free list to find if the adjacent block is actually free. + * The free list is ordered by address, so we can search efficiently.*/ + pxPreviousBlock = &xStart; + while( ( pxPreviousBlock->pxNextFreeBlock != heapPROTECT_BLOCK_POINTER( pxAdjacentFreeBlock ) ) && + ( pxPreviousBlock->pxNextFreeBlock != heapPROTECT_BLOCK_POINTER( NULL ) ) ) + { + pxPreviousBlock = heapPROTECT_BLOCK_POINTER( pxPreviousBlock->pxNextFreeBlock ); + heapVALIDATE_BLOCK_POINTER( pxPreviousBlock ); + } + + if( pxPreviousBlock->pxNextFreeBlock == heapPROTECT_BLOCK_POINTER( pxAdjacentFreeBlock ) ) + { + configASSERT( heapBLOCK_SIZE_IS_VALID( pxAdjacentFreeBlock->xBlockSize ) ); + if( !heapADD_WILL_OVERFLOW( xOriginalSize, pxAdjacentFreeBlock->xBlockSize ) ) + { + /* Found a suitable adjacent free block that can provide enough space. */ + if( ( xOriginalSize + pxAdjacentFreeBlock->xBlockSize ) >= xNewBlockSize ) + { + /* Remove the adjacent free block from the free list and merge it with the allocated block. */ + pxPreviousBlock->pxNextFreeBlock = pxAdjacentFreeBlock->pxNextFreeBlock; + xFreeBytesRemaining -= pxAdjacentFreeBlock->xBlockSize; + heapFREE_BLOCK( pxBlock ); + pxBlock->xBlockSize = xOriginalSize + pxAdjacentFreeBlock->xBlockSize; + + /* If the merged block is larger than needed, split the excess space + * into a new free block. 
*/ + if( ( pxBlock->xBlockSize - xNewBlockSize ) > heapMINIMUM_BLOCK_SIZE ) + { + pxNewBlockLink = ( BlockLink_t * )( ( ( uint8_t * )pxBlock ) + xNewBlockSize ); + configASSERT( ( ( ( size_t )pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 ); + pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xNewBlockSize; + xFreeBytesRemaining += pxNewBlockLink->xBlockSize; + prvInsertBlockIntoFreeList( pxNewBlockLink ); + pxBlock->xBlockSize = xNewBlockSize; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + heapALLOCATE_BLOCK( pxBlock ); + pvReturn = pv; + + /* Update minimum free size statistic if memory was consumed */ + if( xFreeBytesRemaining < xMinimumEverFreeBytesRemaining ) + { + xMinimumEverFreeBytesRemaining = xFreeBytesRemaining; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + /* Exit critical section - heap structure modification complete */ + ( void ) xTaskResumeAll(); + + /* Case 3: If in-place resize failed, allocate a new block and move the data. + * This is more expensive as it involves: + * 1. Allocating a new block with the requested size + * 2. Copying the user data from the old block to the new block + * 3. Freeing the old block + * Note: Statistics are updated by the called functions (malloc and free). */ + if( pvReturn == NULL ) + { + pvReturn = pvPortMalloc( xWantedSize ); + if( pvReturn != NULL ) + { + /* Copy user data from old block to new block (up to the smaller of old or new size) */ + ( void )memcpy( pvReturn, pv, xCopySize ); + vPortFree( pv ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + configASSERT( ( ( ( size_t )pvReturn ) & ( size_t )portBYTE_ALIGNMENT_MASK ) == 0 ); + return pvReturn; +} +#endif /* if ( configSUPPORT_HEAP_REALLOC == 1 ) */ +/*-----------------------------------------------------------*/ + size_t xPortGetFreeHeapSize( void ) { return xFreeBytesRemaining; @@ -560,6 +798,9 @@ void vPortDefineHeapRegions( const HeapRegion_t * const pxHeapRegions ) /* PRIVI portPOINTER_SIZE_TYPE xAddress; const HeapRegion_t * pxHeapRegion; + /* Check for NULL pointer */ + configASSERT( pxHeapRegions != NULL ); + /* Can only call once! */ configASSERT( pxEnd == NULL ); From 075c1f7c9bf80fde1068b0b391839b2223415abe Mon Sep 17 00:00:00 2001 From: SKB <973689813@qq.com> Date: Wed, 31 Dec 2025 11:34:41 +0800 Subject: [PATCH 2/3] Update include/FreeRTOS.h Co-authored-by: Aniruddha Kanhere --- include/FreeRTOS.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index de73ece933b..c0565724305 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -2845,7 +2845,7 @@ #endif #if ( ( configSUPPORT_HEAP_REALLOC > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION != 1 ) ) - #error configSUPPORT_HEAP_REALLOC cannot be used without dynamic allocation, but configSUPPORT_HEAP_REALLOC is not set to 1. + #error configSUPPORT_HEAP_REALLOC cannot be used without dynamic allocation, but configSUPPORT_DYNAMIC_ALLOCATION is not set to 1. 
#endif #if ( ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION != 1 ) ) From 9903a55c6a0cbd88fd34bba4ca32e50fc7c93b81 Mon Sep 17 00:00:00 2001 From: skb666 <973689813@qq.com> Date: Tue, 6 Jan 2026 23:56:44 +0800 Subject: [PATCH 3/3] [Refactor] Optimize and refine the behavior logic of pvPortRealloc --- portable/MemMang/heap_4.c | 425 +++++++++++++++++++++++++------------- portable/MemMang/heap_5.c | 425 +++++++++++++++++++++++++------------- 2 files changed, 560 insertions(+), 290 deletions(-) diff --git a/portable/MemMang/heap_4.c b/portable/MemMang/heap_4.c index 0ecb1d3d037..f6b0aa038be 100644 --- a/portable/MemMang/heap_4.c +++ b/portable/MemMang/heap_4.c @@ -427,54 +427,78 @@ void vPortFree( void * pv ) * On failure: NULL * * Behavior: - * - When pv is NULL, equivalent to pvPortMalloc(xWantedSize) - * - When xWantedSize is 0, equivalent to vPortFree(pv) - * - Resize strategy: - * 1. If new size <= original size, attempt to shrink the block - * 2. If new size > original size, attempt to expand by merging with adjacent free block - * 3. If in-place resize fails, allocate new block and copy data + * 1) If pv == NULL, behaves like pvPortMalloc(xWantedSize). + * 2) If xWantedSize == 0, behaves like vPortFree(pv) and returns NULL. + * 3) Align the requested size and include the block header size; if the aligned + * size is invalid, return NULL. + * 4) If the aligned requested size is <= current block size, shrink in place and + * insert any sufficiently large remainder as a free block. + * 5) If expansion is required and there are enough free bytes in the heap, try to + * expand into adjacent free blocks in this order: + * - Merge with next free block if it is immediately after the current block. + * - Merge with previous free block if it is immediately before the current block. + * - Merge with both previous and next if combined they provide enough space. + * If none of the above succeed, fall back to allocating a new block, memcpy'ing + * the payload and freeing the old block. */ -void *pvPortRealloc( void *pv, - size_t xWantedSize ) +void * pvPortRealloc( void * pv, + size_t xWantedSize ) { - BlockLink_t *pxBlock; - BlockLink_t *pxPreviousBlock; - BlockLink_t *pxNewBlockLink; - BlockLink_t *pxAdjacentFreeBlock; - void *pvReturn = NULL; - size_t xOriginalSize; - size_t xNewBlockSize; + BlockLink_t * pxBlock; + BlockLink_t * pxNewBlockLink; + BlockLink_t * pxNextFreeBlock; + BlockLink_t * pxPreviousFreeBlock; + BlockLink_t * pxBeforePreviousFreeBlock; + uint8_t * puc; + void * pvReturn = NULL; + size_t xAlignedWantedSize; size_t xAdditionalRequiredSize; - size_t xCopySize; + size_t xCurrentBlockSize; + size_t xRemainingBlockSize; + size_t xNextBlockSize; + size_t xPreviousBlockSize; + BaseType_t xHasNextBlock; + BaseType_t xHasPreviousBlock; - /* Handle NULL pointer case - equivalent to malloc */ + /* Ensure the end marker has been set up. */ + configASSERT( pxEnd ); + + /* If pv is NULL behave like malloc. */ if( pv == NULL ) { - return pvPortMalloc( xWantedSize ); + pvReturn = pvPortMalloc( xWantedSize ); + goto realloc_exit; } - /* Handle zero size case - equivalent to free */ + /* If requested size is zero behave like free. 
*/ if( xWantedSize == 0 ) { vPortFree( pv ); - return NULL; + pvReturn = NULL; + goto realloc_exit; } - /* Calculate new block size with overhead (header size and alignment) */ - xNewBlockSize = xWantedSize; - if( heapADD_WILL_OVERFLOW( xNewBlockSize, xHeapStructSize ) == 0 ) + /* Calculate the internal aligned size including the header. */ + xAlignedWantedSize = xWantedSize; + + /* Add the header size and check for overflow. */ + if( heapADD_WILL_OVERFLOW( xAlignedWantedSize, xHeapStructSize ) == 0 ) { - xNewBlockSize += xHeapStructSize; - if( ( xNewBlockSize & portBYTE_ALIGNMENT_MASK ) != 0x00 ) + xAlignedWantedSize += xHeapStructSize; + + /* Ensure byte alignment. */ + if( ( xAlignedWantedSize & portBYTE_ALIGNMENT_MASK ) != 0x00 ) { - xAdditionalRequiredSize = portBYTE_ALIGNMENT - ( xNewBlockSize & portBYTE_ALIGNMENT_MASK ); - if( heapADD_WILL_OVERFLOW( xNewBlockSize, xAdditionalRequiredSize ) == 0 ) + xAdditionalRequiredSize = portBYTE_ALIGNMENT - ( xAlignedWantedSize & portBYTE_ALIGNMENT_MASK ); + + if( heapADD_WILL_OVERFLOW( xAlignedWantedSize, xAdditionalRequiredSize ) == 0 ) { - xNewBlockSize += xAdditionalRequiredSize; + xAlignedWantedSize += xAdditionalRequiredSize; } else { - return NULL; + /* Overflow -> invalid request. */ + xAlignedWantedSize = 0; } } else @@ -484,165 +508,276 @@ void *pvPortRealloc( void *pv, } else { - return NULL; + xAlignedWantedSize = 0; } - /* Get the block header from the user pointer and validate it */ - pxBlock = ( BlockLink_t * )( ( uint8_t * )pv - xHeapStructSize ); - heapVALIDATE_BLOCK_POINTER( pxBlock ); - if( ( heapBLOCK_IS_ALLOCATED( pxBlock ) == 0 ) || ( pxBlock->pxNextFreeBlock != heapPROTECT_BLOCK_POINTER( NULL ) ) ) + /* Validate the aligned size. */ + if( ( xAlignedWantedSize == 0 ) || ( heapBLOCK_SIZE_IS_VALID( xAlignedWantedSize ) == 0 ) ) { - return NULL; + pvReturn = NULL; + goto realloc_exit; } - /* Calculate the original block size (without the allocated bit) - * Check if there's enough free memory to expand the block */ - xOriginalSize = pxBlock->xBlockSize & ~heapBLOCK_ALLOCATED_BITMASK; - if( ( xOriginalSize < xNewBlockSize ) && ( xFreeBytesRemaining < ( xNewBlockSize - xOriginalSize ) ) ) - { - /* Not enough memory to expand the block */ - return NULL; - } + /* Get the block header for the allocated block. */ + puc = ( uint8_t * ) pv; + puc -= xHeapStructSize; + pxBlock = ( BlockLink_t * ) puc; - /* Calculate the amount of user data to copy (excluding the block header). - * The user data size is the block size minus the header size. - * Limit the copy size to the requested size to avoid copying too much data. */ - configASSERT( heapSUBTRACT_WILL_UNDERFLOW( xOriginalSize, xHeapStructSize ) == 0 ); - xCopySize = xOriginalSize - xHeapStructSize; - if( xWantedSize < xCopySize ) - { - xCopySize = xWantedSize; - } + heapVALIDATE_BLOCK_POINTER( pxBlock ); + configASSERT( heapBLOCK_IS_ALLOCATED( pxBlock ) ); - /* Enter critical section - protect heap structure from concurrent access */ - vTaskSuspendAll(); + /* Current block size without the allocated bit. */ + xCurrentBlockSize = pxBlock->xBlockSize & ~heapBLOCK_ALLOCATED_BITMASK; + + /* 1) Shrink in place if possible. 
*/ + if( xAlignedWantedSize <= xCurrentBlockSize ) { - /* Case 1: Shrink the block (new size is smaller than or equal to original) - * Check if the remaining space is large enough to create a separate free block */ - if( xNewBlockSize <= xOriginalSize ) + xRemainingBlockSize = xCurrentBlockSize - xAlignedWantedSize; + + /* Only split if the remaining space is large enough to form a free block. */ + if( xRemainingBlockSize > heapMINIMUM_BLOCK_SIZE ) { - /* Create a new free block from the remaining space */ - if( ( xOriginalSize - xNewBlockSize ) > heapMINIMUM_BLOCK_SIZE ) + vTaskSuspendAll(); { - pxNewBlockLink = ( BlockLink_t * )( ( ( uint8_t * )pxBlock ) + xNewBlockSize ); - configASSERT( ( ( ( size_t )pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 ); - pxNewBlockLink->xBlockSize = xOriginalSize - xNewBlockSize; - xFreeBytesRemaining += pxNewBlockLink->xBlockSize; - prvInsertBlockIntoFreeList( pxNewBlockLink ); - heapFREE_BLOCK( pxBlock ); - pxBlock->xBlockSize = xNewBlockSize; + /* Set the block to the new size and mark as allocated. */ + pxBlock->xBlockSize = xAlignedWantedSize; heapALLOCATE_BLOCK( pxBlock ); + + /* Create a new free block from the remainder and insert it. */ + pxNewBlockLink = ( BlockLink_t * ) ( ( ( uint8_t * ) pxBlock ) + xAlignedWantedSize ); + configASSERT( ( ( ( size_t ) pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 ); + + pxNewBlockLink->xBlockSize = xRemainingBlockSize; + xFreeBytesRemaining += xRemainingBlockSize; + prvInsertBlockIntoFreeList( pxNewBlockLink ); } - else - { - mtCOVERAGE_TEST_MARKER(); - } - pvReturn = pv; + ( void ) xTaskResumeAll(); } else { - /* Case 2: Try to expand by merging with next free block */ - pxAdjacentFreeBlock = ( BlockLink_t * )( ( ( uint8_t * )pxBlock ) + xOriginalSize ); - configASSERT( ( ( ( size_t )pxAdjacentFreeBlock ) & portBYTE_ALIGNMENT_MASK ) == 0 ); - - /* Traverse the free list to find if the adjacent block is actually free. - * The free list is ordered by address, so we can search efficiently.*/ - pxPreviousBlock = &xStart; - while( ( pxPreviousBlock->pxNextFreeBlock != heapPROTECT_BLOCK_POINTER( pxAdjacentFreeBlock ) ) && - ( pxPreviousBlock->pxNextFreeBlock != heapPROTECT_BLOCK_POINTER( NULL ) ) ) + /* Remainder too small to split. */ + mtCOVERAGE_TEST_MARKER(); + } + pvReturn = pv; + goto realloc_exit; + } + /* 2) Expansion path: try to use adjacent free blocks if overall free bytes suffice. */ + else if( ( xAlignedWantedSize - xCurrentBlockSize ) <= xFreeBytesRemaining ) + { + vTaskSuspendAll(); + { + /* Walk the free list to find the free blocks immediately before and after pxBlock. */ + pxBeforePreviousFreeBlock = &xStart; + pxPreviousFreeBlock = &xStart; + pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( xStart.pxNextFreeBlock ); + heapVALIDATE_BLOCK_POINTER( pxNextFreeBlock ); + + while( ( pxNextFreeBlock < pxBlock ) && ( pxNextFreeBlock->pxNextFreeBlock != heapPROTECT_BLOCK_POINTER( NULL ) ) ) + { + pxBeforePreviousFreeBlock = pxPreviousFreeBlock; + pxPreviousFreeBlock = pxNextFreeBlock; + pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( pxNextFreeBlock->pxNextFreeBlock ); + heapVALIDATE_BLOCK_POINTER( pxNextFreeBlock ); + } + + /* Check if next is immediately after current. 
*/ + if( ( pxNextFreeBlock != pxEnd ) && + ( ( ( size_t ) pxBlock + xCurrentBlockSize ) == ( size_t ) pxNextFreeBlock ) ) + { + xHasNextBlock = pdTRUE; + } + else { - pxPreviousBlock = heapPROTECT_BLOCK_POINTER( pxPreviousBlock->pxNextFreeBlock ); - heapVALIDATE_BLOCK_POINTER( pxPreviousBlock ); + xHasNextBlock = pdFALSE; } - if( pxPreviousBlock->pxNextFreeBlock == heapPROTECT_BLOCK_POINTER( pxAdjacentFreeBlock ) ) + /* Check if previous is immediately before current. */ + if( ( pxPreviousFreeBlock != &xStart ) && + ( ( ( size_t ) pxPreviousFreeBlock + pxPreviousFreeBlock->xBlockSize ) == ( size_t ) pxBlock ) ) + { + xHasPreviousBlock = pdTRUE; + } + else { - configASSERT( heapBLOCK_SIZE_IS_VALID( pxAdjacentFreeBlock->xBlockSize ) ); - if( !heapADD_WILL_OVERFLOW( xOriginalSize, pxAdjacentFreeBlock->xBlockSize ) ) + xHasPreviousBlock = pdFALSE; + } + + /* Compute required extra size and neighbor sizes. */ + xRemainingBlockSize = xAlignedWantedSize - xCurrentBlockSize; + xNextBlockSize = pxNextFreeBlock->xBlockSize; + xPreviousBlockSize = pxPreviousFreeBlock->xBlockSize; + configASSERT( heapBLOCK_SIZE_IS_VALID( xNextBlockSize ) != 0 ); + configASSERT( heapBLOCK_SIZE_IS_VALID( xPreviousBlockSize ) != 0 ); + + /* a) If next exists and is large enough, merge with next. */ + if( ( xHasNextBlock == pdTRUE ) && + ( xNextBlockSize >= xRemainingBlockSize ) ) + { + /* Remove next from free list and update free bytes. */ + pxPreviousFreeBlock->pxNextFreeBlock = pxNextFreeBlock->pxNextFreeBlock; + pxNextFreeBlock->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( NULL ); + xFreeBytesRemaining -= xNextBlockSize; + + /* Temporarily free the current block for merging. */ + heapFREE_BLOCK( pxBlock ); + + /* Remaining bytes after creating the requested size. */ + xRemainingBlockSize = xCurrentBlockSize + xNextBlockSize - xAlignedWantedSize; + + if( xRemainingBlockSize > heapMINIMUM_BLOCK_SIZE ) { - /* Found a suitable adjacent free block that can provide enough space. */ - if( ( xOriginalSize + pxAdjacentFreeBlock->xBlockSize ) >= xNewBlockSize ) - { - /* Remove the adjacent free block from the free list and merge it with the allocated block. */ - pxPreviousBlock->pxNextFreeBlock = pxAdjacentFreeBlock->pxNextFreeBlock; - xFreeBytesRemaining -= pxAdjacentFreeBlock->xBlockSize; - heapFREE_BLOCK( pxBlock ); - pxBlock->xBlockSize = xOriginalSize + pxAdjacentFreeBlock->xBlockSize; - - /* If the merged block is larger than needed, split the excess space - * into a new free block. */ - if( ( pxBlock->xBlockSize - xNewBlockSize ) > heapMINIMUM_BLOCK_SIZE ) - { - pxNewBlockLink = ( BlockLink_t * )( ( ( uint8_t * )pxBlock ) + xNewBlockSize ); - configASSERT( ( ( ( size_t )pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 ); - pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xNewBlockSize; - xFreeBytesRemaining += pxNewBlockLink->xBlockSize; - prvInsertBlockIntoFreeList( pxNewBlockLink ); - pxBlock->xBlockSize = xNewBlockSize; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - - heapALLOCATE_BLOCK( pxBlock ); - pvReturn = pv; - - /* Update minimum free size statistic if memory was consumed */ - if( xFreeBytesRemaining < xMinimumEverFreeBytesRemaining ) - { - xMinimumEverFreeBytesRemaining = xFreeBytesRemaining; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - mtCOVERAGE_TEST_MARKER(); - } + /* Set block to requested size and insert leftover as a free block. 
*/ + pxBlock->xBlockSize = xAlignedWantedSize; + + pxNewBlockLink = ( BlockLink_t * ) ( ( ( uint8_t * ) pxBlock ) + xAlignedWantedSize ); + configASSERT( ( ( ( size_t ) pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 ); + + pxNewBlockLink->xBlockSize = xRemainingBlockSize; + xFreeBytesRemaining += xRemainingBlockSize; + prvInsertBlockIntoFreeList( pxNewBlockLink ); } else { - mtCOVERAGE_TEST_MARKER(); + /* Leftover too small, keep as part of allocated block. */ + pxBlock->xBlockSize = xCurrentBlockSize + xNextBlockSize; + } + + /* Mark merged block as allocated. */ + heapALLOCATE_BLOCK( pxBlock ); + pvReturn = pv; + } + /* b) If previous exists and is large enough, merge with previous (data must be moved). */ + else if( ( xHasPreviousBlock == pdTRUE ) && + ( xPreviousBlockSize >= xRemainingBlockSize ) ) + { + /* Remove previous from free list and update free bytes. */ + pxBeforePreviousFreeBlock->pxNextFreeBlock = pxPreviousFreeBlock->pxNextFreeBlock; + pxPreviousFreeBlock->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( NULL ); + xFreeBytesRemaining -= xPreviousBlockSize; + + heapFREE_BLOCK( pxBlock ); + + /* Move the payload forward into the previous block's payload area. */ + puc = ( uint8_t * ) pxPreviousFreeBlock; + puc += xHeapStructSize; + /* Ensure memmove length will not underflow. */ + configASSERT( heapSUBTRACT_WILL_UNDERFLOW( xCurrentBlockSize, xHeapStructSize ) == 0 ); + ( void ) memmove( puc, pv, xCurrentBlockSize - xHeapStructSize ); + + /* Remaining bytes after creating the requested size. */ + xRemainingBlockSize = xCurrentBlockSize + xPreviousBlockSize - xAlignedWantedSize; + + if( xRemainingBlockSize > heapMINIMUM_BLOCK_SIZE ) + { + /* previous becomes the allocated block of requested size, insert leftover. */ + pxPreviousFreeBlock->xBlockSize = xAlignedWantedSize; + + pxNewBlockLink = ( BlockLink_t * ) ( ( ( uint8_t * ) pxPreviousFreeBlock ) + xAlignedWantedSize ); + configASSERT( ( ( ( size_t ) pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 ); + + pxNewBlockLink->xBlockSize = xRemainingBlockSize; + xFreeBytesRemaining += xRemainingBlockSize; + prvInsertBlockIntoFreeList( pxNewBlockLink ); + } + else + { + /* Leftover too small, treat entire previous+current as allocated. */ + pxPreviousFreeBlock->xBlockSize = xCurrentBlockSize + xPreviousBlockSize; + } + + heapALLOCATE_BLOCK( pxPreviousFreeBlock ); + /* Return the payload pointer in the previous block. */ + pvReturn = ( void * ) puc; + } + /* c) If both neighbors exist and combined are large enough, merge both sides (move data). */ + else if( ( xHasNextBlock == pdTRUE ) && + ( xHasPreviousBlock == pdTRUE ) && + ( ( xNextBlockSize + xPreviousBlockSize ) >= xRemainingBlockSize ) ) + { + /* Remove both previous and next from the free list and update free bytes. */ + pxBeforePreviousFreeBlock->pxNextFreeBlock = pxNextFreeBlock->pxNextFreeBlock; + pxNextFreeBlock->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( NULL ); + pxPreviousFreeBlock->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( NULL ); + xFreeBytesRemaining -= xNextBlockSize + xPreviousBlockSize; + + heapFREE_BLOCK( pxBlock ); + + /* Move payload forward into previous block's payload area. */ + puc = ( uint8_t * ) pxPreviousFreeBlock; + puc += xHeapStructSize; + configASSERT( heapSUBTRACT_WILL_UNDERFLOW( xCurrentBlockSize, xHeapStructSize ) == 0 ); + ( void ) memmove( puc, pv, xCurrentBlockSize - xHeapStructSize ); + + /* Remaining bytes after allocation. 
*/ + xRemainingBlockSize = xCurrentBlockSize + xNextBlockSize + xPreviousBlockSize - xAlignedWantedSize; + + if( xRemainingBlockSize > heapMINIMUM_BLOCK_SIZE ) + { + pxPreviousFreeBlock->xBlockSize = xAlignedWantedSize; + + pxNewBlockLink = ( BlockLink_t * ) ( ( ( uint8_t * ) pxPreviousFreeBlock ) + xAlignedWantedSize ); + configASSERT( ( ( ( size_t ) pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 ); + + pxNewBlockLink->xBlockSize = xRemainingBlockSize; + xFreeBytesRemaining += xRemainingBlockSize; + prvInsertBlockIntoFreeList( pxNewBlockLink ); + } + else + { + pxPreviousFreeBlock->xBlockSize = xCurrentBlockSize + xNextBlockSize + xPreviousBlockSize; } + + heapALLOCATE_BLOCK( pxPreviousFreeBlock ); + pvReturn = ( void * ) puc; + } + else + { + /* None of the merge strategies worked on this path. */ + mtCOVERAGE_TEST_MARKER(); + } + + /* Update historical minimum free bytes. */ + if( xFreeBytesRemaining < xMinimumEverFreeBytesRemaining ) + { + xMinimumEverFreeBytesRemaining = xFreeBytesRemaining; } else { mtCOVERAGE_TEST_MARKER(); } } + ( void ) xTaskResumeAll(); + } + else + { + /* Not enough free bytes in the entire heap to satisfy expansion. */ + pvReturn = NULL; + goto realloc_exit; } - /* Exit critical section - heap structure modification complete */ - ( void ) xTaskResumeAll(); - /* Case 3: If in-place resize failed, allocate a new block and move the data. - * This is more expensive as it involves: - * 1. Allocating a new block with the requested size - * 2. Copying the user data from the old block to the new block - * 3. Freeing the old block - * Note: Statistics are updated by the called functions (malloc and free). */ + /* If still NULL, fall back to allocating a new block and copying the payload. */ if( pvReturn == NULL ) { - pvReturn = pvPortMalloc( xWantedSize ); - if( pvReturn != NULL ) + puc = pvPortMalloc( xWantedSize ); + + if( puc != NULL ) { - /* Copy user data from old block to new block (up to the smaller of old or new size) */ - ( void )memcpy( pvReturn, pv, xCopySize ); + /* Copy the old payload (old payload size = xCurrentBlockSize - xHeapStructSize). */ + configASSERT( heapSUBTRACT_WILL_UNDERFLOW( xCurrentBlockSize, xHeapStructSize ) == 0 ); + ( void ) memcpy( puc, pv, xCurrentBlockSize - xHeapStructSize ); vPortFree( pv ); + + pvReturn = ( void * ) puc; } else { mtCOVERAGE_TEST_MARKER(); } } - else - { - mtCOVERAGE_TEST_MARKER(); - } - configASSERT( ( ( ( size_t )pvReturn ) & ( size_t )portBYTE_ALIGNMENT_MASK ) == 0 ); +realloc_exit: + /* Ensure returned pointer is properly aligned (NULL also satisfies this). */ + configASSERT( ( ( size_t ) pvReturn & ( size_t ) portBYTE_ALIGNMENT_MASK ) == 0 ); return pvReturn; } #endif /* if ( configSUPPORT_HEAP_REALLOC == 1 ) */ diff --git a/portable/MemMang/heap_5.c b/portable/MemMang/heap_5.c index ea2dcc1dbf6..5a35060f122 100644 --- a/portable/MemMang/heap_5.c +++ b/portable/MemMang/heap_5.c @@ -462,54 +462,78 @@ void vPortFree( void * pv ) * On failure: NULL * * Behavior: - * - When pv is NULL, equivalent to pvPortMalloc(xWantedSize) - * - When xWantedSize is 0, equivalent to vPortFree(pv) - * - Resize strategy: - * 1. If new size <= original size, attempt to shrink the block - * 2. If new size > original size, attempt to expand by merging with adjacent free block - * 3. If in-place resize fails, allocate new block and copy data + * 1) If pv == NULL, behaves like pvPortMalloc(xWantedSize). + * 2) If xWantedSize == 0, behaves like vPortFree(pv) and returns NULL. 
+ * 3) Align the requested size and include the block header size; if the aligned + * size is invalid, return NULL. + * 4) If the aligned requested size is <= current block size, shrink in place and + * insert any sufficiently large remainder as a free block. + * 5) If expansion is required and there are enough free bytes in the heap, try to + * expand into adjacent free blocks in this order: + * - Merge with next free block if it is immediately after the current block. + * - Merge with previous free block if it is immediately before the current block. + * - Merge with both previous and next if combined they provide enough space. + * If none of the above succeed, fall back to allocating a new block, memcpy'ing + * the payload and freeing the old block. */ -void *pvPortRealloc( void *pv, - size_t xWantedSize ) +void * pvPortRealloc( void * pv, + size_t xWantedSize ) { - BlockLink_t *pxBlock; - BlockLink_t *pxPreviousBlock; - BlockLink_t *pxNewBlockLink; - BlockLink_t *pxAdjacentFreeBlock; - void *pvReturn = NULL; - size_t xOriginalSize; - size_t xNewBlockSize; + BlockLink_t * pxBlock; + BlockLink_t * pxNewBlockLink; + BlockLink_t * pxNextFreeBlock; + BlockLink_t * pxPreviousFreeBlock; + BlockLink_t * pxBeforePreviousFreeBlock; + uint8_t * puc; + void * pvReturn = NULL; + size_t xAlignedWantedSize; size_t xAdditionalRequiredSize; - size_t xCopySize; + size_t xCurrentBlockSize; + size_t xRemainingBlockSize; + size_t xNextBlockSize; + size_t xPreviousBlockSize; + BaseType_t xHasNextBlock; + BaseType_t xHasPreviousBlock; + + /* Ensure the end marker has been set up. */ + configASSERT( pxEnd ); - /* Handle NULL pointer case - equivalent to malloc */ + /* If pv is NULL behave like malloc. */ if( pv == NULL ) { - return pvPortMalloc( xWantedSize ); + pvReturn = pvPortMalloc( xWantedSize ); + goto realloc_exit; } - /* Handle zero size case - equivalent to free */ + /* If requested size is zero behave like free. */ if( xWantedSize == 0 ) { vPortFree( pv ); - return NULL; + pvReturn = NULL; + goto realloc_exit; } - /* Calculate new block size with overhead (header size and alignment) */ - xNewBlockSize = xWantedSize; - if( heapADD_WILL_OVERFLOW( xNewBlockSize, xHeapStructSize ) == 0 ) + /* Calculate the internal aligned size including the header. */ + xAlignedWantedSize = xWantedSize; + + /* Add the header size and check for overflow. */ + if( heapADD_WILL_OVERFLOW( xAlignedWantedSize, xHeapStructSize ) == 0 ) { - xNewBlockSize += xHeapStructSize; - if( ( xNewBlockSize & portBYTE_ALIGNMENT_MASK ) != 0x00 ) + xAlignedWantedSize += xHeapStructSize; + + /* Ensure byte alignment. */ + if( ( xAlignedWantedSize & portBYTE_ALIGNMENT_MASK ) != 0x00 ) { - xAdditionalRequiredSize = portBYTE_ALIGNMENT - ( xNewBlockSize & portBYTE_ALIGNMENT_MASK ); - if( heapADD_WILL_OVERFLOW( xNewBlockSize, xAdditionalRequiredSize ) == 0 ) + xAdditionalRequiredSize = portBYTE_ALIGNMENT - ( xAlignedWantedSize & portBYTE_ALIGNMENT_MASK ); + + if( heapADD_WILL_OVERFLOW( xAlignedWantedSize, xAdditionalRequiredSize ) == 0 ) { - xNewBlockSize += xAdditionalRequiredSize; + xAlignedWantedSize += xAdditionalRequiredSize; } else { - return NULL; + /* Overflow -> invalid request. 
*/ + xAlignedWantedSize = 0; } } else @@ -519,165 +543,276 @@ void *pvPortRealloc( void *pv, } else { - return NULL; + xAlignedWantedSize = 0; } - /* Get the block header from the user pointer and validate it */ - pxBlock = ( BlockLink_t * )( ( uint8_t * )pv - xHeapStructSize ); - heapVALIDATE_BLOCK_POINTER( pxBlock ); - if( ( heapBLOCK_IS_ALLOCATED( pxBlock ) == 0 ) || ( pxBlock->pxNextFreeBlock != heapPROTECT_BLOCK_POINTER( NULL ) ) ) + /* Validate the aligned size. */ + if( ( xAlignedWantedSize == 0 ) || ( heapBLOCK_SIZE_IS_VALID( xAlignedWantedSize ) == 0 ) ) { - return NULL; + pvReturn = NULL; + goto realloc_exit; } - /* Calculate the original block size (without the allocated bit) - * Check if there's enough free memory to expand the block */ - xOriginalSize = pxBlock->xBlockSize & ~heapBLOCK_ALLOCATED_BITMASK; - if( ( xOriginalSize < xNewBlockSize ) && ( xFreeBytesRemaining < ( xNewBlockSize - xOriginalSize ) ) ) - { - /* Not enough memory to expand the block */ - return NULL; - } + /* Get the block header for the allocated block. */ + puc = ( uint8_t * ) pv; + puc -= xHeapStructSize; + pxBlock = ( BlockLink_t * ) puc; - /* Calculate the amount of user data to copy (excluding the block header). - * The user data size is the block size minus the header size. - * Limit the copy size to the requested size to avoid copying too much data. */ - configASSERT( heapSUBTRACT_WILL_UNDERFLOW( xOriginalSize, xHeapStructSize ) == 0 ); - xCopySize = xOriginalSize - xHeapStructSize; - if( xWantedSize < xCopySize ) - { - xCopySize = xWantedSize; - } + heapVALIDATE_BLOCK_POINTER( pxBlock ); + configASSERT( heapBLOCK_IS_ALLOCATED( pxBlock ) ); - /* Enter critical section - protect heap structure from concurrent access */ - vTaskSuspendAll(); + /* Current block size without the allocated bit. */ + xCurrentBlockSize = pxBlock->xBlockSize & ~heapBLOCK_ALLOCATED_BITMASK; + + /* 1) Shrink in place if possible. */ + if( xAlignedWantedSize <= xCurrentBlockSize ) { - /* Case 1: Shrink the block (new size is smaller than or equal to original) - * Check if the remaining space is large enough to create a separate free block */ - if( xNewBlockSize <= xOriginalSize ) + xRemainingBlockSize = xCurrentBlockSize - xAlignedWantedSize; + + /* Only split if the remaining space is large enough to form a free block. */ + if( xRemainingBlockSize > heapMINIMUM_BLOCK_SIZE ) { - /* Create a new free block from the remaining space */ - if( ( xOriginalSize - xNewBlockSize ) > heapMINIMUM_BLOCK_SIZE ) + vTaskSuspendAll(); { - pxNewBlockLink = ( BlockLink_t * )( ( ( uint8_t * )pxBlock ) + xNewBlockSize ); - configASSERT( ( ( ( size_t )pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 ); - pxNewBlockLink->xBlockSize = xOriginalSize - xNewBlockSize; - xFreeBytesRemaining += pxNewBlockLink->xBlockSize; - prvInsertBlockIntoFreeList( pxNewBlockLink ); - heapFREE_BLOCK( pxBlock ); - pxBlock->xBlockSize = xNewBlockSize; + /* Set the block to the new size and mark as allocated. */ + pxBlock->xBlockSize = xAlignedWantedSize; heapALLOCATE_BLOCK( pxBlock ); + + /* Create a new free block from the remainder and insert it. 
*/ + pxNewBlockLink = ( BlockLink_t * ) ( ( ( uint8_t * ) pxBlock ) + xAlignedWantedSize ); + configASSERT( ( ( ( size_t ) pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 ); + + pxNewBlockLink->xBlockSize = xRemainingBlockSize; + xFreeBytesRemaining += xRemainingBlockSize; + prvInsertBlockIntoFreeList( pxNewBlockLink ); } - else - { - mtCOVERAGE_TEST_MARKER(); - } - pvReturn = pv; + ( void ) xTaskResumeAll(); } else { - /* Case 2: Try to expand by merging with next free block */ - pxAdjacentFreeBlock = ( BlockLink_t * )( ( ( uint8_t * )pxBlock ) + xOriginalSize ); - configASSERT( ( ( ( size_t )pxAdjacentFreeBlock ) & portBYTE_ALIGNMENT_MASK ) == 0 ); - - /* Traverse the free list to find if the adjacent block is actually free. - * The free list is ordered by address, so we can search efficiently.*/ - pxPreviousBlock = &xStart; - while( ( pxPreviousBlock->pxNextFreeBlock != heapPROTECT_BLOCK_POINTER( pxAdjacentFreeBlock ) ) && - ( pxPreviousBlock->pxNextFreeBlock != heapPROTECT_BLOCK_POINTER( NULL ) ) ) + /* Remainder too small to split. */ + mtCOVERAGE_TEST_MARKER(); + } + pvReturn = pv; + goto realloc_exit; + } + /* 2) Expansion path: try to use adjacent free blocks if overall free bytes suffice. */ + else if( ( xAlignedWantedSize - xCurrentBlockSize ) <= xFreeBytesRemaining ) + { + vTaskSuspendAll(); + { + /* Walk the free list to find the free blocks immediately before and after pxBlock. */ + pxBeforePreviousFreeBlock = &xStart; + pxPreviousFreeBlock = &xStart; + pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( xStart.pxNextFreeBlock ); + heapVALIDATE_BLOCK_POINTER( pxNextFreeBlock ); + + while( ( pxNextFreeBlock < pxBlock ) && ( pxNextFreeBlock->pxNextFreeBlock != heapPROTECT_BLOCK_POINTER( NULL ) ) ) + { + pxBeforePreviousFreeBlock = pxPreviousFreeBlock; + pxPreviousFreeBlock = pxNextFreeBlock; + pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( pxNextFreeBlock->pxNextFreeBlock ); + heapVALIDATE_BLOCK_POINTER( pxNextFreeBlock ); + } + + /* Check if next is immediately after current. */ + if( ( pxNextFreeBlock != pxEnd ) && + ( ( ( size_t ) pxBlock + xCurrentBlockSize ) == ( size_t ) pxNextFreeBlock ) ) { - pxPreviousBlock = heapPROTECT_BLOCK_POINTER( pxPreviousBlock->pxNextFreeBlock ); - heapVALIDATE_BLOCK_POINTER( pxPreviousBlock ); + xHasNextBlock = pdTRUE; + } + else + { + xHasNextBlock = pdFALSE; } - if( pxPreviousBlock->pxNextFreeBlock == heapPROTECT_BLOCK_POINTER( pxAdjacentFreeBlock ) ) + /* Check if previous is immediately before current. */ + if( ( pxPreviousFreeBlock != &xStart ) && + ( ( ( size_t ) pxPreviousFreeBlock + pxPreviousFreeBlock->xBlockSize ) == ( size_t ) pxBlock ) ) { - configASSERT( heapBLOCK_SIZE_IS_VALID( pxAdjacentFreeBlock->xBlockSize ) ); - if( !heapADD_WILL_OVERFLOW( xOriginalSize, pxAdjacentFreeBlock->xBlockSize ) ) + xHasPreviousBlock = pdTRUE; + } + else + { + xHasPreviousBlock = pdFALSE; + } + + /* Compute required extra size and neighbor sizes. */ + xRemainingBlockSize = xAlignedWantedSize - xCurrentBlockSize; + xNextBlockSize = pxNextFreeBlock->xBlockSize; + xPreviousBlockSize = pxPreviousFreeBlock->xBlockSize; + configASSERT( heapBLOCK_SIZE_IS_VALID( xNextBlockSize ) != 0 ); + configASSERT( heapBLOCK_SIZE_IS_VALID( xPreviousBlockSize ) != 0 ); + + /* a) If next exists and is large enough, merge with next. */ + if( ( xHasNextBlock == pdTRUE ) && + ( xNextBlockSize >= xRemainingBlockSize ) ) + { + /* Remove next from free list and update free bytes. 
*/ + pxPreviousFreeBlock->pxNextFreeBlock = pxNextFreeBlock->pxNextFreeBlock; + pxNextFreeBlock->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( NULL ); + xFreeBytesRemaining -= xNextBlockSize; + + /* Temporarily free the current block for merging. */ + heapFREE_BLOCK( pxBlock ); + + /* Remaining bytes after creating the requested size. */ + xRemainingBlockSize = xCurrentBlockSize + xNextBlockSize - xAlignedWantedSize; + + if( xRemainingBlockSize > heapMINIMUM_BLOCK_SIZE ) { - /* Found a suitable adjacent free block that can provide enough space. */ - if( ( xOriginalSize + pxAdjacentFreeBlock->xBlockSize ) >= xNewBlockSize ) - { - /* Remove the adjacent free block from the free list and merge it with the allocated block. */ - pxPreviousBlock->pxNextFreeBlock = pxAdjacentFreeBlock->pxNextFreeBlock; - xFreeBytesRemaining -= pxAdjacentFreeBlock->xBlockSize; - heapFREE_BLOCK( pxBlock ); - pxBlock->xBlockSize = xOriginalSize + pxAdjacentFreeBlock->xBlockSize; - - /* If the merged block is larger than needed, split the excess space - * into a new free block. */ - if( ( pxBlock->xBlockSize - xNewBlockSize ) > heapMINIMUM_BLOCK_SIZE ) - { - pxNewBlockLink = ( BlockLink_t * )( ( ( uint8_t * )pxBlock ) + xNewBlockSize ); - configASSERT( ( ( ( size_t )pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 ); - pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xNewBlockSize; - xFreeBytesRemaining += pxNewBlockLink->xBlockSize; - prvInsertBlockIntoFreeList( pxNewBlockLink ); - pxBlock->xBlockSize = xNewBlockSize; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - - heapALLOCATE_BLOCK( pxBlock ); - pvReturn = pv; - - /* Update minimum free size statistic if memory was consumed */ - if( xFreeBytesRemaining < xMinimumEverFreeBytesRemaining ) - { - xMinimumEverFreeBytesRemaining = xFreeBytesRemaining; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - mtCOVERAGE_TEST_MARKER(); - } + /* Set block to requested size and insert leftover as a free block. */ + pxBlock->xBlockSize = xAlignedWantedSize; + + pxNewBlockLink = ( BlockLink_t * ) ( ( ( uint8_t * ) pxBlock ) + xAlignedWantedSize ); + configASSERT( ( ( ( size_t ) pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 ); + + pxNewBlockLink->xBlockSize = xRemainingBlockSize; + xFreeBytesRemaining += xRemainingBlockSize; + prvInsertBlockIntoFreeList( pxNewBlockLink ); } else { - mtCOVERAGE_TEST_MARKER(); + /* Leftover too small, keep as part of allocated block. */ + pxBlock->xBlockSize = xCurrentBlockSize + xNextBlockSize; + } + + /* Mark merged block as allocated. */ + heapALLOCATE_BLOCK( pxBlock ); + pvReturn = pv; + } + /* b) If previous exists and is large enough, merge with previous (data must be moved). */ + else if( ( xHasPreviousBlock == pdTRUE ) && + ( xPreviousBlockSize >= xRemainingBlockSize ) ) + { + /* Remove previous from free list and update free bytes. */ + pxBeforePreviousFreeBlock->pxNextFreeBlock = pxPreviousFreeBlock->pxNextFreeBlock; + pxPreviousFreeBlock->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( NULL ); + xFreeBytesRemaining -= xPreviousBlockSize; + + heapFREE_BLOCK( pxBlock ); + + /* Move the payload forward into the previous block's payload area. */ + puc = ( uint8_t * ) pxPreviousFreeBlock; + puc += xHeapStructSize; + /* Ensure memmove length will not underflow. */ + configASSERT( heapSUBTRACT_WILL_UNDERFLOW( xCurrentBlockSize, xHeapStructSize ) == 0 ); + ( void ) memmove( puc, pv, xCurrentBlockSize - xHeapStructSize ); + + /* Remaining bytes after creating the requested size. 
*/ + xRemainingBlockSize = xCurrentBlockSize + xPreviousBlockSize - xAlignedWantedSize; + + if( xRemainingBlockSize > heapMINIMUM_BLOCK_SIZE ) + { + /* previous becomes the allocated block of requested size, insert leftover. */ + pxPreviousFreeBlock->xBlockSize = xAlignedWantedSize; + + pxNewBlockLink = ( BlockLink_t * ) ( ( ( uint8_t * ) pxPreviousFreeBlock ) + xAlignedWantedSize ); + configASSERT( ( ( ( size_t ) pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 ); + + pxNewBlockLink->xBlockSize = xRemainingBlockSize; + xFreeBytesRemaining += xRemainingBlockSize; + prvInsertBlockIntoFreeList( pxNewBlockLink ); + } + else + { + /* Leftover too small, treat entire previous+current as allocated. */ + pxPreviousFreeBlock->xBlockSize = xCurrentBlockSize + xPreviousBlockSize; } + + heapALLOCATE_BLOCK( pxPreviousFreeBlock ); + /* Return the payload pointer in the previous block. */ + pvReturn = ( void * ) puc; + } + /* c) If both neighbors exist and combined are large enough, merge both sides (move data). */ + else if( ( xHasNextBlock == pdTRUE ) && + ( xHasPreviousBlock == pdTRUE ) && + ( ( xNextBlockSize + xPreviousBlockSize ) >= xRemainingBlockSize ) ) + { + /* Remove both previous and next from the free list and update free bytes. */ + pxBeforePreviousFreeBlock->pxNextFreeBlock = pxNextFreeBlock->pxNextFreeBlock; + pxNextFreeBlock->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( NULL ); + pxPreviousFreeBlock->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( NULL ); + xFreeBytesRemaining -= xNextBlockSize + xPreviousBlockSize; + + heapFREE_BLOCK( pxBlock ); + + /* Move payload forward into previous block's payload area. */ + puc = ( uint8_t * ) pxPreviousFreeBlock; + puc += xHeapStructSize; + configASSERT( heapSUBTRACT_WILL_UNDERFLOW( xCurrentBlockSize, xHeapStructSize ) == 0 ); + ( void ) memmove( puc, pv, xCurrentBlockSize - xHeapStructSize ); + + /* Remaining bytes after allocation. */ + xRemainingBlockSize = xCurrentBlockSize + xNextBlockSize + xPreviousBlockSize - xAlignedWantedSize; + + if( xRemainingBlockSize > heapMINIMUM_BLOCK_SIZE ) + { + pxPreviousFreeBlock->xBlockSize = xAlignedWantedSize; + + pxNewBlockLink = ( BlockLink_t * ) ( ( ( uint8_t * ) pxPreviousFreeBlock ) + xAlignedWantedSize ); + configASSERT( ( ( ( size_t ) pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 ); + + pxNewBlockLink->xBlockSize = xRemainingBlockSize; + xFreeBytesRemaining += xRemainingBlockSize; + prvInsertBlockIntoFreeList( pxNewBlockLink ); + } + else + { + pxPreviousFreeBlock->xBlockSize = xCurrentBlockSize + xNextBlockSize + xPreviousBlockSize; + } + + heapALLOCATE_BLOCK( pxPreviousFreeBlock ); + pvReturn = ( void * ) puc; + } + else + { + /* None of the merge strategies worked on this path. */ + mtCOVERAGE_TEST_MARKER(); + } + + /* Update historical minimum free bytes. */ + if( xFreeBytesRemaining < xMinimumEverFreeBytesRemaining ) + { + xMinimumEverFreeBytesRemaining = xFreeBytesRemaining; } else { mtCOVERAGE_TEST_MARKER(); } } + ( void ) xTaskResumeAll(); + } + else + { + /* Not enough free bytes in the entire heap to satisfy expansion. */ + pvReturn = NULL; + goto realloc_exit; } - /* Exit critical section - heap structure modification complete */ - ( void ) xTaskResumeAll(); - /* Case 3: If in-place resize failed, allocate a new block and move the data. - * This is more expensive as it involves: - * 1. Allocating a new block with the requested size - * 2. Copying the user data from the old block to the new block - * 3. 
Freeing the old block - * Note: Statistics are updated by the called functions (malloc and free). */ + /* If still NULL, fall back to allocating a new block and copying the payload. */ if( pvReturn == NULL ) { - pvReturn = pvPortMalloc( xWantedSize ); - if( pvReturn != NULL ) + puc = pvPortMalloc( xWantedSize ); + + if( puc != NULL ) { - /* Copy user data from old block to new block (up to the smaller of old or new size) */ - ( void )memcpy( pvReturn, pv, xCopySize ); + /* Copy the old payload (old payload size = xCurrentBlockSize - xHeapStructSize). */ + configASSERT( heapSUBTRACT_WILL_UNDERFLOW( xCurrentBlockSize, xHeapStructSize ) == 0 ); + ( void ) memcpy( puc, pv, xCurrentBlockSize - xHeapStructSize ); vPortFree( pv ); + + pvReturn = ( void * ) puc; } else { mtCOVERAGE_TEST_MARKER(); } } - else - { - mtCOVERAGE_TEST_MARKER(); - } - configASSERT( ( ( ( size_t )pvReturn ) & ( size_t )portBYTE_ALIGNMENT_MASK ) == 0 ); +realloc_exit: + /* Ensure returned pointer is properly aligned (NULL also satisfies this). */ + configASSERT( ( ( size_t ) pvReturn & ( size_t ) portBYTE_ALIGNMENT_MASK ) == 0 ); return pvReturn; } #endif /* if ( configSUPPORT_HEAP_REALLOC == 1 ) */
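
Usage sketch -- not part of the patch series. It assumes the patches above are applied, that heap_4.c or heap_5.c is in the build, and that FreeRTOSConfig.h sets configSUPPORT_HEAP_REALLOC to 1. The helper name prvGrowSampleBuffer and the error-handling comments are illustrative only; the NULL/zero-size behaviour described in the comments is taken from the function header added by the patch.

#include <stdint.h>
#include "FreeRTOS.h"   /* Pulls in portable.h, which declares pvPortRealloc(). */

uint8_t * prvGrowSampleBuffer( uint8_t * pucBuffer,
                               size_t xNewSizeBytes )
{
    /* pvPortRealloc() behaves like pvPortMalloc() when pucBuffer is NULL and
     * like vPortFree() when xNewSizeBytes is 0, as documented in the patch.
     * On failure it returns NULL and, for a non-zero requested size, leaves
     * the original block allocated, so keep the old pointer until the call
     * succeeds. */
    uint8_t * pucNew = ( uint8_t * ) pvPortRealloc( ( void * ) pucBuffer, xNewSizeBytes );

    if( pucNew == NULL )
    {
        /* Out of heap or invalid request: pucBuffer is still valid and still
         * owned by the caller (unless xNewSizeBytes was 0, in which case the
         * block has already been freed). */
    }

    return pucNew;
}

As with pvPortMalloc() and vPortFree(), the implementation suspends the scheduler (vTaskSuspendAll()), so it must not be called from an interrupt service routine.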