diff --git a/docs/config/spelling_exceptions.txt b/docs/config/spelling_exceptions.txt
index d4e40a3ec..d4d94aae6 100644
--- a/docs/config/spelling_exceptions.txt
+++ b/docs/config/spelling_exceptions.txt
@@ -35,6 +35,7 @@ Memtarget
 memtarget
 memtargets
 middleware
+minBytesToKeep
 multithreading
 Nodemask
 nodemask
diff --git a/include/umf/memory_pool.h b/include/umf/memory_pool.h
index c405e6f61..f46784051 100644
--- a/include/umf/memory_pool.h
+++ b/include/umf/memory_pool.h
@@ -196,6 +196,24 @@ umf_result_t umfPoolSetTag(umf_memory_pool_handle_t hPool, void *tag,
 /// @return UMF_RESULT_SUCCESS on success.
 umf_result_t umfPoolGetTag(umf_memory_pool_handle_t hPool, void **tag);
 
+///
+/// @brief Trims memory of the pool, removing resources that are not needed
+///        to keep the pool operational.
+/// \details
+///     The minBytesToKeep parameter is a hint to the pool implementation
+///     that it should try to keep at least this number of bytes of
+///     memory in the pool. The pool implementation may ignore this
+///     parameter entirely and trim all unused memory, in which case it
+///     should still return UMF_RESULT_SUCCESS. The pool implementation may
+///     also return UMF_RESULT_ERROR_NOT_SUPPORTED if it does not support
+///     trimming memory.
+/// @param hPool pointer to the memory pool
+/// @param minBytesToKeep minimum number of bytes to keep in the pool (if
+///        possible - see details)
+/// @return UMF_RESULT_SUCCESS on success or appropriate error code on failure.
+umf_result_t umfPoolTrimMemory(umf_memory_pool_handle_t hPool,
+                               size_t minBytesToKeep);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/include/umf/memory_pool_ops.h b/include/umf/memory_pool_ops.h
index 4cba05319..352d8c398 100644
--- a/include/umf/memory_pool_ops.h
+++ b/include/umf/memory_pool_ops.h
@@ -22,7 +22,7 @@ extern "C" {
 /// @brief Version of the Memory Pool ops structure.
 /// NOTE: This is equal to the latest UMF version, in which the ops structure
 /// has been modified.
-#define UMF_POOL_OPS_VERSION_CURRENT UMF_MAKE_VERSION(1, 0)
+#define UMF_POOL_OPS_VERSION_CURRENT UMF_MAKE_VERSION(1, 1)
 
 ///
 /// @brief This structure comprises function pointers used by corresponding umfPool*
@@ -143,7 +143,7 @@ typedef struct umf_memory_pool_ops_t {
     umf_result_t (*get_name)(void *pool, const char **name);
 
     ///
-    /// The following function is optional and memory pool implementation
-    /// can keep it NULL.
+    /// The following functions are optional and memory pool implementation
+    /// can keep them NULL.
     ///
 
@@ -166,6 +166,26 @@
                             const char *name, void *arg, size_t size,
                             umf_ctl_query_type_t queryType, va_list args);
 
+    // The following operations were added in ops version 1.1
+
+    ///
+    /// @brief Trims memory of the pool, removing resources that are not needed
+    ///        to keep the pool operational.
+    /// \details
+    ///     The minBytesToKeep parameter is a hint to the pool implementation
+    ///     that it should try to keep at least this number of bytes of
+    ///     memory in the pool. The pool implementation may ignore this
+    ///     parameter entirely and trim all unused memory, in which case it
+    ///     should still return UMF_RESULT_SUCCESS. The pool implementation may
+    ///     also return UMF_RESULT_ERROR_NOT_SUPPORTED if it does not support
+    ///     trimming memory.
+    /// @param pool pointer to the memory pool
+    /// @param minBytesToKeep minimum number of bytes to keep in the pool (if
+    ///        possible - see details)
+    /// @return UMF_RESULT_SUCCESS on success or appropriate error code on
+    ///         failure.
+    ///
+    umf_result_t (*ext_trim_memory)(void *pool, size_t minBytesToKeep);
 } umf_memory_pool_ops_t;
 #ifdef __cplusplus
 }
 #endif
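Caller-side sketch of the new entry point (illustration only, not part of the patch). The helper name release_cached_memory and the 1 MB hint are made up; the point is that trimming is best-effort and UMF_RESULT_ERROR_NOT_SUPPORTED is a normal outcome for pools that leave ext_trim_memory NULL:

    #include <umf/memory_pool.h>

    // Hypothetical helper: ask the pool to drop cached memory, keeping
    // roughly 1 MB around for future allocations.
    static void release_cached_memory(umf_memory_pool_handle_t hPool) {
        umf_result_t ret = umfPoolTrimMemory(hPool, 1024 * 1024);
        if (ret != UMF_RESULT_SUCCESS && ret != UMF_RESULT_ERROR_NOT_SUPPORTED) {
            // a real failure (e.g. invalid handle) - log or propagate it
        }
    }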
diff --git a/src/libumf.def b/src/libumf.def
index ad61d2fb5..9496343a0 100644
--- a/src/libumf.def
+++ b/src/libumf.def
@@ -152,4 +152,5 @@ EXPORTS
     umfJemallocPoolParamsSetName
     umfLevelZeroMemoryProviderParamsSetName
     umfOsMemoryProviderParamsSetName
+    umfPoolTrimMemory
     umfScalablePoolParamsSetName
diff --git a/src/libumf.map b/src/libumf.map
index c6e3db9ac..4e482c649 100644
--- a/src/libumf.map
+++ b/src/libumf.map
@@ -150,5 +150,6 @@ UMF_1.1 {
     umfJemallocPoolParamsSetName;
     umfLevelZeroMemoryProviderParamsSetName;
     umfOsMemoryProviderParamsSetName;
+    umfPoolTrimMemory;
     umfScalablePoolParamsSetName;
 } UMF_1.0;
diff --git a/src/memory_pool.c b/src/memory_pool.c
index 333ae56e6..350ccce67 100644
--- a/src/memory_pool.c
+++ b/src/memory_pool.c
@@ -212,6 +212,13 @@ umfDefaultCtlPoolHandle(void *hPool, umf_ctl_query_source_t operationType,
     return UMF_RESULT_ERROR_NOT_SUPPORTED;
 }
 
+static umf_result_t umfDefaultTrimMemory(void *pool,
+                                         size_t minBytesToKeep) {
+    (void)pool;
+    (void)minBytesToKeep;
+    return UMF_RESULT_ERROR_NOT_SUPPORTED;
+}
+
 // logical sum (OR) of all umf_pool_create_flags_t flags
 static const umf_pool_create_flags_t UMF_POOL_CREATE_FLAG_ALL =
     UMF_POOL_CREATE_FLAG_OWN_PROVIDER | UMF_POOL_CREATE_FLAG_DISABLE_TRACKING;
@@ -233,9 +240,9 @@ static umf_result_t umfPoolCreateInternal(const umf_memory_pool_ops_t *ops,
                                           const void *params,
                                           umf_pool_create_flags_t flags,
                                           umf_memory_pool_handle_t *hPool) {
-    if (!ops || !provider || !hPool) {
-        return UMF_RESULT_ERROR_INVALID_ARGUMENT;
-    }
+    UMF_CHECK((ops != NULL), UMF_RESULT_ERROR_INVALID_ARGUMENT);
+    UMF_CHECK((provider != NULL), UMF_RESULT_ERROR_INVALID_ARGUMENT);
+    UMF_CHECK((hPool != NULL), UMF_RESULT_ERROR_INVALID_ARGUMENT);
 
     // validate flags
     if (flags & ~UMF_POOL_CREATE_FLAG_ALL) {
@@ -245,10 +252,24 @@ static umf_result_t umfPoolCreateInternal(const umf_memory_pool_ops_t *ops,
 
     umf_result_t ret = UMF_RESULT_SUCCESS;
 
+    umf_memory_pool_ops_t compatible_ops;
     if (ops->version != UMF_POOL_OPS_VERSION_CURRENT) {
         LOG_WARN("Memory Pool ops version \"%d\" is different than the current "
                  "version \"%d\"",
                  ops->version, UMF_POOL_OPS_VERSION_CURRENT);
+
+        // Create a new ops structure compatible with the current version
+        memset(&compatible_ops, 0, sizeof(compatible_ops));
+        if (UMF_MINOR_VERSION(ops->version) == 0) {
+            LOG_INFO("Detected 1.0 version of Memory Pool ops, "
+                     "upgrading to current version");
+            memcpy(&compatible_ops, ops,
+                   offsetof(umf_memory_pool_ops_t, ext_trim_memory));
+        } else {
+            LOG_ERR("Unsupported Memory Pool ops version: %d", ops->version);
+            return UMF_RESULT_ERROR_NOT_SUPPORTED;
+        }
+        ops = &compatible_ops;
     }
 
     umf_memory_pool_handle_t pool =
@@ -278,6 +299,10 @@ static umf_result_t umfPoolCreateInternal(const umf_memory_pool_ops_t *ops,
         pool->ops.ext_ctl = umfDefaultCtlPoolHandle;
     }
 
+    if (NULL == pool->ops.ext_trim_memory) {
+        pool->ops.ext_trim_memory = umfDefaultTrimMemory;
+    }
+
     if (NULL == utils_mutex_init(&pool->lock)) {
         LOG_ERR("Failed to initialize mutex for pool");
         ret = UMF_RESULT_ERROR_UNKNOWN;
@@ -326,10 +351,7 @@ static umf_result_t umfPoolCreateInternal(const umf_memory_pool_ops_t *ops,
 }
 
 umf_result_t umfPoolDestroy(umf_memory_pool_handle_t hPool) {
-    if (hPool == NULL) {
-        LOG_ERR("memory pool handle is NULL");
-        return UMF_RESULT_ERROR_INVALID_ARGUMENT;
-    }
+    UMF_CHECK((hPool != NULL), UMF_RESULT_ERROR_INVALID_ARGUMENT);
 
     if (umf_ba_global_is_destroyed()) {
         return UMF_RESULT_ERROR_UNKNOWN;
@@ -509,6 +531,13 @@ umf_result_t umfPoolGetTag(umf_memory_pool_handle_t hPool, void **tag) {
     return UMF_RESULT_SUCCESS;
 }
 
+umf_result_t umfPoolTrimMemory(umf_memory_pool_handle_t hPool,
+                               size_t minBytesToKeep) {
+    UMF_CHECK((hPool != NULL), UMF_RESULT_ERROR_INVALID_ARGUMENT);
+
+    return hPool->ops.ext_trim_memory(hPool->pool_priv, minBytesToKeep);
+}
+
 void umfPoolCtlDefaultsDestroy(void) {
     utils_init_once(&mem_pool_ctl_initialized, pool_ctl_init);
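For context, a sketch (not part of the patch) of why the 1.0-to-1.1 upgrade above is safe: ext_trim_memory was appended at the end of umf_memory_pool_ops_t, so copying only the bytes up to its offset preserves every 1.0 field and leaves the new member NULL, which umfPoolCreateInternal then replaces with umfDefaultTrimMemory. The helper name upgrade_legacy_ops is hypothetical:

    #include <stddef.h>
    #include <string.h>
    #include <umf/memory_pool_ops.h>

    // Illustrative only: mirrors the prefix copy done in umfPoolCreateInternal.
    static void upgrade_legacy_ops(const umf_memory_pool_ops_t *legacy_v1_0,
                                   umf_memory_pool_ops_t *out) {
        memset(out, 0, sizeof(*out));
        // copy every field that existed in ops version 1.0
        memcpy(out, legacy_v1_0,
               offsetof(umf_memory_pool_ops_t, ext_trim_memory));
        // out->ext_trim_memory is still NULL here; pool creation substitutes
        // the default "not supported" implementation for it
    }

The same reasoning is why new optional operations must only ever be appended after the existing members of the ops structure.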
diff --git a/src/pool/pool_disjoint.c b/src/pool/pool_disjoint.c
index e5339376e..06bfd58c3 100644
--- a/src/pool/pool_disjoint.c
+++ b/src/pool/pool_disjoint.c
@@ -444,11 +444,11 @@ static void bucket_free_chunk(bucket_t *bucket, void *ptr, slab_t *slab,
             // remove slab
             slab_list_item_t *slab_it = &slab->iter;
             assert(slab_it->val != NULL);
-            destroy_slab(slab_it->val);
+            pool_unregister_slab(bucket->pool, slab_it->val);
             DL_DELETE(bucket->available_slabs, slab_it);
             assert(bucket->available_slabs_num > 0);
             bucket->available_slabs_num--;
-            pool_unregister_slab(bucket->pool, slab_it->val);
+            destroy_slab(slab_it->val);
         }
     } else {
         // return this chunk to the pool
@@ -1133,6 +1133,47 @@ static umf_result_t disjoint_pool_get_name(void *pool, const char **name) {
     return UMF_RESULT_SUCCESS;
 }
 
+umf_result_t disjoint_pool_trim_memory(void *pool, size_t minBytesToKeep) {
+    assert(pool != NULL);
+    disjoint_pool_t *hPool = (disjoint_pool_t *)pool;
+
+    for (size_t i = 0; i < hPool->buckets_num; i++) {
+        bucket_t *bucket = hPool->buckets[i];
+        utils_mutex_lock(&bucket->bucket_lock);
+
+        // remove empty slabs from the pool
+        slab_list_item_t *it = NULL, *tmp = NULL;
+        LL_FOREACH_SAFE(bucket->available_slabs, it, tmp) {
+            slab_t *slab = it->val;
+            if (slab->num_chunks_allocated == 0) {
+                if (minBytesToKeep > 0) {
+                    // if we still have bytes to keep, do not remove the slab
+                    if (minBytesToKeep > slab->slab_size) {
+                        minBytesToKeep -= slab->slab_size;
+                    } else {
+                        minBytesToKeep = 0;
+                    }
+                    continue;
+                }
+
+                // remove slab (unregister before destroying it)
+                pool_unregister_slab(hPool, slab);
+                DL_DELETE(bucket->available_slabs, it);
+                assert(bucket->available_slabs_num > 0);
+                bucket->available_slabs_num--;
+                destroy_slab(slab);
+
+                // update stats
+                bucket_update_stats(bucket, 0, -1);
+            }
+        }
+
+        utils_mutex_unlock(&bucket->bucket_lock);
+    }
+
+    return UMF_RESULT_SUCCESS;
+}
+
 static umf_memory_pool_ops_t UMF_DISJOINT_POOL_OPS = {
     .version = UMF_POOL_OPS_VERSION_CURRENT,
     .initialize = disjoint_pool_initialize,
@@ -1146,6 +1187,7 @@ static umf_memory_pool_ops_t UMF_DISJOINT_POOL_OPS = {
     .get_last_allocation_error = disjoint_pool_get_last_allocation_error,
     .get_name = disjoint_pool_get_name,
     .ext_ctl = disjoint_pool_ctl,
+    .ext_trim_memory = disjoint_pool_trim_memory,
 };
 
 const umf_memory_pool_ops_t *umfDisjointPoolOps(void) {
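A worked example of the hint semantics implemented above (illustration, not part of the patch; hPool is assumed to be a disjoint pool whose empty slabs are 64 bytes each). The disjoint pool trims at whole-slab granularity, so minBytesToKeep is effectively rounded up to a slab boundary:

    // Four empty 64-byte slabs are cached in a bucket.
    umfPoolTrimMemory(hPool, 100); // keeps 2 slabs: 64 < 100, but 64 + 64 >= 100
    umfPoolTrimMemory(hPool, 64);  // keeps 1 slab
    umfPoolTrimMemory(hPool, 0);   // destroys every remaining empty slab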
diff --git a/src/pool/pool_jemalloc.c b/src/pool/pool_jemalloc.c
index 322abc4b9..1a029d66d 100644
--- a/src/pool/pool_jemalloc.c
+++ b/src/pool/pool_jemalloc.c
@@ -588,6 +588,28 @@ static umf_result_t op_get_name(void *pool, const char **name) {
     return UMF_RESULT_SUCCESS;
 }
 
+static umf_result_t op_trim_memory(void *pool, size_t minBytesToKeep) {
+    // there is no way to tell jemalloc to keep a minimum number of bytes,
+    // so we just purge all arenas
+    if (minBytesToKeep > 0) {
+        LOG_WARN("Ignoring minBytesToKeep (%zu) in jemalloc pool",
+                 minBytesToKeep);
+    }
+
+    jemalloc_memory_pool_t *je_pool = (jemalloc_memory_pool_t *)pool;
+    for (size_t i = 0; i < je_pool->n_arenas; i++) {
+        char cmd[64];
+        unsigned arena = je_pool->arena_index[i];
+        snprintf(cmd, sizeof(cmd), "arena.%u.purge", arena);
+        if (je_mallctl(cmd, NULL, NULL, NULL, 0)) {
+            LOG_ERR("Could not purge jemalloc arena %u", arena);
+            return UMF_RESULT_ERROR_UNKNOWN;
+        }
+    }
+
+    return UMF_RESULT_SUCCESS;
+}
+
 static umf_memory_pool_ops_t UMF_JEMALLOC_POOL_OPS = {
     .version = UMF_POOL_OPS_VERSION_CURRENT,
     .initialize = op_initialize,
@@ -600,6 +622,7 @@ static umf_memory_pool_ops_t UMF_JEMALLOC_POOL_OPS = {
     .free = op_free,
     .get_last_allocation_error = op_get_last_allocation_error,
     .get_name = op_get_name,
+    .ext_trim_memory = op_trim_memory,
 };
 
 const umf_memory_pool_ops_t *umfJemallocPoolOps(void) {
diff --git a/src/pool/pool_proxy.c b/src/pool/pool_proxy.c
index c6bf74124..4a4e89247 100644
--- a/src/pool/pool_proxy.c
+++ b/src/pool/pool_proxy.c
@@ -147,7 +147,9 @@ static umf_memory_pool_ops_t UMF_PROXY_POOL_OPS = {
     .malloc_usable_size = proxy_malloc_usable_size,
     .free = proxy_free,
     .get_last_allocation_error = proxy_get_last_allocation_error,
-    .get_name = proxy_get_name};
+    .get_name = proxy_get_name,
+    .ext_trim_memory = NULL, // not supported
+};
 
 const umf_memory_pool_ops_t *umfProxyPoolOps(void) {
     return &UMF_PROXY_POOL_OPS;
diff --git a/src/pool/pool_scalable.c b/src/pool/pool_scalable.c
index 023596374..72afce267 100644
--- a/src/pool/pool_scalable.c
+++ b/src/pool/pool_scalable.c
@@ -450,12 +450,14 @@ static umf_result_t tbb_get_last_allocation_error(void *pool) {
     return TLS_last_allocation_error;
 }
 
+static void initialize_pool_ctl(void) {}
+
 static umf_result_t pool_ctl(void *hPool, umf_ctl_query_source_t operationType,
                              const char *name, void *arg, size_t size,
                              umf_ctl_query_type_t query_type, va_list args) {
     (void)operationType; // unused
     umf_memory_pool_handle_t pool_provider = (umf_memory_pool_handle_t)hPool;
-    utils_init_once(&ctl_initialized, NULL);
+    utils_init_once(&ctl_initialized, initialize_pool_ctl);
     return ctl_query(&pool_scallable_ctl_root, pool_provider->pool_priv,
                      CTL_QUERY_PROGRAMMATIC, name, query_type, arg, size, args);
 }
@@ -486,6 +488,7 @@ static umf_memory_pool_ops_t UMF_SCALABLE_POOL_OPS = {
     .get_last_allocation_error = tbb_get_last_allocation_error,
     .ext_ctl = pool_ctl,
     .get_name = scalable_get_name,
+    .ext_trim_memory = NULL, // not supported
 };
 
 const umf_memory_pool_ops_t *umfScalablePoolOps(void) {
diff --git a/src/utils/utils_posix_concurrency.c b/src/utils/utils_posix_concurrency.c
index 44a317361..c6f273bed 100644
--- a/src/utils/utils_posix_concurrency.c
+++ b/src/utils/utils_posix_concurrency.c
@@ -38,6 +38,11 @@ int utils_mutex_unlock(utils_mutex_t *m) {
 }
 
 void utils_init_once(UTIL_ONCE_FLAG *flag, void (*oneCb)(void)) {
+    if (oneCb == NULL) {
+        LOG_FATAL("utils_init_once: callback is NULL");
+        return;
+    }
+
     pthread_once(flag, oneCb);
 }
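Implementer-side sketch (not part of the patch): a third-party pool built against ops version 1.1 can either provide the new callback or leave it NULL and inherit the default "not supported" behavior, exactly as the proxy and scalable pools above do. my_pool_trim and MY_POOL_OPS are hypothetical names:

    #include <umf/memory_pool_ops.h>

    // Hypothetical callback: release cached resources down to minBytesToKeep.
    static umf_result_t my_pool_trim(void *pool, size_t minBytesToKeep) {
        (void)pool;
        (void)minBytesToKeep;
        // ... give unused memory back to the provider here ...
        return UMF_RESULT_SUCCESS;
    }

    static umf_memory_pool_ops_t MY_POOL_OPS = {
        .version = UMF_POOL_OPS_VERSION_CURRENT,
        // .initialize, .finalize, .malloc, .free, ... omitted for brevity
        .ext_trim_memory = my_pool_trim, // or NULL if trimming is not supported
    };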
diff --git a/test/common/pool.hpp b/test/common/pool.hpp
index f40f90437..711472ebc 100644
--- a/test/common/pool.hpp
+++ b/test/common/pool.hpp
@@ -140,25 +140,26 @@ typedef struct pool_base_t {
     umf_result_t initialize(umf_memory_provider_handle_t) noexcept {
        return UMF_RESULT_SUCCESS;
     };
-    void *malloc([[maybe_unused]] size_t size) noexcept { return nullptr; }
+    void *malloc(size_t) noexcept { return nullptr; }
     void *calloc(size_t, size_t) noexcept { return nullptr; }
     void *realloc(void *, size_t) noexcept { return nullptr; }
     void *aligned_malloc(size_t, size_t) noexcept { return nullptr; }
-    umf_result_t malloc_usable_size(const void *, size_t *size) noexcept {
-        if (size) {
-            *size = 0;
-        }
-        return UMF_RESULT_SUCCESS;
+    umf_result_t malloc_usable_size(const void *, size_t *) noexcept {
+        return UMF_RESULT_ERROR_UNKNOWN;
     }
-    umf_result_t free(void *) noexcept { return UMF_RESULT_SUCCESS; }
+    umf_result_t free(void *) noexcept { return UMF_RESULT_ERROR_UNKNOWN; }
     umf_result_t get_last_allocation_error() noexcept {
-        return UMF_RESULT_SUCCESS;
+        return UMF_RESULT_ERROR_UNKNOWN;
     }
-    umf_result_t get_name(const char **name) noexcept {
-        if (name) {
-            *name = "pool_base";
-        }
-        return UMF_RESULT_SUCCESS;
+    umf_result_t get_name(const char **) noexcept {
+        return UMF_RESULT_ERROR_UNKNOWN;
+    }
+    umf_result_t ext_ctl(umf_ctl_query_source_t, const char *, void *, size_t,
+                         umf_ctl_query_type_t, va_list) noexcept {
+        return UMF_RESULT_ERROR_UNKNOWN;
+    }
+    umf_result_t ext_trim_memory(size_t) noexcept {
+        return UMF_RESULT_ERROR_UNKNOWN;
     }
 } pool_base_t;
 
@@ -209,6 +210,11 @@ struct malloc_pool : public pool_base_t {
         }
         return UMF_RESULT_SUCCESS;
     }
+
+    umf_result_t ext_trim_memory(size_t) noexcept {
+        // malloc_pool frees all memory immediately, so we have nothing to trim
+        return UMF_RESULT_SUCCESS;
+    }
 };
 
 umf_memory_pool_ops_t MALLOC_POOL_OPS =
diff --git a/test/common/pool_trace.c b/test/common/pool_trace.c
index ce944479f..c05a16d32 100644
--- a/test/common/pool_trace.c
+++ b/test/common/pool_trace.c
@@ -99,6 +99,14 @@ static umf_result_t traceGetName(void *pool, const char **name) {
     return UMF_RESULT_SUCCESS;
 }
 
+static umf_result_t traceTrimMemory(void *pool, size_t minBytesToKeep) {
+    trace_pool_t *trace_pool = (trace_pool_t *)pool;
+
+    trace_pool->params.trace_handler(trace_pool->params.trace_context,
+                                     "trim_memory");
+    return umfPoolTrimMemory(trace_pool->params.hUpstreamPool, minBytesToKeep);
+}
+
 umf_memory_pool_ops_t UMF_TRACE_POOL_OPS = {
     .version = UMF_POOL_OPS_VERSION_CURRENT,
     .initialize = traceInitialize,
@@ -111,4 +119,5 @@ umf_memory_pool_ops_t UMF_TRACE_POOL_OPS = {
     .free = traceFree,
     .get_last_allocation_error = traceGetLastStatus,
     .get_name = traceGetName,
+    .ext_trim_memory = traceTrimMemory,
 };
diff --git a/test/memoryPoolAPI.cpp b/test/memoryPoolAPI.cpp
index dabd9168d..f2cfb61bb 100644
--- a/test/memoryPoolAPI.cpp
+++ b/test/memoryPoolAPI.cpp
@@ -12,7 +12,9 @@
 #include
 #include
+#include
 #include
+#include
 
 #ifdef UMF_POOL_JEMALLOC_ENABLED
 #include
@@ -84,7 +86,8 @@ TEST_P(umfPoolWithCreateFlagsTest, memoryPoolTrace) {
 
     size_t tmpSize;
     umfPoolMallocUsableSize(tracingPool.get(), nullptr, &tmpSize);
-    // we ignore return value of poolMallocUsabeSize(), as it might be not supported
+    // we ignore return value of umfPoolMallocUsableSize(), as it might not be
+    // supported
     ASSERT_EQ(poolCalls["malloc_usable_size"], 1UL);
     ASSERT_EQ(poolCalls.size(), ++pool_call_count);
 
@@ -117,6 +120,12 @@ TEST_P(umfPoolWithCreateFlagsTest, memoryPoolTrace) {
     ASSERT_EQ(poolCalls["get_last_native_error"], 1UL);
     ASSERT_EQ(poolCalls.size(), ++pool_call_count);
 
+    umfPoolTrimMemory(tracingPool.get(), 0);
+    // we ignore return value of umfPoolTrimMemory(), as it might not be
+    // supported
+    ASSERT_EQ(poolCalls["trim_memory"], 1UL);
+    ASSERT_EQ(poolCalls.size(), ++pool_call_count);
+
     if (manuallyDestroyProvider) {
         umfMemoryProviderDestroy(provider);
     }
@@ -131,6 +140,13 @@ TEST_P(umfPoolWithCreateFlagsTest, memoryPoolWithCustomProvider) {
             EXPECT_NE_NOEXCEPT(provider, nullptr);
             return UMF_RESULT_SUCCESS;
         }
+
+        umf_result_t get_name(const char **name) noexcept {
+            if (name) {
+                *name = "pool";
+            }
+            return UMF_RESULT_SUCCESS;
+        }
     };
 
     umf_memory_pool_ops_t pool_ops = umf_test::poolMakeCOps();
@@ -342,6 +358,14 @@ INSTANTIATE_TEST_SUITE_P(
                              &BA_GLOBAL_PROVIDER_OPS, nullptr, nullptr}),
     poolCreateExtParamsNameGen);
 
+#ifdef UMF_POOL_SCALABLE_ENABLED
+INSTANTIATE_TEST_SUITE_P(mallocPoolTestScalable, umfPoolTest,
+                         ::testing::Values(poolCreateExtParams{
+                             umfScalablePoolOps(), nullptr, nullptr,
+                             &BA_GLOBAL_PROVIDER_OPS, nullptr, nullptr}),
+                         poolCreateExtParamsNameGen);
+#endif
+
 INSTANTIATE_TEST_SUITE_P(mallocMultiPoolTest, umfMultiPoolTest,
                          ::testing::Values(poolCreateExtParams{
                              umfProxyPoolOps(), nullptr, nullptr,
diff --git a/test/poolFixtures.hpp b/test/poolFixtures.hpp
index 6a606f5b7..2b4275023 100644
--- a/test/poolFixtures.hpp
+++ b/test/poolFixtures.hpp
@@ -405,6 +405,32 @@ TEST_P(umfPoolTest, multiThreadedMallocFreeRandomSizes) {
     }
 }
 
+TEST_P(umfPoolTest, trimMemory) {
+    constexpr size_t size = 1024;
+
+    umf_memory_pool_handle_t hPool = pool.get();
+    void *ptr = umfPoolMalloc(hPool, size);
+    ASSERT_NE(ptr, nullptr);
+
+    umf_result_t ret = umfPoolFree(hPool, ptr);
+    ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
+
+    size_t reserved_memory1 = 0;
+    ret = umfCtlGet("umf.pool.by_handle.{}.stats.reserved_memory",
+                    &reserved_memory1, sizeof(size_t), hPool);
+    ASSERT_GE(reserved_memory1, 0ull);
+
+    // if supported, a call to umfPoolTrimMemory() should purge the whole
+    // memory pool
+    ret = umfPoolTrimMemory(hPool, 0);
+    if (ret == UMF_RESULT_SUCCESS) {
+        size_t reserved_memory2 = 0;
+        ret = umfCtlGet("umf.pool.by_handle.{}.stats.reserved_memory",
+                        &reserved_memory2, sizeof(size_t), hPool);
+        ASSERT_EQ(reserved_memory2, 0ull);
+    }
+}
+
 TEST_P(umfMemTest, outOfMem) {
     static constexpr size_t allocSize = 4096;
     auto hPool = pool.get();
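The same before/after check the fixture performs can be done from application code (sketch, not part of the patch). It assumes the umfCtlGet() entry point from <umf/experimental/ctl.h> and a pool that exposes the stats.reserved_memory node; return values are ignored for brevity:

    #include <stdio.h>
    #include <umf/experimental/ctl.h>
    #include <umf/memory_pool.h>

    // Print how many bytes of provider memory the pool holds before and after
    // trimming it completely.
    static void report_trim(umf_memory_pool_handle_t hPool) {
        size_t before = 0, after = 0;
        umfCtlGet("umf.pool.by_handle.{}.stats.reserved_memory", &before,
                  sizeof(before), hPool);
        umfPoolTrimMemory(hPool, 0);
        umfCtlGet("umf.pool.by_handle.{}.stats.reserved_memory", &after,
                  sizeof(after), hPool);
        printf("reserved memory: %zu -> %zu bytes\n", before, after);
    }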
diff --git a/test/pools/disjoint_pool.cpp b/test/pools/disjoint_pool.cpp
index 8050c9af3..c638bfc3e 100644
--- a/test/pools/disjoint_pool.cpp
+++ b/test/pools/disjoint_pool.cpp
@@ -275,6 +275,90 @@ TEST_F(test, sharedLimits) {
     EXPECT_EQ(MaxSize / SlabMinSize * 2, numFrees);
 }
 
+TEST_F(test, disjointPoolTrim) {
+    struct memory_provider : public umf_test::provider_base_t {
+        umf_result_t alloc(size_t size, size_t alignment, void **ptr) noexcept {
+            *ptr = umf_ba_global_aligned_alloc(size, alignment);
+            return UMF_RESULT_SUCCESS;
+        }
+
+        umf_result_t free(void *ptr, size_t) noexcept {
+            umf_ba_global_free(ptr);
+            return UMF_RESULT_SUCCESS;
+        }
+    };
+
+    umf_memory_provider_ops_t provider_ops =
+        umf_test::providerMakeCOps<memory_provider, void>();
+
+    auto providerUnique =
+        wrapProviderUnique(createProviderChecked(&provider_ops, nullptr));
+
+    umf_memory_provider_handle_t provider_handle;
+    provider_handle = providerUnique.get();
+
+    umf_disjoint_pool_params_handle_t params =
+        (umf_disjoint_pool_params_handle_t)defaultDisjointPoolConfig();
+    params->pool_trace = 3;
+    // Set the slab min size to 64 so allocating 64 bytes will use the whole
+    // slab.
+    params->slab_min_size = 64;
+    params->capacity = 4;
+
+    // in this "internals" test we use the ops interface to directly manipulate
+    // the pool structure
+    const umf_memory_pool_ops_t *ops = umfDisjointPoolOps();
+    EXPECT_NE(ops, nullptr);
+
+    disjoint_pool_t *pool;
+    umf_result_t res = ops->initialize(provider_handle, params, (void **)&pool);
+    EXPECT_EQ(res, UMF_RESULT_SUCCESS);
+    EXPECT_NE(pool, nullptr);
+
+    // do 4 allocs, then free all of them
+    size_t size = 64;
+    void *ptrs[4] = {0};
+    ptrs[0] = ops->malloc(pool, size);
+    EXPECT_NE(ptrs[0], nullptr);
+    ptrs[1] = ops->malloc(pool, size);
+    EXPECT_NE(ptrs[1], nullptr);
+    ptrs[2] = ops->malloc(pool, size);
+    EXPECT_NE(ptrs[2], nullptr);
+    ptrs[3] = ops->malloc(pool, size);
+    EXPECT_NE(ptrs[3], nullptr);
+
+    ops->free(pool, ptrs[0]);
+    ops->free(pool, ptrs[1]);
+    ops->free(pool, ptrs[2]);
+    ops->free(pool, ptrs[3]);
+
+    // Because we set the slab min size to 64, each allocation should go to a
+    // separate slab. Additionally, because we set the capacity to 4, all slabs
+    // should still be in the pool available for new allocations.
+    EXPECT_EQ(pool->buckets[0]->available_slabs_num, (size_t)4);
+    EXPECT_EQ(pool->buckets[0]->curr_slabs_in_use, (size_t)0);
+    EXPECT_EQ(pool->buckets[0]->curr_slabs_in_pool, (size_t)4);
+
+    // Trim memory - leave 3 slabs in the pool
+    ops->ext_trim_memory(pool, 3 * pool->buckets[0]->size);
+    EXPECT_EQ(pool->buckets[0]->available_slabs_num, (size_t)3);
+    EXPECT_EQ(pool->buckets[0]->curr_slabs_in_use, (size_t)0);
+
+    // Trim memory again - leave 1 slab in the pool
+    ops->ext_trim_memory(pool, pool->buckets[0]->size);
+    EXPECT_EQ(pool->buckets[0]->available_slabs_num, (size_t)1);
+    EXPECT_EQ(pool->buckets[0]->curr_slabs_in_use, (size_t)0);
+
+    // Trim the rest of memory
+    ops->ext_trim_memory(pool, 0);
+    EXPECT_EQ(pool->buckets[0]->available_slabs_num, (size_t)0);
+    EXPECT_EQ(pool->buckets[0]->curr_slabs_in_pool, (size_t)0);
+
+    ops->finalize(pool);
+    res = umfDisjointPoolParamsDestroy(params);
+    EXPECT_EQ(res, UMF_RESULT_SUCCESS);
+}
+
 TEST_F(test, disjointPoolNullParams) {
     umf_result_t res = umfDisjointPoolParamsCreate(nullptr);
     EXPECT_EQ(res, UMF_RESULT_ERROR_INVALID_ARGUMENT);
diff --git a/test/pools/pool_base_alloc.cpp b/test/pools/pool_base_alloc.cpp
index 8819a01d2..583b417cf 100644
--- a/test/pools/pool_base_alloc.cpp
+++ b/test/pools/pool_base_alloc.cpp
@@ -49,6 +49,9 @@ struct base_alloc_pool : public umf_test::pool_base_t {
         }
         return UMF_RESULT_SUCCESS;
     }
+    umf_result_t ext_trim_memory(size_t) noexcept {
+        return UMF_RESULT_ERROR_NOT_SUPPORTED;
+    }
 };
 
 umf_memory_pool_ops_t BA_POOL_OPS =
diff --git a/test/utils/cpp_helpers.hpp b/test/utils/cpp_helpers.hpp
index c9c9d961b..6b8e840d8 100644
--- a/test/utils/cpp_helpers.hpp
+++ b/test/utils/cpp_helpers.hpp
@@ -87,6 +87,8 @@ template <typename T> umf_memory_pool_ops_t poolOpsBase() {
     UMF_ASSIGN_OP(ops, T, malloc_usable_size, UMF_RESULT_ERROR_UNKNOWN);
     UMF_ASSIGN_OP(ops, T, free, UMF_RESULT_ERROR_UNKNOWN);
     UMF_ASSIGN_OP(ops, T, get_last_allocation_error, UMF_RESULT_ERROR_UNKNOWN);
+    UMF_ASSIGN_OP(ops, T, ext_ctl, UMF_RESULT_ERROR_UNKNOWN);
+    UMF_ASSIGN_OP(ops, T, ext_trim_memory, UMF_RESULT_ERROR_UNKNOWN);
     return ops;
 }
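Finally, an end-to-end sketch of the feature through the public API (not part of the patch). It assumes a build with the OS memory provider and the disjoint pool enabled, and omits error handling for brevity:

    #include <umf/memory_pool.h>
    #include <umf/memory_provider.h>
    #include <umf/pools/pool_disjoint.h>
    #include <umf/providers/provider_os_memory.h>

    int main(void) {
        umf_os_memory_provider_params_handle_t os_params = NULL;
        umfOsMemoryProviderParamsCreate(&os_params);

        umf_memory_provider_handle_t provider = NULL;
        umfMemoryProviderCreate(umfOsMemoryProviderOps(), os_params, &provider);

        umf_disjoint_pool_params_handle_t pool_params = NULL;
        umfDisjointPoolParamsCreate(&pool_params);

        umf_memory_pool_handle_t pool = NULL;
        umfPoolCreate(umfDisjointPoolOps(), provider, pool_params, 0, &pool);

        void *ptr = umfPoolMalloc(pool, 1024);
        umfPoolFree(pool, ptr);

        // Freed chunks stay cached inside the disjoint pool; trimming returns
        // the empty slabs to the provider (and ultimately to the OS).
        umfPoolTrimMemory(pool, 0);

        umfPoolDestroy(pool);
        umfMemoryProviderDestroy(provider);
        umfDisjointPoolParamsDestroy(pool_params);
        umfOsMemoryProviderParamsDestroy(os_params);
        return 0;
    }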