diff --git a/include/umf/pools/pool_disjoint.h b/include/umf/pools/pool_disjoint.h index c7032fd60..85b54d183 100644 --- a/include/umf/pools/pool_disjoint.h +++ b/include/umf/pools/pool_disjoint.h @@ -109,7 +109,8 @@ umf_result_t umfDisjointPoolParamsSetSharedLimits( /// @brief Set custom name of the disjoint pool to be used in the traces. /// @param hParams handle to the parameters of the disjoint pool. -/// @param name custom name of the pool. Name longer than 64 characters will be truncated. +/// @param name custom name of the pool. Must not be NULL. Name longer than 63 +/// characters will be truncated. /// @return UMF_RESULT_SUCCESS on success or appropriate error code on failure. umf_result_t umfDisjointPoolParamsSetName(umf_disjoint_pool_params_handle_t hParams, diff --git a/include/umf/pools/pool_jemalloc.h b/include/umf/pools/pool_jemalloc.h index 8d5b090d6..87901a6ee 100644 --- a/include/umf/pools/pool_jemalloc.h +++ b/include/umf/pools/pool_jemalloc.h @@ -43,6 +43,15 @@ umf_result_t umfJemallocPoolParamsSetNumArenas(umf_jemalloc_pool_params_handle_t hParams, size_t numArenas); +/// @brief Set custom name of the jemalloc pool used in traces. +/// @param hParams handle to the parameters of the jemalloc pool. +/// @param name custom name. Must not be NULL. Name longer than 63 characters +/// will be truncated. +/// @return UMF_RESULT_SUCCESS on success or appropriate error code on failure. +umf_result_t +umfJemallocPoolParamsSetName(umf_jemalloc_pool_params_handle_t hParams, + const char *name); + const umf_memory_pool_ops_t *umfJemallocPoolOps(void); #ifdef __cplusplus diff --git a/include/umf/pools/pool_scalable.h b/include/umf/pools/pool_scalable.h index f93e8d38e..c556909ab 100644 --- a/include/umf/pools/pool_scalable.h +++ b/include/umf/pools/pool_scalable.h @@ -53,6 +53,15 @@ umf_result_t umfScalablePoolParamsSetKeepAllMemory(umf_scalable_pool_params_handle_t hParams, bool keepAllMemory); +/// @brief Set custom name of the scalable pool used in traces. +/// @param hParams handle to the parameters of the scalable pool. +/// @param name custom name. Must not be NULL. Name longer than 63 characters +/// will be truncated. +/// @return UMF_RESULT_SUCCESS on success or appropriate error code on failure. +umf_result_t +umfScalablePoolParamsSetName(umf_scalable_pool_params_handle_t hParams, + const char *name); + /// @brief Return \p ops structure containing pointers to the scalable pool implementation. /// @return pointer to the \p umf_memory_pool_ops_t struct. const umf_memory_pool_ops_t *umfScalablePoolOps(void); diff --git a/include/umf/providers/provider_cuda.h b/include/umf/providers/provider_cuda.h index bbbabc2de..9e99f9aaf 100644 --- a/include/umf/providers/provider_cuda.h +++ b/include/umf/providers/provider_cuda.h @@ -61,6 +61,14 @@ umf_result_t umfCUDAMemoryProviderParamsSetMemoryType( umf_result_t umfCUDAMemoryProviderParamsSetAllocFlags( umf_cuda_memory_provider_params_handle_t hParams, unsigned int flags); +/// @brief Set custom name of the CUDA Memory Provider. +/// @param hParams handle to the parameters of the CUDA Memory Provider. +/// @param name custom name. Must not be NULL. Name longer than 63 characters +/// will be truncated. +/// @return UMF_RESULT_SUCCESS on success or appropriate error code on failure. 
+umf_result_t umfCUDAMemoryProviderParamsSetName( + umf_cuda_memory_provider_params_handle_t hParams, const char *name); + const umf_memory_provider_ops_t *umfCUDAMemoryProviderOps(void); #ifdef __cplusplus diff --git a/include/umf/providers/provider_devdax_memory.h b/include/umf/providers/provider_devdax_memory.h index f8557f9a3..75f96398e 100644 --- a/include/umf/providers/provider_devdax_memory.h +++ b/include/umf/providers/provider_devdax_memory.h @@ -56,6 +56,14 @@ umf_result_t umfDevDaxMemoryProviderParamsSetDeviceDax( umf_result_t umfDevDaxMemoryProviderParamsSetProtection( umf_devdax_memory_provider_params_handle_t hParams, unsigned protection); +/// @brief Set custom name of the Devdax Memory Provider. +/// @param hParams [in] handle to the parameters of the Devdax Memory Provider. +/// @param name [in] custom name. Must not be NULL. Name longer than 63 characters +/// will be truncated. +/// @return UMF_RESULT_SUCCESS on success or appropriate error code on failure. +umf_result_t umfDevDaxMemoryProviderParamsSetName( + umf_devdax_memory_provider_params_handle_t hParams, const char *name); + /// @brief Devdax Memory Provider operation results typedef enum umf_devdax_memory_provider_native_error { UMF_DEVDAX_RESULT_SUCCESS = UMF_DEVDAX_RESULTS_START_FROM, ///< Success diff --git a/include/umf/providers/provider_file_memory.h b/include/umf/providers/provider_file_memory.h index 5d0c6eb16..3e6ec908e 100644 --- a/include/umf/providers/provider_file_memory.h +++ b/include/umf/providers/provider_file_memory.h @@ -68,6 +68,14 @@ typedef enum umf_file_memory_provider_native_error { const umf_memory_provider_ops_t *umfFileMemoryProviderOps(void); +/// @brief Set custom name of the File Memory Provider. +/// @param hParams handle to the parameters of the File Memory Provider. +/// @param name custom name. Must not be NULL. Name longer than 63 characters +/// will be truncated. +/// @return UMF_RESULT_SUCCESS on success or appropriate error code on failure. +umf_result_t umfFileMemoryProviderParamsSetName( + umf_file_memory_provider_params_handle_t hParams, const char *name); + #ifdef __cplusplus } #endif diff --git a/include/umf/providers/provider_fixed_memory.h b/include/umf/providers/provider_fixed_memory.h index 7c4507a27..1f381e88a 100644 --- a/include/umf/providers/provider_fixed_memory.h +++ b/include/umf/providers/provider_fixed_memory.h @@ -51,6 +51,14 @@ umf_result_t umfFixedMemoryProviderParamsDestroy( /// @return Pointer to the umf_memory_provider_ops_t structure. const umf_memory_provider_ops_t *umfFixedMemoryProviderOps(void); +/// @brief Set custom name of the Fixed Memory Provider. +/// @param hParams [in] handle to the parameters of the Fixed Memory Provider. +/// @param name [in] custom name. Must not be NULL. Name longer than 63 characters +/// will be truncated. +/// @return UMF_RESULT_SUCCESS on success or appropriate error code on failure. 
+umf_result_t umfFixedMemoryProviderParamsSetName( + umf_fixed_memory_provider_params_handle_t hParams, const char *name); + /// @brief Fixed Memory Provider operation results typedef enum umf_fixed_memory_provider_native_error { UMF_FIXED_RESULT_SUCCESS = UMF_FIXED_RESULTS_START_FROM, ///< Success diff --git a/include/umf/providers/provider_level_zero.h b/include/umf/providers/provider_level_zero.h index 657e19ee3..256ce0adf 100644 --- a/include/umf/providers/provider_level_zero.h +++ b/include/umf/providers/provider_level_zero.h @@ -91,6 +91,14 @@ umf_result_t umfLevelZeroMemoryProviderParamsSetDeviceOrdinal( umf_level_zero_memory_provider_params_handle_t hParams, uint32_t deviceOrdinal); +/// @brief Set custom name of the Level Zero Memory Provider. +/// @param hParams handle to the parameters of the Level Zero Memory Provider. +/// @param name custom name. Must not be NULL. Name longer than 63 characters +/// will be truncated. +/// @return UMF_RESULT_SUCCESS on success or appropriate error code on failure. +umf_result_t umfLevelZeroMemoryProviderParamsSetName( + umf_level_zero_memory_provider_params_handle_t hParams, const char *name); + const umf_memory_provider_ops_t *umfLevelZeroMemoryProviderOps(void); #ifdef __cplusplus diff --git a/include/umf/providers/provider_os_memory.h b/include/umf/providers/provider_os_memory.h index 262959609..978965621 100644 --- a/include/umf/providers/provider_os_memory.h +++ b/include/umf/providers/provider_os_memory.h @@ -134,6 +134,15 @@ umf_result_t umfOsMemoryProviderParamsSetPartitions( umf_os_memory_provider_params_handle_t hParams, umf_numa_split_partition_t *partitions, unsigned partitions_len); +/// @brief Set custom name of the OS memory provider. +/// @param hParams handle to the parameters of the OS memory provider. +/// @param name custom name. Must not be NULL. Name longer than 63 characters +/// will be truncated. +/// @return UMF_RESULT_SUCCESS on success or appropriate error code on failure. 
+umf_result_t +umfOsMemoryProviderParamsSetName(umf_os_memory_provider_params_handle_t hParams, + const char *name); + /// @brief OS Memory Provider operation results typedef enum umf_os_memory_provider_native_error { UMF_OS_RESULT_SUCCESS = UMF_OS_RESULTS_START_FROM, ///< Success diff --git a/src/libumf.def b/src/libumf.def index 0159ddbe2..ad61d2fb5 100644 --- a/src/libumf.def +++ b/src/libumf.def @@ -144,3 +144,12 @@ EXPORTS umfJemallocPoolParamsDestroy umfJemallocPoolParamsSetNumArenas umfPoolGetName +; Added in UMF_1.1 + umfCUDAMemoryProviderParamsSetName + umfDevDaxMemoryProviderParamsSetName + umfFileMemoryProviderParamsSetName + umfFixedMemoryProviderParamsSetName + umfJemallocPoolParamsSetName + umfLevelZeroMemoryProviderParamsSetName + umfOsMemoryProviderParamsSetName + umfScalablePoolParamsSetName diff --git a/src/libumf.map b/src/libumf.map index 348675ff0..c6e3db9ac 100644 --- a/src/libumf.map +++ b/src/libumf.map @@ -141,3 +141,14 @@ UMF_1.0 { local: *; }; + +UMF_1.1 { + umfCUDAMemoryProviderParamsSetName; + umfDevDaxMemoryProviderParamsSetName; + umfFileMemoryProviderParamsSetName; + umfFixedMemoryProviderParamsSetName; + umfJemallocPoolParamsSetName; + umfLevelZeroMemoryProviderParamsSetName; + umfOsMemoryProviderParamsSetName; + umfScalablePoolParamsSetName; +} UMF_1.0; diff --git a/src/pool/pool_jemalloc.c b/src/pool/pool_jemalloc.c index abbf50d2b..322abc4b9 100644 --- a/src/pool/pool_jemalloc.c +++ b/src/pool/pool_jemalloc.c @@ -44,19 +44,30 @@ umfJemallocPoolParamsSetNumArenas(umf_jemalloc_pool_params_handle_t hParams, return UMF_RESULT_ERROR_NOT_SUPPORTED; } +umf_result_t +umfJemallocPoolParamsSetName(umf_jemalloc_pool_params_handle_t hParams, + const char *name) { + (void)hParams; // unused + (void)name; // unused + return UMF_RESULT_ERROR_NOT_SUPPORTED; +} + #else #include #define MALLOCX_ARENA_MAX (MALLCTL_ARENAS_ALL - 1) +#define DEFAULT_NAME "jemalloc" typedef struct umf_jemalloc_pool_params_t { size_t n_arenas; + char name[64]; } umf_jemalloc_pool_params_t; typedef struct jemalloc_memory_pool_t { umf_memory_provider_handle_t provider; size_t n_arenas; + char name[64]; unsigned int arena_index[]; } jemalloc_memory_pool_t; @@ -459,6 +470,13 @@ static umf_result_t op_initialize(umf_memory_provider_handle_t provider, if (!pool) { return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; } + memset(pool, 0, sizeof(*pool) + n_arenas * sizeof(*pool->arena_index)); + const char *pool_name = DEFAULT_NAME; + if (jemalloc_params) { + pool_name = jemalloc_params->name; + } + + snprintf(pool->name, sizeof(pool->name), "%s", pool_name); pool->provider = provider; pool->n_arenas = n_arenas; @@ -558,8 +576,15 @@ static umf_result_t op_get_last_allocation_error(void *pool) { } static umf_result_t op_get_name(void *pool, const char **name) { - (void)pool; - *name = "jemalloc"; + if (!name) { + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + if (pool == NULL) { + *name = DEFAULT_NAME; + return UMF_RESULT_SUCCESS; + } + jemalloc_memory_pool_t *je_pool = (jemalloc_memory_pool_t *)pool; + *name = je_pool->name; return UMF_RESULT_SUCCESS; } @@ -588,6 +613,8 @@ umfJemallocPoolParamsCreate(umf_jemalloc_pool_params_handle_t *hParams) { return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; } memset(params, 0, sizeof(*params)); + strncpy(params->name, DEFAULT_NAME, sizeof(params->name) - 1); + params->name[sizeof(params->name) - 1] = '\0'; *hParams = params; return UMF_RESULT_SUCCESS; } @@ -609,4 +636,23 @@ umfJemallocPoolParamsSetNumArenas(umf_jemalloc_pool_params_handle_t hParams, return UMF_RESULT_SUCCESS; } 
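The setter implemented in the lines that follow mirrors the other params setters in this file: validate both arguments, then copy at most 63 characters into the fixed-size name buffer. As a usage sketch (assuming only the public UMF headers that already appear elsewhere in this patch; error checking trimmed for brevity), a caller wires the new umfJemallocPoolParamsSetName() into the existing pool-creation flow like this:

#include <umf/memory_pool.h>
#include <umf/memory_provider.h>
#include <umf/pools/pool_jemalloc.h>
#include <umf/providers/provider_os_memory.h>

static void create_named_jemalloc_pool(void) {
    // Any backing provider works; the OS provider is used here only because
    // it needs no extra setup.
    umf_os_memory_provider_params_handle_t os_params = NULL;
    umfOsMemoryProviderParamsCreate(&os_params);
    umf_memory_provider_handle_t provider = NULL;
    umfMemoryProviderCreate(umfOsMemoryProviderOps(), os_params, &provider);

    // Name the pool before creating it (exported as part of UMF_1.1).
    umf_jemalloc_pool_params_handle_t pool_params = NULL;
    umfJemallocPoolParamsCreate(&pool_params);
    umfJemallocPoolParamsSetName(pool_params, "my_jemalloc");

    umf_memory_pool_handle_t pool = NULL;
    umfPoolCreate(umfJemallocPoolOps(), provider, pool_params, 0, &pool);

    const char *name = NULL;
    umfPoolGetName(pool, &name); // reports "my_jemalloc" in traces and here

    umfPoolDestroy(pool);
    umfJemallocPoolParamsDestroy(pool_params);
    umfMemoryProviderDestroy(provider);
    umfOsMemoryProviderParamsDestroy(os_params);
}

Because the name is stored in a char name[64] field, anything past 63 characters is silently truncated, which is what the updated header comments document.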
+umf_result_t +umfJemallocPoolParamsSetName(umf_jemalloc_pool_params_handle_t hParams, + const char *name) { + if (!hParams) { + LOG_ERR("jemalloc pool params handle is NULL"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + if (!name) { + LOG_ERR("name is NULL"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + strncpy(hParams->name, name, sizeof(hParams->name) - 1); + hParams->name[sizeof(hParams->name) - 1] = '\0'; + + return UMF_RESULT_SUCCESS; +} + #endif /* UMF_POOL_JEMALLOC_ENABLED */ diff --git a/src/pool/pool_scalable.c b/src/pool/pool_scalable.c index 982a3408d..023596374 100644 --- a/src/pool/pool_scalable.c +++ b/src/pool/pool_scalable.c @@ -36,6 +36,7 @@ static __TLS umf_result_t TLS_last_allocation_error; static __TLS umf_result_t TLS_last_free_error; static const size_t DEFAULT_GRANULARITY = 2 * 1024 * 1024; // 2MB +static const char *DEFAULT_NAME = "scalable"; typedef struct tbb_mem_pool_policy_t { raw_alloc_tbb_type pAlloc; @@ -48,6 +49,7 @@ typedef struct tbb_mem_pool_policy_t { typedef struct umf_scalable_pool_params_t { size_t granularity; bool keep_all_memory; + char name[64]; } umf_scalable_pool_params_t; typedef struct tbb_callbacks_t { @@ -70,6 +72,7 @@ typedef struct tbb_callbacks_t { typedef struct tbb_memory_pool_t { umf_memory_provider_handle_t mem_provider; void *tbb_pool; + char name[64]; } tbb_memory_pool_t; typedef enum tbb_enums_t { @@ -216,6 +219,8 @@ umfScalablePoolParamsCreate(umf_scalable_pool_params_handle_t *hParams) { params_data->granularity = DEFAULT_GRANULARITY; params_data->keep_all_memory = false; + strncpy(params_data->name, DEFAULT_NAME, sizeof(params_data->name) - 1); + params_data->name[sizeof(params_data->name) - 1] = '\0'; *hParams = (umf_scalable_pool_params_handle_t)params_data; @@ -265,6 +270,25 @@ umfScalablePoolParamsSetKeepAllMemory(umf_scalable_pool_params_handle_t hParams, return UMF_RESULT_SUCCESS; } +umf_result_t +umfScalablePoolParamsSetName(umf_scalable_pool_params_handle_t hParams, + const char *name) { + if (!hParams) { + LOG_ERR("scalable pool params handle is NULL"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + if (!name) { + LOG_ERR("name is NULL"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + strncpy(hParams->name, name, sizeof(hParams->name) - 1); + hParams->name[sizeof(hParams->name) - 1] = '\0'; + + return UMF_RESULT_SUCCESS; +} + static umf_result_t tbb_pool_initialize(umf_memory_provider_handle_t provider, const void *params, void **pool) { tbb_mem_pool_policy_t policy = {.pAlloc = tbb_raw_alloc_wrapper, @@ -275,11 +299,13 @@ static umf_result_t tbb_pool_initialize(umf_memory_provider_handle_t provider, .keep_all_memory = false, .reserved = 0}; + const char *pool_name = DEFAULT_NAME; // If params is provided, override defaults if (params) { const umf_scalable_pool_params_t *scalable_params = params; policy.granularity = scalable_params->granularity; policy.keep_all_memory = scalable_params->keep_all_memory; + pool_name = scalable_params->name; } tbb_memory_pool_t *pool_data = @@ -288,6 +314,8 @@ static umf_result_t tbb_pool_initialize(umf_memory_provider_handle_t provider, LOG_ERR("cannot allocate memory for metadata"); return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; } + memset(pool_data, 0, sizeof(*pool_data)); + snprintf(pool_data->name, sizeof(pool_data->name), "%s", pool_name); umf_result_t res = UMF_RESULT_SUCCESS; int ret = init_tbb_callbacks(); @@ -433,8 +461,15 @@ static umf_result_t pool_ctl(void *hPool, umf_ctl_query_source_t operationType, } static umf_result_t scalable_get_name(void 
*pool, const char **name) { - (void)pool; // unused - *name = "scalable"; + if (!name) { + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + if (pool == NULL) { + *name = DEFAULT_NAME; + return UMF_RESULT_SUCCESS; + } + tbb_memory_pool_t *pool_data = (tbb_memory_pool_t *)pool; + *name = pool_data->name; return UMF_RESULT_SUCCESS; } diff --git a/src/provider/provider_cuda.c b/src/provider/provider_cuda.c index 983be6b55..1c15b49e4 100644 --- a/src/provider/provider_cuda.c +++ b/src/provider/provider_cuda.c @@ -55,6 +55,7 @@ typedef struct cu_memory_provider_t { size_t min_alignment; unsigned int alloc_flags; ctl_stats_t stats; + char name[64]; } cu_memory_provider_t; #define CTL_PROVIDER_TYPE cu_memory_provider_t @@ -73,6 +74,7 @@ typedef struct umf_cuda_memory_provider_params_t { // Allocation flags for cuMemHostAlloc/cuMemAllocManaged unsigned int alloc_flags; + char name[64]; } umf_cuda_memory_provider_params_t; typedef struct cu_ops_t { @@ -112,6 +114,8 @@ static umf_result_t cu_memory_provider_free(void *provider, void *ptr, #define TLS_MSG_BUF_LEN 1024 +static const char *DEFAULT_NAME = "CUDA"; + typedef struct cu_last_native_error_t { CUresult native_error; char msg_buff[TLS_MSG_BUF_LEN]; @@ -248,6 +252,8 @@ umf_result_t umfCUDAMemoryProviderParamsCreate( params_data->memory_type = UMF_MEMORY_TYPE_UNKNOWN; params_data->alloc_flags = 0; + strncpy(params_data->name, DEFAULT_NAME, sizeof(params_data->name) - 1); + params_data->name[sizeof(params_data->name) - 1] = '\0'; *hParams = params_data; @@ -310,6 +316,24 @@ umf_result_t umfCUDAMemoryProviderParamsSetAllocFlags( return UMF_RESULT_SUCCESS; } +umf_result_t umfCUDAMemoryProviderParamsSetName( + umf_cuda_memory_provider_params_handle_t hParams, const char *name) { + if (!hParams) { + LOG_ERR("CUDA Memory Provider params handle is NULL"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + if (!name) { + LOG_ERR("name is NULL"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + strncpy(hParams->name, name, sizeof(hParams->name) - 1); + hParams->name[sizeof(hParams->name) - 1] = '\0'; + + return UMF_RESULT_SUCCESS; +} + static umf_result_t cu_memory_provider_initialize(const void *params, void **provider) { if (params == NULL) { @@ -345,7 +369,10 @@ static umf_result_t cu_memory_provider_initialize(const void *params, if (!cu_provider) { return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; } - memset(cu_provider, 0, sizeof(cu_memory_provider_t)); + + memset(cu_provider, 0, sizeof(*cu_provider)); + snprintf(cu_provider->name, sizeof(cu_provider->name), "%s", + cu_params->name); // CUDA alloc functions doesn't allow to provide user alignment - get the // minimum one from the driver @@ -620,8 +647,15 @@ cu_memory_provider_get_recommended_page_size(void *provider, size_t size, static umf_result_t cu_memory_provider_get_name(void *provider, const char **name) { - (void)provider; - *name = "CUDA"; + if (!name) { + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + if (provider == NULL) { + *name = DEFAULT_NAME; + return UMF_RESULT_SUCCESS; + } + cu_memory_provider_t *cu_provider = (cu_memory_provider_t *)provider; + *name = cu_provider->name; return UMF_RESULT_SUCCESS; } @@ -790,6 +824,14 @@ umf_result_t umfCUDAMemoryProviderParamsSetAllocFlags( return UMF_RESULT_ERROR_NOT_SUPPORTED; } +umf_result_t umfCUDAMemoryProviderParamsSetName( + umf_cuda_memory_provider_params_handle_t hParams, const char *name) { + (void)hParams; + (void)name; + LOG_ERR("CUDA provider is disabled (UMF_BUILD_CUDA_PROVIDER is OFF)!"); + return UMF_RESULT_ERROR_NOT_SUPPORTED; +} + 
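The stub above keeps the symbol available when the CUDA provider is compiled out; the enabled path, like every other provider params handle touched by this patch, follows one pattern: set the name on the params handle before umfMemoryProviderCreate(), then read it back through umfMemoryProviderGetName(). A minimal sketch of that pattern (shown with the OS memory provider purely because it needs no device, context, or file setup; error checks omitted):

#include <umf/memory_provider.h>
#include <umf/providers/provider_os_memory.h>

static void create_named_provider(void) {
    umf_os_memory_provider_params_handle_t params = NULL;
    umfOsMemoryProviderParamsCreate(&params);

    // New UMF_1.1 setter: rejects NULL, truncates past 63 characters.
    umfOsMemoryProviderParamsSetName(params, "my_os");

    umf_memory_provider_handle_t provider = NULL;
    umfMemoryProviderCreate(umfOsMemoryProviderOps(), params, &provider);

    const char *name = NULL;
    umfMemoryProviderGetName(provider, &name); // name == "my_os"

    umfMemoryProviderDestroy(provider);
    umfOsMemoryProviderParamsDestroy(params);
}

The same calls apply to the CUDA, Level Zero, DevDax, File, and Fixed providers through their respective *ParamsSetName functions; when a provider is built out, as in the stubs here, the setter returns UMF_RESULT_ERROR_NOT_SUPPORTED.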
const umf_memory_provider_ops_t *umfCUDAMemoryProviderOps(void) { // not supported LOG_ERR("CUDA provider is disabled (UMF_BUILD_CUDA_PROVIDER is OFF)!"); diff --git a/src/provider/provider_devdax_memory.c b/src/provider/provider_devdax_memory.c index 5cf1ac0fd..7ddf3c72a 100644 --- a/src/provider/provider_devdax_memory.c +++ b/src/provider/provider_devdax_memory.c @@ -62,6 +62,14 @@ umf_result_t umfDevDaxMemoryProviderParamsSetProtection( return UMF_RESULT_ERROR_NOT_SUPPORTED; } +umf_result_t umfDevDaxMemoryProviderParamsSetName( + umf_devdax_memory_provider_params_handle_t hParams, const char *name) { + (void)hParams; + (void)name; + LOG_ERR("DevDax memory provider is disabled!"); + return UMF_RESULT_ERROR_NOT_SUPPORTED; +} + #else // !defined(_WIN32) #include "base_alloc_global.h" @@ -76,6 +84,8 @@ umf_result_t umfDevDaxMemoryProviderParamsSetProtection( #define TLS_MSG_BUF_LEN 1024 +static const char *DEFAULT_NAME = "DEVDAX"; + typedef struct devdax_memory_provider_t { char path[PATH_MAX]; // a path to the device DAX size_t size; // size of the file used for memory mapping @@ -85,6 +95,7 @@ typedef struct devdax_memory_provider_t { unsigned protection; // combination of OS-specific protection flags coarse_t *coarse; // coarse library handle ctl_stats_t stats; + char name[64]; } devdax_memory_provider_t; #define CTL_PROVIDER_TYPE devdax_memory_provider_t @@ -95,6 +106,7 @@ typedef struct umf_devdax_memory_provider_params_t { char *path; size_t size; unsigned protection; + char name[64]; } umf_devdax_memory_provider_params_t; typedef struct devdax_last_native_error_t { @@ -186,6 +198,8 @@ static umf_result_t devdax_initialize(const void *params, void **provider) { } memset(devdax_provider, 0, sizeof(*devdax_provider)); + snprintf(devdax_provider->name, sizeof(devdax_provider->name), "%s", + in_params->name); coarse_params_t coarse_params = {0}; coarse_params.provider = devdax_provider; @@ -381,8 +395,16 @@ static umf_result_t devdax_purge_force(void *provider, void *ptr, size_t size) { } static umf_result_t devdax_get_name(void *provider, const char **name) { - (void)provider; // unused - *name = "DEVDAX"; + if (!name) { + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + if (provider == NULL) { + *name = DEFAULT_NAME; + return UMF_RESULT_SUCCESS; + } + devdax_memory_provider_t *devdax_provider = + (devdax_memory_provider_t *)provider; + *name = devdax_provider->name; return UMF_RESULT_SUCCESS; } @@ -614,6 +636,9 @@ umf_result_t umfDevDaxMemoryProviderParamsCreate( return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; } + strncpy(params->name, DEFAULT_NAME, sizeof(params->name) - 1); + params->name[sizeof(params->name) - 1] = '\0'; + params->path = NULL; params->size = 0; params->protection = UMF_PROTECTION_READ | UMF_PROTECTION_WRITE; @@ -698,4 +723,22 @@ umf_result_t umfDevDaxMemoryProviderParamsSetProtection( return UMF_RESULT_SUCCESS; } +umf_result_t umfDevDaxMemoryProviderParamsSetName( + umf_devdax_memory_provider_params_handle_t hParams, const char *name) { + if (hParams == NULL) { + LOG_ERR("DevDax Memory Provider params handle is NULL"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + if (name == NULL) { + LOG_ERR("name is NULL"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + strncpy(hParams->name, name, sizeof(hParams->name) - 1); + hParams->name[sizeof(hParams->name) - 1] = '\0'; + + return UMF_RESULT_SUCCESS; +} + #endif // !defined(_WIN32) diff --git a/src/provider/provider_file_memory.c b/src/provider/provider_file_memory.c index 1031993d2..bff4034b2 100644 --- 
a/src/provider/provider_file_memory.c +++ b/src/provider/provider_file_memory.c @@ -68,6 +68,14 @@ umf_result_t umfFileMemoryProviderParamsSetVisibility( return UMF_RESULT_ERROR_NOT_SUPPORTED; } +umf_result_t umfFileMemoryProviderParamsSetName( + umf_file_memory_provider_params_handle_t hParams, const char *name) { + (void)hParams; + (void)name; + LOG_ERR("File memory provider is disabled!"); + return UMF_RESULT_ERROR_NOT_SUPPORTED; +} + #else // !defined(_WIN32) #include "base_alloc_global.h" @@ -83,6 +91,8 @@ umf_result_t umfFileMemoryProviderParamsSetVisibility( #define TLS_MSG_BUF_LEN 1024 +static const char *DEFAULT_NAME = "FILE"; + typedef struct file_memory_provider_t { utils_mutex_t lock; // lock for file parameters (size and offsets) @@ -113,7 +123,9 @@ typedef struct file_memory_provider_t { critnib *fd_offset_map; coarse_t *coarse; // coarse library handle + ctl_stats_t stats; + char name[64]; } file_memory_provider_t; #define CTL_PROVIDER_TYPE file_memory_provider_t @@ -124,6 +136,7 @@ typedef struct umf_file_memory_provider_params_t { char *path; unsigned protection; umf_memory_visibility_t visibility; + char name[64]; } umf_file_memory_provider_params_t; typedef struct file_last_native_error_t { @@ -218,6 +231,8 @@ static umf_result_t file_initialize(const void *params, void **provider) { } memset(file_provider, 0, sizeof(*file_provider)); + snprintf(file_provider->name, sizeof(file_provider->name), "%s", + in_params->name); ret = file_translate_params(in_params, file_provider); if (ret != UMF_RESULT_SUCCESS) { @@ -649,8 +664,15 @@ static umf_result_t file_purge_force(void *provider, void *ptr, size_t size) { } static umf_result_t file_get_name(void *provider, const char **name) { - (void)provider; // unused - *name = "FILE"; + if (!name) { + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + if (provider == NULL) { + *name = DEFAULT_NAME; + return UMF_RESULT_SUCCESS; + } + file_memory_provider_t *file_provider = (file_memory_provider_t *)provider; + *name = file_provider->name; return UMF_RESULT_SUCCESS; } @@ -942,6 +964,8 @@ umf_result_t umfFileMemoryProviderParamsCreate( params->path = NULL; params->protection = UMF_PROTECTION_READ | UMF_PROTECTION_WRITE; params->visibility = UMF_MEM_MAP_PRIVATE; + strncpy(params->name, DEFAULT_NAME, sizeof(params->name) - 1); + params->name[sizeof(params->name) - 1] = '\0'; umf_result_t res = umfFileMemoryProviderParamsSetPath(params, path); if (res != UMF_RESULT_SUCCESS) { @@ -1023,4 +1047,22 @@ umf_result_t umfFileMemoryProviderParamsSetVisibility( return UMF_RESULT_SUCCESS; } +umf_result_t umfFileMemoryProviderParamsSetName( + umf_file_memory_provider_params_handle_t hParams, const char *name) { + if (hParams == NULL) { + LOG_ERR("File Memory Provider params handle is NULL"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + if (name == NULL) { + LOG_ERR("name is NULL"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + strncpy(hParams->name, name, sizeof(hParams->name) - 1); + hParams->name[sizeof(hParams->name) - 1] = '\0'; + + return UMF_RESULT_SUCCESS; +} + #endif // !defined(_WIN32) diff --git a/src/provider/provider_fixed_memory.c b/src/provider/provider_fixed_memory.c index 08fd3e7f6..d761a4024 100644 --- a/src/provider/provider_fixed_memory.c +++ b/src/provider/provider_fixed_memory.c @@ -28,17 +28,21 @@ #define TLS_MSG_BUF_LEN 1024 +static const char *DEFAULT_NAME = "FIXED"; + typedef struct fixed_memory_provider_t { void *base; // base address of memory size_t size; // size of the memory region coarse_t *coarse; // coarse 
library handle ctl_stats_t stats; + char name[64]; } fixed_memory_provider_t; // Fixed Memory provider settings struct typedef struct umf_fixed_memory_provider_params_t { void *ptr; size_t size; + char name[64]; } umf_fixed_memory_provider_params_t; typedef struct fixed_last_native_error_t { @@ -110,6 +114,8 @@ static umf_result_t fixed_initialize(const void *params, void **provider) { } memset(fixed_provider, 0, sizeof(*fixed_provider)); + snprintf(fixed_provider->name, sizeof(fixed_provider->name), "%s", + in_params->name); coarse_params_t coarse_params = {0}; coarse_params.provider = fixed_provider; @@ -251,8 +257,16 @@ static umf_result_t fixed_purge_force(void *provider, void *ptr, size_t size) { } static umf_result_t fixed_get_name(void *provider, const char **name) { - (void)provider; // unused - *name = "FIXED"; + if (!name) { + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + if (provider == NULL) { + *name = DEFAULT_NAME; + return UMF_RESULT_SUCCESS; + } + fixed_memory_provider_t *fixed_provider = + (fixed_memory_provider_t *)provider; + *name = fixed_provider->name; return UMF_RESULT_SUCCESS; } @@ -333,6 +347,9 @@ umf_result_t umfFixedMemoryProviderParamsCreate( return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; } + strncpy(params->name, DEFAULT_NAME, sizeof(params->name) - 1); + params->name[sizeof(params->name) - 1] = '\0'; + umf_result_t ret = umfFixedMemoryProviderParamsSetMemory(params, ptr, size); if (ret != UMF_RESULT_SUCCESS) { umf_ba_global_free(params); @@ -375,3 +392,21 @@ umf_result_t umfFixedMemoryProviderParamsSetMemory( hParams->size = size; return UMF_RESULT_SUCCESS; } + +umf_result_t umfFixedMemoryProviderParamsSetName( + umf_fixed_memory_provider_params_handle_t hParams, const char *name) { + if (hParams == NULL) { + LOG_ERR("Memory Provider params handle is NULL"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + if (name == NULL) { + LOG_ERR("name is NULL"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + strncpy(hParams->name, name, sizeof(hParams->name) - 1); + hParams->name[sizeof(hParams->name) - 1] = '\0'; + + return UMF_RESULT_SUCCESS; +} diff --git a/src/provider/provider_level_zero.c b/src/provider/provider_level_zero.c index d5e79b244..8703e8fdc 100644 --- a/src/provider/provider_level_zero.c +++ b/src/provider/provider_level_zero.c @@ -57,6 +57,7 @@ typedef struct umf_level_zero_memory_provider_params_t { freePolicy; ///< Memory free policy uint32_t device_ordinal; + char name[64]; } umf_level_zero_memory_provider_params_t; typedef struct ze_memory_provider_t { @@ -74,6 +75,7 @@ typedef struct ze_memory_provider_t { size_t min_page_size; uint32_t device_ordinal; + char name[64]; ctl_stats_t stats; } ze_memory_provider_t; @@ -113,6 +115,7 @@ static ze_ops_t g_ze_ops; static UTIL_ONCE_FLAG ze_is_initialized = UTIL_ONCE_FLAG_INIT; static bool Init_ze_global_state_failed; static __TLS ze_result_t TLS_last_native_error; +static const char *DEFAULT_NAME = "LEVEL_ZERO"; static void store_last_native_error(int32_t native_error) { TLS_last_native_error = native_error; @@ -249,6 +252,8 @@ umf_result_t umfLevelZeroMemoryProviderParamsCreate( params->resident_device_count = 0; params->freePolicy = UMF_LEVEL_ZERO_MEMORY_PROVIDER_FREE_POLICY_DEFAULT; params->device_ordinal = 0; + strncpy(params->name, DEFAULT_NAME, sizeof(params->name) - 1); + params->name[sizeof(params->name) - 1] = '\0'; *hParams = params; @@ -318,6 +323,24 @@ umf_result_t umfLevelZeroMemoryProviderParamsSetDeviceOrdinal( return UMF_RESULT_SUCCESS; } +umf_result_t 
umfLevelZeroMemoryProviderParamsSetName( + umf_level_zero_memory_provider_params_handle_t hParams, const char *name) { + if (!hParams) { + LOG_ERR("Level Zero memory provider params handle is NULL"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + if (!name) { + LOG_ERR("name is NULL"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + strncpy(hParams->name, name, sizeof(hParams->name) - 1); + hParams->name[sizeof(hParams->name) - 1] = '\0'; + + return UMF_RESULT_SUCCESS; +} + umf_result_t umfLevelZeroMemoryProviderParamsSetResidentDevices( umf_level_zero_memory_provider_params_handle_t hParams, ze_device_handle_t *hDevices, uint32_t deviceCount) { @@ -565,6 +588,9 @@ static umf_result_t ze_memory_provider_initialize(const void *params, LOG_ERR("Cannot allocate memory for Level Zero Memory Provider"); return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; } + memset(ze_provider, 0, sizeof(*ze_provider)); + snprintf(ze_provider->name, sizeof(ze_provider->name), "%s", + ze_params->name); ze_provider->context = ze_params->level_zero_context_handle; ze_provider->device = ze_params->level_zero_device_handle; @@ -687,8 +713,15 @@ ze_memory_provider_get_recommended_page_size(void *provider, size_t size, static umf_result_t ze_memory_provider_get_name(void *provider, const char **name) { - (void)provider; - *name = "LEVEL_ZERO"; + if (!name) { + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + if (provider == NULL) { + *name = DEFAULT_NAME; + return UMF_RESULT_SUCCESS; + } + ze_memory_provider_t *ze_provider = (ze_memory_provider_t *)provider; + *name = ze_provider->name; return UMF_RESULT_SUCCESS; } @@ -939,6 +972,13 @@ umf_result_t umfLevelZeroMemoryProviderParamsSetDeviceOrdinal( return UMF_RESULT_ERROR_NOT_SUPPORTED; } +umf_result_t umfLevelZeroMemoryProviderParamsSetName( + umf_level_zero_memory_provider_params_handle_t hParams, const char *name) { + (void)hParams; + (void)name; + return UMF_RESULT_ERROR_NOT_SUPPORTED; +} + const umf_memory_provider_ops_t *umfLevelZeroMemoryProviderOps(void) { // not supported LOG_ERR("L0 memory provider is disabled! 
(UMF_BUILD_LEVEL_ZERO_PROVIDER is " diff --git a/src/provider/provider_os_memory.c b/src/provider/provider_os_memory.c index 659f90841..abea227a3 100644 --- a/src/provider/provider_os_memory.c +++ b/src/provider/provider_os_memory.c @@ -36,6 +36,8 @@ #define TLS_MSG_BUF_LEN 1024 +static const char *DEFAULT_NAME = "OS"; + typedef struct umf_os_memory_provider_params_t { // Combination of 'umf_mem_protection_flags_t' flags unsigned protection; @@ -60,6 +62,7 @@ typedef struct umf_os_memory_provider_params_t { umf_numa_split_partition_t *partitions; /// len of the partitions array unsigned partitions_len; + char name[64]; } umf_os_memory_provider_params_t; typedef struct os_last_native_error_t { @@ -555,6 +558,8 @@ static umf_result_t os_initialize(const void *params, void **provider) { } memset(os_provider, 0, sizeof(*os_provider)); + snprintf(os_provider->name, sizeof(os_provider->name), "%s", + in_params->name); int r = hwloc_topology_init(&os_provider->topo); if (r) { @@ -1157,8 +1162,15 @@ static umf_result_t os_purge_force(void *provider, void *ptr, size_t size) { } static umf_result_t os_get_name(void *provider, const char **name) { - (void)provider; // unused - *name = "OS"; + if (!name) { + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + if (provider == NULL) { + *name = DEFAULT_NAME; + return UMF_RESULT_SUCCESS; + } + os_memory_provider_t *os_provider = (os_memory_provider_t *)provider; + *name = os_provider->name; return UMF_RESULT_SUCCESS; } @@ -1428,6 +1440,8 @@ umf_result_t umfOsMemoryProviderParamsCreate( params->part_size = 0; params->partitions = NULL; params->partitions_len = 0; + strncpy(params->name, DEFAULT_NAME, sizeof(params->name) - 1); + params->name[sizeof(params->name) - 1] = '\0'; *hParams = params; @@ -1583,3 +1597,22 @@ umf_result_t umfOsMemoryProviderParamsSetPartitions( return UMF_RESULT_SUCCESS; } + +umf_result_t +umfOsMemoryProviderParamsSetName(umf_os_memory_provider_params_handle_t hParams, + const char *name) { + if (hParams == NULL) { + LOG_ERR("OS memory provider params handle is NULL"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + if (name == NULL) { + LOG_ERR("name is NULL"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + strncpy(hParams->name, name, sizeof(hParams->name) - 1); + hParams->name[sizeof(hParams->name) - 1] = '\0'; + + return UMF_RESULT_SUCCESS; +} diff --git a/src/provider/provider_os_memory_internal.h b/src/provider/provider_os_memory_internal.h index 4d2e8e217..3648d4a88 100644 --- a/src/provider/provider_os_memory_internal.h +++ b/src/provider/provider_os_memory_internal.h @@ -70,6 +70,8 @@ typedef struct os_memory_provider_t { hwloc_topology_t topo; + char name[64]; + ctl_stats_t stats; } os_memory_provider_t; diff --git a/test/pools/disjoint_pool.cpp b/test/pools/disjoint_pool.cpp index 5497cd568..8050c9af3 100644 --- a/test/pools/disjoint_pool.cpp +++ b/test/pools/disjoint_pool.cpp @@ -14,6 +14,7 @@ #include "provider.hpp" #include "provider_null.h" #include "provider_trace.h" +#include "umf/memory_provider.h" using umf_test::test; using namespace umf_test; @@ -336,31 +337,56 @@ TEST_F(test, disjointPoolName) { umf_disjoint_pool_params_handle_t params = nullptr; umf_result_t res = umfDisjointPoolParamsCreate(¶ms); EXPECT_EQ(res, UMF_RESULT_SUCCESS); - umf_memory_provider_handle_t provider_handle = nullptr; umf_memory_pool_handle_t pool = NULL; - struct memory_provider : public umf_test::provider_base_t {}; + auto nullProvider = nullProviderCreate(); - umf_memory_provider_ops_t provider_ops = - 
umf_test::providerMakeCOps(); + res = umfPoolCreate(umfDisjointPoolOps(), nullProvider, params, 0, &pool); - auto providerUnique = - wrapProviderUnique(createProviderChecked(&provider_ops, nullptr)); + EXPECT_EQ(res, UMF_RESULT_SUCCESS); + const char *name = nullptr; + res = umfPoolGetName(pool, &name); + EXPECT_EQ(res, UMF_RESULT_SUCCESS); + EXPECT_STREQ(name, "disjoint"); - provider_handle = providerUnique.get(); + umfPoolDestroy(pool); + umfMemoryProviderDestroy(nullProvider); + umfDisjointPoolParamsDestroy(params); +} - res = - umfPoolCreate(umfDisjointPoolOps(), provider_handle, params, 0, &pool); +TEST_F(test, disjointPoolCustomName) { + umf_disjoint_pool_params_handle_t params = nullptr; + umf_result_t res = umfDisjointPoolParamsCreate(¶ms); + EXPECT_EQ(res, UMF_RESULT_SUCCESS); + + res = umfDisjointPoolParamsSetName(params, "my_disjoint"); + EXPECT_EQ(res, UMF_RESULT_SUCCESS); + + struct memory_provider : public umf_test::provider_base_t {}; + + auto nullProvider = nullProviderCreate(); + umf_memory_pool_handle_t pool = NULL; + + res = umfPoolCreate(umfDisjointPoolOps(), nullProvider, params, 0, &pool); EXPECT_EQ(res, UMF_RESULT_SUCCESS); + const char *name = nullptr; res = umfPoolGetName(pool, &name); EXPECT_EQ(res, UMF_RESULT_SUCCESS); - EXPECT_STREQ(name, "disjoint"); + EXPECT_STREQ(name, "my_disjoint"); umfPoolDestroy(pool); + umfMemoryProviderDestroy(nullProvider); umfDisjointPoolParamsDestroy(params); } +TEST(DisjointPoolOps, default_name_null_handle) { + const char *name = nullptr; + EXPECT_EQ(umfDisjointPoolOps()->get_name(nullptr, &name), + UMF_RESULT_SUCCESS); + EXPECT_STREQ(name, "disjoint"); +} + TEST_F(test, disjointPoolDefaultParams) { // Disjoint pool defaults static constexpr size_t DefaultSlabMinSize = 64 * 1024; // 64K diff --git a/test/pools/jemalloc_pool.cpp b/test/pools/jemalloc_pool.cpp index c0cf202f0..61115cc71 100644 --- a/test/pools/jemalloc_pool.cpp +++ b/test/pools/jemalloc_pool.cpp @@ -193,18 +193,10 @@ TEST_F(test, jemallocPoolName) { umf_jemalloc_pool_params_handle_t params = nullptr; umf_result_t res = umfJemallocPoolParamsCreate(¶ms); EXPECT_EQ(res, UMF_RESULT_SUCCESS); - umf_memory_provider_handle_t provider_handle = nullptr; umf_memory_pool_handle_t pool = NULL; - struct memory_provider : public umf_test::provider_base_t {}; - umf_memory_provider_ops_t provider_ops = - umf_test::providerMakeCOps(); - auto providerUnique = - wrapProviderUnique(createProviderChecked(&provider_ops, nullptr)); - provider_handle = providerUnique.get(); - - res = - umfPoolCreate(umfJemallocPoolOps(), provider_handle, params, 0, &pool); + auto nullProvider = nullProviderCreate(); + res = umfPoolCreate(umfJemallocPoolOps(), nullProvider, params, 0, &pool); EXPECT_EQ(res, UMF_RESULT_SUCCESS); const char *name = nullptr; res = umfPoolGetName(pool, &name); @@ -212,5 +204,36 @@ TEST_F(test, jemallocPoolName) { EXPECT_STREQ(name, "jemalloc"); umfPoolDestroy(pool); + umfMemoryProviderDestroy(nullProvider); + umfJemallocPoolParamsDestroy(params); +} + +TEST_F(test, jemallocPoolCustomName) { + umf_jemalloc_pool_params_handle_t params = nullptr; + umf_result_t res = umfJemallocPoolParamsCreate(¶ms); + EXPECT_EQ(res, UMF_RESULT_SUCCESS); + + res = umfJemallocPoolParamsSetName(params, "my_jemalloc"); + EXPECT_EQ(res, UMF_RESULT_SUCCESS); + + auto nullProvider = nullProviderCreate(); + + umf_memory_pool_handle_t pool = NULL; + res = umfPoolCreate(umfJemallocPoolOps(), nullProvider, params, 0, &pool); + EXPECT_EQ(res, UMF_RESULT_SUCCESS); + const char *name = nullptr; + res = 
umfPoolGetName(pool, &name); + EXPECT_EQ(res, UMF_RESULT_SUCCESS); + EXPECT_STREQ(name, "my_jemalloc"); + + umfPoolDestroy(pool); + umfMemoryProviderDestroy(nullProvider); umfJemallocPoolParamsDestroy(params); } + +TEST(JemallocPoolOps, default_name_null_handle) { + const char *name = nullptr; + EXPECT_EQ(umfJemallocPoolOps()->get_name(nullptr, &name), + UMF_RESULT_SUCCESS); + EXPECT_STREQ(name, "jemalloc"); +} diff --git a/test/pools/scalable_pool.cpp b/test/pools/scalable_pool.cpp index 9fe9e100e..d34e16826 100644 --- a/test/pools/scalable_pool.cpp +++ b/test/pools/scalable_pool.cpp @@ -195,3 +195,40 @@ TEST(scalablePoolTest, scalablePoolName) { umfMemoryProviderDestroy(provider); umfOsMemoryProviderParamsDestroy(provider_params); } + +TEST(scalablePoolTest, scalablePoolCustomName) { + umf_memory_pool_handle_t pool = nullptr; + umf_os_memory_provider_params_handle_t provider_params = nullptr; + umf_memory_provider_handle_t provider = nullptr; + + auto ret = umfOsMemoryProviderParamsCreate(&provider_params); + ret = umfMemoryProviderCreate(umfOsMemoryProviderOps(), provider_params, + &provider); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + + umf_scalable_pool_params_handle_t params = nullptr; + ret = umfScalablePoolParamsCreate(¶ms); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + ASSERT_EQ(umfScalablePoolParamsSetName(params, "custom_scalable"), + UMF_RESULT_SUCCESS); + + ret = umfPoolCreate(umfScalablePoolOps(), provider, params, 0, &pool); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + + const char *name = nullptr; + ret = umfPoolGetName(pool, &name); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + EXPECT_STREQ(name, "custom_scalable"); + + umfPoolDestroy(pool); + umfScalablePoolParamsDestroy(params); + umfMemoryProviderDestroy(provider); + umfOsMemoryProviderParamsDestroy(provider_params); +} + +TEST(scalablePoolTest, default_name_null_handle) { + const char *name = nullptr; + EXPECT_EQ(umfScalablePoolOps()->get_name(nullptr, &name), + UMF_RESULT_SUCCESS); + EXPECT_STREQ(name, "scalable"); +} diff --git a/test/provider_devdax_memory.cpp b/test/provider_devdax_memory.cpp index 01e8d62ed..b6b3299e0 100644 --- a/test/provider_devdax_memory.cpp +++ b/test/provider_devdax_memory.cpp @@ -291,6 +291,36 @@ TEST_P(umfProviderTest, get_name) { ASSERT_STREQ(name, "DEVDAX"); } +TEST(DevDaxProviderName, custom_name) { + auto params_handle = create_devdax_params(); + if (!params_handle.get()) { + GTEST_SKIP() << "devdax params unavailable"; + } + + const char *custom = "my_devdax"; + auto ret = + umfDevDaxMemoryProviderParamsSetName(params_handle.get(), custom); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + + umf_memory_provider_handle_t prov = nullptr; + ret = umfMemoryProviderCreate(umfDevDaxMemoryProviderOps(), + params_handle.get(), &prov); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + + const char *name = nullptr; + ret = umfMemoryProviderGetName(prov, &name); + EXPECT_EQ(ret, UMF_RESULT_SUCCESS); + EXPECT_STREQ(name, custom); + umfMemoryProviderDestroy(prov); +} + +TEST(DevDaxProviderName, default_name_null_handle) { + const char *name = nullptr; + EXPECT_EQ(umfDevDaxMemoryProviderOps()->get_name(nullptr, &name), + UMF_RESULT_SUCCESS); + EXPECT_STREQ(name, "DEVDAX"); +} + TEST_P(umfProviderTest, free_size_0_ptr_not_null) { umf_result_t umf_result = umfMemoryProviderFree(provider.get(), INVALID_PTR, 0); diff --git a/test/provider_file_memory.cpp b/test/provider_file_memory.cpp index 1dd5324f0..4fae4ce33 100644 --- a/test/provider_file_memory.cpp +++ b/test/provider_file_memory.cpp @@ -354,6 +354,32 @@ 
TEST_P(FileProviderParamsDefault, get_name) { ASSERT_STREQ(name, "FILE"); } +TEST(FileProviderName, custom_name) { + auto params = get_file_params_default(FILE_PATH); + ASSERT_NE(params.get(), nullptr); + + const char *custom = "my_file"; + auto ret = umfFileMemoryProviderParamsSetName(params.get(), custom); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + + umf_memory_provider_handle_t prov = nullptr; + ret = umfMemoryProviderCreate(umfFileMemoryProviderOps(), params.get(), + &prov); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + const char *name = nullptr; + ret = umfMemoryProviderGetName(prov, &name); + EXPECT_EQ(ret, UMF_RESULT_SUCCESS); + EXPECT_STREQ(name, custom); + umfMemoryProviderDestroy(prov); +} + +TEST(FileProviderName, default_name_null_handle) { + const char *name = nullptr; + auto ret = umfFileMemoryProviderOps()->get_name(nullptr, &name); + EXPECT_EQ(ret, UMF_RESULT_SUCCESS); + EXPECT_STREQ(name, "FILE"); +} + TEST_P(FileProviderParamsDefault, free_size_0_ptr_not_null) { umf_result_t umf_result = umfMemoryProviderFree(provider.get(), INVALID_PTR, 0); diff --git a/test/provider_fixed_memory.cpp b/test/provider_fixed_memory.cpp index 173f51b53..a72deda63 100644 --- a/test/provider_fixed_memory.cpp +++ b/test/provider_fixed_memory.cpp @@ -263,6 +263,40 @@ TEST_P(FixedProviderTest, get_name) { ASSERT_STREQ(name, "FIXED"); } +TEST(FixedProviderName, custom_name) { + size_t mem_size = utils_get_page_size(); + void *buffer = malloc(mem_size); + ASSERT_NE(buffer, nullptr); + + umf_fixed_memory_provider_params_handle_t params = nullptr; + auto ret = umfFixedMemoryProviderParamsCreate(buffer, mem_size, ¶ms); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + + const char *custom = "my_fixed"; + ret = umfFixedMemoryProviderParamsSetName(params, custom); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + + umf_memory_provider_handle_t prov = nullptr; + ret = umfMemoryProviderCreate(umfFixedMemoryProviderOps(), params, &prov); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + + const char *name = nullptr; + ret = umfMemoryProviderGetName(prov, &name); + EXPECT_EQ(ret, UMF_RESULT_SUCCESS); + EXPECT_STREQ(name, custom); + + umfMemoryProviderDestroy(prov); + umfFixedMemoryProviderParamsDestroy(params); + free(buffer); +} + +TEST(FixedProviderName, default_name_null_handle) { + const char *name = nullptr; + auto ret = umfFixedMemoryProviderOps()->get_name(nullptr, &name); + EXPECT_EQ(ret, UMF_RESULT_SUCCESS); + EXPECT_STREQ(name, "FIXED"); +} + TEST_P(FixedProviderTest, free_size_0_ptr_not_null) { umf_result_t umf_result = umfMemoryProviderFree(provider.get(), INVALID_PTR, 0); diff --git a/test/provider_os_memory.cpp b/test/provider_os_memory.cpp index 1a4cea304..49b023a74 100644 --- a/test/provider_os_memory.cpp +++ b/test/provider_os_memory.cpp @@ -202,9 +202,9 @@ TEST_F(test, create_ZERO_WEIGHT_PARTITION) { os_memory_provider_params, &p, 1); EXPECT_EQ(umf_result, UMF_RESULT_SUCCESS); - umf_result = umfMemoryProviderCreate(umfOsMemoryProviderOps(), - &os_memory_provider_params, - &os_memory_provider); + umf_result = + umfMemoryProviderCreate(umfOsMemoryProviderOps(), + os_memory_provider_params, &os_memory_provider); umfOsMemoryProviderParamsDestroy(os_memory_provider_params); @@ -319,6 +319,32 @@ TEST_P(umfProviderTest, get_name) { ASSERT_STREQ(name, "OS"); } +TEST(OsProviderName, custom_name) { + auto params = createOsMemoryProviderParams(); + ASSERT_NE(params.get(), nullptr); + const char *custom = "my_os"; + auto ret = umfOsMemoryProviderParamsSetName(params.get(), custom); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + + 
umf_memory_provider_handle_t prov = nullptr; + ret = + umfMemoryProviderCreate(umfOsMemoryProviderOps(), params.get(), &prov); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + + const char *name = nullptr; + ret = umfMemoryProviderGetName(prov, &name); + EXPECT_EQ(ret, UMF_RESULT_SUCCESS); + EXPECT_STREQ(name, custom); + umfMemoryProviderDestroy(prov); +} + +TEST(OsProviderName, default_name_null_handle) { + const char *name = nullptr; + auto ret = umfOsMemoryProviderOps()->get_name(nullptr, &name); + EXPECT_EQ(ret, UMF_RESULT_SUCCESS); + EXPECT_STREQ(name, "OS"); +} + TEST_P(umfProviderTest, free_size_0_ptr_not_null) { umf_result_t umf_result = umfMemoryProviderFree(provider.get(), INVALID_PTR, 0); diff --git a/test/providers/provider_cuda.cpp b/test/providers/provider_cuda.cpp index 1b448eaf0..585868014 100644 --- a/test/providers/provider_cuda.cpp +++ b/test/providers/provider_cuda.cpp @@ -334,10 +334,34 @@ TEST_P(umfCUDAProviderTest, ctl_stats) { sizeof(peak), provider); ASSERT_EQ(ret, UMF_RESULT_SUCCESS); ASSERT_EQ(peak, 0u); + umfMemoryProviderDestroy(provider); +} + +TEST_P(umfCUDAProviderTest, custom_name) { + const char *custom = "my_cuda"; + ASSERT_EQ(umfCUDAMemoryProviderParamsSetName(params, custom), + UMF_RESULT_SUCCESS); + umf_memory_provider_handle_t provider = nullptr; + umf_result_t res = + umfMemoryProviderCreate(umfCUDAMemoryProviderOps(), params, &provider); + ASSERT_EQ(res, UMF_RESULT_SUCCESS); + ASSERT_NE(provider, nullptr); + + const char *name = nullptr; + res = umfMemoryProviderGetName(provider, &name); + ASSERT_EQ(res, UMF_RESULT_SUCCESS); + EXPECT_STREQ(name, custom); umfMemoryProviderDestroy(provider); } +TEST(umfCUDAProviderOps, default_name_null_handle) { + const char *name = nullptr; + auto ret = umfCUDAMemoryProviderOps()->get_name(nullptr, &name); + EXPECT_EQ(ret, UMF_RESULT_SUCCESS); + EXPECT_STREQ(name, "CUDA"); +} + TEST_P(umfCUDAProviderTest, allocInvalidSize) { CUcontext expected_current_context = get_current_context(); // create CUDA provider diff --git a/test/providers/provider_level_zero.cpp b/test/providers/provider_level_zero.cpp index feb377987..7aa3f048b 100644 --- a/test/providers/provider_level_zero.cpp +++ b/test/providers/provider_level_zero.cpp @@ -454,10 +454,35 @@ TEST_P(umfLevelZeroProviderTest, ctl_stats) { sizeof(peak), provider); ASSERT_EQ(ret, UMF_RESULT_SUCCESS); ASSERT_EQ(peak, 0u); + umfMemoryProviderDestroy(provider); +} + +TEST_P(umfLevelZeroProviderTest, custom_name) { + const char *custom = "my_level_zero"; + ASSERT_EQ(umfLevelZeroMemoryProviderParamsSetName(params, custom), + UMF_RESULT_SUCCESS); + + umf_memory_provider_handle_t provider = nullptr; + umf_result_t res = umfMemoryProviderCreate(umfLevelZeroMemoryProviderOps(), + params, &provider); + ASSERT_EQ(res, UMF_RESULT_SUCCESS); + ASSERT_NE(provider, nullptr); + const char *name = nullptr; + res = umfMemoryProviderGetName(provider, &name); + ASSERT_EQ(res, UMF_RESULT_SUCCESS); + EXPECT_STREQ(name, custom); umfMemoryProviderDestroy(provider); } +TEST(umfLevelZeroProviderOps, default_name_null_handle) { + const char *name = nullptr; + auto ret = umfLevelZeroMemoryProviderOps()->get_name(nullptr, &name); + + EXPECT_EQ(ret, UMF_RESULT_SUCCESS); + EXPECT_STREQ(name, "LEVEL_ZERO"); +} + TEST_P(umfLevelZeroProviderTest, allocInvalidSize) { umf_memory_provider_handle_t provider = nullptr; umf_result_t umf_result = umfMemoryProviderCreate(