From 74b2b1d4e98a71da8392be80421ba85dee20e9b8 Mon Sep 17 00:00:00 2001
From: Ben Deane
Date: Thu, 10 Oct 2024 16:29:30 -0600
Subject: [PATCH 1/3] :sparkles: Add `atomic_bitset`

Problem:
- It is useful to have atomic semantics on a bitset.

Solution:
- Add `atomic_bitset`.

Notes:
- `std::atomic<stdx::bitset<N>>` is unwieldy and could not guarantee the use of
  atomic instructions.
- `stdx::bitset<N, std::atomic<T>>` would not provide the right atomic
  semantics.
- The API of `atomic_bitset` differs from the API of `bitset` in some important
  ways, but is intended to provide for the use case of effectively using an
  atomic integral type as a bitset.
---
 .github/workflows/unit_tests.yml      |   2 +-
 CMakeLists.txt                        |   2 +
 docs/intrusive_forward_list.adoc      |   3 +-
 include/stdx/atomic_bitset.hpp        | 207 ++++++++++++++
 include/stdx/bitset.hpp               |   6 +-
 include/stdx/detail/bitset_common.hpp |  10 +
 test/CMakeLists.txt                   |   1 +
 test/atomic_bitset.cpp                | 370 ++++++++++++++++++++++++++
 8 files changed, 593 insertions(+), 8 deletions(-)
 create mode 100644 include/stdx/atomic_bitset.hpp
 create mode 100644 include/stdx/detail/bitset_common.hpp
 create mode 100644 test/atomic_bitset.cpp

diff --git a/.github/workflows/unit_tests.yml b/.github/workflows/unit_tests.yml
index 4de893e..b8496c8 100644
--- a/.github/workflows/unit_tests.yml
+++ b/.github/workflows/unit_tests.yml
@@ -16,7 +16,7 @@ env:
   DEFAULT_LLVM_VERSION: 18
   DEFAULT_GCC_VERSION: 13
   MULL_LLVM_VERSION: 17
-  HYPOTHESIS_PROFILE: ci
+  HYPOTHESIS_PROFILE: default

 concurrency:
   group: ${{ github.head_ref || github.run_id }}
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 4464d9c..1714ddb 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -37,6 +37,7 @@ target_sources(
     include
     FILES
     include/stdx/algorithm.hpp
+    include/stdx/atomic_bitset.hpp
     include/stdx/bit.hpp
     include/stdx/bitset.hpp
     include/stdx/byterator.hpp
@@ -51,6 +52,7 @@ target_sources(
     include/stdx/cx_queue.hpp
     include/stdx/cx_set.hpp
     include/stdx/cx_vector.hpp
+    include/stdx/detail/bitset_common.hpp
     include/stdx/detail/list_common.hpp
     include/stdx/for_each_n_args.hpp
     include/stdx/functional.hpp
diff --git a/docs/intrusive_forward_list.adoc b/docs/intrusive_forward_list.adoc
index 2702edd..72c8e56 100644
--- a/docs/intrusive_forward_list.adoc
+++ b/docs/intrusive_forward_list.adoc
@@ -2,8 +2,7 @@
 == `intrusive_forward_list.hpp`

 `intrusive_forward_list` is a singly-linked list designed for use at compile-time or
-with static objects. It supports pushing and popping at the front or back, and
-removal from the middle.
+with static objects. It supports pushing and popping at the front or back.
[source,cpp] ---- diff --git a/include/stdx/atomic_bitset.hpp b/include/stdx/atomic_bitset.hpp new file mode 100644 index 0000000..3ec6f5a --- /dev/null +++ b/include/stdx/atomic_bitset.hpp @@ -0,0 +1,207 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +namespace stdx { +inline namespace v1 { +namespace detail { +template class atomic_bitset { + constexpr static auto bit = StorageElem{1U}; + + static_assert(N <= std::numeric_limits::digits, + "atomic_bitset is limited to a single storage element"); + static_assert(std::atomic::is_always_lock_free, + "atomic_bitset must always be lock free"); + std::atomic storage{}; + + constexpr static auto mask = bit_mask(); + StorageElem salient_value(std::memory_order order) const { + return storage.load(order) & mask; + } + + [[nodiscard]] static constexpr auto + value_from_string(std::string_view str, std::size_t pos, std::size_t n, + char one) -> StorageElem { + StorageElem ret{}; + auto const len = std::min(n, str.size() - pos); + auto const s = str.substr(pos, std::min(len, N)); + auto i = bit; + for (auto it = std::rbegin(s); it != std::rend(s); ++it) { + if (*it == one) { + ret |= i; + } + i = static_cast(i << 1u); + } + return ret; + } + + using bitset_t = bitset; + + public: + constexpr atomic_bitset() = default; + constexpr explicit atomic_bitset(std::uint64_t value) + : storage{static_cast(value & mask)} {} + + template + constexpr explicit atomic_bitset(place_bits_t, Bs... bs) + : storage{static_cast( + (StorageElem{} | ... | + static_cast(bit << to_underlying(bs))))} {} + + constexpr explicit atomic_bitset(all_bits_t) : storage{mask} {} + + constexpr explicit atomic_bitset(std::string_view str, std::size_t pos = 0, + std::size_t n = std::string_view::npos, + char one = '1') + : storage{value_from_string(str, pos, n, one)} {} + + template + [[nodiscard]] auto + to(std::memory_order order = std::memory_order_seq_cst) const -> T { + using U = underlying_type_t; + static_assert( + unsigned_integral, + "Conversion must be to an unsigned integral type or enum!"); + static_assert(N <= std::numeric_limits::digits, + "atomic_bitset must fit within T"); + return static_cast(storage.load(order)); + } + + [[nodiscard]] auto + to_natural(std::memory_order order = std::memory_order_seq_cst) const { + return storage.load(order); + } + + operator bitset_t() const { return bitset_t{storage.load()}; } + + auto load(std::memory_order order = std::memory_order_seq_cst) const + -> bitset_t { + return bitset_t{storage.load(order)}; + } + auto store(bitset_t b, + std::memory_order order = std::memory_order_seq_cst) { + storage.store(b.template to(), order); + } + + constexpr static std::integral_constant size{}; + + constexpr static std::bool_constant< + std::atomic::is_always_lock_free> + is_always_lock_free{}; + + template [[nodiscard]] auto operator[](T idx) const -> bool { + auto const pos = static_cast(to_underlying(idx)); + return (salient_value(std::memory_order_seq_cst) & (bit << pos)) != 0; + } + + template + auto set(T idx, bool value = true, + std::memory_order order = std::memory_order_seq_cst) -> bitset_t { + auto const pos = static_cast(to_underlying(idx)); + if (value) { + return bitset_t{ + storage.fetch_or(static_cast(bit << pos), order)}; + } + return bitset_t{ + storage.fetch_and(static_cast(~(bit << pos)), order)}; + } + + auto set(lsb_t lsb, msb_t msb, bool value = true, + std::memory_order order = std::memory_order_seq_cst) -> bitset_t { 
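+ // The range update below is a single atomic read-modify-write: bit_mask
+ // builds a contiguous mask covering bits [lsb, msb], then fetch_or sets the
+ // range (or fetch_and with the complement clears it), and the value returned
+ // is the bitset as it was before the update.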
+ auto const l = to_underlying(lsb); + auto const m = to_underlying(msb); + auto const shifted_value = bit_mask(m, l); + if (value) { + return bitset_t{storage.fetch_or(shifted_value, order)}; + } + return bitset_t{storage.fetch_and(~shifted_value, order)}; + } + + auto set(lsb_t lsb, length_t len, bool value = true, + std::memory_order order = std::memory_order_seq_cst) -> bitset_t { + auto const l = to_underlying(lsb); + auto const length = to_underlying(len); + return set(lsb, static_cast(l + length - 1), value, order); + } + + auto set(std::memory_order order = std::memory_order_seq_cst) + LIFETIMEBOUND -> atomic_bitset & { + storage.store(mask, order); + return *this; + } + + template auto reset(T idx) -> bitset_t { + auto const pos = static_cast(to_underlying(idx)); + return bitset_t{ + storage.fetch_and(static_cast(~(bit << pos)))}; + } + + auto reset(std::memory_order order = std::memory_order_seq_cst) + LIFETIMEBOUND -> atomic_bitset & { + storage.store(StorageElem{}, order); + return *this; + } + + auto + reset(lsb_t lsb, msb_t msb, + std::memory_order order = std::memory_order_seq_cst) -> bitset_t { + return set(lsb, msb, false, order); + } + + auto + reset(lsb_t lsb, length_t len, + std::memory_order order = std::memory_order_seq_cst) -> bitset_t { + return set(lsb, len, false, order); + } + + template + auto flip(T idx, + std::memory_order order = std::memory_order_seq_cst) -> bitset_t { + auto const pos = static_cast(to_underlying(idx)); + return bitset_t{ + storage.fetch_xor(static_cast(bit << pos), order)}; + } + + auto flip(std::memory_order order = std::memory_order_seq_cst) -> bitset_t { + return bitset_t{storage.fetch_xor(mask, order)}; + } + + [[nodiscard]] auto + all(std::memory_order order = std::memory_order_seq_cst) const -> bool { + return salient_value(order) == mask; + } + [[nodiscard]] auto + any(std::memory_order order = std::memory_order_seq_cst) const -> bool { + return salient_value(order) != 0; + } + [[nodiscard]] auto + none(std::memory_order order = std::memory_order_seq_cst) const -> bool { + return salient_value(order) == 0; + } + + [[nodiscard]] auto + count(std::memory_order order = std::memory_order_seq_cst) const + -> std::size_t { + return static_cast(popcount(salient_value(order))); + } +}; +} // namespace detail + +template +using atomic_bitset = detail::atomic_bitset< + to_underlying(N), decltype(smallest_uint())>; +} // namespace v1 +} // namespace stdx diff --git a/include/stdx/bitset.hpp b/include/stdx/bitset.hpp index 8b8c6ca..08aef83 100644 --- a/include/stdx/bitset.hpp +++ b/include/stdx/bitset.hpp @@ -3,6 +3,7 @@ #include #include #include +#include #include #include @@ -17,11 +18,6 @@ namespace stdx { inline namespace v1 { -struct place_bits_t {}; -constexpr inline auto place_bits = place_bits_t{}; -struct all_bits_t {}; -constexpr inline auto all_bits = all_bits_t{}; - namespace detail { template class bitset { constexpr static auto storage_elem_size = diff --git a/include/stdx/detail/bitset_common.hpp b/include/stdx/detail/bitset_common.hpp new file mode 100644 index 0000000..0c584d4 --- /dev/null +++ b/include/stdx/detail/bitset_common.hpp @@ -0,0 +1,10 @@ +#pragma once + +namespace stdx { +inline namespace v1 { +struct place_bits_t {}; +constexpr inline auto place_bits = place_bits_t{}; +struct all_bits_t {}; +constexpr inline auto all_bits = all_bits_t{}; +} // namespace v1 +} // namespace stdx diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 4ce0eea..e06a9d1 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ 
-21,6 +21,7 @@ add_tests( FILES algorithm always_false + atomic_bitset bind bit bitset diff --git a/test/atomic_bitset.cpp b/test/atomic_bitset.cpp new file mode 100644 index 0000000..ba95fb6 --- /dev/null +++ b/test/atomic_bitset.cpp @@ -0,0 +1,370 @@ +#include + +#include +#include + +#include +#include +#include + +TEST_CASE("atomic_bitset with explicit storage element type", + "[atomic_bitset]") { + static_assert(sizeof(stdx::atomic_bitset<1, std::uint8_t>) == + sizeof(std::uint8_t)); + static_assert(sizeof(stdx::atomic_bitset<8, std::uint8_t>) == + sizeof(std::uint8_t)); + static_assert(sizeof(stdx::atomic_bitset<1, std::uint16_t>) == + sizeof(std::uint16_t)); + static_assert(sizeof(stdx::atomic_bitset<16, std::uint16_t>) == + sizeof(std::uint16_t)); + static_assert(sizeof(stdx::atomic_bitset<1, std::uint32_t>) == + sizeof(std::uint32_t)); + static_assert(sizeof(stdx::atomic_bitset<32, std::uint32_t>) == + sizeof(std::uint32_t)); + static_assert(sizeof(stdx::atomic_bitset<1, std::uint64_t>) == + sizeof(std::uint64_t)); + static_assert(sizeof(stdx::atomic_bitset<64, std::uint64_t>) == + sizeof(std::uint64_t)); +} + +TEST_CASE("atomic_bitset with implicit storage element type", + "[atomic_bitset]") { + static_assert(sizeof(stdx::atomic_bitset<1>) == sizeof(std::uint8_t)); + static_assert(sizeof(stdx::atomic_bitset<8>) == sizeof(std::uint8_t)); + static_assert(sizeof(stdx::atomic_bitset<9>) == sizeof(std::uint16_t)); + static_assert(sizeof(stdx::atomic_bitset<16>) == sizeof(std::uint16_t)); + static_assert(sizeof(stdx::atomic_bitset<17>) == sizeof(std::uint32_t)); + static_assert(sizeof(stdx::atomic_bitset<32>) == sizeof(std::uint32_t)); + static_assert(sizeof(stdx::atomic_bitset<33>) == sizeof(std::uint64_t)); + static_assert(sizeof(stdx::atomic_bitset<64>) == sizeof(std::uint64_t)); +} + +TEMPLATE_TEST_CASE("atomic_bitset is always lock free", "[atomic_bitset]", + std::uint8_t, std::uint16_t, std::uint32_t, std::uint64_t) { + static_assert(stdx::atomic_bitset<8, TestType>::is_always_lock_free); +} + +TEMPLATE_TEST_CASE("index operation", "[atomic_bitset]", std::uint8_t, + std::uint16_t, std::uint32_t, std::uint64_t) { + CHECK(not stdx::atomic_bitset<1, TestType>{}[0]); +} + +TEMPLATE_TEST_CASE("set single bit", "[atomic_bitset]", std::uint8_t, + std::uint16_t, std::uint32_t, std::uint64_t) { + auto bs = stdx::atomic_bitset<1, TestType>{}; + CHECK(not bs[0]); + bs.set(0); + CHECK(bs[0]); + bs.set(0, false); + CHECK(not bs[0]); +} + +TEMPLATE_TEST_CASE("set all bits", "[atomic_bitset]", std::uint8_t, + std::uint16_t, std::uint32_t, std::uint64_t) { + auto bs = stdx::atomic_bitset<1, TestType>{}; + CHECK(not bs[0]); + bs.set(); + CHECK(bs[0]); +} + +TEMPLATE_TEST_CASE("reset single bit", "[atomic_bitset]", std::uint8_t, + std::uint16_t, std::uint32_t, std::uint64_t) { + auto bs = stdx::atomic_bitset<1, TestType>{1ul}; + CHECK(bs[0]); + bs.reset(0); + CHECK(not bs[0]); +} + +TEMPLATE_TEST_CASE("reset all bits", "[atomic_bitset]", std::uint8_t, + std::uint16_t, std::uint32_t, std::uint64_t) { + auto bs = stdx::atomic_bitset<1, TestType>{1ul}; + CHECK(bs[0]); + bs.reset(); + CHECK(not bs[0]); +} + +TEMPLATE_TEST_CASE("flip single bit", "[atomic_bitset]", std::uint8_t, + std::uint16_t, std::uint32_t, std::uint64_t) { + auto bs = stdx::atomic_bitset<3, TestType>{0b101ul}; + CHECK(bs[0]); + CHECK(not bs[1]); + CHECK(bs[2]); + bs.flip(0); + CHECK(not bs[0]); + CHECK(not bs[1]); + CHECK(bs[2]); +} + +TEMPLATE_TEST_CASE("flip all bits", "[atomic_bitset]", std::uint8_t, + std::uint16_t, 
std::uint32_t, std::uint64_t) { + auto bs = stdx::atomic_bitset<3, TestType>{0b101ul}; + CHECK(bs[0]); + CHECK(not bs[1]); + CHECK(bs[2]); + bs.flip(); + CHECK(not bs[0]); + CHECK(bs[1]); + CHECK(not bs[2]); +} + +TEMPLATE_TEST_CASE("default construct", "[atomic_bitset]", std::uint8_t, + std::uint16_t, std::uint32_t, std::uint64_t) { + [[maybe_unused]] constexpr auto bs1 = stdx::atomic_bitset<1, TestType>{}; +} + +TEMPLATE_TEST_CASE("construct with a value", "[atomic_bitset]", std::uint8_t, + std::uint16_t, std::uint32_t, std::uint64_t) { + constexpr auto bs1 = stdx::atomic_bitset<1, TestType>{1ul}; + CHECK(bs1[0]); + + constexpr auto bs2 = stdx::atomic_bitset<3, TestType>{255ul}; + CHECK(bs2[0]); + CHECK(bs2[1]); +} + +TEMPLATE_TEST_CASE("construct with values for bits", "[atomic_bitset]", + std::uint8_t, std::uint16_t, std::uint32_t, std::uint64_t) { + constexpr auto bs = + stdx::atomic_bitset<8, TestType>{stdx::place_bits, 1, 3, 5}; + CHECK(not bs[0]); + CHECK(bs[1]); + CHECK(bs[3]); + CHECK(bs[5]); +} + +TEMPLATE_TEST_CASE("convert to unsigned integral type (same underlying type)", + "[atomic_bitset]", std::uint8_t, std::uint16_t, + std::uint32_t, std::uint64_t) { + constexpr auto bs = stdx::atomic_bitset<3, TestType>{255ul}; + auto const val = bs.template to(); + CHECK(std::is_same_v); + CHECK(val == 7u); + CHECK(bs.template to(std::memory_order_acquire) == 7u); +} + +TEMPLATE_TEST_CASE( + "convert to unsigned integral type (different underlying type)", + "[atomic_bitset]", std::uint16_t, std::uint32_t, std::uint64_t) { + constexpr auto bs = + stdx::atomic_bitset<8, std::uint8_t>{stdx::place_bits, 3, 7}; + auto const val = bs.to(); + CHECK(std::is_same_v); + CHECK(val == 0b1000'1000u); + CHECK(bs.template to(std::memory_order_acquire) == 0b1000'1000u); +} + +TEMPLATE_TEST_CASE("convert to natural type", "[atomic_bitset]", std::uint8_t, + std::uint16_t, std::uint32_t, std::uint64_t) { + constexpr auto bs = + stdx::atomic_bitset<8, TestType>{stdx::place_bits, 3, 7}; + auto const val = bs.to_natural(); + CHECK(std::is_same_v); + CHECK(val == 0b1000'1000u); + CHECK(bs.to_natural(std::memory_order_acquire) == 0b1000'1000u); +} + +TEMPLATE_TEST_CASE("construct with a string_view", "[atomic_bitset]", + std::uint8_t, std::uint16_t, std::uint32_t, std::uint64_t) { + using namespace std::string_view_literals; + CHECK(stdx::atomic_bitset<4, TestType>{"1010"sv}.to_natural() == 0b1010ul); +} + +TEMPLATE_TEST_CASE("construct with a substring", "[atomic_bitset]", + std::uint8_t, std::uint16_t, std::uint32_t, std::uint64_t) { + using namespace std::string_view_literals; + CHECK( + stdx::atomic_bitset<4, TestType>{"XOXOXO"sv, 2, 4, 'X'}.to_natural() == + 0b1010ul); +} + +TEMPLATE_TEST_CASE("all", "[atomic_bitset]", std::uint8_t, std::uint16_t, + std::uint32_t, std::uint64_t) { + constexpr auto bs1 = stdx::atomic_bitset<8, TestType>{0b1111'1111u}; + CHECK(bs1.all()); + + constexpr auto bs2 = stdx::atomic_bitset<8, TestType>{0xb1111'1110u}; + CHECK(not bs2.all()); +} + +TEMPLATE_TEST_CASE("any", "[atomic_bitset]", std::uint8_t, std::uint16_t, + std::uint32_t, std::uint64_t) { + constexpr auto bs1 = stdx::atomic_bitset<8, TestType>{8ul}; + CHECK(bs1.any()); + + constexpr auto bs2 = stdx::atomic_bitset<8, TestType>{}; + CHECK(not bs2.any()); +} + +TEMPLATE_TEST_CASE("none", "[atomic_bitset]", std::uint8_t, std::uint16_t, + std::uint32_t, std::uint64_t) { + constexpr auto bs1 = stdx::atomic_bitset<8, TestType>{}; + CHECK(bs1.none()); + + constexpr auto bs2 = stdx::atomic_bitset<8, TestType>{8ul}; + 
CHECK(not bs2.none()); +} + +TEMPLATE_TEST_CASE("count", "[atomic_bitset]", std::uint8_t, std::uint16_t, + std::uint32_t, std::uint64_t) { + constexpr auto bs1 = stdx::atomic_bitset<8, TestType>{}; + CHECK(bs1.count() == 0u); + + constexpr auto bs2 = stdx::atomic_bitset<8, TestType>{0b10101ul}; + CHECK(bs2.count() == 3u); +} + +TEMPLATE_TEST_CASE("conversion to bitset", "[atomic_bitset]", std::uint8_t, + std::uint16_t, std::uint32_t, std::uint64_t) { + constexpr auto bs = stdx::atomic_bitset<8, TestType>{0b10101ul}; + CHECK(bs == stdx::bitset<8, TestType>{0b10101ul}); +} + +TEMPLATE_TEST_CASE("load", "[atomic_bitset]", std::uint8_t, std::uint16_t, + std::uint32_t, std::uint64_t) { + constexpr auto bs = stdx::atomic_bitset<8, TestType>{0b10101ul}; + CHECK(bs.load() == stdx::bitset<8, TestType>{0b10101ul}); + CHECK(bs.load(std::memory_order_acquire) == + stdx::bitset<8, TestType>{0b10101ul}); +} + +TEMPLATE_TEST_CASE("store", "[atomic_bitset]", std::uint8_t, std::uint16_t, + std::uint32_t, std::uint64_t) { + auto bs = stdx::atomic_bitset<8, TestType>{}; + bs.store(stdx::bitset<8, TestType>{0b10101ul}); + CHECK(bs.to_natural() == 0b10101ul); + + bs.store(stdx::bitset<8, TestType>{0b1010ul}, std::memory_order_release); + CHECK(bs.to_natural() == 0b1010ul); +} + +TEMPLATE_TEST_CASE("set range of bits (lsb, length)", "[atomic_bitset]", + std::uint8_t, std::uint16_t, std::uint32_t, std::uint64_t) { + using namespace stdx::literals; + auto bs = stdx::atomic_bitset{}; + bs.set(2_lsb, 4_len); + CHECK(bs[2]); + CHECK(bs[3]); + CHECK(bs[4]); + CHECK(bs[5]); + CHECK(bs.count() == 4u); +} + +TEMPLATE_TEST_CASE("set range of bits (lsb, msb)", "[atomic_bitset]", + std::uint8_t, std::uint16_t, std::uint32_t, std::uint64_t) { + using namespace stdx::literals; + auto bs = stdx::atomic_bitset{}; + bs.set(2_lsb, 5_msb); + CHECK(bs[2]); + CHECK(bs[3]); + CHECK(bs[4]); + CHECK(bs[5]); + CHECK(bs.count() == 4u); +} + +TEMPLATE_TEST_CASE("construct with all bits set", "[atomic_bitset]", + std::uint8_t, std::uint16_t, std::uint32_t, std::uint64_t) { + constexpr auto bs = stdx::atomic_bitset<8, TestType>{stdx::all_bits}; + CHECK(bs.all()); +} + +TEMPLATE_TEST_CASE("reset range of bits (lsb, length)", "[atomic_bitset]", + std::uint8_t, std::uint16_t, std::uint32_t, std::uint64_t) { + using namespace stdx::literals; + auto bs = stdx::atomic_bitset{ + stdx::all_bits}; + bs.reset(2_lsb, 4_len); + CHECK(not bs[2]); + CHECK(not bs[3]); + CHECK(not bs[4]); + CHECK(not bs[5]); + CHECK(bs.count() == bs.size() - 4); +} + +TEMPLATE_TEST_CASE("reset range of bits (lsb, msb)", "[atomic_bitset]", + std::uint8_t, std::uint16_t, std::uint32_t, std::uint64_t) { + using namespace stdx::literals; + auto bs = stdx::atomic_bitset{ + stdx::all_bits}; + bs.reset(2_lsb, 5_msb); + CHECK(not bs[2]); + CHECK(not bs[3]); + CHECK(not bs[4]); + CHECK(not bs[5]); + CHECK(bs.count() == bs.size() - 4); +} + +namespace { +enum struct Bits : std::uint8_t { ZERO, ONE, TWO, THREE, MAX }; +} + +TEST_CASE("use atomic_bitset with enum struct (construct)", "[atomic_bitset]") { + constexpr auto bs = stdx::atomic_bitset{}; + static_assert(bs.size() == stdx::to_underlying(Bits::MAX)); +} + +TEST_CASE("use atomic_bitset with enum struct (to)", "[atomic_bitset]") { + constexpr auto bs = stdx::atomic_bitset{stdx::all_bits}; + CHECK(bs.to() == static_cast(0b1111)); +} + +TEST_CASE("use atomic_bitset with enum struct (set/flip)", "[atomic_bitset]") { + auto bs = stdx::atomic_bitset{}; + bs.set(Bits::ZERO); + CHECK(bs.to_natural() == 1); + bs.reset(Bits::ZERO); + 
CHECK(bs.to_natural() == 0); + bs.flip(Bits::ZERO); + CHECK(bs.to_natural() == 1); +} + +TEST_CASE("use atomic_bitset with enum struct (read index)", + "[atomic_bitset]") { + constexpr auto bs = stdx::atomic_bitset{stdx::all_bits}; + CHECK(bs[Bits::ZERO]); + CHECK(bs[Bits::ONE]); + CHECK(bs[Bits::TWO]); + CHECK(bs[Bits::THREE]); +} + +TEMPLATE_TEST_CASE("set returns the previous bitset", "[atomic_bitset]", + std::uint8_t, std::uint16_t, std::uint32_t, std::uint64_t) { + using namespace stdx::literals; + auto bs = stdx::atomic_bitset<8, TestType>{}; + CHECK(bs.set(0) == stdx::bitset<8, TestType>{}); + CHECK(bs[0]); + CHECK(bs.set(1_lsb, 1_len) == stdx::bitset<8, TestType>{0b1ul}); + CHECK(bs[1]); + CHECK(bs.set(2_lsb, 2_msb) == stdx::bitset<8, TestType>{0b11ul}); + CHECK(bs[2]); +} + +TEMPLATE_TEST_CASE("reset returns the previous bitset", "[atomic_bitset]", + std::uint8_t, std::uint16_t, std::uint32_t, std::uint64_t) { + using namespace stdx::literals; + auto bs = stdx::atomic_bitset<8, TestType>{0b111ul}; + CHECK(bs.reset(0) == stdx::bitset<8, TestType>{0b111ul}); + CHECK(not bs[0]); + CHECK(bs.reset(1_lsb, 1_len) == stdx::bitset<8, TestType>{0b110ul}); + CHECK(not bs[1]); + CHECK(bs.reset(2_lsb, 2_msb) == stdx::bitset<8, TestType>{0b100ul}); + CHECK(not bs[2]); +} + +TEMPLATE_TEST_CASE("flip returns the previous bitset", "[atomic_bitset]", + std::uint8_t, std::uint16_t, std::uint32_t, std::uint64_t) { + using namespace stdx::literals; + auto bs = stdx::atomic_bitset<1, TestType>{0b1ul}; + CHECK(bs.flip() == stdx::bitset<1, TestType>{0b1ul}); + CHECK(not bs[0]); + CHECK(bs.flip(0) == stdx::bitset<1, TestType>{}); + CHECK(bs[0]); +} + +TEST_CASE("atomic bitset is thread-safe", "[atomic_bitset]") { + auto bs = stdx::atomic_bitset<1, std::uint8_t>{}; + auto t1 = std::thread([&] { bs.set(0); }); + auto t2 = std::thread([&] { bs.set(0); }); + + t1.join(); + t2.join(); +} From e00f89a3511ce0d995525d506b51553ab86d45ad Mon Sep 17 00:00:00 2001 From: Ben Deane Date: Fri, 11 Oct 2024 15:53:30 -0600 Subject: [PATCH 2/3] :books: Add docs for `atomic_bitset` --- docs/atomic_bitset.adoc | 107 ++++++++++++++++++++++++++++++++++++++++ docs/bitset.adoc | 23 +++++++-- docs/index.adoc | 2 + docs/intro.adoc | 2 + docs/ranges.adoc | 6 +++ 5 files changed, 137 insertions(+), 3 deletions(-) create mode 100644 docs/atomic_bitset.adoc create mode 100644 docs/ranges.adoc diff --git a/docs/atomic_bitset.adoc b/docs/atomic_bitset.adoc new file mode 100644 index 0000000..1dd4419 --- /dev/null +++ b/docs/atomic_bitset.adoc @@ -0,0 +1,107 @@ + +== `atomic_bitset.hpp` + +https://github.com/intel/cpp-std-extensions/blob/main/include/stdx/atomic_bitset.hpp[`atomic_bitset.hpp`] +provides an implementation of a xref:bitset.adoc#_bitset_hpp[`bitset`] with atomic semantics. + +An `atomic_bitset` is limited in size to the maximum integral type a platform +can support while still using lock-free atomic instructions. 
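+For example, the limit is enforced at compile time with a `static_assert`; a
+minimal sketch (the over-sized case is shown commented out):
+[source,cpp]
+----
+auto ok = stdx::atomic_bitset<64>{};  // fits in a std::uint64_t
+// auto too_big = stdx::atomic_bitset<65>{};
+// error: "atomic_bitset is limited to a single storage element"
+----
+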
Like `bitset`, it +can be defined by selecting the underlying storage type automatically: +[source,cpp] +---- +using A = stdx::bitset<8>; // uses uint8_t +using B = stdx::bitset<16>; // uses uint16_t +using C = stdx::bitset<32>; // uses uint32_t +using D = stdx::bitset<64>; // uses uint64_t +---- + +`atomic_bitset` is constructed in the same way as `bitset`: with `all_bits`, +`place_bits`, a value, or a `string_view`: +[source,cpp] +---- +using namespace std::string_view_literals; +auto bs0 = stdx::atomic_bitset<8>{}; +auto bs1 = stdx::atomic_bitset<8>{stdx::all_bits}; // 0b1111'1111 +auto bs2 = stdx::atomic_bitset<8>{stdx::place_bits, 0, 1, 3}; // 0b1011 +auto bs3 = stdx::atomic_bitset<8>{0b1011}; +auto bs4 = stdx::atomic_bitset<8>{"1011"sv}; +---- + +NOTE: `atomic_bitset`​'s constructors are `constexpr`, but none of the other +functions are. + +Also like `bitset`, `atomic_bitset` supports conversion to integral types: +[source,cpp] +---- +auto bs = stdx::atomic_bitset<11>{0b101}; // 11 bits, value 5 +auto i = bs.to(); // 5 (a std::uint64_t) +auto j = bs.to_natural(); // 5 (a std::uint16_t) +---- + +And operation with enumeration types: +[source,cpp] +---- +enum struct Bits { ZERO, ONE, TWO, THREE, MAX }; +auto bs = stdx::atomic_bitset{stdx::all_bits}; // 4 bits, value 0b1111 +bs.set(Bits::ZERO); +bs.reset(Bits::ZERO); +bs.flip(Bits::ZERO); +auto bit_zero = bs[Bits::ZERO]; +---- + +Unlike `bitset`, `atomic_bitset`​'s operations are atomic. For example, `load` +and `store` are basic operations that return and take a corresponding `bitset`: + +[source,cpp] +---- +constexpr auto bs = stdx::atomic_bitset<8>{0b1010ul}; +auto copy = bs.load(); // a stdx::bitset<8>{0b1010ul}; +bs.store(copy); +---- + +Like https://en.cppreference.com/w/cpp/atomic/atomic/load[`load`] and +https://en.cppreference.com/w/cpp/atomic/atomic/store[`store`] on +https://en.cppreference.com/w/cpp/atomic/atomic[`std::atomic`], the `load` and +`store` operations on `stdx::atomic_bitset` take an optional +https://en.cppreference.com/w/cpp/atomic/memory_order[`std::memory_order`]. +`stdx::atomic_bitset` is also implicitly convertible to a corresponding +`stdx::bitset`; that operation is equivalent to `load()`. + +The `set`, `reset` and `flip` operations also take an optional +`std::memory_order`: these operations are equivalent to `store` in their +semantics, except that they return the `stdx::bitset` that was the previous +value. + +[source,cpp] +---- +constexpr auto bs = stdx::atomic_bitset<8>{0b1010ul}; +auto prev = bs.set(0); +// bs == 1011 +// prev == 1010 (stdx::bitset<8>) +---- + +NOTE: When `set` or `reset` are called without specifying bits, they return a +reference to the `atomic_bitset`. This is because these operations result in a +plain `store` which does not return the previous value. + +`all`, `any`, `none` and `count` are also available on `atomic_bitset` and they +are each equivalent to `load` followed by the respective operation. Like `load`, +they also take an optional `std::memory_order`. + +So what is _not_ available on `atomic_bitset`? + + * any binary operation: equality, binary versions of `and`, `or`, etc. 
+ * bit shift operations + * `for_each` and `lowest_unset` + * unary `not` + +These operations are not provided for varying reasons: + + * atomic semantics are impossible or problematic to guarantee (binary operations) + * atomic instructions are not available (bit shifts, `lowest_unset`) + * atomic semantics are unclear (`for_each`) + * the caller can easily achieve what they want (unary `not`) + +In all of these cases though, the caller can make the right choice for them, and +use the corresponding operations on `bitset` after correctly reasoning about the +required semantics. diff --git a/docs/bitset.adoc b/docs/bitset.adoc index 59455fb..d851574 100644 --- a/docs/bitset.adoc +++ b/docs/bitset.adoc @@ -14,7 +14,8 @@ platform. * Stream input and output operators are not implemented. * A `std::hash` specialization is not implemented. -* `to_string`, `to_ulong` and `to_ullong` are not implemented +* `to_string`, `to_ulong` and `to_ullong` are not implemented -- but `to` and + `to_natural` provide ways to convert to integral types. * `operator[]` is read-only: it does not return a proxy reference type A bitset has two template parameters: the size of the bitset and the storage @@ -70,8 +71,8 @@ auto i = bs.to(); // 5 (a std::uint64_t) auto j = bs.to_natural(); // 5 (a std::uint16_t) ---- -Bitsets support all the usual bitwise operators (`and`, `or`, `xor` and `not`) -and also support `operator-` meaning set difference, or `a & ~b`. +Bitsets support all the usual bitwise operators (`and`, `or`, `xor` and `not`, +shifts) and also support `operator-` meaning set difference, or `a & ~b`. A bitset can also be used with an enumeration that represents bits: [source,cpp] @@ -86,3 +87,19 @@ auto bit_zero = bs[Bits::ZERO]; NOTE: The enumeration values are the bit positions, not the bits themselves (the enumeration values are not fixed to powers-of-2). + +A bitset also supports efficient iteration with `for_each`, which calls a +function with each set bit in turn, working from LSB to MSB: +[source,cpp] +---- +auto bs = stdx::bitset<8>{0b1010'1010ul}; +for_each([&](auto i) { /* i == 1, 3, 5, 7 */ }, bs); +---- + +To support "external" iteration, or use cases like using a bitset to track used +objects, `lowest_unset` is also provided: +[source,cpp] +---- +auto bs = stdx::bitset<8>{0b11'0111ul}; +auto i = bs.lowest_unset(); // i == 3 +---- diff --git a/docs/index.adoc b/docs/index.adoc index 91006f2..8809226 100644 --- a/docs/index.adoc +++ b/docs/index.adoc @@ -7,6 +7,7 @@ :toc: left include::intro.adoc[] +include::atomic_bitset.adoc[] include::algorithm.adoc[] include::bit.adoc[] include::bitset.adoc[] @@ -33,6 +34,7 @@ include::numeric.adoc[] include::optional.adoc[] include::panic.adoc[] include::priority.adoc[] +include::ranges.adoc[] include::span.adoc[] include::tuple.adoc[] include::tuple_algorithms.adoc[] diff --git a/docs/intro.adoc b/docs/intro.adoc index fa3f79f..ea33c3e 100644 --- a/docs/intro.adoc +++ b/docs/intro.adoc @@ -35,6 +35,7 @@ into headers whose names match the standard. 
The following headers are available: * https://github.com/intel/cpp-std-extensions/blob/main/include/stdx/algorithm.hpp[`algorithm.hpp`] +* https://github.com/intel/cpp-std-extensions/blob/main/include/stdx/atomic_bitset.hpp[`atomic_bitset.hpp`] * https://github.com/intel/cpp-std-extensions/blob/main/include/stdx/bit.hpp[`bit.hpp`] * https://github.com/intel/cpp-std-extensions/blob/main/include/stdx/bitset.hpp[`bitset.hpp`] * https://github.com/intel/cpp-std-extensions/blob/main/include/stdx/byterator.hpp[`byterator.hpp`] @@ -60,6 +61,7 @@ The following headers are available: * https://github.com/intel/cpp-std-extensions/blob/main/include/stdx/optional.hpp[`optional.hpp`] * https://github.com/intel/cpp-std-extensions/blob/main/include/stdx/panic.hpp[`panic.hpp`] * https://github.com/intel/cpp-std-extensions/blob/main/include/stdx/priority.hpp[`priority.hpp`] +* https://github.com/intel/cpp-std-extensions/blob/main/include/stdx/ranges.hpp[`ranges.hpp`] * https://github.com/intel/cpp-std-extensions/blob/main/include/stdx/span.hpp[`span.hpp`] * https://github.com/intel/cpp-std-extensions/blob/main/include/stdx/tuple.hpp[`tuple.hpp`] * https://github.com/intel/cpp-std-extensions/blob/main/include/stdx/tuple_algorithms.hpp[`tuple_algorithms.hpp`] diff --git a/docs/ranges.adoc b/docs/ranges.adoc new file mode 100644 index 0000000..ee9ec20 --- /dev/null +++ b/docs/ranges.adoc @@ -0,0 +1,6 @@ + +== `ranges.hpp` + +https://github.com/intel/cpp-std-extensions/blob/main/include/stdx/ranges.hpp[`ranges.hpp`] +contains a single concept: `range`. A type models the `stdx::range` concept if +`std::begin` and `std::end` are defined for that type. From 212b5cd40185f2c02feaab555a0bb82ce52f88e6 Mon Sep 17 00:00:00 2001 From: Ben Deane Date: Sat, 12 Oct 2024 14:43:29 -0600 Subject: [PATCH 3/3] :art: Add `ct_string` deduction guide for `{atomic_}bitset` --- include/stdx/atomic_bitset.hpp | 73 +++++++++++++---------- include/stdx/bit.hpp | 11 +--- include/stdx/bitset.hpp | 91 ++++++++++++++++------------- test/atomic_bitset.cpp | 10 ++++ test/bitset.cpp | 10 ++++ test/fail/bitset_signed_storage.cpp | 2 +- 6 files changed, 115 insertions(+), 82 deletions(-) diff --git a/include/stdx/atomic_bitset.hpp b/include/stdx/atomic_bitset.hpp index 3ec6f5a..325d6b6 100644 --- a/include/stdx/atomic_bitset.hpp +++ b/include/stdx/atomic_bitset.hpp @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include @@ -17,25 +18,30 @@ namespace stdx { inline namespace v1 { -namespace detail { -template class atomic_bitset { - constexpr static auto bit = StorageElem{1U}; +template ())> +class atomic_bitset { + constexpr static std::size_t N = to_underlying(Size); + using elem_t = StorageElem; + static_assert(std::is_unsigned_v, + "Storage element for atomic_bitset must be an unsigned type"); - static_assert(N <= std::numeric_limits::digits, + constexpr static auto bit = elem_t{1U}; + + static_assert(N <= std::numeric_limits::digits, "atomic_bitset is limited to a single storage element"); - static_assert(std::atomic::is_always_lock_free, - "atomic_bitset must always be lock free"); - std::atomic storage{}; + std::atomic storage{}; - constexpr static auto mask = bit_mask(); - StorageElem salient_value(std::memory_order order) const { + constexpr static auto mask = bit_mask(); + elem_t salient_value(std::memory_order order) const { return storage.load(order) & mask; } - [[nodiscard]] static constexpr auto - value_from_string(std::string_view str, std::size_t pos, std::size_t n, - char one) -> StorageElem { - 
StorageElem ret{}; + [[nodiscard]] static constexpr auto value_from_string(std::string_view str, + std::size_t pos, + std::size_t n, + char one) -> elem_t { + elem_t ret{}; auto const len = std::min(n, str.size() - pos); auto const s = str.substr(pos, std::min(len, N)); auto i = bit; @@ -43,23 +49,23 @@ template class atomic_bitset { if (*it == one) { ret |= i; } - i = static_cast(i << 1u); + i = static_cast(i << 1u); } return ret; } - using bitset_t = bitset; + using bitset_t = bitset; public: constexpr atomic_bitset() = default; constexpr explicit atomic_bitset(std::uint64_t value) - : storage{static_cast(value & mask)} {} + : storage{static_cast(value & mask)} {} template constexpr explicit atomic_bitset(place_bits_t, Bs... bs) - : storage{static_cast( - (StorageElem{} | ... | - static_cast(bit << to_underlying(bs))))} {} + : storage{static_cast( + (elem_t{} | ... | + static_cast(bit << to_underlying(bs))))} {} constexpr explicit atomic_bitset(all_bits_t) : storage{mask} {} @@ -68,6 +74,11 @@ template class atomic_bitset { char one = '1') : storage{value_from_string(str, pos, n, one)} {} +#if __cplusplus >= 202002L + constexpr explicit atomic_bitset(ct_string s) + : atomic_bitset{static_cast(s)} {} +#endif + template [[nodiscard]] auto to(std::memory_order order = std::memory_order_seq_cst) const -> T { @@ -93,13 +104,13 @@ template class atomic_bitset { } auto store(bitset_t b, std::memory_order order = std::memory_order_seq_cst) { - storage.store(b.template to(), order); + storage.store(b.template to(), order); } constexpr static std::integral_constant size{}; constexpr static std::bool_constant< - std::atomic::is_always_lock_free> + std::atomic::is_always_lock_free> is_always_lock_free{}; template [[nodiscard]] auto operator[](T idx) const -> bool { @@ -113,17 +124,17 @@ template class atomic_bitset { auto const pos = static_cast(to_underlying(idx)); if (value) { return bitset_t{ - storage.fetch_or(static_cast(bit << pos), order)}; + storage.fetch_or(static_cast(bit << pos), order)}; } return bitset_t{ - storage.fetch_and(static_cast(~(bit << pos)), order)}; + storage.fetch_and(static_cast(~(bit << pos)), order)}; } auto set(lsb_t lsb, msb_t msb, bool value = true, std::memory_order order = std::memory_order_seq_cst) -> bitset_t { auto const l = to_underlying(lsb); auto const m = to_underlying(msb); - auto const shifted_value = bit_mask(m, l); + auto const shifted_value = bit_mask(m, l); if (value) { return bitset_t{storage.fetch_or(shifted_value, order)}; } @@ -145,13 +156,12 @@ template class atomic_bitset { template auto reset(T idx) -> bitset_t { auto const pos = static_cast(to_underlying(idx)); - return bitset_t{ - storage.fetch_and(static_cast(~(bit << pos)))}; + return bitset_t{storage.fetch_and(static_cast(~(bit << pos)))}; } auto reset(std::memory_order order = std::memory_order_seq_cst) LIFETIMEBOUND -> atomic_bitset & { - storage.store(StorageElem{}, order); + storage.store(elem_t{}, order); return *this; } @@ -172,7 +182,7 @@ template class atomic_bitset { std::memory_order order = std::memory_order_seq_cst) -> bitset_t { auto const pos = static_cast(to_underlying(idx)); return bitset_t{ - storage.fetch_xor(static_cast(bit << pos), order)}; + storage.fetch_xor(static_cast(bit << pos), order)}; } auto flip(std::memory_order order = std::memory_order_seq_cst) -> bitset_t { @@ -198,10 +208,9 @@ template class atomic_bitset { return static_cast(popcount(salient_value(order))); } }; -} // namespace detail -template -using atomic_bitset = detail::atomic_bitset< - 
to_underlying(N), decltype(smallest_uint())>; +#if __cplusplus >= 202002L +template atomic_bitset(ct_string) -> atomic_bitset; +#endif } // namespace v1 } // namespace stdx diff --git a/include/stdx/bit.hpp b/include/stdx/bit.hpp index 7f93a3a..b792e83 100644 --- a/include/stdx/bit.hpp +++ b/include/stdx/bit.hpp @@ -341,12 +341,8 @@ template constexpr auto bit_size() -> std::size_t { return sizeof(T) * CHAR_BIT; } -template CONSTEVAL auto smallest_uint() { - if constexpr (not std::is_same_v) { - static_assert(std::is_unsigned_v, - "smallest_uint override must be an unsigned type"); - return S{}; - } else if constexpr (N <= std::numeric_limits::digits) { +template CONSTEVAL auto smallest_uint() { + if constexpr (N <= std::numeric_limits::digits) { return std::uint8_t{}; } else if constexpr (N <= std::numeric_limits::digits) { return std::uint16_t{}; @@ -357,7 +353,6 @@ template CONSTEVAL auto smallest_uint() { } } -template -using smallest_uint_t = decltype(smallest_uint()); +template using smallest_uint_t = decltype(smallest_uint()); } // namespace v1 } // namespace stdx diff --git a/include/stdx/bitset.hpp b/include/stdx/bitset.hpp index 08aef83..ae527e4 100644 --- a/include/stdx/bitset.hpp +++ b/include/stdx/bitset.hpp @@ -3,6 +3,7 @@ #include #include #include +#include #include #include #include @@ -18,25 +19,31 @@ namespace stdx { inline namespace v1 { -namespace detail { -template class bitset { +template ())> +class bitset { + constexpr static std::size_t N = to_underlying(Size); + using elem_t = StorageElem; + static_assert(std::is_unsigned_v, + "Storage element for bitset must be an unsigned type"); + constexpr static auto storage_elem_size = - std::numeric_limits::digits; + std::numeric_limits::digits; constexpr static auto storage_size = (N + storage_elem_size - 1) / storage_elem_size; - constexpr static auto bit = StorageElem{1U}; - constexpr static auto allbits = std::numeric_limits::max(); + constexpr static auto bit = elem_t{1U}; + constexpr static auto allbits = std::numeric_limits::max(); - std::array storage{}; + std::array storage{}; - constexpr static auto lastmask = []() -> StorageElem { + constexpr static auto lastmask = []() -> elem_t { if constexpr (N % storage_elem_size != 0) { return allbits >> (storage_elem_size - N % storage_elem_size); } else { return allbits; } }(); - constexpr auto highbits() const -> StorageElem { + constexpr auto highbits() const -> elem_t { return storage.back() & lastmask; } @@ -99,21 +106,21 @@ template class bitset { for (auto e : storage) { while (e != 0) { auto const offset = static_cast(countr_zero(e)); - e &= static_cast(~(bit << offset)); + e &= static_cast(~(bit << offset)); f(i + offset); } - i += std::numeric_limits::digits; + i += std::numeric_limits::digits; } return std::forward(f); } - template + template friend constexpr auto for_each(F &&f, bitset const &...bs) -> F; public: constexpr bitset() = default; constexpr explicit bitset(std::uint64_t value) { - if constexpr (std::is_same_v) { + if constexpr (std::is_same_v) { storage[0] = value; } else { for (auto &elem : storage) { @@ -151,6 +158,11 @@ template class bitset { } } +#if __cplusplus >= 202002L + constexpr explicit bitset(ct_string s) + : bitset{static_cast(s)} {} +#endif + template [[nodiscard]] constexpr auto to() const -> T { using U = underlying_type_t; static_assert( @@ -158,7 +170,7 @@ template class bitset { "Conversion must be to an unsigned integral type or enum!"); static_assert(N <= std::numeric_limits::digits, "Bitset too big for conversion to T"); - if 
constexpr (std::is_same_v) { + if constexpr (std::is_same_v) { return static_cast(storage[0] & lastmask); } else { U result{highbits()}; @@ -191,9 +203,9 @@ template class bitset { auto const pos = static_cast(to_underlying(idx)); auto const [index, offset] = indices(pos); if (value) { - storage[index] |= static_cast(bit << offset); + storage[index] |= static_cast(bit << offset); } else { - storage[index] &= static_cast(~(bit << offset)); + storage[index] &= static_cast(~(bit << offset)); } return *this; } @@ -205,25 +217,25 @@ template class bitset { auto [l_index, l_offset] = indices(l); auto const [m_index, m_offset] = indices(m); - using setfn = auto (*)(StorageElem *, StorageElem)->void; + using setfn = auto (*)(elem_t *, elem_t)->void; auto const fn = [&]() -> setfn { if (value) { - return [](StorageElem *ptr, StorageElem val) { *ptr |= val; }; + return [](elem_t *ptr, elem_t val) { *ptr |= val; }; } - return [](StorageElem *ptr, StorageElem val) { *ptr &= ~val; }; + return [](elem_t *ptr, elem_t val) { *ptr &= ~val; }; }(); - auto l_mask = std::numeric_limits::max() << l_offset; + auto l_mask = std::numeric_limits::max() << l_offset; if (l_index != m_index) { - fn(&storage[l_index++], static_cast(l_mask)); - l_mask = std::numeric_limits::max(); + fn(&storage[l_index++], static_cast(l_mask)); + l_mask = std::numeric_limits::max(); } while (l_index != m_index) { - fn(&storage[l_index++], static_cast(l_mask)); + fn(&storage[l_index++], static_cast(l_mask)); } - auto const m_mask = std::numeric_limits::max() >> + auto const m_mask = std::numeric_limits::max() >> (storage_elem_size - m_offset - 1); - fn(&storage[l_index], static_cast(l_mask & m_mask)); + fn(&storage[l_index], static_cast(l_mask & m_mask)); return *this; } @@ -245,7 +257,7 @@ template class bitset { constexpr auto reset(T idx) LIFETIMEBOUND -> bitset & { auto const pos = static_cast(to_underlying(idx)); auto const [index, offset] = indices(pos); - storage[index] &= static_cast(~(bit << offset)); + storage[index] &= static_cast(~(bit << offset)); return *this; } @@ -267,7 +279,7 @@ template class bitset { template constexpr auto flip(T idx) LIFETIMEBOUND -> bitset & { auto const pos = static_cast(to_underlying(idx)); auto const [index, offset] = indices(pos); - storage[index] ^= static_cast(bit << offset); + storage[index] ^= static_cast(bit << offset); return *this; } @@ -310,10 +322,10 @@ template class bitset { std::size_t i = 0; for (auto e : storage) { if (auto offset = static_cast(countr_one(e)); - offset != std::numeric_limits::digits) { + offset != std::numeric_limits::digits) { return i + offset; } - i += std::numeric_limits::digits; + i += std::numeric_limits::digits; } return i; } @@ -358,13 +370,13 @@ template class bitset { } else { auto const borrow_shift = storage_elem_size - pos; for (auto i = start; i > std::size_t{}; --i) { - storage[dst] = static_cast(storage[i] << pos); + storage[dst] = static_cast(storage[i] << pos); storage[dst] |= - static_cast(storage[i - 1] >> borrow_shift); + static_cast(storage[i - 1] >> borrow_shift); --dst; } } - storage[dst] = static_cast(storage.front() << pos); + storage[dst] = static_cast(storage.front() << pos); while (dst > std::size_t{}) { storage[--dst] = 0; } @@ -384,13 +396,13 @@ template class bitset { } else { auto const borrow_shift = storage_elem_size - pos; for (auto i = start; i < storage_size - 1; ++i) { - storage[dst] = static_cast(storage[i] >> pos); + storage[dst] = static_cast(storage[i] >> pos); storage[dst] |= - static_cast(storage[i + 1] << 
borrow_shift); + static_cast(storage[i + 1] << borrow_shift); ++dst; } } - storage[dst++] = static_cast(storage.back() >> pos); + storage[dst++] = static_cast(storage.back() >> pos); while (dst < storage_size) { storage[dst++] = 0; } @@ -398,7 +410,7 @@ template class bitset { } }; -template +template constexpr auto for_each(F &&f, bitset const &...bs) -> F { if constexpr (sizeof...(bs) == 1) { return (bs.for_each(std::forward(f)), ...); @@ -407,12 +419,9 @@ constexpr auto for_each(F &&f, bitset const &...bs) -> F { return f; } } -} // namespace detail - -template -using bitset = - detail::bitset())>; +#if __cplusplus >= 202002L +template bitset(ct_string) -> bitset; +#endif } // namespace v1 } // namespace stdx diff --git a/test/atomic_bitset.cpp b/test/atomic_bitset.cpp index ba95fb6..52bfa29 100644 --- a/test/atomic_bitset.cpp +++ b/test/atomic_bitset.cpp @@ -368,3 +368,13 @@ TEST_CASE("atomic bitset is thread-safe", "[atomic_bitset]") { t1.join(); t2.join(); } + +#if __cplusplus >= 202002L +TEST_CASE("construct with a ct_string", "[atomic_bitset]") { + using namespace stdx::literals; + CHECK(stdx::atomic_bitset{"1010"_cts} == + stdx::bitset<4ul, std::uint8_t>{0b1010ul}); + CHECK(stdx::atomic_bitset{"101010101"_cts} == + stdx::bitset<9ul, std::uint16_t>{0b101010101ul}); +} +#endif diff --git a/test/bitset.cpp b/test/bitset.cpp index 2b54d4f..791b551 100644 --- a/test/bitset.cpp +++ b/test/bitset.cpp @@ -454,3 +454,13 @@ TEST_CASE("use bitset with enum struct (read index)", "[bitset]") { static_assert(bs[Bits::TWO]); static_assert(bs[Bits::THREE]); } + +#if __cplusplus >= 202002L +TEST_CASE("construct with a ct_string", "[bitset]") { + using namespace stdx::literals; + static_assert(stdx::bitset{"1010"_cts} == + stdx::bitset<4ul, std::uint8_t>{0b1010ul}); + static_assert(stdx::bitset{"101010101"_cts} == + stdx::bitset<9ul, std::uint16_t>{0b101010101ul}); +} +#endif diff --git a/test/fail/bitset_signed_storage.cpp b/test/fail/bitset_signed_storage.cpp index 9120630..f02641c 100644 --- a/test/fail/bitset_signed_storage.cpp +++ b/test/fail/bitset_signed_storage.cpp @@ -1,5 +1,5 @@ #include -// EXPECT: smallest_uint override must be an unsigned type +// EXPECT: Storage element for bitset must be an unsigned type auto main() -> int { auto b = stdx::bitset<32, int>{}; }