Merge pull request #247 from boostorg/feature/stats

Feature/stats
joaquintides 2024-05-08 17:19:12 +02:00 committed by GitHub
commit 87e0e52cf7
26 changed files with 1653 additions and 16 deletions

benchmark/string_stats.cpp

@ -0,0 +1,342 @@
// Copyright 2021 Peter Dimov.
// Copyright 2023-2024 Joaquin M Lopez Munoz.
// Distributed under the Boost Software License, Version 1.0.
// https://www.boost.org/LICENSE_1_0.txt
#define _SILENCE_CXX17_OLD_ALLOCATOR_MEMBERS_DEPRECATION_WARNING
#define _SILENCE_CXX20_CISO646_REMOVED_WARNING
#define BOOST_UNORDERED_ENABLE_STATS
#include <boost/unordered/unordered_flat_map.hpp>
#include <boost/core/detail/splitmix64.hpp>
#include <boost/config.hpp>
#include <unordered_map>
#include <vector>
#include <memory>
#include <string>
#include <cstdint>
#include <cstdio>
#include <iostream>
#include <iomanip>
#include <chrono>
#include <limits>
#include <type_traits>
using namespace std::chrono_literals;
static void print_time( std::chrono::steady_clock::time_point & t1, char const* label, std::uint32_t s, std::size_t size )
{
auto t2 = std::chrono::steady_clock::now();
std::cout << label << ": " << ( t2 - t1 ) / 1ms << " ms (s=" << s << ", size=" << size << ")\n";
t1 = t2;
}
constexpr unsigned N = 50'000;
constexpr int K = 10;
static std::vector<std::string> indices1, indices2;
static std::string make_index( unsigned x )
{
char buffer[ 64 ];
std::snprintf( buffer, sizeof(buffer), "pfx_%u_sfx", x );
return buffer;
}
static std::string make_random_index( unsigned x )
{
char buffer[ 64 ];
// variable-length keys: the zero-padding width cycles between 1 and 8 digits
std::snprintf( buffer, sizeof(buffer), "pfx_%0*d_%u_sfx", static_cast<int>( x % 8 + 1 ), 0, x );
return buffer;
}
static void init_indices()
{
indices1.reserve( N*2+1 );
indices1.push_back( make_index( 0 ) );
for( unsigned i = 1; i <= N*2; ++i )
{
indices1.push_back( make_index( i ) );
}
indices2.reserve( N*2+1 );
indices2.push_back( make_index( 0 ) );
{
boost::detail::splitmix64 rng;
for( unsigned i = 1; i <= N*2; ++i )
{
indices2.push_back( make_random_index( static_cast<std::uint32_t>( rng() ) ) );
}
}
}
template<class Map> BOOST_NOINLINE void test_insert( Map& map, std::chrono::steady_clock::time_point & t1 )
{
for( unsigned i = 1; i <= N; ++i )
{
map.insert( { indices1[ i ], i } );
}
print_time( t1, "Consecutive insert", 0, map.size() );
for( unsigned i = 1; i <= N; ++i )
{
map.insert( { indices2[ i ], i } );
}
print_time( t1, "Random insert", 0, map.size() );
std::cout << std::endl;
}
template<class Map> BOOST_NOINLINE void test_lookup( Map& map, std::chrono::steady_clock::time_point & t1 )
{
std::uint32_t s;
s = 0;
for( int j = 0; j < K; ++j )
{
for( unsigned i = 1; i <= N * 2; ++i )
{
auto it = map.find( indices1[ i ] );
if( it != map.end() ) s += it->second;
}
}
print_time( t1, "Consecutive lookup", s, map.size() );
s = 0;
for( int j = 0; j < K; ++j )
{
for( unsigned i = 1; i <= N * 2; ++i )
{
auto it = map.find( indices2[ i ] );
if( it != map.end() ) s += it->second;
}
}
print_time( t1, "Random lookup", s, map.size() );
std::cout << std::endl;
}
template<class Map> BOOST_NOINLINE void test_iteration( Map& map, std::chrono::steady_clock::time_point & t1 )
{
auto it = map.begin();
while( it != map.end() )
{
if( it->second & 1 )
{
if constexpr( std::is_void_v< decltype( map.erase( it ) ) > )
{
map.erase( it++ );
}
else
{
it = map.erase( it );
}
}
else
{
++it;
}
}
print_time( t1, "Iterate and erase odd elements", 0, map.size() );
std::cout << std::endl;
}
template<class Map> BOOST_NOINLINE void test_erase( Map& map, std::chrono::steady_clock::time_point & t1 )
{
for( unsigned i = 1; i <= N; ++i )
{
map.erase( indices1[ i ] );
}
print_time( t1, "Consecutive erase", 0, map.size() );
for( unsigned i = 1; i <= N; ++i )
{
map.erase( indices2[ i ] );
}
print_time( t1, "Random erase", 0, map.size() );
std::cout << std::endl;
}
//
// all Boost.Unordered containers use the same stats struct
using stats = boost::unordered_flat_map<int, int>::stats;
struct record
{
std::string label_;
long long time_;
stats stats_;
};
static std::vector<record> records;
template<template<class...> class Map> BOOST_NOINLINE void test( char const* label )
{
std::cout << label << ":\n\n";
Map<std::string, std::uint32_t> map;
auto t0 = std::chrono::steady_clock::now();
auto t1 = t0;
test_insert( map, t1 );
record rec = { label, 0 };
test_lookup( map, t1 );
test_iteration( map, t1 );
test_lookup( map, t1 );
test_erase( map, t1 );
auto tN = std::chrono::steady_clock::now();
std::cout << "Total: " << ( tN - t0 ) / 1ms << " ms\n\n";
rec.time_ = ( tN - t0 ) / 1ms;
rec.stats_ = map.get_stats();
records.push_back( rec );
}
//
template<class K, class V> using boost_unordered_flat_map =
boost::unordered_flat_map<K, V, boost::hash<K>, std::equal_to<K>>;
// fnv1a_hash
template<int Bits> struct fnv1a_hash_impl;
template<> struct fnv1a_hash_impl<32>
{
std::size_t operator()( std::string const& s ) const
{
std::size_t h = 0x811C9DC5u;
char const * first = s.data();
char const * last = first + s.size();
for( ; first != last; ++first )
{
h ^= static_cast<unsigned char>( *first );
h *= 0x01000193ul;
}
return h;
}
};
template<> struct fnv1a_hash_impl<64>
{
std::size_t operator()( std::string const& s ) const
{
std::size_t h = 0xCBF29CE484222325ull;
char const * first = s.data();
char const * last = first + s.size();
for( ; first != last; ++first )
{
h ^= static_cast<unsigned char>( *first );
h *= 0x00000100000001B3ull;
}
return h;
}
};
struct fnv1a_hash: fnv1a_hash_impl< std::numeric_limits<std::size_t>::digits >
{
using is_avalanching = void;
};
template<class K, class V> using boost_unordered_flat_map_fnv1a =
boost::unordered_flat_map<K, V, fnv1a_hash, std::equal_to<K>>;
// slightly bad hash
struct slightly_bad_hash
{
using is_avalanching = void;
std::size_t operator()( std::string const& s ) const
{
std::size_t h = s.size();
for( auto ch: s )
{
h *= 0x811C9DC4u; // multiplicative factor is even: low-order bits are progressively zeroed out
h += static_cast<std::size_t>( ch );
}
return h;
}
};
template<class K, class V> using boost_unordered_flat_map_slightly_bad_hash =
boost::unordered_flat_map<K, V, slightly_bad_hash, std::equal_to<K>>;
// bad hash
struct bad_hash
{
using is_avalanching = void;
std::size_t operator()( std::string const& s ) const
{
std::size_t h = s.size();
for( auto ch: s )
{
h *= 31;
h += static_cast<std::size_t>( ch );
}
return h;
}
};
template<class K, class V> using boost_unordered_flat_map_bad_hash =
boost::unordered_flat_map<K, V, bad_hash, std::equal_to<K>>;
//
int main()
{
init_indices();
test<boost_unordered_flat_map>( "boost::unordered_flat_map" );
test<boost_unordered_flat_map_fnv1a>( "boost::unordered_flat_map, FNV-1a" );
test<boost_unordered_flat_map_slightly_bad_hash>( "boost::unordered_flat_map, slightly_bad_hash" );
test<boost_unordered_flat_map_bad_hash>( "boost::unordered_flat_map, bad_hash" );
std::cout << "---\n\n";
for( auto const& x: records )
{
std::cout << std::setw( 46 ) << ( x.label_ + ": " ) << std::setw( 5 ) << x.time_ << " ms\n"
<< std::setw( 46 ) << "insertion: "
<< "probe length " << x.stats_.insertion.probe_length.average << "\n"
<< std::setw( 46 ) << "successful lookup: "
<< "probe length " << x.stats_.successful_lookup.probe_length.average
<< ", num comparisons " << x.stats_.successful_lookup.num_comparisons.average << "\n"
<< std::setw( 46 ) << "unsuccessful lookup: "
<< "probe length " << x.stats_.unsuccessful_lookup.probe_length.average
<< ", num comparisons " << x.stats_.unsuccessful_lookup.num_comparisons.average << "\n\n";
}
}


@ -15,6 +15,7 @@ include::unordered/buckets.adoc[]
include::unordered/hash_equality.adoc[]
include::unordered/regular.adoc[]
include::unordered/concurrent.adoc[]
include::unordered/hash_quality.adoc[]
include::unordered/compliance.adoc[]
include::unordered/structures.adoc[]
include::unordered/benchmarks.adoc[]


@ -9,6 +9,7 @@
== Release 1.86.0
* Added container `pmr` aliases when header `<memory_resource>` is available. The alias `boost::unordered::pmr::[container]` refers to `boost::unordered::[container]` with a `std::pmr::polymorphic_allocator` allocator type.
* Equipped open-addressing and concurrent containers to internally calculate and provide statistical metrics affected by the quality of the hash function. This functionality is enabled by the global macro `BOOST_UNORDERED_ENABLE_STATS`.
== Release 1.85.0


@ -50,6 +50,8 @@ namespace boost {
using size_type = std::size_t;
using difference_type = std::ptrdiff_t;
using stats = xref:stats_stats_type[__stats-type__]; // if statistics are xref:concurrent_flat_map_boost_unordered_enable_stats[enabled]
// constants
static constexpr size_type xref:#concurrent_flat_map_constants[bulk_visit_size] = _implementation-defined_;
@ -228,6 +230,10 @@ namespace boost {
size_type xref:#concurrent_flat_map_max_load[max_load]() const noexcept;
void xref:#concurrent_flat_map_rehash[rehash](size_type n);
void xref:#concurrent_flat_map_reserve[reserve](size_type n);
// statistics (if xref:concurrent_flat_map_boost_unordered_enable_stats[enabled])
stats xref:#concurrent_flat_map_get_stats[get_stats]() const;
void xref:#concurrent_flat_map_reset_stats[reset_stats]() noexcept;
};
// Deduction Guides
@ -407,6 +413,15 @@ a function visiting elements of `m`) are detected and signalled through `BOOST_A
When run-time speed is a concern, the feature can be disabled by globally defining
this macro.
---
==== `BOOST_UNORDERED_ENABLE_STATS`
Globally define this macro to enable xref:#stats[statistics calculation] for the table. Note
that this option decreases the overall performance of many operations.
---
=== Constants
```cpp
@ -488,6 +503,8 @@ concurrent_flat_map(concurrent_flat_map&& other);
The move constructor. The internal bucket array of `other` is transferred directly to the new table.
The hash function, predicate and allocator are moved-constructed from `other`.
If statistics are xref:concurrent_flat_map_boost_unordered_enable_stats[enabled],
transfers the internal statistical information from `other` and calls `other.reset_stats()`.
[horizontal]
Concurrency:;; Blocking on `other`.
@ -536,6 +553,9 @@ concurrent_flat_map(concurrent_flat_map&& other, Allocator const& a);
If `a == other.get_allocator()`, the elements of `other` are transferred directly to the new table;
otherwise, elements are moved-constructed from those of `other`. The hash function and predicate are moved-constructed
from `other`, and the allocator is copy-constructed from `a`.
If statistics are xref:concurrent_flat_map_boost_unordered_enable_stats[enabled],
transfers the internal statistical information from `other` iff `a == other.get_allocator()`,
and always calls `other.reset_stats()`.
[horizontal]
Concurrency:;; Blocking on `other`.
@ -551,6 +571,8 @@ concurrent_flat_map(unordered_flat_map<Key, T, Hash, Pred, Allocator>&& other);
Move construction from a xref:#unordered_flat_map[`unordered_flat_map`].
The internal bucket array of `other` is transferred directly to the new container.
The hash function, predicate and allocator are moved-constructed from `other`.
If statistics are xref:concurrent_flat_map_boost_unordered_enable_stats[enabled],
transfers the internal statistical information from `other` and calls `other.reset_stats()`.
[horizontal]
Complexity:;; O(`bucket_count()`)
@ -709,6 +731,9 @@ The move assignment operator. Destroys previously existing elements, swaps the h
and move-assigns the allocator from `other` if `Alloc::propagate_on_container_move_assignment` exists and `Alloc::propagate_on_container_move_assignment::value` is `true`.
If at this point the allocator is equal to `other.get_allocator()`, the internal bucket array of `other` is transferred directly to `*this`;
otherwise, inserts move-constructed copies of the elements of `other`.
If statistics are xref:concurrent_flat_map_boost_unordered_enable_stats[enabled],
transfers the internal statistical information from `other` iff the final allocator is equal to `other.get_allocator()`,
and always calls `other.reset_stats()`.
[horizontal]
Concurrency:;; Blocking on `*this` and `other`.
@ -1480,6 +1505,30 @@ Concurrency:;; Blocking on `*this`.
---
=== Statistics
==== get_stats
```c++
stats get_stats() const;
```
[horizontal]
Returns:;; A statistical description of the insertion and lookup operations performed by the table so far.
Notes:;; Only available if xref:stats[statistics calculation] is xref:concurrent_flat_map_boost_unordered_enable_stats[enabled].
---
==== reset_stats
```c++
void reset_stats() noexcept;
```
[horizontal]
Effects:;; Sets to zero the internal statistics kept by the table.
Notes:;; Only available if xref:stats[statistics calculation] is xref:concurrent_flat_map_boost_unordered_enable_stats[enabled].
---
=== Deduction Guides
A deduction guide will not participate in overload resolution if any of the following are true:


@ -45,6 +45,8 @@ namespace boost {
using size_type = std::size_t;
using difference_type = std::ptrdiff_t;
using stats = xref:stats_stats_type[__stats-type__]; // if statistics are xref:concurrent_flat_set_boost_unordered_enable_stats[enabled]
// constants
static constexpr size_type xref:#concurrent_flat_set_constants[bulk_visit_size] = _implementation-defined_;
@ -188,6 +190,10 @@ namespace boost {
size_type xref:#concurrent_flat_set_max_load[max_load]() const noexcept;
void xref:#concurrent_flat_set_rehash[rehash](size_type n);
void xref:#concurrent_flat_set_reserve[reserve](size_type n);
// statistics (if xref:concurrent_flat_set_boost_unordered_enable_stats[enabled])
stats xref:#concurrent_flat_set_get_stats[get_stats]() const;
void xref:#concurrent_flat_set_reset_stats[reset_stats]() noexcept;
};
// Deduction Guides
@ -358,6 +364,15 @@ a function visiting elements of `m`) are detected and signalled through `BOOST_A
When run-time speed is a concern, the feature can be disabled by globally defining
this macro.
---
==== `BOOST_UNORDERED_ENABLE_STATS`
Globally define this macro to enable xref:#stats[statistics calculation] for the table. Note
that this option decreases the overall performance of many operations.
---
=== Constants
```cpp
@ -439,6 +454,8 @@ concurrent_flat_set(concurrent_flat_set&& other);
The move constructor. The internal bucket array of `other` is transferred directly to the new table.
The hash function, predicate and allocator are moved-constructed from `other`.
If statistics are xref:concurrent_flat_set_boost_unordered_enable_stats[enabled],
transfers the internal statistical information from `other` and calls `other.reset_stats()`.
[horizontal]
Concurrency:;; Blocking on `other`.
@ -487,6 +504,9 @@ concurrent_flat_set(concurrent_flat_set&& other, Allocator const& a);
If `a == other.get_allocator()`, the elements of `other` are transferred directly to the new table;
otherwise, elements are moved-constructed from those of `other`. The hash function and predicate are moved-constructed
from `other`, and the allocator is copy-constructed from `a`.
If statistics are xref:concurrent_flat_set_boost_unordered_enable_stats[enabled],
transfers the internal statistical information from `other` iff `a == other.get_allocator()`,
and always calls `other.reset_stats()`.
[horizontal]
Concurrency:;; Blocking on `other`.
@ -502,6 +522,8 @@ concurrent_flat_set(unordered_flat_set<Key, Hash, Pred, Allocator>&& other);
Move construction from a xref:#unordered_flat_set[`unordered_flat_set`].
The internal bucket array of `other` is transferred directly to the new container.
The hash function, predicate and allocator are moved-constructed from `other`.
If statistics are xref:concurrent_flat_set_boost_unordered_enable_stats[enabled],
transfers the internal statistical information from `other` and calls `other.reset_stats()`.
[horizontal]
Complexity:;; O(`bucket_count()`)
@ -659,6 +681,9 @@ The move assignment operator. Destroys previously existing elements, swaps the h
and move-assigns the allocator from `other` if `Alloc::propagate_on_container_move_assignment` exists and `Alloc::propagate_on_container_move_assignment::value` is `true`.
If at this point the allocator is equal to `other.get_allocator()`, the internal bucket array of `other` is transferred directly to `*this`;
otherwise, inserts move-constructed copies of the elements of `other`.
If statistics are xref:concurrent_flat_set_boost_unordered_enable_stats[enabled],
transfers the internal statistical information from `other` iff the final allocator is equal to `other.get_allocator()`,
and always calls `other.reset_stats()`.
[horizontal]
Concurrency:;; Blocking on `*this` and `other`.
@ -1316,6 +1341,30 @@ Concurrency:;; Blocking on `*this`.
---
=== Statistics
==== get_stats
```c++
stats get_stats() const;
```
[horizontal]
Returns:;; A statistical description of the insertion and lookup operations performed by the table so far.
Notes:;; Only available if xref:stats[statistics calculation] is xref:concurrent_flat_set_boost_unordered_enable_stats[enabled].
---
==== reset_stats
```c++
void reset_stats() noexcept;
```
[horizontal]
Effects:;; Sets to zero the internal statistics kept by the table.
Notes:;; Only available if xref:stats[statistics calculation] is xref:concurrent_flat_set_boost_unordered_enable_stats[enabled].
---
=== Deduction Guides
A deduction guide will not participate in overload resolution if any of the following are true:


@ -0,0 +1,145 @@
[#hash_quality]
= Hash Quality
:idprefix: hash_quality_
In order to work properly, hash tables require that the supplied hash function
be of __good quality__, roughly meaning that it uses its `std::size_t` output
space as uniformly as possible, much like a random number generator would do
—except, of course, that the value of a hash function is not random but strictly determined
by its input argument.
Closed-addressing containers in Boost.Unordered are fairly robust against
hash functions with less-than-ideal quality, but open-addressing and concurrent
containers are much more sensitive to this factor, and their performance can
degrade dramatically if the hash function is not appropriate. In general, if
you're using functions provided by or generated with link:../../../container_hash/index.html[Boost.Hash^],
the quality will be adequate, but you have to be careful when using alternative
hash algorithms.
The rest of this section applies only to open-addressing and concurrent containers.
== Hash Post-mixing and the Avalanching Property
Even if your supplied hash function does not conform to the uniform behavior
required by open addressing, chances are that
the performance of Boost.Unordered containers will be acceptable, because the library
executes an internal __post-mixing__ step that improves the statistical
properties of the calculated hash values. This comes with an extra computational
cost; if you'd like to opt out of post-mixing, annotate your hash function as
follows:
[source,c++]
----
struct my_string_hash_function
{
using is_avalanching = void; // instruct Boost.Unordered to not use post-mixing
std::size_t operator()(const std::string& x) const
{
...
}
};
----
By setting the
xref:#hash_traits_hash_is_avalanching[hash_is_avalanching] trait, we inform Boost.Unordered
that `my_string_hash_function` is of sufficient quality to be used directly without
any post-mixing safety net. This comes at the risk of degraded performance in the
cases where the hash function is not as well-behaved as we've declared.
== Container Statistics
If we globally define the macro `BOOST_UNORDERED_ENABLE_STATS`, open-addressing and
concurrent containers will calculate some internal statistics directly correlated to the
quality of the hash function:
[source,c++]
----
#define BOOST_UNORDERED_ENABLE_STATS
#include <boost/unordered/unordered_map.hpp>
...
int main()
{
boost::unordered_flat_map<std::string, int, my_string_hash> m;
... // use m
auto stats = m.get_stats();
... // inspect stats
}
----
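If only a particular phase of the program is of interest, the counters can be
zeroed before that phase begins; `run_workload` below is a hypothetical
placeholder for code exercising the container:

[source,c++]
----
m.reset_stats();            // discard statistics accumulated so far
run_workload( m );          // hypothetical function exercising the container
auto stats = m.get_stats(); // statistics for the workload phase only
----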
The `stats` object provides the following information:
[source,subs=+quotes]
----
stats
.insertion // *Insertion operations*
.count // Number of operations
.probe_length // Probe length per operation
.average
.variance
.deviation
.successful_lookup // *Lookup operations (element found)*
.count // Number of operations
.probe_length // Probe length per operation
.average
.variance
.deviation
.num_comparisons // Elements compared per operation
.average
.variance
.deviation
.unsuccessful_lookup // *Lookup operations (element not found)*
.count // Number of operations
.probe_length // Probe length per operation
.average
.variance
.deviation
.num_comparisons // Elements compared per operation
.average
.variance
.deviation
----
Statistics for three internal operations are maintained: insertions (without considering
the previous lookup to determine that the key is not present yet), successful lookups,
and unsuccessful lookups (including those issued internally when inserting elements).
_Probe length_ is the number of
xref:#structures_open_addressing_containers[bucket groups] accessed per operation.
If the hash function behaves properly:
* Average probe lengths should be close to 1.0.
* The average number of comparisons per successful lookup should be close to 1.0 (that is,
just the element found is checked).
* The average number of comparisons per unsuccessful lookup should be close to 0.0.
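As a rough programmatic check on the container `m` above (the thresholds are
merely illustrative, not normative), these averages can be inspected directly:

[source,c++]
----
auto stats = m.get_stats();
if( stats.successful_lookup.num_comparisons.average > 1.5 ||
    stats.unsuccessful_lookup.num_comparisons.average > 0.5 )
{
  // the hash function is likely using its output space poorly
}
----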
An link:../../benchmark/string_stats.cpp[example^] is provided that displays container
statistics for `boost::hash<std::string>`, an implementation of the
https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function#FNV-1a_hash[FNV-1a hash^]
and two ill-behaved custom hash functions that have been incorrectly marked as avalanching:
[listing]
----
boost::unordered_flat_map: 319 ms
insertion: probe length 1.08771
successful lookup: probe length 1.06206, num comparisons 1.02121
unsuccessful lookup: probe length 1.12301, num comparisons 0.0388251
boost::unordered_flat_map, FNV-1a: 301 ms
insertion: probe length 1.09567
successful lookup: probe length 1.06202, num comparisons 1.0227
unsuccessful lookup: probe length 1.12195, num comparisons 0.040527
boost::unordered_flat_map, slightly_bad_hash: 654 ms
insertion: probe length 1.03443
successful lookup: probe length 1.04137, num comparisons 6.22152
unsuccessful lookup: probe length 1.29334, num comparisons 11.0335
boost::unordered_flat_map, bad_hash: 12216 ms
insertion: probe length 699.218
successful lookup: probe length 590.183, num comparisons 43.4886
unsuccessful lookup: probe length 1361.65, num comparisons 75.238
----


@ -102,7 +102,7 @@ and *high* and *low* are the upper and lower halves of an extended word, respect
In 64-bit architectures, _C_ is the integer part of 2^64^&#8725;https://en.wikipedia.org/wiki/Golden_ratio[_&phi;_],
whereas in 32 bits _C_ = 0xE817FB2Du has been obtained from https://arxiv.org/abs/2001.05304[Steele and Vigna (2021)^].
When using a hash function directly suitable for open addressing, post-mixing can be opted out of via a dedicated <<hash_traits_hash_is_avalanching,`hash_is_avalanching`>> trait.
`boost::hash` specializations for string types are marked as avalanching.
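As a rough sketch of the 64-bit post-mixing step described above (assuming a
compiler with `__uint128_t` support; the actual implementation differs in
detail), the operation looks like:

[source,c++]
----
#include <cstdint>

inline std::uint64_t mix64( std::uint64_t x )
{
  // C = integer part of 2^64 / phi
  __uint128_t r = __uint128_t( x ) * 0x9E3779B97F4A7C15ull;
  return std::uint64_t( r ) ^ std::uint64_t( r >> 64 ); // low ^ high
}
----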
=== Platform Interoperability


@ -6,6 +6,7 @@ include::unordered_multimap.adoc[]
include::unordered_set.adoc[]
include::unordered_multiset.adoc[]
include::hash_traits.adoc[]
include::stats.adoc[]
include::unordered_flat_map.adoc[]
include::unordered_flat_set.adoc[]
include::unordered_node_map.adoc[]

doc/unordered/stats.adoc

@ -0,0 +1,71 @@
[#stats]
== Statistics
:idprefix: stats_
Open-addressing and concurrent containers can be configured to keep running statistics
of some internal operations affected by the quality of the supplied hash function.
=== Synopsis
[listing,subs="+macros,+quotes"]
-----
struct xref:#stats_stats_summary_type[__stats-summary-type__]
{
double average;
double variance;
double deviation;
};
struct xref:#stats_insertion_stats_type[__insertion-stats-type__]
{
std::size_t count;
xref:#stats_stats_summary_type[__stats-summary-type__] probe_length;
};
struct xref:stats_lookup_stats_type[__lookup-stats-type__]
{
std::size_t count;
xref:#stats_stats_summary_type[__stats-summary-type__] probe_length;
xref:#stats_stats_summary_type[__stats-summary-type__] num_comparisons;
};
struct xref:stats_stats_type[__stats-type__]
{
xref:#stats_insertion_stats_type[__insertion-stats-type__] insertion;
xref:stats_lookup_stats_type[__lookup-stats-type__] successful_lookup,
unsuccessful_lookup;
};
-----
==== __stats-summary-type__
Provides the average value, variance and standard deviation of a sequence of numerical values.
==== __insertion-stats-type__
Provides the number of insertion operations performed by a container and
statistics on the associated __probe length__ (number of
xref:#structures_open_addressing_containers[bucket groups] accessed per operation).
==== __lookup-stats-type__
For successful (element found) or unsuccessful (not found) lookup,
provides the number of operations performed by a container and
statistics on the associated __probe length__ (number of
xref:#structures_open_addressing_containers[bucket groups] accessed)
and number of element comparisons per operation.
==== __stats-type__
Provides statistics on insertion, successful and unsuccessful lookups performed by a container.
If the supplied hash function has good quality, then:
* Average probe lengths should be close to 1.0.
* For successful lookups, the average number of element comparisons should be close to 1.0.
* For unsuccessful lookups, the average number of element comparisons should be close to 0.0.
These statistics can be used to determine if a given hash function
can be marked as xref:hash_traits_hash_is_avalanching[__avalanching__].
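As an illustration (not part of the synopsis above), the relevant averages can
be read off any open-addressing or concurrent container `c` built with
statistics enabled:

[source,c++]
----
auto s = c.get_stats();
double probe  = s.successful_lookup.probe_length.average;
double n_cmps = s.successful_lookup.num_comparisons.average;
bool looks_ok = probe < 1.1 && n_cmps < 1.1; // illustrative thresholds only
----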
---


@ -58,6 +58,8 @@ namespace boost {
using iterator = _implementation-defined_;
using const_iterator = _implementation-defined_;
using stats = xref:stats_stats_type[__stats-type__]; // if statistics are xref:unordered_flat_map_boost_unordered_enable_stats[enabled]
// construct/copy/destroy
xref:#unordered_flat_map_default_constructor[unordered_flat_map]();
explicit xref:#unordered_flat_map_bucket_count_constructor[unordered_flat_map](size_type n,
@ -214,6 +216,10 @@ namespace boost {
size_type xref:#unordered_flat_map_max_load[max_load]() const noexcept;
void xref:#unordered_flat_map_rehash[rehash](size_type n);
void xref:#unordered_flat_map_reserve[reserve](size_type n);
// statistics (if xref:unordered_flat_map_boost_unordered_enable_stats[enabled])
stats xref:#unordered_flat_map_get_stats[get_stats]() const;
void xref:#unordered_flat_map_reset_stats[reset_stats]() noexcept;
};
// Deduction Guides
@ -343,6 +349,15 @@ at the expense of extra computational cost.
---
=== Configuration Macros
==== `BOOST_UNORDERED_ENABLE_STATS`
Globally define this macro to enable xref:#stats[statistics calculation] for the container. Note
that this option decreases the overall performance of many operations.
---
=== Typedefs
[source,c++,subs=+quotes]
@ -439,6 +454,8 @@ unordered_flat_map(unordered_flat_map&& other);
The move constructor. The internal bucket array of `other` is transferred directly to the new container.
The hash function, predicate and allocator are moved-constructed from `other`.
If statistics are xref:unordered_flat_map_boost_unordered_enable_stats[enabled],
transfers the internal statistical information from `other` and calls `other.reset_stats()`.
---
@ -481,6 +498,9 @@ unordered_flat_map(unordered_flat_map&& other, Allocator const& a);
If `a == other.get_allocator()`, the elements of `other` are transferred directly to the new container;
otherwise, elements are moved-constructed from those of `other`. The hash function and predicate are moved-constructed
from `other`, and the allocator is copy-constructed from `a`.
If statistics are xref:unordered_flat_map_boost_unordered_enable_stats[enabled],
transfers the internal statistical information from `other` iff `a == other.get_allocator()`,
and always calls `other.reset_stats()`.
---
@ -493,6 +513,8 @@ unordered_flat_map(concurrent_flat_map<Key, T, Hash, Pred, Allocator>&& other);
Move construction from a xref:#concurrent_flat_map[`concurrent_flat_map`].
The internal bucket array of `other` is transferred directly to the new container.
The hash function, predicate and allocator are moved-constructed from `other`.
If statistics are xref:unordered_flat_map_boost_unordered_enable_stats[enabled],
transfers the internal statistical information from `other` and calls `other.reset_stats()`.
[horizontal]
Complexity:;; Constant time.
@ -651,6 +673,9 @@ The move assignment operator. Destroys previously existing elements, swaps the h
and move-assigns the allocator from `other` if `Alloc::propagate_on_container_move_assignment` exists and `Alloc::propagate_on_container_move_assignment::value` is `true`.
If at this point the allocator is equal to `other.get_allocator()`, the internal bucket array of `other` is transferred directly to the new container;
otherwise, inserts move-constructed copies of the elements of `other`.
If statistics are xref:unordered_flat_map_boost_unordered_enable_stats[enabled],
transfers the internal statistical information from `other` iff the final allocator is equal to `other.get_allocator()`,
and always calls `other.reset_stats()`.
---
@ -1364,6 +1389,32 @@ Invalidates iterators, pointers and references, and changes the order of element
[horizontal]
Throws:;; The function has no effect if an exception is thrown, unless it is thrown by the container's hash function or comparison function.
---
=== Statistics
==== get_stats
```c++
stats get_stats() const;
```
[horizontal]
Returns:;; A statistical description of the insertion and lookup operations performed by the container so far.
Notes:;; Only available if xref:stats[statistics calculation] is xref:unordered_flat_map_boost_unordered_enable_stats[enabled].
---
==== reset_stats
```c++
void reset_stats() noexcept;
```
[horizontal]
Effects:;; Sets to zero the internal statistics kept by the container.
Notes:;; Only available if xref:stats[statistics calculation] is xref:unordered_flat_map_boost_unordered_enable_stats[enabled].
---
=== Deduction Guides
A deduction guide will not participate in overload resolution if any of the following are true:


@ -53,6 +53,8 @@ namespace boost {
using iterator = _implementation-defined_;
using const_iterator = _implementation-defined_;
using stats = xref:stats_stats_type[__stats-type__]; // if statistics are xref:unordered_flat_set_boost_unordered_enable_stats[enabled]
// construct/copy/destroy
xref:#unordered_flat_set_default_constructor[unordered_flat_set]();
explicit xref:#unordered_flat_set_bucket_count_constructor[unordered_flat_set](size_type n,
@ -172,6 +174,10 @@ namespace boost {
size_type xref:#unordered_flat_set_max_load[max_load]() const noexcept;
void xref:#unordered_flat_set_rehash[rehash](size_type n);
void xref:#unordered_flat_set_reserve[reserve](size_type n);
// statistics (if xref:unordered_flat_set_boost_unordered_enable_stats[enabled])
stats xref:#unordered_flat_set_get_stats[get_stats]() const;
void xref:#unordered_flat_set_reset_stats[reset_stats]() noexcept;
};
// Deduction Guides
@ -291,6 +297,15 @@ at the expense of extra computational cost.
---
=== Configuration Macros
==== `BOOST_UNORDERED_ENABLE_STATS`
Globally define this macro to enable xref:#stats[statistics calculation] for the container. Note
that this option decreases the overall performance of many operations.
---
=== Typedefs
[source,c++,subs=+quotes]
@ -387,6 +402,8 @@ unordered_flat_set(unordered_flat_set&& other);
The move constructor. The internal bucket array of `other` is transferred directly to the new container.
The hash function, predicate and allocator are moved-constructed from `other`.
If statistics are xref:unordered_flat_set_boost_unordered_enable_stats[enabled],
transfers the internal statistical information from `other` and calls `other.reset_stats()`.
---
@ -429,6 +446,9 @@ unordered_flat_set(unordered_flat_set&& other, Allocator const& a);
If `a == other.get_allocator()`, the elements of `other` are transferred directly to the new container;
otherwise, elements are moved-constructed from those of `other`. The hash function and predicate are moved-constructed
from `other`, and the allocator is copy-constructed from `a`.
If statistics are xref:unordered_flat_set_boost_unordered_enable_stats[enabled],
transfers the internal statistical information from `other` iff `a == other.get_allocator()`,
and always calls `other.reset_stats()`.
---
@ -441,6 +461,8 @@ unordered_flat_set(concurrent_flat_set<Key, Hash, Pred, Allocator>&& other);
Move construction from a xref:#concurrent_flat_set[`concurrent_flat_set`].
The internal bucket array of `other` is transferred directly to the new container.
The hash function, predicate and allocator are moved-constructed from `other`.
If statistics are xref:unordered_flat_set_boost_unordered_enable_stats[enabled],
transfers the internal statistical information from `other` and calls `other.reset_stats()`.
[horizontal]
Complexity:;; Constant time.
@ -599,6 +621,9 @@ The move assignment operator. Destroys previously existing elements, swaps the h
and move-assigns the allocator from `other` if `Alloc::propagate_on_container_move_assignment` exists and `Alloc::propagate_on_container_move_assignment::value` is `true`.
If at this point the allocator is equal to `other.get_allocator()`, the internal bucket array of `other` is transferred directly to the new container;
otherwise, inserts move-constructed copies of the elements of `other`.
If statistics are xref:unordered_flat_set_boost_unordered_enable_stats[enabled],
transfers the internal statistical information from `other` iff the final allocator is equal to `other.get_allocator()`,
and always calls `other.reset_stats()`.
---
@ -1137,6 +1162,32 @@ Invalidates iterators, pointers and references, and changes the order of element
[horizontal]
Throws:;; The function has no effect if an exception is thrown, unless it is thrown by the container's hash function or comparison function.
---
=== Statistics
==== get_stats
```c++
stats get_stats() const;
```
[horizontal]
Returns:;; A statistical description of the insertion and lookup operations performed by the container so far.
Notes:;; Only available if xref:stats[statistics calculation] is xref:unordered_flat_set_boost_unordered_enable_stats[enabled].
---
==== reset_stats
```c++
void reset_stats() noexcept;
```
[horizontal]
Effects:;; Sets to zero the internal statistics kept by the container.
Notes:;; Only available if xref:stats[statistics calculation] is xref:unordered_flat_set_boost_unordered_enable_stats[enabled].
---
=== Deduction Guides
A deduction guide will not participate in overload resolution if any of the following are true:


@ -57,6 +57,8 @@ namespace boost {
using node_type = _implementation-defined_;
using insert_return_type = _implementation-defined_;
using stats = xref:stats_stats_type[__stats-type__]; // if statistics are xref:unordered_node_map_boost_unordered_enable_stats[enabled]
// construct/copy/destroy
xref:#unordered_node_map_default_constructor[unordered_node_map]();
explicit xref:#unordered_node_map_bucket_count_constructor[unordered_node_map](size_type n,
@ -217,6 +219,10 @@ namespace boost {
size_type xref:#unordered_node_map_max_load[max_load]() const noexcept;
void xref:#unordered_node_map_rehash[rehash](size_type n);
void xref:#unordered_node_map_reserve[reserve](size_type n);
// statistics (if xref:unordered_node_map_boost_unordered_enable_stats[enabled])
stats xref:#unordered_node_map_get_stats[get_stats]() const;
void xref:#unordered_node_map_reset_stats[reset_stats]() noexcept;
};
// Deduction Guides
@ -345,6 +351,15 @@ at the expense of extra computational cost.
---
=== Configuration Macros
==== `BOOST_UNORDERED_ENABLE_STATS`
Globally define this macro to enable xref:#stats[statistics calculation] for the container. Note
that this option decreases the overall performance of many operations.
---
=== Typedefs
[source,c++,subs=+quotes]
@ -472,6 +487,8 @@ unordered_node_map(unordered_node_map&& other);
The move constructor. The internal bucket array of `other` is transferred directly to the new container.
The hash function, predicate and allocator are moved-constructed from `other`.
If statistics are xref:unordered_node_map_boost_unordered_enable_stats[enabled],
transfers the internal statistical information from `other` and calls `other.reset_stats()`.
---
@ -514,6 +531,9 @@ unordered_node_map(unordered_node_map&& other, Allocator const& a);
If `a == other.get_allocator()`, the element nodes of `other` are transferred directly to the new container;
otherwise, elements are moved-constructed from those of `other`. The hash function and predicate are moved-constructed
from `other`, and the allocator is copy-constructed from `a`.
If statistics are xref:unordered_node_map_boost_unordered_enable_stats[enabled],
transfers the internal statistical information from `other` iff `a == other.get_allocator()`,
and always calls `other.reset_stats()`.
---
@ -668,6 +688,9 @@ The move assignment operator. Destroys previously existing elements, swaps the h
and move-assigns the allocator from `other` if `Alloc::propagate_on_container_move_assignment` exists and `Alloc::propagate_on_container_move_assignment::value` is `true`.
If at this point the allocator is equal to `other.get_allocator()`, the internal bucket array of `other` is transferred directly to the new container;
otherwise, inserts move-constructed copies of the elements of `other`.
If statistics are xref:unordered_node_map_boost_unordered_enable_stats[enabled],
transfers the internal statistical information from `other` iff the final allocator is equal to `other.get_allocator()`,
and always calls `other.reset_stats()`.
---
@ -1451,6 +1474,32 @@ Invalidates iterators and changes the order of elements.
[horizontal]
Throws:;; The function has no effect if an exception is thrown, unless it is thrown by the container's hash function or comparison function.
---
=== Statistics
==== get_stats
```c++
stats get_stats() const;
```
[horizontal]
Returns:;; A statistical description of the insertion and lookup operations performed by the container so far.
Notes:;; Only available if xref:stats[statistics calculation] is xref:unordered_node_map_boost_unordered_enable_stats[enabled].
---
==== reset_stats
```c++
void reset_stats() noexcept;
```
[horizontal]
Effects:;; Sets to zero the internal statistics kept by the container.
Notes:;; Only available if xref:stats[statistics calculation] is xref:unordered_node_map_boost_unordered_enable_stats[enabled].
---
=== Deduction Guides
A deduction guide will not participate in overload resolution if any of the following are true:


@ -52,6 +52,8 @@ namespace boost {
using node_type = _implementation-defined_;
using insert_return_type = _implementation-defined_;
using stats = xref:stats_stats_type[__stats-type__]; // if statistics are xref:unordered_node_set_boost_unordered_enable_stats[enabled]
// construct/copy/destroy
xref:#unordered_node_set_default_constructor[unordered_node_set]();
explicit xref:#unordered_node_set_bucket_count_constructor[unordered_node_set](size_type n,
@ -176,6 +178,10 @@ namespace boost {
size_type xref:#unordered_node_set_max_load[max_load]() const noexcept;
void xref:#unordered_node_set_rehash[rehash](size_type n);
void xref:#unordered_node_set_reserve[reserve](size_type n);
// statistics (if xref:unordered_node_set_boost_unordered_enable_stats[enabled])
stats xref:#unordered_node_set_get_stats[get_stats]() const;
void xref:#unordered_node_set_reset_stats[reset_stats]() noexcept;
};
// Deduction Guides
@ -294,6 +300,15 @@ at the expense of extra computational cost.
---
=== Configuration Macros
==== `BOOST_UNORDERED_ENABLE_STATS`
Globally define this macro to enable xref:#stats[statistics calculation] for the container. Note
that this option decreases the overall performance of many operations.
---
=== Typedefs
[source,c++,subs=+quotes]
@ -424,6 +439,8 @@ unordered_node_set(unordered_node_set&& other);
The move constructor. The internal bucket array of `other` is transferred directly to the new container.
The hash function, predicate and allocator are moved-constructed from `other`.
If statistics are xref:unordered_node_set_boost_unordered_enable_stats[enabled],
transfers the internal statistical information from `other` and calls `other.reset_stats()`.
---
@ -466,6 +483,9 @@ unordered_node_set(unordered_node_set&& other, Allocator const& a);
If `a == other.get_allocator()`, the element nodes of `other` are transferred directly to the new container;
otherwise, elements are moved-constructed from those of `other`. The hash function and predicate are moved-constructed
from `other`, and the allocator is copy-constructed from `a`.
If statistics are xref:unordered_node_set_boost_unordered_enable_stats[enabled],
transfers the internal statistical information from `other` iff `a == other.get_allocator()`,
and always calls `other.reset_stats()`.
---
@ -620,6 +640,9 @@ The move assignment operator. Destroys previously existing elements, swaps the h
and move-assigns the allocator from `other` if `Alloc::propagate_on_container_move_assignment` exists and `Alloc::propagate_on_container_move_assignment::value` is `true`.
If at this point the allocator is equal to `other.get_allocator()`, the internal bucket array of `other` is transferred directly to the new container;
otherwise, inserts move-constructed copies of the elements of `other`.
If statistics are xref:unordered_node_set_boost_unordered_enable_stats[enabled],
transfers the internal statistical information from `other` iff the final allocator is equal to `other.get_allocator()`,
and always calls `other.reset_stats()`.
---
@ -1228,6 +1251,32 @@ Invalidates iterators and changes the order of elements.
[horizontal]
Throws:;; The function has no effect if an exception is thrown, unless it is thrown by the container's hash function or comparison function.
---
=== Statistics
==== get_stats
```c++
stats get_stats() const;
```
[horizontal]
Returns:;; A statistical description of the insertion and lookup operations performed by the container so far.
Notes:;; Only available if xref:stats[statistics calculation] is xref:unordered_node_set_boost_unordered_enable_stats[enabled].
---
==== reset_stats
```c++
void reset_stats() noexcept;
```
[horizontal]
Effects:;; Sets to zero the internal statistics kept by the container.
Notes:;; Only available if xref:stats[statistics calculation] is xref:unordered_node_set_boost_unordered_enable_stats[enabled].
---
=== Deduction Guides
A deduction guide will not participate in overload resolution if any of the following are true:


@ -1,7 +1,7 @@
/* Fast open-addressing concurrent hashmap.
*
* Copyright 2023 Christian Mazakas.
* Copyright 2023-2024 Joaquin M Lopez Munoz.
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
@ -75,6 +75,10 @@ namespace boost {
typename boost::allocator_const_pointer<allocator_type>::type;
static constexpr size_type bulk_visit_size = table_type::bulk_visit_size;
#if defined(BOOST_UNORDERED_ENABLE_STATS)
using stats = typename table_type::stats;
#endif
concurrent_flat_map()
: concurrent_flat_map(detail::foa::default_bucket_count)
{
@ -714,6 +718,14 @@ namespace boost {
void rehash(size_type n) { table_.rehash(n); }
void reserve(size_type n) { table_.reserve(n); }
#if defined(BOOST_UNORDERED_ENABLE_STATS)
/// Stats
///
stats get_stats() const { return table_.get_stats(); }
void reset_stats() noexcept { table_.reset_stats(); }
#endif
/// Observers
///
allocator_type get_allocator() const noexcept


@ -1,7 +1,7 @@
/* Fast open-addressing concurrent hashset.
*
* Copyright 2023 Christian Mazakas.
* Copyright 2023-2024 Joaquin M Lopez Munoz.
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
@ -72,6 +72,10 @@ namespace boost {
typename boost::allocator_const_pointer<allocator_type>::type;
static constexpr size_type bulk_visit_size = table_type::bulk_visit_size;
#if defined(BOOST_UNORDERED_ENABLE_STATS)
using stats = typename table_type::stats;
#endif
concurrent_flat_set()
: concurrent_flat_set(detail::foa::default_bucket_count)
{
@ -582,6 +586,14 @@ namespace boost {
void rehash(size_type n) { table_.rehash(n); }
void reserve(size_type n) { table_.reserve(n); }
#if defined(BOOST_UNORDERED_ENABLE_STATS)
/// Stats
///
stats get_stats() const { return table_.get_stats(); }
void reset_stats() noexcept { table_.reset_stats(); }
#endif
/// Observers
///
allocator_type get_allocator() const noexcept


@ -0,0 +1,174 @@
/* Copyright 2024 Joaquin M Lopez Munoz.
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* See https://www.boost.org/libs/unordered for library home page.
*/
#ifndef BOOST_UNORDERED_DETAIL_CUMULATIVE_STATS_HPP
#define BOOST_UNORDERED_DETAIL_CUMULATIVE_STATS_HPP
#include <array>
#include <boost/config.hpp>
#include <boost/mp11/tuple.hpp>
#include <cmath>
#include <cstddef>
#if defined(BOOST_HAS_THREADS)
#include <mutex>
#endif
namespace boost{
namespace unordered{
namespace detail{
/* Cumulative one-pass calculation of the average, variance and deviation of
* running sequences.
*/
struct sequence_stats_data
{
double m=0.0;
double m_prior=0.0;
double s=0.0;
};
struct welfords_algorithm /* 0-based */
{
template<typename T>
int operator()(T&& x,sequence_stats_data& d)const noexcept
{
static_assert(
noexcept(static_cast<double>(x)),
"Argument conversion to double must not throw.");
d.m_prior=d.m;
d.m+=(static_cast<double>(x)-d.m)/static_cast<double>(n);
d.s+=(n!=1)*
(static_cast<double>(x)-d.m_prior)*(static_cast<double>(x)-d.m);
return 0; /* mp11::tuple_transform requires that return type not be void */
}
std::size_t n;
};
struct sequence_stats_summary
{
double average;
double variance;
double deviation;
};
/* Stats calculated jointly for N same-sized sequences to save the space
* for count.
*/
template<std::size_t N>
class cumulative_stats
{
public:
struct summary
{
std::size_t count;
std::array<sequence_stats_summary,N> sequence_summary;
};
void reset()noexcept{*this=cumulative_stats();}
template<typename... Ts>
void add(Ts&&... xs)noexcept
{
static_assert(
sizeof...(Ts)==N,"A sample must be provided for each sequence.");
if(BOOST_UNLIKELY(++n==0)){ /* wraparound */
reset();
n=1;
}
mp11::tuple_transform(
welfords_algorithm{n},
std::forward_as_tuple(std::forward<Ts>(xs)...),
data);
}
summary get_summary()const noexcept
{
summary res;
res.count=n;
for(std::size_t i=0;i<N;++i){
double average=data[i].m,
variance=n!=0?data[i].s/static_cast<double>(n):0.0, /* biased */
deviation=std::sqrt(variance);
res.sequence_summary[i]={average,variance,deviation};
}
return res;
}
private:
std::size_t n=0;
std::array<sequence_stats_data,N> data;
};
#if defined(BOOST_HAS_THREADS)
template<std::size_t N>
class concurrent_cumulative_stats:cumulative_stats<N>
{
using super=cumulative_stats<N>;
using lock_guard=std::lock_guard<std::mutex>;
public:
using summary=typename super::summary;
concurrent_cumulative_stats()noexcept:super{}{}
concurrent_cumulative_stats(const concurrent_cumulative_stats& x)noexcept:
concurrent_cumulative_stats{x,lock_guard{x.mut}}{}
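/* the copy constructor delegates to the private constructor below so that
 * x.mut is held (by the temporary lock_guard) while the base is copied
 */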
concurrent_cumulative_stats&
operator=(const concurrent_cumulative_stats& x)noexcept
{
auto x1=x;
lock_guard lck{mut};
static_cast<super&>(*this)=x1;
return *this;
}
void reset()noexcept
{
lock_guard lck{mut};
super::reset();
}
template<typename... Ts>
void add(Ts&&... xs)noexcept
{
lock_guard lck{mut};
super::add(std::forward<Ts>(xs)...);
}
summary get_summary()const noexcept
{
lock_guard lck{mut};
return super::get_summary();
}
private:
concurrent_cumulative_stats(const super& x,lock_guard&&):super{x}{}
mutable std::mutex mut;
};
#else
template<std::size_t N>
using concurrent_cumulative_stats=cumulative_stats<N>;
#endif
} /* namespace detail */
} /* namespace unordered */
} /* namespace boost */
#endif


@ -469,6 +469,10 @@ public:
using size_type=typename super::size_type;
static constexpr std::size_t bulk_visit_size=16;
#if defined(BOOST_UNORDERED_ENABLE_STATS)
using stats=typename super::stats;
#endif
private:
template<typename Value,typename T>
using enable_if_is_value_type=typename std::enable_if<
@ -510,6 +514,7 @@ public:
x.arrays=ah.release();
x.size_ctrl.ml=x.initial_max_load();
x.size_ctrl.size=0;
BOOST_UNORDERED_SWAP_STATS(this->cstats,x.cstats);
}
concurrent_table(compatible_nonconcurrent_table&& x):
@ -965,6 +970,13 @@ public:
super::reserve(n);
}
#if defined(BOOST_UNORDERED_ENABLE_STATS)
/* already thread safe */
using super::get_stats;
using super::reset_stats;
#endif
template<typename Predicate>
friend std::size_t erase_if(concurrent_table& x,Predicate&& pr)
{
@ -1186,6 +1198,7 @@ private:
GroupAccessMode access_mode,
const Key& x,std::size_t pos0,std::size_t hash,F&& f)const
{
BOOST_UNORDERED_STATS_COUNTER(num_cmps);
prober pb(pos0);
do{
auto pos=pb.get();
@ -1197,19 +1210,27 @@ private:
auto lck=access(access_mode,pos);
do{
auto n=unchecked_countr_zero(mask);
if(BOOST_LIKELY(pg->is_occupied(n))){
BOOST_UNORDERED_INCREMENT_STATS_COUNTER(num_cmps);
if(BOOST_LIKELY(bool(this->pred()(x,this->key_from(p[n]))))){
f(pg,n,p+n);
BOOST_UNORDERED_ADD_STATS(
this->cstats.successful_lookup,(pb.length(),num_cmps));
return 1;
}
}
mask&=mask-1;
}while(mask);
}
if(BOOST_LIKELY(pg->is_not_overflowed(hash))){
BOOST_UNORDERED_ADD_STATS(
this->cstats.unsuccessful_lookup,(pb.length(),num_cmps));
return 0;
}
}
while(BOOST_LIKELY(pb.next(this->arrays.groups_size_mask)));
BOOST_UNORDERED_ADD_STATS(
this->cstats.unsuccessful_lookup,(pb.length(),num_cmps));
return 0;
}
@ -1244,6 +1265,7 @@ private:
it=first;
for(auto i=m;i--;++it){
BOOST_UNORDERED_STATS_COUNTER(num_cmps);
auto pos=positions[i];
prober pb(pos);
auto pg=this->arrays.groups()+pos;
@ -1256,12 +1278,15 @@ private:
auto lck=access(access_mode,pos);
do{
auto n=unchecked_countr_zero(mask);
if(BOOST_LIKELY(pg->is_occupied(n))){
BOOST_UNORDERED_INCREMENT_STATS_COUNTER(num_cmps);
if(bool(this->pred()(*it,this->key_from(p[n])))){
f(cast_for(access_mode,type_policy::value_from(p[n])));
++res;
BOOST_UNORDERED_ADD_STATS(
this->cstats.successful_lookup,(pb.length(),num_cmps));
goto next_key;
}
}
mask&=mask-1;
}while(mask);
@ -1270,6 +1295,8 @@ private:
do{
if(BOOST_LIKELY(pg->is_not_overflowed(hashes[i]))||
BOOST_UNLIKELY(!pb.next(this->arrays.groups_size_mask))){
BOOST_UNORDERED_ADD_STATS(
this->cstats.unsuccessful_lookup,(pb.length(),num_cmps));
goto next_key;
}
pos=pb.get();
@ -1490,6 +1517,7 @@ private:
this->construct_element(p,std::forward<Args>(args)...);
rslot.commit();
rsize.commit();
BOOST_UNORDERED_ADD_STATS(this->cstats.insertion,(pb.length()));
return 1;
}
pg->mark_overflow(hash);


@ -40,6 +40,10 @@
#include <type_traits>
#include <utility>
#if defined(BOOST_UNORDERED_ENABLE_STATS)
#include <boost/unordered/detail/cumulative_stats.hpp>
#endif
#if !defined(BOOST_UNORDERED_DISABLE_SSE2)
#if defined(BOOST_UNORDERED_ENABLE_SSE2)|| \
defined(__SSE2__)|| \
@ -864,6 +868,7 @@ struct pow2_quadratic_prober
pow2_quadratic_prober(std::size_t pos_):pos{pos_}{}
inline std::size_t get()const{return pos;}
inline std::size_t length()const{return step+1;}
/* next returns false when the whole array has been traversed, which ends
* probing (in practice, full-table probing will only happen with very small
@ -1125,6 +1130,54 @@ struct table_arrays
value_type_pointer elements_;
};
#if defined(BOOST_UNORDERED_ENABLE_STATS)
/* stats support */
struct table_core_cumulative_stats
{
concurrent_cumulative_stats<1> insertion;
concurrent_cumulative_stats<2> successful_lookup,
unsuccessful_lookup;
};
struct table_core_insertion_stats
{
std::size_t count;
sequence_stats_summary probe_length;
};
struct table_core_lookup_stats
{
std::size_t count;
sequence_stats_summary probe_length;
sequence_stats_summary num_comparisons;
};
struct table_core_stats
{
table_core_insertion_stats insertion;
table_core_lookup_stats successful_lookup,
unsuccessful_lookup;
};
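/* args is passed already parenthesized at the call site, e.g.
 * BOOST_UNORDERED_ADD_STATS(cstats.insertion,(pb.length())), so that
 * "stats.add args" expands to the full call stats.add(pb.length()).
 */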
#define BOOST_UNORDERED_ADD_STATS(stats,args) stats.add args
#define BOOST_UNORDERED_SWAP_STATS(stats1,stats2) std::swap(stats1,stats2)
#define BOOST_UNORDERED_COPY_STATS(stats1,stats2) stats1=stats2
#define BOOST_UNORDERED_RESET_STATS_OF(x) x.reset_stats()
#define BOOST_UNORDERED_STATS_COUNTER(name) std::size_t name=0
#define BOOST_UNORDERED_INCREMENT_STATS_COUNTER(name) ++name
#else
#define BOOST_UNORDERED_ADD_STATS(stats,args) ((void)0)
#define BOOST_UNORDERED_SWAP_STATS(stats1,stats2) ((void)0)
#define BOOST_UNORDERED_COPY_STATS(stats1,stats2) ((void)0)
#define BOOST_UNORDERED_RESET_STATS_OF(x) ((void)0)
#define BOOST_UNORDERED_STATS_COUNTER(name) ((void)0)
#define BOOST_UNORDERED_INCREMENT_STATS_COUNTER(name) ((void)0)
#endif
struct if_constexpr_void_else{void operator()()const{}};
template<bool B,typename F,typename G=if_constexpr_void_else>
@ -1395,6 +1448,11 @@ public:
using locator=table_locator<group_type,element_type>;
using arrays_holder_type=arrays_holder<arrays_type,Allocator>;
#if defined(BOOST_UNORDERED_ENABLE_STATS)
using cumulative_stats=table_core_cumulative_stats;
using stats=table_core_stats;
#endif
table_core(
std::size_t n=default_bucket_count,const Hash& h_=Hash(),
const Pred& pred_=Pred(),const Allocator& al_=Allocator()):
@ -1429,6 +1487,7 @@ public:
x.arrays=ah.release();
x.size_ctrl.ml=x.initial_max_load();
x.size_ctrl.size=0;
BOOST_UNORDERED_SWAP_STATS(cstats,x.cstats);
}
table_core(table_core&& x)
@ -1454,11 +1513,13 @@ public:
using std::swap;
swap(arrays,x.arrays);
swap(size_ctrl,x.size_ctrl);
BOOST_UNORDERED_SWAP_STATS(cstats,x.cstats);
}
else{
reserve(x.size());
clear_on_exit c{x};
(void)c; /* unused var warning */
BOOST_UNORDERED_RESET_STATS_OF(x);
/* This works because subsequent x.clear() does not depend on the
* elements' values.
@ -1574,9 +1635,11 @@ public:
arrays=x.arrays;
size_ctrl.ml=std::size_t(x.size_ctrl.ml);
size_ctrl.size=std::size_t(x.size_ctrl.size);
BOOST_UNORDERED_COPY_STATS(cstats,x.cstats);
x.arrays=ah.release();
x.size_ctrl.ml=x.initial_max_load();
x.size_ctrl.size=0;
BOOST_UNORDERED_RESET_STATS_OF(x);
}
else{
swap(h(),x.h());
@ -1586,6 +1649,7 @@ public:
noshrink_reserve(x.size());
clear_on_exit c{x};
(void)c; /* unused var warning */
BOOST_UNORDERED_RESET_STATS_OF(x);
/* This works because subsequent x.clear() does not depend on the
* elements' values.
@ -1639,6 +1703,7 @@ public:
BOOST_FORCEINLINE locator find(
const Key& x,std::size_t pos0,std::size_t hash)const
{
BOOST_UNORDERED_STATS_COUNTER(num_cmps);
prober pb(pos0);
do{
auto pos=pb.get();
@ -1650,18 +1715,25 @@ public:
auto p=elements+pos*N;
BOOST_UNORDERED_PREFETCH_ELEMENTS(p,N);
do{
BOOST_UNORDERED_INCREMENT_STATS_COUNTER(num_cmps);
auto n=unchecked_countr_zero(mask);
if(BOOST_LIKELY(bool(pred()(x,key_from(p[n]))))){
BOOST_UNORDERED_ADD_STATS(
cstats.successful_lookup,(pb.length(),num_cmps));
return {pg,n,p+n};
}
mask&=mask-1;
}while(mask);
}
if(BOOST_LIKELY(pg->is_not_overflowed(hash))){
BOOST_UNORDERED_ADD_STATS(
cstats.unsuccessful_lookup,(pb.length(),num_cmps));
return {};
}
}
while(BOOST_LIKELY(pb.next(arrays.groups_size_mask)));
BOOST_UNORDERED_ADD_STATS(
cstats.unsuccessful_lookup,(pb.length(),num_cmps));
return {};
}
@ -1746,6 +1818,38 @@ public:
rehash(std::size_t(std::ceil(float(n)/mlf)));
}
#if defined(BOOST_UNORDERED_ENABLE_STATS)
stats get_stats()const
{
auto insertion=cstats.insertion.get_summary();
auto successful_lookup=cstats.successful_lookup.get_summary();
auto unsuccessful_lookup=cstats.unsuccessful_lookup.get_summary();
return{
{
insertion.count,
insertion.sequence_summary[0]
},
{
successful_lookup.count,
successful_lookup.sequence_summary[0],
successful_lookup.sequence_summary[1]
},
{
unsuccessful_lookup.count,
unsuccessful_lookup.sequence_summary[0],
unsuccessful_lookup.sequence_summary[1]
},
};
}
void reset_stats()noexcept
{
cstats.insertion.reset();
cstats.successful_lookup.reset();
cstats.unsuccessful_lookup.reset();
}
#endif
friend bool operator==(const table_core& x,const table_core& y)
{
return
@ -1953,8 +2057,12 @@ public:
return true;
}
#if defined(BOOST_UNORDERED_ENABLE_STATS)
mutable cumulative_stats cstats;
#endif
private:
template<
@ -2243,6 +2351,7 @@ private:
auto p=arrays_.elements()+pos*N+n;
construct_element(p,std::forward<Args>(args)...);
pg->set(n,hash);
BOOST_UNORDERED_ADD_STATS(cstats.insertion,(pb.length()));
return {pg,n,p};
}
else pg->mark_overflow(hash);

View File

@ -1,6 +1,6 @@
/* Fast open-addressing hash table.
*
* Copyright 2022-2023 Joaquin M Lopez Munoz.
* Copyright 2022-2024 Joaquin M Lopez Munoz.
* Copyright 2023 Christian Mazakas.
* Copyright 2024 Braden Ganetsky.
* Distributed under the Boost Software License, Version 1.0.
@ -361,6 +361,10 @@ public:
const_iterator>::type;
using erase_return_type=table_erase_return_type<iterator>;
#if defined(BOOST_UNORDERED_ENABLE_STATS)
using stats=typename super::stats;
#endif
table(
std::size_t n=default_bucket_count,const Hash& h_=Hash(),
const Pred& pred_=Pred(),const Allocator& al_=Allocator()):
@ -542,6 +546,11 @@ public:
using super::rehash;
using super::reserve;
#if defined(BOOST_UNORDERED_ENABLE_STATS)
using super::get_stats;
using super::reset_stats;
#endif
template<typename Predicate>
friend std::size_t erase_if(table& x,Predicate& pr)
{
@ -584,6 +593,7 @@ private:
x.arrays=ah.release();
x.size_ctrl.ml=x.initial_max_load();
x.size_ctrl.size=0;
BOOST_UNORDERED_SWAP_STATS(this->cstats,x.cstats);
}
template<typename ExclusiveLockGuard>

View File

@ -76,6 +76,10 @@ namespace boost {
using iterator = typename table_type::iterator;
using const_iterator = typename table_type::const_iterator;
#if defined(BOOST_UNORDERED_ENABLE_STATS)
using stats = typename table_type::stats;
#endif
unordered_flat_map() : unordered_flat_map(0) {}
explicit unordered_flat_map(size_type n, hasher const& h = hasher(),
@ -654,6 +658,14 @@ namespace boost {
void reserve(size_type n) { table_.reserve(n); }
#if defined(BOOST_UNORDERED_ENABLE_STATS)
/// Stats
///
stats get_stats() const { return table_.get_stats(); }
void reset_stats() noexcept { table_.reset_stats(); }
#endif
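With BOOST_UNORDERED_ENABLE_STATS defined before the include, the two members above become part of the container's public interface; a minimal usage sketch:

#define BOOST_UNORDERED_ENABLE_STATS
#include <boost/unordered/unordered_flat_map.hpp>
#include <iostream>

int main()
{
  boost::unordered_flat_map<int, int> m;
  for (int i = 0; i < 10000; ++i) m.emplace(i, i);
  auto s = m.get_stats();
  std::cout << "insertions: " << s.insertion.count
            << ", avg probe length: " << s.insertion.probe_length.average << "\n";
  m.reset_stats(); // counters return to their initial zero state
}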
/// Observers
///

View File

@ -72,6 +72,10 @@ namespace boost {
using iterator = typename table_type::iterator;
using const_iterator = typename table_type::const_iterator;
#if defined(BOOST_UNORDERED_ENABLE_STATS)
using stats = typename table_type::stats;
#endif
unordered_flat_set() : unordered_flat_set(0) {}
explicit unordered_flat_set(size_type n, hasher const& h = hasher(),
@ -474,6 +478,14 @@ namespace boost {
void reserve(size_type n) { table_.reserve(n); }
#if defined(BOOST_UNORDERED_ENABLE_STATS)
/// Stats
///
stats get_stats() const { return table_.get_stats(); }
void reset_stats() noexcept { table_.reset_stats(); }
#endif
/// Observers
///

View File

@ -115,6 +115,10 @@ namespace boost {
using insert_return_type =
detail::foa::insert_return_type<iterator, node_type>;
#if defined(BOOST_UNORDERED_ENABLE_STATS)
using stats = typename table_type::stats;
#endif
unordered_node_map() : unordered_node_map(0) {}
explicit unordered_node_map(size_type n, hasher const& h = hasher(),
@ -749,6 +753,14 @@ namespace boost {
void reserve(size_type n) { table_.reserve(n); }
#if defined(BOOST_UNORDERED_ENABLE_STATS)
/// Stats
///
stats get_stats() const { return table_.get_stats(); }
void reset_stats() noexcept { table_.reset_stats(); }
#endif
/// Observers
///

View File

@ -105,6 +105,10 @@ namespace boost {
using insert_return_type =
detail::foa::insert_return_type<iterator, node_type>;
#if defined(BOOST_UNORDERED_ENABLE_STATS)
using stats = typename table_type::stats;
#endif
unordered_node_set() : unordered_node_set(0) {}
explicit unordered_node_set(size_type n, hasher const& h = hasher(),
@ -563,6 +567,14 @@ namespace boost {
void reserve(size_type n) { table_.reserve(n); }
#if defined(BOOST_UNORDERED_ENABLE_STATS)
/// Stats
///
stats get_stats() const { return table_.get_stats(); }
void reset_stats() noexcept { table_.reset_stats(); }
#endif
/// Observers
///

View File

@ -228,6 +228,7 @@ local FOA_TESTS =
hash_is_avalanching_test
fancy_pointer_noleak
pmr_allocator_tests
stats_tests
;
for local test in $(FOA_TESTS)
@ -333,6 +334,7 @@ local CFOA_TESTS =
reentrancy_check_test
explicit_alloc_ctor_tests
pmr_allocator_tests
stats_tests
;
for local test in $(CFOA_TESTS)

View File

@ -0,0 +1,6 @@
// Copyright 2024 Joaquin M Lopez Muñoz.
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#define BOOST_UNORDERED_CFOA_TESTS
#include "../unordered/stats_tests.cpp"

View File

@ -0,0 +1,377 @@
// Copyright 2024 Joaquin M Lopez Muñoz.
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#define BOOST_UNORDERED_ENABLE_STATS
#ifdef BOOST_UNORDERED_CFOA_TESTS
#include <boost/unordered/concurrent_flat_map.hpp>
#include <boost/unordered/concurrent_flat_set.hpp>
#include <boost/unordered/unordered_flat_map.hpp>
#include <boost/unordered/unordered_flat_set.hpp>
#include "../cfoa/helpers.hpp"
#else
#include "../helpers/unordered.hpp"
#endif
#include "../helpers/helpers.hpp"
#include "../helpers/random_values.hpp"
#include "../helpers/test.hpp"
#include <boost/assert.hpp>
#include <boost/core/make_span.hpp>
#include <cstring>
// Allocator whose instances compare unequal when their tags differ; used to
// force container moves down the element-wise (allocator-incompatible) path.
template <class T> struct unequal_allocator
{
typedef T value_type;
unequal_allocator(int n = 0): n_{n} {}
unequal_allocator(unequal_allocator const&) = default;
unequal_allocator(unequal_allocator&&) = default;
template <class U>
unequal_allocator(unequal_allocator<U> const& x): n_{x.n_} {}
BOOST_ATTRIBUTE_NODISCARD T* allocate(std::size_t n)
{
return static_cast<T*>(::operator new(n * sizeof(T)));
}
void deallocate(T* p, std::size_t) noexcept { ::operator delete(p); }
bool operator==(unequal_allocator const& x) const { return n_ == x.n_; }
bool operator!=(unequal_allocator const& x) const { return n_ != x.n_; }
int n_;
};
bool exact_same(double x, double y)
{
return std::memcmp(
reinterpret_cast<void*>(&x), reinterpret_cast<void*>(&y),
sizeof(double))==0;
}
bool not_exact_same(double x, double y)
{
return !exact_same(x, y);
}
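exact_same compares bit patterns rather than values: it can assert that a statistic is bit-for-bit the untouched initial 0.0, and it sidesteps floating-point equality warnings. An illustrative check of the difference between value and bitwise comparison:

#include <cassert>

void exact_same_demo()
{
  assert(0.0 == -0.0);            // equal as values
  assert(!exact_same(0.0, -0.0)); // sign bits differ, so not bit-identical
}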
enum check_stats_condition
{
stats_empty=0,
stats_full,
stats_mostly_full // unsuccessful lookups may result in num_comparisons == 0
};
template <class Stats>
void check_stat(const Stats& s, check_stats_condition cond)
{
switch (cond) {
case stats_empty:
BOOST_TEST(exact_same(s.average, 0.0));
BOOST_TEST(exact_same(s.variance, 0.0));
BOOST_TEST(exact_same(s.deviation, 0.0));
break;
case stats_full:
BOOST_TEST_GT(s.average, 0.0);
if(not_exact_same(s.variance, 0.0)) {
BOOST_TEST_GT(s.variance, 0.0);
BOOST_TEST_GT(s.deviation, 0.0);
}
break;
case stats_mostly_full:
if(not_exact_same(s.variance, 0.0)) {
BOOST_TEST_GT(s.average, 0.0);
BOOST_TEST_GT(s.variance, 0.0);
BOOST_TEST_GT(s.deviation, 0.0);
}
break;
default:
break;
}
}
template <class Stats> void check_stat(const Stats& s1, const Stats& s2)
{
BOOST_TEST(exact_same(s1.average, s2.average));
BOOST_TEST(exact_same(s1.variance, s2.variance));
BOOST_TEST(exact_same(s1.deviation, s2.deviation));
}
template <class Stats>
void check_insertion_stats(const Stats& s, check_stats_condition cond)
{
switch (cond) {
case stats_empty:
BOOST_TEST_EQ(s.count, 0);
check_stat(s.probe_length, stats_empty);
break;
case stats_full:
BOOST_TEST_NE(s.count, 0);
check_stat(s.probe_length, stats_full);
break;
default:
BOOST_ASSERT(false); // insertion can't be mostly full
}
}
template <class Stats>
void check_insertion_stats(const Stats& s1, const Stats& s2)
{
BOOST_TEST_EQ(s1.count, s2.count);
check_stat(s1.probe_length, s2.probe_length);
}
template <class Stats>
void check_lookup_stats(const Stats& s, check_stats_condition cond)
{
check_stat(s.probe_length, cond == stats_empty? stats_empty : stats_full);
check_stat(s.num_comparisons, cond);
}
template <class Stats>
void check_lookup_stats(const Stats& s1, const Stats& s2)
{
BOOST_TEST_EQ(s1.count, s2.count);
check_stat(s1.probe_length, s2.probe_length);
check_stat(s1.num_comparisons, s2.num_comparisons);
}
template <class Stats>
void check_container_stats(const Stats& s, check_stats_condition cond)
{
if(cond == stats_mostly_full) {
BOOST_ASSERT(false); // mostly full only applies to unsuccessful lookup
}
check_insertion_stats(s.insertion, cond);
check_lookup_stats(s.successful_lookup, cond);
check_lookup_stats(
s.unsuccessful_lookup,
cond == stats_empty? stats_empty : stats_mostly_full);
}
template <class Stats>
void check_container_stats(const Stats& s1, const Stats& s2)
{
check_insertion_stats(s1.insertion, s2.insertion);
check_lookup_stats(s1.successful_lookup, s2.successful_lookup);
check_lookup_stats(s1.unsuccessful_lookup, s2.unsuccessful_lookup);
}
template <class Container> void insert_n(Container& c, std::size_t n)
{
#if defined(BOOST_UNORDERED_CFOA_TESTS)
using value_type = typename Container::value_type;
test::reset_sequence();
test::random_values<Container> l(n, test::sequential);
std::vector<value_type> v(l.begin(), l.end());
thread_runner(v, [&c](boost::span<value_type> sp) {
for (auto const& x : sp) {
c.insert(x);
}
});
#else
test::reset_sequence();
test::random_values<Container> l(n, test::sequential);
c.insert(l.begin(), l.end());
#endif
}
template <class Container> void test_stats()
{
using allocator_type = typename Container::allocator_type;
using stats = typename Container::stats;
Container c;
const Container& cc = c;
// Stats initially empty
stats s = cc.get_stats(); // going through cc checks that get_stats() is const
check_container_stats(s, stats_empty);
// Stats after insertion
insert_n(c, 10000);
s = cc.get_stats();
check_insertion_stats(s.insertion, stats_full); // insertions happened
check_lookup_stats(s.successful_lookup, stats_empty); // no duplicate values
check_lookup_stats(
s.unsuccessful_lookup, stats_mostly_full); // from insertion
#if !defined(BOOST_UNORDERED_CFOA_TESTS)
// Insertions outnumber unsuccessful lookups because rehashing re-inserts
// elements without a preceding lookup.
// May not hold in concurrent containers because of insertion retries
BOOST_TEST_GT(
s.insertion.count, s.unsuccessful_lookup.count);
#endif
// reset_stats() actually clears stats
c.reset_stats();
check_container_stats(cc.get_stats(), stats_empty);
// Stats after lookup
test::reset_sequence();
#if defined(BOOST_UNORDERED_CFOA_TESTS)
using key_type = typename Container::key_type;
using value_type = typename Container::value_type;
test::random_values<Container> l2(15000, test::sequential);
std::vector<value_type> v2(l2.begin(), l2.end());
std::atomic<std::size_t> found{0}, not_found{0};
thread_runner(v2, [&cc, &found, &not_found](boost::span<value_type> sp) {
// Half the span looked up elementwise
auto sp1 = boost::make_span(sp.begin(), sp.size()/2);
for (auto const& x : sp1) {
if(cc.contains(test::get_key<Container>(x))) ++found;
else ++not_found;
}
// Second half looked up in bulk
std::vector<key_type> ksp2;
for (auto const& x : boost::make_span(
sp1.end(), static_cast<std::size_t>(sp.end() - sp1.end()))) {
ksp2.push_back(test::get_key<Container>(x));
}
auto visited = cc.visit(
ksp2.begin(), ksp2.end(), [](const value_type&) {});
found += visited;
not_found += ksp2.size() - visited;
});
#else
test::random_values<Container> v2(15000, test::sequential);
std::size_t found = 0, not_found = 0;
for (const auto& x: v2) {
if (cc.contains(test::get_key<Container>(x))) ++found;
else ++not_found;
}
#endif
// As many [un]successful lookups as recorded externally
s=cc.get_stats();
check_lookup_stats(s.successful_lookup, stats_full);
check_lookup_stats(s.unsuccessful_lookup, stats_mostly_full);
BOOST_TEST_EQ(s.successful_lookup.count, found);
BOOST_TEST_EQ(s.unsuccessful_lookup.count, not_found);
c.reset_stats();
s = cc.get_stats();
check_container_stats(s, stats_empty);
// Move constructor tests
c.clear();
insert_n(c, 1000);
insert_n(c, 1000); // same sequence re-inserted, so successful lookups occur
// Move constructor
// Stats transferred to target and reset in source
s = cc.get_stats();
Container c2 = std::move(c);
check_container_stats(c.get_stats(), stats_empty);
check_container_stats(c2.get_stats(), s);
// Move constructor with equal allocator
// Stats transferred to target and reset in source
Container c3(std::move(c2), allocator_type());
check_container_stats(c2.get_stats(), stats_empty);
check_container_stats(c3.get_stats(), s);
// Move constructor with unequal allocator
// Target only has insertions, stats reset in source
Container c4(std::move(c3), allocator_type(1));
check_container_stats(c3.get_stats(), stats_empty);
check_insertion_stats(c4.get_stats().insertion, stats_full);
check_lookup_stats(c4.get_stats().successful_lookup, stats_empty);
check_lookup_stats(c4.get_stats().unsuccessful_lookup, stats_empty);
// Move assignment tests
// Move assignment with equal allocator
// Stats transferred to target and reset in source
Container c5, c6;
insert_n(c5,1000);
insert_n(c5,1000); // produces successful lookups
insert_n(c6,500);
insert_n(c6,500); // produces successful lookups
s = c5.get_stats();
check_container_stats(s, stats_full);
check_container_stats(c6.get_stats(), stats_full);
c6 = std::move(c5);
check_container_stats(c5.get_stats(), stats_empty);
check_container_stats(c6.get_stats(), s);
// Move assignment with unequal allocator
// Target only has insertions (if reset previously), stats reset in source
Container c7(allocator_type(1));
insert_n(c7,250);
insert_n(c7,250); // produces successful lookups
check_container_stats(c7.get_stats(), stats_full);
c7.reset_stats();
c7 = std::move(c6);
check_container_stats(c6.get_stats(), stats_empty);
check_insertion_stats(c7.get_stats().insertion, stats_full);
check_lookup_stats(c7.get_stats().successful_lookup, stats_empty);
check_lookup_stats(c7.get_stats().unsuccessful_lookup, stats_empty);
}
#if defined(BOOST_UNORDERED_CFOA_TESTS)
template <class Container, class ConcurrentContainer>
void test_stats_concurrent_unordered_interop()
{
ConcurrentContainer cc1;
insert_n(cc1,5000);
insert_n(cc1,5000); // produces successful lookups
auto s=cc1.get_stats();
Container c1(std::move(cc1));
check_container_stats(cc1.get_stats(),stats_empty);
check_container_stats(c1.get_stats(),s);
ConcurrentContainer cc2(std::move(c1));
check_container_stats(c1.get_stats(),stats_empty);
check_container_stats(cc2.get_stats(),s);
}
#endif
UNORDERED_AUTO_TEST (stats_) {
#if defined(BOOST_UNORDERED_CFOA_TESTS)
test_stats<
boost::concurrent_flat_map<
int, int, boost::hash<int>, std::equal_to<int>,
unequal_allocator< std::pair< const int, int> >>>();
test_stats<
boost::concurrent_flat_set<
int, boost::hash<int>, std::equal_to<int>, unequal_allocator<int>>>();
test_stats_concurrent_unordered_interop<
boost::unordered_flat_map<int, int>,
boost::concurrent_flat_map<int, int>>();
test_stats_concurrent_unordered_interop<
boost::unordered_flat_set<int>,
boost::concurrent_flat_set<int>>();
#elif defined(BOOST_UNORDERED_FOA_TESTS)
test_stats<
boost::unordered_flat_map<
int, int, boost::hash<int>, std::equal_to<int>,
unequal_allocator< std::pair< const int, int> >>>();
test_stats<
boost::unordered_flat_set<
int, boost::hash<int>, std::equal_to<int>, unequal_allocator<int>>>();
test_stats<
boost::unordered_node_map<
int, int, boost::hash<int>, std::equal_to<int>,
unequal_allocator< std::pair< const int, int> >>>();
test_stats<
boost::unordered_node_set<
int, boost::hash<int>, std::equal_to<int>, unequal_allocator<int>>>();
#else
// Closed-addressing containers do not provide stats
#endif
}
RUN_TESTS()