btree_benchmark.cc 26 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701
  1. // Copyright 2018 The Abseil Authors.
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // https://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. #include <stdint.h>
  15. #include <algorithm>
  16. #include <functional>
  17. #include <map>
  18. #include <numeric>
  19. #include <random>
  20. #include <set>
  21. #include <string>
  22. #include <type_traits>
  23. #include <unordered_map>
  24. #include <unordered_set>
  25. #include <vector>
  26. #include "benchmark/benchmark.h"
  27. #include "absl/base/internal/raw_logging.h"
  28. #include "absl/container/btree_map.h"
  29. #include "absl/container/btree_set.h"
  30. #include "absl/container/btree_test.h"
  31. #include "absl/container/flat_hash_map.h"
  32. #include "absl/container/flat_hash_set.h"
  33. #include "absl/container/internal/hashtable_debug.h"
  34. #include "absl/flags/flag.h"
  35. #include "absl/hash/hash.h"
  36. #include "absl/memory/memory.h"
  37. #include "absl/strings/cord.h"
  38. #include "absl/strings/str_format.h"
  39. #include "absl/time/time.h"
  40. namespace absl {
  41. ABSL_NAMESPACE_BEGIN
  42. namespace container_internal {
  43. namespace {
// Number of values most benchmarks operate on (2^20).
constexpr size_t kBenchmarkValues = 1 << 20;

// How many times we add and remove sub-batches in one batch of *AddRem
// benchmarks.
constexpr size_t kAddRemBatchSize = 1 << 2;

// Generates n values in the range [0, 4 * n].
// Uses a fixed seed so every benchmark run sees the same input sequence.
template <typename V>
std::vector<V> GenerateValues(int n) {
  constexpr int kSeed = 23;
  return GenerateValuesWithSeed<V>(n, 4 * n, kSeed);
}
  54. // Benchmark insertion of values into a container.
  55. template <typename T>
  56. void BM_InsertImpl(benchmark::State& state, bool sorted) {
  57. using V = typename remove_pair_const<typename T::value_type>::type;
  58. typename KeyOfValue<typename T::key_type, V>::type key_of_value;
  59. std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
  60. if (sorted) {
  61. std::sort(values.begin(), values.end());
  62. }
  63. T container(values.begin(), values.end());
  64. // Remove and re-insert 10% of the keys per batch.
  65. const int batch_size = (kBenchmarkValues + 9) / 10;
  66. while (state.KeepRunningBatch(batch_size)) {
  67. state.PauseTiming();
  68. const auto i = static_cast<int>(state.iterations());
  69. for (int j = i; j < i + batch_size; j++) {
  70. int x = j % kBenchmarkValues;
  71. container.erase(key_of_value(values[x]));
  72. }
  73. state.ResumeTiming();
  74. for (int j = i; j < i + batch_size; j++) {
  75. int x = j % kBenchmarkValues;
  76. container.insert(values[x]);
  77. }
  78. }
  79. }
  80. template <typename T>
  81. void BM_Insert(benchmark::State& state) {
  82. BM_InsertImpl<T>(state, false);
  83. }
  84. template <typename T>
  85. void BM_InsertSorted(benchmark::State& state) {
  86. BM_InsertImpl<T>(state, true);
  87. }
  88. // Benchmark inserting the first few elements in a container. In b-tree, this is
  89. // when the root node grows.
  90. template <typename T>
  91. void BM_InsertSmall(benchmark::State& state) {
  92. using V = typename remove_pair_const<typename T::value_type>::type;
  93. const int kSize = 8;
  94. std::vector<V> values = GenerateValues<V>(kSize);
  95. T container;
  96. while (state.KeepRunningBatch(kSize)) {
  97. for (int i = 0; i < kSize; ++i) {
  98. benchmark::DoNotOptimize(container.insert(values[i]));
  99. }
  100. state.PauseTiming();
  101. // Do not measure the time it takes to clear the container.
  102. container.clear();
  103. state.ResumeTiming();
  104. }
  105. }
  106. template <typename T>
  107. void BM_LookupImpl(benchmark::State& state, bool sorted) {
  108. using V = typename remove_pair_const<typename T::value_type>::type;
  109. typename KeyOfValue<typename T::key_type, V>::type key_of_value;
  110. std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
  111. if (sorted) {
  112. std::sort(values.begin(), values.end());
  113. }
  114. T container(values.begin(), values.end());
  115. while (state.KeepRunning()) {
  116. int idx = state.iterations() % kBenchmarkValues;
  117. benchmark::DoNotOptimize(container.find(key_of_value(values[idx])));
  118. }
  119. }
  120. // Benchmark lookup of values in a container.
  121. template <typename T>
  122. void BM_Lookup(benchmark::State& state) {
  123. BM_LookupImpl<T>(state, false);
  124. }
  125. // Benchmark lookup of values in a full container, meaning that values
  126. // are inserted in-order to take advantage of biased insertion, which
  127. // yields a full tree.
  128. template <typename T>
  129. void BM_FullLookup(benchmark::State& state) {
  130. BM_LookupImpl<T>(state, true);
  131. }
  132. // Benchmark deletion of values from a container.
  133. template <typename T>
  134. void BM_Delete(benchmark::State& state) {
  135. using V = typename remove_pair_const<typename T::value_type>::type;
  136. typename KeyOfValue<typename T::key_type, V>::type key_of_value;
  137. std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
  138. T container(values.begin(), values.end());
  139. // Remove and re-insert 10% of the keys per batch.
  140. const int batch_size = (kBenchmarkValues + 9) / 10;
  141. while (state.KeepRunningBatch(batch_size)) {
  142. const int i = state.iterations();
  143. for (int j = i; j < i + batch_size; j++) {
  144. int x = j % kBenchmarkValues;
  145. container.erase(key_of_value(values[x]));
  146. }
  147. state.PauseTiming();
  148. for (int j = i; j < i + batch_size; j++) {
  149. int x = j % kBenchmarkValues;
  150. container.insert(values[x]);
  151. }
  152. state.ResumeTiming();
  153. }
  154. }
// Benchmark deletion of multiple values from a container.
//
// Each batch erases a contiguous run of ~10% of the elements via range
// erase(first, last). Only the erase calls themselves are timed; collecting
// the values to be removed and re-inserting them afterwards are untimed.
template <typename T>
void BM_DeleteRange(benchmark::State& state) {
  using V = typename remove_pair_const<typename T::value_type>::type;
  typename KeyOfValue<typename T::key_type, V>::type key_of_value;
  std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
  T container(values.begin(), values.end());
  // Remove and re-insert 10% of the keys per batch.
  const int batch_size = (kBenchmarkValues + 9) / 10;
  while (state.KeepRunningBatch(batch_size)) {
    const int i = state.iterations();
    const int start_index = i % kBenchmarkValues;
    state.PauseTiming();
    {
      // Save the values about to be erased so they can be re-inserted
      // (untimed) at the end of the batch.
      std::vector<V> removed;
      removed.reserve(batch_size);
      auto itr = container.find(key_of_value(values[start_index]));
      auto start = itr;
      for (int j = 0; j < batch_size; j++) {
        if (itr == container.end()) {
          // Hit the end of the container before the batch was filled: erase
          // the range accumulated so far (timed), then wrap around to the
          // beginning and keep collecting.
          state.ResumeTiming();
          container.erase(start, itr);
          state.PauseTiming();
          itr = container.begin();
          start = itr;
        }
        removed.push_back(*itr++);
      }
      // Erase the final [start, itr) range (timed).
      state.ResumeTiming();
      container.erase(start, itr);
      state.PauseTiming();
      // Restore the container so every batch does the same work.
      container.insert(removed.begin(), removed.end());
    }
    state.ResumeTiming();
  }
}
// Benchmark steady-state insert (into first half of range) and remove (from
// second half of range), treating the container approximately like a queue with
// log-time access for all elements. This benchmark does not test the case where
// insertion and removal happen in the same region of the tree. This benchmark
// counts two value constructors.
template <typename T>
void BM_QueueAddRem(benchmark::State& state) {
  using V = typename remove_pair_const<typename T::value_type>::type;
  typename KeyOfValue<typename T::key_type, V>::type key_of_value;

  // half must divide kBenchmarkValues exactly for the offset arithmetic below.
  ABSL_RAW_CHECK(kBenchmarkValues % 2 == 0, "for performance");

  T container;

  const size_t half = kBenchmarkValues / 2;
  std::vector<int> remove_keys(half);
  std::vector<int> add_keys(half);

  // We want to do the exact same work repeatedly, and the benchmark can end
  // after a different number of iterations depending on the speed of the
  // individual run so we use a large batch size here and ensure that we do
  // deterministic work every batch.
  while (state.KeepRunningBatch(half * kAddRemBatchSize)) {
    state.PauseTiming();

    container.clear();

    for (size_t i = 0; i < half; ++i) {
      remove_keys[i] = i;
      add_keys[i] = i;
    }
    // Fixed seed keeps each batch's shuffle sequence identical across runs.
    constexpr int kSeed = 5;
    std::mt19937_64 rand(kSeed);
    std::shuffle(remove_keys.begin(), remove_keys.end(), rand);
    std::shuffle(add_keys.begin(), add_keys.end(), rand);

    // Note needs lazy generation of values.
    Generator<V> g(kBenchmarkValues * kAddRemBatchSize);

    // Pre-populate with the two halves the first round will delete from /
    // stand on (untimed).
    for (size_t i = 0; i < half; ++i) {
      container.insert(g(add_keys[i]));
      container.insert(g(half + remove_keys[i]));
    }

    // There are three parts each of size "half":
    // 1 is being deleted from [offset - half, offset)
    // 2 is standing [offset, offset + half)
    // 3 is being inserted into [offset + half, offset + 2 * half)
    size_t offset = 0;

    for (size_t i = 0; i < kAddRemBatchSize; ++i) {
      std::shuffle(remove_keys.begin(), remove_keys.end(), rand);
      std::shuffle(add_keys.begin(), add_keys.end(), rand);
      offset += half;

      // Only this erase+insert loop is timed; all setup above is excluded.
      state.ResumeTiming();
      for (size_t idx = 0; idx < half; ++idx) {
        container.erase(key_of_value(g(offset - half + remove_keys[idx])));
        container.insert(g(offset + half + add_keys[idx]));
      }
      state.PauseTiming();
    }
    state.ResumeTiming();
  }
}
// Mixed insertion and deletion in the same range using pre-constructed values.
template <typename T>
void BM_MixedAddRem(benchmark::State& state) {
  using V = typename remove_pair_const<typename T::value_type>::type;
  typename KeyOfValue<typename T::key_type, V>::type key_of_value;

  ABSL_RAW_CHECK(kBenchmarkValues % 2 == 0, "for performance");

  T container;

  // Create two random shuffles
  std::vector<int> remove_keys(kBenchmarkValues);
  std::vector<int> add_keys(kBenchmarkValues);

  // We want to do the exact same work repeatedly, and the benchmark can end
  // after a different number of iterations depending on the speed of the
  // individual run so we use a large batch size here and ensure that we do
  // deterministic work every batch.
  while (state.KeepRunningBatch(kBenchmarkValues * kAddRemBatchSize)) {
    state.PauseTiming();

    container.clear();

    // Fixed seed keeps each batch's shuffle sequence identical across runs.
    constexpr int kSeed = 7;
    std::mt19937_64 rand(kSeed);

    std::vector<V> values = GenerateValues<V>(kBenchmarkValues * 2);

    // Insert the first half of the values (already in random order)
    container.insert(values.begin(), values.begin() + kBenchmarkValues);

    // Set up the key-index vectors for the first round.
    for (size_t i = 0; i < kBenchmarkValues; ++i) {
      // remove_keys and add_keys will be swapped before each round,
      // therefore fill add_keys here w/ the keys being inserted, so
      // they'll be the first to be removed.
      remove_keys[i] = i + kBenchmarkValues;
      add_keys[i] = i;
    }

    for (size_t i = 0; i < kAddRemBatchSize; ++i) {
      // Each round removes exactly the values the previous round inserted.
      remove_keys.swap(add_keys);
      std::shuffle(remove_keys.begin(), remove_keys.end(), rand);
      std::shuffle(add_keys.begin(), add_keys.end(), rand);

      // Only this erase+insert loop is timed.
      state.ResumeTiming();
      for (size_t idx = 0; idx < kBenchmarkValues; ++idx) {
        container.erase(key_of_value(values[remove_keys[idx]]));
        container.insert(values[add_keys[idx]]);
      }
      state.PauseTiming();
    }
    state.ResumeTiming();
  }
}
  289. // Insertion at end, removal from the beginning. This benchmark
  290. // counts two value constructors.
  291. // TODO(ezb): we could add a GenerateNext version of generator that could reduce
  292. // noise for string-like types.
  293. template <typename T>
  294. void BM_Fifo(benchmark::State& state) {
  295. using V = typename remove_pair_const<typename T::value_type>::type;
  296. T container;
  297. // Need lazy generation of values as state.max_iterations is large.
  298. Generator<V> g(kBenchmarkValues + state.max_iterations);
  299. for (int i = 0; i < kBenchmarkValues; i++) {
  300. container.insert(g(i));
  301. }
  302. while (state.KeepRunning()) {
  303. container.erase(container.begin());
  304. container.insert(container.end(), g(state.iterations() + kBenchmarkValues));
  305. }
  306. }
  307. // Iteration (forward) through the tree
  308. template <typename T>
  309. void BM_FwdIter(benchmark::State& state) {
  310. using V = typename remove_pair_const<typename T::value_type>::type;
  311. using R = typename T::value_type const*;
  312. std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
  313. T container(values.begin(), values.end());
  314. auto iter = container.end();
  315. R r = nullptr;
  316. while (state.KeepRunning()) {
  317. if (iter == container.end()) iter = container.begin();
  318. r = &(*iter);
  319. ++iter;
  320. }
  321. benchmark::DoNotOptimize(r);
  322. }
  323. // Benchmark random range-construction of a container.
  324. template <typename T>
  325. void BM_RangeConstructionImpl(benchmark::State& state, bool sorted) {
  326. using V = typename remove_pair_const<typename T::value_type>::type;
  327. std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
  328. if (sorted) {
  329. std::sort(values.begin(), values.end());
  330. }
  331. {
  332. T container(values.begin(), values.end());
  333. }
  334. while (state.KeepRunning()) {
  335. T container(values.begin(), values.end());
  336. benchmark::DoNotOptimize(container);
  337. }
  338. }
  339. template <typename T>
  340. void BM_InsertRangeRandom(benchmark::State& state) {
  341. BM_RangeConstructionImpl<T>(state, false);
  342. }
  343. template <typename T>
  344. void BM_InsertRangeSorted(benchmark::State& state) {
  345. BM_RangeConstructionImpl<T>(state, true);
  346. }
// Declares std::set/map/multiset/multimap aliases for a given value type,
// named stl_<container>_<value>.
#define STL_ORDERED_TYPES(value)                     \
  using stl_set_##value = std::set<value>;           \
  using stl_map_##value = std::map<value, intptr_t>; \
  using stl_multiset_##value = std::multiset<value>; \
  using stl_multimap_##value = std::multimap<value, intptr_t>

using StdString = std::string;

STL_ORDERED_TYPES(int32_t);
STL_ORDERED_TYPES(int64_t);
STL_ORDERED_TYPES(StdString);
STL_ORDERED_TYPES(Cord);
STL_ORDERED_TYPES(Time);

// Declares unordered (hash) container aliases, both std:: and absl flat_hash.
#define STL_UNORDERED_TYPES(value)                                       \
  using stl_unordered_set_##value = std::unordered_set<value>;           \
  using stl_unordered_map_##value = std::unordered_map<value, intptr_t>; \
  using flat_hash_set_##value = flat_hash_set<value>;                    \
  using flat_hash_map_##value = flat_hash_map<value, intptr_t>;          \
  using stl_unordered_multiset_##value = std::unordered_multiset<value>; \
  using stl_unordered_multimap_##value =                                 \
      std::unordered_multimap<value, intptr_t>

// Same as STL_UNORDERED_TYPES but with an explicit hash functor, for value
// types with no std::hash specialization (Cord, Time use absl::Hash below).
#define STL_UNORDERED_TYPES_CUSTOM_HASH(value, hash)                           \
  using stl_unordered_set_##value = std::unordered_set<value, hash>;           \
  using stl_unordered_map_##value = std::unordered_map<value, intptr_t, hash>; \
  using flat_hash_set_##value = flat_hash_set<value, hash>;                    \
  using flat_hash_map_##value = flat_hash_map<value, intptr_t, hash>;          \
  using stl_unordered_multiset_##value = std::unordered_multiset<value, hash>; \
  using stl_unordered_multimap_##value =                                       \
      std::unordered_multimap<value, intptr_t, hash>

STL_UNORDERED_TYPES_CUSTOM_HASH(Cord, absl::Hash<absl::Cord>);

STL_UNORDERED_TYPES(int32_t);
STL_UNORDERED_TYPES(int64_t);
STL_UNORDERED_TYPES(StdString);
STL_UNORDERED_TYPES_CUSTOM_HASH(Time, absl::Hash<absl::Time>);

// Declares absl b-tree aliases. The "256" in the alias names presumably
// refers to the default b-tree node size — TODO confirm.
#define BTREE_TYPES(value)                                            \
  using btree_256_set_##value =                                       \
      btree_set<value, std::less<value>, std::allocator<value>>;      \
  using btree_256_map_##value =                                       \
      btree_map<value, intptr_t, std::less<value>,                    \
                std::allocator<std::pair<const value, intptr_t>>>;    \
  using btree_256_multiset_##value =                                  \
      btree_multiset<value, std::less<value>, std::allocator<value>>; \
  using btree_256_multimap_##value =                                  \
      btree_multimap<value, intptr_t, std::less<value>,               \
                     std::allocator<std::pair<const value, intptr_t>>>

BTREE_TYPES(int32_t);
BTREE_TYPES(int64_t);
BTREE_TYPES(StdString);
BTREE_TYPES(Cord);
BTREE_TYPES(Time);
// Defines a concrete benchmark function BM_<type>_<func> that instantiates the
// BM_<func> template for <type>, and registers it with the benchmark runner.
#define MY_BENCHMARK4(type, func)                                              \
  void BM_##type##_##func(benchmark::State& state) { BM_##func<type>(state); } \
  BENCHMARK(BM_##type##_##func)

// Registers the full suite of benchmarks defined above for one container type.
#define MY_BENCHMARK3(type)               \
  MY_BENCHMARK4(type, Insert);            \
  MY_BENCHMARK4(type, InsertSorted);      \
  MY_BENCHMARK4(type, InsertSmall);       \
  MY_BENCHMARK4(type, Lookup);            \
  MY_BENCHMARK4(type, FullLookup);        \
  MY_BENCHMARK4(type, Delete);            \
  MY_BENCHMARK4(type, DeleteRange);       \
  MY_BENCHMARK4(type, QueueAddRem);       \
  MY_BENCHMARK4(type, MixedAddRem);       \
  MY_BENCHMARK4(type, Fifo);              \
  MY_BENCHMARK4(type, FwdIter);           \
  MY_BENCHMARK4(type, InsertRangeRandom); \
  MY_BENCHMARK4(type, InsertRangeSorted)

// Registers the container families that also exist as multi-containers
// (stl, stl_unordered, btree_256 — but not flat_hash, which has no multi).
#define MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(type) \
  MY_BENCHMARK3(stl_##type);                    \
  MY_BENCHMARK3(stl_unordered_##type);          \
  MY_BENCHMARK3(btree_256_##type)

#define MY_BENCHMARK2(type)                \
  MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(type); \
  MY_BENCHMARK3(flat_hash_##type)

// Define MULTI_TESTING to see benchmarks for multi-containers also.
//
// You can use --copt=-DMULTI_TESTING.
#ifdef MULTI_TESTING
#define MY_BENCHMARK(type)                            \
  MY_BENCHMARK2(set_##type);                          \
  MY_BENCHMARK2(map_##type);                          \
  MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(multiset_##type); \
  MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(multimap_##type)
#else
#define MY_BENCHMARK(type)   \
  MY_BENCHMARK2(set_##type); \
  MY_BENCHMARK2(map_##type)
#endif

MY_BENCHMARK(int32_t);
MY_BENCHMARK(int64_t);
MY_BENCHMARK(StdString);
MY_BENCHMARK(Cord);
MY_BENCHMARK(Time);
  438. // Define a type whose size and cost of moving are independently customizable.
  439. // When sizeof(value_type) increases, we expect btree to no longer have as much
  440. // cache-locality advantage over STL. When cost of moving increases, we expect
  441. // btree to actually do more work than STL because it has to move values around
  442. // and STL doesn't have to.
  443. template <int Size, int Copies>
  444. struct BigType {
  445. BigType() : BigType(0) {}
  446. explicit BigType(int x) { std::iota(values.begin(), values.end(), x); }
  447. void Copy(const BigType& other) {
  448. for (int i = 0; i < Size && i < Copies; ++i) values[i] = other.values[i];
  449. // If Copies > Size, do extra copies.
  450. for (int i = Size, idx = 0; i < Copies; ++i) {
  451. int64_t tmp = other.values[idx];
  452. benchmark::DoNotOptimize(tmp);
  453. idx = idx + 1 == Size ? 0 : idx + 1;
  454. }
  455. }
  456. BigType(const BigType& other) { Copy(other); }
  457. BigType& operator=(const BigType& other) {
  458. Copy(other);
  459. return *this;
  460. }
  461. // Compare only the first Copies elements if Copies is less than Size.
  462. bool operator<(const BigType& other) const {
  463. return std::lexicographical_compare(
  464. values.begin(), values.begin() + std::min(Size, Copies),
  465. other.values.begin(), other.values.begin() + std::min(Size, Copies));
  466. }
  467. bool operator==(const BigType& other) const {
  468. return std::equal(values.begin(), values.begin() + std::min(Size, Copies),
  469. other.values.begin());
  470. }
  471. // Support absl::Hash.
  472. template <typename State>
  473. friend State AbslHashValue(State h, const BigType& b) {
  474. for (int i = 0; i < Size && i < Copies; ++i)
  475. h = State::combine(std::move(h), b.values[i]);
  476. return h;
  477. }
  478. std::array<int64_t, Size> values;
  479. };
// Declares every container alias for BigType<SIZE, COPIES> (std ordered,
// std unordered, absl flat_hash, absl btree — set/map and multi variants) and
// registers the full benchmark suite for them via MY_BENCHMARK.
#define BIG_TYPE_BENCHMARKS(SIZE, COPIES)                                     \
  using stl_set_size##SIZE##copies##COPIES = std::set<BigType<SIZE, COPIES>>; \
  using stl_map_size##SIZE##copies##COPIES =                                  \
      std::map<BigType<SIZE, COPIES>, intptr_t>;                              \
  using stl_multiset_size##SIZE##copies##COPIES =                             \
      std::multiset<BigType<SIZE, COPIES>>;                                   \
  using stl_multimap_size##SIZE##copies##COPIES =                             \
      std::multimap<BigType<SIZE, COPIES>, intptr_t>;                         \
  using stl_unordered_set_size##SIZE##copies##COPIES =                        \
      std::unordered_set<BigType<SIZE, COPIES>,                               \
                         absl::Hash<BigType<SIZE, COPIES>>>;                  \
  using stl_unordered_map_size##SIZE##copies##COPIES =                        \
      std::unordered_map<BigType<SIZE, COPIES>, intptr_t,                     \
                         absl::Hash<BigType<SIZE, COPIES>>>;                  \
  using flat_hash_set_size##SIZE##copies##COPIES =                            \
      flat_hash_set<BigType<SIZE, COPIES>>;                                   \
  using flat_hash_map_size##SIZE##copies##COPIES =                            \
      flat_hash_map<BigType<SIZE, COPIES>, intptr_t>;                         \
  using stl_unordered_multiset_size##SIZE##copies##COPIES =                   \
      std::unordered_multiset<BigType<SIZE, COPIES>,                          \
                              absl::Hash<BigType<SIZE, COPIES>>>;             \
  using stl_unordered_multimap_size##SIZE##copies##COPIES =                   \
      std::unordered_multimap<BigType<SIZE, COPIES>, intptr_t,                \
                              absl::Hash<BigType<SIZE, COPIES>>>;             \
  using btree_256_set_size##SIZE##copies##COPIES =                            \
      btree_set<BigType<SIZE, COPIES>>;                                       \
  using btree_256_map_size##SIZE##copies##COPIES =                            \
      btree_map<BigType<SIZE, COPIES>, intptr_t>;                             \
  using btree_256_multiset_size##SIZE##copies##COPIES =                       \
      btree_multiset<BigType<SIZE, COPIES>>;                                  \
  using btree_256_multimap_size##SIZE##copies##COPIES =                       \
      btree_multimap<BigType<SIZE, COPIES>, intptr_t>;                        \
  MY_BENCHMARK(size##SIZE##copies##COPIES)

// Define BIG_TYPE_TESTING to see benchmarks for more big types.
//
// You can use --copt=-DBIG_TYPE_TESTING.
#ifndef NODESIZE_TESTING
#ifdef BIG_TYPE_TESTING
BIG_TYPE_BENCHMARKS(1, 4);
BIG_TYPE_BENCHMARKS(4, 1);
BIG_TYPE_BENCHMARKS(4, 4);
BIG_TYPE_BENCHMARKS(1, 8);
BIG_TYPE_BENCHMARKS(8, 1);
BIG_TYPE_BENCHMARKS(8, 8);
BIG_TYPE_BENCHMARKS(1, 16);
BIG_TYPE_BENCHMARKS(16, 1);
BIG_TYPE_BENCHMARKS(16, 16);
BIG_TYPE_BENCHMARKS(1, 32);
BIG_TYPE_BENCHMARKS(32, 1);
BIG_TYPE_BENCHMARKS(32, 32);
#else
// Default: only the largest combination.
BIG_TYPE_BENCHMARKS(32, 32);
#endif
#endif
  534. // Benchmark using unique_ptrs to large value types. In order to be able to use
  535. // the same benchmark code as the other types, use a type that holds a
  536. // unique_ptr and has a copy constructor.
  537. template <int Size>
  538. struct BigTypePtr {
  539. BigTypePtr() : BigTypePtr(0) {}
  540. explicit BigTypePtr(int x) {
  541. ptr = absl::make_unique<BigType<Size, Size>>(x);
  542. }
  543. BigTypePtr(const BigTypePtr& other) {
  544. ptr = absl::make_unique<BigType<Size, Size>>(*other.ptr);
  545. }
  546. BigTypePtr(BigTypePtr&& other) noexcept = default;
  547. BigTypePtr& operator=(const BigTypePtr& other) {
  548. ptr = absl::make_unique<BigType<Size, Size>>(*other.ptr);
  549. }
  550. BigTypePtr& operator=(BigTypePtr&& other) noexcept = default;
  551. bool operator<(const BigTypePtr& other) const { return *ptr < *other.ptr; }
  552. bool operator==(const BigTypePtr& other) const { return *ptr == *other.ptr; }
  553. std::unique_ptr<BigType<Size, Size>> ptr;
  554. };
  555. template <int Size>
  556. double ContainerInfo(const btree_set<BigTypePtr<Size>>& b) {
  557. const double bytes_used =
  558. b.bytes_used() + b.size() * sizeof(BigType<Size, Size>);
  559. const double bytes_per_value = bytes_used / b.size();
  560. BtreeContainerInfoLog(b, bytes_used, bytes_per_value);
  561. return bytes_per_value;
  562. }
  563. template <int Size>
  564. double ContainerInfo(const btree_map<int, BigTypePtr<Size>>& b) {
  565. const double bytes_used =
  566. b.bytes_used() + b.size() * sizeof(BigType<Size, Size>);
  567. const double bytes_per_value = bytes_used / b.size();
  568. BtreeContainerInfoLog(b, bytes_used, bytes_per_value);
  569. return bytes_per_value;
  570. }
// Declares container aliases for the unique_ptr-based benchmarks and registers
// them. NOTE(review): the stl_* and flat_hash_set aliases use the plain
// BigType value type while flat_hash_map and the btree aliases use BigTypePtr
// — presumably an intentional baseline comparison, but confirm.
#define BIG_TYPE_PTR_BENCHMARKS(SIZE)                                          \
  using stl_set_size##SIZE##copies##SIZE##ptr = std::set<BigType<SIZE, SIZE>>; \
  using stl_map_size##SIZE##copies##SIZE##ptr =                                \
      std::map<int, BigType<SIZE, SIZE>>;                                      \
  using stl_unordered_set_size##SIZE##copies##SIZE##ptr =                      \
      std::unordered_set<BigType<SIZE, SIZE>,                                  \
                         absl::Hash<BigType<SIZE, SIZE>>>;                     \
  using stl_unordered_map_size##SIZE##copies##SIZE##ptr =                      \
      std::unordered_map<int, BigType<SIZE, SIZE>>;                            \
  using flat_hash_set_size##SIZE##copies##SIZE##ptr =                          \
      flat_hash_set<BigType<SIZE, SIZE>>;                                      \
  using flat_hash_map_size##SIZE##copies##SIZE##ptr =                          \
      flat_hash_map<int, BigTypePtr<SIZE>>;                                    \
  using btree_256_set_size##SIZE##copies##SIZE##ptr =                          \
      btree_set<BigTypePtr<SIZE>>;                                             \
  using btree_256_map_size##SIZE##copies##SIZE##ptr =                          \
      btree_map<int, BigTypePtr<SIZE>>;                                        \
  MY_BENCHMARK3(stl_set_size##SIZE##copies##SIZE##ptr);                        \
  MY_BENCHMARK3(stl_unordered_set_size##SIZE##copies##SIZE##ptr);              \
  MY_BENCHMARK3(flat_hash_set_size##SIZE##copies##SIZE##ptr);                  \
  MY_BENCHMARK3(btree_256_set_size##SIZE##copies##SIZE##ptr);                  \
  MY_BENCHMARK3(stl_map_size##SIZE##copies##SIZE##ptr);                        \
  MY_BENCHMARK3(stl_unordered_map_size##SIZE##copies##SIZE##ptr);              \
  MY_BENCHMARK3(flat_hash_map_size##SIZE##copies##SIZE##ptr);                  \
  MY_BENCHMARK3(btree_256_map_size##SIZE##copies##SIZE##ptr)

BIG_TYPE_PTR_BENCHMARKS(32);
  597. } // namespace
  598. } // namespace container_internal
  599. ABSL_NAMESPACE_END
  600. } // namespace absl