// Copyright 2019 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <array>
#include <cstddef>
#include <cstdint>
#include <string>
#include <utility>
#include <vector>

#include "benchmark/benchmark.h"
#include "absl/base/attributes.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/macros.h"
#include "absl/container/inlined_vector.h"
#include "absl/strings/str_cat.h"

namespace {

void BM_InlinedVectorFill(benchmark::State& state) {
  const int len = state.range(0);
  absl::InlinedVector<int, 8> v;
  v.reserve(len);
  for (auto _ : state) {
    v.resize(0);  // Use resize(0) as InlinedVector releases storage on clear().
    for (int i = 0; i < len; ++i) {
      v.push_back(i);
    }
    benchmark::DoNotOptimize(v);
  }
}
BENCHMARK(BM_InlinedVectorFill)->Range(1, 256);

void BM_InlinedVectorFillRange(benchmark::State& state) {
  const int len = state.range(0);
  const std::vector<int> src(len, len);
  absl::InlinedVector<int, 8> v;
  v.reserve(len);
  for (auto _ : state) {
    benchmark::DoNotOptimize(src);
    v.assign(src.begin(), src.end());
    benchmark::DoNotOptimize(v);
  }
}
BENCHMARK(BM_InlinedVectorFillRange)->Range(1, 256);

void BM_StdVectorFill(benchmark::State& state) {
  const int len = state.range(0);
  std::vector<int> v;
  v.reserve(len);
  for (auto _ : state) {
    // Unlike InlinedVector, std::vector::clear() does not release the
    // capacity reserved above, so no resize(0) workaround is needed here.
    v.clear();
    for (int i = 0; i < len; ++i) {
      v.push_back(i);
    }
    benchmark::DoNotOptimize(v);
  }
}
BENCHMARK(BM_StdVectorFill)->Range(1, 256);

// The purpose of the next two benchmarks is to verify that
// absl::InlinedVector is efficient when moving is more efficient than
// copying. To do so, we use strings that are larger than the short
// string optimization.
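
// StringRepresentedInline detects the short string optimization via pointer
// identity: moving a heap-allocated std::string steals its buffer, so data()
// is unchanged across the move, while an SSO string's characters live inside
// the object itself and end up at a new address after the move.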
bool StringRepresentedInline(std::string s) {
  const char* chars = s.data();
  std::string s1 = std::move(s);
  return s1.data() != chars;
}

int GetNonShortStringOptimizationSize() {
  for (int i = 24; i <= 192; i *= 2) {
    if (!StringRepresentedInline(std::string(i, 'A'))) {
      return i;
    }
  }
  ABSL_RAW_LOG(
      FATAL,
      "Failed to find a string larger than the short string optimization");
  return -1;
}

void BM_InlinedVectorFillString(benchmark::State& state) {
  const int len = state.range(0);
  const int no_sso = GetNonShortStringOptimizationSize();
  std::string strings[4] = {std::string(no_sso, 'A'), std::string(no_sso, 'B'),
                            std::string(no_sso, 'C'), std::string(no_sso, 'D')};
  for (auto _ : state) {
    absl::InlinedVector<std::string, 8> v;
    for (int i = 0; i < len; i++) {
      v.push_back(strings[i & 3]);
    }
  }
  state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * len);
}
BENCHMARK(BM_InlinedVectorFillString)->Range(0, 1024);

void BM_StdVectorFillString(benchmark::State& state) {
  const int len = state.range(0);
  const int no_sso = GetNonShortStringOptimizationSize();
  std::string strings[4] = {std::string(no_sso, 'A'), std::string(no_sso, 'B'),
                            std::string(no_sso, 'C'), std::string(no_sso, 'D')};
  for (auto _ : state) {
    std::vector<std::string> v;
    for (int i = 0; i < len; i++) {
      v.push_back(strings[i & 3]);
    }
  }
  state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * len);
}
BENCHMARK(BM_StdVectorFillString)->Range(0, 1024);

struct Buffer {  // some arbitrary structure for benchmarking.
  char* base;
  int length;
  int capacity;
  void* user_data;
};

void BM_InlinedVectorAssignments(benchmark::State& state) {
  const int len = state.range(0);
  using BufferVec = absl::InlinedVector<Buffer, 2>;

  BufferVec src;
  src.resize(len);

  BufferVec dst;
  for (auto _ : state) {
    benchmark::DoNotOptimize(dst);
    benchmark::DoNotOptimize(src);
    dst = src;
  }
}
BENCHMARK(BM_InlinedVectorAssignments)
    ->Arg(0)
    ->Arg(1)
    ->Arg(2)
    ->Arg(3)
    ->Arg(4)
    ->Arg(20);
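
// Move-constructing from an InlinedVector whose elements are stored inline
// cannot steal a heap allocation: the three elements below fit in the inline
// capacity of 4, so the move degenerates to moving elements one at a time,
// which is exactly the cost this benchmark tracks.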
void BM_CreateFromContainer(benchmark::State& state) {
  for (auto _ : state) {
    absl::InlinedVector<int, 4> src{1, 2, 3};
    benchmark::DoNotOptimize(src);
    absl::InlinedVector<int, 4> dst(std::move(src));
    benchmark::DoNotOptimize(dst);
  }
}
BENCHMARK(BM_CreateFromContainer);

struct LargeCopyableOnly {
  LargeCopyableOnly() : d(1024, 17) {}
  LargeCopyableOnly(const LargeCopyableOnly& o) = default;
  LargeCopyableOnly& operator=(const LargeCopyableOnly& o) = default;

  std::vector<int> d;
};

struct LargeCopyableSwappable {
  LargeCopyableSwappable() : d(1024, 17) {}

  LargeCopyableSwappable(const LargeCopyableSwappable& o) = default;

  LargeCopyableSwappable& operator=(LargeCopyableSwappable o) {
    using std::swap;
    swap(*this, o);
    return *this;
  }

  friend void swap(LargeCopyableSwappable& a, LargeCopyableSwappable& b) {
    using std::swap;
    swap(a.d, b.d);
  }

  std::vector<int> d;
};

struct LargeCopyableMovable {
  LargeCopyableMovable() : d(1024, 17) {}
  // Use implicitly defined copy and move.

  std::vector<int> d;
};

struct LargeCopyableMovableSwappable {
  LargeCopyableMovableSwappable() : d(1024, 17) {}
  LargeCopyableMovableSwappable(const LargeCopyableMovableSwappable& o) =
      default;
  LargeCopyableMovableSwappable(LargeCopyableMovableSwappable&& o) = default;

  LargeCopyableMovableSwappable& operator=(LargeCopyableMovableSwappable o) {
    using std::swap;
    swap(*this, o);
    return *this;
  }
  LargeCopyableMovableSwappable& operator=(LargeCopyableMovableSwappable&& o) =
      default;

  friend void swap(LargeCopyableMovableSwappable& a,
                   LargeCopyableMovableSwappable& b) {
    using std::swap;
    swap(a.d, b.d);
  }

  std::vector<int> d;
};
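
// The element types above expose different operation sets to swap: copy-only
// forces element copies, Swappable adds an ADL swap, Movable allows the
// implicit move path, and MovableSwappable offers everything. The benchmark
// below compares the resulting swap costs.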
template <typename ElementType>
void BM_SwapElements(benchmark::State& state) {
  const int len = state.range(0);
  using Vec = absl::InlinedVector<ElementType, 32>;
  Vec a(len);
  Vec b;
  for (auto _ : state) {
    using std::swap;
    benchmark::DoNotOptimize(a);
    benchmark::DoNotOptimize(b);
    swap(a, b);
  }
}
BENCHMARK_TEMPLATE(BM_SwapElements, LargeCopyableOnly)->Range(0, 1024);
BENCHMARK_TEMPLATE(BM_SwapElements, LargeCopyableSwappable)->Range(0, 1024);
BENCHMARK_TEMPLATE(BM_SwapElements, LargeCopyableMovable)->Range(0, 1024);
BENCHMARK_TEMPLATE(BM_SwapElements, LargeCopyableMovableSwappable)
    ->Range(0, 1024);

// The following benchmark is meant to track the space efficiency (sizeof) of
// the vector as a function of the stored type via the benchmark label. It is
// not meant to measure the performance of the sizeof operator itself. The
// loop body is a dummy operation that satisfies the requirement that the
// benchmark runs.
template <typename VecType>
void BM_Sizeof(benchmark::State& state) {
  int size = 0;
  for (auto _ : state) {
    VecType vec;
    size = sizeof(vec);
  }
  state.SetLabel(absl::StrCat("sz=", size));
}
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<char, 1>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<char, 4>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<char, 7>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<char, 8>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<int, 1>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<int, 4>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<int, 7>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<int, 8>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<void*, 1>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<void*, 4>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<void*, 7>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<void*, 8>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<std::string, 1>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<std::string, 4>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<std::string, 7>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<std::string, 8>);

void BM_InlinedVectorIndexInlined(benchmark::State& state) {
  absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
  for (auto _ : state) {
    benchmark::DoNotOptimize(v);
    benchmark::DoNotOptimize(v[4]);
  }
}
BENCHMARK(BM_InlinedVectorIndexInlined);

void BM_InlinedVectorIndexExternal(benchmark::State& state) {
  absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
  for (auto _ : state) {
    benchmark::DoNotOptimize(v);
    benchmark::DoNotOptimize(v[4]);
  }
}
BENCHMARK(BM_InlinedVectorIndexExternal);

void BM_StdVectorIndex(benchmark::State& state) {
  std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
  for (auto _ : state) {
    benchmark::DoNotOptimize(v);
    benchmark::DoNotOptimize(v[4]);
  }
}
BENCHMARK(BM_StdVectorIndex);

void BM_InlinedVectorDataInlined(benchmark::State& state) {
  absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
  for (auto _ : state) {
    benchmark::DoNotOptimize(v);
    benchmark::DoNotOptimize(v.data());
  }
}
BENCHMARK(BM_InlinedVectorDataInlined);

void BM_InlinedVectorDataExternal(benchmark::State& state) {
  absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
  for (auto _ : state) {
    benchmark::DoNotOptimize(v);
    benchmark::DoNotOptimize(v.data());
  }
  state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
}
BENCHMARK(BM_InlinedVectorDataExternal);

void BM_StdVectorData(benchmark::State& state) {
  std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
  for (auto _ : state) {
    benchmark::DoNotOptimize(v);
    benchmark::DoNotOptimize(v.data());
  }
  state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
}
BENCHMARK(BM_StdVectorData);

void BM_InlinedVectorSizeInlined(benchmark::State& state) {
  absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
  for (auto _ : state) {
    benchmark::DoNotOptimize(v);
    benchmark::DoNotOptimize(v.size());
  }
}
BENCHMARK(BM_InlinedVectorSizeInlined);

void BM_InlinedVectorSizeExternal(benchmark::State& state) {
  absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
  for (auto _ : state) {
    benchmark::DoNotOptimize(v);
    benchmark::DoNotOptimize(v.size());
  }
}
BENCHMARK(BM_InlinedVectorSizeExternal);

void BM_StdVectorSize(benchmark::State& state) {
  std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
  for (auto _ : state) {
    benchmark::DoNotOptimize(v);
    benchmark::DoNotOptimize(v.size());
  }
}
BENCHMARK(BM_StdVectorSize);

void BM_InlinedVectorEmptyInlined(benchmark::State& state) {
  absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
  for (auto _ : state) {
    benchmark::DoNotOptimize(v);
    benchmark::DoNotOptimize(v.empty());
  }
}
BENCHMARK(BM_InlinedVectorEmptyInlined);

void BM_InlinedVectorEmptyExternal(benchmark::State& state) {
  absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
  for (auto _ : state) {
    benchmark::DoNotOptimize(v);
    benchmark::DoNotOptimize(v.empty());
  }
}
BENCHMARK(BM_InlinedVectorEmptyExternal);

void BM_StdVectorEmpty(benchmark::State& state) {
  std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
  for (auto _ : state) {
    benchmark::DoNotOptimize(v);
    benchmark::DoNotOptimize(v.empty());
  }
}
BENCHMARK(BM_StdVectorEmpty);

constexpr size_t kInlinedCapacity = 4;
constexpr size_t kLargeSize = kInlinedCapacity * 2;
constexpr size_t kSmallSize = kInlinedCapacity / 2;
constexpr size_t kBatchSize = 100;
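
// kLargeSize (8) exceeds kInlinedCapacity, so those cases run with allocated
// storage, while kSmallSize (2) stays inline. The macros below stamp out a
// benchmark instantiation for each size, or for each pair of sizes, so both
// storage modes and the transitions between them are covered.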
#define ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_FunctionTemplate, T) \
  BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kLargeSize);        \
  BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kSmallSize)

#define ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_FunctionTemplate, T)      \
  BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kLargeSize, kLargeSize); \
  BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kLargeSize, kSmallSize); \
  BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kSmallSize, kLargeSize); \
  BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kSmallSize, kSmallSize)

template <typename T>
using InlVec = absl::InlinedVector<T, kInlinedCapacity>;

struct TrivialType {
  size_t val;
};

// NOINLINE special members plus DoNotOptimize keep construction, assignment,
// and destruction opaque to the optimizer, so per-element work cannot be
// elided or folded away.
class NontrivialType {
 public:
  ABSL_ATTRIBUTE_NOINLINE NontrivialType() : val_() {
    benchmark::DoNotOptimize(*this);
  }

  ABSL_ATTRIBUTE_NOINLINE NontrivialType(const NontrivialType& other)
      : val_(other.val_) {
    benchmark::DoNotOptimize(*this);
  }

  ABSL_ATTRIBUTE_NOINLINE NontrivialType& operator=(
      const NontrivialType& other) {
    val_ = other.val_;
    benchmark::DoNotOptimize(*this);
    return *this;
  }

  ABSL_ATTRIBUTE_NOINLINE ~NontrivialType() noexcept {
    benchmark::DoNotOptimize(*this);
  }

 private:
  size_t val_;
};
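
// Runs `prepare_vec` over a batch of kBatchSize vectors with the timer
// paused, then times `test_vec` over the same batch. Batching amortizes the
// cost of PauseTiming()/ResumeTiming(), which would otherwise swamp the
// per-vector operation being measured.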
template <typename T, typename PrepareVecFn, typename TestVecFn>
void BatchedBenchmark(benchmark::State& state, PrepareVecFn prepare_vec,
                      TestVecFn test_vec) {
  std::array<InlVec<T>, kBatchSize> vector_batch{};

  while (state.KeepRunningBatch(kBatchSize)) {
    // Prepare batch
    state.PauseTiming();
    for (size_t i = 0; i < kBatchSize; ++i) {
      prepare_vec(vector_batch.data() + i, i);
    }
    benchmark::DoNotOptimize(vector_batch);
    state.ResumeTiming();

    // Test batch
    for (size_t i = 0; i < kBatchSize; ++i) {
      test_vec(vector_batch.data() + i, i);
    }
  }
}
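
// In the BM_ConstructFrom* benchmarks below, prepare_vec ends each vector's
// lifetime with an explicit destructor call, and test_vec placement-news a
// fresh vector into the raw storage, so only construction is timed. Every
// slot is reconstructed before the batch ends, keeping the enclosing
// std::array destructible.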
template <typename T, size_t ToSize>
void BM_ConstructFromSize(benchmark::State& state) {
  using VecT = InlVec<T>;
  auto size = ToSize;
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->~VecT(); },
      /* test_vec = */
      [&](void* ptr, size_t) {
        benchmark::DoNotOptimize(size);
        ::new (ptr) VecT(size);
      });
}
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSize, TrivialType);
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSize, NontrivialType);

template <typename T, size_t ToSize>
void BM_ConstructFromSizeRef(benchmark::State& state) {
  using VecT = InlVec<T>;
  auto size = ToSize;
  auto ref = T();
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->~VecT(); },
      /* test_vec = */
      [&](void* ptr, size_t) {
        benchmark::DoNotOptimize(size);
        benchmark::DoNotOptimize(ref);
        ::new (ptr) VecT(size, ref);
      });
}
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSizeRef, TrivialType);
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSizeRef, NontrivialType);

template <typename T, size_t ToSize>
void BM_ConstructFromRange(benchmark::State& state) {
  using VecT = InlVec<T>;
  std::array<T, ToSize> arr{};
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->~VecT(); },
      /* test_vec = */
      [&](void* ptr, size_t) {
        benchmark::DoNotOptimize(arr);
        ::new (ptr) VecT(arr.begin(), arr.end());
      });
}
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromRange, TrivialType);
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromRange, NontrivialType);

template <typename T, size_t ToSize>
void BM_ConstructFromCopy(benchmark::State& state) {
  using VecT = InlVec<T>;
  VecT other_vec(ToSize);
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */
      [](InlVec<T>* vec, size_t) { vec->~VecT(); },
      /* test_vec = */
      [&](void* ptr, size_t) {
        benchmark::DoNotOptimize(other_vec);
        ::new (ptr) VecT(other_vec);
      });
}
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromCopy, TrivialType);
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromCopy, NontrivialType);

template <typename T, size_t ToSize>
void BM_ConstructFromMove(benchmark::State& state) {
  using VecT = InlVec<T>;
  std::array<VecT, kBatchSize> vector_batch{};
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */
      [&](InlVec<T>* vec, size_t i) {
        vector_batch[i].clear();
        vector_batch[i].resize(ToSize);
        vec->~VecT();
      },
      /* test_vec = */
      [&](void* ptr, size_t i) {
        benchmark::DoNotOptimize(vector_batch[i]);
        ::new (ptr) VecT(std::move(vector_batch[i]));
      });
}
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromMove, TrivialType);
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromMove, NontrivialType);

// Measure cost of copy-constructor+destructor.
void BM_CopyTrivial(benchmark::State& state) {
  const int n = state.range(0);
  InlVec<int64_t> src(n);
  for (auto s : state) {
    InlVec<int64_t> copy(src);
    benchmark::DoNotOptimize(copy);
  }
}
BENCHMARK(BM_CopyTrivial)->Arg(0)->Arg(1)->Arg(kLargeSize);

// Measure cost of copy-constructor+destructor.
void BM_CopyNonTrivial(benchmark::State& state) {
  const int n = state.range(0);
  InlVec<InlVec<int64_t>> src(n);
  for (auto s : state) {
    InlVec<InlVec<int64_t>> copy(src);
    benchmark::DoNotOptimize(copy);
  }
}
BENCHMARK(BM_CopyNonTrivial)->Arg(0)->Arg(1)->Arg(kLargeSize);

template <typename T, size_t FromSize, size_t ToSize>
void BM_AssignSizeRef(benchmark::State& state) {
  auto size = ToSize;
  auto ref = T();
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->resize(FromSize); },
      /* test_vec = */
      [&](InlVec<T>* vec, size_t) {
        benchmark::DoNotOptimize(size);
        benchmark::DoNotOptimize(ref);
        vec->assign(size, ref);
      });
}
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignSizeRef, TrivialType);
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignSizeRef, NontrivialType);

template <typename T, size_t FromSize, size_t ToSize>
void BM_AssignRange(benchmark::State& state) {
  std::array<T, ToSize> arr{};
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->resize(FromSize); },
      /* test_vec = */
      [&](InlVec<T>* vec, size_t) {
        benchmark::DoNotOptimize(arr);
        vec->assign(arr.begin(), arr.end());
      });
}
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignRange, TrivialType);
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignRange, NontrivialType);

template <typename T, size_t FromSize, size_t ToSize>
void BM_AssignFromCopy(benchmark::State& state) {
  InlVec<T> other_vec(ToSize);
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->resize(FromSize); },
      /* test_vec = */
      [&](InlVec<T>* vec, size_t) {
        benchmark::DoNotOptimize(other_vec);
        *vec = other_vec;
      });
}
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignFromCopy, TrivialType);
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignFromCopy, NontrivialType);

template <typename T, size_t FromSize, size_t ToSize>
void BM_AssignFromMove(benchmark::State& state) {
  using VecT = InlVec<T>;
  std::array<VecT, kBatchSize> vector_batch{};
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */
      [&](InlVec<T>* vec, size_t i) {
        vector_batch[i].clear();
        vector_batch[i].resize(ToSize);
        vec->resize(FromSize);
      },
      /* test_vec = */
      [&](InlVec<T>* vec, size_t i) {
        benchmark::DoNotOptimize(vector_batch[i]);
        *vec = std::move(vector_batch[i]);
      });
}
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignFromMove, TrivialType);
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignFromMove, NontrivialType);

template <typename T, size_t FromSize, size_t ToSize>
void BM_ResizeSize(benchmark::State& state) {
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */
      [](InlVec<T>* vec, size_t) {
        vec->clear();
        vec->resize(FromSize);
      },
      /* test_vec = */
      [](InlVec<T>* vec, size_t) { vec->resize(ToSize); });
}
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ResizeSize, TrivialType);
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ResizeSize, NontrivialType);

template <typename T, size_t FromSize, size_t ToSize>
void BM_ResizeSizeRef(benchmark::State& state) {
  auto t = T();
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */
      [](InlVec<T>* vec, size_t) {
        vec->clear();
        vec->resize(FromSize);
      },
      /* test_vec = */
      [&](InlVec<T>* vec, size_t) {
        benchmark::DoNotOptimize(t);
        vec->resize(ToSize, t);
      });
}
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ResizeSizeRef, TrivialType);
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ResizeSizeRef, NontrivialType);

template <typename T, size_t FromSize, size_t ToSize>
void BM_InsertSizeRef(benchmark::State& state) {
  auto t = T();
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */
      [](InlVec<T>* vec, size_t) {
        vec->clear();
        vec->resize(FromSize);
      },
      /* test_vec = */
      [&](InlVec<T>* vec, size_t) {
        benchmark::DoNotOptimize(t);
        auto* pos = vec->data() + (vec->size() / 2);
        vec->insert(pos, t);
      });
}
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_InsertSizeRef, TrivialType);
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_InsertSizeRef, NontrivialType);

template <typename T, size_t FromSize, size_t ToSize>
void BM_InsertRange(benchmark::State& state) {
  InlVec<T> other_vec(ToSize);
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */
      [](InlVec<T>* vec, size_t) {
        vec->clear();
        vec->resize(FromSize);
      },
      /* test_vec = */
      [&](InlVec<T>* vec, size_t) {
        benchmark::DoNotOptimize(other_vec);
        auto* pos = vec->data() + (vec->size() / 2);
        vec->insert(pos, other_vec.begin(), other_vec.end());
      });
}
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_InsertRange, TrivialType);
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_InsertRange, NontrivialType);

template <typename T, size_t FromSize>
void BM_EmplaceBack(benchmark::State& state) {
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */
      [](InlVec<T>* vec, size_t) {
        vec->clear();
        vec->resize(FromSize);
      },
      /* test_vec = */
      [](InlVec<T>* vec, size_t) { vec->emplace_back(); });
}
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EmplaceBack, TrivialType);
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EmplaceBack, NontrivialType);

template <typename T, size_t FromSize>
void BM_PopBack(benchmark::State& state) {
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */
      [](InlVec<T>* vec, size_t) {
        vec->clear();
        vec->resize(FromSize);
      },
      /* test_vec = */
      [](InlVec<T>* vec, size_t) { vec->pop_back(); });
}
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_PopBack, TrivialType);
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_PopBack, NontrivialType);

template <typename T, size_t FromSize>
void BM_EraseOne(benchmark::State& state) {
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */
      [](InlVec<T>* vec, size_t) {
        vec->clear();
        vec->resize(FromSize);
      },
      /* test_vec = */
      [](InlVec<T>* vec, size_t) {
        auto* pos = vec->data() + (vec->size() / 2);
        vec->erase(pos);
      });
}
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EraseOne, TrivialType);
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EraseOne, NontrivialType);

template <typename T, size_t FromSize>
void BM_EraseRange(benchmark::State& state) {
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */
      [](InlVec<T>* vec, size_t) {
        vec->clear();
        vec->resize(FromSize);
      },
      /* test_vec = */
      [](InlVec<T>* vec, size_t) {
        auto* pos = vec->data() + (vec->size() / 2);
        vec->erase(pos, pos + 1);
      });
}
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EraseRange, TrivialType);
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EraseRange, NontrivialType);

template <typename T, size_t FromSize>
void BM_Clear(benchmark::State& state) {
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->resize(FromSize); },
      /* test_vec = */ [](InlVec<T>* vec, size_t) { vec->clear(); });
}
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_Clear, TrivialType);
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_Clear, NontrivialType);

template <typename T, size_t FromSize, size_t ToCapacity>
void BM_Reserve(benchmark::State& state) {
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */
      [](InlVec<T>* vec, size_t) {
        vec->clear();
        vec->resize(FromSize);
      },
      /* test_vec = */
      [](InlVec<T>* vec, size_t) { vec->reserve(ToCapacity); });
}
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_Reserve, TrivialType);
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_Reserve, NontrivialType);

template <typename T, size_t FromCapacity, size_t ToCapacity>
void BM_ShrinkToFit(benchmark::State& state) {
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */
      [](InlVec<T>* vec, size_t) {
        vec->clear();
        vec->resize(ToCapacity);
        vec->reserve(FromCapacity);
      },
      /* test_vec = */ [](InlVec<T>* vec, size_t) { vec->shrink_to_fit(); });
}
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ShrinkToFit, TrivialType);
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ShrinkToFit, NontrivialType);

template <typename T, size_t FromSize, size_t ToSize>
void BM_Swap(benchmark::State& state) {
  using VecT = InlVec<T>;
  std::array<VecT, kBatchSize> vector_batch{};
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */
      [&](InlVec<T>* vec, size_t i) {
        vector_batch[i].clear();
        vector_batch[i].resize(ToSize);
        vec->resize(FromSize);
      },
      /* test_vec = */
      [&](InlVec<T>* vec, size_t i) {
        using std::swap;
        benchmark::DoNotOptimize(vector_batch[i]);
        swap(*vec, vector_batch[i]);
      });
}
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_Swap, TrivialType);
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_Swap, NontrivialType);

}  // namespace
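
// No main() is defined in this translation unit; presumably the build links
// in a Benchmark-provided entry point (e.g. a benchmark_main-style target).
// The build rules are not shown here, so this is an assumption.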