// hashtablez_sampler.cc — implementation of the hashtablez sampling profiler.
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/container/internal/hashtablez_sampler.h"

#include <algorithm>
#include <atomic>
#include <cassert>
#include <cmath>
#include <functional>
#include <limits>

#include "absl/base/attributes.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/container/internal/have_sse.h"
#include "absl/debugging/stacktrace.h"
#include "absl/memory/memory.h"
#include "absl/profiling/internal/exponential_biased.h"
#include "absl/profiling/internal/sample_recorder.h"
#include "absl/synchronization/mutex.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {

// Out-of-line definition required to ODR-use the static constexpr member
// before C++17 inline-variable semantics.
constexpr int HashtablezInfo::kMaxStackDepth;

namespace {
// Master switch: sampling of new tables is off until SetHashtablezEnabled
// flips it on.
ABSL_CONST_INIT std::atomic<bool> g_hashtablez_enabled{
    false
};
// Mean number of insertions between samples; default stride mean is 1 << 10.
ABSL_CONST_INIT std::atomic<int32_t> g_hashtablez_sample_parameter{1 << 10};

#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
// Per-thread generator used by SampleSlow to draw geometrically distributed
// sampling strides.
ABSL_PER_THREAD_TLS_KEYWORD absl::profiling_internal::ExponentialBiased
    g_exponential_biased_generator;
#endif

}  // namespace

#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
// Per-thread countdown until the next sampled table. Starting at 0 forces
// the first decision on each thread through the SampleSlow path.
ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample = 0;
#endif  // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
  44. HashtablezSampler& GlobalHashtablezSampler() {
  45. static auto* sampler = new HashtablezSampler();
  46. return *sampler;
  47. }
// TODO(bradleybear): The comments at this constructors declaration say that the
// fields are not initialized, but this definition does initialize the fields.
// Something needs to be cleaned up.
// Constructing an info slot resets every statistic via PrepareForSampling.
HashtablezInfo::HashtablezInfo() { PrepareForSampling(); }
HashtablezInfo::~HashtablezInfo() = default;
// Resets every statistic to its initial value and captures the creation time
// and stack trace, so a (re)registered slot starts clean.
// NOTE(review): all stores are relaxed — this assumes the sampler publishes
// the slot to readers with sufficient ordering elsewhere; confirm in
// sample_recorder.h.
void HashtablezInfo::PrepareForSampling() {
  capacity.store(0, std::memory_order_relaxed);
  size.store(0, std::memory_order_relaxed);
  num_erases.store(0, std::memory_order_relaxed);
  num_rehashes.store(0, std::memory_order_relaxed);
  max_probe_length.store(0, std::memory_order_relaxed);
  total_probe_length.store(0, std::memory_order_relaxed);
  hashes_bitwise_or.store(0, std::memory_order_relaxed);
  // AND-identity: start with all bits set so the first recorded hash fully
  // determines the running AND.
  hashes_bitwise_and.store(~size_t{}, std::memory_order_relaxed);
  hashes_bitwise_xor.store(0, std::memory_order_relaxed);
  max_reserve.store(0, std::memory_order_relaxed);
  create_time = absl::Now();
  // The inliner makes hardcoded skip_count difficult (especially when combined
  // with LTO). We use the ability to exclude stacks by regex when encoding
  // instead.
  depth = absl::GetStackTrace(stack, HashtablezInfo::kMaxStackDepth,
                              /* skip_count= */ 0);
}
  71. static bool ShouldForceSampling() {
  72. enum ForceState {
  73. kDontForce,
  74. kForce,
  75. kUninitialized
  76. };
  77. ABSL_CONST_INIT static std::atomic<ForceState> global_state{
  78. kUninitialized};
  79. ForceState state = global_state.load(std::memory_order_relaxed);
  80. if (ABSL_PREDICT_TRUE(state == kDontForce)) return false;
  81. if (state == kUninitialized) {
  82. state = ABSL_INTERNAL_C_SYMBOL(AbslContainerInternalSampleEverything)()
  83. ? kForce
  84. : kDontForce;
  85. global_state.store(state, std::memory_order_relaxed);
  86. }
  87. return state == kForce;
  88. }
// Slow path of the sampling decision. Returns a registered HashtablezInfo*
// when this table should be sampled, nullptr otherwise, and writes the
// countdown until the next sampling attempt into *next_sample.
HashtablezInfo* SampleSlow(int64_t* next_sample, size_t inline_element_size) {
  if (ABSL_PREDICT_FALSE(ShouldForceSampling())) {
    // Forced mode: sample this table and come back here on the very next one.
    *next_sample = 1;
    HashtablezInfo* result = GlobalHashtablezSampler().Register();
    result->inline_element_size = inline_element_size;
    return result;
  }
#if !defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
  // Sampling compiled out: park the countdown at max so the fast path never
  // re-enters this function.
  *next_sample = std::numeric_limits<int64_t>::max();
  return nullptr;
#else
  // A negative countdown marks this thread's first pass through here.
  bool first = *next_sample < 0;
  // Draw a geometrically distributed stride with the configured mean.
  *next_sample = g_exponential_biased_generator.GetStride(
      g_hashtablez_sample_parameter.load(std::memory_order_relaxed));
  // Small values of interval are equivalent to just sampling next time.
  ABSL_ASSERT(*next_sample >= 1);
  // g_hashtablez_enabled can be dynamically flipped, we need to set a threshold
  // low enough that we will start sampling in a reasonable time, so we just use
  // the default sampling rate.
  if (!g_hashtablez_enabled.load(std::memory_order_relaxed)) return nullptr;
  // We will only be negative on our first count, so we should just retry in
  // that case.
  if (first) {
    if (ABSL_PREDICT_TRUE(--*next_sample > 0)) return nullptr;
    // Stride exhausted immediately: recurse once to take the sample.
    return SampleSlow(next_sample, inline_element_size);
  }
  HashtablezInfo* result = GlobalHashtablezSampler().Register();
  result->inline_element_size = inline_element_size;
  return result;
#endif
}
// Returns a sampled table's info slot to the global sampler for reuse.
void UnsampleSlow(HashtablezInfo* info) {
  GlobalHashtablezSampler().Unregister(info);
}
  123. void RecordInsertSlow(HashtablezInfo* info, size_t hash,
  124. size_t distance_from_desired) {
  125. // SwissTables probe in groups of 16, so scale this to count items probes and
  126. // not offset from desired.
  127. size_t probe_length = distance_from_desired;
  128. #if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
  129. probe_length /= 16;
  130. #else
  131. probe_length /= 8;
  132. #endif
  133. info->hashes_bitwise_and.fetch_and(hash, std::memory_order_relaxed);
  134. info->hashes_bitwise_or.fetch_or(hash, std::memory_order_relaxed);
  135. info->hashes_bitwise_xor.fetch_xor(hash, std::memory_order_relaxed);
  136. info->max_probe_length.store(
  137. std::max(info->max_probe_length.load(std::memory_order_relaxed),
  138. probe_length),
  139. std::memory_order_relaxed);
  140. info->total_probe_length.fetch_add(probe_length, std::memory_order_relaxed);
  141. info->size.fetch_add(1, std::memory_order_relaxed);
  142. }
// Turns sampling of newly constructed tables on or off. The flag is only
// consulted by SampleSlow, so tables already being sampled are unaffected.
void SetHashtablezEnabled(bool enabled) {
  g_hashtablez_enabled.store(enabled, std::memory_order_release);
}
  146. void SetHashtablezSampleParameter(int32_t rate) {
  147. if (rate > 0) {
  148. g_hashtablez_sample_parameter.store(rate, std::memory_order_release);
  149. } else {
  150. ABSL_RAW_LOG(ERROR, "Invalid hashtablez sample rate: %lld",
  151. static_cast<long long>(rate)); // NOLINT(runtime/int)
  152. }
  153. }
  154. void SetHashtablezMaxSamples(int32_t max) {
  155. if (max > 0) {
  156. GlobalHashtablezSampler().SetMaxSamples(max);
  157. } else {
  158. ABSL_RAW_LOG(ERROR, "Invalid hashtablez max samples: %lld",
  159. static_cast<long long>(max)); // NOLINT(runtime/int)
  160. }
  161. }
  162. } // namespace container_internal
  163. ABSL_NAMESPACE_END
  164. } // namespace absl