// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "absl/container/internal/hashtablez_sampler.h"

#include <atomic>
#include <limits>
#include <random>

#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/attributes.h"
#include "absl/container/internal/have_sse.h"
#include "absl/synchronization/blocking_counter.h"
#include "absl/synchronization/internal/thread_pool.h"
#include "absl/synchronization/mutex.h"
#include "absl/synchronization/notification.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"

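// Probe lengths are recorded in units of the SwissTable group width: 16 slots
// when SSE2 is available and 8 slots otherwise.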
#if SWISSTABLE_HAVE_SSE2
constexpr int kProbeLength = 16;
#else
constexpr int kProbeLength = 8;
#endif

namespace absl {
namespace container_internal {
class HashtablezInfoHandlePeer {
 public:
  static bool IsSampled(const HashtablezInfoHandle& h) {
    return h.info_ != nullptr;
  }

  static HashtablezInfo* GetInfo(HashtablezInfoHandle* h) { return h->info_; }
};

namespace {
using ::absl::synchronization_internal::ThreadPool;
using ::testing::IsEmpty;
using ::testing::UnorderedElementsAre;

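// Returns the current size of every HashtablezInfo registered with `s`.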
std::vector<size_t> GetSizes(HashtablezSampler* s) {
  std::vector<size_t> res;
  s->Iterate([&](const HashtablezInfo& info) {
    res.push_back(info.size.load(std::memory_order_acquire));
  });
  return res;
}

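// Registers a new info with the sampler and seeds its `size` field.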
HashtablezInfo* Register(HashtablezSampler* s, size_t size) {
  auto* info = s->Register();
  assert(info != nullptr);
  info->size.store(size);
  return info;
}

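// PrepareForSampling() must reset every statistic to its initial value, even
// after the fields have been mutated.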
TEST(HashtablezInfoTest, PrepareForSampling) {
  absl::Time test_start = absl::Now();
  HashtablezInfo info;
  absl::MutexLock l(&info.init_mu);
  info.PrepareForSampling();

  EXPECT_EQ(info.capacity.load(), 0);
  EXPECT_EQ(info.size.load(), 0);
  EXPECT_EQ(info.num_erases.load(), 0);
  EXPECT_EQ(info.max_probe_length.load(), 0);
  EXPECT_EQ(info.total_probe_length.load(), 0);
  EXPECT_EQ(info.hashes_bitwise_or.load(), 0);
  EXPECT_EQ(info.hashes_bitwise_and.load(), ~size_t{});
  EXPECT_GE(info.create_time, test_start);

  info.capacity.store(1, std::memory_order_relaxed);
  info.size.store(1, std::memory_order_relaxed);
  info.num_erases.store(1, std::memory_order_relaxed);
  info.max_probe_length.store(1, std::memory_order_relaxed);
  info.total_probe_length.store(1, std::memory_order_relaxed);
  info.hashes_bitwise_or.store(1, std::memory_order_relaxed);
  info.hashes_bitwise_and.store(1, std::memory_order_relaxed);
  info.create_time = test_start - absl::Hours(20);

  info.PrepareForSampling();
  EXPECT_EQ(info.capacity.load(), 0);
  EXPECT_EQ(info.size.load(), 0);
  EXPECT_EQ(info.num_erases.load(), 0);
  EXPECT_EQ(info.max_probe_length.load(), 0);
  EXPECT_EQ(info.total_probe_length.load(), 0);
  EXPECT_EQ(info.hashes_bitwise_or.load(), 0);
  EXPECT_EQ(info.hashes_bitwise_and.load(), ~size_t{});
  EXPECT_GE(info.create_time, test_start);
}

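// RecordStorageChangedSlow() overwrites both `size` and `capacity`.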
TEST(HashtablezInfoTest, RecordStorageChanged) {
  HashtablezInfo info;
  absl::MutexLock l(&info.init_mu);
  info.PrepareForSampling();
  RecordStorageChangedSlow(&info, 17, 47);
  EXPECT_EQ(info.size.load(), 17);
  EXPECT_EQ(info.capacity.load(), 47);
  RecordStorageChangedSlow(&info, 20, 20);
  EXPECT_EQ(info.size.load(), 20);
  EXPECT_EQ(info.capacity.load(), 20);
}

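// RecordInsertSlow() tracks the maximum probe length (in units of
// kProbeLength) and accumulates the bitwise AND and OR of the inserted hashes.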
TEST(HashtablezInfoTest, RecordInsert) {
  HashtablezInfo info;
  absl::MutexLock l(&info.init_mu);
  info.PrepareForSampling();
  EXPECT_EQ(info.max_probe_length.load(), 0);
  RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength);
  EXPECT_EQ(info.max_probe_length.load(), 6);
  EXPECT_EQ(info.hashes_bitwise_and.load(), 0x0000FF00);
  EXPECT_EQ(info.hashes_bitwise_or.load(), 0x0000FF00);
  RecordInsertSlow(&info, 0x000FF000, 4 * kProbeLength);
  EXPECT_EQ(info.max_probe_length.load(), 6);
  EXPECT_EQ(info.hashes_bitwise_and.load(), 0x0000F000);
  EXPECT_EQ(info.hashes_bitwise_or.load(), 0x000FFF00);
  RecordInsertSlow(&info, 0x00FF0000, 12 * kProbeLength);
  EXPECT_EQ(info.max_probe_length.load(), 12);
  EXPECT_EQ(info.hashes_bitwise_and.load(), 0x00000000);
  EXPECT_EQ(info.hashes_bitwise_or.load(), 0x00FFFF00);
}

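// RecordEraseSlow() decrements `size` and increments `num_erases`.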
TEST(HashtablezInfoTest, RecordErase) {
  HashtablezInfo info;
  absl::MutexLock l(&info.init_mu);
  info.PrepareForSampling();
  EXPECT_EQ(info.num_erases.load(), 0);
  EXPECT_EQ(info.size.load(), 0);
  RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength);
  EXPECT_EQ(info.size.load(), 1);
  RecordEraseSlow(&info);
  EXPECT_EQ(info.size.load(), 0);
  EXPECT_EQ(info.num_erases.load(), 1);
}

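// RecordRehashSlow() replaces `total_probe_length` with the post-rehash value
// and resets `num_erases`; `size` is unchanged.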
TEST(HashtablezInfoTest, RecordRehash) {
  HashtablezInfo info;
  absl::MutexLock l(&info.init_mu);
  info.PrepareForSampling();
  RecordInsertSlow(&info, 0x1, 0);
  RecordInsertSlow(&info, 0x2, kProbeLength);
  RecordInsertSlow(&info, 0x4, kProbeLength);
  RecordInsertSlow(&info, 0x8, 2 * kProbeLength);
  EXPECT_EQ(info.size.load(), 4);
  EXPECT_EQ(info.total_probe_length.load(), 4);

  RecordEraseSlow(&info);
  RecordEraseSlow(&info);
  EXPECT_EQ(info.size.load(), 2);
  EXPECT_EQ(info.total_probe_length.load(), 4);
  EXPECT_EQ(info.num_erases.load(), 2);

  RecordRehashSlow(&info, 3 * kProbeLength);
  EXPECT_EQ(info.size.load(), 2);
  EXPECT_EQ(info.total_probe_length.load(), 3);
  EXPECT_EQ(info.num_erases.load(), 0);
}

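// A direct call to SampleSlow() should always produce a sample and schedule a
// positive next_sample, regardless of the configured sampling period.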
TEST(HashtablezSamplerTest, SmallSampleParameter) {
  SetHashtablezEnabled(true);
  SetHashtablezSampleParameter(100);

  for (int i = 0; i < 1000; ++i) {
    int64_t next_sample = 0;
    HashtablezInfo* sample = SampleSlow(&next_sample);
    EXPECT_GT(next_sample, 0);
    EXPECT_NE(sample, nullptr);
    UnsampleSlow(sample);
  }
}

TEST(HashtablezSamplerTest, LargeSampleParameter) {
  SetHashtablezEnabled(true);
  SetHashtablezSampleParameter(std::numeric_limits<int32_t>::max());

  for (int i = 0; i < 1000; ++i) {
    int64_t next_sample = 0;
    HashtablezInfo* sample = SampleSlow(&next_sample);
    EXPECT_GT(next_sample, 0);
    EXPECT_NE(sample, nullptr);
    UnsampleSlow(sample);
  }
}

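// With a sample parameter of 100, roughly 1% of Sample() calls should return
// a sampled handle.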
TEST(HashtablezSamplerTest, Sample) {
  SetHashtablezEnabled(true);
  SetHashtablezSampleParameter(100);
  int64_t num_sampled = 0;
  int64_t total = 0;
  double sample_rate = 0.0;
  for (int i = 0; i < 1000000; ++i) {
    HashtablezInfoHandle h = Sample();
    ++total;
    if (HashtablezInfoHandlePeer::IsSampled(h)) {
      ++num_sampled;
    }
    sample_rate = static_cast<double>(num_sampled) / total;
    if (0.005 < sample_rate && sample_rate < 0.015) break;
  }
  EXPECT_NEAR(sample_rate, 0.01, 0.005);
}

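// A live HashtablezInfoHandle keeps its info registered with the global
// sampler; replacing the handle releases the info.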
TEST(HashtablezSamplerTest, Handle) {
  auto& sampler = HashtablezSampler::Global();
  HashtablezInfoHandle h(sampler.Register());
  auto* info = HashtablezInfoHandlePeer::GetInfo(&h);
  info->hashes_bitwise_and.store(0x12345678, std::memory_order_relaxed);

  bool found = false;
  sampler.Iterate([&](const HashtablezInfo& h) {
    if (&h == info) {
      EXPECT_EQ(h.hashes_bitwise_and.load(), 0x12345678);
      found = true;
    }
  });
  EXPECT_TRUE(found);

  h = HashtablezInfoHandle();
  found = false;
  sampler.Iterate([&](const HashtablezInfo& h) {
    if (&h == info) {
      // This will only happen if some other thread has resurrected the info
      // the old handle was using.
      if (h.hashes_bitwise_and.load() == 0x12345678) {
        found = true;
      }
    }
  });
  EXPECT_FALSE(found);
}

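// Iterate() reflects registrations, unregistrations, and in-place updates to
// live infos.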
TEST(HashtablezSamplerTest, Registration) {
  HashtablezSampler sampler;
  auto* info1 = Register(&sampler, 1);
  EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(1));

  auto* info2 = Register(&sampler, 2);
  EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(1, 2));
  info1->size.store(3);
  EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(3, 2));

  sampler.Unregister(info1);
  sampler.Unregister(info2);
}

TEST(HashtablezSamplerTest, Unregistration) {
  HashtablezSampler sampler;
  std::vector<HashtablezInfo*> infos;
  for (size_t i = 0; i < 3; ++i) {
    infos.push_back(Register(&sampler, i));
  }
  EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 1, 2));

  sampler.Unregister(infos[1]);
  EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2));

  infos.push_back(Register(&sampler, 3));
  infos.push_back(Register(&sampler, 4));
  EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2, 3, 4));
  sampler.Unregister(infos[3]);
  EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2, 4));

  sampler.Unregister(infos[0]);
  sampler.Unregister(infos[2]);
  sampler.Unregister(infos[4]);
  EXPECT_THAT(GetSizes(&sampler), IsEmpty());
}

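// Hammers Register(), Unregister(), and Iterate() from ten threads at once;
// this test is most useful when run under ThreadSanitizer.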
TEST(HashtablezSamplerTest, MultiThreaded) {
  HashtablezSampler sampler;
  Notification stop;
  ThreadPool pool(10);

  for (int i = 0; i < 10; ++i) {
    pool.Schedule([&sampler, &stop]() {
      std::random_device rd;
      std::mt19937 gen(rd());

      std::vector<HashtablezInfo*> infoz;
      while (!stop.HasBeenNotified()) {
        if (infoz.empty()) {
          infoz.push_back(sampler.Register());
        }
        switch (std::uniform_int_distribution<>(0, 2)(gen)) {
          case 0: {
            infoz.push_back(sampler.Register());
            break;
          }
          case 1: {
            size_t p =
                std::uniform_int_distribution<>(0, infoz.size() - 1)(gen);
            HashtablezInfo* info = infoz[p];
            infoz[p] = infoz.back();
            infoz.pop_back();
            sampler.Unregister(info);
            break;
          }
          case 2: {
            absl::Duration oldest = absl::ZeroDuration();
            sampler.Iterate([&](const HashtablezInfo& info) {
              oldest = std::max(oldest, absl::Now() - info.create_time);
            });
            ASSERT_GE(oldest, absl::ZeroDuration());
            break;
          }
        }
      }
    });
  }
  // The threads will hammer away.  Give it a little bit of time for tsan to
  // spot errors.
  absl::SleepFor(absl::Seconds(3));
  stop.Notify();
}

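// The dispose callback fires for each Unregister() while it is installed and
// stops firing once it is unset.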
TEST(HashtablezSamplerTest, Callback) {
  HashtablezSampler sampler;

  auto* info1 = Register(&sampler, 1);
  auto* info2 = Register(&sampler, 2);

  static const HashtablezInfo* expected;

  auto callback = [](const HashtablezInfo& info) {
    // We can't use `info` outside of this callback because the object will be
    // disposed as soon as we return from here.
    EXPECT_EQ(&info, expected);
  };

  // Set the callback.
  EXPECT_EQ(sampler.SetDisposeCallback(callback), nullptr);
  expected = info1;
  sampler.Unregister(info1);

  // Unset the callback.
  EXPECT_EQ(callback, sampler.SetDisposeCallback(nullptr));
  expected = nullptr;  // no more calls.
  sampler.Unregister(info2);
}

}  // namespace
}  // namespace container_internal
}  // namespace absl