author     Aaron Gao <gzh@fb.com>    2017-05-10 11:53:28 -0700
committer  Aaron Gao <gzh@fb.com>    2017-05-10 14:06:54 -0700
commit     22f277e034c526d6da6c0ffdce28c8c266c1c960 (patch)
tree       96401392a4905c25e8fff08a03d36b0b3d8ff0bb
parent     7e62c5d67a6afccbcc92c38b58c8af93af7814d8 (diff)
fix readampbitmap tests
Summary:
Fix the test failures of ReadAmpBitmap and ReadAmpBitmapLiveInCacheAfterDBClose.
Verified by running ReadAmpBitmapLiveInCacheAfterDBClose individually and by running make check.
Closes https://github.com/facebook/rocksdb/pull/2271

Differential Revision: D5038133
Pulled By: lightmark
fbshipit-source-id: 803cd6f45ccfdd14a9d9473c8af311033e164be8
-rw-r--r--  db/db_test2.cc  135
1 file changed, 73 insertions, 62 deletions
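For readers unfamiliar with the feature this test exercises, here is a minimal standalone sketch of how the read-amp bitmap is enabled and how its counters are read back. It is not part of the commit; it only reuses the options and tickers that appear in the diff below, plus a plain DB::Open, and the function name and database path are made up for illustration.

#include <cstdint>

#include "rocksdb/cache.h"
#include "rocksdb/db.h"
#include "rocksdb/options.h"
#include "rocksdb/statistics.h"
#include "rocksdb/table.h"

// Hypothetical helper: returns the estimated fraction of block bytes loaded
// from disk that were actually useful, based on the read-amp bitmap tickers.
double ReadAmpUsefulFraction() {
  rocksdb::Options options;
  options.create_if_missing = true;
  options.statistics = rocksdb::CreateDBStatistics();

  rocksdb::BlockBasedTableOptions bbto;
  bbto.block_cache = rocksdb::NewLRUCache(1024 * 1024 * 1024);
  // One bitmap bit per 16 bytes of block data; a value of 1 gives byte-exact
  // accounting at a higher memory cost (the two values the test loops over).
  bbto.read_amp_bytes_per_bit = 16;
  options.table_factory.reset(rocksdb::NewBlockBasedTableFactory(bbto));

  rocksdb::DB* db = nullptr;
  rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/readamp_example", &db);
  if (!s.ok()) {
    return 0.0;
  }

  // ... issue some Get() or iterator reads here ...

  uint64_t useful = options.statistics->getTickerCount(
      rocksdb::READ_AMP_ESTIMATE_USEFUL_BYTES);
  uint64_t loaded = options.statistics->getTickerCount(
      rocksdb::READ_AMP_TOTAL_READ_BYTES);
  delete db;
  return loaded == 0 ? 0.0 : static_cast<double>(useful) / loaded;
}

The test below drives exactly this machinery with bytes_per_bit values of 1 and 16, reads every key across two DB reopens, and asserts that the useful-bytes ticker matches the total-read-bytes ticker exactly for bytes_per_bit = 1, and to within 1% for the coarser 16-byte bitmap.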
diff --git a/db/db_test2.cc b/db/db_test2.cc
index ebd32fa71..8c83c0d16 100644
--- a/db/db_test2.cc
+++ b/db/db_test2.cc
@@ -1723,82 +1723,93 @@ TEST_F(DBTest2, ReadAmpBitmapLiveInCacheAfterDBClose) {
// the blocks again regardless of them being already in the cache
return;
}
+ uint32_t bytes_per_bit[2] = {1, 16};
+ for (size_t k = 0; k < 2; k++) {
+ std::shared_ptr<Cache> lru_cache = NewLRUCache(1024 * 1024 * 1024);
+ std::shared_ptr<Statistics> stats = rocksdb::CreateDBStatistics();
- std::shared_ptr<Cache> lru_cache = NewLRUCache(1024 * 1024 * 1024);
- std::shared_ptr<Statistics> stats = rocksdb::CreateDBStatistics();
+ Options options = CurrentOptions();
+ BlockBasedTableOptions bbto;
+ // Disable delta encoding to make it easier to calculate read amplification
+ bbto.use_delta_encoding = false;
+ // Huge block cache to make it easier to calculate read amplification
+ bbto.block_cache = lru_cache;
+ bbto.read_amp_bytes_per_bit = bytes_per_bit[k];
+ options.table_factory.reset(NewBlockBasedTableFactory(bbto));
+ options.statistics = stats;
+ DestroyAndReopen(options);
- Options options = CurrentOptions();
- BlockBasedTableOptions bbto;
- // Disable delta encoding to make it easier to calculate read amplification
- bbto.use_delta_encoding = false;
- // Huge block cache to make it easier to calculate read amplification
- bbto.block_cache = lru_cache;
- bbto.read_amp_bytes_per_bit = 16;
- options.table_factory.reset(NewBlockBasedTableFactory(bbto));
- options.statistics = stats;
- DestroyAndReopen(options);
+ const int kNumEntries = 10000;
- const int kNumEntries = 10000;
+ Random rnd(301);
+ for (int i = 0; i < kNumEntries; i++) {
+ ASSERT_OK(Put(Key(i), RandomString(&rnd, 100)));
+ }
+ ASSERT_OK(Flush());
- Random rnd(301);
- for (int i = 0; i < kNumEntries; i++) {
- ASSERT_OK(Put(Key(i), RandomString(&rnd, 100)));
- }
- ASSERT_OK(Flush());
+ Close();
+ Reopen(options);
- Close();
- Reopen(options);
+ uint64_t total_useful_bytes = 0;
+ std::set<int> read_keys;
+ std::string value;
+ // Iter1: Read half the DB, Read even keys
+ // Key(0), Key(2), Key(4), Key(6), Key(8), ...
+ for (int i = 0; i < kNumEntries; i += 2) {
+ std::string key = Key(i);
+ ASSERT_OK(db_->Get(ReadOptions(), key, &value));
- uint64_t total_useful_bytes = 0;
- std::set<int> read_keys;
- std::string value;
- // Iter1: Read half the DB, Read even keys
- // Key(0), Key(2), Key(4), Key(6), Key(8), ...
- for (int i = 0; i < kNumEntries; i += 2) {
- std::string k = Key(i);
- ASSERT_OK(db_->Get(ReadOptions(), k, &value));
-
- if (read_keys.find(i) == read_keys.end()) {
- auto ik = InternalKey(k, 0, ValueType::kTypeValue);
- total_useful_bytes += GetEncodedEntrySize(ik.size(), value.size());
- read_keys.insert(i);
+ if (read_keys.find(i) == read_keys.end()) {
+ auto internal_key = InternalKey(key, 0, ValueType::kTypeValue);
+ total_useful_bytes +=
+ GetEncodedEntrySize(internal_key.size(), value.size());
+ read_keys.insert(i);
+ }
}
- }
- size_t total_useful_bytes_iter1 =
- options.statistics->getTickerCount(READ_AMP_ESTIMATE_USEFUL_BYTES);
- size_t total_loaded_bytes_iter1 =
- options.statistics->getTickerCount(READ_AMP_TOTAL_READ_BYTES);
+ size_t total_useful_bytes_iter1 =
+ options.statistics->getTickerCount(READ_AMP_ESTIMATE_USEFUL_BYTES);
+ size_t total_loaded_bytes_iter1 =
+ options.statistics->getTickerCount(READ_AMP_TOTAL_READ_BYTES);
- Close();
- std::shared_ptr<Statistics> new_statistics = rocksdb::CreateDBStatistics();
- // Destroy old statistics obj that the blocks in lru_cache are pointing to
- options.statistics.reset();
- // Use the statistics object that we just created
- options.statistics = new_statistics;
- Reopen(options);
+ Close();
+ std::shared_ptr<Statistics> new_statistics = rocksdb::CreateDBStatistics();
+ // Destroy old statistics obj that the blocks in lru_cache are pointing to
+ options.statistics.reset();
+ // Use the statistics object that we just created
+ options.statistics = new_statistics;
+ Reopen(options);
- // Iter2: Read half the DB, Read odd keys
- // Key(1), Key(3), Key(5), Key(7), Key(9), ...
- for (int i = 1; i < kNumEntries; i += 2) {
- std::string k = Key(i);
- ASSERT_OK(db_->Get(ReadOptions(), k, &value));
+ // Iter2: Read half the DB, Read odd keys
+ // Key(1), Key(3), Key(5), Key(7), Key(9), ...
+ for (int i = 1; i < kNumEntries; i += 2) {
+ std::string key = Key(i);
+ ASSERT_OK(db_->Get(ReadOptions(), key, &value));
- if (read_keys.find(i) == read_keys.end()) {
- auto ik = InternalKey(k, 0, ValueType::kTypeValue);
- total_useful_bytes += GetEncodedEntrySize(ik.size(), value.size());
- read_keys.insert(i);
+ if (read_keys.find(i) == read_keys.end()) {
+ auto internal_key = InternalKey(key, 0, ValueType::kTypeValue);
+ total_useful_bytes +=
+ GetEncodedEntrySize(internal_key.size(), value.size());
+ read_keys.insert(i);
+ }
}
- }
- size_t total_useful_bytes_iter2 =
- options.statistics->getTickerCount(READ_AMP_ESTIMATE_USEFUL_BYTES);
- size_t total_loaded_bytes_iter2 =
- options.statistics->getTickerCount(READ_AMP_TOTAL_READ_BYTES);
+ size_t total_useful_bytes_iter2 =
+ options.statistics->getTickerCount(READ_AMP_ESTIMATE_USEFUL_BYTES);
+ size_t total_loaded_bytes_iter2 =
+ options.statistics->getTickerCount(READ_AMP_TOTAL_READ_BYTES);
+
- // We reached read_amp of 100% because we read all the keys in the DB
- ASSERT_EQ(total_useful_bytes_iter1 + total_useful_bytes_iter2,
- total_loaded_bytes_iter1 + total_loaded_bytes_iter2);
+ // Read amp is on average 100% since we read everything that we loaded into memory
+ if (k == 0) {
+ ASSERT_EQ(total_useful_bytes_iter1 + total_useful_bytes_iter2,
+ total_loaded_bytes_iter1 + total_loaded_bytes_iter2);
+ } else {
+ ASSERT_NEAR((total_useful_bytes_iter1 + total_useful_bytes_iter2) * 1.0f /
+ (total_loaded_bytes_iter1 + total_loaded_bytes_iter2),
+ 1, .01);
+ }
+ }
}
#ifndef ROCKSDB_LITE