summaryrefslogtreecommitdiff
path: root/db/db_test.cc
diff options
context:
space:
mode:
authorAndrew Kryczka <andrewkr@fb.com>2017-03-02 17:40:24 -0800
committerFacebook Github Bot <facebook-github-bot@users.noreply.github.com>2017-03-02 17:54:15 -0800
commit7c80a6d7d189d4414da7c2126e111ea71cf1504e (patch)
treea999171bf5fffbb7c9e02408bb2d0ab024c801e3 /db/db_test.cc
parent0ad5af42d0459424590616651cff445522fcde68 (diff)
Statistic for how often rate limiter is drained
Summary: This is the metric I plan to use for adaptive rate limiting. The statistics are updated only if the rate limiter is drained by flush or compaction. I believe (but am not certain) that this is the normal case. The Statistics object is passed in RateLimiter::Request() to avoid requiring changes to client code, which would've been necessary if we passed it in the RateLimiter constructor. Closes https://github.com/facebook/rocksdb/pull/1946 Differential Revision: D4646489 Pulled By: ajkr fbshipit-source-id: d8e0161
Diffstat (limited to 'db/db_test.cc')
-rw-r--r--db/db_test.cc18
1 file changed, 18 insertions, 0 deletions
diff --git a/db/db_test.cc b/db/db_test.cc
index 3b3870152..ee280223c 100644
--- a/db/db_test.cc
+++ b/db/db_test.cc
@@ -2781,6 +2781,7 @@ TEST_F(DBTest, RateLimitingTest) {
options.compression = kNoCompression;
options.create_if_missing = true;
options.env = env_;
+ options.statistics = rocksdb::CreateDBStatistics();
options.IncreaseParallelism(4);
DestroyAndReopen(options);
@@ -2797,6 +2798,9 @@ TEST_F(DBTest, RateLimitingTest) {
}
uint64_t elapsed = env_->NowMicros() - start;
double raw_rate = env_->bytes_written_ * 1000000.0 / elapsed;
+ uint64_t rate_limiter_drains =
+ TestGetTickerCount(options, NUMBER_RATE_LIMITER_DRAINS);
+ ASSERT_EQ(0, rate_limiter_drains);
Close();
// # rate limiting with 0.7 x threshold
@@ -2812,8 +2816,15 @@ TEST_F(DBTest, RateLimitingTest) {
Put(RandomString(&rnd, 32), RandomString(&rnd, (1 << 10) + 1), wo));
}
elapsed = env_->NowMicros() - start;
+ rate_limiter_drains =
+ TestGetTickerCount(options, NUMBER_RATE_LIMITER_DRAINS) -
+ rate_limiter_drains;
Close();
ASSERT_EQ(options.rate_limiter->GetTotalBytesThrough(), env_->bytes_written_);
+ // Most intervals should've been drained (interval time is 100ms, elapsed is
+ // micros)
+ ASSERT_GT(rate_limiter_drains, elapsed / 100000 / 2);
+ ASSERT_LE(rate_limiter_drains, elapsed / 100000);
double ratio = env_->bytes_written_ * 1000000 / elapsed / raw_rate;
fprintf(stderr, "write rate ratio = %.2lf, expected 0.7\n", ratio);
ASSERT_TRUE(ratio < 0.8);
@@ -2831,8 +2842,15 @@ TEST_F(DBTest, RateLimitingTest) {
Put(RandomString(&rnd, 32), RandomString(&rnd, (1 << 10) + 1), wo));
}
elapsed = env_->NowMicros() - start;
+ rate_limiter_drains =
+ TestGetTickerCount(options, NUMBER_RATE_LIMITER_DRAINS) -
+ rate_limiter_drains;
Close();
ASSERT_EQ(options.rate_limiter->GetTotalBytesThrough(), env_->bytes_written_);
+ // Most intervals should've been drained (interval time is 100ms, elapsed is
+ // micros)
+ ASSERT_GT(rate_limiter_drains, elapsed / 100000 / 2);
+ ASSERT_LE(rate_limiter_drains, elapsed / 100000);
ratio = env_->bytes_written_ * 1000000 / elapsed / raw_rate;
fprintf(stderr, "write rate ratio = %.2lf, expected 0.5\n", ratio);
ASSERT_LT(ratio, 0.6);