author    Peter Dillinger <peterd@fb.com>    2022-12-01 13:18:40 -0800
committer Facebook GitHub Bot <facebook-github-bot@users.noreply.github.com>    2022-12-01 13:18:40 -0800
commit    95bf3021899edef53aa6aba6d3c7172eded3560e (patch)
tree      e6e4fa1d154bca10987f1d8a850ac696a5ac40a0 /memory
parent    be3a62a2e7f70b573c8d977a90650bb217c62aa8 (diff)
Fix use of make_unique in Arena::AllocateNewBlock (#11012)
Summary: The change to `make_unique<char[]>` in https://github.com/facebook/rocksdb/issues/10810 inadvertently started zero-initializing the data in Arena blocks, which could increase memory use because (at least in our implementation) writing every byte force-maps the backing pages. This change goes back to `new char[]` while keeping all the other good parts of https://github.com/facebook/rocksdb/issues/10810.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/11012

Test Plan: unit test added (fails on Linux before fix)

Reviewed By: anand1976
Differential Revision: D41658893
Pulled By: pdillinger
fbshipit-source-id: 267b7dccfadaeeb1be767d43c602a6abb0e71cd0
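[Editor's note] A minimal sketch (not part of the commit) of the initialization difference at the heart of this fix: `std::make_unique<char[]>(n)` value-initializes the array, so every byte is zeroed, while `new char[n]` default-initializes and leaves the bytes untouched. The size constant below is illustrative only.

#include <cstddef>
#include <memory>

constexpr std::size_t kBytes = 2 << 20;  // example size, not from the commit

// Value-initialization: the zeroing writes touch each page, so the OS
// must back the whole block with physical memory immediately.
std::unique_ptr<char[]> zeroed = std::make_unique<char[]>(kBytes);

// Default-initialization: bytes are indeterminate. Pages from a fresh
// mmap can stay unmapped until the caller first writes to them.
std::unique_ptr<char[]> lazy(new char[kBytes]);

Where C++20 is available, `std::make_unique_for_overwrite<char[]>(kBytes)` offers the same default-initialization without a raw `new`; this commit predates that option in RocksDB's supported standard.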
Diffstat (limited to 'memory')
-rw-r--r--    memory/arena.cc         7
-rw-r--r--    memory/arena_test.cc    30
2 files changed, 34 insertions, 3 deletions
diff --git a/memory/arena.cc b/memory/arena.cc
index d42868edf..0a920203d 100644
--- a/memory/arena.cc
+++ b/memory/arena.cc
@@ -143,9 +143,10 @@ char* Arena::AllocateAligned(size_t bytes, size_t huge_page_size,
}
char* Arena::AllocateNewBlock(size_t block_bytes) {
- auto uniq = std::make_unique<char[]>(block_bytes);
- char* block = uniq.get();
- blocks_.push_back(std::move(uniq));
+ // NOTE: std::make_unique zero-initializes the block so is not appropriate
+ // here
+ char* block = new char[block_bytes];
+ blocks_.push_back(std::unique_ptr<char[]>(block));
size_t allocated_size;
#ifdef ROCKSDB_MALLOC_USABLE_SIZE
diff --git a/memory/arena_test.cc b/memory/arena_test.cc
index 30887c23a..21bf7ed62 100644
--- a/memory/arena_test.cc
+++ b/memory/arena_test.cc
@@ -256,6 +256,36 @@ TEST(MmapTest, AllocateLazyZeroed) {
}
}
+TEST_F(ArenaTest, UnmappedAllocation) {
+ // Verify that it's possible to get unmapped pages in large allocations,
+ // for memory efficiency and to ensure we don't accidentally waste time &
+ // space initializing the memory.
+ constexpr size_t kBlockSize = 2U << 20;
+ Arena arena(kBlockSize);
+
+ // The allocator might give us back recycled memory for a while, but
+ // shouldn't last forever.
+ for (int i = 0;; ++i) {
+ char* p = arena.Allocate(kBlockSize);
+
+ // Start counting page faults
+ PopMinorPageFaultCount();
+
+ // Overwrite the whole allocation
+ for (size_t j = 0; j < kBlockSize; ++j) {
+ p[j] = static_cast<char>(j & 255);
+ }
+
+ size_t faults = PopMinorPageFaultCount();
+ if (faults >= kBlockSize * 3 / 4 / port::kPageSize) {
+ // Most of the access generated page faults => GOOD
+ break;
+ }
+ // Should have succeeded after enough tries
+ ASSERT_LT(i, 1000);
+ }
+}
+
} // namespace ROCKSDB_NAMESPACE
int main(int argc, char** argv) {
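
[Editor's note] The test above relies on `PopMinorPageFaultCount()` to detect whether the written pages were freshly mapped. As a hedged sketch (assuming Linux; this is not necessarily RocksDB's actual helper), such a counter can be derived from `getrusage`, which reports the process's cumulative minor page faults:

#include <cstddef>
#include <sys/resource.h>

// Sketch only: returns the number of minor page faults since the last
// call, computed as a delta of the cumulative ru_minflt counter.
std::size_t PopMinorPageFaultCountSketch() {
  static long last_minflt = 0;
  struct rusage usage;
  if (getrusage(RUSAGE_SELF, &usage) != 0) {
    return 0;  // treat failure as "no faults observed"
  }
  long delta = usage.ru_minflt - last_minflt;
  last_minflt = usage.ru_minflt;
  return static_cast<std::size_t>(delta);
}

Assuming a typical 4 KiB page size, the test's threshold `kBlockSize * 3 / 4 / port::kPageSize` works out to (2 << 20) * 3 / 4 / 4096 = 384 faults, i.e. at least three quarters of the block's pages must fault in for the allocation to count as previously unmapped.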