author    Dirreke <mingyang_ge@163.com>  2024-03-21 21:20:45 +0800
committer Zaidoon Abd Al Hadi <43054535+zaidoon1@users.noreply.github.com>  2024-03-25 03:42:03 -0400
commit    da8cf6eba00597f6e4152395ec4b3fd882e8eae2 (patch)
tree      48b68634b900cc82a4ae76456324ffc3231f7166
parent    0d8804a5d668989fc450205e990204bf72427650 (diff)
Update to RocksDB 9.0.0
-rw-r--r--  Cargo.toml                       |   4
-rw-r--r--  librocksdb-sys/Cargo.toml        |   2
-rw-r--r--  librocksdb-sys/build_version.cc  |   8
m---------  librocksdb-sys/rocksdb           |   0
-rw-r--r--  src/db_options.rs                |  22
-rw-r--r--  src/statistics.rs                | 270
6 files changed, 141 insertions, 165 deletions
diff --git a/Cargo.toml b/Cargo.toml
index 79a5102..e5d664a 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -38,11 +38,11 @@ portable = ["librocksdb-sys/portable"]
[dependencies]
libc = "0.2"
-librocksdb-sys = { path = "librocksdb-sys", version = "0.16.0" }
+librocksdb-sys = { path = "librocksdb-sys", version = "0.17.0" }
serde = { version = "1", features = [ "derive" ], optional = true }
[dev-dependencies]
-trybuild = "1.0"
+trybuild = "<=1.0.89" # trybuild 1.0.90 needs MSRV 1.70
tempfile = "3.1"
pretty_assertions = "1.0"
bincode = "1.3"
diff --git a/librocksdb-sys/Cargo.toml b/librocksdb-sys/Cargo.toml
index e87cec4..2f64b64 100644
--- a/librocksdb-sys/Cargo.toml
+++ b/librocksdb-sys/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "librocksdb-sys"
-version = "0.16.0+8.10.0"
+version = "0.17.0+9.0.0"
edition = "2018"
rust-version = "1.66.0"
authors = ["Karl Hobley <karlhobley10@gmail.com>", "Arkadiy Paronyan <arkadiy@ethcore.io>"]
diff --git a/librocksdb-sys/build_version.cc b/librocksdb-sys/build_version.cc
index f13f2a0..a7e0256 100644
--- a/librocksdb-sys/build_version.cc
+++ b/librocksdb-sys/build_version.cc
@@ -8,17 +8,17 @@
// The build script may replace these values with real values based
// on whether or not GIT is available and the platform settings
-static const std::string rocksdb_build_git_sha = "54d628602706c0c718cf81f87202e0b8f6615faf";
-static const std::string rocksdb_build_git_tag = "rocksdb_build_git_tag:v8.10.0";
+static const std::string rocksdb_build_git_sha = "f4441966592636253fd5ab0bb9ed44fc2697fc53";
+static const std::string rocksdb_build_git_tag = "rocksdb_build_git_tag:v9.0.0";
#define HAS_GIT_CHANGES 0
#if HAS_GIT_CHANGES == 0
// If HAS_GIT_CHANGES is 0, the GIT date is used.
// Use the time the branch/tag was last modified
-static const std::string rocksdb_build_date = "rocksdb_build_date:2023-12-15 13:01:14";
+static const std::string rocksdb_build_date = "rocksdb_build_date:2024-03-11 11:26:24";
#else
// If HAS_GIT_CHANGES is > 0, the branch/tag has modifications.
// Use the time the build was created.
-static const std::string rocksdb_build_date = "rocksdb_build_date:2023-12-15 13:01:14";
+static const std::string rocksdb_build_date = "rocksdb_build_date:2024-03-11 11:26:24";
#endif
std::unordered_map<std::string, ROCKSDB_NAMESPACE::RegistrarFunc> ROCKSDB_NAMESPACE::ObjectRegistry::builtins_ = {};
diff --git a/librocksdb-sys/rocksdb b/librocksdb-sys/rocksdb
-Subproject commit 54d628602706c0c718cf81f87202e0b8f6615faf
+Subproject commit f4441966592636253fd5ab0bb9ed44fc2697fc53
diff --git a/src/db_options.rs b/src/db_options.rs
index 65ea976..2ee25b3 100644
--- a/src/db_options.rs
+++ b/src/db_options.rs
@@ -2849,17 +2849,6 @@ impl Options {
}
}
- /// Specifies the file access pattern once a compaction is started.
- ///
- /// It will be applied to all input files of a compaction.
- ///
- /// Default: Normal
- pub fn set_access_hint_on_compaction_start(&mut self, pattern: AccessHint) {
- unsafe {
- ffi::rocksdb_options_set_access_hint_on_compaction_start(self.inner, pattern as c_int);
- }
- }
-
/// Enable/disable adaptive mutex, which spins in the user space before resorting to kernel.
///
/// This could reduce context switch when the mutex is not
@@ -3986,17 +3975,6 @@ pub enum DBRecoveryMode {
SkipAnyCorruptedRecord = ffi::rocksdb_skip_any_corrupted_records_recovery as isize,
}
-/// File access pattern once a compaction has started
-#[derive(Debug, Copy, Clone, PartialEq, Eq)]
-#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
-#[repr(i32)]
-pub enum AccessHint {
- None = 0,
- Normal,
- Sequential,
- WillNeed,
-}
-
pub struct FifoCompactOptions {
pub(crate) inner: *mut ffi::rocksdb_fifo_compaction_options_t,
}
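RocksDB 9.0.0 removed `rocksdb_options_set_access_hint_on_compaction_start` from the C API, so the wrapper method and the `AccessHint` enum are deleted with no replacement. A minimal migration sketch (the `build_options` helper is hypothetical, shown only to illustrate what downstream code has to drop):

```rust
use rocksdb::Options;

// Hypothetical helper illustrating the 9.0.0 migration.
fn build_options() -> Options {
    let mut opts = Options::default();
    opts.create_if_missing(true);

    // Code written against RocksDB 8.x may also have contained:
    //     opts.set_access_hint_on_compaction_start(AccessHint::Sequential);
    // The underlying option is gone in 9.0.0, so the call and the
    // `AccessHint` import are simply removed; there is no replacement.

    opts
}
```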
diff --git a/src/statistics.rs b/src/statistics.rs
index cfdc70f..d79024e 100644
--- a/src/statistics.rs
+++ b/src/statistics.rs
@@ -146,6 +146,42 @@ iterable_named_enum! {
/// # of bytes written into cache.
BlockCacheBytesWrite("rocksdb.block.cache.bytes.write"),
+ BlockCacheCompressionDictMiss("rocksdb.block.cache.compression.dict.miss"),
+ BlockCacheCompressionDictHit("rocksdb.block.cache.compression.dict.hit"),
+ BlockCacheCompressionDictAdd("rocksdb.block.cache.compression.dict.add"),
+ BlockCacheCompressionDictBytesInsert("rocksdb.block.cache.compression.dict.bytes.insert"),
+
+ /// # of blocks redundantly inserted into block cache.
+ /// REQUIRES: BlockCacheAddRedundant <= BlockCacheAdd
+ BlockCacheAddRedundant("rocksdb.block.cache.add.redundant"),
+ /// # of index blocks redundantly inserted into block cache.
+ /// REQUIRES: BlockCacheIndexAddRedundant <= BlockCacheIndexAdd
+ BlockCacheIndexAddRedundant("rocksdb.block.cache.index.add.redundant"),
+ /// # of filter blocks redundantly inserted into block cache.
+ /// REQUIRES: BlockCacheFilterAddRedundant <= BlockCacheFilterAdd
+ BlockCacheFilterAddRedundant("rocksdb.block.cache.filter.add.redundant"),
+ /// # of data blocks redundantly inserted into block cache.
+ /// REQUIRES: BlockCacheDataAddRedundant <= BlockCacheDataAdd
+ BlockCacheDataAddRedundant("rocksdb.block.cache.data.add.redundant"),
+ /// # of dict blocks redundantly inserted into block cache.
+ /// REQUIRES: BlockCacheCompressionDictAddRedundant
+ /// <= BlockCacheCompressionDictAdd
+ BlockCacheCompressionDictAddRedundant("rocksdb.block.cache.compression.dict.add.redundant"),
+
+ /// Secondary cache statistics
+ SecondaryCacheHits("rocksdb.secondary.cache.hits"),
+
+ /// Fine grained secondary cache stats
+ SecondaryCacheFilterHits("rocksdb.secondary.cache.filter.hits"),
+ SecondaryCacheIndexHits("rocksdb.secondary.cache.index.hits"),
+ SecondaryCacheDataHits("rocksdb.secondary.cache.data.hits"),
+
+ /// Compressed secondary cache related stats
+ CompressedSecondaryCacheDummyHits("rocksdb.compressed.secondary.cache.dummy.hits"),
+ CompressedSecondaryCacheHits("rocksdb.compressed.secondary.cache.hits"),
+ CompressedSecondaryCachePromotions("rocksdb.compressed.secondary.cache.promotions"),
+ CompressedSecondaryCachePromotionSkips("rocksdb.compressed.secondary.cache.promotion.skips"),
+
/// # of times bloom filter has avoided file reads, i.e., negatives.
BloomFilterUseful("rocksdb.bloom.filter.useful"),
/// # of times bloom FullFilter has not avoided the reads.
@@ -153,6 +189,16 @@ iterable_named_enum! {
/// # of times bloom FullFilter has not avoided the reads and data actually
/// exist.
BloomFilterFullTruePositive("rocksdb.bloom.filter.full.true.positive"),
+ /// Prefix filter stats when used for point lookups (Get / MultiGet).
+ /// (For prefix filter stats on iterators, see *_LEVEL_Seek_*.)
+ /// Checked: filter was queried
+ BloomFilterPrefixChecked("rocksdb.bloom.filter.prefix.checked"),
+ /// Useful: filter returned false so prevented accessing data+index blocks
+ BloomFilterPrefixUseful("rocksdb.bloom.filter.prefix.useful"),
+ /// True positive: found a key matching the point query. When another key
+ /// with the same prefix matches, it is considered a false positive by
+ /// these statistics even though the filter returned a true positive.
+ BloomFilterPrefixTruePositive("rocksdb.bloom.filter.prefix.true.positive"),
/// # persistent cache hit
PersistentCacheHit("rocksdb.persistent.cache.hit"),
@@ -176,21 +222,21 @@ iterable_named_enum! {
/// # of Get() queries served by L2 and up
GetHitL2AndUp("rocksdb.l2andup.hit"),
- /**
- * Compaction_KeyDrop* count the reasons for key drop during compaction
- * There are 4 reasons currently.
- */
- CompactionKeyDropNewerEntry("rocksdb.compaction.key.drop.new"),
+ ///
+ /// Compaction_KeyDrop* count the reasons for key drop during compaction
+ /// There are 4 reasons currently.
+ ///
/// key was written with a newer value.
/// Also includes keys dropped for range del.
- CompactionKeyDropObsolete("rocksdb.compaction.key.drop.obsolete"),
+ CompactionKeyDropNewerEntry("rocksdb.compaction.key.drop.new"),
/// The key is obsolete.
- CompactionKeyDropRangeDel("rocksdb.compaction.key.drop.range_del"),
+ CompactionKeyDropObsolete("rocksdb.compaction.key.drop.obsolete"),
/// key was covered by a range tombstone.
- CompactionKeyDropUser("rocksdb.compaction.key.drop.user"),
+ CompactionKeyDropRangeDel("rocksdb.compaction.key.drop.range_del"),
/// user compaction function has dropped the key.
- CompactionRangeDelDropObsolete("rocksdb.compaction.range_del.drop.obsolete"),
+ CompactionKeyDropUser("rocksdb.compaction.key.drop.user"),
/// all keys in range were deleted.
+ CompactionRangeDelDropObsolete("rocksdb.compaction.range_del.drop.obsolete"),
/// Deletions obsoleted before bottom level due to file gap optimization.
CompactionOptimizedDelDropObsolete("rocksdb.compaction.optimized.del.drop.obsolete"),
/// If a compaction was canceled in sfm to prevent ENOSPC
@@ -221,6 +267,17 @@ iterable_named_enum! {
/// The number of uncompressed bytes read from an iterator.
/// Includes size of key and value.
IterBytesRead("rocksdb.db.iter.bytes.read"),
+ /// Number of internal keys skipped by Iterator
+ NumberIterSkip("rocksdb.number.iter.skip"),
+ /// Number of times we had to reseek inside an iteration to skip
+ /// over large number of keys with same userkey.
+ NumberOfReseeksInIteration("rocksdb.number.reseeks.iteration"),
+
+ /// number of iterators created
+ NoIteratorCreated("rocksdb.num.iterator.created"),
+ /// number of iterators deleted
+ NoIteratorDeleted("rocksdb.num.iterator.deleted"),
+
NoFileOpens("rocksdb.no.file.opens"),
NoFileErrors("rocksdb.no.file.errors"),
/// Writer has to wait for compaction or flush to finish.
@@ -233,24 +290,13 @@ iterable_named_enum! {
NumberMultigetCalls("rocksdb.number.multiget.get"),
NumberMultigetKeysRead("rocksdb.number.multiget.keys.read"),
NumberMultigetBytesRead("rocksdb.number.multiget.bytes.read"),
+ /// Number of keys actually found in MultiGet calls (vs number requested by
+ /// caller)
+ /// NumberMultigetKeys_Read gives the number requested by caller
+ NumberMultigetKeysFound("rocksdb.number.multiget.keys.found"),
NumberMergeFailures("rocksdb.number.merge.failures"),
- /// Prefix filter stats when used for point lookups (Get / MultiGet).
- /// (For prefix filter stats on iterators, see *_LEVEL_Seek_*.)
- /// Checked: filter was queried
- BloomFilterPrefixChecked("rocksdb.bloom.filter.prefix.checked"),
- /// Useful: filter returned false so prevented accessing data+index blocks
- BloomFilterPrefixUseful("rocksdb.bloom.filter.prefix.useful"),
- /// True positive: found a key matching the point query. When another key
- /// with the same prefix matches, it is considered a false positive by
- /// these statistics even though the filter returned a true positive.
- BloomFilterPrefixTruePositive("rocksdb.bloom.filter.prefix.true.positive"),
-
- /// Number of times we had to reseek inside an iteration to skip
- /// over large number of keys with same userkey.
- NumberOfReseeksInIteration("rocksdb.number.reseeks.iteration"),
-
/// Record the number of calls to GetUpdatesSince. Useful to keep track of
/// transaction log iterator refreshes
GetUpdatesSinceCalls("rocksdb.getupdatessince.calls"),
@@ -262,16 +308,17 @@ iterable_named_enum! {
/// Writes can be processed by requesting thread or by the thread at the
/// head of the writers queue.
WriteDoneBySelf("rocksdb.write.self"),
- WriteDoneByOther("rocksdb.write.other"),
/// Equivalent to writes done for others
- WriteWithWal("rocksdb.write.wal"),
+ WriteDoneByOther("rocksdb.write.other"),
/// Number of Write calls that request WAL
- CompactReadBytes("rocksdb.compact.read.bytes"),
+ WriteWithWal("rocksdb.write.wal"),
/// Bytes read during compaction
- CompactWriteBytes("rocksdb.compact.write.bytes"),
+ CompactReadBytes("rocksdb.compact.read.bytes"),
/// Bytes written during compaction
- FlushWriteBytes("rocksdb.flush.write.bytes"),
+ CompactWriteBytes("rocksdb.compact.write.bytes"),
/// Bytes written during flush
+ FlushWriteBytes("rocksdb.flush.write.bytes"),
+
/// Compaction read and write statistics broken down by CompactionReason
CompactReadBytesMarked("rocksdb.compact.read.marked.bytes"),
@@ -292,8 +339,35 @@ iterable_named_enum! {
NumberBlockCompressed("rocksdb.number.block.compressed"),
NumberBlockDecompressed("rocksdb.number.block.decompressed"),
- /// DEPRECATED / unused (see NumberBlockCompression_*)
- NumberBlockNotCompressed("rocksdb.number.block.not_compressed"),
+ /// Number of input bytes (uncompressed) to compression for SST blocks that
+ /// are stored compressed.
+ BytesCompressedFrom("rocksdb.bytes.compressed.from"),
+ /// Number of output bytes (compressed) from compression for SST blocks that
+ /// are stored compressed.
+ BytesCompressedTo("rocksdb.bytes.compressed.to"),
+ /// Number of uncompressed bytes for SST blocks that are stored uncompressed
+ /// because compression type is kNoCompression, or some error case caused
+ /// compression not to run or produce an output. Index blocks are only counted
+ /// if enable_index_compression is true.
+ BytesCompressionBypassed("rocksdb.bytes.compression_bypassed"),
+ /// Number of input bytes (uncompressed) to compression for SST blocks that
+ /// are stored uncompressed because the compression result was rejected,
+ /// either because the ratio was not acceptable (see
+ /// CompressionOptions::max_compressed_bytes_per_kb) or found invalid by the
+ /// `verify_compression` option.
+ BytesCompressionRejected("rocksdb.bytes.compression.rejected"),
+
+ /// Like BytesCompressionBypassed but counting number of blocks
+ NumberBlockCompressionBypassed("rocksdb.number.block_compression_bypassed"),
+ /// Like BytesCompressionRejected but counting number of blocks
+ NumberBlockCompressionRejected("rocksdb.number.block_compression_rejected"),
+
+ /// Number of input bytes (compressed) to decompression in reading compressed
+ /// SST blocks from storage.
+ BytesDecompressedFrom("rocksdb.bytes.decompressed.from"),
+ /// Number of output bytes (uncompressed) from decompression in reading
+ /// compressed SST blocks from storage.
+ BytesDecompressedTo("rocksdb.bytes.decompressed.to"),
/// Tickers that record cumulative time.
MergeOperationTotalTime("rocksdb.merge.operation.time.nanos"),
@@ -309,17 +383,15 @@ iterable_named_enum! {
/// (ReadAmpTotalReadBytes / ReadAmpEstimateUsefulBytes)
///
/// REQUIRES: ReadOptions::read_amp_bytes_per_bit to be enabled
- ReadAmpEstimateUsefulBytes("rocksdb.read.amp.estimate.useful.bytes"),
/// Estimate of total bytes actually used.
- ReadAmpTotalReadBytes("rocksdb.read.amp.total.read.bytes"),
+ ReadAmpEstimateUsefulBytes("rocksdb.read.amp.estimate.useful.bytes"),
/// Total size of loaded data blocks.
+ ReadAmpTotalReadBytes("rocksdb.read.amp.total.read.bytes"),
+
/// Number of refill intervals where rate limiter's bytes are fully consumed.
NumberRateLimiterDrains("rocksdb.number.rate_limiter.drains"),
- /// Number of internal keys skipped by Iterator
- NumberIterSkip("rocksdb.number.iter.skip"),
-
/// BlobDB specific stats
/// # of Put/PutTtl/PutUntil to BlobDB. Only applicable to legacy BlobDB.
BlobDbNumPut("rocksdb.blobdb.num.put"),
@@ -398,6 +470,20 @@ iterable_named_enum! {
/// applicable to legacy BlobDB.
BlobDbFifoBytesEvicted("rocksdb.blobdb.fifo.bytes.evicted"),
+ /// Integrated BlobDB specific stats
+ /// # of times cache miss when accessing blob from blob cache.
+ BlobDbCacheMiss("rocksdb.blobdb.cache.miss"),
+ /// # of times cache hit when accessing blob from blob cache.
+ BlobDbCacheHit("rocksdb.blobdb.cache.hit"),
+ /// # of data blocks added to blob cache.
+ BlobDbCacheAdd("rocksdb.blobdb.cache.add"),
+ /// # of failures when adding blobs to blob cache.
+ BlobDbCacheAddFailures("rocksdb.blobdb.cache.add.failures"),
+ /// # of bytes read from blob cache.
+ BlobDbCacheBytesRead("rocksdb.blobdb.cache.bytes.read"),
+ /// # of bytes written into blob cache.
+ BlobDbCacheBytesWrite("rocksdb.blobdb.cache.bytes.write"),
+
/// These counters indicate a performance issue in WritePrepared transactions.
/// We should not see them ticking much.
/// # of times prepare_mutex_ is acquired in the fast path.
@@ -411,37 +497,6 @@ iterable_named_enum! {
/// # of times ::Get returned TryAgain due to expired snapshot seq
TxnGetTryAgain("rocksdb.txn.get.tryagain"),
- /// Number of keys actually found in MultiGet calls (vs number requested by
- /// caller)
- /// NumberMultigetKeys_Read gives the number requested by caller
- NumberMultigetKeysFound("rocksdb.number.multiget.keys.found"),
-
- NoIteratorCreated("rocksdb.num.iterator.created"),
- /// number of iterators created
- NoIteratorDeleted("rocksdb.num.iterator.deleted"),
- /// number of iterators deleted
- BlockCacheCompressionDictMiss("rocksdb.block.cache.compression.dict.miss"),
- BlockCacheCompressionDictHit("rocksdb.block.cache.compression.dict.hit"),
- BlockCacheCompressionDictAdd("rocksdb.block.cache.compression.dict.add"),
- BlockCacheCompressionDictBytesInsert("rocksdb.block.cache.compression.dict.bytes.insert"),
-
- /// # of blocks redundantly inserted into block cache.
- /// REQUIRES: BlockCacheAddRedundant <= BlockCacheAdd
- BlockCacheAddRedundant("rocksdb.block.cache.add.redundant"),
- /// # of index blocks redundantly inserted into block cache.
- /// REQUIRES: BlockCacheIndexAddRedundant <= BlockCacheIndexAdd
- BlockCacheIndexAddRedundant("rocksdb.block.cache.index.add.redundant"),
- /// # of filter blocks redundantly inserted into block cache.
- /// REQUIRES: BlockCacheFilterAddRedundant <= BlockCacheFilterAdd
- BlockCacheFilterAddRedundant("rocksdb.block.cache.filter.add.redundant"),
- /// # of data blocks redundantly inserted into block cache.
- /// REQUIRES: BlockCacheDataAddRedundant <= BlockCacheDataAdd
- BlockCacheDataAddRedundant("rocksdb.block.cache.data.add.redundant"),
- /// # of dict blocks redundantly inserted into block cache.
- /// REQUIRES: BlockCacheCompressionDictAddRedundant
- /// <= BlockCacheCompressionDictAdd
- BlockCacheCompressionDictAddRedundant("rocksdb.block.cache.compression.dict.add.redundant"),
-
/// # of files marked as trash by sst file manager and will be deleted
/// later by background thread.
FilesMarkedTrash("rocksdb.files.marked.trash"),
@@ -453,14 +508,9 @@ iterable_named_enum! {
/// The counters for error handler. Note that bg_io_error is the subset of
/// bg_error and bg_retryable_io_error is the subset of bg_io_error.
- /// The misspelled versions are deprecated and only kept for compatibility.
- /// ToDO: remove the misspelled tickers in the next major release.
ErrorHandlerBgErrorCount("rocksdb.error.handler.bg.error.count"),
- ErrorHandlerBgErrorCountMisspelled("rocksdb.error.handler.bg.errro.count"),
ErrorHandlerBgIoErrorCount("rocksdb.error.handler.bg.io.error.count"),
- ErrorHandlerBgIoErrorCountMisspelled("rocksdb.error.handler.bg.io.errro.count"),
ErrorHandlerBgRetryableIoErrorCount("rocksdb.error.handler.bg.retryable.io.error.count"),
- ErrorHandlerBgRetryableIoErrorCountMisspelled("rocksdb.error.handler.bg.retryable.io.errro.count"),
ErrorHandlerAutoResumeCount("rocksdb.error.handler.autoresume.count"),
ErrorHandlerAutoResumeRetryTotalCount("rocksdb.error.handler.autoresume.retry.total.count"),
ErrorHandlerAutoResumeSuccessCount("rocksdb.error.handler.autoresume.success.count"),
@@ -471,9 +521,6 @@ iterable_named_enum! {
/// Outdated bytes of data present on memtable at flush time.
MemtableGarbageBytesAtFlush("rocksdb.memtable.garbage.bytes.at.flush"),
- /// Secondary cache statistics
- SecondaryCacheHits("rocksdb.secondary.cache.hits"),
-
/// Bytes read by `VerifyChecksum()` and `VerifyFileChecksums()` APIs.
VerifyChecksumReadBytes("rocksdb.verify_checksum.read.bytes"),
@@ -534,30 +581,11 @@ iterable_named_enum! {
MultigetCoroutineCount("rocksdb.multiget.coroutine.count"),
- /// Integrated BlobDB specific stats
- /// # of times cache miss when accessing blob from blob cache.
- BlobDbCacheMiss("rocksdb.blobdb.cache.miss"),
- /// # of times cache hit when accessing blob from blob cache.
- BlobDbCacheHit("rocksdb.blobdb.cache.hit"),
- /// # of data blocks added to blob cache.
- BlobDbCacheAdd("rocksdb.blobdb.cache.add"),
- /// # of failures when adding blobs to blob cache.
- BlobDbCacheAddFailures("rocksdb.blobdb.cache.add.failures"),
- /// # of bytes read from blob cache.
- BlobDbCacheBytesRead("rocksdb.blobdb.cache.bytes.read"),
- /// # of bytes written into blob cache.
- BlobDbCacheBytesWrite("rocksdb.blobdb.cache.bytes.write"),
-
/// Time spent in the ReadAsync file system call
ReadAsyncMicros("rocksdb.read.async.micros"),
/// Number of errors returned to the async read callback
AsyncReadErrorCount("rocksdb.async.read.error.count"),
- /// Fine grained secondary cache stats
- SecondaryCacheFilterHits("rocksdb.secondary.cache.filter.hits"),
- SecondaryCacheIndexHits("rocksdb.secondary.cache.index.hits"),
- SecondaryCacheDataHits("rocksdb.secondary.cache.data.hits"),
-
/// Number of lookup into the prefetched tail (see
/// `TableOpenPrefetchTailReadBytes`)
/// that can't find its data for table open
@@ -573,36 +601,6 @@ iterable_named_enum! {
/// # of times timestamps can successfully help skip the table access
TimestampFilterTableFiltered("rocksdb.timestamp.filter.table.filtered"),
- /// Number of input bytes (uncompressed) to compression for SST blocks that
- /// are stored compressed.
- BytesCompressedFrom("rocksdb.bytes.compressed.from"),
- /// Number of output bytes (compressed) from compression for SST blocks that
- /// are stored compressed.
- BytesCompressedTo("rocksdb.bytes.compressed.to"),
- /// Number of uncompressed bytes for SST blocks that are stored uncompressed
- /// because compression type is kNoCompression, or some error case caused
- /// compression not to run or produce an output. Index blocks are only counted
- /// if enable_index_compression is true.
- BytesCompressionBypassed("rocksdb.bytes.compression_bypassed"),
- /// Number of input bytes (uncompressed) to compression for SST blocks that
- /// are stored uncompressed because the compression result was rejected,
- /// either because the ratio was not acceptable (see
- /// CompressionOptions::max_compressed_bytes_per_kb) or found invalid by the
- /// `verify_compression` option.
- BytesCompressionRejected("rocksdb.bytes.compression.rejected"),
-
- /// Like BytesCompressionBypassed but counting number of blocks
- NumberBlockCompressionBypassed("rocksdb.number.block_compression_bypassed"),
- /// Like BytesCompressionRejected but counting number of blocks
- NumberBlockCompressionRejected("rocksdb.number.block_compression_rejected"),
-
- /// Number of input bytes (compressed) to decompression in reading compressed
- /// SST blocks from storage.
- BytesDecompressedFrom("rocksdb.bytes.decompressed.from"),
- /// Number of output bytes (uncompressed) from decompression in reading
- /// compressed SST blocks from storage.
- BytesDecompressedTo("rocksdb.bytes.decompressed.to"),
-
/// Number of times readahead is trimmed during scans when
/// ReadOptions.auto_readahead_size is set.
ReadAheadTrimmed("rocksdb.readahead.trimmed"),
@@ -619,12 +617,6 @@ iterable_named_enum! {
/// Number of FS reads avoided due to scan prefetching
PrefetchHits("rocksdb.prefetch.hits"),
-
- /// Compressed secondary cache related stats
- CompressedSecondaryCacheDummyHits("rocksdb.compressed.secondary.cache.dummy.hits"),
- CompressedSecondaryCacheHits("rocksdb.compressed.secondary.cache.hits"),
- CompressedSecondaryCachePromotions("rocksdb.compressed.secondary.cache.promotions"),
- CompressedSecondaryCachePromotionSkips("rocksdb.compressed.secondary.cache.promotion.skips"),
}
}
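Most of the churn above is reordering: variants move so the enum matches RocksDB 9.0.0's ticker order, while names and strings stay the same (the exceptions are the dropped misspelled error-handler tickers and the removed `NumberBlockNotCompressed`). A short sketch of reading these counters, assuming the crate's `enable_statistics` and `get_ticker_count` helpers on `Options` (verify the exact signatures against the crate docs):

```rust
use rocksdb::{statistics::Ticker, Options, DB};

fn main() {
    let mut opts = Options::default();
    opts.create_if_missing(true);
    opts.enable_statistics(); // collect ticker counts for this instance

    let db = DB::open(&opts, "/tmp/rocksdb-ticker-demo").unwrap();
    db.put(b"key", b"value").unwrap();
    let _ = db.get(b"key").unwrap();

    // `iter()` and the Display impl come from the `iterable_named_enum!`
    // macro this file is built on (see the sanity_checks test below).
    for ticker in Ticker::iter() {
        let count = opts.get_ticker_count(ticker);
        if count > 0 {
            println!("{ticker}: {count}");
        }
    }
}
```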
@@ -664,16 +656,22 @@ iterable_named_enum! {
FileReadDbIteratorMicros("rocksdb.file.read.db.iterator.micros"),
FileReadVerifyDbChecksumMicros("rocksdb.file.read.verify.db.checksum.micros"),
FileReadVerifyFileChecksumsMicros("rocksdb.file.read.verify.file.checksums.micros"),
+
+ // Time spent in writing SST files
+ SstWriteMicros("rocksdb.sst.write.micros"),
+ // Time spent in writing SST table (currently only block-based table) or blob
+ // file for flush, compaction or db open
+ FileWriteFlushMicros("rocksdb.file.write.flush.micros"),
+ FileWriteCompactionMicros("rocksdb.file.write.compaction.micros"),
+ FileWriteDbOpenMicros("rocksdb.file.write.db.open.micros"),
+
/// The number of subcompactions actually scheduled during a compaction
NumSubcompactionsScheduled("rocksdb.num.subcompactions.scheduled"),
/// Value size distribution in each operation
BytesPerRead("rocksdb.bytes.per.read"),
BytesPerWrite("rocksdb.bytes.per.write"),
BytesPerMultiget("rocksdb.bytes.per.multiget"),
- BytesCompressed("rocksdb.bytes.compressed"),
- /// DEPRECATED / unused (see BytesCompressed{From,To})
- BytesDecompressed("rocksdb.bytes.decompressed"),
- /// DEPRECATED / unused (see BytesDecompressed{From,To})
+
CompressionTimesNanos("rocksdb.compression.times.nanos"),
DecompressionTimesNanos("rocksdb.decompression.times.nanos"),
/// Number of merge operands passed to the merge operator in user read
@@ -803,6 +801,6 @@ fn sanity_checks() {
assert_eq!(want, Ticker::BlockCacheIndexMiss.to_string());
// assert enum lengths
- assert_eq!(Ticker::iter().count(), 215 /* TICKER_ENUM_MAX */);
- assert_eq!(Histogram::iter().count(), 60 /* HISTOGRAM_ENUM_MAX */);
+ assert_eq!(Ticker::iter().count(), 211 /* TICKER_ENUM_MAX */);
+ assert_eq!(Histogram::iter().count(), 62 /* HISTOGRAM_ENUM_MAX */);
}
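The histogram count rises from 60 to 62 because `SstWriteMicros` and the three per-phase file-write histograms come in while the deprecated `BytesCompressed`/`BytesDecompressed` go out; the ticker count drops from 215 to 211 as the three misspelled error-handler tickers and `NumberBlockNotCompressed` are removed. A hedged sketch of reading one of the new histograms, assuming a `get_histogram_data` helper and `HistogramData` accessors shaped like the C API's `rocksdb_statistics_histogram_data_*` getters:

```rust
use rocksdb::{statistics::Histogram, Options, DB};

fn main() {
    let mut opts = Options::default();
    opts.create_if_missing(true);
    opts.enable_statistics();

    let db = DB::open(&opts, "/tmp/rocksdb-histogram-demo").unwrap();
    for i in 0..1000u32 {
        db.put(i.to_be_bytes(), b"payload").unwrap();
    }
    db.flush().unwrap(); // force an SST write so SstWriteMicros records data

    // Accessor names below are assumptions modeled on the C API's
    // rocksdb_statistics_histogram_data_* getters; check the crate docs.
    let data = opts.get_histogram_data(Histogram::SstWriteMicros);
    println!("SST write p50: {} us, p99: {} us", data.median(), data.p99());
}
```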