author     Guido Tagliavini Ponce <tagliavini@fb.com>  2022-06-01 18:00:28 -0700
committer  Facebook GitHub Bot <facebook-github-bot@users.noreply.github.com>  2022-06-01 18:00:28 -0700
commit     b4d0e041d05b445a914a56bfe5459ebce77486c6 (patch)
tree       8b27413948b354e099f94bfb574f04a0a857699e /tools
parent     45b1c788c492caecf556f57433ff670cb7f8385e (diff)
Add support for FastLRUCache in stress and crash tests. (#10081)
Summary: Stress tests can run with the experimental FastLRUCache. Crash tests randomly choose between LRUCache and FastLRUCache. Since only LRUCache supports a secondary cache, we validate the `--secondary_cache_uri` and `--cache_type` flags together: when `--secondary_cache_uri` is set, `--cache_type` is forced to `lru_cache`.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/10081

Test Plan:
- To test that FastLRUCache is used and the stress test runs successfully, run `make -j24 CRASH_TEST_EXT_ARGS=--duration=960 blackbox_crash_test_with_atomic_flush`. The cache type should sometimes be `fast_lru_cache`.
- To test the flag validation, run `make -j24 CRASH_TEST_EXT_ARGS="--duration=960 --secondary_cache_uri=x" blackbox_crash_test_with_atomic_flush` multiple times. The test will always be aborted (which is okay). Check that the cache type is always `lru_cache`.

Reviewed By: anand1976

Differential Revision: D36839908

Pulled By: guidotag

fbshipit-source-id: ebcdfdcd12ec04c96c09ae5b9c9d1e613bdd1725
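The summary describes two behaviors: the random choice between `lru_cache` and `fast_lru_cache`, and the sanitization step that forces `lru_cache` whenever a secondary cache URI is present. The following is a minimal standalone sketch of that behavior, not the actual script: the trimmed-down `default_params` and `finalize_and_sanitize` below are reduced stand-ins for the real definitions in tools/db_crashtest.py shown in the diff.

import random

# Trimmed-down stand-ins for default_params and finalize_and_sanitize
# in tools/db_crashtest.py; illustrative only.
default_params = {
    # Randomly exercise the experimental FastLRUCache alongside LRUCache.
    "cache_type": lambda: random.choice(["fast_lru_cache", "lru_cache"]),
    # Empty unless a secondary cache URI is supplied.
    "secondary_cache_uri": "",
}

def finalize_and_sanitize(src_params):
    dest_params = dict(src_params)
    if dest_params["secondary_cache_uri"] != "":
        # Only LRUCache supports a secondary cache, so pin the cache type.
        dest_params["cache_type"] = "lru_cache"
    return dest_params

# Resolve the callable defaults, then simulate --secondary_cache_uri=x as in
# the test plan above: the sanitized cache type must be lru_cache.
params = {k: (v() if callable(v) else v) for k, v in default_params.items()}
params["secondary_cache_uri"] = "x"
assert finalize_and_sanitize(params)["cache_type"] == "lru_cache"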
Diffstat (limited to 'tools')
-rw-r--r--  tools/db_crashtest.py  16
1 file changed, 10 insertions, 6 deletions
diff --git a/tools/db_crashtest.py b/tools/db_crashtest.py
index 005cda1ce..bf0f5655d 100644
--- a/tools/db_crashtest.py
+++ b/tools/db_crashtest.py
@@ -113,7 +113,7 @@ default_params = {
     "use_direct_reads": lambda: random.randint(0, 1),
     "use_direct_io_for_flush_and_compaction": lambda: random.randint(0, 1),
     "mock_direct_io": False,
-    "use_clock_cache": 0,  # currently broken
+    "cache_type": lambda: random.choice(["fast_lru_cache", "lru_cache"]),  # clock_cache is broken
     "use_full_merge_v1": lambda: random.randint(0, 1),
     "use_merge": lambda: random.randint(0, 1),
     # 999 -> use Bloom API
@@ -176,6 +176,7 @@ default_params = {
     "async_io": lambda: random.choice([0, 1]),
     "wal_compression": lambda: random.choice(["none", "zstd"]),
     "verify_sst_unique_id_in_manifest": 1,  # always do unique_id verification
+    "secondary_cache_uri": "",
 }
 _TEST_DIR_ENV_VAR = 'TEST_TMPDIR'
@@ -525,11 +526,14 @@ def finalize_and_sanitize(src_params):
     if dest_params.get("two_write_queues") == 1:
         dest_params["enable_pipelined_write"] = 0
     if dest_params.get("best_efforts_recovery") == 1:
-        dest_params["disable_wal"] = 1
-        dest_params["atomic_flush"] = 0
-        dest_params["enable_compaction_filter"] = 0
-        dest_params["sync"] = 0
-        dest_params["write_fault_one_in"] = 0
+        dest_params["disable_wal"] = 1
+        dest_params["atomic_flush"] = 0
+        dest_params["enable_compaction_filter"] = 0
+        dest_params["sync"] = 0
+        dest_params["write_fault_one_in"] = 0
+    if dest_params["secondary_cache_uri"] != "":
+        # Currently the only cache type compatible with a secondary cache is LRUCache
+        dest_params["cache_type"] = "lru_cache"
     return dest_params