author      mdecimus <mauro@stalw.art>    2024-07-17 18:33:22 +0200
committer   mdecimus <mauro@stalw.art>    2024-07-17 18:33:22 +0200
commit      d2ad44cf9f085db5b067338d560291db667aeaa9 (patch)
tree        c5fd2b3859a70f0cda86668379d954b4c1a92d25 /crates/store
parent      e74b29189a45c4afb6b204fced1b07032b16af61 (diff)
Improved error handling (part 3)
Diffstat (limited to 'crates/store')
-rw-r--r--  crates/store/src/backend/elastic/mod.rs        |  6
-rw-r--r--  crates/store/src/backend/elastic/query.rs      |  6
-rw-r--r--  crates/store/src/backend/foundationdb/mod.rs   |  2
-rw-r--r--  crates/store/src/backend/foundationdb/write.rs |  4
-rw-r--r--  crates/store/src/backend/fs/mod.rs             | 26
-rw-r--r--  crates/store/src/backend/mysql/mod.rs          |  2
-rw-r--r--  crates/store/src/backend/mysql/write.rs        |  6
-rw-r--r--  crates/store/src/backend/postgres/mod.rs       |  2
-rw-r--r--  crates/store/src/backend/postgres/write.rs     |  8
-rw-r--r--  crates/store/src/backend/redis/mod.rs          |  2
-rw-r--r--  crates/store/src/backend/redis/pool.rs         |  4
-rw-r--r--  crates/store/src/backend/rocksdb/mod.rs        |  2
-rw-r--r--  crates/store/src/backend/rocksdb/write.rs      |  2
-rw-r--r--  crates/store/src/backend/s3/mod.rs             |  8
-rw-r--r--  crates/store/src/backend/sqlite/mod.rs         |  2
-rw-r--r--  crates/store/src/backend/sqlite/write.rs       |  2
-rw-r--r--  crates/store/src/dispatch/blob.rs              | 10
-rw-r--r--  crates/store/src/dispatch/lookup.rs            | 51
-rw-r--r--  crates/store/src/dispatch/store.rs             | 66
-rw-r--r--  crates/store/src/query/acl.rs                  | 10
-rw-r--r--  crates/store/src/write/key.rs                  | 10
-rw-r--r--  crates/store/src/write/mod.rs                  | 20
22 files changed, 128 insertions, 123 deletions
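
The pattern applied throughout this diff is the same in every backend: the generic trc::Cause variants become store-scoped trc::StoreCause variants, and fallible backend calls are converted at the call site with .map_err(into_error) or the reason()/ctx() builders rather than through blanket ? conversions. The sketch below only illustrates that builder style under mocked types: trc is Stalwart's internal tracing crate, so the StoreCause, Key, and Error definitions here are simplified stand-ins mirroring the calls visible in the diff, not the real API.

    use std::fmt::Display;

    #[derive(Debug, Clone, Copy)]
    enum StoreCause {
        Filesystem,
        S3,
    }

    #[derive(Debug, Clone, Copy)]
    enum Key {
        Reason,
        Code,
    }

    // Simplified stand-in for trc::Error: a cause plus key/value context.
    struct Error {
        cause: StoreCause,
        context: Vec<(Key, String)>,
    }

    impl StoreCause {
        // Mirrors `trc::StoreCause::X.reason(err)`: wrap any displayable error.
        fn reason(self, err: impl Display) -> Error {
            Error {
                cause: self,
                context: vec![(Key::Reason, err.to_string())],
            }
        }
    }

    impl Error {
        // Mirrors `.ctx(trc::Key::Code, status)`: attach extra key/value context.
        fn ctx(mut self, key: Key, value: impl Display) -> Error {
            self.context.push((key, value.to_string()));
            self
        }
    }

    type Result<T> = std::result::Result<T, Error>;

    // Per-backend helper, as added for the filesystem backend in this commit:
    // I/O errors are mapped at each call site with `.map_err(into_error)?`
    // instead of relying on a blanket `From<std::io::Error>` conversion.
    fn into_error(err: std::io::Error) -> Error {
        StoreCause::Filesystem.reason(err)
    }

    fn read_blob(path: &str) -> Result<Vec<u8>> {
        std::fs::read(path).map_err(into_error)
    }

    // The same builder style covers the HTTP-backed stores, e.g. the S3 branches
    // that record the status code alongside the response body.
    fn check_status(code: u16, body: &str) -> Result<()> {
        match code {
            200..=299 => Ok(()),
            code => Err(StoreCause::S3.reason(body).ctx(Key::Code, code)),
        }
    }

    fn main() {
        if let Err(err) = read_blob("/nonexistent/blob") {
            println!("cause: {:?}, context: {:?}", err.cause, err.context);
        }
        if let Err(err) = check_status(503, "service unavailable") {
            println!("cause: {:?}, context: {:?}", err.cause, err.context);
        }
    }

With this shape, each backend module only needs its own small into_error helper (as added for the filesystem backend below), and every returned error carries a backend-specific cause plus whatever context the call site attaches.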
diff --git a/crates/store/src/backend/elastic/mod.rs b/crates/store/src/backend/elastic/mod.rs
index 99cb998d..abcf1ba0 100644
--- a/crates/store/src/backend/elastic/mod.rs
+++ b/crates/store/src/backend/elastic/mod.rs
@@ -111,7 +111,7 @@ impl ElasticSearchStore {
.exists(IndicesExistsParts::Index(&[INDEX_NAMES[0]]))
.send()
.await
- .map_err(|err| trc::Cause::ElasticSearch.reason(err))?;
+ .map_err(|err| trc::StoreCause::ElasticSearch.reason(err))?;
if exists.status_code() == StatusCode::NOT_FOUND {
let response = self
@@ -183,11 +183,11 @@ pub(crate) async fn assert_success(response: Result<Response, Error>) -> trc::Re
if status.is_success() {
Ok(response)
} else {
- Err(trc::Cause::ElasticSearch
+ Err(trc::StoreCause::ElasticSearch
.reason(response.text().await.unwrap_or_default())
.ctx(trc::Key::Code, status.as_u16()))
}
}
- Err(err) => Err(trc::Cause::ElasticSearch.reason(err)),
+ Err(err) => Err(trc::StoreCause::ElasticSearch.reason(err)),
}
}
diff --git a/crates/store/src/backend/elastic/query.rs b/crates/store/src/backend/elastic/query.rs
index b2448f7d..624ae8a5 100644
--- a/crates/store/src/backend/elastic/query.rs
+++ b/crates/store/src/backend/elastic/query.rs
@@ -107,14 +107,14 @@ impl ElasticSearchStore {
let json: Value = response
.json()
.await
- .map_err(|err| trc::Cause::ElasticSearch.reason(err))?;
+ .map_err(|err| trc::StoreCause::ElasticSearch.reason(err))?;
let mut results = RoaringBitmap::new();
for hit in json["hits"]["hits"].as_array().ok_or_else(|| {
- trc::Cause::ElasticSearch.reason("Invalid response from ElasticSearch")
+ trc::StoreCause::ElasticSearch.reason("Invalid response from ElasticSearch")
})? {
results.insert(hit["_source"]["document_id"].as_u64().ok_or_else(|| {
- trc::Cause::ElasticSearch.reason("Invalid response from ElasticSearch")
+ trc::StoreCause::ElasticSearch.reason("Invalid response from ElasticSearch")
})? as u32);
}
diff --git a/crates/store/src/backend/foundationdb/mod.rs b/crates/store/src/backend/foundationdb/mod.rs
index 3ea02014..52a28f7f 100644
--- a/crates/store/src/backend/foundationdb/mod.rs
+++ b/crates/store/src/backend/foundationdb/mod.rs
@@ -77,7 +77,7 @@ impl TimedTransaction {
#[inline(always)]
fn into_error(error: FdbError) -> trc::Error {
- trc::Cause::FoundationDB
+ trc::StoreCause::FoundationDB
.reason(error.message())
.ctx(trc::Key::Code, error.code())
}
diff --git a/crates/store/src/backend/foundationdb/write.rs b/crates/store/src/backend/foundationdb/write.rs
index 4a498995..9fd10f75 100644
--- a/crates/store/src/backend/foundationdb/write.rs
+++ b/crates/store/src/backend/foundationdb/write.rs
@@ -94,7 +94,7 @@ impl FdbStore {
*key.last_mut().unwrap() += 1;
} else {
trx.cancel();
- return Err(trc::Cause::FoundationDB.ctx(
+ return Err(trc::StoreCause::FoundationDB.ctx(
trc::Key::Reason,
"Value is too large",
));
@@ -257,7 +257,7 @@ impl FdbStore {
if !matches {
trx.cancel();
- return Err(trc::Cause::AssertValue.into());
+ return Err(trc::StoreCause::AssertValue.into());
}
}
}
diff --git a/crates/store/src/backend/fs/mod.rs b/crates/store/src/backend/fs/mod.rs
index f2695f2e..bc522c1b 100644
--- a/crates/store/src/backend/fs/mod.rs
+++ b/crates/store/src/backend/fs/mod.rs
@@ -57,7 +57,7 @@ impl FsStore {
Ok(m) => m.len() as usize,
Err(_) => return Ok(None),
};
- let mut blob = File::open(&blob_path).await?;
+ let mut blob = File::open(&blob_path).await.map_err(into_error)?;
Ok(Some(if range.start != 0 || range.end != usize::MAX {
let from_offset = if range.start < blob_size {
@@ -68,13 +68,15 @@ impl FsStore {
let mut buf = vec![0; (std::cmp::min(range.end, blob_size) - from_offset) as usize];
if from_offset > 0 {
- blob.seek(SeekFrom::Start(from_offset as u64)).await?;
+ blob.seek(SeekFrom::Start(from_offset as u64))
+ .await
+ .map_err(into_error)?;
}
- blob.read_exact(&mut buf).await?;
+ blob.read_exact(&mut buf).await.map_err(into_error)?;
buf
} else {
let mut buf = Vec::with_capacity(blob_size as usize);
- blob.read_to_end(&mut buf).await?;
+ blob.read_to_end(&mut buf).await.map_err(into_error)?;
buf
}))
}
@@ -86,10 +88,12 @@ impl FsStore {
.await
.map_or(true, |m| m.len() as usize != data.len())
{
- fs::create_dir_all(blob_path.parent().unwrap()).await?;
- let mut blob_file = File::create(&blob_path).await?;
- blob_file.write_all(data).await?;
- blob_file.flush().await?;
+ fs::create_dir_all(blob_path.parent().unwrap())
+ .await
+ .map_err(into_error)?;
+ let mut blob_file = File::create(&blob_path).await.map_err(into_error)?;
+ blob_file.write_all(data).await.map_err(into_error)?;
+ blob_file.flush().await.map_err(into_error)?;
}
Ok(())
@@ -98,7 +102,7 @@ impl FsStore {
pub(crate) async fn delete_blob(&self, key: &[u8]) -> trc::Result<bool> {
let blob_path = self.build_path(key);
if fs::metadata(&blob_path).await.is_ok() {
- fs::remove_file(&blob_path).await?;
+ fs::remove_file(&blob_path).await.map_err(into_error)?;
Ok(true)
} else {
Ok(false)
@@ -115,3 +119,7 @@ impl FsStore {
path
}
}
+
+fn into_error(err: std::io::Error) -> trc::Error {
+ trc::StoreCause::Filesystem.reason(err)
+}
diff --git a/crates/store/src/backend/mysql/mod.rs b/crates/store/src/backend/mysql/mod.rs
index 2a3d2d90..fa28d1bc 100644
--- a/crates/store/src/backend/mysql/mod.rs
+++ b/crates/store/src/backend/mysql/mod.rs
@@ -20,5 +20,5 @@ pub struct MysqlStore {
#[inline(always)]
fn into_error(err: impl Display) -> trc::Error {
- trc::Cause::MySQL.reason(err)
+ trc::StoreCause::MySQL.reason(err)
}
diff --git a/crates/store/src/backend/mysql/write.rs b/crates/store/src/backend/mysql/write.rs
index 5c7e7c08..ca20ba20 100644
--- a/crates/store/src/backend/mysql/write.rs
+++ b/crates/store/src/backend/mysql/write.rs
@@ -46,7 +46,7 @@ impl MysqlStore {
&& start.elapsed() < MAX_COMMIT_TIME => {}
Err(CommitError::Retry) => {
if retry_count > MAX_COMMIT_ATTEMPTS || start.elapsed() > MAX_COMMIT_TIME {
- return Err(trc::Cause::AssertValue.into());
+ return Err(trc::StoreCause::AssertValue.into());
}
}
Err(CommitError::Mysql(err)) => {
@@ -135,7 +135,7 @@ impl MysqlStore {
Ok(_) => {
if exists.is_some() && trx.affected_rows() == 0 {
trx.rollback().await?;
- return Err(trc::Cause::AssertValue.into_err().into());
+ return Err(trc::StoreCause::AssertValue.into_err().into());
}
}
Err(err) => {
@@ -308,7 +308,7 @@ impl MysqlStore {
.unwrap_or_else(|| (false, assert_value.is_none()));
if !matches {
trx.rollback().await?;
- return Err(trc::Cause::AssertValue.into_err().into());
+ return Err(trc::StoreCause::AssertValue.into_err().into());
}
asserted_values.insert(key, exists);
}
diff --git a/crates/store/src/backend/postgres/mod.rs b/crates/store/src/backend/postgres/mod.rs
index 81031e5d..63093154 100644
--- a/crates/store/src/backend/postgres/mod.rs
+++ b/crates/store/src/backend/postgres/mod.rs
@@ -21,5 +21,5 @@ pub struct PostgresStore {
#[inline(always)]
fn into_error(err: impl Display) -> trc::Error {
- trc::Cause::PostgreSQL.reason(err)
+ trc::StoreCause::PostgreSQL.reason(err)
}
diff --git a/crates/store/src/backend/postgres/write.rs b/crates/store/src/backend/postgres/write.rs
index 04b19d32..aaf4e825 100644
--- a/crates/store/src/backend/postgres/write.rs
+++ b/crates/store/src/backend/postgres/write.rs
@@ -50,7 +50,7 @@ impl PostgresStore {
) if retry_count < MAX_COMMIT_ATTEMPTS
&& start.elapsed() < MAX_COMMIT_TIME => {}
Some(&SqlState::UNIQUE_VIOLATION) => {
- return Err(trc::Cause::AssertValue.into());
+ return Err(trc::StoreCause::AssertValue.into());
}
_ => return Err(into_error(err)),
},
@@ -59,7 +59,7 @@ impl PostgresStore {
if retry_count > MAX_COMMIT_ATTEMPTS
|| start.elapsed() > MAX_COMMIT_TIME
{
- return Err(trc::Cause::AssertValue.into());
+ return Err(trc::StoreCause::AssertValue.into());
}
}
}
@@ -148,7 +148,7 @@ impl PostgresStore {
.await?
== 0
{
- return Err(trc::Cause::AssertValue.into_err().into());
+ return Err(trc::StoreCause::AssertValue.into_err().into());
}
}
ValueOp::AtomicAdd(by) => {
@@ -322,7 +322,7 @@ impl PostgresStore {
})
.unwrap_or_else(|| (false, assert_value.is_none()));
if !matches {
- return Err(trc::Cause::AssertValue.into_err().into());
+ return Err(trc::StoreCause::AssertValue.into_err().into());
}
asserted_values.insert(key, exists);
}
diff --git a/crates/store/src/backend/redis/mod.rs b/crates/store/src/backend/redis/mod.rs
index 479b2c5e..6ce007e2 100644
--- a/crates/store/src/backend/redis/mod.rs
+++ b/crates/store/src/backend/redis/mod.rs
@@ -184,5 +184,5 @@ fn build_pool<M: Manager>(
#[inline(always)]
fn into_error(err: impl Display) -> trc::Error {
- trc::Cause::Redis.reason(err)
+ trc::StoreCause::Redis.reason(err)
}
diff --git a/crates/store/src/backend/redis/pool.rs b/crates/store/src/backend/redis/pool.rs
index b8f01863..c322584e 100644
--- a/crates/store/src/backend/redis/pool.rs
+++ b/crates/store/src/backend/redis/pool.rs
@@ -21,7 +21,7 @@ impl managed::Manager for RedisConnectionManager {
.await
{
Ok(conn) => conn.map_err(into_error),
- Err(_) => Err(trc::Cause::Timeout.into()),
+ Err(_) => Err(trc::StoreCause::Redis.ctx(trc::Key::Details, "Connection Timeout")),
}
}
@@ -44,7 +44,7 @@ impl managed::Manager for RedisClusterConnectionManager {
async fn create(&self) -> Result<ClusterConnection, trc::Error> {
match tokio::time::timeout(self.timeout, self.client.get_async_connection()).await {
Ok(conn) => conn.map_err(into_error),
- Err(_) => Err(trc::Cause::Timeout.into()),
+ Err(_) => Err(trc::StoreCause::Redis.ctx(trc::Key::Details, "Connection Timeout")),
}
}
diff --git a/crates/store/src/backend/rocksdb/mod.rs b/crates/store/src/backend/rocksdb/mod.rs
index 33d273a9..78b80e86 100644
--- a/crates/store/src/backend/rocksdb/mod.rs
+++ b/crates/store/src/backend/rocksdb/mod.rs
@@ -38,5 +38,5 @@ pub struct RocksDbStore {
#[inline(always)]
fn into_error(err: rocksdb::Error) -> trc::Error {
- trc::Cause::RocksDB.reason(err)
+ trc::StoreCause::RocksDB.reason(err)
}
diff --git a/crates/store/src/backend/rocksdb/write.rs b/crates/store/src/backend/rocksdb/write.rs
index 8639e2b7..d8c47b2b 100644
--- a/crates/store/src/backend/rocksdb/write.rs
+++ b/crates/store/src/backend/rocksdb/write.rs
@@ -308,7 +308,7 @@ impl<'x> RocksDBTransaction<'x> {
if !matches {
txn.rollback()?;
- return Err(CommitError::Internal(trc::Cause::AssertValue.into()));
+ return Err(CommitError::Internal(trc::StoreCause::AssertValue.into()));
}
}
}
diff --git a/crates/store/src/backend/s3/mod.rs b/crates/store/src/backend/s3/mod.rs
index 70b314e1..dee52205 100644
--- a/crates/store/src/backend/s3/mod.rs
+++ b/crates/store/src/backend/s3/mod.rs
@@ -90,7 +90,7 @@ impl S3Store {
match response.status_code() {
200..=299 => Ok(Some(response.to_vec())),
404 => Ok(None),
- code => Err(trc::Cause::S3
+ code => Err(trc::StoreCause::S3
.reason(String::from_utf8_lossy(response.as_slice()))
.ctx(trc::Key::Code, code)),
}
@@ -105,7 +105,7 @@ impl S3Store {
match response.status_code() {
200..=299 => Ok(()),
- code => Err(trc::Cause::S3
+ code => Err(trc::StoreCause::S3
.reason(String::from_utf8_lossy(response.as_slice()))
.ctx(trc::Key::Code, code)),
}
@@ -121,7 +121,7 @@ impl S3Store {
match response.status_code() {
200..=299 => Ok(true),
404 => Ok(false),
- code => Err(trc::Cause::S3
+ code => Err(trc::StoreCause::S3
.reason(String::from_utf8_lossy(response.as_slice()))
.ctx(trc::Key::Code, code)),
}
@@ -142,5 +142,5 @@ impl S3Store {
#[inline(always)]
fn into_error(err: impl Display) -> trc::Error {
- trc::Cause::S3.reason(err)
+ trc::StoreCause::S3.reason(err)
}
diff --git a/crates/store/src/backend/sqlite/mod.rs b/crates/store/src/backend/sqlite/mod.rs
index 6cd07502..3bc632f8 100644
--- a/crates/store/src/backend/sqlite/mod.rs
+++ b/crates/store/src/backend/sqlite/mod.rs
@@ -24,5 +24,5 @@ pub struct SqliteStore {
#[inline(always)]
fn into_error(err: impl Display) -> trc::Error {
- trc::Cause::SQLite.reason(err)
+ trc::StoreCause::SQLite.reason(err)
}
diff --git a/crates/store/src/backend/sqlite/write.rs b/crates/store/src/backend/sqlite/write.rs
index c5050462..c07f42df 100644
--- a/crates/store/src/backend/sqlite/write.rs
+++ b/crates/store/src/backend/sqlite/write.rs
@@ -245,7 +245,7 @@ impl SqliteStore {
.unwrap_or_else(|| assert_value.is_none());
if !matches {
trx.rollback().map_err(into_error)?;
- return Err(trc::Cause::AssertValue.into());
+ return Err(trc::StoreCause::AssertValue.into());
}
}
}
diff --git a/crates/store/src/dispatch/blob.rs b/crates/store/src/dispatch/blob.rs
index f774b855..68303596 100644
--- a/crates/store/src/dispatch/blob.rs
+++ b/crates/store/src/dispatch/blob.rs
@@ -30,7 +30,7 @@ impl BlobStore {
Store::MySQL(store) => store.get_blob(key, read_range).await,
#[cfg(feature = "rocks")]
Store::RocksDb(store) => store.get_blob(key, read_range).await,
- Store::None => Err(trc::Cause::NotConfigured.into()),
+ Store::None => Err(trc::StoreCause::NotConfigured.into()),
},
BlobBackend::Fs(store) => store.get_blob(key, read_range).await,
#[cfg(feature = "s3")]
@@ -47,14 +47,14 @@ impl BlobStore {
data.get(..data.len() - 1).unwrap_or_default(),
)
.map_err(|err| {
- trc::Cause::Decompress
+ trc::StoreCause::Decompress
.reason(err)
.ctx(trc::Key::Key, key)
.ctx(trc::Key::CausedBy, trc::location!())
})?
}
Some(data) => {
- trc::error!(BlobMissingMarker, Details = key);
+ let todo = "log";
data
}
None => return Ok(None),
@@ -96,7 +96,7 @@ impl BlobStore {
Store::MySQL(store) => store.put_blob(key, data.as_ref()).await,
#[cfg(feature = "rocks")]
Store::RocksDb(store) => store.put_blob(key, data.as_ref()).await,
- Store::None => Err(trc::Cause::NotConfigured.into()),
+ Store::None => Err(trc::StoreCause::NotConfigured.into()),
},
BlobBackend::Fs(store) => store.put_blob(key, data.as_ref()).await,
#[cfg(feature = "s3")]
@@ -118,7 +118,7 @@ impl BlobStore {
Store::MySQL(store) => store.delete_blob(key).await,
#[cfg(feature = "rocks")]
Store::RocksDb(store) => store.delete_blob(key).await,
- Store::None => Err(trc::Cause::NotConfigured.into()),
+ Store::None => Err(trc::StoreCause::NotConfigured.into()),
},
BlobBackend::Fs(store) => store.delete_blob(key).await,
#[cfg(feature = "s3")]
diff --git a/crates/store/src/dispatch/lookup.rs b/crates/store/src/dispatch/lookup.rs
index 8ab3aaed..afe02835 100644
--- a/crates/store/src/dispatch/lookup.rs
+++ b/crates/store/src/dispatch/lookup.rs
@@ -32,7 +32,7 @@ impl LookupStore {
LookupStore::Store(Store::PostgreSQL(store)) => store.query(query, &params).await,
#[cfg(feature = "mysql")]
LookupStore::Store(Store::MySQL(store)) => store.query(query, &params).await,
- _ => Err(trc::Cause::Unsupported.into_err()),
+ _ => Err(trc::StoreCause::NotSupported.into_err()),
};
trc::trace!(
@@ -42,7 +42,7 @@ impl LookupStore {
Result = &result,
);
- result.caused_by( trc::location!())
+ result.caused_by(trc::location!())
}
pub async fn key_set(
@@ -76,9 +76,9 @@ impl LookupStore {
)
.await
.map(|_| ()),
- LookupStore::Memory(_) => Err(trc::Cause::Unsupported.into_err()),
+ LookupStore::Memory(_) => Err(trc::StoreCause::NotSupported.into_err()),
}
- .caused_by( trc::location!())
+ .caused_by(trc::location!())
}
pub async fn counter_incr(
@@ -125,10 +125,10 @@ impl LookupStore {
#[cfg(feature = "redis")]
LookupStore::Redis(store) => store.key_incr(key, value, expires).await,
LookupStore::Query(_) | LookupStore::Memory(_) => {
- Err(trc::Cause::Unsupported.into_err())
+ Err(trc::StoreCause::NotSupported.into_err())
}
}
- .caused_by( trc::location!())
+ .caused_by(trc::location!())
}
pub async fn key_delete(&self, key: Vec<u8>) -> trc::Result<()> {
@@ -144,10 +144,10 @@ impl LookupStore {
#[cfg(feature = "redis")]
LookupStore::Redis(store) => store.key_delete(key).await,
LookupStore::Query(_) | LookupStore::Memory(_) => {
- Err(trc::Cause::Unsupported.into_err())
+ Err(trc::StoreCause::NotSupported.into_err())
}
}
- .caused_by( trc::location!())
+ .caused_by(trc::location!())
}
pub async fn counter_delete(&self, key: Vec<u8>) -> trc::Result<()> {
@@ -163,10 +163,10 @@ impl LookupStore {
#[cfg(feature = "redis")]
LookupStore::Redis(store) => store.key_delete(key).await,
LookupStore::Query(_) | LookupStore::Memory(_) => {
- Err(trc::Cause::Unsupported.into_err())
+ Err(trc::StoreCause::NotSupported.into_err())
}
}
- .caused_by( trc::location!())
+ .caused_by(trc::location!())
}
pub async fn key_get<T: Deserialize + From<Value<'static>> + std::fmt::Debug + 'static>(
@@ -197,7 +197,7 @@ impl LookupStore {
.get(std::str::from_utf8(&key).unwrap_or_default())
.map(|value| T::from(value.clone()))),
}
- .caused_by( trc::location!())
+ .caused_by(trc::location!())
}
pub async fn counter_get(&self, key: Vec<u8>) -> trc::Result<i64> {
@@ -212,10 +212,10 @@ impl LookupStore {
#[cfg(feature = "redis")]
LookupStore::Redis(store) => store.counter_get(key).await,
LookupStore::Query(_) | LookupStore::Memory(_) => {
- Err(trc::Cause::Unsupported.into_err())
+ Err(trc::StoreCause::NotSupported.into_err())
}
}
- .caused_by( trc::location!())
+ .caused_by(trc::location!())
}
pub async fn key_exists(&self, key: Vec<u8>) -> trc::Result<bool> {
@@ -240,7 +240,7 @@ impl LookupStore {
.get(std::str::from_utf8(&key).unwrap_or_default())
.is_some()),
}
- .caused_by( trc::location!())
+ .caused_by(trc::location!())
}
pub async fn is_rate_allowed(
@@ -261,12 +261,9 @@ impl LookupStore {
let requests = if !soft_check {
self.counter_incr(bucket, 1, expires_in.into(), true)
.await
- .caused_by( trc::location!())?
+ .caused_by(trc::location!())?
} else {
- self.counter_get(bucket)
- .await
- .caused_by( trc::location!())?
- + 1
+ self.counter_get(bucket).await.caused_by(trc::location!())? + 1
};
if requests <= rate.requests as i64 {
@@ -289,11 +286,11 @@ impl LookupStore {
let mut expired_counters = Vec::new();
store
.iterate(IterateParams::new(from_key, to_key), |key, value| {
- let expiry = value.deserialize_be_u64(0).caused_by( trc::location!())?;
+ let expiry = value.deserialize_be_u64(0).caused_by(trc::location!())?;
if expiry == 0 {
if value
.deserialize_be_u64(U64_LEN)
- .caused_by( trc::location!())?
+ .caused_by(trc::location!())?
<= current_time
{
expired_counters.push(key.to_vec());
@@ -304,7 +301,7 @@ impl LookupStore {
Ok(true)
})
.await
- .caused_by( trc::location!())?;
+ .caused_by(trc::location!())?;
if !expired_keys.is_empty() {
let mut batch = BatchBuilder::new();
@@ -317,7 +314,7 @@ impl LookupStore {
store
.write(batch.build())
.await
- .caused_by( trc::location!())?;
+ .caused_by(trc::location!())?;
batch = BatchBuilder::new();
}
}
@@ -325,7 +322,7 @@ impl LookupStore {
store
.write(batch.build())
.await
- .caused_by( trc::location!())?;
+ .caused_by(trc::location!())?;
}
}
@@ -344,7 +341,7 @@ impl LookupStore {
store
.write(batch.build())
.await
- .caused_by( trc::location!())?;
+ .caused_by(trc::location!())?;
batch = BatchBuilder::new();
}
}
@@ -352,7 +349,7 @@ impl LookupStore {
store
.write(batch.build())
.await
- .caused_by( trc::location!())?;
+ .caused_by(trc::location!())?;
}
}
}
@@ -383,7 +380,7 @@ impl<T: Deserialize> Deserialize for LookupValue<T> {
Ok(if expires > now() {
LookupValue::Value(
T::deserialize(bytes.get(U64_LEN..).unwrap_or_default())
- .caused_by( trc::location!())?,
+ .caused_by(trc::location!())?,
)
} else {
LookupValue::None
diff --git a/crates/store/src/dispatch/store.rs b/crates/store/src/dispatch/store.rs
index 8981a5da..badccd41 100644
--- a/crates/store/src/dispatch/store.rs
+++ b/crates/store/src/dispatch/store.rs
@@ -43,9 +43,9 @@ impl Store {
Self::MySQL(store) => store.get_value(key).await,
#[cfg(feature = "rocks")]
Self::RocksDb(store) => store.get_value(key).await,
- Self::None => Err(trc::Cause::NotConfigured.into()),
+ Self::None => Err(trc::StoreCause::NotConfigured.into()),
}
- .caused_by( trc::location!())
+ .caused_by(trc::location!())
}
pub async fn get_bitmap(
@@ -63,9 +63,9 @@ impl Store {
Self::MySQL(store) => store.get_bitmap(key).await,
#[cfg(feature = "rocks")]
Self::RocksDb(store) => store.get_bitmap(key).await,
- Self::None => Err(trc::Cause::NotConfigured.into()),
+ Self::None => Err(trc::StoreCause::NotConfigured.into()),
}
- .caused_by( trc::location!())
+ .caused_by(trc::location!())
}
pub async fn get_bitmaps_intersection(
@@ -74,7 +74,7 @@ impl Store {
) -> trc::Result<Option<RoaringBitmap>> {
let mut result: Option<RoaringBitmap> = None;
for key in keys {
- if let Some(bitmap) = self.get_bitmap(key).await.caused_by( trc::location!())? {
+ if let Some(bitmap) = self.get_bitmap(key).await.caused_by(trc::location!())? {
if let Some(result) = &mut result {
result.bitand_assign(&bitmap);
if result.is_empty() {
@@ -106,9 +106,9 @@ impl Store {
Self::MySQL(store) => store.iterate(params, cb).await,
#[cfg(feature = "rocks")]
Self::RocksDb(store) => store.iterate(params, cb).await,
- Self::None => Err(trc::Cause::NotConfigured.into()),
+ Self::None => Err(trc::StoreCause::NotConfigured.into()),
}
- .caused_by( trc::location!())
+ .caused_by(trc::location!())
}
pub async fn get_counter(
@@ -126,9 +126,9 @@ impl Store {
Self::MySQL(store) => store.get_counter(key).await,
#[cfg(feature = "rocks")]
Self::RocksDb(store) => store.get_counter(key).await,
- Self::None => Err(trc::Cause::NotConfigured.into()),
+ Self::None => Err(trc::StoreCause::NotConfigured.into()),
}
- .caused_by( trc::location!())
+ .caused_by(trc::location!())
}
pub async fn write(&self, batch: Batch) -> trc::Result<AssignedIds> {
@@ -189,9 +189,9 @@ impl Store {
Self::MySQL(store) => store.write(batch).await,
#[cfg(feature = "rocks")]
Self::RocksDb(store) => store.write(batch).await,
- Self::None => Err(trc::Cause::NotConfigured.into()),
+ Self::None => Err(trc::StoreCause::NotConfigured.into()),
}
- .caused_by( trc::location!())?;
+ .caused_by(trc::location!())?;
for (key, class, document_id, set) in bitmaps {
let mut bitmaps = BITMAPS.lock();
@@ -231,7 +231,7 @@ impl Store {
Self::MySQL(store) => store.write(batch).await,
#[cfg(feature = "rocks")]
Self::RocksDb(store) => store.write(batch).await,
- Self::None => Err(trc::Cause::NotConfigured.into()),
+ Self::None => Err(trc::StoreCause::NotConfigured.into()),
}
}
@@ -246,7 +246,7 @@ impl Store {
})),
)
.await
- .caused_by( trc::location!())?;
+ .caused_by(trc::location!())?;
self.delete_range(
ValueKey::from(ValueClass::Report(ReportClass::Tls { id: 0, expires: 0 })),
ValueKey::from(ValueClass::Report(ReportClass::Tls {
@@ -255,7 +255,7 @@ impl Store {
})),
)
.await
- .caused_by( trc::location!())?;
+ .caused_by(trc::location!())?;
self.delete_range(
ValueKey::from(ValueClass::Report(ReportClass::Arf { id: 0, expires: 0 })),
ValueKey::from(ValueClass::Report(ReportClass::Arf {
@@ -264,7 +264,7 @@ impl Store {
})),
)
.await
- .caused_by( trc::location!())?;
+ .caused_by(trc::location!())?;
match self {
#[cfg(feature = "sqlite")]
@@ -277,9 +277,9 @@ impl Store {
Self::MySQL(store) => store.purge_store().await,
#[cfg(feature = "rocks")]
Self::RocksDb(store) => store.purge_store().await,
- Self::None => Err(trc::Cause::NotConfigured.into()),
+ Self::None => Err(trc::StoreCause::NotConfigured.into()),
}
- .caused_by( trc::location!())
+ .caused_by(trc::location!())
}
pub async fn delete_range(&self, from: impl Key, to: impl Key) -> trc::Result<()> {
@@ -294,9 +294,9 @@ impl Store {
Self::MySQL(store) => store.delete_range(from, to).await,
#[cfg(feature = "rocks")]
Self::RocksDb(store) => store.delete_range(from, to).await,
- Self::None => Err(trc::Cause::NotConfigured.into()),
+ Self::None => Err(trc::StoreCause::NotConfigured.into()),
}
- .caused_by( trc::location!())
+ .caused_by(trc::location!())
}
pub async fn delete_documents(
@@ -352,7 +352,7 @@ impl Store {
},
)
.await
- .caused_by( trc::location!())?;
+ .caused_by(trc::location!())?;
// Remove keys
let mut batch = BatchBuilder::new();
@@ -361,7 +361,7 @@ impl Store {
if batch.ops.len() >= 1000 {
self.write(std::mem::take(&mut batch).build())
.await
- .caused_by( trc::location!())?;
+ .caused_by(trc::location!())?;
}
batch.ops.push(Operation::Value {
class: ValueClass::Any(AnyClass { subspace, key }),
@@ -372,7 +372,7 @@ impl Store {
if !batch.is_empty() {
self.write(batch.build())
.await
- .caused_by( trc::location!())?;
+ .caused_by(trc::location!())?;
}
Ok(())
@@ -397,7 +397,7 @@ impl Store {
},
)
.await
- .caused_by( trc::location!())?;
+ .caused_by(trc::location!())?;
}
for (from_class, to_class) in [
@@ -429,7 +429,7 @@ impl Store {
},
)
.await
- .caused_by( trc::location!())?;
+ .caused_by(trc::location!())?;
}
Ok(())
@@ -447,9 +447,9 @@ impl Store {
Self::MySQL(store) => store.get_blob(key, range).await,
#[cfg(feature = "rocks")]
Self::RocksDb(store) => store.get_blob(key, range).await,
- Self::None => Err(trc::Cause::NotConfigured.into()),
+ Self::None => Err(trc::StoreCause::NotConfigured.into()),
}
- .caused_by( trc::location!())
+ .caused_by(trc::location!())
}
pub async fn put_blob(&self, key: &[u8], data: &[u8]) -> trc::Result<()> {
@@ -464,9 +464,9 @@ impl Store {
Self::MySQL(store) => store.put_blob(key, data).await,
#[cfg(feature = "rocks")]
Self::RocksDb(store) => store.put_blob(key, data).await,
- Self::None => Err(trc::Cause::NotConfigured.into()),
+ Self::None => Err(trc::StoreCause::NotConfigured.into()),
}
- .caused_by( trc::location!())
+ .caused_by(trc::location!())
}
pub async fn delete_blob(&self, key: &[u8]) -> trc::Result<bool> {
@@ -481,9 +481,9 @@ impl Store {
Self::MySQL(store) => store.delete_blob(key).await,
#[cfg(feature = "rocks")]
Self::RocksDb(store) => store.delete_blob(key).await,
- Self::None => Err(trc::Cause::NotConfigured.into()),
+ Self::None => Err(trc::StoreCause::NotConfigured.into()),
}
- .caused_by( trc::location!())
+ .caused_by(trc::location!())
}
#[cfg(feature = "test_mode")]
@@ -568,7 +568,7 @@ impl Store {
self.iterate(
IterateParams::new(from_key, to_key).ascending().no_values(),
|key, _| {
- let account_id = key.deserialize_be_u32(0).caused_by( trc::location!())?;
+ let account_id = key.deserialize_be_u32(0).caused_by(trc::location!())?;
if account_id != last_account_id {
last_account_id = account_id;
batch.with_account_id(account_id);
@@ -582,7 +582,7 @@ impl Store {
.unwrap(),
until: key
.deserialize_be_u64(key.len() - U64_LEN)
- .caused_by( trc::location!())?,
+ .caused_by(trc::location!())?,
}),
op: ValueOp::Clear,
});
@@ -607,7 +607,7 @@ impl Store {
let mut expired_counters = Vec::new();
self.iterate(IterateParams::new(from_key, to_key), |key, value| {
- let expiry = value.deserialize_be_u64(0).caused_by( trc::location!())?;
+ let expiry = value.deserialize_be_u64(0).caused_by(trc::location!())?;
if expiry == 0 {
expired_counters.push(key.to_vec());
} else if expiry != u64::MAX {
diff --git a/crates/store/src/query/acl.rs b/crates/store/src/query/acl.rs
index bd6f0d16..53d5c088 100644
--- a/crates/store/src/query/acl.rs
+++ b/crates/store/src/query/acl.rs
@@ -75,7 +75,7 @@ impl Store {
},
)
.await
- .caused_by( trc::location!())
+ .caused_by(trc::location!())
.map(|_| results)
}
@@ -108,7 +108,7 @@ impl Store {
},
)
.await
- .caused_by( trc::location!())?;
+ .caused_by(trc::location!())?;
// Remove permissions
let mut batch = BatchBuilder::new();
@@ -118,7 +118,7 @@ impl Store {
if batch.ops.len() >= 1000 {
self.write(batch.build())
.await
- .caused_by( trc::location!())?;
+ .caused_by(trc::location!())?;
batch = BatchBuilder::new();
batch.with_account_id(account_id);
last_collection = u8::MAX;
@@ -136,7 +136,7 @@ impl Store {
if !batch.is_empty() {
self.write(batch.build())
.await
- .caused_by( trc::location!())?;
+ .caused_by(trc::location!())?;
}
Ok(())
@@ -149,7 +149,7 @@ impl Deserialize for AclItem {
to_account_id: bytes.deserialize_be_u32(U32_LEN)?,
to_collection: *bytes
.get(U32_LEN * 2)
- .ok_or_else(|| trc::Cause::DataCorruption.caused_by(trc::location!()))?,
+ .ok_or_else(|| trc::StoreCause::DataCorruption.caused_by(trc::location!()))?,
to_document_id: bytes.deserialize_be_u32((U32_LEN * 2) + 1)?,
permissions: 0,
})
diff --git a/crates/store/src/write/key.rs b/crates/store/src/write/key.rs
index 22ff00e0..fff9a816 100644
--- a/crates/store/src/write/key.rs
+++ b/crates/store/src/write/key.rs
@@ -102,13 +102,13 @@ impl DeserializeBigEndian for &[u8] {
fn deserialize_be_u32(&self, index: usize) -> trc::Result<u32> {
self.get(index..index + U32_LEN)
.ok_or_else(|| {
- trc::Cause::DataCorruption
+ trc::StoreCause::DataCorruption
.caused_by(trc::location!())
.ctx(trc::Key::Value, *self)
})
.and_then(|bytes| {
bytes.try_into().map_err(|_| {
- trc::Cause::DataCorruption
+ trc::StoreCause::DataCorruption
.caused_by(trc::location!())
.ctx(trc::Key::Value, *self)
})
@@ -119,13 +119,13 @@ impl DeserializeBigEndian for &[u8] {
fn deserialize_be_u64(&self, index: usize) -> trc::Result<u64> {
self.get(index..index + U64_LEN)
.ok_or_else(|| {
- trc::Cause::DataCorruption
+ trc::StoreCause::DataCorruption
.caused_by(trc::location!())
.ctx(trc::Key::Value, *self)
})
.and_then(|bytes| {
bytes.try_into().map_err(|_| {
- trc::Cause::DataCorruption
+ trc::StoreCause::DataCorruption
.caused_by(trc::location!())
.ctx(trc::Key::Value, *self)
})
@@ -644,7 +644,7 @@ impl Deserialize for ReportEvent {
.and_then(|domain| std::str::from_utf8(domain).ok())
.map(|s| s.to_string())
.ok_or_else(|| {
- trc::Cause::DataCorruption
+ trc::StoreCause::DataCorruption
.caused_by(trc::location!())
.ctx(trc::Key::Key, key)
})?,
diff --git a/crates/store/src/write/mod.rs b/crates/store/src/write/mod.rs
index b014637a..f59fdc0f 100644
--- a/crates/store/src/write/mod.rs
+++ b/crates/store/src/write/mod.rs
@@ -346,7 +346,7 @@ impl Deserialize for String {
impl Deserialize for u64 {
fn deserialize(bytes: &[u8]) -> trc::Result<Self> {
Ok(u64::from_be_bytes(bytes.try_into().map_err(|_| {
- trc::Cause::DataCorruption.caused_by(trc::location!())
+ trc::StoreCause::DataCorruption.caused_by(trc::location!())
})?))
}
}
@@ -354,7 +354,7 @@ impl Deserialize for u64 {
impl Deserialize for i64 {
fn deserialize(bytes: &[u8]) -> trc::Result<Self> {
Ok(i64::from_be_bytes(bytes.try_into().map_err(|_| {
- trc::Cause::DataCorruption.caused_by(trc::location!())
+ trc::StoreCause::DataCorruption.caused_by(trc::location!())
})?))
}
}
@@ -362,7 +362,7 @@ impl Deserialize for i64 {
impl Deserialize for u32 {
fn deserialize(bytes: &[u8]) -> trc::Result<Self> {
Ok(u32::from_be_bytes(bytes.try_into().map_err(|_| {
- trc::Cause::DataCorruption.caused_by(trc::location!())
+ trc::StoreCause::DataCorruption.caused_by(trc::location!())
})?))
}
}
@@ -456,12 +456,12 @@ impl<T: DeserializeFrom + Sync + Send> Deserialize for Vec<T> {
let mut bytes = bytes.iter();
let len: usize = bytes
.next_leb128()
- .ok_or_else(|| trc::Cause::DataCorruption.caused_by(trc::location!()))?;
+ .ok_or_else(|| trc::StoreCause::DataCorruption.caused_by(trc::location!()))?;
let mut list = Vec::with_capacity(len);
for _ in 0..len {
list.push(
T::deserialize_from(&mut bytes)
- .ok_or_else(|| trc::Cause::DataCorruption.caused_by(trc::location!()))?,
+ .ok_or_else(|| trc::StoreCause::DataCorruption.caused_by(trc::location!()))?,
);
}
Ok(list)
@@ -686,13 +686,13 @@ impl<T: serde::Serialize + serde::de::DeserializeOwned + Sized + Sync + Send> De
fn deserialize(bytes: &[u8]) -> trc::Result<Self> {
lz4_flex::decompress_size_prepended(bytes)
.map_err(|err| {
- trc::Cause::Decompress
+ trc::StoreCause::Decompress
.caused_by(trc::location!())
.reason(err)
})
.and_then(|result| {
bincode::deserialize(&result).map_err(|err| {
- trc::Cause::DataCorruption
+ trc::StoreCause::DataCorruption
.caused_by(trc::location!())
.reason(err)
})
@@ -724,7 +724,7 @@ impl AssignedIds {
pub fn get_document_id(&self, idx: usize) -> trc::Result<u32> {
self.document_ids.get(idx).copied().ok_or_else(|| {
- trc::Cause::Unexpected
+ trc::StoreCause::Unexpected
.caused_by(trc::location!())
.ctx(trc::Key::Reason, "No document ids were created")
})
@@ -736,7 +736,7 @@ impl AssignedIds {
pub fn last_document_id(&self) -> trc::Result<u32> {
self.document_ids.last().copied().ok_or_else(|| {
- trc::Cause::Unexpected
+ trc::StoreCause::Unexpected
.caused_by(trc::location!())
.ctx(trc::Key::Reason, "No document ids were created")
})
@@ -744,7 +744,7 @@ impl AssignedIds {
pub fn last_counter_id(&self) -> trc::Result<i64> {
self.counter_ids.last().copied().ok_or_else(|| {
- trc::Cause::Unexpected
+ trc::StoreCause::Unexpected
.caused_by(trc::location!())
.ctx(trc::Key::Reason, "No document ids were created")
})