use std::collections::BTreeMap;

use re_types_core::SizeBytes as _;

use crate::{QueryCache, QueryCacheKey};

// ---

/// Stats for all primary caches.
///
/// Fetch them via [`QueryCache::stats`].
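///
/// A minimal usage sketch (illustrative only; how the `caches` handle is obtained is
/// application-specific and not shown here):
///
/// ```ignore
/// let stats: CachesStats = caches.stats();
/// println!("primary caches take up {} bytes", stats.total_size_bytes());
/// ```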
#[derive(Default, Debug, Clone)]
pub struct CachesStats {
    pub latest_at: BTreeMap<QueryCacheKey, CacheStats>,
    pub range: BTreeMap<QueryCacheKey, CacheStats>,
}

impl CachesStats {
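    /// Total memory used by all primary caches, in bytes.
    ///
    /// This sums the *actual* (deduplicated) sizes of both the latest-at and range caches.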
    #[inline]
    pub fn total_size_bytes(&self) -> u64 {
        re_tracing::profile_function!();

        let Self { latest_at, range } = self;

        let latest_at_size_bytes: u64 = latest_at
            .values()
            .map(|stats| stats.total_actual_size_bytes)
            .sum();
        let range_size_bytes: u64 = range
            .values()
            .map(|stats| stats.total_actual_size_bytes)
            .sum();

        latest_at_size_bytes + range_size_bytes
    }
}

/// Stats for a single cache (whether latest-at or range).
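///
/// As a hypothetical illustration of how the two size fields relate, one could estimate
/// how many bytes deduplication saves for a given cache (a sketch, not an existing helper):
///
/// ```ignore
/// fn dedup_savings_bytes(stats: &CacheStats) -> u64 {
///     // Effective size assumes every chunk was fully copied; actual size is after dedup.
///     stats
///         .total_effective_size_bytes
///         .saturating_sub(stats.total_actual_size_bytes)
/// }
/// ```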
#[derive(Default, Debug, Clone)]
pub struct CacheStats {
    /// How many chunks in the cache?
    pub total_chunks: u64,

    /// What would be the size of this cache in the worst case, i.e. if all chunks had
    /// been fully copied?
    pub total_effective_size_bytes: u64,

    /// What is the actual size of this cache after deduplication?
    pub total_actual_size_bytes: u64,
}

impl QueryCache {
    /// Computes the stats for all primary caches.
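    ///
    /// The returned [`CachesStats`] can then be broken down per cache kind, e.g.
    /// (a sketch; obtaining the `caches` handle is application-specific):
    ///
    /// ```ignore
    /// let stats = caches.stats();
    /// let num_range_chunks: u64 = stats.range.values().map(|s| s.total_chunks).sum();
    /// let num_latest_at_chunks: u64 = stats.latest_at.values().map(|s| s.total_chunks).sum();
    /// ```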
    pub fn stats(&self) -> CachesStats {
        re_tracing::profile_function!();

        let latest_at = {
            let latest_at = self.latest_at_per_cache_key.read().clone();
            // Implicitly releasing top-level cache mappings -- concurrent queries can run once again.

            latest_at
                .iter()
                .map(|(key, cache)| {
                    let cache = cache.read();
                    (
                        key.clone(),
                        CacheStats {
                            total_chunks: cache.per_query_time.len() as _,
                            total_effective_size_bytes: cache
                                .per_query_time
                                .values()
                                .map(|cached| cached.unit.total_size_bytes())
                                .sum(),
                            total_actual_size_bytes: cache.per_query_time.total_size_bytes(),
                        },
                    )
                })
                .collect()
        };

        let range = {
            let range = self.range_per_cache_key.read().clone();
            // Implicitly releasing top-level cache mappings -- concurrent queries can run once again.

            range
                .iter()
                .map(|(key, cache)| {
                    let cache = cache.read();
                    (
                        key.clone(),
                        CacheStats {
                            total_chunks: cache.chunks.len() as _,
                            total_effective_size_bytes: cache
                                .chunks
                                .values()
                                .map(|cached| cached.chunk.total_size_bytes())
                                .sum(),
                            total_actual_size_bytes: cache.chunks.total_size_bytes(),
                        },
                    )
                })
                .collect()
        };

        CachesStats { latest_at, range }
    }
}