dfir_rs/compiled/pull/half_join_state/multiset.rs

use std::borrow::Cow;
use std::collections::VecDeque;
use std::collections::hash_map::Entry;

use super::HalfJoinState;
use crate::util::clear::Clear;

type HashMap<K, V> = rustc_hash::FxHashMap<K, V>;

use smallvec::{SmallVec, smallvec};

/// [`HalfJoinState`] with multiset semantics.
#[derive(Debug)]
pub struct HalfMultisetJoinState<Key, ValBuild, ValProbe> {
    // Here a smallvec with inline storage of 1 is chosen.
    // The rationale is that joins likely have a bimodal distribution of key contention:
    // many joins have exactly 1 value per key on the LHS/RHS, while another large category of joins has multiple values per key.
    // For the multi-value category there is no reason to expect a specific count of 2, 3, 4, or N values per key, so there is no good inline capacity to pick for the smallvec.
    // Instead we focus on the single-value group, which gets its value stored inline, without hurting the multi-value group too much with excessive memory usage.
    /// Table to probe; the vec value contains all matches for the key.
    table: HashMap<Key, SmallVec<[ValBuild; 1]>>,
    /// Not-yet-emitted matches.
    current_matches: VecDeque<(Key, ValProbe, ValBuild)>,
    len: usize,
}
impl<Key, ValBuild, ValProbe> Default for HalfMultisetJoinState<Key, ValBuild, ValProbe> {
    fn default() -> Self {
        Self {
            table: HashMap::default(),
            current_matches: VecDeque::default(),
            len: 0,
        }
    }
}
impl<Key, ValBuild, ValProbe> Clear for HalfMultisetJoinState<Key, ValBuild, ValProbe> {
    fn clear(&mut self) {
        self.table.clear();
        self.current_matches.clear();
        self.len = 0;
    }
}
impl<Key, ValBuild, ValProbe> HalfJoinState<Key, ValBuild, ValProbe>
    for HalfMultisetJoinState<Key, ValBuild, ValProbe>
where
    Key: Clone + Eq + std::hash::Hash,
    ValBuild: Clone,
    ValProbe: Clone,
{
    fn build(&mut self, k: Key, v: Cow<'_, ValBuild>) -> bool {
        let entry = self.table.entry(k);

        match entry {
            Entry::Occupied(mut e) => {
                let vec = e.get_mut();

                vec.push(v.into_owned());
                self.len += 1;
            }
            Entry::Vacant(e) => {
                e.insert(smallvec![v.into_owned()]);
                self.len += 1;
            }
        };

        true
    }

    fn probe(&mut self, k: &Key, v: &ValProbe) -> Option<(Key, ValProbe, ValBuild)> {
        // TODO: We currently don't free/shrink the `self.current_matches` VecDeque, to save time.
        // This means its capacity will grow to match the largest number of matches produced by a single probe call.
        // Maybe we should free this memory at the beginning of every tick, or periodically?
        let mut iter = self
            .table
            .get(k)?
            .iter()
            .map(|valbuild| (k.clone(), v.clone(), valbuild.clone()));

        let first = iter.next();

        self.current_matches.extend(iter);

        first
    }

    fn full_probe(&self, k: &Key) -> std::slice::Iter<'_, ValBuild> {
        let Some(sv) = self.table.get(k) else {
            return [].iter();
        };

        sv.iter()
    }

    fn pop_match(&mut self) -> Option<(Key, ValProbe, ValBuild)> {
        self.current_matches.pop_front()
    }

    fn len(&self) -> usize {
        self.len
    }
    fn iter(&self) -> std::collections::hash_map::Iter<'_, Key, SmallVec<[ValBuild; 1]>> {
        self.table.iter()
    }
}
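
// A small illustrative sketch of the multiset semantics implemented above:
// duplicate build values for the same key are all retained, and a probe yields
// one match immediately while queueing the rest for `pop_match`. It only uses
// the `build`/`probe`/`pop_match`/`len` methods defined in this file; the
// concrete key/value types and the test name are arbitrary choices for illustration.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn multiset_duplicates_are_retained() {
        let mut state = HalfMultisetJoinState::<u32, &'static str, char>::default();

        // Multiset semantics: building the same (key, value) pair twice keeps both copies.
        assert!(state.build(7, Cow::Owned("build")));
        assert!(state.build(7, Cow::Owned("build")));
        assert_eq!(2, state.len());

        // `probe` returns the first match directly and queues the remaining one...
        assert_eq!(Some((7, 'p', "build")), state.probe(&7, &'p'));
        // ...which is then drained via `pop_match`.
        assert_eq!(Some((7, 'p', "build")), state.pop_match());
        assert_eq!(None, state.pop_match());

        // Probing a key that was never built yields no matches.
        assert_eq!(None, state.probe(&8, &'q'));
    }
}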