penumbra_sdk_tct/storage.rs
//! Incremental serialization and non-incremental deserialization for the [`Tree`](crate::Tree).

use std::{
    collections::{btree_map::Entry, BTreeMap},
    fmt::Debug,
    ops::Range,
};

use futures::Stream;

use crate::prelude::*;

pub(crate) mod deserialize;
pub(crate) mod serialize;

pub mod in_memory;
pub use deserialize::{LoadCommitments, LoadHashes};
pub use in_memory::InMemory;

/// A stored position for the tree: either the position of the tree, or a marker indicating that it
/// is full, and therefore does not have a position.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
pub enum StoredPosition {
    /// The tree has the given position.
    Position(Position),
    /// The tree is full.
    Full,
}

impl Default for StoredPosition {
    fn default() -> Self {
        StoredPosition::Position(Position::default())
    }
}

impl From<StoredPosition> for Option<Position> {
    fn from(stored: StoredPosition) -> Self {
        match stored {
            StoredPosition::Position(position) => Some(position),
            StoredPosition::Full => None,
        }
    }
}

impl From<Option<Position>> for StoredPosition {
    fn from(position: Option<Position>) -> Self {
        match position {
            Some(position) => StoredPosition::Position(position),
            None => StoredPosition::Full,
        }
    }
}
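
// Illustration (not part of the original module): a minimal sketch exercising the conversions
// above between `StoredPosition` and `Option<Position>`, assuming only the `Default`, `Debug`,
// and `PartialEq` impls implied by the derives and impls above.
#[cfg(test)]
mod stored_position_example {
    use super::*;

    #[test]
    fn stored_position_round_trip() {
        // The default stored position wraps the default `Position`.
        let stored = StoredPosition::default();
        assert_eq!(Option::<Position>::from(stored), Some(Position::default()));

        // A full tree has no position, and converting `None` back yields the `Full` marker.
        let full: Option<Position> = StoredPosition::Full.into();
        assert_eq!(full, None);
        assert_eq!(StoredPosition::from(None::<Position>), StoredPosition::Full);
    }
}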

/// An `async` storage backend capable of reading stored [`struct@Hash`]es and
/// [`StateCommitment`]s as well as storing the current [`Position`].
#[async_trait]
pub trait AsyncRead {
    /// The error returned when something goes wrong in a request.
    type Error;

    /// The type of stream returned by [`AsyncRead::hashes`].
    type HashesStream<'a>: Stream<Item = Result<(Position, u8, Hash), Self::Error>> + Unpin + 'a
    where
        Self: 'a;

    /// The type of stream returned by [`AsyncRead::commitments`].
    type CommitmentsStream<'a>: Stream<Item = Result<(Position, StateCommitment), Self::Error>>
        + Unpin
        + 'a
    where
        Self: 'a;

    /// Fetch the currently stored position.
    async fn position(&mut self) -> Result<StoredPosition, Self::Error>;

    /// Fetch the current forgotten version.
    async fn forgotten(&mut self) -> Result<Forgotten, Self::Error>;

    /// Fetch the hash at the given position and height, if it exists.
    async fn hash(&mut self, position: Position, height: u8) -> Result<Option<Hash>, Self::Error>;

    /// Get the full list of all internal hashes stored, indexed by position and height.
    fn hashes(&mut self) -> Self::HashesStream<'_>;

    /// Fetch the commitment at the given position, if it exists.
    async fn commitment(
        &mut self,
        position: Position,
    ) -> Result<Option<StateCommitment>, Self::Error>;

    /// Get the full list of all commitments stored, indexed by position.
    fn commitments(&mut self) -> Self::CommitmentsStream<'_>;
}
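
// Illustration (not part of the original module): a minimal sketch of consuming the
// `AsyncRead::hashes` stream with `futures::TryStreamExt`. The helper name is hypothetical;
// it simply drains the stream into a `Vec`, stopping at the first backend error.
#[allow(dead_code)]
async fn collect_all_hashes_async<R: AsyncRead>(
    reader: &mut R,
) -> Result<Vec<(Position, u8, Hash)>, R::Error> {
    use futures::TryStreamExt as _;

    // Each stream item is a `Result`, so `try_collect` yields either the complete list of
    // `(position, height, hash)` triples or the backend's error.
    reader.hashes().try_collect().await
}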

/// An `async` storage backend capable of writing [`struct@Hash`]es and [`StateCommitment`]s, and
/// garbage-collecting those which have been forgotten.
#[async_trait]
pub trait AsyncWrite: AsyncRead {
    /// Write a single hash into storage.
    ///
    /// Backends are only *required* to persist hashes marked as `essential`. They may choose to
    /// persist other hashes, and the choice of which non-essential hashes to persist is
    /// unconstrained. However, choosing not to persist non-essential hashes imposes computational
    /// overhead upon deserialization.
    async fn add_hash(
        &mut self,
        position: Position,
        height: u8,
        hash: Hash,
        essential: bool,
    ) -> Result<(), Self::Error>;

    /// Write a single commitment into storage.
    ///
    /// This should return an error if a commitment is already present at that location; no
    /// location's value should ever be overwritten.
    async fn add_commitment(
        &mut self,
        position: Position,
        commitment: StateCommitment,
    ) -> Result<(), Self::Error>;

    /// Delete every stored [`struct@Hash`] whose height is less than `below_height` and whose
    /// position is within the half-open [`Range`] of `positions`, as well as every
    /// [`StateCommitment`] whose position is within the range.
    async fn delete_range(
        &mut self,
        below_height: u8,
        positions: Range<Position>,
    ) -> Result<(), Self::Error>;

    /// Set the stored position of the tree.
    ///
    /// This should return an error if the position goes backwards.
    async fn set_position(&mut self, position: StoredPosition) -> Result<(), Self::Error>;

    /// Set the forgotten version of the tree.
    ///
    /// This should return an error if the version goes backwards.
    async fn set_forgotten(&mut self, forgotten: Forgotten) -> Result<(), Self::Error>;
}
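
// Illustration (not part of the original module): a hedged sketch of recording a freshly
// inserted commitment through any `AsyncWrite` backend and then advancing the stored position.
// The helper name and call ordering are illustrative assumptions, not an API provided by this
// crate; a real caller must ensure the new position never goes backwards.
#[allow(dead_code)]
async fn record_insertion<W: AsyncWrite>(
    writer: &mut W,
    position: Position,
    commitment: StateCommitment,
    new_position: StoredPosition,
) -> Result<(), W::Error> {
    // Adding a commitment errors if one is already stored at this position.
    writer.add_commitment(position, commitment).await?;
    // Advance the stored position; the backend should reject a backwards move.
    writer.set_position(new_position).await?;
    Ok(())
}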

/// A synchronous storage backend capable of reading stored [`struct@Hash`]es and
/// [`StateCommitment`]s as well as storing the current [`Position`].
pub trait Read {
    /// The error returned when something goes wrong in a request.
    type Error;

    /// The type of iterator returned when reading hashes from the database.
    type HashesIter<'a>: Iterator<Item = Result<(Position, u8, Hash), Self::Error>> + 'a
    where
        Self: 'a;

    /// The type of iterator returned when reading commitments from the database.
    type CommitmentsIter<'a>: Iterator<Item = Result<(Position, StateCommitment), Self::Error>> + 'a
    where
        Self: 'a;

    /// Fetch the currently stored position.
    fn position(&mut self) -> Result<StoredPosition, Self::Error>;

    /// Fetch the current forgotten version.
    fn forgotten(&mut self) -> Result<Forgotten, Self::Error>;

    /// Fetch a specific hash at the given position and height, if it exists.
    fn hash(&mut self, position: Position, height: u8) -> Result<Option<Hash>, Self::Error>;

    /// Get the full list of all internal hashes stored, indexed by position and height.
    #[allow(clippy::type_complexity)]
    fn hashes(&mut self) -> Self::HashesIter<'_>;

    /// Fetch a specific commitment at the given position, if it exists.
    fn commitment(&mut self, position: Position) -> Result<Option<StateCommitment>, Self::Error>;

    /// Get the full list of all commitments stored, indexed by position.
    #[allow(clippy::type_complexity)]
    fn commitments(&mut self) -> Self::CommitmentsIter<'_>;
}
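
// Illustration (not part of the original module): the synchronous counterpart of the async
// sketch above. Because `Read::hashes` yields `Result` items, collecting into a
// `Result<Vec<_>, _>` stops at the first backend error. The helper name is hypothetical.
#[allow(dead_code)]
fn collect_all_hashes<R: Read>(reader: &mut R) -> Result<Vec<(Position, u8, Hash)>, R::Error> {
    reader.hashes().collect()
}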

/// A synchronous storage backend capable of writing [`struct@Hash`]es and [`StateCommitment`]s,
/// and garbage-collecting those which have been forgotten.
pub trait Write: Read {
    /// Write a single hash into storage.
    ///
    /// Backends are only *required* to persist hashes marked as `essential`. They may choose to
    /// persist other hashes, and the choice of which non-essential hashes to persist is
    /// unconstrained. However, choosing not to persist non-essential hashes imposes computational
    /// overhead upon deserialization.
    fn add_hash(
        &mut self,
        position: Position,
        height: u8,
        hash: Hash,
        essential: bool,
    ) -> Result<(), Self::Error>;

    /// Write a single commitment into storage.
    ///
    /// This should return an error if a commitment is already present at that location; no
    /// location's value should ever be overwritten.
    fn add_commitment(
        &mut self,
        position: Position,
        commitment: StateCommitment,
    ) -> Result<(), Self::Error>;

    /// Delete every stored [`struct@Hash`] whose height is less than `below_height` and whose
    /// position is within the half-open [`Range`] of `positions`, as well as every
    /// [`StateCommitment`] whose position is within the range.
    fn delete_range(
        &mut self,
        below_height: u8,
        positions: Range<Position>,
    ) -> Result<(), Self::Error>;

    /// Set the stored position of the tree.
    ///
    /// This should return an error if the position goes backwards.
    fn set_position(&mut self, position: StoredPosition) -> Result<(), Self::Error>;

    /// Set the forgotten version of the tree.
    ///
    /// This should return an error if the version goes backwards.
    fn set_forgotten(&mut self, forgotten: Forgotten) -> Result<(), Self::Error>;
}
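
// Illustration (not part of the original module): a hedged sketch of replaying a batch of
// `Updates` (defined below) against any synchronous `Write` backend. The helper name is
// hypothetical, and it simply applies updates in the order the `Updates` iterator yields them;
// a real backend may additionally enforce its own ordering invariants.
#[allow(dead_code)]
fn apply_updates<W: Write>(writer: &mut W, updates: Updates) -> Result<(), W::Error> {
    for update in updates {
        match update {
            Update::SetPosition(position) => writer.set_position(position)?,
            Update::SetForgotten(forgotten) => writer.set_forgotten(forgotten)?,
            Update::StoreCommitment(StoreCommitment {
                position,
                commitment,
            }) => writer.add_commitment(position, commitment)?,
            Update::StoreHash(StoreHash {
                position,
                height,
                hash,
                essential,
            }) => writer.add_hash(position, height, hash, essential)?,
            Update::DeleteRange(DeleteRange {
                below_height,
                positions,
            }) => writer.delete_range(below_height, positions)?,
        }
    }
    Ok(())
}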

/// A single update to the underlying storage, as a data type.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum Update {
    /// Set the position of the tree.
    SetPosition(StoredPosition),
    /// Set the forgotten version of the tree.
    SetForgotten(Forgotten),
    /// Add a commitment to the tree.
    StoreCommitment(StoreCommitment),
    /// Add a hash to the tree.
    StoreHash(StoreHash),
    /// Delete a range of hashes and commitments from the tree.
    DeleteRange(DeleteRange),
}

/// An update to the underlying storage that constitutes storing a single hash.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StoreHash {
    /// The position of the hash.
    pub position: Position,
    /// The height of the hash.
    pub height: u8,
    /// The hash itself.
    pub hash: Hash,
    /// Whether the hash is essential to store, or can be dropped at the discretion of the storage backend.
    pub essential: bool,
}

/// An update to the underlying storage that constitutes storing a single commitment.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StoreCommitment {
    /// The position of the commitment.
    pub position: Position,
    /// The commitment itself.
    pub commitment: StateCommitment,
}

/// An update to the underlying storage that constitutes deleting a range of hashes and commitments.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DeleteRange {
    /// The height strictly below which hashes should be deleted.
    pub below_height: u8,
    /// The half-open range of positions within which hashes and commitments should be deleted.
    pub positions: Range<Position>,
}

/// A collection of updates to the underlying storage.
///
/// Note that this is both `FromIterator<Update>` and `Iterator<Item = Update>`, so you can
/// [`.collect()`](Iterator::collect) an `Iterator<Item = Update>` into this type, and you can also
/// iterate over its contained updates.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct Updates {
    /// The new position to set, if any.
    pub set_position: Option<StoredPosition>,
    /// The new forgotten version to set, if any.
    pub set_forgotten: Option<Forgotten>,
    /// The new commitments to store.
    pub store_commitments: Vec<StoreCommitment>,
    /// The new hashes to store.
    pub store_hashes: Vec<StoreHash>,
    /// The ranges of hashes and commitments to delete.
    pub delete_ranges: Vec<DeleteRange>,
}

impl FromIterator<Update> for Updates {
    fn from_iter<I: IntoIterator<Item = Update>>(iter: I) -> Self {
        let mut updates = Updates::default();
        for update in iter {
            match update {
                Update::SetPosition(position) => updates.set_position = Some(position),
                Update::SetForgotten(forgotten) => updates.set_forgotten = Some(forgotten),
                Update::StoreCommitment(commitment) => updates.store_commitments.push(commitment),
                Update::StoreHash(hash) => updates.store_hashes.push(hash),
                Update::DeleteRange(range) => updates.delete_ranges.push(range),
            }
        }
        updates
    }
}

impl Iterator for Updates {
    type Item = Update;

    fn next(&mut self) -> Option<Self::Item> {
        if let Some(position) = self.set_position.take() {
            return Some(Update::SetPosition(position));
        }
        if let Some(forgotten) = self.set_forgotten.take() {
            return Some(Update::SetForgotten(forgotten));
        }
        if let Some(commitment) = self.store_commitments.pop() {
            return Some(Update::StoreCommitment(commitment));
        }
        if let Some(hash) = self.store_hashes.pop() {
            return Some(Update::StoreHash(hash));
        }
        if let Some(range) = self.delete_ranges.pop() {
            return Some(Update::DeleteRange(range));
        }
        None
    }
}
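
// Illustration (not part of the original module): a minimal sketch exercising the
// `FromIterator` and `Iterator` impls above. Collecting keeps only the last position update,
// because `Updates` stores a single optional position rather than a list.
#[cfg(test)]
mod updates_example {
    use super::*;

    #[test]
    fn collect_and_drain_updates() {
        let updates: Updates = vec![
            Update::SetPosition(StoredPosition::default()),
            Update::SetPosition(StoredPosition::Full),
        ]
        .into_iter()
        .collect();

        // Draining the collected batch yields exactly one update: the last `SetPosition`.
        let drained: Vec<Update> = updates.collect();
        assert_eq!(drained.len(), 1);
        assert!(matches!(
            drained[0],
            Update::SetPosition(StoredPosition::Full)
        ));
    }
}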