|
22 | 22 | #![warn(missing_docs)]
|
23 | 23 |
|
24 | 24 | use crate::{aux_schema, HashFor, MmrClient, LOG_TARGET};
|
25 |
| -use log::{debug, error, info, warn}; |
| 25 | +use log::{debug, error, info, trace, warn}; |
26 | 26 | use pallet_ismp::mmr::Leaf;
|
27 | 27 | use pallet_mmr_runtime_api::MmrRuntimeApi;
|
28 | 28 | use sc_client_api::{Backend, FinalityNotification};
|
@@ -168,9 +168,15 @@ where
|
168 | 168 | );
|
169 | 169 |
|
170 | 170 | for pos in stale_nodes {
|
171 |
| - let temp_key = self.node_temp_offchain_key(pos, fork_identifier); |
172 |
| - self.offchain_db.local_storage_clear(StorageKind::PERSISTENT, &temp_key); |
173 |
| - debug!(target: LOG_TARGET, "Pruned elem at pos {} fork_identifier {:?} header_hash {:?}", pos, fork_identifier, block_hash); |
| 171 | + // Only prune nodes that have been moved to the canonical path to prevent deleting nodes |
| 172 | + // from forks that share the same child trie root before they are finalized. |
| 173 | + let canon_key = self.node_canon_offchain_key(pos); |
| 174 | + if let Some(_) = self.offchain_db.local_storage_get(StorageKind::PERSISTENT, &canon_key) |
| 175 | + { |
| 176 | + let temp_key = self.node_temp_offchain_key(pos, fork_identifier); |
| 177 | + self.offchain_db.local_storage_clear(StorageKind::PERSISTENT, &temp_key); |
| 178 | + debug!(target: LOG_TARGET, "Pruned elem at pos {} fork_identifier {:?} header_hash {:?}", pos, fork_identifier, block_hash); |
| 179 | + } |
174 | 180 | }
|
175 | 181 | }
|
176 | 182 |
|
@@ -252,15 +258,16 @@ where
|
252 | 258 | self.best_canonicalized,
|
253 | 259 | header.number,
|
254 | 260 | );
|
| 261 | + // If a block has been skipped, canonicalize all blocks in between
| 262 | + self.canonicalize_catch_up(header.parent); |
255 | 263 | }
|
256 | 264 | self.best_canonicalized = header.number;
|
257 | 265 | }
|
258 | 266 |
|
259 | 267 | /// In case of missed finality notifications (node restarts for example),
|
260 | 268 | /// make sure to also canon everything leading up to the given block hash `first`.
|
261 |
| - pub fn canonicalize_catch_up(&mut self, notification: &FinalityNotification<B>) { |
262 |
| - let first = notification.tree_route.first().unwrap_or(¬ification.hash); |
263 |
| - if let Some(mut header) = self.header_metadata_or_log(*first, "canonicalize") { |
| 269 | + pub fn canonicalize_catch_up(&mut self, first: HashFor<B>) { |
| 270 | + if let Some(mut header) = self.header_metadata_or_log(first, "canonicalize") { |
264 | 271 | let mut to_canon = VecDeque::<<B as Block>::Hash>::new();
|
265 | 272 | // Walk up the chain adding all blocks newer than `self.best_canonicalized`.
|
266 | 273 | loop {
|
@@ -304,8 +311,22 @@ where
|
304 | 311 | // Update the first MMR block in case of a pallet reset.
|
305 | 312 | self.handle_potential_pallet_reset(¬ification);
|
306 | 313 |
|
307 |
| - // Move offchain MMR nodes for finalized blocks to canonical keys. |
| 314 | + // // Run catch-up just to be sure no blocks are skipped
| 315 | + // self.canonicalize_catch_up(¬ification); |
308 | 316 |
|
| 317 | + trace!(target: LOG_TARGET, "Got new finality notification {:?}, Best Canonicalized {:?}", notification.hash, self.best_canonicalized); |
| 318 | + let mut block_nums = vec![]; |
| 319 | + for hash in notification.tree_route.iter().chain(std::iter::once(¬ification.hash)) { |
| 320 | + match self.header_metadata_or_log(*hash, "canonicalize") { |
| 321 | + Some(header) => { |
| 322 | + block_nums.push(header.number); |
| 323 | + }, |
| 324 | + _ => {}, |
| 325 | + }; |
| 326 | + } |
| 327 | + |
| 328 | + trace!(target: LOG_TARGET, "Canonicalizing blocks {block_nums:?}");
| 329 | + // Move offchain MMR nodes for finalized blocks to canonical keys. |
309 | 330 | for hash in notification.tree_route.iter().chain(std::iter::once(¬ification.hash)) {
|
310 | 331 | self.canonicalize_branch(*hash);
|
311 | 332 | }
|
|
0 commit comments