Commit 27200b9

initial configuration
1 parent 3ffbbdb commit 27200b9

File tree

1 file changed (+133, -9)

parachain/simtests/src/pallet_mmr.rs

@@ -3,7 +3,6 @@
 use std::{env, time::Duration};
 
 use codec::Decode;
-use ismp::host::ethereum;
 use merkle_mountain_range::MerkleProof;
 use sc_consensus_manual_seal::CreatedBlock;
 use sp_core::{crypto::Ss58Codec, keccak_256, offchain::StorageKind, Bytes, H256};
@@ -14,14 +13,31 @@ use sp_mmr_primitives::{
 	INDEXING_PREFIX,
 };
 use sp_runtime::traits::Keccak256;
-use subxt::{rpc_params, tx::SubmittableExtrinsic, utils::H160};
+use subxt::{
+	rpc_params,
+	tx::SubmittableExtrinsic,
+	utils::{AccountId32, H160},
+	OnlineClient,
+};
 
+use ismp::host::StateMachine;
 use mmr_primitives::{DataOrHash, FullLeaf};
 use pallet_ismp::mmr::{Leaf, ProofKeys};
 use pallet_mmr::mmr::Hasher as MmrHasher;
+use subxt_signer::sr25519::dev::{self};
 use subxt_utils::{
-	gargantua, gargantua::api::runtime_types::pallet_ismp_demo::pallet::EvmParams, Hyperbridge,
+	gargantua,
+	gargantua::api::runtime_types::{
+		ismp::host::Ethereum,
+		pallet_ismp_demo::pallet::{EvmParams, GetRequest, TransferParams},
+	},
+	Hyperbridge,
 };
+use tesseract_integration_test;
+use tesseract_substrate::{self, SubstrateClient};
+
+const LEAVES_DEPTH: u32 = 630_000; // leaves = approx 1Gb
+const PRUNING_DEPTH: u32 = 63_000; // leaves = approx 100 Mb
 
 #[tokio::test]
 #[ignore]
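
An aside on the two constants added above: assuming the `approx 1Gb` / `approx 100 Mb` comments describe the total offchain-DB footprint of the leaves, the implied per-leaf size is roughly 1.6 KB. A quick back-of-the-envelope sketch (the per-leaf size is an inference, not a measured value):

```rust
// Sanity check of the size comments on LEAVES_DEPTH and PRUNING_DEPTH.
fn main() {
	const LEAVES_DEPTH: u64 = 630_000; // ~1 GB worth of leaves
	const PRUNING_DEPTH: u64 = 63_000; // ~100 MB worth of leaves

	let one_gb: u64 = 1_000_000_000;
	let approx_leaf_size = one_gb / LEAVES_DEPTH; // ≈ 1_587 bytes, i.e. ~1.6 KB per leaf
	println!("approx leaf size: {approx_leaf_size} bytes");

	// The same per-leaf size puts one pruning batch at roughly 100 MB.
	println!("pruning batch: {} bytes", PRUNING_DEPTH * approx_leaf_size); // ≈ 99_981_000
}
```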
@@ -41,13 +57,13 @@ async fn test_insert_1_billion_mmr_leaves() -> Result<(), anyhow::Error> {
 		&format!("ws://127.0.0.1:{}", port),
 		u32::MAX,
 	)
-	.await?;
+	.await?;
 	let pb = ProgressBar::new(100_000);
 	for pos in 44_243..100_000 {
 		// Initialize MMR Pallet by dispatching some leaves and finalizing
 		let params = EvmParams {
 			module: H160::random(),
-			destination: ethereum::EXECUTION_LAYER,
+			destination: Ethereum::ExecutionLayer,
 			timeout: 0,
 			count: 10_000,
 		};
@@ -95,7 +111,7 @@ async fn dispatch_requests() -> Result<(), anyhow::Error> {
 		&format!("ws://127.0.0.1:{}", port),
 		u32::MAX,
 	)
-	.await?;
+	.await?;
 
 	let address = subxt_utils::gargantua::api::storage().mmr().number_of_leaves();
 	let leaf_count_at_start = client
@@ -117,7 +133,7 @@ async fn dispatch_requests() -> Result<(), anyhow::Error> {
 	// Initialize MMR Pallet by dispatching some leaves and finalizing
 	let params = EvmParams {
 		module: H160::random(),
-		destination: ethereum::EXECUTION_LAYER,
+		destination: Ethereum::ExecutionLayer,
 		timeout: 0,
 		count: 10,
 	};
@@ -194,7 +210,7 @@ async fn dispatch_requests() -> Result<(), anyhow::Error> {
 	for _ in 0..3 {
 		let params = EvmParams {
 			module: H160::random(),
-			destination: ethereum::EXECUTION_LAYER,
+			destination: Ethereum::ExecutionLayer,
 			timeout: 0,
 			count: 10,
 		};
@@ -255,7 +271,7 @@ async fn dispatch_requests() -> Result<(), anyhow::Error> {
 	for i in 0..accounts.len() {
 		let params = EvmParams {
 			module: H160::random(),
-			destination: ethereum::ARBITRUM,
+			destination: Ethereum::Arbitrum,
 			timeout: 0,
 			count: 10,
 		};
@@ -447,3 +463,111 @@ async fn dispatch_requests() -> Result<(), anyhow::Error> {
 	assert!(res);
 	Ok(())
 }
+
+/// Offchain DB MMR pruning for timed-out requests and processed requests whose
+/// internal nodes are no longer useful to relayers and users.
+/// Pruning should be triggered iff a certain depth has been reached, where
+/// depth = current_number_of_leaves - last_number_of_leaves.
+/// This effectively enforces an upper bound on request storage.
+///
+/// Pruning should not consume any useful block weight, hence it should run during `on_idle`.
+//#[cfg(feature = "offchainDb-pruning")]
+#[tokio::test(flavor = "multi_thread")]
+#[ignore]
+async fn offchainDb_mmr_pruning_requests_work() -> Result<(), anyhow::Error> {
+	let (chain_a_sub_client, chain_b_sub_client) = tesseract_integration_test::create_clients()?;
+	// produce some initial blocks on both chains (A and B)
+	for _ in 0..=10 {
+		let block = chain_a_sub_client
+			.client
+			.rpc()
+			.request::<CreatedBlock<H256>>("engine_createBlock", rpc_params![true, false])
+			.await?;
+
+		let finalized = chain_a_sub_client
+			.client
+			.rpc()
+			.request::<bool>("engine_finalizeBlock", rpc_params![block.hash])
+			.await?;
+		assert!(finalized);
+
+		let block = chain_b_sub_client
+			.client
+			.rpc()
+			.request::<CreatedBlock<H256>>("engine_createBlock", rpc_params![true, false])
+			.await?;
+
+		let finalized = chain_b_sub_client
+			.client
+			.rpc()
+			.request::<bool>("engine_finalizeBlock", rpc_params![block.hash])
+			.await?;
+		assert!(finalized);
+	}
+	tokio::time::sleep(Duration::from_secs(10)).await;
+
+	// dispatch and process 600_000 requests
+	dispatch_and_process_requests(chain_a_sub_client.clone(), chain_b_sub_client.clone(), 600_000)
+		.await?;
+	// assert the number of leaves
+	let leaves_count = chain_a_sub_client
+		.client
+		.storage()
+		.at_latest()
+		.await?
+		.fetch(&gargantua::api::storage().mmr().number_of_leaves())
+		.await?
+		.ok_or("Failed to fetch")
+		.unwrap();
+
+	assert_eq!(leaves_count, 600_000);
+	// perform random lookups in the offchain DB
+
+	// dispatch and process an additional 30_000 requests to trigger pruning
+	dispatch_and_process_requests(chain_a_sub_client.clone(), chain_b_sub_client.clone(), 30_000)
+		.await?;
+
+	// assert the number of leaves is not 630_000
+
+	// get the number of leaves remaining (A)
+
+	dispatch_and_process_requests(chain_a_sub_client, chain_b_sub_client, 100_000).await?;
+
+	// assert the number of leaves is A + 100_000, indicating nothing was pruned (depth not reached)
+
+	Ok(())
+}
+
+/// Dispatches get and post requests, times out the post requests and processes the get requests.
+/// Returns the number of timed-out and pruned requests.
+async fn dispatch_and_process_requests(
+	client_a_sub: SubstrateClient<Hyperbridge>,
+	client_b_sub: SubstrateClient<Hyperbridge>,
+	count: u64,
+) -> Result<u64, anyhow::Error> {
+	// post request
+	let amount: u128 = 100 * 1000000000000;
+	let transfer_params = TransferParams {
+		to: AccountId32(dev::alice().public_key().0),
+		amount,
+		para_id: 2001,
+		timeout: 5,
+	};
+
+	// get request
+	let encoded_chain_b_id_storage_key =
+		"0x0d715f2646c8f85767b5d2764bb2782604a74d81251e398fd8a0a4d55023bb3f";
+
+	let latest_height_b =
+		client_a_sub.query_latest_height(client_b_sub.state_machine_id()).await? - 1;
+	let get_params = GetRequest {
+		para_id: 2001,
+		height: latest_height_b,
+		timeout: 0,
+		keys: vec![hex::decode(encoded_chain_b_id_storage_key.strip_prefix("0x").unwrap()).unwrap()],
+	};
+
+	// batch submit the requests based on `count`
+	todo!();
+	Ok(0)
+}
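
The doc comment on `offchainDb_mmr_pruning_requests_work` describes the intended trigger: prune the offchain DB only once `depth = current_number_of_leaves - last_number_of_leaves` reaches `PRUNING_DEPTH`, and only from `on_idle` so it never competes for useful block weight. Below is a minimal sketch of that trigger logic, assuming hypothetical `leaf_count` / `last_pruned_leaf_count` values and a hypothetical `prune_offchain_leaves` helper; it is not the pallet's actual implementation.

```rust
const PRUNING_DEPTH: u64 = 63_000;

/// Intended to run from `on_idle`, so pruning never consumes useful block weight.
/// Returns how many leaves' offchain nodes were pruned.
fn maybe_prune(leaf_count: u64, last_pruned_leaf_count: &mut u64) -> u64 {
	// depth = current_number_of_leaves - last_number_of_leaves
	let depth = leaf_count.saturating_sub(*last_pruned_leaf_count);
	if depth < PRUNING_DEPTH {
		// Depth not reached: keep the offchain nodes so proofs can still be served.
		return 0;
	}
	// Depth reached: drop the offchain nodes for requests that are already
	// processed or timed out, then remember where pruning stopped.
	// prune_offchain_leaves(*last_pruned_leaf_count..leaf_count); // hypothetical helper
	*last_pruned_leaf_count = leaf_count;
	depth
}
```

The exact storage items and the range to prune would depend on how pallet_ismp tracks processed and timed-out requests, which is outside this diff.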

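For the `todo!()` at the end of `dispatch_and_process_requests`, one plausible shape for the batch submission is sketched below. It reuses the variables already defined in that function and the manual-seal RPC pattern used earlier in the file; the commented-out `ismp_demo` transaction paths are assumptions about the `gargantua` codegen in `subxt_utils`, not verified API.

```rust
// Sketch only: would replace the `todo!()` inside dispatch_and_process_requests.
let mut dispatched: u64 = 0;

while dispatched < count {
	// Hypothetical codegen paths; adjust to the actual `gargantua::api::tx()` modules.
	// let signer = dev::alice();
	// let transfer_tx = gargantua::api::tx().ismp_demo().transfer(transfer_params.clone());
	// let get_tx = gargantua::api::tx().ismp_demo().get_request(get_params.clone());
	// client_a_sub.client.tx().sign_and_submit_then_watch_default(&transfer_tx, &signer).await?;
	// client_a_sub.client.tx().sign_and_submit_then_watch_default(&get_tx, &signer).await?;
	dispatched += 2;

	// Periodically seal and finalize a block on chain A so the pool drains and the
	// requests (and their eventual timeouts) are actually included.
	if dispatched % 1_000 == 0 {
		let block = client_a_sub
			.client
			.rpc()
			.request::<CreatedBlock<H256>>("engine_createBlock", rpc_params![true, false])
			.await?;
		let finalized = client_a_sub
			.client
			.rpc()
			.request::<bool>("engine_finalizeBlock", rpc_params![block.hash])
			.await?;
		assert!(finalized);
	}
}

// Return how many requests were dispatched; counting timed-out vs pruned requests
// would require reading events or offchain storage, which is not shown in this diff.
Ok(dispatched)
```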