3
3
use std:: { env, time:: Duration } ;
4
4
5
5
use codec:: Decode ;
6
- use ismp:: host:: ethereum;
7
6
use merkle_mountain_range:: MerkleProof ;
8
7
use sc_consensus_manual_seal:: CreatedBlock ;
9
8
use sp_core:: { crypto:: Ss58Codec , keccak_256, offchain:: StorageKind , Bytes , H256 } ;
@@ -14,14 +13,31 @@ use sp_mmr_primitives::{
14
13
INDEXING_PREFIX ,
15
14
} ;
16
15
use sp_runtime:: traits:: Keccak256 ;
17
- use subxt:: { rpc_params, tx:: SubmittableExtrinsic , utils:: H160 } ;
16
+ use subxt:: {
17
+ rpc_params,
18
+ tx:: SubmittableExtrinsic ,
19
+ utils:: { AccountId32 , H160 } ,
20
+ OnlineClient ,
21
+ } ;
18
22
23
+ use ismp:: host:: StateMachine ;
19
24
use mmr_primitives:: { DataOrHash , FullLeaf } ;
20
25
use pallet_ismp:: mmr:: { Leaf , ProofKeys } ;
21
26
use pallet_mmr:: mmr:: Hasher as MmrHasher ;
27
+ use subxt_signer:: sr25519:: dev:: { self } ;
22
28
use subxt_utils:: {
23
- gargantua, gargantua:: api:: runtime_types:: pallet_ismp_demo:: pallet:: EvmParams , Hyperbridge ,
29
+ gargantua,
30
+ gargantua:: api:: runtime_types:: {
31
+ ismp:: host:: Ethereum ,
32
+ pallet_ismp_demo:: pallet:: { EvmParams , GetRequest , TransferParams } ,
33
+ } ,
34
+ Hyperbridge ,
24
35
} ;
36
+ use tesseract_integration_test;
37
+ use tesseract_substrate:: { self , SubstrateClient } ;
38
+
39
// Pruning thresholds for the offchain-DB MMR tests below.
// NOTE(review): the "approx 1Gb / 100 Mb" figures are size estimates for the
// offchain DB at these leaf counts — TODO confirm against actual leaf encoding size.
const LEAVES_DEPTH: u32 = 630_000; // leaves = approx 1Gb
const PRUNING_DEPTH: u32 = 63_000; // leaves = approx 100 Mb
25
41
26
42
#[ tokio:: test]
27
43
#[ ignore]
@@ -41,13 +57,13 @@ async fn test_insert_1_billion_mmr_leaves() -> Result<(), anyhow::Error> {
41
57
& format ! ( "ws://127.0.0.1:{}" , port) ,
42
58
u32:: MAX ,
43
59
)
44
- . await ?;
60
+ . await ?;
45
61
let pb = ProgressBar :: new ( 100_000 ) ;
46
62
for pos in 44_243 ..100_000 {
47
63
// Initialize MMR Pallet by dispatching some leaves and finalizing
48
64
let params = EvmParams {
49
65
module : H160 :: random ( ) ,
50
- destination : ethereum :: EXECUTION_LAYER ,
66
+ destination : Ethereum :: ExecutionLayer ,
51
67
timeout : 0 ,
52
68
count : 10_000 ,
53
69
} ;
@@ -95,7 +111,7 @@ async fn dispatch_requests() -> Result<(), anyhow::Error> {
95
111
& format ! ( "ws://127.0.0.1:{}" , port) ,
96
112
u32:: MAX ,
97
113
)
98
- . await ?;
114
+ . await ?;
99
115
100
116
let address = subxt_utils:: gargantua:: api:: storage ( ) . mmr ( ) . number_of_leaves ( ) ;
101
117
let leaf_count_at_start = client
@@ -117,7 +133,7 @@ async fn dispatch_requests() -> Result<(), anyhow::Error> {
117
133
// Initialize MMR Pallet by dispatching some leaves and finalizing
118
134
let params = EvmParams {
119
135
module : H160 :: random ( ) ,
120
- destination : ethereum :: EXECUTION_LAYER ,
136
+ destination : Ethereum :: ExecutionLayer ,
121
137
timeout : 0 ,
122
138
count : 10 ,
123
139
} ;
@@ -194,7 +210,7 @@ async fn dispatch_requests() -> Result<(), anyhow::Error> {
194
210
for _ in 0 ..3 {
195
211
let params = EvmParams {
196
212
module : H160 :: random ( ) ,
197
- destination : ethereum :: EXECUTION_LAYER ,
213
+ destination : Ethereum :: ExecutionLayer ,
198
214
timeout : 0 ,
199
215
count : 10 ,
200
216
} ;
@@ -255,7 +271,7 @@ async fn dispatch_requests() -> Result<(), anyhow::Error> {
255
271
for i in 0 ..accounts. len ( ) {
256
272
let params = EvmParams {
257
273
module : H160 :: random ( ) ,
258
- destination : ethereum :: ARBITRUM ,
274
+ destination : Ethereum :: Arbitrum ,
259
275
timeout : 0 ,
260
276
count : 10 ,
261
277
} ;
@@ -447,3 +463,111 @@ async fn dispatch_requests() -> Result<(), anyhow::Error> {
447
463
assert ! ( res) ;
448
464
Ok ( ( ) )
449
465
}
466
+
467
/// offchainDB MMR pruning for timed-out requests and processed requests, whose
/// internal nodes are of no further use to relayers and users.
/// Pruning should be triggered iff a certain depth has been reached,
/// depth = current_number_of_leaves - last_number_of_leaves
/// This logic should enforce a maximum storage bound for requests.
///
/// Pruning should not consume any useful block weight, hence should be done during on-idle.
//#[cfg(feature = "offchainDb-pruning")]
#[tokio::test(flavor = "multi_thread")]
#[ignore]
async fn offchainDb_mmr_pruning_requests_work() -> Result<(), anyhow::Error> {
	// Two local substrate clients backed by manual-seal nodes (A and B).
	let (chain_a_sub_client, chain_b_sub_client) = tesseract_integration_test::create_clients()?;
	// Produce and finalize some initial blocks on both chains (A and B) so each
	// chain has a usable finalized head before requests are dispatched.
	for _ in 0..=10 {
		// engine_createBlock(create_empty = true, finalize = false) — block is
		// finalized explicitly below via engine_finalizeBlock.
		let block = chain_a_sub_client
			.client
			.rpc()
			.request::<CreatedBlock<H256>>("engine_createBlock", rpc_params![true, false])
			.await?;

		let finalized = chain_a_sub_client
			.client
			.rpc()
			.request::<bool>("engine_finalizeBlock", rpc_params![block.hash])
			.await?;
		assert!(finalized);

		let block = chain_b_sub_client
			.client
			.rpc()
			.request::<CreatedBlock<H256>>("engine_createBlock", rpc_params![true, false])
			.await?;

		let finalized = chain_b_sub_client
			.client
			.rpc()
			.request::<bool>("engine_finalizeBlock", rpc_params![block.hash])
			.await?;
		assert!(finalized);
	}
	// Give both nodes a moment to settle before dispatching load.
	tokio::time::sleep(Duration::from_secs(10)).await;

	// Dispatch and process 600_000 requests.
	dispatch_and_process_requests(chain_a_sub_client.clone(), chain_b_sub_client.clone(), 600_000)
		.await?;
	// Assert the on-chain MMR leaf count matches the number of dispatched requests.
	let leaves_count = chain_a_sub_client
		.client
		.storage()
		.at_latest()
		.await?
		.fetch(&gargantua::api::storage().mmr().number_of_leaves())
		.await?
		.ok_or("Failed to fetch")
		.unwrap();

	// NOTE(review): assumes exactly one MMR leaf per dispatched request — TODO confirm.
	assert_eq!(leaves_count, 600_000);
	// perform random lookup in the offchain Db

	// Dispatch and process an additional 30_000 requests to trigger pruning
	// (total would reach LEAVES_DEPTH = 630_000).
	dispatch_and_process_requests(chain_a_sub_client.clone(), chain_b_sub_client.clone(), 30_000)
		.await?;

	// assert the number of leaves are not 630_000

	// get the number of leaves remaining (A)

	dispatch_and_process_requests(chain_a_sub_client, chain_b_sub_client, 100_000).await?;

	// assert the number of leaves are A + 100_000 indicating not pruned as the depth not reached

	Ok(())
}
540
+
541
+ /// dispatch get and post requests and timeout post request and process get_requests
542
+ /// this function should return no of timedout and pruned requests
543
+ async fn dispatch_and_process_requests (
544
+ client_a_sub : SubstrateClient < Hyperbridge > ,
545
+ client_b_sub : SubstrateClient < Hyperbridge > ,
546
+ count : u64 ,
547
+ ) -> Result < u64 , anyhow:: Error > {
548
+ // post request
549
+ let amount: u128 = 100 * 1000000000000 ;
550
+ let transfer_params = TransferParams {
551
+ to : AccountId32 ( dev:: alice ( ) . public_key ( ) . 0 ) ,
552
+ amount,
553
+ para_id : 2001 ,
554
+ timeout : 5 ,
555
+ } ;
556
+
557
+ // get request
558
+ let encoded_chain_b_id_storage_key =
559
+ "0x0d715f2646c8f85767b5d2764bb2782604a74d81251e398fd8a0a4d55023bb3f" ;
560
+
561
+ let latest_height_b =
562
+ client_a_sub. query_latest_height ( client_b_sub. state_machine_id ( ) ) . await ? - 1 ;
563
+ let get_params = GetRequest {
564
+ para_id : 2001 ,
565
+ height : latest_height_b,
566
+ timeout : 0 ,
567
+ keys : vec ! [ hex:: decode( encoded_chain_b_id_storage_key. strip_prefix( "0x" ) . unwrap( ) ) . unwrap( ) ] ,
568
+ } ;
569
+
570
+ // batch submit the requests based on number of counts
571
+ todo ! ( ) ;
572
+ Ok ( 0 )
573
+ }
0 commit comments