@@ -13,16 +13,30 @@ use sp_mmr_primitives::{
13
13
INDEXING_PREFIX ,
14
14
} ;
15
15
use sp_runtime:: traits:: Keccak256 ;
16
- use subxt:: { rpc_params, tx:: SubmittableExtrinsic , utils:: H160 } ;
16
+ use subxt:: {
17
+ rpc_params,
18
+ tx:: SubmittableExtrinsic ,
19
+ utils:: { AccountId32 , H160 } ,
20
+ OnlineClient ,
21
+ } ;
17
22
23
+ use ismp:: host:: StateMachine ;
18
24
use mmr_primitives:: { DataOrHash , FullLeaf } ;
19
25
use pallet_ismp:: mmr:: { Leaf , ProofKeys } ;
20
26
use pallet_mmr:: mmr:: Hasher as MmrHasher ;
27
+ use subxt_signer:: sr25519:: dev:: { self } ;
21
28
use subxt_utils:: {
22
29
gargantua,
23
- gargantua:: api:: runtime_types:: { ismp:: host:: Ethereum , pallet_ismp_demo:: pallet:: EvmParams } ,
30
+ gargantua:: api:: runtime_types:: {
31
+ ismp:: host:: Ethereum ,
32
+ pallet_ismp_demo:: pallet:: { EvmParams , GetRequest , TransferParams } ,
33
+ } ,
24
34
Hyperbridge ,
25
35
} ;
36
+ use tesseract_integration_test;
37
+ use tesseract_substrate:: { self , SubstrateClient } ;
38
+
39
/// Leaf-count depth at which offchain-DB pruning is triggered (~1 GB of leaves).
/// NOTE(review): was `630_0000` (= 6,300,000) — the test dispatches 600_000 + 30_000
/// requests to trigger pruning, so the intended threshold is 630,000.
const PRUNNING_DEPTH: u32 = 630_000; // leaves = approx 1Gb
26
40
27
41
#[ tokio:: test]
28
42
#[ ignore]
@@ -448,3 +462,111 @@ async fn dispatch_requests() -> Result<(), anyhow::Error> {
448
462
assert ! ( res) ;
449
463
Ok ( ( ) )
450
464
}
465
+
466
+ /// offchainDB mmr prunning for timedout request and processed requests that knowledge of their
467
+ /// internal nodes is of no use for relayers and users.
468
+ /// Prunning should be triggered iff a certain depth has been reached,
469
+ /// depth = current_number_of_leaves - last_number_of_leaves
470
+ /// This logic should make/enforce somehow maximum storage of requests
471
+ ///
472
+ /// Pruning should not consume any useful blockweight hence should be done during on-idle.
473
+ //#[cfg(feature = "offchainDb-pruning")]
474
+ #[ tokio:: test( flavor = "multi_thread" ) ]
475
+ #[ ignore]
476
+ async fn offchainDb_mmr_pruning_requests_work ( ) -> Result < ( ) , anyhow:: Error > {
477
+ let ( chain_a_sub_client, chain_b_sub_client) = tesseract_integration_test:: create_clients ( ) ?;
478
+ // produce some initial blocks on both chains ( A and B )
479
+ for _ in 0 ..=10 {
480
+ let block = chain_a_sub_client
481
+ . client
482
+ . rpc ( )
483
+ . request :: < CreatedBlock < H256 > > ( "engine_createBlock" , rpc_params ! [ true , false ] )
484
+ . await ?;
485
+
486
+ let finalized = chain_a_sub_client
487
+ . client
488
+ . rpc ( )
489
+ . request :: < bool > ( "engine_finalizeBlock" , rpc_params ! [ block. hash] )
490
+ . await ?;
491
+ assert ! ( finalized) ;
492
+
493
+ let block = chain_b_sub_client
494
+ . client
495
+ . rpc ( )
496
+ . request :: < CreatedBlock < H256 > > ( "engine_createBlock" , rpc_params ! [ true , false ] )
497
+ . await ?;
498
+
499
+ let finalized = chain_b_sub_client
500
+ . client
501
+ . rpc ( )
502
+ . request :: < bool > ( "engine_finalizeBlock" , rpc_params ! [ block. hash] )
503
+ . await ?;
504
+ assert ! ( finalized) ;
505
+ }
506
+ tokio:: time:: sleep ( Duration :: from_secs ( 10 ) ) . await ;
507
+
508
+ // dispatch and process 600_000 request
509
+ dispatch_and_process_requests ( chain_a_sub_client. clone ( ) , chain_b_sub_client. clone ( ) , 600_000 )
510
+ . await ?;
511
+ // assert number of leaves
512
+ let leaves_count = chain_a_sub_client
513
+ . client
514
+ . storage ( )
515
+ . at_latest ( )
516
+ . await ?
517
+ . fetch ( & gargantua:: api:: storage ( ) . mmr ( ) . number_of_leaves ( ) )
518
+ . await ?
519
+ . ok_or ( "Failed to fetch" )
520
+ . unwrap ( ) ;
521
+
522
+ assert_eq ! ( leaves_count, 600_000 ) ;
523
+ // perform random lookup in the offchain Db
524
+
525
+ // dispatch and process additional 30_000 request to trigger prunning
526
+ dispatch_and_process_requests ( chain_a_sub_client. clone ( ) , chain_b_sub_client. clone ( ) , 30_000 )
527
+ . await ?;
528
+
529
+ // assert the number of leaves are not 630_000
530
+
531
+ // get the number of leaves remaining (A)
532
+
533
+ dispatch_and_process_requests ( chain_a_sub_client, chain_b_sub_client, 100_000 ) . await ?;
534
+
535
+ // assert the number of leaves are A + 100_000 indicating not pruned as the depth not reached
536
+
537
+ Ok ( ( ) )
538
+ }
539
+
540
+ /// dispatch get and post requests and timeout post request and process get_requests
541
+ /// this function should return no of timedout and pruned requests
542
+ async fn dispatch_and_process_requests (
543
+ client_a_sub : SubstrateClient < Hyperbridge > ,
544
+ client_b_sub : SubstrateClient < Hyperbridge > ,
545
+ count : u64 ,
546
+ ) -> Result < u64 , anyhow:: Error > {
547
+ // post request
548
+ let amount: u128 = 100 * 1000000000000 ;
549
+ let transfer_params = TransferParams {
550
+ to : AccountId32 ( dev:: alice ( ) . public_key ( ) . 0 ) ,
551
+ amount,
552
+ para_id : 2001 ,
553
+ timeout : 5 ,
554
+ } ;
555
+
556
+ // get request
557
+ let encoded_chain_b_id_storage_key =
558
+ "0x0d715f2646c8f85767b5d2764bb2782604a74d81251e398fd8a0a4d55023bb3f" ;
559
+
560
+ let latest_height_b =
561
+ client_a_sub. query_latest_height ( client_b_sub. state_machine_id ( ) ) . await ? - 1 ;
562
+ let get_params = GetRequest {
563
+ para_id : 2001 ,
564
+ height : latest_height_b,
565
+ timeout : 0 ,
566
+ keys : vec ! [ hex:: decode( encoded_chain_b_id_storage_key. strip_prefix( "0x" ) . unwrap( ) ) . unwrap( ) ] ,
567
+ } ;
568
+
569
+ // batch submit the requests based on number of counts
570
+ todo ! ( ) ;
571
+ Ok ( 0 )
572
+ }
0 commit comments