From 0100a6d4f053c2bc08337bebad77f3aadf509230 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 15 May 2023 16:26:44 +0800 Subject: [PATCH 001/357] Refactor: move `block_status` from `ckb_sync` to `ckb_shared` --- Cargo.lock | 2 +- shared/Cargo.toml | 2 +- {sync => shared}/src/block_status.rs | 0 shared/src/lib.rs | 1 + sync/Cargo.toml | 1 - sync/src/lib.rs | 1 - sync/src/relayer/compact_block_process.rs | 2 +- sync/src/relayer/mod.rs | 2 +- sync/src/relayer/tests/compact_block_process.rs | 2 +- sync/src/synchronizer/block_fetcher.rs | 3 ++- sync/src/synchronizer/get_blocks_process.rs | 2 +- sync/src/synchronizer/headers_process.rs | 2 +- sync/src/synchronizer/mod.rs | 2 +- sync/src/tests/block_status.rs | 2 +- sync/src/tests/sync_shared.rs | 2 +- sync/src/types/mod.rs | 2 +- 16 files changed, 14 insertions(+), 14 deletions(-) rename {sync => shared}/src/block_status.rs (100%) diff --git a/Cargo.lock b/Cargo.lock index 90db61f9df..22d44d7080 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1435,6 +1435,7 @@ version = "0.114.0-pre" dependencies = [ "arc-swap", "ckb-app-config", + "bitflags", "ckb-async-runtime", "ckb-chain-spec", "ckb-channel", @@ -1515,7 +1516,6 @@ dependencies = [ name = "ckb-sync" version = "0.114.0-pre" dependencies = [ - "bitflags 1.3.2", "ckb-app-config", "ckb-async-runtime", "ckb-chain", diff --git a/shared/Cargo.toml b/shared/Cargo.toml index 2450c50f78..ddf5bfa115 100644 --- a/shared/Cargo.toml +++ b/shared/Cargo.toml @@ -31,7 +31,7 @@ ckb-app-config = {path = "../util/app-config", version = "= 0.114.0-pre"} ckb-migrate = { path = "../util/migrate", version = "= 0.114.0-pre" } once_cell = "1.8.0" tempfile.workspace = true - +bitflags = "1.0" [dev-dependencies] ckb-systemtime = { path = "../util/systemtime", version = "= 0.114.0-pre", features = ["enable_faketime"] } diff --git a/sync/src/block_status.rs b/shared/src/block_status.rs similarity index 100% rename from sync/src/block_status.rs rename to shared/src/block_status.rs diff --git 
a/shared/src/lib.rs b/shared/src/lib.rs index 63bfa56a35..cff6ab0f87 100644 --- a/shared/src/lib.rs +++ b/shared/src/lib.rs @@ -7,3 +7,4 @@ pub mod shared_builder; pub use ckb_snapshot::{Snapshot, SnapshotMgr}; pub use shared::Shared; pub use shared_builder::{SharedBuilder, SharedPackage}; +pub mod block_status; diff --git a/sync/Cargo.toml b/sync/Cargo.toml index 7005cb1061..c9b91343c5 100644 --- a/sync/Cargo.toml +++ b/sync/Cargo.toml @@ -35,7 +35,6 @@ futures = "0.3" governor = "0.3.1" tempfile.workspace = true ckb-systemtime = { path = "../util/systemtime", version = "= 0.114.0-pre" } -bitflags = "1.0" dashmap = "4.0" keyed_priority_queue = "0.3" sled = "0.34.7" diff --git a/sync/src/lib.rs b/sync/src/lib.rs index 427880ed0f..a12ba2596d 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -3,7 +3,6 @@ //! Sync module implement ckb sync protocol as specified here: //! -mod block_status; mod filter; pub(crate) mod net_time_checker; pub(crate) mod orphan_block_pool; diff --git a/sync/src/relayer/compact_block_process.rs b/sync/src/relayer/compact_block_process.rs index 3bd1d5043c..de45b9379a 100644 --- a/sync/src/relayer/compact_block_process.rs +++ b/sync/src/relayer/compact_block_process.rs @@ -1,4 +1,3 @@ -use crate::block_status::BlockStatus; use crate::relayer::compact_block_verifier::CompactBlockVerifier; use crate::relayer::{ReconstructionResult, Relayer}; use crate::types::{ActiveChain, HeaderIndex, PendingCompactBlockMap}; @@ -8,6 +7,7 @@ use crate::{attempt, Status, StatusCode}; use ckb_chain_spec::consensus::Consensus; use ckb_logger::{self, debug_target}; use ckb_network::{CKBProtocolContext, PeerIndex}; +use ckb_shared::block_status::BlockStatus; use ckb_systemtime::unix_time_as_millis; use ckb_traits::{HeaderFields, HeaderFieldsProvider}; use ckb_types::{ diff --git a/sync/src/relayer/mod.rs b/sync/src/relayer/mod.rs index eba37a3f5d..a8cfdadaf4 100644 --- a/sync/src/relayer/mod.rs +++ b/sync/src/relayer/mod.rs @@ -20,7 +20,6 @@ use 
self::get_block_transactions_process::GetBlockTransactionsProcess; use self::get_transactions_process::GetTransactionsProcess; use self::transaction_hashes_process::TransactionHashesProcess; use self::transactions_process::TransactionsProcess; -use crate::block_status::BlockStatus; use crate::types::{ActiveChain, BlockNumberAndHash, SyncShared}; use crate::utils::{ is_internal_db_error, metric_ckb_message_bytes, send_message_to, MetricDirection, @@ -33,6 +32,7 @@ use ckb_network::{ async_trait, bytes::Bytes, tokio, CKBProtocolContext, CKBProtocolHandler, PeerIndex, SupportProtocols, TargetSession, }; +use ckb_shared::block_status::BlockStatus; use ckb_systemtime::unix_time_as_millis; use ckb_tx_pool::service::TxVerificationResult; use ckb_types::{ diff --git a/sync/src/relayer/tests/compact_block_process.rs b/sync/src/relayer/tests/compact_block_process.rs index 17b64a1339..9205727858 100644 --- a/sync/src/relayer/tests/compact_block_process.rs +++ b/sync/src/relayer/tests/compact_block_process.rs @@ -1,4 +1,3 @@ -use crate::block_status::BlockStatus; use crate::relayer::compact_block_process::CompactBlockProcess; use crate::relayer::tests::helper::{ build_chain, gen_block, new_header_builder, MockProtocolContext, @@ -6,6 +5,7 @@ use crate::relayer::tests::helper::{ use crate::{Status, StatusCode}; use ckb_chain::chain::ChainService; use ckb_network::{PeerIndex, SupportProtocols}; +use ckb_shared::block_status::BlockStatus; use ckb_store::ChainStore; use ckb_systemtime::unix_time_as_millis; use ckb_tx_pool::{PlugTarget, TxEntry}; diff --git a/sync/src/synchronizer/block_fetcher.rs b/sync/src/synchronizer/block_fetcher.rs index e880716d6d..5851e61d55 100644 --- a/sync/src/synchronizer/block_fetcher.rs +++ b/sync/src/synchronizer/block_fetcher.rs @@ -1,4 +1,4 @@ -use crate::block_status::BlockStatus; +use crate::synchronizer::Synchronizer; use crate::types::{ActiveChain, BlockNumberAndHash, HeaderIndex, HeaderIndexView, IBDState}; use crate::SyncShared; use 
ckb_constant::sync::{ @@ -6,6 +6,7 @@ use ckb_constant::sync::{ }; use ckb_logger::{debug, trace}; use ckb_network::PeerIndex; +use ckb_shared::block_status::BlockStatus; use ckb_systemtime::unix_time_as_millis; use ckb_types::packed; use std::cmp::min; diff --git a/sync/src/synchronizer/get_blocks_process.rs b/sync/src/synchronizer/get_blocks_process.rs index b9670d5f85..ac69b5f8fe 100644 --- a/sync/src/synchronizer/get_blocks_process.rs +++ b/sync/src/synchronizer/get_blocks_process.rs @@ -1,10 +1,10 @@ -use crate::block_status::BlockStatus; use crate::synchronizer::Synchronizer; use crate::utils::send_message_to; use crate::{attempt, Status, StatusCode}; use ckb_constant::sync::{INIT_BLOCKS_IN_TRANSIT_PER_PEER, MAX_HEADERS_LEN}; use ckb_logger::debug; use ckb_network::{CKBProtocolContext, PeerIndex}; +use ckb_shared::block_status::BlockStatus; use ckb_types::{packed, prelude::*}; use std::collections::HashSet; diff --git a/sync/src/synchronizer/headers_process.rs b/sync/src/synchronizer/headers_process.rs index 1cb5d7e19f..7e19686ed0 100644 --- a/sync/src/synchronizer/headers_process.rs +++ b/sync/src/synchronizer/headers_process.rs @@ -1,4 +1,3 @@ -use crate::block_status::BlockStatus; use crate::synchronizer::Synchronizer; use crate::types::{ActiveChain, SyncShared}; use crate::{Status, StatusCode}; @@ -6,6 +5,7 @@ use ckb_constant::sync::MAX_HEADERS_LEN; use ckb_error::Error; use ckb_logger::{debug, log_enabled, warn, Level}; use ckb_network::{CKBProtocolContext, PeerIndex}; +use ckb_shared::block_status::BlockStatus; use ckb_traits::HeaderFieldsProvider; use ckb_types::{core, packed, prelude::*}; use ckb_verification::{HeaderError, HeaderVerifier}; diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index 8974e10ea2..a0a08c6727 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -20,10 +20,10 @@ pub(crate) use self::get_headers_process::GetHeadersProcess; pub(crate) use self::headers_process::HeadersProcess; 
pub(crate) use self::in_ibd_process::InIBDProcess; -use crate::block_status::BlockStatus; use crate::types::{HeaderIndexView, HeadersSyncController, IBDState, Peers, SyncShared}; use crate::utils::{metric_ckb_message_bytes, send_message_to, MetricDirection}; use crate::{Status, StatusCode}; +use ckb_shared::block_status::BlockStatus; use ckb_chain::chain::ChainController; use ckb_channel as channel; diff --git a/sync/src/tests/block_status.rs b/sync/src/tests/block_status.rs index 351b120236..c9a797b20c 100644 --- a/sync/src/tests/block_status.rs +++ b/sync/src/tests/block_status.rs @@ -1,6 +1,6 @@ use std::collections::HashSet; -use crate::block_status::BlockStatus; +use ckb_shared::block_status::BlockStatus; fn all() -> Vec { vec![ diff --git a/sync/src/tests/sync_shared.rs b/sync/src/tests/sync_shared.rs index 9955ef84b7..1f37d110cb 100644 --- a/sync/src/tests/sync_shared.rs +++ b/sync/src/tests/sync_shared.rs @@ -1,7 +1,7 @@ -use crate::block_status::BlockStatus; use crate::tests::util::{build_chain, inherit_block}; use crate::SyncShared; use ckb_chain::chain::ChainService; +use ckb_shared::block_status::BlockStatus; use ckb_shared::SharedBuilder; use ckb_store::{self, ChainStore}; use ckb_test_chain_utils::always_success_cellbase; diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index 3fed18e37c..744fa3331e 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -1,4 +1,3 @@ -use crate::block_status::BlockStatus; use crate::orphan_block_pool::OrphanBlockPool; use crate::utils::is_internal_db_error; use crate::{Status, StatusCode, FAST_INDEX, LOW_INDEX, NORMAL_INDEX, TIME_TRACE_SIZE}; @@ -16,6 +15,7 @@ use ckb_constant::sync::{ use ckb_error::Error as CKBError; use ckb_logger::{debug, error, trace}; use ckb_network::{CKBProtocolContext, PeerIndex, SupportProtocols}; +use ckb_shared::block_status::BlockStatus; use ckb_shared::{shared::Shared, Snapshot}; use ckb_store::{ChainDB, ChainStore}; use ckb_systemtime::unix_time_as_millis; From 
3abe253d767dfab8b5b1f6019f6d2d50a511da18 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 16 May 2023 09:42:00 +0800 Subject: [PATCH 002/357] Refactor: move `header_map` to `ckb_shared` Signed-off-by: Eval EXEC --- Cargo.lock | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 22d44d7080..1185aa2736 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1434,8 +1434,8 @@ name = "ckb-shared" version = "0.114.0-pre" dependencies = [ "arc-swap", + "bitflags 1.3.2", "ckb-app-config", - "bitflags", "ckb-async-runtime", "ckb-chain-spec", "ckb-channel", From 631406d2795c3a8ea2d3cc2e62bf2c392d9418b1 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 16 May 2023 09:42:00 +0800 Subject: [PATCH 003/357] Refactor: move `header_map` to `ckb_shared` Signed-off-by: Eval EXEC --- Cargo.lock | 4 +- shared/Cargo.toml | 5 +- shared/src/lib.rs | 3 + .../src/types/header_map/backend.rs | 0 .../src/types/header_map/backend_sled.rs | 0 .../src/types/header_map/kernel_lru.rs | 0 .../src/types/header_map/memory.rs | 0 {sync => shared}/src/types/header_map/mod.rs | 10 +- shared/src/types/mod.rs | 306 +++++++++++++++++ sync/Cargo.toml | 1 - sync/src/relayer/compact_block_process.rs | 3 +- sync/src/relayer/mod.rs | 3 +- sync/src/synchronizer/block_fetcher.rs | 4 +- sync/src/synchronizer/mod.rs | 3 +- sync/src/tests/inflight_blocks.rs | 3 +- sync/src/tests/synchronizer/functions.rs | 3 +- sync/src/tests/types.rs | 3 +- sync/src/types/mod.rs | 312 +----------------- 18 files changed, 342 insertions(+), 321 deletions(-) rename {sync => shared}/src/types/header_map/backend.rs (100%) rename {sync => shared}/src/types/header_map/backend_sled.rs (100%) rename {sync => shared}/src/types/header_map/kernel_lru.rs (100%) rename {sync => shared}/src/types/header_map/memory.rs (100%) rename {sync => shared}/src/types/header_map/mod.rs (85%) create mode 100644 shared/src/types/mod.rs diff --git a/Cargo.lock b/Cargo.lock index 1185aa2736..f848994f6b 100644 --- 
a/Cargo.lock +++ b/Cargo.lock @@ -1453,9 +1453,12 @@ dependencies = [ "ckb-systemtime", "ckb-tx-pool", "ckb-types", + "ckb-util", "ckb-verification", "once_cell", + "sled", "tempfile", + "tokio", ] [[package]] @@ -1551,7 +1554,6 @@ dependencies = [ "once_cell", "rand 0.7.3", "sentry", - "sled", "tempfile", "tokio", ] diff --git a/shared/Cargo.toml b/shared/Cargo.toml index ddf5bfa115..d7c87155eb 100644 --- a/shared/Cargo.toml +++ b/shared/Cargo.toml @@ -30,8 +30,11 @@ ckb-channel = { path = "../util/channel", version = "= 0.114.0-pre" } ckb-app-config = {path = "../util/app-config", version = "= 0.114.0-pre"} ckb-migrate = { path = "../util/migrate", version = "= 0.114.0-pre" } once_cell = "1.8.0" -tempfile.workspace = true +ckb-util = { path = "../util", version = "= 0.113.0-pre" } bitflags = "1.0" +tokio = { version = "1", features = ["sync"] } +tempfile.workspace = true +sled = "0.34.7" [dev-dependencies] ckb-systemtime = { path = "../util/systemtime", version = "= 0.114.0-pre", features = ["enable_faketime"] } diff --git a/shared/src/lib.rs b/shared/src/lib.rs index cff6ab0f87..a495984ee7 100644 --- a/shared/src/lib.rs +++ b/shared/src/lib.rs @@ -8,3 +8,6 @@ pub use ckb_snapshot::{Snapshot, SnapshotMgr}; pub use shared::Shared; pub use shared_builder::{SharedBuilder, SharedPackage}; pub mod block_status; +pub mod types; + +pub use types::header_map::HeaderMap; diff --git a/sync/src/types/header_map/backend.rs b/shared/src/types/header_map/backend.rs similarity index 100% rename from sync/src/types/header_map/backend.rs rename to shared/src/types/header_map/backend.rs diff --git a/sync/src/types/header_map/backend_sled.rs b/shared/src/types/header_map/backend_sled.rs similarity index 100% rename from sync/src/types/header_map/backend_sled.rs rename to shared/src/types/header_map/backend_sled.rs diff --git a/sync/src/types/header_map/kernel_lru.rs b/shared/src/types/header_map/kernel_lru.rs similarity index 100% rename from sync/src/types/header_map/kernel_lru.rs 
rename to shared/src/types/header_map/kernel_lru.rs diff --git a/sync/src/types/header_map/memory.rs b/shared/src/types/header_map/memory.rs similarity index 100% rename from sync/src/types/header_map/memory.rs rename to shared/src/types/header_map/memory.rs diff --git a/sync/src/types/header_map/mod.rs b/shared/src/types/header_map/mod.rs similarity index 85% rename from sync/src/types/header_map/mod.rs rename to shared/src/types/header_map/mod.rs index 78939164b6..d72772c6a1 100644 --- a/sync/src/types/header_map/mod.rs +++ b/shared/src/types/header_map/mod.rs @@ -29,7 +29,7 @@ const ITEM_BYTES_SIZE: usize = size_of::(); const WARN_THRESHOLD: usize = ITEM_BYTES_SIZE * 100_000; impl HeaderMap { - pub(crate) fn new

(tmpdir: Option

, memory_limit: usize, async_handle: &Handle) -> Self + pub fn new

(tmpdir: Option

, memory_limit: usize, async_handle: &Handle) -> Self where P: AsRef, { @@ -66,19 +66,19 @@ impl HeaderMap { Self { inner } } - pub(crate) fn contains_key(&self, hash: &Byte32) -> bool { + pub fn contains_key(&self, hash: &Byte32) -> bool { self.inner.contains_key(hash) } - pub(crate) fn get(&self, hash: &Byte32) -> Option { + pub fn get(&self, hash: &Byte32) -> Option { self.inner.get(hash) } - pub(crate) fn insert(&self, view: HeaderIndexView) -> Option<()> { + pub fn insert(&self, view: HeaderIndexView) -> Option<()> { self.inner.insert(view) } - pub(crate) fn remove(&self, hash: &Byte32) { + pub fn remove(&self, hash: &Byte32) { self.inner.remove(hash) } } diff --git a/shared/src/types/mod.rs b/shared/src/types/mod.rs new file mode 100644 index 0000000000..8db42092b1 --- /dev/null +++ b/shared/src/types/mod.rs @@ -0,0 +1,306 @@ +use ckb_types::core::{BlockNumber, EpochNumberWithFraction}; +use ckb_types::packed::Byte32; +use ckb_types::prelude::{Entity, FromSliceShouldBeOk, Reader}; +use ckb_types::{packed, U256}; + +pub mod header_map; + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct HeaderIndexView { + hash: Byte32, + number: BlockNumber, + epoch: EpochNumberWithFraction, + timestamp: u64, + parent_hash: Byte32, + total_difficulty: U256, + skip_hash: Option, +} + +impl HeaderIndexView { + pub fn new( + hash: Byte32, + number: BlockNumber, + epoch: EpochNumberWithFraction, + timestamp: u64, + parent_hash: Byte32, + total_difficulty: U256, + ) -> Self { + HeaderIndexView { + hash, + number, + epoch, + timestamp, + parent_hash, + total_difficulty, + skip_hash: None, + } + } + + pub fn hash(&self) -> Byte32 { + self.hash.clone() + } + + pub fn number(&self) -> BlockNumber { + self.number + } + + pub fn epoch(&self) -> EpochNumberWithFraction { + self.epoch + } + + pub fn timestamp(&self) -> u64 { + self.timestamp + } + + pub fn total_difficulty(&self) -> &U256 { + &self.total_difficulty + } + + pub fn parent_hash(&self) -> Byte32 { + self.parent_hash.clone() 
+ } + + pub fn skip_hash(&self) -> Option<&Byte32> { + self.skip_hash.as_ref() + } + + // deserialize from bytes + fn from_slice_should_be_ok(hash: &[u8], slice: &[u8]) -> Self { + let hash = packed::Byte32Reader::from_slice_should_be_ok(hash).to_entity(); + let number = BlockNumber::from_le_bytes(slice[0..8].try_into().expect("stored slice")); + let epoch = EpochNumberWithFraction::from_full_value(u64::from_le_bytes( + slice[8..16].try_into().expect("stored slice"), + )); + let timestamp = u64::from_le_bytes(slice[16..24].try_into().expect("stored slice")); + let parent_hash = packed::Byte32Reader::from_slice_should_be_ok(&slice[24..56]).to_entity(); + let total_difficulty = U256::from_little_endian(&slice[56..88]).expect("stored slice"); + let skip_hash = if slice.len() == 120 { + Some(packed::Byte32Reader::from_slice_should_be_ok(&slice[88..120]).to_entity()) + } else { + None + }; + Self { + hash, + number, + epoch, + timestamp, + parent_hash, + total_difficulty, + skip_hash, + } + } + + // serialize all fields except `hash` to bytes + fn to_vec(&self) -> Vec { + let mut v = Vec::new(); + v.extend_from_slice(self.number.to_le_bytes().as_slice()); + v.extend_from_slice(self.epoch.full_value().to_le_bytes().as_slice()); + v.extend_from_slice(self.timestamp.to_le_bytes().as_slice()); + v.extend_from_slice(self.parent_hash.as_slice()); + v.extend_from_slice(self.total_difficulty.to_le_bytes().as_slice()); + if let Some(ref skip_hash) = self.skip_hash { + v.extend_from_slice(skip_hash.as_slice()); + } + v + } + + pub fn build_skip(&mut self, tip_number: BlockNumber, get_header_view: F, fast_scanner: G) + where + F: Fn(&Byte32, bool) -> Option, + G: Fn(BlockNumber, BlockNumberAndHash) -> Option, + { + if self.number == 0 { + return; + } + self.skip_hash = self + .get_ancestor( + tip_number, + get_skip_height(self.number()), + get_header_view, + fast_scanner, + ) + .map(|header| header.hash()); + } + + pub fn get_ancestor( + &self, + tip_number: BlockNumber, + number: 
BlockNumber, + get_header_view: F, + fast_scanner: G, + ) -> Option + where + F: Fn(&Byte32, bool) -> Option, + G: Fn(BlockNumber, BlockNumberAndHash) -> Option, + { + if number > self.number() { + return None; + } + + let mut current = self.clone(); + let mut number_walk = current.number(); + while number_walk > number { + let number_skip = get_skip_height(number_walk); + let number_skip_prev = get_skip_height(number_walk - 1); + let store_first = current.number() <= tip_number; + match current.skip_hash { + Some(ref hash) + if number_skip == number + || (number_skip > number + && !(number_skip_prev + 2 < number_skip + && number_skip_prev >= number)) => + { + // Only follow skip if parent->skip isn't better than skip->parent + current = get_header_view(hash, store_first)?; + number_walk = number_skip; + } + _ => { + current = get_header_view(¤t.parent_hash(), store_first)?; + number_walk -= 1; + } + } + if let Some(target) = fast_scanner(number, (current.number(), current.hash()).into()) { + current = target; + break; + } + } + Some(current) + } + + pub fn as_header_index(&self) -> HeaderIndex { + HeaderIndex::new(self.number(), self.hash(), self.total_difficulty().clone()) + } + + pub fn number_and_hash(&self) -> BlockNumberAndHash { + (self.number(), self.hash()).into() + } + + pub fn is_better_than(&self, total_difficulty: &U256) -> bool { + self.total_difficulty() > total_difficulty + } +} + +impl From<(ckb_types::core::HeaderView, U256)> for HeaderIndexView { + fn from((header, total_difficulty): (ckb_types::core::HeaderView, U256)) -> Self { + HeaderIndexView { + hash: header.hash(), + number: header.number(), + epoch: header.epoch(), + timestamp: header.timestamp(), + parent_hash: header.parent_hash(), + total_difficulty, + skip_hash: None, + } + } +} + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct HeaderIndex { + number: BlockNumber, + hash: Byte32, + total_difficulty: U256, +} + +impl HeaderIndex { + pub fn new(number: BlockNumber, hash: Byte32, 
total_difficulty: U256) -> Self { + HeaderIndex { + number, + hash, + total_difficulty, + } + } + + pub fn number(&self) -> BlockNumber { + self.number + } + + pub fn hash(&self) -> Byte32 { + self.hash.clone() + } + + pub fn total_difficulty(&self) -> &U256 { + &self.total_difficulty + } + + pub fn number_and_hash(&self) -> BlockNumberAndHash { + (self.number(), self.hash()).into() + } + + pub fn is_better_chain(&self, other: &Self) -> bool { + self.is_better_than(other.total_difficulty()) + } + + pub fn is_better_than(&self, other_total_difficulty: &U256) -> bool { + self.total_difficulty() > other_total_difficulty + } +} + +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct BlockNumberAndHash { + pub number: BlockNumber, + pub hash: Byte32, +} + +impl BlockNumberAndHash { + pub fn new(number: BlockNumber, hash: Byte32) -> Self { + Self { number, hash } + } + + pub fn number(&self) -> BlockNumber { + self.number + } + + pub fn hash(&self) -> Byte32 { + self.hash.clone() + } +} + +impl From<(BlockNumber, Byte32)> for BlockNumberAndHash { + fn from(inner: (BlockNumber, Byte32)) -> Self { + Self { + number: inner.0, + hash: inner.1, + } + } +} + +impl From<&ckb_types::core::HeaderView> for BlockNumberAndHash { + fn from(header: &ckb_types::core::HeaderView) -> Self { + Self { + number: header.number(), + hash: header.hash(), + } + } +} + +impl From for BlockNumberAndHash { + fn from(header: ckb_types::core::HeaderView) -> Self { + Self { + number: header.number(), + hash: header.hash(), + } + } +} + +// Compute what height to jump back to with the skip pointer. +fn get_skip_height(height: BlockNumber) -> BlockNumber { + // Turn the lowest '1' bit in the binary representation of a number into a '0'. + fn invert_lowest_one(n: i64) -> i64 { + n & (n - 1) + } + + if height < 2 { + return 0; + } + + // Determine which height to jump back to. 
Any number strictly lower than height is acceptable, + // but the following expression seems to perform well in simulations (max 110 steps to go back + // up to 2**18 blocks). + if (height & 1) > 0 { + invert_lowest_one(invert_lowest_one(height as i64 - 1)) as u64 + 1 + } else { + invert_lowest_one(height as i64) as u64 + } +} + +pub const SHRINK_THRESHOLD: usize = 300; diff --git a/sync/Cargo.toml b/sync/Cargo.toml index c9b91343c5..a64b886021 100644 --- a/sync/Cargo.toml +++ b/sync/Cargo.toml @@ -37,7 +37,6 @@ tempfile.workspace = true ckb-systemtime = { path = "../util/systemtime", version = "= 0.114.0-pre" } dashmap = "4.0" keyed_priority_queue = "0.3" -sled = "0.34.7" itertools.workspace = true [dev-dependencies] diff --git a/sync/src/relayer/compact_block_process.rs b/sync/src/relayer/compact_block_process.rs index de45b9379a..514b416e47 100644 --- a/sync/src/relayer/compact_block_process.rs +++ b/sync/src/relayer/compact_block_process.rs @@ -1,6 +1,6 @@ use crate::relayer::compact_block_verifier::CompactBlockVerifier; use crate::relayer::{ReconstructionResult, Relayer}; -use crate::types::{ActiveChain, HeaderIndex, PendingCompactBlockMap}; +use crate::types::{ActiveChain, PendingCompactBlockMap}; use crate::utils::send_message_to; use crate::SyncShared; use crate::{attempt, Status, StatusCode}; @@ -8,6 +8,7 @@ use ckb_chain_spec::consensus::Consensus; use ckb_logger::{self, debug_target}; use ckb_network::{CKBProtocolContext, PeerIndex}; use ckb_shared::block_status::BlockStatus; +use ckb_shared::types::HeaderIndex; use ckb_systemtime::unix_time_as_millis; use ckb_traits::{HeaderFields, HeaderFieldsProvider}; use ckb_types::{ diff --git a/sync/src/relayer/mod.rs b/sync/src/relayer/mod.rs index a8cfdadaf4..43d3f77e45 100644 --- a/sync/src/relayer/mod.rs +++ b/sync/src/relayer/mod.rs @@ -20,7 +20,7 @@ use self::get_block_transactions_process::GetBlockTransactionsProcess; use self::get_transactions_process::GetTransactionsProcess; use 
self::transaction_hashes_process::TransactionHashesProcess; use self::transactions_process::TransactionsProcess; -use crate::types::{ActiveChain, BlockNumberAndHash, SyncShared}; +use crate::types::{ActiveChain, SyncShared}; use crate::utils::{ is_internal_db_error, metric_ckb_message_bytes, send_message_to, MetricDirection, }; @@ -33,6 +33,7 @@ use ckb_network::{ SupportProtocols, TargetSession, }; use ckb_shared::block_status::BlockStatus; +use ckb_shared::types::BlockNumberAndHash; use ckb_systemtime::unix_time_as_millis; use ckb_tx_pool::service::TxVerificationResult; use ckb_types::{ diff --git a/sync/src/synchronizer/block_fetcher.rs b/sync/src/synchronizer/block_fetcher.rs index 5851e61d55..1b35862944 100644 --- a/sync/src/synchronizer/block_fetcher.rs +++ b/sync/src/synchronizer/block_fetcher.rs @@ -1,5 +1,4 @@ -use crate::synchronizer::Synchronizer; -use crate::types::{ActiveChain, BlockNumberAndHash, HeaderIndex, HeaderIndexView, IBDState}; +use crate::types::{ActiveChain, IBDState}; use crate::SyncShared; use ckb_constant::sync::{ BLOCK_DOWNLOAD_WINDOW, CHECK_POINT_WINDOW, INIT_BLOCKS_IN_TRANSIT_PER_PEER, @@ -7,6 +6,7 @@ use ckb_constant::sync::{ use ckb_logger::{debug, trace}; use ckb_network::PeerIndex; use ckb_shared::block_status::BlockStatus; +use ckb_shared::types::{BlockNumberAndHash, HeaderIndex, HeaderIndexView}; use ckb_systemtime::unix_time_as_millis; use ckb_types::packed; use std::cmp::min; diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index a0a08c6727..573c74d4e7 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -20,7 +20,7 @@ pub(crate) use self::get_headers_process::GetHeadersProcess; pub(crate) use self::headers_process::HeadersProcess; pub(crate) use self::in_ibd_process::InIBDProcess; -use crate::types::{HeaderIndexView, HeadersSyncController, IBDState, Peers, SyncShared}; +use crate::types::{HeadersSyncController, IBDState, Peers, SyncShared}; use 
crate::utils::{metric_ckb_message_bytes, send_message_to, MetricDirection}; use crate::{Status, StatusCode}; use ckb_shared::block_status::BlockStatus; @@ -38,6 +38,7 @@ use ckb_network::{ async_trait, bytes::Bytes, tokio, CKBProtocolContext, CKBProtocolHandler, PeerIndex, ServiceControl, SupportProtocols, }; +use ckb_shared::types::HeaderIndexView; use ckb_stop_handler::{new_crossbeam_exit_rx, register_thread}; use ckb_systemtime::unix_time_as_millis; use ckb_types::{ diff --git a/sync/src/tests/inflight_blocks.rs b/sync/src/tests/inflight_blocks.rs index 46e6f45437..c2f3fcd11a 100644 --- a/sync/src/tests/inflight_blocks.rs +++ b/sync/src/tests/inflight_blocks.rs @@ -1,5 +1,6 @@ -use crate::types::{BlockNumberAndHash, InflightBlocks}; +use crate::types::InflightBlocks; use ckb_constant::sync::BLOCK_DOWNLOAD_TIMEOUT; +use ckb_shared::types::BlockNumberAndHash; use ckb_types::h256; use ckb_types::prelude::*; use std::collections::HashSet; diff --git a/sync/src/tests/synchronizer/functions.rs b/sync/src/tests/synchronizer/functions.rs index 8b71178d0c..5eb2e952cb 100644 --- a/sync/src/tests/synchronizer/functions.rs +++ b/sync/src/tests/synchronizer/functions.rs @@ -8,6 +8,7 @@ use ckb_network::{ SessionType, TargetSession, }; use ckb_reward_calculator::RewardCalculator; +use ckb_shared::types::HeaderIndex; use ckb_shared::{Shared, SharedBuilder, Snapshot}; use ckb_store::ChainStore; use ckb_systemtime::unix_time_as_millis; @@ -36,7 +37,7 @@ use std::{ use crate::{ synchronizer::{BlockFetcher, BlockProcess, GetBlocksProcess, HeadersProcess, Synchronizer}, - types::{HeaderIndex, HeadersSyncController, IBDState, PeerState}, + types::{HeadersSyncController, IBDState, PeerState}, Status, StatusCode, SyncShared, }; diff --git a/sync/src/tests/types.rs b/sync/src/tests/types.rs index 081c95a012..228de50fb2 100644 --- a/sync/src/tests/types.rs +++ b/sync/src/tests/types.rs @@ -1,3 +1,4 @@ +use ckb_shared::types::HeaderIndexView; use ckb_types::{ core::{BlockNumber, 
EpochNumberWithFraction, HeaderBuilder}, packed::Byte32, @@ -10,7 +11,7 @@ use std::{ sync::atomic::{AtomicUsize, Ordering::Relaxed}, }; -use crate::types::{HeaderIndexView, TtlFilter, FILTER_TTL}; +use crate::types::{TtlFilter, FILTER_TTL}; const SKIPLIST_LENGTH: u64 = 10_000; diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index 744fa3331e..b5c3642faa 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -15,8 +15,12 @@ use ckb_constant::sync::{ use ckb_error::Error as CKBError; use ckb_logger::{debug, error, trace}; use ckb_network::{CKBProtocolContext, PeerIndex, SupportProtocols}; -use ckb_shared::block_status::BlockStatus; -use ckb_shared::{shared::Shared, Snapshot}; +use ckb_shared::{ + block_status::BlockStatus, + shared::Shared, + types::{BlockNumberAndHash, HeaderIndex, HeaderIndexView, SHRINK_THRESHOLD}, + HeaderMap, Snapshot, +}; use ckb_store::{ChainDB, ChainStore}; use ckb_systemtime::unix_time_as_millis; use ckb_traits::{HeaderFields, HeaderFieldsProvider}; @@ -40,11 +44,8 @@ use std::sync::Arc; use std::time::{Duration, Instant}; use std::{cmp, fmt, iter}; -mod header_map; - use crate::utils::send_message; -use ckb_types::core::{EpochNumber, EpochNumberWithFraction}; -pub use header_map::HeaderMap; +use ckb_types::core::EpochNumber; const GET_HEADERS_CACHE_SIZE: usize = 10000; // TODO: Need discussed @@ -53,7 +54,6 @@ const FILTER_SIZE: usize = 50000; const ORPHAN_BLOCK_SIZE: usize = 1024; // 2 ** 13 < 6 * 1800 < 2 ** 14 const ONE_DAY_BLOCK_NUMBER: u64 = 8192; -const SHRINK_THRESHOLD: usize = 300; pub(crate) const FILTER_TTL: u64 = 4 * 60 * 60; // State used to enforce CHAIN_SYNC_TIMEOUT @@ -402,53 +402,6 @@ impl InflightState { } } -#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct BlockNumberAndHash { - pub number: BlockNumber, - pub hash: Byte32, -} - -impl BlockNumberAndHash { - pub fn new(number: BlockNumber, hash: Byte32) -> Self { - Self { number, hash } - } - - pub fn number(&self) -> 
BlockNumber { - self.number - } - - pub fn hash(&self) -> Byte32 { - self.hash.clone() - } -} - -impl From<(BlockNumber, Byte32)> for BlockNumberAndHash { - fn from(inner: (BlockNumber, Byte32)) -> Self { - Self { - number: inner.0, - hash: inner.1, - } - } -} - -impl From<&core::HeaderView> for BlockNumberAndHash { - fn from(header: &core::HeaderView) -> Self { - Self { - number: header.number(), - hash: header.hash(), - } - } -} - -impl From for BlockNumberAndHash { - fn from(header: core::HeaderView) -> Self { - Self { - number: header.number(), - hash: header.hash(), - } - } -} - enum TimeQuantile { MinToFast, FastToNormal, @@ -1015,257 +968,6 @@ impl Peers { } } -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct HeaderIndex { - number: BlockNumber, - hash: Byte32, - total_difficulty: U256, -} - -impl HeaderIndex { - pub fn new(number: BlockNumber, hash: Byte32, total_difficulty: U256) -> Self { - HeaderIndex { - number, - hash, - total_difficulty, - } - } - - pub fn number(&self) -> BlockNumber { - self.number - } - - pub fn hash(&self) -> Byte32 { - self.hash.clone() - } - - pub fn total_difficulty(&self) -> &U256 { - &self.total_difficulty - } - - pub fn number_and_hash(&self) -> BlockNumberAndHash { - (self.number(), self.hash()).into() - } - - pub fn is_better_chain(&self, other: &Self) -> bool { - self.is_better_than(other.total_difficulty()) - } - - pub fn is_better_than(&self, other_total_difficulty: &U256) -> bool { - self.total_difficulty() > other_total_difficulty - } -} - -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct HeaderIndexView { - hash: Byte32, - number: BlockNumber, - epoch: EpochNumberWithFraction, - timestamp: u64, - parent_hash: Byte32, - total_difficulty: U256, - skip_hash: Option, -} - -impl HeaderIndexView { - pub fn new( - hash: Byte32, - number: BlockNumber, - epoch: EpochNumberWithFraction, - timestamp: u64, - parent_hash: Byte32, - total_difficulty: U256, - ) -> Self { - HeaderIndexView { - hash, - number, - epoch, - 
timestamp, - parent_hash, - total_difficulty, - skip_hash: None, - } - } - - pub fn hash(&self) -> Byte32 { - self.hash.clone() - } - - pub fn number(&self) -> BlockNumber { - self.number - } - - pub fn epoch(&self) -> EpochNumberWithFraction { - self.epoch - } - - pub fn timestamp(&self) -> u64 { - self.timestamp - } - - pub fn total_difficulty(&self) -> &U256 { - &self.total_difficulty - } - - pub fn parent_hash(&self) -> Byte32 { - self.parent_hash.clone() - } - - pub fn skip_hash(&self) -> Option<&Byte32> { - self.skip_hash.as_ref() - } - - // deserialize from bytes - fn from_slice_should_be_ok(hash: &[u8], slice: &[u8]) -> Self { - let hash = packed::Byte32Reader::from_slice_should_be_ok(hash).to_entity(); - let number = BlockNumber::from_le_bytes(slice[0..8].try_into().expect("stored slice")); - let epoch = EpochNumberWithFraction::from_full_value(u64::from_le_bytes( - slice[8..16].try_into().expect("stored slice"), - )); - let timestamp = u64::from_le_bytes(slice[16..24].try_into().expect("stored slice")); - let parent_hash = packed::Byte32Reader::from_slice_should_be_ok(&slice[24..56]).to_entity(); - let total_difficulty = U256::from_little_endian(&slice[56..88]).expect("stored slice"); - let skip_hash = if slice.len() == 120 { - Some(packed::Byte32Reader::from_slice_should_be_ok(&slice[88..120]).to_entity()) - } else { - None - }; - Self { - hash, - number, - epoch, - timestamp, - parent_hash, - total_difficulty, - skip_hash, - } - } - - // serialize all fields except `hash` to bytes - fn to_vec(&self) -> Vec { - let mut v = Vec::new(); - v.extend_from_slice(self.number.to_le_bytes().as_slice()); - v.extend_from_slice(self.epoch.full_value().to_le_bytes().as_slice()); - v.extend_from_slice(self.timestamp.to_le_bytes().as_slice()); - v.extend_from_slice(self.parent_hash.as_slice()); - v.extend_from_slice(self.total_difficulty.to_le_bytes().as_slice()); - if let Some(ref skip_hash) = self.skip_hash { - v.extend_from_slice(skip_hash.as_slice()); - } - v - } - 
- pub fn build_skip(&mut self, tip_number: BlockNumber, get_header_view: F, fast_scanner: G) - where - F: Fn(&Byte32, bool) -> Option, - G: Fn(BlockNumber, BlockNumberAndHash) -> Option, - { - if self.number == 0 { - return; - } - self.skip_hash = self - .get_ancestor( - tip_number, - get_skip_height(self.number()), - get_header_view, - fast_scanner, - ) - .map(|header| header.hash()); - } - - pub fn get_ancestor( - &self, - tip_number: BlockNumber, - number: BlockNumber, - get_header_view: F, - fast_scanner: G, - ) -> Option - where - F: Fn(&Byte32, bool) -> Option, - G: Fn(BlockNumber, BlockNumberAndHash) -> Option, - { - if number > self.number() { - return None; - } - - let mut current = self.clone(); - let mut number_walk = current.number(); - while number_walk > number { - let number_skip = get_skip_height(number_walk); - let number_skip_prev = get_skip_height(number_walk - 1); - let store_first = current.number() <= tip_number; - match current.skip_hash { - Some(ref hash) - if number_skip == number - || (number_skip > number - && !(number_skip_prev + 2 < number_skip - && number_skip_prev >= number)) => - { - // Only follow skip if parent->skip isn't better than skip->parent - current = get_header_view(hash, store_first)?; - number_walk = number_skip; - } - _ => { - current = get_header_view(¤t.parent_hash(), store_first)?; - number_walk -= 1; - } - } - if let Some(target) = fast_scanner(number, (current.number(), current.hash()).into()) { - current = target; - break; - } - } - Some(current) - } - - pub fn as_header_index(&self) -> HeaderIndex { - HeaderIndex::new(self.number(), self.hash(), self.total_difficulty().clone()) - } - - pub fn number_and_hash(&self) -> BlockNumberAndHash { - (self.number(), self.hash()).into() - } - - pub fn is_better_than(&self, total_difficulty: &U256) -> bool { - self.total_difficulty() > total_difficulty - } -} - -impl From<(core::HeaderView, U256)> for HeaderIndexView { - fn from((header, total_difficulty): (core::HeaderView, 
U256)) -> Self { - HeaderIndexView { - hash: header.hash(), - number: header.number(), - epoch: header.epoch(), - timestamp: header.timestamp(), - parent_hash: header.parent_hash(), - total_difficulty, - skip_hash: None, - } - } -} - -// Compute what height to jump back to with the skip pointer. -fn get_skip_height(height: BlockNumber) -> BlockNumber { - // Turn the lowest '1' bit in the binary representation of a number into a '0'. - fn invert_lowest_one(n: i64) -> i64 { - n & (n - 1) - } - - if height < 2 { - return 0; - } - - // Determine which height to jump back to. Any number strictly lower than height is acceptable, - // but the following expression seems to perform well in simulations (max 110 steps to go back - // up to 2**18 blocks). - if (height & 1) > 0 { - invert_lowest_one(invert_lowest_one(height as i64 - 1)) as u64 + 1 - } else { - invert_lowest_one(height as i64) as u64 - } -} - // , Vec)>, timestamp)> pub(crate) type PendingCompactBlockMap = HashMap< Byte32, From cf088e0e1920e6b4d3e4f3f2d750623cc3153d33 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 16 May 2023 11:05:26 +0800 Subject: [PATCH 004/357] Refactor: copy `HeaderMap` to `ckb_shared` Signed-off-by: Eval EXEC --- shared/src/shared.rs | 6 +++++- shared/src/shared_builder.rs | 36 ++++++++++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+), 1 deletion(-) diff --git a/shared/src/shared.rs b/shared/src/shared.rs index fc3e9fea04..6d40ae1f16 100644 --- a/shared/src/shared.rs +++ b/shared/src/shared.rs @@ -1,5 +1,5 @@ //! 
TODO(doc): @quake -use crate::{Snapshot, SnapshotMgr}; +use crate::{HeaderMap, Snapshot, SnapshotMgr}; use arc_swap::Guard; use ckb_async_runtime::Handle; use ckb_chain_spec::consensus::Consensus; @@ -54,6 +54,8 @@ pub struct Shared { pub(crate) snapshot_mgr: Arc, pub(crate) async_handle: Handle, pub(crate) ibd_finished: Arc, + + pub(crate) header_map: Arc, } impl Shared { @@ -68,6 +70,7 @@ impl Shared { snapshot_mgr: Arc, async_handle: Handle, ibd_finished: Arc, + header_map: Arc, ) -> Shared { Shared { store, @@ -78,6 +81,7 @@ impl Shared { snapshot_mgr, async_handle, ibd_finished, + header_map, } } /// Spawn freeze background thread that periodically checks and moves ancient data from the kv database into the freezer. diff --git a/shared/src/shared_builder.rs b/shared/src/shared_builder.rs index 5f2a7b7098..eb4d570c57 100644 --- a/shared/src/shared_builder.rs +++ b/shared/src/shared_builder.rs @@ -5,6 +5,10 @@ use ckb_tx_pool::service::TxVerificationResult; use ckb_tx_pool::{TokioRwLock, TxEntry, TxPool, TxPoolServiceBuilder}; use std::cmp::Ordering; +use crate::migrate::Migrate; +use ckb_app_config::{BlockAssemblerConfig, DBConfig, NotifyConfig, StoreConfig, TxPoolConfig}; +use ckb_app_config::{ExitCode, HeaderMapConfig}; +use ckb_async_runtime::{new_background_runtime, Handle}; use ckb_chain_spec::consensus::Consensus; use ckb_chain_spec::SpecError; @@ -22,10 +26,22 @@ use ckb_error::{Error, InternalErrorKind}; use ckb_logger::{error, info}; use ckb_migrate::migrate::Migrate; use ckb_notify::{NotifyController, NotifyService}; +use ckb_notify::{NotifyController, NotifyService, PoolTransactionEntry}; +use ckb_proposal_table::ProposalTable; +use ckb_proposal_table::ProposalView; +use ckb_shared::{HeaderMap, Shared}; +use ckb_snapshot::{Snapshot, SnapshotMgr}; +use ckb_store::ChainDB; +use ckb_store::ChainStore; use ckb_store::{ChainDB, ChainStore, Freezer}; +use ckb_tx_pool::{ + error::Reject, service::TxVerificationResult, TokioRwLock, TxEntry, TxPool, + 
TxPoolServiceBuilder, +}; use ckb_types::core::hardfork::HardForks; use ckb_types::core::service::PoolTransactionEntry; use ckb_types::core::tx_pool::Reject; + use ckb_types::core::EpochExt; use ckb_types::core::HeaderView; use ckb_verification::cache::init_cache; @@ -45,6 +61,9 @@ pub struct SharedBuilder { block_assembler_config: Option, notify_config: Option, async_handle: Handle, + + header_map_memory_limit: Option, + header_map_tmp_dir: Option, } /// Open or create a rocksdb @@ -148,6 +167,8 @@ impl SharedBuilder { store_config: None, block_assembler_config: None, async_handle, + header_map_memory_limit: None, + header_map_tmp_dir: None, }) } @@ -193,6 +214,9 @@ impl SharedBuilder { store_config: None, block_assembler_config: None, async_handle: runtime.get_or_init(new_background_runtime).clone(), + + header_map_memory_limit: None, + header_map_tmp_dir: None, }) } } @@ -328,8 +352,19 @@ impl SharedBuilder { block_assembler_config, notify_config, async_handle, + header_map_memory_limit, + header_map_tmp_dir, } = self; + let header_map_memory_limit = header_map_memory_limit + .unwrap_or(HeaderMapConfig::default().memory_limit.as_u64() as usize); + + let header_map = Arc::new(HeaderMap::new( + header_map_tmp_dir, + header_map_memory_limit, + &async_handle.clone(), + )); + let tx_pool_config = tx_pool_config.unwrap_or_default(); let notify_config = notify_config.unwrap_or_default(); let store_config = store_config.unwrap_or_default(); @@ -375,6 +410,7 @@ impl SharedBuilder { snapshot_mgr, async_handle, ibd_finished, + header_map, ); let pack = SharedPackage { From 2c34f273780f2dbe2a0aef4f32b5ddb6566e1284 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 22 May 2023 11:09:41 +0800 Subject: [PATCH 005/357] Refactor: copy `block_status_map` to `ckb_shared` Signed-off-by: Eval EXEC --- shared/Cargo.toml | 1 + shared/src/shared.rs | 5 +++++ shared/src/shared_builder.rs | 5 +++++ 3 files changed, 11 insertions(+) diff --git a/shared/Cargo.toml b/shared/Cargo.toml 
index d7c87155eb..247b8ed866 100644 --- a/shared/Cargo.toml +++ b/shared/Cargo.toml @@ -35,6 +35,7 @@ bitflags = "1.0" tokio = { version = "1", features = ["sync"] } tempfile.workspace = true sled = "0.34.7" +dashmap = "4.0" [dev-dependencies] ckb-systemtime = { path = "../util/systemtime", version = "= 0.114.0-pre", features = ["enable_faketime"] } diff --git a/shared/src/shared.rs b/shared/src/shared.rs index 6d40ae1f16..d0de5fefb8 100644 --- a/shared/src/shared.rs +++ b/shared/src/shared.rs @@ -1,4 +1,5 @@ //! TODO(doc): @quake +use crate::block_status::BlockStatus; use crate::{HeaderMap, Snapshot, SnapshotMgr}; use arc_swap::Guard; use ckb_async_runtime::Handle; @@ -21,6 +22,7 @@ use ckb_types::{ U256, }; use ckb_verification::cache::TxVerificationCache; +use dashmap::DashMap; use std::cmp; use std::collections::BTreeMap; use std::sync::atomic::{AtomicBool, Ordering}; @@ -56,6 +58,7 @@ pub struct Shared { pub(crate) ibd_finished: Arc, pub(crate) header_map: Arc, + pub(crate) block_status_map: Arc>, } impl Shared { @@ -71,6 +74,7 @@ impl Shared { async_handle: Handle, ibd_finished: Arc, header_map: Arc, + block_status_map: Arc>, ) -> Shared { Shared { store, @@ -82,6 +86,7 @@ impl Shared { async_handle, ibd_finished, header_map, + block_status_map, } } /// Spawn freeze background thread that periodically checks and moves ancient data from the kv database into the freezer. 
diff --git a/shared/src/shared_builder.rs b/shared/src/shared_builder.rs index eb4d570c57..3b79988554 100644 --- a/shared/src/shared_builder.rs +++ b/shared/src/shared_builder.rs @@ -45,6 +45,8 @@ use ckb_types::core::tx_pool::Reject; use ckb_types::core::EpochExt; use ckb_types::core::HeaderView; use ckb_verification::cache::init_cache; +use dashmap::DashMap; +use std::cmp::Ordering; use std::collections::HashSet; use std::path::{Path, PathBuf}; use std::sync::atomic::AtomicBool; @@ -400,6 +402,8 @@ impl SharedBuilder { register_tx_pool_callback(&mut tx_pool_builder, notify_controller.clone()); + let block_status_map = Arc::new(DashMap::new()); + let ibd_finished = Arc::new(AtomicBool::new(false)); let shared = Shared::new( store, @@ -411,6 +415,7 @@ impl SharedBuilder { async_handle, ibd_finished, header_map, + block_status_map, ); let pack = SharedPackage { From df9263ad72a272024b0563e7318e046518d9d6bb Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 23 May 2023 12:00:45 +0800 Subject: [PATCH 006/357] Fix block_status_map and header_map usage --- rpc/src/module/net.rs | 3 +- shared/src/shared.rs | 37 ++- sync/src/relayer/compact_block_process.rs | 2 +- sync/src/relayer/mod.rs | 23 +- sync/src/synchronizer/block_process.rs | 3 +- sync/src/synchronizer/get_headers_process.rs | 6 +- sync/src/synchronizer/headers_process.rs | 12 +- sync/src/synchronizer/mod.rs | 5 +- sync/src/types/mod.rs | 277 +++++++++---------- 9 files changed, 194 insertions(+), 174 deletions(-) diff --git a/rpc/src/module/net.rs b/rpc/src/module/net.rs index 05c82f8db7..94c9a6e7bb 100644 --- a/rpc/src/module/net.rs +++ b/rpc/src/module/net.rs @@ -715,6 +715,7 @@ impl NetRpc for NetRpcImpl { fn sync_state(&self) -> Result { let chain = self.sync_shared.active_chain(); + let shared = chain.shared(); let state = chain.shared().state(); let (fast_time, normal_time, low_time) = state.read_inflight_blocks().division_point(); let best_known = state.shared_best_header(); @@ -722,7 +723,7 @@ impl 
NetRpc for NetRpcImpl { ibd: chain.is_initial_block_download(), best_known_block_number: best_known.number().into(), best_known_block_timestamp: best_known.timestamp().into(), - orphan_blocks_count: (state.orphan_pool().len() as u64).into(), + orphan_blocks_count: (shared.shared().orphan_pool_count()).into(), inflight_blocks_count: (state.read_inflight_blocks().total_inflight_count() as u64) .into(), fast_time: fast_time.into(), diff --git a/shared/src/shared.rs b/shared/src/shared.rs index d0de5fefb8..0faaf1890c 100644 --- a/shared/src/shared.rs +++ b/shared/src/shared.rs @@ -16,11 +16,13 @@ use ckb_store::{ChainDB, ChainStore}; use ckb_systemtime::unix_time_as_millis; use ckb_tx_pool::{BlockTemplate, TokioRwLock, TxPoolController}; use ckb_types::{ - core::{BlockNumber, EpochExt, EpochNumber, HeaderView, Version}, + core, + core::{service, BlockNumber, EpochExt, EpochNumber, HeaderView, Version}, packed::{self, Byte32}, prelude::*, U256, }; +use ckb_util::shrink_to_fit; use ckb_verification::cache::TxVerificationCache; use dashmap::DashMap; use std::cmp; @@ -34,6 +36,8 @@ const FREEZER_INTERVAL: Duration = Duration::from_secs(60); const THRESHOLD_EPOCH: EpochNumber = 2; const MAX_FREEZE_LIMIT: BlockNumber = 30_000; +pub const SHRINK_THRESHOLD: usize = 300; + /// An owned permission to close on a freezer thread pub struct FreezerClose { stopped: Arc, @@ -57,7 +61,7 @@ pub struct Shared { pub(crate) async_handle: Handle, pub(crate) ibd_finished: Arc, - pub(crate) header_map: Arc, + pub header_map: Arc, pub(crate) block_status_map: Arc>, } @@ -379,4 +383,33 @@ impl Shared { max_version.map(Into::into), ) } + + pub fn header_map(&self) -> &HeaderMap { + &self.header_map + } + pub fn block_status_map(&self) -> &DashMap { + &self.block_status_map + } + + pub fn remove_header_view(&self, hash: &Byte32) { + self.header_map.remove(hash); + } + + pub fn insert_block_status(&self, block_hash: Byte32, status: BlockStatus) { + self.block_status_map.insert(block_hash, status); 
+ } + + pub fn remove_block_status(&self, block_hash: &Byte32) { + self.block_status_map.remove(block_hash); + shrink_to_fit!(self.block_status_map, SHRINK_THRESHOLD); + } + + pub fn get_orphan_block(&self, block_hash: &Byte32) -> Option { + todo!("get_orphan_block") + // self.orphan_block_pool.get_block(block_hash) + } + + pub fn orphan_pool_count(&self) -> u64 { + 0 + } } diff --git a/sync/src/relayer/compact_block_process.rs b/sync/src/relayer/compact_block_process.rs index 514b416e47..426b38da42 100644 --- a/sync/src/relayer/compact_block_process.rs +++ b/sync/src/relayer/compact_block_process.rs @@ -332,7 +332,7 @@ fn contextual_check( return Status::ignored(); } else { shared - .state() + .shared() .insert_block_status(block_hash.clone(), BlockStatus::BLOCK_INVALID); return StatusCode::CompactBlockHasInvalidHeader .with_context(format!("{block_hash} {err}")); diff --git a/sync/src/relayer/mod.rs b/sync/src/relayer/mod.rs index 43d3f77e45..814c4721e5 100644 --- a/sync/src/relayer/mod.rs +++ b/sync/src/relayer/mod.rs @@ -351,7 +351,10 @@ impl Relayer { "relayer send block when accept block error: {:?}", err, ); - } + let block_hash = boxed.hash(); + self.shared().shared().remove_header_view(&block_hash); + let cb = packed::CompactBlock::build_from_block(&boxed, &HashSet::new()); + let message = packed::RelayMessage::new_builder().set(cb).build(); if let Some(p2p_control) = nc.p2p_control() { let snapshot = self.shared.shared().snapshot(); @@ -514,7 +517,7 @@ impl Relayer { } } BlockStatus::BLOCK_RECEIVED => { - if let Some(uncle) = self.shared.state().get_orphan_block(&uncle_hash) { + if let Some(uncle) = self.shared.shared().get_orphan_block(&uncle_hash) { uncles.push(uncle.as_uncle().data()); } else { debug_target!( @@ -959,14 +962,14 @@ impl CKBProtocolHandler for Relayer { } ASK_FOR_TXS_TOKEN => self.ask_for_txs(nc.as_ref()), TX_HASHES_TOKEN => self.send_bulk_of_tx_hashes(nc.as_ref()), - SEARCH_ORPHAN_POOL_TOKEN => { - if 
!self.shared.state().orphan_pool().is_empty() { - tokio::task::block_in_place(|| { - self.shared.try_search_orphan_pool(&self.chain); - self.shared.periodic_clean_orphan_pool(); - }) - } - } + // SEARCH_ORPHAN_POOL_TOKEN => { + // if !self.shared.state().orphan_pool().is_empty() { + // tokio::task::block_in_place(|| { + // self.shared.try_search_orphan_pool(&self.chain); + // self.shared.periodic_clean_orphan_pool(); + // }) + // } + // } _ => unreachable!(), } trace_target!( diff --git a/sync/src/synchronizer/block_process.rs b/sync/src/synchronizer/block_process.rs index 3526fb1450..b8fc6b5824 100644 --- a/sync/src/synchronizer/block_process.rs +++ b/sync/src/synchronizer/block_process.rs @@ -30,9 +30,8 @@ impl<'a> BlockProcess<'a> { block.hash(), ); let shared = self.synchronizer.shared(); - let state = shared.state(); - if state.new_block_received(&block) { + if shared.new_block_received(&block) { if let Err(err) = self.synchronizer.process_new_block(block.clone()) { if !is_internal_db_error(&err) { return StatusCode::BlockIsInvalid.with_context(format!( diff --git a/sync/src/synchronizer/get_headers_process.rs b/sync/src/synchronizer/get_headers_process.rs index 3b4b44cf12..12c5041413 100644 --- a/sync/src/synchronizer/get_headers_process.rs +++ b/sync/src/synchronizer/get_headers_process.rs @@ -55,10 +55,10 @@ impl<'a> GetHeadersProcess<'a> { self.peer ); self.send_in_ibd(); - let state = self.synchronizer.shared.state(); - if let Some(flag) = state.peers().get_flag(self.peer) { + let shared = self.synchronizer.shared(); + if let Some(flag) = shared.state().peers().get_flag(self.peer) { if flag.is_outbound || flag.is_whitelist || flag.is_protect { - state.insert_peer_unknown_header_list(self.peer, block_locator_hashes); + shared.insert_peer_unknown_header_list(self.peer, block_locator_hashes); } }; return Status::ignored(); diff --git a/sync/src/synchronizer/headers_process.rs b/sync/src/synchronizer/headers_process.rs index 7e19686ed0..9da100a77c 100644 --- 
a/sync/src/synchronizer/headers_process.rs +++ b/sync/src/synchronizer/headers_process.rs @@ -307,7 +307,9 @@ impl<'a, DL: HeaderFieldsProvider> HeaderAcceptor<'a, DL> { self.header.number(), self.header.hash(), ); - state.insert_block_status(self.header.hash(), BlockStatus::BLOCK_INVALID); + shared + .shared() + .insert_block_status(self.header.hash(), BlockStatus::BLOCK_INVALID); return result; } @@ -318,7 +320,9 @@ impl<'a, DL: HeaderFieldsProvider> HeaderAcceptor<'a, DL> { self.header.hash(), ); if is_invalid { - state.insert_block_status(self.header.hash(), BlockStatus::BLOCK_INVALID); + shared + .shared() + .insert_block_status(self.header.hash(), BlockStatus::BLOCK_INVALID); } return result; } @@ -329,7 +333,9 @@ impl<'a, DL: HeaderFieldsProvider> HeaderAcceptor<'a, DL> { self.header.number(), self.header.hash(), ); - state.insert_block_status(self.header.hash(), BlockStatus::BLOCK_INVALID); + shared + .shared() + .insert_block_status(self.header.hash(), BlockStatus::BLOCK_INVALID); return result; } diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index 573c74d4e7..26d5c88e13 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -158,7 +158,8 @@ impl BlockFetchCMD { return self.can_start; } - let state = self.sync_shared.state(); + let sync_shared = self.sync_shared; + let state = sync_shared.state(); let min_work_reach = |flag: &mut CanStart| { if state.min_chain_work_ready() { @@ -169,7 +170,7 @@ impl BlockFetchCMD { let assume_valid_target_find = |flag: &mut CanStart| { let mut assume_valid_target = state.assume_valid_target(); if let Some(ref target) = *assume_valid_target { - match state.header_map().get(&target.pack()) { + match sync_shared.shared().header_map().get(&target.pack()) { Some(header) => { *flag = CanStart::Ready; // Blocks that are no longer in the scope of ibd must be forced to verify diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index b5c3642faa..44acc8dfc8 100644 --- 
a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -1017,22 +1017,15 @@ impl SyncShared { "header_map.memory_limit {}", sync_config.header_map.memory_limit ); - let header_map = HeaderMap::new( - tmpdir, - sync_config.header_map.memory_limit.as_u64() as usize, - shared.async_handle(), - ); let state = SyncState { shared_best_header, - header_map, - block_status_map: DashMap::new(), tx_filter: Mutex::new(TtlFilter::default()), unknown_tx_hashes: Mutex::new(KeyedPriorityQueue::new()), peers: Peers::default(), pending_get_block_proposals: DashMap::new(), pending_compact_blocks: Mutex::new(HashMap::default()), - orphan_block_pool: OrphanBlockPool::with_capacity(ORPHAN_BLOCK_SIZE), + // orphan_block_pool: OrphanBlockPool::with_capacity(ORPHAN_BLOCK_SIZE), inflight_proposals: DashMap::new(), inflight_blocks: RwLock::new(InflightBlocks::default()), pending_get_headers: RwLock::new(LruCache::new(GET_HEADERS_CACHE_SIZE)), @@ -1082,15 +1075,15 @@ impl SyncShared { block: Arc, ) -> Result { // Insert the given block into orphan_block_pool if its parent is not found - if !self.is_stored(&block.parent_hash()) { - debug!( - "insert new orphan block {} {}", - block.header().number(), - block.header().hash() - ); - self.state.insert_orphan_block((*block).clone()); - return Ok(false); - } + // if !self.is_stored(&block.parent_hash()) { + // debug!( + // "insert new orphan block {} {}", + // block.header().number(), + // block.header().hash() + // ); + // self.state.insert_orphan_block((*block).clone()); + // return Ok(false); + // } // Attempt to accept the given block if its parent already exist in database let ret = self.accept_block(chain, Arc::clone(&block)); @@ -1101,61 +1094,61 @@ impl SyncShared { // The above block has been accepted. Attempt to accept its descendant blocks in orphan pool. 
// The returned blocks of `remove_blocks_by_parent` are in topology order by parents - self.try_search_orphan_pool(chain); + // self.try_search_orphan_pool(chain); ret } /// Try to find blocks from the orphan block pool that may no longer be orphan - pub fn try_search_orphan_pool(&self, chain: &ChainController) { - let leaders = self.state.orphan_pool().clone_leaders(); - debug!("orphan pool leader parents hash len: {}", leaders.len()); - - for hash in leaders { - if self.state.orphan_pool().is_empty() { - break; - } - if self.is_stored(&hash) { - let descendants = self.state.remove_orphan_by_parent(&hash); - debug!( - "attempting to accept {} descendant orphan blocks with existing parents hash", - descendants.len() - ); - for block in descendants { - // If we can not find the block's parent in database, that means it was failed to accept - // its parent, so we treat it as an invalid block as well. - if !self.is_stored(&block.parent_hash()) { - debug!( - "parent-unknown orphan block, block: {}, {}, parent: {}", - block.header().number(), - block.header().hash(), - block.header().parent_hash(), - ); - continue; - } - - let block = Arc::new(block); - if let Err(err) = self.accept_block(chain, Arc::clone(&block)) { - debug!( - "accept descendant orphan block {} error {:?}", - block.header().hash(), - err - ); - } - } - } - } - } - + // pub fn try_search_orphan_pool(&self, chain: &ChainController) { + // let leaders = self.state.orphan_pool().clone_leaders(); + // debug!("orphan pool leader parents hash len: {}", leaders.len()); + // + // for hash in leaders { + // if self.state.orphan_pool().is_empty() { + // break; + // } + // if self.is_stored(&hash) { + // let descendants = self.state.remove_orphan_by_parent(&hash); + // debug!( + // "try accepting {} descendant orphan blocks by exist parents hash", + // descendants.len() + // ); + // for block in descendants { + // // If we can not find the block's parent in database, that means it was failed to accept + // // its 
parent, so we treat it as an invalid block as well. + // if !self.is_stored(&block.parent_hash()) { + // debug!( + // "parent-unknown orphan block, block: {}, {}, parent: {}", + // block.header().number(), + // block.header().hash(), + // block.header().parent_hash(), + // ); + // continue; + // } + // + // let block = Arc::new(block); + // if let Err(err) = self.accept_block(chain, Arc::clone(&block)) { + // debug!( + // "accept descendant orphan block {} error {:?}", + // block.header().hash(), + // err + // ); + // } + // } + // } + // } + // } + // /// Cleanup orphan_pool, /// Remove blocks whose epoch is 6 (EXPIRED_EPOCH) epochs behind the current epoch. - pub(crate) fn periodic_clean_orphan_pool(&self) { - let hashes = self - .state - .clean_expired_blocks(self.active_chain().epoch_ext().number()); - for hash in hashes { - self.state.remove_header_view(&hash); - } - } + // pub(crate) fn periodic_clean_orphan_pool(&self) { + // let hashes = self + // .state + // .clean_expired_blocks(self.active_chain().epoch_ext().number()); + // for hash in hashes { + // self.shared().remove_header_view(&hash); + // } + // } pub(crate) fn accept_block( &self, @@ -1181,7 +1174,7 @@ impl SyncShared { if let Err(ref error) = ret { if !is_internal_db_error(error) { error!("accept block {:?} {}", block, error); - self.state + self.shared() .insert_block_status(block.header().hash(), BlockStatus::BLOCK_INVALID); } } else { @@ -1191,8 +1184,8 @@ impl SyncShared { // So we just simply remove the corresponding in-memory block status, // and the next time `get_block_status` would acquire the real-time // status via fetching block_ext from the database. 
- self.state.remove_block_status(&block.as_ref().hash()); - self.state.remove_header_view(&block.as_ref().hash()); + self.shared().remove_block_status(&block.as_ref().hash()); + self.shared().remove_header_view(&block.as_ref().hash()); } ret @@ -1238,7 +1231,7 @@ impl SyncShared { } }, ); - self.state.header_map.insert(header_view.clone()); + self.shared.header_map().insert(header_view.clone()); self.state .peers() .may_set_best_known_header(peer, header_view.as_header_index()); @@ -1259,9 +1252,9 @@ impl SyncShared { .get_block_ext(hash) .map(|block_ext| (header, block_ext.total_difficulty).into()) }) - .or_else(|| self.state.header_map.get(hash)) + .or_else(|| self.shared.header_map().get(hash)) } else { - self.state.header_map.get(hash).or_else(|| { + self.shared.header_map().get(hash).or_else(|| { store.get_block_header(hash).and_then(|header| { store .get_block_ext(hash) @@ -1281,12 +1274,45 @@ impl SyncShared { pub fn get_epoch_ext(&self, hash: &Byte32) -> Option { self.store().get_block_epoch(hash) } + + pub fn insert_peer_unknown_header_list(&self, pi: PeerIndex, header_list: Vec) { + // update peer's unknown_header_list only once + if self.state().peers.unknown_header_list_is_empty(pi) { + // header list is an ordered list, sorted from highest to lowest, + // so here you discard and exit early + for hash in header_list { + if let Some(header) = self.shared().header_map().get(&hash) { + self.state() + .peers + .may_set_best_known_header(pi, header.as_header_index()); + break; + } else { + self.state().peers.insert_unknown_header_hash(pi, hash) + } + } + } + } + + // Return true when the block is that we have requested and received first time. 
+ pub fn new_block_received(&self, block: &core::BlockView) -> bool { + if self + .state() + .write_inflight_blocks() + .remove_by_block((block.number(), block.hash()).into()) + { + self.shared() + .insert_block_status(block.hash(), BlockStatus::BLOCK_RECEIVED); + true + } else { + false + } + } } impl HeaderFieldsProvider for SyncShared { fn get_header_fields(&self, hash: &Byte32) -> Option { - self.state - .header_map + self.shared + .header_map() .get(hash) .map(|header| HeaderFields { hash: header.hash(), @@ -1374,8 +1400,6 @@ impl PartialOrd for UnknownTxHashPriority { pub struct SyncState { /* Status irrelevant to peers */ shared_best_header: RwLock, - header_map: HeaderMap, - block_status_map: DashMap, tx_filter: Mutex>, // The priority is ordering by timestamp (reversed), means do not ask the tx before this timestamp (timeout). @@ -1388,7 +1412,7 @@ pub struct SyncState { pending_get_block_proposals: DashMap>, pending_get_headers: RwLock>, pending_compact_blocks: Mutex, - orphan_block_pool: OrphanBlockPool, + // orphan_block_pool: OrphanBlockPool, /* In-flight items for which we request to peers, but not got the responses yet */ inflight_proposals: DashMap, @@ -1457,10 +1481,6 @@ impl SyncState { self.shared_best_header.read() } - pub fn header_map(&self) -> &HeaderMap { - &self.header_map - } - pub fn may_set_shared_best_header(&self, header: HeaderIndexView) { if !header.is_better_than(self.shared_best_header.read().total_difficulty()) { return; @@ -1472,10 +1492,6 @@ impl SyncState { *self.shared_best_header.write() = header; } - pub fn remove_header_view(&self, hash: &Byte32) { - self.header_map.remove(hash); - } - pub(crate) fn suspend_sync(&self, peer_state: &mut PeerState) { if peer_state.sync_started() { assert_ne!( @@ -1614,19 +1630,6 @@ impl SyncState { self.unknown_tx_hashes.lock() } - // Return true when the block is that we have requested and received first time. 
- pub fn new_block_received(&self, block: &core::BlockView) -> bool { - if self - .write_inflight_blocks() - .remove_by_block((block.number(), block.hash()).into()) - { - self.insert_block_status(block.hash(), BlockStatus::BLOCK_RECEIVED); - true - } else { - false - } - } - pub fn insert_inflight_proposals( &self, ids: Vec, @@ -1665,32 +1668,23 @@ impl SyncState { self.inflight_proposals.contains_key(proposal_id) } - pub fn insert_orphan_block(&self, block: core::BlockView) { - self.insert_block_status(block.hash(), BlockStatus::BLOCK_RECEIVED); - self.orphan_block_pool.insert(block); - } - - pub fn remove_orphan_by_parent(&self, parent_hash: &Byte32) -> Vec { - let blocks = self.orphan_block_pool.remove_blocks_by_parent(parent_hash); - blocks.iter().for_each(|block| { - self.block_status_map.remove(&block.hash()); - }); - shrink_to_fit!(self.block_status_map, SHRINK_THRESHOLD); - blocks - } - - pub fn orphan_pool(&self) -> &OrphanBlockPool { - &self.orphan_block_pool - } - - pub fn insert_block_status(&self, block_hash: Byte32, status: BlockStatus) { - self.block_status_map.insert(block_hash, status); - } - - pub fn remove_block_status(&self, block_hash: &Byte32) { - self.block_status_map.remove(block_hash); - shrink_to_fit!(self.block_status_map, SHRINK_THRESHOLD); - } + // pub fn insert_orphan_block(&self, block: core::BlockView) { + // self.insert_block_status(block.hash(), BlockStatus::BLOCK_RECEIVED); + // self.orphan_block_pool.insert(block); + // } + // + // pub fn remove_orphan_by_parent(&self, parent_hash: &Byte32) -> Vec { + // let blocks = self.orphan_block_pool.remove_blocks_by_parent(parent_hash); + // blocks.iter().for_each(|block| { + // self.block_status_map.remove(&block.hash()); + // }); + // shrink_to_fit!(self.block_status_map, SHRINK_THRESHOLD); + // blocks + // } + // + // pub fn orphan_pool(&self) -> &OrphanBlockPool { + // &self.orphan_block_pool + // } pub fn drain_get_block_proposals( &self, @@ -1718,30 +1712,13 @@ impl SyncState { 
self.peers().disconnected(pi); } - pub fn get_orphan_block(&self, block_hash: &Byte32) -> Option { - self.orphan_block_pool.get_block(block_hash) - } - - pub fn clean_expired_blocks(&self, epoch: EpochNumber) -> Vec { - self.orphan_block_pool.clean_expired_blocks(epoch) - } - - pub fn insert_peer_unknown_header_list(&self, pi: PeerIndex, header_list: Vec) { - // update peer's unknown_header_list only once - if self.peers.unknown_header_list_is_empty(pi) { - // header list is an ordered list, sorted from highest to lowest, - // so here you discard and exit early - for hash in header_list { - if let Some(header) = self.header_map.get(&hash) { - self.peers - .may_set_best_known_header(pi, header.as_header_index()); - break; - } else { - self.peers.insert_unknown_header_hash(pi, hash) - } - } - } - } + // pub fn get_orphan_block(&self, block_hash: &Byte32) -> Option { + // self.orphan_block_pool.get_block(block_hash) + // } + // + // pub fn clean_expired_blocks(&self, epoch: EpochNumber) -> Vec { + // self.orphan_block_pool.clean_expired_blocks(epoch) + // } } /** ActiveChain captures a point-in-time view of indexed chain of blocks. 
*/ @@ -2040,10 +2017,10 @@ impl ActiveChain { } pub fn get_block_status(&self, block_hash: &Byte32) -> BlockStatus { - match self.shared().state().block_status_map.get(block_hash) { + match self.shared().shared().block_status_map().get(block_hash) { Some(status_ref) => *status_ref.value(), None => { - if self.shared().state().header_map.contains_key(block_hash) { + if self.shared().shared().header_map().contains_key(block_hash) { BlockStatus::HEADER_VALID } else { let verified = self From 144faeb0a2265568e7bb7db9a5ba15421c444095 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Fri, 4 Aug 2023 14:29:51 +0800 Subject: [PATCH 007/357] Remove `SEARCH_ORPHAN_POOL_TOKEN` --- Cargo.lock | 1 + sync/src/relayer/mod.rs | 16 ---------------- 2 files changed, 1 insertion(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f848994f6b..b74bffef95 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1455,6 +1455,7 @@ dependencies = [ "ckb-types", "ckb-util", "ckb-verification", + "dashmap", "once_cell", "sled", "tempfile", diff --git a/sync/src/relayer/mod.rs b/sync/src/relayer/mod.rs index 814c4721e5..8d3d95bcbb 100644 --- a/sync/src/relayer/mod.rs +++ b/sync/src/relayer/mod.rs @@ -50,7 +50,6 @@ use std::time::{Duration, Instant}; pub const TX_PROPOSAL_TOKEN: u64 = 0; pub const ASK_FOR_TXS_TOKEN: u64 = 1; pub const TX_HASHES_TOKEN: u64 = 2; -pub const SEARCH_ORPHAN_POOL_TOKEN: u64 = 3; pub const MAX_RELAY_PEERS: usize = 128; pub const MAX_RELAY_TXS_NUM_PER_BATCH: usize = 32767; @@ -788,10 +787,6 @@ impl CKBProtocolHandler for Relayer { nc.set_notify(Duration::from_millis(300), TX_HASHES_TOKEN) .await .expect("set_notify at init is ok"); - // todo: remove when the asynchronous verification is completed - nc.set_notify(Duration::from_secs(5), SEARCH_ORPHAN_POOL_TOKEN) - .await - .expect("set_notify at init is ok"); } async fn received( @@ -940,9 +935,6 @@ impl CKBProtocolHandler for Relayer { if nc.remove_notify(TX_HASHES_TOKEN).await.is_err() { 
trace_target!(crate::LOG_TARGET_RELAY, "remove v2 relay notify fail"); } - if nc.remove_notify(SEARCH_ORPHAN_POOL_TOKEN).await.is_err() { - trace_target!(crate::LOG_TARGET_RELAY, "remove v2 relay notify fail"); - } for kv_pair in self.shared().state().peers().state.iter() { let (peer, state) = kv_pair.pair(); if !state.peer_flags.is_2023edition { @@ -962,14 +954,6 @@ impl CKBProtocolHandler for Relayer { } ASK_FOR_TXS_TOKEN => self.ask_for_txs(nc.as_ref()), TX_HASHES_TOKEN => self.send_bulk_of_tx_hashes(nc.as_ref()), - // SEARCH_ORPHAN_POOL_TOKEN => { - // if !self.shared.state().orphan_pool().is_empty() { - // tokio::task::block_in_place(|| { - // self.shared.try_search_orphan_pool(&self.chain); - // self.shared.periodic_clean_orphan_pool(); - // }) - // } - // } _ => unreachable!(), } trace_target!( From f797536b6bd277b1faaeba856a807c1ae1814326 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 20 Sep 2023 09:41:05 +0800 Subject: [PATCH 008/357] Refactor: move `ForkChanges` to independent module --- chain/src/chain.rs | 85 ++-------------------------------------- chain/src/forkchanges.rs | 84 +++++++++++++++++++++++++++++++++++++++ chain/src/lib.rs | 1 + 3 files changed, 88 insertions(+), 82 deletions(-) create mode 100644 chain/src/forkchanges.rs diff --git a/chain/src/chain.rs b/chain/src/chain.rs index a42aa788b0..520e1cf7ff 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -1,6 +1,7 @@ //! CKB chain service. 
#![allow(missing_docs)] +use crate::forkchanges::ForkChanges; use ckb_channel::{self as channel, select, Sender}; use ckb_error::{Error, InternalErrorKind}; use ckb_logger::Level::Trace; @@ -21,11 +22,10 @@ use ckb_types::{ resolve_transaction, BlockCellProvider, HeaderChecker, OverlayCellProvider, ResolvedTransaction, }, - hardfork::HardForks, service::{Request, DEFAULT_CHANNEL_SIZE}, BlockExt, BlockNumber, BlockView, Cycle, HeaderView, }, - packed::{Byte32, ProposalShortId}, + packed::Byte32, utilities::merkle_mountain_range::ChainRootMMR, U256, }; @@ -33,7 +33,7 @@ use ckb_verification::cache::Completed; use ckb_verification::{BlockVerifier, InvalidParentError, NonContextualBlockTxsVerifier}; use ckb_verification_contextual::{ContextualBlockVerifier, VerifyContext}; use ckb_verification_traits::{Switch, Verifier}; -use std::collections::{HashSet, VecDeque}; +use std::collections::HashSet; use std::sync::Arc; use std::time::Instant; use std::{cmp, thread}; @@ -102,85 +102,6 @@ impl ChainController { } } -/// The struct represent fork -#[derive(Debug, Default)] -pub struct ForkChanges { - /// Blocks attached to index after forks - pub(crate) attached_blocks: VecDeque, - /// Blocks detached from index after forks - pub(crate) detached_blocks: VecDeque, - /// HashSet with proposal_id detached to index after forks - pub(crate) detached_proposal_id: HashSet, - /// to be updated exts - pub(crate) dirty_exts: VecDeque, -} - -impl ForkChanges { - /// blocks attached to index after forks - pub fn attached_blocks(&self) -> &VecDeque { - &self.attached_blocks - } - - /// blocks detached from index after forks - pub fn detached_blocks(&self) -> &VecDeque { - &self.detached_blocks - } - - /// proposal_id detached to index after forks - pub fn detached_proposal_id(&self) -> &HashSet { - &self.detached_proposal_id - } - - /// are there any block should be detached - pub fn has_detached(&self) -> bool { - !self.detached_blocks.is_empty() - } - - /// cached verified attached 
block num - pub fn verified_len(&self) -> usize { - self.attached_blocks.len() - self.dirty_exts.len() - } - - /// assertion for make sure attached_blocks and detached_blocks are sorted - #[cfg(debug_assertions)] - pub fn is_sorted(&self) -> bool { - IsSorted::is_sorted_by_key(&mut self.attached_blocks().iter(), |blk| { - blk.header().number() - }) && IsSorted::is_sorted_by_key(&mut self.detached_blocks().iter(), |blk| { - blk.header().number() - }) - } - - pub fn during_hardfork(&self, hardfork_switch: &HardForks) -> bool { - let hardfork_during_detach = - self.check_if_hardfork_during_blocks(hardfork_switch, &self.detached_blocks); - let hardfork_during_attach = - self.check_if_hardfork_during_blocks(hardfork_switch, &self.attached_blocks); - - hardfork_during_detach || hardfork_during_attach - } - - fn check_if_hardfork_during_blocks( - &self, - hardfork: &HardForks, - blocks: &VecDeque, - ) -> bool { - if blocks.is_empty() { - false - } else { - // This method assumes that the input blocks are sorted and unique. 
- let rfc_0049 = hardfork.ckb2023.rfc_0049(); - let epoch_first = blocks.front().unwrap().epoch().number(); - let epoch_next = blocks - .back() - .unwrap() - .epoch() - .minimum_epoch_number_after_n_blocks(1); - epoch_first < rfc_0049 && rfc_0049 <= epoch_next - } - } -} - pub(crate) struct GlobalIndex { pub(crate) number: BlockNumber, pub(crate) hash: Byte32, diff --git a/chain/src/forkchanges.rs b/chain/src/forkchanges.rs new file mode 100644 index 0000000000..01e3415c67 --- /dev/null +++ b/chain/src/forkchanges.rs @@ -0,0 +1,84 @@ +use ckb_rust_unstable_port::IsSorted; +use ckb_types::core::hardfork::HardForks; +use ckb_types::core::{BlockExt, BlockView}; +use ckb_types::packed::ProposalShortId; +use std::collections::{HashSet, VecDeque}; + +/// The struct represent fork +#[derive(Debug, Default)] +pub struct ForkChanges { + /// Blocks attached to index after forks + pub(crate) attached_blocks: VecDeque, + /// Blocks detached from index after forks + pub(crate) detached_blocks: VecDeque, + /// HashSet with proposal_id detached to index after forks + pub(crate) detached_proposal_id: HashSet, + /// to be updated exts + pub(crate) dirty_exts: VecDeque, +} + +impl ForkChanges { + /// blocks attached to index after forks + pub fn attached_blocks(&self) -> &VecDeque { + &self.attached_blocks + } + + /// blocks detached from index after forks + pub fn detached_blocks(&self) -> &VecDeque { + &self.detached_blocks + } + + /// proposal_id detached to index after forks + pub fn detached_proposal_id(&self) -> &HashSet { + &self.detached_proposal_id + } + + /// are there any block should be detached + pub fn has_detached(&self) -> bool { + !self.detached_blocks.is_empty() + } + + /// cached verified attached block num + pub fn verified_len(&self) -> usize { + self.attached_blocks.len() - self.dirty_exts.len() + } + + /// assertion for make sure attached_blocks and detached_blocks are sorted + #[cfg(debug_assertions)] + pub fn is_sorted(&self) -> bool { + 
IsSorted::is_sorted_by_key(&mut self.attached_blocks().iter(), |blk| { + blk.header().number() + }) && IsSorted::is_sorted_by_key(&mut self.detached_blocks().iter(), |blk| { + blk.header().number() + }) + } + + pub fn during_hardfork(&self, hardfork_switch: &HardForks) -> bool { + let hardfork_during_detach = + self.check_if_hardfork_during_blocks(hardfork_switch, &self.detached_blocks); + let hardfork_during_attach = + self.check_if_hardfork_during_blocks(hardfork_switch, &self.attached_blocks); + + hardfork_during_detach || hardfork_during_attach + } + + fn check_if_hardfork_during_blocks( + &self, + hardfork: &HardForks, + blocks: &VecDeque, + ) -> bool { + if blocks.is_empty() { + false + } else { + // This method assumes that the input blocks are sorted and unique. + let rfc_0049 = hardfork.ckb2023.rfc_0049(); + let epoch_first = blocks.front().unwrap().epoch().number(); + let epoch_next = blocks + .back() + .unwrap() + .epoch() + .minimum_epoch_number_after_n_blocks(1); + epoch_first < rfc_0049 && rfc_0049 <= epoch_next + } + } +} diff --git a/chain/src/lib.rs b/chain/src/lib.rs index 5898633b83..6885da60b9 100644 --- a/chain/src/lib.rs +++ b/chain/src/lib.rs @@ -7,5 +7,6 @@ //! 
[`ChainController`]: chain/struct.ChainController.html pub mod chain; +mod forkchanges; #[cfg(test)] mod tests; From 5ca8a7a76d1687b5140407dc831accbff5bd674b Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 20 Sep 2023 09:41:46 +0800 Subject: [PATCH 009/357] Fix usage for `ForkChanges --- chain/src/tests/find_fork.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/chain/src/tests/find_fork.rs b/chain/src/tests/find_fork.rs index f25c04de5e..9b34c79aaa 100644 --- a/chain/src/tests/find_fork.rs +++ b/chain/src/tests/find_fork.rs @@ -1,4 +1,5 @@ -use crate::chain::{ChainService, ForkChanges}; +use crate::chain::ChainService; +use crate::forkchanges::ForkChanges; use ckb_chain_spec::consensus::{Consensus, ProposalWindow}; use ckb_shared::SharedBuilder; use ckb_store::ChainStore; @@ -495,7 +496,7 @@ fn test_fork_proposal_table() { assert_eq!( &vec![ packed::ProposalShortId::new([0u8, 0, 0, 0, 0, 0, 0, 0, 0, 3]), - packed::ProposalShortId::new([1u8, 0, 0, 0, 0, 0, 0, 0, 0, 4]) + packed::ProposalShortId::new([1u8, 0, 0, 0, 0, 0, 0, 0, 0, 4]), ] .into_iter() .collect::>(), From e89dd957e5f5dd1da12ae9e28ab9628b81a72a10 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 24 May 2023 10:23:21 +0800 Subject: [PATCH 010/357] Introduce Async process Signed-off-by: Eval EXEC --- Cargo.lock | 28 ++ chain/Cargo.toml | 3 + chain/src/chain.rs | 596 +++++++++++++++++++++++++++++++-- chain/src/lib.rs | 1 + chain/src/orphan_block_pool.rs | 170 ++++++++++ shared/src/block_status.rs | 8 +- shared/src/lib.rs | 1 + shared/src/shared.rs | 80 ++++- 8 files changed, 839 insertions(+), 48 deletions(-) create mode 100644 chain/src/orphan_block_pool.rs diff --git a/Cargo.lock b/Cargo.lock index b74bffef95..74190eb287 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -689,6 +689,8 @@ dependencies = [ "ckb-app-config", "ckb-chain-spec", "ckb-channel", + "ckb-constant", + "ckb-dao", "ckb-dao-utils", "ckb-error", "ckb-jsonrpc-types", @@ -706,9 +708,11 @@ dependencies = [ 
"ckb-test-chain-utils", "ckb-tx-pool", "ckb-types", + "ckb-util", "ckb-verification", "ckb-verification-contextual", "ckb-verification-traits", + "crossbeam", "faux", "lazy_static", "tempfile", @@ -1965,6 +1969,20 @@ dependencies = [ "itertools 0.10.5", ] +[[package]] +name = "crossbeam" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2801af0d36612ae591caa9568261fddce32ce6e08a7275ea334a06a4ad021a2c" +dependencies = [ + "cfg-if 1.0.0", + "crossbeam-channel", + "crossbeam-deque", + "crossbeam-epoch", + "crossbeam-queue", + "crossbeam-utils", +] + [[package]] name = "crossbeam-channel" version = "0.5.8" @@ -1999,6 +2017,16 @@ dependencies = [ "scopeguard", ] +[[package]] +name = "crossbeam-queue" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add" +dependencies = [ + "cfg-if 1.0.0", + "crossbeam-utils", +] + [[package]] name = "crossbeam-utils" version = "0.8.16" diff --git a/chain/Cargo.toml b/chain/Cargo.toml index c474b43c4a..2be6d5cde0 100644 --- a/chain/Cargo.toml +++ b/chain/Cargo.toml @@ -27,6 +27,9 @@ ckb-rust-unstable-port = { path = "../util/rust-unstable-port", version = "= 0.1 ckb-channel = { path = "../util/channel", version = "= 0.114.0-pre" } faux = { version = "^0.1", optional = true } ckb-merkle-mountain-range = "0.5.2" +ckb-constant = { path = "../util/constant", version = "= 0.113.0-pre" } +ckb-util = { path = "../util", version = "= 0.113.0-pre" } +crossbeam = "0.8.2" [dev-dependencies] ckb-test-chain-utils = { path = "../util/test-chain-utils", version = "= 0.114.0-pre" } diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 520e1cf7ff..a967024637 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -2,7 +2,10 @@ #![allow(missing_docs)] use crate::forkchanges::ForkChanges; -use ckb_channel::{self as channel, select, Sender}; +use crate::orphan_block_pool::OrphanBlockPool; +use 
ckb_chain_spec::versionbits::VersionbitsIndexer; +use ckb_channel::{self as channel, select, Receiver, SendError, Sender}; +use ckb_constant::sync::BLOCK_DOWNLOAD_WINDOW; use ckb_error::{Error, InternalErrorKind}; use ckb_logger::Level::Trace; use ckb_logger::{ @@ -12,6 +15,7 @@ use ckb_merkle_mountain_range::leaf_index_to_mmr_size; use ckb_proposal_table::ProposalTable; #[cfg(debug_assertions)] use ckb_rust_unstable_port::IsSorted; +use ckb_shared::block_status::BlockStatus; use ckb_shared::shared::Shared; use ckb_stop_handler::{new_crossbeam_exit_rx, register_thread}; use ckb_store::{attach_block_cell, detach_block_cell, ChainStore, StoreTransaction}; @@ -27,17 +31,23 @@ use ckb_types::{ }, packed::Byte32, utilities::merkle_mountain_range::ChainRootMMR, - U256, + H256, U256, }; +use ckb_util::Mutex; use ckb_verification::cache::Completed; use ckb_verification::{BlockVerifier, InvalidParentError, NonContextualBlockTxsVerifier}; use ckb_verification_contextual::{ContextualBlockVerifier, VerifyContext}; use ckb_verification_traits::{Switch, Verifier}; -use std::collections::HashSet; +use crossbeam::channel::SendTimeoutError; +use std::collections::{HashSet, VecDeque}; +use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; +use std::time::Duration; use std::time::Instant; use std::{cmp, thread}; +const ORPHAN_BLOCK_SIZE: usize = 100000; + type ProcessBlockRequest = Request<(Arc, Switch), Result>; type TruncateRequest = Request>; @@ -50,7 +60,8 @@ type TruncateRequest = Request>; #[derive(Clone)] pub struct ChainController { process_block_sender: Sender, - truncate_sender: Sender, // Used for testing only + truncate_sender: Sender, + orphan_block_broker: Arc, } #[cfg_attr(feature = "mock", faux::methods)] @@ -58,10 +69,12 @@ impl ChainController { pub fn new( process_block_sender: Sender, truncate_sender: Sender, + orphan_block_broker: Arc, ) -> Self { ChainController { process_block_sender, truncate_sender, + orphan_block_broker, } } /// Inserts the 
block into database. @@ -100,6 +113,15 @@ impl ChainController { .into()) }) } + + // Relay need this + pub fn get_orphan_block(&self, hash: &Byte32) -> Option { + todo!("load orphan block") + } + + pub fn orphan_blocks_len(&self) -> usize { + self.orphan_block_broker.len() + } } pub(crate) struct GlobalIndex { @@ -126,24 +148,55 @@ impl GlobalIndex { /// Chain background service /// /// The ChainService provides a single-threaded background executor. +#[derive(Clone)] pub struct ChainService { shared: Shared, - proposal_table: ProposalTable, + proposal_table: Arc>, + + orphan_blocks_broker: Arc, + + new_block_tx: Sender, + new_block_rx: Receiver, + + unverified_tx: Sender, + unverified_rx: Receiver, +} + +#[derive(Clone)] +struct UnverifiedBlock { + block: BlockView, + parent_header: HeaderView, + switch: Switch, } impl ChainService { /// Create a new ChainService instance with shared and initial proposal_table. pub fn new(shared: Shared, proposal_table: ProposalTable) -> ChainService { + let (unverified_tx, unverified_rx) = + channel::bounded::(BLOCK_DOWNLOAD_WINDOW as usize * 11); + + let (new_block_tx, new_block_rx) = + channel::bounded::(BLOCK_DOWNLOAD_WINDOW as usize); + ChainService { shared, - proposal_table, + proposal_table: Arc::new(Mutex::new(proposal_table)), + orphan_blocks_broker: Arc::new(OrphanBlockPool::with_capacity(ORPHAN_BLOCK_SIZE)), + unverified_tx, + unverified_rx, + new_block_tx, + new_block_rx, } } /// start background single-threaded service with specified thread_name. 
pub fn start(mut self, thread_name: Option) -> ChainController { + let orphan_blocks_broker_clone = Arc::clone(&self.orphan_blocks_broker); + let signal_receiver = new_crossbeam_exit_rx(); - let (process_block_sender, process_block_receiver) = channel::bounded(DEFAULT_CHANNEL_SIZE); + let (process_block_sender, process_block_receiver) = + channel::bounded(BLOCK_DOWNLOAD_WINDOW as usize); + let (truncate_sender, truncate_receiver) = channel::bounded(1); // Mainly for test: give an empty thread_name @@ -152,6 +205,25 @@ impl ChainService { thread_builder = thread_builder.name(name.to_string()); } let tx_control = self.shared.tx_pool_controller().clone(); + let (unverified_queue_stop_tx, unverified_queue_stop_rx) = ckb_channel::bounded::<()>(1); + let (search_orphan_pool_stop_tx, search_orphan_pool_stop_rx) = + ckb_channel::bounded::<()>(1); + + let unverified_consumer_thread = thread::Builder::new() + .name("verify_blocks".into()) + .spawn({ + let chain_service = self.clone(); + move || chain_service.start_consume_unverified_blocks(unverified_queue_stop_rx) + }) + .expect("start unverified_queue consumer thread should ok"); + + let search_orphan_pool_thread = thread::Builder::new() + .name("search_orphan".into()) + .spawn({ + let chain_service = self.clone(); + move || chain_service.start_search_orphan_pool(search_orphan_pool_stop_rx) + }) + .expect("start search_orphan_pool thread should ok"); let chain_jh = thread_builder .spawn(move || loop { @@ -161,7 +233,7 @@ impl ChainService { let instant = Instant::now(); let _ = tx_control.suspend_chunk_process(); - let _ = responder.send(self.process_block(block, verify)); + let _ = responder.send(self.process_block_v2(block, verify)); let _ = tx_control.continue_chunk_process(); if let Some(metrics) = ckb_metrics::handle() { @@ -188,6 +260,11 @@ impl ChainService { }, recv(signal_receiver) -> _ => { info!("ChainService received exit signal, exit now"); + unverified_queue_stop_tx.send(()); + 
search_orphan_pool_stop_tx.send(()); + + search_orphan_pool_thread.join(); + unverified_consumer_thread.join(); break; } } @@ -196,7 +273,189 @@ impl ChainService { register_thread("ChainService", chain_jh); - ChainController::new(process_block_sender, truncate_sender) + ChainController::new( + process_block_sender, + truncate_sender, + orphan_blocks_broker_clone, + ) + } + + fn start_consume_unverified_blocks(&self, unverified_queue_stop_rx: Receiver<()>) { + let mut begin_loop = std::time::Instant::now(); + loop { + begin_loop = std::time::Instant::now(); + select! { + recv(unverified_queue_stop_rx) -> _ => { + info!("unverified_queue_consumer got exit signal, exit now"); + return; + }, + recv(self.unverified_rx) -> msg => match msg { + Ok(unverified_task) => { + // process this unverified block + trace!("got an unverified block, wait cost: {:?}", begin_loop.elapsed()); + self.consume_unverified_blocks(unverified_task); + trace!("consume_unverified_blocks cost: {:?}", begin_loop.elapsed()); + }, + Err(err) => { + error!("unverified_rx err: {}", err); + return; + }, + }, + default => {}, + } + } + } + + fn consume_unverified_blocks(&self, unverified_block: UnverifiedBlock) { + // process this unverified block + match self.verify_block(&unverified_block) { + Ok(_) => { + let log_now = std::time::Instant::now(); + self.shared + .remove_block_status(&unverified_block.block.hash()); + let log_elapsed_remove_block_status = log_now.elapsed(); + self.shared + .remove_header_view(&unverified_block.block.hash()); + debug!( + "block {} remove_block_status cost: {:?}, and header_view cost: {:?}", + unverified_block.block.hash(), + log_elapsed_remove_block_status, + log_now.elapsed() + ); + } + Err(err) => { + error!( + "verify block {} failed: {}", + unverified_block.block.hash(), + err + ); + // TODO punish the peer who give me the bad block + + // TODO decrease unverified_tip + let tip = self + .shared + .store() + .get_tip_header() + .expect("tip_header must exist"); + 
let tip_ext = self + .shared + .store() + .get_block_ext(&tip.hash()) + .expect("tip header's ext must exist"); + + self.shared.set_unverified_tip(ckb_shared::HeaderIndex::new( + tip.clone().number(), + tip.clone().hash(), + tip_ext.total_difficulty, + )); + + self.shared + .insert_block_status(unverified_block.block.hash(), BlockStatus::BLOCK_INVALID); + error!( + "set_unverified tip to {}-{}, because verify {} failed: {}", + tip.number(), + tip.hash(), + unverified_block.block.hash(), + err + ); + } + } + } + + fn start_search_orphan_pool(&self, search_orphan_pool_stop_rx: Receiver<()>) { + loop { + select! { + recv(search_orphan_pool_stop_rx) -> _ => { + info!("unverified_queue_consumer got exit signal, exit now"); + return; + }, + recv(self.new_block_rx) -> msg => match msg { + Ok(switch) => { + self.search_orphan_pool(switch) + }, + Err(err) => { + error!("new_block_rx err: {}", err); + return + } + }, + } + } + } + fn search_orphan_pool(&self, switch: Switch) { + for leader_hash in self.orphan_blocks_broker.clone_leaders() { + if !self + .shared + .contains_block_status(&leader_hash, BlockStatus::BLOCK_PARTIAL_STORED) + { + trace!("orphan leader: {} not partial stored", leader_hash); + continue; + } + + let descendants = self + .orphan_blocks_broker + .remove_blocks_by_parent(&leader_hash); + if descendants.is_empty() { + continue; + } + let mut accept_error_occurred = false; + for descendant in &descendants { + match self.accept_block(descendant) { + Err(err) => { + accept_error_occurred = true; + error!("accept block {} failed: {}", descendant.hash(), err); + continue; + } + Ok(accepted_opt) => { + match accepted_opt { + Some((parent_header, total_difficulty)) => { + match self.unverified_tx.send(UnverifiedBlock { + block: descendant.to_owned(), + parent_header, + switch, + }) { + Ok(_) => {} + Err(err) => error!("send unverified_tx failed: {}", err), + }; + + if total_difficulty + .gt(self.shared.get_unverified_tip().total_difficulty()) + { + 
self.shared.set_unverified_tip(ckb_shared::HeaderIndex::new( + descendant.header().number(), + descendant.header().hash(), + total_difficulty, + )); + } + } + None => { + info!( + "doesn't accept block {}, because it has been stored", + descendant.hash() + ); + } + } + + debug!( + "set unverified_tip to {}-{}, while unverified_tip - verified_tip = {}", + descendant.number(), + descendant.hash(), + descendant + .number() + .saturating_sub(self.shared.snapshot().tip_number()) + ) + } + } + } + + if !accept_error_occurred { + debug!( + "accept {} blocks [{}->{}] success", + descendants.len(), + descendants.first().expect("descendants not empty").number(), + descendants.last().expect("descendants not empty").number(), + ) + } + } } fn make_fork_for_truncate(&self, target: &HeaderView, current_tip: &HeaderView) -> ForkChanges { @@ -212,7 +471,7 @@ impl ChainService { } // Truncate the main chain - // Use for testing only, can only truncate less than 50000 blocks each time + // Use for testing only pub(crate) fn truncate(&mut self, target_tip_hash: &Byte32) -> Result<(), Error> { let snapshot = Arc::clone(&self.shared.snapshot()); assert!(snapshot.is_main_chain(target_tip_hash)); @@ -224,19 +483,6 @@ impl ChainService { .and_then(|index| snapshot.get_epoch_ext(&index)) .expect("checked"); let origin_proposals = snapshot.proposals(); - - let block_count = snapshot - .tip_header() - .number() - .saturating_sub(target_tip_header.number()); - - if block_count > 5_0000 { - let err = format!( - "trying to truncate too many blocks: {}, exceed 50000", - block_count - ); - return Err(InternalErrorKind::Database.other(err).into()); - } let mut fork = self.make_fork_for_truncate(&target_tip_header, snapshot.tip_header()); let db_txn = self.shared.store().begin_transaction(); @@ -245,14 +491,15 @@ impl ChainService { db_txn.insert_tip_header(&target_tip_header)?; db_txn.insert_current_epoch_ext(&target_epoch_ext)?; - // Currently, we only move the target tip header here, we don't 
delete the block for performance - // TODO: delete the blocks if we need in the future - + for blk in fork.attached_blocks() { + db_txn.delete_block(blk)?; + } db_txn.commit()?; self.update_proposal_table(&fork); let (detached_proposal_id, new_proposals) = self .proposal_table + .lock() .finalize(origin_proposals, target_tip_header.number()); fork.detached_proposal_id = detached_proposal_id; @@ -266,6 +513,7 @@ impl ChainService { self.shared.store_snapshot(Arc::clone(&new_snapshot)); // NOTE: Dont update tx-pool when truncate + Ok(()) } @@ -305,6 +553,273 @@ impl ChainService { .map(|_| ()) } + // make block IO and verify asynchronize + #[doc(hidden)] + pub fn process_block_v2(&self, block: Arc, switch: Switch) -> Result { + let block_number = block.number(); + let block_hash = block.hash(); + if block_number < 1 { + warn!("receive 0 number block: 0-{}", block_hash); + } + + // if self + // .shared + // .contains_block_status(&block_hash, BlockStatus::BLOCK_RECEIVED) + // { + // debug!("block {}-{} has been stored", block_number, block_hash); + // return Ok(false); + // } + + if !switch.disable_non_contextual() { + self.non_contextual_verify(&block)?; + } + + self.orphan_blocks_broker.insert(block.as_ref().to_owned()); + + match self.new_block_tx.send(switch) { + Ok(_) => {} + Err(err) => { + error!("notify new block to orphan pool err: {}", err) + } + } + debug!( + "processing block: {}-{}, orphan_len: {}, (tip:unverified_tip):({}:{})", + block_number, + block_hash, + self.orphan_blocks_broker.len(), + self.shared.snapshot().tip_number(), + self.shared.get_unverified_tip().number(), + ); + + Ok(false) + } + + fn accept_block(&self, block: &BlockView) -> Result, Error> { + let (block_number, block_hash) = (block.number(), block.hash()); + + if self + .shared + .contains_block_status(&block_hash, BlockStatus::BLOCK_PARTIAL_STORED) + { + debug!("block {}-{} has been stored", block_number, block_hash); + return Ok(None); + } + + trace!("begin accept block: {}-{}", 
block.number(), block.hash()); + + let parent_ext = self + .shared + .store() + .get_block_ext(&block.data().header().raw().parent_hash()) + .expect("parent already store"); + + let cannon_total_difficulty = + parent_ext.total_difficulty.to_owned() + block.header().difficulty(); + + let parent_header = self + .shared + .store() + .get_block_header(&block.data().header().raw().parent_hash()) + .expect("parent already store"); + + let db_txn = Arc::new(self.shared.store().begin_transaction()); + + db_txn.insert_block(block)?; + + // if parent_ext.verified == Some(false) { + // return Err(InvalidParentError { + // parent_hash: parent_header.hash(), + // } + // .into()); + // } + + let next_block_epoch = self + .shared + .consensus() + .next_epoch_ext(&parent_header, &db_txn.borrow_as_data_loader()) + .expect("epoch should be stored"); + let new_epoch = next_block_epoch.is_head(); + let epoch = next_block_epoch.epoch(); + + db_txn.insert_block_epoch_index( + &block.header().hash(), + &epoch.last_block_hash_in_previous_epoch(), + )?; + if new_epoch { + db_txn.insert_epoch_ext(&epoch.last_block_hash_in_previous_epoch(), &epoch)?; + } + + let ext = BlockExt { + received_at: unix_time_as_millis(), + total_difficulty: cannon_total_difficulty.clone(), + total_uncles_count: parent_ext.total_uncles_count + block.data().uncles().len() as u64, + verified: None, + txs_fees: vec![], + cycles: None, + txs_sizes: None, + }; + + db_txn.insert_block_ext(&block.header().hash(), &ext)?; + + db_txn.commit()?; + + self.shared + .insert_block_status(block_hash, BlockStatus::BLOCK_PARTIAL_STORED); + + Ok(Some((parent_header, cannon_total_difficulty))) + } + + fn verify_block(&self, unverified_block: &UnverifiedBlock) -> Result { + let log_now = std::time::Instant::now(); + + let UnverifiedBlock { + block, + parent_header, + switch, + } = unverified_block; + + let parent_ext = self + .shared + .store() + .get_block_ext(&block.data().header().raw().parent_hash()) + .expect("parent already 
store"); + + let cannon_total_difficulty = + parent_ext.total_difficulty.to_owned() + block.header().difficulty(); + + if parent_ext.verified == Some(false) { + return Err(InvalidParentError { + parent_hash: parent_header.hash(), + } + .into()); + } + + let ext = BlockExt { + received_at: unix_time_as_millis(), + total_difficulty: cannon_total_difficulty.clone(), + total_uncles_count: parent_ext.total_uncles_count + block.data().uncles().len() as u64, + verified: None, + txs_fees: vec![], + cycles: None, + txs_sizes: None, + }; + + let shared_snapshot = Arc::clone(&self.shared.snapshot()); + let origin_proposals = shared_snapshot.proposals(); + let current_tip_header = shared_snapshot.tip_header(); + let current_total_difficulty = shared_snapshot.total_difficulty().to_owned(); + + // is_better_than + let new_best_block = cannon_total_difficulty > current_total_difficulty; + + let mut fork = ForkChanges::default(); + + let next_block_epoch = self + .shared + .consensus() + .next_epoch_ext(&parent_header, &self.shared.store().borrow_as_data_loader()) + .expect("epoch should be stored"); + let new_epoch = next_block_epoch.is_head(); + let epoch = next_block_epoch.epoch(); + + let db_txn = Arc::new(self.shared.store().begin_transaction()); + if new_best_block { + debug!( + "[verify block] new best block found: {} => {:#x}, difficulty diff = {:#x}, unverified_tip: {}", + block.header().number(), + block.header().hash(), + &cannon_total_difficulty - ¤t_total_difficulty, + self.shared.get_unverified_tip().number(), + ); + self.find_fork(&mut fork, current_tip_header.number(), &block, ext); + self.rollback(&fork, &db_txn)?; + + // update and verify chain root + // MUST update index before reconcile_main_chain + let begin_reconcile_main_chain = std::time::Instant::now(); + self.reconcile_main_chain(Arc::clone(&db_txn), &mut fork, switch.to_owned())?; + trace!( + "reconcile_main_chain cost {:?}", + begin_reconcile_main_chain.elapsed() + ); + + 
db_txn.insert_tip_header(&block.header())?; + if new_epoch || fork.has_detached() { + db_txn.insert_current_epoch_ext(&epoch)?; + } + } else { + db_txn.insert_block_ext(&block.header().hash(), &ext)?; + } + db_txn.commit()?; + + if new_best_block { + let tip_header = block.header(); + info!( + "block: {}, hash: {:#x}, epoch: {:#}, total_diff: {:#x}, txs: {}", + tip_header.number(), + tip_header.hash(), + tip_header.epoch(), + cannon_total_difficulty, + block.transactions().len() + ); + + self.update_proposal_table(&fork); + let (detached_proposal_id, new_proposals) = self + .proposal_table + .lock() + .finalize(origin_proposals, tip_header.number()); + fork.detached_proposal_id = detached_proposal_id; + + let new_snapshot = + self.shared + .new_snapshot(tip_header, cannon_total_difficulty, epoch, new_proposals); + + self.shared.store_snapshot(Arc::clone(&new_snapshot)); + + let tx_pool_controller = self.shared.tx_pool_controller(); + if tx_pool_controller.service_started() { + if let Err(e) = tx_pool_controller.update_tx_pool_for_reorg( + fork.detached_blocks().clone(), + fork.attached_blocks().clone(), + fork.detached_proposal_id().clone(), + new_snapshot, + ) { + error!("[verify block] notify update_tx_pool_for_reorg error {}", e); + } + } + + let block_ref: &BlockView = █ + self.shared + .notify_controller() + .notify_new_block(block_ref.clone()); + if log_enabled!(ckb_logger::Level::Trace) { + self.print_chain(10); + } + if let Some(metrics) = ckb_metrics::handle() { + metrics.ckb_chain_tip.set(block.header().number() as i64); + } + } else { + self.shared.refresh_snapshot(); + info!( + "[verify block] uncle: {}, hash: {:#x}, epoch: {:#}, total_diff: {:#x}, txs: {}", + block.header().number(), + block.header().hash(), + block.header().epoch(), + cannon_total_difficulty, + block.transactions().len() + ); + + let tx_pool_controller = self.shared.tx_pool_controller(); + if tx_pool_controller.service_started() { + let block_ref: &BlockView = █ + if let Err(e) = 
tx_pool_controller.notify_new_uncle(block_ref.as_uncle()) { + error!("[verify block] notify new_uncle error {}", e); + } + } + } + Ok(true) + } + fn insert_block(&mut self, block: Arc, switch: Switch) -> Result { let db_txn = Arc::new(self.shared.store().begin_transaction()); let txn_snapshot = db_txn.get_snapshot(); @@ -419,6 +934,7 @@ impl ChainService { self.update_proposal_table(&fork); let (detached_proposal_id, new_proposals) = self .proposal_table + .lock() .finalize(origin_proposals, tip_header.number()); fork.detached_proposal_id = detached_proposal_id; @@ -473,19 +989,20 @@ impl ChainService { Ok(true) } - pub(crate) fn update_proposal_table(&mut self, fork: &ForkChanges) { + pub(crate) fn update_proposal_table(&self, fork: &ForkChanges) { for blk in fork.detached_blocks() { - self.proposal_table.remove(blk.header().number()); + self.proposal_table.lock().remove(blk.header().number()); } for blk in fork.attached_blocks() { self.proposal_table + .lock() .insert(blk.header().number(), blk.union_proposal_ids()); } self.reload_proposal_table(fork); } // if rollback happen, go back check whether need reload proposal_table from block - pub(crate) fn reload_proposal_table(&mut self, fork: &ForkChanges) { + pub(crate) fn reload_proposal_table(&self, fork: &ForkChanges) { if fork.has_detached() { let proposal_window = self.shared.consensus().tx_proposal_window(); let detached_front = fork @@ -515,7 +1032,9 @@ impl ChainService { .and_then(|hash| self.shared.store().get_block(&hash)) .expect("block stored"); - self.proposal_table.insert(bn, blk.union_proposal_ids()); + self.proposal_table + .lock() + .insert(bn, blk.union_proposal_ids()); } } } @@ -697,7 +1216,13 @@ impl ChainService { { if !switch.disable_all() { if found_error.is_none() { + let log_now = std::time::Instant::now(); let resolved = self.resolve_block_transactions(&txn, b, &verify_context); + debug!( + "resolve_block_transactions {} cost: {:?}", + b.hash(), + log_now.elapsed() + ); match resolved { 
Ok(resolved) => { let verified = { @@ -708,7 +1233,14 @@ impl ChainService { Arc::clone(&txs_verify_cache), &mmr, ); - contextual_block_verifier.verify(&resolved, b) + let log_now = std::time::Instant::now(); + let verify_result = contextual_block_verifier.verify(&resolved, b); + debug!( + "contextual_block_verifier {} cost: {:?}", + b.hash(), + log_now.elapsed() + ); + verify_result }; match verified { Ok((cycles, cache_entries)) => { @@ -866,7 +1398,7 @@ impl ChainService { err ); if log_enabled!(ckb_logger::Level::Trace) { - trace!("Block {}", b.data()); + trace!("Block {}", b); } } diff --git a/chain/src/lib.rs b/chain/src/lib.rs index 6885da60b9..e536b83365 100644 --- a/chain/src/lib.rs +++ b/chain/src/lib.rs @@ -8,5 +8,6 @@ pub mod chain; mod forkchanges; +mod orphan_block_pool; #[cfg(test)] mod tests; diff --git a/chain/src/orphan_block_pool.rs b/chain/src/orphan_block_pool.rs new file mode 100644 index 0000000000..ead446d3ca --- /dev/null +++ b/chain/src/orphan_block_pool.rs @@ -0,0 +1,170 @@ +use ckb_logger::debug; +use ckb_types::core::EpochNumber; +use ckb_types::{core, packed}; +use ckb_util::{parking_lot::RwLock, shrink_to_fit}; +use std::collections::{HashMap, HashSet, VecDeque}; + +pub type ParentHash = packed::Byte32; + +const SHRINK_THRESHOLD: usize = 100; +const EXPIRED_EPOCH: u64 = 6; + +#[derive(Default)] +struct InnerPool { + // Group by blocks in the pool by the parent hash. + blocks: HashMap>, + // The map tells the parent hash when given the hash of a block in the pool. + // + // The block is in the orphan pool if and only if the block hash exists as a key in this map. + parents: HashMap, + // Leaders are blocks not in the orphan pool but having at least a child in the pool. 
+ leaders: HashSet, +} + +impl InnerPool { + fn with_capacity(capacity: usize) -> Self { + InnerPool { + blocks: HashMap::with_capacity(capacity), + parents: HashMap::new(), + leaders: HashSet::new(), + } + } + + fn insert(&mut self, block: core::BlockView) { + let hash = block.header().hash(); + let parent_hash = block.data().header().raw().parent_hash(); + self.blocks + .entry(parent_hash.clone()) + .or_insert_with(HashMap::default) + .insert(hash.clone(), block); + // Out-of-order insertion needs to be deduplicated + self.leaders.remove(&hash); + // It is a possible optimization to make the judgment in advance, + // because the parent of the block must not be equal to its own hash, + // so we can judge first, which may reduce one arc clone + if !self.parents.contains_key(&parent_hash) { + // Block referenced by `parent_hash` is not in the pool, + // and it has at least one child, the new inserted block, so add it to leaders. + self.leaders.insert(parent_hash.clone()); + } + self.parents.insert(hash, parent_hash); + } + + pub fn remove_blocks_by_parent(&mut self, parent_hash: &ParentHash) -> Vec { + // try remove leaders first + if !self.leaders.remove(parent_hash) { + return Vec::new(); + } + + let mut queue: VecDeque = VecDeque::new(); + queue.push_back(parent_hash.to_owned()); + + let mut removed: Vec = Vec::new(); + while let Some(parent_hash) = queue.pop_front() { + if let Some(orphaned) = self.blocks.remove(&parent_hash) { + let (hashes, blocks): (Vec<_>, Vec<_>) = orphaned.into_iter().unzip(); + for hash in hashes.iter() { + self.parents.remove(hash); + } + queue.extend(hashes); + removed.extend(blocks); + } + } + + debug!("orphan pool pop chain len: {}", removed.len()); + debug_assert_ne!( + removed.len(), + 0, + "orphan pool removed list must not be zero" + ); + + shrink_to_fit!(self.blocks, SHRINK_THRESHOLD); + shrink_to_fit!(self.parents, SHRINK_THRESHOLD); + shrink_to_fit!(self.leaders, SHRINK_THRESHOLD); + removed + } + + pub fn get_block(&self, 
hash: &packed::Byte32) -> Option { + self.parents.get(hash).and_then(|parent_hash| { + self.blocks + .get(parent_hash) + .and_then(|blocks| blocks.get(hash).cloned()) + }) + } + + /// cleanup expired blocks(epoch + EXPIRED_EPOCH < tip_epoch) + pub fn clean_expired_blocks(&mut self, tip_epoch: EpochNumber) -> Vec { + let mut result = vec![]; + + for hash in self.leaders.clone().iter() { + if self.need_clean(hash, tip_epoch) { + // remove items in orphan pool and return hash to callee(clean header map) + let descendants = self.remove_blocks_by_parent(hash); + result.extend(descendants.iter().map(|block| block.hash())); + } + } + result + } + + /// get 1st block belongs to that parent and check if it's expired block + fn need_clean(&self, parent_hash: &packed::Byte32, tip_epoch: EpochNumber) -> bool { + self.blocks + .get(parent_hash) + .and_then(|map| { + map.iter() + .next() + .map(|(_, block)| block.header().epoch().number() + EXPIRED_EPOCH < tip_epoch) + }) + .unwrap_or_default() + } +} + +// NOTE: Never use `LruCache` as container. We have to ensure synchronizing between +// orphan_block_pool and block_status_map, but `LruCache` would prune old items implicitly. +// RwLock ensures the consistency between maps. Using multiple concurrent maps does not work here. 
+#[derive(Default)] +pub struct OrphanBlockPool { + inner: RwLock, +} + +impl OrphanBlockPool { + pub fn with_capacity(capacity: usize) -> Self { + OrphanBlockPool { + inner: RwLock::new(InnerPool::with_capacity(capacity)), + } + } + + /// Insert orphaned block, for which we have already requested its parent block + pub fn insert(&self, block: core::BlockView) { + self.inner.write().insert(block); + } + + pub fn remove_blocks_by_parent(&self, parent_hash: &ParentHash) -> Vec { + self.inner.write().remove_blocks_by_parent(parent_hash) + } + + pub fn get_block(&self, hash: &packed::Byte32) -> Option { + self.inner.read().get_block(hash) + } + + pub fn clean_expired_blocks(&self, epoch: EpochNumber) -> Vec { + self.inner.write().clean_expired_blocks(epoch) + } + + pub fn len(&self) -> usize { + self.inner.read().parents.len() + } + + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + pub fn clone_leaders(&self) -> Vec { + self.inner.read().leaders.iter().cloned().collect() + } + + #[cfg(test)] + pub(crate) fn leaders_len(&self) -> usize { + self.inner.read().leaders.len() + } +} diff --git a/shared/src/block_status.rs b/shared/src/block_status.rs index 6787de24cb..60dd37115d 100644 --- a/shared/src/block_status.rs +++ b/shared/src/block_status.rs @@ -1,13 +1,13 @@ use bitflags::bitflags; - bitflags! 
{ pub struct BlockStatus: u32 { const UNKNOWN = 0; const HEADER_VALID = 1; - const BLOCK_RECEIVED = Self::HEADER_VALID.bits | 1 << 1; - const BLOCK_STORED = Self::HEADER_VALID.bits | Self::BLOCK_RECEIVED.bits | 1 << 3; - const BLOCK_VALID = Self::HEADER_VALID.bits | Self::BLOCK_RECEIVED.bits | Self::BLOCK_STORED.bits | 1 << 4; + const BLOCK_RECEIVED = 1 | Self::HEADER_VALID.bits << 1; + const BLOCK_PARTIAL_STORED = 1 | Self::BLOCK_RECEIVED.bits << 1; + const BLOCK_STORED = 1 | Self::BLOCK_PARTIAL_STORED.bits << 1; + const BLOCK_VALID = 1 | Self::BLOCK_STORED.bits << 1; const BLOCK_INVALID = 1 << 12; } diff --git a/shared/src/lib.rs b/shared/src/lib.rs index a495984ee7..02d7dbbc54 100644 --- a/shared/src/lib.rs +++ b/shared/src/lib.rs @@ -11,3 +11,4 @@ pub mod block_status; pub mod types; pub use types::header_map::HeaderMap; +pub use types::{HeaderIndex, HeaderIndexView}; diff --git a/shared/src/shared.rs b/shared/src/shared.rs index 0faaf1890c..6e1a4dde0a 100644 --- a/shared/src/shared.rs +++ b/shared/src/shared.rs @@ -1,7 +1,7 @@ //! 
TODO(doc): @quake use crate::block_status::BlockStatus; use crate::{HeaderMap, Snapshot, SnapshotMgr}; -use arc_swap::Guard; +use arc_swap::{ArcSwap, Guard}; use ckb_async_runtime::Handle; use ckb_chain_spec::consensus::Consensus; use ckb_constant::store::TX_INDEX_UPPER_BOUND; @@ -9,6 +9,7 @@ use ckb_constant::sync::MAX_TIP_AGE; use ckb_db::{Direction, IteratorMode}; use ckb_db_schema::{COLUMN_BLOCK_BODY, COLUMN_NUMBER_HASH}; use ckb_error::{AnyError, Error}; +use ckb_logger::debug; use ckb_notify::NotifyController; use ckb_proposal_table::ProposalView; use ckb_stop_handler::{new_crossbeam_exit_rx, register_thread}; @@ -27,6 +28,7 @@ use ckb_verification::cache::TxVerificationCache; use dashmap::DashMap; use std::cmp; use std::collections::BTreeMap; +use std::hash::Hash; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use std::thread; @@ -63,6 +65,7 @@ pub struct Shared { pub header_map: Arc, pub(crate) block_status_map: Arc>, + pub(crate) unverified_tip: Arc>, } impl Shared { @@ -80,6 +83,15 @@ impl Shared { header_map: Arc, block_status_map: Arc>, ) -> Shared { + let header = store + .get_tip_header() + .unwrap_or(consensus.genesis_block().header()); + let unverified_tip = Arc::new(ArcSwap::new(Arc::new(crate::HeaderIndex::new( + header.number(), + header.hash(), + header.difficulty(), + )))); + Shared { store, tx_pool_controller, @@ -91,6 +103,7 @@ impl Shared { ibd_finished, header_map, block_status_map, + unverified_tip, } } /// Spawn freeze background thread that periodically checks and moves ancient data from the kv database into the freezer. 
@@ -384,15 +397,58 @@ impl Shared { ) } + pub fn set_unverified_tip(&self, header: crate::HeaderIndex) { + self.unverified_tip.store(Arc::new(header)); + } + pub fn get_unverified_tip(&self) -> crate::HeaderIndex { + self.unverified_tip.load().as_ref().clone() + } + pub fn header_map(&self) -> &HeaderMap { &self.header_map } + pub fn remove_header_view(&self, hash: &Byte32) { + self.header_map.remove(hash); + } + + pub fn get_orphan_block(&self, block_hash: &Byte32) -> Option { + todo!("get_orphan_block") + // self.orphan_block_pool.get_block(block_hash) + } + + pub fn orphan_pool_count(&self) -> u64 { + 0 + } + pub fn block_status_map(&self) -> &DashMap { &self.block_status_map } - - pub fn remove_header_view(&self, hash: &Byte32) { - self.header_map.remove(hash); + pub fn get_block_status(&self, block_hash: &Byte32) -> BlockStatus { + match self.block_status_map.get(block_hash) { + Some(status_ref) => *status_ref.value(), + None => { + if self.header_map.contains_key(block_hash) { + BlockStatus::HEADER_VALID + } else { + let verified = self + .store() + .get_block_ext(block_hash) + .map(|block_ext| block_ext.verified); + match verified { + Some(Some(true)) => BlockStatus::BLOCK_VALID, + Some(Some(false)) => BlockStatus::BLOCK_INVALID, + Some(None) => BlockStatus::BLOCK_STORED, + None => { + if self.store().get_block_header(block_hash).is_some() { + BlockStatus::BLOCK_PARTIAL_STORED + } else { + BlockStatus::UNKNOWN + } + } + } + } + } + } } pub fn insert_block_status(&self, block_hash: Byte32, status: BlockStatus) { @@ -400,16 +456,16 @@ impl Shared { } pub fn remove_block_status(&self, block_hash: &Byte32) { + let log_now = std::time::Instant::now(); self.block_status_map.remove(block_hash); + debug!("remove_block_status cost {:?}", log_now.elapsed()); shrink_to_fit!(self.block_status_map, SHRINK_THRESHOLD); + debug!( + "remove_block_status shrink_to_fit cost {:?}", + log_now.elapsed() + ); } - - pub fn get_orphan_block(&self, block_hash: &Byte32) -> Option { - 
todo!("get_orphan_block") - // self.orphan_block_pool.get_block(block_hash) - } - - pub fn orphan_pool_count(&self) -> u64 { - 0 + pub fn contains_block_status(&self, block_hash: &Byte32, status: BlockStatus) -> bool { + self.get_block_status(block_hash).contains(status) } } From 56bcb667e4afcb591a692887fb931c3a8a300945 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 24 May 2023 17:05:18 +0800 Subject: [PATCH 011/357] Fetch block from unverified tip --- sync/src/synchronizer/block_fetcher.rs | 51 +++++++++++++++++++++----- sync/src/types/mod.rs | 29 ++++++++++++--- 2 files changed, 65 insertions(+), 15 deletions(-) diff --git a/sync/src/synchronizer/block_fetcher.rs b/sync/src/synchronizer/block_fetcher.rs index 1b35862944..90bd45ca4c 100644 --- a/sync/src/synchronizer/block_fetcher.rs +++ b/sync/src/synchronizer/block_fetcher.rs @@ -58,10 +58,15 @@ impl BlockFetcher { { header } else { - let tip_header = self.active_chain.tip_header(); - let guess_number = min(tip_header.number(), best_known.number()); - let guess_hash = self.active_chain.get_block_hash(guess_number)?; - (guess_number, guess_hash).into() + let unverified_tip_header = self.sync_shared.shared().get_unverified_tip(); + if best_known.number() < unverified_tip_header.number() { + (best_known.number(), best_known.hash()).into() + } else { + (unverified_tip_header.number(), unverified_tip_header.hash()).into() + } + // let guess_number = min(tip_header.number(), best_known.number()); + // let guess_hash = self.active_chain.get_block_hash(guess_number)?; + // (guess_number, guess_hash).into() }; // If the peer reorganized, our previous last_common_header may not be an ancestor @@ -79,6 +84,8 @@ impl BlockFetcher { } pub fn fetch(self) -> Option>> { + let trace_timecost_now = std::time::Instant::now(); + if self.reached_inflight_limit() { trace!( "[block_fetcher] inflight count has reached the limit, preventing further downloads from peer {}", @@ -123,7 +130,7 @@ impl BlockFetcher { // 
last_common_header, is expected to provide a more realistic picture. Hence here we // specially advance this peer's last_common_header at the case of both us on the same // active chain. - if self.active_chain.is_main_chain(&best_known.hash()) { + if self.active_chain.is_unverified_chain(&best_known.hash()) { self.sync_shared .state() .peers() @@ -196,8 +203,9 @@ impl BlockFetcher { fetch.sort_by_key(|header| header.number()); let tip = self.active_chain.tip_number(); + let unverified_tip = self.active_chain.unverified_tip_number(); let should_mark = fetch.last().map_or(false, |header| { - header.number().saturating_sub(CHECK_POINT_WINDOW) > tip + header.number().saturating_sub(CHECK_POINT_WINDOW) > unverified_tip }); if should_mark { inflight.mark_slow_block(tip); @@ -205,15 +213,38 @@ impl BlockFetcher { if fetch.is_empty() { debug!( - "[block fetch empty] fixed_last_common_header = {} \ - best_known_header = {}, tip = {}, inflight_len = {}, \ - inflight_state = {:?}", + "[block fetch empty] peer-{}, fixed_last_common_header = {} \ + best_known_header = {}, tip = {}, unverified_tip = {}, inflight_len = {}, time_cost: {}ms", + self.peer, last_common.number(), best_known.number(), tip, + unverified_tip, inflight.total_inflight_count(), + trace_timecost_now.elapsed().as_millis(), + ); + trace!( + "[block fetch empty] peer-{}, inflight_state = {:?}", + self.peer, *inflight - ) + ); + } else { + let fetch_head = fetch.first().map_or(0_u64.into(), |v| v.number()); + let fetch_last = fetch.last().map_or(0_u64.into(), |v| v.number()); + let inflight_peer_count = inflight.peer_inflight_count(self.peer); + let inflight_total_count = inflight.total_inflight_count(); + debug!( + "request peer-{} for batch blocks: [{}-{}], batch len:{} , unverified_tip: {}, [peer/total inflight count]: [{} / {}], timecost: {}ms, blocks: {}", + self.peer, + fetch_head, + fetch_last, + fetch.len(), + self.synchronizer.shared().shared().get_unverified_tip().number(), + inflight_peer_count, + 
inflight_total_count, + trace_timecost_now.elapsed().as_millis(), + fetch.iter().map(|h| h.number().to_string()).collect::>().join(","), + ); } Some( diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index 44acc8dfc8..d833575a97 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -1184,8 +1184,8 @@ impl SyncShared { // So we just simply remove the corresponding in-memory block status, // and the next time `get_block_status` would acquire the real-time // status via fetching block_ext from the database. - self.shared().remove_block_status(&block.as_ref().hash()); - self.shared().remove_header_view(&block.as_ref().hash()); + // self.shared().remove_block_status(&block.as_ref().hash()); + // self.shared().remove_header_view(&block.as_ref().hash()); } ret @@ -1796,22 +1796,41 @@ impl ActiveChain { pub fn is_main_chain(&self, hash: &packed::Byte32) -> bool { self.snapshot.is_main_chain(hash) } + pub fn is_unverified_chain(&self, hash: &packed::Byte32) -> bool { + self.shared() + .shared() + .store() + .get_block_epoch_index(hash) + .is_some() + } pub fn is_initial_block_download(&self) -> bool { self.shared.shared().is_initial_block_download() } + pub fn unverified_tip_header(&self) -> HeaderIndex { + self.shared.shared.get_unverified_tip() + } + + pub fn unverified_tip_hash(&self) -> Byte32 { + self.unverified_tip_header().hash() + } + + pub fn unverified_tip_number(&self) -> BlockNumber { + self.unverified_tip_header().number() + } pub fn get_ancestor(&self, base: &Byte32, number: BlockNumber) -> Option { - let tip_number = self.tip_number(); + let unverified_tip_number = self.unverified_tip_number(); self.shared .get_header_index_view(base, false)? 
.get_ancestor( - tip_number, + unverified_tip_number, number, |hash, store_first| self.shared.get_header_index_view(hash, store_first), |number, current| { // shortcut to return an ancestor block - if current.number <= tip_number && self.snapshot().is_main_chain(&current.hash) + if current.number <= unverified_tip_number + && self.is_unverified_chain(&current.hash) { self.get_block_hash(number) .and_then(|hash| self.shared.get_header_index_view(&hash, true)) From 162e5b994b08ff80aa11cf16fb03d1b92753fe61 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 25 May 2023 13:06:29 +0800 Subject: [PATCH 012/357] Fetch blocks from unverified_tip --- chain/src/chain.rs | 14 +++++++------- chain/src/orphan_block_pool.rs | 20 ++++++++++++-------- rpc/src/module/net.rs | 5 +++-- rpc/src/service_builder.rs | 2 ++ shared/src/shared.rs | 1 - shared/src/types/header_map/memory.rs | 6 ++++-- shared/src/types/header_map/mod.rs | 4 +++- sync/src/synchronizer/block_fetcher.rs | 19 +++++++++++++++---- sync/src/synchronizer/mod.rs | 14 +++++++++----- util/launcher/src/lib.rs | 6 +++++- 10 files changed, 60 insertions(+), 31 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index a967024637..7c6c34708c 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -46,7 +46,7 @@ use std::time::Duration; use std::time::Instant; use std::{cmp, thread}; -const ORPHAN_BLOCK_SIZE: usize = 100000; +const ORPHAN_BLOCK_SIZE: usize = (BLOCK_DOWNLOAD_WINDOW * 2) as usize; type ProcessBlockRequest = Request<(Arc, Switch), Result>; type TruncateRequest = Request>; @@ -164,7 +164,7 @@ pub struct ChainService { #[derive(Clone)] struct UnverifiedBlock { - block: BlockView, + block: Arc, parent_header: HeaderView, switch: Switch, } @@ -173,7 +173,7 @@ impl ChainService { /// Create a new ChainService instance with shared and initial proposal_table. 
pub fn new(shared: Shared, proposal_table: ProposalTable) -> ChainService { let (unverified_tx, unverified_rx) = - channel::bounded::(BLOCK_DOWNLOAD_WINDOW as usize * 11); + channel::bounded::(BLOCK_DOWNLOAD_WINDOW as usize * 3); let (new_block_tx, new_block_rx) = channel::bounded::(BLOCK_DOWNLOAD_WINDOW as usize); @@ -399,7 +399,7 @@ impl ChainService { } let mut accept_error_occurred = false; for descendant in &descendants { - match self.accept_block(descendant) { + match self.accept_block(descendant.to_owned()) { Err(err) => { accept_error_occurred = true; error!("accept block {} failed: {}", descendant.hash(), err); @@ -574,7 +574,7 @@ impl ChainService { self.non_contextual_verify(&block)?; } - self.orphan_blocks_broker.insert(block.as_ref().to_owned()); + self.orphan_blocks_broker.insert(block); match self.new_block_tx.send(switch) { Ok(_) => {} @@ -594,7 +594,7 @@ impl ChainService { Ok(false) } - fn accept_block(&self, block: &BlockView) -> Result, Error> { + fn accept_block(&self, block: Arc) -> Result, Error> { let (block_number, block_hash) = (block.number(), block.hash()); if self @@ -624,7 +624,7 @@ impl ChainService { let db_txn = Arc::new(self.shared.store().begin_transaction()); - db_txn.insert_block(block)?; + db_txn.insert_block(block.as_ref())?; // if parent_ext.verified == Some(false) { // return Err(InvalidParentError { diff --git a/chain/src/orphan_block_pool.rs b/chain/src/orphan_block_pool.rs index ead446d3ca..9459f4864b 100644 --- a/chain/src/orphan_block_pool.rs +++ b/chain/src/orphan_block_pool.rs @@ -3,6 +3,7 @@ use ckb_types::core::EpochNumber; use ckb_types::{core, packed}; use ckb_util::{parking_lot::RwLock, shrink_to_fit}; use std::collections::{HashMap, HashSet, VecDeque}; +use std::sync::Arc; pub type ParentHash = packed::Byte32; @@ -12,7 +13,7 @@ const EXPIRED_EPOCH: u64 = 6; #[derive(Default)] struct InnerPool { // Group by blocks in the pool by the parent hash. 
- blocks: HashMap>, + blocks: HashMap>>, // The map tells the parent hash when given the hash of a block in the pool. // // The block is in the orphan pool if and only if the block hash exists as a key in this map. @@ -30,7 +31,7 @@ impl InnerPool { } } - fn insert(&mut self, block: core::BlockView) { + fn insert(&mut self, block: Arc) { let hash = block.header().hash(); let parent_hash = block.data().header().raw().parent_hash(); self.blocks @@ -50,7 +51,10 @@ impl InnerPool { self.parents.insert(hash, parent_hash); } - pub fn remove_blocks_by_parent(&mut self, parent_hash: &ParentHash) -> Vec { + pub fn remove_blocks_by_parent( + &mut self, + parent_hash: &ParentHash, + ) -> Vec> { // try remove leaders first if !self.leaders.remove(parent_hash) { return Vec::new(); @@ -59,7 +63,7 @@ impl InnerPool { let mut queue: VecDeque = VecDeque::new(); queue.push_back(parent_hash.to_owned()); - let mut removed: Vec = Vec::new(); + let mut removed: Vec> = Vec::new(); while let Some(parent_hash) = queue.pop_front() { if let Some(orphaned) = self.blocks.remove(&parent_hash) { let (hashes, blocks): (Vec<_>, Vec<_>) = orphaned.into_iter().unzip(); @@ -84,7 +88,7 @@ impl InnerPool { removed } - pub fn get_block(&self, hash: &packed::Byte32) -> Option { + pub fn get_block(&self, hash: &packed::Byte32) -> Option> { self.parents.get(hash).and_then(|parent_hash| { self.blocks .get(parent_hash) @@ -135,15 +139,15 @@ impl OrphanBlockPool { } /// Insert orphaned block, for which we have already requested its parent block - pub fn insert(&self, block: core::BlockView) { + pub fn insert(&self, block: Arc) { self.inner.write().insert(block); } - pub fn remove_blocks_by_parent(&self, parent_hash: &ParentHash) -> Vec { + pub fn remove_blocks_by_parent(&self, parent_hash: &ParentHash) -> Vec> { self.inner.write().remove_blocks_by_parent(parent_hash) } - pub fn get_block(&self, hash: &packed::Byte32) -> Option { + pub fn get_block(&self, hash: &packed::Byte32) -> Option> { 
self.inner.read().get_block(hash) } diff --git a/rpc/src/module/net.rs b/rpc/src/module/net.rs index 94c9a6e7bb..72d6b762ee 100644 --- a/rpc/src/module/net.rs +++ b/rpc/src/module/net.rs @@ -1,5 +1,6 @@ use crate::error::RPCError; use async_trait::async_trait; +use ckb_chain::chain::ChainController; use ckb_jsonrpc_types::{ BannedAddr, LocalNode, LocalNodeProtocol, NodeAddress, PeerSyncState, RemoteNode, RemoteNodeProtocol, SyncState, Timestamp, @@ -537,6 +538,7 @@ pub trait NetRpc { pub(crate) struct NetRpcImpl { pub network_controller: NetworkController, pub sync_shared: Arc, + pub chain_controller: Arc, } #[async_trait] @@ -715,7 +717,6 @@ impl NetRpc for NetRpcImpl { fn sync_state(&self) -> Result { let chain = self.sync_shared.active_chain(); - let shared = chain.shared(); let state = chain.shared().state(); let (fast_time, normal_time, low_time) = state.read_inflight_blocks().division_point(); let best_known = state.shared_best_header(); @@ -723,7 +724,7 @@ impl NetRpc for NetRpcImpl { ibd: chain.is_initial_block_download(), best_known_block_number: best_known.number().into(), best_known_block_timestamp: best_known.timestamp().into(), - orphan_blocks_count: (shared.shared().orphan_pool_count()).into(), + orphan_blocks_count: (self.chain_controller.orphan_blocks_len() as u64).into(), inflight_blocks_count: (state.read_inflight_blocks().total_inflight_count() as u64) .into(), fast_time: fast_time.into(), diff --git a/rpc/src/service_builder.rs b/rpc/src/service_builder.rs index 210accd72c..3577b4d9c5 100644 --- a/rpc/src/service_builder.rs +++ b/rpc/src/service_builder.rs @@ -102,10 +102,12 @@ impl<'a> ServiceBuilder<'a> { mut self, network_controller: NetworkController, sync_shared: Arc, + chain_controller: Arc, ) -> Self { let methods = NetRpcImpl { network_controller, sync_shared, + chain_controller, }; set_rpc_module_methods!(self, "Net", net_enable, add_net_rpc_methods, methods) } diff --git a/shared/src/shared.rs b/shared/src/shared.rs index 
6e1a4dde0a..ffc5e22628 100644 --- a/shared/src/shared.rs +++ b/shared/src/shared.rs @@ -413,7 +413,6 @@ impl Shared { pub fn get_orphan_block(&self, block_hash: &Byte32) -> Option { todo!("get_orphan_block") - // self.orphan_block_pool.get_block(block_hash) } pub fn orphan_pool_count(&self) -> u64 { diff --git a/shared/src/types/header_map/memory.rs b/shared/src/types/header_map/memory.rs index 0411e8c671..0bf62d50f4 100644 --- a/shared/src/types/header_map/memory.rs +++ b/shared/src/types/header_map/memory.rs @@ -1,4 +1,4 @@ -use crate::types::{HeaderIndexView, SHRINK_THRESHOLD}; +use crate::types::HeaderIndexView; use ckb_types::{ core::{BlockNumber, EpochNumberWithFraction}, packed::Byte32, @@ -7,6 +7,8 @@ use ckb_types::{ use ckb_util::{shrink_to_fit, LinkedHashMap, RwLock}; use std::default; +const SHRINK_THRESHOLD: usize = 300; + #[derive(Clone, Debug, PartialEq, Eq)] struct HeaderIndexViewInner { number: BlockNumber, @@ -99,7 +101,7 @@ impl MemoryMap { pub(crate) fn remove(&self, key: &Byte32) -> Option { let mut guard = self.0.write(); let ret = guard.remove(key); - shrink_to_fit!(guard, SHRINK_THRESHOLD); + // shrink_to_fit!(guard, SHRINK_THRESHOLD); ret.map(|inner| (key.clone(), inner).into()) } diff --git a/shared/src/types/header_map/mod.rs b/shared/src/types/header_map/mod.rs index d72772c6a1..e764755ea6 100644 --- a/shared/src/types/header_map/mod.rs +++ b/shared/src/types/header_map/mod.rs @@ -24,7 +24,7 @@ pub struct HeaderMap { inner: Arc>, } -const INTERVAL: Duration = Duration::from_millis(500); +const INTERVAL: Duration = Duration::from_millis(5000); const ITEM_BYTES_SIZE: usize = size_of::(); const WARN_THRESHOLD: usize = ITEM_BYTES_SIZE * 100_000; @@ -53,7 +53,9 @@ impl HeaderMap { loop { tokio::select! 
{ _ = interval.tick() => { + let now = std::time::Instant::now(); map.limit_memory(); + debug!("HeaderMap limit_memory cost: {:?}", now.elapsed()); } _ = stop_rx.cancelled() => { info!("HeaderMap limit_memory received exit signal, exit now"); diff --git a/sync/src/synchronizer/block_fetcher.rs b/sync/src/synchronizer/block_fetcher.rs index 90bd45ca4c..a3b998ec18 100644 --- a/sync/src/synchronizer/block_fetcher.rs +++ b/sync/src/synchronizer/block_fetcher.rs @@ -156,6 +156,12 @@ impl BlockFetcher { ); let mut fetch = Vec::with_capacity(n_fetch); let now = unix_time_as_millis(); + debug!( + "finding which blocks to fetch, start: {}, end: {}, best_known: {}", + start, + end, + best_known.number(), + ); while fetch.len() < n_fetch && start <= end { let span = min(end - start + 1, (n_fetch - fetch.len()) as u64); @@ -164,14 +170,18 @@ impl BlockFetcher { let mut header = self .active_chain .get_ancestor(&best_known.hash(), start + span - 1)?; - let mut status = self.active_chain.get_block_status(&header.hash()); + let mut status = self + .synchronizer + .shared() + .shared() + .get_block_status(&header.hash()); // Judge whether we should fetch the target block, neither stored nor in-flighted for _ in 0..span { let parent_hash = header.parent_hash(); let hash = header.hash(); - if status.contains(BlockStatus::BLOCK_STORED) { + if status.contains(BlockStatus::BLOCK_PARTIAL_STORED) { // If the block is stored, its ancestor must on store // So we can skip the search of this space directly self.sync_shared @@ -214,7 +224,7 @@ impl BlockFetcher { if fetch.is_empty() { debug!( "[block fetch empty] peer-{}, fixed_last_common_header = {} \ - best_known_header = {}, tip = {}, unverified_tip = {}, inflight_len = {}, time_cost: {}ms", + best_known_header = {}, [tip/unverified_tip]: [{}/{}], inflight_len = {}, time_cost: {}ms", self.peer, last_common.number(), best_known.number(), @@ -234,11 +244,12 @@ impl BlockFetcher { let inflight_peer_count = 
inflight.peer_inflight_count(self.peer); let inflight_total_count = inflight.total_inflight_count(); debug!( - "request peer-{} for batch blocks: [{}-{}], batch len:{} , unverified_tip: {}, [peer/total inflight count]: [{} / {}], timecost: {}ms, blocks: {}", + "request peer-{} for batch blocks: [{}-{}], batch len:{}, [tip/unverified_tip]: [{}/{}], [peer/total inflight count]: [{} / {}], timecost: {}ms, blocks: {}", self.peer, fetch_head, fetch_last, fetch.len(), + tip, self.synchronizer.shared().shared().get_unverified_tip().number(), inflight_peer_count, inflight_total_count, diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index 26d5c88e13..faf53e1a53 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -29,8 +29,8 @@ use ckb_chain::chain::ChainController; use ckb_channel as channel; use ckb_channel::{select, Receiver}; use ckb_constant::sync::{ - BAD_MESSAGE_BAN_TIME, CHAIN_SYNC_TIMEOUT, EVICTION_HEADERS_RESPONSE_TIME, - INIT_BLOCKS_IN_TRANSIT_PER_PEER, MAX_TIP_AGE, + BAD_MESSAGE_BAN_TIME, BLOCK_DOWNLOAD_WINDOW, CHAIN_SYNC_TIMEOUT, + EVICTION_HEADERS_RESPONSE_TIME, INIT_BLOCKS_IN_TRANSIT_PER_PEER, MAX_TIP_AGE, }; use ckb_error::Error as CKBError; use ckb_logger::{debug, error, info, trace, warn}; @@ -580,10 +580,14 @@ impl Synchronizer { } fn find_blocks_to_fetch(&mut self, nc: &dyn CKBProtocolContext, ibd: IBDState) { - let tip = self.shared.active_chain().tip_number(); + let unverified_tip = self.shared.active_chain().unverified_tip_number(); let disconnect_list = { - let mut list = self.shared().state().write_inflight_blocks().prune(tip); + let mut list = self + .shared() + .state() + .write_inflight_blocks() + .prune(unverified_tip); if let IBDState::In = ibd { // best known < tip and in IBD state, and unknown list is empty, // these node can be disconnect @@ -591,7 +595,7 @@ impl Synchronizer { self.shared .state() .peers() - .get_best_known_less_than_tip_and_unknown_empty(tip), + 
.get_best_known_less_than_tip_and_unknown_empty(unverified_tip), ) }; list diff --git a/util/launcher/src/lib.rs b/util/launcher/src/lib.rs index db3af82673..1cf46867af 100644 --- a/util/launcher/src/lib.rs +++ b/util/launcher/src/lib.rs @@ -397,7 +397,11 @@ impl Launcher { chain_controller.clone(), miner_enable, ) - .enable_net(network_controller.clone(), sync_shared) + .enable_net( + network_controller.clone(), + sync_shared, + Arc::new(chain_controller.clone()), + ) .enable_stats(shared.clone(), Arc::clone(&alert_notifier)) .enable_experiment(shared.clone()) .enable_integration_test(shared.clone(), network_controller.clone(), chain_controller) From 65f22330ee25046bca41351f615777887bf3bb53 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 25 May 2023 23:34:24 +0800 Subject: [PATCH 013/357] Reduce inflight_blocks write block hold duration --- sync/src/synchronizer/block_fetcher.rs | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/sync/src/synchronizer/block_fetcher.rs b/sync/src/synchronizer/block_fetcher.rs index a3b998ec18..f6f093ea46 100644 --- a/sync/src/synchronizer/block_fetcher.rs +++ b/sync/src/synchronizer/block_fetcher.rs @@ -146,13 +146,12 @@ impl BlockFetcher { return None; } - let state = self.sync_shared.state(); - let mut inflight = state.write_inflight_blocks(); + let state = self.sync_shared.shared().state(); let mut start = last_common.number() + 1; let mut end = min(best_known.number(), start + BLOCK_DOWNLOAD_WINDOW); let n_fetch = min( end.saturating_sub(start) as usize + 1, - inflight.peer_can_fetch_count(self.peer), + state.read_inflight_blocks().peer_can_fetch_count(self.peer), ); let mut fetch = Vec::with_capacity(n_fetch); let now = unix_time_as_millis(); @@ -194,12 +193,18 @@ impl BlockFetcher { // Do not download repeatedly } else if (matches!(self.ibd, IBDState::In) || state.compare_with_pending_compact(&hash, now)) - && inflight.insert(self.peer, (header.number(), hash).into()) + && state + 
.write_inflight_blocks() + .insert(self.peer, (header.number(), hash).into()) { fetch.push(header) } - status = self.active_chain.get_block_status(&parent_hash); + status = self + .synchronizer + .shared() + .shared() + .get_block_status(&parent_hash); header = self .sync_shared .get_header_index_view(&parent_hash, false)?; @@ -218,7 +223,7 @@ impl BlockFetcher { header.number().saturating_sub(CHECK_POINT_WINDOW) > unverified_tip }); if should_mark { - inflight.mark_slow_block(tip); + state.write_inflight_blocks().mark_slow_block(tip); } if fetch.is_empty() { @@ -230,19 +235,19 @@ impl BlockFetcher { best_known.number(), tip, unverified_tip, - inflight.total_inflight_count(), + state.read_inflight_blocks().total_inflight_count(), trace_timecost_now.elapsed().as_millis(), ); trace!( "[block fetch empty] peer-{}, inflight_state = {:?}", self.peer, - *inflight + *state.read_inflight_blocks() ); } else { let fetch_head = fetch.first().map_or(0_u64.into(), |v| v.number()); let fetch_last = fetch.last().map_or(0_u64.into(), |v| v.number()); - let inflight_peer_count = inflight.peer_inflight_count(self.peer); - let inflight_total_count = inflight.total_inflight_count(); + let inflight_peer_count = state.read_inflight_blocks().peer_inflight_count(self.peer); + let inflight_total_count = state.read_inflight_blocks().total_inflight_count(); debug!( "request peer-{} for batch blocks: [{}-{}], batch len:{}, [tip/unverified_tip]: [{}/{}], [peer/total inflight count]: [{} / {}], timecost: {}ms, blocks: {}", self.peer, From 337ad4dd0d482ce1f3ba69c0d7f5144d822dfc60 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 21 Aug 2023 20:15:06 +0800 Subject: [PATCH 014/357] Fix InvalidRewardAmount --- sync/src/synchronizer/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index faf53e1a53..73f41f9489 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -339,8 +339,8 @@ impl 
Synchronizer { let status = self.shared.active_chain().get_block_status(&block_hash); // NOTE: Filtering `BLOCK_STORED` but not `BLOCK_RECEIVED`, is for avoiding // stopping synchronization even when orphan_pool maintains dirty items by bugs. - if status.contains(BlockStatus::BLOCK_STORED) { - debug!("Block {} already stored", block_hash); + if status.contains(BlockStatus::BLOCK_PARTIAL_STORED) { + error!("Block {} already partial stored", block_hash); Ok(false) } else if status.contains(BlockStatus::HEADER_VALID) { self.shared.insert_new_block(&self.chain, Arc::new(block)) From 923d4aa4af82ebf8e707d597123a16cf44b3943e Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 21 Aug 2023 21:21:20 +0800 Subject: [PATCH 015/357] Add error log when a block doesn't have parent --- chain/src/chain.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 7c6c34708c..c1599e4331 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -395,6 +395,10 @@ impl ChainService { .orphan_blocks_broker .remove_blocks_by_parent(&leader_hash); if descendants.is_empty() { + error!( + "leader {} does not have any descendants, this shouldn't happen", + leader_hash + ); continue; } let mut accept_error_occurred = false; From 6589e321ae7f2c69cced89c13dfaa923026260f0 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 28 Aug 2023 15:09:51 +0800 Subject: [PATCH 016/357] Fix InvalidRewardAmount --- sync/src/synchronizer/block_fetcher.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/sync/src/synchronizer/block_fetcher.rs b/sync/src/synchronizer/block_fetcher.rs index f6f093ea46..bcf4b7609a 100644 --- a/sync/src/synchronizer/block_fetcher.rs +++ b/sync/src/synchronizer/block_fetcher.rs @@ -223,7 +223,9 @@ impl BlockFetcher { header.number().saturating_sub(CHECK_POINT_WINDOW) > unverified_tip }); if should_mark { - state.write_inflight_blocks().mark_slow_block(tip); + state + .write_inflight_blocks() + 
.mark_slow_block(unverified_tip); } if fetch.is_empty() { From 291c3133f234e555127b0ec09cd11ad387e20e15 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 30 Aug 2023 16:59:51 +0800 Subject: [PATCH 017/357] Fix log message in accept_block --- chain/src/chain.rs | 70 ++++++++++++++++++++++++---------------------- 1 file changed, 36 insertions(+), 34 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index c1599e4331..7369c78512 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -409,45 +409,47 @@ impl ChainService { error!("accept block {} failed: {}", descendant.hash(), err); continue; } - Ok(accepted_opt) => { - match accepted_opt { - Some((parent_header, total_difficulty)) => { - match self.unverified_tx.send(UnverifiedBlock { - block: descendant.to_owned(), - parent_header, - switch, - }) { - Ok(_) => {} - Err(err) => error!("send unverified_tx failed: {}", err), - }; - - if total_difficulty - .gt(self.shared.get_unverified_tip().total_difficulty()) - { - self.shared.set_unverified_tip(ckb_shared::HeaderIndex::new( - descendant.header().number(), - descendant.header().hash(), - total_difficulty, - )); - } - } - None => { - info!( - "doesn't accept block {}, because it has been stored", - descendant.hash() - ); - } - } + Ok(accepted_opt) => match accepted_opt { + Some((parent_header, total_difficulty)) => { + match self.unverified_tx.send(UnverifiedBlock { + block: descendant.to_owned(), + parent_header, + switch, + }) { + Ok(_) => {} + Err(err) => error!("send unverified_tx failed: {}", err), + }; - debug!( - "set unverified_tip to {}-{}, while unverified_tip - verified_tip = {}", + if total_difficulty + .gt(self.shared.get_unverified_tip().total_difficulty()) + { + self.shared.set_unverified_tip(ckb_shared::HeaderIndex::new( + descendant.header().number(), + descendant.header().hash(), + total_difficulty, + )); + debug!("set unverified_tip to {}-{}, while unverified_tip - verified_tip = {}", descendant.number(), descendant.hash(), 
descendant .number() - .saturating_sub(self.shared.snapshot().tip_number()) - ) - } + .saturating_sub(self.shared.snapshot().tip_number())) + } else { + debug!("received a block {}-{} with lower or equal difficulty than unverified_tip {}-{}", + descendant.number(), + descendant.hash(), + self.shared.get_unverified_tip().number(), + self.shared.get_unverified_tip().hash(), + ); + } + } + None => { + info!( + "doesn't accept block {}, because it has been stored", + descendant.hash() + ); + } + }, } } From 5c9d47f772b055f1e8e4ae89ed132e1d6db93f5e Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 31 Aug 2023 12:37:21 +0800 Subject: [PATCH 018/357] Try to fix InvalidRewardAmount Signed-off-by: Eval EXEC --- sync/src/types/mod.rs | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index d833575a97..c22ae84d74 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -13,7 +13,7 @@ use ckb_constant::sync::{ RETRY_ASK_TX_TIMEOUT_INCREASE, SUSPEND_SYNC_TIME, }; use ckb_error::Error as CKBError; -use ckb_logger::{debug, error, trace}; +use ckb_logger::{debug, error, trace, warn}; use ckb_network::{CKBProtocolContext, PeerIndex, SupportProtocols}; use ckb_shared::{ block_status::BlockStatus, @@ -1295,17 +1295,23 @@ impl SyncShared { // Return true when the block is that we have requested and received first time. 
pub fn new_block_received(&self, block: &core::BlockView) -> bool { - if self + if !self .state() .write_inflight_blocks() .remove_by_block((block.number(), block.hash()).into()) { - self.shared() - .insert_block_status(block.hash(), BlockStatus::BLOCK_RECEIVED); - true - } else { - false + return false; + } + let mut is_new_block_received: bool = false; + let status = self + .shared() + .block_status_map() + .entry(block.hash()) + .or_insert(BlockStatus::BLOCK_RECEIVED); + if status.eq(&BlockStatus::BLOCK_RECEIVED) { + is_new_block_received = true; } + is_new_block_received } } From 3c0f6dcbe43879d560cfefa19caa3df73d845446 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 31 Aug 2023 17:40:22 +0800 Subject: [PATCH 019/357] Revert "try to fix InvalidRewardAmount" This reverts commit eac9bc2c1dbcbd169ea18e16475b70c794f78e7b. --- sync/src/types/mod.rs | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index c22ae84d74..d833575a97 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -13,7 +13,7 @@ use ckb_constant::sync::{ RETRY_ASK_TX_TIMEOUT_INCREASE, SUSPEND_SYNC_TIME, }; use ckb_error::Error as CKBError; -use ckb_logger::{debug, error, trace, warn}; +use ckb_logger::{debug, error, trace}; use ckb_network::{CKBProtocolContext, PeerIndex, SupportProtocols}; use ckb_shared::{ block_status::BlockStatus, @@ -1295,23 +1295,17 @@ impl SyncShared { // Return true when the block is that we have requested and received first time. 
pub fn new_block_received(&self, block: &core::BlockView) -> bool { - if !self + if self .state() .write_inflight_blocks() .remove_by_block((block.number(), block.hash()).into()) { - return false; - } - let mut is_new_block_received: bool = false; - let status = self - .shared() - .block_status_map() - .entry(block.hash()) - .or_insert(BlockStatus::BLOCK_RECEIVED); - if status.eq(&BlockStatus::BLOCK_RECEIVED) { - is_new_block_received = true; + self.shared() + .insert_block_status(block.hash(), BlockStatus::BLOCK_RECEIVED); + true + } else { + false } - is_new_block_received } } From 35f1a26fec22f4edb06a12d249fd2716b4237f2b Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 31 Aug 2023 17:41:43 +0800 Subject: [PATCH 020/357] Add debug log for new received blocks --- sync/src/types/mod.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index d833575a97..0cddd6b8ef 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -1300,6 +1300,15 @@ impl SyncShared { .write_inflight_blocks() .remove_by_block((block.number(), block.hash()).into()) { + { + let status = self.shared().get_block_status(&block.hash()); + debug!( + "new_block_received {}-{}, status: {:?}", + block.number(), + block.hash(), + status + ); + } self.shared() .insert_block_status(block.hash(), BlockStatus::BLOCK_RECEIVED); true From 0f1c403a210ab315a12401585283ab9e60c0c377 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 31 Aug 2023 21:25:41 +0800 Subject: [PATCH 021/357] Add debug log for inflight blocks process Signed-off-by: Eval EXEC --- sync/src/synchronizer/block_fetcher.rs | 6 ++++++ sync/src/types/mod.rs | 8 ++++++++ 2 files changed, 14 insertions(+) diff --git a/sync/src/synchronizer/block_fetcher.rs b/sync/src/synchronizer/block_fetcher.rs index bcf4b7609a..8e17f7206a 100644 --- a/sync/src/synchronizer/block_fetcher.rs +++ b/sync/src/synchronizer/block_fetcher.rs @@ -197,6 +197,12 @@ impl BlockFetcher { 
.write_inflight_blocks() .insert(self.peer, (header.number(), hash).into()) { + debug!( + "block: {}-{} added to inflight, block_status: {:?}", + header.number(), + header.hash(), + status + ); fetch.push(header) } diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index 0cddd6b8ef..c606afad12 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -677,6 +677,10 @@ impl InflightBlocks { trace.remove(key); } remove_key.push(key.clone()); + debug!( + "prune: remove InflightState: remove {}-{} from {}", + key.number, key.hash, value.peer + ); } } @@ -721,6 +725,10 @@ impl InflightBlocks { d.punish(1); } d.hashes.remove(key); + debug!( + "prune: remove download_schedulers: remove {}-{} from {}", + key.number, key.hash, state.peer + ); }; } From 830e0de4f1e1b1556108c1aef6a1f6b2aa807445 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 31 Aug 2023 22:09:12 +0800 Subject: [PATCH 022/357] Add debug log for protocol disconnect --- sync/src/types/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index c606afad12..ae5765ad2a 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -1727,6 +1727,7 @@ impl SyncState { pub fn disconnected(&self, pi: PeerIndex) { self.write_inflight_blocks().remove_by_peer(pi); self.peers().disconnected(pi); + debug!("peer {} disconnected", pi); } // pub fn get_orphan_block(&self, block_hash: &Byte32) -> Option { From 3258bcd03918adf9e0f21fb6292c65988b8d7198 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 31 Aug 2023 22:23:15 +0800 Subject: [PATCH 023/357] Fix new_block_received got duplicate block Signed-off-by: Eval EXEC --- sync/src/types/mod.rs | 36 +++++++++++++++++++++--------------- 1 file changed, 21 insertions(+), 15 deletions(-) diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index ae5765ad2a..7aefba6974 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -1303,26 +1303,32 @@ impl SyncShared { // Return true when the block is 
that we have requested and received first time. pub fn new_block_received(&self, block: &core::BlockView) -> bool { - if self + if !self .state() .write_inflight_blocks() .remove_by_block((block.number(), block.hash()).into()) { - { - let status = self.shared().get_block_status(&block.hash()); - debug!( - "new_block_received {}-{}, status: {:?}", - block.number(), - block.hash(), - status - ); - } - self.shared() - .insert_block_status(block.hash(), BlockStatus::BLOCK_RECEIVED); - true - } else { - false + return false; + } + + let status = self.shared().get_block_status(&block.hash()); + debug!( + "new_block_received {}-{}, status: {:?}", + block.number(), + block.hash(), + status + ); + if !BlockStatus::HEADER_VALID.eq(&status) { + return false; + } + + if let dashmap::mapref::entry::Entry::Vacant(status) = + self.shared().block_status_map().entry(block.hash()) + { + status.insert(BlockStatus::BLOCK_RECEIVED); + return true; } + false } } From 74ec1ae1a067ce4cebeacbc8e205cb5245d80030 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Fri, 1 Sep 2023 13:39:32 +0800 Subject: [PATCH 024/357] Insert orphan blocks and search orphan blocks in same thread --- chain/src/chain.rs | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 7369c78512..45df8ca949 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -155,8 +155,8 @@ pub struct ChainService { orphan_blocks_broker: Arc, - new_block_tx: Sender, - new_block_rx: Receiver, + new_block_tx: Sender<(Arc, Switch)>, + new_block_rx: Receiver<(Arc, Switch)>, unverified_tx: Sender, unverified_rx: Receiver, @@ -176,7 +176,7 @@ impl ChainService { channel::bounded::(BLOCK_DOWNLOAD_WINDOW as usize * 3); let (new_block_tx, new_block_rx) = - channel::bounded::(BLOCK_DOWNLOAD_WINDOW as usize); + channel::bounded::<(Arc, Switch)>(BLOCK_DOWNLOAD_WINDOW as usize); ChainService { shared, @@ -370,7 +370,8 @@ impl ChainService { return; }, recv(self.new_block_rx) -> 
msg => match msg { - Ok(switch) => { + Ok((block, switch)) => { + self.orphan_blocks_broker.insert(block); self.search_orphan_pool(switch) }, Err(err) => { @@ -580,9 +581,7 @@ impl ChainService { self.non_contextual_verify(&block)?; } - self.orphan_blocks_broker.insert(block); - - match self.new_block_tx.send(switch) { + match self.new_block_tx.send((block, switch)) { Ok(_) => {} Err(err) => { error!("notify new block to orphan pool err: {}", err) From eede67f7362d0a6cc2c1c1a7eaddf905ed54d163 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Fri, 1 Sep 2023 13:57:31 +0800 Subject: [PATCH 025/357] Use `get_for_update` to protect protect BlockExt --- chain/src/chain.rs | 3 +++ store/src/transaction.rs | 25 +++++++++++++++++++++++++ 2 files changed, 28 insertions(+) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 45df8ca949..76178e69c6 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -629,6 +629,9 @@ impl ChainService { let db_txn = Arc::new(self.shared.store().begin_transaction()); + let txn_snapshot = db_txn.get_snapshot(); + let _snapshot_block_ext = db_txn.get_update_for_block_ext(&block.hash(), &txn_snapshot); + db_txn.insert_block(block.as_ref())?; // if parent_ext.verified == Some(false) { diff --git a/store/src/transaction.rs b/store/src/transaction.rs index 62ba110b0f..48ef652a95 100644 --- a/store/src/transaction.rs +++ b/store/src/transaction.rs @@ -165,6 +165,31 @@ impl StoreTransaction { .map(|slice| packed::Byte32Reader::from_slice_should_be_ok(slice.as_ref()).to_entity()) } + /// TODO(doc): @eval-exec + pub fn get_update_for_block_ext( + &self, + hash: &packed::Byte32, + snapshot: &StoreTransactionSnapshot<'_>, + ) -> Option { + self.inner + .get_for_update(COLUMN_BLOCK_EXT, hash.as_slice(), &snapshot.inner) + .expect("db operation should be ok") + .map(|slice| { + let reader = + packed::BlockExtReader::from_compatible_slice_should_be_ok(slice.as_ref()); + match reader.count_extra_fields() { + 0 => reader.unpack(), + 2 => 
packed::BlockExtV1Reader::from_slice_should_be_ok(slice.as_ref()).unpack(), + _ => { + panic!( + "BlockExt storage field count doesn't match, expect 7 or 5, actual {}", + reader.field_count() + ) + } + } + }) + } + /// TODO(doc): @quake pub fn insert_tip_header(&self, h: &HeaderView) -> Result<(), Error> { self.insert_raw(COLUMN_META, META_TIP_HEADER_KEY, h.hash().as_slice()) From d01e1ed805b76073c09217e92f66e83a327a04e6 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Fri, 1 Sep 2023 14:06:06 +0800 Subject: [PATCH 026/357] Relayer query orphan block from ChainController --- chain/src/chain.rs | 4 ++-- shared/src/shared.rs | 8 -------- sync/src/relayer/mod.rs | 2 +- 3 files changed, 3 insertions(+), 11 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 76178e69c6..be4d3b3e7c 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -115,8 +115,8 @@ impl ChainController { } // Relay need this - pub fn get_orphan_block(&self, hash: &Byte32) -> Option { - todo!("load orphan block") + pub fn get_orphan_block(&self, hash: &Byte32) -> Option> { + self.orphan_block_broker.get_block(hash) } pub fn orphan_blocks_len(&self) -> usize { diff --git a/shared/src/shared.rs b/shared/src/shared.rs index ffc5e22628..9415d19096 100644 --- a/shared/src/shared.rs +++ b/shared/src/shared.rs @@ -411,14 +411,6 @@ impl Shared { self.header_map.remove(hash); } - pub fn get_orphan_block(&self, block_hash: &Byte32) -> Option { - todo!("get_orphan_block") - } - - pub fn orphan_pool_count(&self) -> u64 { - 0 - } - pub fn block_status_map(&self) -> &DashMap { &self.block_status_map } diff --git a/sync/src/relayer/mod.rs b/sync/src/relayer/mod.rs index 8d3d95bcbb..f7d96c006e 100644 --- a/sync/src/relayer/mod.rs +++ b/sync/src/relayer/mod.rs @@ -516,7 +516,7 @@ impl Relayer { } } BlockStatus::BLOCK_RECEIVED => { - if let Some(uncle) = self.shared.shared().get_orphan_block(&uncle_hash) { + if let Some(uncle) = self.chain.get_orphan_block(&uncle_hash) { 
uncles.push(uncle.as_uncle().data()); } else { debug_target!( From e59a709f5160f9a32c74e41bfa48e0e6135d347c Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Sat, 2 Sep 2023 08:38:24 +0800 Subject: [PATCH 027/357] Add a python script to draw CKB sync chart --- devtools/block_sync/draw_sync_chart.py | 94 ++++++++++++++++++++++++++ 1 file changed, 94 insertions(+) create mode 100755 devtools/block_sync/draw_sync_chart.py diff --git a/devtools/block_sync/draw_sync_chart.py b/devtools/block_sync/draw_sync_chart.py new file mode 100755 index 0000000000..1be266579a --- /dev/null +++ b/devtools/block_sync/draw_sync_chart.py @@ -0,0 +1,94 @@ +#!/usr/bin/env python3 +import matplotlib.pyplot as plt +import re +import datetime +import tqdm +import argparse + + +def parse_sync_statics(log_file): + """ + parse sync statics from log file + sample: + 2023-09-01 06:54:45.096 +00:00 verify_blocks INFO ckb_chain::chain block: 811224, hash: 0x00f54aaadd1a36339e69a10624dec3250658100ffd5773a7e9f228bb9a96187e, epoch: 514(841/1800), total_diff: 0x59a4a071ba9f0de59d, txs: 1 + """ + duration = [] + height = [] + base_timestamp = 0 + + print("reading file: ", log_file) + total_lines = len(open(log_file, 'r').readlines()) + print("total lines: ", total_lines) + + with open(log_file, 'r') as f: + pbar = tqdm.tqdm(total=total_lines) + for line_idx, line in enumerate(f): + pbar.update(1) + if line.find('INFO ckb_chain::chain block: ') != -1: + timestamp_str = re.search(r'^(\S+ \S+)', line).group(1) # Extract the timestamp string + timestamp = datetime.datetime.strptime(timestamp_str, "%Y-%m-%d %H:%M:%S.%f").timestamp() + + if base_timestamp == 0: + base_timestamp = timestamp + timestamp = int(timestamp - base_timestamp) + + block_number = int(re.search(r'block: (\d+)', line).group(1)) # Extract the block number using regex + + if line_idx == 0 or block_number % 10000 == 0: + duration.append(timestamp / 60 / 60) + height.append(block_number) + + pbar.close() + + return duration, height + + +parser 
= argparse.ArgumentParser( + description='Draw CKB Sync progress Chart. Usage: ./draw_sync_chart.py --ckb_log ./run1.log ./run2.log --label branch_develop branch_async --result_path /tmp/compare_result.png') +parser.add_argument('--ckb_log', metavar='ckb_log_file', type=str, + action='store', nargs='+', required=True, + help='the ckb node log file path') +parser.add_argument('--label', metavar='label', type=str, + action='store', nargs='+', required=True, + help='what label should be put on the chart') +parser.add_argument('--result_path', type=str, nargs=1, action='store', + help='where to save the result chart') + +args = parser.parse_args() +assert len(args.ckb_log) == len(args.label) + + +tasks = zip(args.ckb_log, args.label) + +result_path = args.result_path[0] +fig, ax = plt.subplots(1, 1, figsize=(10, 8)) + +lgs = [] +for ckb_log_file, label in tasks: + print("ckb_log_file: ", ckb_log_file) + print("label: ", label) + duration, height = parse_sync_statics(ckb_log_file) + + lg = ax.scatter(duration, height, s=1, label=label) + ax.plot(duration, height, label=label) + + lgs.append(lg) + + ax.get_yaxis().get_major_formatter().set_scientific(False) + ax.get_yaxis().get_major_formatter().set_useOffset(False) + + ax.set_axisbelow(True) + + ax.xaxis.grid(color='gray', linestyle='solid', which='major') + ax.yaxis.grid(color='gray', linestyle='solid', which='major') + + ax.xaxis.grid(color='gray', linestyle='dashed', which='minor') + ax.yaxis.grid(color='gray', linestyle='dashed', which='minor') + + plt.setp(ax.get_xticklabels(), rotation=30, horizontalalignment='right') + +plt.legend(tuple(lgs), tuple(args.label), loc='upper right', shadow=True) +plt.title('CKB Sync progress Chart') +plt.xlabel('Timecost (hours)') +plt.ylabel('Block Height') +plt.savefig(result_path) From f54bb8dd36bc8b4f598508ce8ccdb6577bed62f1 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Sun, 3 Sep 2023 07:36:47 +0800 Subject: [PATCH 028/357] Add annotation on sync chart --- 
devtools/block_sync/draw_sync_chart.py | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/devtools/block_sync/draw_sync_chart.py b/devtools/block_sync/draw_sync_chart.py index 1be266579a..ca40d5ae80 100755 --- a/devtools/block_sync/draw_sync_chart.py +++ b/devtools/block_sync/draw_sync_chart.py @@ -5,6 +5,8 @@ import tqdm import argparse +from matplotlib.ticker import MultipleLocator + def parse_sync_statics(log_file): """ @@ -57,7 +59,6 @@ def parse_sync_statics(log_file): args = parser.parse_args() assert len(args.ckb_log) == len(args.label) - tasks = zip(args.ckb_log, args.label) result_path = args.result_path[0] @@ -74,8 +75,14 @@ def parse_sync_statics(log_file): lgs.append(lg) + for i, h in enumerate(height): + if h % 2000000 == 0: + ax.vlines([duration[i]], 0, h, colors="gray", linestyles="dashed") + ax.get_yaxis().get_major_formatter().set_scientific(False) ax.get_yaxis().get_major_formatter().set_useOffset(False) + + ax.margins(0) ax.set_axisbelow(True) @@ -84,10 +91,13 @@ def parse_sync_statics(log_file): ax.xaxis.grid(color='gray', linestyle='dashed', which='minor') ax.yaxis.grid(color='gray', linestyle='dashed', which='minor') - + + minorLocator = MultipleLocator(10) + ax.xaxis.set_minor_locator(minorLocator) + plt.setp(ax.get_xticklabels(), rotation=30, horizontalalignment='right') -plt.legend(tuple(lgs), tuple(args.label), loc='upper right', shadow=True) +plt.legend(tuple(lgs), tuple(args.label), loc='upper left', shadow=True) plt.title('CKB Sync progress Chart') plt.xlabel('Timecost (hours)') plt.ylabel('Block Height') From 219d6405706eaa556b1bda3e3794aa646caf17b5 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 4 Sep 2023 09:21:58 +0800 Subject: [PATCH 029/357] Prevent overwrite BlockExt, prevent verify block twice --- chain/src/chain.rs | 32 ++++++++++++++++++++++++++------ 1 file changed, 26 insertions(+), 6 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index be4d3b3e7c..fde9a857d2 100644 --- 
a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -610,6 +610,17 @@ impl ChainService { return Ok(None); } + let parent_header = self + .shared + .store() + .get_block_header(&block.data().header().raw().parent_hash()) + .expect("parent already store"); + + if let Some(ext) = self.shared.store().get_block_ext(&block.hash()) { + debug!("block {}-{} has stored BlockExt", block_number, block_hash); + return Ok(Some((parent_header, ext.total_difficulty))); + } + trace!("begin accept block: {}-{}", block.number(), block.hash()); let parent_ext = self @@ -621,12 +632,6 @@ impl ChainService { let cannon_total_difficulty = parent_ext.total_difficulty.to_owned() + block.header().difficulty(); - let parent_header = self - .shared - .store() - .get_block_header(&block.data().header().raw().parent_hash()) - .expect("parent already store"); - let db_txn = Arc::new(self.shared.store().begin_transaction()); let txn_snapshot = db_txn.get_snapshot(); @@ -692,6 +697,21 @@ impl ChainService { .get_block_ext(&block.data().header().raw().parent_hash()) .expect("parent already store"); + if let Some(ext) = self.shared.store().get_block_ext(&block.hash()) { + match ext.verified { + Some(verified) => { + debug!( + "block {}-{} has been verified: {}", + block.number(), + block.hash(), + verified + ); + return Ok(verified); + } + _ => {} + } + } + let cannon_total_difficulty = parent_ext.total_difficulty.to_owned() + block.header().difficulty(); From f98e4cfbaace89c4e84f175f7dd973d56d200911 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 4 Sep 2023 11:37:58 +0800 Subject: [PATCH 030/357] Return malformed_peers from ckb-chain to ckb-sync --- Cargo.lock | 1 + chain/src/chain.rs | 54 +++++++++++++++++++------- shared/Cargo.toml | 1 + shared/src/types/mod.rs | 6 +++ sync/src/synchronizer/block_process.rs | 5 ++- sync/src/synchronizer/mod.rs | 21 ++++++++-- sync/src/types/mod.rs | 18 +++------ 7 files changed, 74 insertions(+), 32 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 
74190eb287..063d9d5560 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1449,6 +1449,7 @@ dependencies = [ "ckb-error", "ckb-logger", "ckb-migrate", + "ckb-network", "ckb-notify", "ckb-proposal-table", "ckb-snapshot", diff --git a/chain/src/chain.rs b/chain/src/chain.rs index fde9a857d2..4471a38738 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -12,11 +12,13 @@ use ckb_logger::{ self, debug, error, info, log_enabled, log_enabled_target, trace, trace_target, warn, }; use ckb_merkle_mountain_range::leaf_index_to_mmr_size; +use ckb_network::PeerId; use ckb_proposal_table::ProposalTable; #[cfg(debug_assertions)] use ckb_rust_unstable_port::IsSorted; use ckb_shared::block_status::BlockStatus; use ckb_shared::shared::Shared; +use ckb_shared::types::VerifyFailedBlockInfo; use ckb_stop_handler::{new_crossbeam_exit_rx, register_thread}; use ckb_store::{attach_block_cell, detach_block_cell, ChainStore, StoreTransaction}; use ckb_systemtime::unix_time_as_millis; @@ -84,7 +86,10 @@ impl ChainController { /// If the block already exists, does nothing and false is returned. 
/// /// [BlockVerifier] [NonContextualBlockTxsVerifier] [ContextualBlockVerifier] will performed - pub fn process_block(&self, block: Arc) -> Result { + pub fn process_block( + &self, + block: Arc, + ) -> (Result, Vec) { self.internal_process_block(block, Switch::NONE) } @@ -95,7 +100,7 @@ impl ChainController { &self, block: Arc, switch: Switch, - ) -> Result { + ) -> (Result, Vec) { Request::call(&self.process_block_sender, (block, switch)).unwrap_or_else(|| { Err(InternalErrorKind::System .other("Chain service has gone") @@ -160,12 +165,16 @@ pub struct ChainService { unverified_tx: Sender, unverified_rx: Receiver, + + verify_failed_blocks_tx: Sender, + verify_failed_blocks_rx: Receiver, } #[derive(Clone)] struct UnverifiedBlock { block: Arc, parent_header: HeaderView, + peer_id: PeerId, switch: Switch, } @@ -178,6 +187,8 @@ impl ChainService { let (new_block_tx, new_block_rx) = channel::bounded::<(Arc, Switch)>(BLOCK_DOWNLOAD_WINDOW as usize); + let (verify_failed_blocks_tx, verify_failed_blocks_rx) = channel::unbounded(); + ChainService { shared, proposal_table: Arc::new(Mutex::new(proposal_table)), @@ -186,6 +197,8 @@ impl ChainService { unverified_rx, new_block_tx, new_block_rx, + verify_failed_blocks_tx, + verify_failed_blocks_rx, } } @@ -329,9 +342,18 @@ impl ChainService { unverified_block.block.hash(), err ); - // TODO punish the peer who give me the bad block + if let Err(SendError(peer_id)) = + self.verify_failed_blocks_tx.send(VerifyFailedBlockInfo { + block_hash: unverified_block.block.hash(), + peer_id: unverified_block.peer_id, + }) + { + error!( + "send verify_failed_blocks_tx failed for peer: {:?}", + unverified_block.peer_id + ); + } - // TODO decrease unverified_tip let tip = self .shared .store() @@ -416,6 +438,7 @@ impl ChainService { block: descendant.to_owned(), parent_header, switch, + peer_id, }) { Ok(_) => {} Err(err) => error!("send unverified_tx failed: {}", err), @@ -562,23 +585,26 @@ impl ChainService { // make block IO and verify 
asynchronize #[doc(hidden)] - pub fn process_block_v2(&self, block: Arc, switch: Switch) -> Result { + pub fn process_block_v2( + &self, + block: Arc, + switch: Switch, + ) -> (Result, Vec) { let block_number = block.number(); let block_hash = block.hash(); if block_number < 1 { warn!("receive 0 number block: 0-{}", block_hash); } - // if self - // .shared - // .contains_block_status(&block_hash, BlockStatus::BLOCK_RECEIVED) - // { - // debug!("block {}-{} has been stored", block_number, block_hash); - // return Ok(false); - // } + let failed_blocks_peer_ids: Vec = + self.verify_failed_blocks_rx.iter().collect(); if !switch.disable_non_contextual() { - self.non_contextual_verify(&block)?; + let result = self.non_contextual_verify(&block); + match result { + Err(err) => return (Err(err), failed_blocks_peer_ids), + _ => {} + } } match self.new_block_tx.send((block, switch)) { @@ -596,7 +622,7 @@ impl ChainService { self.shared.get_unverified_tip().number(), ); - Ok(false) + (Ok(false), failed_blocks_peer_ids) } fn accept_block(&self, block: Arc) -> Result, Error> { diff --git a/shared/Cargo.toml b/shared/Cargo.toml index 247b8ed866..bc0986bd80 100644 --- a/shared/Cargo.toml +++ b/shared/Cargo.toml @@ -30,6 +30,7 @@ ckb-channel = { path = "../util/channel", version = "= 0.114.0-pre" } ckb-app-config = {path = "../util/app-config", version = "= 0.114.0-pre"} ckb-migrate = { path = "../util/migrate", version = "= 0.114.0-pre" } once_cell = "1.8.0" +ckb-network = { path = "../network", version = "= 0.113.0-pre" } ckb-util = { path = "../util", version = "= 0.113.0-pre" } bitflags = "1.0" tokio = { version = "1", features = ["sync"] } diff --git a/shared/src/types/mod.rs b/shared/src/types/mod.rs index 8db42092b1..f0083e6596 100644 --- a/shared/src/types/mod.rs +++ b/shared/src/types/mod.rs @@ -1,3 +1,4 @@ +use ckb_network::PeerId; use ckb_types::core::{BlockNumber, EpochNumberWithFraction}; use ckb_types::packed::Byte32; use ckb_types::prelude::{Entity, 
FromSliceShouldBeOk, Reader}; @@ -304,3 +305,8 @@ fn get_skip_height(height: BlockNumber) -> BlockNumber { } pub const SHRINK_THRESHOLD: usize = 300; + +pub struct VerifyFailedBlockInfo { + pub block_hash: Byte32, + pub peer_id: PeerId, +} diff --git a/sync/src/synchronizer/block_process.rs b/sync/src/synchronizer/block_process.rs index b8fc6b5824..3c58c54a4f 100644 --- a/sync/src/synchronizer/block_process.rs +++ b/sync/src/synchronizer/block_process.rs @@ -32,7 +32,10 @@ impl<'a> BlockProcess<'a> { let shared = self.synchronizer.shared(); if shared.new_block_received(&block) { - if let Err(err) = self.synchronizer.process_new_block(block.clone()) { + let (this_block_verify_result, maliformed_peers) = + self.synchronizer.process_new_block(block.clone()); + + if let Err(err) = this_block_verify_result { if !is_internal_db_error(&err) { return StatusCode::BlockIsInvalid.with_context(format!( "{}, error: {}", diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index 73f41f9489..8fba3abd64 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -38,7 +38,7 @@ use ckb_network::{ async_trait, bytes::Bytes, tokio, CKBProtocolContext, CKBProtocolHandler, PeerIndex, ServiceControl, SupportProtocols, }; -use ckb_shared::types::HeaderIndexView; +use ckb_shared::types::{HeaderIndexView, VerifyFailedBlockInfo}; use ckb_stop_handler::{new_crossbeam_exit_rx, register_thread}; use ckb_systemtime::unix_time_as_millis; use ckb_types::{ @@ -289,6 +289,16 @@ impl Synchronizer { let item_bytes = message.as_slice().len() as u64; let status = self.try_process(nc, peer, message); + Self::post_sync_process(nc, peer, item_name, item_bytes, status); + } + + fn post_sync_process( + nc: &dyn CKBProtocolContext, + peer: PeerIndex, + item_name: &str, + item_bytes: u64, + status: Status, + ) { metric_ckb_message_bytes( MetricDirection::In, &SupportProtocols::Sync.name(), @@ -334,14 +344,17 @@ impl Synchronizer { /// Process a new block sync from other 
peer //TODO: process block which we don't request - pub fn process_new_block(&self, block: core::BlockView) -> Result { + pub fn process_new_block( + &self, + block: core::BlockView, + ) -> (Result, Vec) { let block_hash = block.hash(); let status = self.shared.active_chain().get_block_status(&block_hash); // NOTE: Filtering `BLOCK_STORED` but not `BLOCK_RECEIVED`, is for avoiding // stopping synchronization even when orphan_pool maintains dirty items by bugs. if status.contains(BlockStatus::BLOCK_PARTIAL_STORED) { error!("Block {} already partial stored", block_hash); - Ok(false) + (Ok(false), Vec::new()) } else if status.contains(BlockStatus::HEADER_VALID) { self.shared.insert_new_block(&self.chain, Arc::new(block)) } else { @@ -350,7 +363,7 @@ impl Synchronizer { status, block_hash, ); // TODO which error should we return? - Ok(false) + (Ok(false), Vec::new()) } } diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index 7aefba6974..b90d8a0b94 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -14,7 +14,8 @@ use ckb_constant::sync::{ }; use ckb_error::Error as CKBError; use ckb_logger::{debug, error, trace}; -use ckb_network::{CKBProtocolContext, PeerIndex, SupportProtocols}; +use ckb_network::{CKBProtocolContext, PeerId, PeerIndex, SupportProtocols}; +use ckb_shared::types::VerifyFailedBlockInfo; use ckb_shared::{ block_status::BlockStatus, shared::Shared, @@ -1081,7 +1082,7 @@ impl SyncShared { &self, chain: &ChainController, block: Arc, - ) -> Result { + ) -> (Result, Vec) { // Insert the given block into orphan_block_pool if its parent is not found // if !self.is_stored(&block.parent_hash()) { // debug!( @@ -1162,7 +1163,7 @@ impl SyncShared { &self, chain: &ChainController, block: Arc, - ) -> Result { + ) -> (Result, Vec) { let ret = { let mut assume_valid_target = self.state.assume_valid_target(); if let Some(ref target) = *assume_valid_target { @@ -1179,23 +1180,14 @@ impl SyncShared { chain.process_block(Arc::clone(&block)) } }; + 
if let Err(ref error) = ret { if !is_internal_db_error(error) { error!("accept block {:?} {}", block, error); self.shared() .insert_block_status(block.header().hash(), BlockStatus::BLOCK_INVALID); } - } else { - // Clear the newly inserted block from block_status_map. - // - // We don't know whether the actual block status is BLOCK_VALID or BLOCK_INVALID. - // So we just simply remove the corresponding in-memory block status, - // and the next time `get_block_status` would acquire the real-time - // status via fetching block_ext from the database. - // self.shared().remove_block_status(&block.as_ref().hash()); - // self.shared().remove_header_view(&block.as_ref().hash()); } - ret } From ddf8d301192e33179829b08ca4f6815ff9189fe7 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 4 Sep 2023 15:48:21 +0800 Subject: [PATCH 031/357] Use verifiedFailedBlockInfo as process_block's return type --- chain/src/chain.rs | 22 +++++++++++++++------- shared/src/types/mod.rs | 1 + sync/src/synchronizer/block_process.rs | 2 +- sync/src/synchronizer/mod.rs | 6 +++--- sync/src/types/mod.rs | 4 ++-- 5 files changed, 22 insertions(+), 13 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 4471a38738..c4bedcbf8b 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -89,7 +89,7 @@ impl ChainController { pub fn process_block( &self, block: Arc, - ) -> (Result, Vec) { + ) -> Result, Error> { self.internal_process_block(block, Switch::NONE) } @@ -100,7 +100,7 @@ impl ChainController { &self, block: Arc, switch: Switch, - ) -> (Result, Vec) { + ) -> Result, Error> { Request::call(&self.process_block_sender, (block, switch)).unwrap_or_else(|| { Err(InternalErrorKind::System .other("Chain service has gone") @@ -588,21 +588,28 @@ impl ChainService { pub fn process_block_v2( &self, block: Arc, + peer_id: PeerId, switch: Switch, - ) -> (Result, Vec) { + ) -> Vec { let block_number = block.number(); let block_hash = block.hash(); if block_number < 1 { warn!("receive 0 
number block: 0-{}", block_hash); } - let failed_blocks_peer_ids: Vec = + let mut failed_blocks_peer_ids: Vec = self.verify_failed_blocks_rx.iter().collect(); if !switch.disable_non_contextual() { let result = self.non_contextual_verify(&block); match result { - Err(err) => return (Err(err), failed_blocks_peer_ids), + Err(err) => { + failed_blocks_peer_ids.push(VerifyFailedBlockInfo { + block_hash, + peer_id, + }); + return failed_blocks_peer_ids; + } _ => {} } } @@ -614,15 +621,16 @@ impl ChainService { } } debug!( - "processing block: {}-{}, orphan_len: {}, (tip:unverified_tip):({}:{})", + "processing block: {}-{}, orphan_len: {}, (tip:unverified_tip):({}:{}), and return failed_blocks_peer_ids: {:?}", block_number, block_hash, self.orphan_blocks_broker.len(), self.shared.snapshot().tip_number(), self.shared.get_unverified_tip().number(), + failed_blocks_peer_ids, ); - (Ok(false), failed_blocks_peer_ids) + failed_blocks_peer_ids } fn accept_block(&self, block: Arc) -> Result, Error> { diff --git a/shared/src/types/mod.rs b/shared/src/types/mod.rs index f0083e6596..a1f38faa85 100644 --- a/shared/src/types/mod.rs +++ b/shared/src/types/mod.rs @@ -306,6 +306,7 @@ fn get_skip_height(height: BlockNumber) -> BlockNumber { pub const SHRINK_THRESHOLD: usize = 300; +#[derive(Clone, Debug, PartialEq, Eq)] pub struct VerifyFailedBlockInfo { pub block_hash: Byte32, pub peer_id: PeerId, diff --git a/sync/src/synchronizer/block_process.rs b/sync/src/synchronizer/block_process.rs index 3c58c54a4f..8fd9d75da4 100644 --- a/sync/src/synchronizer/block_process.rs +++ b/sync/src/synchronizer/block_process.rs @@ -32,7 +32,7 @@ impl<'a> BlockProcess<'a> { let shared = self.synchronizer.shared(); if shared.new_block_received(&block) { - let (this_block_verify_result, maliformed_peers) = + let (this_block_verify_result, malformed_peers) = self.synchronizer.process_new_block(block.clone()); if let Err(err) = this_block_verify_result { diff --git a/sync/src/synchronizer/mod.rs 
b/sync/src/synchronizer/mod.rs index 8fba3abd64..9537775cf9 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -347,14 +347,14 @@ impl Synchronizer { pub fn process_new_block( &self, block: core::BlockView, - ) -> (Result, Vec) { + ) -> Result>, CKBError> { let block_hash = block.hash(); let status = self.shared.active_chain().get_block_status(&block_hash); // NOTE: Filtering `BLOCK_STORED` but not `BLOCK_RECEIVED`, is for avoiding // stopping synchronization even when orphan_pool maintains dirty items by bugs. if status.contains(BlockStatus::BLOCK_PARTIAL_STORED) { error!("Block {} already partial stored", block_hash); - (Ok(false), Vec::new()) + Ok(Some(Vec::new())) } else if status.contains(BlockStatus::HEADER_VALID) { self.shared.insert_new_block(&self.chain, Arc::new(block)) } else { @@ -363,7 +363,7 @@ impl Synchronizer { status, block_hash, ); // TODO which error should we return? - (Ok(false), Vec::new()) + (Ok(Some(Vec::new()))) } } diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index b90d8a0b94..5e02456feb 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -1082,7 +1082,7 @@ impl SyncShared { &self, chain: &ChainController, block: Arc, - ) -> (Result, Vec) { + ) -> Result, CKBError> { // Insert the given block into orphan_block_pool if its parent is not found // if !self.is_stored(&block.parent_hash()) { // debug!( @@ -1163,7 +1163,7 @@ impl SyncShared { &self, chain: &ChainController, block: Arc, - ) -> (Result, Vec) { + ) -> Result, CKBError> { let ret = { let mut assume_valid_target = self.state.assume_valid_target(); if let Some(ref target) = *assume_valid_target { From 86c64eae0e29f4dfdb68eff1fd2b75cf71026271 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 4 Sep 2023 16:07:02 +0800 Subject: [PATCH 032/357] Fix UnverifiedBlock object destruction --- chain/src/chain.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index c4bedcbf8b..dba139a4a8 100644 
--- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -722,6 +722,7 @@ impl ChainService { let UnverifiedBlock { block, parent_header, + peer_id, switch, } = unverified_block; From b2650d1b5ac34ceef96900f45a8955588160655e Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 4 Sep 2023 20:48:21 +0800 Subject: [PATCH 033/357] Add LonelyBlock struct --- chain/src/chain.rs | 119 +++++++++++++++++++-------------- chain/src/orphan_block_pool.rs | 35 +++++----- 2 files changed, 87 insertions(+), 67 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index dba139a4a8..88e26eb242 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -50,7 +50,7 @@ use std::{cmp, thread}; const ORPHAN_BLOCK_SIZE: usize = (BLOCK_DOWNLOAD_WINDOW * 2) as usize; -type ProcessBlockRequest = Request<(Arc, Switch), Result>; +type ProcessBlockRequest = Request<(LonelyBlock), Vec>; type TruncateRequest = Request>; /// Controller to the chain service. @@ -88,9 +88,9 @@ impl ChainController { /// [BlockVerifier] [NonContextualBlockTxsVerifier] [ContextualBlockVerifier] will performed pub fn process_block( &self, - block: Arc, + lonely_block: LonelyBlock, ) -> Result, Error> { - self.internal_process_block(block, Switch::NONE) + self.internal_process_block(lonely_block) } /// Internal method insert block for test @@ -98,10 +98,9 @@ impl ChainController { /// switch bit flags for particular verify, make easier to generating test data pub fn internal_process_block( &self, - block: Arc, - switch: Switch, + lonely_block: LonelyBlock, ) -> Result, Error> { - Request::call(&self.process_block_sender, (block, switch)).unwrap_or_else(|| { + Request::call(&self.process_block_sender, lonely_block).unwrap_or_else(|| { Err(InternalErrorKind::System .other("Chain service has gone") .into()) @@ -160,8 +159,8 @@ pub struct ChainService { orphan_blocks_broker: Arc, - new_block_tx: Sender<(Arc, Switch)>, - new_block_rx: Receiver<(Arc, Switch)>, + new_block_tx: Sender<(LonelyBlock)>, + new_block_rx: 
Receiver<(LonelyBlock)>, unverified_tx: Sender, unverified_rx: Receiver, @@ -170,11 +169,28 @@ pub struct ChainService { verify_failed_blocks_rx: Receiver, } +pub struct LonelyBlock { + pub block: Arc, + pub peer_id: Option, + pub switch: Switch, +} + +impl LonelyBlock { + fn combine_parent_header(&self, parent_header: HeaderView) -> UnverifiedBlock { + UnverifiedBlock { + block: self.block.clone(), + parent_header, + peer_id: self.peer_id.clone(), + switch: self.switch, + } + } +} + #[derive(Clone)] struct UnverifiedBlock { block: Arc, parent_header: HeaderView, - peer_id: PeerId, + peer_id: Option, switch: Switch, } @@ -185,7 +201,7 @@ impl ChainService { channel::bounded::(BLOCK_DOWNLOAD_WINDOW as usize * 3); let (new_block_tx, new_block_rx) = - channel::bounded::<(Arc, Switch)>(BLOCK_DOWNLOAD_WINDOW as usize); + channel::bounded::(BLOCK_DOWNLOAD_WINDOW as usize); let (verify_failed_blocks_tx, verify_failed_blocks_rx) = channel::unbounded(); @@ -242,11 +258,9 @@ impl ChainService { .spawn(move || loop { select! 
{ recv(process_block_receiver) -> msg => match msg { - Ok(Request { responder, arguments: (block, verify) }) => { - let instant = Instant::now(); - + Ok(Request { responder, arguments: (block, peer_id, verify) }) => { let _ = tx_control.suspend_chunk_process(); - let _ = responder.send(self.process_block_v2(block, verify)); + let _ = responder.send(self.process_block_v2(block, peer_id, verify)); let _ = tx_control.continue_chunk_process(); if let Some(metrics) = ckb_metrics::handle() { @@ -338,20 +352,23 @@ impl ChainService { } Err(err) => { error!( - "verify block {} failed: {}", + "verify [{:?}]'s block {} failed: {}", + unverified_block.peer_id, unverified_block.block.hash(), err ); - if let Err(SendError(peer_id)) = - self.verify_failed_blocks_tx.send(VerifyFailedBlockInfo { - block_hash: unverified_block.block.hash(), - peer_id: unverified_block.peer_id, - }) - { - error!( - "send verify_failed_blocks_tx failed for peer: {:?}", - unverified_block.peer_id - ); + if let Some(peer_id) = unverified_block.peer_id { + if let Err(SendError(peer_id)) = + self.verify_failed_blocks_tx.send(VerifyFailedBlockInfo { + block_hash: unverified_block.block.hash(), + peer_id, + }) + { + error!( + "send verify_failed_blocks_tx failed for peer: {:?}", + peer_id + ); + } } let tip = self @@ -392,9 +409,9 @@ impl ChainService { return; }, recv(self.new_block_rx) -> msg => match msg { - Ok((block, switch)) => { - self.orphan_blocks_broker.insert(block); - self.search_orphan_pool(switch) + Ok(lonely_block) => { + self.orphan_blocks_broker.insert(lonely_block); + self.search_orphan_pool() }, Err(err) => { error!("new_block_rx err: {}", err); @@ -414,7 +431,7 @@ impl ChainService { continue; } - let descendants = self + let descendants: Vec = self .orphan_blocks_broker .remove_blocks_by_parent(&leader_hash); if descendants.is_empty() { @@ -424,8 +441,14 @@ impl ChainService { ); continue; } + let mut accept_error_occurred = false; - for descendant in &descendants { + for 
descendant_block in &descendants { + let &LonelyBlock { + block: descendant, + peer_id, + switch, + } = descendant_block; match self.accept_block(descendant.to_owned()) { Err(err) => { accept_error_occurred = true; @@ -434,12 +457,9 @@ impl ChainService { } Ok(accepted_opt) => match accepted_opt { Some((parent_header, total_difficulty)) => { - match self.unverified_tx.send(UnverifiedBlock { - block: descendant.to_owned(), - parent_header, - switch, - peer_id, - }) { + let unverified_block: UnverifiedBlock = + descendant_block.combine_parent_header(parent_header); + match self.unverified_tx.send(unverified_block) { Ok(_) => {} Err(err) => error!("send unverified_tx failed: {}", err), }; @@ -585,14 +605,9 @@ impl ChainService { // make block IO and verify asynchronize #[doc(hidden)] - pub fn process_block_v2( - &self, - block: Arc, - peer_id: PeerId, - switch: Switch, - ) -> Vec { - let block_number = block.number(); - let block_hash = block.hash(); + pub fn process_block_v2(&self, lonely_block: LonelyBlock) -> Vec { + let block_number = lonely_block.block.number(); + let block_hash = lonely_block.block.hash(); if block_number < 1 { warn!("receive 0 number block: 0-{}", block_hash); } @@ -600,21 +615,23 @@ impl ChainService { let mut failed_blocks_peer_ids: Vec = self.verify_failed_blocks_rx.iter().collect(); - if !switch.disable_non_contextual() { - let result = self.non_contextual_verify(&block); + if !lonely_block.switch.disable_non_contextual() { + let result = self.non_contextual_verify(&lonely_block.block); match result { Err(err) => { - failed_blocks_peer_ids.push(VerifyFailedBlockInfo { - block_hash, - peer_id, - }); + if let Some(peer_id) = lonely_block.peer_id { + failed_blocks_peer_ids.push(VerifyFailedBlockInfo { + block_hash, + peer_id, + }); + } return failed_blocks_peer_ids; } _ => {} } } - match self.new_block_tx.send((block, switch)) { + match self.new_block_tx.send(lonely_block) { Ok(_) => {} Err(err) => { error!("notify new block to orphan pool 
err: {}", err) diff --git a/chain/src/orphan_block_pool.rs b/chain/src/orphan_block_pool.rs index 9459f4864b..0c73806f3c 100644 --- a/chain/src/orphan_block_pool.rs +++ b/chain/src/orphan_block_pool.rs @@ -1,4 +1,6 @@ +use crate::chain::LonelyBlock; use ckb_logger::debug; +use ckb_network::PeerId; use ckb_types::core::EpochNumber; use ckb_types::{core, packed}; use ckb_util::{parking_lot::RwLock, shrink_to_fit}; @@ -13,7 +15,7 @@ const EXPIRED_EPOCH: u64 = 6; #[derive(Default)] struct InnerPool { // Group by blocks in the pool by the parent hash. - blocks: HashMap>>, + blocks: HashMap>, // The map tells the parent hash when given the hash of a block in the pool. // // The block is in the orphan pool if and only if the block hash exists as a key in this map. @@ -31,13 +33,13 @@ impl InnerPool { } } - fn insert(&mut self, block: Arc) { - let hash = block.header().hash(); - let parent_hash = block.data().header().raw().parent_hash(); + fn insert(&mut self, lonely_block: LonelyBlock) { + let hash = lonely_block.block.header().hash(); + let parent_hash = lonely_block.block.data().header().raw().parent_hash(); self.blocks .entry(parent_hash.clone()) .or_insert_with(HashMap::default) - .insert(hash.clone(), block); + .insert(hash.clone(), lonely_block); // Out-of-order insertion needs to be deduplicated self.leaders.remove(&hash); // It is a possible optimization to make the judgment in advance, @@ -51,10 +53,7 @@ impl InnerPool { self.parents.insert(hash, parent_hash); } - pub fn remove_blocks_by_parent( - &mut self, - parent_hash: &ParentHash, - ) -> Vec> { + pub fn remove_blocks_by_parent(&mut self, parent_hash: &ParentHash) -> Vec<(LonelyBlock)> { // try remove leaders first if !self.leaders.remove(parent_hash) { return Vec::new(); @@ -63,7 +62,7 @@ impl InnerPool { let mut queue: VecDeque = VecDeque::new(); queue.push_back(parent_hash.to_owned()); - let mut removed: Vec> = Vec::new(); + let mut removed: Vec<(LonelyBlock)> = Vec::new(); while let Some(parent_hash) = 
queue.pop_front() { if let Some(orphaned) = self.blocks.remove(&parent_hash) { let (hashes, blocks): (Vec<_>, Vec<_>) = orphaned.into_iter().unzip(); @@ -88,7 +87,7 @@ impl InnerPool { removed } - pub fn get_block(&self, hash: &packed::Byte32) -> Option> { + pub fn get_block(&self, hash: &packed::Byte32) -> Option { self.parents.get(hash).and_then(|parent_hash| { self.blocks .get(parent_hash) @@ -104,7 +103,11 @@ impl InnerPool { if self.need_clean(hash, tip_epoch) { // remove items in orphan pool and return hash to callee(clean header map) let descendants = self.remove_blocks_by_parent(hash); - result.extend(descendants.iter().map(|block| block.hash())); + result.extend( + descendants + .iter() + .map(|lonely_block| lonely_block.block.hash()), + ); } } result @@ -139,15 +142,15 @@ impl OrphanBlockPool { } /// Insert orphaned block, for which we have already requested its parent block - pub fn insert(&self, block: Arc) { - self.inner.write().insert(block); + pub fn insert(&self, lonely_block: LonelyBlock) { + self.inner.write().insert(lonely_block); } - pub fn remove_blocks_by_parent(&self, parent_hash: &ParentHash) -> Vec> { + pub fn remove_blocks_by_parent(&self, parent_hash: &ParentHash) -> Vec<(LonelyBlock)> { self.inner.write().remove_blocks_by_parent(parent_hash) } - pub fn get_block(&self, hash: &packed::Byte32) -> Option> { + pub fn get_block(&self, hash: &packed::Byte32) -> Option { self.inner.read().get_block(hash) } From c8e0ce7f4c220fa2d8ff481d31d631c098c66d07 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 4 Sep 2023 20:49:22 +0800 Subject: [PATCH 034/357] Rename `new_block_{tx,rx}` to `lonely_block_{tx,rx}` --- chain/src/chain.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 88e26eb242..15e8a3a422 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -159,8 +159,8 @@ pub struct ChainService { orphan_blocks_broker: Arc, - new_block_tx: Sender<(LonelyBlock)>, - 
new_block_rx: Receiver<(LonelyBlock)>, + lonely_block_tx: Sender<(LonelyBlock)>, + lonely_block_rx: Receiver<(LonelyBlock)>, unverified_tx: Sender, unverified_rx: Receiver, @@ -211,8 +211,8 @@ impl ChainService { orphan_blocks_broker: Arc::new(OrphanBlockPool::with_capacity(ORPHAN_BLOCK_SIZE)), unverified_tx, unverified_rx, - new_block_tx, - new_block_rx, + lonely_block_tx: new_block_tx, + lonely_block_rx: new_block_rx, verify_failed_blocks_tx, verify_failed_blocks_rx, } @@ -408,13 +408,13 @@ impl ChainService { info!("unverified_queue_consumer got exit signal, exit now"); return; }, - recv(self.new_block_rx) -> msg => match msg { + recv(self.lonely_block_rx) -> msg => match msg { Ok(lonely_block) => { self.orphan_blocks_broker.insert(lonely_block); self.search_orphan_pool() }, Err(err) => { - error!("new_block_rx err: {}", err); + error!("lonely_block_rx err: {}", err); return } }, @@ -631,7 +631,7 @@ impl ChainService { } } - match self.new_block_tx.send(lonely_block) { + match self.lonely_block_tx.send(lonely_block) { Ok(_) => {} Err(err) => { error!("notify new block to orphan pool err: {}", err) From 42110e6a7283e4602892bf82c5e4897f2d0c2a04 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 4 Sep 2023 20:50:00 +0800 Subject: [PATCH 035/357] Rename `unverified_{tx,rx}` to `unverified_block_{tx,rx}` --- chain/src/chain.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 15e8a3a422..1fc74aef60 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -162,8 +162,8 @@ pub struct ChainService { lonely_block_tx: Sender<(LonelyBlock)>, lonely_block_rx: Receiver<(LonelyBlock)>, - unverified_tx: Sender, - unverified_rx: Receiver, + unverified_block_tx: Sender, + unverified_block_rx: Receiver, verify_failed_blocks_tx: Sender, verify_failed_blocks_rx: Receiver, @@ -209,8 +209,8 @@ impl ChainService { shared, proposal_table: Arc::new(Mutex::new(proposal_table)), orphan_blocks_broker: 
Arc::new(OrphanBlockPool::with_capacity(ORPHAN_BLOCK_SIZE)), - unverified_tx, - unverified_rx, + unverified_block_tx: unverified_tx, + unverified_block_rx: unverified_rx, lonely_block_tx: new_block_tx, lonely_block_rx: new_block_rx, verify_failed_blocks_tx, @@ -316,7 +316,7 @@ impl ChainService { info!("unverified_queue_consumer got exit signal, exit now"); return; }, - recv(self.unverified_rx) -> msg => match msg { + recv(self.unverified_block_rx) -> msg => match msg { Ok(unverified_task) => { // process this unverified block trace!("got an unverified block, wait cost: {:?}", begin_loop.elapsed()); @@ -324,7 +324,7 @@ impl ChainService { trace!("consume_unverified_blocks cost: {:?}", begin_loop.elapsed()); }, Err(err) => { - error!("unverified_rx err: {}", err); + error!("unverified_block_rx err: {}", err); return; }, }, @@ -459,9 +459,9 @@ impl ChainService { Some((parent_header, total_difficulty)) => { let unverified_block: UnverifiedBlock = descendant_block.combine_parent_header(parent_header); - match self.unverified_tx.send(unverified_block) { + match self.unverified_block_tx.send(unverified_block) { Ok(_) => {} - Err(err) => error!("send unverified_tx failed: {}", err), + Err(err) => error!("send unverified_block_tx failed: {}", err), }; if total_difficulty From 33e32bf0925209f17fa73d642e1c9b5258a775bb Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 4 Sep 2023 21:16:32 +0800 Subject: [PATCH 036/357] Add a dummy function to synchronize get insert_new_block's result Signed-off-by: Eval EXEC --- Cargo.lock | 1 + chain/Cargo.toml | 1 + chain/src/chain.rs | 66 +++++++++++++++----------- chain/src/orphan_block_pool.rs | 12 ++--- sync/src/relayer/mod.rs | 3 +- sync/src/synchronizer/block_process.rs | 9 ++-- sync/src/synchronizer/mod.rs | 4 +- sync/src/types/mod.rs | 21 +++++++- 8 files changed, 75 insertions(+), 42 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 063d9d5560..30d6066715 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1440,6 +1440,7 @@ 
dependencies = [ "arc-swap", "bitflags 1.3.2", "ckb-app-config", + "bitflags 1.3.2", "ckb-async-runtime", "ckb-chain-spec", "ckb-channel", diff --git a/chain/Cargo.toml b/chain/Cargo.toml index 2be6d5cde0..a6e6f4f7f5 100644 --- a/chain/Cargo.toml +++ b/chain/Cargo.toml @@ -30,6 +30,7 @@ ckb-merkle-mountain-range = "0.5.2" ckb-constant = { path = "../util/constant", version = "= 0.113.0-pre" } ckb-util = { path = "../util", version = "= 0.113.0-pre" } crossbeam = "0.8.2" +ckb-network = { path = "../network", version = "= 0.113.0-pre" } [dev-dependencies] ckb-test-chain-utils = { path = "../util/test-chain-utils", version = "= 0.114.0-pre" } diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 1fc74aef60..c795bb3640 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -50,7 +50,7 @@ use std::{cmp, thread}; const ORPHAN_BLOCK_SIZE: usize = (BLOCK_DOWNLOAD_WINDOW * 2) as usize; -type ProcessBlockRequest = Request<(LonelyBlock), Vec>; +type ProcessBlockRequest = Request>; type TruncateRequest = Request>; /// Controller to the chain service. 
@@ -100,11 +100,11 @@ impl ChainController { &self, lonely_block: LonelyBlock, ) -> Result, Error> { - Request::call(&self.process_block_sender, lonely_block).unwrap_or_else(|| { - Err(InternalErrorKind::System + Request::call(&self.process_block_sender, lonely_block).ok_or( + InternalErrorKind::System .other("Chain service has gone") - .into()) - }) + .into(), + ) } /// Truncate chain to specified target @@ -120,7 +120,9 @@ impl ChainController { // Relay need this pub fn get_orphan_block(&self, hash: &Byte32) -> Option> { - self.orphan_block_broker.get_block(hash) + self.orphan_block_broker + .get_block(hash) + .map(|lonely_block| lonely_block.block) } pub fn orphan_blocks_len(&self) -> usize { @@ -159,8 +161,8 @@ pub struct ChainService { orphan_blocks_broker: Arc, - lonely_block_tx: Sender<(LonelyBlock)>, - lonely_block_rx: Receiver<(LonelyBlock)>, + lonely_block_tx: Sender, + lonely_block_rx: Receiver, unverified_block_tx: Sender, unverified_block_rx: Receiver, @@ -169,6 +171,7 @@ pub struct ChainService { verify_failed_blocks_rx: Receiver, } +#[derive(Clone)] pub struct LonelyBlock { pub block: Arc, pub peer_id: Option, @@ -258,9 +261,9 @@ impl ChainService { .spawn(move || loop { select! 
{ recv(process_block_receiver) -> msg => match msg { - Ok(Request { responder, arguments: (block, peer_id, verify) }) => { + Ok(Request { responder, arguments: lonely_block }) => { let _ = tx_control.suspend_chunk_process(); - let _ = responder.send(self.process_block_v2(block, peer_id, verify)); + let _ = responder.send(self.process_block_v2(lonely_block)); let _ = tx_control.continue_chunk_process(); if let Some(metrics) = ckb_metrics::handle() { @@ -421,7 +424,7 @@ impl ChainService { } } } - fn search_orphan_pool(&self, switch: Switch) { + fn search_orphan_pool(&self) { for leader_hash in self.orphan_blocks_broker.clone_leaders() { if !self .shared @@ -444,15 +447,14 @@ impl ChainService { let mut accept_error_occurred = false; for descendant_block in &descendants { - let &LonelyBlock { - block: descendant, - peer_id, - switch, - } = descendant_block; - match self.accept_block(descendant.to_owned()) { + match self.accept_block(descendant_block.block.to_owned()) { Err(err) => { accept_error_occurred = true; - error!("accept block {} failed: {}", descendant.hash(), err); + error!( + "accept block {} failed: {}", + descendant_block.block.hash(), + err + ); continue; } Ok(accepted_opt) => match accepted_opt { @@ -468,20 +470,20 @@ impl ChainService { .gt(self.shared.get_unverified_tip().total_difficulty()) { self.shared.set_unverified_tip(ckb_shared::HeaderIndex::new( - descendant.header().number(), - descendant.header().hash(), + descendant_block.block.header().number(), + descendant_block.block.header().hash(), total_difficulty, )); debug!("set unverified_tip to {}-{}, while unverified_tip - verified_tip = {}", - descendant.number(), - descendant.hash(), - descendant + descendant_block.block.number(), + descendant_block.block.hash(), + descendant_block.block .number() .saturating_sub(self.shared.snapshot().tip_number())) } else { debug!("received a block {}-{} with lower or equal difficulty than unverified_tip {}-{}", - descendant.number(), - descendant.hash(), + 
descendant_block.block.number(), + descendant_block.block.hash(), self.shared.get_unverified_tip().number(), self.shared.get_unverified_tip().hash(), ); @@ -490,7 +492,7 @@ impl ChainService { None => { info!( "doesn't accept block {}, because it has been stored", - descendant.hash() + descendant_block.block.hash() ); } }, @@ -501,8 +503,16 @@ impl ChainService { debug!( "accept {} blocks [{}->{}] success", descendants.len(), - descendants.first().expect("descendants not empty").number(), - descendants.last().expect("descendants not empty").number(), + descendants + .first() + .expect("descendants not empty") + .block + .number(), + descendants + .last() + .expect("descendants not empty") + .block + .number(), ) } } diff --git a/chain/src/orphan_block_pool.rs b/chain/src/orphan_block_pool.rs index 0c73806f3c..585d07d93f 100644 --- a/chain/src/orphan_block_pool.rs +++ b/chain/src/orphan_block_pool.rs @@ -53,7 +53,7 @@ impl InnerPool { self.parents.insert(hash, parent_hash); } - pub fn remove_blocks_by_parent(&mut self, parent_hash: &ParentHash) -> Vec<(LonelyBlock)> { + pub fn remove_blocks_by_parent(&mut self, parent_hash: &ParentHash) -> Vec { // try remove leaders first if !self.leaders.remove(parent_hash) { return Vec::new(); @@ -62,7 +62,7 @@ impl InnerPool { let mut queue: VecDeque = VecDeque::new(); queue.push_back(parent_hash.to_owned()); - let mut removed: Vec<(LonelyBlock)> = Vec::new(); + let mut removed: Vec = Vec::new(); while let Some(parent_hash) = queue.pop_front() { if let Some(orphaned) = self.blocks.remove(&parent_hash) { let (hashes, blocks): (Vec<_>, Vec<_>) = orphaned.into_iter().unzip(); @@ -118,9 +118,9 @@ impl InnerPool { self.blocks .get(parent_hash) .and_then(|map| { - map.iter() - .next() - .map(|(_, block)| block.header().epoch().number() + EXPIRED_EPOCH < tip_epoch) + map.iter().next().map(|(_, lonely_block)| { + lonely_block.block.header().epoch().number() + EXPIRED_EPOCH < tip_epoch + }) }) .unwrap_or_default() } @@ -146,7 +146,7 @@ 
impl OrphanBlockPool { self.inner.write().insert(lonely_block); } - pub fn remove_blocks_by_parent(&self, parent_hash: &ParentHash) -> Vec<(LonelyBlock)> { + pub fn remove_blocks_by_parent(&self, parent_hash: &ParentHash) -> Vec { self.inner.write().remove_blocks_by_parent(parent_hash) } diff --git a/sync/src/relayer/mod.rs b/sync/src/relayer/mod.rs index f7d96c006e..40d4bded29 100644 --- a/sync/src/relayer/mod.rs +++ b/sync/src/relayer/mod.rs @@ -298,7 +298,8 @@ impl Relayer { let boxed: Arc = Arc::new(block); match self .shared() - .insert_new_block(&self.chain, Arc::clone(&boxed)) + .insert_new_block_and_wait_result(&self.chain, Arc::clone(&boxed)) + .unwrap_or(false) { Ok(true) => self.broadcast_compact_block(nc, peer, &boxed), Ok(false) => debug_target!( diff --git a/sync/src/synchronizer/block_process.rs b/sync/src/synchronizer/block_process.rs index 8fd9d75da4..0ad30fb0bf 100644 --- a/sync/src/synchronizer/block_process.rs +++ b/sync/src/synchronizer/block_process.rs @@ -6,7 +6,7 @@ use ckb_types::{packed, prelude::*}; pub struct BlockProcess<'a> { message: packed::SendBlockReader<'a>, synchronizer: &'a Synchronizer, - _peer: PeerIndex, + peer: PeerIndex, } impl<'a> BlockProcess<'a> { @@ -18,7 +18,7 @@ impl<'a> BlockProcess<'a> { BlockProcess { message, synchronizer, - _peer: peer, + peer: peer, } } @@ -32,8 +32,9 @@ impl<'a> BlockProcess<'a> { let shared = self.synchronizer.shared(); if shared.new_block_received(&block) { - let (this_block_verify_result, malformed_peers) = - self.synchronizer.process_new_block(block.clone()); + let (this_block_verify_result, malformed_peers) = self + .synchronizer + .process_new_block(block.clone(), self.peer); if let Err(err) = this_block_verify_result { if !is_internal_db_error(&err) { diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index 9537775cf9..7dcceb0661 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -347,6 +347,7 @@ impl Synchronizer { pub fn 
process_new_block( &self, block: core::BlockView, + peer_id: PeerId, ) -> Result>, CKBError> { let block_hash = block.hash(); let status = self.shared.active_chain().get_block_status(&block_hash); @@ -356,7 +357,8 @@ impl Synchronizer { error!("Block {} already partial stored", block_hash); Ok(Some(Vec::new())) } else if status.contains(BlockStatus::HEADER_VALID) { - self.shared.insert_new_block(&self.chain, Arc::new(block)) + self.shared + .insert_new_block(&self.chain, Arc::new(block), peer_id) } else { debug!( "Synchronizer process_new_block unexpected status {:?} {}", diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index 5e02456feb..89f5c03e94 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -2,7 +2,7 @@ use crate::orphan_block_pool::OrphanBlockPool; use crate::utils::is_internal_db_error; use crate::{Status, StatusCode, FAST_INDEX, LOW_INDEX, NORMAL_INDEX, TIME_TRACE_SIZE}; use ckb_app_config::SyncConfig; -use ckb_chain::chain::ChainController; +use ckb_chain::chain::{ChainController, LonelyBlock}; use ckb_chain_spec::consensus::{Consensus, MAX_BLOCK_INTERVAL, MIN_BLOCK_INTERVAL}; use ckb_channel::Receiver; use ckb_constant::sync::{ @@ -1077,11 +1077,20 @@ impl SyncShared { self.shared.consensus() } + pub fn insert_new_block_and_wait_result( + &self, + chain: &ChainController, + block: Arc, + ) -> Result { + todo!("") + } + /// Insert new block to chain store pub fn insert_new_block( &self, chain: &ChainController, block: Arc, + peer_id: PeerId, ) -> Result, CKBError> { // Insert the given block into orphan_block_pool if its parent is not found // if !self.is_stored(&block.parent_hash()) { @@ -1095,7 +1104,7 @@ impl SyncShared { // } // Attempt to accept the given block if its parent already exist in database - let ret = self.accept_block(chain, Arc::clone(&block)); + let ret = self.accept_block(chain, Arc::clone(&block), peer_id); if ret.is_err() { debug!("accept block {:?} {:?}", block, ret); return ret; @@ -1181,6 +1190,14 @@ 
impl SyncShared { } }; + // TODO move switch logic to ckb-chain + let lonely_block = LonelyBlock { + block, + peer_id: None, + switch: Switch::NONE, + }; + let ret = chain.process_block(lonely_block); + if let Err(ref error) = ret { if !is_internal_db_error(error) { error!("accept block {:?} {}", block, error); From a8553f85a2069f231c0f77ba07942f53ec7404e5 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 4 Sep 2023 21:17:36 +0800 Subject: [PATCH 037/357] Use `PeerIndex` instead of `PeerId` in `ckb-chain` --- chain/src/chain.rs | 6 +++--- chain/src/orphan_block_pool.rs | 1 - 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index c795bb3640..cc86c36ba8 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -12,7 +12,7 @@ use ckb_logger::{ self, debug, error, info, log_enabled, log_enabled_target, trace, trace_target, warn, }; use ckb_merkle_mountain_range::leaf_index_to_mmr_size; -use ckb_network::PeerId; +use ckb_network::PeerIndex; use ckb_proposal_table::ProposalTable; #[cfg(debug_assertions)] use ckb_rust_unstable_port::IsSorted; @@ -174,7 +174,7 @@ pub struct ChainService { #[derive(Clone)] pub struct LonelyBlock { pub block: Arc, - pub peer_id: Option, + pub peer_id: Option, pub switch: Switch, } @@ -193,7 +193,7 @@ impl LonelyBlock { struct UnverifiedBlock { block: Arc, parent_header: HeaderView, - peer_id: Option, + peer_id: Option, switch: Switch, } diff --git a/chain/src/orphan_block_pool.rs b/chain/src/orphan_block_pool.rs index 585d07d93f..013f677daa 100644 --- a/chain/src/orphan_block_pool.rs +++ b/chain/src/orphan_block_pool.rs @@ -1,6 +1,5 @@ use crate::chain::LonelyBlock; use ckb_logger::debug; -use ckb_network::PeerId; use ckb_types::core::EpochNumber; use ckb_types::{core, packed}; use ckb_util::{parking_lot::RwLock, shrink_to_fit}; From a87ebc57a74e42f1898023391bb6bb7c156d6bc2 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 5 Sep 2023 14:54:52 +0800 Subject: [PATCH 038/357] Fix 
VerifyFAiledBlockInfo's peer_id type --- shared/src/types/mod.rs | 6 ++-- sync/src/synchronizer/block_process.rs | 40 +++++++++++++++++--------- sync/src/synchronizer/mod.rs | 24 ++++++++++++---- sync/src/types/mod.rs | 37 ++++++++++++------------ 4 files changed, 68 insertions(+), 39 deletions(-) diff --git a/shared/src/types/mod.rs b/shared/src/types/mod.rs index a1f38faa85..898154d3e7 100644 --- a/shared/src/types/mod.rs +++ b/shared/src/types/mod.rs @@ -1,4 +1,4 @@ -use ckb_network::PeerId; +use ckb_network::PeerIndex; use ckb_types::core::{BlockNumber, EpochNumberWithFraction}; use ckb_types::packed::Byte32; use ckb_types::prelude::{Entity, FromSliceShouldBeOk, Reader}; @@ -309,5 +309,7 @@ pub const SHRINK_THRESHOLD: usize = 300; #[derive(Clone, Debug, PartialEq, Eq)] pub struct VerifyFailedBlockInfo { pub block_hash: Byte32, - pub peer_id: PeerId, + pub peer_id: PeerIndex, + pub message_bytes: u64, + pub reason: String, } diff --git a/sync/src/synchronizer/block_process.rs b/sync/src/synchronizer/block_process.rs index 0ad30fb0bf..257a983d1b 100644 --- a/sync/src/synchronizer/block_process.rs +++ b/sync/src/synchronizer/block_process.rs @@ -1,12 +1,14 @@ use crate::{synchronizer::Synchronizer, utils::is_internal_db_error, Status, StatusCode}; -use ckb_logger::debug; +use ckb_logger::{debug, error}; use ckb_network::PeerIndex; +use ckb_shared::types::VerifyFailedBlockInfo; use ckb_types::{packed, prelude::*}; pub struct BlockProcess<'a> { message: packed::SendBlockReader<'a>, synchronizer: &'a Synchronizer, peer: PeerIndex, + message_bytes: usize, } impl<'a> BlockProcess<'a> { @@ -14,15 +16,17 @@ impl<'a> BlockProcess<'a> { message: packed::SendBlockReader<'a>, synchronizer: &'a Synchronizer, peer: PeerIndex, + message_bytes: usize, ) -> Self { BlockProcess { message, synchronizer, - peer: peer, + peer, + message_bytes, } } - pub fn execute(self) -> Status { + pub fn execute(self) -> Vec { let block = self.message.block().to_entity().into_view(); debug!( 
"BlockProcess received block {} {}", @@ -32,21 +36,29 @@ impl<'a> BlockProcess<'a> { let shared = self.synchronizer.shared(); if shared.new_block_received(&block) { - let (this_block_verify_result, malformed_peers) = self + match self .synchronizer - .process_new_block(block.clone(), self.peer); - - if let Err(err) = this_block_verify_result { - if !is_internal_db_error(&err) { - return StatusCode::BlockIsInvalid.with_context(format!( - "{}, error: {}", - block.hash(), - err, - )); + .process_new_block(block.clone(), self.peer, self.message_bytes) + { + Ok(verify_failed_peers) => { + return verify_failed_peers; + } + Err(err) => { + error!("BlockProcess process_new_block error: {:?}", err); } } + + // if let Err(err) = this_block_verify_result { + // if !is_internal_db_error(&err) { + // return StatusCode::BlockIsInvalid.with_context(format!( + // "{}, error: {}", + // block.hash(), + // err, + // )); + // } + // } } - Status::ok() + Vec::new() } } diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index 7dcceb0661..bfe6f847f0 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -270,7 +270,21 @@ impl Synchronizer { } packed::SyncMessageUnionReader::SendBlock(reader) => { if reader.check_data() { - BlockProcess::new(reader, self, peer).execute() + let verify_failed_peers = + BlockProcess::new(reader, self, peer, message.as_slice().len()).execute(); + + verify_failed_peers.iter().for_each(|malformed_peer_info| { + Self::post_sync_process( + nc, + malformed_peer_info.peer, + "SendBlock", + 0, + StatusCode::BlockIsInvalid.with_context(format!( + "block {} is invalid, reason: {}", + malformed_peer_info.block_hash, malformed_peer_info.reason + )), + ); + }) } else { StatusCode::ProtocolMessageIsMalformed.with_context("SendBlock is invalid") } @@ -347,15 +361,15 @@ impl Synchronizer { pub fn process_new_block( &self, block: core::BlockView, - peer_id: PeerId, - ) -> Result>, CKBError> { + peer_id: PeerIndex, + ) -> Result, 
CKBError> { let block_hash = block.hash(); let status = self.shared.active_chain().get_block_status(&block_hash); // NOTE: Filtering `BLOCK_STORED` but not `BLOCK_RECEIVED`, is for avoiding // stopping synchronization even when orphan_pool maintains dirty items by bugs. if status.contains(BlockStatus::BLOCK_PARTIAL_STORED) { error!("Block {} already partial stored", block_hash); - Ok(Some(Vec::new())) + Ok(Vec::new()) } else if status.contains(BlockStatus::HEADER_VALID) { self.shared .insert_new_block(&self.chain, Arc::new(block), peer_id) @@ -365,7 +379,7 @@ impl Synchronizer { status, block_hash, ); // TODO which error should we return? - (Ok(Some(Vec::new()))) + (Ok(Vec::new())) } } diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index 89f5c03e94..11dacc299e 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -1090,7 +1090,7 @@ impl SyncShared { &self, chain: &ChainController, block: Arc, - peer_id: PeerId, + peer_id: PeerIndex, ) -> Result, CKBError> { // Insert the given block into orphan_block_pool if its parent is not found // if !self.is_stored(&block.parent_hash()) { @@ -1172,28 +1172,29 @@ impl SyncShared { &self, chain: &ChainController, block: Arc, + peer_id: PeerIndex, ) -> Result, CKBError> { - let ret = { - let mut assume_valid_target = self.state.assume_valid_target(); - if let Some(ref target) = *assume_valid_target { - // if the target has been reached, delete it - let switch = if target == &Unpack::::unpack(&core::BlockView::hash(&block)) { - assume_valid_target.take(); - Switch::NONE - } else { - Switch::DISABLE_SCRIPT - }; - - chain.internal_process_block(Arc::clone(&block), switch) - } else { - chain.process_block(Arc::clone(&block)) - } - }; + // let ret = { + // let mut assume_valid_target = self.state.assume_valid_target(); + // if let Some(ref target) = *assume_valid_target { + // // if the target has been reached, delete it + // let switch = if target == &Unpack::::unpack(&core::BlockView::hash(&block)) { + // 
assume_valid_target.take(); + // Switch::NONE + // } else { + // Switch::DISABLE_SCRIPT + // }; + // + // chain.internal_process_block(Arc::clone(&block), switch) + // } else { + // chain.process_block(Arc::clone(&block)) + // } + // }; // TODO move switch logic to ckb-chain let lonely_block = LonelyBlock { block, - peer_id: None, + peer_id, switch: Switch::NONE, }; let ret = chain.process_block(lonely_block); From c1b76e15603bc631edf9afb79dfc487b8ef6430c Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 12 Sep 2023 16:08:35 +0800 Subject: [PATCH 039/357] Get malformed peer_id from Synchronizer::poll --- chain/src/chain.rs | 44 ++++++++++++++++-------------------- sync/src/synchronizer/mod.rs | 24 ++++++++++++++++++++ 2 files changed, 43 insertions(+), 25 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index cc86c36ba8..152760fc34 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -12,7 +12,7 @@ use ckb_logger::{ self, debug, error, info, log_enabled, log_enabled_target, trace, trace_target, warn, }; use ckb_merkle_mountain_range::leaf_index_to_mmr_size; -use ckb_network::PeerIndex; +use ckb_network::{PeerIndex, tokio}; use ckb_proposal_table::ProposalTable; #[cfg(debug_assertions)] use ckb_rust_unstable_port::IsSorted; @@ -47,6 +47,7 @@ use std::sync::Arc; use std::time::Duration; use std::time::Instant; use std::{cmp, thread}; +use std::iter::Cloned; const ORPHAN_BLOCK_SIZE: usize = (BLOCK_DOWNLOAD_WINDOW * 2) as usize; @@ -167,8 +168,7 @@ pub struct ChainService { unverified_block_tx: Sender, unverified_block_rx: Receiver, - verify_failed_blocks_tx: Sender, - verify_failed_blocks_rx: Receiver, + verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, } #[derive(Clone)] @@ -206,7 +206,6 @@ impl ChainService { let (new_block_tx, new_block_rx) = channel::bounded::(BLOCK_DOWNLOAD_WINDOW as usize); - let (verify_failed_blocks_tx, verify_failed_blocks_rx) = channel::unbounded(); ChainService { shared, @@ -217,7 +216,6 @@ impl 
ChainService { lonely_block_tx: new_block_tx, lonely_block_rx: new_block_rx, verify_failed_blocks_tx, - verify_failed_blocks_rx, } } @@ -361,16 +359,13 @@ impl ChainService { err ); if let Some(peer_id) = unverified_block.peer_id { - if let Err(SendError(peer_id)) = - self.verify_failed_blocks_tx.send(VerifyFailedBlockInfo { - block_hash: unverified_block.block.hash(), - peer_id, - }) - { - error!( - "send verify_failed_blocks_tx failed for peer: {:?}", - peer_id - ); + if Err(_) = self.verify_failed_blocks_tx.send(VerifyFailedBlockInfo{ + block_hash: unverified_block.block.hash(), + peer_id, + message_bytes: 0, + reason: "".to_string(), + }){ + error!("ChainService want to send VerifyFailedBlockInfo to Synchronizer, but Synchronizer has dropped the receiver"); } } @@ -615,7 +610,7 @@ impl ChainService { // make block IO and verify asynchronize #[doc(hidden)] - pub fn process_block_v2(&self, lonely_block: LonelyBlock) -> Vec { + pub fn process_block_v2(&self, lonely_block: LonelyBlock) { let block_number = lonely_block.block.number(); let block_hash = lonely_block.block.hash(); if block_number < 1 { @@ -629,13 +624,14 @@ impl ChainService { let result = self.non_contextual_verify(&lonely_block.block); match result { Err(err) => { - if let Some(peer_id) = lonely_block.peer_id { - failed_blocks_peer_ids.push(VerifyFailedBlockInfo { - block_hash, - peer_id, - }); + if Err(_) = self.verify_failed_blocks_tx.send(VerifyFailedBlockInfo{ + block_hash: lonely_block.block.hash(), + peer_id, + message_bytes: 0, + reason: err.to_string(), + }){ + error!("ChainService want to send VerifyFailedBlockInfo to Synchronizer, but Synchronizer has dropped the receiver"); } - return failed_blocks_peer_ids; } _ => {} } @@ -648,16 +644,14 @@ impl ChainService { } } debug!( - "processing block: {}-{}, orphan_len: {}, (tip:unverified_tip):({}:{}), and return failed_blocks_peer_ids: {:?}", + "processing block: {}-{}, orphan_len: {}, (tip:unverified_tip):({}:{})", block_number, block_hash, 
self.orphan_blocks_broker.len(), self.shared.snapshot().tip_number(), self.shared.get_unverified_tip().number(), - failed_blocks_peer_ids, ); - failed_blocks_peer_ids } fn accept_block(&self, block: Arc) -> Result, Error> { diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index bfe6f847f0..01c33c5e6f 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -233,6 +233,8 @@ pub struct Synchronizer { /// Sync shared state pub shared: Arc, fetch_channel: Option>, + + verify_failed_blocks_rx: tokio::sync::mpsc::UnboundedReceiver, } impl Synchronizer { @@ -886,4 +888,26 @@ impl CKBProtocolHandler for Synchronizer { debug!("No peers connected"); } } + + async fn poll(&mut self, nc: Arc) -> Option<()> { + let mut have_malformed_peers = false; + while let Some(malformed_peer_info) = self.verify_failed_blocks_rx.recv().await { + have_malformed_peers = true; + let x = Self::post_sync_process( + &nc, + malformed_peer_info.peer, + "SendBlock", + malformed_peer_info.message_bytes, + StatusCode::BlockIsInvalid.with_context(format!( + "block {} is invalid, reason: {}", + malformed_peer_info.block_hash, malformed_peer_info.reason + )), + ); + + } + if have_malformed_peers { + return Some(()) + } + None + } } From b4b029bc9b190bfeb63864471071b74f11ed9063 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 12 Sep 2023 16:32:32 +0800 Subject: [PATCH 040/357] Let ChainController's methods signature same as ChainService's --- chain/src/chain.rs | 6 +++--- sync/src/types/mod.rs | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 152760fc34..536d3fe941 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -51,7 +51,7 @@ use std::iter::Cloned; const ORPHAN_BLOCK_SIZE: usize = (BLOCK_DOWNLOAD_WINDOW * 2) as usize; -type ProcessBlockRequest = Request>; +type ProcessBlockRequest = Request; type TruncateRequest = Request>; /// Controller to the chain service. 
@@ -90,7 +90,7 @@ impl ChainController { pub fn process_block( &self, lonely_block: LonelyBlock, - ) -> Result, Error> { + ) -> Result<(), Error> { self.internal_process_block(lonely_block) } @@ -100,7 +100,7 @@ impl ChainController { pub fn internal_process_block( &self, lonely_block: LonelyBlock, - ) -> Result, Error> { + ) -> Result<(), Error> { Request::call(&self.process_block_sender, lonely_block).ok_or( InternalErrorKind::System .other("Chain service has gone") diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index 11dacc299e..8c0b776143 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -1194,7 +1194,7 @@ impl SyncShared { // TODO move switch logic to ckb-chain let lonely_block = LonelyBlock { block, - peer_id, + Some(peer_id), switch: Switch::NONE, }; let ret = chain.process_block(lonely_block); From f74a89383d37d22ca99859b43583dea42d7890d1 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 12 Sep 2023 23:47:47 +0800 Subject: [PATCH 041/357] Move fields of UnverifiedBlock to LonelyBlock --- chain/src/chain.rs | 94 +++++++++++++++++------------------- sync/src/synchronizer/mod.rs | 3 +- sync/src/types/mod.rs | 37 +++++++------- 3 files changed, 64 insertions(+), 70 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 536d3fe941..193fca1db7 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -12,7 +12,7 @@ use ckb_logger::{ self, debug, error, info, log_enabled, log_enabled_target, trace, trace_target, warn, }; use ckb_merkle_mountain_range::leaf_index_to_mmr_size; -use ckb_network::{PeerIndex, tokio}; +use ckb_network::{tokio, PeerIndex}; use ckb_proposal_table::ProposalTable; #[cfg(debug_assertions)] use ckb_rust_unstable_port::IsSorted; @@ -42,12 +42,13 @@ use ckb_verification_contextual::{ContextualBlockVerifier, VerifyContext}; use ckb_verification_traits::{Switch, Verifier}; use crossbeam::channel::SendTimeoutError; use std::collections::{HashSet, VecDeque}; +use std::iter::Cloned; use 
std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use std::time::Duration; use std::time::Instant; use std::{cmp, thread}; -use std::iter::Cloned; +use ckb_types::packed::UncleBlockVecReaderIterator; const ORPHAN_BLOCK_SIZE: usize = (BLOCK_DOWNLOAD_WINDOW * 2) as usize; @@ -87,25 +88,17 @@ impl ChainController { /// If the block already exists, does nothing and false is returned. /// /// [BlockVerifier] [NonContextualBlockTxsVerifier] [ContextualBlockVerifier] will performed - pub fn process_block( - &self, - lonely_block: LonelyBlock, - ) -> Result<(), Error> { + pub fn process_block(&self, lonely_block: LonelyBlock) { self.internal_process_block(lonely_block) } /// Internal method insert block for test /// /// switch bit flags for particular verify, make easier to generating test data - pub fn internal_process_block( - &self, - lonely_block: LonelyBlock, - ) -> Result<(), Error> { - Request::call(&self.process_block_sender, lonely_block).ok_or( - InternalErrorKind::System - .other("Chain service has gone") - .into(), - ) + pub fn internal_process_block(&self, lonely_block: LonelyBlock) { + if Request::call(&self.process_block_sender, lonely_block).is_none() { + error!("Chain service has gone") + } } /// Truncate chain to specified target @@ -176,37 +169,38 @@ pub struct LonelyBlock { pub block: Arc, pub peer_id: Option, pub switch: Switch, + + pub verify_result_tx: Option>, } impl LonelyBlock { - fn combine_parent_header(&self, parent_header: HeaderView) -> UnverifiedBlock { + fn combine_parent_header(self, parent_header: HeaderView) -> UnverifiedBlock { UnverifiedBlock { - block: self.block.clone(), parent_header, - peer_id: self.peer_id.clone(), - switch: self.switch, + lonely_block:self, } } } #[derive(Clone)] struct UnverifiedBlock { - block: Arc, + lonely_block: LonelyBlock, parent_header: HeaderView, - peer_id: Option, - switch: Switch, } impl ChainService { /// Create a new ChainService instance with shared and initial proposal_table. 
- pub fn new(shared: Shared, proposal_table: ProposalTable) -> ChainService { + pub fn new( + shared: Shared, + proposal_table: ProposalTable, + verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, + ) -> ChainService { let (unverified_tx, unverified_rx) = channel::bounded::(BLOCK_DOWNLOAD_WINDOW as usize * 3); let (new_block_tx, new_block_rx) = channel::bounded::(BLOCK_DOWNLOAD_WINDOW as usize); - ChainService { shared, proposal_table: Arc::new(Mutex::new(proposal_table)), @@ -340,13 +334,13 @@ impl ChainService { Ok(_) => { let log_now = std::time::Instant::now(); self.shared - .remove_block_status(&unverified_block.block.hash()); + .remove_block_status(&unverified_block.block().hash()); let log_elapsed_remove_block_status = log_now.elapsed(); self.shared - .remove_header_view(&unverified_block.block.hash()); + .remove_header_view(&unverified_block.unverified_block.block.hash()); debug!( "block {} remove_block_status cost: {:?}, and header_view cost: {:?}", - unverified_block.block.hash(), + unverified_block.unverified_block.block.hash(), log_elapsed_remove_block_status, log_now.elapsed() ); @@ -354,17 +348,17 @@ impl ChainService { Err(err) => { error!( "verify [{:?}]'s block {} failed: {}", - unverified_block.peer_id, - unverified_block.block.hash(), + unverified_block.unverified_block.peer_id, + unverified_block.unverified_block.block.hash(), err ); - if let Some(peer_id) = unverified_block.peer_id { - if Err(_) = self.verify_failed_blocks_tx.send(VerifyFailedBlockInfo{ - block_hash: unverified_block.block.hash(), + if let Some(peer_id) = unverified_block.unverified_block.peer_id { + if Err(_) = self.verify_failed_blocks_tx.send(VerifyFailedBlockInfo { + block_hash: unverified_block.unverified_block.block.hash(), peer_id, message_bytes: 0, reason: "".to_string(), - }){ + }) { error!("ChainService want to send VerifyFailedBlockInfo to Synchronizer, but Synchronizer has dropped the receiver"); } } @@ -387,12 +381,12 @@ impl ChainService { )); self.shared 
- .insert_block_status(unverified_block.block.hash(), BlockStatus::BLOCK_INVALID); + .insert_block_status(unverified_block.unverified_block.block.hash(), BlockStatus::BLOCK_INVALID); error!( "set_unverified tip to {}-{}, because verify {} failed: {}", tip.number(), tip.hash(), - unverified_block.block.hash(), + unverified_block.unverified_block.block.hash(), err ); } @@ -439,9 +433,11 @@ impl ChainService { ); continue; } + let descendants_len = descendants.len(); + let first_descendants_number = descendants.first().expect("descdant not empty").number(); let mut accept_error_occurred = false; - for descendant_block in &descendants { + for descendant_block in descendants { match self.accept_block(descendant_block.block.to_owned()) { Err(err) => { accept_error_occurred = true; @@ -617,20 +613,19 @@ impl ChainService { warn!("receive 0 number block: 0-{}", block_hash); } - let mut failed_blocks_peer_ids: Vec = - self.verify_failed_blocks_rx.iter().collect(); - if !lonely_block.switch.disable_non_contextual() { let result = self.non_contextual_verify(&lonely_block.block); match result { Err(err) => { - if Err(_) = self.verify_failed_blocks_tx.send(VerifyFailedBlockInfo{ - block_hash: lonely_block.block.hash(), - peer_id, - message_bytes: 0, - reason: err.to_string(), - }){ - error!("ChainService want to send VerifyFailedBlockInfo to Synchronizer, but Synchronizer has dropped the receiver"); + if let Some(peer_id) = lonely_block.peer_id { + if Err(_) = self.verify_failed_blocks_tx.send(VerifyFailedBlockInfo { + block_hash: lonely_block.block.hash(), + peer_id, + message_bytes: 0, + reason: err.to_string(), + }) { + error!("ChainService want to send VerifyFailedBlockInfo to Synchronizer, but Synchronizer has dropped the receiver"); + } } } _ => {} @@ -651,7 +646,6 @@ impl ChainService { self.shared.snapshot().tip_number(), self.shared.get_unverified_tip().number(), ); - } fn accept_block(&self, block: Arc) -> Result, Error> { @@ -741,10 +735,10 @@ impl ChainService { let 
log_now = std::time::Instant::now(); let UnverifiedBlock { - block, parent_header, - peer_id, - switch, + lonely_block: LonelyBlock{ + block, peer_id, switch, verify_result_tx + } } = unverified_block; let parent_ext = self diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index 01c33c5e6f..529a0a5f5b 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -903,10 +903,9 @@ impl CKBProtocolHandler for Synchronizer { malformed_peer_info.block_hash, malformed_peer_info.reason )), ); - } if have_malformed_peers { - return Some(()) + return Some(()); } None } diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index 8c0b776143..89bb5280ba 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -1091,7 +1091,7 @@ impl SyncShared { chain: &ChainController, block: Arc, peer_id: PeerIndex, - ) -> Result, CKBError> { + ) { // Insert the given block into orphan_block_pool if its parent is not found // if !self.is_stored(&block.parent_hash()) { // debug!( @@ -1104,16 +1104,16 @@ impl SyncShared { // } // Attempt to accept the given block if its parent already exist in database - let ret = self.accept_block(chain, Arc::clone(&block), peer_id); - if ret.is_err() { - debug!("accept block {:?} {:?}", block, ret); - return ret; - } + self.accept_block(chain, Arc::clone(&block), peer_id); + // if ret.is_err() { + // debug!("accept block {:?} {:?}", block, ret); + // return ret; + // } // The above block has been accepted. Attempt to accept its descendant blocks in orphan pool. 
// The returned blocks of `remove_blocks_by_parent` are in topology order by parents // self.try_search_orphan_pool(chain); - ret + // ret } /// Try to find blocks from the orphan block pool that may no longer be orphan @@ -1173,7 +1173,7 @@ impl SyncShared { chain: &ChainController, block: Arc, peer_id: PeerIndex, - ) -> Result, CKBError> { + ) { // let ret = { // let mut assume_valid_target = self.state.assume_valid_target(); // if let Some(ref target) = *assume_valid_target { @@ -1194,19 +1194,20 @@ impl SyncShared { // TODO move switch logic to ckb-chain let lonely_block = LonelyBlock { block, - Some(peer_id), + peer_id: Some(peer_id), switch: Switch::NONE, }; - let ret = chain.process_block(lonely_block); - if let Err(ref error) = ret { - if !is_internal_db_error(error) { - error!("accept block {:?} {}", block, error); - self.shared() - .insert_block_status(block.header().hash(), BlockStatus::BLOCK_INVALID); - } - } - ret + chain.process_block(lonely_block); + + // if let Err(ref error) = ret { + // if !is_internal_db_error(error) { + // error!("accept block {:?} {}", block, error); + // self.shared() + // .insert_block_status(block.header().hash(), BlockStatus::BLOCK_INVALID); + // } + // } + // ret } /// Sync a new valid header, try insert to sync state From c3c8ec983d8d9c3c2316b513fba03cdfcf33db8e Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Sat, 16 Sep 2023 16:00:00 +0800 Subject: [PATCH 042/357] Extract `Relayer::build_and_broadcast_compact_block` function --- sync/src/relayer/mod.rs | 33 ++++++++------------------------- 1 file changed, 8 insertions(+), 25 deletions(-) diff --git a/sync/src/relayer/mod.rs b/sync/src/relayer/mod.rs index 40d4bded29..b4637be33b 100644 --- a/sync/src/relayer/mod.rs +++ b/sync/src/relayer/mod.rs @@ -34,6 +34,7 @@ use ckb_network::{ }; use ckb_shared::block_status::BlockStatus; use ckb_shared::types::BlockNumberAndHash; +use ckb_shared::Shared; use ckb_systemtime::unix_time_as_millis; use 
ckb_tx_pool::service::TxVerificationResult; use ckb_types::{ @@ -301,27 +302,13 @@ impl Relayer { .insert_new_block_and_wait_result(&self.chain, Arc::clone(&boxed)) .unwrap_or(false) { - Ok(true) => self.broadcast_compact_block(nc, peer, &boxed), - Ok(false) => debug_target!( - crate::LOG_TARGET_RELAY, - "Relayer accept_block received an uncle block, don't broadcast compact block" - ), - Err(err) => { - if !is_internal_db_error(&err) { - return StatusCode::BlockIsInvalid.with_context(format!( - "{}, error: {}", - boxed.hash(), - err, - )); - } - } + Self::build_and_broadcast_compact_block(nc, self.shared.shared(), peer, &boxed) } - Status::ok() } - fn broadcast_compact_block( - &self, + fn build_and_broadcast_compact_block( nc: &dyn CKBProtocolContext, + shared: &Shared, peer: PeerIndex, boxed: &Arc, ) { @@ -332,8 +319,8 @@ impl Relayer { unix_time_as_millis() ); let block_hash = boxed.hash(); - self.shared().state().remove_header_view(&block_hash); - let cb = packed::CompactBlock::build_from_block(boxed, &HashSet::new()); + shared.remove_header_view(&block_hash); + let cb = packed::CompactBlock::build_from_block(&boxed, &HashSet::new()); let message = packed::RelayMessage::new_builder().set(cb).build(); let selected_peers: Vec = nc @@ -351,13 +338,10 @@ impl Relayer { "relayer send block when accept block error: {:?}", err, ); - let block_hash = boxed.hash(); - self.shared().shared().remove_header_view(&block_hash); - let cb = packed::CompactBlock::build_from_block(&boxed, &HashSet::new()); - let message = packed::RelayMessage::new_builder().set(cb).build(); + } if let Some(p2p_control) = nc.p2p_control() { - let snapshot = self.shared.shared().snapshot(); + let snapshot = shared.snapshot(); let parent_chain_root = { let mmr = snapshot.chain_root_mmr(boxed.header().number() - 1); match mmr.get_root() { @@ -368,7 +352,6 @@ impl Relayer { "Generate last state to light client failed: {:?}", err ); - return; } } }; From 7d4dd9b682eba49557cd0796f118dfd88fa224af Mon Sep 
17 00:00:00 2001 From: Eval EXEC Date: Sat, 16 Sep 2023 16:10:49 +0800 Subject: [PATCH 043/357] Remove verify_result_tx from LonelyBlock --- chain/src/chain.rs | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 193fca1db7..3caea9cb00 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -22,6 +22,7 @@ use ckb_shared::types::VerifyFailedBlockInfo; use ckb_stop_handler::{new_crossbeam_exit_rx, register_thread}; use ckb_store::{attach_block_cell, detach_block_cell, ChainStore, StoreTransaction}; use ckb_systemtime::unix_time_as_millis; +use ckb_types::packed::UncleBlockVecReaderIterator; use ckb_types::{ core::{ cell::{ @@ -48,7 +49,6 @@ use std::sync::Arc; use std::time::Duration; use std::time::Instant; use std::{cmp, thread}; -use ckb_types::packed::UncleBlockVecReaderIterator; const ORPHAN_BLOCK_SIZE: usize = (BLOCK_DOWNLOAD_WINDOW * 2) as usize; @@ -169,15 +169,13 @@ pub struct LonelyBlock { pub block: Arc, pub peer_id: Option, pub switch: Switch, - - pub verify_result_tx: Option>, } impl LonelyBlock { fn combine_parent_header(self, parent_header: HeaderView) -> UnverifiedBlock { UnverifiedBlock { parent_header, - lonely_block:self, + lonely_block: self, } } } @@ -380,8 +378,10 @@ impl ChainService { tip_ext.total_difficulty, )); - self.shared - .insert_block_status(unverified_block.unverified_block.block.hash(), BlockStatus::BLOCK_INVALID); + self.shared.insert_block_status( + unverified_block.unverified_block.block.hash(), + BlockStatus::BLOCK_INVALID, + ); error!( "set_unverified tip to {}-{}, because verify {} failed: {}", tip.number(), @@ -434,7 +434,8 @@ impl ChainService { continue; } let descendants_len = descendants.len(); - let first_descendants_number = descendants.first().expect("descdant not empty").number(); + let first_descendants_number = + descendants.first().expect("descdant not empty").number(); let mut accept_error_occurred = false; for 
descendant_block in descendants { @@ -736,9 +737,12 @@ impl ChainService { let UnverifiedBlock { parent_header, - lonely_block: LonelyBlock{ - block, peer_id, switch, verify_result_tx - } + lonely_block: + LonelyBlock { + block, + peer_id, + switch, + }, } = unverified_block; let parent_ext = self From 3bd213064281ebf1ff62462d480416e97d46c572 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Sat, 16 Sep 2023 17:07:31 +0800 Subject: [PATCH 044/357] Fix unverified_block.lonely_block --- chain/src/chain.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 3caea9cb00..42e8999e34 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -335,10 +335,10 @@ impl ChainService { .remove_block_status(&unverified_block.block().hash()); let log_elapsed_remove_block_status = log_now.elapsed(); self.shared - .remove_header_view(&unverified_block.unverified_block.block.hash()); + .remove_header_view(&unverified_block.lonely_block.block.hash()); debug!( "block {} remove_block_status cost: {:?}, and header_view cost: {:?}", - unverified_block.unverified_block.block.hash(), + unverified_block.lonely_block.block.hash(), log_elapsed_remove_block_status, log_now.elapsed() ); @@ -346,13 +346,13 @@ impl ChainService { Err(err) => { error!( "verify [{:?}]'s block {} failed: {}", - unverified_block.unverified_block.peer_id, - unverified_block.unverified_block.block.hash(), + unverified_block.lonely_block.peer_id, + unverified_block.lonely_block.block.hash(), err ); - if let Some(peer_id) = unverified_block.unverified_block.peer_id { + if let Some(peer_id) = unverified_block.lonely_block.peer_id { if Err(_) = self.verify_failed_blocks_tx.send(VerifyFailedBlockInfo { - block_hash: unverified_block.unverified_block.block.hash(), + block_hash: unverified_block.lonely_block.block.hash(), peer_id, message_bytes: 0, reason: "".to_string(), @@ -379,14 +379,14 @@ impl ChainService { )); self.shared.insert_block_status( - 
unverified_block.unverified_block.block.hash(), + unverified_block.lonely_block.block.hash(), BlockStatus::BLOCK_INVALID, ); error!( "set_unverified tip to {}-{}, because verify {} failed: {}", tip.number(), tip.hash(), - unverified_block.unverified_block.block.hash(), + unverified_block.lonely_block.block.hash(), err ); } From 7cd430f36990a3a49a3bfcd991a72f81fa635359 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Sun, 17 Sep 2023 09:17:23 +0800 Subject: [PATCH 045/357] Add callback for ChainService --- chain/src/chain.rs | 22 ++++++++++++++++++++++ sync/src/relayer/mod.rs | 35 +++++++++++++++++++---------------- sync/src/types/mod.rs | 16 +++++++++++++--- 3 files changed, 54 insertions(+), 19 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 42e8999e34..2d676f584d 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -169,6 +169,7 @@ pub struct LonelyBlock { pub block: Arc, pub peer_id: Option, pub switch: Switch, + pub verify_ok_callback: Option)>, } impl LonelyBlock { @@ -342,6 +343,27 @@ impl ChainService { log_elapsed_remove_block_status, log_now.elapsed() ); + + // start execute this block's callback function + match ( + unverified_block.lonely_block.verify_ok_callback, + unverified_block.lonely_block.peer_id, + ) { + (Some(verify_ok_callback), Some(peer_id)) => { + verify_ok_callback( + &self.shared, + peer_id, + unverified_block.lonely_block.block, + ); + } + (Some(verify_ok_callback), _) => { + error!( + "block {} verify_ok_callback have no peer_id, this should not happen", + unverified_block.lonely_block.block.hash() + ); + } + _ => {} + } } Err(err) => { error!( diff --git a/sync/src/relayer/mod.rs b/sync/src/relayer/mod.rs index b4637be33b..8eba21fa46 100644 --- a/sync/src/relayer/mod.rs +++ b/sync/src/relayer/mod.rs @@ -296,31 +296,34 @@ impl Relayer { return Status::ok(); } - let boxed: Arc = Arc::new(block); - match self - .shared() - .insert_new_block_and_wait_result(&self.chain, Arc::clone(&boxed)) - .unwrap_or(false) - 
{ - Self::build_and_broadcast_compact_block(nc, self.shared.shared(), peer, &boxed) - } + let block = Arc::new(block); + let verify_success_callback = |shared: &Shared, peer: PeerIndex, block: Arc| { + Self::build_and_broadcast_compact_block(nc, shared, peer, block) + }; + + self.shared().insert_new_block_with_callback( + &self.chain, + Arc::clone(&block), + peer, + verify_success_callback, + ); } fn build_and_broadcast_compact_block( nc: &dyn CKBProtocolContext, shared: &Shared, peer: PeerIndex, - boxed: &Arc, + block: Arc, ) { debug_target!( crate::LOG_TARGET_RELAY, "[block_relay] relayer accept_block {} {}", - boxed.header().hash(), + block.header().hash(), unix_time_as_millis() ); - let block_hash = boxed.hash(); + let block_hash = block.hash(); shared.remove_header_view(&block_hash); - let cb = packed::CompactBlock::build_from_block(&boxed, &HashSet::new()); + let cb = packed::CompactBlock::build_from_block(&block, &HashSet::new()); let message = packed::RelayMessage::new_builder().set(cb).build(); let selected_peers: Vec = nc @@ -343,7 +346,7 @@ impl Relayer { if let Some(p2p_control) = nc.p2p_control() { let snapshot = shared.snapshot(); let parent_chain_root = { - let mmr = snapshot.chain_root_mmr(boxed.header().number() - 1); + let mmr = snapshot.chain_root_mmr(block.header().number() - 1); match mmr.get_root() { Ok(root) => root, Err(err) => { @@ -357,9 +360,9 @@ impl Relayer { }; let tip_header = packed::VerifiableHeader::new_builder() - .header(boxed.header().data()) - .uncles_hash(boxed.calc_uncles_hash()) - .extension(Pack::pack(&boxed.extension())) + .header(block.header().data()) + .uncles_hash(block.calc_uncles_hash()) + .extension(Pack::pack(&block.extension())) .parent_chain_root(parent_chain_root) .build(); let light_client_message = { diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index 89bb5280ba..eee5c16981 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -27,6 +27,7 @@ use ckb_systemtime::unix_time_as_millis; use 
ckb_traits::{HeaderFields, HeaderFieldsProvider}; use ckb_tx_pool::service::TxVerificationResult; use ckb_types::{ + core, core::{self, BlockNumber, EpochExt}, packed::{self, Byte32}, prelude::*, @@ -1077,12 +1078,19 @@ impl SyncShared { self.shared.consensus() } - pub fn insert_new_block_and_wait_result( + pub fn insert_new_block_with_callback( &self, chain: &ChainController, block: Arc, - ) -> Result { - todo!("") + peer_id: PeerIndex, + verify_success_callback: fn(&Shared, PeerIndex, Arc), + ) { + self.accept_block( + chain, + Arc::clone(&block), + peer_id, + Some(verify_success_callback), + ) } /// Insert new block to chain store @@ -1173,6 +1181,7 @@ impl SyncShared { chain: &ChainController, block: Arc, peer_id: PeerIndex, + verify_ok_callback: Option)>, ) { // let ret = { // let mut assume_valid_target = self.state.assume_valid_target(); @@ -1196,6 +1205,7 @@ impl SyncShared { block, peer_id: Some(peer_id), switch: Switch::NONE, + verify_ok_callback, }; chain.process_block(lonely_block); From 54c722370f213db87a103fe387c2f021ff11824e Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Sun, 17 Sep 2023 09:18:07 +0800 Subject: [PATCH 046/357] Flatten `UnverifiedBlock`'s structure --- chain/src/chain.rs | 148 ++++++++++++++++++---------------- sync/src/relayer/mod.rs | 1 + sync/src/synchronizer/mod.rs | 2 +- sync/src/types/mod.rs | 1 - util/instrument/src/import.rs | 14 +++- 5 files changed, 89 insertions(+), 77 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 2d676f584d..10b944fb28 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -164,27 +164,37 @@ pub struct ChainService { verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, } +pub type VerifyCallbackArgs<'a> = (&'a Shared, PeerIndex, Arc); + #[derive(Clone)] pub struct LonelyBlock { pub block: Arc, pub peer_id: Option, - pub switch: Switch, - pub verify_ok_callback: Option)>, + pub switch: Option, + + pub verify_ok_callback: Option, + pub verify_failed_callback: Option, } 
impl LonelyBlock { - fn combine_parent_header(self, parent_header: HeaderView) -> UnverifiedBlock { + fn combine_parent_header(self, parent_header: HeaderView, switch: Switch) -> UnverifiedBlock { UnverifiedBlock { + block: self.block, + peer_id: self.peer_id, + switch, + verify_ok_callback: self.verify_ok_callback, parent_header, - lonely_block: self, } } } #[derive(Clone)] struct UnverifiedBlock { - lonely_block: LonelyBlock, - parent_header: HeaderView, + pub block: Arc, + pub peer_id: Option, + pub switch: Switch, + pub verify_ok_callback: Option, + pub parent_header: HeaderView, } impl ChainService { @@ -333,33 +343,29 @@ impl ChainService { Ok(_) => { let log_now = std::time::Instant::now(); self.shared - .remove_block_status(&unverified_block.block().hash()); + .remove_block_status(&unverified_block.block.hash()); let log_elapsed_remove_block_status = log_now.elapsed(); self.shared - .remove_header_view(&unverified_block.lonely_block.block.hash()); + .remove_header_view(&unverified_block.block.hash()); debug!( "block {} remove_block_status cost: {:?}, and header_view cost: {:?}", - unverified_block.lonely_block.block.hash(), + unverified_block.block.hash(), log_elapsed_remove_block_status, log_now.elapsed() ); // start execute this block's callback function match ( - unverified_block.lonely_block.verify_ok_callback, - unverified_block.lonely_block.peer_id, + unverified_block.verify_ok_callback, + unverified_block.peer_id, ) { (Some(verify_ok_callback), Some(peer_id)) => { - verify_ok_callback( - &self.shared, - peer_id, - unverified_block.lonely_block.block, - ); + verify_ok_callback((&self.shared, peer_id, unverified_block.block)); } (Some(verify_ok_callback), _) => { error!( - "block {} verify_ok_callback have no peer_id, this should not happen", - unverified_block.lonely_block.block.hash() + "block {} have verify_ok_callback, but have no peer_id, this should not happen", + unverified_block.block.hash() ); } _ => {} @@ -368,13 +374,13 @@ impl ChainService 
{ Err(err) => { error!( "verify [{:?}]'s block {} failed: {}", - unverified_block.lonely_block.peer_id, - unverified_block.lonely_block.block.hash(), + unverified_block.peer_id, + unverified_block.block.hash(), err ); - if let Some(peer_id) = unverified_block.lonely_block.peer_id { - if Err(_) = self.verify_failed_blocks_tx.send(VerifyFailedBlockInfo { - block_hash: unverified_block.lonely_block.block.hash(), + if let Some(peer_id) = unverified_block.peer_id { + if let Err(_) = self.verify_failed_blocks_tx.send(VerifyFailedBlockInfo { + block_hash: unverified_block.block.hash(), peer_id, message_bytes: 0, reason: "".to_string(), @@ -400,15 +406,13 @@ impl ChainService { tip_ext.total_difficulty, )); - self.shared.insert_block_status( - unverified_block.lonely_block.block.hash(), - BlockStatus::BLOCK_INVALID, - ); + self.shared + .insert_block_status(unverified_block.block.hash(), BlockStatus::BLOCK_INVALID); error!( "set_unverified tip to {}-{}, because verify {} failed: {}", tip.number(), tip.hash(), - unverified_block.lonely_block.block.hash(), + unverified_block.block.hash(), err ); } @@ -456,8 +460,18 @@ impl ChainService { continue; } let descendants_len = descendants.len(); - let first_descendants_number = - descendants.first().expect("descdant not empty").number(); + let (first_descendants_number, last_descendants_number) = ( + descendants + .first() + .expect("descdant not empty") + .block + .number(), + descendants + .last() + .expect("descdant not empty") + .block + .number(), + ); let mut accept_error_occurred = false; for descendant_block in descendants { @@ -474,7 +488,10 @@ impl ChainService { Ok(accepted_opt) => match accepted_opt { Some((parent_header, total_difficulty)) => { let unverified_block: UnverifiedBlock = - descendant_block.combine_parent_header(parent_header); + descendant_block.combine_parent_header(parent_header, Switch::NONE); + let block_number = unverified_block.block.number(); + let block_hash = unverified_block.block.hash(); + 
match self.unverified_block_tx.send(unverified_block) { Ok(_) => {} Err(err) => error!("send unverified_block_tx failed: {}", err), @@ -484,20 +501,18 @@ impl ChainService { .gt(self.shared.get_unverified_tip().total_difficulty()) { self.shared.set_unverified_tip(ckb_shared::HeaderIndex::new( - descendant_block.block.header().number(), - descendant_block.block.header().hash(), + block_number.clone(), + block_hash.clone(), total_difficulty, )); debug!("set unverified_tip to {}-{}, while unverified_tip - verified_tip = {}", - descendant_block.block.number(), - descendant_block.block.hash(), - descendant_block.block - .number() - .saturating_sub(self.shared.snapshot().tip_number())) + block_number.clone(), + block_hash.clone(), + block_number.saturating_sub(self.shared.snapshot().tip_number())) } else { debug!("received a block {}-{} with lower or equal difficulty than unverified_tip {}-{}", - descendant_block.block.number(), - descendant_block.block.hash(), + block_number, + block_hash, self.shared.get_unverified_tip().number(), self.shared.get_unverified_tip().hash(), ); @@ -516,17 +531,7 @@ impl ChainService { if !accept_error_occurred { debug!( "accept {} blocks [{}->{}] success", - descendants.len(), - descendants - .first() - .expect("descendants not empty") - .block - .number(), - descendants - .last() - .expect("descendants not empty") - .block - .number(), + descendants_len, first_descendants_number, last_descendants_number ) } } @@ -635,23 +640,26 @@ impl ChainService { if block_number < 1 { warn!("receive 0 number block: 0-{}", block_hash); } - - if !lonely_block.switch.disable_non_contextual() { - let result = self.non_contextual_verify(&lonely_block.block); - match result { - Err(err) => { - if let Some(peer_id) = lonely_block.peer_id { - if Err(_) = self.verify_failed_blocks_tx.send(VerifyFailedBlockInfo { - block_hash: lonely_block.block.hash(), - peer_id, - message_bytes: 0, - reason: err.to_string(), - }) { - error!("ChainService want to send 
VerifyFailedBlockInfo to Synchronizer, but Synchronizer has dropped the receiver"); + if let Some(switch) = lonely_block.switch { + if !switch.disable_non_contextual() { + let result = self.non_contextual_verify(&lonely_block.block); + match result { + Err(err) => { + if let Some(peer_id) = lonely_block.peer_id { + if let Err(_) = + self.verify_failed_blocks_tx.send(VerifyFailedBlockInfo { + block_hash: lonely_block.block.hash(), + peer_id, + message_bytes: 0, + reason: err.to_string(), + }) + { + error!("ChainService want to send VerifyFailedBlockInfo to Synchronizer, but Synchronizer has dropped the receiver"); + } } } + _ => {} } - _ => {} } } @@ -758,13 +766,11 @@ impl ChainService { let log_now = std::time::Instant::now(); let UnverifiedBlock { + block, + peer_id, + switch, + verify_ok_callback, parent_header, - lonely_block: - LonelyBlock { - block, - peer_id, - switch, - }, } = unverified_block; let parent_ext = self diff --git a/sync/src/relayer/mod.rs b/sync/src/relayer/mod.rs index 8eba21fa46..182edb75d4 100644 --- a/sync/src/relayer/mod.rs +++ b/sync/src/relayer/mod.rs @@ -355,6 +355,7 @@ impl Relayer { "Generate last state to light client failed: {:?}", err ); + return; } } }; diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index 529a0a5f5b..7ed8dab9fc 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -234,7 +234,7 @@ pub struct Synchronizer { pub shared: Arc, fetch_channel: Option>, - verify_failed_blocks_rx: tokio::sync::mpsc::UnboundedReceiver, + verify_failed_blocks_rx: Arc>, } impl Synchronizer { diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index eee5c16981..057fd26a95 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -27,7 +27,6 @@ use ckb_systemtime::unix_time_as_millis; use ckb_traits::{HeaderFields, HeaderFieldsProvider}; use ckb_tx_pool::service::TxVerificationResult; use ckb_types::{ - core, core::{self, BlockNumber, EpochExt}, packed::{self, Byte32}, 
prelude::*, diff --git a/util/instrument/src/import.rs b/util/instrument/src/import.rs index 1c911e5e79..6b106265a8 100644 --- a/util/instrument/src/import.rs +++ b/util/instrument/src/import.rs @@ -1,4 +1,4 @@ -use ckb_chain::chain::ChainController; +use ckb_chain::chain::{ChainController, LonelyBlock}; use ckb_jsonrpc_types::BlockView as JsonBlock; use ckb_types::core; #[cfg(feature = "progress_bar")] @@ -63,9 +63,15 @@ impl Import { let block: JsonBlock = serde_json::from_str(&s)?; let block: Arc = Arc::new(block.into()); if !block.is_genesis() { - self.chain - .process_block(block) - .expect("import occur malformation data"); + self.chain.process_block(LonelyBlock { + block, + peer_id: None, + switch: None, + verify_ok_callback: None, + verify_failed_callback: Some(|_: ckb_chain::chain::VerifyCallbackArgs| { + panic!("import occur malformation data") + }), + }); } progress_bar.inc(s.as_bytes().len() as u64); } From fe8200cdb19385d90ae45f6c7f5c085517fb4c6a Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Sun, 17 Sep 2023 11:21:43 +0800 Subject: [PATCH 047/357] Add `verify_failed_callback` and `verify_ok_callback` --- chain/src/chain.rs | 2 +- sync/src/synchronizer/mod.rs | 6 ++---- sync/src/types/mod.rs | 38 +++++++++++++++++++++++++++++++++--- 3 files changed, 38 insertions(+), 8 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 10b944fb28..3dc8fc90b4 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -173,7 +173,7 @@ pub struct LonelyBlock { pub switch: Option, pub verify_ok_callback: Option, - pub verify_failed_callback: Option, + pub verify_failed_callback: Option, } impl LonelyBlock { diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index 7ed8dab9fc..4b18894cff 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -233,8 +233,6 @@ pub struct Synchronizer { /// Sync shared state pub shared: Arc, fetch_channel: Option>, - - verify_failed_blocks_rx: Arc>, } impl Synchronizer { @@ 
-891,11 +889,11 @@ impl CKBProtocolHandler for Synchronizer { async fn poll(&mut self, nc: Arc) -> Option<()> { let mut have_malformed_peers = false; - while let Some(malformed_peer_info) = self.verify_failed_blocks_rx.recv().await { + while let Some(malformed_peer_info) = self.shared.verify_failed_blocks_rx.recv().await { have_malformed_peers = true; let x = Self::post_sync_process( &nc, - malformed_peer_info.peer, + malformed_peer_info.peer_id, "SendBlock", malformed_peer_info.message_bytes, StatusCode::BlockIsInvalid.with_context(format!( diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index 057fd26a95..15442f89fc 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -992,6 +992,11 @@ pub(crate) type PendingCompactBlockMap = HashMap< pub struct SyncShared { shared: Shared, state: Arc, + + pub(crate) verify_failed_blocks_tx: + Arc>, + pub(crate) verify_failed_blocks_rx: + Arc>, } impl SyncShared { @@ -1043,9 +1048,14 @@ impl SyncShared { min_chain_work: sync_config.min_chain_work, }; + let (verify_failed_blocks_tx, verify_failed_blocks_rx) = + tokio::sync::mpsc::unbounded_channel::(); + SyncShared { shared, state: Arc::new(state), + verify_failed_blocks_tx: Arc::new(verify_failed_blocks_tx), + verify_failed_blocks_rx: Arc::new(verify_failed_blocks_rx), } } @@ -1089,6 +1099,7 @@ impl SyncShared { Arc::clone(&block), peer_id, Some(verify_success_callback), + None, ) } @@ -1110,8 +1121,27 @@ impl SyncShared { // return Ok(false); // } + let verify_failed_callback = + || match self.verify_failed_blocks_tx.send(VerifyFailedBlockInfo { + block_hash: block.header().hash(), + peer_id, + message_bytes: 0, + reason: "".to_string(), + }) { + Err(e) => { + todo!("how to handle this ???") + } + _ => (), + }; + // Attempt to accept the given block if its parent already exist in database - self.accept_block(chain, Arc::clone(&block), peer_id); + self.accept_block( + chain, + Arc::clone(&block), + peer_id, + None, + Some(verify_failed_callback), + ); // if 
ret.is_err() { // debug!("accept block {:?} {:?}", block, ret); // return ret; @@ -1181,6 +1211,7 @@ impl SyncShared { block: Arc, peer_id: PeerIndex, verify_ok_callback: Option)>, + verify_failed_callback: Option, ) { // let ret = { // let mut assume_valid_target = self.state.assume_valid_target(); @@ -1203,8 +1234,9 @@ impl SyncShared { let lonely_block = LonelyBlock { block, peer_id: Some(peer_id), - switch: Switch::NONE, - verify_ok_callback, + switch: Some(Switch::NONE), + verify_ok_callback: None, + verify_failed_callback, }; chain.process_block(lonely_block); From f005592b4604f3130c107303f143688a03ab7b5f Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Sun, 17 Sep 2023 17:08:07 +0800 Subject: [PATCH 048/357] Try to make whole program compile --- chain/src/chain.rs | 116 ++++++++++++------------- chain/src/orphan_block_pool.rs | 14 +-- sync/src/relayer/mod.rs | 44 +++++++++- sync/src/synchronizer/block_process.rs | 29 +++---- sync/src/synchronizer/mod.rs | 38 ++++---- sync/src/types/mod.rs | 36 ++------ util/instrument/src/import.rs | 4 +- 7 files changed, 142 insertions(+), 139 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 3dc8fc90b4..14c6d95f68 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -114,9 +114,7 @@ impl ChainController { // Relay need this pub fn get_orphan_block(&self, hash: &Byte32) -> Option> { - self.orphan_block_broker - .get_block(hash) - .map(|lonely_block| lonely_block.block) + self.orphan_block_broker.get_block(hash) } pub fn orphan_blocks_len(&self) -> usize { @@ -154,26 +152,17 @@ pub struct ChainService { proposal_table: Arc>, orphan_blocks_broker: Arc, - - lonely_block_tx: Sender, - lonely_block_rx: Receiver, - - unverified_block_tx: Sender, - unverified_block_rx: Receiver, - - verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, } pub type VerifyCallbackArgs<'a> = (&'a Shared, PeerIndex, Arc); -#[derive(Clone)] pub struct LonelyBlock { pub block: Arc, pub peer_id: Option, pub switch: 
Option, - pub verify_ok_callback: Option, - pub verify_failed_callback: Option, + pub verify_ok_callback: Option>, + // pub verify_failed_callback: Option, } impl LonelyBlock { @@ -188,22 +177,17 @@ impl LonelyBlock { } } -#[derive(Clone)] struct UnverifiedBlock { pub block: Arc, pub peer_id: Option, pub switch: Switch, - pub verify_ok_callback: Option, + pub verify_ok_callback: Option>, pub parent_header: HeaderView, } impl ChainService { /// Create a new ChainService instance with shared and initial proposal_table. - pub fn new( - shared: Shared, - proposal_table: ProposalTable, - verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, - ) -> ChainService { + pub fn new(shared: Shared, proposal_table: ProposalTable) -> ChainService { let (unverified_tx, unverified_rx) = channel::bounded::(BLOCK_DOWNLOAD_WINDOW as usize * 3); @@ -214,11 +198,6 @@ impl ChainService { shared, proposal_table: Arc::new(Mutex::new(proposal_table)), orphan_blocks_broker: Arc::new(OrphanBlockPool::with_capacity(ORPHAN_BLOCK_SIZE)), - unverified_block_tx: unverified_tx, - unverified_block_rx: unverified_rx, - lonely_block_tx: new_block_tx, - lonely_block_rx: new_block_rx, - verify_failed_blocks_tx, } } @@ -242,19 +221,34 @@ impl ChainService { let (search_orphan_pool_stop_tx, search_orphan_pool_stop_rx) = ckb_channel::bounded::<()>(1); + let (unverified_tx, unverified_rx) = + channel::bounded::(BLOCK_DOWNLOAD_WINDOW as usize * 3); + let unverified_consumer_thread = thread::Builder::new() .name("verify_blocks".into()) .spawn({ let chain_service = self.clone(); - move || chain_service.start_consume_unverified_blocks(unverified_queue_stop_rx) + move || { + chain_service + .start_consume_unverified_blocks(unverified_queue_stop_rx, unverified_rx) + } }) .expect("start unverified_queue consumer thread should ok"); + let (lonely_block_tx, lonely_block_rx) = + channel::bounded::(BLOCK_DOWNLOAD_WINDOW as usize); + let search_orphan_pool_thread = thread::Builder::new() 
.name("search_orphan".into()) .spawn({ let chain_service = self.clone(); - move || chain_service.start_search_orphan_pool(search_orphan_pool_stop_rx) + move || { + chain_service.start_search_orphan_pool( + search_orphan_pool_stop_rx, + lonely_block_rx, + unverified_tx, + ) + } }) .expect("start search_orphan_pool thread should ok"); @@ -264,7 +258,7 @@ impl ChainService { recv(process_block_receiver) -> msg => match msg { Ok(Request { responder, arguments: lonely_block }) => { let _ = tx_control.suspend_chunk_process(); - let _ = responder.send(self.process_block_v2(lonely_block)); + let _ = responder.send(self.process_block_v2(lonely_block, lonely_block_tx.clone())); let _ = tx_control.continue_chunk_process(); if let Some(metrics) = ckb_metrics::handle() { @@ -311,7 +305,11 @@ impl ChainService { ) } - fn start_consume_unverified_blocks(&self, unverified_queue_stop_rx: Receiver<()>) { + fn start_consume_unverified_blocks( + &self, + unverified_queue_stop_rx: Receiver<()>, + unverified_block_rx: Receiver, + ) { let mut begin_loop = std::time::Instant::now(); loop { begin_loop = std::time::Instant::now(); @@ -320,7 +318,7 @@ impl ChainService { info!("unverified_queue_consumer got exit signal, exit now"); return; }, - recv(self.unverified_block_rx) -> msg => match msg { + recv(unverified_block_rx) -> msg => match msg { Ok(unverified_task) => { // process this unverified block trace!("got an unverified block, wait cost: {:?}", begin_loop.elapsed()); @@ -360,7 +358,7 @@ impl ChainService { unverified_block.peer_id, ) { (Some(verify_ok_callback), Some(peer_id)) => { - verify_ok_callback((&self.shared, peer_id, unverified_block.block)); + // verify_ok_callback((&self.shared, peer_id, unverified_block.block)); } (Some(verify_ok_callback), _) => { error!( @@ -379,14 +377,14 @@ impl ChainService { err ); if let Some(peer_id) = unverified_block.peer_id { - if let Err(_) = self.verify_failed_blocks_tx.send(VerifyFailedBlockInfo { - block_hash: unverified_block.block.hash(), 
- peer_id, - message_bytes: 0, - reason: "".to_string(), - }) { - error!("ChainService want to send VerifyFailedBlockInfo to Synchronizer, but Synchronizer has dropped the receiver"); - } + // if let Err(_) = self.verify_failed_blocks_tx.send(VerifyFailedBlockInfo { + // block_hash: unverified_block.block.hash(), + // peer_id, + // message_bytes: 0, + // reason: "".to_string(), + // }) { + // error!("ChainService want to send VerifyFailedBlockInfo to Synchronizer, but Synchronizer has dropped the receiver"); + // } } let tip = self @@ -419,17 +417,22 @@ impl ChainService { } } - fn start_search_orphan_pool(&self, search_orphan_pool_stop_rx: Receiver<()>) { + fn start_search_orphan_pool( + &self, + search_orphan_pool_stop_rx: Receiver<()>, + lonely_block_rx: Receiver, + unverified_block_tx: Sender, + ) { loop { select! { recv(search_orphan_pool_stop_rx) -> _ => { info!("unverified_queue_consumer got exit signal, exit now"); return; }, - recv(self.lonely_block_rx) -> msg => match msg { + recv(lonely_block_rx) -> msg => match msg { Ok(lonely_block) => { self.orphan_blocks_broker.insert(lonely_block); - self.search_orphan_pool() + self.search_orphan_pool(unverified_block_tx.clone()) }, Err(err) => { error!("lonely_block_rx err: {}", err); @@ -439,7 +442,7 @@ impl ChainService { } } } - fn search_orphan_pool(&self) { + fn search_orphan_pool(&self, unverified_block_tx: Sender) { for leader_hash in self.orphan_blocks_broker.clone_leaders() { if !self .shared @@ -492,7 +495,7 @@ impl ChainService { let block_number = unverified_block.block.number(); let block_hash = unverified_block.block.hash(); - match self.unverified_block_tx.send(unverified_block) { + match unverified_block_tx.send(unverified_block) { Ok(_) => {} Err(err) => error!("send unverified_block_tx failed: {}", err), }; @@ -634,7 +637,11 @@ impl ChainService { // make block IO and verify asynchronize #[doc(hidden)] - pub fn process_block_v2(&self, lonely_block: LonelyBlock) { + pub fn process_block_v2( + 
&self, + lonely_block: LonelyBlock, + lonely_block_tx: Sender, + ) { let block_number = lonely_block.block.number(); let block_hash = lonely_block.block.hash(); if block_number < 1 { @@ -644,26 +651,13 @@ impl ChainService { if !switch.disable_non_contextual() { let result = self.non_contextual_verify(&lonely_block.block); match result { - Err(err) => { - if let Some(peer_id) = lonely_block.peer_id { - if let Err(_) = - self.verify_failed_blocks_tx.send(VerifyFailedBlockInfo { - block_hash: lonely_block.block.hash(), - peer_id, - message_bytes: 0, - reason: err.to_string(), - }) - { - error!("ChainService want to send VerifyFailedBlockInfo to Synchronizer, but Synchronizer has dropped the receiver"); - } - } - } + Err(err) => {} _ => {} } } } - match self.lonely_block_tx.send(lonely_block) { + match lonely_block_tx.send(lonely_block) { Ok(_) => {} Err(err) => { error!("notify new block to orphan pool err: {}", err) diff --git a/chain/src/orphan_block_pool.rs b/chain/src/orphan_block_pool.rs index 013f677daa..4614eaed20 100644 --- a/chain/src/orphan_block_pool.rs +++ b/chain/src/orphan_block_pool.rs @@ -1,6 +1,6 @@ use crate::chain::LonelyBlock; use ckb_logger::debug; -use ckb_types::core::EpochNumber; +use ckb_types::core::{BlockView, EpochNumber}; use ckb_types::{core, packed}; use ckb_util::{parking_lot::RwLock, shrink_to_fit}; use std::collections::{HashMap, HashSet, VecDeque}; @@ -86,11 +86,13 @@ impl InnerPool { removed } - pub fn get_block(&self, hash: &packed::Byte32) -> Option { + pub fn get_block(&self, hash: &packed::Byte32) -> Option> { self.parents.get(hash).and_then(|parent_hash| { - self.blocks - .get(parent_hash) - .and_then(|blocks| blocks.get(hash).cloned()) + self.blocks.get(parent_hash).and_then(|blocks| { + blocks + .get(hash) + .map(|lonely_block| lonely_block.block.clone()) + }) }) } @@ -149,7 +151,7 @@ impl OrphanBlockPool { self.inner.write().remove_blocks_by_parent(parent_hash) } - pub fn get_block(&self, hash: &packed::Byte32) -> Option { 
+ pub fn get_block(&self, hash: &packed::Byte32) -> Option> { self.inner.read().get_block(hash) } diff --git a/sync/src/relayer/mod.rs b/sync/src/relayer/mod.rs index 182edb75d4..b6416ec5a2 100644 --- a/sync/src/relayer/mod.rs +++ b/sync/src/relayer/mod.rs @@ -27,7 +27,7 @@ use crate::utils::{ use crate::{Status, StatusCode}; use ckb_chain::chain::ChainController; use ckb_constant::sync::BAD_MESSAGE_BAN_TIME; -use ckb_logger::{debug_target, error_target, info_target, trace_target, warn_target}; +use ckb_logger::{debug_target, error, error_target, info_target, trace_target, warn_target}; use ckb_network::{ async_trait, bytes::Bytes, tokio, CKBProtocolContext, CKBProtocolHandler, PeerIndex, SupportProtocols, TargetSession, @@ -70,6 +70,8 @@ pub enum ReconstructionResult { Error(Status), } +type BroadcastCompactBlockType = (Arc, PeerIndex); + /// Relayer protocol handle #[derive(Clone)] pub struct Relayer { @@ -77,6 +79,11 @@ pub struct Relayer { pub(crate) shared: Arc, rate_limiter: Arc>>, v3: bool, + + pub(crate) broadcast_compact_block_tx: + tokio::sync::mpsc::UnboundedSender, + pub(crate) broadcast_compact_block_rx: + tokio::sync::mpsc::UnboundedReceiver, } impl Relayer { @@ -88,11 +95,18 @@ impl Relayer { // current max rps is 10 (ASK_FOR_TXS_TOKEN / TX_PROPOSAL_TOKEN), 30 is a flexible hard cap with buffer let quota = governor::Quota::per_second(std::num::NonZeroU32::new(30).unwrap()); let rate_limiter = Arc::new(Mutex::new(RateLimiter::keyed(quota))); + + let (broadcast_compact_block_tx, broadcast_compact_block_rx) = + tokio::sync::mpsc::unbounded_channel::(); + Relayer { chain, shared, rate_limiter, v3: false, + + broadcast_compact_block_tx, + broadcast_compact_block_rx, } } @@ -297,8 +311,19 @@ impl Relayer { } let block = Arc::new(block); - let verify_success_callback = |shared: &Shared, peer: PeerIndex, block: Arc| { - Self::build_and_broadcast_compact_block(nc, shared, peer, block) + + let broadcast_compact_block_tx = 
self.broadcast_compact_block_tx.clone(); + let block_clone = Arc::clone(&block); + let peer_clone = peer.clone(); + let verify_success_callback = { + || match broadcast_compact_block_tx.send((block_clone, peer_clone)) { + Err(_) => { + error!( + "send block to broadcast_compact_block_tx failed, this shouldn't happen", + ); + } + _ => {} + } }; self.shared().insert_new_block_with_callback( @@ -951,6 +976,19 @@ impl CKBProtocolHandler for Relayer { Instant::now().saturating_duration_since(start_time) ); } + + async fn poll(&mut self, nc: Arc) -> Option<()> { + if let Some((block, peer)) = self.broadcast_compact_block_rx.recv().await { + Self::build_and_broadcast_compact_block( + nc.as_ref(), + self.shared().shared(), + peer, + block, + ); + return Some(()); + } + None + } } #[derive(Copy, Clone, Debug)] diff --git a/sync/src/synchronizer/block_process.rs b/sync/src/synchronizer/block_process.rs index 257a983d1b..f8e236e0cb 100644 --- a/sync/src/synchronizer/block_process.rs +++ b/sync/src/synchronizer/block_process.rs @@ -8,7 +8,7 @@ pub struct BlockProcess<'a> { message: packed::SendBlockReader<'a>, synchronizer: &'a Synchronizer, peer: PeerIndex, - message_bytes: usize, + message_bytes: u64, } impl<'a> BlockProcess<'a> { @@ -16,7 +16,7 @@ impl<'a> BlockProcess<'a> { message: packed::SendBlockReader<'a>, synchronizer: &'a Synchronizer, peer: PeerIndex, - message_bytes: usize, + message_bytes: u64, ) -> Self { BlockProcess { message, @@ -26,7 +26,7 @@ impl<'a> BlockProcess<'a> { } } - pub fn execute(self) -> Vec { + pub fn execute(self) { let block = self.message.block().to_entity().into_view(); debug!( "BlockProcess received block {} {}", @@ -36,17 +36,16 @@ impl<'a> BlockProcess<'a> { let shared = self.synchronizer.shared(); if shared.new_block_received(&block) { - match self - .synchronizer - .process_new_block(block.clone(), self.peer, self.message_bytes) - { - Ok(verify_failed_peers) => { - return verify_failed_peers; - } - Err(err) => { - error!("BlockProcess 
process_new_block error: {:?}", err); - } - } + self.synchronizer + .process_new_block(block.clone(), self.peer, self.message_bytes); + // { + // Ok(verify_failed_peers) => { + // return verify_failed_peers; + // } + // Err(err) => { + // error!("BlockProcess process_new_block error: {:?}", err); + // } + // } // if let Err(err) = this_block_verify_result { // if !is_internal_db_error(&err) { @@ -58,7 +57,5 @@ impl<'a> BlockProcess<'a> { // } // } } - - Vec::new() } } diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index 4b18894cff..0a6b20c62a 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -233,6 +233,11 @@ pub struct Synchronizer { /// Sync shared state pub shared: Arc, fetch_channel: Option>, + + pub(crate) verify_failed_blocks_tx: + Arc>, + pub(crate) verify_failed_blocks_rx: + Arc>, } impl Synchronizer { @@ -240,10 +245,14 @@ impl Synchronizer { /// /// This is a runtime sync protocol shared state, and any Sync protocol messages will be processed and forwarded by it pub fn new(chain: ChainController, shared: Arc) -> Synchronizer { + let (verify_failed_blocks_tx, verify_failed_blocks_rx) = + tokio::sync::mpsc::unbounded_channel::(); Synchronizer { chain, shared, fetch_channel: None, + verify_failed_blocks_tx: Arc::new(verify_failed_blocks_tx), + verify_failed_blocks_rx: Arc::new(verify_failed_blocks_rx), } } @@ -270,21 +279,9 @@ impl Synchronizer { } packed::SyncMessageUnionReader::SendBlock(reader) => { if reader.check_data() { - let verify_failed_peers = - BlockProcess::new(reader, self, peer, message.as_slice().len()).execute(); - - verify_failed_peers.iter().for_each(|malformed_peer_info| { - Self::post_sync_process( - nc, - malformed_peer_info.peer, - "SendBlock", - 0, - StatusCode::BlockIsInvalid.with_context(format!( - "block {} is invalid, reason: {}", - malformed_peer_info.block_hash, malformed_peer_info.reason - )), - ); - }) + BlockProcess::new(reader, self, peer, message.as_slice().len() as 
u64) + .execute(); + Status::ignored() } else { StatusCode::ProtocolMessageIsMalformed.with_context("SendBlock is invalid") } @@ -362,24 +359,23 @@ impl Synchronizer { &self, block: core::BlockView, peer_id: PeerIndex, - ) -> Result, CKBError> { + message_bytes: u64, + ) { let block_hash = block.hash(); let status = self.shared.active_chain().get_block_status(&block_hash); // NOTE: Filtering `BLOCK_STORED` but not `BLOCK_RECEIVED`, is for avoiding // stopping synchronization even when orphan_pool maintains dirty items by bugs. if status.contains(BlockStatus::BLOCK_PARTIAL_STORED) { error!("Block {} already partial stored", block_hash); - Ok(Vec::new()) } else if status.contains(BlockStatus::HEADER_VALID) { self.shared - .insert_new_block(&self.chain, Arc::new(block), peer_id) + .insert_new_block(&self.chain, Arc::new(block), peer_id, message_bytes); } else { debug!( "Synchronizer process_new_block unexpected status {:?} {}", status, block_hash, ); // TODO which error should we return? - (Ok(Vec::new())) } } @@ -889,10 +885,10 @@ impl CKBProtocolHandler for Synchronizer { async fn poll(&mut self, nc: Arc) -> Option<()> { let mut have_malformed_peers = false; - while let Some(malformed_peer_info) = self.shared.verify_failed_blocks_rx.recv().await { + while let Some(malformed_peer_info) = self.verify_failed_blocks_rx.recv().await { have_malformed_peers = true; let x = Self::post_sync_process( - &nc, + nc.as_ref(), malformed_peer_info.peer_id, "SendBlock", malformed_peer_info.message_bytes, diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index 15442f89fc..ccf77d54d3 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -992,11 +992,6 @@ pub(crate) type PendingCompactBlockMap = HashMap< pub struct SyncShared { shared: Shared, state: Arc, - - pub(crate) verify_failed_blocks_tx: - Arc>, - pub(crate) verify_failed_blocks_rx: - Arc>, } impl SyncShared { @@ -1048,14 +1043,9 @@ impl SyncShared { min_chain_work: sync_config.min_chain_work, }; - let 
(verify_failed_blocks_tx, verify_failed_blocks_rx) = - tokio::sync::mpsc::unbounded_channel::(); - SyncShared { shared, state: Arc::new(state), - verify_failed_blocks_tx: Arc::new(verify_failed_blocks_tx), - verify_failed_blocks_rx: Arc::new(verify_failed_blocks_rx), } } @@ -1092,13 +1082,13 @@ impl SyncShared { chain: &ChainController, block: Arc, peer_id: PeerIndex, - verify_success_callback: fn(&Shared, PeerIndex, Arc), + verify_success_callback: impl FnOnce() + Send + Sync, ) { self.accept_block( chain, Arc::clone(&block), peer_id, - Some(verify_success_callback), + Some(Box::new(verify_success_callback)), None, ) } @@ -1109,6 +1099,7 @@ impl SyncShared { chain: &ChainController, block: Arc, peer_id: PeerIndex, + message_bytes: u64, ) { // Insert the given block into orphan_block_pool if its parent is not found // if !self.is_stored(&block.parent_hash()) { @@ -1121,26 +1112,13 @@ impl SyncShared { // return Ok(false); // } - let verify_failed_callback = - || match self.verify_failed_blocks_tx.send(VerifyFailedBlockInfo { - block_hash: block.header().hash(), - peer_id, - message_bytes: 0, - reason: "".to_string(), - }) { - Err(e) => { - todo!("how to handle this ???") - } - _ => (), - }; - // Attempt to accept the given block if its parent already exist in database self.accept_block( chain, Arc::clone(&block), peer_id, + None::>, None, - Some(verify_failed_callback), ); // if ret.is_err() { // debug!("accept block {:?} {:?}", block, ret); @@ -1210,7 +1188,7 @@ impl SyncShared { chain: &ChainController, block: Arc, peer_id: PeerIndex, - verify_ok_callback: Option)>, + verify_ok_callback: Option>, verify_failed_callback: Option, ) { // let ret = { @@ -1235,8 +1213,8 @@ impl SyncShared { block, peer_id: Some(peer_id), switch: Some(Switch::NONE), - verify_ok_callback: None, - verify_failed_callback, + verify_ok_callback, + // verify_failed_callback, }; chain.process_block(lonely_block); diff --git a/util/instrument/src/import.rs b/util/instrument/src/import.rs index 
6b106265a8..c66c45eb14 100644 --- a/util/instrument/src/import.rs +++ b/util/instrument/src/import.rs @@ -68,9 +68,7 @@ impl Import { peer_id: None, switch: None, verify_ok_callback: None, - verify_failed_callback: Some(|_: ckb_chain::chain::VerifyCallbackArgs| { - panic!("import occur malformation data") - }), + // verify_failed_callback: Some(|| panic!("import occur malformation data")), }); } progress_bar.inc(s.as_bytes().len() as u64); From 84e161e63236b25ede72854964d67f37266efc59 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 18 Sep 2023 08:17:06 +0800 Subject: [PATCH 049/357] Remove Relayer's Clone attribute --- sync/src/relayer/mod.rs | 3 +-- sync/src/synchronizer/block_fetcher.rs | 16 ++++------------ sync/src/synchronizer/mod.rs | 21 ++++++++++----------- sync/src/types/mod.rs | 2 +- 4 files changed, 16 insertions(+), 26 deletions(-) diff --git a/sync/src/relayer/mod.rs b/sync/src/relayer/mod.rs index b6416ec5a2..a318303209 100644 --- a/sync/src/relayer/mod.rs +++ b/sync/src/relayer/mod.rs @@ -73,7 +73,6 @@ pub enum ReconstructionResult { type BroadcastCompactBlockType = (Arc, PeerIndex); /// Relayer protocol handle -#[derive(Clone)] pub struct Relayer { chain: ChainController, pub(crate) shared: Arc, @@ -316,7 +315,7 @@ impl Relayer { let block_clone = Arc::clone(&block); let peer_clone = peer.clone(); let verify_success_callback = { - || match broadcast_compact_block_tx.send((block_clone, peer_clone)) { + move || match broadcast_compact_block_tx.send((block_clone, peer_clone)) { Err(_) => { error!( "send block to broadcast_compact_block_tx failed, this shouldn't happen", diff --git a/sync/src/synchronizer/block_fetcher.rs b/sync/src/synchronizer/block_fetcher.rs index 8e17f7206a..3c405fd373 100644 --- a/sync/src/synchronizer/block_fetcher.rs +++ b/sync/src/synchronizer/block_fetcher.rs @@ -146,7 +146,7 @@ impl BlockFetcher { return None; } - let state = self.sync_shared.shared().state(); + let state = self.sync_shared.state(); let mut start = 
last_common.number() + 1; let mut end = min(best_known.number(), start + BLOCK_DOWNLOAD_WINDOW); let n_fetch = min( @@ -169,11 +169,7 @@ impl BlockFetcher { let mut header = self .active_chain .get_ancestor(&best_known.hash(), start + span - 1)?; - let mut status = self - .synchronizer - .shared() - .shared() - .get_block_status(&header.hash()); + let mut status = self.sync_shared.shared().get_block_status(&header.hash()); // Judge whether we should fetch the target block, neither stored nor in-flighted for _ in 0..span { @@ -206,11 +202,7 @@ impl BlockFetcher { fetch.push(header) } - status = self - .synchronizer - .shared() - .shared() - .get_block_status(&parent_hash); + status = self.sync_shared.shared().get_block_status(&parent_hash); header = self .sync_shared .get_header_index_view(&parent_hash, false)?; @@ -263,7 +255,7 @@ impl BlockFetcher { fetch_last, fetch.len(), tip, - self.synchronizer.shared().shared().get_unverified_tip().number(), + self.sync_shared.shared().get_unverified_tip().number(), inflight_peer_count, inflight_total_count, trace_timecost_now.elapsed().as_millis(), diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index 0a6b20c62a..0b8cfe5c0e 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -20,7 +20,7 @@ pub(crate) use self::get_headers_process::GetHeadersProcess; pub(crate) use self::headers_process::HeadersProcess; pub(crate) use self::in_ibd_process::InIBDProcess; -use crate::types::{HeadersSyncController, IBDState, Peers, SyncShared}; +use crate::types::{HeadersSyncController, IBDState, Peers, SyncShared, SyncState}; use crate::utils::{metric_ckb_message_bytes, send_message_to, MetricDirection}; use crate::{Status, StatusCode}; use ckb_shared::block_status::BlockStatus; @@ -158,8 +158,8 @@ impl BlockFetchCMD { return self.can_start; } - let sync_shared = self.sync_shared; - let state = sync_shared.state(); + let shared = self.sync_shared.shared(); + let state = 
self.sync_shared.state(); let min_work_reach = |flag: &mut CanStart| { if state.min_chain_work_ready() { @@ -170,7 +170,7 @@ impl BlockFetchCMD { let assume_valid_target_find = |flag: &mut CanStart| { let mut assume_valid_target = state.assume_valid_target(); if let Some(ref target) = *assume_valid_target { - match sync_shared.shared().header_map().get(&target.pack()) { + match shared.header_map().get(&target.pack()) { Some(header) => { *flag = CanStart::Ready; // Blocks that are no longer in the scope of ibd must be forced to verify @@ -234,10 +234,8 @@ pub struct Synchronizer { pub shared: Arc, fetch_channel: Option>, - pub(crate) verify_failed_blocks_tx: - Arc>, - pub(crate) verify_failed_blocks_rx: - Arc>, + pub(crate) verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, + pub(crate) verify_failed_blocks_rx: tokio::sync::mpsc::UnboundedReceiver, } impl Synchronizer { @@ -251,8 +249,8 @@ impl Synchronizer { chain, shared, fetch_channel: None, - verify_failed_blocks_tx: Arc::new(verify_failed_blocks_tx), - verify_failed_blocks_rx: Arc::new(verify_failed_blocks_rx), + verify_failed_blocks_tx, + verify_failed_blocks_rx, } } @@ -385,7 +383,7 @@ impl Synchronizer { peer: PeerIndex, ibd: IBDState, ) -> Option>> { - BlockFetcher::new(Arc::to_owned(self.shared()), peer, ibd).fetch() + BlockFetcher::new(Arc::clone(&self.shared), peer, ibd).fetch() } pub(crate) fn on_connected(&self, nc: &dyn CKBProtocolContext, peer: PeerIndex) { @@ -660,6 +658,7 @@ impl Synchronizer { } None => { let p2p_control = raw.clone(); + let sync_shared = Arc::clone(self.shared()); let (sender, recv) = channel::bounded(2); let peers = self.get_peers_to_fetch(ibd, &disconnect_list); sender diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index ccf77d54d3..243d18a4ac 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -1082,7 +1082,7 @@ impl SyncShared { chain: &ChainController, block: Arc, peer_id: PeerIndex, - verify_success_callback: impl FnOnce() + Send + Sync, 
+ verify_success_callback: impl FnOnce() + Send + Sync + 'static, ) { self.accept_block( chain, From 172074c302056710d0210aa94f5629677485a0eb Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 18 Sep 2023 10:31:01 +0800 Subject: [PATCH 050/357] Add callback entry for ChainController Signed-off-by: Eval EXEC --- chain/src/chain.rs | 24 +++++++++++++++++++++--- rpc/src/module/miner.rs | 11 +++++++---- rpc/src/module/test.rs | 16 +++++++++------- sync/src/types/mod.rs | 2 +- util/instrument/src/import.rs | 8 +------- 5 files changed, 39 insertions(+), 22 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 14c6d95f68..3a2ce0e3a1 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -88,14 +88,32 @@ impl ChainController { /// If the block already exists, does nothing and false is returned. /// /// [BlockVerifier] [NonContextualBlockTxsVerifier] [ContextualBlockVerifier] will performed - pub fn process_block(&self, lonely_block: LonelyBlock) { - self.internal_process_block(lonely_block) + pub fn process_lonely_block(&self, lonely_block: LonelyBlock) { + self.internal_process_lonely_block(lonely_block) + } + + pub fn process_block(&self, block: Arc) { + self.internal_process_lonely_block(LonelyBlock { + block, + peer_id: None, + switch: None, + verify_ok_callback: None, + }) + } + + pub fn internal_process_block(&self, block: Arc, switch: Switch) { + self.internal_process_lonely_block(LonelyBlock { + block, + peer_id: None, + switch: Some(switch), + verify_ok_callback: None, + }) } /// Internal method insert block for test /// /// switch bit flags for particular verify, make easier to generating test data - pub fn internal_process_block(&self, lonely_block: LonelyBlock) { + pub fn internal_process_lonely_block(&self, lonely_block: LonelyBlock) { if Request::call(&self.process_block_sender, lonely_block).is_none() { error!("Chain service has gone") } diff --git a/rpc/src/module/miner.rs b/rpc/src/module/miner.rs index 76e100bd5a..729b15d0d0 
100644 --- a/rpc/src/module/miner.rs +++ b/rpc/src/module/miner.rs @@ -276,10 +276,13 @@ impl MinerRpc for MinerRpcImpl { .map_err(|err| handle_submit_error(&work_id, &err))?; // Verify and insert block - let is_new = self - .chain - .process_block(Arc::clone(&block)) - .map_err(|err| handle_submit_error(&work_id, &err))?; + let is_new: bool = { + // self + // .chain + // .process_block(Arc::clone(&block)) + // .map_err(|err| handle_submit_error(&work_id, &err))?; + todo!("retrive verify block result by callback"); + }; info!( "end to submit block, work_id = {}, is_new = {}, block = #{}({})", work_id, diff --git a/rpc/src/module/test.rs b/rpc/src/module/test.rs index 1490e1101f..1e8bdcef6c 100644 --- a/rpc/src/module/test.rs +++ b/rpc/src/module/test.rs @@ -512,10 +512,11 @@ impl IntegrationTestRpc for IntegrationTestRpcImpl { fn process_block_without_verify(&self, data: Block, broadcast: bool) -> Result> { let block: packed::Block = data.into(); let block: Arc = Arc::new(block.into_view()); - let ret = self - .chain - .internal_process_block(Arc::clone(&block), Switch::DISABLE_ALL); - + let ret: Result<()> = { + // self.chain + // .internal_process_block(Arc::clone(&block), Switch::DISABLE_ALL); + todo!("retrive verify block result by callback"); + }; if broadcast { let content = packed::CompactBlock::build_from_block(&block, &HashSet::new()); let message = packed::RelayMessage::new_builder().set(content).build(); @@ -675,10 +676,11 @@ impl IntegrationTestRpcImpl { let content = packed::CompactBlock::build_from_block(&block_view, &HashSet::new()); let message = packed::RelayMessage::new_builder().set(content).build(); + todo!("retrive verify block result by callback"); // insert block to chain - self.chain - .process_block(Arc::clone(&block_view)) - .map_err(|err| RPCError::custom(RPCError::CKBInternalError, err.to_string()))?; + // self.chain + // .process_block(Arc::clone(&block_view)) + // .map_err(|err| RPCError::custom(RPCError::CKBInternalError, 
err.to_string()))?; // announce new block if let Err(err) = self diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index 243d18a4ac..84778a45a0 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -1217,7 +1217,7 @@ impl SyncShared { // verify_failed_callback, }; - chain.process_block(lonely_block); + chain.process_lonely_block(lonely_block); // if let Err(ref error) = ret { // if !is_internal_db_error(error) { diff --git a/util/instrument/src/import.rs b/util/instrument/src/import.rs index c66c45eb14..f2fcfdce3a 100644 --- a/util/instrument/src/import.rs +++ b/util/instrument/src/import.rs @@ -63,13 +63,7 @@ impl Import { let block: JsonBlock = serde_json::from_str(&s)?; let block: Arc = Arc::new(block.into()); if !block.is_genesis() { - self.chain.process_block(LonelyBlock { - block, - peer_id: None, - switch: None, - verify_ok_callback: None, - // verify_failed_callback: Some(|| panic!("import occur malformation data")), - }); + self.chain.process_block(block); } progress_bar.inc(s.as_bytes().len() as u64); } From 48e52c352b117d64dd9befa972954b3cd0be3da6 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 18 Sep 2023 11:30:19 +0800 Subject: [PATCH 051/357] Modify ChainService's callback signature --- chain/src/chain.rs | 9 +++++---- sync/src/relayer/mod.rs | 18 +++++++++++------- sync/src/types/mod.rs | 7 ++++--- util/launcher/src/lib.rs | 7 ++++--- 4 files changed, 24 insertions(+), 17 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 3a2ce0e3a1..4763aca861 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -179,7 +179,7 @@ pub struct LonelyBlock { pub peer_id: Option, pub switch: Option, - pub verify_ok_callback: Option>, + pub verify_ok_callback: Option) + Send + Sync>>, // pub verify_failed_callback: Option, } @@ -199,7 +199,7 @@ struct UnverifiedBlock { pub block: Arc, pub peer_id: Option, pub switch: Switch, - pub verify_ok_callback: Option>, + pub verify_ok_callback: Option) + Send + Sync>>, pub 
parent_header: HeaderView, } @@ -376,13 +376,14 @@ impl ChainService { unverified_block.peer_id, ) { (Some(verify_ok_callback), Some(peer_id)) => { - // verify_ok_callback((&self.shared, peer_id, unverified_block.block)); + verify_ok_callback(Ok(())); } - (Some(verify_ok_callback), _) => { + (Some(verify_ok_callback), None) => { error!( "block {} have verify_ok_callback, but have no peer_id, this should not happen", unverified_block.block.hash() ); + verify_ok_callback(Ok(())) } _ => {} } diff --git a/sync/src/relayer/mod.rs b/sync/src/relayer/mod.rs index a318303209..ce9d942c55 100644 --- a/sync/src/relayer/mod.rs +++ b/sync/src/relayer/mod.rs @@ -311,17 +311,21 @@ impl Relayer { let block = Arc::new(block); - let broadcast_compact_block_tx = self.broadcast_compact_block_tx.clone(); - let block_clone = Arc::clone(&block); - let peer_clone = peer.clone(); let verify_success_callback = { - move || match broadcast_compact_block_tx.send((block_clone, peer_clone)) { - Err(_) => { - error!( + let broadcast_compact_block_tx = self.broadcast_compact_block_tx.clone(); + let block = Arc::clone(&block); + let peer = peer.clone(); + move |result: Result<(), ckb_error::Error>| { + if result.is_err() { + match broadcast_compact_block_tx.send((block, peer)) { + Err(_) => { + error!( "send block to broadcast_compact_block_tx failed, this shouldn't happen", ); + } + _ => {} + } } - _ => {} } }; diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index 84778a45a0..a697bff6f8 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -47,6 +47,7 @@ use std::{cmp, fmt, iter}; use crate::utils::send_message; use ckb_types::core::EpochNumber; +use ckb_types::error::Error; const GET_HEADERS_CACHE_SIZE: usize = 10000; // TODO: Need discussed @@ -1082,7 +1083,7 @@ impl SyncShared { chain: &ChainController, block: Arc, peer_id: PeerIndex, - verify_success_callback: impl FnOnce() + Send + Sync + 'static, + verify_success_callback: impl FnOnce(Result<(), ckb_error::Error>) + 
Send + Sync + 'static, ) { self.accept_block( chain, @@ -1117,7 +1118,7 @@ impl SyncShared { chain, Arc::clone(&block), peer_id, - None::>, + None::) + Send + Sync>>, None, ); // if ret.is_err() { @@ -1188,7 +1189,7 @@ impl SyncShared { chain: &ChainController, block: Arc, peer_id: PeerIndex, - verify_ok_callback: Option>, + verify_ok_callback: Option) + Sync + Send>>, verify_failed_callback: Option, ) { // let ret = { diff --git a/util/launcher/src/lib.rs b/util/launcher/src/lib.rs index 1cf46867af..d3538c55f1 100644 --- a/util/launcher/src/lib.rs +++ b/util/launcher/src/lib.rs @@ -293,17 +293,18 @@ impl Launcher { let mut flags = Flags::all(); if support_protocols.contains(&SupportProtocol::Relay) { - let relayer = Relayer::new(chain_controller.clone(), Arc::clone(&sync_shared)); + let relayer_v3 = Relayer::new(chain_controller.clone(), Arc::clone(&sync_shared)).v3(); protocols.push(CKBProtocol::new_with_support_protocol( SupportProtocols::RelayV3, - Box::new(relayer.clone().v3()), + Box::new(relayer_v3), Arc::clone(&network_state), )); if !fork_enable { + let relayer_v2 = Relayer::new(chain_controller.clone(), Arc::clone(&sync_shared)); protocols.push(CKBProtocol::new_with_support_protocol( SupportProtocols::RelayV2, - Box::new(relayer), + Box::new(relayer_v2), Arc::clone(&network_state), )) } From aeb281d86b924a8337e6c31bf487be20a983b602 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 18 Sep 2023 13:26:20 +0800 Subject: [PATCH 052/357] Execute Callback when process_block failure Signed-off-by: Eval EXEC --- Cargo.lock | 1 + chain/src/chain.rs | 99 +++++++++++++++++++++-------------------- rpc/Cargo.toml | 1 + rpc/src/module/miner.rs | 32 +++++++++---- sync/src/relayer/mod.rs | 22 +++++---- sync/src/types/mod.rs | 8 +--- 6 files changed, 92 insertions(+), 71 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 30d6066715..dd5c573b1f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1350,6 +1350,7 @@ dependencies = [ "ckb-async-runtime", "ckb-chain", 
"ckb-chain-spec", + "ckb-channel", "ckb-constant", "ckb-dao", "ckb-dao-utils", diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 4763aca861..5a398ce9e5 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -92,12 +92,25 @@ impl ChainController { self.internal_process_lonely_block(lonely_block) } + pub fn process_block_with_callback( + &self, + block: Arc, + verify_callback: Box) + Send + Sync>, + ) { + self.internal_process_lonely_block(LonelyBlock { + block, + peer_id: None, + switch: None, + verify_callback: Some(verify_callback), + }) + } + pub fn process_block(&self, block: Arc) { self.internal_process_lonely_block(LonelyBlock { block, peer_id: None, switch: None, - verify_ok_callback: None, + verify_callback: None, }) } @@ -106,7 +119,7 @@ impl ChainController { block, peer_id: None, switch: Some(switch), - verify_ok_callback: None, + verify_callback: None, }) } @@ -179,8 +192,7 @@ pub struct LonelyBlock { pub peer_id: Option, pub switch: Option, - pub verify_ok_callback: Option) + Send + Sync>>, - // pub verify_failed_callback: Option, + pub verify_callback: Option) + Send + Sync>>, } impl LonelyBlock { @@ -189,7 +201,7 @@ impl LonelyBlock { block: self.block, peer_id: self.peer_id, switch, - verify_ok_callback: self.verify_ok_callback, + verify_callback: self.verify_callback, parent_header, } } @@ -199,7 +211,7 @@ struct UnverifiedBlock { pub block: Arc, pub peer_id: Option, pub switch: Switch, - pub verify_ok_callback: Option) + Send + Sync>>, + pub verify_callback: Option) + Send + Sync>>, pub parent_header: HeaderView, } @@ -340,8 +352,17 @@ impl ChainService { Ok(unverified_task) => { // process this unverified block trace!("got an unverified block, wait cost: {:?}", begin_loop.elapsed()); - self.consume_unverified_blocks(unverified_task); + let verify_result = self.consume_unverified_blocks(&unverified_task); trace!("consume_unverified_blocks cost: {:?}", begin_loop.elapsed()); + + match unverified_task.verify_callback { + Some(callback) 
=> { + debug!("executing block {}-{} verify_callback", unverified_task.block.number(), unverified_task.block.hash()); + callback(verify_result); + }, + None => { + } + } }, Err(err) => { error!("unverified_block_rx err: {}", err); @@ -353,9 +374,10 @@ impl ChainService { } } - fn consume_unverified_blocks(&self, unverified_block: UnverifiedBlock) { + fn consume_unverified_blocks(&self, unverified_block: &UnverifiedBlock) -> Result<(), Error> { // process this unverified block - match self.verify_block(&unverified_block) { + let verify_result = self.verify_block(unverified_block); + match &verify_result { Ok(_) => { let log_now = std::time::Instant::now(); self.shared @@ -369,24 +391,6 @@ impl ChainService { log_elapsed_remove_block_status, log_now.elapsed() ); - - // start execute this block's callback function - match ( - unverified_block.verify_ok_callback, - unverified_block.peer_id, - ) { - (Some(verify_ok_callback), Some(peer_id)) => { - verify_ok_callback(Ok(())); - } - (Some(verify_ok_callback), None) => { - error!( - "block {} have verify_ok_callback, but have no peer_id, this should not happen", - unverified_block.block.hash() - ); - verify_ok_callback(Ok(())) - } - _ => {} - } } Err(err) => { error!( @@ -395,16 +399,6 @@ impl ChainService { unverified_block.block.hash(), err ); - if let Some(peer_id) = unverified_block.peer_id { - // if let Err(_) = self.verify_failed_blocks_tx.send(VerifyFailedBlockInfo { - // block_hash: unverified_block.block.hash(), - // peer_id, - // message_bytes: 0, - // reason: "".to_string(), - // }) { - // error!("ChainService want to send VerifyFailedBlockInfo to Synchronizer, but Synchronizer has dropped the receiver"); - // } - } let tip = self .shared @@ -434,6 +428,7 @@ impl ChainService { ); } } + verify_result } fn start_search_orphan_pool( @@ -670,7 +665,13 @@ impl ChainService { if !switch.disable_non_contextual() { let result = self.non_contextual_verify(&lonely_block.block); match result { - Err(err) => {} + Err(err) 
=> match lonely_block.verify_callback { + Some(verify_callback) => { + verify_callback(Err(err)); + return; + } + None => {} + }, _ => {} } } @@ -722,6 +723,13 @@ impl ChainService { .get_block_ext(&block.data().header().raw().parent_hash()) .expect("parent already store"); + if parent_ext.verified == Some(false) { + return Err(InvalidParentError { + parent_hash: parent_header.hash(), + } + .into()); + } + let cannon_total_difficulty = parent_ext.total_difficulty.to_owned() + block.header().difficulty(); @@ -732,13 +740,6 @@ impl ChainService { db_txn.insert_block(block.as_ref())?; - // if parent_ext.verified == Some(false) { - // return Err(InvalidParentError { - // parent_hash: parent_header.hash(), - // } - // .into()); - // } - let next_block_epoch = self .shared .consensus() @@ -775,14 +776,14 @@ impl ChainService { Ok(Some((parent_header, cannon_total_difficulty))) } - fn verify_block(&self, unverified_block: &UnverifiedBlock) -> Result { + fn verify_block(&self, unverified_block: &UnverifiedBlock) -> Result<(), Error> { let log_now = std::time::Instant::now(); let UnverifiedBlock { block, peer_id, switch, - verify_ok_callback, + verify_callback, parent_header, } = unverified_block; @@ -801,7 +802,7 @@ impl ChainService { block.hash(), verified ); - return Ok(verified); + return Ok(()); } _ => {} } @@ -940,7 +941,7 @@ impl ChainService { } } } - Ok(true) + Ok(()) } fn insert_block(&mut self, block: Arc, switch: Switch) -> Result { diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 9493c673bb..64bfeb4f48 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -50,6 +50,7 @@ async-stream = "0.3.3" ckb-async-runtime = { path = "../util/runtime", version = "= 0.114.0-pre" } # issue tracking: https://github.com/GREsau/schemars/pull/251 schemars = { version = "0.8.16", package = "ckb_schemars" } +ckb-channel = { path = "../util/channel", version = "= 0.113.0-pre" } [dev-dependencies] reqwest = { version = "=0.11.20", features = ["blocking", "json"] } diff --git 
a/rpc/src/module/miner.rs b/rpc/src/module/miner.rs index 729b15d0d0..66023db56a 100644 --- a/rpc/src/module/miner.rs +++ b/rpc/src/module/miner.rs @@ -275,14 +275,30 @@ impl MinerRpc for MinerRpcImpl { .verify(&header) .map_err(|err| handle_submit_error(&work_id, &err))?; - // Verify and insert block - let is_new: bool = { - // self - // .chain - // .process_block(Arc::clone(&block)) - // .map_err(|err| handle_submit_error(&work_id, &err))?; - todo!("retrive verify block result by callback"); - }; + let (verify_result_tx, verify_result_rx) = + ckb_channel::oneshot::channel::>(); + let verify_callback: fn(std::result::Result<(), ckb_error::Error>) = + move |verify_result: std::result::Result<(), ckb_error::Error>| match verify_result_tx + .send(verify_result) + { + Err(_) => { + error!("send verify result failed, the Receiver in MinerRpc is disconnected") + } + _ => {} + }; + + self.chain + .process_block_with_callback(Arc::clone(&block), Box::new(verify_callback)); + + let is_new = verify_result_rx + .recv() + .map_err(|recv_err| { + RPCError::ckb_internal_error(format!( + "failed to receive verify result, error: {}", + recv_err + )) + })? 
+ .map_err(|verify_err| handle_submit_error(&work_id, &verify_err))?; info!( "end to submit block, work_id = {}, is_new = {}, block = #{}({})", work_id, diff --git a/sync/src/relayer/mod.rs b/sync/src/relayer/mod.rs index ce9d942c55..021acd922b 100644 --- a/sync/src/relayer/mod.rs +++ b/sync/src/relayer/mod.rs @@ -311,20 +311,26 @@ impl Relayer { let block = Arc::new(block); - let verify_success_callback = { + let verify_success_callback: fn(Result<(), ckb_error::Error>) = { let broadcast_compact_block_tx = self.broadcast_compact_block_tx.clone(); let block = Arc::clone(&block); let peer = peer.clone(); - move |result: Result<(), ckb_error::Error>| { - if result.is_err() { - match broadcast_compact_block_tx.send((block, peer)) { - Err(_) => { - error!( + move |result: Result<(), ckb_error::Error>| match result { + Ok(()) => match broadcast_compact_block_tx.send((block, peer)) { + Err(_) => { + error!( "send block to broadcast_compact_block_tx failed, this shouldn't happen", ); - } - _ => {} } + _ => {} + }, + Err(err) => { + error!( + "verify block {}-{} failed: {:?}, won't build compact block and broadcast it", + block.number(), + block.hash(), + err + ); } } }; diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index a697bff6f8..9cf5763190 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -1090,7 +1090,6 @@ impl SyncShared { Arc::clone(&block), peer_id, Some(Box::new(verify_success_callback)), - None, ) } @@ -1119,7 +1118,6 @@ impl SyncShared { Arc::clone(&block), peer_id, None::) + Send + Sync>>, - None, ); // if ret.is_err() { // debug!("accept block {:?} {:?}", block, ret); @@ -1189,8 +1187,7 @@ impl SyncShared { chain: &ChainController, block: Arc, peer_id: PeerIndex, - verify_ok_callback: Option) + Sync + Send>>, - verify_failed_callback: Option, + verify_callback: Option) + Sync + Send>>, ) { // let ret = { // let mut assume_valid_target = self.state.assume_valid_target(); @@ -1214,8 +1211,7 @@ impl SyncShared { block, peer_id: 
Some(peer_id), switch: Some(Switch::NONE), - verify_ok_callback, - // verify_failed_callback, + verify_callback, }; chain.process_lonely_block(lonely_block); From 5e11fe4158bae78ea4e6a6e63e9f8ee9f08156e9 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 18 Sep 2023 15:11:19 +0800 Subject: [PATCH 053/357] Create `VerifyCallback` type alias --- chain/src/chain.rs | 10 +++++----- sync/src/relayer/mod.rs | 2 +- sync/src/types/mod.rs | 6 +++--- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 5a398ce9e5..6b460d8cb5 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -55,6 +55,8 @@ const ORPHAN_BLOCK_SIZE: usize = (BLOCK_DOWNLOAD_WINDOW * 2) as usize; type ProcessBlockRequest = Request; type TruncateRequest = Request>; +pub type VerifyCallback = dyn FnOnce(Result<(), ckb_error::Error>) + Send + Sync; + /// Controller to the chain service. /// /// The controller is internally reference-counted and can be freely cloned. @@ -95,7 +97,7 @@ impl ChainController { pub fn process_block_with_callback( &self, block: Arc, - verify_callback: Box) + Send + Sync>, + verify_callback: Box, ) { self.internal_process_lonely_block(LonelyBlock { block, @@ -185,14 +187,12 @@ pub struct ChainService { orphan_blocks_broker: Arc, } -pub type VerifyCallbackArgs<'a> = (&'a Shared, PeerIndex, Arc); - pub struct LonelyBlock { pub block: Arc, pub peer_id: Option, pub switch: Option, - pub verify_callback: Option) + Send + Sync>>, + pub verify_callback: Option>, } impl LonelyBlock { @@ -211,7 +211,7 @@ struct UnverifiedBlock { pub block: Arc, pub peer_id: Option, pub switch: Switch, - pub verify_callback: Option) + Send + Sync>>, + pub verify_callback: Option>, pub parent_header: HeaderView, } diff --git a/sync/src/relayer/mod.rs b/sync/src/relayer/mod.rs index 021acd922b..62bd906b92 100644 --- a/sync/src/relayer/mod.rs +++ b/sync/src/relayer/mod.rs @@ -311,7 +311,7 @@ impl Relayer { let block = Arc::new(block); - let 
verify_success_callback: fn(Result<(), ckb_error::Error>) = { + let verify_success_callback = { let broadcast_compact_block_tx = self.broadcast_compact_block_tx.clone(); let block = Arc::clone(&block); let peer = peer.clone(); diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index 9cf5763190..aaf9ce574e 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -2,7 +2,7 @@ use crate::orphan_block_pool::OrphanBlockPool; use crate::utils::is_internal_db_error; use crate::{Status, StatusCode, FAST_INDEX, LOW_INDEX, NORMAL_INDEX, TIME_TRACE_SIZE}; use ckb_app_config::SyncConfig; -use ckb_chain::chain::{ChainController, LonelyBlock}; +use ckb_chain::chain::{ChainController, LonelyBlock, VerifyCallback}; use ckb_chain_spec::consensus::{Consensus, MAX_BLOCK_INTERVAL, MIN_BLOCK_INTERVAL}; use ckb_channel::Receiver; use ckb_constant::sync::{ @@ -1117,7 +1117,7 @@ impl SyncShared { chain, Arc::clone(&block), peer_id, - None::) + Send + Sync>>, + None::>, ); // if ret.is_err() { // debug!("accept block {:?} {:?}", block, ret); @@ -1187,7 +1187,7 @@ impl SyncShared { chain: &ChainController, block: Arc, peer_id: PeerIndex, - verify_callback: Option) + Sync + Send>>, + verify_callback: Option>, ) { // let ret = { // let mut assume_valid_target = self.state.assume_valid_target(); From 3c1ecf35c51ecc6839700dd2cf5e31959f093a39 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 18 Sep 2023 15:11:29 +0800 Subject: [PATCH 054/357] Comment MinerRpc's, use callback later --- rpc/src/module/miner.rs | 37 ++++++++++++++++++++----------------- 1 file changed, 20 insertions(+), 17 deletions(-) diff --git a/rpc/src/module/miner.rs b/rpc/src/module/miner.rs index 66023db56a..b9d0a0405b 100644 --- a/rpc/src/module/miner.rs +++ b/rpc/src/module/miner.rs @@ -277,7 +277,7 @@ impl MinerRpc for MinerRpcImpl { let (verify_result_tx, verify_result_rx) = ckb_channel::oneshot::channel::>(); - let verify_callback: fn(std::result::Result<(), ckb_error::Error>) = + let verify_callback 
= move |verify_result: std::result::Result<(), ckb_error::Error>| match verify_result_tx .send(verify_result) { @@ -290,22 +290,25 @@ impl MinerRpc for MinerRpcImpl { self.chain .process_block_with_callback(Arc::clone(&block), Box::new(verify_callback)); - let is_new = verify_result_rx - .recv() - .map_err(|recv_err| { - RPCError::ckb_internal_error(format!( - "failed to receive verify result, error: {}", - recv_err - )) - })? - .map_err(|verify_err| handle_submit_error(&work_id, &verify_err))?; - info!( - "end to submit block, work_id = {}, is_new = {}, block = #{}({})", - work_id, - is_new, - block.number(), - block.hash() - ); + let is_new = true; + todo!("got a block is new or not via callback"); + + // let is_new = verify_result_rx + // .recv() + // .map_err(|recv_err| { + // RPCError::ckb_internal_error(format!( + // "failed to receive verify result, error: {}", + // recv_err + // )) + // })? + // .map_err(|verify_err| handle_submit_error(&work_id, &verify_err))?; + // info!( + // "end to submit block, work_id = {}, is_new = {}, block = #{}({})", + // work_id, + // is_new, + // block.number(), + // block.hash() + // ); // Announce only new block if is_new { From 01c1eb22a111539fc08f5d0e46e681fc31f837ad Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 18 Sep 2023 17:42:42 +0800 Subject: [PATCH 055/357] Introduce VerifiedBlockStatus as verify_block's return type Signed-off-by: Eval EXEC --- chain/src/chain.rs | 29 ++++++++++++++++++++++++----- 1 file changed, 24 insertions(+), 5 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 6b460d8cb5..a1a3ca8a95 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -6,7 +6,7 @@ use crate::orphan_block_pool::OrphanBlockPool; use ckb_chain_spec::versionbits::VersionbitsIndexer; use ckb_channel::{self as channel, select, Receiver, SendError, Sender}; use ckb_constant::sync::BLOCK_DOWNLOAD_WINDOW; -use ckb_error::{Error, InternalErrorKind}; +use ckb_error::{Error, ErrorKind, InternalError, 
InternalErrorKind}; use ckb_logger::Level::Trace; use ckb_logger::{ self, debug, error, info, log_enabled, log_enabled_target, trace, trace_target, warn, @@ -55,7 +55,16 @@ const ORPHAN_BLOCK_SIZE: usize = (BLOCK_DOWNLOAD_WINDOW * 2) as usize; type ProcessBlockRequest = Request; type TruncateRequest = Request>; -pub type VerifyCallback = dyn FnOnce(Result<(), ckb_error::Error>) + Send + Sync; +pub type VerifyCallback = dyn FnOnce(Result) + Send + Sync; + +/// VerifiedBlockStatus is +pub enum VerifiedBlockStatus { + // The block is being seen for the first time. + FirstSeen, + + // The block has been verified before. + PreviouslyVerified, +} /// Controller to the chain service. /// @@ -374,7 +383,10 @@ impl ChainService { } } - fn consume_unverified_blocks(&self, unverified_block: &UnverifiedBlock) -> Result<(), Error> { + fn consume_unverified_blocks( + &self, + unverified_block: &UnverifiedBlock, + ) -> Result { // process this unverified block let verify_result = self.verify_block(unverified_block); match &verify_result { @@ -679,8 +691,15 @@ impl ChainService { match lonely_block_tx.send(lonely_block) { Ok(_) => {} - Err(err) => { - error!("notify new block to orphan pool err: {}", err) + Err(SendError(lonely_block)) => { + error!("notify new block to orphan pool err: {}", err); + if let Some(verify_callback) = lonely_block.verify_callback { + verify_callback( + InternalErrorKind::System + .other("OrphanBlock broker disconnected") + .into(), + ); + } } } debug!( From 8fefebcd1a034a8243fa33bbf13ac93306971ae8 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 20 Sep 2023 12:56:40 +0800 Subject: [PATCH 056/357] Fix ChainService error handle --- chain/src/chain.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index a1a3ca8a95..60db0955a0 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -523,7 +523,9 @@ impl ChainService { match unverified_block_tx.send(unverified_block) { Ok(_) => 
{} - Err(err) => error!("send unverified_block_tx failed: {}", err), + Err(err) => { + error!("send unverified_block_tx failed: {}", err) + } }; if total_difficulty @@ -694,11 +696,9 @@ impl ChainService { Err(SendError(lonely_block)) => { error!("notify new block to orphan pool err: {}", err); if let Some(verify_callback) = lonely_block.verify_callback { - verify_callback( - InternalErrorKind::System - .other("OrphanBlock broker disconnected") - .into(), - ); + verify_callback(Err(InternalErrorKind::System + .other("OrphanBlock broker disconnected") + .into())); } } } From fd5b3d5d6f5a5d66bfd89fee3e738b4c6a857e8c Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 9 Oct 2023 12:18:00 +0800 Subject: [PATCH 057/357] Unify process_block's return type as `VerifyResult` --- chain/src/chain.rs | 36 +++++++++++++++++++++++------------- rpc/src/module/miner.rs | 23 +++++++++++------------ sync/src/relayer/mod.rs | 16 +++++++++++----- sync/src/types/mod.rs | 6 ++++-- 4 files changed, 49 insertions(+), 32 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 60db0955a0..66a6fa0df7 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -55,12 +55,15 @@ const ORPHAN_BLOCK_SIZE: usize = (BLOCK_DOWNLOAD_WINDOW * 2) as usize; type ProcessBlockRequest = Request; type TruncateRequest = Request>; -pub type VerifyCallback = dyn FnOnce(Result) + Send + Sync; +pub type VerifyResult = Result; + +pub type VerifyCallback = dyn FnOnce(VerifyResult) + Send + Sync; /// VerifiedBlockStatus is pub enum VerifiedBlockStatus { // The block is being seen for the first time. - FirstSeen, + FirstSeenAndVerified, + FirstSeenButNotVerified, // The block has been verified before. 
PreviouslyVerified, @@ -383,10 +386,7 @@ impl ChainService { } } - fn consume_unverified_blocks( - &self, - unverified_block: &UnverifiedBlock, - ) -> Result { + fn consume_unverified_blocks(&self, unverified_block: &UnverifiedBlock) -> VerifyResult { // process this unverified block let verify_result = self.verify_block(unverified_block); match &verify_result { @@ -694,7 +694,7 @@ impl ChainService { match lonely_block_tx.send(lonely_block) { Ok(_) => {} Err(SendError(lonely_block)) => { - error!("notify new block to orphan pool err: {}", err); + error!("failed to notify new block to orphan pool"); if let Some(verify_callback) = lonely_block.verify_callback { verify_callback(Err(InternalErrorKind::System .other("OrphanBlock broker disconnected") @@ -795,7 +795,7 @@ impl ChainService { Ok(Some((parent_header, cannon_total_difficulty))) } - fn verify_block(&self, unverified_block: &UnverifiedBlock) -> Result<(), Error> { + fn verify_block(&self, unverified_block: &UnverifiedBlock) -> VerifyResult { let log_now = std::time::Instant::now(); let UnverifiedBlock { @@ -810,20 +810,28 @@ impl ChainService { .shared .store() .get_block_ext(&block.data().header().raw().parent_hash()) - .expect("parent already store"); + .expect("parent should be stored already"); if let Some(ext) = self.shared.store().get_block_ext(&block.hash()) { match ext.verified { Some(verified) => { debug!( - "block {}-{} has been verified: {}", + "block {}-{} has been verified, previously verified result: {}", block.number(), block.hash(), verified ); - return Ok(()); + return if verified { + Ok(VerifiedBlockStatus::PreviouslyVerified) + } else { + Err(InternalErrorKind::Other + .other("block previously verified failed") + .into()) + }; + } + _ => { + // we didn't verify this block, going on verify now } - _ => {} } } @@ -941,6 +949,8 @@ impl ChainService { if let Some(metrics) = ckb_metrics::handle() { metrics.ckb_chain_tip.set(block.header().number() as i64); } + + 
Ok(VerifiedBlockStatus::FirstSeenAndVerified) } else { self.shared.refresh_snapshot(); info!( @@ -959,8 +969,8 @@ impl ChainService { error!("[verify block] notify new_uncle error {}", e); } } + Ok(VerifiedBlockStatus::FirstSeenButNotVerified) } - Ok(()) } fn insert_block(&mut self, block: Arc, switch: Switch) -> Result { diff --git a/rpc/src/module/miner.rs b/rpc/src/module/miner.rs index b9d0a0405b..51398e9da9 100644 --- a/rpc/src/module/miner.rs +++ b/rpc/src/module/miner.rs @@ -1,6 +1,6 @@ use crate::error::RPCError; use async_trait::async_trait; -use ckb_chain::chain::ChainController; +use ckb_chain::chain::{ChainController, VerifiedBlockStatus, VerifyResult}; use ckb_jsonrpc_types::{Block, BlockTemplate, Uint64, Version}; use ckb_logger::{debug, error, info, warn}; use ckb_network::{NetworkController, PeerIndex, SupportProtocols, TargetSession}; @@ -275,17 +275,16 @@ impl MinerRpc for MinerRpcImpl { .verify(&header) .map_err(|err| handle_submit_error(&work_id, &err))?; - let (verify_result_tx, verify_result_rx) = - ckb_channel::oneshot::channel::>(); - let verify_callback = - move |verify_result: std::result::Result<(), ckb_error::Error>| match verify_result_tx - .send(verify_result) - { - Err(_) => { - error!("send verify result failed, the Receiver in MinerRpc is disconnected") - } - _ => {} - }; + let (verify_result_tx, verify_result_rx) = ckb_channel::oneshot::channel::(); + let verify_callback = move |verify_result: std::result::Result< + VerifiedBlockStatus, + ckb_error::Error, + >| match verify_result_tx.send(verify_result) { + Err(_) => { + error!("send verify result failed, the Receiver in MinerRpc is disconnected") + } + _ => {} + }; self.chain .process_block_with_callback(Arc::clone(&block), Box::new(verify_callback)); diff --git a/sync/src/relayer/mod.rs b/sync/src/relayer/mod.rs index 62bd906b92..e0a8726aaf 100644 --- a/sync/src/relayer/mod.rs +++ b/sync/src/relayer/mod.rs @@ -25,7 +25,7 @@ use crate::utils::{ is_internal_db_error, 
metric_ckb_message_bytes, send_message_to, MetricDirection, }; use crate::{Status, StatusCode}; -use ckb_chain::chain::ChainController; +use ckb_chain::chain::{ChainController, VerifiedBlockStatus, VerifyResult}; use ckb_constant::sync::BAD_MESSAGE_BAN_TIME; use ckb_logger::{debug_target, error, error_target, info_target, trace_target, warn_target}; use ckb_network::{ @@ -315,12 +315,18 @@ impl Relayer { let broadcast_compact_block_tx = self.broadcast_compact_block_tx.clone(); let block = Arc::clone(&block); let peer = peer.clone(); - move |result: Result<(), ckb_error::Error>| match result { - Ok(()) => match broadcast_compact_block_tx.send((block, peer)) { - Err(_) => { - error!( + move |result: VerifyResult| match result { + Ok(verified_block_status) => match verified_block_status { + VerifiedBlockStatus::FirstSeenAndVerified + | VerifiedBlockStatus::FirstSeenAndVerified => { + match broadcast_compact_block_tx.send((block, peer)) { + Err(_) => { + error!( "send block to broadcast_compact_block_tx failed, this shouldn't happen", ); + } + _ => {} + } } _ => {} }, diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index aaf9ce574e..5be719613f 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -2,7 +2,9 @@ use crate::orphan_block_pool::OrphanBlockPool; use crate::utils::is_internal_db_error; use crate::{Status, StatusCode, FAST_INDEX, LOW_INDEX, NORMAL_INDEX, TIME_TRACE_SIZE}; use ckb_app_config::SyncConfig; -use ckb_chain::chain::{ChainController, LonelyBlock, VerifyCallback}; +use ckb_chain::chain::{ + ChainController, LonelyBlock, VerifiedBlockStatus, VerifyCallback, VerifyResult, +}; use ckb_chain_spec::consensus::{Consensus, MAX_BLOCK_INTERVAL, MIN_BLOCK_INTERVAL}; use ckb_channel::Receiver; use ckb_constant::sync::{ @@ -1083,7 +1085,7 @@ impl SyncShared { chain: &ChainController, block: Arc, peer_id: PeerIndex, - verify_success_callback: impl FnOnce(Result<(), ckb_error::Error>) + Send + Sync + 'static, + verify_success_callback: impl 
FnOnce(VerifyResult) + Send + Sync + 'static, ) { self.accept_block( chain, From 29fef22f1f935993785a83e78586c8cb9b6e8651 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 16 Oct 2023 15:07:13 +0800 Subject: [PATCH 058/357] Rename `LonelyBlock` to `LonelyBlockWithCallback` Signed-off-by: Eval EXEC --- chain/src/chain.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 66a6fa0df7..19726dc7fb 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -199,7 +199,7 @@ pub struct ChainService { orphan_blocks_broker: Arc, } -pub struct LonelyBlock { +pub struct LonelyBlockWithCallback { pub block: Arc, pub peer_id: Option, pub switch: Option, From afba7b0ef3926e6ff31d92db5f6e2205b5b17b20 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 16 Oct 2023 15:08:17 +0800 Subject: [PATCH 059/357] Modify all usage of LonelyBlockWithCallback --- chain/src/chain.rs | 26 +++++++++++++------------- chain/src/orphan_block_pool.rs | 20 +++++++++++++------- sync/src/types/mod.rs | 4 ++-- util/instrument/src/import.rs | 2 +- 4 files changed, 29 insertions(+), 23 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 19726dc7fb..0e5bdf8fde 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -52,7 +52,7 @@ use std::{cmp, thread}; const ORPHAN_BLOCK_SIZE: usize = (BLOCK_DOWNLOAD_WINDOW * 2) as usize; -type ProcessBlockRequest = Request; +type ProcessBlockRequest = Request; type TruncateRequest = Request>; pub type VerifyResult = Result; @@ -102,7 +102,7 @@ impl ChainController { /// If the block already exists, does nothing and false is returned. 
/// /// [BlockVerifier] [NonContextualBlockTxsVerifier] [ContextualBlockVerifier] will performed - pub fn process_lonely_block(&self, lonely_block: LonelyBlock) { + pub fn process_lonely_block(&self, lonely_block: LonelyBlockWithCallback) { self.internal_process_lonely_block(lonely_block) } @@ -111,7 +111,7 @@ impl ChainController { block: Arc, verify_callback: Box, ) { - self.internal_process_lonely_block(LonelyBlock { + self.internal_process_lonely_block(LonelyBlockWithCallback { block, peer_id: None, switch: None, @@ -120,7 +120,7 @@ impl ChainController { } pub fn process_block(&self, block: Arc) { - self.internal_process_lonely_block(LonelyBlock { + self.internal_process_lonely_block(LonelyBlockWithCallback { block, peer_id: None, switch: None, @@ -129,7 +129,7 @@ impl ChainController { } pub fn internal_process_block(&self, block: Arc, switch: Switch) { - self.internal_process_lonely_block(LonelyBlock { + self.internal_process_lonely_block(LonelyBlockWithCallback { block, peer_id: None, switch: Some(switch), @@ -140,7 +140,7 @@ impl ChainController { /// Internal method insert block for test /// /// switch bit flags for particular verify, make easier to generating test data - pub fn internal_process_lonely_block(&self, lonely_block: LonelyBlock) { + pub fn internal_process_lonely_block(&self, lonely_block: LonelyBlockWithCallback) { if Request::call(&self.process_block_sender, lonely_block).is_none() { error!("Chain service has gone") } @@ -207,7 +207,7 @@ pub struct LonelyBlockWithCallback { pub verify_callback: Option>, } -impl LonelyBlock { +impl LonelyBlockWithCallback { fn combine_parent_header(self, parent_header: HeaderView, switch: Switch) -> UnverifiedBlock { UnverifiedBlock { block: self.block, @@ -234,7 +234,7 @@ impl ChainService { channel::bounded::(BLOCK_DOWNLOAD_WINDOW as usize * 3); let (new_block_tx, new_block_rx) = - channel::bounded::(BLOCK_DOWNLOAD_WINDOW as usize); + channel::bounded::(BLOCK_DOWNLOAD_WINDOW as usize); ChainService { 
shared, @@ -278,7 +278,7 @@ impl ChainService { .expect("start unverified_queue consumer thread should ok"); let (lonely_block_tx, lonely_block_rx) = - channel::bounded::(BLOCK_DOWNLOAD_WINDOW as usize); + channel::bounded::(BLOCK_DOWNLOAD_WINDOW as usize); let search_orphan_pool_thread = thread::Builder::new() .name("search_orphan".into()) @@ -446,7 +446,7 @@ impl ChainService { fn start_search_orphan_pool( &self, search_orphan_pool_stop_rx: Receiver<()>, - lonely_block_rx: Receiver, + lonely_block_rx: Receiver, unverified_block_tx: Sender, ) { loop { @@ -478,7 +478,7 @@ impl ChainService { continue; } - let descendants: Vec = self + let descendants: Vec = self .orphan_blocks_broker .remove_blocks_by_parent(&leader_hash); if descendants.is_empty() { @@ -667,8 +667,8 @@ impl ChainService { #[doc(hidden)] pub fn process_block_v2( &self, - lonely_block: LonelyBlock, - lonely_block_tx: Sender, + lonely_block: LonelyBlockWithCallback, + lonely_block_tx: Sender, ) { let block_number = lonely_block.block.number(); let block_hash = lonely_block.block.hash(); diff --git a/chain/src/orphan_block_pool.rs b/chain/src/orphan_block_pool.rs index 4614eaed20..56064f25f1 100644 --- a/chain/src/orphan_block_pool.rs +++ b/chain/src/orphan_block_pool.rs @@ -1,4 +1,4 @@ -use crate::chain::LonelyBlock; +use crate::chain::LonelyBlockWithCallback; use ckb_logger::debug; use ckb_types::core::{BlockView, EpochNumber}; use ckb_types::{core, packed}; @@ -14,7 +14,7 @@ const EXPIRED_EPOCH: u64 = 6; #[derive(Default)] struct InnerPool { // Group by blocks in the pool by the parent hash. - blocks: HashMap>, + blocks: HashMap>, // The map tells the parent hash when given the hash of a block in the pool. // // The block is in the orphan pool if and only if the block hash exists as a key in this map. 
@@ -32,7 +32,7 @@ impl InnerPool { } } - fn insert(&mut self, lonely_block: LonelyBlock) { + fn insert(&mut self, lonely_block: LonelyBlockWithCallback) { let hash = lonely_block.block.header().hash(); let parent_hash = lonely_block.block.data().header().raw().parent_hash(); self.blocks @@ -52,7 +52,10 @@ impl InnerPool { self.parents.insert(hash, parent_hash); } - pub fn remove_blocks_by_parent(&mut self, parent_hash: &ParentHash) -> Vec { + pub fn remove_blocks_by_parent( + &mut self, + parent_hash: &ParentHash, + ) -> Vec { // try remove leaders first if !self.leaders.remove(parent_hash) { return Vec::new(); @@ -61,7 +64,7 @@ impl InnerPool { let mut queue: VecDeque = VecDeque::new(); queue.push_back(parent_hash.to_owned()); - let mut removed: Vec = Vec::new(); + let mut removed: Vec = Vec::new(); while let Some(parent_hash) = queue.pop_front() { if let Some(orphaned) = self.blocks.remove(&parent_hash) { let (hashes, blocks): (Vec<_>, Vec<_>) = orphaned.into_iter().unzip(); @@ -143,11 +146,14 @@ impl OrphanBlockPool { } /// Insert orphaned block, for which we have already requested its parent block - pub fn insert(&self, lonely_block: LonelyBlock) { + pub fn insert(&self, lonely_block: LonelyBlockWithCallback) { self.inner.write().insert(lonely_block); } - pub fn remove_blocks_by_parent(&self, parent_hash: &ParentHash) -> Vec { + pub fn remove_blocks_by_parent( + &self, + parent_hash: &ParentHash, + ) -> Vec { self.inner.write().remove_blocks_by_parent(parent_hash) } diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index 5be719613f..d4bc32a22f 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -3,7 +3,7 @@ use crate::utils::is_internal_db_error; use crate::{Status, StatusCode, FAST_INDEX, LOW_INDEX, NORMAL_INDEX, TIME_TRACE_SIZE}; use ckb_app_config::SyncConfig; use ckb_chain::chain::{ - ChainController, LonelyBlock, VerifiedBlockStatus, VerifyCallback, VerifyResult, + ChainController, LonelyBlockWithCallback, VerifiedBlockStatus, 
VerifyCallback, VerifyResult, }; use ckb_chain_spec::consensus::{Consensus, MAX_BLOCK_INTERVAL, MIN_BLOCK_INTERVAL}; use ckb_channel::Receiver; @@ -1209,7 +1209,7 @@ impl SyncShared { // }; // TODO move switch logic to ckb-chain - let lonely_block = LonelyBlock { + let lonely_block = LonelyBlockWithCallback { block, peer_id: Some(peer_id), switch: Some(Switch::NONE), diff --git a/util/instrument/src/import.rs b/util/instrument/src/import.rs index f2fcfdce3a..2dd40b3b71 100644 --- a/util/instrument/src/import.rs +++ b/util/instrument/src/import.rs @@ -1,4 +1,4 @@ -use ckb_chain::chain::{ChainController, LonelyBlock}; +use ckb_chain::chain::{ChainController, LonelyBlockWithCallback}; use ckb_jsonrpc_types::BlockView as JsonBlock; use ckb_types::core; #[cfg(feature = "progress_bar")] From 940509cb114101b4876e27c90399cf2feb5b9edc Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 16 Oct 2023 15:20:11 +0800 Subject: [PATCH 060/357] Extract `LonelyBlock` --- chain/src/chain.rs | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 0e5bdf8fde..e2d3bb4728 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -199,19 +199,34 @@ pub struct ChainService { orphan_blocks_broker: Arc, } -pub struct LonelyBlockWithCallback { +pub struct LonelyBlock { pub block: Arc, pub peer_id: Option, pub switch: Option, +} + +impl LonelyBlock { + fn with_callback( + self, + verify_callback: Option>, + ) -> LonelyBlockWithCallback { + LonelyBlockWithCallback { + lonely_block: self, + verify_callback, + } + } +} +pub struct LonelyBlockWithCallback { + pub lonely_block: LonelyBlock, pub verify_callback: Option>, } impl LonelyBlockWithCallback { fn combine_parent_header(self, parent_header: HeaderView, switch: Switch) -> UnverifiedBlock { UnverifiedBlock { - block: self.block, - peer_id: self.peer_id, + block: self.lonely_block.block, + peer_id: self.lonely_block.peer_id, switch, verify_callback: 
self.verify_callback, parent_header, From 7fc80838128ea0d260c7bb6151d569938ddc06d1 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 16 Oct 2023 15:25:49 +0800 Subject: [PATCH 061/357] Construct LonelyBlockWithCallback from LonelyBlock --- chain/src/chain.rs | 74 +++++++++++++++++++++------------- chain/src/orphan_block_pool.rs | 10 ++--- sync/src/types/mod.rs | 9 +++-- 3 files changed, 56 insertions(+), 37 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index e2d3bb4728..d4b0e523e0 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -111,30 +111,36 @@ impl ChainController { block: Arc, verify_callback: Box, ) { - self.internal_process_lonely_block(LonelyBlockWithCallback { - block, - peer_id: None, - switch: None, - verify_callback: Some(verify_callback), - }) + self.internal_process_lonely_block( + LonelyBlock { + block, + peer_id: None, + switch: None, + } + .with_callback(Some(verify_callback)), + ) } pub fn process_block(&self, block: Arc) { - self.internal_process_lonely_block(LonelyBlockWithCallback { - block, - peer_id: None, - switch: None, - verify_callback: None, - }) + self.internal_process_lonely_block( + LonelyBlock { + block, + peer_id: None, + switch: None, + } + .with_callback(None), + ) } pub fn internal_process_block(&self, block: Arc, switch: Switch) { - self.internal_process_lonely_block(LonelyBlockWithCallback { - block, - peer_id: None, - switch: Some(switch), - verify_callback: None, - }) + self.internal_process_lonely_block( + LonelyBlock { + block, + peer_id: None, + switch: Some(switch), + } + .with_callback(None), + ) } /// Internal method insert block for test @@ -206,7 +212,7 @@ pub struct LonelyBlock { } impl LonelyBlock { - fn with_callback( + pub fn with_callback( self, verify_callback: Option>, ) -> LonelyBlockWithCallback { @@ -222,6 +228,18 @@ pub struct LonelyBlockWithCallback { pub verify_callback: Option>, } +impl LonelyBlockWithCallback { + pub fn block(&self) -> &Arc { + &self.lonely_block.block + 
} + pub fn peer_id(&self) -> Option { + self.lonely_block.peer_id + } + pub fn switch(&self) -> Option { + self.lonely_block.switch + } +} + impl LonelyBlockWithCallback { fn combine_parent_header(self, parent_header: HeaderView, switch: Switch) -> UnverifiedBlock { UnverifiedBlock { @@ -508,23 +526,23 @@ impl ChainService { descendants .first() .expect("descdant not empty") - .block + .block() .number(), descendants .last() .expect("descdant not empty") - .block + .block() .number(), ); let mut accept_error_occurred = false; for descendant_block in descendants { - match self.accept_block(descendant_block.block.to_owned()) { + match self.accept_block(descendant_block.block().to_owned()) { Err(err) => { accept_error_occurred = true; error!( "accept block {} failed: {}", - descendant_block.block.hash(), + descendant_block.block().hash(), err ); continue; @@ -567,7 +585,7 @@ impl ChainService { None => { info!( "doesn't accept block {}, because it has been stored", - descendant_block.block.hash() + descendant_block.block().hash() ); } }, @@ -685,14 +703,14 @@ impl ChainService { lonely_block: LonelyBlockWithCallback, lonely_block_tx: Sender, ) { - let block_number = lonely_block.block.number(); - let block_hash = lonely_block.block.hash(); + let block_number = lonely_block.block().number(); + let block_hash = lonely_block.block().hash(); if block_number < 1 { warn!("receive 0 number block: 0-{}", block_hash); } - if let Some(switch) = lonely_block.switch { + if let Some(switch) = lonely_block.switch() { if !switch.disable_non_contextual() { - let result = self.non_contextual_verify(&lonely_block.block); + let result = self.non_contextual_verify(&lonely_block.block()); match result { Err(err) => match lonely_block.verify_callback { Some(verify_callback) => { diff --git a/chain/src/orphan_block_pool.rs b/chain/src/orphan_block_pool.rs index 56064f25f1..f7ce3a4bcb 100644 --- a/chain/src/orphan_block_pool.rs +++ b/chain/src/orphan_block_pool.rs @@ -33,8 +33,8 @@ impl 
InnerPool { } fn insert(&mut self, lonely_block: LonelyBlockWithCallback) { - let hash = lonely_block.block.header().hash(); - let parent_hash = lonely_block.block.data().header().raw().parent_hash(); + let hash = lonely_block.block().header().hash(); + let parent_hash = lonely_block.block().data().header().raw().parent_hash(); self.blocks .entry(parent_hash.clone()) .or_insert_with(HashMap::default) @@ -94,7 +94,7 @@ impl InnerPool { self.blocks.get(parent_hash).and_then(|blocks| { blocks .get(hash) - .map(|lonely_block| lonely_block.block.clone()) + .map(|lonely_block| lonely_block.block().clone()) }) }) } @@ -110,7 +110,7 @@ impl InnerPool { result.extend( descendants .iter() - .map(|lonely_block| lonely_block.block.hash()), + .map(|lonely_block| lonely_block.block().hash()), ); } } @@ -123,7 +123,7 @@ impl InnerPool { .get(parent_hash) .and_then(|map| { map.iter().next().map(|(_, lonely_block)| { - lonely_block.block.header().epoch().number() + EXPIRED_EPOCH < tip_epoch + lonely_block.block().header().epoch().number() + EXPIRED_EPOCH < tip_epoch }) }) .unwrap_or_default() diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index d4bc32a22f..5355776913 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -3,7 +3,8 @@ use crate::utils::is_internal_db_error; use crate::{Status, StatusCode, FAST_INDEX, LOW_INDEX, NORMAL_INDEX, TIME_TRACE_SIZE}; use ckb_app_config::SyncConfig; use ckb_chain::chain::{ - ChainController, LonelyBlockWithCallback, VerifiedBlockStatus, VerifyCallback, VerifyResult, + ChainController, LonelyBlock, LonelyBlockWithCallback, VerifiedBlockStatus, VerifyCallback, + VerifyResult, }; use ckb_chain_spec::consensus::{Consensus, MAX_BLOCK_INTERVAL, MIN_BLOCK_INTERVAL}; use ckb_channel::Receiver; @@ -1209,12 +1210,12 @@ impl SyncShared { // }; // TODO move switch logic to ckb-chain - let lonely_block = LonelyBlockWithCallback { + let lonely_block = LonelyBlock { block, peer_id: Some(peer_id), switch: Some(Switch::NONE), - 
verify_callback, - }; + } + .with_callback(verify_callback); chain.process_lonely_block(lonely_block); From 8bf6b68feff167c63f0b965eb96ede6879622ec6 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 16 Oct 2023 16:01:56 +0800 Subject: [PATCH 062/357] Add asynchronous methods to process block --- chain/src/chain.rs | 61 +++++++++++++++++++++++----------------------- 1 file changed, 31 insertions(+), 30 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index d4b0e523e0..fda5a29061 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -95,59 +95,56 @@ impl ChainController { orphan_block_broker, } } - /// Inserts the block into database. - /// - /// Expects the block's header to be valid and already verified. - /// - /// If the block already exists, does nothing and false is returned. - /// - /// [BlockVerifier] [NonContextualBlockTxsVerifier] [ContextualBlockVerifier] will performed - pub fn process_lonely_block(&self, lonely_block: LonelyBlockWithCallback) { - self.internal_process_lonely_block(lonely_block) + + pub fn asynchronous_process_block_with_switch(&self, block: Arc, switch: Switch) { + self.asynchronous_process_lonely_block(LonelyBlock { + block, + peer_id: None, + switch: Some(switch), + }) } - pub fn process_block_with_callback( - &self, - block: Arc, - verify_callback: Box, - ) { - self.internal_process_lonely_block( + pub fn asynchronous_process_block(&self, block: Arc) { + self.asynchronous_process_lonely_block_with_callback( LonelyBlock { block, peer_id: None, switch: None, } - .with_callback(Some(verify_callback)), + .without_callback(), ) } - pub fn process_block(&self, block: Arc) { - self.internal_process_lonely_block( + pub fn asynchronous_process_block_with_callback( + &self, + block: Arc, + verify_callback: Box, + ) { + self.asynchronous_process_lonely_block_with_callback( LonelyBlock { block, peer_id: None, switch: None, } - .with_callback(None), + .with_callback(Some(verify_callback)), ) } - pub fn 
internal_process_block(&self, block: Arc, switch: Switch) { - self.internal_process_lonely_block( - LonelyBlock { - block, - peer_id: None, - switch: Some(switch), - } - .with_callback(None), - ) + pub fn asynchronous_process_lonely_block(&self, lonely_block: LonelyBlock) { + let lonely_block_without_callback: LonelyBlockWithCallback = + lonely_block.without_callback(); + + self.asynchronous_process_lonely_block_with_callback(lonely_block_without_callback); } /// Internal method insert block for test /// /// switch bit flags for particular verify, make easier to generating test data - pub fn internal_process_lonely_block(&self, lonely_block: LonelyBlockWithCallback) { - if Request::call(&self.process_block_sender, lonely_block).is_none() { + pub fn asynchronous_process_lonely_block_with_callback( + &self, + lonely_block_with_callback: LonelyBlockWithCallback, + ) { + if Request::call(&self.process_block_sender, lonely_block_with_callback).is_none() { error!("Chain service has gone") } } @@ -221,6 +218,10 @@ impl LonelyBlock { verify_callback, } } + + pub fn without_callback(self) -> LonelyBlockWithCallback { + self.with_callback(None) + } } pub struct LonelyBlockWithCallback { From 8a221523d6cd017eb7819aa91bfa67b6db343ca1 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 16 Oct 2023 16:02:11 +0800 Subject: [PATCH 063/357] Add blocking methods to process block --- chain/src/chain.rs | 43 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index fda5a29061..fda99d1a3f 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -149,6 +149,49 @@ impl ChainController { } } + pub fn blocking_process_block(&self, block: Arc) -> VerifyResult { + self.blocking_process_lonely_block(LonelyBlock { + block, + peer_id: None, + switch: None, + }) + } + + pub fn blocking_process_block_with_switch( + &self, + block: Arc, + switch: Switch, + ) -> VerifyResult { + 
self.blocking_process_lonely_block(LonelyBlock { + block, + peer_id: None, + switch: Some(switch), + }) + } + + pub fn blocking_process_lonely_block(&self, lonely_block: LonelyBlock) -> VerifyResult { + let (verify_result_tx, verify_result_rx) = ckb_channel::oneshot::channel::(); + + let verify_callback = { + move |result: VerifyResult| match verify_result_tx.send(result) { + Err(err) => error!( + "blocking send verify_result failed: {}, this shouldn't happen", + err + ), + _ => {} + } + }; + + let lonely_block_with_callback = + lonely_block.with_callback(Some(Box::new(verify_callback))); + self.internal_process_lonely_block_with_callback(lonely_block_with_callback); + verify_result_rx.recv().unwrap_or_else(|err| { + Err(InternalErrorKind::System + .other(format!("blocking recv verify_result failed: {}", err)) + .into()) + }) + } + /// Truncate chain to specified target /// /// Should use for testing only From 29ecfbbb6cc98ae400bb69ea0f10edc097854f21 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 16 Oct 2023 16:02:59 +0800 Subject: [PATCH 064/357] Fix `blocking_process_lonely_block` internal call --- chain/src/chain.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index fda99d1a3f..313c5d2f21 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -184,7 +184,7 @@ impl ChainController { let lonely_block_with_callback = lonely_block.with_callback(Some(Box::new(verify_callback))); - self.internal_process_lonely_block_with_callback(lonely_block_with_callback); + self.asynchronous_process_lonely_block_with_callback(lonely_block_with_callback); verify_result_rx.recv().unwrap_or_else(|err| { Err(InternalErrorKind::System .other(format!("blocking recv verify_result failed: {}", err)) From 96b3f3d92f19648a62493d0743905bea9fe32cf8 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 16 Oct 2023 16:07:51 +0800 Subject: [PATCH 065/357] Use blocking process_block method for `ckb import` util --- 
util/instrument/src/import.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/util/instrument/src/import.rs b/util/instrument/src/import.rs index 2dd40b3b71..74c28a72fb 100644 --- a/util/instrument/src/import.rs +++ b/util/instrument/src/import.rs @@ -63,7 +63,9 @@ impl Import { let block: JsonBlock = serde_json::from_str(&s)?; let block: Arc = Arc::new(block.into()); if !block.is_genesis() { - self.chain.process_block(block); + self.chain + .blocking_process_block(block) + .expect("import occur malformation data"); } progress_bar.inc(s.as_bytes().len() as u64); } From ee75da5705dd46eb6f933c432b6399c831bff8a0 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 16 Oct 2023 16:16:43 +0800 Subject: [PATCH 066/357] Use asynchronous process_block in Synchronizer --- sync/src/types/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index 5355776913..fefac1d9c7 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -1210,14 +1210,14 @@ impl SyncShared { // }; // TODO move switch logic to ckb-chain - let lonely_block = LonelyBlock { + let lonely_block_with_callback = LonelyBlock { block, peer_id: Some(peer_id), switch: Some(Switch::NONE), } .with_callback(verify_callback); - chain.process_lonely_block(lonely_block); + chain.asynchronous_process_lonely_block_with_callback(lonely_block_with_callback); // if let Err(ref error) = ret { // if !is_internal_db_error(error) { From 0fbc790906e9e3435f2310ba3b87ff3c2138b1e1 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 16 Oct 2023 16:17:01 +0800 Subject: [PATCH 067/357] Use blocking process_block method for `MinerRpcImpl::submit_block` --- rpc/src/module/miner.rs | 35 +++-------------------------------- 1 file changed, 3 insertions(+), 32 deletions(-) diff --git a/rpc/src/module/miner.rs b/rpc/src/module/miner.rs index 51398e9da9..2e952cfb9e 100644 --- a/rpc/src/module/miner.rs +++ b/rpc/src/module/miner.rs @@ -275,39 +275,10 
@@ impl MinerRpc for MinerRpcImpl { .verify(&header) .map_err(|err| handle_submit_error(&work_id, &err))?; - let (verify_result_tx, verify_result_rx) = ckb_channel::oneshot::channel::(); - let verify_callback = move |verify_result: std::result::Result< - VerifiedBlockStatus, - ckb_error::Error, - >| match verify_result_tx.send(verify_result) { - Err(_) => { - error!("send verify result failed, the Receiver in MinerRpc is disconnected") - } - _ => {} - }; - - self.chain - .process_block_with_callback(Arc::clone(&block), Box::new(verify_callback)); - - let is_new = true; - todo!("got a block is new or not via callback"); + let verify_result = self.chain.blocking_process_block(Arc::clone(&block)); - // let is_new = verify_result_rx - // .recv() - // .map_err(|recv_err| { - // RPCError::ckb_internal_error(format!( - // "failed to receive verify result, error: {}", - // recv_err - // )) - // })? - // .map_err(|verify_err| handle_submit_error(&work_id, &verify_err))?; - // info!( - // "end to submit block, work_id = {}, is_new = {}, block = #{}({})", - // work_id, - // is_new, - // block.number(), - // block.hash() - // ); + // TODO: need to consider every enum item of verify_result + let is_new = verify_result.is_ok(); // Announce only new block if is_new { From f7803c057a0db644bcb6b06c7807de9e6ea715f0 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 17 Oct 2023 11:54:18 +0800 Subject: [PATCH 068/357] Rename CKB Sync progress chart name --- devtools/block_sync/draw_sync_chart.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/devtools/block_sync/draw_sync_chart.py b/devtools/block_sync/draw_sync_chart.py index ca40d5ae80..401eaddd03 100755 --- a/devtools/block_sync/draw_sync_chart.py +++ b/devtools/block_sync/draw_sync_chart.py @@ -98,7 +98,7 @@ def parse_sync_statics(log_file): plt.setp(ax.get_xticklabels(), rotation=30, horizontalalignment='right') plt.legend(tuple(lgs), tuple(args.label), loc='upper left', shadow=True) -plt.title('CKB Sync 
progress Chart') +plt.title('CKB Block Sync progress Chart') plt.xlabel('Timecost (hours)') plt.ylabel('Block Height') plt.savefig(result_path) From 1de2bacf3ee13e2f16c728d324af39fd3ed1d3f4 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 18 Oct 2023 13:30:52 +0800 Subject: [PATCH 069/357] Use blocking process_block method for `IntegrationTestRpcImpl::process_and_announce_block` --- rpc/src/module/test.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/rpc/src/module/test.rs b/rpc/src/module/test.rs index 1e8bdcef6c..0893a98664 100644 --- a/rpc/src/module/test.rs +++ b/rpc/src/module/test.rs @@ -676,11 +676,10 @@ impl IntegrationTestRpcImpl { let content = packed::CompactBlock::build_from_block(&block_view, &HashSet::new()); let message = packed::RelayMessage::new_builder().set(content).build(); - todo!("retrive verify block result by callback"); // insert block to chain - // self.chain - // .process_block(Arc::clone(&block_view)) - // .map_err(|err| RPCError::custom(RPCError::CKBInternalError, err.to_string()))?; + self.chain + .blocking_process_block(Arc::clone(&block_view)) + .map_err(|err| RPCError::custom(RPCError::CKBInternalError, err.to_string()))?; // announce new block if let Err(err) = self From 179166208ce01598e37ca8a13a41868b04a4c013 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 18 Oct 2023 13:34:32 +0800 Subject: [PATCH 070/357] Derive `Debug` attribute for `VerifiedBlockStatus`, since `error!` need that --- chain/src/chain.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 313c5d2f21..fb0b13c518 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -60,6 +60,7 @@ pub type VerifyResult = Result; pub type VerifyCallback = dyn FnOnce(VerifyResult) + Send + Sync; /// VerifiedBlockStatus is +#[derive(Debug)] pub enum VerifiedBlockStatus { // The block is being seen for the first time. 
FirstSeenAndVerified, From 7a6d7439ca1fcac7a6a0d39203407adca827f7ac Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 18 Oct 2023 13:35:01 +0800 Subject: [PATCH 071/357] Use blocking process_block method for `IntegrationTestRpcImpl::process_block_without_verify` --- rpc/src/module/test.rs | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/rpc/src/module/test.rs b/rpc/src/module/test.rs index 0893a98664..0bae70244c 100644 --- a/rpc/src/module/test.rs +++ b/rpc/src/module/test.rs @@ -1,6 +1,6 @@ use crate::error::RPCError; use async_trait::async_trait; -use ckb_chain::chain::ChainController; +use ckb_chain::chain::{ChainController, VerifyResult}; use ckb_dao::DaoCalculator; use ckb_jsonrpc_types::{Block, BlockTemplate, Byte32, EpochNumberWithFraction, Transaction}; use ckb_logger::error; @@ -512,11 +512,9 @@ impl IntegrationTestRpc for IntegrationTestRpcImpl { fn process_block_without_verify(&self, data: Block, broadcast: bool) -> Result> { let block: packed::Block = data.into(); let block: Arc = Arc::new(block.into_view()); - let ret: Result<()> = { - // self.chain - // .internal_process_block(Arc::clone(&block), Switch::DISABLE_ALL); - todo!("retrive verify block result by callback"); - }; + let ret: VerifyResult = self + .chain + .blocking_process_block_with_switch(Arc::clone(&block), Switch::DISABLE_ALL); if broadcast { let content = packed::CompactBlock::build_from_block(&block, &HashSet::new()); let message = packed::RelayMessage::new_builder().set(content).build(); From 8ee7d391090d8ef8d5de2ca3c16219918f97fcf8 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 18 Oct 2023 13:57:14 +0800 Subject: [PATCH 072/357] fixup! 
Unify process_block's return type as `VerifyResult` --- sync/src/relayer/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sync/src/relayer/mod.rs b/sync/src/relayer/mod.rs index e0a8726aaf..075e2540d6 100644 --- a/sync/src/relayer/mod.rs +++ b/sync/src/relayer/mod.rs @@ -318,7 +318,7 @@ impl Relayer { move |result: VerifyResult| match result { Ok(verified_block_status) => match verified_block_status { VerifiedBlockStatus::FirstSeenAndVerified - | VerifiedBlockStatus::FirstSeenAndVerified => { + | VerifiedBlockStatus::FirstSeenButNotVerified => { match broadcast_compact_block_tx.send((block, peer)) { Err(_) => { error!( From 98833188a6a622438b66cffc93846c2a1af7e8fa Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 18 Oct 2023 15:12:37 +0800 Subject: [PATCH 073/357] Remove useless import items --- rpc/src/module/miner.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rpc/src/module/miner.rs b/rpc/src/module/miner.rs index 2e952cfb9e..60d57187e9 100644 --- a/rpc/src/module/miner.rs +++ b/rpc/src/module/miner.rs @@ -1,8 +1,8 @@ use crate::error::RPCError; use async_trait::async_trait; -use ckb_chain::chain::{ChainController, VerifiedBlockStatus, VerifyResult}; +use ckb_chain::chain::ChainController; use ckb_jsonrpc_types::{Block, BlockTemplate, Uint64, Version}; -use ckb_logger::{debug, error, info, warn}; +use ckb_logger::{debug, error, warn}; use ckb_network::{NetworkController, PeerIndex, SupportProtocols, TargetSession}; use ckb_shared::{shared::Shared, Snapshot}; use ckb_systemtime::unix_time_as_millis; From 1ff3882dd50e86dca23fb04a7d44c5967e89c464 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 18 Oct 2023 15:13:00 +0800 Subject: [PATCH 074/357] Add is_internal_db_error to VerifyFailedBlockInfo --- shared/src/types/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/shared/src/types/mod.rs b/shared/src/types/mod.rs index 898154d3e7..ac9a83c317 100644 --- a/shared/src/types/mod.rs +++ 
b/shared/src/types/mod.rs @@ -312,4 +312,5 @@ pub struct VerifyFailedBlockInfo { pub peer_id: PeerIndex, pub message_bytes: u64, pub reason: String, + pub is_internal_db_error: bool, } From 68593ff8b16ef6e5a503deb18cc1382c4e1d72e9 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 18 Oct 2023 15:23:32 +0800 Subject: [PATCH 075/357] Move `is_internal_db_error` to `ckb-error` crate --- error/src/lib.rs | 21 +++++++++++++++++++++ sync/src/utils.rs | 22 ---------------------- 2 files changed, 21 insertions(+), 22 deletions(-) diff --git a/error/src/lib.rs b/error/src/lib.rs index 20db9982dc..2c2dfa575e 100644 --- a/error/src/lib.rs +++ b/error/src/lib.rs @@ -92,3 +92,24 @@ impl fmt::Debug for AnyError { self.0.fmt(f) } } +/// Return whether the error's kind is `InternalErrorKind::Database` +/// +/// ### Panic +/// +/// Panic if the error kind is `InternalErrorKind::DataCorrupted`. +/// If the database is corrupted, panic is better than handle it silently. +pub fn is_internal_db_error(error: &Error) -> bool { + if error.kind() == ErrorKind::Internal { + let error_kind = error + .downcast_ref::() + .expect("error kind checked") + .kind(); + if error_kind == InternalErrorKind::DataCorrupted { + panic!("{}", error) + } else { + return error_kind == InternalErrorKind::Database + || error_kind == InternalErrorKind::System; + } + } + false +} diff --git a/sync/src/utils.rs b/sync/src/utils.rs index fac6e7ef05..92fedf9536 100644 --- a/sync/src/utils.rs +++ b/sync/src/utils.rs @@ -157,25 +157,3 @@ fn protocol_name(protocol_id: ProtocolId) -> String { } } } - -/// return whether the error's kind is `InternalErrorKind::Database` -/// -/// ### Panic -/// -/// Panic if the error kind is `InternalErrorKind::DataCorrupted`. -/// If the database is corrupted, panic is better than handle it silently. 
-pub(crate) fn is_internal_db_error(error: &CKBError) -> bool { - if error.kind() == ErrorKind::Internal { - let error_kind = error - .downcast_ref::() - .expect("error kind checked") - .kind(); - if error_kind == InternalErrorKind::DataCorrupted { - panic!("{}", error) - } else { - return error_kind == InternalErrorKind::Database - || error_kind == InternalErrorKind::System; - } - } - false -} From 59ff1bfe9fc494f16d713b14e0e8f549de7aefd8 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 18 Oct 2023 15:24:12 +0800 Subject: [PATCH 076/357] Fix usage for `is_internal_db_error` --- sync/src/tests/synchronizer/functions.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sync/src/tests/synchronizer/functions.rs b/sync/src/tests/synchronizer/functions.rs index 5eb2e952cb..7fe84a1293 100644 --- a/sync/src/tests/synchronizer/functions.rs +++ b/sync/src/tests/synchronizer/functions.rs @@ -1198,7 +1198,7 @@ fn get_blocks_process() { #[test] fn test_internal_db_error() { - use crate::utils::is_internal_db_error; + use ckb_error::is_internal_db_error; let consensus = Consensus::default(); let mut builder = SharedBuilder::with_temp_db(); From 439c49987c94168ed4f931e400601dbf91379085 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 18 Oct 2023 15:51:23 +0800 Subject: [PATCH 077/357] Extract ChainService's execute_callback method --- Cargo.lock | 1 + chain/Cargo.toml | 1 + chain/src/chain.rs | 92 ++++++++++++++++++++++++++++-------- sync/src/synchronizer/mod.rs | 7 ++- 4 files changed, 79 insertions(+), 22 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index dd5c573b1f..7c57a5d0d4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -716,6 +716,7 @@ dependencies = [ "faux", "lazy_static", "tempfile", + "tokio", ] [[package]] diff --git a/chain/Cargo.toml b/chain/Cargo.toml index a6e6f4f7f5..6989ab8c2f 100644 --- a/chain/Cargo.toml +++ b/chain/Cargo.toml @@ -31,6 +31,7 @@ ckb-constant = { path = "../util/constant", version = "= 0.113.0-pre" } ckb-util = { path = 
"../util", version = "= 0.113.0-pre" } crossbeam = "0.8.2" ckb-network = { path = "../network", version = "= 0.113.0-pre" } +tokio = { version = "1", features = ["sync"] } [dev-dependencies] ckb-test-chain-utils = { path = "../util/test-chain-utils", version = "= 0.114.0-pre" } diff --git a/chain/src/chain.rs b/chain/src/chain.rs index fb0b13c518..a880e9c03e 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -6,7 +6,7 @@ use crate::orphan_block_pool::OrphanBlockPool; use ckb_chain_spec::versionbits::VersionbitsIndexer; use ckb_channel::{self as channel, select, Receiver, SendError, Sender}; use ckb_constant::sync::BLOCK_DOWNLOAD_WINDOW; -use ckb_error::{Error, ErrorKind, InternalError, InternalErrorKind}; +use ckb_error::{is_internal_db_error, Error, ErrorKind, InternalError, InternalErrorKind}; use ckb_logger::Level::Trace; use ckb_logger::{ self, debug, error, info, log_enabled, log_enabled_target, trace, trace_target, warn, @@ -244,6 +244,8 @@ pub struct ChainService { proposal_table: Arc>, orphan_blocks_broker: Arc, + + verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, } pub struct LonelyBlock { @@ -274,6 +276,15 @@ pub struct LonelyBlockWithCallback { } impl LonelyBlockWithCallback { + fn execute_callback(&self, verify_result: VerifyResult) { + match &self.verify_callback { + Some(verify_callback) => { + verify_callback(verify_result); + } + None => {} + } + } + pub fn block(&self) -> &Arc { &self.lonely_block.block } @@ -305,6 +316,23 @@ struct UnverifiedBlock { pub parent_header: HeaderView, } +impl UnverifiedBlock { + fn execute_callback(&self, verify_result: VerifyResult) { + match &self.verify_callback { + Some(verify_callback) => { + debug!( + "executing block {}-{} verify_callback", + self.block.number(), + self.block.hash() + ); + + verify_callback(verify_result); + } + None => {} + } + } +} + impl ChainService { /// Create a new ChainService instance with shared and initial proposal_table. 
pub fn new(shared: Shared, proposal_table: ProposalTable) -> ChainService { @@ -445,14 +473,7 @@ impl ChainService { let verify_result = self.consume_unverified_blocks(&unverified_task); trace!("consume_unverified_blocks cost: {:?}", begin_loop.elapsed()); - match unverified_task.verify_callback { - Some(callback) => { - debug!("executing block {}-{} verify_callback", unverified_task.block.number(), unverified_task.block.hash()); - callback(verify_result); - }, - None => { - } - } + unverified_task.execute_callback(verify_result); }, Err(err) => { error!("unverified_block_rx err: {}", err); @@ -584,6 +605,10 @@ impl ChainService { for descendant_block in descendants { match self.accept_block(descendant_block.block().to_owned()) { Err(err) => { + self.tell_synchronizer_to_punish_the_bad_peer(&descendant_block, &err); + + descendant_block.execute_callback(Err(err)); + accept_error_occurred = true; error!( "accept block {} failed: {}", @@ -757,13 +782,12 @@ impl ChainService { if !switch.disable_non_contextual() { let result = self.non_contextual_verify(&lonely_block.block()); match result { - Err(err) => match lonely_block.verify_callback { - Some(verify_callback) => { - verify_callback(Err(err)); - return; - } - None => {} - }, + Err(err) => { + self.tell_synchronizer_to_punish_the_bad_peer(&lonely_block, &err); + + lonely_block.execute_callback(Err(err)); + return; + } _ => {} } } @@ -773,11 +797,9 @@ impl ChainService { Ok(_) => {} Err(SendError(lonely_block)) => { error!("failed to notify new block to orphan pool"); - if let Some(verify_callback) = lonely_block.verify_callback { - verify_callback(Err(InternalErrorKind::System - .other("OrphanBlock broker disconnected") - .into())); - } + lonely_block.execute_callback(Err(InternalErrorKind::System + .other("OrphanBlock broker disconnected") + .into())); } } debug!( @@ -790,6 +812,34 @@ impl ChainService { ); } + fn tell_synchronizer_to_punish_the_bad_peer( + &self, + lonely_block: &LonelyBlockWithCallback, + err: 
&Error, + ) { + let is_internal_db_error = is_internal_db_error(&err); + if let Some(peer_id) = lonely_block.peer_id() { + let verify_failed_block_info = VerifyFailedBlockInfo { + block_hash: lonely_block.lonely_block.block.hash(), + peer_id, + message_bytes: 0, + reason: err.to_string(), + is_internal_db_error, + }; + match self.verify_failed_blocks_tx.send(verify_failed_block_info) { + Err(_err) => { + error!("ChainService failed to send verify failed block info to Synchronizer, the receiver side may have been closed, this shouldn't happen") + } + _ => { + debug!( + "ChainService has sent verify failed block info to Synchronizer: {:?}", + verify_failed_block_info + ) + } + } + } + } + fn accept_block(&self, block: Arc) -> Result, Error> { let (block_number, block_hash) = (block.number(), block.hash()); diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index 0b8cfe5c0e..87e8bb2bd4 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -886,7 +886,12 @@ impl CKBProtocolHandler for Synchronizer { let mut have_malformed_peers = false; while let Some(malformed_peer_info) = self.verify_failed_blocks_rx.recv().await { have_malformed_peers = true; - let x = Self::post_sync_process( + if malformed_peer_info.is_internal_db_error { + // we shouldn't ban that peer if it's an internal db error + continue; + } + + Self::post_sync_process( nc.as_ref(), malformed_peer_info.peer_id, "SendBlock", From d9ac6bc6e34f0de94c621b563118f778c87660d8 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 18 Oct 2023 16:56:57 +0800 Subject: [PATCH 078/357] Make ChainServices's verify_failed_blocks_tx optional --- chain/src/chain.rs | 88 ++++++++++++++++++++++++++++------------------ 1 file changed, 54 insertions(+), 34 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index a880e9c03e..7a4b6bc6dc 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -245,7 +245,7 @@ pub struct ChainService { orphan_blocks_broker: Arc, - 
verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, + verify_failed_blocks_tx: Option>, } pub struct LonelyBlock { @@ -335,17 +335,16 @@ impl UnverifiedBlock { impl ChainService { /// Create a new ChainService instance with shared and initial proposal_table. - pub fn new(shared: Shared, proposal_table: ProposalTable) -> ChainService { - let (unverified_tx, unverified_rx) = - channel::bounded::(BLOCK_DOWNLOAD_WINDOW as usize * 3); - - let (new_block_tx, new_block_rx) = - channel::bounded::(BLOCK_DOWNLOAD_WINDOW as usize); - + pub fn new( + shared: Shared, + proposal_table: ProposalTable, + verify_failed_block_tx: Option>, + ) -> ChainService { ChainService { shared, proposal_table: Arc::new(Mutex::new(proposal_table)), orphan_blocks_broker: Arc::new(OrphanBlockPool::with_capacity(ORPHAN_BLOCK_SIZE)), + verify_failed_blocks_tx, } } @@ -468,12 +467,10 @@ impl ChainService { }, recv(unverified_block_rx) -> msg => match msg { Ok(unverified_task) => { - // process this unverified block + // process this unverified block trace!("got an unverified block, wait cost: {:?}", begin_loop.elapsed()); - let verify_result = self.consume_unverified_blocks(&unverified_task); + self.consume_unverified_blocks(&unverified_task); trace!("consume_unverified_blocks cost: {:?}", begin_loop.elapsed()); - - unverified_task.execute_callback(verify_result); }, Err(err) => { error!("unverified_block_rx err: {}", err); @@ -485,7 +482,7 @@ impl ChainService { } } - fn consume_unverified_blocks(&self, unverified_block: &UnverifiedBlock) -> VerifyResult { + fn consume_unverified_blocks(&self, unverified_block: &UnverifiedBlock) { // process this unverified block let verify_result = self.verify_block(unverified_block); match &verify_result { @@ -537,9 +534,12 @@ impl ChainService { unverified_block.block.hash(), err ); + + self.tell_synchronizer_to_punish_the_bad_peer(unverified_block, err); } } - verify_result + + unverified_block.execute_callback(verify_result); } fn 
start_search_orphan_pool( @@ -627,7 +627,17 @@ impl ChainService { match unverified_block_tx.send(unverified_block) { Ok(_) => {} Err(err) => { - error!("send unverified_block_tx failed: {}", err) + error!("send unverified_block_tx failed: {}, the receiver has been closed", err); + let err = Err(InternalErrorKind::System + .other(format!("send unverified_block_tx failed, the receiver have been close")).into()); + + self.tell_synchronizer_to_punish_the_bad_peer( + &unverified_block, + &err, + ); + + unverified_block.execute_callback(err); + continue; } }; @@ -797,9 +807,14 @@ impl ChainService { Ok(_) => {} Err(SendError(lonely_block)) => { error!("failed to notify new block to orphan pool"); - lonely_block.execute_callback(Err(InternalErrorKind::System + + let verify_result = Err(InternalErrorKind::System .other("OrphanBlock broker disconnected") - .into())); + .into()); + + self.tell_synchronizer_to_punish_the_bad_peer(&lonely_block, &verify_result); + lonely_block.execute_callback(verify_result); + return; } } debug!( @@ -818,25 +833,30 @@ impl ChainService { err: &Error, ) { let is_internal_db_error = is_internal_db_error(&err); - if let Some(peer_id) = lonely_block.peer_id() { - let verify_failed_block_info = VerifyFailedBlockInfo { - block_hash: lonely_block.lonely_block.block.hash(), - peer_id, - message_bytes: 0, - reason: err.to_string(), - is_internal_db_error, - }; - match self.verify_failed_blocks_tx.send(verify_failed_block_info) { - Err(_err) => { - error!("ChainService failed to send verify failed block info to Synchronizer, the receiver side may have been closed, this shouldn't happen") - } - _ => { - debug!( - "ChainService has sent verify failed block info to Synchronizer: {:?}", - verify_failed_block_info - ) + match (lonely_block.peer_id(), &self.verify_failed_blocks_tx) { + (Some(peer_id), Some(verify_failed_blocks_tx)) => { + let verify_failed_block_info = VerifyFailedBlockInfo { + block_hash: lonely_block.lonely_block.block.hash(), + peer_id, 
+ message_bytes: 0, + reason: err.to_string(), + is_internal_db_error, + }; + match verify_failed_blocks_tx.send(verify_failed_block_info) { + Err(_err) => { + error!("ChainService failed to send verify failed block info to Synchronizer, the receiver side may have been closed, this shouldn't happen") + } + _ => { + debug!( + "ChainService has sent verify failed block info to Synchronizer: {:?}", + verify_failed_block_info + ) + } } } + _ => { + debug!("Don't know which peer to punish, or don't have a channel Sender to Synchronizer, skip it") + } } } From 8642581dbd09d2a7dd92e40386f97a18aba1be06 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 18 Oct 2023 17:22:28 +0800 Subject: [PATCH 079/357] Pass unverified_block_info channel to ChainService and Synchronizer --- Cargo.lock | 1 + ckb-bin/Cargo.toml | 1 + ckb-bin/src/subcommand/run.rs | 8 +++-- sync/src/synchronizer/mod.rs | 57 +++++++++++++++++++---------------- util/launcher/src/lib.rs | 17 +++++++++-- 5 files changed, 53 insertions(+), 31 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7c57a5d0d4..942914e366 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -663,6 +663,7 @@ dependencies = [ "serde_json", "serde_plain", "tempfile", + "tokio", "toml", ] diff --git a/ckb-bin/Cargo.toml b/ckb-bin/Cargo.toml index 1e75ae57a0..40fbb2cb5f 100644 --- a/ckb-bin/Cargo.toml +++ b/ckb-bin/Cargo.toml @@ -45,6 +45,7 @@ sentry = { version = "0.26.0", optional = true } is-terminal = "0.4.7" fdlimit = "0.2.1" ckb-stop-handler = { path = "../util/stop-handler", version = "= 0.114.0-pre" } +tokio = { version = "1", features = ["sync"] } [target.'cfg(not(target_os="windows"))'.dependencies] daemonize = { version = "0.5.0" } diff --git a/ckb-bin/src/subcommand/run.rs b/ckb-bin/src/subcommand/run.rs index 3b74a95cc7..ba7f3bfb53 100644 --- a/ckb-bin/src/subcommand/run.rs +++ b/ckb-bin/src/subcommand/run.rs @@ -4,6 +4,7 @@ use ckb_async_runtime::Handle; use ckb_build_info::Version; use ckb_launcher::Launcher; use ckb_logger::info; 
+use ckb_shared::types::VerifyFailedBlockInfo; use ckb_stop_handler::{broadcast_exit_signals, wait_all_ckb_services_exit}; use ckb_types::core::cell::setup_system_cell_cache; @@ -39,8 +40,10 @@ pub fn run(args: RunArgs, version: Version, async_handle: Handle) -> Result<(), ); launcher.check_assume_valid_target(&shared); - - let chain_controller = launcher.start_chain_service(&shared, pack.take_proposal_table()); + let (verify_failed_block_tx, verify_failed_block_rx) = + tokio::sync::mpsc::unbounded_channel::(); + let chain_controller = + launcher.start_chain_service(&shared, pack.take_proposal_table(), verify_failed_block_tx); launcher.start_block_filter(&shared); @@ -49,6 +52,7 @@ pub fn run(args: RunArgs, version: Version, async_handle: Handle) -> Result<(), chain_controller.clone(), miner_enable, pack.take_relay_tx_receiver(), + verify_failed_block_rx, ); let tx_pool_builder = pack.take_tx_pool_builder(); diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index 87e8bb2bd4..a6c5687e57 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -234,22 +234,25 @@ pub struct Synchronizer { pub shared: Arc, fetch_channel: Option>, - pub(crate) verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, - pub(crate) verify_failed_blocks_rx: tokio::sync::mpsc::UnboundedReceiver, + pub(crate) verify_failed_blocks_rx: + Option>, } impl Synchronizer { /// Init sync protocol handle /// /// This is a runtime sync protocol shared state, and any Sync protocol messages will be processed and forwarded by it - pub fn new(chain: ChainController, shared: Arc) -> Synchronizer { - let (verify_failed_blocks_tx, verify_failed_blocks_rx) = - tokio::sync::mpsc::unbounded_channel::(); + pub fn new( + chain: ChainController, + shared: Arc, + verify_failed_blocks_rx: Option< + tokio::sync::mpsc::UnboundedReceiver, + >, + ) -> Synchronizer { Synchronizer { chain, shared, fetch_channel: None, - verify_failed_blocks_tx, verify_failed_blocks_rx, } } 
@@ -883,27 +886,29 @@ impl CKBProtocolHandler for Synchronizer { } async fn poll(&mut self, nc: Arc) -> Option<()> { - let mut have_malformed_peers = false; - while let Some(malformed_peer_info) = self.verify_failed_blocks_rx.recv().await { - have_malformed_peers = true; - if malformed_peer_info.is_internal_db_error { - // we shouldn't ban that peer if it's an internal db error - continue; - } + if let Some(verify_failed_blocks_rx) = &mut self.verify_failed_blocks_rx { + let mut have_malformed_peers = false; + while let Some(malformed_peer_info) = verify_failed_blocks_rx.recv().await { + have_malformed_peers = true; + if malformed_peer_info.is_internal_db_error { + // we shouldn't ban that peer if it's an internal db error + continue; + } - Self::post_sync_process( - nc.as_ref(), - malformed_peer_info.peer_id, - "SendBlock", - malformed_peer_info.message_bytes, - StatusCode::BlockIsInvalid.with_context(format!( - "block {} is invalid, reason: {}", - malformed_peer_info.block_hash, malformed_peer_info.reason - )), - ); - } - if have_malformed_peers { - return Some(()); + Self::post_sync_process( + nc.as_ref(), + malformed_peer_info.peer_id, + "SendBlock", + malformed_peer_info.message_bytes, + StatusCode::BlockIsInvalid.with_context(format!( + "block {} is invalid, reason: {}", + malformed_peer_info.block_hash, malformed_peer_info.reason + )), + ); + } + if have_malformed_peers { + return Some(()); + } } None } diff --git a/util/launcher/src/lib.rs b/util/launcher/src/lib.rs index d3538c55f1..1b6cbb45b2 100644 --- a/util/launcher/src/lib.rs +++ b/util/launcher/src/lib.rs @@ -25,6 +25,7 @@ use ckb_rpc::ServiceBuilder; use ckb_shared::Shared; use ckb_shared::shared_builder::{SharedBuilder, SharedPackage}; +use ckb_shared::types::VerifyFailedBlockInfo; use ckb_store::{ChainDB, ChainStore}; use ckb_sync::{BlockFilter, NetTimeProtocol, Relayer, SyncShared, Synchronizer}; use ckb_tx_pool::service::TxVerificationResult; @@ -225,8 +226,13 @@ impl Launcher { } /// Start 
chain service, return ChainController - pub fn start_chain_service(&self, shared: &Shared, table: ProposalTable) -> ChainController { - let chain_service = ChainService::new(shared.clone(), table); + pub fn start_chain_service( + &self, + shared: &Shared, + table: ProposalTable, + verify_failed_block_tx: tokio::sync::mpsc::UnboundedSender, + ) -> ChainController { + let chain_service = ChainService::new(shared.clone(), table, verify_failed_block_tx); let chain_controller = chain_service.start(Some("ChainService")); info!("chain genesis hash: {:#x}", shared.genesis_hash()); chain_controller @@ -260,6 +266,7 @@ impl Launcher { chain_controller: ChainController, miner_enable: bool, relay_tx_receiver: Receiver, + verify_failed_block_rx: tokio::sync::mpsc::UnboundedReceiver, ) -> NetworkController { let sync_shared = Arc::new(SyncShared::with_tmpdir( shared.clone(), @@ -282,7 +289,11 @@ impl Launcher { ); // Sync is a core protocol, user cannot disable it via config - let synchronizer = Synchronizer::new(chain_controller.clone(), Arc::clone(&sync_shared)); + let synchronizer = Synchronizer::new( + chain_controller.clone(), + Arc::clone(&sync_shared), + Some(verify_failed_block_rx), + ); let mut protocols = vec![CKBProtocol::new_with_support_protocol( SupportProtocols::Sync, Box::new(synchronizer), From 45441f52ce3748dda1a4e074e5e5d72b299a54e9 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 18 Oct 2023 18:11:41 +0800 Subject: [PATCH 080/357] Fix UnverifiedBlock reference issue --- chain/src/chain.rs | 131 ++++++++++++++++++++++-------------------- sync/src/types/mod.rs | 1 - 2 files changed, 68 insertions(+), 64 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 7a4b6bc6dc..9e31be080a 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -22,7 +22,6 @@ use ckb_shared::types::VerifyFailedBlockInfo; use ckb_stop_handler::{new_crossbeam_exit_rx, register_thread}; use ckb_store::{attach_block_cell, detach_block_cell, ChainStore, 
StoreTransaction}; use ckb_systemtime::unix_time_as_millis; -use ckb_types::packed::UncleBlockVecReaderIterator; use ckb_types::{ core::{ cell::{ @@ -60,7 +59,7 @@ pub type VerifyResult = Result; pub type VerifyCallback = dyn FnOnce(VerifyResult) + Send + Sync; /// VerifiedBlockStatus is -#[derive(Debug)] +#[derive(Debug, Clone)] pub enum VerifiedBlockStatus { // The block is being seen for the first time. FirstSeenAndVerified, @@ -276,8 +275,8 @@ pub struct LonelyBlockWithCallback { } impl LonelyBlockWithCallback { - fn execute_callback(&self, verify_result: VerifyResult) { - match &self.verify_callback { + fn execute_callback(self, verify_result: VerifyResult) { + match self.verify_callback { Some(verify_callback) => { verify_callback(verify_result); } @@ -297,39 +296,33 @@ impl LonelyBlockWithCallback { } impl LonelyBlockWithCallback { - fn combine_parent_header(self, parent_header: HeaderView, switch: Switch) -> UnverifiedBlock { + fn combine_parent_header(self, parent_header: HeaderView) -> UnverifiedBlock { UnverifiedBlock { - block: self.lonely_block.block, - peer_id: self.lonely_block.peer_id, - switch, - verify_callback: self.verify_callback, + unverified_block: self, parent_header, } } } struct UnverifiedBlock { - pub block: Arc, - pub peer_id: Option, - pub switch: Switch, - pub verify_callback: Option>, + pub unverified_block: LonelyBlockWithCallback, pub parent_header: HeaderView, } impl UnverifiedBlock { - fn execute_callback(&self, verify_result: VerifyResult) { - match &self.verify_callback { - Some(verify_callback) => { - debug!( - "executing block {}-{} verify_callback", - self.block.number(), - self.block.hash() - ); + fn block(&self) -> &Arc { + self.unverified_block.block() + } - verify_callback(verify_result); - } - None => {} - } + pub fn peer_id(&self) -> Option { + self.unverified_block.peer_id() + } + pub fn switch(&self) -> Option { + self.unverified_block.switch() + } + + fn execute_callback(self, verify_result: VerifyResult) { + 
self.unverified_block.execute_callback(verify_result) } } @@ -338,7 +331,7 @@ impl ChainService { pub fn new( shared: Shared, proposal_table: ProposalTable, - verify_failed_block_tx: Option>, + verify_failed_blocks_tx: Option>, ) -> ChainService { ChainService { shared, @@ -469,7 +462,7 @@ impl ChainService { Ok(unverified_task) => { // process this unverified block trace!("got an unverified block, wait cost: {:?}", begin_loop.elapsed()); - self.consume_unverified_blocks(&unverified_task); + self.consume_unverified_blocks(unverified_task); trace!("consume_unverified_blocks cost: {:?}", begin_loop.elapsed()); }, Err(err) => { @@ -482,20 +475,20 @@ impl ChainService { } } - fn consume_unverified_blocks(&self, unverified_block: &UnverifiedBlock) { + fn consume_unverified_blocks(&self, unverified_block: UnverifiedBlock) { // process this unverified block - let verify_result = self.verify_block(unverified_block); + let verify_result = self.verify_block(&unverified_block); match &verify_result { Ok(_) => { let log_now = std::time::Instant::now(); self.shared - .remove_block_status(&unverified_block.block.hash()); + .remove_block_status(&unverified_block.block().hash()); let log_elapsed_remove_block_status = log_now.elapsed(); self.shared - .remove_header_view(&unverified_block.block.hash()); + .remove_header_view(&unverified_block.block().hash()); debug!( "block {} remove_block_status cost: {:?}, and header_view cost: {:?}", - unverified_block.block.hash(), + unverified_block.block().hash(), log_elapsed_remove_block_status, log_now.elapsed() ); @@ -503,8 +496,8 @@ impl ChainService { Err(err) => { error!( "verify [{:?}]'s block {} failed: {}", - unverified_block.peer_id, - unverified_block.block.hash(), + unverified_block.peer_id(), + unverified_block.block().hash(), err ); @@ -525,17 +518,22 @@ impl ChainService { tip_ext.total_difficulty, )); - self.shared - .insert_block_status(unverified_block.block.hash(), BlockStatus::BLOCK_INVALID); + 
self.shared.insert_block_status( + unverified_block.block().hash(), + BlockStatus::BLOCK_INVALID, + ); error!( "set_unverified tip to {}-{}, because verify {} failed: {}", tip.number(), tip.hash(), - unverified_block.block.hash(), + unverified_block.block().hash(), err ); - self.tell_synchronizer_to_punish_the_bad_peer(unverified_block, err); + self.tell_synchronizer_to_punish_the_bad_peer( + &unverified_block.unverified_block, + err, + ); } } @@ -607,36 +605,37 @@ impl ChainService { Err(err) => { self.tell_synchronizer_to_punish_the_bad_peer(&descendant_block, &err); - descendant_block.execute_callback(Err(err)); - accept_error_occurred = true; error!( "accept block {} failed: {}", descendant_block.block().hash(), err ); + + descendant_block.execute_callback(Err(err)); continue; } Ok(accepted_opt) => match accepted_opt { Some((parent_header, total_difficulty)) => { let unverified_block: UnverifiedBlock = - descendant_block.combine_parent_header(parent_header, Switch::NONE); - let block_number = unverified_block.block.number(); - let block_hash = unverified_block.block.hash(); + descendant_block.combine_parent_header(parent_header); + let block_number = unverified_block.block().number(); + let block_hash = unverified_block.block().hash(); match unverified_block_tx.send(unverified_block) { Ok(_) => {} - Err(err) => { - error!("send unverified_block_tx failed: {}, the receiver has been closed", err); - let err = Err(InternalErrorKind::System - .other(format!("send unverified_block_tx failed, the receiver have been close")).into()); + Err(SendError(unverified_block)) => { + error!("send unverified_block_tx failed, the receiver has been closed"); + let err: Error = InternalErrorKind::System + .other(format!("send unverified_block_tx failed, the receiver have been close")).into(); self.tell_synchronizer_to_punish_the_bad_peer( - &unverified_block, + &unverified_block.unverified_block, &err, ); - unverified_block.execute_callback(err); + let verify_result: VerifyResult 
= Err(err); + unverified_block.execute_callback(verify_result); continue; } }; @@ -808,11 +807,13 @@ impl ChainService { Err(SendError(lonely_block)) => { error!("failed to notify new block to orphan pool"); - let verify_result = Err(InternalErrorKind::System + let err: Error = InternalErrorKind::System .other("OrphanBlock broker disconnected") - .into()); + .into(); + + self.tell_synchronizer_to_punish_the_bad_peer(&lonely_block, &err); - self.tell_synchronizer_to_punish_the_bad_peer(&lonely_block, &verify_result); + let verify_result = Err(err); lonely_block.execute_callback(verify_result); return; } @@ -846,12 +847,7 @@ impl ChainService { Err(_err) => { error!("ChainService failed to send verify failed block info to Synchronizer, the receiver side may have been closed, this shouldn't happen") } - _ => { - debug!( - "ChainService has sent verify failed block info to Synchronizer: {:?}", - verify_failed_block_info - ) - } + _ => {} } } _ => { @@ -947,13 +943,22 @@ impl ChainService { let log_now = std::time::Instant::now(); let UnverifiedBlock { - block, - peer_id, - switch, - verify_callback, + unverified_block: + LonelyBlockWithCallback { + lonely_block: + LonelyBlock { + block, + peer_id: _peer_id, + switch, + }, + verify_callback: _verify_callback, + }, parent_header, } = unverified_block; + // TODO: calculate the value of switch if we specified assume-valid-target + let switch = Switch::NONE; + let parent_ext = self .shared .store() @@ -1036,7 +1041,7 @@ impl ChainService { // update and verify chain root // MUST update index before reconcile_main_chain let begin_reconcile_main_chain = std::time::Instant::now(); - self.reconcile_main_chain(Arc::clone(&db_txn), &mut fork, switch.to_owned())?; + self.reconcile_main_chain(Arc::clone(&db_txn), &mut fork, switch)?; trace!( "reconcile_main_chain cost {:?}", begin_reconcile_main_chain.elapsed() diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index fefac1d9c7..269e6ba26f 100644 --- a/sync/src/types/mod.rs 
+++ b/sync/src/types/mod.rs @@ -1,5 +1,4 @@ use crate::orphan_block_pool::OrphanBlockPool; -use crate::utils::is_internal_db_error; use crate::{Status, StatusCode, FAST_INDEX, LOW_INDEX, NORMAL_INDEX, TIME_TRACE_SIZE}; use ckb_app_config::SyncConfig; use ckb_chain::chain::{ From 6fa85028f7ae42d8dba9a00da91b3837d83f15b8 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 18 Oct 2023 21:41:08 +0800 Subject: [PATCH 081/357] Usage of ChainService::new need verify_failed_block_tx --- ckb-bin/src/subcommand/import.rs | 2 +- ckb-bin/src/subcommand/replay.rs | 2 +- sync/src/synchronizer/block_process.rs | 2 +- util/launcher/src/lib.rs | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ckb-bin/src/subcommand/import.rs b/ckb-bin/src/subcommand/import.rs index d6fba348c3..38301171b1 100644 --- a/ckb-bin/src/subcommand/import.rs +++ b/ckb-bin/src/subcommand/import.rs @@ -15,7 +15,7 @@ pub fn import(args: ImportArgs, async_handle: Handle) -> Result<(), ExitCode> { )?; let (shared, mut pack) = builder.build()?; - let chain_service = ChainService::new(shared, pack.take_proposal_table()); + let chain_service = ChainService::new(shared, pack.take_proposal_table(), None); let chain_controller = chain_service.start::<&str>(Some("ImportChainService")); // manual drop tx_pool_builder and relay_tx_receiver diff --git a/ckb-bin/src/subcommand/replay.rs b/ckb-bin/src/subcommand/replay.rs index ac7da08fb2..027d025a21 100644 --- a/ckb-bin/src/subcommand/replay.rs +++ b/ckb-bin/src/subcommand/replay.rs @@ -47,7 +47,7 @@ pub fn replay(args: ReplayArgs, async_handle: Handle) -> Result<(), ExitCode> { args.consensus, )?; let (tmp_shared, mut pack) = shared_builder.tx_pool_config(args.config.tx_pool).build()?; - let chain = ChainService::new(tmp_shared, pack.take_proposal_table()); + let chain = ChainService::new(tmp_shared, pack.take_proposal_table(), None); if let Some((from, to)) = args.profile { profile(shared, chain, from, to); diff --git 
a/sync/src/synchronizer/block_process.rs b/sync/src/synchronizer/block_process.rs index f8e236e0cb..f4a3fdbcf9 100644 --- a/sync/src/synchronizer/block_process.rs +++ b/sync/src/synchronizer/block_process.rs @@ -1,4 +1,4 @@ -use crate::{synchronizer::Synchronizer, utils::is_internal_db_error, Status, StatusCode}; +use crate::{synchronizer::Synchronizer, Status, StatusCode}; use ckb_logger::{debug, error}; use ckb_network::PeerIndex; use ckb_shared::types::VerifyFailedBlockInfo; diff --git a/util/launcher/src/lib.rs b/util/launcher/src/lib.rs index 1b6cbb45b2..1cbde7fd04 100644 --- a/util/launcher/src/lib.rs +++ b/util/launcher/src/lib.rs @@ -232,7 +232,7 @@ impl Launcher { table: ProposalTable, verify_failed_block_tx: tokio::sync::mpsc::UnboundedSender, ) -> ChainController { - let chain_service = ChainService::new(shared.clone(), table, verify_failed_block_tx); + let chain_service = ChainService::new(shared.clone(), table, Some(verify_failed_block_tx)); let chain_controller = chain_service.start(Some("ChainService")); info!("chain genesis hash: {:#x}", shared.genesis_hash()); chain_controller From 7062f9e2923fc2472fb58bc797cc02943efb9655 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 18 Oct 2023 22:00:26 +0800 Subject: [PATCH 082/357] Refactor: move ckb-sync's SyncState::assume_valid_target to ckb-shared --- shared/src/shared.rs | 13 +++++++++++-- sync/src/synchronizer/mod.rs | 5 +++-- sync/src/types/mod.rs | 6 ------ 3 files changed, 14 insertions(+), 10 deletions(-) diff --git a/shared/src/shared.rs b/shared/src/shared.rs index 9415d19096..0ae32b5101 100644 --- a/shared/src/shared.rs +++ b/shared/src/shared.rs @@ -21,9 +21,9 @@ use ckb_types::{ core::{service, BlockNumber, EpochExt, EpochNumber, HeaderView, Version}, packed::{self, Byte32}, prelude::*, - U256, + H256, U256, }; -use ckb_util::shrink_to_fit; +use ckb_util::{shrink_to_fit, Mutex, MutexGuard}; use ckb_verification::cache::TxVerificationCache; use dashmap::DashMap; use std::cmp; @@ -63,6 +63,8 
@@ pub struct Shared { pub(crate) async_handle: Handle, pub(crate) ibd_finished: Arc, + pub assume_valid_target: Arc>>, + pub header_map: Arc, pub(crate) block_status_map: Arc>, pub(crate) unverified_tip: Arc>, @@ -80,6 +82,8 @@ impl Shared { snapshot_mgr: Arc, async_handle: Handle, ibd_finished: Arc, + + assume_valid_target: Arc>>, header_map: Arc, block_status_map: Arc>, ) -> Shared { @@ -101,6 +105,7 @@ impl Shared { snapshot_mgr, async_handle, ibd_finished, + assume_valid_target, header_map, block_status_map, unverified_tip, @@ -459,4 +464,8 @@ impl Shared { pub fn contains_block_status(&self, block_hash: &Byte32, status: BlockStatus) -> bool { self.get_block_status(block_hash).contains(status) } + + pub fn assume_valid_target(&self) -> MutexGuard> { + self.assume_valid_target.lock() + } } diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index a6c5687e57..6e53b17de9 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -114,9 +114,10 @@ impl BlockFetchCMD { } CanStart::AssumeValidNotFound => { let state = self.sync_shared.state(); + let shared = self.sync_shared.shared(); let best_known = state.shared_best_header_ref(); let number = best_known.number(); - let assume_valid_target: Byte32 = state + let assume_valid_target: Byte32 = shared .assume_valid_target() .as_ref() .map(Pack::pack) @@ -168,7 +169,7 @@ impl BlockFetchCMD { }; let assume_valid_target_find = |flag: &mut CanStart| { - let mut assume_valid_target = state.assume_valid_target(); + let mut assume_valid_target = shared.assume_valid_target(); if let Some(ref target) = *assume_valid_target { match shared.header_map().get(&target.pack()) { Some(header) => { diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index 269e6ba26f..f21396615f 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -1042,7 +1042,6 @@ impl SyncShared { inflight_blocks: RwLock::new(InflightBlocks::default()), pending_get_headers: 
RwLock::new(LruCache::new(GET_HEADERS_CACHE_SIZE)), tx_relay_receiver, - assume_valid_target: Mutex::new(sync_config.assume_valid_target), min_chain_work: sync_config.min_chain_work, }; @@ -1472,15 +1471,10 @@ pub struct SyncState { /* cached for sending bulk */ tx_relay_receiver: Receiver, - assume_valid_target: Mutex>, min_chain_work: U256, } impl SyncState { - pub fn assume_valid_target(&self) -> MutexGuard> { - self.assume_valid_target.lock() - } - pub fn min_chain_work(&self) -> &U256 { &self.min_chain_work } From 501dd44436eb6a252d77719f49a332ce10e3fbb6 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 18 Oct 2023 22:00:59 +0800 Subject: [PATCH 083/357] Add sync_config to SharedBuilder, since assume_valid_target need it --- Cargo.lock | 1 + shared/src/shared_builder.rs | 18 +++++++++++++++++- util/launcher/src/lib.rs | 1 + 3 files changed, 19 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 942914e366..4328fde156 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -981,6 +981,7 @@ dependencies = [ "ckb-sync", "ckb-tx-pool", "ckb-types", + "ckb-util", "ckb-verification", "ckb-verification-traits", ] diff --git a/shared/src/shared_builder.rs b/shared/src/shared_builder.rs index 3b79988554..e278fc0a57 100644 --- a/shared/src/shared_builder.rs +++ b/shared/src/shared_builder.rs @@ -6,7 +6,9 @@ use ckb_tx_pool::{TokioRwLock, TxEntry, TxPool, TxPoolServiceBuilder}; use std::cmp::Ordering; use crate::migrate::Migrate; -use ckb_app_config::{BlockAssemblerConfig, DBConfig, NotifyConfig, StoreConfig, TxPoolConfig}; +use ckb_app_config::{ + BlockAssemblerConfig, DBConfig, NotifyConfig, StoreConfig, SyncConfig, TxPoolConfig, +}; use ckb_app_config::{ExitCode, HeaderMapConfig}; use ckb_async_runtime::{new_background_runtime, Handle}; use ckb_chain_spec::consensus::Consensus; @@ -41,6 +43,7 @@ use ckb_tx_pool::{ use ckb_types::core::hardfork::HardForks; use ckb_types::core::service::PoolTransactionEntry; use ckb_types::core::tx_pool::Reject; +use 
ckb_util::Mutex; use ckb_types::core::EpochExt; use ckb_types::core::HeaderView; @@ -60,6 +63,7 @@ pub struct SharedBuilder { consensus: Consensus, tx_pool_config: Option, store_config: Option, + sync_config: Option, block_assembler_config: Option, notify_config: Option, async_handle: Handle, @@ -167,6 +171,7 @@ impl SharedBuilder { tx_pool_config: None, notify_config: None, store_config: None, + sync_config: None, block_assembler_config: None, async_handle, header_map_memory_limit: None, @@ -214,6 +219,7 @@ impl SharedBuilder { tx_pool_config: None, notify_config: None, store_config: None, + sync_config: None, block_assembler_config: None, async_handle: runtime.get_or_init(new_background_runtime).clone(), @@ -248,6 +254,12 @@ impl SharedBuilder { self } + /// TODO(doc): @eval-exec + pub fn sync_config(mut self, config: SyncConfig) -> Self { + self.sync_config = Some(config); + self + } + /// TODO(doc): @quake pub fn block_assembler_config(mut self, config: Option) -> Self { self.block_assembler_config = config; @@ -351,6 +363,7 @@ impl SharedBuilder { consensus, tx_pool_config, store_config, + sync_config, block_assembler_config, notify_config, async_handle, @@ -370,6 +383,7 @@ impl SharedBuilder { let tx_pool_config = tx_pool_config.unwrap_or_default(); let notify_config = notify_config.unwrap_or_default(); let store_config = store_config.unwrap_or_default(); + let sync_config = sync_config.unwrap_or_default(); let consensus = Arc::new(consensus); let notify_controller = start_notify_service(notify_config, async_handle.clone()); @@ -404,6 +418,7 @@ impl SharedBuilder { let block_status_map = Arc::new(DashMap::new()); + let assume_valid_target = Arc::new(Mutex::new(sync_config.assume_valid_target)); let ibd_finished = Arc::new(AtomicBool::new(false)); let shared = Shared::new( store, @@ -414,6 +429,7 @@ impl SharedBuilder { snapshot_mgr, async_handle, ibd_finished, + assume_valid_target, header_map, block_status_map, ); diff --git a/util/launcher/src/lib.rs 
b/util/launcher/src/lib.rs index 1cbde7fd04..25926e14fc 100644 --- a/util/launcher/src/lib.rs +++ b/util/launcher/src/lib.rs @@ -203,6 +203,7 @@ impl Launcher { .tx_pool_config(self.args.config.tx_pool.clone()) .notify_config(self.args.config.notify.clone()) .store_config(self.args.config.store) + .sync_config(self.args.config.network.sync.clone()) .block_assembler_config(block_assembler_config) .build()?; From d19161e5e3a8d0a23def006eb1cca9a423dea2f9 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 18 Oct 2023 22:17:53 +0800 Subject: [PATCH 084/357] Let assume_valid_target affect switch argument in ChainService --- chain/src/chain.rs | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 9e31be080a..89599211bb 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -956,8 +956,23 @@ impl ChainService { parent_header, } = unverified_block; - // TODO: calculate the value of switch if we specified assume-valid-target - let switch = Switch::NONE; + let switch: Switch = switch.unwrap_or_else(|| { + let mut assume_valid_target = self.shared.assume_valid_target(); + match *assume_valid_target { + Some(ref target) => { + // if the target has been reached, delete it + if target + == &ckb_types::prelude::Unpack::::unpack(&BlockView::hash(&block)) + { + assume_valid_target.take(); + Switch::NONE + } else { + Switch::DISABLE_SCRIPT + } + } + None => Switch::NONE, + } + }); let parent_ext = self .shared From eabecd9889fd081d071deea5d821bbc54f3ca3ef Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 18 Oct 2023 22:22:26 +0800 Subject: [PATCH 085/357] Synchronizer should pass Switch::NONE to ChainService --- sync/src/types/mod.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index f21396615f..c896ef04b9 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -1207,11 +1207,10 @@ impl SyncShared { // } // }; - // TODO 
move switch logic to ckb-chain let lonely_block_with_callback = LonelyBlock { block, peer_id: Some(peer_id), - switch: Some(Switch::NONE), + switch: None, } .with_callback(verify_callback); From b64e8a6a0f1194be8dff975b0e476bebd7dc1198 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 18 Oct 2023 22:54:52 +0800 Subject: [PATCH 086/357] Make ChainService::asynchronous_process_block private --- chain/src/chain.rs | 22 ++-------------------- 1 file changed, 2 insertions(+), 20 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 89599211bb..883f058a76 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -398,7 +398,7 @@ impl ChainService { recv(process_block_receiver) -> msg => match msg { Ok(Request { responder, arguments: lonely_block }) => { let _ = tx_control.suspend_chunk_process(); - let _ = responder.send(self.process_block_v2(lonely_block, lonely_block_tx.clone())); + let _ = responder.send(self.asynchronous_process_block(lonely_block, lonely_block_tx.clone())); let _ = tx_control.continue_chunk_process(); if let Some(metrics) = ckb_metrics::handle() { @@ -739,23 +739,6 @@ impl ChainService { Ok(()) } - // visible pub just for test - #[doc(hidden)] - pub fn process_block(&mut self, block: Arc, switch: Switch) -> Result { - let block_number = block.number(); - let block_hash = block.hash(); - - debug!("Begin processing block: {}-{}", block_number, block_hash); - if block_number < 1 { - warn!("Receive 0 number block: 0-{}", block_hash); - } - - self.insert_block(block, switch).map(|ret| { - debug!("Finish processing block"); - ret - }) - } - fn non_contextual_verify(&self, block: &BlockView) -> Result<(), Error> { let consensus = self.shared.consensus(); BlockVerifier::new(consensus).verify(block).map_err(|e| { @@ -776,8 +759,7 @@ impl ChainService { } // make block IO and verify asynchronize - #[doc(hidden)] - pub fn process_block_v2( + fn asynchronous_process_block( &self, lonely_block: LonelyBlockWithCallback, lonely_block_tx: 
Sender, From f5b27c485240cba00967fcd9cd2f89e59f0f3990 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 18 Oct 2023 22:56:25 +0800 Subject: [PATCH 087/357] Remove ChainService::insert_block --- chain/src/chain.rs | 169 --------------------------------------------- 1 file changed, 169 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 883f058a76..0d4d6c35e3 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -1123,175 +1123,6 @@ impl ChainService { } } - fn insert_block(&mut self, block: Arc, switch: Switch) -> Result { - let db_txn = Arc::new(self.shared.store().begin_transaction()); - let txn_snapshot = db_txn.get_snapshot(); - let _snapshot_tip_hash = db_txn.get_update_for_tip_hash(&txn_snapshot); - - // insert_block are assumed be executed in single thread - if txn_snapshot.block_exists(&block.header().hash()) { - return Ok(false); - } - // non-contextual verify - if !switch.disable_non_contextual() { - self.non_contextual_verify(&block)?; - } - - let mut total_difficulty = U256::zero(); - let mut fork = ForkChanges::default(); - - let parent_ext = txn_snapshot - .get_block_ext(&block.data().header().raw().parent_hash()) - .expect("parent already store"); - - let parent_header = txn_snapshot - .get_block_header(&block.data().header().raw().parent_hash()) - .expect("parent already store"); - - let cannon_total_difficulty = - parent_ext.total_difficulty.to_owned() + block.header().difficulty(); - - if parent_ext.verified == Some(false) { - return Err(InvalidParentError { - parent_hash: parent_header.hash(), - } - .into()); - } - - db_txn.insert_block(&block)?; - - let next_block_epoch = self - .shared - .consensus() - .next_epoch_ext(&parent_header, &txn_snapshot.borrow_as_data_loader()) - .expect("epoch should be stored"); - let new_epoch = next_block_epoch.is_head(); - let epoch = next_block_epoch.epoch(); - - let ext = BlockExt { - received_at: unix_time_as_millis(), - total_difficulty: cannon_total_difficulty.clone(), - 
total_uncles_count: parent_ext.total_uncles_count + block.data().uncles().len() as u64, - verified: None, - txs_fees: vec![], - cycles: None, - txs_sizes: None, - }; - - db_txn.insert_block_epoch_index( - &block.header().hash(), - &epoch.last_block_hash_in_previous_epoch(), - )?; - if new_epoch { - db_txn.insert_epoch_ext(&epoch.last_block_hash_in_previous_epoch(), &epoch)?; - } - - let shared_snapshot = Arc::clone(&self.shared.snapshot()); - let origin_proposals = shared_snapshot.proposals(); - let current_tip_header = shared_snapshot.tip_header(); - - let current_total_difficulty = shared_snapshot.total_difficulty().to_owned(); - debug!( - "Current difficulty = {:#x}, cannon = {:#x}", - current_total_difficulty, cannon_total_difficulty, - ); - - // is_better_than - let new_best_block = cannon_total_difficulty > current_total_difficulty; - - if new_best_block { - debug!( - "Newly found best block : {} => {:#x}, difficulty diff = {:#x}", - block.header().number(), - block.header().hash(), - &cannon_total_difficulty - ¤t_total_difficulty - ); - self.find_fork(&mut fork, current_tip_header.number(), &block, ext); - self.rollback(&fork, &db_txn)?; - - // update and verify chain root - // MUST update index before reconcile_main_chain - self.reconcile_main_chain(Arc::clone(&db_txn), &mut fork, switch)?; - - db_txn.insert_tip_header(&block.header())?; - if new_epoch || fork.has_detached() { - db_txn.insert_current_epoch_ext(&epoch)?; - } - total_difficulty = cannon_total_difficulty.clone(); - } else { - db_txn.insert_block_ext(&block.header().hash(), &ext)?; - } - db_txn.commit()?; - - if new_best_block { - let tip_header = block.header(); - info!( - "block: {}, hash: {:#x}, epoch: {:#}, total_diff: {:#x}, txs: {}", - tip_header.number(), - tip_header.hash(), - tip_header.epoch(), - total_difficulty, - block.transactions().len() - ); - - self.update_proposal_table(&fork); - let (detached_proposal_id, new_proposals) = self - .proposal_table - .lock() - 
.finalize(origin_proposals, tip_header.number()); - fork.detached_proposal_id = detached_proposal_id; - - let new_snapshot = - self.shared - .new_snapshot(tip_header, total_difficulty, epoch, new_proposals); - - self.shared.store_snapshot(Arc::clone(&new_snapshot)); - - let tx_pool_controller = self.shared.tx_pool_controller(); - if tx_pool_controller.service_started() { - if let Err(e) = tx_pool_controller.update_tx_pool_for_reorg( - fork.detached_blocks().clone(), - fork.attached_blocks().clone(), - fork.detached_proposal_id().clone(), - new_snapshot, - ) { - error!("Notify update_tx_pool_for_reorg error {}", e); - } - } - - let block_ref: &BlockView = █ - self.shared - .notify_controller() - .notify_new_block(block_ref.clone()); - if log_enabled!(ckb_logger::Level::Debug) { - self.print_chain(10); - } - if let Some(metrics) = ckb_metrics::handle() { - metrics.ckb_chain_tip.set(block.header().number() as i64); - } - } else { - self.shared.refresh_snapshot(); - info!( - "uncle: {}, hash: {:#x}, epoch: {:#}, total_diff: {:#x}, txs: {}", - block.header().number(), - block.header().hash(), - block.header().epoch(), - cannon_total_difficulty, - block.transactions().len() - ); - - let tx_pool_controller = self.shared.tx_pool_controller(); - if tx_pool_controller.service_started() { - let block_ref: &BlockView = █ - if let Err(e) = tx_pool_controller.notify_new_uncle(block_ref.as_uncle()) { - error!("Notify new_uncle error {}", e); - } - } - } - - Ok(true) - } - pub(crate) fn update_proposal_table(&self, fork: &ForkChanges) { for blk in fork.detached_blocks() { self.proposal_table.lock().remove(blk.header().number()); From 46c372f0b480e7abc2a9a01157130c99d4acf9f4 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 18 Oct 2023 23:02:06 +0800 Subject: [PATCH 088/357] `ckb replay` use ChainController to blocking process block --- ckb-bin/src/subcommand/replay.rs | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git 
a/ckb-bin/src/subcommand/replay.rs b/ckb-bin/src/subcommand/replay.rs index 027d025a21..0114d1a2e7 100644 --- a/ckb-bin/src/subcommand/replay.rs +++ b/ckb-bin/src/subcommand/replay.rs @@ -1,6 +1,6 @@ use ckb_app_config::{ExitCode, ReplayArgs}; use ckb_async_runtime::Handle; -use ckb_chain::chain::ChainService; +use ckb_chain::chain::{ChainController, ChainService}; use ckb_chain_iter::ChainIterator; use ckb_instrument::{ProgressBar, ProgressStyle}; use ckb_shared::{Shared, SharedBuilder}; @@ -47,12 +47,13 @@ pub fn replay(args: ReplayArgs, async_handle: Handle) -> Result<(), ExitCode> { args.consensus, )?; let (tmp_shared, mut pack) = shared_builder.tx_pool_config(args.config.tx_pool).build()?; - let chain = ChainService::new(tmp_shared, pack.take_proposal_table(), None); + let chain_service = ChainService::new(tmp_shared, pack.take_proposal_table(), None); + let chain_controller = chain_service.start(Some("ckb_reply::ChainService")); if let Some((from, to)) = args.profile { - profile(shared, chain, from, to); + profile(shared, chain_controller, from, to); } else if args.sanity_check { - sanity_check(shared, chain, args.full_verification); + sanity_check(shared, chain_controller, args.full_verification); } } tmp_db_dir.close().map_err(|err| { @@ -63,16 +64,16 @@ pub fn replay(args: ReplayArgs, async_handle: Handle) -> Result<(), ExitCode> { Ok(()) } -fn profile(shared: Shared, mut chain: ChainService, from: Option, to: Option) { +fn profile(shared: Shared, chain_controller: ChainController, from: Option, to: Option) { let tip_number = shared.snapshot().tip_number(); let from = from.map(|v| std::cmp::max(1, v)).unwrap_or(1); let to = to .map(|v| std::cmp::min(v, tip_number)) .unwrap_or(tip_number); - process_range_block(&shared, &mut chain, 1..from); - println!("Start profiling; re-process blocks {from}..{to}:"); + process_range_block(&shared, chain_controller.clone(), 1..from); + println!("Start profiling, re-process blocks {from}..{to}:"); let now = 
std::time::Instant::now(); - let tx_count = process_range_block(&shared, &mut chain, from..=to); + let tx_count = process_range_block(&shared, chain_controller, from..=to); let duration = std::time::Instant::now().saturating_duration_since(now); if duration.as_secs() >= MIN_PROFILING_TIME { println!( @@ -97,7 +98,7 @@ fn profile(shared: Shared, mut chain: ChainService, from: Option, to: Optio fn process_range_block( shared: &Shared, - chain: &mut ChainService, + chain_controller: ChainController, range: impl Iterator, ) -> usize { let mut tx_count = 0; @@ -108,12 +109,14 @@ fn process_range_block( .and_then(|hash| snapshot.get_block(&hash)) .expect("read block from store"); tx_count += block.transactions().len().saturating_sub(1); - chain.process_block(Arc::new(block), Switch::NONE).unwrap(); + chain_controller + .blocking_process_block_with_switch(Arc::new(block), Switch::NONE) + .unwrap(); } tx_count } -fn sanity_check(shared: Shared, mut chain: ChainService, full_verification: bool) { +fn sanity_check(shared: Shared, chain_controller: ChainController, full_verification: bool) { let tip_header = shared.snapshot().tip_header().clone(); let chain_iter = ChainIterator::new(shared.store()); let pb = ProgressBar::new(chain_iter.len()); @@ -132,7 +135,8 @@ fn sanity_check(shared: Shared, mut chain: ChainService, full_verification: bool let mut cursor = shared.consensus().genesis_block().header(); for block in chain_iter { let header = block.header(); - if let Err(e) = chain.process_block(Arc::new(block), switch) { + if let Err(e) = chain_controller.blocking_process_block_with_switch(Arc::new(block), switch) + { eprintln!( "Replay sanity-check error: {:?} at block({}-{})", e, From 9290251a4fa072e432b2fb5d18ef179da7d5ab72 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 19 Oct 2023 10:17:33 +0800 Subject: [PATCH 089/357] Pass header_map_tmp_dir to SharedBuilder Signed-off-by: Eval EXEC --- shared/src/shared_builder.rs | 6 ++++++ util/launcher/src/lib.rs | 4 ++-- 2 
files changed, 8 insertions(+), 2 deletions(-) diff --git a/shared/src/shared_builder.rs b/shared/src/shared_builder.rs index e278fc0a57..5bf45b265e 100644 --- a/shared/src/shared_builder.rs +++ b/shared/src/shared_builder.rs @@ -260,6 +260,12 @@ impl SharedBuilder { self } + /// TODO(doc): @eval-exec + pub fn header_map_tmp_dir(mut self, header_map_tmp_dir: Option) -> Self { + self.header_map_tmp_dir = header_map_tmp_dir; + self + } + /// TODO(doc): @quake pub fn block_assembler_config(mut self, config: Option) -> Self { self.block_assembler_config = config; diff --git a/util/launcher/src/lib.rs b/util/launcher/src/lib.rs index 25926e14fc..b0f0789877 100644 --- a/util/launcher/src/lib.rs +++ b/util/launcher/src/lib.rs @@ -204,6 +204,7 @@ impl Launcher { .notify_config(self.args.config.notify.clone()) .store_config(self.args.config.store) .sync_config(self.args.config.network.sync.clone()) + .header_map_tmp_dir(self.args.config.tmp_dir.clone()) .block_assembler_config(block_assembler_config) .build()?; @@ -269,10 +270,9 @@ impl Launcher { relay_tx_receiver: Receiver, verify_failed_block_rx: tokio::sync::mpsc::UnboundedReceiver, ) -> NetworkController { - let sync_shared = Arc::new(SyncShared::with_tmpdir( + let sync_shared = Arc::new(SyncShared::new( shared.clone(), self.args.config.network.sync.clone(), - self.args.config.tmp_dir.as_ref(), relay_tx_receiver, )); let fork_enable = { From 819e191fcf9b0f494b78c7a8bb66b8f14750c4b6 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 19 Oct 2023 10:20:54 +0800 Subject: [PATCH 090/357] SyncShared don't need with_tmpdir anymore --- sync/src/types/mod.rs | 30 ++++-------------------------- 1 file changed, 4 insertions(+), 26 deletions(-) diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index c896ef04b9..0f348d1163 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -1,4 +1,3 @@ -use crate::orphan_block_pool::OrphanBlockPool; use crate::{Status, StatusCode, FAST_INDEX, LOW_INDEX, NORMAL_INDEX, 
TIME_TRACE_SIZE}; use ckb_app_config::SyncConfig; use ckb_chain::chain::{ @@ -14,15 +13,13 @@ use ckb_constant::sync::{ MAX_UNKNOWN_TX_HASHES_SIZE, MAX_UNKNOWN_TX_HASHES_SIZE_PER_PEER, POW_INTERVAL, RETRY_ASK_TX_TIMEOUT_INCREASE, SUSPEND_SYNC_TIME, }; -use ckb_error::Error as CKBError; -use ckb_logger::{debug, error, trace}; -use ckb_network::{CKBProtocolContext, PeerId, PeerIndex, SupportProtocols}; -use ckb_shared::types::VerifyFailedBlockInfo; +use ckb_logger::{debug, trace}; +use ckb_network::{CKBProtocolContext, PeerIndex, SupportProtocols}; use ckb_shared::{ block_status::BlockStatus, shared::Shared, types::{BlockNumberAndHash, HeaderIndex, HeaderIndexView, SHRINK_THRESHOLD}, - HeaderMap, Snapshot, + Snapshot, }; use ckb_store::{ChainDB, ChainStore}; use ckb_systemtime::unix_time_as_millis; @@ -32,10 +29,9 @@ use ckb_types::{ core::{self, BlockNumber, EpochExt}, packed::{self, Byte32}, prelude::*, - H256, U256, + U256, }; use ckb_util::{shrink_to_fit, Mutex, MutexGuard, RwLock, RwLockReadGuard, RwLockWriteGuard}; -use ckb_verification_traits::Switch; use dashmap::{self, DashMap}; use keyed_priority_queue::{self, KeyedPriorityQueue}; use lru::LruCache; @@ -48,14 +44,11 @@ use std::time::{Duration, Instant}; use std::{cmp, fmt, iter}; use crate::utils::send_message; -use ckb_types::core::EpochNumber; -use ckb_types::error::Error; const GET_HEADERS_CACHE_SIZE: usize = 10000; // TODO: Need discussed const GET_HEADERS_TIMEOUT: Duration = Duration::from_secs(15); const FILTER_SIZE: usize = 50000; -const ORPHAN_BLOCK_SIZE: usize = 1024; // 2 ** 13 < 6 * 1800 < 2 ** 14 const ONE_DAY_BLOCK_NUMBER: u64 = 8192; pub(crate) const FILTER_TTL: u64 = 4 * 60 * 60; @@ -998,25 +991,11 @@ pub struct SyncShared { } impl SyncShared { - /// only use on test pub fn new( shared: Shared, sync_config: SyncConfig, tx_relay_receiver: Receiver, ) -> SyncShared { - Self::with_tmpdir::(shared, sync_config, None, tx_relay_receiver) - } - - /// Generate a global sync state through 
configuration - pub fn with_tmpdir

( - shared: Shared, - sync_config: SyncConfig, - tmpdir: Option

, - tx_relay_receiver: Receiver, - ) -> SyncShared - where - P: AsRef, - { let (total_difficulty, header) = { let snapshot = shared.snapshot(); ( @@ -1037,7 +1016,6 @@ impl SyncShared { peers: Peers::default(), pending_get_block_proposals: DashMap::new(), pending_compact_blocks: Mutex::new(HashMap::default()), - // orphan_block_pool: OrphanBlockPool::with_capacity(ORPHAN_BLOCK_SIZE), inflight_proposals: DashMap::new(), inflight_blocks: RwLock::new(InflightBlocks::default()), pending_get_headers: RwLock::new(LruCache::new(GET_HEADERS_CACHE_SIZE)), From ed8d660d5b171ff6b2c3dc3c49eef964e1178242 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 19 Oct 2023 10:21:41 +0800 Subject: [PATCH 091/357] Cargo clippy, remove useless import statements --- chain/src/chain.rs | 15 ++++----------- chain/src/orphan_block_pool.rs | 2 +- shared/src/shared.rs | 3 +-- sync/src/relayer/mod.rs | 2 +- sync/src/synchronizer/block_process.rs | 5 ++--- sync/src/synchronizer/mod.rs | 8 +++----- sync/src/utils.rs | 1 - util/instrument/src/import.rs | 2 +- 8 files changed, 13 insertions(+), 25 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 0d4d6c35e3..e9e94fa649 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -3,10 +3,9 @@ use crate::forkchanges::ForkChanges; use crate::orphan_block_pool::OrphanBlockPool; -use ckb_chain_spec::versionbits::VersionbitsIndexer; use ckb_channel::{self as channel, select, Receiver, SendError, Sender}; use ckb_constant::sync::BLOCK_DOWNLOAD_WINDOW; -use ckb_error::{is_internal_db_error, Error, ErrorKind, InternalError, InternalErrorKind}; +use ckb_error::{is_internal_db_error, Error, InternalErrorKind}; use ckb_logger::Level::Trace; use ckb_logger::{ self, debug, error, info, log_enabled, log_enabled_target, trace, trace_target, warn, @@ -28,7 +27,7 @@ use ckb_types::{ resolve_transaction, BlockCellProvider, HeaderChecker, OverlayCellProvider, ResolvedTransaction, }, - service::{Request, DEFAULT_CHANNEL_SIZE}, + service::Request, 
BlockExt, BlockNumber, BlockView, Cycle, HeaderView, }, packed::Byte32, @@ -40,13 +39,8 @@ use ckb_verification::cache::Completed; use ckb_verification::{BlockVerifier, InvalidParentError, NonContextualBlockTxsVerifier}; use ckb_verification_contextual::{ContextualBlockVerifier, VerifyContext}; use ckb_verification_traits::{Switch, Verifier}; -use crossbeam::channel::SendTimeoutError; -use std::collections::{HashSet, VecDeque}; -use std::iter::Cloned; -use std::sync::atomic::{AtomicBool, Ordering}; +use std::collections::HashSet; use std::sync::Arc; -use std::time::Duration; -use std::time::Instant; use std::{cmp, thread}; const ORPHAN_BLOCK_SIZE: usize = (BLOCK_DOWNLOAD_WINDOW * 2) as usize; @@ -317,6 +311,7 @@ impl UnverifiedBlock { pub fn peer_id(&self) -> Option { self.unverified_block.peer_id() } + pub fn switch(&self) -> Option { self.unverified_block.switch() } @@ -922,8 +917,6 @@ impl ChainService { } fn verify_block(&self, unverified_block: &UnverifiedBlock) -> VerifyResult { - let log_now = std::time::Instant::now(); - let UnverifiedBlock { unverified_block: LonelyBlockWithCallback { diff --git a/chain/src/orphan_block_pool.rs b/chain/src/orphan_block_pool.rs index f7ce3a4bcb..db895939c4 100644 --- a/chain/src/orphan_block_pool.rs +++ b/chain/src/orphan_block_pool.rs @@ -1,7 +1,7 @@ use crate::chain::LonelyBlockWithCallback; use ckb_logger::debug; use ckb_types::core::{BlockView, EpochNumber}; -use ckb_types::{core, packed}; +use ckb_types::packed; use ckb_util::{parking_lot::RwLock, shrink_to_fit}; use std::collections::{HashMap, HashSet, VecDeque}; use std::sync::Arc; diff --git a/shared/src/shared.rs b/shared/src/shared.rs index 0ae32b5101..af92876b01 100644 --- a/shared/src/shared.rs +++ b/shared/src/shared.rs @@ -17,8 +17,7 @@ use ckb_store::{ChainDB, ChainStore}; use ckb_systemtime::unix_time_as_millis; use ckb_tx_pool::{BlockTemplate, TokioRwLock, TxPoolController}; use ckb_types::{ - core, - core::{service, BlockNumber, EpochExt, EpochNumber, 
HeaderView, Version}, + core::{BlockNumber, EpochExt, EpochNumber, HeaderView, Version}, packed::{self, Byte32}, prelude::*, H256, U256, diff --git a/sync/src/relayer/mod.rs b/sync/src/relayer/mod.rs index 075e2540d6..37a6e78ffe 100644 --- a/sync/src/relayer/mod.rs +++ b/sync/src/relayer/mod.rs @@ -297,7 +297,7 @@ impl Relayer { #[allow(clippy::needless_collect)] pub fn accept_block( &self, - nc: &dyn CKBProtocolContext, + _nc: &dyn CKBProtocolContext, peer: PeerIndex, block: core::BlockView, ) -> Status { diff --git a/sync/src/synchronizer/block_process.rs b/sync/src/synchronizer/block_process.rs index f4a3fdbcf9..732da3a78a 100644 --- a/sync/src/synchronizer/block_process.rs +++ b/sync/src/synchronizer/block_process.rs @@ -1,7 +1,6 @@ -use crate::{synchronizer::Synchronizer, Status, StatusCode}; -use ckb_logger::{debug, error}; +use crate::synchronizer::Synchronizer; +use ckb_logger::debug; use ckb_network::PeerIndex; -use ckb_shared::types::VerifyFailedBlockInfo; use ckb_types::{packed, prelude::*}; pub struct BlockProcess<'a> { diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index 6e53b17de9..be77696075 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -20,7 +20,7 @@ pub(crate) use self::get_headers_process::GetHeadersProcess; pub(crate) use self::headers_process::HeadersProcess; pub(crate) use self::in_ibd_process::InIBDProcess; -use crate::types::{HeadersSyncController, IBDState, Peers, SyncShared, SyncState}; +use crate::types::{HeadersSyncController, IBDState, Peers, SyncShared}; use crate::utils::{metric_ckb_message_bytes, send_message_to, MetricDirection}; use crate::{Status, StatusCode}; use ckb_shared::block_status::BlockStatus; @@ -29,10 +29,9 @@ use ckb_chain::chain::ChainController; use ckb_channel as channel; use ckb_channel::{select, Receiver}; use ckb_constant::sync::{ - BAD_MESSAGE_BAN_TIME, BLOCK_DOWNLOAD_WINDOW, CHAIN_SYNC_TIMEOUT, - EVICTION_HEADERS_RESPONSE_TIME, 
INIT_BLOCKS_IN_TRANSIT_PER_PEER, MAX_TIP_AGE, + BAD_MESSAGE_BAN_TIME, CHAIN_SYNC_TIMEOUT, EVICTION_HEADERS_RESPONSE_TIME, + INIT_BLOCKS_IN_TRANSIT_PER_PEER, MAX_TIP_AGE, }; -use ckb_error::Error as CKBError; use ckb_logger::{debug, error, info, trace, warn}; use ckb_network::{ async_trait, bytes::Bytes, tokio, CKBProtocolContext, CKBProtocolHandler, PeerIndex, @@ -662,7 +661,6 @@ impl Synchronizer { } None => { let p2p_control = raw.clone(); - let sync_shared = Arc::clone(self.shared()); let (sender, recv) = channel::bounded(2); let peers = self.get_peers_to_fetch(ibd, &disconnect_list); sender diff --git a/sync/src/utils.rs b/sync/src/utils.rs index 92fedf9536..c0949de0fd 100644 --- a/sync/src/utils.rs +++ b/sync/src/utils.rs @@ -1,5 +1,4 @@ use crate::{Status, StatusCode}; -use ckb_error::{Error as CKBError, ErrorKind, InternalError, InternalErrorKind}; use ckb_logger::error; use ckb_network::{CKBProtocolContext, PeerIndex, ProtocolId, SupportProtocols}; use ckb_types::packed::{RelayMessageReader, SyncMessageReader}; diff --git a/util/instrument/src/import.rs b/util/instrument/src/import.rs index 74c28a72fb..c18bec1fbc 100644 --- a/util/instrument/src/import.rs +++ b/util/instrument/src/import.rs @@ -1,4 +1,4 @@ -use ckb_chain::chain::{ChainController, LonelyBlockWithCallback}; +use ckb_chain::chain::ChainController; use ckb_jsonrpc_types::BlockView as JsonBlock; use ckb_types::core; #[cfg(feature = "progress_bar")] From aff2fad3a483a32bb37726298b274f331e6b0292 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 19 Oct 2023 10:34:08 +0800 Subject: [PATCH 092/357] Move ckb-sync's tests/orphan_block_pool.rs to ckb-chain --- chain/src/tests/mod.rs | 1 + {sync => chain}/src/tests/orphan_block_pool.rs | 0 2 files changed, 1 insertion(+) rename {sync => chain}/src/tests/orphan_block_pool.rs (100%) diff --git a/chain/src/tests/mod.rs b/chain/src/tests/mod.rs index cafc0d6a57..ea5909c044 100644 --- a/chain/src/tests/mod.rs +++ b/chain/src/tests/mod.rs @@ -8,6 +8,7 @@ mod 
load_code_with_snapshot; mod load_input_cell_data; mod load_input_data_hash_cell; mod non_contextual_block_txs_verify; +mod orphan_block_pool; mod reward; mod truncate; mod uncle; diff --git a/sync/src/tests/orphan_block_pool.rs b/chain/src/tests/orphan_block_pool.rs similarity index 100% rename from sync/src/tests/orphan_block_pool.rs rename to chain/src/tests/orphan_block_pool.rs From 4deae67beabda5a15bb31e531d3334485e68daf3 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 19 Oct 2023 10:37:29 +0800 Subject: [PATCH 093/357] Fix integration test for OrphanBlockPool --- chain/src/tests/orphan_block_pool.rs | 32 ++++++++++++++++++++++------ 1 file changed, 25 insertions(+), 7 deletions(-) diff --git a/chain/src/tests/orphan_block_pool.rs b/chain/src/tests/orphan_block_pool.rs index 4d3d14e524..9b3562d3d4 100644 --- a/chain/src/tests/orphan_block_pool.rs +++ b/chain/src/tests/orphan_block_pool.rs @@ -1,3 +1,4 @@ +use crate::chain::LonelyBlockWithCallback; use ckb_chain_spec::consensus::ConsensusBuilder; use ckb_systemtime::unix_time_as_millis; use ckb_types::core::{BlockBuilder, BlockView, EpochNumberWithFraction, HeaderView}; @@ -8,15 +9,23 @@ use std::thread; use crate::orphan_block_pool::OrphanBlockPool; -fn gen_block(parent_header: &HeaderView) -> BlockView { +fn gen_lonely_block_with_callback(parent_header: &HeaderView) -> LonelyBlockWithCallback { let number = parent_header.number() + 1; - BlockBuilder::default() + let block = BlockBuilder::default() .parent_hash(parent_header.hash()) .timestamp(unix_time_as_millis().pack()) .number(number.pack()) .epoch(EpochNumberWithFraction::new(number / 1000, number % 1000, 1000).pack()) .nonce((parent_header.nonce() + 1).pack()) - .build() + .build(); + LonelyBlockWithCallback { + lonely_block: LonelyBlock { + block: Arc::new(block), + peer_id: None, + switch: None, + }, + verify_callback: None, + } } #[test] @@ -27,7 +36,7 @@ fn test_remove_blocks_by_parent() { let mut parent = consensus.genesis_block().header(); let 
pool = OrphanBlockPool::with_capacity(200); for _ in 1..block_number { - let new_block = gen_block(&parent); + let new_block = gen_lonely_block_with_callback(&parent); blocks.push(new_block.clone()); pool.insert(new_block.clone()); parent = new_block.header(); @@ -46,7 +55,7 @@ fn test_remove_blocks_by_parent_and_get_block_should_not_deadlock() { let mut header = consensus.genesis_block().header(); let mut hashes = Vec::new(); for _ in 1..1024 { - let new_block = gen_block(&header); + let new_block = gen_lonely_block_with_callback(&header); pool.insert(new_block.clone()); header = new_block.header(); hashes.push(header.hash()); @@ -74,7 +83,7 @@ fn test_leaders() { let mut parent = consensus.genesis_block().header(); let pool = OrphanBlockPool::with_capacity(20); for i in 0..block_number - 1 { - let new_block = gen_block(&parent); + let new_block = gen_lonely_block_with_callback(&parent); blocks.push(new_block.clone()); parent = new_block.header(); if i % 5 != 0 { @@ -137,8 +146,17 @@ fn test_remove_expired_blocks() { .epoch(deprecated.clone().pack()) .nonce((parent.nonce() + 1).pack()) .build(); - pool.insert(new_block.clone()); + parent = new_block.header(); + let lonely_block_with_callback = LonelyBlockWithCallback { + lonely_block: LonelyBlock { + block: Arc::new(new_block), + peer_id: None, + switch: None, + }, + verify_callback: None, + }; + pool.insert(lonely_block_with_callback); } assert_eq!(pool.leaders_len(), 1); From a3d1b92dee5e37689dc7f65c7e6ce4237d0ffd41 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 19 Oct 2023 10:42:56 +0800 Subject: [PATCH 094/357] Fix Unit test: ckb-chain::tests::truncate.rs --- chain/src/tests/truncate.rs | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/chain/src/tests/truncate.rs b/chain/src/tests/truncate.rs index a9c892c7ee..d1d2dd1d6e 100644 --- a/chain/src/tests/truncate.rs +++ b/chain/src/tests/truncate.rs @@ -11,7 +11,8 @@ fn test_truncate() { let builder = 
SharedBuilder::with_temp_db(); let (shared, mut pack) = builder.consensus(Consensus::default()).build().unwrap(); - let mut chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); + let mut _chain_service = ChainService::new(shared.clone(), pack.take_proposal_table(), None); + let chain_controller = _chain_service.start(Some("test_truncate::ChainService")); let genesis = shared .store() @@ -26,8 +27,8 @@ fn test_truncate() { } for blk in mock.blocks() { - chain_service - .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) + chain_controller + .blocking_process_block_with_switch(Arc::new(blk.clone()), Switch::DISABLE_ALL) .unwrap(); } @@ -38,12 +39,12 @@ fn test_truncate() { } for blk in mock.blocks() { - chain_service - .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) + chain_controller + .blocking_process_block_with_switch(Arc::new(blk.clone()), Switch::DISABLE_ALL) .unwrap(); } - chain_service.truncate(&target.hash()).unwrap(); + chain_controller.truncate(target.hash()).unwrap(); assert_eq!(shared.snapshot().tip_header(), &target); } From e1d57417429d708fd9053b1290ac7cab418503ae Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 19 Oct 2023 10:45:47 +0800 Subject: [PATCH 095/357] Fix Unit test: ckb-chain::tests::basic.rs --- chain/src/tests/basic.rs | 52 ++++++++++++++++++++++------------------ 1 file changed, 29 insertions(+), 23 deletions(-) diff --git a/chain/src/tests/basic.rs b/chain/src/tests/basic.rs index d9b7c1ece8..3f1b4cb673 100644 --- a/chain/src/tests/basic.rs +++ b/chain/src/tests/basic.rs @@ -1,4 +1,4 @@ -use crate::chain::ChainController; +use crate::chain::{ChainController, VerifiedBlockStatus}; use crate::tests::util::start_chain; use ckb_chain_spec::consensus::{Consensus, ConsensusBuilder}; use ckb_dao_utils::genesis_dao_data; @@ -33,9 +33,12 @@ fn repeat_process_block() { chain.gen_empty_block_with_nonce(100u128, &mock_store); let block = Arc::new(chain.blocks().last().unwrap().clone()); - 
assert!(chain_controller - .process_block(Arc::clone(&block)) - .expect("process block ok")); + assert_eq!( + chain_controller + .blocking_process_block(Arc::clone(&block)) + .expect("process block ok"), + VerifiedBlockStatus::FirstSeenAndVerified + ); assert_eq!( shared .store() @@ -45,9 +48,12 @@ fn repeat_process_block() { Some(true) ); - assert!(!chain_controller - .process_block(Arc::clone(&block)) - .expect("process block ok")); + assert_ne!( + chain_controller + .blocking_process_block(Arc::clone(&block)) + .expect("process block ok"), + VerifiedBlockStatus::FirstSeenAndVerified + ); assert_eq!( shared .store() @@ -108,7 +114,7 @@ fn test_genesis_transaction_spend() { for block in &chain.blocks()[0..10] { assert!(chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_ALL) .is_ok()); } @@ -165,7 +171,7 @@ fn test_transaction_spend_in_same_block() { for block in chain.blocks() { chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_EPOCH) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_EPOCH) .expect("process block ok"); } @@ -205,7 +211,7 @@ fn test_transaction_spend_in_same_block() { parent_number4, epoch.number_with_fraction(parent_number4), parent_hash4, - 2 + 2, )), mem_cell_data: None, mem_cell_data_hash: None, @@ -236,13 +242,13 @@ fn test_transaction_conflict_in_same_block() { for block in chain.blocks().iter().take(3) { chain_controller - .process_block(Arc::new(block.clone())) + .blocking_process_block(Arc::new(block.clone())) .expect("process block ok"); } assert_error_eq!( OutPointError::Dead(OutPoint::new(tx1_hash, 0)), chain_controller - .process_block(Arc::new(chain.blocks()[3].clone())) + .blocking_process_block(Arc::new(chain.blocks()[3].clone())) .unwrap_err(), ); } @@ -273,13 +279,13 @@ fn test_transaction_conflict_in_different_blocks() { for block in 
chain.blocks().iter().take(4) { chain_controller - .process_block(Arc::new(block.clone())) + .blocking_process_block(Arc::new(block.clone())) .expect("process block ok"); } assert_error_eq!( OutPointError::Unknown(OutPoint::new(tx1_hash, 0)), chain_controller - .process_block(Arc::new(chain.blocks()[4].clone())) + .blocking_process_block(Arc::new(chain.blocks()[4].clone())) .unwrap_err(), ); } @@ -307,13 +313,13 @@ fn test_invalid_out_point_index_in_same_block() { for block in chain.blocks().iter().take(3) { chain_controller - .process_block(Arc::new(block.clone())) + .blocking_process_block(Arc::new(block.clone())) .expect("process block ok"); } assert_error_eq!( OutPointError::Unknown(OutPoint::new(tx1_hash, 1)), chain_controller - .process_block(Arc::new(chain.blocks()[3].clone())) + .blocking_process_block(Arc::new(chain.blocks()[3].clone())) .unwrap_err(), ); } @@ -342,14 +348,14 @@ fn test_invalid_out_point_index_in_different_blocks() { for block in chain.blocks().iter().take(4) { chain_controller - .process_block(Arc::new(block.clone())) + .blocking_process_block(Arc::new(block.clone())) .expect("process block ok"); } assert_error_eq!( OutPointError::Unknown(OutPoint::new(tx1_hash, 1)), chain_controller - .process_block(Arc::new(chain.blocks()[4].clone())) + .blocking_process_block(Arc::new(chain.blocks()[4].clone())) .unwrap_err(), ); } @@ -411,13 +417,13 @@ fn test_chain_fork_by_total_difficulty() { for block in chain1.blocks() { chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_ALL) .expect("process block ok"); } for block in chain2.blocks() { chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_ALL) .expect("process block ok"); } assert_eq!( @@ -454,7 +460,7 @@ fn test_chain_fork_by_first_received() { for chain in 
vec![chain1.clone(), chain2.clone(), chain3.clone()] { for block in chain.blocks() { chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_ALL) .expect("process block ok"); } } @@ -515,7 +521,7 @@ fn prepare_context_chain( .build(); chain_controller - .internal_process_block(Arc::new(new_block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(new_block.clone()), Switch::DISABLE_ALL) .expect("process block ok"); chain1.push(new_block.clone()); mock_store.insert_block(&new_block, &epoch); @@ -555,7 +561,7 @@ fn prepare_context_chain( .build(); chain_controller - .internal_process_block(Arc::new(new_block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(new_block.clone()), Switch::DISABLE_ALL) .expect("process block ok"); chain2.push(new_block.clone()); mock_store.insert_block(&new_block, &epoch); From 1575f977cadf8299218d82be9fb741d65a059e0f Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 19 Oct 2023 10:51:19 +0800 Subject: [PATCH 096/357] Fix Unit test: ckb-chain::tests::find_fork.rs --- chain/src/tests/find_fork.rs | 94 +++++++++++++++++++----------------- 1 file changed, 50 insertions(+), 44 deletions(-) diff --git a/chain/src/tests/find_fork.rs b/chain/src/tests/find_fork.rs index 9b34c79aaa..9ade30ea20 100644 --- a/chain/src/tests/find_fork.rs +++ b/chain/src/tests/find_fork.rs @@ -23,7 +23,8 @@ use std::sync::Arc; fn test_find_fork_case1() { let builder = SharedBuilder::with_temp_db(); let (shared, mut pack) = builder.consensus(Consensus::default()).build().unwrap(); - let mut chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); + let mut _chain_service = ChainService::new(shared.clone(), pack.take_proposal_table(), None); + let chain_controller = _chain_service.start(Some("test_find_fork_case1::ChainService")); let genesis = shared .store() 
.get_block_header(&shared.store().get_block_hash(0).unwrap()) @@ -43,15 +44,15 @@ fn test_find_fork_case1() { // fork1 total_difficulty 400 for blk in fork1.blocks() { - chain_service - .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) + chain_controller + .blocking_process_block_with_switch(Arc::new(blk.clone()), Switch::DISABLE_ALL) .unwrap(); } // fork2 total_difficulty 270 for blk in fork2.blocks() { - chain_service - .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) + chain_controller + .blocking_process_block_with_switch(Arc::new(blk.clone()), Switch::DISABLE_ALL) .unwrap(); } @@ -73,7 +74,7 @@ fn test_find_fork_case1() { let mut fork = ForkChanges::default(); - chain_service.find_fork(&mut fork, tip_number, fork2.tip(), ext); + _chain_service.find_fork(&mut fork, tip_number, fork2.tip(), ext); let detached_blocks: HashSet = fork1.blocks().clone().into_iter().collect(); let attached_blocks: HashSet = fork2.blocks().clone().into_iter().collect(); @@ -95,7 +96,8 @@ fn test_find_fork_case1() { fn test_find_fork_case2() { let builder = SharedBuilder::with_temp_db(); let (shared, mut pack) = builder.consensus(Consensus::default()).build().unwrap(); - let mut chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); + let mut _chain_service = ChainService::new(shared.clone(), pack.take_proposal_table(), None); + let chain_controller = _chain_service.start(Some("test_find_fork_case2::ChainService")); let genesis = shared .store() @@ -115,15 +117,15 @@ fn test_find_fork_case2() { // fork1 total_difficulty 400 for blk in fork1.blocks() { - chain_service - .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) + chain_controller + .blocking_process_block_with_switch(Arc::new(blk.clone()), Switch::DISABLE_ALL) .unwrap(); } // fork2 total_difficulty 280 for blk in fork2.blocks() { - chain_service - .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) + chain_controller + 
.blocking_process_block_with_switch(Arc::new(blk.clone()), Switch::DISABLE_ALL) .unwrap(); } @@ -145,7 +147,7 @@ fn test_find_fork_case2() { let mut fork = ForkChanges::default(); - chain_service.find_fork(&mut fork, tip_number, fork2.tip(), ext); + _chain_service.find_fork(&mut fork, tip_number, fork2.tip(), ext); let detached_blocks: HashSet = fork1.blocks()[1..].iter().cloned().collect(); let attached_blocks: HashSet = fork2.blocks().clone().into_iter().collect(); @@ -167,7 +169,8 @@ fn test_find_fork_case2() { fn test_find_fork_case3() { let builder = SharedBuilder::with_temp_db(); let (shared, mut pack) = builder.consensus(Consensus::default()).build().unwrap(); - let mut chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); + let mut _chain_service = ChainService::new(shared.clone(), pack.take_proposal_table(), None); + let chain_controller = _chain_service.start(Some("test_find_fork_case3::ChainService")); let genesis = shared .store() @@ -188,15 +191,15 @@ fn test_find_fork_case3() { // fork1 total_difficulty 240 for blk in fork1.blocks() { - chain_service - .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) + chain_controller + .blocking_process_block_with_switch(Arc::new(blk.clone()), Switch::DISABLE_ALL) .unwrap(); } // fork2 total_difficulty 200 for blk in fork2.blocks() { - chain_service - .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) + chain_controller + .blocking_process_block_with_switch(Arc::new(blk.clone()), Switch::DISABLE_ALL) .unwrap(); } @@ -217,7 +220,7 @@ fn test_find_fork_case3() { }; let mut fork = ForkChanges::default(); - chain_service.find_fork(&mut fork, tip_number, fork2.tip(), ext); + _chain_service.find_fork(&mut fork, tip_number, fork2.tip(), ext); let detached_blocks: HashSet = fork1.blocks().clone().into_iter().collect(); let attached_blocks: HashSet = fork2.blocks().clone().into_iter().collect(); @@ -239,7 +242,8 @@ fn test_find_fork_case3() { fn test_find_fork_case4() { let builder 
= SharedBuilder::with_temp_db(); let (shared, mut pack) = builder.consensus(Consensus::default()).build().unwrap(); - let mut chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); + let mut _chain_service = ChainService::new(shared.clone(), pack.take_proposal_table(), None); + let chain_controller = _chain_service.start(Some("test_find_fork_case4::ChainService")); let genesis = shared .store() @@ -260,15 +264,15 @@ fn test_find_fork_case4() { // fork1 total_difficulty 200 for blk in fork1.blocks() { - chain_service - .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) + chain_controller + .blocking_process_block_with_switch(Arc::new(blk.clone()), Switch::DISABLE_ALL) .unwrap(); } // fork2 total_difficulty 160 for blk in fork2.blocks() { - chain_service - .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) + chain_controller + .blocking_process_block_with_switch(Arc::new(blk.clone()), Switch::DISABLE_ALL) .unwrap(); } @@ -290,7 +294,7 @@ fn test_find_fork_case4() { let mut fork = ForkChanges::default(); - chain_service.find_fork(&mut fork, tip_number, fork2.tip(), ext); + _chain_service.find_fork(&mut fork, tip_number, fork2.tip(), ext); let detached_blocks: HashSet = fork1.blocks().clone().into_iter().collect(); let attached_blocks: HashSet = fork2.blocks().clone().into_iter().collect(); @@ -323,7 +327,8 @@ fn repeatedly_switch_fork() { .consensus(Consensus::default()) .build() .unwrap(); - let mut chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); + let mut _chain_service = ChainService::new(shared.clone(), pack.take_proposal_table(), None); + let chain_controller = _chain_service.start(Some("repeatedly_switch_fork::ChainService")); for _ in 0..2 { fork1.gen_empty_block_with_nonce(1u128, &mock_store); @@ -334,14 +339,14 @@ fn repeatedly_switch_fork() { } for blk in fork1.blocks() { - chain_service - .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) + chain_controller + 
.blocking_process_block_with_switch(Arc::new(blk.clone()), Switch::DISABLE_ALL) .unwrap(); } for blk in fork2.blocks() { - chain_service - .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) + chain_controller + .blocking_process_block_with_switch(Arc::new(blk.clone()), Switch::DISABLE_ALL) .unwrap(); } @@ -361,8 +366,8 @@ fn repeatedly_switch_fork() { .nonce(1u128.pack()) .uncle(uncle) .build(); - chain_service - .process_block(Arc::new(new_block1.clone()), Switch::DISABLE_ALL) + chain_controller + .blocking_process_block_with_switch(Arc::new(new_block1.clone()), Switch::DISABLE_ALL) .unwrap(); //switch fork2 @@ -380,8 +385,8 @@ fn repeatedly_switch_fork() { .nonce(2u128.pack()) .build(); parent = new_block2.clone(); - chain_service - .process_block(Arc::new(new_block2), Switch::DISABLE_ALL) + chain_controller + .blocking_process_block_with_switch(Arc::new(new_block2), Switch::DISABLE_ALL) .unwrap(); let epoch = shared .consensus() @@ -395,8 +400,8 @@ fn repeatedly_switch_fork() { .epoch(epoch.number_with_fraction(parent.number() + 1).pack()) .nonce(2u128.pack()) .build(); - chain_service - .process_block(Arc::new(new_block3), Switch::DISABLE_ALL) + chain_controller + .blocking_process_block_with_switch(Arc::new(new_block3), Switch::DISABLE_ALL) .unwrap(); //switch fork1 @@ -413,8 +418,8 @@ fn repeatedly_switch_fork() { .epoch(epoch.number_with_fraction(parent.number() + 1).pack()) .nonce(1u128.pack()) .build(); - chain_service - .process_block(Arc::new(new_block4.clone()), Switch::DISABLE_ALL) + chain_controller + .blocking_process_block_with_switch(Arc::new(new_block4.clone()), Switch::DISABLE_ALL) .unwrap(); parent = new_block4; @@ -430,8 +435,8 @@ fn repeatedly_switch_fork() { .epoch(epoch.number_with_fraction(parent.number() + 1).pack()) .nonce(1u128.pack()) .build(); - chain_service - .process_block(Arc::new(new_block5), Switch::DISABLE_ALL) + chain_controller + .blocking_process_block_with_switch(Arc::new(new_block5), Switch::DISABLE_ALL) .unwrap(); 
} @@ -449,7 +454,8 @@ fn test_fork_proposal_table() { }; let (shared, mut pack) = builder.consensus(consensus).build().unwrap(); - let mut chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); + let mut _chain_service = ChainService::new(shared.clone(), pack.take_proposal_table(), None); + let chain_controller = _chain_service.start(Some("test_fork_proposal_table::ChainService")); let genesis = shared .store() @@ -467,8 +473,8 @@ fn test_fork_proposal_table() { } for blk in mock.blocks() { - chain_service - .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) + chain_controller + .blocking_process_block_with_switch(Arc::new(blk.clone()), Switch::DISABLE_ALL) .unwrap(); } @@ -484,8 +490,8 @@ fn test_fork_proposal_table() { } for blk in mock.blocks().iter().skip(3) { - chain_service - .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) + chain_controller + .blocking_process_block_with_switch(Arc::new(blk.clone()), Switch::DISABLE_ALL) .unwrap(); } From cdd2a6b216edc7cbf4ad01926100061605fe1d36 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 19 Oct 2023 10:52:37 +0800 Subject: [PATCH 097/357] Fix Unit test: ckb-chain::tests::dep_cell.rs --- chain/src/tests/dep_cell.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/chain/src/tests/dep_cell.rs b/chain/src/tests/dep_cell.rs index cac812d6ae..64e3fbe7d4 100644 --- a/chain/src/tests/dep_cell.rs +++ b/chain/src/tests/dep_cell.rs @@ -152,7 +152,7 @@ fn test_package_txs_with_deps() { ) .build(); chain_controller - .internal_process_block(Arc::new(block), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block), Switch::DISABLE_ALL) .unwrap(); } @@ -168,7 +168,7 @@ fn test_package_txs_with_deps() { let block: Block = block_template.clone().into(); let block = block.as_advanced_builder().build(); chain_controller - .internal_process_block(Arc::new(block), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block), 
Switch::DISABLE_ALL) .unwrap(); } @@ -298,7 +298,7 @@ fn test_package_txs_with_deps_unstable_sort() { ) .build(); chain_controller - .internal_process_block(Arc::new(block), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block), Switch::DISABLE_ALL) .unwrap(); } @@ -314,7 +314,7 @@ fn test_package_txs_with_deps_unstable_sort() { let block: Block = block_template.clone().into(); let block = block.as_advanced_builder().build(); chain_controller - .internal_process_block(Arc::new(block), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block), Switch::DISABLE_ALL) .unwrap(); } @@ -437,7 +437,7 @@ fn test_package_txs_with_deps2() { ) .build(); chain_controller - .internal_process_block(Arc::new(block), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block), Switch::DISABLE_ALL) .unwrap(); } // skip gap @@ -452,7 +452,7 @@ fn test_package_txs_with_deps2() { let block: Block = block_template.clone().into(); let block = block.as_advanced_builder().build(); chain_controller - .internal_process_block(Arc::new(block), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block), Switch::DISABLE_ALL) .unwrap(); } @@ -562,7 +562,7 @@ fn test_package_txs_with_deps_priority() { let block: Block = block_template.clone().into(); let block = block.as_advanced_builder().build(); chain_controller - .internal_process_block(Arc::new(block), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block), Switch::DISABLE_ALL) .unwrap(); } @@ -578,7 +578,7 @@ fn test_package_txs_with_deps_priority() { let block: Block = block_template.clone().into(); let block = block.as_advanced_builder().build(); chain_controller - .internal_process_block(Arc::new(block), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block), Switch::DISABLE_ALL) .unwrap(); } From 0274b63f7e5b35083c7f5fdbeb0b7cb071d68db9 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 19 Oct 2023 10:57:49 +0800 Subject: [PATCH 
098/357] Fix Unit test: ckb-chain::tests::block_assembler.rs --- chain/src/tests/block_assembler.rs | 32 +++++++++++++++--------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/chain/src/tests/block_assembler.rs b/chain/src/tests/block_assembler.rs index aa50eca718..456b671b0d 100644 --- a/chain/src/tests/block_assembler.rs +++ b/chain/src/tests/block_assembler.rs @@ -47,8 +47,8 @@ fn start_chain(consensus: Option) -> (ChainController, Shared) { let network = dummy_network(&shared); pack.take_tx_pool_builder().start(network); - let chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); - let chain_controller = chain_service.start::<&str>(None); + let _chain_service = ChainService::new(shared.clone(), pack.take_proposal_table(), None); + let chain_controller = _chain_service.start::<&str>(None); (chain_controller, shared) } @@ -142,7 +142,7 @@ fn test_block_template_timestamp() { let block = gen_block(&genesis, 0, &epoch); chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_ALL) .unwrap(); let mut block_template = shared @@ -209,13 +209,13 @@ fn test_prepare_uncles() { let block1_1 = gen_block(&block0_1.header(), 10, &epoch); chain_controller - .internal_process_block(Arc::new(block0_1), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block0_1), Switch::DISABLE_ALL) .unwrap(); chain_controller - .internal_process_block(Arc::new(block0_0.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block0_0.clone()), Switch::DISABLE_ALL) .unwrap(); chain_controller - .internal_process_block(Arc::new(block1_1.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block1_1.clone()), Switch::DISABLE_ALL) .unwrap(); let mut block_template = shared @@ -239,7 +239,7 @@ fn test_prepare_uncles() { let block2_1 = gen_block(&block1_1.header(), 10, &epoch); 
chain_controller - .internal_process_block(Arc::new(block2_1.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block2_1.clone()), Switch::DISABLE_ALL) .unwrap(); let mut block_template = shared @@ -263,7 +263,7 @@ fn test_prepare_uncles() { let block3_1 = gen_block(&block2_1.header(), 10, &epoch); chain_controller - .internal_process_block(Arc::new(block3_1), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block3_1), Switch::DISABLE_ALL) .unwrap(); let mut block_template = shared @@ -299,13 +299,13 @@ fn test_candidate_uncles_retain() { let block1_1 = gen_block(&block0_1.header(), 10, &epoch); chain_controller - .internal_process_block(Arc::new(block0_1), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block0_1), Switch::DISABLE_ALL) .unwrap(); chain_controller - .internal_process_block(Arc::new(block0_0.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block0_0.clone()), Switch::DISABLE_ALL) .unwrap(); chain_controller - .internal_process_block(Arc::new(block1_1.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block1_1.clone()), Switch::DISABLE_ALL) .unwrap(); candidate_uncles.insert(block0_0.as_uncle()); @@ -326,7 +326,7 @@ fn test_candidate_uncles_retain() { let block2_0 = gen_block(&block1_0.header(), 13, &epoch); for block in vec![block1_0, block2_0.clone()] { chain_controller - .internal_process_block(Arc::new(block), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block), Switch::DISABLE_ALL) .unwrap(); } @@ -346,7 +346,7 @@ fn test_candidate_uncles_retain() { let block3_0 = gen_block(&block2_0.header(), 10, &epoch); chain_controller - .internal_process_block(Arc::new(block3_0.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block3_0.clone()), Switch::DISABLE_ALL) .unwrap(); { @@ -413,7 +413,7 @@ fn test_package_basic() { for _i in 0..4 { let block = gen_block(&parent_header, 11, 
&epoch); chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_ALL) .expect("process block"); parent_header = block.header().to_owned(); blocks.push(block); @@ -520,7 +520,7 @@ fn test_package_multi_best_scores() { for _i in 0..4 { let block = gen_block(&parent_header, 11, &epoch); chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_ALL) .expect("process block"); parent_header = block.header().to_owned(); blocks.push(block); @@ -636,7 +636,7 @@ fn test_package_low_fee_descendants() { for _i in 0..4 { let block = gen_block(&parent_header, 11, &epoch); chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_ALL) .expect("process block"); parent_header = block.header().to_owned(); blocks.push(block); From 683d601df75b39738232ee68f56bb30148f2c5c0 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 19 Oct 2023 10:58:30 +0800 Subject: [PATCH 099/357] Fix Unit test: ckb-chain::tests::delay_verify.rs --- chain/src/tests/delay_verify.rs | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/chain/src/tests/delay_verify.rs b/chain/src/tests/delay_verify.rs index b2b8029edf..e9fcb3a2aa 100644 --- a/chain/src/tests/delay_verify.rs +++ b/chain/src/tests/delay_verify.rs @@ -46,20 +46,20 @@ fn test_dead_cell_in_same_block() { for block in chain1.blocks() { chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_EPOCH) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_EPOCH) .expect("process block ok"); } for block in chain2.blocks().iter().take(switch_fork_number + 1) { chain_controller - .internal_process_block(Arc::new(block.clone()), 
Switch::DISABLE_EPOCH) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_EPOCH) .expect("process block ok"); } assert_error_eq!( OutPointError::Dead(OutPoint::new(tx1_hash, 0)), chain_controller - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(chain2.blocks()[switch_fork_number + 1].clone()), Switch::DISABLE_EPOCH, ) @@ -101,20 +101,20 @@ fn test_dead_cell_in_different_block() { for block in chain1.blocks() { chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_EPOCH) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_EPOCH) .expect("process block ok"); } for block in chain2.blocks().iter().take(switch_fork_number + 2) { chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_EPOCH) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_EPOCH) .expect("process block ok"); } assert_error_eq!( OutPointError::Unknown(OutPoint::new(tx1_hash, 0)), chain_controller - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(chain2.blocks()[switch_fork_number + 2].clone()), Switch::DISABLE_EPOCH, ) @@ -157,20 +157,20 @@ fn test_invalid_out_point_index_in_same_block() { for block in chain1.blocks() { chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_EPOCH) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_EPOCH) .expect("process block ok"); } for block in chain2.blocks().iter().take(switch_fork_number + 1) { chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_EPOCH) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_EPOCH) .expect("process block ok"); } assert_error_eq!( OutPointError::Unknown(OutPoint::new(tx1_hash, 1)), chain_controller - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(chain2.blocks()[switch_fork_number + 1].clone()), 
Switch::DISABLE_EPOCH, ) @@ -214,20 +214,20 @@ fn test_invalid_out_point_index_in_different_blocks() { for block in chain1.blocks() { chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_EPOCH) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_EPOCH) .expect("process block ok"); } for block in chain2.blocks().iter().take(switch_fork_number + 2) { chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_EPOCH) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_EPOCH) .expect("process block ok"); } assert_error_eq!( OutPointError::Unknown(OutPoint::new(tx1_hash, 1)), chain_controller - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(chain2.blocks()[switch_fork_number + 2].clone()), Switch::DISABLE_EPOCH, ) @@ -271,7 +271,7 @@ fn test_full_dead_transaction() { .build(); chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_EPOCH) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_EPOCH) .expect("process block ok"); mock_store.insert_block(&block, &epoch); @@ -346,7 +346,7 @@ fn test_full_dead_transaction() { .build() }; chain_controller - .internal_process_block(Arc::new(new_block.clone()), Switch::DISABLE_EPOCH) + .blocking_process_block_with_switch(Arc::new(new_block.clone()), Switch::DISABLE_EPOCH) .expect("process block ok"); mock_store.insert_block(&new_block, &epoch); parent = new_block.header().to_owned(); @@ -426,7 +426,7 @@ fn test_full_dead_transaction() { .build() }; chain_controller - .internal_process_block(Arc::new(new_block.clone()), Switch::DISABLE_EPOCH) + .blocking_process_block_with_switch(Arc::new(new_block.clone()), Switch::DISABLE_EPOCH) .expect("process block ok"); mock_store.insert_block(&new_block, &epoch); parent = new_block.header().to_owned(); @@ -495,7 +495,7 @@ fn test_full_dead_transaction() { .build() }; chain_controller - 
.internal_process_block(Arc::new(new_block.clone()), Switch::DISABLE_EPOCH) + .blocking_process_block_with_switch(Arc::new(new_block.clone()), Switch::DISABLE_EPOCH) .expect("process block ok"); mock_store.insert_block(&new_block, &epoch); parent = new_block.header().to_owned(); From 0ddef8f2e89b2d003b1545d742ce202fe5cdcc52 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 19 Oct 2023 10:59:03 +0800 Subject: [PATCH 100/357] Fix Unit test: ckb-chain::tests::non_contextual_block_txs_verify.rs --- chain/src/tests/non_contextual_block_txs_verify.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chain/src/tests/non_contextual_block_txs_verify.rs b/chain/src/tests/non_contextual_block_txs_verify.rs index b8317363a3..68178658d8 100644 --- a/chain/src/tests/non_contextual_block_txs_verify.rs +++ b/chain/src/tests/non_contextual_block_txs_verify.rs @@ -156,7 +156,7 @@ fn non_contextual_block_txs_verify() { let block = gen_block(&parent, vec![tx0, tx1], &shared, &mock_store); - let ret = chain_controller.process_block(Arc::new(block)); + let ret = chain_controller.blocking_process_block(Arc::new(block)); assert!(ret.is_err()); assert_eq!( format!("{}", ret.err().unwrap()), From 15136db6591487dd46cd9bb83d9317247042b6db Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 19 Oct 2023 10:59:28 +0800 Subject: [PATCH 101/357] Fix Unit test: ckb-chain::tests::reward.rs --- chain/src/tests/reward.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/chain/src/tests/reward.rs b/chain/src/tests/reward.rs index 774fa6cd58..3b2529acad 100644 --- a/chain/src/tests/reward.rs +++ b/chain/src/tests/reward.rs @@ -228,7 +228,7 @@ fn finalize_reward() { parent = block.header().clone(); chain_controller - .process_block(Arc::new(block.clone())) + .blocking_process_block(Arc::new(block.clone())) .expect("process block ok"); blocks.push(block); } @@ -265,7 +265,7 @@ fn finalize_reward() { parent = block.header(); chain_controller - 
.process_block(Arc::new(block.clone())) + .blocking_process_block(Arc::new(block.clone())) .expect("process block ok"); let (target, reward) = RewardCalculator::new(shared.consensus(), shared.snapshot().as_ref()) @@ -299,6 +299,6 @@ fn finalize_reward() { ); chain_controller - .process_block(Arc::new(block)) + .blocking_process_block(Arc::new(block)) .expect("process block ok"); } From 550478f57f27f179866399c612b7e9acf11da56e Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 19 Oct 2023 11:00:50 +0800 Subject: [PATCH 102/357] Fix Unit test: ckb-chain::tests::uncle.rs --- chain/src/tests/uncle.rs | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/chain/src/tests/uncle.rs b/chain/src/tests/uncle.rs index 3d8d4da0a0..6c32ff1560 100644 --- a/chain/src/tests/uncle.rs +++ b/chain/src/tests/uncle.rs @@ -10,7 +10,9 @@ use std::sync::Arc; fn test_get_block_body_after_inserting() { let builder = SharedBuilder::with_temp_db(); let (shared, mut pack) = builder.consensus(Consensus::default()).build().unwrap(); - let mut chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); + let mut _chain_service = ChainService::new(shared.clone(), pack.take_proposal_table(), None); + let chain_controller = + _chain_service.start(Some("test_get_block_body_after_inserting::ChainService")); let genesis = shared .store() .get_block_header(&shared.store().get_block_hash(0).unwrap()) @@ -26,15 +28,15 @@ fn test_get_block_body_after_inserting() { } for blk in fork1.blocks() { - chain_service - .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) + chain_controller + .blocking_process_block_with_switch(Arc::new(blk.clone()), Switch::DISABLE_ALL) .unwrap(); let len = shared.snapshot().get_block_body(&blk.hash()).len(); assert_eq!(len, 1, "[fork1] snapshot.get_block_body({})", blk.hash(),); } for blk in fork2.blocks() { - chain_service - .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) + chain_controller + 
.blocking_process_block_with_switch(Arc::new(blk.clone()), Switch::DISABLE_ALL) .unwrap(); let snapshot = shared.snapshot(); assert!(snapshot.get_block_header(&blk.hash()).is_some()); From f291a243b61cfdc61de0c998dc676f6c3ff7e2c6 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 19 Oct 2023 11:33:39 +0800 Subject: [PATCH 103/357] Add PartialEq attribute to VerifiedBlockStatus, Add Clone attribute to LonelyBlock --- chain/src/chain.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index e9e94fa649..3fe5a28c2e 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -53,7 +53,7 @@ pub type VerifyResult = Result; pub type VerifyCallback = dyn FnOnce(VerifyResult) + Send + Sync; /// VerifiedBlockStatus is -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] pub enum VerifiedBlockStatus { // The block is being seen for the first time. FirstSeenAndVerified, @@ -241,6 +241,7 @@ pub struct ChainService { verify_failed_blocks_tx: Option>, } +#[derive(Clone)] pub struct LonelyBlock { pub block: Arc, pub peer_id: Option, From 6a772a9618c6b1f6a5b546454924986cbe2d9b13 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 19 Oct 2023 12:50:17 +0800 Subject: [PATCH 104/357] Use Box as outer type of VerifyCallback --- chain/src/chain.rs | 11 ++++------- sync/src/relayer/mod.rs | 2 +- sync/src/types/mod.rs | 13 ++++--------- 3 files changed, 9 insertions(+), 17 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 3fe5a28c2e..5897afb9e8 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -50,7 +50,7 @@ type TruncateRequest = Request>; pub type VerifyResult = Result; -pub type VerifyCallback = dyn FnOnce(VerifyResult) + Send + Sync; +pub type VerifyCallback = Box; /// VerifiedBlockStatus is #[derive(Debug, Clone, PartialEq)] @@ -112,7 +112,7 @@ impl ChainController { pub fn asynchronous_process_block_with_callback( &self, block: Arc, - verify_callback: Box, + verify_callback: 
VerifyCallback, ) { self.asynchronous_process_lonely_block_with_callback( LonelyBlock { @@ -249,10 +249,7 @@ pub struct LonelyBlock { } impl LonelyBlock { - pub fn with_callback( - self, - verify_callback: Option>, - ) -> LonelyBlockWithCallback { + pub fn with_callback(self, verify_callback: Option) -> LonelyBlockWithCallback { LonelyBlockWithCallback { lonely_block: self, verify_callback, @@ -266,7 +263,7 @@ impl LonelyBlock { pub struct LonelyBlockWithCallback { pub lonely_block: LonelyBlock, - pub verify_callback: Option>, + pub verify_callback: Option, } impl LonelyBlockWithCallback { diff --git a/sync/src/relayer/mod.rs b/sync/src/relayer/mod.rs index 37a6e78ffe..f0b90ad5b8 100644 --- a/sync/src/relayer/mod.rs +++ b/sync/src/relayer/mod.rs @@ -345,7 +345,7 @@ impl Relayer { &self.chain, Arc::clone(&block), peer, - verify_success_callback, + Box::new(verify_success_callback), ); } diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index 0f348d1163..9cea119348 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -1062,13 +1062,13 @@ impl SyncShared { chain: &ChainController, block: Arc, peer_id: PeerIndex, - verify_success_callback: impl FnOnce(VerifyResult) + Send + Sync + 'static, + verify_success_callback: VerifyCallback, ) { self.accept_block( chain, Arc::clone(&block), peer_id, - Some(Box::new(verify_success_callback)), + Some(verify_success_callback), ) } @@ -1092,12 +1092,7 @@ impl SyncShared { // } // Attempt to accept the given block if its parent already exist in database - self.accept_block( - chain, - Arc::clone(&block), - peer_id, - None::>, - ); + self.accept_block(chain, Arc::clone(&block), peer_id, None::); // if ret.is_err() { // debug!("accept block {:?} {:?}", block, ret); // return ret; @@ -1166,7 +1161,7 @@ impl SyncShared { chain: &ChainController, block: Arc, peer_id: PeerIndex, - verify_callback: Option>, + verify_callback: Option, ) { // let ret = { // let mut assume_valid_target = self.state.assume_valid_target(); 
From 4b14d6fc2eaff86858991da6b549c0a57114bd50 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 19 Oct 2023 21:36:50 +0800 Subject: [PATCH 105/357] Fix Unit test: ckb-chain::tests::find_fork.rs --- chain/src/tests/find_fork.rs | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/chain/src/tests/find_fork.rs b/chain/src/tests/find_fork.rs index 9ade30ea20..5e4cd87208 100644 --- a/chain/src/tests/find_fork.rs +++ b/chain/src/tests/find_fork.rs @@ -24,6 +24,7 @@ fn test_find_fork_case1() { let builder = SharedBuilder::with_temp_db(); let (shared, mut pack) = builder.consensus(Consensus::default()).build().unwrap(); let mut _chain_service = ChainService::new(shared.clone(), pack.take_proposal_table(), None); + let _chain_service_clone = _chain_service.clone(); let chain_controller = _chain_service.start(Some("test_find_fork_case1::ChainService")); let genesis = shared .store() @@ -74,7 +75,7 @@ fn test_find_fork_case1() { let mut fork = ForkChanges::default(); - _chain_service.find_fork(&mut fork, tip_number, fork2.tip(), ext); + _chain_service_clone.find_fork(&mut fork, tip_number, fork2.tip(), ext); let detached_blocks: HashSet = fork1.blocks().clone().into_iter().collect(); let attached_blocks: HashSet = fork2.blocks().clone().into_iter().collect(); @@ -97,6 +98,7 @@ fn test_find_fork_case2() { let builder = SharedBuilder::with_temp_db(); let (shared, mut pack) = builder.consensus(Consensus::default()).build().unwrap(); let mut _chain_service = ChainService::new(shared.clone(), pack.take_proposal_table(), None); + let _chain_service_clone = _chain_service.clone(); let chain_controller = _chain_service.start(Some("test_find_fork_case2::ChainService")); let genesis = shared @@ -147,7 +149,7 @@ fn test_find_fork_case2() { let mut fork = ForkChanges::default(); - _chain_service.find_fork(&mut fork, tip_number, fork2.tip(), ext); + _chain_service_clone.find_fork(&mut fork, tip_number, fork2.tip(), ext); let detached_blocks: HashSet = 
fork1.blocks()[1..].iter().cloned().collect(); let attached_blocks: HashSet = fork2.blocks().clone().into_iter().collect(); @@ -170,6 +172,7 @@ fn test_find_fork_case3() { let builder = SharedBuilder::with_temp_db(); let (shared, mut pack) = builder.consensus(Consensus::default()).build().unwrap(); let mut _chain_service = ChainService::new(shared.clone(), pack.take_proposal_table(), None); + let _chain_service_clone = _chain_service.clone(); let chain_controller = _chain_service.start(Some("test_find_fork_case3::ChainService")); let genesis = shared @@ -220,7 +223,7 @@ fn test_find_fork_case3() { }; let mut fork = ForkChanges::default(); - _chain_service.find_fork(&mut fork, tip_number, fork2.tip(), ext); + _chain_service_clone.find_fork(&mut fork, tip_number, fork2.tip(), ext); let detached_blocks: HashSet = fork1.blocks().clone().into_iter().collect(); let attached_blocks: HashSet = fork2.blocks().clone().into_iter().collect(); @@ -243,6 +246,7 @@ fn test_find_fork_case4() { let builder = SharedBuilder::with_temp_db(); let (shared, mut pack) = builder.consensus(Consensus::default()).build().unwrap(); let mut _chain_service = ChainService::new(shared.clone(), pack.take_proposal_table(), None); + let _chain_service_clone = _chain_service.clone(); let chain_controller = _chain_service.start(Some("test_find_fork_case4::ChainService")); let genesis = shared @@ -294,7 +298,7 @@ fn test_find_fork_case4() { let mut fork = ForkChanges::default(); - _chain_service.find_fork(&mut fork, tip_number, fork2.tip(), ext); + _chain_service_clone.find_fork(&mut fork, tip_number, fork2.tip(), ext); let detached_blocks: HashSet = fork1.blocks().clone().into_iter().collect(); let attached_blocks: HashSet = fork2.blocks().clone().into_iter().collect(); From f3b6257738645436e32c35af0493acc00f655520 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 19 Oct 2023 23:14:15 +0800 Subject: [PATCH 106/357] Fix Unit test: ckb-verification::contextual/src/tests/contextual_block_verifier.rs --- 
.../src/tests/contextual_block_verifier.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/verification/contextual/src/tests/contextual_block_verifier.rs b/verification/contextual/src/tests/contextual_block_verifier.rs index a53b1146ba..62514ce8b5 100644 --- a/verification/contextual/src/tests/contextual_block_verifier.rs +++ b/verification/contextual/src/tests/contextual_block_verifier.rs @@ -83,7 +83,7 @@ fn start_chain(consensus: Option) -> (ChainController, Shared) { } let (shared, mut pack) = builder.build().unwrap(); - let chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); + let chain_service = ChainService::new(shared.clone(), pack.take_proposal_table(), None); let chain_controller = chain_service.start::<&str>(None); (chain_controller, shared) } @@ -230,7 +230,7 @@ fn test_proposal() { .collect(); let block = gen_block(&parent, vec![], proposal_ids, vec![]); chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_ALL) .unwrap(); parent = block.header(); @@ -249,7 +249,7 @@ fn test_proposal() { //test chain forward let new_block = gen_block(&parent, vec![], vec![], vec![]); chain_controller - .internal_process_block(Arc::new(new_block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(new_block.clone()), Switch::DISABLE_ALL) .unwrap(); parent = new_block.header().to_owned(); } @@ -263,7 +263,7 @@ fn test_proposal() { //test chain forward let new_block = gen_block(&parent, vec![], vec![], vec![]); chain_controller - .internal_process_block(Arc::new(new_block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(new_block.clone()), Switch::DISABLE_ALL) .unwrap(); parent = new_block.header().to_owned(); } @@ -311,7 +311,7 @@ fn test_uncle_proposal() { let uncle = gen_block(&parent, vec![], proposal_ids, vec![]); let block = 
gen_block(&parent, vec![], vec![], vec![uncle.as_uncle()]); chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_ALL) .unwrap(); parent = block.header(); @@ -326,7 +326,7 @@ fn test_uncle_proposal() { //test chain forward let new_block = gen_block(&parent, vec![], vec![], vec![]); chain_controller - .internal_process_block(Arc::new(new_block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(new_block.clone()), Switch::DISABLE_ALL) .unwrap(); parent = new_block.header().to_owned(); } @@ -340,7 +340,7 @@ fn test_uncle_proposal() { //test chain forward let new_block = gen_block(&parent, vec![], vec![], vec![]); chain_controller - .internal_process_block(Arc::new(new_block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(new_block.clone()), Switch::DISABLE_ALL) .unwrap(); parent = new_block.header().to_owned(); } From 0dfe15c45c6c44722f02e609f459685ea0f0fca8 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 19 Oct 2023 23:14:39 +0800 Subject: [PATCH 107/357] Fix Unit test: ckb-verification::contextual/src/tests/uncle_verifier.rs --- verification/contextual/src/tests/uncle_verifier.rs | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/verification/contextual/src/tests/uncle_verifier.rs b/verification/contextual/src/tests/uncle_verifier.rs index af12732084..7545af7415 100644 --- a/verification/contextual/src/tests/uncle_verifier.rs +++ b/verification/contextual/src/tests/uncle_verifier.rs @@ -43,8 +43,9 @@ fn start_chain(consensus: Option) -> (ChainController, Shared) { } let (shared, mut pack) = builder.build().unwrap(); - let chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); - let chain_controller = chain_service.start::<&str>(None); + let chain_service = ChainService::new(shared.clone(), pack.take_proposal_table(), None); + let 
chain_controller = + chain_service.start::<&str>(Some("ckb-verification::tests::ChainService")); (chain_controller, shared) } @@ -88,7 +89,7 @@ fn prepare() -> (Shared, Vec, Vec) { .epoch(); let new_block = gen_block(&parent, random(), &epoch); chain_controller - .internal_process_block(Arc::new(new_block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(new_block.clone()), Switch::DISABLE_ALL) .expect("process block ok"); chain1.push(new_block.clone()); parent = new_block.header(); @@ -110,7 +111,7 @@ fn prepare() -> (Shared, Vec, Vec) { chain1[(i - 1) as usize].clone() }; chain_controller - .internal_process_block(Arc::new(new_block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(new_block.clone()), Switch::DISABLE_ALL) .expect("process block ok"); chain2.push(new_block.clone()); parent = new_block.header(); @@ -493,7 +494,7 @@ fn test_uncle_with_uncle_descendant() { for block in &chain2 { controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_ALL) .expect("process block ok"); } @@ -506,7 +507,7 @@ fn test_uncle_with_uncle_descendant() { .build(); controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_ALL) .expect("process block ok"); { From 5c49e5880a70cc6501dd8c78eac9f2076f25c10c Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 19 Oct 2023 23:15:51 +0800 Subject: [PATCH 108/357] Fix Unit test: ckb-light-client-protocol-server::tests/utils/chain.rs --- .../src/tests/utils/chain.rs | 20 +++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/util/light-client-protocol-server/src/tests/utils/chain.rs b/util/light-client-protocol-server/src/tests/utils/chain.rs index 177d4c9bee..29e1df8f7b 100644 --- a/util/light-client-protocol-server/src/tests/utils/chain.rs +++ 
b/util/light-client-protocol-server/src/tests/utils/chain.rs @@ -4,7 +4,7 @@ use std::{ }; use ckb_app_config::{BlockAssemblerConfig, NetworkConfig}; -use ckb_chain::chain::{ChainController, ChainService}; +use ckb_chain::chain::{ChainController, ChainService, VerifiedBlockStatus}; use ckb_chain_spec::consensus::{build_genesis_epoch_ext, ConsensusBuilder}; use ckb_dao_utils::genesis_dao_data; use ckb_jsonrpc_types::ScriptHashType; @@ -87,8 +87,10 @@ impl MockChain { let network = dummy_network(&shared); pack.take_tx_pool_builder().start(network); - let chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); - let chain_controller = chain_service.start::<&str>(None); + let chain_service = ChainService::new(shared.clone(), pack.take_proposal_table(), None); + let chain_controller = chain_service.start::<&str>(Some( + "ckb-light-client-protocol-server::tests::ChainService", + )); Self { chain_controller, @@ -142,11 +144,17 @@ impl MockChain { let block: packed::Block = block_template.into(); let block = build(block); let block_number = block.number(); - let is_ok = self + let verified_block_status = self .controller() - .process_block(Arc::new(block)) + .blocking_process_block(Arc::new(block)) .expect("process block"); - assert!(is_ok, "failed to process block {block_number}"); + assert!( + matches!( + verified_block_status, + VerifiedBlockStatus::FirstSeenAndVerified + ), + "failed to process block {block_number}" + ); while self .tx_pool() .get_tx_pool_info() From 71bcaec85fa0718d7adb06aa8d9490e8c1980bb3 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 19 Oct 2023 23:16:51 +0800 Subject: [PATCH 109/357] Fix Unit test: chain/src/tests/util.rs --- chain/src/tests/util.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/chain/src/tests/util.rs b/chain/src/tests/util.rs index 0d42b0def6..1481875a22 100644 --- a/chain/src/tests/util.rs +++ b/chain/src/tests/util.rs @@ -85,8 +85,8 @@ pub(crate) fn 
start_chain_with_tx_pool_config( let network = dummy_network(&shared); pack.take_tx_pool_builder().start(network); - let chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); - let chain_controller = chain_service.start::<&str>(None); + let _chain_service = ChainService::new(shared.clone(), pack.take_proposal_table(), None); + let chain_controller = _chain_service.start::<&str>(Some("ckb_chain::tests::ChainService")); let parent = { let snapshot = shared.snapshot(); snapshot From 685387f2ecc9a20f1e935a1e43ae0cbadb8590e0 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 19 Oct 2023 23:17:10 +0800 Subject: [PATCH 110/357] Fix Unit test: chain/src/tests/orphan_block_pool.rs --- chain/src/tests/orphan_block_pool.rs | 69 ++++++++++++++++------------ 1 file changed, 40 insertions(+), 29 deletions(-) diff --git a/chain/src/tests/orphan_block_pool.rs b/chain/src/tests/orphan_block_pool.rs index 9b3562d3d4..7616cf78c9 100644 --- a/chain/src/tests/orphan_block_pool.rs +++ b/chain/src/tests/orphan_block_pool.rs @@ -1,4 +1,4 @@ -use crate::chain::LonelyBlockWithCallback; +use crate::chain::{LonelyBlock, LonelyBlockWithCallback}; use ckb_chain_spec::consensus::ConsensusBuilder; use ckb_systemtime::unix_time_as_millis; use ckb_types::core::{BlockBuilder, BlockView, EpochNumberWithFraction, HeaderView}; @@ -9,7 +9,7 @@ use std::thread; use crate::orphan_block_pool::OrphanBlockPool; -fn gen_lonely_block_with_callback(parent_header: &HeaderView) -> LonelyBlockWithCallback { +fn gen_lonely_block(parent_header: &HeaderView) -> LonelyBlock { let number = parent_header.number() + 1; let block = BlockBuilder::default() .parent_hash(parent_header.hash()) @@ -18,16 +18,17 @@ fn gen_lonely_block_with_callback(parent_header: &HeaderView) -> LonelyBlockWith .epoch(EpochNumberWithFraction::new(number / 1000, number % 1000, 1000).pack()) .nonce((parent_header.nonce() + 1).pack()) .build(); - LonelyBlockWithCallback { - lonely_block: LonelyBlock { - block: 
Arc::new(block), - peer_id: None, - switch: None, - }, - verify_callback: None, + LonelyBlock { + block: Arc::new(block), + peer_id: None, + switch: None, } } +fn gen_lonely_block_with_callback(parent_header: &HeaderView) -> LonelyBlockWithCallback { + gen_lonely_block(parent_header).without_callback() +} + #[test] fn test_remove_blocks_by_parent() { let consensus = ConsensusBuilder::default().build(); @@ -36,15 +37,18 @@ fn test_remove_blocks_by_parent() { let mut parent = consensus.genesis_block().header(); let pool = OrphanBlockPool::with_capacity(200); for _ in 1..block_number { - let new_block = gen_lonely_block_with_callback(&parent); - blocks.push(new_block.clone()); - pool.insert(new_block.clone()); - parent = new_block.header(); + let lonely_block = gen_lonely_block(&parent); + let new_block_clone = lonely_block.clone().without_callback(); + let new_block = lonely_block.without_callback(); + blocks.push(new_block_clone); + + parent = new_block.block().header(); + pool.insert(new_block); } let orphan = pool.remove_blocks_by_parent(&consensus.genesis_block().hash()); - let orphan_set: HashSet = orphan.into_iter().collect(); - let blocks_set: HashSet = blocks.into_iter().collect(); + let orphan_set: HashSet<_> = orphan.into_iter().map(|b| b.lonely_block.block).collect(); + let blocks_set: HashSet<_> = blocks.into_iter().map(|b| b.lonely_block.block).collect(); assert_eq!(orphan_set, blocks_set) } @@ -55,9 +59,11 @@ fn test_remove_blocks_by_parent_and_get_block_should_not_deadlock() { let mut header = consensus.genesis_block().header(); let mut hashes = Vec::new(); for _ in 1..1024 { - let new_block = gen_lonely_block_with_callback(&header); - pool.insert(new_block.clone()); - header = new_block.header(); + let lonely_block = gen_lonely_block(&header); + let new_block = lonely_block.clone().without_callback(); + let new_block_clone = lonely_block.without_callback(); + pool.insert(new_block_clone); + header = new_block.block().header(); 
hashes.push(header.hash()); } @@ -83,22 +89,23 @@ fn test_leaders() { let mut parent = consensus.genesis_block().header(); let pool = OrphanBlockPool::with_capacity(20); for i in 0..block_number - 1 { - let new_block = gen_lonely_block_with_callback(&parent); - blocks.push(new_block.clone()); - parent = new_block.header(); + let lonely_block = gen_lonely_block(&parent); + let new_block = lonely_block.clone().without_callback(); + blocks.push(lonely_block); + parent = new_block.block().header(); if i % 5 != 0 { - pool.insert(new_block.clone()); + pool.insert(new_block); } } assert_eq!(pool.len(), 15); assert_eq!(pool.leaders_len(), 4); - pool.insert(blocks[5].clone()); + pool.insert(blocks[5].clone().without_callback()); assert_eq!(pool.len(), 16); assert_eq!(pool.leaders_len(), 3); - pool.insert(blocks[10].clone()); + pool.insert(blocks[10].clone().without_callback()); assert_eq!(pool.len(), 17); assert_eq!(pool.leaders_len(), 2); @@ -108,7 +115,7 @@ fn test_leaders() { assert_eq!(pool.len(), 17); assert_eq!(pool.leaders_len(), 2); - pool.insert(blocks[0].clone()); + pool.insert(blocks[0].clone().without_callback()); assert_eq!(pool.len(), 18); assert_eq!(pool.leaders_len(), 2); @@ -116,14 +123,18 @@ fn test_leaders() { assert_eq!(pool.len(), 3); assert_eq!(pool.leaders_len(), 1); - pool.insert(blocks[15].clone()); + pool.insert(blocks[15].clone().without_callback()); assert_eq!(pool.len(), 4); assert_eq!(pool.leaders_len(), 1); - let orphan_1 = pool.remove_blocks_by_parent(&blocks[14].hash()); + let orphan_1 = pool.remove_blocks_by_parent(&blocks[14].block.hash()); - let orphan_set: HashSet = orphan.into_iter().chain(orphan_1.into_iter()).collect(); - let blocks_set: HashSet = blocks.into_iter().collect(); + let orphan_set: HashSet> = orphan + .into_iter() + .map(|b| b.lonely_block.block) + .chain(orphan_1.into_iter().map(|b| b.lonely_block.block)) + .collect(); + let blocks_set: HashSet> = blocks.into_iter().map(|b| b.block).collect(); assert_eq!(orphan_set, 
blocks_set); assert_eq!(pool.len(), 0); assert_eq!(pool.leaders_len(), 0); From 2e463363efabe2b9d1908971f3f57772869027e2 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 25 Oct 2023 10:33:54 +0800 Subject: [PATCH 111/357] Add verify_failed_block channel to SharedPackage --- shared/src/shared_builder.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/shared/src/shared_builder.rs b/shared/src/shared_builder.rs index 5bf45b265e..2e3d666560 100644 --- a/shared/src/shared_builder.rs +++ b/shared/src/shared_builder.rs @@ -33,6 +33,9 @@ use ckb_proposal_table::ProposalTable; use ckb_proposal_table::ProposalView; use ckb_shared::{HeaderMap, Shared}; use ckb_snapshot::{Snapshot, SnapshotMgr}; +use ckb_util::Mutex; + +use ckb_shared::types::VerifyFailedBlockInfo; use ckb_store::ChainDB; use ckb_store::ChainStore; use ckb_store::{ChainDB, ChainStore, Freezer}; @@ -440,10 +443,15 @@ impl SharedBuilder { block_status_map, ); + let (verify_failed_block_tx, verify_failed_block_rx) = + tokio::sync::mpsc::unbounded_channel::(); + let pack = SharedPackage { table: Some(table), tx_pool_builder: Some(tx_pool_builder), relay_tx_receiver: Some(receiver), + verify_failed_block_tx: Some(verify_failed_block_tx), + verify_failed_block_rx: Some(verify_failed_block_rx), }; Ok((shared, pack)) From 62484179329b634263897cdf252f1931a4c807ac Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 25 Oct 2023 10:38:59 +0800 Subject: [PATCH 112/357] Construct ChainService with SharedPackage provided verify_failed_block_rx --- ckb-bin/src/subcommand/run.rs | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/ckb-bin/src/subcommand/run.rs b/ckb-bin/src/subcommand/run.rs index ba7f3bfb53..d947a2856f 100644 --- a/ckb-bin/src/subcommand/run.rs +++ b/ckb-bin/src/subcommand/run.rs @@ -40,10 +40,12 @@ pub fn run(args: RunArgs, version: Version, async_handle: Handle) -> Result<(), ); launcher.check_assume_valid_target(&shared); - let (verify_failed_block_tx, 
verify_failed_block_rx) = - tokio::sync::mpsc::unbounded_channel::(); - let chain_controller = - launcher.start_chain_service(&shared, pack.take_proposal_table(), verify_failed_block_tx); + + let chain_controller = launcher.start_chain_service( + &shared, + pack.take_proposal_table(), + pack.take_verify_failed_block_tx(), + ); launcher.start_block_filter(&shared); @@ -52,7 +54,7 @@ pub fn run(args: RunArgs, version: Version, async_handle: Handle) -> Result<(), chain_controller.clone(), miner_enable, pack.take_relay_tx_receiver(), - verify_failed_block_rx, + pack.take_verify_failed_block_rx(), ); let tx_pool_builder = pack.take_tx_pool_builder(); From ee7508d1c0b247dbc187f18014741909f29f54ad Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 25 Oct 2023 10:50:35 +0800 Subject: [PATCH 113/357] Remove ChainService::verify_failed_block_tx Option wrap --- chain/src/chain.rs | 10 +++++----- util/launcher/src/lib.rs | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 5897afb9e8..49001ab432 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -238,7 +238,7 @@ pub struct ChainService { orphan_blocks_broker: Arc, - verify_failed_blocks_tx: Option>, + verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, } #[derive(Clone)] @@ -324,7 +324,7 @@ impl ChainService { pub fn new( shared: Shared, proposal_table: ProposalTable, - verify_failed_blocks_tx: Option>, + verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, ) -> ChainService { ChainService { shared, @@ -809,8 +809,8 @@ impl ChainService { err: &Error, ) { let is_internal_db_error = is_internal_db_error(&err); - match (lonely_block.peer_id(), &self.verify_failed_blocks_tx) { - (Some(peer_id), Some(verify_failed_blocks_tx)) => { + match lonely_block.peer_id() { + Some(peer_id) => { let verify_failed_block_info = VerifyFailedBlockInfo { block_hash: lonely_block.lonely_block.block.hash(), peer_id, @@ -818,7 +818,7 @@ impl ChainService { 
reason: err.to_string(), is_internal_db_error, }; - match verify_failed_blocks_tx.send(verify_failed_block_info) { + match self.verify_failed_blocks_tx.send(verify_failed_block_info) { Err(_err) => { error!("ChainService failed to send verify failed block info to Synchronizer, the receiver side may have been closed, this shouldn't happen") } diff --git a/util/launcher/src/lib.rs b/util/launcher/src/lib.rs index b0f0789877..9b908fa966 100644 --- a/util/launcher/src/lib.rs +++ b/util/launcher/src/lib.rs @@ -234,7 +234,7 @@ impl Launcher { table: ProposalTable, verify_failed_block_tx: tokio::sync::mpsc::UnboundedSender, ) -> ChainController { - let chain_service = ChainService::new(shared.clone(), table, Some(verify_failed_block_tx)); + let chain_service = ChainService::new(shared.clone(), table, verify_failed_block_tx); let chain_controller = chain_service.start(Some("ChainService")); info!("chain genesis hash: {:#x}", shared.genesis_hash()); chain_controller From 1eb31e9c370c5bd0e05142fdb17c8ef5637440f0 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 25 Oct 2023 10:51:11 +0800 Subject: [PATCH 114/357] Remove useless import orphan_block_pool in ckb-sync --- sync/src/tests/mod.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/sync/src/tests/mod.rs b/sync/src/tests/mod.rs index a64e84d4a5..cb6d1ab347 100644 --- a/sync/src/tests/mod.rs +++ b/sync/src/tests/mod.rs @@ -15,7 +15,6 @@ use std::time::Duration; mod block_status; mod inflight_blocks; mod net_time_checker; -mod orphan_block_pool; mod sync_shared; mod synchronizer; From a598082dd34a603e85bb1833e008e0883b5d20cb Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 25 Oct 2023 10:59:39 +0800 Subject: [PATCH 115/357] Fix blocking process block usage in benches/benches/benchmarks/always_success.rs --- benches/benches/benchmarks/always_success.rs | 36 +++++++++++++------- 1 file changed, 24 insertions(+), 12 deletions(-) diff --git a/benches/benches/benchmarks/always_success.rs 
b/benches/benches/benchmarks/always_success.rs index dc5f0f205a..adcae7d245 100644 --- a/benches/benches/benchmarks/always_success.rs +++ b/benches/benches/benchmarks/always_success.rs @@ -32,7 +32,7 @@ fn bench(c: &mut Criterion) { (0..20).for_each(|_| { let block = gen_always_success_block(&mut blocks, &parent, shared2); chain2 - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(block.clone()), Switch::DISABLE_ALL, ) @@ -44,7 +44,7 @@ fn bench(c: &mut Criterion) { |(chain, blocks)| { blocks.into_iter().skip(1).for_each(|block| { chain - .process_block(Arc::new(block)) + .blocking_process_block(Arc::new(block)) .expect("process block OK"); }); }, @@ -77,14 +77,14 @@ fn bench(c: &mut Criterion) { (0..5).for_each(|i| { let block = gen_always_success_block(&mut blocks, &parent, shared2); chain2 - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(block.clone()), Switch::DISABLE_ALL, ) .expect("process block OK"); if i < 2 { chain3 - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(block.clone()), Switch::DISABLE_ALL, ) @@ -96,7 +96,7 @@ fn bench(c: &mut Criterion) { (0..2).for_each(|_| { let block = gen_always_success_block(&mut blocks, &parent, shared3); chain3 - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(block.clone()), Switch::DISABLE_ALL, ) @@ -110,7 +110,10 @@ fn bench(c: &mut Criterion) { .take(5) .for_each(|block| { chain1 - .internal_process_block(Arc::new(block), Switch::DISABLE_ALL) + .blocking_process_block_with_switch( + Arc::new(block), + Switch::DISABLE_ALL, + ) .expect("process block OK"); }); (chain1.clone(), blocks) @@ -118,7 +121,7 @@ fn bench(c: &mut Criterion) { |(chain, blocks)| { blocks.into_iter().skip(6).for_each(|block| { chain - .process_block(Arc::new(block)) + .blocking_process_block(Arc::new(block)) .expect("process block OK"); }); }, @@ -152,11 +155,17 @@ fn bench(c: &mut Criterion) { let block = gen_always_success_block(&mut blocks, 
&parent, shared2); let arc_block = Arc::new(block.clone()); chain2 - .internal_process_block(Arc::clone(&arc_block), Switch::DISABLE_ALL) + .blocking_process_block_with_switch( + Arc::clone(&arc_block), + Switch::DISABLE_ALL, + ) .expect("process block OK"); if i < 2 { chain3 - .internal_process_block(arc_block, Switch::DISABLE_ALL) + .blocking_process_block_with_switch( + arc_block, + Switch::DISABLE_ALL, + ) .expect("process block OK"); } parent = block; @@ -165,7 +174,7 @@ fn bench(c: &mut Criterion) { (0..4).for_each(|_| { let block = gen_always_success_block(&mut blocks, &parent, shared3); chain3 - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(block.clone()), Switch::DISABLE_ALL, ) @@ -179,7 +188,10 @@ fn bench(c: &mut Criterion) { .take(7) .for_each(|block| { chain1 - .internal_process_block(Arc::new(block), Switch::DISABLE_ALL) + .blocking_process_block_with_switch( + Arc::new(block), + Switch::DISABLE_ALL, + ) .expect("process block OK"); }); (chain1.clone(), blocks) @@ -187,7 +199,7 @@ fn bench(c: &mut Criterion) { |(chain, blocks)| { blocks.into_iter().skip(8).for_each(|block| { chain - .process_block(Arc::new(block)) + .blocking_process_block(Arc::new(block)) .expect("process block OK"); }); }, From 9fdd9dfc1bdd7be07c538af7b06a75d0bdcc596c Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 25 Oct 2023 10:59:49 +0800 Subject: [PATCH 116/357] Fix blocking process block usage in benches/benches/benchmarks/overall.rs --- benches/benches/benchmarks/overall.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/benches/benches/benchmarks/overall.rs b/benches/benches/benchmarks/overall.rs index 74bf84b953..93552d25c4 100644 --- a/benches/benches/benchmarks/overall.rs +++ b/benches/benches/benchmarks/overall.rs @@ -132,7 +132,11 @@ pub fn setup_chain(txs_size: usize) -> (Shared, ChainController) { let network = dummy_network(&shared); pack.take_tx_pool_builder().start(network); - let chain_service = 
ChainService::new(shared.clone(), pack.take_proposal_table()); + let chain_service = ChainService::new( + shared.clone(), + pack.take_proposal_table(), + pack.take_verify_failed_block_tx(), + ); let chain_controller = chain_service.start(Some("ChainService")); (shared, chain_controller) @@ -217,7 +221,9 @@ fn bench(c: &mut Criterion) { .verify(&block.header()) .expect("header verified"); - chain.process_block(Arc::new(block)).expect("process_block"); + chain + .blocking_process_block(Arc::new(block)) + .expect("process_block"); i -= 1; } }, From 797b3cd855657d1dbad5cf9f274689928f34be82 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 25 Oct 2023 10:59:55 +0800 Subject: [PATCH 117/357] Fix blocking process block usage in benches/benches/benchmarks/resolve.rs --- benches/benches/benchmarks/resolve.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/benches/benches/benchmarks/resolve.rs b/benches/benches/benchmarks/resolve.rs index 29ce56bc8c..0c7a6d0502 100644 --- a/benches/benches/benchmarks/resolve.rs +++ b/benches/benches/benchmarks/resolve.rs @@ -96,7 +96,11 @@ pub fn setup_chain(txs_size: usize) -> (Shared, ChainController) { .tx_pool_config(tx_pool_config) .build() .unwrap(); - let chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); + let chain_service = ChainService::new( + shared.clone(), + pack.take_proposal_table(), + pack.take_verify_failed_block_tx(), + ); let chain_controller = chain_service.start(Some("ChainService")); // FIXME: global cache !!! 
From 6491c754cf14b10b3f85bc08d7e6cb0c72776517 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 25 Oct 2023 11:00:05 +0800 Subject: [PATCH 118/357] Fix blocking process block usage in benches/benches/benchmarks/secp_2in2out.rs --- benches/benches/benchmarks/secp_2in2out.rs | 28 +++++++++++++--------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/benches/benches/benchmarks/secp_2in2out.rs b/benches/benches/benchmarks/secp_2in2out.rs index 8dd3eb3d2d..84e9230423 100644 --- a/benches/benches/benchmarks/secp_2in2out.rs +++ b/benches/benches/benchmarks/secp_2in2out.rs @@ -32,7 +32,7 @@ fn bench(c: &mut Criterion) { (0..20).for_each(|_| { let block = gen_secp_block(&mut blocks, &parent, shared2); chain2 - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(block.clone()), Switch::DISABLE_ALL, ) @@ -44,7 +44,7 @@ fn bench(c: &mut Criterion) { |(chain, blocks)| { blocks.into_iter().skip(1).for_each(|block| { chain - .process_block(Arc::new(block)) + .blocking_process_block(Arc::new(block)) .expect("process block OK"); }); }, @@ -77,14 +77,14 @@ fn bench(c: &mut Criterion) { (0..5).for_each(|i| { let block = gen_secp_block(&mut blocks, &parent, shared2); chain2 - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(block.clone()), Switch::DISABLE_ALL, ) .expect("process block OK"); if i < 2 { chain3 - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(block.clone()), Switch::DISABLE_ALL, ) @@ -96,7 +96,7 @@ fn bench(c: &mut Criterion) { (0..2).for_each(|_| { let block = gen_secp_block(&mut blocks, &parent, shared3); chain3 - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(block.clone()), Switch::DISABLE_ALL, ) @@ -110,7 +110,10 @@ fn bench(c: &mut Criterion) { .take(5) .for_each(|block| { chain1 - .internal_process_block(Arc::new(block), Switch::DISABLE_ALL) + .blocking_process_block_with_switch( + Arc::new(block), + Switch::DISABLE_ALL, + ) .expect("process block 
OK"); }); (chain1.clone(), blocks) @@ -118,7 +121,7 @@ fn bench(c: &mut Criterion) { |(chain, blocks)| { blocks.into_iter().skip(6).for_each(|block| { chain - .process_block(Arc::new(block)) + .blocking_process_block(Arc::new(block)) .expect("process block OK"); }); }, @@ -152,11 +155,14 @@ fn bench(c: &mut Criterion) { let block = gen_secp_block(&mut blocks, &parent, shared2); let arc_block = Arc::new(block.clone()); chain2 - .internal_process_block(Arc::clone(&arc_block), Switch::DISABLE_ALL) + .blocking_process_block_with_switch( + Arc::clone(&arc_block), + Switch::DISABLE_ALL, + ) .expect("process block OK"); if i < 2 { chain3 - .internal_process_block(arc_block, Switch::DISABLE_ALL) + .blocking_process_block_with_switch(arc_block, Switch::DISABLE_ALL) .expect("process block OK"); } parent = block; @@ -165,7 +171,7 @@ fn bench(c: &mut Criterion) { (0..4).for_each(|_| { let block = gen_secp_block(&mut blocks, &parent, shared3); chain3 - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(block.clone()), Switch::DISABLE_ALL, ) @@ -179,7 +185,7 @@ fn bench(c: &mut Criterion) { .take(7) .for_each(|block| { chain1 - .internal_process_block(Arc::new(block), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block), Switch::DISABLE_ALL) .expect("process block OK"); }); (chain1.clone(), blocks) From 2ae888d57370b2bcddb6d39ac24139b0c6135796 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 25 Oct 2023 11:01:34 +0800 Subject: [PATCH 119/357] let ckb-import and ckb-replay construct ChainService with SharedPackage --- ckb-bin/src/subcommand/import.rs | 6 +++++- ckb-bin/src/subcommand/replay.rs | 6 +++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/ckb-bin/src/subcommand/import.rs b/ckb-bin/src/subcommand/import.rs index 38301171b1..38efa5c124 100644 --- a/ckb-bin/src/subcommand/import.rs +++ b/ckb-bin/src/subcommand/import.rs @@ -15,7 +15,11 @@ pub fn import(args: ImportArgs, async_handle: Handle) -> Result<(), 
ExitCode> { )?; let (shared, mut pack) = builder.build()?; - let chain_service = ChainService::new(shared, pack.take_proposal_table(), None); + let chain_service = ChainService::new( + shared, + pack.take_proposal_table(), + pack.take_verify_failed_block_tx(), + ); let chain_controller = chain_service.start::<&str>(Some("ImportChainService")); // manual drop tx_pool_builder and relay_tx_receiver diff --git a/ckb-bin/src/subcommand/replay.rs b/ckb-bin/src/subcommand/replay.rs index 0114d1a2e7..5091e37504 100644 --- a/ckb-bin/src/subcommand/replay.rs +++ b/ckb-bin/src/subcommand/replay.rs @@ -47,7 +47,11 @@ pub fn replay(args: ReplayArgs, async_handle: Handle) -> Result<(), ExitCode> { args.consensus, )?; let (tmp_shared, mut pack) = shared_builder.tx_pool_config(args.config.tx_pool).build()?; - let chain_service = ChainService::new(tmp_shared, pack.take_proposal_table(), None); + let chain_service = ChainService::new( + tmp_shared, + pack.take_proposal_table(), + pack.take_verify_failed_block_tx(), + ); let chain_controller = chain_service.start(Some("ckb_reply::ChainService")); if let Some((from, to)) = args.profile { From 369249b4f893ec7870a1f43b57bc33147b6fbea2 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 25 Oct 2023 11:03:09 +0800 Subject: [PATCH 120/357] Fix Unit test: ckb-light-client-protocol-server::tests/utils/chain.rs --- util/light-client-protocol-server/src/tests/utils/chain.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/util/light-client-protocol-server/src/tests/utils/chain.rs b/util/light-client-protocol-server/src/tests/utils/chain.rs index 29e1df8f7b..bfd4293780 100644 --- a/util/light-client-protocol-server/src/tests/utils/chain.rs +++ b/util/light-client-protocol-server/src/tests/utils/chain.rs @@ -87,7 +87,11 @@ impl MockChain { let network = dummy_network(&shared); pack.take_tx_pool_builder().start(network); - let chain_service = ChainService::new(shared.clone(), pack.take_proposal_table(), None); + let 
chain_service = ChainService::new( + shared.clone(), + pack.take_proposal_table(), + pack.take_verify_failed_block_tx(), + ); let chain_controller = chain_service.start::<&str>(Some( "ckb-light-client-protocol-server::tests::ChainService", )); From ffa8ea98e5c4d87099d28af68a441e017bf8c635 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 25 Oct 2023 11:03:34 +0800 Subject: [PATCH 121/357] Fix Unit test: ckb-verification::contextual/src/tests/uncle_verifier.rs --- verification/contextual/src/tests/uncle_verifier.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/verification/contextual/src/tests/uncle_verifier.rs b/verification/contextual/src/tests/uncle_verifier.rs index 7545af7415..d77e0ab2bd 100644 --- a/verification/contextual/src/tests/uncle_verifier.rs +++ b/verification/contextual/src/tests/uncle_verifier.rs @@ -43,7 +43,11 @@ fn start_chain(consensus: Option) -> (ChainController, Shared) { } let (shared, mut pack) = builder.build().unwrap(); - let chain_service = ChainService::new(shared.clone(), pack.take_proposal_table(), None); + let chain_service = ChainService::new( + shared.clone(), + pack.take_proposal_table(), + pack.take_verify_failed_block_tx(), + ); let chain_controller = chain_service.start::<&str>(Some("ckb-verification::tests::ChainService")); (chain_controller, shared) From b1d1fcf4e0a10e222aedc0951d38c6a8f68ed866 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 25 Oct 2023 11:03:47 +0800 Subject: [PATCH 122/357] Fix Unit test: ckb-verification::contextual/src/tests/contextual_block_verifier.rs --- .../contextual/src/tests/contextual_block_verifier.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/verification/contextual/src/tests/contextual_block_verifier.rs b/verification/contextual/src/tests/contextual_block_verifier.rs index 62514ce8b5..ea85f7129b 100644 --- a/verification/contextual/src/tests/contextual_block_verifier.rs +++ 
b/verification/contextual/src/tests/contextual_block_verifier.rs @@ -83,7 +83,11 @@ fn start_chain(consensus: Option) -> (ChainController, Shared) { } let (shared, mut pack) = builder.build().unwrap(); - let chain_service = ChainService::new(shared.clone(), pack.take_proposal_table(), None); + let chain_service = ChainService::new( + shared.clone(), + pack.take_proposal_table(), + pack.take_verify_failed_block_tx(), + ); let chain_controller = chain_service.start::<&str>(None); (chain_controller, shared) } From 09467d573d791e6e8945a2c19a03c5a3f6d21bb2 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 25 Oct 2023 11:05:04 +0800 Subject: [PATCH 123/357] Fix Unit test: ckb-sync::src/tests/util.rs --- sync/src/tests/util.rs | 2 +- sync/src/types/mod.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/sync/src/tests/util.rs b/sync/src/tests/util.rs index 0ccf5ba2e8..d2f0224096 100644 --- a/sync/src/tests/util.rs +++ b/sync/src/tests/util.rs @@ -40,7 +40,7 @@ pub fn generate_blocks( let block = inherit_block(shared, &parent_hash).build(); parent_hash = block.header().hash(); chain_controller - .internal_process_block(Arc::new(block), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block), Switch::DISABLE_ALL) .expect("processing block should be ok"); } } diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index 9cea119348..aefb69d11c 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -1174,7 +1174,7 @@ impl SyncShared { // Switch::DISABLE_SCRIPT // }; // - // chain.internal_process_block(Arc::clone(&block), switch) + // chain.blocking_process_block_with_switch(Arc::clone(&block), switch) // } else { // chain.process_block(Arc::clone(&block)) // } From c337a77cb8f30116209c4926cabcccca980768e9 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 25 Oct 2023 11:05:26 +0800 Subject: [PATCH 124/357] Fix blocking process block usage in sync/src/relayer/tests/helper.rs --- sync/src/relayer/tests/helper.rs | 2 +- 1 
file changed, 1 insertion(+), 1 deletion(-) diff --git a/sync/src/relayer/tests/helper.rs b/sync/src/relayer/tests/helper.rs index d81da762a4..b423b6225c 100644 --- a/sync/src/relayer/tests/helper.rs +++ b/sync/src/relayer/tests/helper.rs @@ -212,7 +212,7 @@ pub(crate) fn build_chain(tip: BlockNumber) -> (Relayer, OutPoint) { .transaction(cellbase) .build(); chain_controller - .internal_process_block(Arc::new(block), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block), Switch::DISABLE_ALL) .expect("processing block should be ok"); } From 5c31f940a6a875cb229fac946134c50f39c3b1ab Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 25 Oct 2023 11:05:57 +0800 Subject: [PATCH 125/357] Fix Unit test of blocking process block usage in ckb-sync --- sync/src/tests/synchronizer/basic_sync.rs | 2 +- sync/src/tests/synchronizer/functions.rs | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/sync/src/tests/synchronizer/basic_sync.rs b/sync/src/tests/synchronizer/basic_sync.rs index 9c5c1977b6..e1c705a98b 100644 --- a/sync/src/tests/synchronizer/basic_sync.rs +++ b/sync/src/tests/synchronizer/basic_sync.rs @@ -160,7 +160,7 @@ fn setup_node(height: u64) -> (TestNode, Shared) { .build(); chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_ALL) .expect("process block should be OK"); } diff --git a/sync/src/tests/synchronizer/functions.rs b/sync/src/tests/synchronizer/functions.rs index 7fe84a1293..d03962fb5a 100644 --- a/sync/src/tests/synchronizer/functions.rs +++ b/sync/src/tests/synchronizer/functions.rs @@ -233,10 +233,10 @@ fn test_locate_latest_common_block2() { blocks.push(new_block.clone()); chain_controller1 - .internal_process_block(Arc::new(new_block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(new_block.clone()), Switch::DISABLE_ALL) .expect("process block ok"); 
chain_controller2 - .internal_process_block(Arc::new(new_block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(new_block.clone()), Switch::DISABLE_ALL) .expect("process block ok"); parent = new_block.header().to_owned(); } @@ -253,7 +253,7 @@ fn test_locate_latest_common_block2() { let new_block = gen_block(&shared2, &parent, &epoch, i + 100); chain_controller2 - .internal_process_block(Arc::new(new_block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(new_block.clone()), Switch::DISABLE_ALL) .expect("process block ok"); parent = new_block.header().to_owned(); } @@ -341,7 +341,7 @@ fn test_process_new_block() { let new_block = gen_block(&shared1, &parent, &epoch, i + 100); chain_controller1 - .internal_process_block(Arc::new(new_block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(new_block.clone()), Switch::DISABLE_ALL) .expect("process block ok"); parent = new_block.header().to_owned(); blocks.push(new_block); @@ -378,7 +378,7 @@ fn test_get_locator_response() { blocks.push(new_block.clone()); chain_controller - .internal_process_block(Arc::new(new_block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(new_block.clone()), Switch::DISABLE_ALL) .expect("process block ok"); parent = new_block.header().to_owned(); } From 08015f03be9bb00c350eb47b06a8f7b41c7e0701 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 25 Oct 2023 13:29:50 +0800 Subject: [PATCH 126/357] Use blocking process_block method for benches/benches/benchmarks/secp_2in2out.rs --- benches/benches/benchmarks/secp_2in2out.rs | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/benches/benches/benchmarks/secp_2in2out.rs b/benches/benches/benchmarks/secp_2in2out.rs index 84e9230423..3d7c3933ca 100644 --- a/benches/benches/benchmarks/secp_2in2out.rs +++ b/benches/benches/benchmarks/secp_2in2out.rs @@ -162,7 +162,10 @@ fn bench(c: &mut Criterion) { 
.expect("process block OK"); if i < 2 { chain3 - .blocking_process_block_with_switch(arc_block, Switch::DISABLE_ALL) + .blocking_process_block_with_switch( + arc_block, + Switch::DISABLE_ALL, + ) .expect("process block OK"); } parent = block; @@ -185,7 +188,10 @@ fn bench(c: &mut Criterion) { .take(7) .for_each(|block| { chain1 - .blocking_process_block_with_switch(Arc::new(block), Switch::DISABLE_ALL) + .blocking_process_block_with_switch( + Arc::new(block), + Switch::DISABLE_ALL, + ) .expect("process block OK"); }); (chain1.clone(), blocks) @@ -193,7 +199,7 @@ fn bench(c: &mut Criterion) { |(chain, blocks)| { blocks.into_iter().skip(8).for_each(|block| { chain - .process_block(Arc::new(block)) + .blocking_process_block(Arc::new(block)) .expect("process block OK"); }); }, From 35fa5799342fbc718d02689bf7041f4b6b614b06 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 25 Oct 2023 13:30:52 +0800 Subject: [PATCH 127/357] Benches: Construct ChainService with SharedPackage provided verify_failed_block_rx --- benches/benches/benchmarks/util.rs | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/benches/benches/benchmarks/util.rs b/benches/benches/benchmarks/util.rs index 8c21dddc3b..5cf30676bc 100644 --- a/benches/benches/benchmarks/util.rs +++ b/benches/benches/benchmarks/util.rs @@ -78,7 +78,11 @@ pub fn new_always_success_chain(txs_size: usize, chains_num: usize) -> Chains { .consensus(consensus.clone()) .build() .unwrap(); - let chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); + let chain_service = ChainService::new( + shared.clone(), + pack.take_proposal_table(), + pack.take_verify_failed_block_tx(), + ); chains.push((chain_service.start::<&str>(None), shared)); } @@ -296,7 +300,11 @@ pub fn new_secp_chain(txs_size: usize, chains_num: usize) -> Chains { .consensus(consensus.clone()) .build() .unwrap(); - let chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); + let chain_service = 
ChainService::new( + shared.clone(), + pack.take_proposal_table(), + pack.take_verify_failed_block_tx(), + ); chains.push((chain_service.start::<&str>(None), shared)); } From 52edf8bc8ab81d84a305a8b10b750276be111674 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 25 Oct 2023 13:31:13 +0800 Subject: [PATCH 128/357] Modify BlockStatus by SyncShared.Shared --- sync/src/relayer/tests/compact_block_process.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/sync/src/relayer/tests/compact_block_process.rs b/sync/src/relayer/tests/compact_block_process.rs index 9205727858..78a27a6128 100644 --- a/sync/src/relayer/tests/compact_block_process.rs +++ b/sync/src/relayer/tests/compact_block_process.rs @@ -55,7 +55,7 @@ fn test_in_block_status_map() { { relayer .shared - .state() + .shared() .insert_block_status(block.header().hash(), BlockStatus::BLOCK_INVALID); } @@ -75,7 +75,7 @@ fn test_in_block_status_map() { { relayer .shared - .state() + .shared() .insert_block_status(block.header().hash(), BlockStatus::BLOCK_STORED); } @@ -95,7 +95,7 @@ fn test_in_block_status_map() { { relayer .shared - .state() + .shared() .insert_block_status(block.header().hash(), BlockStatus::BLOCK_RECEIVED); } From 901bd2204a6cf93dcf13ff1378a8730c33b3833f Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 25 Oct 2023 13:32:30 +0800 Subject: [PATCH 129/357] Unit Test: Use SharedPackage to construct ChainService --- chain/src/tests/block_assembler.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/chain/src/tests/block_assembler.rs b/chain/src/tests/block_assembler.rs index 456b671b0d..6b6dc46eb3 100644 --- a/chain/src/tests/block_assembler.rs +++ b/chain/src/tests/block_assembler.rs @@ -47,7 +47,11 @@ fn start_chain(consensus: Option) -> (ChainController, Shared) { let network = dummy_network(&shared); pack.take_tx_pool_builder().start(network); - let _chain_service = ChainService::new(shared.clone(), pack.take_proposal_table(), None); + let 
_chain_service = ChainService::new( + shared.clone(), + pack.take_proposal_table(), + pack.take_verify_failed_block_tx(), + ); let chain_controller = _chain_service.start::<&str>(None); (chain_controller, shared) } From 78ef52d7cac688f33f7b0e497018bada1018da5e Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 25 Oct 2023 13:33:00 +0800 Subject: [PATCH 130/357] Unit Test: Use SharedPackage to construct ChainService in ckb-chain::find_fork.rs --- chain/src/tests/find_fork.rs | 36 ++++++++++++++++++++++++++++++------ 1 file changed, 30 insertions(+), 6 deletions(-) diff --git a/chain/src/tests/find_fork.rs b/chain/src/tests/find_fork.rs index 5e4cd87208..e073435168 100644 --- a/chain/src/tests/find_fork.rs +++ b/chain/src/tests/find_fork.rs @@ -23,7 +23,11 @@ use std::sync::Arc; fn test_find_fork_case1() { let builder = SharedBuilder::with_temp_db(); let (shared, mut pack) = builder.consensus(Consensus::default()).build().unwrap(); - let mut _chain_service = ChainService::new(shared.clone(), pack.take_proposal_table(), None); + let mut _chain_service = ChainService::new( + shared.clone(), + pack.take_proposal_table(), + pack.take_verify_failed_block_tx(), + ); let _chain_service_clone = _chain_service.clone(); let chain_controller = _chain_service.start(Some("test_find_fork_case1::ChainService")); let genesis = shared @@ -97,7 +101,11 @@ fn test_find_fork_case1() { fn test_find_fork_case2() { let builder = SharedBuilder::with_temp_db(); let (shared, mut pack) = builder.consensus(Consensus::default()).build().unwrap(); - let mut _chain_service = ChainService::new(shared.clone(), pack.take_proposal_table(), None); + let mut _chain_service = ChainService::new( + shared.clone(), + pack.take_proposal_table(), + pack.take_verify_failed_block_tx(), + ); let _chain_service_clone = _chain_service.clone(); let chain_controller = _chain_service.start(Some("test_find_fork_case2::ChainService")); @@ -171,7 +179,11 @@ fn test_find_fork_case2() { fn test_find_fork_case3() { let 
builder = SharedBuilder::with_temp_db(); let (shared, mut pack) = builder.consensus(Consensus::default()).build().unwrap(); - let mut _chain_service = ChainService::new(shared.clone(), pack.take_proposal_table(), None); + let mut _chain_service = ChainService::new( + shared.clone(), + pack.take_proposal_table(), + pack.take_verify_failed_block_tx(), + ); let _chain_service_clone = _chain_service.clone(); let chain_controller = _chain_service.start(Some("test_find_fork_case3::ChainService")); @@ -245,7 +257,11 @@ fn test_find_fork_case3() { fn test_find_fork_case4() { let builder = SharedBuilder::with_temp_db(); let (shared, mut pack) = builder.consensus(Consensus::default()).build().unwrap(); - let mut _chain_service = ChainService::new(shared.clone(), pack.take_proposal_table(), None); + let mut _chain_service = ChainService::new( + shared.clone(), + pack.take_proposal_table(), + pack.take_verify_failed_block_tx(), + ); let _chain_service_clone = _chain_service.clone(); let chain_controller = _chain_service.start(Some("test_find_fork_case4::ChainService")); @@ -331,7 +347,11 @@ fn repeatedly_switch_fork() { .consensus(Consensus::default()) .build() .unwrap(); - let mut _chain_service = ChainService::new(shared.clone(), pack.take_proposal_table(), None); + let mut _chain_service = ChainService::new( + shared.clone(), + pack.take_proposal_table(), + pack.take_verify_failed_block_tx(), + ); let chain_controller = _chain_service.start(Some("repeatedly_switch_fork::ChainService")); for _ in 0..2 { @@ -458,7 +478,11 @@ fn test_fork_proposal_table() { }; let (shared, mut pack) = builder.consensus(consensus).build().unwrap(); - let mut _chain_service = ChainService::new(shared.clone(), pack.take_proposal_table(), None); + let mut _chain_service = ChainService::new( + shared.clone(), + pack.take_proposal_table(), + pack.take_verify_failed_block_tx(), + ); let chain_controller = _chain_service.start(Some("test_fork_proposal_table::ChainService")); let genesis = shared From 
c5fdb4836c8f678abcddb97619a86b669862be8b Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 25 Oct 2023 13:40:58 +0800 Subject: [PATCH 131/357] Unit test: Modify `ChainService` initialization to include `pack.take_verify_failed_block_tx()` parameter --- chain/src/tests/truncate.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/chain/src/tests/truncate.rs b/chain/src/tests/truncate.rs index d1d2dd1d6e..4c55cb4770 100644 --- a/chain/src/tests/truncate.rs +++ b/chain/src/tests/truncate.rs @@ -11,7 +11,11 @@ fn test_truncate() { let builder = SharedBuilder::with_temp_db(); let (shared, mut pack) = builder.consensus(Consensus::default()).build().unwrap(); - let mut _chain_service = ChainService::new(shared.clone(), pack.take_proposal_table(), None); + let mut _chain_service = ChainService::new( + shared.clone(), + pack.take_proposal_table(), + pack.take_verify_failed_block_tx(), + ); let chain_controller = _chain_service.start(Some("test_truncate::ChainService")); let genesis = shared From 3adafba4d10dc05a20908a5082f18879451afc52 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 25 Oct 2023 13:41:32 +0800 Subject: [PATCH 132/357] Unit test: Modify the `new` function in `ChainService` to include verify_failed_block_tx parameter --- chain/src/tests/uncle.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/chain/src/tests/uncle.rs b/chain/src/tests/uncle.rs index 6c32ff1560..407b695f60 100644 --- a/chain/src/tests/uncle.rs +++ b/chain/src/tests/uncle.rs @@ -10,7 +10,11 @@ use std::sync::Arc; fn test_get_block_body_after_inserting() { let builder = SharedBuilder::with_temp_db(); let (shared, mut pack) = builder.consensus(Consensus::default()).build().unwrap(); - let mut _chain_service = ChainService::new(shared.clone(), pack.take_proposal_table(), None); + let mut _chain_service = ChainService::new( + shared.clone(), + pack.take_proposal_table(), + pack.take_verify_failed_block_tx(), + ); let chain_controller = 
_chain_service.start(Some("test_get_block_body_after_inserting::ChainService")); let genesis = shared From e49aa13f6bef509075ed92d15a67885d7176d5a8 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 25 Oct 2023 13:42:45 +0800 Subject: [PATCH 133/357] Unit test: Initialization of ChainService need pack.take_verify_failed_block_tx --- chain/src/tests/util.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/chain/src/tests/util.rs b/chain/src/tests/util.rs index 1481875a22..547a8255c3 100644 --- a/chain/src/tests/util.rs +++ b/chain/src/tests/util.rs @@ -85,7 +85,11 @@ pub(crate) fn start_chain_with_tx_pool_config( let network = dummy_network(&shared); pack.take_tx_pool_builder().start(network); - let _chain_service = ChainService::new(shared.clone(), pack.take_proposal_table(), None); + let _chain_service = ChainService::new( + shared.clone(), + pack.take_proposal_table(), + pack.take_verify_failed_block_tx(), + ); let chain_controller = _chain_service.start::<&str>(Some("ckb_chain::tests::ChainService")); let parent = { let snapshot = shared.snapshot(); From e31f9b3d2338d0eca23695619fa58a65750a41c6 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 30 Oct 2023 10:16:41 +0800 Subject: [PATCH 134/357] Add more minor ticks to sync progress chart --- devtools/block_sync/draw_sync_chart.py | 35 ++++++++++++++++++-------- 1 file changed, 24 insertions(+), 11 deletions(-) diff --git a/devtools/block_sync/draw_sync_chart.py b/devtools/block_sync/draw_sync_chart.py index 401eaddd03..e95e50f629 100755 --- a/devtools/block_sync/draw_sync_chart.py +++ b/devtools/block_sync/draw_sync_chart.py @@ -26,17 +26,20 @@ def parse_sync_statics(log_file): pbar = tqdm.tqdm(total=total_lines) for line_idx, line in enumerate(f): pbar.update(1) - if line.find('INFO ckb_chain::chain block: ') != -1: + if line_idx == 0: timestamp_str = re.search(r'^(\S+ \S+)', line).group(1) # Extract the timestamp string timestamp = datetime.datetime.strptime(timestamp_str, "%Y-%m-%d 
%H:%M:%S.%f").timestamp() - - if base_timestamp == 0: - base_timestamp = timestamp - timestamp = int(timestamp - base_timestamp) + base_timestamp = timestamp + + + if line.find('INFO ckb_chain::chain block: ') != -1: block_number = int(re.search(r'block: (\d+)', line).group(1)) # Extract the block number using regex - if line_idx == 0 or block_number % 10000 == 0: + if line_idx == 0 or block_number % 10_000 == 0: + timestamp_str = re.search(r'^(\S+ \S+)', line).group(1) # Extract the timestamp string + timestamp = datetime.datetime.strptime(timestamp_str, "%Y-%m-%d %H:%M:%S.%f").timestamp() + timestamp = int(timestamp - base_timestamp) duration.append(timestamp / 60 / 60) height.append(block_number) @@ -76,8 +79,14 @@ def parse_sync_statics(log_file): lgs.append(lg) for i, h in enumerate(height): - if h % 2000000 == 0: + if h % 1_000_000 == 0: ax.vlines([duration[i]], 0, h, colors="gray", linestyles="dashed") + ax.annotate(str(round(duration[i], 1)), + xy=(duration[i], 0), + xycoords='axes fraction', + xytext=(duration[i], -0.05), + arrowprops=dict(arrowstyle="->", color='b') + ) ax.get_yaxis().get_major_formatter().set_scientific(False) ax.get_yaxis().get_major_formatter().set_useOffset(False) @@ -92,10 +101,14 @@ def parse_sync_statics(log_file): ax.xaxis.grid(color='gray', linestyle='dashed', which='minor') ax.yaxis.grid(color='gray', linestyle='dashed', which='minor') - minorLocator = MultipleLocator(10) - ax.xaxis.set_minor_locator(minorLocator) - - plt.setp(ax.get_xticklabels(), rotation=30, horizontalalignment='right') + xminorLocator = MultipleLocator(1.0) + ax.xaxis.set_minor_locator(xminorLocator) + + yminorLocator = MultipleLocator(1_000_000) + ax.yaxis.set_minor_locator(yminorLocator) + + # plt.xticks(ax.get_xticks(), ax.get_xticklabels(which='both')) + # plt.setp(ax.get_xticklabels(which='both'), rotation=30, horizontalalignment='right') plt.legend(tuple(lgs), tuple(args.label), loc='upper left', shadow=True) plt.title('CKB Block Sync progress Chart') 
From 9c42f62f87d847af44345c4e878c10d80ee21ffd Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 30 Oct 2023 13:54:17 +0800 Subject: [PATCH 135/357] Add draft mermaid sequence diagram for develop branch --- docs/ckb_sync.mermaid | 50 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) create mode 100644 docs/ckb_sync.mermaid diff --git a/docs/ckb_sync.mermaid b/docs/ckb_sync.mermaid new file mode 100644 index 0000000000..7fa807f337 --- /dev/null +++ b/docs/ckb_sync.mermaid @@ -0,0 +1,50 @@ +sequenceDiagram + autonumber + + participant S as Synchronizer + participant BP as BlockProcess + participant C as ChainService + + + box crate:ckb_sync + participant S + participant BP + end + + + box crate:ckb_chain + participant C + end + + Note left of S: synchronizer received
Block(122) from remote peer + + Note over S: try_process SyncMessageUnionReader::SendBlock + + + S->>+BP: BlockProcess::execute(Block(122)) + BP->>+C: process_block(Block(122)) + Note over BP: waiting ChainService to return
the result of process_block(Block(123)) + Note over C: insert_block(Block(122)) + C->>-BP: return result of process_block(Block(122)) + BP->>-S: return result of BlockProcess::execute(Block(122)) + + alt block is Valid + Note over S: going on + else block is Invalid + Note over S: punish the malicious peer + end + + Note left of S: synchronizer received
Block(123) from remote peer + Note over S: try_process SyncMessageUnionReader::SendBlock + S->>+BP: BlockProcess::execute(Block(123)) + BP->>+C: process_block(Block(123)) + Note over BP: waiting ChainService to return
the result of process_block(Block(123)) + Note over C: insert_block(Block(123)) + C->>-BP: return result of process_block(Block(123)) + BP->>-S: return result of BlockProcess::execute(Block(123)) + + alt block is Valid + Note over S: going on + else block is Invalid + Note over S: punish the malicious peer + end From c1bab4361acaf74c82cf7fd7b2f0e87ffbdd3fc8 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 30 Oct 2023 13:54:37 +0800 Subject: [PATCH 136/357] Add draft mermaid sequence diagram for asynchronous block download --- docs/ckb_async_block_sync.mermaid | 75 +++++++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) create mode 100644 docs/ckb_async_block_sync.mermaid diff --git a/docs/ckb_async_block_sync.mermaid b/docs/ckb_async_block_sync.mermaid new file mode 100644 index 0000000000..bad6ef2efc --- /dev/null +++ b/docs/ckb_async_block_sync.mermaid @@ -0,0 +1,75 @@ +sequenceDiagram + autonumber + + participant Sr as Synchronizer::received + participant BP as BlockProcess + + participant Sp as Synchronizer::poll + + participant C as main thread + participant CO as OrphanBlockPool thread + participant CV as ConsumeUnverifiedBlocks thread + + box crate:ckb-sync + participant Sr + participant Sp + participant BP + end + + box crate:ckb-chain + participant C + participant CO + participant CV + end + + + + Note left of Sr: synchronizer received
Block(122) from remote peer + Note over Sr: try_process SyncMessageUnionReader::SendBlock + Sr->>+BP: BlockProcess::execute(Block(122)) + BP->>+C: asynchronous_process_block(Block(122)) + Note over C: non_contextual_verify(Block(122)) + C->>+CO: send Block(122) to OrphanBlockPool via channel + C->>-BP: return + BP->>-Sr: return + + Note over CO: insert Block(122) to OrphanBlockPool + + Note left of Sr: synchronizer received
Block(123) from remote peer + Note over Sr: try_process SyncMessageUnionReader::SendBlock + Sr->>+BP: BlockProcess::execute(Block(123)) + BP->>+C: asynchronous_process_block(Block(123)) + Note over C: non_contextual_verify(Block(123)) + C->>+CO: send Block(123) to OrphanBlockPool via channel + C->>-BP: return + BP->>-Sr: return + + Note over CO: insert Block(123) to OrphanBlockPool + + loop Search Orphan Pool + Note over CO: if a leader block have descendants + Note over CO: load all descendants from OrphanBlockPool + Note over CO: assume these descendants are valid, let BlockExt.verified = None + Note over CO: insert them to RocksDB + Note over CO: Increase Unverified TIP + CO->>+CV: send the UnverifiedBlock to ConsumeUnverifiedBlocks via channel + end + + loop Consume Unverified Blocks + Note over CV: start verify UnverifiedBlock if the channel is not empty + + Note over CV: Verify Block in CKB VM + + + alt Block is Valid + Note over CV: remove Block block_status and HeaderMap + else Block is Invalid + Note over CV: Decrease Unverified TIP + CV->>Sp: I received a Invalid Block, please punish the malicious peer + Note over Sp: call nc.ban_peer() to punish the malicious peer + end + opt Execute Callback + Note over CV: callback: Box) + Send + Sync> + + end + end From e9aeb2421b087e514e8e20704b2400e7cc8722f6 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 30 Oct 2023 16:18:00 +0800 Subject: [PATCH 137/357] Add unverified block info to sync_state rpc --- rpc/src/module/net.rs | 5 +++++ util/jsonrpc-types/src/net.rs | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/rpc/src/module/net.rs b/rpc/src/module/net.rs index 72d6b762ee..03433084ee 100644 --- a/rpc/src/module/net.rs +++ b/rpc/src/module/net.rs @@ -8,6 +8,7 @@ use ckb_jsonrpc_types::{ use ckb_network::{extract_peer_id, multiaddr::Multiaddr, NetworkController}; use ckb_sync::SyncShared; use ckb_systemtime::unix_time_as_millis; +use ckb_types::prelude::Unpack; use jsonrpc_core::Result; use 
jsonrpc_utils::rpc; use std::sync::Arc; @@ -717,9 +718,11 @@ impl NetRpc for NetRpcImpl { fn sync_state(&self) -> Result { let chain = self.sync_shared.active_chain(); + let shared = chain.shared().shared(); let state = chain.shared().state(); let (fast_time, normal_time, low_time) = state.read_inflight_blocks().division_point(); let best_known = state.shared_best_header(); + let unverified_tip = shared.get_unverified_tip(); let sync_state = SyncState { ibd: chain.is_initial_block_download(), best_known_block_number: best_known.number().into(), @@ -727,6 +730,8 @@ impl NetRpc for NetRpcImpl { orphan_blocks_count: (self.chain_controller.orphan_blocks_len() as u64).into(), inflight_blocks_count: (state.read_inflight_blocks().total_inflight_count() as u64) .into(), + unverified_tip_number: unverified_tip.number().into(), + unverified_tip_hash: unverified_tip.hash().unpack(), fast_time: fast_time.into(), normal_time: normal_time.into(), low_time: low_time.into(), diff --git a/util/jsonrpc-types/src/net.rs b/util/jsonrpc-types/src/net.rs index 09a83e90b9..f5fc8861c2 100644 --- a/util/jsonrpc-types/src/net.rs +++ b/util/jsonrpc-types/src/net.rs @@ -1,4 +1,5 @@ use crate::{BlockNumber, Byte32, Timestamp, Uint64}; +use ckb_types::H256; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; @@ -278,6 +279,10 @@ pub struct SyncState { pub orphan_blocks_count: Uint64, /// Count of downloading blocks. 
pub inflight_blocks_count: Uint64, + /// The block number of current unverified tip block + pub unverified_tip_number: BlockNumber, + /// The block hash of current unverified tip block + pub unverified_tip_hash: H256, /// The download scheduler's time analysis data, the fast is the 1/3 of the cut-off point, unit ms pub fast_time: Uint64, /// The download scheduler's time analysis data, the normal is the 4/5 of the cut-off point, unit ms From ed87710d7646498c02317d10f486167aacded0ab Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 30 Oct 2023 16:18:37 +0800 Subject: [PATCH 138/357] Upgrade Synchronizer disconnect log level from debug to info --- sync/src/synchronizer/mod.rs | 1 + sync/src/types/mod.rs | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index be77696075..b7e8cfab08 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -839,6 +839,7 @@ impl CKBProtocolHandler for Synchronizer { ) { let sync_state = self.shared().state(); sync_state.disconnected(peer_index); + info!("SyncProtocol.disconnected peer={}", peer_index); } async fn notify(&mut self, nc: Arc, token: u64) { diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index aefb69d11c..c441b9ceed 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -1728,7 +1728,6 @@ impl SyncState { pub fn disconnected(&self, pi: PeerIndex) { self.write_inflight_blocks().remove_by_peer(pi); self.peers().disconnected(pi); - debug!("peer {} disconnected", pi); } // pub fn get_orphan_block(&self, block_hash: &Byte32) -> Option { From a811b3c9109906bb2e739370e5d4dfd99d5bfdc7 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 31 Oct 2023 11:04:24 +0800 Subject: [PATCH 139/357] Add tip_hash and tip_number to sync_state rpc --- rpc/src/module/net.rs | 2 ++ util/jsonrpc-types/src/net.rs | 3 +++ 2 files changed, 5 insertions(+) diff --git a/rpc/src/module/net.rs b/rpc/src/module/net.rs index 
03433084ee..62e4ef0c34 100644 --- a/rpc/src/module/net.rs +++ b/rpc/src/module/net.rs @@ -732,6 +732,8 @@ impl NetRpc for NetRpcImpl { .into(), unverified_tip_number: unverified_tip.number().into(), unverified_tip_hash: unverified_tip.hash().unpack(), + tip_number: chain.tip_number().into(), + tip_hash: chain.tip_hash().unpack(), fast_time: fast_time.into(), normal_time: normal_time.into(), low_time: low_time.into(), diff --git a/util/jsonrpc-types/src/net.rs b/util/jsonrpc-types/src/net.rs index f5fc8861c2..9c01b41cbb 100644 --- a/util/jsonrpc-types/src/net.rs +++ b/util/jsonrpc-types/src/net.rs @@ -283,6 +283,9 @@ pub struct SyncState { pub unverified_tip_number: BlockNumber, /// The block hash of current unverified tip block pub unverified_tip_hash: H256, + + pub tip_number: BlockNumber, + pub tip_hash: H256, /// The download scheduler's time analysis data, the fast is the 1/3 of the cut-off point, unit ms pub fast_time: Uint64, /// The download scheduler's time analysis data, the normal is the 4/5 of the cut-off point, unit ms From 34f9c78aa1dd28e60cc26e6130a1f9df71c7e269 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 31 Oct 2023 14:50:50 +0800 Subject: [PATCH 140/357] Set receive exit signal log from debug to info --- Cargo.lock | 2 -- shared/src/types/header_map/mod.rs | 2 +- tx-pool/src/chunk_process.rs | 2 +- util/stop-handler/src/stop_register.rs | 2 +- 4 files changed, 3 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4328fde156..1632215963 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -981,7 +981,6 @@ dependencies = [ "ckb-sync", "ckb-tx-pool", "ckb-types", - "ckb-util", "ckb-verification", "ckb-verification-traits", ] @@ -1444,7 +1443,6 @@ dependencies = [ "arc-swap", "bitflags 1.3.2", "ckb-app-config", - "bitflags 1.3.2", "ckb-async-runtime", "ckb-chain-spec", "ckb-channel", diff --git a/shared/src/types/header_map/mod.rs b/shared/src/types/header_map/mod.rs index e764755ea6..40554afb34 100644 --- 
a/shared/src/types/header_map/mod.rs +++ b/shared/src/types/header_map/mod.rs @@ -1,5 +1,5 @@ use ckb_async_runtime::Handle; -use ckb_logger::info; +use ckb_logger::{debug, info}; use ckb_stop_handler::{new_tokio_exit_rx, CancellationToken}; use ckb_types::packed::Byte32; use std::sync::Arc; diff --git a/tx-pool/src/chunk_process.rs b/tx-pool/src/chunk_process.rs index 0d9b03f2f3..5dd48ddba6 100644 --- a/tx-pool/src/chunk_process.rs +++ b/tx-pool/src/chunk_process.rs @@ -4,7 +4,7 @@ use crate::try_or_return_with_snapshot; use crate::{error::Reject, service::TxPoolService}; use ckb_chain_spec::consensus::Consensus; use ckb_error::Error; -use ckb_logger::info; +use ckb_logger::{debug, info}; use ckb_snapshot::Snapshot; use ckb_store::data_loader_wrapper::AsDataLoader; use ckb_traits::{CellDataProvider, ExtensionProvider, HeaderProvider}; diff --git a/util/stop-handler/src/stop_register.rs b/util/stop-handler/src/stop_register.rs index c9146332dc..73b3efbe1d 100644 --- a/util/stop-handler/src/stop_register.rs +++ b/util/stop-handler/src/stop_register.rs @@ -25,7 +25,7 @@ pub fn wait_all_ckb_services_exit() { } } } - debug!("All ckb threads have been stopped."); + info!("All ckb threads have been stopped"); } static CKB_HANDLES: once_cell::sync::Lazy> = From 21dd012ec0f33ecbe19f312129c4ff8a808ab37c Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 31 Oct 2023 14:59:59 +0800 Subject: [PATCH 141/357] Let HeaderMap stats feature use info log --- shared/src/types/header_map/kernel_lru.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/shared/src/types/header_map/kernel_lru.rs b/shared/src/types/header_map/kernel_lru.rs index f9d5eba2c7..7471128513 100644 --- a/shared/src/types/header_map/kernel_lru.rs +++ b/shared/src/types/header_map/kernel_lru.rs @@ -1,7 +1,7 @@ use std::path; #[cfg(feature = "stats")] -use ckb_logger::trace; +use ckb_logger::info; #[cfg(feature = "stats")] use ckb_util::{Mutex, MutexGuard}; @@ -153,7 +153,7 @@ where let progress 
= stats.trace_progress(); let frequency = stats.frequency(); if progress % frequency == 0 { - trace!( + info!( "Header Map Statistics\ \n>\t| storage | length | limit | contain | select | insert | delete |\ \n>\t|---------+---------+---------+---------+------------+---------+---------|\ From 0143bc367fcaf08a2b5ebb4619e303ef14cbe899 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 31 Oct 2023 15:55:46 +0800 Subject: [PATCH 142/357] Activate HeaderMap stats profiling feature in `make profiling` Signed-off-by: Eval EXEC --- Makefile | 10 +++++----- ckb-bin/Cargo.toml | 2 +- shared/Cargo.toml | 1 + 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/Makefile b/Makefile index 873e19a250..c419d05bcf 100644 --- a/Makefile +++ b/Makefile @@ -121,13 +121,13 @@ check: setup-ckb-test ## Runs all of the compiler's checks. build: ## Build binary with release profile. cargo build ${VERBOSE} --release -.PHONY: build-for-profiling-without-debug-symbols -build-for-profiling-without-debug-symbols: ## Build binary with for profiling without debug symbols. - JEMALLOC_SYS_WITH_MALLOC_CONF="prof:true" cargo build ${VERBOSE} --release --features "profiling" +.PHONY: profiling +profiling: ## Build binary with for profiling without debug symbols. + JEMALLOC_SYS_WITH_MALLOC_CONF="prof:true" cargo build ${VERBOSE} --profile prod --features "with_sentry,with_dns_seeding,profiling" -.PHONY: build-for-profiling +.PHONY: profiling-with-debug-symbols build-for-profiling: ## Build binary with for profiling. - devtools/release/make-with-debug-symbols build-for-profiling-without-debug-symbols + devtools/release/make-with-debug-symbols profilling .PHONY: prod prod: ## Build binary for production release. 
diff --git a/ckb-bin/Cargo.toml b/ckb-bin/Cargo.toml index 40fbb2cb5f..5be1804a40 100644 --- a/ckb-bin/Cargo.toml +++ b/ckb-bin/Cargo.toml @@ -54,7 +54,7 @@ colored = "2.0" [features] deadlock_detection = ["ckb-util/deadlock_detection"] -profiling = ["ckb-memory-tracker/profiling"] +profiling = ["ckb-memory-tracker/profiling", "ckb-shared/stats"] with_sentry = ["sentry", "ckb-launcher/with_sentry", "ckb-network/with_sentry", "ckb-app-config/with_sentry", "ckb-logger-service/with_sentry"] with_dns_seeding = ["ckb-network/with_dns_seeding"] portable = ["ckb-launcher/portable"] diff --git a/shared/Cargo.toml b/shared/Cargo.toml index bc0986bd80..44816cfff1 100644 --- a/shared/Cargo.toml +++ b/shared/Cargo.toml @@ -44,3 +44,4 @@ ckb-systemtime = { path = "../util/systemtime", version = "= 0.114.0-pre", featu [features] portable = ["ckb-db/portable", "ckb-store/portable", "ckb-tx-pool/portable", "ckb-migrate/portable"] march-native = ["ckb-db/march-native", "ckb-store/march-native", "ckb-tx-pool/march-native", "ckb-migrate/march-native"] +stats = [] From 98c77ae630946e6b0850fd360fc5c8e81d2ced54 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 31 Oct 2023 18:19:09 +0800 Subject: [PATCH 143/357] Return removed inflight blocks count when disconnect Signed-off-by: Eval EXEC --- sync/src/types/mod.rs | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index c441b9ceed..e7efb0e1c2 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -762,21 +762,23 @@ impl InflightBlocks { download_scheduler.hashes.insert(block) } - pub fn remove_by_peer(&mut self, peer: PeerIndex) -> bool { + pub fn remove_by_peer(&mut self, peer: PeerIndex) -> usize { let trace = &mut self.trace_number; let state = &mut self.inflight_states; self.download_schedulers .remove(&peer) .map(|blocks| { + let blocks_count = blocks.hashes.iter().len(); for block in blocks.hashes { state.remove(&block); if !trace.is_empty() { 
trace.remove(&block); } } + blocks_count }) - .is_some() + .unwrap_or_default() } pub fn remove_by_block(&mut self, block: BlockNumberAndHash) -> bool { @@ -1726,7 +1728,13 @@ impl SyncState { // TODO: record peer's connection duration (disconnect time - connect established time) // and report peer's connection duration to ckb_metrics pub fn disconnected(&self, pi: PeerIndex) { - self.write_inflight_blocks().remove_by_peer(pi); + let removed_inflight_blocks_count = self.write_inflight_blocks().remove_by_peer(pi); + if removed_inflight_blocks_count > 0 { + debug!( + "disconnected {}, remove {} inflight blocks", + pi, removed_inflight_blocks_count + ) + } self.peers().disconnected(pi); } From 9a63f2a4a6ad1ea288761b0be8b4ccde6e8d61fb Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 1 Nov 2023 20:00:54 +0800 Subject: [PATCH 144/357] Remove log message time cost unit --- sync/src/synchronizer/block_fetcher.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/sync/src/synchronizer/block_fetcher.rs b/sync/src/synchronizer/block_fetcher.rs index 3c405fd373..3f2646d200 100644 --- a/sync/src/synchronizer/block_fetcher.rs +++ b/sync/src/synchronizer/block_fetcher.rs @@ -229,14 +229,14 @@ impl BlockFetcher { if fetch.is_empty() { debug!( "[block fetch empty] peer-{}, fixed_last_common_header = {} \ - best_known_header = {}, [tip/unverified_tip]: [{}/{}], inflight_len = {}, time_cost: {}ms", + best_known_header = {}, [tip/unverified_tip]: [{}/{}], inflight_len = {}, time_cost: {:?}", self.peer, last_common.number(), best_known.number(), tip, unverified_tip, state.read_inflight_blocks().total_inflight_count(), - trace_timecost_now.elapsed().as_millis(), + trace_timecost_now.elapsed(), ); trace!( "[block fetch empty] peer-{}, inflight_state = {:?}", @@ -249,7 +249,7 @@ impl BlockFetcher { let inflight_peer_count = state.read_inflight_blocks().peer_inflight_count(self.peer); let inflight_total_count = state.read_inflight_blocks().total_inflight_count(); 
debug!( - "request peer-{} for batch blocks: [{}-{}], batch len:{}, [tip/unverified_tip]: [{}/{}], [peer/total inflight count]: [{} / {}], timecost: {}ms, blocks: {}", + "request peer-{} for batch blocks: [{}-{}], batch len:{}, [tip/unverified_tip]: [{}/{}], [peer/total inflight count]: [{} / {}], timecost: {:?}, blocks: {}", self.peer, fetch_head, fetch_last, @@ -258,7 +258,7 @@ impl BlockFetcher { self.sync_shared.shared().get_unverified_tip().number(), inflight_peer_count, inflight_total_count, - trace_timecost_now.elapsed().as_millis(), + trace_timecost_now.elapsed(), fetch.iter().map(|h| h.number().to_string()).collect::>().join(","), ); } From 739d19cc45940b85096fc4005144327e48508f64 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Sat, 4 Nov 2023 18:57:45 +0800 Subject: [PATCH 145/357] Move ChainService proposal_table to the param for start method --- chain/src/chain.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 49001ab432..ba3e79ffa0 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -234,7 +234,6 @@ impl GlobalIndex { #[derive(Clone)] pub struct ChainService { shared: Shared, - proposal_table: Arc>, orphan_blocks_broker: Arc, @@ -323,19 +322,21 @@ impl ChainService { /// Create a new ChainService instance with shared and initial proposal_table. pub fn new( shared: Shared, - proposal_table: ProposalTable, verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, ) -> ChainService { ChainService { shared, - proposal_table: Arc::new(Mutex::new(proposal_table)), orphan_blocks_broker: Arc::new(OrphanBlockPool::with_capacity(ORPHAN_BLOCK_SIZE)), verify_failed_blocks_tx, } } /// start background single-threaded service with specified thread_name. 
- pub fn start(mut self, thread_name: Option) -> ChainController { + pub fn start( + mut self, + proposal_table: ProposalTable, + thread_name: Option, + ) -> ChainController { let orphan_blocks_broker_clone = Arc::clone(&self.orphan_blocks_broker); let signal_receiver = new_crossbeam_exit_rx(); From fd8724c7d570ee318948984a9b4056d9fe00ac3c Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Sun, 5 Nov 2023 04:30:32 +0800 Subject: [PATCH 146/357] Remove proposal_table's RWLock --- chain/src/chain.rs | 74 ++++++++++++++++++++++++++++------------------ 1 file changed, 46 insertions(+), 28 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index ba3e79ffa0..4dc5159762 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -363,8 +363,11 @@ impl ChainService { .spawn({ let chain_service = self.clone(); move || { - chain_service - .start_consume_unverified_blocks(unverified_queue_stop_rx, unverified_rx) + chain_service.start_consume_unverified_blocks( + &mut proposal_table, + unverified_queue_stop_rx, + unverified_rx, + ) } }) .expect("start unverified_queue consumer thread should ok"); @@ -409,7 +412,9 @@ impl ChainService { recv(truncate_receiver) -> msg => match msg { Ok(Request { responder, arguments: target_tip_hash }) => { let _ = tx_control.suspend_chunk_process(); - let _ = responder.send(self.truncate(&target_tip_hash)); + let _ = responder.send(self.truncate( + &mut proposal_table, + &target_tip_hash)); let _ = tx_control.continue_chunk_process(); }, _ => { @@ -441,6 +446,7 @@ impl ChainService { fn start_consume_unverified_blocks( &self, + proposal_table: &mut ProposalTable, unverified_queue_stop_rx: Receiver<()>, unverified_block_rx: Receiver, ) { @@ -456,7 +462,7 @@ impl ChainService { Ok(unverified_task) => { // process this unverified block trace!("got an unverified block, wait cost: {:?}", begin_loop.elapsed()); - self.consume_unverified_blocks(unverified_task); + self.consume_unverified_blocks(proposal_table, unverified_task); 
trace!("consume_unverified_blocks cost: {:?}", begin_loop.elapsed()); }, Err(err) => { @@ -469,9 +475,13 @@ impl ChainService { } } - fn consume_unverified_blocks(&self, unverified_block: UnverifiedBlock) { + fn consume_unverified_blocks( + &self, + proposal_table: &mut ProposalTable, + unverified_block: UnverifiedBlock, + ) { // process this unverified block - let verify_result = self.verify_block(&unverified_block); + let verify_result = self.verify_block(proposal_table, &unverified_block); match &verify_result { Ok(_) => { let log_now = std::time::Instant::now(); @@ -688,7 +698,11 @@ impl ChainService { // Truncate the main chain // Use for testing only - pub(crate) fn truncate(&mut self, target_tip_hash: &Byte32) -> Result<(), Error> { + pub(crate) fn truncate( + &mut self, + proposal_table: &mut ProposalTable, + target_tip_hash: &Byte32, + ) -> Result<(), Error> { let snapshot = Arc::clone(&self.shared.snapshot()); assert!(snapshot.is_main_chain(target_tip_hash)); @@ -712,11 +726,9 @@ impl ChainService { } db_txn.commit()?; - self.update_proposal_table(&fork); - let (detached_proposal_id, new_proposals) = self - .proposal_table - .lock() - .finalize(origin_proposals, target_tip_header.number()); + self.update_proposal_table(&fork, proposal_table); + let (detached_proposal_id, new_proposals) = + proposal_table.finalize(origin_proposals, target_tip_header.number()); fork.detached_proposal_id = detached_proposal_id; let new_snapshot = self.shared.new_snapshot( @@ -915,7 +927,11 @@ impl ChainService { Ok(Some((parent_header, cannon_total_difficulty))) } - fn verify_block(&self, unverified_block: &UnverifiedBlock) -> VerifyResult { + fn verify_block( + &self, + proposal_table: &mut ProposalTable, + unverified_block: &UnverifiedBlock, + ) -> VerifyResult { let UnverifiedBlock { unverified_block: LonelyBlockWithCallback { @@ -1056,11 +1072,9 @@ impl ChainService { block.transactions().len() ); - self.update_proposal_table(&fork); - let (detached_proposal_id, 
new_proposals) = self - .proposal_table - .lock() - .finalize(origin_proposals, tip_header.number()); + self.update_proposal_table(&fork, proposal_table); + let (detached_proposal_id, new_proposals) = + proposal_table.finalize(origin_proposals, tip_header.number()); fork.detached_proposal_id = detached_proposal_id; let new_snapshot = @@ -1115,20 +1129,26 @@ impl ChainService { } } - pub(crate) fn update_proposal_table(&self, fork: &ForkChanges) { + pub(crate) fn update_proposal_table( + &self, + fork: &ForkChanges, + proposal_table: &mut ProposalTable, + ) { for blk in fork.detached_blocks() { - self.proposal_table.lock().remove(blk.header().number()); + proposal_table.remove(blk.header().number()); } for blk in fork.attached_blocks() { - self.proposal_table - .lock() - .insert(blk.header().number(), blk.union_proposal_ids()); + proposal_table.insert(blk.header().number(), blk.union_proposal_ids()); } - self.reload_proposal_table(fork); + self.reload_proposal_table(fork, proposal_table); } // if rollback happen, go back check whether need reload proposal_table from block - pub(crate) fn reload_proposal_table(&self, fork: &ForkChanges) { + pub(crate) fn reload_proposal_table( + &self, + fork: &ForkChanges, + proposal_table: &mut ProposalTable, + ) { if fork.has_detached() { let proposal_window = self.shared.consensus().tx_proposal_window(); let detached_front = fork @@ -1158,9 +1178,7 @@ impl ChainService { .and_then(|hash| self.shared.store().get_block(&hash)) .expect("block stored"); - self.proposal_table - .lock() - .insert(bn, blk.union_proposal_ids()); + proposal_table.insert(bn, blk.union_proposal_ids()); } } } From 339d762482f94a1cf55cbb9c2769a6ac84196e24 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 13 Nov 2023 11:43:28 +0800 Subject: [PATCH 147/357] Add VerifiedBlockStatus::PreviouslyVerified --- chain/src/chain.rs | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 
4dc5159762..9cd234f58b 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -57,10 +57,15 @@ pub type VerifyCallback = Box; pub enum VerifiedBlockStatus { // The block is being seen for the first time. FirstSeenAndVerified, + + // The block is being seen for the first time, but not verify it yet FirstSeenButNotVerified, // The block has been verified before. PreviouslyVerified, + + // The block has been verified before, but not veriify it yet + PreviouslyUnVerified, } /// Controller to the chain service. @@ -334,7 +339,7 @@ impl ChainService { /// start background single-threaded service with specified thread_name. pub fn start( mut self, - proposal_table: ProposalTable, + mut proposal_table: ProposalTable, thread_name: Option, ) -> ChainController { let orphan_blocks_broker_clone = Arc::clone(&self.orphan_blocks_broker); @@ -670,6 +675,9 @@ impl ChainService { "doesn't accept block {}, because it has been stored", descendant_block.block().hash() ); + let verify_result: VerifyResult = + Ok(VerifiedBlockStatus::PreviouslyUnVerified); + descendant_block.execute_callback(verify_result); } }, } From 66ff7979a761b78cc5106cf2d6f47dc5623859e5 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 13 Nov 2023 11:57:37 +0800 Subject: [PATCH 148/357] Will move truncate process to consume_unverified_blocks --- chain/src/chain.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 9cd234f58b..5bd147b982 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -417,9 +417,10 @@ impl ChainService { recv(truncate_receiver) -> msg => match msg { Ok(Request { responder, arguments: target_tip_hash }) => { let _ = tx_control.suspend_chunk_process(); - let _ = responder.send(self.truncate( - &mut proposal_table, - &target_tip_hash)); + todo!("move truncate process to consume unverified_block"); + // let _ = responder.send(self.truncate( + // &mut proposal_table, + // &target_tip_hash)); let _ = 
tx_control.continue_chunk_process(); }, _ => { From d95efc0e915ad51045adc476ef1985f05220d19e Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 13 Nov 2023 15:31:43 +0800 Subject: [PATCH 149/357] Extract consume_orphan_blocks from ChainService --- chain/src/consume_orphan.rs | 276 ++++++++++++++++++++++++++++++++++++ 1 file changed, 276 insertions(+) create mode 100644 chain/src/consume_orphan.rs diff --git a/chain/src/consume_orphan.rs b/chain/src/consume_orphan.rs new file mode 100644 index 0000000000..42390a2a80 --- /dev/null +++ b/chain/src/consume_orphan.rs @@ -0,0 +1,276 @@ +use crate::orphan_block_pool::OrphanBlockPool; +use crate::{ + tell_synchronizer_to_punish_the_bad_peer, LonelyBlockWithCallback, UnverifiedBlock, + VerifiedBlockStatus, VerifyResult, +}; +use ckb_channel::{select, Receiver, SendError, Sender}; +use ckb_error::{Error, InternalErrorKind}; +use ckb_logger::internal::trace; +use ckb_logger::{debug, error, info}; +use ckb_shared::block_status::BlockStatus; +use ckb_shared::types::VerifyFailedBlockInfo; +use ckb_shared::Shared; +use ckb_store::ChainStore; +use ckb_systemtime::unix_time_as_millis; +use ckb_types::core::{BlockExt, BlockView, HeaderView}; +use ckb_types::U256; +use ckb_verification::InvalidParentError; +use std::sync::Arc; + +pub(crate) struct ConsumeOrphan { + shared: Shared, + orphan_blocks_broker: Arc, + lonely_blocks_rx: Receiver, + unverified_blocks_tx: Sender, + + verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, + + stop_rx: Receiver<()>, +} + +impl ConsumeOrphan { + pub(crate) fn new( + shared: Shared, + orphan_block_pool: Arc, + unverified_blocks_tx: Sender, + lonely_blocks_rx: Receiver, + + verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, + stop_rx: Receiver<()>, + ) -> ConsumeOrphan { + ConsumeOrphan { + shared, + orphan_blocks_broker: orphan_block_pool, + lonely_blocks_rx, + unverified_blocks_tx, + verify_failed_blocks_tx, + stop_rx, + } + } + + pub(crate) fn start(&self) { + loop { + select! 
{ + recv(self.stop_rx) -> _ => { + info!("unverified_queue_consumer got exit signal, exit now"); + return; + }, + recv(self.lonely_blocks_rx) -> msg => match msg { + Ok(lonely_block) => { + self.orphan_blocks_broker.insert(lonely_block); + self.search_orphan_pool(&self.unverified_blocks_tx) + }, + Err(err) => { + error!("lonely_block_rx err: {}", err); + return + } + }, + } + } + } + fn search_orphan_pool(&self, unverified_block_tx: &Sender) { + for leader_hash in self.orphan_blocks_broker.clone_leaders() { + if !self + .shared + .contains_block_status(&leader_hash, BlockStatus::BLOCK_PARTIAL_STORED) + { + trace!("orphan leader: {} not partial stored", leader_hash); + continue; + } + + let descendants: Vec = self + .orphan_blocks_broker + .remove_blocks_by_parent(&leader_hash); + if descendants.is_empty() { + error!( + "leader {} does not have any descendants, this shouldn't happen", + leader_hash + ); + continue; + } + let descendants_len = descendants.len(); + let (first_descendants_number, last_descendants_number) = ( + descendants + .first() + .expect("descdant not empty") + .block() + .number(), + descendants + .last() + .expect("descdant not empty") + .block() + .number(), + ); + + let mut accept_error_occurred = false; + for descendant_block in descendants { + match self.accept_block(descendant_block.block().to_owned()) { + Err(err) => { + tell_synchronizer_to_punish_the_bad_peer( + self.verify_failed_blocks_tx.clone(), + &descendant_block, + &err, + ); + + accept_error_occurred = true; + error!( + "accept block {} failed: {}", + descendant_block.block().hash(), + err + ); + + descendant_block.execute_callback(Err(err)); + continue; + } + Ok(accepted_opt) => match accepted_opt { + Some((parent_header, total_difficulty)) => { + let unverified_block: UnverifiedBlock = + descendant_block.combine_parent_header(parent_header); + let block_number = unverified_block.block().number(); + let block_hash = unverified_block.block().hash(); + + match 
unverified_block_tx.send(unverified_block) { + Ok(_) => {} + Err(SendError(unverified_block)) => { + error!("send unverified_block_tx failed, the receiver has been closed"); + let err: Error = InternalErrorKind::System + .other(format!("send unverified_block_tx failed, the receiver have been close")).into(); + + tell_synchronizer_to_punish_the_bad_peer( + self.verify_failed_blocks_tx.clone(), + &unverified_block.unverified_block, + &err, + ); + + let verify_result: VerifyResult = Err(err); + unverified_block.execute_callback(verify_result); + continue; + } + }; + + if total_difficulty + .gt(self.shared.get_unverified_tip().total_difficulty()) + { + self.shared.set_unverified_tip(ckb_shared::HeaderIndex::new( + block_number.clone(), + block_hash.clone(), + total_difficulty, + )); + debug!("set unverified_tip to {}-{}, while unverified_tip - verified_tip = {}", + block_number.clone(), + block_hash.clone(), + block_number.saturating_sub(self.shared.snapshot().tip_number())) + } else { + debug!("received a block {}-{} with lower or equal difficulty than unverified_tip {}-{}", + block_number, + block_hash, + self.shared.get_unverified_tip().number(), + self.shared.get_unverified_tip().hash(), + ); + } + } + None => { + info!( + "doesn't accept block {}, because it has been stored", + descendant_block.block().hash() + ); + let verify_result: VerifyResult = + Ok(VerifiedBlockStatus::PreviouslyUnVerified); + descendant_block.execute_callback(verify_result); + } + }, + } + } + + if !accept_error_occurred { + debug!( + "accept {} blocks [{}->{}] success", + descendants_len, first_descendants_number, last_descendants_number + ) + } + } + } + + fn accept_block(&self, block: Arc) -> Result, Error> { + let (block_number, block_hash) = (block.number(), block.hash()); + + if self + .shared + .contains_block_status(&block_hash, BlockStatus::BLOCK_PARTIAL_STORED) + { + debug!("block {}-{} has been stored", block_number, block_hash); + return Ok(None); + } + + let parent_header = 
self + .shared + .store() + .get_block_header(&block.data().header().raw().parent_hash()) + .expect("parent already store"); + + if let Some(ext) = self.shared.store().get_block_ext(&block.hash()) { + debug!("block {}-{} has stored BlockExt", block_number, block_hash); + return Ok(Some((parent_header, ext.total_difficulty))); + } + + trace!("begin accept block: {}-{}", block.number(), block.hash()); + + let parent_ext = self + .shared + .store() + .get_block_ext(&block.data().header().raw().parent_hash()) + .expect("parent already store"); + + if parent_ext.verified == Some(false) { + return Err(InvalidParentError { + parent_hash: parent_header.hash(), + } + .into()); + } + + let cannon_total_difficulty = + parent_ext.total_difficulty.to_owned() + block.header().difficulty(); + + let db_txn = Arc::new(self.shared.store().begin_transaction()); + + let txn_snapshot = db_txn.get_snapshot(); + let _snapshot_block_ext = db_txn.get_update_for_block_ext(&block.hash(), &txn_snapshot); + + db_txn.insert_block(block.as_ref())?; + + let next_block_epoch = self + .shared + .consensus() + .next_epoch_ext(&parent_header, &db_txn.borrow_as_data_loader()) + .expect("epoch should be stored"); + let new_epoch = next_block_epoch.is_head(); + let epoch = next_block_epoch.epoch(); + + db_txn.insert_block_epoch_index( + &block.header().hash(), + &epoch.last_block_hash_in_previous_epoch(), + )?; + if new_epoch { + db_txn.insert_epoch_ext(&epoch.last_block_hash_in_previous_epoch(), &epoch)?; + } + + let ext = BlockExt { + received_at: unix_time_as_millis(), + total_difficulty: cannon_total_difficulty.clone(), + total_uncles_count: parent_ext.total_uncles_count + block.data().uncles().len() as u64, + verified: None, + txs_fees: vec![], + cycles: None, + txs_sizes: None, + }; + + db_txn.insert_block_ext(&block.header().hash(), &ext)?; + + db_txn.commit()?; + + self.shared + .insert_block_status(block_hash, BlockStatus::BLOCK_PARTIAL_STORED); + + Ok(Some((parent_header, 
cannon_total_difficulty))) + } +} From 9d3b643430c9767bf49405c0b5f77a455d0e48c1 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 13 Nov 2023 15:32:01 +0800 Subject: [PATCH 150/357] Extract consume_unverified_blocks from ChainService --- chain/src/consume_unverified.rs | 849 ++++++++++++++++++++++++++++++++ 1 file changed, 849 insertions(+) create mode 100644 chain/src/consume_unverified.rs diff --git a/chain/src/consume_unverified.rs b/chain/src/consume_unverified.rs new file mode 100644 index 0000000000..c36c1928d1 --- /dev/null +++ b/chain/src/consume_unverified.rs @@ -0,0 +1,849 @@ +use crate::forkchanges::ForkChanges; +use crate::{ + tell_synchronizer_to_punish_the_bad_peer, GlobalIndex, LonelyBlock, LonelyBlockWithCallback, + UnverifiedBlock, VerifiedBlockStatus, VerifyResult, +}; +use ckb_channel::{select, Receiver}; +use ckb_error::{Error, InternalErrorKind}; +use ckb_logger::internal::{log_enabled, trace}; +use ckb_logger::Level::Trace; +use ckb_logger::{debug, error, info, log_enabled_target, trace_target}; +use ckb_merkle_mountain_range::leaf_index_to_mmr_size; +use ckb_proposal_table::ProposalTable; +use ckb_shared::block_status::BlockStatus; +use ckb_shared::types::VerifyFailedBlockInfo; +use ckb_shared::Shared; +use ckb_store::{attach_block_cell, detach_block_cell, ChainStore, StoreTransaction}; +use ckb_systemtime::unix_time_as_millis; +use ckb_types::core::cell::{ + resolve_transaction, BlockCellProvider, HeaderChecker, OverlayCellProvider, ResolvedTransaction, +}; +use ckb_types::core::{BlockExt, BlockNumber, BlockView, Cycle, HeaderView}; +use ckb_types::packed::Byte32; +use ckb_types::utilities::merkle_mountain_range::ChainRootMMR; +use ckb_types::H256; +use ckb_verification::cache::Completed; +use ckb_verification::InvalidParentError; +use ckb_verification_contextual::{ContextualBlockVerifier, VerifyContext}; +use ckb_verification_traits::Switch; +use std::cmp; +use std::collections::HashSet; +use std::sync::Arc; + +pub(crate) struct 
ConsumeUnverifiedBlocks { + shared: Shared, + unverified_block_rx: Receiver, + proposal_table: ProposalTable, + + verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, + + stop_rx: Receiver<()>, +} + +impl ConsumeUnverifiedBlocks { + pub(crate) fn new( + shared: Shared, + unverified_blocks_rx: Receiver, + proposal_table: ProposalTable, + verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, + stop_rx: Receiver<()>, + ) -> Self { + ConsumeUnverifiedBlocks { + shared, + unverified_block_rx: unverified_blocks_rx, + proposal_table, + + verify_failed_blocks_tx, + stop_rx, + } + } + pub(crate) fn start(mut self) { + let mut begin_loop = std::time::Instant::now(); + loop { + begin_loop = std::time::Instant::now(); + select! { + recv(self.stop_rx) -> _ => { + info!("unverified_queue_consumer got exit signal, exit now"); + return; + }, + recv(self.unverified_block_rx) -> msg => match msg { + Ok(unverified_task) => { + // process this unverified block + trace!("got an unverified block, wait cost: {:?}", begin_loop.elapsed()); + self.consume_unverified_blocks(unverified_task); + trace!("consume_unverified_blocks cost: {:?}", begin_loop.elapsed()); + }, + Err(err) => { + error!("unverified_block_rx err: {}", err); + return; + }, + }, + default => {}, + } + } + } + + fn consume_unverified_blocks(&mut self, unverified_block: UnverifiedBlock) { + // process this unverified block + let verify_result = self.verify_block(&unverified_block); + match &verify_result { + Ok(_) => { + let log_now = std::time::Instant::now(); + self.shared + .remove_block_status(&unverified_block.block().hash()); + let log_elapsed_remove_block_status = log_now.elapsed(); + self.shared + .remove_header_view(&unverified_block.block().hash()); + debug!( + "block {} remove_block_status cost: {:?}, and header_view cost: {:?}", + unverified_block.block().hash(), + log_elapsed_remove_block_status, + log_now.elapsed() + ); + } + Err(err) => { + error!( + "verify [{:?}]'s block {} failed: {}", + 
unverified_block.peer_id(), + unverified_block.block().hash(), + err + ); + + let tip = self + .shared + .store() + .get_tip_header() + .expect("tip_header must exist"); + let tip_ext = self + .shared + .store() + .get_block_ext(&tip.hash()) + .expect("tip header's ext must exist"); + + self.shared.set_unverified_tip(ckb_shared::HeaderIndex::new( + tip.clone().number(), + tip.clone().hash(), + tip_ext.total_difficulty, + )); + + self.shared.insert_block_status( + unverified_block.block().hash(), + BlockStatus::BLOCK_INVALID, + ); + error!( + "set_unverified tip to {}-{}, because verify {} failed: {}", + tip.number(), + tip.hash(), + unverified_block.block().hash(), + err + ); + + tell_synchronizer_to_punish_the_bad_peer( + self.verify_failed_blocks_tx.clone(), + &unverified_block.unverified_block, + err, + ); + } + } + + unverified_block.execute_callback(verify_result); + } + + fn verify_block(&mut self, unverified_block: &UnverifiedBlock) -> VerifyResult { + let UnverifiedBlock { + unverified_block: + LonelyBlockWithCallback { + lonely_block: + LonelyBlock { + block, + peer_id: _peer_id, + switch, + }, + verify_callback: _verify_callback, + }, + parent_header, + } = unverified_block; + + let switch: Switch = switch.unwrap_or_else(|| { + let mut assume_valid_target = self.shared.assume_valid_target(); + match *assume_valid_target { + Some(ref target) => { + // if the target has been reached, delete it + if target + == &ckb_types::prelude::Unpack::::unpack(&BlockView::hash(&block)) + { + assume_valid_target.take(); + Switch::NONE + } else { + Switch::DISABLE_SCRIPT + } + } + None => Switch::NONE, + } + }); + + let parent_ext = self + .shared + .store() + .get_block_ext(&block.data().header().raw().parent_hash()) + .expect("parent should be stored already"); + + if let Some(ext) = self.shared.store().get_block_ext(&block.hash()) { + match ext.verified { + Some(verified) => { + debug!( + "block {}-{} has been verified, previously verified result: {}", + 
block.number(), + block.hash(), + verified + ); + return if verified { + Ok(VerifiedBlockStatus::PreviouslyVerified) + } else { + Err(InternalErrorKind::Other + .other("block previously verified failed") + .into()) + }; + } + _ => { + // we didn't verify this block, going on verify now + } + } + } + + let cannon_total_difficulty = + parent_ext.total_difficulty.to_owned() + block.header().difficulty(); + + if parent_ext.verified == Some(false) { + return Err(InvalidParentError { + parent_hash: parent_header.hash(), + } + .into()); + } + + let ext = BlockExt { + received_at: unix_time_as_millis(), + total_difficulty: cannon_total_difficulty.clone(), + total_uncles_count: parent_ext.total_uncles_count + block.data().uncles().len() as u64, + verified: None, + txs_fees: vec![], + cycles: None, + txs_sizes: None, + }; + + let shared_snapshot = Arc::clone(&self.shared.snapshot()); + let origin_proposals = shared_snapshot.proposals(); + let current_tip_header = shared_snapshot.tip_header(); + let current_total_difficulty = shared_snapshot.total_difficulty().to_owned(); + + // is_better_than + let new_best_block = cannon_total_difficulty > current_total_difficulty; + + let mut fork = ForkChanges::default(); + + let next_block_epoch = self + .shared + .consensus() + .next_epoch_ext(&parent_header, &self.shared.store().borrow_as_data_loader()) + .expect("epoch should be stored"); + let new_epoch = next_block_epoch.is_head(); + let epoch = next_block_epoch.epoch(); + + let db_txn = Arc::new(self.shared.store().begin_transaction()); + if new_best_block { + debug!( + "[verify block] new best block found: {} => {:#x}, difficulty diff = {:#x}, unverified_tip: {}", + block.header().number(), + block.header().hash(), + &cannon_total_difficulty - ¤t_total_difficulty, + self.shared.get_unverified_tip().number(), + ); + self.find_fork(&mut fork, current_tip_header.number(), &block, ext); + self.rollback(&fork, &db_txn)?; + + // update and verify chain root + // MUST update index before 
reconcile_main_chain + let begin_reconcile_main_chain = std::time::Instant::now(); + self.reconcile_main_chain(Arc::clone(&db_txn), &mut fork, switch)?; + trace!( + "reconcile_main_chain cost {:?}", + begin_reconcile_main_chain.elapsed() + ); + + db_txn.insert_tip_header(&block.header())?; + if new_epoch || fork.has_detached() { + db_txn.insert_current_epoch_ext(&epoch)?; + } + } else { + db_txn.insert_block_ext(&block.header().hash(), &ext)?; + } + db_txn.commit()?; + + if new_best_block { + let tip_header = block.header(); + info!( + "block: {}, hash: {:#x}, epoch: {:#}, total_diff: {:#x}, txs: {}", + tip_header.number(), + tip_header.hash(), + tip_header.epoch(), + cannon_total_difficulty, + block.transactions().len() + ); + + self.update_proposal_table(&fork); + let (detached_proposal_id, new_proposals) = self + .proposal_table + .finalize(origin_proposals, tip_header.number()); + fork.detached_proposal_id = detached_proposal_id; + + let new_snapshot = + self.shared + .new_snapshot(tip_header, cannon_total_difficulty, epoch, new_proposals); + + self.shared.store_snapshot(Arc::clone(&new_snapshot)); + + let tx_pool_controller = self.shared.tx_pool_controller(); + if tx_pool_controller.service_started() { + if let Err(e) = tx_pool_controller.update_tx_pool_for_reorg( + fork.detached_blocks().clone(), + fork.attached_blocks().clone(), + fork.detached_proposal_id().clone(), + new_snapshot, + ) { + error!("[verify block] notify update_tx_pool_for_reorg error {}", e); + } + } + + let block_ref: &BlockView = █ + self.shared + .notify_controller() + .notify_new_block(block_ref.clone()); + if log_enabled!(ckb_logger::Level::Trace) { + self.print_chain(10); + } + if let Some(metrics) = ckb_metrics::handle() { + metrics.ckb_chain_tip.set(block.header().number() as i64); + } + + Ok(VerifiedBlockStatus::FirstSeenAndVerified) + } else { + self.shared.refresh_snapshot(); + info!( + "[verify block] uncle: {}, hash: {:#x}, epoch: {:#}, total_diff: {:#x}, txs: {}", + 
block.header().number(), + block.header().hash(), + block.header().epoch(), + cannon_total_difficulty, + block.transactions().len() + ); + + let tx_pool_controller = self.shared.tx_pool_controller(); + if tx_pool_controller.service_started() { + let block_ref: &BlockView = █ + if let Err(e) = tx_pool_controller.notify_new_uncle(block_ref.as_uncle()) { + error!("[verify block] notify new_uncle error {}", e); + } + } + Ok(VerifiedBlockStatus::FirstSeenButNotVerified) + } + } + + pub(crate) fn update_proposal_table(&mut self, fork: &ForkChanges) { + for blk in fork.detached_blocks() { + self.proposal_table.remove(blk.header().number()); + } + for blk in fork.attached_blocks() { + self.proposal_table + .insert(blk.header().number(), blk.union_proposal_ids()); + } + self.reload_proposal_table(fork); + } + + // if rollback happen, go back check whether need reload proposal_table from block + pub(crate) fn reload_proposal_table(&mut self, fork: &ForkChanges) { + if fork.has_detached() { + let proposal_window = self.shared.consensus().tx_proposal_window(); + let detached_front = fork + .detached_blocks() + .front() + .map(|blk| blk.header().number()) + .expect("detached_blocks is not empty"); + if detached_front < 2 { + return; + } + let common = detached_front - 1; + let new_tip = fork + .attached_blocks() + .back() + .map(|blk| blk.header().number()) + .unwrap_or(common); + + let proposal_start = + cmp::max(1, (new_tip + 1).saturating_sub(proposal_window.farthest())); + + debug!("reload_proposal_table [{}, {}]", proposal_start, common); + for bn in proposal_start..=common { + let blk = self + .shared + .store() + .get_block_hash(bn) + .and_then(|hash| self.shared.store().get_block(&hash)) + .expect("block stored"); + + self.proposal_table.insert(bn, blk.union_proposal_ids()); + } + } + } + + pub(crate) fn rollback(&self, fork: &ForkChanges, txn: &StoreTransaction) -> Result<(), Error> { + for block in fork.detached_blocks().iter().rev() { + txn.detach_block(block)?; + 
detach_block_cell(txn, block)?; + } + Ok(()) + } + + fn alignment_fork( + &self, + fork: &mut ForkChanges, + index: &mut GlobalIndex, + new_tip_number: BlockNumber, + current_tip_number: BlockNumber, + ) { + if new_tip_number <= current_tip_number { + for bn in new_tip_number..=current_tip_number { + let hash = self + .shared + .store() + .get_block_hash(bn) + .expect("block hash stored before alignment_fork"); + let old_block = self + .shared + .store() + .get_block(&hash) + .expect("block data stored before alignment_fork"); + fork.detached_blocks.push_back(old_block); + } + } else { + while index.number > current_tip_number { + if index.unseen { + let ext = self + .shared + .store() + .get_block_ext(&index.hash) + .expect("block ext stored before alignment_fork"); + if ext.verified.is_none() { + fork.dirty_exts.push_front(ext) + } else { + index.unseen = false; + } + } + let new_block = self + .shared + .store() + .get_block(&index.hash) + .expect("block data stored before alignment_fork"); + index.forward(new_block.data().header().raw().parent_hash()); + fork.attached_blocks.push_front(new_block); + } + } + } + + fn find_fork_until_latest_common(&self, fork: &mut ForkChanges, index: &mut GlobalIndex) { + loop { + if index.number == 0 { + break; + } + let detached_hash = self + .shared + .store() + .get_block_hash(index.number) + .expect("detached hash stored before find_fork_until_latest_common"); + if detached_hash == index.hash { + break; + } + let detached_blocks = self + .shared + .store() + .get_block(&detached_hash) + .expect("detached block stored before find_fork_until_latest_common"); + fork.detached_blocks.push_front(detached_blocks); + + if index.unseen { + let ext = self + .shared + .store() + .get_block_ext(&index.hash) + .expect("block ext stored before find_fork_until_latest_common"); + if ext.verified.is_none() { + fork.dirty_exts.push_front(ext) + } else { + index.unseen = false; + } + } + + let attached_block = self + .shared + .store() + 
.get_block(&index.hash) + .expect("attached block stored before find_fork_until_latest_common"); + index.forward(attached_block.data().header().raw().parent_hash()); + fork.attached_blocks.push_front(attached_block); + } + } + + pub(crate) fn find_fork( + &self, + fork: &mut ForkChanges, + current_tip_number: BlockNumber, + new_tip_block: &BlockView, + new_tip_ext: BlockExt, + ) { + let new_tip_number = new_tip_block.header().number(); + fork.dirty_exts.push_front(new_tip_ext); + + // attached_blocks = forks[latest_common + 1 .. new_tip] + // detached_blocks = chain[latest_common + 1 .. old_tip] + fork.attached_blocks.push_front(new_tip_block.clone()); + + let mut index = GlobalIndex::new( + new_tip_number - 1, + new_tip_block.data().header().raw().parent_hash(), + true, + ); + + // if new_tip_number <= current_tip_number + // then detached_blocks.extend(chain[new_tip_number .. =current_tip_number]) + // if new_tip_number > current_tip_number + // then attached_blocks.extend(forks[current_tip_number + 1 .. 
=new_tip_number]) + self.alignment_fork(fork, &mut index, new_tip_number, current_tip_number); + + // find latest common ancestor + self.find_fork_until_latest_common(fork, &mut index); + + is_sorted_assert(fork); + } + + // we found new best_block + pub(crate) fn reconcile_main_chain( + &self, + txn: Arc, + fork: &mut ForkChanges, + switch: Switch, + ) -> Result<(), Error> { + if fork.attached_blocks().is_empty() { + return Ok(()); + } + + let txs_verify_cache = self.shared.txs_verify_cache(); + + let consensus = self.shared.consensus(); + let hardfork_switch = consensus.hardfork_switch(); + let during_hardfork = fork.during_hardfork(hardfork_switch); + let async_handle = self.shared.tx_pool_controller().handle(); + + if during_hardfork { + async_handle.block_on(async { + txs_verify_cache.write().await.clear(); + }); + } + + let consensus = self.shared.cloned_consensus(); + let start_block_header = fork.attached_blocks()[0].header(); + let mmr_size = leaf_index_to_mmr_size(start_block_header.number() - 1); + trace!("light-client: new chain root MMR with size = {}", mmr_size); + let mut mmr = ChainRootMMR::new(mmr_size, txn.as_ref()); + + let verified_len = fork.verified_len(); + for b in fork.attached_blocks().iter().take(verified_len) { + txn.attach_block(b)?; + attach_block_cell(&txn, b)?; + mmr.push(b.digest()) + .map_err(|e| InternalErrorKind::MMR.other(e))?; + } + + let verify_context = VerifyContext::new(Arc::clone(&txn), consensus); + + let mut found_error = None; + for (ext, b) in fork + .dirty_exts + .iter() + .zip(fork.attached_blocks.iter().skip(verified_len)) + { + if !switch.disable_all() { + if found_error.is_none() { + let log_now = std::time::Instant::now(); + let resolved = self.resolve_block_transactions(&txn, b, &verify_context); + debug!( + "resolve_block_transactions {} cost: {:?}", + b.hash(), + log_now.elapsed() + ); + match resolved { + Ok(resolved) => { + let verified = { + let contextual_block_verifier = ContextualBlockVerifier::new( + 
verify_context.clone(), + async_handle, + switch, + Arc::clone(&txs_verify_cache), + &mmr, + ); + let log_now = std::time::Instant::now(); + let verify_result = contextual_block_verifier.verify(&resolved, b); + debug!( + "contextual_block_verifier {} cost: {:?}", + b.hash(), + log_now.elapsed() + ); + verify_result + }; + match verified { + Ok((cycles, cache_entries)) => { + let txs_sizes = resolved + .iter() + .map(|rtx| { + rtx.transaction.data().serialized_size_in_block() as u64 + }) + .collect(); + txn.attach_block(b)?; + attach_block_cell(&txn, b)?; + mmr.push(b.digest()) + .map_err(|e| InternalErrorKind::MMR.other(e))?; + + self.insert_ok_ext( + &txn, + &b.header().hash(), + ext.clone(), + Some(&cache_entries), + Some(txs_sizes), + )?; + + if !switch.disable_script() && b.transactions().len() > 1 { + self.monitor_block_txs_verified( + b, + &resolved, + &cache_entries, + cycles, + ); + } + } + Err(err) => { + self.print_error(b, &err); + found_error = Some(err); + self.insert_failure_ext(&txn, &b.header().hash(), ext.clone())?; + } + } + } + Err(err) => { + found_error = Some(err); + self.insert_failure_ext(&txn, &b.header().hash(), ext.clone())?; + } + } + } else { + self.insert_failure_ext(&txn, &b.header().hash(), ext.clone())?; + } + } else { + txn.attach_block(b)?; + attach_block_cell(&txn, b)?; + mmr.push(b.digest()) + .map_err(|e| InternalErrorKind::MMR.other(e))?; + self.insert_ok_ext(&txn, &b.header().hash(), ext.clone(), None, None)?; + } + } + + if let Some(err) = found_error { + Err(err) + } else { + trace!("light-client: commit"); + // Before commit, all new MMR nodes are in memory only. 
+ mmr.commit().map_err(|e| InternalErrorKind::MMR.other(e))?; + Ok(()) + } + } + + fn resolve_block_transactions( + &self, + txn: &StoreTransaction, + block: &BlockView, + verify_context: &HC, + ) -> Result>, Error> { + let mut seen_inputs = HashSet::new(); + let block_cp = BlockCellProvider::new(block)?; + let transactions = block.transactions(); + let cell_provider = OverlayCellProvider::new(&block_cp, txn); + let resolved = transactions + .iter() + .cloned() + .map(|tx| { + resolve_transaction(tx, &mut seen_inputs, &cell_provider, verify_context) + .map(Arc::new) + }) + .collect::>, _>>()?; + Ok(resolved) + } + + fn insert_ok_ext( + &self, + txn: &StoreTransaction, + hash: &Byte32, + mut ext: BlockExt, + cache_entries: Option<&[Completed]>, + txs_sizes: Option>, + ) -> Result<(), Error> { + ext.verified = Some(true); + if let Some(entries) = cache_entries { + let (txs_fees, cycles) = entries + .iter() + .map(|entry| (entry.fee, entry.cycles)) + .unzip(); + ext.txs_fees = txs_fees; + ext.cycles = Some(cycles); + } + ext.txs_sizes = txs_sizes; + txn.insert_block_ext(hash, &ext) + } + + fn insert_failure_ext( + &self, + txn: &StoreTransaction, + hash: &Byte32, + mut ext: BlockExt, + ) -> Result<(), Error> { + ext.verified = Some(false); + txn.insert_block_ext(hash, &ext) + } + + fn monitor_block_txs_verified( + &self, + b: &BlockView, + resolved: &[Arc], + cache_entries: &[Completed], + cycles: Cycle, + ) { + info!( + "[block_verifier] block number: {}, hash: {}, size:{}/{}, cycles: {}/{}", + b.number(), + b.hash(), + b.data().serialized_size_without_uncle_proposals(), + self.shared.consensus().max_block_bytes(), + cycles, + self.shared.consensus().max_block_cycles() + ); + + // log tx verification result for monitor node + if log_enabled_target!("ckb_tx_monitor", Trace) { + // `cache_entries` already excludes cellbase tx, but `resolved` includes cellbase tx, skip it + // to make them aligned + for (rtx, cycles) in resolved.iter().skip(1).zip(cache_entries.iter()) 
{ + trace_target!( + "ckb_tx_monitor", + r#"{{"tx_hash":"{:#x}","cycles":{}}}"#, + rtx.transaction.hash(), + cycles.cycles + ); + } + } + } + + fn print_error(&self, b: &BlockView, err: &Error) { + error!( + "block verify error, block number: {}, hash: {}, error: {:?}", + b.header().number(), + b.header().hash(), + err + ); + if log_enabled!(ckb_logger::Level::Trace) { + trace!("block {}", b); + } + } + + // TODO: beatify + fn print_chain(&self, len: u64) { + debug!("Chain {{"); + + let snapshot = self.shared.snapshot(); + let tip_header = snapshot.tip_header(); + let tip_number = tip_header.number(); + + let bottom = tip_number - cmp::min(tip_number, len); + + for number in (bottom..=tip_number).rev() { + let hash = snapshot + .get_block_hash(number) + .unwrap_or_else(|| panic!("invalid block number({number}), tip={tip_number}")); + debug!(" {number} => {hash}"); + } + + debug!("}}"); + } + + fn make_fork_for_truncate(&self, target: &HeaderView, current_tip: &HeaderView) -> ForkChanges { + let mut fork = ForkChanges::default(); + let store = self.shared.store(); + for bn in (target.number() + 1)..=current_tip.number() { + let hash = store.get_block_hash(bn).expect("index checked"); + let old_block = store.get_block(&hash).expect("index checked"); + fork.detached_blocks.push_back(old_block); + } + is_sorted_assert(&fork); + fork + } + + // Truncate the main chain + // Use for testing only + pub(crate) fn truncate( + &mut self, + proposal_table: &mut ProposalTable, + target_tip_hash: &Byte32, + ) -> Result<(), Error> { + let snapshot = Arc::clone(&self.shared.snapshot()); + assert!(snapshot.is_main_chain(target_tip_hash)); + + let target_tip_header = snapshot.get_block_header(target_tip_hash).expect("checked"); + let target_block_ext = snapshot.get_block_ext(target_tip_hash).expect("checked"); + let target_epoch_ext = snapshot + .get_block_epoch_index(target_tip_hash) + .and_then(|index| snapshot.get_epoch_ext(&index)) + .expect("checked"); + let origin_proposals = 
snapshot.proposals(); + let mut fork = self.make_fork_for_truncate(&target_tip_header, snapshot.tip_header()); + + let db_txn = self.shared.store().begin_transaction(); + self.rollback(&fork, &db_txn)?; + + db_txn.insert_tip_header(&target_tip_header)?; + db_txn.insert_current_epoch_ext(&target_epoch_ext)?; + + for blk in fork.attached_blocks() { + db_txn.delete_block(blk)?; + } + db_txn.commit()?; + + self.update_proposal_table(&fork); + let (detached_proposal_id, new_proposals) = + proposal_table.finalize(origin_proposals, target_tip_header.number()); + fork.detached_proposal_id = detached_proposal_id; + + let new_snapshot = self.shared.new_snapshot( + target_tip_header, + target_block_ext.total_difficulty, + target_epoch_ext, + new_proposals, + ); + + self.shared.store_snapshot(Arc::clone(&new_snapshot)); + + // NOTE: Dont update tx-pool when truncate + + Ok(()) + } +} + +#[cfg(debug_assertions)] +fn is_sorted_assert(fork: &ForkChanges) { + assert!(fork.is_sorted()) +} + +#[cfg(not(debug_assertions))] +fn is_sorted_assert(_fork: &ForkChanges) {} From 73f808668f2dcf54d7622b303db12318850327a3 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 13 Nov 2023 15:32:36 +0800 Subject: [PATCH 151/357] Move out consume_unverified_blocks and consume_orphan --- chain/src/chain.rs | 1446 +++++--------------------------------------- 1 file changed, 144 insertions(+), 1302 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 5bd147b982..8c47e50862 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -1,73 +1,36 @@ //! CKB chain service. 
#![allow(missing_docs)] -use crate::forkchanges::ForkChanges; +use crate::consume_orphan::ConsumeOrphan; +use crate::consume_unverified::ConsumeUnverifiedBlocks; use crate::orphan_block_pool::OrphanBlockPool; +use crate::{ + tell_synchronizer_to_punish_the_bad_peer, LonelyBlock, LonelyBlockWithCallback, + ProcessBlockRequest, TruncateRequest, UnverifiedBlock, VerifyCallback, VerifyResult, +}; use ckb_channel::{self as channel, select, Receiver, SendError, Sender}; use ckb_constant::sync::BLOCK_DOWNLOAD_WINDOW; -use ckb_error::{is_internal_db_error, Error, InternalErrorKind}; -use ckb_logger::Level::Trace; -use ckb_logger::{ - self, debug, error, info, log_enabled, log_enabled_target, trace, trace_target, warn, -}; -use ckb_merkle_mountain_range::leaf_index_to_mmr_size; -use ckb_network::{tokio, PeerIndex}; +use ckb_error::{Error, InternalErrorKind}; +use ckb_logger::{self, debug, error, info, warn}; +use ckb_network::tokio; use ckb_proposal_table::ProposalTable; #[cfg(debug_assertions)] use ckb_rust_unstable_port::IsSorted; -use ckb_shared::block_status::BlockStatus; use ckb_shared::shared::Shared; use ckb_shared::types::VerifyFailedBlockInfo; use ckb_stop_handler::{new_crossbeam_exit_rx, register_thread}; -use ckb_store::{attach_block_cell, detach_block_cell, ChainStore, StoreTransaction}; -use ckb_systemtime::unix_time_as_millis; +use ckb_store::ChainStore; use ckb_types::{ - core::{ - cell::{ - resolve_transaction, BlockCellProvider, HeaderChecker, OverlayCellProvider, - ResolvedTransaction, - }, - service::Request, - BlockExt, BlockNumber, BlockView, Cycle, HeaderView, - }, + core::{cell::HeaderChecker, service::Request, BlockView}, packed::Byte32, - utilities::merkle_mountain_range::ChainRootMMR, - H256, U256, }; -use ckb_util::Mutex; -use ckb_verification::cache::Completed; -use ckb_verification::{BlockVerifier, InvalidParentError, NonContextualBlockTxsVerifier}; -use ckb_verification_contextual::{ContextualBlockVerifier, VerifyContext}; +use 
ckb_verification::{BlockVerifier, NonContextualBlockTxsVerifier}; use ckb_verification_traits::{Switch, Verifier}; -use std::collections::HashSet; use std::sync::Arc; -use std::{cmp, thread}; +use std::thread; const ORPHAN_BLOCK_SIZE: usize = (BLOCK_DOWNLOAD_WINDOW * 2) as usize; -type ProcessBlockRequest = Request; -type TruncateRequest = Request>; - -pub type VerifyResult = Result; - -pub type VerifyCallback = Box; - -/// VerifiedBlockStatus is -#[derive(Debug, Clone, PartialEq)] -pub enum VerifiedBlockStatus { - // The block is being seen for the first time. - FirstSeenAndVerified, - - // The block is being seen for the first time, but not verify it yet - FirstSeenButNotVerified, - - // The block has been verified before. - PreviouslyVerified, - - // The block has been verified before, but not veriify it yet - PreviouslyUnVerified, -} - /// Controller to the chain service. /// /// The controller is internally reference-counted and can be freely cloned. @@ -212,167 +175,47 @@ impl ChainController { } } -pub(crate) struct GlobalIndex { - pub(crate) number: BlockNumber, - pub(crate) hash: Byte32, - pub(crate) unseen: bool, -} - -impl GlobalIndex { - pub(crate) fn new(number: BlockNumber, hash: Byte32, unseen: bool) -> GlobalIndex { - GlobalIndex { - number, - hash, - unseen, - } - } - - pub(crate) fn forward(&mut self, hash: Byte32) { - self.number -= 1; - self.hash = hash; - } -} - -/// Chain background service -/// -/// The ChainService provides a single-threaded background executor. 
-#[derive(Clone)] -pub struct ChainService { +pub struct ChainServicesBuilder { shared: Shared, - - orphan_blocks_broker: Arc, - + proposal_table: ProposalTable, verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, } -#[derive(Clone)] -pub struct LonelyBlock { - pub block: Arc, - pub peer_id: Option, - pub switch: Option, -} - -impl LonelyBlock { - pub fn with_callback(self, verify_callback: Option) -> LonelyBlockWithCallback { - LonelyBlockWithCallback { - lonely_block: self, - verify_callback, - } - } - - pub fn without_callback(self) -> LonelyBlockWithCallback { - self.with_callback(None) - } -} - -pub struct LonelyBlockWithCallback { - pub lonely_block: LonelyBlock, - pub verify_callback: Option, -} - -impl LonelyBlockWithCallback { - fn execute_callback(self, verify_result: VerifyResult) { - match self.verify_callback { - Some(verify_callback) => { - verify_callback(verify_result); - } - None => {} - } - } - - pub fn block(&self) -> &Arc { - &self.lonely_block.block - } - pub fn peer_id(&self) -> Option { - self.lonely_block.peer_id - } - pub fn switch(&self) -> Option { - self.lonely_block.switch - } -} - -impl LonelyBlockWithCallback { - fn combine_parent_header(self, parent_header: HeaderView) -> UnverifiedBlock { - UnverifiedBlock { - unverified_block: self, - parent_header, - } - } -} - -struct UnverifiedBlock { - pub unverified_block: LonelyBlockWithCallback, - pub parent_header: HeaderView, -} - -impl UnverifiedBlock { - fn block(&self) -> &Arc { - self.unverified_block.block() - } - - pub fn peer_id(&self) -> Option { - self.unverified_block.peer_id() - } - - pub fn switch(&self) -> Option { - self.unverified_block.switch() - } - - fn execute_callback(self, verify_result: VerifyResult) { - self.unverified_block.execute_callback(verify_result) - } -} - -impl ChainService { - /// Create a new ChainService instance with shared and initial proposal_table. 
+impl ChainServicesBuilder { pub fn new( shared: Shared, + proposal_table: ProposalTable, verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, - ) -> ChainService { - ChainService { + ) -> Self { + ChainServicesBuilder { shared, - orphan_blocks_broker: Arc::new(OrphanBlockPool::with_capacity(ORPHAN_BLOCK_SIZE)), + proposal_table, verify_failed_blocks_tx, } } - /// start background single-threaded service with specified thread_name. - pub fn start( - mut self, - mut proposal_table: ProposalTable, - thread_name: Option, - ) -> ChainController { - let orphan_blocks_broker_clone = Arc::clone(&self.orphan_blocks_broker); - - let signal_receiver = new_crossbeam_exit_rx(); - let (process_block_sender, process_block_receiver) = - channel::bounded(BLOCK_DOWNLOAD_WINDOW as usize); - - let (truncate_sender, truncate_receiver) = channel::bounded(1); + pub fn start(self) -> ChainController { + let orphan_blocks_broker = Arc::new(OrphanBlockPool::with_capacity(ORPHAN_BLOCK_SIZE)); - // Mainly for test: give an empty thread_name - let mut thread_builder = thread::Builder::new(); - if let Some(name) = thread_name { - thread_builder = thread_builder.name(name.to_string()); - } - let tx_control = self.shared.tx_pool_controller().clone(); let (unverified_queue_stop_tx, unverified_queue_stop_rx) = ckb_channel::bounded::<()>(1); - let (search_orphan_pool_stop_tx, search_orphan_pool_stop_rx) = - ckb_channel::bounded::<()>(1); - let (unverified_tx, unverified_rx) = channel::bounded::(BLOCK_DOWNLOAD_WINDOW as usize * 3); - let unverified_consumer_thread = thread::Builder::new() - .name("verify_blocks".into()) + let consumer_unverified_thread = thread::Builder::new() + .name("consume_unverified_blocks".into()) .spawn({ - let chain_service = self.clone(); + let shared = self.shared.clone(); + let verify_failed_blocks_tx = self.verify_failed_blocks_tx.clone(); move || { - chain_service.start_consume_unverified_blocks( - &mut proposal_table, - unverified_queue_stop_rx, + let mut 
consume_unverified = ConsumeUnverifiedBlocks::new( + shared, unverified_rx, - ) + self.proposal_table, + verify_failed_blocks_tx, + unverified_queue_stop_rx, + ); + + consume_unverified.start(); } }) .expect("start unverified_queue consumer thread should ok"); @@ -380,378 +223,132 @@ impl ChainService { let (lonely_block_tx, lonely_block_rx) = channel::bounded::(BLOCK_DOWNLOAD_WINDOW as usize); + let (search_orphan_pool_stop_tx, search_orphan_pool_stop_rx) = + ckb_channel::bounded::<()>(1); + let search_orphan_pool_thread = thread::Builder::new() - .name("search_orphan".into()) + .name("consume_orphan_blocks".into()) .spawn({ - let chain_service = self.clone(); + let orphan_blocks_broker = orphan_blocks_broker.clone(); + let shared = self.shared.clone(); + use crate::consume_orphan::ConsumeOrphan; + let verify_failed_block_tx = self.verify_failed_blocks_tx.clone(); move || { - chain_service.start_search_orphan_pool( - search_orphan_pool_stop_rx, - lonely_block_rx, + let consume_orphan = ConsumeOrphan::new( + shared, + orphan_blocks_broker, unverified_tx, - ) + lonely_block_rx, + verify_failed_block_tx, + search_orphan_pool_stop_rx, + ); + consume_orphan.start(); } }) .expect("start search_orphan_pool thread should ok"); - let chain_jh = thread_builder - .spawn(move || loop { - select! 
{ - recv(process_block_receiver) -> msg => match msg { - Ok(Request { responder, arguments: lonely_block }) => { - let _ = tx_control.suspend_chunk_process(); - let _ = responder.send(self.asynchronous_process_block(lonely_block, lonely_block_tx.clone())); - let _ = tx_control.continue_chunk_process(); + let (process_block_tx, process_block_rx) = channel::bounded(BLOCK_DOWNLOAD_WINDOW as usize); - if let Some(metrics) = ckb_metrics::handle() { - metrics - .ckb_block_process_duration - .observe(instant.elapsed().as_secs_f64()); - } - }, - _ => { - error!("process_block_receiver closed"); - break; - }, - }, - recv(truncate_receiver) -> msg => match msg { - Ok(Request { responder, arguments: target_tip_hash }) => { - let _ = tx_control.suspend_chunk_process(); - todo!("move truncate process to consume unverified_block"); - // let _ = responder.send(self.truncate( - // &mut proposal_table, - // &target_tip_hash)); - let _ = tx_control.continue_chunk_process(); - }, - _ => { - error!("truncate_receiver closed"); - break; - }, - }, - recv(signal_receiver) -> _ => { - info!("ChainService received exit signal, exit now"); - unverified_queue_stop_tx.send(()); - search_orphan_pool_stop_tx.send(()); + let (truncate_block_tx, truncate_block_rx) = channel::bounded(1); - search_orphan_pool_thread.join(); - unverified_consumer_thread.join(); - break; - } - } - }) - .expect("Start ChainService failed"); + let chain_service: ChainService = ChainService::new( + self.shared, + process_block_rx, + truncate_block_rx, + lonely_block_tx, + self.verify_failed_blocks_tx, + ); + let chain_service_thread = thread::Builder::new() + .name("ChainService".into()) + .spawn({ + move || { + chain_service.start(); - register_thread("ChainService", chain_jh); + search_orphan_pool_stop_tx.send(()); + search_orphan_pool_thread.join(); - ChainController::new( - process_block_sender, - truncate_sender, - orphan_blocks_broker_clone, - ) - } + unverified_queue_stop_tx.send(()); + 
consumer_unverified_thread.join(); + } + }) + .expect("start chain_service thread should ok"); + register_thread("ChainServices", chain_service_thread); - fn start_consume_unverified_blocks( - &self, - proposal_table: &mut ProposalTable, - unverified_queue_stop_rx: Receiver<()>, - unverified_block_rx: Receiver, - ) { - let mut begin_loop = std::time::Instant::now(); - loop { - begin_loop = std::time::Instant::now(); - select! { - recv(unverified_queue_stop_rx) -> _ => { - info!("unverified_queue_consumer got exit signal, exit now"); - return; - }, - recv(unverified_block_rx) -> msg => match msg { - Ok(unverified_task) => { - // process this unverified block - trace!("got an unverified block, wait cost: {:?}", begin_loop.elapsed()); - self.consume_unverified_blocks(proposal_table, unverified_task); - trace!("consume_unverified_blocks cost: {:?}", begin_loop.elapsed()); - }, - Err(err) => { - error!("unverified_block_rx err: {}", err); - return; - }, - }, - default => {}, - } - } + ChainController::new(process_block_tx, truncate_block_tx, orphan_blocks_broker) } +} - fn consume_unverified_blocks( - &self, - proposal_table: &mut ProposalTable, - unverified_block: UnverifiedBlock, - ) { - // process this unverified block - let verify_result = self.verify_block(proposal_table, &unverified_block); - match &verify_result { - Ok(_) => { - let log_now = std::time::Instant::now(); - self.shared - .remove_block_status(&unverified_block.block().hash()); - let log_elapsed_remove_block_status = log_now.elapsed(); - self.shared - .remove_header_view(&unverified_block.block().hash()); - debug!( - "block {} remove_block_status cost: {:?}, and header_view cost: {:?}", - unverified_block.block().hash(), - log_elapsed_remove_block_status, - log_now.elapsed() - ); - } - Err(err) => { - error!( - "verify [{:?}]'s block {} failed: {}", - unverified_block.peer_id(), - unverified_block.block().hash(), - err - ); - - let tip = self - .shared - .store() - .get_tip_header() - 
.expect("tip_header must exist"); - let tip_ext = self - .shared - .store() - .get_block_ext(&tip.hash()) - .expect("tip header's ext must exist"); +/// Chain background service +/// +/// The ChainService provides a single-threaded background executor. +#[derive(Clone)] +pub struct ChainService { + shared: Shared, - self.shared.set_unverified_tip(ckb_shared::HeaderIndex::new( - tip.clone().number(), - tip.clone().hash(), - tip_ext.total_difficulty, - )); + process_block_rx: Receiver, + truncate_block_rx: Receiver, - self.shared.insert_block_status( - unverified_block.block().hash(), - BlockStatus::BLOCK_INVALID, - ); - error!( - "set_unverified tip to {}-{}, because verify {} failed: {}", - tip.number(), - tip.hash(), - unverified_block.block().hash(), - err - ); + lonely_block_tx: Sender, + verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, +} +impl ChainService { + /// Create a new ChainService instance with shared and initial proposal_table. + pub fn new( + shared: Shared, + process_block_rx: Receiver, + truncate_block_rx: Receiver, - self.tell_synchronizer_to_punish_the_bad_peer( - &unverified_block.unverified_block, - err, - ); - } + lonely_block_tx: Sender, + verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, + ) -> ChainService { + ChainService { + shared, + process_block_rx, + truncate_block_rx, + lonely_block_tx, + verify_failed_blocks_tx, } - - unverified_block.execute_callback(verify_result); } - fn start_search_orphan_pool( - &self, - search_orphan_pool_stop_rx: Receiver<()>, - lonely_block_rx: Receiver, - unverified_block_tx: Sender, - ) { + /// start background single-threaded service with specified thread_name. + pub fn start(mut self) { + let signal_receiver = new_crossbeam_exit_rx(); + + // Mainly for test: give an empty thread_name + let tx_control = self.shared.tx_pool_controller().clone(); loop { select! 
{ - recv(search_orphan_pool_stop_rx) -> _ => { - info!("unverified_queue_consumer got exit signal, exit now"); - return; - }, - recv(lonely_block_rx) -> msg => match msg { - Ok(lonely_block) => { - self.orphan_blocks_broker.insert(lonely_block); - self.search_orphan_pool(unverified_block_tx.clone()) + recv(self.process_block_rx) -> msg => match msg { + Ok(Request { responder, arguments: lonely_block }) => { + let _ = tx_control.suspend_chunk_process(); + let _ = responder.send(self.asynchronous_process_block(lonely_block)); + let _ = tx_control.continue_chunk_process(); + }, + _ => { + error!("process_block_receiver closed"); + break; }, - Err(err) => { - error!("lonely_block_rx err: {}", err); - return - } }, - } - } - } - fn search_orphan_pool(&self, unverified_block_tx: Sender) { - for leader_hash in self.orphan_blocks_broker.clone_leaders() { - if !self - .shared - .contains_block_status(&leader_hash, BlockStatus::BLOCK_PARTIAL_STORED) - { - trace!("orphan leader: {} not partial stored", leader_hash); - continue; - } - - let descendants: Vec = self - .orphan_blocks_broker - .remove_blocks_by_parent(&leader_hash); - if descendants.is_empty() { - error!( - "leader {} does not have any descendants, this shouldn't happen", - leader_hash - ); - continue; - } - let descendants_len = descendants.len(); - let (first_descendants_number, last_descendants_number) = ( - descendants - .first() - .expect("descdant not empty") - .block() - .number(), - descendants - .last() - .expect("descdant not empty") - .block() - .number(), - ); - - let mut accept_error_occurred = false; - for descendant_block in descendants { - match self.accept_block(descendant_block.block().to_owned()) { - Err(err) => { - self.tell_synchronizer_to_punish_the_bad_peer(&descendant_block, &err); - - accept_error_occurred = true; - error!( - "accept block {} failed: {}", - descendant_block.block().hash(), - err - ); - - descendant_block.execute_callback(Err(err)); - continue; - } - Ok(accepted_opt) => 
match accepted_opt { - Some((parent_header, total_difficulty)) => { - let unverified_block: UnverifiedBlock = - descendant_block.combine_parent_header(parent_header); - let block_number = unverified_block.block().number(); - let block_hash = unverified_block.block().hash(); - - match unverified_block_tx.send(unverified_block) { - Ok(_) => {} - Err(SendError(unverified_block)) => { - error!("send unverified_block_tx failed, the receiver has been closed"); - let err: Error = InternalErrorKind::System - .other(format!("send unverified_block_tx failed, the receiver have been close")).into(); - - self.tell_synchronizer_to_punish_the_bad_peer( - &unverified_block.unverified_block, - &err, - ); - - let verify_result: VerifyResult = Err(err); - unverified_block.execute_callback(verify_result); - continue; - } - }; - - if total_difficulty - .gt(self.shared.get_unverified_tip().total_difficulty()) - { - self.shared.set_unverified_tip(ckb_shared::HeaderIndex::new( - block_number.clone(), - block_hash.clone(), - total_difficulty, - )); - debug!("set unverified_tip to {}-{}, while unverified_tip - verified_tip = {}", - block_number.clone(), - block_hash.clone(), - block_number.saturating_sub(self.shared.snapshot().tip_number())) - } else { - debug!("received a block {}-{} with lower or equal difficulty than unverified_tip {}-{}", - block_number, - block_hash, - self.shared.get_unverified_tip().number(), - self.shared.get_unverified_tip().hash(), - ); - } - } - None => { - info!( - "doesn't accept block {}, because it has been stored", - descendant_block.block().hash() - ); - let verify_result: VerifyResult = - Ok(VerifiedBlockStatus::PreviouslyUnVerified); - descendant_block.execute_callback(verify_result); - } + recv(self.truncate_block_rx) -> msg => match msg { + Ok(Request { responder, arguments: target_tip_hash }) => { + let _ = tx_control.suspend_chunk_process(); + todo!("move truncate process to consume unverified_block"); + // let _ = responder.send(self.truncate( + // 
&mut proposal_table, + // &target_tip_hash)); + let _ = tx_control.continue_chunk_process(); + }, + _ => { + error!("truncate_receiver closed"); + break; }, + }, + recv(signal_receiver) -> _ => { + info!("ChainService received exit signal, exit now"); + break; } } - - if !accept_error_occurred { - debug!( - "accept {} blocks [{}->{}] success", - descendants_len, first_descendants_number, last_descendants_number - ) - } - } - } - - fn make_fork_for_truncate(&self, target: &HeaderView, current_tip: &HeaderView) -> ForkChanges { - let mut fork = ForkChanges::default(); - let store = self.shared.store(); - for bn in (target.number() + 1)..=current_tip.number() { - let hash = store.get_block_hash(bn).expect("index checked"); - let old_block = store.get_block(&hash).expect("index checked"); - fork.detached_blocks.push_back(old_block); - } - is_sorted_assert(&fork); - fork - } - - // Truncate the main chain - // Use for testing only - pub(crate) fn truncate( - &mut self, - proposal_table: &mut ProposalTable, - target_tip_hash: &Byte32, - ) -> Result<(), Error> { - let snapshot = Arc::clone(&self.shared.snapshot()); - assert!(snapshot.is_main_chain(target_tip_hash)); - - let target_tip_header = snapshot.get_block_header(target_tip_hash).expect("checked"); - let target_block_ext = snapshot.get_block_ext(target_tip_hash).expect("checked"); - let target_epoch_ext = snapshot - .get_block_epoch_index(target_tip_hash) - .and_then(|index| snapshot.get_epoch_ext(&index)) - .expect("checked"); - let origin_proposals = snapshot.proposals(); - let mut fork = self.make_fork_for_truncate(&target_tip_header, snapshot.tip_header()); - - let db_txn = self.shared.store().begin_transaction(); - self.rollback(&fork, &db_txn)?; - - db_txn.insert_tip_header(&target_tip_header)?; - db_txn.insert_current_epoch_ext(&target_epoch_ext)?; - - for blk in fork.attached_blocks() { - db_txn.delete_block(blk)?; } - db_txn.commit()?; - - self.update_proposal_table(&fork, proposal_table); - let 
(detached_proposal_id, new_proposals) = - proposal_table.finalize(origin_proposals, target_tip_header.number()); - fork.detached_proposal_id = detached_proposal_id; - - let new_snapshot = self.shared.new_snapshot( - target_tip_header, - target_block_ext.total_difficulty, - target_epoch_ext, - new_proposals, - ); - - self.shared.store_snapshot(Arc::clone(&new_snapshot)); - - // NOTE: Dont update tx-pool when truncate - - Ok(()) } fn non_contextual_verify(&self, block: &BlockView) -> Result<(), Error> { @@ -774,11 +371,7 @@ impl ChainService { } // make block IO and verify asynchronize - fn asynchronous_process_block( - &self, - lonely_block: LonelyBlockWithCallback, - lonely_block_tx: Sender, - ) { + fn asynchronous_process_block(&self, lonely_block: LonelyBlockWithCallback) { let block_number = lonely_block.block().number(); let block_hash = lonely_block.block().hash(); if block_number < 1 { @@ -789,7 +382,11 @@ impl ChainService { let result = self.non_contextual_verify(&lonely_block.block()); match result { Err(err) => { - self.tell_synchronizer_to_punish_the_bad_peer(&lonely_block, &err); + tell_synchronizer_to_punish_the_bad_peer( + self.verify_failed_blocks_tx.clone(), + &lonely_block, + &err, + ); lonely_block.execute_callback(Err(err)); return; @@ -799,7 +396,7 @@ impl ChainService { } } - match lonely_block_tx.send(lonely_block) { + match self.lonely_block_tx.send(lonely_block) { Ok(_) => {} Err(SendError(lonely_block)) => { error!("failed to notify new block to orphan pool"); @@ -808,7 +405,11 @@ impl ChainService { .other("OrphanBlock broker disconnected") .into(); - self.tell_synchronizer_to_punish_the_bad_peer(&lonely_block, &err); + tell_synchronizer_to_punish_the_bad_peer( + self.verify_failed_blocks_tx.clone(), + &lonely_block, + &err, + ); let verify_result = Err(err); lonely_block.execute_callback(verify_result); @@ -816,770 +417,11 @@ impl ChainService { } } debug!( - "processing block: {}-{}, orphan_len: {}, (tip:unverified_tip):({}:{})", + 
"processing block: {}-{}, (tip:unverified_tip):({}:{})", block_number, block_hash, - self.orphan_blocks_broker.len(), self.shared.snapshot().tip_number(), self.shared.get_unverified_tip().number(), ); } - - fn tell_synchronizer_to_punish_the_bad_peer( - &self, - lonely_block: &LonelyBlockWithCallback, - err: &Error, - ) { - let is_internal_db_error = is_internal_db_error(&err); - match lonely_block.peer_id() { - Some(peer_id) => { - let verify_failed_block_info = VerifyFailedBlockInfo { - block_hash: lonely_block.lonely_block.block.hash(), - peer_id, - message_bytes: 0, - reason: err.to_string(), - is_internal_db_error, - }; - match self.verify_failed_blocks_tx.send(verify_failed_block_info) { - Err(_err) => { - error!("ChainService failed to send verify failed block info to Synchronizer, the receiver side may have been closed, this shouldn't happen") - } - _ => {} - } - } - _ => { - debug!("Don't know which peer to punish, or don't have a channel Sender to Synchronizer, skip it") - } - } - } - - fn accept_block(&self, block: Arc) -> Result, Error> { - let (block_number, block_hash) = (block.number(), block.hash()); - - if self - .shared - .contains_block_status(&block_hash, BlockStatus::BLOCK_PARTIAL_STORED) - { - debug!("block {}-{} has been stored", block_number, block_hash); - return Ok(None); - } - - let parent_header = self - .shared - .store() - .get_block_header(&block.data().header().raw().parent_hash()) - .expect("parent already store"); - - if let Some(ext) = self.shared.store().get_block_ext(&block.hash()) { - debug!("block {}-{} has stored BlockExt", block_number, block_hash); - return Ok(Some((parent_header, ext.total_difficulty))); - } - - trace!("begin accept block: {}-{}", block.number(), block.hash()); - - let parent_ext = self - .shared - .store() - .get_block_ext(&block.data().header().raw().parent_hash()) - .expect("parent already store"); - - if parent_ext.verified == Some(false) { - return Err(InvalidParentError { - parent_hash: 
parent_header.hash(), - } - .into()); - } - - let cannon_total_difficulty = - parent_ext.total_difficulty.to_owned() + block.header().difficulty(); - - let db_txn = Arc::new(self.shared.store().begin_transaction()); - - let txn_snapshot = db_txn.get_snapshot(); - let _snapshot_block_ext = db_txn.get_update_for_block_ext(&block.hash(), &txn_snapshot); - - db_txn.insert_block(block.as_ref())?; - - let next_block_epoch = self - .shared - .consensus() - .next_epoch_ext(&parent_header, &db_txn.borrow_as_data_loader()) - .expect("epoch should be stored"); - let new_epoch = next_block_epoch.is_head(); - let epoch = next_block_epoch.epoch(); - - db_txn.insert_block_epoch_index( - &block.header().hash(), - &epoch.last_block_hash_in_previous_epoch(), - )?; - if new_epoch { - db_txn.insert_epoch_ext(&epoch.last_block_hash_in_previous_epoch(), &epoch)?; - } - - let ext = BlockExt { - received_at: unix_time_as_millis(), - total_difficulty: cannon_total_difficulty.clone(), - total_uncles_count: parent_ext.total_uncles_count + block.data().uncles().len() as u64, - verified: None, - txs_fees: vec![], - cycles: None, - txs_sizes: None, - }; - - db_txn.insert_block_ext(&block.header().hash(), &ext)?; - - db_txn.commit()?; - - self.shared - .insert_block_status(block_hash, BlockStatus::BLOCK_PARTIAL_STORED); - - Ok(Some((parent_header, cannon_total_difficulty))) - } - - fn verify_block( - &self, - proposal_table: &mut ProposalTable, - unverified_block: &UnverifiedBlock, - ) -> VerifyResult { - let UnverifiedBlock { - unverified_block: - LonelyBlockWithCallback { - lonely_block: - LonelyBlock { - block, - peer_id: _peer_id, - switch, - }, - verify_callback: _verify_callback, - }, - parent_header, - } = unverified_block; - - let switch: Switch = switch.unwrap_or_else(|| { - let mut assume_valid_target = self.shared.assume_valid_target(); - match *assume_valid_target { - Some(ref target) => { - // if the target has been reached, delete it - if target - == 
&ckb_types::prelude::Unpack::::unpack(&BlockView::hash(&block)) - { - assume_valid_target.take(); - Switch::NONE - } else { - Switch::DISABLE_SCRIPT - } - } - None => Switch::NONE, - } - }); - - let parent_ext = self - .shared - .store() - .get_block_ext(&block.data().header().raw().parent_hash()) - .expect("parent should be stored already"); - - if let Some(ext) = self.shared.store().get_block_ext(&block.hash()) { - match ext.verified { - Some(verified) => { - debug!( - "block {}-{} has been verified, previously verified result: {}", - block.number(), - block.hash(), - verified - ); - return if verified { - Ok(VerifiedBlockStatus::PreviouslyVerified) - } else { - Err(InternalErrorKind::Other - .other("block previously verified failed") - .into()) - }; - } - _ => { - // we didn't verify this block, going on verify now - } - } - } - - let cannon_total_difficulty = - parent_ext.total_difficulty.to_owned() + block.header().difficulty(); - - if parent_ext.verified == Some(false) { - return Err(InvalidParentError { - parent_hash: parent_header.hash(), - } - .into()); - } - - let ext = BlockExt { - received_at: unix_time_as_millis(), - total_difficulty: cannon_total_difficulty.clone(), - total_uncles_count: parent_ext.total_uncles_count + block.data().uncles().len() as u64, - verified: None, - txs_fees: vec![], - cycles: None, - txs_sizes: None, - }; - - let shared_snapshot = Arc::clone(&self.shared.snapshot()); - let origin_proposals = shared_snapshot.proposals(); - let current_tip_header = shared_snapshot.tip_header(); - let current_total_difficulty = shared_snapshot.total_difficulty().to_owned(); - - // is_better_than - let new_best_block = cannon_total_difficulty > current_total_difficulty; - - let mut fork = ForkChanges::default(); - - let next_block_epoch = self - .shared - .consensus() - .next_epoch_ext(&parent_header, &self.shared.store().borrow_as_data_loader()) - .expect("epoch should be stored"); - let new_epoch = next_block_epoch.is_head(); - let epoch = 
next_block_epoch.epoch(); - - let db_txn = Arc::new(self.shared.store().begin_transaction()); - if new_best_block { - debug!( - "[verify block] new best block found: {} => {:#x}, difficulty diff = {:#x}, unverified_tip: {}", - block.header().number(), - block.header().hash(), - &cannon_total_difficulty - ¤t_total_difficulty, - self.shared.get_unverified_tip().number(), - ); - self.find_fork(&mut fork, current_tip_header.number(), &block, ext); - self.rollback(&fork, &db_txn)?; - - // update and verify chain root - // MUST update index before reconcile_main_chain - let begin_reconcile_main_chain = std::time::Instant::now(); - self.reconcile_main_chain(Arc::clone(&db_txn), &mut fork, switch)?; - trace!( - "reconcile_main_chain cost {:?}", - begin_reconcile_main_chain.elapsed() - ); - - db_txn.insert_tip_header(&block.header())?; - if new_epoch || fork.has_detached() { - db_txn.insert_current_epoch_ext(&epoch)?; - } - } else { - db_txn.insert_block_ext(&block.header().hash(), &ext)?; - } - db_txn.commit()?; - - if new_best_block { - let tip_header = block.header(); - info!( - "block: {}, hash: {:#x}, epoch: {:#}, total_diff: {:#x}, txs: {}", - tip_header.number(), - tip_header.hash(), - tip_header.epoch(), - cannon_total_difficulty, - block.transactions().len() - ); - - self.update_proposal_table(&fork, proposal_table); - let (detached_proposal_id, new_proposals) = - proposal_table.finalize(origin_proposals, tip_header.number()); - fork.detached_proposal_id = detached_proposal_id; - - let new_snapshot = - self.shared - .new_snapshot(tip_header, cannon_total_difficulty, epoch, new_proposals); - - self.shared.store_snapshot(Arc::clone(&new_snapshot)); - - let tx_pool_controller = self.shared.tx_pool_controller(); - if tx_pool_controller.service_started() { - if let Err(e) = tx_pool_controller.update_tx_pool_for_reorg( - fork.detached_blocks().clone(), - fork.attached_blocks().clone(), - fork.detached_proposal_id().clone(), - new_snapshot, - ) { - error!("[verify block] 
notify update_tx_pool_for_reorg error {}", e); - } - } - - let block_ref: &BlockView = █ - self.shared - .notify_controller() - .notify_new_block(block_ref.clone()); - if log_enabled!(ckb_logger::Level::Trace) { - self.print_chain(10); - } - if let Some(metrics) = ckb_metrics::handle() { - metrics.ckb_chain_tip.set(block.header().number() as i64); - } - - Ok(VerifiedBlockStatus::FirstSeenAndVerified) - } else { - self.shared.refresh_snapshot(); - info!( - "[verify block] uncle: {}, hash: {:#x}, epoch: {:#}, total_diff: {:#x}, txs: {}", - block.header().number(), - block.header().hash(), - block.header().epoch(), - cannon_total_difficulty, - block.transactions().len() - ); - - let tx_pool_controller = self.shared.tx_pool_controller(); - if tx_pool_controller.service_started() { - let block_ref: &BlockView = █ - if let Err(e) = tx_pool_controller.notify_new_uncle(block_ref.as_uncle()) { - error!("[verify block] notify new_uncle error {}", e); - } - } - Ok(VerifiedBlockStatus::FirstSeenButNotVerified) - } - } - - pub(crate) fn update_proposal_table( - &self, - fork: &ForkChanges, - proposal_table: &mut ProposalTable, - ) { - for blk in fork.detached_blocks() { - proposal_table.remove(blk.header().number()); - } - for blk in fork.attached_blocks() { - proposal_table.insert(blk.header().number(), blk.union_proposal_ids()); - } - self.reload_proposal_table(fork, proposal_table); - } - - // if rollback happen, go back check whether need reload proposal_table from block - pub(crate) fn reload_proposal_table( - &self, - fork: &ForkChanges, - proposal_table: &mut ProposalTable, - ) { - if fork.has_detached() { - let proposal_window = self.shared.consensus().tx_proposal_window(); - let detached_front = fork - .detached_blocks() - .front() - .map(|blk| blk.header().number()) - .expect("detached_blocks is not empty"); - if detached_front < 2 { - return; - } - let common = detached_front - 1; - let new_tip = fork - .attached_blocks() - .back() - .map(|blk| blk.header().number()) 
- .unwrap_or(common); - - let proposal_start = - cmp::max(1, (new_tip + 1).saturating_sub(proposal_window.farthest())); - - debug!("Reload_proposal_table [{}, {}]", proposal_start, common); - for bn in proposal_start..=common { - let blk = self - .shared - .store() - .get_block_hash(bn) - .and_then(|hash| self.shared.store().get_block(&hash)) - .expect("block stored"); - - proposal_table.insert(bn, blk.union_proposal_ids()); - } - } - } - - pub(crate) fn rollback(&self, fork: &ForkChanges, txn: &StoreTransaction) -> Result<(), Error> { - for block in fork.detached_blocks().iter().rev() { - txn.detach_block(block)?; - detach_block_cell(txn, block)?; - } - Ok(()) - } - - fn alignment_fork( - &self, - fork: &mut ForkChanges, - index: &mut GlobalIndex, - new_tip_number: BlockNumber, - current_tip_number: BlockNumber, - ) { - if new_tip_number <= current_tip_number { - for bn in new_tip_number..=current_tip_number { - let hash = self - .shared - .store() - .get_block_hash(bn) - .expect("block hash stored before alignment_fork"); - let old_block = self - .shared - .store() - .get_block(&hash) - .expect("block data stored before alignment_fork"); - fork.detached_blocks.push_back(old_block); - } - } else { - while index.number > current_tip_number { - if index.unseen { - let ext = self - .shared - .store() - .get_block_ext(&index.hash) - .expect("block ext stored before alignment_fork"); - if ext.verified.is_none() { - fork.dirty_exts.push_front(ext) - } else { - index.unseen = false; - } - } - let new_block = self - .shared - .store() - .get_block(&index.hash) - .expect("block data stored before alignment_fork"); - index.forward(new_block.data().header().raw().parent_hash()); - fork.attached_blocks.push_front(new_block); - } - } - } - - fn find_fork_until_latest_common(&self, fork: &mut ForkChanges, index: &mut GlobalIndex) { - loop { - if index.number == 0 { - break; - } - let detached_hash = self - .shared - .store() - .get_block_hash(index.number) - .expect("detached 
hash stored before find_fork_until_latest_common"); - if detached_hash == index.hash { - break; - } - let detached_blocks = self - .shared - .store() - .get_block(&detached_hash) - .expect("detached block stored before find_fork_until_latest_common"); - fork.detached_blocks.push_front(detached_blocks); - - if index.unseen { - let ext = self - .shared - .store() - .get_block_ext(&index.hash) - .expect("block ext stored before find_fork_until_latest_common"); - if ext.verified.is_none() { - fork.dirty_exts.push_front(ext) - } else { - index.unseen = false; - } - } - - let attached_block = self - .shared - .store() - .get_block(&index.hash) - .expect("attached block stored before find_fork_until_latest_common"); - index.forward(attached_block.data().header().raw().parent_hash()); - fork.attached_blocks.push_front(attached_block); - } - } - - pub(crate) fn find_fork( - &self, - fork: &mut ForkChanges, - current_tip_number: BlockNumber, - new_tip_block: &BlockView, - new_tip_ext: BlockExt, - ) { - let new_tip_number = new_tip_block.header().number(); - fork.dirty_exts.push_front(new_tip_ext); - - // attached_blocks = forks[latest_common + 1 .. new_tip] - // detached_blocks = chain[latest_common + 1 .. old_tip] - fork.attached_blocks.push_front(new_tip_block.clone()); - - let mut index = GlobalIndex::new( - new_tip_number - 1, - new_tip_block.data().header().raw().parent_hash(), - true, - ); - - // if new_tip_number <= current_tip_number - // then detached_blocks.extend(chain[new_tip_number .. =current_tip_number]) - // if new_tip_number > current_tip_number - // then attached_blocks.extend(forks[current_tip_number + 1 .. 
=new_tip_number]) - self.alignment_fork(fork, &mut index, new_tip_number, current_tip_number); - - // find latest common ancestor - self.find_fork_until_latest_common(fork, &mut index); - - is_sorted_assert(fork); - } - - // we found new best_block - pub(crate) fn reconcile_main_chain( - &self, - txn: Arc, - fork: &mut ForkChanges, - switch: Switch, - ) -> Result<(), Error> { - if fork.attached_blocks().is_empty() { - return Ok(()); - } - - let txs_verify_cache = self.shared.txs_verify_cache(); - - let consensus = self.shared.consensus(); - let hardfork_switch = consensus.hardfork_switch(); - let during_hardfork = fork.during_hardfork(hardfork_switch); - let async_handle = self.shared.tx_pool_controller().handle(); - - if during_hardfork { - async_handle.block_on(async { - txs_verify_cache.write().await.clear(); - }); - } - - let consensus = self.shared.cloned_consensus(); - let start_block_header = fork.attached_blocks()[0].header(); - let mmr_size = leaf_index_to_mmr_size(start_block_header.number() - 1); - trace!("light-client: new chain root MMR with size = {}", mmr_size); - let mut mmr = ChainRootMMR::new(mmr_size, txn.as_ref()); - - let verified_len = fork.verified_len(); - for b in fork.attached_blocks().iter().take(verified_len) { - txn.attach_block(b)?; - attach_block_cell(&txn, b)?; - mmr.push(b.digest()) - .map_err(|e| InternalErrorKind::MMR.other(e))?; - } - - let verify_context = VerifyContext::new(Arc::clone(&txn), consensus); - - let mut found_error = None; - for (ext, b) in fork - .dirty_exts - .iter() - .zip(fork.attached_blocks.iter().skip(verified_len)) - { - if !switch.disable_all() { - if found_error.is_none() { - let log_now = std::time::Instant::now(); - let resolved = self.resolve_block_transactions(&txn, b, &verify_context); - debug!( - "resolve_block_transactions {} cost: {:?}", - b.hash(), - log_now.elapsed() - ); - match resolved { - Ok(resolved) => { - let verified = { - let contextual_block_verifier = ContextualBlockVerifier::new( - 
verify_context.clone(), - async_handle, - switch, - Arc::clone(&txs_verify_cache), - &mmr, - ); - let log_now = std::time::Instant::now(); - let verify_result = contextual_block_verifier.verify(&resolved, b); - debug!( - "contextual_block_verifier {} cost: {:?}", - b.hash(), - log_now.elapsed() - ); - verify_result - }; - match verified { - Ok((cycles, cache_entries)) => { - let txs_sizes = resolved - .iter() - .map(|rtx| { - rtx.transaction.data().serialized_size_in_block() as u64 - }) - .collect(); - txn.attach_block(b)?; - attach_block_cell(&txn, b)?; - mmr.push(b.digest()) - .map_err(|e| InternalErrorKind::MMR.other(e))?; - - self.insert_ok_ext( - &txn, - &b.header().hash(), - ext.clone(), - Some(&cache_entries), - Some(txs_sizes), - )?; - - if !switch.disable_script() && b.transactions().len() > 1 { - self.monitor_block_txs_verified( - b, - &resolved, - &cache_entries, - cycles, - ); - } - } - Err(err) => { - self.print_error(b, &err); - found_error = Some(err); - self.insert_failure_ext(&txn, &b.header().hash(), ext.clone())?; - } - } - } - Err(err) => { - found_error = Some(err); - self.insert_failure_ext(&txn, &b.header().hash(), ext.clone())?; - } - } - } else { - self.insert_failure_ext(&txn, &b.header().hash(), ext.clone())?; - } - } else { - txn.attach_block(b)?; - attach_block_cell(&txn, b)?; - mmr.push(b.digest()) - .map_err(|e| InternalErrorKind::MMR.other(e))?; - self.insert_ok_ext(&txn, &b.header().hash(), ext.clone(), None, None)?; - } - } - - if let Some(err) = found_error { - Err(err) - } else { - trace!("light-client: commit"); - // Before commit, all new MMR nodes are in memory only. 
- mmr.commit().map_err(|e| InternalErrorKind::MMR.other(e))?; - Ok(()) - } - } - - fn resolve_block_transactions( - &self, - txn: &StoreTransaction, - block: &BlockView, - verify_context: &HC, - ) -> Result>, Error> { - let mut seen_inputs = HashSet::new(); - let block_cp = BlockCellProvider::new(block)?; - let transactions = block.transactions(); - let cell_provider = OverlayCellProvider::new(&block_cp, txn); - let resolved = transactions - .iter() - .cloned() - .map(|tx| { - resolve_transaction(tx, &mut seen_inputs, &cell_provider, verify_context) - .map(Arc::new) - }) - .collect::>, _>>()?; - Ok(resolved) - } - - fn insert_ok_ext( - &self, - txn: &StoreTransaction, - hash: &Byte32, - mut ext: BlockExt, - cache_entries: Option<&[Completed]>, - txs_sizes: Option>, - ) -> Result<(), Error> { - ext.verified = Some(true); - if let Some(entries) = cache_entries { - let (txs_fees, cycles) = entries - .iter() - .map(|entry| (entry.fee, entry.cycles)) - .unzip(); - ext.txs_fees = txs_fees; - ext.cycles = Some(cycles); - } - ext.txs_sizes = txs_sizes; - txn.insert_block_ext(hash, &ext) - } - - fn insert_failure_ext( - &self, - txn: &StoreTransaction, - hash: &Byte32, - mut ext: BlockExt, - ) -> Result<(), Error> { - ext.verified = Some(false); - txn.insert_block_ext(hash, &ext) - } - - fn monitor_block_txs_verified( - &self, - b: &BlockView, - resolved: &[Arc], - cache_entries: &[Completed], - cycles: Cycle, - ) { - info!( - "[block_verifier] block number: {}, hash: {}, size:{}/{}, cycles: {}/{}", - b.number(), - b.hash(), - b.data().serialized_size_without_uncle_proposals(), - self.shared.consensus().max_block_bytes(), - cycles, - self.shared.consensus().max_block_cycles() - ); - - // log tx verification result for monitor node - if log_enabled_target!("ckb_tx_monitor", Trace) { - // `cache_entries` already excludes cellbase tx, but `resolved` includes cellbase tx, skip it - // to make them aligned - for (rtx, cycles) in resolved.iter().skip(1).zip(cache_entries.iter()) 
{ - trace_target!( - "ckb_tx_monitor", - r#"{{"tx_hash":"{:#x}","cycles":{}}}"#, - rtx.transaction.hash(), - cycles.cycles - ); - } - } - } - - fn print_error(&self, b: &BlockView, err: &Error) { - error!( - "Block verify error. Block number: {}, hash: {}, error: {:?}", - b.header().number(), - b.header().hash(), - err - ); - if log_enabled!(ckb_logger::Level::Trace) { - trace!("Block {}", b); - } - } - - // TODO: beatify - fn print_chain(&self, len: u64) { - debug!("Chain {{"); - - let snapshot = self.shared.snapshot(); - let tip_header = snapshot.tip_header(); - let tip_number = tip_header.number(); - - let bottom = tip_number - cmp::min(tip_number, len); - - for number in (bottom..=tip_number).rev() { - let hash = snapshot - .get_block_hash(number) - .unwrap_or_else(|| panic!("invalid block number({number}), tip={tip_number}")); - debug!(" {number} => {hash}"); - } - - debug!("}}"); - } -} - -#[cfg(debug_assertions)] -fn is_sorted_assert(fork: &ForkChanges) { - assert!(fork.is_sorted()) } - -#[cfg(not(debug_assertions))] -fn is_sorted_assert(_fork: &ForkChanges) {} From 3493f916fc4fac7e9a0ff1158b8c06e240aad416 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 13 Nov 2023 15:32:56 +0800 Subject: [PATCH 152/357] Move LonelyBlock related struct to module root --- chain/src/lib.rs | 162 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 162 insertions(+) diff --git a/chain/src/lib.rs b/chain/src/lib.rs index e536b83365..8b979345b1 100644 --- a/chain/src/lib.rs +++ b/chain/src/lib.rs @@ -6,8 +6,170 @@ //! [`ChainService`]: chain/struct.ChainService.html //! 
[`ChainController`]: chain/struct.ChainController.html +use ckb_error::{is_internal_db_error, Error}; +use ckb_logger::{debug, error}; +use ckb_network::PeerIndex; +use ckb_shared::types::VerifyFailedBlockInfo; +use ckb_types::core::service::Request; +use ckb_types::core::{BlockNumber, BlockView, HeaderView}; +use ckb_types::packed::Byte32; +use ckb_verification_traits::Switch; +use std::sync::Arc; + pub mod chain; +mod consume_orphan; +mod consume_unverified; mod forkchanges; mod orphan_block_pool; #[cfg(test)] mod tests; + +type ProcessBlockRequest = Request; +type TruncateRequest = Request>; + +pub type VerifyResult = Result; + +pub type VerifyCallback = Box; + +/// VerifiedBlockStatus is +#[derive(Debug, Clone, PartialEq)] +pub enum VerifiedBlockStatus { + // The block is being seen for the first time. + FirstSeenAndVerified, + + // The block is being seen for the first time, but not verify it yet + FirstSeenButNotVerified, + + // The block has been verified before. + PreviouslyVerified, + + // The block has been verified before, but not veriify it yet + PreviouslyUnVerified, +} + +#[derive(Clone)] +pub struct LonelyBlock { + pub block: Arc, + pub peer_id: Option, + pub switch: Option, +} + +impl LonelyBlock { + pub fn with_callback(self, verify_callback: Option) -> LonelyBlockWithCallback { + LonelyBlockWithCallback { + lonely_block: self, + verify_callback, + } + } + + pub fn without_callback(self) -> LonelyBlockWithCallback { + self.with_callback(None) + } +} + +pub struct LonelyBlockWithCallback { + pub lonely_block: LonelyBlock, + pub verify_callback: Option, +} + +impl LonelyBlockWithCallback { + pub(crate) fn execute_callback(self, verify_result: VerifyResult) { + match self.verify_callback { + Some(verify_callback) => { + verify_callback(verify_result); + } + None => {} + } + } + + pub fn block(&self) -> &Arc { + &self.lonely_block.block + } + pub fn peer_id(&self) -> Option { + self.lonely_block.peer_id + } + pub fn switch(&self) -> Option { + 
self.lonely_block.switch + } +} + +impl LonelyBlockWithCallback { + pub(crate) fn combine_parent_header(self, parent_header: HeaderView) -> UnverifiedBlock { + UnverifiedBlock { + unverified_block: self, + parent_header, + } + } +} + +pub(crate) struct UnverifiedBlock { + pub unverified_block: LonelyBlockWithCallback, + pub parent_header: HeaderView, +} + +impl UnverifiedBlock { + pub(crate) fn block(&self) -> &Arc { + self.unverified_block.block() + } + + pub fn peer_id(&self) -> Option { + self.unverified_block.peer_id() + } + + pub fn switch(&self) -> Option { + self.unverified_block.switch() + } + + pub fn execute_callback(self, verify_result: VerifyResult) { + self.unverified_block.execute_callback(verify_result) + } +} + +pub(crate) struct GlobalIndex { + pub(crate) number: BlockNumber, + pub(crate) hash: Byte32, + pub(crate) unseen: bool, +} + +impl GlobalIndex { + pub(crate) fn new(number: BlockNumber, hash: Byte32, unseen: bool) -> GlobalIndex { + GlobalIndex { + number, + hash, + unseen, + } + } + + pub(crate) fn forward(&mut self, hash: Byte32) { + self.number -= 1; + self.hash = hash; + } +} + +pub(crate) fn tell_synchronizer_to_punish_the_bad_peer( + verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, + lonely_block: &LonelyBlockWithCallback, + err: &Error, +) { + let is_internal_db_error = is_internal_db_error(&err); + match lonely_block.peer_id() { + Some(peer_id) => { + let verify_failed_block_info = VerifyFailedBlockInfo { + block_hash: lonely_block.lonely_block.block.hash(), + peer_id, + message_bytes: 0, + reason: err.to_string(), + is_internal_db_error, + }; + match verify_failed_blocks_tx.send(verify_failed_block_info) { + Err(_err) => { + error!("ChainService failed to send verify failed block info to Synchronizer, the receiver side may have been closed, this shouldn't happen") + } + _ => {} + } + } + _ => { + debug!("Don't know which peer to punish, or don't have a channel Sender to Synchronizer, skip it") + } + } +} From 
41f7b520dee36de9659fcb1de80af1b84d4027c3 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 13 Nov 2023 15:33:30 +0800 Subject: [PATCH 153/357] Fix LonelyBlockWithCallBack import path --- chain/src/orphan_block_pool.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chain/src/orphan_block_pool.rs b/chain/src/orphan_block_pool.rs index db895939c4..f6bc5d1ea7 100644 --- a/chain/src/orphan_block_pool.rs +++ b/chain/src/orphan_block_pool.rs @@ -1,4 +1,4 @@ -use crate::chain::LonelyBlockWithCallback; +use crate::LonelyBlockWithCallback; use ckb_logger::debug; use ckb_types::core::{BlockView, EpochNumber}; use ckb_types::packed; From 74ca009a427e1fbd6f16bc267033ad124b1ee2cf Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 13 Nov 2023 15:34:15 +0800 Subject: [PATCH 154/357] Add ChainServicesBuilder to SharedPackage --- shared/src/shared_builder.rs | 110 ++++++++++++++++++++--------------- 1 file changed, 63 insertions(+), 47 deletions(-) diff --git a/shared/src/shared_builder.rs b/shared/src/shared_builder.rs index 2e3d666560..245b196780 100644 --- a/shared/src/shared_builder.rs +++ b/shared/src/shared_builder.rs @@ -35,6 +35,7 @@ use ckb_shared::{HeaderMap, Shared}; use ckb_snapshot::{Snapshot, SnapshotMgr}; use ckb_util::Mutex; +use ckb_chain::chain::{ChainService, ChainServicesBuilder}; use ckb_shared::types::VerifyFailedBlockInfo; use ckb_store::ChainDB; use ckb_store::ChainStore; @@ -446,11 +447,13 @@ impl SharedBuilder { let (verify_failed_block_tx, verify_failed_block_rx) = tokio::sync::mpsc::unbounded_channel::(); + let chain_services_builder = + ChainServicesBuilder::new(shared.clone(), table, verify_failed_block_tx); + let pack = SharedPackage { - table: Some(table), + chain_services_builder: Some(chain_services_builder), tx_pool_builder: Some(tx_pool_builder), relay_tx_receiver: Some(receiver), - verify_failed_block_tx: Some(verify_failed_block_tx), verify_failed_block_rx: Some(verify_failed_block_rx), }; @@ -458,6 +461,64 @@ impl 
SharedBuilder { } } +/// SharedBuilder build returning the shared/package halves +/// The package structs used for init other component +pub struct SharedPackage { + chain_services_builder: Option, + tx_pool_builder: Option, + relay_tx_receiver: Option>, + + verify_failed_block_rx: Option>, +} + +impl SharedPackage { + /// Takes the chain_services_builder out of the package, leaving a None in its place. + pub fn take_chain_services_builder(&mut self) -> ChainServicesBuilder { + self.chain_services_builder + .take() + .expect("take chain_services_builder") + } + + /// Takes the tx_pool_builder out of the package, leaving a None in its place. + pub fn take_tx_pool_builder(&mut self) -> TxPoolServiceBuilder { + self.tx_pool_builder.take().expect("take tx_pool_builder") + } + + /// Takes the relay_tx_receiver out of the package, leaving a None in its place. + pub fn take_relay_tx_receiver(&mut self) -> Receiver { + self.relay_tx_receiver + .take() + .expect("take relay_tx_receiver") + } + + /// Takes the verify_failed_block_rx out of the package, leaving a None in its place. 
+ pub fn take_verify_failed_block_rx( + &mut self, + ) -> tokio::sync::mpsc::UnboundedReceiver { + self.verify_failed_block_rx + .take() + .expect("take verify_failed_block_rx") + } +} + +fn start_notify_service(notify_config: NotifyConfig, handle: Handle) -> NotifyController { + NotifyService::new(notify_config, handle).start() +} + +fn build_store( + db: RocksDB, + store_config: StoreConfig, + ancient_path: Option, +) -> Result { + let store = if store_config.freezer_enable && ancient_path.is_some() { + let freezer = Freezer::open(ancient_path.expect("exist checked"))?; + ChainDB::new_with_freezer(db, freezer, store_config) + } else { + ChainDB::new(db, store_config) + }; + Ok(store) +} + fn register_tx_pool_callback(tx_pool_builder: &mut TxPoolServiceBuilder, notify: NotifyController) { let notify_pending = notify.clone(); @@ -507,48 +568,3 @@ fn register_tx_pool_callback(tx_pool_builder: &mut TxPoolServiceBuilder, notify: }, )); } - -fn start_notify_service(notify_config: NotifyConfig, handle: Handle) -> NotifyController { - NotifyService::new(notify_config, handle).start() -} - -fn build_store( - db: RocksDB, - store_config: StoreConfig, - ancient_path: Option, -) -> Result { - let store = if store_config.freezer_enable && ancient_path.is_some() { - let freezer = Freezer::open(ancient_path.expect("exist checked"))?; - ChainDB::new_with_freezer(db, freezer, store_config) - } else { - ChainDB::new(db, store_config) - }; - Ok(store) -} - -/// SharedBuilder build returning the shared/package halves -/// The package structs used for init other component -pub struct SharedPackage { - table: Option, - tx_pool_builder: Option, - relay_tx_receiver: Option>, -} - -impl SharedPackage { - /// Takes the proposal_table out of the package, leaving a None in its place. - pub fn take_proposal_table(&mut self) -> ProposalTable { - self.table.take().expect("take proposal_table") - } - - /// Takes the tx_pool_builder out of the package, leaving a None in its place. 
- pub fn take_tx_pool_builder(&mut self) -> TxPoolServiceBuilder { - self.tx_pool_builder.take().expect("take tx_pool_builder") - } - - /// Takes the relay_tx_receiver out of the package, leaving a None in its place. - pub fn take_relay_tx_receiver(&mut self) -> Receiver { - self.relay_tx_receiver - .take() - .expect("take relay_tx_receiver") - } -} From 09c32f6fbadba458390b07cb50b4b181085057d9 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 13 Nov 2023 15:34:52 +0800 Subject: [PATCH 155/357] Launcher start chain_service by ChainServicesBuilder --- util/launcher/src/lib.rs | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/util/launcher/src/lib.rs b/util/launcher/src/lib.rs index 9b908fa966..9e3edb6794 100644 --- a/util/launcher/src/lib.rs +++ b/util/launcher/src/lib.rs @@ -8,7 +8,7 @@ use ckb_app_config::{ use ckb_async_runtime::Handle; use ckb_block_filter::filter::BlockFilter as BlockFilterService; use ckb_build_info::Version; -use ckb_chain::chain::{ChainController, ChainService}; +use ckb_chain::chain::{ChainController, ChainService, ChainServicesBuilder}; use ckb_channel::Receiver; use ckb_jsonrpc_types::ScriptHashType; use ckb_light_client_protocol_server::LightClientProtocol; @@ -231,11 +231,9 @@ impl Launcher { pub fn start_chain_service( &self, shared: &Shared, - table: ProposalTable, - verify_failed_block_tx: tokio::sync::mpsc::UnboundedSender, + chain_services_builder: ChainServicesBuilder, ) -> ChainController { - let chain_service = ChainService::new(shared.clone(), table, verify_failed_block_tx); - let chain_controller = chain_service.start(Some("ChainService")); + let chain_controller = chain_services_builder.start(); info!("chain genesis hash: {:#x}", shared.genesis_hash()); chain_controller } From 751102ace69fe1cfe8f230b1e4f7b28ac456d676 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 13 Nov 2023 15:35:17 +0800 Subject: [PATCH 156/357] Fix ckb-sync VerifyResult import path --- sync/src/relayer/mod.rs | 3 ++- 
sync/src/types/mod.rs | 6 ++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/sync/src/relayer/mod.rs b/sync/src/relayer/mod.rs index f0b90ad5b8..1085aa1f84 100644 --- a/sync/src/relayer/mod.rs +++ b/sync/src/relayer/mod.rs @@ -25,7 +25,8 @@ use crate::utils::{ is_internal_db_error, metric_ckb_message_bytes, send_message_to, MetricDirection, }; use crate::{Status, StatusCode}; -use ckb_chain::chain::{ChainController, VerifiedBlockStatus, VerifyResult}; +use ckb_chain::chain::ChainController; +use ckb_chain::{VerifiedBlockStatus, VerifyResult}; use ckb_constant::sync::BAD_MESSAGE_BAN_TIME; use ckb_logger::{debug_target, error, error_target, info_target, trace_target, warn_target}; use ckb_network::{ diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index e7efb0e1c2..6525a071a1 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -1,9 +1,7 @@ use crate::{Status, StatusCode, FAST_INDEX, LOW_INDEX, NORMAL_INDEX, TIME_TRACE_SIZE}; use ckb_app_config::SyncConfig; -use ckb_chain::chain::{ - ChainController, LonelyBlock, LonelyBlockWithCallback, VerifiedBlockStatus, VerifyCallback, - VerifyResult, -}; +use ckb_chain::chain::ChainController; +use ckb_chain::{LonelyBlock, VerifyCallback}; use ckb_chain_spec::consensus::{Consensus, MAX_BLOCK_INTERVAL, MIN_BLOCK_INTERVAL}; use ckb_channel::Receiver; use ckb_constant::sync::{ From ee9659a55031eaff08b1a310630bafa61c12e760 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 13 Nov 2023 15:35:32 +0800 Subject: [PATCH 157/357] Fix ckb-rpc VerifyResult import path --- rpc/src/module/test.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/rpc/src/module/test.rs b/rpc/src/module/test.rs index 0bae70244c..df08d926a0 100644 --- a/rpc/src/module/test.rs +++ b/rpc/src/module/test.rs @@ -1,6 +1,7 @@ use crate::error::RPCError; use async_trait::async_trait; -use ckb_chain::chain::{ChainController, VerifyResult}; +use ckb_chain::chain::ChainController; +use ckb_chain::VerifyResult; 
use ckb_dao::DaoCalculator; use ckb_jsonrpc_types::{Block, BlockTemplate, Byte32, EpochNumberWithFraction, Transaction}; use ckb_logger::error; From cc4d1aca2a7c4c31a4a5b3f1d5ebc48667f31765 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 13 Nov 2023 15:36:03 +0800 Subject: [PATCH 158/357] Fix ChainService initialize for ckb import subcommand --- ckb-bin/src/subcommand/import.rs | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/ckb-bin/src/subcommand/import.rs b/ckb-bin/src/subcommand/import.rs index 38efa5c124..5c76e29351 100644 --- a/ckb-bin/src/subcommand/import.rs +++ b/ckb-bin/src/subcommand/import.rs @@ -1,6 +1,5 @@ use ckb_app_config::{ExitCode, ImportArgs}; use ckb_async_runtime::Handle; -use ckb_chain::chain::ChainService; use ckb_instrument::Import; use ckb_shared::SharedBuilder; @@ -15,12 +14,7 @@ pub fn import(args: ImportArgs, async_handle: Handle) -> Result<(), ExitCode> { )?; let (shared, mut pack) = builder.build()?; - let chain_service = ChainService::new( - shared, - pack.take_proposal_table(), - pack.take_verify_failed_block_tx(), - ); - let chain_controller = chain_service.start::<&str>(Some("ImportChainService")); + let chain_controller = pack.take_chain_services_builder().start(); // manual drop tx_pool_builder and relay_tx_receiver pack.take_tx_pool_builder(); From b151c8d42106ff0273d1dd7c7a731a4106c4589f Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 13 Nov 2023 15:36:11 +0800 Subject: [PATCH 159/357] Fix ChainService initialize for ckb replay subcommand --- ckb-bin/src/subcommand/replay.rs | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/ckb-bin/src/subcommand/replay.rs b/ckb-bin/src/subcommand/replay.rs index 5091e37504..7295214101 100644 --- a/ckb-bin/src/subcommand/replay.rs +++ b/ckb-bin/src/subcommand/replay.rs @@ -47,12 +47,8 @@ pub fn replay(args: ReplayArgs, async_handle: Handle) -> Result<(), ExitCode> { args.consensus, )?; let (tmp_shared, mut pack) = 
shared_builder.tx_pool_config(args.config.tx_pool).build()?; - let chain_service = ChainService::new( - tmp_shared, - pack.take_proposal_table(), - pack.take_verify_failed_block_tx(), - ); - let chain_controller = chain_service.start(Some("ckb_reply::ChainService")); + let chain_service_builder = pack.take_chain_services_builder(); + let chain_controller = chain_service_builder.start(); if let Some((from, to)) = args.profile { profile(shared, chain_controller, from, to); From 65d21364bed6c06a45ba86b9cee092318e5668da Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 13 Nov 2023 15:36:18 +0800 Subject: [PATCH 160/357] Fix ChainService initialize for ckb run subcommand --- ckb-bin/src/subcommand/run.rs | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/ckb-bin/src/subcommand/run.rs b/ckb-bin/src/subcommand/run.rs index d947a2856f..cd5f36ca09 100644 --- a/ckb-bin/src/subcommand/run.rs +++ b/ckb-bin/src/subcommand/run.rs @@ -41,11 +41,8 @@ pub fn run(args: RunArgs, version: Version, async_handle: Handle) -> Result<(), launcher.check_assume_valid_target(&shared); - let chain_controller = launcher.start_chain_service( - &shared, - pack.take_proposal_table(), - pack.take_verify_failed_block_tx(), - ); + let chain_controller = + launcher.start_chain_service(&shared, pack.take_chain_services_builder()); launcher.start_block_filter(&shared); From a306cca7478509aef77c989ce9b5d813ee0f1303 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 13 Nov 2023 16:27:03 +0800 Subject: [PATCH 161/357] Extract punish_bad_peer's params to peer_id and block_hash --- chain/src/lib.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/chain/src/lib.rs b/chain/src/lib.rs index 8b979345b1..b636e353b1 100644 --- a/chain/src/lib.rs +++ b/chain/src/lib.rs @@ -148,14 +148,15 @@ impl GlobalIndex { pub(crate) fn tell_synchronizer_to_punish_the_bad_peer( verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, - lonely_block: &LonelyBlockWithCallback, + 
peer_id: Option, + block_hash: Byte32, err: &Error, ) { let is_internal_db_error = is_internal_db_error(&err); - match lonely_block.peer_id() { + match peer_id { Some(peer_id) => { let verify_failed_block_info = VerifyFailedBlockInfo { - block_hash: lonely_block.lonely_block.block.hash(), + block_hash, peer_id, message_bytes: 0, reason: err.to_string(), From 356f090edd8316fade2d122a27cb4b1936589b68 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 13 Nov 2023 16:29:21 +0800 Subject: [PATCH 162/357] Fix tell_synchronizer_to_punish_the_bad_peer's params --- chain/src/chain.rs | 6 ++++-- chain/src/consume_orphan.rs | 6 ++++-- chain/src/consume_unverified.rs | 3 ++- 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 8c47e50862..d10e67bf40 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -384,7 +384,8 @@ impl ChainService { Err(err) => { tell_synchronizer_to_punish_the_bad_peer( self.verify_failed_blocks_tx.clone(), - &lonely_block, + lonely_block.peer_id(), + lonely_block.block().hash(), &err, ); @@ -407,7 +408,8 @@ impl ChainService { tell_synchronizer_to_punish_the_bad_peer( self.verify_failed_blocks_tx.clone(), - &lonely_block, + lonely_block.peer_id(), + lonely_block.block().hash(), &err, ); diff --git a/chain/src/consume_orphan.rs b/chain/src/consume_orphan.rs index 42390a2a80..85dfd02eba 100644 --- a/chain/src/consume_orphan.rs +++ b/chain/src/consume_orphan.rs @@ -108,7 +108,8 @@ impl ConsumeOrphan { Err(err) => { tell_synchronizer_to_punish_the_bad_peer( self.verify_failed_blocks_tx.clone(), - &descendant_block, + descendant_block.peer_id(), + descendant_block.block().hash(), &err, ); @@ -138,7 +139,8 @@ impl ConsumeOrphan { tell_synchronizer_to_punish_the_bad_peer( self.verify_failed_blocks_tx.clone(), - &unverified_block.unverified_block, + unverified_block.peer_id(), + unverified_block.block().hash(), &err, ); diff --git a/chain/src/consume_unverified.rs b/chain/src/consume_unverified.rs 
index c36c1928d1..645480dfc8 100644 --- a/chain/src/consume_unverified.rs +++ b/chain/src/consume_unverified.rs @@ -140,7 +140,8 @@ impl ConsumeUnverifiedBlocks { tell_synchronizer_to_punish_the_bad_peer( self.verify_failed_blocks_tx.clone(), - &unverified_block.unverified_block, + unverified_block.peer_id(), + unverified_block.block().hash(), err, ); } From 0724a3d8c70168c9d0bc4f18700b79db70f0b1b5 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 13 Nov 2023 16:31:05 +0800 Subject: [PATCH 163/357] Remove search_orphan_pool's param --- chain/src/consume_orphan.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/chain/src/consume_orphan.rs b/chain/src/consume_orphan.rs index 85dfd02eba..c634c2ca7a 100644 --- a/chain/src/consume_orphan.rs +++ b/chain/src/consume_orphan.rs @@ -58,7 +58,7 @@ impl ConsumeOrphan { recv(self.lonely_blocks_rx) -> msg => match msg { Ok(lonely_block) => { self.orphan_blocks_broker.insert(lonely_block); - self.search_orphan_pool(&self.unverified_blocks_tx) + self.search_orphan_pool() }, Err(err) => { error!("lonely_block_rx err: {}", err); @@ -68,7 +68,7 @@ impl ConsumeOrphan { } } } - fn search_orphan_pool(&self, unverified_block_tx: &Sender) { + fn search_orphan_pool(&self) { for leader_hash in self.orphan_blocks_broker.clone_leaders() { if !self .shared @@ -130,7 +130,7 @@ impl ConsumeOrphan { let block_number = unverified_block.block().number(); let block_hash = unverified_block.block().hash(); - match unverified_block_tx.send(unverified_block) { + match self.unverified_blocks_tx.send(unverified_block) { Ok(_) => {} Err(SendError(unverified_block)) => { error!("send unverified_block_tx failed, the receiver has been closed"); From 12c915c289ee4ac9210bba257e8c8caca0eabaf3 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Fri, 17 Nov 2023 12:28:44 +0800 Subject: [PATCH 164/357] Add ConsumeUnverifiedBlockProcessor as child field of ConsumeUnverifiedBlocks --- chain/src/consume_unverified.rs | 21 +++++++++++++-------- 1 
file changed, 13 insertions(+), 8 deletions(-) diff --git a/chain/src/consume_unverified.rs b/chain/src/consume_unverified.rs index 645480dfc8..75f5a8ec03 100644 --- a/chain/src/consume_unverified.rs +++ b/chain/src/consume_unverified.rs @@ -30,14 +30,16 @@ use std::cmp; use std::collections::HashSet; use std::sync::Arc; -pub(crate) struct ConsumeUnverifiedBlocks { +pub(crate) struct ConsumeUnverifiedBlockProcessor { shared: Shared, - unverified_block_rx: Receiver, proposal_table: ProposalTable, - verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, +} +pub(crate) struct ConsumeUnverifiedBlocks { + unverified_block_rx: Receiver, stop_rx: Receiver<()>, + processor: ConsumeUnverifiedBlockProcessor, } impl ConsumeUnverifiedBlocks { @@ -49,12 +51,13 @@ impl ConsumeUnverifiedBlocks { stop_rx: Receiver<()>, ) -> Self { ConsumeUnverifiedBlocks { - shared, unverified_block_rx: unverified_blocks_rx, - proposal_table, - - verify_failed_blocks_tx, stop_rx, + processor: ConsumeUnverifiedBlockProcessor { + shared, + proposal_table, + verify_failed_blocks_tx, + }, } } pub(crate) fn start(mut self) { @@ -70,7 +73,7 @@ impl ConsumeUnverifiedBlocks { Ok(unverified_task) => { // process this unverified block trace!("got an unverified block, wait cost: {:?}", begin_loop.elapsed()); - self.consume_unverified_blocks(unverified_task); + self.processor.consume_unverified_blocks(unverified_task); trace!("consume_unverified_blocks cost: {:?}", begin_loop.elapsed()); }, Err(err) => { @@ -82,7 +85,9 @@ impl ConsumeUnverifiedBlocks { } } } +} +impl ConsumeUnverifiedBlockProcessor { fn consume_unverified_blocks(&mut self, unverified_block: UnverifiedBlock) { // process this unverified block let verify_result = self.verify_block(&unverified_block); From a9dc5c84856c9fa8e25be735b7f50f65c39a92df Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Sun, 19 Nov 2023 15:01:23 +0800 Subject: [PATCH 165/357] Make `struct ChainService` private --- chain/src/chain.rs | 8 ++++---- 1 file changed, 4 
insertions(+), 4 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index d10e67bf40..a804d3a5ef 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -46,7 +46,7 @@ pub struct ChainController { #[cfg_attr(feature = "mock", faux::methods)] impl ChainController { - pub fn new( + fn new( process_block_sender: Sender, truncate_sender: Sender, orphan_block_broker: Arc, @@ -282,7 +282,7 @@ impl ChainServicesBuilder { /// /// The ChainService provides a single-threaded background executor. #[derive(Clone)] -pub struct ChainService { +pub(crate) struct ChainService { shared: Shared, process_block_rx: Receiver, @@ -293,7 +293,7 @@ pub struct ChainService { } impl ChainService { /// Create a new ChainService instance with shared and initial proposal_table. - pub fn new( + pub(crate) fn new( shared: Shared, process_block_rx: Receiver, truncate_block_rx: Receiver, @@ -311,7 +311,7 @@ impl ChainService { } /// start background single-threaded service with specified thread_name. 
- pub fn start(mut self) { + pub(crate) fn start(mut self) { let signal_receiver = new_crossbeam_exit_rx(); // Mainly for test: give an empty thread_name From 2991c515f06e8f8b5c5a2721712fe573d063031d Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Sun, 19 Nov 2023 15:01:53 +0800 Subject: [PATCH 166/357] Make `ConsumeUnverifiedBlockProcessor` public for crate --- chain/src/consume_unverified.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/chain/src/consume_unverified.rs b/chain/src/consume_unverified.rs index 75f5a8ec03..b7ca53c4a5 100644 --- a/chain/src/consume_unverified.rs +++ b/chain/src/consume_unverified.rs @@ -31,9 +31,9 @@ use std::collections::HashSet; use std::sync::Arc; pub(crate) struct ConsumeUnverifiedBlockProcessor { - shared: Shared, - proposal_table: ProposalTable, - verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, + pub(crate) shared: Shared, + pub(crate) proposal_table: ProposalTable, + pub(crate) verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, } pub(crate) struct ConsumeUnverifiedBlocks { @@ -88,7 +88,7 @@ impl ConsumeUnverifiedBlocks { } impl ConsumeUnverifiedBlockProcessor { - fn consume_unverified_blocks(&mut self, unverified_block: UnverifiedBlock) { + pub(crate) fn consume_unverified_blocks(&mut self, unverified_block: UnverifiedBlock) { // process this unverified block let verify_result = self.verify_block(&unverified_block); match &verify_result { From 13eca1ab7991360b088a726ec52fcb62e72e0237 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Sun, 19 Nov 2023 15:02:51 +0800 Subject: [PATCH 167/357] Make `chain` module private, re-export `ChainController` and `ChainSerivceBuilder` --- chain/src/lib.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/chain/src/lib.rs b/chain/src/lib.rs index b636e353b1..79257e3a86 100644 --- a/chain/src/lib.rs +++ b/chain/src/lib.rs @@ -15,8 +15,7 @@ use ckb_types::core::{BlockNumber, BlockView, HeaderView}; use ckb_types::packed::Byte32; 
use ckb_verification_traits::Switch; use std::sync::Arc; - -pub mod chain; +mod chain; mod consume_orphan; mod consume_unverified; mod forkchanges; @@ -24,6 +23,8 @@ mod orphan_block_pool; #[cfg(test)] mod tests; +pub use chain::{ChainController, ChainServicesBuilder}; + type ProcessBlockRequest = Request; type TruncateRequest = Request>; From a8c2b4cf175b474b420efe284cac04a7cba5245e Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Sun, 19 Nov 2023 15:04:35 +0800 Subject: [PATCH 168/357] Fix `find_fork` related unit test use `ConsumeUnverifiedBlockProcessor::find_fork` --- chain/src/tests/find_fork.rs | 228 ++++++++++++++++++++++------------- 1 file changed, 143 insertions(+), 85 deletions(-) diff --git a/chain/src/tests/find_fork.rs b/chain/src/tests/find_fork.rs index e073435168..f0321fd3d8 100644 --- a/chain/src/tests/find_fork.rs +++ b/chain/src/tests/find_fork.rs @@ -1,6 +1,8 @@ -use crate::chain::ChainService; +use crate::consume_unverified::{ConsumeUnverifiedBlockProcessor, ConsumeUnverifiedBlocks}; use crate::forkchanges::ForkChanges; +use crate::{LonelyBlock, LonelyBlockWithCallback, UnverifiedBlock, VerifyFailedBlockInfo}; use ckb_chain_spec::consensus::{Consensus, ProposalWindow}; +use ckb_proposal_table::ProposalTable; use ckb_shared::SharedBuilder; use ckb_store::ChainStore; use ckb_systemtime::unix_time_as_millis; @@ -15,6 +17,31 @@ use ckb_verification_traits::Switch; use std::collections::HashSet; use std::sync::Arc; +fn consume_unverified_block( + processor: &mut ConsumeUnverifiedBlockProcessor, + blk: &BlockView, + switch: Switch, +) { + let parent_header = processor + .shared + .store() + .get_block_header(&blk.data().header().raw().parent_hash()) + .unwrap(); + + let unverified_block = UnverifiedBlock { + unverified_block: LonelyBlockWithCallback { + lonely_block: LonelyBlock { + block: Arc::new(blk.to_owned()), + peer_id: None, + switch: Some(switch), + }, + verify_callback: None, + }, + parent_header, + }; + 
processor.consume_unverified_blocks(unverified_block); +} + // 0--1--2--3--4 // \ // \ @@ -22,14 +49,10 @@ use std::sync::Arc; #[test] fn test_find_fork_case1() { let builder = SharedBuilder::with_temp_db(); - let (shared, mut pack) = builder.consensus(Consensus::default()).build().unwrap(); - let mut _chain_service = ChainService::new( - shared.clone(), - pack.take_proposal_table(), - pack.take_verify_failed_block_tx(), - ); - let _chain_service_clone = _chain_service.clone(); - let chain_controller = _chain_service.start(Some("test_find_fork_case1::ChainService")); + let consensus = Consensus::default(); + let (shared, mut pack) = builder.consensus(consensus).build().unwrap(); + let chain_controller = pack.take_chain_services_builder().start(); + let genesis = shared .store() .get_block_header(&shared.store().get_block_hash(0).unwrap()) @@ -47,18 +70,32 @@ fn test_find_fork_case1() { fork2.gen_empty_block_with_diff(90u64, &mock_store); } + let proposal_table = ProposalTable::new(consensus.tx_proposal_window()); + let (verify_failed_blocks_tx, _verify_failed_blocks_rx) = + tokio::sync::mpsc::unbounded_channel::(); + + let mut consume_unverified_block_processor = ConsumeUnverifiedBlockProcessor { + shared, + proposal_table, + verify_failed_blocks_tx, + }; + // fork1 total_difficulty 400 for blk in fork1.blocks() { - chain_controller - .blocking_process_block_with_switch(Arc::new(blk.clone()), Switch::DISABLE_ALL) - .unwrap(); + consume_unverified_block( + &mut consume_unverified_block_processor, + blk, + Switch::DISABLE_ALL, + ); } // fork2 total_difficulty 270 for blk in fork2.blocks() { - chain_controller - .blocking_process_block_with_switch(Arc::new(blk.clone()), Switch::DISABLE_ALL) - .unwrap(); + consume_unverified_block( + &mut consume_unverified_block_processor, + blk, + Switch::DISABLE_ALL, + ); } let tip_number = { shared.snapshot().tip_number() }; @@ -79,7 +116,7 @@ fn test_find_fork_case1() { let mut fork = ForkChanges::default(); - 
_chain_service_clone.find_fork(&mut fork, tip_number, fork2.tip(), ext); + consume_unverified_block_processor.find_fork(&mut fork, tip_number, fork2.tip(), ext); let detached_blocks: HashSet = fork1.blocks().clone().into_iter().collect(); let attached_blocks: HashSet = fork2.blocks().clone().into_iter().collect(); @@ -100,14 +137,8 @@ fn test_find_fork_case1() { #[test] fn test_find_fork_case2() { let builder = SharedBuilder::with_temp_db(); - let (shared, mut pack) = builder.consensus(Consensus::default()).build().unwrap(); - let mut _chain_service = ChainService::new( - shared.clone(), - pack.take_proposal_table(), - pack.take_verify_failed_block_tx(), - ); - let _chain_service_clone = _chain_service.clone(); - let chain_controller = _chain_service.start(Some("test_find_fork_case2::ChainService")); + let consensus = Consensus::default(); + let (shared, mut pack) = builder.consensus(consensus).build().unwrap(); let genesis = shared .store() @@ -124,19 +155,32 @@ fn test_find_fork_case2() { for _ in 0..2 { fork2.gen_empty_block_with_diff(90u64, &mock_store); } + let proposal_table = ProposalTable::new(consensus.tx_proposal_window()); + let (verify_failed_blocks_tx, _verify_failed_blocks_rx) = + tokio::sync::mpsc::unbounded_channel::(); + + let mut consume_unverified_block_processor = ConsumeUnverifiedBlockProcessor { + shared, + proposal_table, + verify_failed_blocks_tx, + }; // fork1 total_difficulty 400 for blk in fork1.blocks() { - chain_controller - .blocking_process_block_with_switch(Arc::new(blk.clone()), Switch::DISABLE_ALL) - .unwrap(); + consume_unverified_block( + &mut consume_unverified_block_processor, + blk, + Switch::DISABLE_ALL, + ); } // fork2 total_difficulty 280 for blk in fork2.blocks() { - chain_controller - .blocking_process_block_with_switch(Arc::new(blk.clone()), Switch::DISABLE_ALL) - .unwrap(); + consume_unverified_block( + &mut consume_unverified_block_processor, + blk, + Switch::DISABLE_ALL, + ); } let tip_number = { 
shared.snapshot().tip_number() }; @@ -157,7 +201,7 @@ fn test_find_fork_case2() { let mut fork = ForkChanges::default(); - _chain_service_clone.find_fork(&mut fork, tip_number, fork2.tip(), ext); + consume_unverified_block_processor.find_fork(&mut fork, tip_number, fork2.tip(), ext); let detached_blocks: HashSet = fork1.blocks()[1..].iter().cloned().collect(); let attached_blocks: HashSet = fork2.blocks().clone().into_iter().collect(); @@ -178,14 +222,8 @@ fn test_find_fork_case2() { #[test] fn test_find_fork_case3() { let builder = SharedBuilder::with_temp_db(); - let (shared, mut pack) = builder.consensus(Consensus::default()).build().unwrap(); - let mut _chain_service = ChainService::new( - shared.clone(), - pack.take_proposal_table(), - pack.take_verify_failed_block_tx(), - ); - let _chain_service_clone = _chain_service.clone(); - let chain_controller = _chain_service.start(Some("test_find_fork_case3::ChainService")); + let consensus = Consensus::default(); + let (shared, mut pack) = builder.consensus(consensus).build().unwrap(); let genesis = shared .store() @@ -203,19 +241,32 @@ fn test_find_fork_case3() { for _ in 0..5 { fork2.gen_empty_block_with_diff(40u64, &mock_store) } + let proposal_table = ProposalTable::new(consensus.tx_proposal_window()); + let (verify_failed_blocks_tx, _verify_failed_blocks_rx) = + tokio::sync::mpsc::unbounded_channel::(); + + let mut consume_unverified_block_processor = ConsumeUnverifiedBlockProcessor { + shared, + proposal_table, + verify_failed_blocks_tx, + }; // fork1 total_difficulty 240 for blk in fork1.blocks() { - chain_controller - .blocking_process_block_with_switch(Arc::new(blk.clone()), Switch::DISABLE_ALL) - .unwrap(); + consume_unverified_block( + &mut consume_unverified_block_processor, + blk, + Switch::DISABLE_ALL, + ); } // fork2 total_difficulty 200 for blk in fork2.blocks() { - chain_controller - .blocking_process_block_with_switch(Arc::new(blk.clone()), Switch::DISABLE_ALL) - .unwrap(); + 
consume_unverified_block( + &mut consume_unverified_block_processor, + blk, + Switch::DISABLE_ALL, + ); } let tip_number = { shared.snapshot().tip_number() }; @@ -235,7 +286,7 @@ fn test_find_fork_case3() { }; let mut fork = ForkChanges::default(); - _chain_service_clone.find_fork(&mut fork, tip_number, fork2.tip(), ext); + consume_unverified_block_processor.find_fork(&mut fork, tip_number, fork2.tip(), ext); let detached_blocks: HashSet = fork1.blocks().clone().into_iter().collect(); let attached_blocks: HashSet = fork2.blocks().clone().into_iter().collect(); @@ -256,14 +307,8 @@ fn test_find_fork_case3() { #[test] fn test_find_fork_case4() { let builder = SharedBuilder::with_temp_db(); - let (shared, mut pack) = builder.consensus(Consensus::default()).build().unwrap(); - let mut _chain_service = ChainService::new( - shared.clone(), - pack.take_proposal_table(), - pack.take_verify_failed_block_tx(), - ); - let _chain_service_clone = _chain_service.clone(); - let chain_controller = _chain_service.start(Some("test_find_fork_case4::ChainService")); + let consensus = Consensus::default(); + let (shared, mut pack) = builder.consensus(consensus).build().unwrap(); let genesis = shared .store() @@ -281,19 +326,32 @@ fn test_find_fork_case4() { for _ in 0..2 { fork2.gen_empty_block_with_diff(80u64, &mock_store); } + let proposal_table = ProposalTable::new(consensus.tx_proposal_window()); + let (verify_failed_blocks_tx, _verify_failed_blocks_rx) = + tokio::sync::mpsc::unbounded_channel::(); + + let mut consume_unverified_block_processor = ConsumeUnverifiedBlockProcessor { + shared, + proposal_table, + verify_failed_blocks_tx, + }; // fork1 total_difficulty 200 for blk in fork1.blocks() { - chain_controller - .blocking_process_block_with_switch(Arc::new(blk.clone()), Switch::DISABLE_ALL) - .unwrap(); + consume_unverified_block( + &mut consume_unverified_block_processor, + blk, + Switch::DISABLE_ALL, + ); } // fork2 total_difficulty 160 for blk in fork2.blocks() { - 
chain_controller - .blocking_process_block_with_switch(Arc::new(blk.clone()), Switch::DISABLE_ALL) - .unwrap(); + consume_unverified_block( + &mut consume_unverified_block_processor, + blk, + Switch::DISABLE_ALL, + ); } let tip_number = { shared.snapshot().tip_number() }; @@ -314,7 +372,7 @@ fn test_find_fork_case4() { let mut fork = ForkChanges::default(); - _chain_service_clone.find_fork(&mut fork, tip_number, fork2.tip(), ext); + consume_unverified_block_processor.find_fork(&mut fork, tip_number, fork2.tip(), ext); let detached_blocks: HashSet = fork1.blocks().clone().into_iter().collect(); let attached_blocks: HashSet = fork2.blocks().clone().into_iter().collect(); @@ -331,8 +389,9 @@ fn test_find_fork_case4() { // this case is create for issuse from https://github.com/nervosnetwork/ckb/pull/1470 #[test] fn repeatedly_switch_fork() { - let (shared, _) = SharedBuilder::with_temp_db() - .consensus(Consensus::default()) + let consensus = Consensus::default(); + let (shared, mut pack) = SharedBuilder::with_temp_db() + .consensus(consensus) .build() .unwrap(); let genesis = shared @@ -343,16 +402,7 @@ fn repeatedly_switch_fork() { let mut fork1 = MockChain::new(genesis.clone(), shared.consensus()); let mut fork2 = MockChain::new(genesis, shared.consensus()); - let (shared, mut pack) = SharedBuilder::with_temp_db() - .consensus(Consensus::default()) - .build() - .unwrap(); - let mut _chain_service = ChainService::new( - shared.clone(), - pack.take_proposal_table(), - pack.take_verify_failed_block_tx(), - ); - let chain_controller = _chain_service.start(Some("repeatedly_switch_fork::ChainService")); + let chain_controller = pack.take_chain_services_builder().start(); for _ in 0..2 { fork1.gen_empty_block_with_nonce(1u128, &mock_store); @@ -361,17 +411,30 @@ fn repeatedly_switch_fork() { for _ in 0..2 { fork2.gen_empty_block_with_nonce(2u128, &mock_store); } + let proposal_table = ProposalTable::new(consensus.tx_proposal_window()); + let (verify_failed_blocks_tx, 
_verify_failed_blocks_rx) = + tokio::sync::mpsc::unbounded_channel::(); + + let mut consume_unverified_block_processor = ConsumeUnverifiedBlockProcessor { + shared, + proposal_table, + verify_failed_blocks_tx, + }; for blk in fork1.blocks() { - chain_controller - .blocking_process_block_with_switch(Arc::new(blk.clone()), Switch::DISABLE_ALL) - .unwrap(); + consume_unverified_block( + &mut consume_unverified_block_processor, + blk, + Switch::DISABLE_ALL, + ); } for blk in fork2.blocks() { - chain_controller - .blocking_process_block_with_switch(Arc::new(blk.clone()), Switch::DISABLE_ALL) - .unwrap(); + consume_unverified_block( + &mut consume_unverified_block_processor, + blk, + Switch::DISABLE_ALL, + ); } //switch fork1 @@ -478,12 +541,7 @@ fn test_fork_proposal_table() { }; let (shared, mut pack) = builder.consensus(consensus).build().unwrap(); - let mut _chain_service = ChainService::new( - shared.clone(), - pack.take_proposal_table(), - pack.take_verify_failed_block_tx(), - ); - let chain_controller = _chain_service.start(Some("test_fork_proposal_table::ChainService")); + let chain_controller = pack.take_chain_services_builder().start(); let genesis = shared .store() From 0b66be30f07423961b9f2f316a512ee03a7bfc4c Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Sun, 19 Nov 2023 15:06:28 +0800 Subject: [PATCH 169/357] Use `pack.take_chain_services_builder` to construct chain_controller for `ckb-chain`'s unit tests --- chain/src/tests/basic.rs | 3 ++- chain/src/tests/block_assembler.rs | 11 ++++------- chain/src/tests/orphan_block_pool.rs | 2 +- chain/src/tests/truncate.rs | 8 +------- chain/src/tests/uncle.rs | 10 ++-------- chain/src/tests/util.rs | 9 ++------- 6 files changed, 12 insertions(+), 31 deletions(-) diff --git a/chain/src/tests/basic.rs b/chain/src/tests/basic.rs index 3f1b4cb673..b264bb0204 100644 --- a/chain/src/tests/basic.rs +++ b/chain/src/tests/basic.rs @@ -1,5 +1,6 @@ -use crate::chain::{ChainController, VerifiedBlockStatus}; +use 
crate::chain::ChainController; use crate::tests::util::start_chain; +use crate::VerifiedBlockStatus; use ckb_chain_spec::consensus::{Consensus, ConsensusBuilder}; use ckb_dao_utils::genesis_dao_data; use ckb_error::assert_error_eq; diff --git a/chain/src/tests/block_assembler.rs b/chain/src/tests/block_assembler.rs index 6b6dc46eb3..e9aabf1f72 100644 --- a/chain/src/tests/block_assembler.rs +++ b/chain/src/tests/block_assembler.rs @@ -1,5 +1,5 @@ -use crate::chain::{ChainController, ChainService}; use crate::tests::util::dummy_network; +use crate::{ChainController, ChainServicesBuilder}; use ckb_app_config::BlockAssemblerConfig; use ckb_chain_spec::consensus::Consensus; use ckb_dao_utils::genesis_dao_data; @@ -47,12 +47,9 @@ fn start_chain(consensus: Option) -> (ChainController, Shared) { let network = dummy_network(&shared); pack.take_tx_pool_builder().start(network); - let _chain_service = ChainService::new( - shared.clone(), - pack.take_proposal_table(), - pack.take_verify_failed_block_tx(), - ); - let chain_controller = _chain_service.start::<&str>(None); + let chain_services_builder: ChainServicesBuilder = pack.take_chain_services_builder(); + let chain_controller: ChainController = chain_services_builder.start(); + (chain_controller, shared) } diff --git a/chain/src/tests/orphan_block_pool.rs b/chain/src/tests/orphan_block_pool.rs index 7616cf78c9..83736cc581 100644 --- a/chain/src/tests/orphan_block_pool.rs +++ b/chain/src/tests/orphan_block_pool.rs @@ -1,4 +1,4 @@ -use crate::chain::{LonelyBlock, LonelyBlockWithCallback}; +use crate::{LonelyBlock, LonelyBlockWithCallback}; use ckb_chain_spec::consensus::ConsensusBuilder; use ckb_systemtime::unix_time_as_millis; use ckb_types::core::{BlockBuilder, BlockView, EpochNumberWithFraction, HeaderView}; diff --git a/chain/src/tests/truncate.rs b/chain/src/tests/truncate.rs index 4c55cb4770..30c42deec9 100644 --- a/chain/src/tests/truncate.rs +++ b/chain/src/tests/truncate.rs @@ -1,4 +1,3 @@ -use 
crate::chain::ChainService; use ckb_chain_spec::consensus::Consensus; use ckb_shared::SharedBuilder; use ckb_store::ChainStore; @@ -11,12 +10,7 @@ fn test_truncate() { let builder = SharedBuilder::with_temp_db(); let (shared, mut pack) = builder.consensus(Consensus::default()).build().unwrap(); - let mut _chain_service = ChainService::new( - shared.clone(), - pack.take_proposal_table(), - pack.take_verify_failed_block_tx(), - ); - let chain_controller = _chain_service.start(Some("test_truncate::ChainService")); + let chain_controller = pack.take_chain_services_builder().start(); let genesis = shared .store() diff --git a/chain/src/tests/uncle.rs b/chain/src/tests/uncle.rs index 407b695f60..3122038558 100644 --- a/chain/src/tests/uncle.rs +++ b/chain/src/tests/uncle.rs @@ -1,4 +1,3 @@ -use crate::chain::ChainService; use ckb_chain_spec::consensus::Consensus; use ckb_shared::SharedBuilder; use ckb_store::ChainStore; @@ -10,13 +9,8 @@ use std::sync::Arc; fn test_get_block_body_after_inserting() { let builder = SharedBuilder::with_temp_db(); let (shared, mut pack) = builder.consensus(Consensus::default()).build().unwrap(); - let mut _chain_service = ChainService::new( - shared.clone(), - pack.take_proposal_table(), - pack.take_verify_failed_block_tx(), - ); - let chain_controller = - _chain_service.start(Some("test_get_block_body_after_inserting::ChainService")); + let chain_controller = pack.take_chain_services_builder().start(); + let genesis = shared .store() .get_block_header(&shared.store().get_block_hash(0).unwrap()) diff --git a/chain/src/tests/util.rs b/chain/src/tests/util.rs index 547a8255c3..1c66093729 100644 --- a/chain/src/tests/util.rs +++ b/chain/src/tests/util.rs @@ -1,4 +1,4 @@ -use crate::chain::{ChainController, ChainService}; +use crate::ChainController; use ckb_app_config::TxPoolConfig; use ckb_app_config::{BlockAssemblerConfig, NetworkConfig}; use ckb_chain_spec::consensus::{Consensus, ConsensusBuilder}; @@ -85,12 +85,7 @@ pub(crate) fn 
start_chain_with_tx_pool_config( let network = dummy_network(&shared); pack.take_tx_pool_builder().start(network); - let _chain_service = ChainService::new( - shared.clone(), - pack.take_proposal_table(), - pack.take_verify_failed_block_tx(), - ); - let chain_controller = _chain_service.start::<&str>(Some("ckb_chain::tests::ChainService")); + let chain_controller = pack.take_chain_services_builder().start(); let parent = { let snapshot = shared.snapshot(); snapshot From 4e5dab039627a91af1613832e49f2dc5647887e3 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Sun, 19 Nov 2023 15:11:18 +0800 Subject: [PATCH 170/357] Fix `ChainController`'s import path in `ckb-sync` --- sync/src/relayer/mod.rs | 2 +- sync/src/synchronizer/mod.rs | 2 +- sync/src/tests/synchronizer/functions.rs | 2 +- sync/src/tests/util.rs | 2 +- sync/src/types/mod.rs | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/sync/src/relayer/mod.rs b/sync/src/relayer/mod.rs index 1085aa1f84..709d48c7e6 100644 --- a/sync/src/relayer/mod.rs +++ b/sync/src/relayer/mod.rs @@ -25,7 +25,7 @@ use crate::utils::{ is_internal_db_error, metric_ckb_message_bytes, send_message_to, MetricDirection, }; use crate::{Status, StatusCode}; -use ckb_chain::chain::ChainController; +use ckb_chain::ChainController; use ckb_chain::{VerifiedBlockStatus, VerifyResult}; use ckb_constant::sync::BAD_MESSAGE_BAN_TIME; use ckb_logger::{debug_target, error, error_target, info_target, trace_target, warn_target}; diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index b7e8cfab08..50f681c817 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -25,7 +25,7 @@ use crate::utils::{metric_ckb_message_bytes, send_message_to, MetricDirection}; use crate::{Status, StatusCode}; use ckb_shared::block_status::BlockStatus; -use ckb_chain::chain::ChainController; +use ckb_chain::ChainController; use ckb_channel as channel; use ckb_channel::{select, Receiver}; use ckb_constant::sync::{ diff 
--git a/sync/src/tests/synchronizer/functions.rs b/sync/src/tests/synchronizer/functions.rs index d03962fb5a..dc4acbcea8 100644 --- a/sync/src/tests/synchronizer/functions.rs +++ b/sync/src/tests/synchronizer/functions.rs @@ -1,4 +1,4 @@ -use ckb_chain::chain::{ChainController, ChainService}; +use ckb_chain::ChainController; use ckb_chain_spec::consensus::{Consensus, ConsensusBuilder}; use ckb_constant::sync::{CHAIN_SYNC_TIMEOUT, EVICTION_HEADERS_RESPONSE_TIME, MAX_TIP_AGE}; use ckb_dao::DaoCalculator; diff --git a/sync/src/tests/util.rs b/sync/src/tests/util.rs index d2f0224096..4992952472 100644 --- a/sync/src/tests/util.rs +++ b/sync/src/tests/util.rs @@ -1,5 +1,5 @@ use crate::SyncShared; -use ckb_chain::chain::{ChainController, ChainService}; +use ckb_chain::ChainController; use ckb_dao::DaoCalculator; use ckb_reward_calculator::RewardCalculator; use ckb_shared::{Shared, SharedBuilder, Snapshot}; diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index 6525a071a1..08df411e02 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -1,6 +1,6 @@ use crate::{Status, StatusCode, FAST_INDEX, LOW_INDEX, NORMAL_INDEX, TIME_TRACE_SIZE}; use ckb_app_config::SyncConfig; -use ckb_chain::chain::ChainController; +use ckb_chain::ChainController; use ckb_chain::{LonelyBlock, VerifyCallback}; use ckb_chain_spec::consensus::{Consensus, MAX_BLOCK_INTERVAL, MIN_BLOCK_INTERVAL}; use ckb_channel::Receiver; From 984bed3a262ba53b5ca40a205cc44010b550a7a6 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Sun, 19 Nov 2023 15:27:22 +0800 Subject: [PATCH 171/357] Fix `ChainController`'s import path in `ckb-verification`, `ckb-sync`, `ckb-rpc`, `ckb-benches` --- benches/benches/benchmarks/overall.rs | 2 +- benches/benches/benchmarks/resolve.rs | 2 +- benches/benches/benchmarks/util.rs | 2 +- ckb-bin/src/subcommand/replay.rs | 2 +- rpc/src/module/miner.rs | 2 +- rpc/src/module/net.rs | 2 +- rpc/src/module/test.rs | 2 +- rpc/src/service_builder.rs | 2 +- rpc/src/tests/mod.rs | 
2 +- shared/src/shared_builder.rs | 2 +- util/instrument/src/import.rs | 2 +- util/launcher/src/lib.rs | 2 +- util/light-client-protocol-server/src/tests/utils/chain.rs | 3 ++- verification/contextual/src/tests/contextual_block_verifier.rs | 2 +- verification/contextual/src/tests/uncle_verifier.rs | 2 +- 15 files changed, 16 insertions(+), 15 deletions(-) diff --git a/benches/benches/benchmarks/overall.rs b/benches/benches/benchmarks/overall.rs index 93552d25c4..220155189b 100644 --- a/benches/benches/benchmarks/overall.rs +++ b/benches/benches/benchmarks/overall.rs @@ -1,7 +1,7 @@ use crate::benchmarks::util::{create_2out_transaction, create_secp_tx, secp_cell}; use ckb_app_config::NetworkConfig; use ckb_app_config::{BlockAssemblerConfig, TxPoolConfig}; -use ckb_chain::chain::{ChainController, ChainService}; +use ckb_chain::ChainController; use ckb_chain_spec::consensus::{ConsensusBuilder, ProposalWindow}; use ckb_dao_utils::genesis_dao_data; use ckb_jsonrpc_types::JsonBytes; diff --git a/benches/benches/benchmarks/resolve.rs b/benches/benches/benchmarks/resolve.rs index 0c7a6d0502..d635c2374c 100644 --- a/benches/benches/benchmarks/resolve.rs +++ b/benches/benches/benchmarks/resolve.rs @@ -1,6 +1,6 @@ use crate::benchmarks::util::create_2out_transaction; use ckb_app_config::{BlockAssemblerConfig, TxPoolConfig}; -use ckb_chain::chain::{ChainController, ChainService}; +use ckb_chain::ChainController; use ckb_chain_spec::{ChainSpec, IssuedCell}; use ckb_jsonrpc_types::JsonBytes; use ckb_resource::Resource; diff --git a/benches/benches/benchmarks/util.rs b/benches/benches/benchmarks/util.rs index 5cf30676bc..60629696dd 100644 --- a/benches/benches/benchmarks/util.rs +++ b/benches/benches/benchmarks/util.rs @@ -1,4 +1,4 @@ -use ckb_chain::chain::{ChainController, ChainService}; +use ckb_chain::ChainController; use ckb_chain_spec::consensus::{ConsensusBuilder, ProposalWindow}; use ckb_crypto::secp::Privkey; use ckb_dao::DaoCalculator; diff --git 
a/ckb-bin/src/subcommand/replay.rs b/ckb-bin/src/subcommand/replay.rs index 7295214101..26f82db39d 100644 --- a/ckb-bin/src/subcommand/replay.rs +++ b/ckb-bin/src/subcommand/replay.rs @@ -1,6 +1,6 @@ use ckb_app_config::{ExitCode, ReplayArgs}; use ckb_async_runtime::Handle; -use ckb_chain::chain::{ChainController, ChainService}; +use ckb_chain::ChainController; use ckb_chain_iter::ChainIterator; use ckb_instrument::{ProgressBar, ProgressStyle}; use ckb_shared::{Shared, SharedBuilder}; diff --git a/rpc/src/module/miner.rs b/rpc/src/module/miner.rs index 60d57187e9..99fcd08d2d 100644 --- a/rpc/src/module/miner.rs +++ b/rpc/src/module/miner.rs @@ -1,6 +1,6 @@ use crate::error::RPCError; use async_trait::async_trait; -use ckb_chain::chain::ChainController; +use ckb_chain::ChainController; use ckb_jsonrpc_types::{Block, BlockTemplate, Uint64, Version}; use ckb_logger::{debug, error, warn}; use ckb_network::{NetworkController, PeerIndex, SupportProtocols, TargetSession}; diff --git a/rpc/src/module/net.rs b/rpc/src/module/net.rs index 62e4ef0c34..be994cd5f9 100644 --- a/rpc/src/module/net.rs +++ b/rpc/src/module/net.rs @@ -1,6 +1,6 @@ use crate::error::RPCError; use async_trait::async_trait; -use ckb_chain::chain::ChainController; +use ckb_chain::ChainController; use ckb_jsonrpc_types::{ BannedAddr, LocalNode, LocalNodeProtocol, NodeAddress, PeerSyncState, RemoteNode, RemoteNodeProtocol, SyncState, Timestamp, diff --git a/rpc/src/module/test.rs b/rpc/src/module/test.rs index df08d926a0..20687d5e93 100644 --- a/rpc/src/module/test.rs +++ b/rpc/src/module/test.rs @@ -1,6 +1,6 @@ use crate::error::RPCError; use async_trait::async_trait; -use ckb_chain::chain::ChainController; +use ckb_chain::ChainController; use ckb_chain::VerifyResult; use ckb_dao::DaoCalculator; use ckb_jsonrpc_types::{Block, BlockTemplate, Byte32, EpochNumberWithFraction, Transaction}; diff --git a/rpc/src/service_builder.rs b/rpc/src/service_builder.rs index 3577b4d9c5..09de734c88 100644 --- 
a/rpc/src/service_builder.rs +++ b/rpc/src/service_builder.rs @@ -9,7 +9,7 @@ use crate::module::{ }; use crate::{IoHandler, RPCError}; use ckb_app_config::{DBConfig, IndexerConfig, RpcConfig}; -use ckb_chain::chain::ChainController; +use ckb_chain::ChainController; use ckb_indexer::IndexerService; use ckb_network::NetworkController; use ckb_network_alert::{notifier::Notifier as AlertNotifier, verifier::Verifier as AlertVerifier}; diff --git a/rpc/src/tests/mod.rs b/rpc/src/tests/mod.rs index b59897bd22..5b3017d5d5 100644 --- a/rpc/src/tests/mod.rs +++ b/rpc/src/tests/mod.rs @@ -1,4 +1,4 @@ -use ckb_chain::chain::ChainController; +use ckb_chain::ChainController; use ckb_chain_spec::consensus::Consensus; use ckb_dao::DaoCalculator; use ckb_reward_calculator::RewardCalculator; diff --git a/shared/src/shared_builder.rs b/shared/src/shared_builder.rs index 245b196780..2bdc945ad4 100644 --- a/shared/src/shared_builder.rs +++ b/shared/src/shared_builder.rs @@ -35,7 +35,7 @@ use ckb_shared::{HeaderMap, Shared}; use ckb_snapshot::{Snapshot, SnapshotMgr}; use ckb_util::Mutex; -use ckb_chain::chain::{ChainService, ChainServicesBuilder}; +use ckb_chain::ChainServicesBuilder; use ckb_shared::types::VerifyFailedBlockInfo; use ckb_store::ChainDB; use ckb_store::ChainStore; diff --git a/util/instrument/src/import.rs b/util/instrument/src/import.rs index c18bec1fbc..3861f811de 100644 --- a/util/instrument/src/import.rs +++ b/util/instrument/src/import.rs @@ -1,4 +1,4 @@ -use ckb_chain::chain::ChainController; +use ckb_chain::ChainController; use ckb_jsonrpc_types::BlockView as JsonBlock; use ckb_types::core; #[cfg(feature = "progress_bar")] diff --git a/util/launcher/src/lib.rs b/util/launcher/src/lib.rs index 9e3edb6794..efb43f6b5d 100644 --- a/util/launcher/src/lib.rs +++ b/util/launcher/src/lib.rs @@ -8,7 +8,7 @@ use ckb_app_config::{ use ckb_async_runtime::Handle; use ckb_block_filter::filter::BlockFilter as BlockFilterService; use ckb_build_info::Version; -use 
ckb_chain::chain::{ChainController, ChainService, ChainServicesBuilder}; +use ckb_chain::{ChainController, ChainServicesBuilder}; use ckb_channel::Receiver; use ckb_jsonrpc_types::ScriptHashType; use ckb_light_client_protocol_server::LightClientProtocol; diff --git a/util/light-client-protocol-server/src/tests/utils/chain.rs b/util/light-client-protocol-server/src/tests/utils/chain.rs index bfd4293780..c4283455a9 100644 --- a/util/light-client-protocol-server/src/tests/utils/chain.rs +++ b/util/light-client-protocol-server/src/tests/utils/chain.rs @@ -4,7 +4,8 @@ use std::{ }; use ckb_app_config::{BlockAssemblerConfig, NetworkConfig}; -use ckb_chain::chain::{ChainController, ChainService, VerifiedBlockStatus}; +use ckb_chain::ChainController; +use ckb_chain::VerifiedBlockStatus; use ckb_chain_spec::consensus::{build_genesis_epoch_ext, ConsensusBuilder}; use ckb_dao_utils::genesis_dao_data; use ckb_jsonrpc_types::ScriptHashType; diff --git a/verification/contextual/src/tests/contextual_block_verifier.rs b/verification/contextual/src/tests/contextual_block_verifier.rs index ea85f7129b..18052c5e82 100644 --- a/verification/contextual/src/tests/contextual_block_verifier.rs +++ b/verification/contextual/src/tests/contextual_block_verifier.rs @@ -1,6 +1,6 @@ use super::super::contextual_block_verifier::{EpochVerifier, TwoPhaseCommitVerifier}; use crate::contextual_block_verifier::{RewardVerifier, VerifyContext}; -use ckb_chain::chain::{ChainController, ChainService}; +use ckb_chain::ChainController; use ckb_chain_spec::consensus::{Consensus, ConsensusBuilder}; use ckb_error::assert_error_eq; use ckb_shared::{Shared, SharedBuilder}; diff --git a/verification/contextual/src/tests/uncle_verifier.rs b/verification/contextual/src/tests/uncle_verifier.rs index d77e0ab2bd..479d9ff526 100644 --- a/verification/contextual/src/tests/uncle_verifier.rs +++ b/verification/contextual/src/tests/uncle_verifier.rs @@ -2,7 +2,7 @@ use 
crate::contextual_block_verifier::{UncleVerifierContext, VerifyContext}; use crate::uncles_verifier::UnclesVerifier; -use ckb_chain::chain::{ChainController, ChainService}; +use ckb_chain::ChainController; use ckb_chain_spec::consensus::Consensus; use ckb_error::assert_error_eq; use ckb_shared::{Shared, SharedBuilder}; From ee40e93d95c5be4e322452a380fe9c32e82ce77f Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Sun, 19 Nov 2023 15:31:15 +0800 Subject: [PATCH 172/357] Fix start `ckb-chain`'s services by `pack.take_chain_services_builder` --- benches/benches/benchmarks/overall.rs | 7 +------ benches/benches/benchmarks/resolve.rs | 7 +------ benches/benches/benchmarks/util.rs | 16 ++++------------ sync/src/relayer/tests/helper.rs | 6 +----- sync/src/tests/sync_shared.rs | 6 +----- sync/src/tests/synchronizer/basic_sync.rs | 3 +-- sync/src/tests/synchronizer/functions.rs | 3 +-- sync/src/tests/util.rs | 5 +---- util/launcher/src/lib.rs | 1 - .../src/tests/utils/chain.rs | 9 +-------- .../src/tests/contextual_block_verifier.rs | 7 +------ .../contextual/src/tests/uncle_verifier.rs | 9 ++------- 12 files changed, 15 insertions(+), 64 deletions(-) diff --git a/benches/benches/benchmarks/overall.rs b/benches/benches/benchmarks/overall.rs index 220155189b..8d1057c6a8 100644 --- a/benches/benches/benchmarks/overall.rs +++ b/benches/benches/benchmarks/overall.rs @@ -132,12 +132,7 @@ pub fn setup_chain(txs_size: usize) -> (Shared, ChainController) { let network = dummy_network(&shared); pack.take_tx_pool_builder().start(network); - let chain_service = ChainService::new( - shared.clone(), - pack.take_proposal_table(), - pack.take_verify_failed_block_tx(), - ); - let chain_controller = chain_service.start(Some("ChainService")); + let chain_controller = pack.take_chain_services_builder().start(); (shared, chain_controller) } diff --git a/benches/benches/benchmarks/resolve.rs b/benches/benches/benchmarks/resolve.rs index d635c2374c..43bb8d72e5 100644 --- 
a/benches/benches/benchmarks/resolve.rs +++ b/benches/benches/benchmarks/resolve.rs @@ -96,12 +96,7 @@ pub fn setup_chain(txs_size: usize) -> (Shared, ChainController) { .tx_pool_config(tx_pool_config) .build() .unwrap(); - let chain_service = ChainService::new( - shared.clone(), - pack.take_proposal_table(), - pack.take_verify_failed_block_tx(), - ); - let chain_controller = chain_service.start(Some("ChainService")); + let chain_controller = pack.take_chain_services_builder().start(); // FIXME: global cache !!! let _ret = setup_system_cell_cache( diff --git a/benches/benches/benchmarks/util.rs b/benches/benches/benchmarks/util.rs index 60629696dd..44e9ab5e28 100644 --- a/benches/benches/benchmarks/util.rs +++ b/benches/benches/benchmarks/util.rs @@ -78,13 +78,9 @@ pub fn new_always_success_chain(txs_size: usize, chains_num: usize) -> Chains { .consensus(consensus.clone()) .build() .unwrap(); - let chain_service = ChainService::new( - shared.clone(), - pack.take_proposal_table(), - pack.take_verify_failed_block_tx(), - ); + let chain_controller = pack.take_chain_services_builder().start(); - chains.push((chain_service.start::<&str>(None), shared)); + chains.push((chain_controller, shared)); } chains @@ -300,13 +296,9 @@ pub fn new_secp_chain(txs_size: usize, chains_num: usize) -> Chains { .consensus(consensus.clone()) .build() .unwrap(); - let chain_service = ChainService::new( - shared.clone(), - pack.take_proposal_table(), - pack.take_verify_failed_block_tx(), - ); + let chain_controller = pack.take_chain_services_builder().start(); - chains.push((chain_service.start::<&str>(None), shared)); + chains.push((chain_controller, shared)); } chains diff --git a/sync/src/relayer/tests/helper.rs b/sync/src/relayer/tests/helper.rs index b423b6225c..ccfe934f26 100644 --- a/sync/src/relayer/tests/helper.rs +++ b/sync/src/relayer/tests/helper.rs @@ -1,6 +1,5 @@ use crate::{Relayer, SyncShared}; use ckb_app_config::NetworkConfig; -use ckb_chain::chain::ChainService; use 
ckb_chain_spec::consensus::{build_genesis_epoch_ext, ConsensusBuilder}; use ckb_dao::DaoCalculator; use ckb_dao_utils::genesis_dao_data; @@ -171,10 +170,7 @@ pub(crate) fn build_chain(tip: BlockNumber) -> (Relayer, OutPoint) { let network = dummy_network(&shared); pack.take_tx_pool_builder().start(network); - let chain_controller = { - let chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); - chain_service.start::<&str>(None) - }; + let chain_controller = pack.take_chain_services_builder().start(); // Build 1 ~ (tip-1) heights for i in 0..tip { diff --git a/sync/src/tests/sync_shared.rs b/sync/src/tests/sync_shared.rs index 1f37d110cb..038787b11c 100644 --- a/sync/src/tests/sync_shared.rs +++ b/sync/src/tests/sync_shared.rs @@ -1,6 +1,5 @@ use crate::tests::util::{build_chain, inherit_block}; use crate::SyncShared; -use ckb_chain::chain::ChainService; use ckb_shared::block_status::BlockStatus; use ckb_shared::SharedBuilder; use ckb_store::{self, ChainStore}; @@ -54,10 +53,7 @@ fn test_insert_parent_unknown_block() { .consensus(shared1.consensus().clone()) .build() .unwrap(); - let chain_controller = { - let chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); - chain_service.start::<&str>(None) - }; + let chain_controller = pack.take_chain_services_builder().start(); ( SyncShared::new(shared, Default::default(), pack.take_relay_tx_receiver()), chain_controller, diff --git a/sync/src/tests/synchronizer/basic_sync.rs b/sync/src/tests/synchronizer/basic_sync.rs index e1c705a98b..00061704f7 100644 --- a/sync/src/tests/synchronizer/basic_sync.rs +++ b/sync/src/tests/synchronizer/basic_sync.rs @@ -99,8 +99,7 @@ fn setup_node(height: u64) -> (TestNode, Shared) { .build() .unwrap(); - let chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); - let chain_controller = chain_service.start::<&str>(None); + let chain_controller = pack.take_chain_services_builder().start(); for _i in 0..height { let 
number = block.header().number() + 1; diff --git a/sync/src/tests/synchronizer/functions.rs b/sync/src/tests/synchronizer/functions.rs index dc4acbcea8..39c6031b6f 100644 --- a/sync/src/tests/synchronizer/functions.rs +++ b/sync/src/tests/synchronizer/functions.rs @@ -49,8 +49,7 @@ fn start_chain(consensus: Option) -> (ChainController, Shared, Synchr let (shared, mut pack) = builder.build().unwrap(); - let chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); - let chain_controller = chain_service.start::<&str>(None); + let chain_controller = pack.take_chain_services_builder().start(); let sync_shared = Arc::new(SyncShared::new( shared.clone(), diff --git a/sync/src/tests/util.rs b/sync/src/tests/util.rs index 4992952472..63fa305b83 100644 --- a/sync/src/tests/util.rs +++ b/sync/src/tests/util.rs @@ -19,10 +19,7 @@ pub fn build_chain(tip: BlockNumber) -> (SyncShared, ChainController) { .consensus(always_success_consensus()) .build() .unwrap(); - let chain_controller = { - let chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); - chain_service.start::<&str>(None) - }; + let chain_controller = pack.take_chain_services_builder().start(); generate_blocks(&shared, &chain_controller, tip); let sync_shared = SyncShared::new(shared, Default::default(), pack.take_relay_tx_receiver()); (sync_shared, chain_controller) diff --git a/util/launcher/src/lib.rs b/util/launcher/src/lib.rs index efb43f6b5d..9ca6419a64 100644 --- a/util/launcher/src/lib.rs +++ b/util/launcher/src/lib.rs @@ -18,7 +18,6 @@ use ckb_network::{ NetworkState, SupportProtocols, }; use ckb_network_alert::alert_relayer::AlertRelayer; -use ckb_proposal_table::ProposalTable; use ckb_resource::Resource; use ckb_rpc::RpcServer; use ckb_rpc::ServiceBuilder; diff --git a/util/light-client-protocol-server/src/tests/utils/chain.rs b/util/light-client-protocol-server/src/tests/utils/chain.rs index c4283455a9..83cb1e2030 100644 --- 
a/util/light-client-protocol-server/src/tests/utils/chain.rs +++ b/util/light-client-protocol-server/src/tests/utils/chain.rs @@ -88,14 +88,7 @@ impl MockChain { let network = dummy_network(&shared); pack.take_tx_pool_builder().start(network); - let chain_service = ChainService::new( - shared.clone(), - pack.take_proposal_table(), - pack.take_verify_failed_block_tx(), - ); - let chain_controller = chain_service.start::<&str>(Some( - "ckb-light-client-protocol-server::tests::ChainService", - )); + let chain_controller = pack.take_chain_services_builder().start(); Self { chain_controller, diff --git a/verification/contextual/src/tests/contextual_block_verifier.rs b/verification/contextual/src/tests/contextual_block_verifier.rs index 18052c5e82..b906667e95 100644 --- a/verification/contextual/src/tests/contextual_block_verifier.rs +++ b/verification/contextual/src/tests/contextual_block_verifier.rs @@ -83,12 +83,7 @@ fn start_chain(consensus: Option) -> (ChainController, Shared) { } let (shared, mut pack) = builder.build().unwrap(); - let chain_service = ChainService::new( - shared.clone(), - pack.take_proposal_table(), - pack.take_verify_failed_block_tx(), - ); - let chain_controller = chain_service.start::<&str>(None); + let chain_controller = pack.take_chain_services_builder().start(); (chain_controller, shared) } diff --git a/verification/contextual/src/tests/uncle_verifier.rs b/verification/contextual/src/tests/uncle_verifier.rs index 479d9ff526..f517f603fe 100644 --- a/verification/contextual/src/tests/uncle_verifier.rs +++ b/verification/contextual/src/tests/uncle_verifier.rs @@ -43,13 +43,8 @@ fn start_chain(consensus: Option) -> (ChainController, Shared) { } let (shared, mut pack) = builder.build().unwrap(); - let chain_service = ChainService::new( - shared.clone(), - pack.take_proposal_table(), - pack.take_verify_failed_block_tx(), - ); - let chain_controller = - chain_service.start::<&str>(Some("ckb-verification::tests::ChainService")); + let 
chain_controller = pack.take_chain_services_builder().start(); + (chain_controller, shared) } From f908e5791ea57fff05975b02c3de0866edbed00f Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Sun, 19 Nov 2023 15:44:24 +0800 Subject: [PATCH 173/357] Unit test: process block by blocking way --- rpc/src/tests/module/miner.rs | 2 +- sync/src/tests/synchronizer/basic_sync.rs | 1 - sync/src/tests/synchronizer/functions.rs | 7 +++++-- util/instrument/src/import.rs | 2 +- 4 files changed, 7 insertions(+), 5 deletions(-) diff --git a/rpc/src/tests/module/miner.rs b/rpc/src/tests/module/miner.rs index 44c144e8e1..1ee9a85c50 100644 --- a/rpc/src/tests/module/miner.rs +++ b/rpc/src/tests/module/miner.rs @@ -37,7 +37,7 @@ fn test_get_block_template_cache() { .build(); suite .chain_controller - .process_block(Arc::new(fork_block)) + .blocking_process_block(Arc::new(fork_block)) .expect("processing new block should be ok"); assert_eq!(response_old.result["uncles"].to_string(), "[]"); diff --git a/sync/src/tests/synchronizer/basic_sync.rs b/sync/src/tests/synchronizer/basic_sync.rs index 00061704f7..da22b2d752 100644 --- a/sync/src/tests/synchronizer/basic_sync.rs +++ b/sync/src/tests/synchronizer/basic_sync.rs @@ -4,7 +4,6 @@ use crate::synchronizer::{ }; use crate::tests::TestNode; use crate::{SyncShared, Synchronizer}; -use ckb_chain::chain::ChainService; use ckb_chain_spec::consensus::ConsensusBuilder; use ckb_channel::bounded; use ckb_dao::DaoCalculator; diff --git a/sync/src/tests/synchronizer/functions.rs b/sync/src/tests/synchronizer/functions.rs index 39c6031b6f..4644a6f6b2 100644 --- a/sync/src/tests/synchronizer/functions.rs +++ b/sync/src/tests/synchronizer/functions.rs @@ -137,7 +137,7 @@ fn insert_block( let block = gen_block(shared, &parent, &epoch, nonce); chain_controller - .process_block(Arc::new(block)) + .blocking_process_block(Arc::new(block)) .expect("process block ok"); } @@ -1084,7 +1084,10 @@ fn test_fix_last_common_header() { for number in 1..=main_tip_number 
{ let key = m_(number); let block = graph.get(&key).cloned().unwrap(); - synchronizer.chain.process_block(Arc::new(block)).unwrap(); + synchronizer + .chain + .blocking_process_block(Arc::new(block)) + .unwrap(); } { let nc = mock_network_context(1); diff --git a/util/instrument/src/import.rs b/util/instrument/src/import.rs index 3861f811de..70500f2913 100644 --- a/util/instrument/src/import.rs +++ b/util/instrument/src/import.rs @@ -39,7 +39,7 @@ impl Import { let block: Arc = Arc::new(block.into()); if !block.is_genesis() { self.chain - .process_block(block) + .blocking_process_block(block) .expect("import occur malformation data"); } } From 703b7a9cf7249ebc48f5781ddb028104056f1891 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Sun, 19 Nov 2023 15:56:57 +0800 Subject: [PATCH 174/357] Fix `ckb-sync` unit tests: Synchronizer need verify_failed_block_rx --- sync/src/tests/synchronizer/basic_sync.rs | 6 +++++- sync/src/tests/synchronizer/functions.rs | 12 ++++++++++-- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/sync/src/tests/synchronizer/basic_sync.rs b/sync/src/tests/synchronizer/basic_sync.rs index da22b2d752..1fc266c778 100644 --- a/sync/src/tests/synchronizer/basic_sync.rs +++ b/sync/src/tests/synchronizer/basic_sync.rs @@ -167,7 +167,11 @@ fn setup_node(height: u64) -> (TestNode, Shared) { Default::default(), pack.take_relay_tx_receiver(), )); - let synchronizer = Synchronizer::new(chain_controller, sync_shared); + let synchronizer = Synchronizer::new( + chain_controller, + sync_shared, + pack.take_verify_failed_block_rx(), + ); let mut node = TestNode::new(); let protocol = Arc::new(RwLock::new(synchronizer)) as Arc<_>; node.add_protocol( diff --git a/sync/src/tests/synchronizer/functions.rs b/sync/src/tests/synchronizer/functions.rs index 4644a6f6b2..94c2456dbe 100644 --- a/sync/src/tests/synchronizer/functions.rs +++ b/sync/src/tests/synchronizer/functions.rs @@ -56,7 +56,11 @@ fn start_chain(consensus: Option) -> (ChainController, Shared, 
Synchr Default::default(), pack.take_relay_tx_receiver(), )); - let synchronizer = Synchronizer::new(chain_controller.clone(), sync_shared); + let synchronizer = Synchronizer::new( + chain_controller.clone(), + sync_shared, + pack.take_verify_failed_block_rx(), + ); (chain_controller, shared, synchronizer) } @@ -1225,7 +1229,11 @@ fn test_internal_db_error() { InternalErrorKind::Database.other("mocked db error").into(), )); - let synchronizer = Synchronizer::new(chain_controller, sync_shared); + let synchronizer = Synchronizer::new( + chain_controller, + sync_shared, + pack.take_verify_failed_block_rx(), + ); let status = synchronizer .shared() From 856f93bc8f0f78a96363f4111ad77a173e9904d8 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 20 Nov 2023 16:40:45 +0800 Subject: [PATCH 175/357] Rename `VerifiedBlockStatus` variants to a more meaningfull name --- chain/src/consume_orphan.rs | 2 +- chain/src/consume_unverified.rs | 2 +- chain/src/lib.rs | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/chain/src/consume_orphan.rs b/chain/src/consume_orphan.rs index c634c2ca7a..3517878a0f 100644 --- a/chain/src/consume_orphan.rs +++ b/chain/src/consume_orphan.rs @@ -177,7 +177,7 @@ impl ConsumeOrphan { descendant_block.block().hash() ); let verify_result: VerifyResult = - Ok(VerifiedBlockStatus::PreviouslyUnVerified); + Ok(VerifiedBlockStatus::PreviouslySeenButNotVerified); descendant_block.execute_callback(verify_result); } }, diff --git a/chain/src/consume_unverified.rs b/chain/src/consume_unverified.rs index b7ca53c4a5..018871e6c7 100644 --- a/chain/src/consume_unverified.rs +++ b/chain/src/consume_unverified.rs @@ -204,7 +204,7 @@ impl ConsumeUnverifiedBlockProcessor { verified ); return if verified { - Ok(VerifiedBlockStatus::PreviouslyVerified) + Ok(VerifiedBlockStatus::PreviouslySeenAndVerified) } else { Err(InternalErrorKind::Other .other("block previously verified failed") diff --git a/chain/src/lib.rs b/chain/src/lib.rs index 
79257e3a86..3704567328 100644 --- a/chain/src/lib.rs +++ b/chain/src/lib.rs @@ -42,10 +42,10 @@ pub enum VerifiedBlockStatus { FirstSeenButNotVerified, // The block has been verified before. - PreviouslyVerified, + PreviouslySeenAndVerified, - // The block has been verified before, but not veriify it yet - PreviouslyUnVerified, + // The block is being seen before, but not verify it yet + PreviouslySeenButNotVerified, } #[derive(Clone)] From 4e9b42fb02b43ab4339fd5130a76ff15b27a99e1 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 21 Nov 2023 16:16:10 +0800 Subject: [PATCH 176/357] Extract `ChainServicesBuilder` start method as an independent function --- chain/src/chain.rs | 159 ++++++++++++++++++++++----------------------- 1 file changed, 79 insertions(+), 80 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index a804d3a5ef..9e7e194e1e 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -193,89 +193,88 @@ impl ChainServicesBuilder { verify_failed_blocks_tx, } } +} - pub fn start(self) -> ChainController { - let orphan_blocks_broker = Arc::new(OrphanBlockPool::with_capacity(ORPHAN_BLOCK_SIZE)); - - let (unverified_queue_stop_tx, unverified_queue_stop_rx) = ckb_channel::bounded::<()>(1); - let (unverified_tx, unverified_rx) = - channel::bounded::(BLOCK_DOWNLOAD_WINDOW as usize * 3); - - let consumer_unverified_thread = thread::Builder::new() - .name("consume_unverified_blocks".into()) - .spawn({ - let shared = self.shared.clone(); - let verify_failed_blocks_tx = self.verify_failed_blocks_tx.clone(); - move || { - let mut consume_unverified = ConsumeUnverifiedBlocks::new( - shared, - unverified_rx, - self.proposal_table, - verify_failed_blocks_tx, - unverified_queue_stop_rx, - ); - - consume_unverified.start(); - } - }) - .expect("start unverified_queue consumer thread should ok"); - - let (lonely_block_tx, lonely_block_rx) = - channel::bounded::(BLOCK_DOWNLOAD_WINDOW as usize); - - let (search_orphan_pool_stop_tx, 
search_orphan_pool_stop_rx) = - ckb_channel::bounded::<()>(1); - - let search_orphan_pool_thread = thread::Builder::new() - .name("consume_orphan_blocks".into()) - .spawn({ - let orphan_blocks_broker = orphan_blocks_broker.clone(); - let shared = self.shared.clone(); - use crate::consume_orphan::ConsumeOrphan; - let verify_failed_block_tx = self.verify_failed_blocks_tx.clone(); - move || { - let consume_orphan = ConsumeOrphan::new( - shared, - orphan_blocks_broker, - unverified_tx, - lonely_block_rx, - verify_failed_block_tx, - search_orphan_pool_stop_rx, - ); - consume_orphan.start(); - } - }) - .expect("start search_orphan_pool thread should ok"); - - let (process_block_tx, process_block_rx) = channel::bounded(BLOCK_DOWNLOAD_WINDOW as usize); - - let (truncate_block_tx, truncate_block_rx) = channel::bounded(1); - - let chain_service: ChainService = ChainService::new( - self.shared, - process_block_rx, - truncate_block_rx, - lonely_block_tx, - self.verify_failed_blocks_tx, - ); - let chain_service_thread = thread::Builder::new() - .name("ChainService".into()) - .spawn({ - move || { - chain_service.start(); - - search_orphan_pool_stop_tx.send(()); - search_orphan_pool_thread.join(); +pub fn start(builder: ChainServicesBuilder) -> ChainController { + let orphan_blocks_broker = Arc::new(OrphanBlockPool::with_capacity(ORPHAN_BLOCK_SIZE)); + + let (unverified_queue_stop_tx, unverified_queue_stop_rx) = ckb_channel::bounded::<()>(1); + let (unverified_tx, unverified_rx) = + channel::bounded::(BLOCK_DOWNLOAD_WINDOW as usize * 3); + + let consumer_unverified_thread = thread::Builder::new() + .name("consume_unverified_blocks".into()) + .spawn({ + let shared = builder.shared.clone(); + let verify_failed_blocks_tx = builder.verify_failed_blocks_tx.clone(); + move || { + let mut consume_unverified = ConsumeUnverifiedBlocks::new( + shared, + unverified_rx, + builder.proposal_table, + verify_failed_blocks_tx, + unverified_queue_stop_rx, + ); - unverified_queue_stop_tx.send(()); 
- consumer_unverified_thread.join(); - } - }) - .expect("start chain_service thread should ok"); - register_thread("ChainServices", chain_service_thread); + consume_unverified.start(); + } + }) + .expect("start unverified_queue consumer thread should ok"); + + let (lonely_block_tx, lonely_block_rx) = + channel::bounded::(BLOCK_DOWNLOAD_WINDOW as usize); + + let (search_orphan_pool_stop_tx, search_orphan_pool_stop_rx) = ckb_channel::bounded::<()>(1); + + let search_orphan_pool_thread = thread::Builder::new() + .name("consume_orphan_blocks".into()) + .spawn({ + let orphan_blocks_broker = orphan_blocks_broker.clone(); + let shared = builder.shared.clone(); + use crate::consume_orphan::ConsumeOrphan; + let verify_failed_block_tx = builder.verify_failed_blocks_tx.clone(); + move || { + let consume_orphan = ConsumeOrphan::new( + shared, + orphan_blocks_broker, + unverified_tx, + lonely_block_rx, + verify_failed_block_tx, + search_orphan_pool_stop_rx, + ); + consume_orphan.start(); + } + }) + .expect("start search_orphan_pool thread should ok"); + + let (process_block_tx, process_block_rx) = channel::bounded(BLOCK_DOWNLOAD_WINDOW as usize); + + let (truncate_block_tx, truncate_block_rx) = channel::bounded(1); + + let chain_service: ChainService = ChainService::new( + builder.shared, + process_block_rx, + truncate_block_rx, + lonely_block_tx, + builder.verify_failed_blocks_tx, + ); + let chain_service_thread = thread::Builder::new() + .name("ChainService".into()) + .spawn({ + move || { + chain_service.start(); + + search_orphan_pool_stop_tx.send(()); + search_orphan_pool_thread.join(); + + unverified_queue_stop_tx.send(()); + consumer_unverified_thread.join(); + } + }) + .expect("start chain_service thread should ok"); + register_thread("ChainServices", chain_service_thread); - ChainController::new(process_block_tx, truncate_block_tx, orphan_blocks_broker) - } + ChainController::new(process_block_tx, truncate_block_tx, orphan_blocks_broker) } /// Chain background service 
From 8a56c64afdd6d36c6b4b774a98811f90feeeed87 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 21 Nov 2023 22:35:27 +0800 Subject: [PATCH 177/357] Move ChainServicesBuilder to ckb_shared --- shared/src/chain_services_builder.rs | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 shared/src/chain_services_builder.rs diff --git a/shared/src/chain_services_builder.rs b/shared/src/chain_services_builder.rs new file mode 100644 index 0000000000..e69de29bb2 From b0eaab08234c321fe0bc96f4864e573523906296 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 21 Nov 2023 22:36:04 +0800 Subject: [PATCH 178/357] Remove ChainServicesbuilder from ckb_chain --- chain/src/chain.rs | 23 ++--------------------- 1 file changed, 2 insertions(+), 21 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 9e7e194e1e..b361d3e409 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -18,6 +18,7 @@ use ckb_proposal_table::ProposalTable; use ckb_rust_unstable_port::IsSorted; use ckb_shared::shared::Shared; use ckb_shared::types::VerifyFailedBlockInfo; +use ckb_shared::ChainServicesBuilder; use ckb_stop_handler::{new_crossbeam_exit_rx, register_thread}; use ckb_store::ChainStore; use ckb_types::{ @@ -175,27 +176,7 @@ impl ChainController { } } -pub struct ChainServicesBuilder { - shared: Shared, - proposal_table: ProposalTable, - verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, -} - -impl ChainServicesBuilder { - pub fn new( - shared: Shared, - proposal_table: ProposalTable, - verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, - ) -> Self { - ChainServicesBuilder { - shared, - proposal_table, - verify_failed_blocks_tx, - } - } -} - -pub fn start(builder: ChainServicesBuilder) -> ChainController { +pub fn start_chain_services(builder: ChainServicesBuilder) -> ChainController { let orphan_blocks_broker = Arc::new(OrphanBlockPool::with_capacity(ORPHAN_BLOCK_SIZE)); let (unverified_queue_stop_tx, unverified_queue_stop_rx) = 
ckb_channel::bounded::<()>(1); From 72d23a24572aa3177ec22844c6ce4ea14f6d89dc Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 21 Nov 2023 22:36:39 +0800 Subject: [PATCH 179/357] Re-export `start_chain_services` in `ckb_chain` --- chain/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chain/src/lib.rs b/chain/src/lib.rs index 3704567328..e6ea79ae39 100644 --- a/chain/src/lib.rs +++ b/chain/src/lib.rs @@ -23,7 +23,7 @@ mod orphan_block_pool; #[cfg(test)] mod tests; -pub use chain::{ChainController, ChainServicesBuilder}; +pub use chain::{start_chain_services, ChainController}; type ProcessBlockRequest = Request; type TruncateRequest = Request>; From b5359cf25600e626538e600bf9cf4ece9931ace1 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 22 Nov 2023 10:32:43 +0800 Subject: [PATCH 180/357] Re-sort shared_builder import statements --- shared/src/shared_builder.rs | 42 ++++++++---------------------------- 1 file changed, 9 insertions(+), 33 deletions(-) diff --git a/shared/src/shared_builder.rs b/shared/src/shared_builder.rs index 2bdc945ad4..9d74cf280b 100644 --- a/shared/src/shared_builder.rs +++ b/shared/src/shared_builder.rs @@ -1,56 +1,32 @@ //! 
shared_builder provide SharedBuilder and SharedPacakge -use ckb_channel::Receiver; -use ckb_proposal_table::ProposalTable; -use ckb_tx_pool::service::TxVerificationResult; -use ckb_tx_pool::{TokioRwLock, TxEntry, TxPool, TxPoolServiceBuilder}; -use std::cmp::Ordering; - -use crate::migrate::Migrate; +use crate::ChainServicesBuilder; +use crate::{types::VerifyFailedBlockInfo, HeaderMap, Shared}; use ckb_app_config::{ - BlockAssemblerConfig, DBConfig, NotifyConfig, StoreConfig, SyncConfig, TxPoolConfig, + BlockAssemblerConfig, DBConfig, ExitCode, HeaderMapConfig, NotifyConfig, StoreConfig, + SyncConfig, TxPoolConfig, }; -use ckb_app_config::{ExitCode, HeaderMapConfig}; use ckb_async_runtime::{new_background_runtime, Handle}; use ckb_chain_spec::consensus::Consensus; use ckb_chain_spec::SpecError; - -use crate::Shared; -use ckb_proposal_table::ProposalView; -use ckb_snapshot::{Snapshot, SnapshotMgr}; - -use ckb_app_config::{ - BlockAssemblerConfig, DBConfig, ExitCode, NotifyConfig, StoreConfig, TxPoolConfig, -}; -use ckb_async_runtime::{new_background_runtime, Handle}; +use ckb_channel::Receiver; use ckb_db::RocksDB; use ckb_db_schema::COLUMNS; use ckb_error::{Error, InternalErrorKind}; use ckb_logger::{error, info}; use ckb_migrate::migrate::Migrate; use ckb_notify::{NotifyController, NotifyService}; -use ckb_notify::{NotifyController, NotifyService, PoolTransactionEntry}; use ckb_proposal_table::ProposalTable; use ckb_proposal_table::ProposalView; -use ckb_shared::{HeaderMap, Shared}; use ckb_snapshot::{Snapshot, SnapshotMgr}; -use ckb_util::Mutex; - -use ckb_chain::ChainServicesBuilder; -use ckb_shared::types::VerifyFailedBlockInfo; -use ckb_store::ChainDB; -use ckb_store::ChainStore; use ckb_store::{ChainDB, ChainStore, Freezer}; use ckb_tx_pool::{ - error::Reject, service::TxVerificationResult, TokioRwLock, TxEntry, TxPool, - TxPoolServiceBuilder, + service::TxVerificationResult, TokioRwLock, TxEntry, TxPool, TxPoolServiceBuilder, }; use 
ckb_types::core::hardfork::HardForks; -use ckb_types::core::service::PoolTransactionEntry; -use ckb_types::core::tx_pool::Reject; +use ckb_types::{ + core::service::PoolTransactionEntry, core::tx_pool::Reject, core::EpochExt, core::HeaderView, +}; use ckb_util::Mutex; - -use ckb_types::core::EpochExt; -use ckb_types::core::HeaderView; use ckb_verification::cache::init_cache; use dashmap::DashMap; use std::cmp::Ordering; From 44e58b25dedc870c77de5d6a39465a977ca836c0 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 22 Nov 2023 10:36:38 +0800 Subject: [PATCH 181/357] Re-export ChainServicesBuilder in ckb_shared --- shared/src/chain_services_builder.rs | 23 +++++++++++++++++++++++ shared/src/lib.rs | 2 ++ 2 files changed, 25 insertions(+) diff --git a/shared/src/chain_services_builder.rs b/shared/src/chain_services_builder.rs index e69de29bb2..a6ee4a76e1 100644 --- a/shared/src/chain_services_builder.rs +++ b/shared/src/chain_services_builder.rs @@ -0,0 +1,23 @@ +use crate::types::VerifyFailedBlockInfo; +use crate::Shared; +use ckb_proposal_table::ProposalTable; + +pub struct ChainServicesBuilder { + pub shared: Shared, + pub proposal_table: ProposalTable, + pub verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, +} + +impl ChainServicesBuilder { + pub fn new( + shared: Shared, + proposal_table: ProposalTable, + verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, + ) -> Self { + ChainServicesBuilder { + shared, + proposal_table, + verify_failed_blocks_tx, + } + } +} diff --git a/shared/src/lib.rs b/shared/src/lib.rs index 02d7dbbc54..8c3c27b843 100644 --- a/shared/src/lib.rs +++ b/shared/src/lib.rs @@ -1,9 +1,11 @@ //! 
TODO(doc): @quake // num_cpus is used in proc_macro +pub mod chain_services_builder; pub mod shared; pub mod shared_builder; +pub use chain_services_builder::ChainServicesBuilder; pub use ckb_snapshot::{Snapshot, SnapshotMgr}; pub use shared::Shared; pub use shared_builder::{SharedBuilder, SharedPackage}; From cc8e65c90414d504674cbedf4d70e68f3dee12db Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 22 Nov 2023 10:37:14 +0800 Subject: [PATCH 182/357] Fix ckb-launcher start chain services --- util/launcher/Cargo.toml | 2 +- util/launcher/src/lib.rs | 9 ++++----- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/util/launcher/Cargo.toml b/util/launcher/Cargo.toml index 6926798bec..fd2fd8b1c0 100644 --- a/util/launcher/Cargo.toml +++ b/util/launcher/Cargo.toml @@ -32,7 +32,7 @@ ckb-channel = { path = "../channel", version = "= 0.114.0-pre" } ckb-tx-pool = { path = "../../tx-pool", version = "= 0.114.0-pre" } ckb-light-client-protocol-server = { path = "../light-client-protocol-server", version = "= 0.114.0-pre" } ckb-block-filter = { path = "../../block-filter", version = "= 0.114.0-pre" } - +tokio = { version = "1", features = ["sync"] } [features] with_sentry = [ "ckb-sync/with_sentry", "ckb-network/with_sentry", "ckb-app-config/with_sentry" ] diff --git a/util/launcher/src/lib.rs b/util/launcher/src/lib.rs index 9ca6419a64..c9c114a136 100644 --- a/util/launcher/src/lib.rs +++ b/util/launcher/src/lib.rs @@ -8,7 +8,7 @@ use ckb_app_config::{ use ckb_async_runtime::Handle; use ckb_block_filter::filter::BlockFilter as BlockFilterService; use ckb_build_info::Version; -use ckb_chain::{ChainController, ChainServicesBuilder}; +use ckb_chain::ChainController; use ckb_channel::Receiver; use ckb_jsonrpc_types::ScriptHashType; use ckb_light_client_protocol_server::LightClientProtocol; @@ -19,9 +19,8 @@ use ckb_network::{ }; use ckb_network_alert::alert_relayer::AlertRelayer; use ckb_resource::Resource; -use ckb_rpc::RpcServer; -use ckb_rpc::ServiceBuilder; -use 
ckb_shared::Shared; +use ckb_rpc::{RpcServer, ServiceBuilder}; +use ckb_shared::{ChainServicesBuilder, Shared}; use ckb_shared::shared_builder::{SharedBuilder, SharedPackage}; use ckb_shared::types::VerifyFailedBlockInfo; @@ -232,7 +231,7 @@ impl Launcher { shared: &Shared, chain_services_builder: ChainServicesBuilder, ) -> ChainController { - let chain_controller = chain_services_builder.start(); + let chain_controller = ckb_chain::start_chain_services(chain_services_builder); info!("chain genesis hash: {:#x}", shared.genesis_hash()); chain_controller } From 5324afa0d89e2c1b6b58b5a40883f061160a06c9 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 22 Nov 2023 10:37:55 +0800 Subject: [PATCH 183/357] Fix subcommand::replay start chain_services --- ckb-bin/src/subcommand/replay.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ckb-bin/src/subcommand/replay.rs b/ckb-bin/src/subcommand/replay.rs index 26f82db39d..8414575537 100644 --- a/ckb-bin/src/subcommand/replay.rs +++ b/ckb-bin/src/subcommand/replay.rs @@ -3,7 +3,7 @@ use ckb_async_runtime::Handle; use ckb_chain::ChainController; use ckb_chain_iter::ChainIterator; use ckb_instrument::{ProgressBar, ProgressStyle}; -use ckb_shared::{Shared, SharedBuilder}; +use ckb_shared::{ChainServicesBuilder, Shared, SharedBuilder}; use ckb_store::ChainStore; use ckb_verification_traits::Switch; use std::sync::Arc; @@ -47,8 +47,8 @@ pub fn replay(args: ReplayArgs, async_handle: Handle) -> Result<(), ExitCode> { args.consensus, )?; let (tmp_shared, mut pack) = shared_builder.tx_pool_config(args.config.tx_pool).build()?; - let chain_service_builder = pack.take_chain_services_builder(); - let chain_controller = chain_service_builder.start(); + let chain_service_builder: ChainServicesBuilder = pack.take_chain_services_builder(); + let chain_controller = ckb_chain::start_chain_services(chain_service_builder); if let Some((from, to)) = args.profile { profile(shared, chain_controller, from, to); From 
166c3ea7c4033c67e9f76d181301346ef22ada09 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 22 Nov 2023 10:38:09 +0800 Subject: [PATCH 184/357] Fix subcommand::import start chain services --- ckb-bin/src/subcommand/import.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ckb-bin/src/subcommand/import.rs b/ckb-bin/src/subcommand/import.rs index 5c76e29351..81867f3e48 100644 --- a/ckb-bin/src/subcommand/import.rs +++ b/ckb-bin/src/subcommand/import.rs @@ -14,7 +14,7 @@ pub fn import(args: ImportArgs, async_handle: Handle) -> Result<(), ExitCode> { )?; let (shared, mut pack) = builder.build()?; - let chain_controller = pack.take_chain_services_builder().start(); + let chain_controller = ckb_chain::start_chain_services(pack.take_chain_services_builder()); // manual drop tx_pool_builder and relay_tx_receiver pack.take_tx_pool_builder(); From 1907eaddcb0a7286651e89efe08c71cea70964a8 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 22 Nov 2023 10:50:56 +0800 Subject: [PATCH 185/357] Update `Cargo.lock` by `make prod` --- Cargo.lock | 1 + 1 file changed, 1 insertion(+) diff --git a/Cargo.lock b/Cargo.lock index 1632215963..d7b1edf127 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -983,6 +983,7 @@ dependencies = [ "ckb-types", "ckb-verification", "ckb-verification-traits", + "tokio", ] [[package]] From 1a5c90334d09379aa8cc96a01acec2e1d6daa690 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 22 Nov 2023 11:17:46 +0800 Subject: [PATCH 186/357] Fix ChainServicesBuilder import path in unit tests --- chain/src/tests/block_assembler.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/chain/src/tests/block_assembler.rs b/chain/src/tests/block_assembler.rs index e9aabf1f72..0397c521e1 100644 --- a/chain/src/tests/block_assembler.rs +++ b/chain/src/tests/block_assembler.rs @@ -1,10 +1,10 @@ use crate::tests::util::dummy_network; -use crate::{ChainController, ChainServicesBuilder}; +use crate::ChainController; use 
ckb_app_config::BlockAssemblerConfig; use ckb_chain_spec::consensus::Consensus; use ckb_dao_utils::genesis_dao_data; use ckb_jsonrpc_types::ScriptHashType; -use ckb_shared::{Shared, SharedBuilder, Snapshot}; +use ckb_shared::{ChainServicesBuilder, Shared, SharedBuilder, Snapshot}; use ckb_store::ChainStore; use ckb_tx_pool::{block_assembler::CandidateUncles, PlugTarget, TxEntry}; use ckb_types::{ From 031f5a6f95bfbeb2d9cc4539c99390595bfcb270 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 22 Nov 2023 11:18:31 +0800 Subject: [PATCH 187/357] breadcast_compact_block only when FirstSeenAndVerified --- rpc/src/module/miner.rs | 8 ++++---- sync/src/relayer/mod.rs | 3 +-- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/rpc/src/module/miner.rs b/rpc/src/module/miner.rs index 99fcd08d2d..9921cd08bd 100644 --- a/rpc/src/module/miner.rs +++ b/rpc/src/module/miner.rs @@ -1,6 +1,6 @@ use crate::error::RPCError; use async_trait::async_trait; -use ckb_chain::ChainController; +use ckb_chain::{ChainController, VerifiedBlockStatus, VerifyResult}; use ckb_jsonrpc_types::{Block, BlockTemplate, Uint64, Version}; use ckb_logger::{debug, error, warn}; use ckb_network::{NetworkController, PeerIndex, SupportProtocols, TargetSession}; @@ -275,10 +275,10 @@ impl MinerRpc for MinerRpcImpl { .verify(&header) .map_err(|err| handle_submit_error(&work_id, &err))?; - let verify_result = self.chain.blocking_process_block(Arc::clone(&block)); + let verify_result: VerifyResult = self.chain.blocking_process_block(Arc::clone(&block)); - // TODO: need to consider every enum item of verify_result - let is_new = verify_result.is_ok(); + // TODO: review this logic + let is_new = matches!(verify_result, Ok(VerifiedBlockStatus::FirstSeenAndVerified)); // Announce only new block if is_new { diff --git a/sync/src/relayer/mod.rs b/sync/src/relayer/mod.rs index 709d48c7e6..9bcdb36103 100644 --- a/sync/src/relayer/mod.rs +++ b/sync/src/relayer/mod.rs @@ -318,8 +318,7 @@ impl Relayer { let peer = 
peer.clone(); move |result: VerifyResult| match result { Ok(verified_block_status) => match verified_block_status { - VerifiedBlockStatus::FirstSeenAndVerified - | VerifiedBlockStatus::FirstSeenButNotVerified => { + VerifiedBlockStatus::FirstSeenAndVerified => { match broadcast_compact_block_tx.send((block, peer)) { Err(_) => { error!( From 295a1b438976c23014a15ac5a3b73bcaf2037359 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 22 Nov 2023 13:44:08 +0800 Subject: [PATCH 188/357] ChainService should execute non_contextual_verify when switch is None --- chain/src/chain.rs | 34 ++++++++++++++++++---------------- 1 file changed, 18 insertions(+), 16 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index b361d3e409..2464374ce2 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -24,6 +24,7 @@ use ckb_store::ChainStore; use ckb_types::{ core::{cell::HeaderChecker, service::Request, BlockView}, packed::Byte32, + H256, }; use ckb_verification::{BlockVerifier, NonContextualBlockTxsVerifier}; use ckb_verification_traits::{Switch, Verifier}; @@ -357,23 +358,24 @@ impl ChainService { if block_number < 1 { warn!("receive 0 number block: 0-{}", block_hash); } - if let Some(switch) = lonely_block.switch() { - if !switch.disable_non_contextual() { - let result = self.non_contextual_verify(&lonely_block.block()); - match result { - Err(err) => { - tell_synchronizer_to_punish_the_bad_peer( - self.verify_failed_blocks_tx.clone(), - lonely_block.peer_id(), - lonely_block.block().hash(), - &err, - ); - - lonely_block.execute_callback(Err(err)); - return; - } - _ => {} + + if lonely_block.switch().is_none() + || matches!(lonely_block.switch(), Some(switch) if !switch.disable_non_contextual()) + { + let result = self.non_contextual_verify(&lonely_block.block()); + match result { + Err(err) => { + tell_synchronizer_to_punish_the_bad_peer( + self.verify_failed_blocks_tx.clone(), + lonely_block.peer_id(), + lonely_block.block().hash(), + &err, + ); + + 
lonely_block.execute_callback(Err(err)); + return; } + _ => {} } } From 2cf6149cd2761f4a9aa058a5c349d97278c3d480 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 22 Nov 2023 15:58:08 +0800 Subject: [PATCH 189/357] Extract `accept_descendants` from `ConsumeOrphan` --- chain/src/consume_orphan.rs | 162 +++++++++++++++++++----------------- 1 file changed, 86 insertions(+), 76 deletions(-) diff --git a/chain/src/consume_orphan.rs b/chain/src/consume_orphan.rs index 3517878a0f..05ec04c78b 100644 --- a/chain/src/consume_orphan.rs +++ b/chain/src/consume_orphan.rs @@ -68,6 +68,7 @@ impl ConsumeOrphan { } } } + fn search_orphan_pool(&self) { for leader_hash in self.orphan_blocks_broker.clone_leaders() { if !self @@ -88,8 +89,7 @@ impl ConsumeOrphan { ); continue; } - let descendants_len = descendants.len(); - let (first_descendants_number, last_descendants_number) = ( + let (first_descendants_number, last_descendants_number, descendants_len) = ( descendants .first() .expect("descdant not empty") @@ -100,100 +100,110 @@ impl ConsumeOrphan { .expect("descdant not empty") .block() .number(), + descendants.len(), ); + let accept_error_occurred = self.accept_descendants(descendants); - let mut accept_error_occurred = false; - for descendant_block in descendants { - match self.accept_block(descendant_block.block().to_owned()) { - Err(err) => { - tell_synchronizer_to_punish_the_bad_peer( - self.verify_failed_blocks_tx.clone(), - descendant_block.peer_id(), - descendant_block.block().hash(), - &err, - ); - - accept_error_occurred = true; - error!( - "accept block {} failed: {}", - descendant_block.block().hash(), - err - ); + if !accept_error_occurred { + debug!( + "accept {} blocks [{}->{}] success", + descendants_len, first_descendants_number, last_descendants_number + ) + } + } + } - descendant_block.execute_callback(Err(err)); - continue; - } - Ok(accepted_opt) => match accepted_opt { - Some((parent_header, total_difficulty)) => { - let unverified_block: UnverifiedBlock = - 
descendant_block.combine_parent_header(parent_header); - let block_number = unverified_block.block().number(); - let block_hash = unverified_block.block().hash(); - - match self.unverified_blocks_tx.send(unverified_block) { - Ok(_) => {} - Err(SendError(unverified_block)) => { - error!("send unverified_block_tx failed, the receiver has been closed"); - let err: Error = InternalErrorKind::System + fn accept_descendants(&self, descendants: Vec) -> bool { + let mut accept_error_occurred = false; + for descendant_block in descendants { + match self.accept_descendant(descendant_block.block().to_owned()) { + Ok(accepted_opt) => match accepted_opt { + Some((parent_header, total_difficulty)) => { + let unverified_block: UnverifiedBlock = + descendant_block.combine_parent_header(parent_header); + let block_number = unverified_block.block().number(); + let block_hash = unverified_block.block().hash(); + + match self.unverified_blocks_tx.send(unverified_block) { + Ok(_) => {} + Err(SendError(unverified_block)) => { + error!( + "send unverified_block_tx failed, the receiver has been closed" + ); + let err: Error = InternalErrorKind::System .other(format!("send unverified_block_tx failed, the receiver have been close")).into(); - tell_synchronizer_to_punish_the_bad_peer( - self.verify_failed_blocks_tx.clone(), - unverified_block.peer_id(), - unverified_block.block().hash(), - &err, - ); + tell_synchronizer_to_punish_the_bad_peer( + self.verify_failed_blocks_tx.clone(), + unverified_block.peer_id(), + unverified_block.block().hash(), + &err, + ); - let verify_result: VerifyResult = Err(err); - unverified_block.execute_callback(verify_result); - continue; - } - }; - - if total_difficulty - .gt(self.shared.get_unverified_tip().total_difficulty()) - { - self.shared.set_unverified_tip(ckb_shared::HeaderIndex::new( - block_number.clone(), - block_hash.clone(), - total_difficulty, - )); - debug!("set unverified_tip to {}-{}, while unverified_tip - verified_tip = {}", + let 
verify_result: VerifyResult = Err(err); + unverified_block.execute_callback(verify_result); + continue; + } + }; + + if total_difficulty.gt(self.shared.get_unverified_tip().total_difficulty()) + { + self.shared.set_unverified_tip(ckb_shared::HeaderIndex::new( + block_number.clone(), + block_hash.clone(), + total_difficulty, + )); + debug!("set unverified_tip to {}-{}, while unverified_tip - verified_tip = {}", block_number.clone(), block_hash.clone(), block_number.saturating_sub(self.shared.snapshot().tip_number())) - } else { - debug!("received a block {}-{} with lower or equal difficulty than unverified_tip {}-{}", + } else { + debug!("received a block {}-{} with lower or equal difficulty than unverified_tip {}-{}", block_number, block_hash, self.shared.get_unverified_tip().number(), self.shared.get_unverified_tip().hash(), ); - } } - None => { - info!( - "doesn't accept block {}, because it has been stored", - descendant_block.block().hash() - ); - let verify_result: VerifyResult = - Ok(VerifiedBlockStatus::PreviouslySeenButNotVerified); - descendant_block.execute_callback(verify_result); - } - }, - } - } + } + None => { + info!( + "doesn't accept block {}, because it has been stored", + descendant_block.block().hash() + ); + let verify_result: VerifyResult = + Ok(VerifiedBlockStatus::PreviouslySeenButNotVerified); + descendant_block.execute_callback(verify_result); + } + }, - if !accept_error_occurred { - debug!( - "accept {} blocks [{}->{}] success", - descendants_len, first_descendants_number, last_descendants_number - ) + Err(err) => { + accept_error_occurred = true; + + tell_synchronizer_to_punish_the_bad_peer( + self.verify_failed_blocks_tx.clone(), + descendant_block.peer_id(), + descendant_block.block().hash(), + &err, + ); + + error!( + "accept block {} failed: {}", + descendant_block.block().hash(), + err + ); + + descendant_block.execute_callback(Err(err)); + } } } + accept_error_occurred } - fn accept_block(&self, block: Arc) -> Result, Error> { + fn 
accept_descendant( + &self, + block: Arc, + ) -> Result, Error> { let (block_number, block_hash) = (block.number(), block.hash()); if self From dfd2a6eb0a3c375020b5973dfc92cb9e66ad87de Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 22 Nov 2023 16:05:54 +0800 Subject: [PATCH 190/357] Add `contains_block` for OrphanBlock --- chain/src/orphan_block_pool.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/chain/src/orphan_block_pool.rs b/chain/src/orphan_block_pool.rs index f6bc5d1ea7..b7fe99dbf4 100644 --- a/chain/src/orphan_block_pool.rs +++ b/chain/src/orphan_block_pool.rs @@ -99,6 +99,10 @@ impl InnerPool { }) } + pub fn contains_block(self, hash: &packed::Byte32) -> bool { + self.parents.contains_key(hash) + } + /// cleanup expired blocks(epoch + EXPIRED_EPOCH < tip_epoch) pub fn clean_expired_blocks(&mut self, tip_epoch: EpochNumber) -> Vec { let mut result = vec![]; @@ -161,6 +165,10 @@ impl OrphanBlockPool { self.inner.read().get_block(hash) } + pub fn contains_block(self, hash: &packed::Byte32) -> bool { + self.inner.read().contains_block(hash) + } + pub fn clean_expired_blocks(&self, epoch: EpochNumber) -> Vec { self.inner.write().clean_expired_blocks(epoch) } From bb1a17672620a2011492fe93f215e40d2a3dfecf Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 28 Nov 2023 14:38:49 +0800 Subject: [PATCH 191/357] Only insert new block to orphan pool if its parent not partial stored --- chain/src/consume_orphan.rs | 75 ++++++++++++++++++++++++------------- 1 file changed, 50 insertions(+), 25 deletions(-) diff --git a/chain/src/consume_orphan.rs b/chain/src/consume_orphan.rs index 05ec04c78b..c98aefa337 100644 --- a/chain/src/consume_orphan.rs +++ b/chain/src/consume_orphan.rs @@ -52,13 +52,12 @@ impl ConsumeOrphan { loop { select! 
{ recv(self.stop_rx) -> _ => { - info!("unverified_queue_consumer got exit signal, exit now"); - return; + info!("unverified_queue_consumer got exit signal, exit now"); + return; }, recv(self.lonely_blocks_rx) -> msg => match msg { Ok(lonely_block) => { - self.orphan_blocks_broker.insert(lonely_block); - self.search_orphan_pool() + self.process_lonely_block(lonely_block); }, Err(err) => { error!("lonely_block_rx err: {}", err); @@ -69,6 +68,25 @@ impl ConsumeOrphan { } } + fn process_lonely_block(&self, lonely_block: LonelyBlockWithCallback) { + let parent_hash = lonely_block.block().parent_hash(); + let parent_status = self.shared.get_block_status(&parent_hash); + if parent_status.contains(BlockStatus::BLOCK_PARTIAL_STORED) { + let parent_header = self + .shared + .store() + .get_block_header(&parent_hash) + .expect("parent already store"); + + let unverified_block: UnverifiedBlock = + lonely_block.combine_parent_header(parent_header); + self.send_unverified_block(unverified_block); + } else { + self.orphan_blocks_broker.insert(lonely_block); + } + self.search_orphan_pool() + } + fn search_orphan_pool(&self) { for leader_hash in self.orphan_blocks_broker.clone_leaders() { if !self @@ -113,6 +131,31 @@ impl ConsumeOrphan { } } + fn send_unverified_block(&self, unverified_block: UnverifiedBlock) -> bool { + match self.unverified_blocks_tx.send(unverified_block) { + Ok(_) => true, + Err(SendError(unverified_block)) => { + error!("send unverified_block_tx failed, the receiver has been closed"); + let err: Error = InternalErrorKind::System + .other(format!( + "send unverified_block_tx failed, the receiver have been close" + )) + .into(); + + tell_synchronizer_to_punish_the_bad_peer( + self.verify_failed_blocks_tx.clone(), + unverified_block.peer_id(), + unverified_block.block().hash(), + &err, + ); + + let verify_result: VerifyResult = Err(err); + unverified_block.execute_callback(verify_result); + false + } + } + } + fn accept_descendants(&self, descendants: Vec) -> 
bool { let mut accept_error_occurred = false; for descendant_block in descendants { @@ -124,27 +167,9 @@ impl ConsumeOrphan { let block_number = unverified_block.block().number(); let block_hash = unverified_block.block().hash(); - match self.unverified_blocks_tx.send(unverified_block) { - Ok(_) => {} - Err(SendError(unverified_block)) => { - error!( - "send unverified_block_tx failed, the receiver has been closed" - ); - let err: Error = InternalErrorKind::System - .other(format!("send unverified_block_tx failed, the receiver have been close")).into(); - - tell_synchronizer_to_punish_the_bad_peer( - self.verify_failed_blocks_tx.clone(), - unverified_block.peer_id(), - unverified_block.block().hash(), - &err, - ); - - let verify_result: VerifyResult = Err(err); - unverified_block.execute_callback(verify_result); - continue; - } - }; + if !self.send_unverified_block(unverified_block) { + continue; + } if total_difficulty.gt(self.shared.get_unverified_tip().total_difficulty()) { From 322f3f084dd4950bc6c5fa9e5eff3b565ea935f8 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 28 Nov 2023 14:43:10 +0800 Subject: [PATCH 192/357] ConsumeOrphan::accept_descendant expect block is not stored --- chain/src/consume_orphan.rs | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/chain/src/consume_orphan.rs b/chain/src/consume_orphan.rs index c98aefa337..9fe75545d8 100644 --- a/chain/src/consume_orphan.rs +++ b/chain/src/consume_orphan.rs @@ -225,20 +225,9 @@ impl ConsumeOrphan { accept_error_occurred } - fn accept_descendant( - &self, - block: Arc, - ) -> Result, Error> { + fn accept_descendant(&self, block: Arc) -> Result<(HeaderView, U256), Error> { let (block_number, block_hash) = (block.number(), block.hash()); - if self - .shared - .contains_block_status(&block_hash, BlockStatus::BLOCK_PARTIAL_STORED) - { - debug!("block {}-{} has been stored", block_number, block_hash); - return Ok(None); - } - let parent_header = self .shared .store() From 
ae38eb2cbf061a7bf2a148e98034327d2d5ecd3d Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 28 Nov 2023 14:54:42 +0800 Subject: [PATCH 193/357] accept_descendant does not return None --- chain/src/consume_orphan.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/chain/src/consume_orphan.rs b/chain/src/consume_orphan.rs index 9fe75545d8..cfd8d7e045 100644 --- a/chain/src/consume_orphan.rs +++ b/chain/src/consume_orphan.rs @@ -236,7 +236,7 @@ impl ConsumeOrphan { if let Some(ext) = self.shared.store().get_block_ext(&block.hash()) { debug!("block {}-{} has stored BlockExt", block_number, block_hash); - return Ok(Some((parent_header, ext.total_difficulty))); + return Ok((parent_header, ext.total_difficulty)); } trace!("begin accept block: {}-{}", block.number(), block.hash()); @@ -297,6 +297,6 @@ impl ConsumeOrphan { self.shared .insert_block_status(block_hash, BlockStatus::BLOCK_PARTIAL_STORED); - Ok(Some((parent_header, cannon_total_difficulty))) + Ok((parent_header, cannon_total_difficulty)) } } From bcf7249c54c55da0803fb3730d5b71c58560970c Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 28 Nov 2023 14:56:34 +0800 Subject: [PATCH 194/357] accept_descendants need not handle the case when accept_descendant return None --- chain/src/consume_orphan.rs | 56 +++++++++++++++---------------------- 1 file changed, 23 insertions(+), 33 deletions(-) diff --git a/chain/src/consume_orphan.rs b/chain/src/consume_orphan.rs index cfd8d7e045..61a2a8d6a9 100644 --- a/chain/src/consume_orphan.rs +++ b/chain/src/consume_orphan.rs @@ -160,47 +160,37 @@ impl ConsumeOrphan { let mut accept_error_occurred = false; for descendant_block in descendants { match self.accept_descendant(descendant_block.block().to_owned()) { - Ok(accepted_opt) => match accepted_opt { - Some((parent_header, total_difficulty)) => { - let unverified_block: UnverifiedBlock = - descendant_block.combine_parent_header(parent_header); - let block_number = unverified_block.block().number(); - 
let block_hash = unverified_block.block().hash(); - - if !self.send_unverified_block(unverified_block) { - continue; - } - - if total_difficulty.gt(self.shared.get_unverified_tip().total_difficulty()) - { - self.shared.set_unverified_tip(ckb_shared::HeaderIndex::new( - block_number.clone(), - block_hash.clone(), - total_difficulty, - )); - debug!("set unverified_tip to {}-{}, while unverified_tip - verified_tip = {}", + Ok((parent_header, total_difficulty)) => { + let unverified_block: UnverifiedBlock = + descendant_block.combine_parent_header(parent_header); + let block_number = unverified_block.block().number(); + let block_hash = unverified_block.block().hash(); + + if !self.send_unverified_block(unverified_block) { + continue; + } + + if total_difficulty.gt(self.shared.get_unverified_tip().total_difficulty()) { + self.shared.set_unverified_tip(ckb_shared::HeaderIndex::new( + block_number.clone(), + block_hash.clone(), + total_difficulty, + )); + debug!( + "set unverified_tip to {}-{}, while unverified_tip - verified_tip = {}", block_number.clone(), block_hash.clone(), - block_number.saturating_sub(self.shared.snapshot().tip_number())) - } else { - debug!("received a block {}-{} with lower or equal difficulty than unverified_tip {}-{}", + block_number.saturating_sub(self.shared.snapshot().tip_number()) + ) + } else { + debug!("received a block {}-{} with lower or equal difficulty than unverified_tip {}-{}", block_number, block_hash, self.shared.get_unverified_tip().number(), self.shared.get_unverified_tip().hash(), ); - } } - None => { - info!( - "doesn't accept block {}, because it has been stored", - descendant_block.block().hash() - ); - let verify_result: VerifyResult = - Ok(VerifiedBlockStatus::PreviouslySeenButNotVerified); - descendant_block.execute_callback(verify_result); - } - }, + } Err(err) => { accept_error_occurred = true; From 24ad7a6a4fc63e581ad539e478feaba8b3982e5c Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 28 Nov 2023 14:57:06 +0800 
Subject: [PATCH 195/357] Remove `VerifiedBlockStatus::PreviouslySeenButNotVerified` --- chain/src/lib.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/chain/src/lib.rs b/chain/src/lib.rs index e6ea79ae39..976f57e3f9 100644 --- a/chain/src/lib.rs +++ b/chain/src/lib.rs @@ -43,9 +43,6 @@ pub enum VerifiedBlockStatus { // The block has been verified before. PreviouslySeenAndVerified, - - // The block is being seen before, but not verify it yet - PreviouslySeenButNotVerified, } #[derive(Clone)] From 7e51052e64267b86debead3fdb29a26b9f6f6e30 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 28 Nov 2023 15:03:36 +0800 Subject: [PATCH 196/357] contains_block should not take ownership of self --- chain/src/orphan_block_pool.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chain/src/orphan_block_pool.rs b/chain/src/orphan_block_pool.rs index b7fe99dbf4..a76d546db5 100644 --- a/chain/src/orphan_block_pool.rs +++ b/chain/src/orphan_block_pool.rs @@ -165,7 +165,7 @@ impl OrphanBlockPool { self.inner.read().get_block(hash) } - pub fn contains_block(self, hash: &packed::Byte32) -> bool { + pub fn contains_block(&self, hash: &packed::Byte32) -> bool { self.inner.read().contains_block(hash) } From 2f4c64d4f2cecd54b9732ab6979ada56247a0956 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 28 Nov 2023 15:15:35 +0800 Subject: [PATCH 197/357] Orphan: InnerPool should borrow self --- chain/src/orphan_block_pool.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chain/src/orphan_block_pool.rs b/chain/src/orphan_block_pool.rs index a76d546db5..39006a454e 100644 --- a/chain/src/orphan_block_pool.rs +++ b/chain/src/orphan_block_pool.rs @@ -99,7 +99,7 @@ impl InnerPool { }) } - pub fn contains_block(self, hash: &packed::Byte32) -> bool { + pub fn contains_block(&self, hash: &packed::Byte32) -> bool { self.parents.contains_key(hash) } From 4082e27aa0c76a5176f775328b9a29a6216ae9f6 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 28 Nov 2023
17:43:11 +0800 Subject: [PATCH 198/357] Extract `process_descendant` from `accept_descendant` --- chain/src/consume_orphan.rs | 161 +++++++++++++++++------------------- 1 file changed, 76 insertions(+), 85 deletions(-) diff --git a/chain/src/consume_orphan.rs b/chain/src/consume_orphan.rs index 61a2a8d6a9..1f18d3524f 100644 --- a/chain/src/consume_orphan.rs +++ b/chain/src/consume_orphan.rs @@ -72,21 +72,45 @@ impl ConsumeOrphan { let parent_hash = lonely_block.block().parent_hash(); let parent_status = self.shared.get_block_status(&parent_hash); if parent_status.contains(BlockStatus::BLOCK_PARTIAL_STORED) { - let parent_header = self - .shared - .store() - .get_block_header(&parent_hash) - .expect("parent already store"); - - let unverified_block: UnverifiedBlock = - lonely_block.combine_parent_header(parent_header); - self.send_unverified_block(unverified_block); + debug!( + "parent has stored, processing descendant directly {}", + lonely_block.block().hash() + ); + self.process_descendant(lonely_block); } else { self.orphan_blocks_broker.insert(lonely_block); } self.search_orphan_pool() } + fn process_descendant(&self, lonely_block: LonelyBlockWithCallback) { + match self.accept_descendant(lonely_block.block().to_owned()) { + Ok((parent_header, total_difficulty)) => { + let unverified_block: UnverifiedBlock = + lonely_block.combine_parent_header(parent_header); + + self.send_unverified_block(unverified_block, total_difficulty) + } + + Err(err) => { + tell_synchronizer_to_punish_the_bad_peer( + self.verify_failed_blocks_tx.clone(), + lonely_block.peer_id(), + lonely_block.block().hash(), + &err, + ); + + error!( + "accept block {} failed: {}", + lonely_block.block().hash(), + err + ); + + lonely_block.execute_callback(Err(err)); + } + } + } + fn search_orphan_pool(&self) { for leader_hash in self.orphan_blocks_broker.clone_leaders() { if !self @@ -120,20 +144,22 @@ impl ConsumeOrphan { .number(), descendants.len(), ); - let accept_error_occurred = 
self.accept_descendants(descendants); - - if !accept_error_occurred { - debug!( - "accept {} blocks [{}->{}] success", - descendants_len, first_descendants_number, last_descendants_number - ) - } + self.accept_descendants(descendants); } } - fn send_unverified_block(&self, unverified_block: UnverifiedBlock) -> bool { - match self.unverified_blocks_tx.send(unverified_block) { - Ok(_) => true, + fn send_unverified_block(&self, unverified_block: UnverifiedBlock, total_difficulty: U256) { + let block_number = unverified_block.block().number(); + let block_hash = unverified_block.block().hash(); + + let send_success = match self.unverified_blocks_tx.send(unverified_block) { + Ok(_) => { + debug!( + "process desendant block success {}-{}", + block_number, block_hash + ); + true + } Err(SendError(unverified_block)) => { error!("send unverified_block_tx failed, the receiver has been closed"); let err: Error = InternalErrorKind::System @@ -142,77 +168,45 @@ impl ConsumeOrphan { )) .into(); - tell_synchronizer_to_punish_the_bad_peer( - self.verify_failed_blocks_tx.clone(), - unverified_block.peer_id(), - unverified_block.block().hash(), - &err, - ); - let verify_result: VerifyResult = Err(err); unverified_block.execute_callback(verify_result); false } + }; + if !send_success { + return; + } + + if total_difficulty.gt(self.shared.get_unverified_tip().total_difficulty()) { + self.shared.set_unverified_tip(ckb_shared::HeaderIndex::new( + block_number.clone(), + block_hash.clone(), + total_difficulty, + )); + debug!( + "set unverified_tip to {}-{}, while unverified_tip - verified_tip = {}", + block_number.clone(), + block_hash.clone(), + block_number.saturating_sub(self.shared.snapshot().tip_number()) + ) + } else { + debug!( + "received a block {}-{} with lower or equal difficulty than unverified_tip {}-{}", + block_number, + block_hash, + self.shared.get_unverified_tip().number(), + self.shared.get_unverified_tip().hash(), + ); } + + self.shared + 
.insert_block_status(block_hash, BlockStatus::BLOCK_PARTIAL_STORED); } - fn accept_descendants(&self, descendants: Vec) -> bool { - let mut accept_error_occurred = false; + fn accept_descendants(&self, descendants: Vec) { for descendant_block in descendants { - match self.accept_descendant(descendant_block.block().to_owned()) { - Ok((parent_header, total_difficulty)) => { - let unverified_block: UnverifiedBlock = - descendant_block.combine_parent_header(parent_header); - let block_number = unverified_block.block().number(); - let block_hash = unverified_block.block().hash(); - - if !self.send_unverified_block(unverified_block) { - continue; - } - - if total_difficulty.gt(self.shared.get_unverified_tip().total_difficulty()) { - self.shared.set_unverified_tip(ckb_shared::HeaderIndex::new( - block_number.clone(), - block_hash.clone(), - total_difficulty, - )); - debug!( - "set unverified_tip to {}-{}, while unverified_tip - verified_tip = {}", - block_number.clone(), - block_hash.clone(), - block_number.saturating_sub(self.shared.snapshot().tip_number()) - ) - } else { - debug!("received a block {}-{} with lower or equal difficulty than unverified_tip {}-{}", - block_number, - block_hash, - self.shared.get_unverified_tip().number(), - self.shared.get_unverified_tip().hash(), - ); - } - } - - Err(err) => { - accept_error_occurred = true; - - tell_synchronizer_to_punish_the_bad_peer( - self.verify_failed_blocks_tx.clone(), - descendant_block.peer_id(), - descendant_block.block().hash(), - &err, - ); - - error!( - "accept block {} failed: {}", - descendant_block.block().hash(), - err - ); - - descendant_block.execute_callback(Err(err)); - } - } + self.process_descendant(descendant_block); } - accept_error_occurred } fn accept_descendant(&self, block: Arc) -> Result<(HeaderView, U256), Error> { @@ -284,9 +278,6 @@ impl ConsumeOrphan { db_txn.commit()?; - self.shared - .insert_block_status(block_hash, BlockStatus::BLOCK_PARTIAL_STORED); - Ok((parent_header, 
cannon_total_difficulty)) } } From 07eab23551868c9c707c4322a7ae49d30fc865a2 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 28 Nov 2023 17:56:23 +0800 Subject: [PATCH 199/357] Move ForkChanges to chain/src/utils --- chain/src/{ => utils}/forkchanges.rs | 0 chain/src/utils/mod.rs | 1 + 2 files changed, 1 insertion(+) rename chain/src/{ => utils}/forkchanges.rs (100%) create mode 100644 chain/src/utils/mod.rs diff --git a/chain/src/forkchanges.rs b/chain/src/utils/forkchanges.rs similarity index 100% rename from chain/src/forkchanges.rs rename to chain/src/utils/forkchanges.rs diff --git a/chain/src/utils/mod.rs b/chain/src/utils/mod.rs new file mode 100644 index 0000000000..5ecd06c91e --- /dev/null +++ b/chain/src/utils/mod.rs @@ -0,0 +1 @@ +pub mod forkchanges; From 5ea235fd3148eae566e315e12c7267d07dc277b0 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 28 Nov 2023 17:56:44 +0800 Subject: [PATCH 200/357] Fix ForkChanges related import path --- chain/src/consume_unverified.rs | 5 ++--- chain/src/lib.rs | 2 +- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/chain/src/consume_unverified.rs b/chain/src/consume_unverified.rs index 018871e6c7..72671a92c3 100644 --- a/chain/src/consume_unverified.rs +++ b/chain/src/consume_unverified.rs @@ -1,7 +1,6 @@ -use crate::forkchanges::ForkChanges; use crate::{ - tell_synchronizer_to_punish_the_bad_peer, GlobalIndex, LonelyBlock, LonelyBlockWithCallback, - UnverifiedBlock, VerifiedBlockStatus, VerifyResult, + tell_synchronizer_to_punish_the_bad_peer, utils::forkchanges::ForkChanges, GlobalIndex, + LonelyBlock, LonelyBlockWithCallback, UnverifiedBlock, VerifiedBlockStatus, VerifyResult, }; use ckb_channel::{select, Receiver}; use ckb_error::{Error, InternalErrorKind}; diff --git a/chain/src/lib.rs b/chain/src/lib.rs index 976f57e3f9..531d0dd755 100644 --- a/chain/src/lib.rs +++ b/chain/src/lib.rs @@ -18,10 +18,10 @@ use std::sync::Arc; mod chain; mod consume_orphan; mod consume_unverified; -mod forkchanges; 
mod orphan_block_pool; #[cfg(test)] mod tests; +mod utils; pub use chain::{start_chain_services, ChainController}; From 69d2a9886d68ec1486fd5aa8252166ca22c48ded Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 28 Nov 2023 17:58:00 +0800 Subject: [PATCH 201/357] Move OrphanBlocksPool to chain/src/utils --- chain/src/utils/mod.rs | 1 + chain/src/{ => utils}/orphan_block_pool.rs | 0 2 files changed, 1 insertion(+) rename chain/src/{ => utils}/orphan_block_pool.rs (100%) diff --git a/chain/src/utils/mod.rs b/chain/src/utils/mod.rs index 5ecd06c91e..efdc1e092a 100644 --- a/chain/src/utils/mod.rs +++ b/chain/src/utils/mod.rs @@ -1 +1,2 @@ pub mod forkchanges; +pub mod orphan_block_pool; diff --git a/chain/src/orphan_block_pool.rs b/chain/src/utils/orphan_block_pool.rs similarity index 100% rename from chain/src/orphan_block_pool.rs rename to chain/src/utils/orphan_block_pool.rs From bfafebb005758dc7c65ceea9a49c3cb20524ddd0 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 28 Nov 2023 17:58:53 +0800 Subject: [PATCH 202/357] Fix OrphanBlockPool related import path --- chain/src/consume_orphan.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chain/src/consume_orphan.rs b/chain/src/consume_orphan.rs index 1f18d3524f..7a2c5ac466 100644 --- a/chain/src/consume_orphan.rs +++ b/chain/src/consume_orphan.rs @@ -1,4 +1,4 @@ -use crate::orphan_block_pool::OrphanBlockPool; +use crate::utils::orphan_block_pool::OrphanBlockPool; use crate::{ tell_synchronizer_to_punish_the_bad_peer, LonelyBlockWithCallback, UnverifiedBlock, VerifiedBlockStatus, VerifyResult, From a40aedd08826adde15d2c7ce2c2ca4d1ae64de3f Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 28 Nov 2023 19:01:39 +0800 Subject: [PATCH 203/357] Trace last_common_ancestor timecost Signed-off-by: Eval EXEC --- sync/src/synchronizer/block_fetcher.rs | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/sync/src/synchronizer/block_fetcher.rs 
b/sync/src/synchronizer/block_fetcher.rs index 3f2646d200..52a189073c 100644 --- a/sync/src/synchronizer/block_fetcher.rs +++ b/sync/src/synchronizer/block_fetcher.rs @@ -71,9 +71,20 @@ impl BlockFetcher { // If the peer reorganized, our previous last_common_header may not be an ancestor // of its current tip anymore. Go back enough to fix that. - last_common = self - .active_chain - .last_common_ancestor(&last_common, best_known)?; + last_common = { + let now = std::time::Instant::now(); + let last_common_ancestor = self + .active_chain + .last_common_ancestor(&last_common, best_known)?; + debug!( + "last_common_ancestor({}, {})->{} cost {:?}", + last_common, + best_known, + last_common_ancestor, + now.elapsed() + ); + last_common_ancestor + }; self.sync_shared .state() From c5303598f71293ce23b27bf9bee00544e384ab37 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 28 Nov 2023 19:29:19 +0800 Subject: [PATCH 204/357] Fix module orphan_block_pool usage --- chain/src/chain.rs | 2 +- chain/src/lib.rs | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 2464374ce2..f3bf0d25be 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -3,7 +3,7 @@ use crate::consume_orphan::ConsumeOrphan; use crate::consume_unverified::ConsumeUnverifiedBlocks; -use crate::orphan_block_pool::OrphanBlockPool; +use crate::utils::orphan_block_pool::OrphanBlockPool; use crate::{ tell_synchronizer_to_punish_the_bad_peer, LonelyBlock, LonelyBlockWithCallback, ProcessBlockRequest, TruncateRequest, UnverifiedBlock, VerifyCallback, VerifyResult, diff --git a/chain/src/lib.rs b/chain/src/lib.rs index 531d0dd755..8cd8c6329f 100644 --- a/chain/src/lib.rs +++ b/chain/src/lib.rs @@ -18,7 +18,6 @@ use std::sync::Arc; mod chain; mod consume_orphan; mod consume_unverified; -mod orphan_block_pool; #[cfg(test)] mod tests; mod utils; From ecbfc8d1203ee3a9ef6c16dd5fcbb925fc9b64f5 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 28 Nov 2023 
19:38:18 +0800 Subject: [PATCH 205/357] Fix last common_ancestor's log message --- sync/src/synchronizer/block_fetcher.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sync/src/synchronizer/block_fetcher.rs b/sync/src/synchronizer/block_fetcher.rs index 52a189073c..d053558489 100644 --- a/sync/src/synchronizer/block_fetcher.rs +++ b/sync/src/synchronizer/block_fetcher.rs @@ -77,7 +77,7 @@ impl BlockFetcher { .active_chain .last_common_ancestor(&last_common, best_known)?; debug!( - "last_common_ancestor({}, {})->{} cost {:?}", + "last_common_ancestor({:?}, {:?})->{:?} cost {:?}", last_common, best_known, last_common_ancestor, From 9278e01a85ac4893fadef072d4906c8bfc319812 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 29 Nov 2023 09:49:05 +0800 Subject: [PATCH 206/357] Rename FirstSeenButNotVerified to UncleBlockNotVerified --- chain/src/consume_unverified.rs | 2 +- chain/src/lib.rs | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/chain/src/consume_unverified.rs b/chain/src/consume_unverified.rs index 72671a92c3..de19f9b37a 100644 --- a/chain/src/consume_unverified.rs +++ b/chain/src/consume_unverified.rs @@ -349,7 +349,7 @@ impl ConsumeUnverifiedBlockProcessor { error!("[verify block] notify new_uncle error {}", e); } } - Ok(VerifiedBlockStatus::FirstSeenButNotVerified) + Ok(VerifiedBlockStatus::UncleBlockNotVerified) } } diff --git a/chain/src/lib.rs b/chain/src/lib.rs index 8cd8c6329f..8633ab2100 100644 --- a/chain/src/lib.rs +++ b/chain/src/lib.rs @@ -34,11 +34,12 @@ pub type VerifyCallback = Box; /// VerifiedBlockStatus is #[derive(Debug, Clone, PartialEq)] pub enum VerifiedBlockStatus { - // The block is being seen for the first time. 
+ // The block is being seen for the first time, and VM have verified it FirstSeenAndVerified, - // The block is being seen for the first time, but not verify it yet - FirstSeenButNotVerified, + // The block is being seen for the first time + // but VM have not verified it since its a uncle block + UncleBlockNotVerified, // The block has been verified before. PreviouslySeenAndVerified, From 174905763018d3fed2ed4842cdb8c9bc8f536b0e Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 29 Nov 2023 10:03:31 +0800 Subject: [PATCH 207/357] Remove useless import statements, fix clippy --- chain/src/chain.rs | 10 ++-------- chain/src/consume_orphan.rs | 2 +- chain/src/tests/find_fork.rs | 2 +- chain/src/tests/orphan_block_pool.rs | 2 +- shared/src/shared.rs | 1 - tx-pool/src/chunk_process.rs | 2 +- .../contextual/src/tests/contextual_block_verifier.rs | 2 +- 7 files changed, 7 insertions(+), 14 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index f3bf0d25be..1ead84ed6f 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -1,7 +1,6 @@ //! CKB chain service. 
#![allow(missing_docs)] -use crate::consume_orphan::ConsumeOrphan; use crate::consume_unverified::ConsumeUnverifiedBlocks; use crate::utils::orphan_block_pool::OrphanBlockPool; use crate::{ @@ -13,18 +12,13 @@ use ckb_constant::sync::BLOCK_DOWNLOAD_WINDOW; use ckb_error::{Error, InternalErrorKind}; use ckb_logger::{self, debug, error, info, warn}; use ckb_network::tokio; -use ckb_proposal_table::ProposalTable; -#[cfg(debug_assertions)] -use ckb_rust_unstable_port::IsSorted; use ckb_shared::shared::Shared; use ckb_shared::types::VerifyFailedBlockInfo; use ckb_shared::ChainServicesBuilder; use ckb_stop_handler::{new_crossbeam_exit_rx, register_thread}; -use ckb_store::ChainStore; use ckb_types::{ - core::{cell::HeaderChecker, service::Request, BlockView}, + core::{service::Request, BlockView}, packed::Byte32, - H256, }; use ckb_verification::{BlockVerifier, NonContextualBlockTxsVerifier}; use ckb_verification_traits::{Switch, Verifier}; @@ -190,7 +184,7 @@ pub fn start_chain_services(builder: ChainServicesBuilder) -> ChainController { let shared = builder.shared.clone(); let verify_failed_blocks_tx = builder.verify_failed_blocks_tx.clone(); move || { - let mut consume_unverified = ConsumeUnverifiedBlocks::new( + let consume_unverified = ConsumeUnverifiedBlocks::new( shared, unverified_rx, builder.proposal_table, diff --git a/chain/src/consume_orphan.rs b/chain/src/consume_orphan.rs index 7a2c5ac466..a1636b1f2b 100644 --- a/chain/src/consume_orphan.rs +++ b/chain/src/consume_orphan.rs @@ -1,7 +1,7 @@ use crate::utils::orphan_block_pool::OrphanBlockPool; use crate::{ tell_synchronizer_to_punish_the_bad_peer, LonelyBlockWithCallback, UnverifiedBlock, - VerifiedBlockStatus, VerifyResult, + VerifyResult, }; use ckb_channel::{select, Receiver, SendError, Sender}; use ckb_error::{Error, InternalErrorKind}; diff --git a/chain/src/tests/find_fork.rs b/chain/src/tests/find_fork.rs index f0321fd3d8..4b364e6677 100644 --- a/chain/src/tests/find_fork.rs +++ 
b/chain/src/tests/find_fork.rs @@ -1,5 +1,5 @@ use crate::consume_unverified::{ConsumeUnverifiedBlockProcessor, ConsumeUnverifiedBlocks}; -use crate::forkchanges::ForkChanges; +use crate::utils::forkchanges::ForkChanges; use crate::{LonelyBlock, LonelyBlockWithCallback, UnverifiedBlock, VerifyFailedBlockInfo}; use ckb_chain_spec::consensus::{Consensus, ProposalWindow}; use ckb_proposal_table::ProposalTable; diff --git a/chain/src/tests/orphan_block_pool.rs b/chain/src/tests/orphan_block_pool.rs index 83736cc581..d6bd0a9491 100644 --- a/chain/src/tests/orphan_block_pool.rs +++ b/chain/src/tests/orphan_block_pool.rs @@ -7,7 +7,7 @@ use std::collections::HashSet; use std::sync::Arc; use std::thread; -use crate::orphan_block_pool::OrphanBlockPool; +use crate::utils::orphan_block_pool::OrphanBlockPool; fn gen_lonely_block(parent_header: &HeaderView) -> LonelyBlock { let number = parent_header.number() + 1; diff --git a/shared/src/shared.rs b/shared/src/shared.rs index af92876b01..647cd0eebe 100644 --- a/shared/src/shared.rs +++ b/shared/src/shared.rs @@ -27,7 +27,6 @@ use ckb_verification::cache::TxVerificationCache; use dashmap::DashMap; use std::cmp; use std::collections::BTreeMap; -use std::hash::Hash; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use std::thread; diff --git a/tx-pool/src/chunk_process.rs b/tx-pool/src/chunk_process.rs index 5dd48ddba6..0d9b03f2f3 100644 --- a/tx-pool/src/chunk_process.rs +++ b/tx-pool/src/chunk_process.rs @@ -4,7 +4,7 @@ use crate::try_or_return_with_snapshot; use crate::{error::Reject, service::TxPoolService}; use ckb_chain_spec::consensus::Consensus; use ckb_error::Error; -use ckb_logger::{debug, info}; +use ckb_logger::info; use ckb_snapshot::Snapshot; use ckb_store::data_loader_wrapper::AsDataLoader; use ckb_traits::{CellDataProvider, ExtensionProvider, HeaderProvider}; diff --git a/verification/contextual/src/tests/contextual_block_verifier.rs b/verification/contextual/src/tests/contextual_block_verifier.rs 
index b906667e95..b8cacae816 100644 --- a/verification/contextual/src/tests/contextual_block_verifier.rs +++ b/verification/contextual/src/tests/contextual_block_verifier.rs @@ -1,6 +1,6 @@ use super::super::contextual_block_verifier::{EpochVerifier, TwoPhaseCommitVerifier}; use crate::contextual_block_verifier::{RewardVerifier, VerifyContext}; -use ckb_chain::ChainController; +use ckb_chain::start_chain_services; use ckb_chain_spec::consensus::{Consensus, ConsensusBuilder}; use ckb_error::assert_error_eq; use ckb_shared::{Shared, SharedBuilder}; From 00b4417fd8fbc6f43f7820d55bdc2f7573cc6e5c Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 29 Nov 2023 10:17:06 +0800 Subject: [PATCH 208/357] Using `ckb_chain::start_chain_services` to start ckb-chain services --- chain/src/tests/block_assembler.rs | 4 ++-- chain/src/tests/find_fork.rs | 11 +++++++---- .../src/tests/utils/chain.rs | 4 ++-- .../contextual/src/tests/contextual_block_verifier.rs | 4 ++-- verification/contextual/src/tests/uncle_verifier.rs | 4 ++-- 5 files changed, 15 insertions(+), 12 deletions(-) diff --git a/chain/src/tests/block_assembler.rs b/chain/src/tests/block_assembler.rs index 0397c521e1..007bcc583d 100644 --- a/chain/src/tests/block_assembler.rs +++ b/chain/src/tests/block_assembler.rs @@ -1,5 +1,5 @@ use crate::tests::util::dummy_network; -use crate::ChainController; +use crate::{start_chain_services, ChainController}; use ckb_app_config::BlockAssemblerConfig; use ckb_chain_spec::consensus::Consensus; use ckb_dao_utils::genesis_dao_data; @@ -48,7 +48,7 @@ fn start_chain(consensus: Option) -> (ChainController, Shared) { pack.take_tx_pool_builder().start(network); let chain_services_builder: ChainServicesBuilder = pack.take_chain_services_builder(); - let chain_controller: ChainController = chain_services_builder.start(); + let chain_controller: ChainController = start_chain_services(chain_services_builder); (chain_controller, shared) } diff --git a/chain/src/tests/find_fork.rs 
b/chain/src/tests/find_fork.rs index 4b364e6677..07f4a4673f 100644 --- a/chain/src/tests/find_fork.rs +++ b/chain/src/tests/find_fork.rs @@ -1,6 +1,9 @@ use crate::consume_unverified::{ConsumeUnverifiedBlockProcessor, ConsumeUnverifiedBlocks}; use crate::utils::forkchanges::ForkChanges; -use crate::{LonelyBlock, LonelyBlockWithCallback, UnverifiedBlock, VerifyFailedBlockInfo}; +use crate::{ + start_chain_services, LonelyBlock, LonelyBlockWithCallback, UnverifiedBlock, + VerifyFailedBlockInfo, +}; use ckb_chain_spec::consensus::{Consensus, ProposalWindow}; use ckb_proposal_table::ProposalTable; use ckb_shared::SharedBuilder; @@ -51,7 +54,7 @@ fn test_find_fork_case1() { let builder = SharedBuilder::with_temp_db(); let consensus = Consensus::default(); let (shared, mut pack) = builder.consensus(consensus).build().unwrap(); - let chain_controller = pack.take_chain_services_builder().start(); + let chain_controller = start_chain_services(pack.take_chain_services_builder()); let genesis = shared .store() @@ -402,7 +405,7 @@ fn repeatedly_switch_fork() { let mut fork1 = MockChain::new(genesis.clone(), shared.consensus()); let mut fork2 = MockChain::new(genesis, shared.consensus()); - let chain_controller = pack.take_chain_services_builder().start(); + let chain_controller = start_chain_services(pack.take_chain_services_builder()); for _ in 0..2 { fork1.gen_empty_block_with_nonce(1u128, &mock_store); @@ -541,7 +544,7 @@ fn test_fork_proposal_table() { }; let (shared, mut pack) = builder.consensus(consensus).build().unwrap(); - let chain_controller = pack.take_chain_services_builder().start(); + let chain_controller = start_chain_services(pack.take_chain_services_builder()); let genesis = shared .store() diff --git a/util/light-client-protocol-server/src/tests/utils/chain.rs b/util/light-client-protocol-server/src/tests/utils/chain.rs index 83cb1e2030..c9d4cd00ad 100644 --- a/util/light-client-protocol-server/src/tests/utils/chain.rs +++ 
b/util/light-client-protocol-server/src/tests/utils/chain.rs @@ -4,8 +4,8 @@ use std::{ }; use ckb_app_config::{BlockAssemblerConfig, NetworkConfig}; -use ckb_chain::ChainController; use ckb_chain::VerifiedBlockStatus; +use ckb_chain::{start_chain_services, ChainController}; use ckb_chain_spec::consensus::{build_genesis_epoch_ext, ConsensusBuilder}; use ckb_dao_utils::genesis_dao_data; use ckb_jsonrpc_types::ScriptHashType; @@ -88,7 +88,7 @@ impl MockChain { let network = dummy_network(&shared); pack.take_tx_pool_builder().start(network); - let chain_controller = pack.take_chain_services_builder().start(); + let chain_controller = start_chain_services(pack.take_chain_services_builder()); Self { chain_controller, diff --git a/verification/contextual/src/tests/contextual_block_verifier.rs b/verification/contextual/src/tests/contextual_block_verifier.rs index b8cacae816..fc6c4182d4 100644 --- a/verification/contextual/src/tests/contextual_block_verifier.rs +++ b/verification/contextual/src/tests/contextual_block_verifier.rs @@ -1,6 +1,6 @@ use super::super::contextual_block_verifier::{EpochVerifier, TwoPhaseCommitVerifier}; use crate::contextual_block_verifier::{RewardVerifier, VerifyContext}; -use ckb_chain::start_chain_services; +use ckb_chain::{start_chain_services, ChainController}; use ckb_chain_spec::consensus::{Consensus, ConsensusBuilder}; use ckb_error::assert_error_eq; use ckb_shared::{Shared, SharedBuilder}; @@ -83,7 +83,7 @@ fn start_chain(consensus: Option) -> (ChainController, Shared) { } let (shared, mut pack) = builder.build().unwrap(); - let chain_controller = pack.take_chain_services_builder().start(); + let chain_controller = start_chain_services(pack.take_chain_services_builder()); (chain_controller, shared) } diff --git a/verification/contextual/src/tests/uncle_verifier.rs b/verification/contextual/src/tests/uncle_verifier.rs index f517f603fe..0928abdee9 100644 --- a/verification/contextual/src/tests/uncle_verifier.rs +++ 
b/verification/contextual/src/tests/uncle_verifier.rs @@ -2,7 +2,7 @@ use crate::contextual_block_verifier::{UncleVerifierContext, VerifyContext}; use crate::uncles_verifier::UnclesVerifier; -use ckb_chain::ChainController; +use ckb_chain::{start_chain_services, ChainController}; use ckb_chain_spec::consensus::Consensus; use ckb_error::assert_error_eq; use ckb_shared::{Shared, SharedBuilder}; @@ -43,7 +43,7 @@ fn start_chain(consensus: Option) -> (ChainController, Shared) { } let (shared, mut pack) = builder.build().unwrap(); - let chain_controller = pack.take_chain_services_builder().start(); + let chain_controller = start_chain_services(pack.take_chain_services_builder()); (chain_controller, shared) } From 0b088842a6008f598bd9a918c864bf18624d0a8f Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 29 Nov 2023 10:39:39 +0800 Subject: [PATCH 209/357] Benches: Start chain services by start_chain_services --- benches/benches/benchmarks/overall.rs | 2 +- benches/benches/benchmarks/resolve.rs | 2 +- benches/benches/benchmarks/util.rs | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/benches/benches/benchmarks/overall.rs b/benches/benches/benchmarks/overall.rs index 8d1057c6a8..8ac94a2149 100644 --- a/benches/benches/benchmarks/overall.rs +++ b/benches/benches/benchmarks/overall.rs @@ -132,7 +132,7 @@ pub fn setup_chain(txs_size: usize) -> (Shared, ChainController) { let network = dummy_network(&shared); pack.take_tx_pool_builder().start(network); - let chain_controller = pack.take_chain_services_builder().start(); + let chain_controller = start_chain_services(pack.take_chain_services_builder()); (shared, chain_controller) } diff --git a/benches/benches/benchmarks/resolve.rs b/benches/benches/benchmarks/resolve.rs index 43bb8d72e5..65f19dd741 100644 --- a/benches/benches/benchmarks/resolve.rs +++ b/benches/benches/benchmarks/resolve.rs @@ -96,7 +96,7 @@ pub fn setup_chain(txs_size: usize) -> (Shared, ChainController) { 
.tx_pool_config(tx_pool_config) .build() .unwrap(); - let chain_controller = pack.take_chain_services_builder().start(); + let chain_controller = start_chain_services(pack.take_chain_services_builder()); // FIXME: global cache !!! let _ret = setup_system_cell_cache( diff --git a/benches/benches/benchmarks/util.rs b/benches/benches/benchmarks/util.rs index 44e9ab5e28..557885635f 100644 --- a/benches/benches/benchmarks/util.rs +++ b/benches/benches/benchmarks/util.rs @@ -78,7 +78,7 @@ pub fn new_always_success_chain(txs_size: usize, chains_num: usize) -> Chains { .consensus(consensus.clone()) .build() .unwrap(); - let chain_controller = pack.take_chain_services_builder().start(); + let chain_controller = start_chain_services(pack.take_chain_services_builder()); chains.push((chain_controller, shared)); } @@ -296,7 +296,7 @@ pub fn new_secp_chain(txs_size: usize, chains_num: usize) -> Chains { .consensus(consensus.clone()) .build() .unwrap(); - let chain_controller = pack.take_chain_services_builder().start(); + let chain_controller = start_chain_services(pack.take_chain_services_builder()); chains.push((chain_controller, shared)); } From 0917d45fb185f02fbacf7cd14d52bfb08f5dfaae Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 29 Nov 2023 10:40:06 +0800 Subject: [PATCH 210/357] Fix Unit test in `ckb-rpc`: Start chain services by start_chain_services --- rpc/src/tests/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rpc/src/tests/mod.rs b/rpc/src/tests/mod.rs index 5b3017d5d5..1d3ed34261 100644 --- a/rpc/src/tests/mod.rs +++ b/rpc/src/tests/mod.rs @@ -1,4 +1,4 @@ -use ckb_chain::ChainController; +use ckb_chain::{start_chain_services, ChainController}; use ckb_chain_spec::consensus::Consensus; use ckb_dao::DaoCalculator; use ckb_reward_calculator::RewardCalculator; From 453e35908dbf17b46660424c7c4555e5c947a062 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 29 Nov 2023 10:40:28 +0800 Subject: [PATCH 211/357] Fix Unit test in `ckb-chain`: 
Start chain services by start_chain_services --- chain/src/tests/find_fork.rs | 2 +- chain/src/tests/truncate.rs | 3 ++- chain/src/tests/uncle.rs | 3 ++- chain/src/tests/util.rs | 4 ++-- 4 files changed, 7 insertions(+), 5 deletions(-) diff --git a/chain/src/tests/find_fork.rs b/chain/src/tests/find_fork.rs index 07f4a4673f..0b06e44896 100644 --- a/chain/src/tests/find_fork.rs +++ b/chain/src/tests/find_fork.rs @@ -53,6 +53,7 @@ fn consume_unverified_block( fn test_find_fork_case1() { let builder = SharedBuilder::with_temp_db(); let consensus = Consensus::default(); + let proposal_table = ProposalTable::new(consensus.tx_proposal_window()); let (shared, mut pack) = builder.consensus(consensus).build().unwrap(); let chain_controller = start_chain_services(pack.take_chain_services_builder()); @@ -73,7 +74,6 @@ fn test_find_fork_case1() { fork2.gen_empty_block_with_diff(90u64, &mock_store); } - let proposal_table = ProposalTable::new(consensus.tx_proposal_window()); let (verify_failed_blocks_tx, _verify_failed_blocks_rx) = tokio::sync::mpsc::unbounded_channel::(); diff --git a/chain/src/tests/truncate.rs b/chain/src/tests/truncate.rs index 30c42deec9..57fec63256 100644 --- a/chain/src/tests/truncate.rs +++ b/chain/src/tests/truncate.rs @@ -1,3 +1,4 @@ +use crate::start_chain_services; use ckb_chain_spec::consensus::Consensus; use ckb_shared::SharedBuilder; use ckb_store::ChainStore; @@ -10,7 +11,7 @@ fn test_truncate() { let builder = SharedBuilder::with_temp_db(); let (shared, mut pack) = builder.consensus(Consensus::default()).build().unwrap(); - let chain_controller = pack.take_chain_services_builder().start(); + let chain_controller = start_chain_services(pack.take_chain_services_builder()); let genesis = shared .store() diff --git a/chain/src/tests/uncle.rs b/chain/src/tests/uncle.rs index 3122038558..fe23f5cf34 100644 --- a/chain/src/tests/uncle.rs +++ b/chain/src/tests/uncle.rs @@ -1,3 +1,4 @@ +use crate::start_chain_services; use 
ckb_chain_spec::consensus::Consensus; use ckb_shared::SharedBuilder; use ckb_store::ChainStore; @@ -9,7 +10,7 @@ use std::sync::Arc; fn test_get_block_body_after_inserting() { let builder = SharedBuilder::with_temp_db(); let (shared, mut pack) = builder.consensus(Consensus::default()).build().unwrap(); - let chain_controller = pack.take_chain_services_builder().start(); + let chain_controller = start_chain_services(pack.take_chain_services_builder()); let genesis = shared .store() diff --git a/chain/src/tests/util.rs b/chain/src/tests/util.rs index 1c66093729..f29cd97ad7 100644 --- a/chain/src/tests/util.rs +++ b/chain/src/tests/util.rs @@ -1,4 +1,4 @@ -use crate::ChainController; +use crate::{start_chain_services, ChainController}; use ckb_app_config::TxPoolConfig; use ckb_app_config::{BlockAssemblerConfig, NetworkConfig}; use ckb_chain_spec::consensus::{Consensus, ConsensusBuilder}; @@ -85,7 +85,7 @@ pub(crate) fn start_chain_with_tx_pool_config( let network = dummy_network(&shared); pack.take_tx_pool_builder().start(network); - let chain_controller = pack.take_chain_services_builder().start(); + let chain_controller = start_chain_services(pack.take_chain_services_builder()); let parent = { let snapshot = shared.snapshot(); snapshot From 692706d53f666601f1b4530eb1ce1060ceda81c5 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 29 Nov 2023 10:40:52 +0800 Subject: [PATCH 212/357] Fix Unit test in `ckb-sync`: Start chain services by start_chain_services --- sync/src/relayer/tests/helper.rs | 3 ++- sync/src/tests/sync_shared.rs | 3 ++- sync/src/tests/synchronizer/basic_sync.rs | 3 ++- sync/src/tests/synchronizer/functions.rs | 4 ++-- sync/src/tests/util.rs | 4 ++-- 5 files changed, 10 insertions(+), 7 deletions(-) diff --git a/sync/src/relayer/tests/helper.rs b/sync/src/relayer/tests/helper.rs index ccfe934f26..f77bcd3f3f 100644 --- a/sync/src/relayer/tests/helper.rs +++ b/sync/src/relayer/tests/helper.rs @@ -1,5 +1,6 @@ use crate::{Relayer, SyncShared}; use 
ckb_app_config::NetworkConfig; +use ckb_chain::start_chain_services; use ckb_chain_spec::consensus::{build_genesis_epoch_ext, ConsensusBuilder}; use ckb_dao::DaoCalculator; use ckb_dao_utils::genesis_dao_data; @@ -170,7 +171,7 @@ pub(crate) fn build_chain(tip: BlockNumber) -> (Relayer, OutPoint) { let network = dummy_network(&shared); pack.take_tx_pool_builder().start(network); - let chain_controller = pack.take_chain_services_builder().start(); + let chain_controller = start_chain_services(pack.take_chain_services_builder()); // Build 1 ~ (tip-1) heights for i in 0..tip { diff --git a/sync/src/tests/sync_shared.rs b/sync/src/tests/sync_shared.rs index 038787b11c..59a5172234 100644 --- a/sync/src/tests/sync_shared.rs +++ b/sync/src/tests/sync_shared.rs @@ -1,5 +1,6 @@ use crate::tests::util::{build_chain, inherit_block}; use crate::SyncShared; +use ckb_chain::start_chain_services; use ckb_shared::block_status::BlockStatus; use ckb_shared::SharedBuilder; use ckb_store::{self, ChainStore}; @@ -53,7 +54,7 @@ fn test_insert_parent_unknown_block() { .consensus(shared1.consensus().clone()) .build() .unwrap(); - let chain_controller = pack.take_chain_services_builder().start(); + let chain_controller = start_chain_services(pack.take_chain_services_builder()); ( SyncShared::new(shared, Default::default(), pack.take_relay_tx_receiver()), chain_controller, diff --git a/sync/src/tests/synchronizer/basic_sync.rs b/sync/src/tests/synchronizer/basic_sync.rs index 1fc266c778..33c7987649 100644 --- a/sync/src/tests/synchronizer/basic_sync.rs +++ b/sync/src/tests/synchronizer/basic_sync.rs @@ -4,6 +4,7 @@ use crate::synchronizer::{ }; use crate::tests::TestNode; use crate::{SyncShared, Synchronizer}; +use ckb_chain::start_chain_services; use ckb_chain_spec::consensus::ConsensusBuilder; use ckb_channel::bounded; use ckb_dao::DaoCalculator; @@ -98,7 +99,7 @@ fn setup_node(height: u64) -> (TestNode, Shared) { .build() .unwrap(); - let chain_controller = 
pack.take_chain_services_builder().start(); + let chain_controller = start_chain_services(pack.take_chain_services_builder()); for _i in 0..height { let number = block.header().number() + 1; diff --git a/sync/src/tests/synchronizer/functions.rs b/sync/src/tests/synchronizer/functions.rs index 94c2456dbe..e8b80c00f7 100644 --- a/sync/src/tests/synchronizer/functions.rs +++ b/sync/src/tests/synchronizer/functions.rs @@ -1,4 +1,4 @@ -use ckb_chain::ChainController; +use ckb_chain::{start_chain_services, ChainController}; use ckb_chain_spec::consensus::{Consensus, ConsensusBuilder}; use ckb_constant::sync::{CHAIN_SYNC_TIMEOUT, EVICTION_HEADERS_RESPONSE_TIME, MAX_TIP_AGE}; use ckb_dao::DaoCalculator; @@ -49,7 +49,7 @@ fn start_chain(consensus: Option) -> (ChainController, Shared, Synchr let (shared, mut pack) = builder.build().unwrap(); - let chain_controller = pack.take_chain_services_builder().start(); + let chain_controller = start_chain_services(pack.take_chain_services_builder()); let sync_shared = Arc::new(SyncShared::new( shared.clone(), diff --git a/sync/src/tests/util.rs b/sync/src/tests/util.rs index 63fa305b83..5a765a0f1f 100644 --- a/sync/src/tests/util.rs +++ b/sync/src/tests/util.rs @@ -1,5 +1,5 @@ use crate::SyncShared; -use ckb_chain::ChainController; +use ckb_chain::{start_chain_services, ChainController}; use ckb_dao::DaoCalculator; use ckb_reward_calculator::RewardCalculator; use ckb_shared::{Shared, SharedBuilder, Snapshot}; @@ -19,7 +19,7 @@ pub fn build_chain(tip: BlockNumber) -> (SyncShared, ChainController) { .consensus(always_success_consensus()) .build() .unwrap(); - let chain_controller = pack.take_chain_services_builder().start(); + let chain_controller = start_chain_services(pack.take_chain_services_builder()); generate_blocks(&shared, &chain_controller, tip); let sync_shared = SyncShared::new(shared, Default::default(), pack.take_relay_tx_receiver()); (sync_shared, chain_controller) From e5a37a2c5c52a7199a0ffae218bc55b610921b46 Mon Sep 17 
00:00:00 2001 From: Eval EXEC Date: Wed, 29 Nov 2023 11:58:34 +0800 Subject: [PATCH 213/357] Add blocking_insert_new_block for SyncState for Unit Test purpose --- sync/src/types/mod.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index 08df411e02..924a7eae62 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -1,6 +1,6 @@ use crate::{Status, StatusCode, FAST_INDEX, LOW_INDEX, NORMAL_INDEX, TIME_TRACE_SIZE}; use ckb_app_config::SyncConfig; -use ckb_chain::ChainController; +use ckb_chain::{ChainController, VerifyResult}; use ckb_chain::{LonelyBlock, VerifyCallback}; use ckb_chain_spec::consensus::{Consensus, MAX_BLOCK_INTERVAL, MIN_BLOCK_INTERVAL}; use ckb_channel::Receiver; @@ -1156,6 +1156,16 @@ impl SyncShared { // } // } + // Only used by unit test + // Blocking insert a new block, return the verify result + pub(crate) fn blocking_insert_new_block( + &self, + chain: &ChainController, + block: Arc, + ) -> VerifyResult { + chain.blocking_process_block(block) + } + pub(crate) fn accept_block( &self, chain: &ChainController, From 1811bf5458800051503042fad5b79c1b20326c26 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 29 Nov 2023 11:59:27 +0800 Subject: [PATCH 214/357] Fix lifetime issue for Unit test in `ckb-chain` --- chain/src/tests/find_fork.rs | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/chain/src/tests/find_fork.rs b/chain/src/tests/find_fork.rs index 0b06e44896..ab01477d00 100644 --- a/chain/src/tests/find_fork.rs +++ b/chain/src/tests/find_fork.rs @@ -78,7 +78,7 @@ fn test_find_fork_case1() { tokio::sync::mpsc::unbounded_channel::(); let mut consume_unverified_block_processor = ConsumeUnverifiedBlockProcessor { - shared, + shared: shared.clone(), proposal_table, verify_failed_blocks_tx, }; @@ -141,7 +141,7 @@ fn test_find_fork_case1() { fn test_find_fork_case2() { let builder = SharedBuilder::with_temp_db(); let consensus = 
Consensus::default(); - let (shared, mut pack) = builder.consensus(consensus).build().unwrap(); + let (shared, mut pack) = builder.consensus(consensus.clone()).build().unwrap(); let genesis = shared .store() @@ -163,7 +163,7 @@ fn test_find_fork_case2() { tokio::sync::mpsc::unbounded_channel::(); let mut consume_unverified_block_processor = ConsumeUnverifiedBlockProcessor { - shared, + shared: shared.clone(), proposal_table, verify_failed_blocks_tx, }; @@ -226,7 +226,7 @@ fn test_find_fork_case2() { fn test_find_fork_case3() { let builder = SharedBuilder::with_temp_db(); let consensus = Consensus::default(); - let (shared, mut pack) = builder.consensus(consensus).build().unwrap(); + let (shared, mut pack) = builder.consensus(consensus.clone()).build().unwrap(); let genesis = shared .store() @@ -249,7 +249,7 @@ fn test_find_fork_case3() { tokio::sync::mpsc::unbounded_channel::(); let mut consume_unverified_block_processor = ConsumeUnverifiedBlockProcessor { - shared, + shared: shared.clone(), proposal_table, verify_failed_blocks_tx, }; @@ -311,7 +311,7 @@ fn test_find_fork_case3() { fn test_find_fork_case4() { let builder = SharedBuilder::with_temp_db(); let consensus = Consensus::default(); - let (shared, mut pack) = builder.consensus(consensus).build().unwrap(); + let (shared, mut pack) = builder.consensus(consensus.clone()).build().unwrap(); let genesis = shared .store() @@ -334,7 +334,7 @@ fn test_find_fork_case4() { tokio::sync::mpsc::unbounded_channel::(); let mut consume_unverified_block_processor = ConsumeUnverifiedBlockProcessor { - shared, + shared: shared.clone(), proposal_table, verify_failed_blocks_tx, }; @@ -394,7 +394,7 @@ fn test_find_fork_case4() { fn repeatedly_switch_fork() { let consensus = Consensus::default(); let (shared, mut pack) = SharedBuilder::with_temp_db() - .consensus(consensus) + .consensus(consensus.clone()) .build() .unwrap(); let genesis = shared @@ -419,7 +419,7 @@ fn repeatedly_switch_fork() { 
tokio::sync::mpsc::unbounded_channel::(); let mut consume_unverified_block_processor = ConsumeUnverifiedBlockProcessor { - shared, + shared: shared.clone(), proposal_table, verify_failed_blocks_tx, }; From 607b483892ead862a64796cade980b1a2f2ae44b Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 29 Nov 2023 11:59:48 +0800 Subject: [PATCH 215/357] Comment out sync_shared.rs related unit test --- sync/src/tests/sync_shared.rs | 288 +++++++++++++++++----------------- 1 file changed, 147 insertions(+), 141 deletions(-) diff --git a/sync/src/tests/sync_shared.rs b/sync/src/tests/sync_shared.rs index 59a5172234..ad2be693fb 100644 --- a/sync/src/tests/sync_shared.rs +++ b/sync/src/tests/sync_shared.rs @@ -1,6 +1,6 @@ use crate::tests::util::{build_chain, inherit_block}; use crate::SyncShared; -use ckb_chain::start_chain_services; +use ckb_chain::{start_chain_services, VerifiedBlockStatus}; use ckb_shared::block_status::BlockStatus; use ckb_shared::SharedBuilder; use ckb_store::{self, ChainStore}; @@ -18,12 +18,18 @@ fn test_insert_new_block() { Arc::new(next_block) }; - assert!(shared - .insert_new_block(&chain, Arc::clone(&new_block)) - .expect("insert valid block"),); - assert!(!shared - .insert_new_block(&chain, Arc::clone(&new_block)) - .expect("insert duplicated valid block"),); + matches!( + shared + .blocking_insert_new_block(&chain, Arc::clone(&new_block)) + .expect("insert valid block"), + VerifiedBlockStatus::FirstSeenAndVerified, + ); + matches!( + shared + .blocking_insert_new_block(&chain, Arc::clone(&new_block)) + .expect("insert duplicated valid block"), + VerifiedBlockStatus::PreviouslySeenAndVerified, + ); } #[test] @@ -42,141 +48,141 @@ fn test_insert_invalid_block() { }; assert!(shared - .insert_new_block(&chain, Arc::clone(&invalid_block)) + .blocking_insert_new_block(&chain, Arc::clone(&invalid_block)) .is_err(),); } -#[test] -fn test_insert_parent_unknown_block() { - let (shared1, _) = build_chain(2); - let (shared, chain) = { - let (shared, mut 
pack) = SharedBuilder::with_temp_db() - .consensus(shared1.consensus().clone()) - .build() - .unwrap(); - let chain_controller = start_chain_services(pack.take_chain_services_builder()); - ( - SyncShared::new(shared, Default::default(), pack.take_relay_tx_receiver()), - chain_controller, - ) - }; - - let block = shared1 - .store() - .get_block(&shared1.active_chain().tip_header().hash()) - .unwrap(); - let parent = { - let parent = shared1 - .store() - .get_block(&block.header().parent_hash()) - .unwrap(); - Arc::new(parent) - }; - let invalid_orphan = { - let invalid_orphan = block - .as_advanced_builder() - .header(block.header()) - .number(1000.pack()) - .build(); - - Arc::new(invalid_orphan) - }; - let valid_orphan = Arc::new(block); - let valid_hash = valid_orphan.header().hash(); - let invalid_hash = invalid_orphan.header().hash(); - let parent_hash = parent.header().hash(); - - assert!(!shared - .insert_new_block(&chain, Arc::clone(&valid_orphan)) - .expect("insert orphan block"),); - assert!(!shared - .insert_new_block(&chain, Arc::clone(&invalid_orphan)) - .expect("insert orphan block"),); - assert_eq!( - shared.active_chain().get_block_status(&valid_hash), - BlockStatus::BLOCK_RECEIVED - ); - assert_eq!( - shared.active_chain().get_block_status(&invalid_hash), - BlockStatus::BLOCK_RECEIVED - ); - - // After inserting parent of an orphan block - assert!(shared - .insert_new_block(&chain, Arc::clone(&parent)) - .expect("insert parent of orphan block"),); - assert_eq!( - shared.active_chain().get_block_status(&valid_hash), - BlockStatus::BLOCK_VALID - ); - assert_eq!( - shared.active_chain().get_block_status(&invalid_hash), - BlockStatus::BLOCK_INVALID - ); - assert_eq!( - shared.active_chain().get_block_status(&parent_hash), - BlockStatus::BLOCK_VALID - ); -} - -#[test] -fn test_switch_valid_fork() { - let (shared, chain) = build_chain(4); - let make_valid_block = |shared, parent_hash| -> BlockView { - let header = inherit_block(shared, 
&parent_hash).build().header(); - let timestamp = header.timestamp() + 3; - let cellbase = inherit_block(shared, &parent_hash).build().transactions()[0].clone(); - BlockBuilder::default() - .header(header) - .timestamp(timestamp.pack()) - .transaction(cellbase) - .build() - }; - - // Insert the valid fork. The fork blocks would not been verified until the fork switches as - // the main chain. And `block_status_map` would mark the fork blocks as `BLOCK_STORED` - let block_number = 1; - let mut parent_hash = shared.store().get_block_hash(block_number).unwrap(); - for number in 0..=block_number { - let block_hash = shared.store().get_block_hash(number).unwrap(); - shared.store().get_block(&block_hash).unwrap(); - } - let mut valid_fork = Vec::new(); - for _ in 2..shared.active_chain().tip_number() { - let block = make_valid_block(shared.shared(), parent_hash.clone()); - assert!(shared - .insert_new_block(&chain, Arc::new(block.clone())) - .expect("insert fork"),); +// #[test] +// fn test_insert_parent_unknown_block() { +// let (shared1, _) = build_chain(2); +// let (shared, chain) = { +// let (shared, mut pack) = SharedBuilder::with_temp_db() +// .consensus(shared1.consensus().clone()) +// .build() +// .unwrap(); +// let chain_controller = start_chain_services(pack.take_chain_services_builder()); +// ( +// SyncShared::new(shared, Default::default(), pack.take_relay_tx_receiver()), +// chain_controller, +// ) +// }; +// +// let block = shared1 +// .store() +// .get_block(&shared1.active_chain().tip_header().hash()) +// .unwrap(); +// let parent = { +// let parent = shared1 +// .store() +// .get_block(&block.header().parent_hash()) +// .unwrap(); +// Arc::new(parent) +// }; +// let invalid_orphan = { +// let invalid_orphan = block +// .as_advanced_builder() +// .header(block.header()) +// .number(1000.pack()) +// .build(); +// +// Arc::new(invalid_orphan) +// }; +// let valid_orphan = Arc::new(block); +// let valid_hash = valid_orphan.header().hash(); +// let 
invalid_hash = invalid_orphan.header().hash(); +// let parent_hash = parent.header().hash(); +// +// assert!(!shared +// .insert_new_block(&chain, Arc::clone(&valid_orphan)) +// .expect("insert orphan block"),); +// assert!(!shared +// .insert_new_block(&chain, Arc::clone(&invalid_orphan)) +// .expect("insert orphan block"),); +// assert_eq!( +// shared.active_chain().get_block_status(&valid_hash), +// BlockStatus::BLOCK_RECEIVED +// ); +// assert_eq!( +// shared.active_chain().get_block_status(&invalid_hash), +// BlockStatus::BLOCK_RECEIVED +// ); +// +// // After inserting parent of an orphan block +// assert!(shared +// .insert_new_block(&chain, Arc::clone(&parent)) +// .expect("insert parent of orphan block"),); +// assert_eq!( +// shared.active_chain().get_block_status(&valid_hash), +// BlockStatus::BLOCK_VALID +// ); +// assert_eq!( +// shared.active_chain().get_block_status(&invalid_hash), +// BlockStatus::BLOCK_INVALID +// ); +// assert_eq!( +// shared.active_chain().get_block_status(&parent_hash), +// BlockStatus::BLOCK_VALID +// ); +// } - parent_hash = block.header().hash(); - valid_fork.push(block); - } - for block in valid_fork.iter() { - assert_eq!( - shared - .active_chain() - .get_block_status(&block.header().hash()), - BlockStatus::BLOCK_STORED, - ); - } - - let tip_number = shared.active_chain().tip_number(); - // Make the fork switch as the main chain. 
- for _ in tip_number..tip_number + 2 { - let block = inherit_block(shared.shared(), &parent_hash.clone()).build(); - assert!(shared - .insert_new_block(&chain, Arc::new(block.clone())) - .expect("insert fork"),); - - parent_hash = block.header().hash(); - valid_fork.push(block); - } - for block in valid_fork.iter() { - assert_eq!( - shared - .active_chain() - .get_block_status(&block.header().hash()), - BlockStatus::BLOCK_VALID, - ); - } -} +// #[test] +// fn test_switch_valid_fork() { +// let (shared, chain) = build_chain(4); +// let make_valid_block = |shared, parent_hash| -> BlockView { +// let header = inherit_block(shared, &parent_hash).build().header(); +// let timestamp = header.timestamp() + 3; +// let cellbase = inherit_block(shared, &parent_hash).build().transactions()[0].clone(); +// BlockBuilder::default() +// .header(header) +// .timestamp(timestamp.pack()) +// .transaction(cellbase) +// .build() +// }; +// +// // Insert the valid fork. The fork blocks would not been verified until the fork switches as +// // the main chain. 
And `block_status_map` would mark the fork blocks as `BLOCK_STORED` +// let block_number = 1; +// let mut parent_hash = shared.store().get_block_hash(block_number).unwrap(); +// for number in 0..=block_number { +// let block_hash = shared.store().get_block_hash(number).unwrap(); +// shared.store().get_block(&block_hash).unwrap(); +// } +// let mut valid_fork = Vec::new(); +// for _ in 2..shared.active_chain().tip_number() { +// let block = make_valid_block(shared.shared(), parent_hash.clone()); +// assert!(shared +// .insert_new_block(&chain, Arc::new(block.clone())) +// .expect("insert fork"),); +// +// parent_hash = block.header().hash(); +// valid_fork.push(block); +// } +// for block in valid_fork.iter() { +// assert_eq!( +// shared +// .active_chain() +// .get_block_status(&block.header().hash()), +// BlockStatus::BLOCK_STORED, +// ); +// } +// +// let tip_number = shared.active_chain().tip_number(); +// // Make the fork switch as the main chain. +// for _ in tip_number..tip_number + 2 { +// let block = inherit_block(shared.shared(), &parent_hash.clone()).build(); +// assert!(shared +// .insert_new_block(&chain, Arc::new(block.clone())) +// .expect("insert fork"),); +// +// parent_hash = block.header().hash(); +// valid_fork.push(block); +// } +// for block in valid_fork.iter() { +// assert_eq!( +// shared +// .active_chain() +// .get_block_status(&block.header().hash()), +// BlockStatus::BLOCK_VALID, +// ); +// } +// } From 4267123e94cb6824222f443c23b7469f3ec373af Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 29 Nov 2023 12:03:09 +0800 Subject: [PATCH 216/357] Remove Synchronizer::verify_failed_blocks_rx Option wrapper --- sync/src/synchronizer/mod.rs | 50 +++++++++++++++++------------------- 1 file changed, 23 insertions(+), 27 deletions(-) diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index 50f681c817..e1d095c68d 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -234,8 +234,7 @@ pub struct 
Synchronizer { pub shared: Arc, fetch_channel: Option>, - pub(crate) verify_failed_blocks_rx: - Option>, + pub(crate) verify_failed_blocks_rx: tokio::sync::mpsc::UnboundedReceiver, } impl Synchronizer { @@ -245,9 +244,7 @@ impl Synchronizer { pub fn new( chain: ChainController, shared: Arc, - verify_failed_blocks_rx: Option< - tokio::sync::mpsc::UnboundedReceiver, - >, + verify_failed_blocks_rx: tokio::sync::mpsc::UnboundedReceiver, ) -> Synchronizer { Synchronizer { chain, @@ -886,29 +883,28 @@ impl CKBProtocolHandler for Synchronizer { } async fn poll(&mut self, nc: Arc) -> Option<()> { - if let Some(verify_failed_blocks_rx) = &mut self.verify_failed_blocks_rx { - let mut have_malformed_peers = false; - while let Some(malformed_peer_info) = verify_failed_blocks_rx.recv().await { - have_malformed_peers = true; - if malformed_peer_info.is_internal_db_error { - // we shouldn't ban that peer if it's an internal db error - continue; - } - - Self::post_sync_process( - nc.as_ref(), - malformed_peer_info.peer_id, - "SendBlock", - malformed_peer_info.message_bytes, - StatusCode::BlockIsInvalid.with_context(format!( - "block {} is invalid, reason: {}", - malformed_peer_info.block_hash, malformed_peer_info.reason - )), - ); - } - if have_malformed_peers { - return Some(()); + let mut have_malformed_peers = false; + while let Some(malformed_peer_info) = self.verify_failed_blocks_rx.recv().await { + have_malformed_peers = true; + if malformed_peer_info.is_internal_db_error { + // we shouldn't ban that peer if it's an internal db error + continue; } + + Self::post_sync_process( + nc.as_ref(), + malformed_peer_info.peer_id, + "SendBlock", + malformed_peer_info.message_bytes, + StatusCode::BlockIsInvalid.with_context(format!( + "block {} is invalid, reason: {}", + malformed_peer_info.block_hash, malformed_peer_info.reason + )), + ); + } + + if have_malformed_peers { + return Some(()); } None } From 8ea7622563b52e6e9e766fff5454e88b5efcd91e Mon Sep 17 00:00:00 2001 From: Eval EXEC 
Date: Wed, 29 Nov 2023 12:04:39 +0800 Subject: [PATCH 217/357] Fix Launcher construct Synchronizer --- util/launcher/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/util/launcher/src/lib.rs b/util/launcher/src/lib.rs index c9c114a136..1e105c42e6 100644 --- a/util/launcher/src/lib.rs +++ b/util/launcher/src/lib.rs @@ -289,7 +289,7 @@ impl Launcher { let synchronizer = Synchronizer::new( chain_controller.clone(), Arc::clone(&sync_shared), - Some(verify_failed_block_rx), + verify_failed_block_rx, ); let mut protocols = vec![CKBProtocol::new_with_support_protocol( SupportProtocols::Sync, From 08c405a35a98952444025dd0ba0864dcfef0f71c Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 29 Nov 2023 12:16:17 +0800 Subject: [PATCH 218/357] Remove Synchronizer::process_new_block to asynchronous_process_new_block --- sync/src/synchronizer/block_process.rs | 26 ++++++-------------------- sync/src/synchronizer/mod.rs | 2 +- sync/src/types/mod.rs | 1 + 3 files changed, 8 insertions(+), 21 deletions(-) diff --git a/sync/src/synchronizer/block_process.rs b/sync/src/synchronizer/block_process.rs index 732da3a78a..edfafc572b 100644 --- a/sync/src/synchronizer/block_process.rs +++ b/sync/src/synchronizer/block_process.rs @@ -1,4 +1,5 @@ use crate::synchronizer::Synchronizer; +use ckb_chain::VerifyResult; use ckb_logger::debug; use ckb_network::PeerIndex; use ckb_types::{packed, prelude::*}; @@ -35,26 +36,11 @@ impl<'a> BlockProcess<'a> { let shared = self.synchronizer.shared(); if shared.new_block_received(&block) { - self.synchronizer - .process_new_block(block.clone(), self.peer, self.message_bytes); - // { - // Ok(verify_failed_peers) => { - // return verify_failed_peers; - // } - // Err(err) => { - // error!("BlockProcess process_new_block error: {:?}", err); - // } - // } - - // if let Err(err) = this_block_verify_result { - // if !is_internal_db_error(&err) { - // return StatusCode::BlockIsInvalid.with_context(format!( - // "{}, error: {}", - // 
block.hash(), - // err, - // )); - // } - // } + self.synchronizer.asynchronous_process_new_block( + block.clone(), + self.peer, + self.message_bytes, + ); } } } diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index e1d095c68d..98d780498a 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -353,7 +353,7 @@ impl Synchronizer { /// Process a new block sync from other peer //TODO: process block which we don't request - pub fn process_new_block( + pub fn asynchronous_process_new_block( &self, block: core::BlockView, peer_id: PeerIndex, diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index 924a7eae62..8040c222a1 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -1158,6 +1158,7 @@ impl SyncShared { // Only used by unit test // Blocking insert a new block, return the verify result + #[cfg(test)] pub(crate) fn blocking_insert_new_block( &self, chain: &ChainController, From 939af6ba489770bc302eb2a84f6e8e155eccccf7 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 29 Nov 2023 12:45:37 +0800 Subject: [PATCH 219/357] Add `blocking_execute` method for `BlockProcess` for the purpose of unit test --- sync/src/synchronizer/block_process.rs | 29 ++++++++++++++++++++- sync/src/synchronizer/mod.rs | 33 +++++++++++++++++++++++- sync/src/tests/synchronizer/functions.rs | 4 +-- sync/src/types/mod.rs | 16 ++++++++++++ 4 files changed, 78 insertions(+), 4 deletions(-) diff --git a/sync/src/synchronizer/block_process.rs b/sync/src/synchronizer/block_process.rs index edfafc572b..b97bbe1251 100644 --- a/sync/src/synchronizer/block_process.rs +++ b/sync/src/synchronizer/block_process.rs @@ -1,5 +1,4 @@ use crate::synchronizer::Synchronizer; -use ckb_chain::VerifyResult; use ckb_logger::debug; use ckb_network::PeerIndex; use ckb_types::{packed, prelude::*}; @@ -43,4 +42,32 @@ impl<'a> BlockProcess<'a> { ); } } + + #[cfg(test)] + pub fn blocking_execute(self) -> crate::Status { + let block = 
self.message.block().to_entity().into_view(); + debug!( + "BlockProcess received block {} {}", + block.number(), + block.hash(), + ); + let shared = self.synchronizer.shared(); + + if shared.new_block_received(&block) { + if let Err(err) = self.synchronizer.blocking_process_new_block( + block.clone(), + self.peer, + self.message_bytes, + ) { + if !ckb_error::is_internal_db_error(&err) { + return crate::StatusCode::BlockIsInvalid.with_context(format!( + "{}, error: {}", + block.hash(), + err, + )); + } + } + } + crate::Status::ok() + } } diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index 98d780498a..3fae2399f4 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -25,13 +25,14 @@ use crate::utils::{metric_ckb_message_bytes, send_message_to, MetricDirection}; use crate::{Status, StatusCode}; use ckb_shared::block_status::BlockStatus; -use ckb_chain::ChainController; +use ckb_chain::{ChainController, VerifyResult}; use ckb_channel as channel; use ckb_channel::{select, Receiver}; use ckb_constant::sync::{ BAD_MESSAGE_BAN_TIME, CHAIN_SYNC_TIMEOUT, EVICTION_HEADERS_RESPONSE_TIME, INIT_BLOCKS_IN_TRANSIT_PER_PEER, MAX_TIP_AGE, }; +use ckb_error::ErrorKind; use ckb_logger::{debug, error, info, trace, warn}; use ckb_network::{ async_trait, bytes::Bytes, tokio, CKBProtocolContext, CKBProtocolHandler, PeerIndex, @@ -377,6 +378,36 @@ impl Synchronizer { } } + #[cfg(test)] + pub fn blocking_process_new_block( + &self, + block: core::BlockView, + peer_id: PeerIndex, + message_bytes: u64, + ) -> VerifyResult { + let block_hash = block.hash(); + let status = self.shared.active_chain().get_block_status(&block_hash); + // NOTE: Filtering `BLOCK_STORED` but not `BLOCK_RECEIVED`, is for avoiding + // stopping synchronization even when orphan_pool maintains dirty items by bugs. 
+ if status.contains(BlockStatus::BLOCK_PARTIAL_STORED) { + error!("block {} already partial stored", block_hash); + } else if status.contains(BlockStatus::HEADER_VALID) { + self.shared.blocking_insert_new_block_with_verbose_info( + &self.chain, + Arc::new(block), + peer_id, + message_bytes, + ) + } else { + debug!( + "Synchronizer process_new_block unexpected status {:?} {}", + status, block_hash, + ); + // TODO while error should we return? + Err(ErrorKind::other("block status doesn't contain HEADER_VALID").into()) + } + } + /// Get blocks to fetch pub fn get_blocks_to_fetch( &self, diff --git a/sync/src/tests/synchronizer/functions.rs b/sync/src/tests/synchronizer/functions.rs index e8b80c00f7..8b61dc6fd8 100644 --- a/sync/src/tests/synchronizer/functions.rs +++ b/sync/src/tests/synchronizer/functions.rs @@ -353,7 +353,7 @@ fn test_process_new_block() { blocks.into_iter().for_each(|block| { synchronizer .shared() - .insert_new_block(&synchronizer.chain, Arc::new(block)) + .blocking_insert_new_block(&synchronizer.chain, Arc::new(block)) .expect("Insert new block failed"); }); assert_eq!(&chain1_last_block.header(), shared2.snapshot().tip_header()); @@ -663,7 +663,7 @@ fn test_sync_process() { for block in &fetched_blocks { let block = SendBlockBuilder::default().block(block.data()).build(); assert_eq!( - BlockProcess::new(block.as_reader(), &synchronizer1, peer1).execute(), + BlockProcess::new(block.as_reader(), &synchronizer1, peer1, 0).execute(), Status::ok(), ); } diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index 8040c222a1..ca77608231 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -1167,6 +1167,22 @@ impl SyncShared { chain.blocking_process_block(block) } + #[cfg(test)] + pub(crate) fn blocking_insert_new_block_with_verbose_info( + &self, + chain: &ChainController, + block: Arc, + peer_id: PeerIndex, + message_bytes: u64, + ) -> VerifyResult { + let lonely_block: LonelyBlock = LonelyBlock { + block, + peer_id: 
Some(PeerIndex::new(0)), + switch: None, + }; + chain.blocking_process_lonely_block(lonely_block) + } + pub(crate) fn accept_block( &self, chain: &ChainController, From 1c4fd55efccc8171a4b3d2917e9a3166409e92f2 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 29 Nov 2023 13:35:14 +0800 Subject: [PATCH 220/357] Modify Synchronizer::blocking_process_new_block return Result --- sync/src/synchronizer/mod.rs | 25 ++++++++++++++++-------- sync/src/tests/synchronizer/functions.rs | 2 +- 2 files changed, 18 insertions(+), 9 deletions(-) diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index 3fae2399f4..2f9319fe00 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -384,27 +384,36 @@ impl Synchronizer { block: core::BlockView, peer_id: PeerIndex, message_bytes: u64, - ) -> VerifyResult { + ) -> Result { let block_hash = block.hash(); let status = self.shared.active_chain().get_block_status(&block_hash); // NOTE: Filtering `BLOCK_STORED` but not `BLOCK_RECEIVED`, is for avoiding // stopping synchronization even when orphan_pool maintains dirty items by bugs. if status.contains(BlockStatus::BLOCK_PARTIAL_STORED) { error!("block {} already partial stored", block_hash); + Ok(false) } else if status.contains(BlockStatus::HEADER_VALID) { - self.shared.blocking_insert_new_block_with_verbose_info( - &self.chain, - Arc::new(block), - peer_id, - message_bytes, - ) + self.shared + .blocking_insert_new_block_with_verbose_info( + &self.chain, + Arc::new(block), + peer_id, + message_bytes, + ) + .map(|v| { + matches!( + v, + ckb_chain::VerifiedBlockStatus::FirstSeenAndVerified + | ckb_chain::VerifiedBlockStatus::UncleBlockNotVerified + ) + }) } else { debug!( "Synchronizer process_new_block unexpected status {:?} {}", status, block_hash, ); // TODO while error should we return? 
- Err(ErrorKind::other("block status doesn't contain HEADER_VALID").into()) + Ok(false) } } diff --git a/sync/src/tests/synchronizer/functions.rs b/sync/src/tests/synchronizer/functions.rs index 8b61dc6fd8..dbc95f42e5 100644 --- a/sync/src/tests/synchronizer/functions.rs +++ b/sync/src/tests/synchronizer/functions.rs @@ -663,7 +663,7 @@ fn test_sync_process() { for block in &fetched_blocks { let block = SendBlockBuilder::default().block(block.data()).build(); assert_eq!( - BlockProcess::new(block.as_reader(), &synchronizer1, peer1, 0).execute(), + BlockProcess::new(block.as_reader(), &synchronizer1, peer1, 0).blocking_execute(), Status::ok(), ); } From 8c76b4e2d4ba3b8b2700c3f6c7844943069dc47b Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 29 Nov 2023 13:38:52 +0800 Subject: [PATCH 221/357] Fix ckb-sync test_internal_db_error unit test --- sync/src/tests/synchronizer/functions.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/sync/src/tests/synchronizer/functions.rs b/sync/src/tests/synchronizer/functions.rs index dbc95f42e5..71a63d8e84 100644 --- a/sync/src/tests/synchronizer/functions.rs +++ b/sync/src/tests/synchronizer/functions.rs @@ -1212,8 +1212,9 @@ fn test_internal_db_error() { let (shared, mut pack) = builder.build().unwrap(); - let chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); - let _chain_controller = chain_service.start::<&str>(None); + // TODO fix later + // let chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); + // let _chain_controller = chain_service.start::<&str>(None); let sync_shared = Arc::new(SyncShared::new( shared, @@ -1225,7 +1226,7 @@ fn test_internal_db_error() { let block = Arc::new(BlockBuilder::default().build()); // mock process_block - faux::when!(chain_controller.process_block(Arc::clone(&block))).then_return(Err( + faux::when!(chain_controller.blocking_process_block(Arc::clone(&block))).then_return(Err( 
InternalErrorKind::Database.other("mocked db error").into(), )); @@ -1237,7 +1238,7 @@ fn test_internal_db_error() { let status = synchronizer .shared() - .accept_block(&synchronizer.chain, Arc::clone(&block)); + .blocking_insert_new_block(&synchronizer.chain, Arc::clone(&block)); assert!(is_internal_db_error(&status.err().unwrap())); } From e815a20f75879aeb374a1dd8e46cdd4a67e5e69e Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 29 Nov 2023 13:48:26 +0800 Subject: [PATCH 222/357] Fix cargo clippy warnings --- ckb-bin/src/subcommand/import.rs | 2 +- ckb-bin/src/subcommand/replay.rs | 2 +- ckb-bin/src/subcommand/run.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ckb-bin/src/subcommand/import.rs b/ckb-bin/src/subcommand/import.rs index 81867f3e48..0b3eabc175 100644 --- a/ckb-bin/src/subcommand/import.rs +++ b/ckb-bin/src/subcommand/import.rs @@ -12,7 +12,7 @@ pub fn import(args: ImportArgs, async_handle: Handle) -> Result<(), ExitCode> { async_handle, args.consensus, )?; - let (shared, mut pack) = builder.build()?; + let (_shared, mut pack) = builder.build()?; let chain_controller = ckb_chain::start_chain_services(pack.take_chain_services_builder()); diff --git a/ckb-bin/src/subcommand/replay.rs b/ckb-bin/src/subcommand/replay.rs index 8414575537..fcadb6a23a 100644 --- a/ckb-bin/src/subcommand/replay.rs +++ b/ckb-bin/src/subcommand/replay.rs @@ -46,7 +46,7 @@ pub fn replay(args: ReplayArgs, async_handle: Handle) -> Result<(), ExitCode> { async_handle, args.consensus, )?; - let (tmp_shared, mut pack) = shared_builder.tx_pool_config(args.config.tx_pool).build()?; + let (_tmp_shared, mut pack) = shared_builder.tx_pool_config(args.config.tx_pool).build()?; let chain_service_builder: ChainServicesBuilder = pack.take_chain_services_builder(); let chain_controller = ckb_chain::start_chain_services(chain_service_builder); diff --git a/ckb-bin/src/subcommand/run.rs b/ckb-bin/src/subcommand/run.rs index cd5f36ca09..f678855304 100644 --- 
a/ckb-bin/src/subcommand/run.rs +++ b/ckb-bin/src/subcommand/run.rs @@ -4,7 +4,7 @@ use ckb_async_runtime::Handle; use ckb_build_info::Version; use ckb_launcher::Launcher; use ckb_logger::info; -use ckb_shared::types::VerifyFailedBlockInfo; + use ckb_stop_handler::{broadcast_exit_signals, wait_all_ckb_services_exit}; use ckb_types::core::cell::setup_system_cell_cache; From 97288acce9a4f787c59973b5368e2609a3e33048 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 29 Nov 2023 13:49:14 +0800 Subject: [PATCH 223/357] Remove orphan_block_pool from ckb-sync --- sync/src/lib.rs | 1 - sync/src/orphan_block_pool.rs | 171 ---------------------------------- 2 files changed, 172 deletions(-) delete mode 100644 sync/src/orphan_block_pool.rs diff --git a/sync/src/lib.rs b/sync/src/lib.rs index a12ba2596d..e78d3c01a0 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -5,7 +5,6 @@ mod filter; pub(crate) mod net_time_checker; -pub(crate) mod orphan_block_pool; mod relayer; mod status; mod synchronizer; diff --git a/sync/src/orphan_block_pool.rs b/sync/src/orphan_block_pool.rs deleted file mode 100644 index f30b5a5848..0000000000 --- a/sync/src/orphan_block_pool.rs +++ /dev/null @@ -1,171 +0,0 @@ -use ckb_logger::debug; -use ckb_types::core::EpochNumber; -use ckb_types::{core, packed}; -use ckb_util::{parking_lot::RwLock, shrink_to_fit}; -use std::collections::{HashMap, HashSet, VecDeque}; - -pub type ParentHash = packed::Byte32; -const SHRINK_THRESHOLD: usize = 100; - -// Orphan pool will remove expired blocks whose epoch is less than tip_epoch - EXPIRED_EPOCH, -const EXPIRED_EPOCH: u64 = 6; - -#[derive(Default)] -struct InnerPool { - // Group by blocks in the pool by the parent hash. - blocks: HashMap>, - // The map tells the parent hash when given the hash of a block in the pool. - // - // The block is in the orphan pool if and only if the block hash exists as a key in this map. 
- parents: HashMap, - // Leaders are blocks not in the orphan pool but having at least a child in the pool. - leaders: HashSet, -} - -impl InnerPool { - fn with_capacity(capacity: usize) -> Self { - InnerPool { - blocks: HashMap::with_capacity(capacity), - parents: HashMap::new(), - leaders: HashSet::new(), - } - } - - fn insert(&mut self, block: core::BlockView) { - let hash = block.header().hash(); - let parent_hash = block.data().header().raw().parent_hash(); - self.blocks - .entry(parent_hash.clone()) - .or_insert_with(HashMap::default) - .insert(hash.clone(), block); - // Out-of-order insertion needs to be deduplicated - self.leaders.remove(&hash); - // It is a possible optimization to make the judgment in advance, - // because the parent of the block must not be equal to its own hash, - // so we can judge first, which may reduce one arc clone - if !self.parents.contains_key(&parent_hash) { - // Block referenced by `parent_hash` is not in the pool, - // and it has at least one child, the new inserted block, so add it to leaders. 
- self.leaders.insert(parent_hash.clone()); - } - self.parents.insert(hash, parent_hash); - } - - pub fn remove_blocks_by_parent(&mut self, parent_hash: &ParentHash) -> Vec { - // try remove leaders first - if !self.leaders.remove(parent_hash) { - return Vec::new(); - } - - let mut queue: VecDeque = VecDeque::new(); - queue.push_back(parent_hash.to_owned()); - - let mut removed: Vec = Vec::new(); - while let Some(parent_hash) = queue.pop_front() { - if let Some(orphaned) = self.blocks.remove(&parent_hash) { - let (hashes, blocks): (Vec<_>, Vec<_>) = orphaned.into_iter().unzip(); - for hash in hashes.iter() { - self.parents.remove(hash); - } - queue.extend(hashes); - removed.extend(blocks); - } - } - - debug!("orphan pool pop chain len: {}", removed.len()); - debug_assert_ne!( - removed.len(), - 0, - "orphan pool removed list must not be zero" - ); - - shrink_to_fit!(self.blocks, SHRINK_THRESHOLD); - shrink_to_fit!(self.parents, SHRINK_THRESHOLD); - shrink_to_fit!(self.leaders, SHRINK_THRESHOLD); - removed - } - - pub fn get_block(&self, hash: &packed::Byte32) -> Option { - self.parents.get(hash).and_then(|parent_hash| { - self.blocks - .get(parent_hash) - .and_then(|blocks| blocks.get(hash).cloned()) - }) - } - - /// cleanup expired blocks(epoch + EXPIRED_EPOCH < tip_epoch) - pub fn clean_expired_blocks(&mut self, tip_epoch: EpochNumber) -> Vec { - let mut result = vec![]; - - for hash in self.leaders.clone().iter() { - if self.need_clean(hash, tip_epoch) { - // remove items in orphan pool and return hash to callee(clean header map) - let descendants = self.remove_blocks_by_parent(hash); - result.extend(descendants.iter().map(|block| block.hash())); - } - } - result - } - - /// get 1st block belongs to that parent and check if it's expired block - fn need_clean(&self, parent_hash: &packed::Byte32, tip_epoch: EpochNumber) -> bool { - self.blocks - .get(parent_hash) - .and_then(|map| { - map.iter() - .next() - .map(|(_, block)| block.header().epoch().number() + 
EXPIRED_EPOCH < tip_epoch) - }) - .unwrap_or_default() - } -} - -// NOTE: Never use `LruCache` as container. We have to ensure synchronizing between -// orphan_block_pool and block_status_map, but `LruCache` would prune old items implicitly. -// RwLock ensures the consistency between maps. Using multiple concurrent maps does not work here. -#[derive(Default)] -pub struct OrphanBlockPool { - inner: RwLock, -} - -impl OrphanBlockPool { - pub fn with_capacity(capacity: usize) -> Self { - OrphanBlockPool { - inner: RwLock::new(InnerPool::with_capacity(capacity)), - } - } - - /// Insert orphaned block, for which we have already requested its parent block - pub fn insert(&self, block: core::BlockView) { - self.inner.write().insert(block); - } - - pub fn remove_blocks_by_parent(&self, parent_hash: &ParentHash) -> Vec { - self.inner.write().remove_blocks_by_parent(parent_hash) - } - - pub fn get_block(&self, hash: &packed::Byte32) -> Option { - self.inner.read().get_block(hash) - } - - pub fn clean_expired_blocks(&self, epoch: EpochNumber) -> Vec { - self.inner.write().clean_expired_blocks(epoch) - } - - pub fn len(&self) -> usize { - self.inner.read().parents.len() - } - - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - pub fn clone_leaders(&self) -> Vec { - self.inner.read().leaders.iter().cloned().collect() - } - - #[cfg(test)] - pub(crate) fn leaders_len(&self) -> usize { - self.inner.read().leaders.len() - } -} From 1121cd165f3ff66e6ada6434962333e304f3acfe Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 29 Nov 2023 15:14:15 +0800 Subject: [PATCH 224/357] Add Warning message for threads stop notify in ckb-chain --- chain/src/chain.rs | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 1ead84ed6f..a12304f391 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -240,11 +240,15 @@ pub fn start_chain_services(builder: ChainServicesBuilder) -> ChainController { move || { 
chain_service.start(); - search_orphan_pool_stop_tx.send(()); - search_orphan_pool_thread.join(); + if Err(SendError(_)) = search_orphan_pool_stop_tx.send(()) { + warn!("trying to notify search_orphan_pool thread to stop, but search_orphan_pool_stop_tx already closed") + } + let _ = search_orphan_pool_thread.join(); - unverified_queue_stop_tx.send(()); - consumer_unverified_thread.join(); + if Err(SendError(_))= unverified_queue_stop_tx.send(()){ + warn!("trying to notify consume unverified thread to stop, but unverified_queue_stop_tx already closed"); + } + let _ = consumer_unverified_thread.join(); } }) .expect("start chain_service thread should ok"); From 157a16ed5419ef5659933bb8ec3dff204f0812a7 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 29 Nov 2023 15:32:14 +0800 Subject: [PATCH 225/357] Fix ckb-chain consume_unveriifed_blocks stop handle --- chain/src/consume_orphan.rs | 8 ++++---- chain/src/consume_unverified.rs | 10 +++++----- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/chain/src/consume_orphan.rs b/chain/src/consume_orphan.rs index a1636b1f2b..36e8fbf1e0 100644 --- a/chain/src/consume_orphan.rs +++ b/chain/src/consume_orphan.rs @@ -51,10 +51,6 @@ impl ConsumeOrphan { pub(crate) fn start(&self) { loop { select! { - recv(self.stop_rx) -> _ => { - info!("unverified_queue_consumer got exit signal, exit now"); - return; - }, recv(self.lonely_blocks_rx) -> msg => match msg { Ok(lonely_block) => { self.process_lonely_block(lonely_block); @@ -64,6 +60,10 @@ impl ConsumeOrphan { return } }, + recv(self.stop_rx) -> _ => { + info!("unverified_queue_consumer got exit signal, exit now"); + return; + }, } } } diff --git a/chain/src/consume_unverified.rs b/chain/src/consume_unverified.rs index de19f9b37a..ffb472184b 100644 --- a/chain/src/consume_unverified.rs +++ b/chain/src/consume_unverified.rs @@ -64,10 +64,6 @@ impl ConsumeUnverifiedBlocks { loop { begin_loop = std::time::Instant::now(); select! 
{ - recv(self.stop_rx) -> _ => { - info!("unverified_queue_consumer got exit signal, exit now"); - return; - }, recv(self.unverified_block_rx) -> msg => match msg { Ok(unverified_task) => { // process this unverified block @@ -80,7 +76,11 @@ impl ConsumeUnverifiedBlocks { return; }, }, - default => {}, + recv(self.stop_rx) -> _ => { + info!("consume_unverified_blocks thread received exit signal, exit now"); + break; + } + } } } From 5c7cb427bc336cf751ef146a0cc204ea00a45177 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 29 Nov 2023 15:37:34 +0800 Subject: [PATCH 226/357] Pause chunk_process in consume_unverified_blocks thread --- chain/src/consume_unverified.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/chain/src/consume_unverified.rs b/chain/src/consume_unverified.rs index ffb472184b..530529d3dd 100644 --- a/chain/src/consume_unverified.rs +++ b/chain/src/consume_unverified.rs @@ -14,6 +14,7 @@ use ckb_shared::types::VerifyFailedBlockInfo; use ckb_shared::Shared; use ckb_store::{attach_block_cell, detach_block_cell, ChainStore, StoreTransaction}; use ckb_systemtime::unix_time_as_millis; +use ckb_tx_pool::TxPoolController; use ckb_types::core::cell::{ resolve_transaction, BlockCellProvider, HeaderChecker, OverlayCellProvider, ResolvedTransaction, }; @@ -36,6 +37,7 @@ pub(crate) struct ConsumeUnverifiedBlockProcessor { } pub(crate) struct ConsumeUnverifiedBlocks { + tx_pool_controller: TxPoolController, unverified_block_rx: Receiver, stop_rx: Receiver<()>, processor: ConsumeUnverifiedBlockProcessor, @@ -50,6 +52,7 @@ impl ConsumeUnverifiedBlocks { stop_rx: Receiver<()>, ) -> Self { ConsumeUnverifiedBlocks { + tx_pool_controller: shared.tx_pool_controller().to_owned(), unverified_block_rx: unverified_blocks_rx, stop_rx, processor: ConsumeUnverifiedBlockProcessor { @@ -59,8 +62,10 @@ impl ConsumeUnverifiedBlocks { }, } } + pub(crate) fn start(mut self) { let mut begin_loop = std::time::Instant::now(); + let tx_pool_control = 
self.tx_pool_controller(); loop { begin_loop = std::time::Instant::now(); select! { @@ -68,7 +73,9 @@ impl ConsumeUnverifiedBlocks { Ok(unverified_task) => { // process this unverified block trace!("got an unverified block, wait cost: {:?}", begin_loop.elapsed()); + let _ = tx_pool_control.suspend_chunk_process(); self.processor.consume_unverified_blocks(unverified_task); + let _ = tx_pool_control.resume_chunk_process(); trace!("consume_unverified_blocks cost: {:?}", begin_loop.elapsed()); }, Err(err) => { From 7161ef8a1791eebf1d96fe3287ef1595004f64b3 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 29 Nov 2023 15:41:51 +0800 Subject: [PATCH 227/357] Move truncate_block_request logic to consume_unverified_blocks --- chain/src/consume_unverified.rs | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/chain/src/consume_unverified.rs b/chain/src/consume_unverified.rs index 530529d3dd..d42a65f0e0 100644 --- a/chain/src/consume_unverified.rs +++ b/chain/src/consume_unverified.rs @@ -1,6 +1,7 @@ use crate::{ tell_synchronizer_to_punish_the_bad_peer, utils::forkchanges::ForkChanges, GlobalIndex, - LonelyBlock, LonelyBlockWithCallback, UnverifiedBlock, VerifiedBlockStatus, VerifyResult, + LonelyBlock, LonelyBlockWithCallback, TruncateRequest, UnverifiedBlock, VerifiedBlockStatus, + VerifyResult, }; use ckb_channel::{select, Receiver}; use ckb_error::{Error, InternalErrorKind}; @@ -38,7 +39,10 @@ pub(crate) struct ConsumeUnverifiedBlockProcessor { pub(crate) struct ConsumeUnverifiedBlocks { tx_pool_controller: TxPoolController, + unverified_block_rx: Receiver, + truncate_block_rx: Receiver, + stop_rx: Receiver<()>, processor: ConsumeUnverifiedBlockProcessor, } @@ -47,6 +51,7 @@ impl ConsumeUnverifiedBlocks { pub(crate) fn new( shared: Shared, unverified_blocks_rx: Receiver, + truncate_block_rx: Receiver, proposal_table: ProposalTable, verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, stop_rx: Receiver<()>, @@ -54,6 +59,7 @@ 
impl ConsumeUnverifiedBlocks { ConsumeUnverifiedBlocks { tx_pool_controller: shared.tx_pool_controller().to_owned(), unverified_block_rx: unverified_blocks_rx, + truncate_block_rx, stop_rx, processor: ConsumeUnverifiedBlockProcessor { shared, @@ -83,6 +89,19 @@ impl ConsumeUnverifiedBlocks { return; }, }, + recv(self.truncate_block_rx) -> msg => match msg { + Ok(Request { responder, arguments: target_tip_hash }) => { + let _ = tx_pool_control.suspend_chunk_process(); + let _ = responder.send(self.truncate( + &mut proposal_table, + &target_tip_hash)); + let _ = tx_pool_control.continue_chunk_process(); + }, + Err(err) => { + error!("truncate_block_tx has been closed,err: {}", err); + return; + }, + }, recv(self.stop_rx) -> _ => { info!("consume_unverified_blocks thread received exit signal, exit now"); break; From a5da29c8054a331cc7b93b1f3b782beb6392bd09 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 29 Nov 2023 15:49:25 +0800 Subject: [PATCH 228/357] ChainService won't need truncate_block_rx anymore --- chain/src/chain.rs | 35 ++++++++++++++++++++++++++++------- 1 file changed, 28 insertions(+), 7 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index a12304f391..6a473403f0 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -174,6 +174,8 @@ impl ChainController { pub fn start_chain_services(builder: ChainServicesBuilder) -> ChainController { let orphan_blocks_broker = Arc::new(OrphanBlockPool::with_capacity(ORPHAN_BLOCK_SIZE)); + let (truncate_block_tx, truncate_block_rx) = channel::bounded(1); + let (unverified_queue_stop_tx, unverified_queue_stop_rx) = ckb_channel::bounded::<()>(1); let (unverified_tx, unverified_rx) = channel::bounded::(BLOCK_DOWNLOAD_WINDOW as usize * 3); @@ -187,6 +189,7 @@ pub fn start_chain_services(builder: ChainServicesBuilder) -> ChainController { let consume_unverified = ConsumeUnverifiedBlocks::new( shared, unverified_rx, + truncate_block_rx, builder.proposal_table, verify_failed_blocks_tx, 
unverified_queue_stop_rx, @@ -225,12 +228,9 @@ pub fn start_chain_services(builder: ChainServicesBuilder) -> ChainController { let (process_block_tx, process_block_rx) = channel::bounded(BLOCK_DOWNLOAD_WINDOW as usize); - let (truncate_block_tx, truncate_block_rx) = channel::bounded(1); - let chain_service: ChainService = ChainService::new( builder.shared, process_block_rx, - truncate_block_rx, lonely_block_tx, builder.verify_failed_blocks_tx, ); @@ -238,7 +238,7 @@ pub fn start_chain_services(builder: ChainServicesBuilder) -> ChainController { .name("ChainService".into()) .spawn({ move || { - chain_service.start(); + chain_service.start_process_block(); if Err(SendError(_)) = search_orphan_pool_stop_tx.send(()) { warn!("trying to notify search_orphan_pool thread to stop, but search_orphan_pool_stop_tx already closed") @@ -265,7 +265,6 @@ pub(crate) struct ChainService { shared: Shared, process_block_rx: Receiver, - truncate_block_rx: Receiver, lonely_block_tx: Sender, verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, @@ -275,7 +274,6 @@ impl ChainService { pub(crate) fn new( shared: Shared, process_block_rx: Receiver, - truncate_block_rx: Receiver, lonely_block_tx: Sender, verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, @@ -283,12 +281,35 @@ impl ChainService { ChainService { shared, process_block_rx, - truncate_block_rx, lonely_block_tx, verify_failed_blocks_tx, } } + pub(crate) fn start_process_block(mut self) { + let signal_receiver = new_crossbeam_exit_rx(); + + loop { + select! { + recv(self.process_block_rx) -> msg => match msg { + Ok(Request { responder, arguments: lonely_block }) => { + // asynchronous_process_block doesn't interact with tx-pool, + // no need to pause tx-pool's chunk_process here. 
+ let _ = responder.send(self.asynchronous_process_block(lonely_block)); + }, + _ => { + error!("process_block_receiver closed"); + break; + }, + }, + recv(signal_receiver) -> _ => { + info!("ChainService received exit signal, exit now"); + break; + } + } + } + } + /// start background single-threaded service with specified thread_name. pub(crate) fn start(mut self) { let signal_receiver = new_crossbeam_exit_rx(); From 9da728a9e5d6c6682182c939cf4b114ceab44a16 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 29 Nov 2023 15:58:30 +0800 Subject: [PATCH 229/357] Remove ChainService::start, since it has replaced by start_process_block --- chain/src/chain.rs | 41 ----------------------------------------- 1 file changed, 41 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 6a473403f0..4212477c9c 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -310,47 +310,6 @@ impl ChainService { } } - /// start background single-threaded service with specified thread_name. - pub(crate) fn start(mut self) { - let signal_receiver = new_crossbeam_exit_rx(); - - // Mainly for test: give an empty thread_name - let tx_control = self.shared.tx_pool_controller().clone(); - loop { - select! 
{ - recv(self.process_block_rx) -> msg => match msg { - Ok(Request { responder, arguments: lonely_block }) => { - let _ = tx_control.suspend_chunk_process(); - let _ = responder.send(self.asynchronous_process_block(lonely_block)); - let _ = tx_control.continue_chunk_process(); - }, - _ => { - error!("process_block_receiver closed"); - break; - }, - }, - recv(self.truncate_block_rx) -> msg => match msg { - Ok(Request { responder, arguments: target_tip_hash }) => { - let _ = tx_control.suspend_chunk_process(); - todo!("move truncate process to consume unverified_block"); - // let _ = responder.send(self.truncate( - // &mut proposal_table, - // &target_tip_hash)); - let _ = tx_control.continue_chunk_process(); - }, - _ => { - error!("truncate_receiver closed"); - break; - }, - }, - recv(signal_receiver) -> _ => { - info!("ChainService received exit signal, exit now"); - break; - } - } - } - } - fn non_contextual_verify(&self, block: &BlockView) -> Result<(), Error> { let consensus = self.shared.consensus(); BlockVerifier::new(consensus).verify(block).map_err(|e| { From 2004431f92b50f3f8cf517ff9a569c7ed0b23fce Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 29 Nov 2023 16:18:37 +0800 Subject: [PATCH 230/357] Fix ChainService stop_rx related handling --- chain/Cargo.toml | 1 + chain/src/chain.rs | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/chain/Cargo.toml b/chain/Cargo.toml index 6989ab8c2f..67b88a9af8 100644 --- a/chain/Cargo.toml +++ b/chain/Cargo.toml @@ -32,6 +32,7 @@ ckb-util = { path = "../util", version = "= 0.113.0-pre" } crossbeam = "0.8.2" ckb-network = { path = "../network", version = "= 0.113.0-pre" } tokio = { version = "1", features = ["sync"] } +ckb-tx-pool = { path = "../tx-pool", version = "= 0.113.0-pre"} [dev-dependencies] ckb-test-chain-utils = { path = "../util/test-chain-utils", version = "= 0.114.0-pre" } diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 4212477c9c..ebef2217e6 100644 --- a/chain/src/chain.rs 
+++ b/chain/src/chain.rs @@ -240,12 +240,12 @@ pub fn start_chain_services(builder: ChainServicesBuilder) -> ChainController { move || { chain_service.start_process_block(); - if Err(SendError(_)) = search_orphan_pool_stop_tx.send(()) { + if let Err(SendError(_)) = search_orphan_pool_stop_tx.send(()) { warn!("trying to notify search_orphan_pool thread to stop, but search_orphan_pool_stop_tx already closed") } let _ = search_orphan_pool_thread.join(); - if Err(SendError(_))= unverified_queue_stop_tx.send(()){ + if let Err(SendError(_))= unverified_queue_stop_tx.send(()){ warn!("trying to notify consume unverified thread to stop, but unverified_queue_stop_tx already closed"); } let _ = consumer_unverified_thread.join(); From 4dca504c302600ddac8e88d66237588033568c71 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 29 Nov 2023 16:23:04 +0800 Subject: [PATCH 231/357] Fix consume_unverified truncate process --- chain/src/consume_unverified.rs | 26 ++++++++++---------------- 1 file changed, 10 insertions(+), 16 deletions(-) diff --git a/chain/src/consume_unverified.rs b/chain/src/consume_unverified.rs index d42a65f0e0..88523d974c 100644 --- a/chain/src/consume_unverified.rs +++ b/chain/src/consume_unverified.rs @@ -19,7 +19,7 @@ use ckb_tx_pool::TxPoolController; use ckb_types::core::cell::{ resolve_transaction, BlockCellProvider, HeaderChecker, OverlayCellProvider, ResolvedTransaction, }; -use ckb_types::core::{BlockExt, BlockNumber, BlockView, Cycle, HeaderView}; +use ckb_types::core::{service::Request, BlockExt, BlockNumber, BlockView, Cycle, HeaderView}; use ckb_types::packed::Byte32; use ckb_types::utilities::merkle_mountain_range::ChainRootMMR; use ckb_types::H256; @@ -71,7 +71,6 @@ impl ConsumeUnverifiedBlocks { pub(crate) fn start(mut self) { let mut begin_loop = std::time::Instant::now(); - let tx_pool_control = self.tx_pool_controller(); loop { begin_loop = std::time::Instant::now(); select! 
{ @@ -79,9 +78,9 @@ impl ConsumeUnverifiedBlocks { Ok(unverified_task) => { // process this unverified block trace!("got an unverified block, wait cost: {:?}", begin_loop.elapsed()); - let _ = tx_pool_control.suspend_chunk_process(); + let _ = self.tx_pool_controller.suspend_chunk_process(); self.processor.consume_unverified_blocks(unverified_task); - let _ = tx_pool_control.resume_chunk_process(); + let _ = self.tx_pool_controller.continue_chunk_process(); trace!("consume_unverified_blocks cost: {:?}", begin_loop.elapsed()); }, Err(err) => { @@ -91,11 +90,9 @@ impl ConsumeUnverifiedBlocks { }, recv(self.truncate_block_rx) -> msg => match msg { Ok(Request { responder, arguments: target_tip_hash }) => { - let _ = tx_pool_control.suspend_chunk_process(); - let _ = responder.send(self.truncate( - &mut proposal_table, - &target_tip_hash)); - let _ = tx_pool_control.continue_chunk_process(); + let _ = self.tx_pool_controller.suspend_chunk_process(); + let _ = responder.send(self.processor.truncate(&target_tip_hash)); + let _ = self.tx_pool_controller.continue_chunk_process(); }, Err(err) => { error!("truncate_block_tx has been closed,err: {}", err); @@ -823,11 +820,7 @@ impl ConsumeUnverifiedBlockProcessor { // Truncate the main chain // Use for testing only - pub(crate) fn truncate( - &mut self, - proposal_table: &mut ProposalTable, - target_tip_hash: &Byte32, - ) -> Result<(), Error> { + pub(crate) fn truncate(&mut self, target_tip_hash: &Byte32) -> Result<(), Error> { let snapshot = Arc::clone(&self.shared.snapshot()); assert!(snapshot.is_main_chain(target_tip_hash)); @@ -852,8 +845,9 @@ impl ConsumeUnverifiedBlockProcessor { db_txn.commit()?; self.update_proposal_table(&fork); - let (detached_proposal_id, new_proposals) = - proposal_table.finalize(origin_proposals, target_tip_header.number()); + let (detached_proposal_id, new_proposals) = self + .proposal_table + .finalize(origin_proposals, target_tip_header.number()); fork.detached_proposal_id = 
detached_proposal_id; let new_snapshot = self.shared.new_snapshot( From 3cf9525203b270207951bd06abe50b724ac4c219 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 29 Nov 2023 16:24:53 +0800 Subject: [PATCH 232/357] Rename chain/src/chain.rs to chain/src/chain_service.rs --- chain/src/{chain.rs => chain_service.rs} | 0 chain/src/lib.rs | 4 ++-- 2 files changed, 2 insertions(+), 2 deletions(-) rename chain/src/{chain.rs => chain_service.rs} (100%) diff --git a/chain/src/chain.rs b/chain/src/chain_service.rs similarity index 100% rename from chain/src/chain.rs rename to chain/src/chain_service.rs diff --git a/chain/src/lib.rs b/chain/src/lib.rs index 8633ab2100..33b2e7c8c8 100644 --- a/chain/src/lib.rs +++ b/chain/src/lib.rs @@ -15,14 +15,14 @@ use ckb_types::core::{BlockNumber, BlockView, HeaderView}; use ckb_types::packed::Byte32; use ckb_verification_traits::Switch; use std::sync::Arc; -mod chain; +mod chain_service; mod consume_orphan; mod consume_unverified; #[cfg(test)] mod tests; mod utils; -pub use chain::{start_chain_services, ChainController}; +pub use chain_service::{start_chain_services, ChainController}; type ProcessBlockRequest = Request; type TruncateRequest = Request>; From ac1e21991783affc2383c378ced271bb073e53ff Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 29 Nov 2023 17:34:16 +0800 Subject: [PATCH 233/357] Fix cargo clippy warnings Signed-off-by: Eval EXEC --- chain/src/chain_service.rs | 2 +- chain/src/consume_orphan.rs | 13 ------------- chain/src/consume_unverified.rs | 3 +-- chain/src/lib.rs | 4 ---- chain/src/tests/find_fork.rs | 11 +++++------ chain/src/tests/orphan_block_pool.rs | 1 + chain/src/utils/forkchanges.rs | 13 +++++++------ chain/src/utils/orphan_block_pool.rs | 1 + sync/src/synchronizer/mod.rs | 3 +-- sync/src/tests/sync_shared.rs | 2 ++ sync/src/types/mod.rs | 5 +++-- 11 files changed, 22 insertions(+), 36 deletions(-) diff --git a/chain/src/chain_service.rs b/chain/src/chain_service.rs index ebef2217e6..acfd5bd609 100644 
--- a/chain/src/chain_service.rs +++ b/chain/src/chain_service.rs @@ -286,7 +286,7 @@ impl ChainService { } } - pub(crate) fn start_process_block(mut self) { + pub(crate) fn start_process_block(self) { let signal_receiver = new_crossbeam_exit_rx(); loop { diff --git a/chain/src/consume_orphan.rs b/chain/src/consume_orphan.rs index 36e8fbf1e0..efb210cb49 100644 --- a/chain/src/consume_orphan.rs +++ b/chain/src/consume_orphan.rs @@ -131,19 +131,6 @@ impl ConsumeOrphan { ); continue; } - let (first_descendants_number, last_descendants_number, descendants_len) = ( - descendants - .first() - .expect("descdant not empty") - .block() - .number(), - descendants - .last() - .expect("descdant not empty") - .block() - .number(), - descendants.len(), - ); self.accept_descendants(descendants); } } diff --git a/chain/src/consume_unverified.rs b/chain/src/consume_unverified.rs index 88523d974c..93fca61912 100644 --- a/chain/src/consume_unverified.rs +++ b/chain/src/consume_unverified.rs @@ -70,9 +70,8 @@ impl ConsumeUnverifiedBlocks { } pub(crate) fn start(mut self) { - let mut begin_loop = std::time::Instant::now(); loop { - begin_loop = std::time::Instant::now(); + let begin_loop = std::time::Instant::now(); select! 
{ recv(self.unverified_block_rx) -> msg => match msg { Ok(unverified_task) => { diff --git a/chain/src/lib.rs b/chain/src/lib.rs index 33b2e7c8c8..b8af887033 100644 --- a/chain/src/lib.rs +++ b/chain/src/lib.rs @@ -114,10 +114,6 @@ impl UnverifiedBlock { self.unverified_block.peer_id() } - pub fn switch(&self) -> Option { - self.unverified_block.switch() - } - pub fn execute_callback(self, verify_result: VerifyResult) { self.unverified_block.execute_callback(verify_result) } diff --git a/chain/src/tests/find_fork.rs b/chain/src/tests/find_fork.rs index ab01477d00..bd3b8b924a 100644 --- a/chain/src/tests/find_fork.rs +++ b/chain/src/tests/find_fork.rs @@ -1,4 +1,4 @@ -use crate::consume_unverified::{ConsumeUnverifiedBlockProcessor, ConsumeUnverifiedBlocks}; +use crate::consume_unverified::ConsumeUnverifiedBlockProcessor; use crate::utils::forkchanges::ForkChanges; use crate::{ start_chain_services, LonelyBlock, LonelyBlockWithCallback, UnverifiedBlock, @@ -54,8 +54,7 @@ fn test_find_fork_case1() { let builder = SharedBuilder::with_temp_db(); let consensus = Consensus::default(); let proposal_table = ProposalTable::new(consensus.tx_proposal_window()); - let (shared, mut pack) = builder.consensus(consensus).build().unwrap(); - let chain_controller = start_chain_services(pack.take_chain_services_builder()); + let (shared, mut _pack) = builder.consensus(consensus).build().unwrap(); let genesis = shared .store() @@ -141,7 +140,7 @@ fn test_find_fork_case1() { fn test_find_fork_case2() { let builder = SharedBuilder::with_temp_db(); let consensus = Consensus::default(); - let (shared, mut pack) = builder.consensus(consensus.clone()).build().unwrap(); + let (shared, _pack) = builder.consensus(consensus.clone()).build().unwrap(); let genesis = shared .store() @@ -226,7 +225,7 @@ fn test_find_fork_case2() { fn test_find_fork_case3() { let builder = SharedBuilder::with_temp_db(); let consensus = Consensus::default(); - let (shared, mut pack) = 
builder.consensus(consensus.clone()).build().unwrap(); + let (shared, _pack) = builder.consensus(consensus.clone()).build().unwrap(); let genesis = shared .store() @@ -311,7 +310,7 @@ fn test_find_fork_case3() { fn test_find_fork_case4() { let builder = SharedBuilder::with_temp_db(); let consensus = Consensus::default(); - let (shared, mut pack) = builder.consensus(consensus.clone()).build().unwrap(); + let (shared, _pack) = builder.consensus(consensus.clone()).build().unwrap(); let genesis = shared .store() diff --git a/chain/src/tests/orphan_block_pool.rs b/chain/src/tests/orphan_block_pool.rs index d6bd0a9491..2974852483 100644 --- a/chain/src/tests/orphan_block_pool.rs +++ b/chain/src/tests/orphan_block_pool.rs @@ -1,3 +1,4 @@ +#![allow(dead_code)] use crate::{LonelyBlock, LonelyBlockWithCallback}; use ckb_chain_spec::consensus::ConsensusBuilder; use ckb_systemtime::unix_time_as_millis; diff --git a/chain/src/utils/forkchanges.rs b/chain/src/utils/forkchanges.rs index 01e3415c67..4427a46154 100644 --- a/chain/src/utils/forkchanges.rs +++ b/chain/src/utils/forkchanges.rs @@ -1,4 +1,3 @@ -use ckb_rust_unstable_port::IsSorted; use ckb_types::core::hardfork::HardForks; use ckb_types::core::{BlockExt, BlockView}; use ckb_types::packed::ProposalShortId; @@ -46,11 +45,13 @@ impl ForkChanges { /// assertion for make sure attached_blocks and detached_blocks are sorted #[cfg(debug_assertions)] pub fn is_sorted(&self) -> bool { - IsSorted::is_sorted_by_key(&mut self.attached_blocks().iter(), |blk| { - blk.header().number() - }) && IsSorted::is_sorted_by_key(&mut self.detached_blocks().iter(), |blk| { - blk.header().number() - }) + ckb_rust_unstable_port::IsSorted::is_sorted_by_key( + &mut self.attached_blocks().iter(), + |blk| blk.header().number(), + ) && ckb_rust_unstable_port::IsSorted::is_sorted_by_key( + &mut self.detached_blocks().iter(), + |blk| blk.header().number(), + ) } pub fn during_hardfork(&self, hardfork_switch: &HardForks) -> bool { diff --git 
a/chain/src/utils/orphan_block_pool.rs b/chain/src/utils/orphan_block_pool.rs index 39006a454e..94be59f140 100644 --- a/chain/src/utils/orphan_block_pool.rs +++ b/chain/src/utils/orphan_block_pool.rs @@ -1,3 +1,4 @@ +#![allow(dead_code)] use crate::LonelyBlockWithCallback; use ckb_logger::debug; use ckb_types::core::{BlockView, EpochNumber}; diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index 2f9319fe00..7156857da6 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -25,14 +25,13 @@ use crate::utils::{metric_ckb_message_bytes, send_message_to, MetricDirection}; use crate::{Status, StatusCode}; use ckb_shared::block_status::BlockStatus; -use ckb_chain::{ChainController, VerifyResult}; +use ckb_chain::ChainController; use ckb_channel as channel; use ckb_channel::{select, Receiver}; use ckb_constant::sync::{ BAD_MESSAGE_BAN_TIME, CHAIN_SYNC_TIMEOUT, EVICTION_HEADERS_RESPONSE_TIME, INIT_BLOCKS_IN_TRANSIT_PER_PEER, MAX_TIP_AGE, }; -use ckb_error::ErrorKind; use ckb_logger::{debug, error, info, trace, warn}; use ckb_network::{ async_trait, bytes::Bytes, tokio, CKBProtocolContext, CKBProtocolHandler, PeerIndex, diff --git a/sync/src/tests/sync_shared.rs b/sync/src/tests/sync_shared.rs index ad2be693fb..4bfc2c0f5a 100644 --- a/sync/src/tests/sync_shared.rs +++ b/sync/src/tests/sync_shared.rs @@ -1,3 +1,5 @@ +#![allow(unused_imports)] +#![allow(dead_code)] use crate::tests::util::{build_chain, inherit_block}; use crate::SyncShared; use ckb_chain::{start_chain_services, VerifiedBlockStatus}; diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index ca77608231..747efeb6fb 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -1,6 +1,8 @@ use crate::{Status, StatusCode, FAST_INDEX, LOW_INDEX, NORMAL_INDEX, TIME_TRACE_SIZE}; use ckb_app_config::SyncConfig; -use ckb_chain::{ChainController, VerifyResult}; +use ckb_chain::ChainController; +#[cfg(test)] +use ckb_chain::VerifyResult; use 
ckb_chain::{LonelyBlock, VerifyCallback}; use ckb_chain_spec::consensus::{Consensus, MAX_BLOCK_INTERVAL, MIN_BLOCK_INTERVAL}; use ckb_channel::Receiver; @@ -35,7 +37,6 @@ use keyed_priority_queue::{self, KeyedPriorityQueue}; use lru::LruCache; use std::collections::{btree_map::Entry, BTreeMap, HashMap, HashSet}; use std::hash::Hash; -use std::path::{Path, PathBuf}; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::time::{Duration, Instant}; From 9a6a1268b85149750f1d42176d2702e898672d18 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 30 Nov 2023 12:06:55 +0800 Subject: [PATCH 234/357] Modify VerifyFailedBlockInfo::message_bytes to msg_bytes --- shared/src/types/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/shared/src/types/mod.rs b/shared/src/types/mod.rs index ac9a83c317..ea3fb0f648 100644 --- a/shared/src/types/mod.rs +++ b/shared/src/types/mod.rs @@ -310,7 +310,7 @@ pub const SHRINK_THRESHOLD: usize = 300; pub struct VerifyFailedBlockInfo { pub block_hash: Byte32, pub peer_id: PeerIndex, - pub message_bytes: u64, + pub msg_bytes: u64, pub reason: String, pub is_internal_db_error: bool, } From b59807c0ec889c01a18151ff89e03771d253b662 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 30 Nov 2023 12:07:53 +0800 Subject: [PATCH 235/357] Pass and return peer_id_with_msg_bytes in process_block --- chain/src/chain_service.rs | 14 ++++---- chain/src/consume_orphan.rs | 2 +- chain/src/consume_unverified.rs | 6 ++-- chain/src/lib.rs | 19 +++++----- sync/src/relayer/mod.rs | 2 +- sync/src/synchronizer/mod.rs | 2 +- sync/src/types/mod.rs | 64 ++++++--------------------------- 7 files changed, 34 insertions(+), 75 deletions(-) diff --git a/chain/src/chain_service.rs b/chain/src/chain_service.rs index acfd5bd609..e9597ba256 100644 --- a/chain/src/chain_service.rs +++ b/chain/src/chain_service.rs @@ -57,7 +57,7 @@ impl ChainController { pub fn asynchronous_process_block_with_switch(&self, block: Arc, switch: Switch) { 
self.asynchronous_process_lonely_block(LonelyBlock { block, - peer_id: None, + peer_id_with_msg_bytes: None, switch: Some(switch), }) } @@ -66,7 +66,7 @@ impl ChainController { self.asynchronous_process_lonely_block_with_callback( LonelyBlock { block, - peer_id: None, + peer_id_with_msg_bytes: None, switch: None, } .without_callback(), @@ -81,7 +81,7 @@ impl ChainController { self.asynchronous_process_lonely_block_with_callback( LonelyBlock { block, - peer_id: None, + peer_id_with_msg_bytes: None, switch: None, } .with_callback(Some(verify_callback)), @@ -110,7 +110,7 @@ impl ChainController { pub fn blocking_process_block(&self, block: Arc) -> VerifyResult { self.blocking_process_lonely_block(LonelyBlock { block, - peer_id: None, + peer_id_with_msg_bytes: None, switch: None, }) } @@ -122,7 +122,7 @@ impl ChainController { ) -> VerifyResult { self.blocking_process_lonely_block(LonelyBlock { block, - peer_id: None, + peer_id_with_msg_bytes: None, switch: Some(switch), }) } @@ -345,7 +345,7 @@ impl ChainService { Err(err) => { tell_synchronizer_to_punish_the_bad_peer( self.verify_failed_blocks_tx.clone(), - lonely_block.peer_id(), + lonely_block.peer_id_with_msg_bytes(), lonely_block.block().hash(), &err, ); @@ -368,7 +368,7 @@ impl ChainService { tell_synchronizer_to_punish_the_bad_peer( self.verify_failed_blocks_tx.clone(), - lonely_block.peer_id(), + lonely_block.peer_id_with_msg_bytes(), lonely_block.block().hash(), &err, ); diff --git a/chain/src/consume_orphan.rs b/chain/src/consume_orphan.rs index efb210cb49..14d4de8fbb 100644 --- a/chain/src/consume_orphan.rs +++ b/chain/src/consume_orphan.rs @@ -95,7 +95,7 @@ impl ConsumeOrphan { Err(err) => { tell_synchronizer_to_punish_the_bad_peer( self.verify_failed_blocks_tx.clone(), - lonely_block.peer_id(), + lonely_block.peer_id_with_msg_bytes(), lonely_block.block().hash(), &err, ); diff --git a/chain/src/consume_unverified.rs b/chain/src/consume_unverified.rs index 93fca61912..e9274a21ff 100644 --- 
a/chain/src/consume_unverified.rs +++ b/chain/src/consume_unverified.rs @@ -130,7 +130,7 @@ impl ConsumeUnverifiedBlockProcessor { Err(err) => { error!( "verify [{:?}]'s block {} failed: {}", - unverified_block.peer_id(), + unverified_block.peer_id_with_msg_bytes(), unverified_block.block().hash(), err ); @@ -166,7 +166,7 @@ impl ConsumeUnverifiedBlockProcessor { tell_synchronizer_to_punish_the_bad_peer( self.verify_failed_blocks_tx.clone(), - unverified_block.peer_id(), + unverified_block.peer_id_with_msg_bytes(), unverified_block.block().hash(), err, ); @@ -183,7 +183,7 @@ impl ConsumeUnverifiedBlockProcessor { lonely_block: LonelyBlock { block, - peer_id: _peer_id, + peer_id_with_msg_bytes: _peer_id_with_msg_bytes, switch, }, verify_callback: _verify_callback, diff --git a/chain/src/lib.rs b/chain/src/lib.rs index b8af887033..dcba6911ba 100644 --- a/chain/src/lib.rs +++ b/chain/src/lib.rs @@ -48,7 +48,8 @@ pub enum VerifiedBlockStatus { #[derive(Clone)] pub struct LonelyBlock { pub block: Arc, - pub peer_id: Option, + + pub peer_id_with_msg_bytes: Option<(PeerIndex, u64)>, pub switch: Option, } @@ -83,8 +84,8 @@ impl LonelyBlockWithCallback { pub fn block(&self) -> &Arc { &self.lonely_block.block } - pub fn peer_id(&self) -> Option { - self.lonely_block.peer_id + pub fn peer_id_with_msg_bytes(&self) -> Option<(PeerIndex, u64)> { + self.lonely_block.peer_id_with_msg_bytes } pub fn switch(&self) -> Option { self.lonely_block.switch @@ -110,8 +111,8 @@ impl UnverifiedBlock { self.unverified_block.block() } - pub fn peer_id(&self) -> Option { - self.unverified_block.peer_id() + pub fn peer_id_with_msg_bytes(&self) -> Option<(PeerIndex, u64)> { + self.unverified_block.peer_id_with_msg_bytes() } pub fn execute_callback(self, verify_result: VerifyResult) { @@ -142,17 +143,17 @@ impl GlobalIndex { pub(crate) fn tell_synchronizer_to_punish_the_bad_peer( verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, - peer_id: Option, + peer_id_with_msg_bytes: 
Option<(PeerIndex, u64)>, block_hash: Byte32, err: &Error, ) { let is_internal_db_error = is_internal_db_error(&err); - match peer_id { - Some(peer_id) => { + match peer_id_with_msg_bytes { + Some((peer_id, msg_bytes)) => { let verify_failed_block_info = VerifyFailedBlockInfo { block_hash, peer_id, - message_bytes: 0, + msg_bytes, reason: err.to_string(), is_internal_db_error, }; diff --git a/sync/src/relayer/mod.rs b/sync/src/relayer/mod.rs index 9bcdb36103..098663cc0f 100644 --- a/sync/src/relayer/mod.rs +++ b/sync/src/relayer/mod.rs @@ -344,7 +344,7 @@ impl Relayer { self.shared().insert_new_block_with_callback( &self.chain, Arc::clone(&block), - peer, + (peer, 0), Box::new(verify_success_callback), ); } diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index 7156857da6..bd6e10c217 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -934,7 +934,7 @@ impl CKBProtocolHandler for Synchronizer { nc.as_ref(), malformed_peer_info.peer_id, "SendBlock", - malformed_peer_info.message_bytes, + malformed_peer_info.msg_bytes, StatusCode::BlockIsInvalid.with_context(format!( "block {} is invalid, reason: {}", malformed_peer_info.block_hash, malformed_peer_info.reason diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index 747efeb6fb..7b26064301 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -1062,13 +1062,13 @@ impl SyncShared { &self, chain: &ChainController, block: Arc, - peer_id: PeerIndex, + peer_id_with_msg_bytes: (PeerIndex, u64), verify_success_callback: VerifyCallback, ) { self.accept_block( chain, Arc::clone(&block), - peer_id, + peer_id_with_msg_bytes, Some(verify_success_callback), ) } @@ -1081,28 +1081,12 @@ impl SyncShared { peer_id: PeerIndex, message_bytes: u64, ) { - // Insert the given block into orphan_block_pool if its parent is not found - // if !self.is_stored(&block.parent_hash()) { - // debug!( - // "insert new orphan block {} {}", - // block.header().number(), - // 
block.header().hash() - // ); - // self.state.insert_orphan_block((*block).clone()); - // return Ok(false); - // } - - // Attempt to accept the given block if its parent already exist in database - self.accept_block(chain, Arc::clone(&block), peer_id, None::); - // if ret.is_err() { - // debug!("accept block {:?} {:?}", block, ret); - // return ret; - // } - - // The above block has been accepted. Attempt to accept its descendant blocks in orphan pool. - // The returned blocks of `remove_blocks_by_parent` are in topology order by parents - // self.try_search_orphan_pool(chain); - // ret + self.accept_block( + chain, + Arc::clone(&block), + (peer_id, message_bytes), + None::, + ); } /// Try to find blocks from the orphan block pool that may no longer be orphan @@ -1178,7 +1162,7 @@ impl SyncShared { ) -> VerifyResult { let lonely_block: LonelyBlock = LonelyBlock { block, - peer_id: Some(PeerIndex::new(0)), + peer_id_with_msg_bytes: Some((peer_id, message_bytes)), switch: None, }; chain.blocking_process_lonely_block(lonely_block) @@ -1188,43 +1172,17 @@ impl SyncShared { &self, chain: &ChainController, block: Arc, - peer_id: PeerIndex, + peer_id_with_msg_bytes: (PeerIndex, u64), verify_callback: Option, ) { - // let ret = { - // let mut assume_valid_target = self.state.assume_valid_target(); - // if let Some(ref target) = *assume_valid_target { - // // if the target has been reached, delete it - // let switch = if target == &Unpack::::unpack(&core::BlockView::hash(&block)) { - // assume_valid_target.take(); - // Switch::NONE - // } else { - // Switch::DISABLE_SCRIPT - // }; - // - // chain.blocking_process_block_with_switch(Arc::clone(&block), switch) - // } else { - // chain.process_block(Arc::clone(&block)) - // } - // }; - let lonely_block_with_callback = LonelyBlock { block, - peer_id: Some(peer_id), + peer_id_with_msg_bytes: Some(peer_id_with_msg_bytes), switch: None, } .with_callback(verify_callback); 
chain.asynchronous_process_lonely_block_with_callback(lonely_block_with_callback); - - // if let Err(ref error) = ret { - // if !is_internal_db_error(error) { - // error!("accept block {:?} {}", block, error); - // self.shared() - // .insert_block_status(block.header().hash(), BlockStatus::BLOCK_INVALID); - // } - // } - // ret } /// Sync a new valid header, try insert to sync state From 1ffa39d35f016f47c7e7e285511e622aed9779bc Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 30 Nov 2023 12:16:37 +0800 Subject: [PATCH 236/357] Fix peer_id_and_msg_bytes for ckb-chain unit tests --- chain/src/tests/find_fork.rs | 5 +++-- chain/src/tests/orphan_block_pool.rs | 4 ++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/chain/src/tests/find_fork.rs b/chain/src/tests/find_fork.rs index bd3b8b924a..f3ed6e8d66 100644 --- a/chain/src/tests/find_fork.rs +++ b/chain/src/tests/find_fork.rs @@ -25,17 +25,18 @@ fn consume_unverified_block( blk: &BlockView, switch: Switch, ) { + let parent_hash = blk.data().header().raw().parent_hash(); let parent_header = processor .shared .store() - .get_block_header(&blk.data().header().raw().parent_hash()) + .get_block_header(&parent_hash) .unwrap(); let unverified_block = UnverifiedBlock { unverified_block: LonelyBlockWithCallback { lonely_block: LonelyBlock { block: Arc::new(blk.to_owned()), - peer_id: None, + peer_id_with_msg_bytes: None, switch: Some(switch), }, verify_callback: None, diff --git a/chain/src/tests/orphan_block_pool.rs b/chain/src/tests/orphan_block_pool.rs index 2974852483..a0f25f1a90 100644 --- a/chain/src/tests/orphan_block_pool.rs +++ b/chain/src/tests/orphan_block_pool.rs @@ -21,7 +21,7 @@ fn gen_lonely_block(parent_header: &HeaderView) -> LonelyBlock { .build(); LonelyBlock { block: Arc::new(block), - peer_id: None, + peer_id_with_msg_bytes: None, switch: None, } } @@ -163,7 +163,7 @@ fn test_remove_expired_blocks() { let lonely_block_with_callback = LonelyBlockWithCallback { lonely_block: LonelyBlock { 
block: Arc::new(new_block), - peer_id: None, + peer_id_with_msg_bytes: None, switch: None, }, verify_callback: None, From 470172f8ec9f49a44863dd9d402509024e3264a2 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 30 Nov 2023 12:23:28 +0800 Subject: [PATCH 237/357] Extract `ConsumeDescendantProcessor` from `ConsumeOrphan` --- chain/src/consume_orphan.rs | 31 +++++++++++++++++++++++-------- 1 file changed, 23 insertions(+), 8 deletions(-) diff --git a/chain/src/consume_orphan.rs b/chain/src/consume_orphan.rs index 14d4de8fbb..530b439dc0 100644 --- a/chain/src/consume_orphan.rs +++ b/chain/src/consume_orphan.rs @@ -17,13 +17,20 @@ use ckb_types::U256; use ckb_verification::InvalidParentError; use std::sync::Arc; -pub(crate) struct ConsumeOrphan { +pub(crate) struct ConsumeDescendantProcessor { shared: Shared, - orphan_blocks_broker: Arc, - lonely_blocks_rx: Receiver, unverified_blocks_tx: Sender, verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, +} + +pub(crate) struct ConsumeOrphan { + shared: Shared, + + descendant_processor: ConsumeDescendantProcessor, + + orphan_blocks_broker: Arc, + lonely_blocks_rx: Receiver, stop_rx: Receiver<()>, } @@ -39,11 +46,15 @@ impl ConsumeOrphan { stop_rx: Receiver<()>, ) -> ConsumeOrphan { ConsumeOrphan { - shared, + shared: shared.clone(), + + descendant_processor: ConsumeDescendantProcessor { + shared, + unverified_blocks_tx, + verify_failed_blocks_tx, + }, orphan_blocks_broker: orphan_block_pool, lonely_blocks_rx, - unverified_blocks_tx, - verify_failed_blocks_tx, stop_rx, } } @@ -94,7 +105,7 @@ impl ConsumeOrphan { Err(err) => { tell_synchronizer_to_punish_the_bad_peer( - self.verify_failed_blocks_tx.clone(), + self.descendant_processor.verify_failed_blocks_tx.clone(), lonely_block.peer_id_with_msg_bytes(), lonely_block.block().hash(), &err, @@ -139,7 +150,11 @@ impl ConsumeOrphan { let block_number = unverified_block.block().number(); let block_hash = unverified_block.block().hash(); - let send_success = match 
self.unverified_blocks_tx.send(unverified_block) { + let send_success = match self + .descendant_processor + .unverified_blocks_tx + .send(unverified_block) + { Ok(_) => { debug!( "process desendant block success {}-{}", From de91b71cfa97b663a478e0c97713dd1a8cc19517 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 30 Nov 2023 12:28:37 +0800 Subject: [PATCH 238/357] Attach `accept_descendant` method to `ConsumeDescendantProcessor` Signed-off-by: Eval EXEC --- chain/src/consume_orphan.rs | 262 ++++++++++++++++++------------------ 1 file changed, 129 insertions(+), 133 deletions(-) diff --git a/chain/src/consume_orphan.rs b/chain/src/consume_orphan.rs index 530b439dc0..d804af1c5c 100644 --- a/chain/src/consume_orphan.rs +++ b/chain/src/consume_orphan.rs @@ -24,137 +24,12 @@ pub(crate) struct ConsumeDescendantProcessor { verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, } -pub(crate) struct ConsumeOrphan { - shared: Shared, - - descendant_processor: ConsumeDescendantProcessor, - - orphan_blocks_broker: Arc, - lonely_blocks_rx: Receiver, - - stop_rx: Receiver<()>, -} - -impl ConsumeOrphan { - pub(crate) fn new( - shared: Shared, - orphan_block_pool: Arc, - unverified_blocks_tx: Sender, - lonely_blocks_rx: Receiver, - - verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, - stop_rx: Receiver<()>, - ) -> ConsumeOrphan { - ConsumeOrphan { - shared: shared.clone(), - - descendant_processor: ConsumeDescendantProcessor { - shared, - unverified_blocks_tx, - verify_failed_blocks_tx, - }, - orphan_blocks_broker: orphan_block_pool, - lonely_blocks_rx, - stop_rx, - } - } - - pub(crate) fn start(&self) { - loop { - select! 
{ - recv(self.lonely_blocks_rx) -> msg => match msg { - Ok(lonely_block) => { - self.process_lonely_block(lonely_block); - }, - Err(err) => { - error!("lonely_block_rx err: {}", err); - return - } - }, - recv(self.stop_rx) -> _ => { - info!("unverified_queue_consumer got exit signal, exit now"); - return; - }, - } - } - } - - fn process_lonely_block(&self, lonely_block: LonelyBlockWithCallback) { - let parent_hash = lonely_block.block().parent_hash(); - let parent_status = self.shared.get_block_status(&parent_hash); - if parent_status.contains(BlockStatus::BLOCK_PARTIAL_STORED) { - debug!( - "parent has stored, processing descendant directly {}", - lonely_block.block().hash() - ); - self.process_descendant(lonely_block); - } else { - self.orphan_blocks_broker.insert(lonely_block); - } - self.search_orphan_pool() - } - - fn process_descendant(&self, lonely_block: LonelyBlockWithCallback) { - match self.accept_descendant(lonely_block.block().to_owned()) { - Ok((parent_header, total_difficulty)) => { - let unverified_block: UnverifiedBlock = - lonely_block.combine_parent_header(parent_header); - - self.send_unverified_block(unverified_block, total_difficulty) - } - - Err(err) => { - tell_synchronizer_to_punish_the_bad_peer( - self.descendant_processor.verify_failed_blocks_tx.clone(), - lonely_block.peer_id_with_msg_bytes(), - lonely_block.block().hash(), - &err, - ); - - error!( - "accept block {} failed: {}", - lonely_block.block().hash(), - err - ); - - lonely_block.execute_callback(Err(err)); - } - } - } - - fn search_orphan_pool(&self) { - for leader_hash in self.orphan_blocks_broker.clone_leaders() { - if !self - .shared - .contains_block_status(&leader_hash, BlockStatus::BLOCK_PARTIAL_STORED) - { - trace!("orphan leader: {} not partial stored", leader_hash); - continue; - } - - let descendants: Vec = self - .orphan_blocks_broker - .remove_blocks_by_parent(&leader_hash); - if descendants.is_empty() { - error!( - "leader {} does not have any descendants, this 
shouldn't happen", - leader_hash - ); - continue; - } - self.accept_descendants(descendants); - } - } - +impl ConsumeDescendantProcessor { fn send_unverified_block(&self, unverified_block: UnverifiedBlock, total_difficulty: U256) { let block_number = unverified_block.block().number(); let block_hash = unverified_block.block().hash(); - let send_success = match self - .descendant_processor - .unverified_blocks_tx - .send(unverified_block) - { + let send_success = match self.unverified_blocks_tx.send(unverified_block) { Ok(_) => { debug!( "process desendant block success {}-{}", @@ -205,12 +80,6 @@ impl ConsumeOrphan { .insert_block_status(block_hash, BlockStatus::BLOCK_PARTIAL_STORED); } - fn accept_descendants(&self, descendants: Vec) { - for descendant_block in descendants { - self.process_descendant(descendant_block); - } - } - fn accept_descendant(&self, block: Arc) -> Result<(HeaderView, U256), Error> { let (block_number, block_hash) = (block.number(), block.hash()); @@ -282,4 +151,131 @@ impl ConsumeOrphan { Ok((parent_header, cannon_total_difficulty)) } + + fn process_descendant(&self, lonely_block: LonelyBlockWithCallback) { + match self.accept_descendant(lonely_block.block().to_owned()) { + Ok((parent_header, total_difficulty)) => { + let unverified_block: UnverifiedBlock = + lonely_block.combine_parent_header(parent_header); + + self.send_unverified_block(unverified_block, total_difficulty) + } + + Err(err) => { + tell_synchronizer_to_punish_the_bad_peer( + self.verify_failed_blocks_tx.clone(), + lonely_block.peer_id_with_msg_bytes(), + lonely_block.block().hash(), + &err, + ); + + error!( + "accept block {} failed: {}", + lonely_block.block().hash(), + err + ); + + lonely_block.execute_callback(Err(err)); + } + } + } + + fn accept_descendants(&self, descendants: Vec) { + for descendant_block in descendants { + self.process_descendant(descendant_block); + } + } +} + +pub(crate) struct ConsumeOrphan { + shared: Shared, + + descendant_processor: 
ConsumeDescendantProcessor, + + orphan_blocks_broker: Arc, + lonely_blocks_rx: Receiver, + + stop_rx: Receiver<()>, +} + +impl ConsumeOrphan { + pub(crate) fn new( + shared: Shared, + orphan_block_pool: Arc, + unverified_blocks_tx: Sender, + lonely_blocks_rx: Receiver, + verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, + stop_rx: Receiver<()>, + ) -> ConsumeOrphan { + ConsumeOrphan { + shared: shared.clone(), + descendant_processor: ConsumeDescendantProcessor { + shared, + unverified_blocks_tx, + verify_failed_blocks_tx, + }, + orphan_blocks_broker: orphan_block_pool, + lonely_blocks_rx, + stop_rx, + } + } + + pub(crate) fn start(&self) { + loop { + select! { + recv(self.lonely_blocks_rx) -> msg => match msg { + Ok(lonely_block) => { + self.process_lonely_block(lonely_block); + }, + Err(err) => { + error!("lonely_block_rx err: {}", err); + return + } + }, + recv(self.stop_rx) -> _ => { + info!("unverified_queue_consumer got exit signal, exit now"); + return; + }, + } + } + } + + fn search_orphan_pool(&self) { + for leader_hash in self.orphan_blocks_broker.clone_leaders() { + if !self + .shared + .contains_block_status(&leader_hash, BlockStatus::BLOCK_PARTIAL_STORED) + { + trace!("orphan leader: {} not partial stored", leader_hash); + continue; + } + + let descendants: Vec = self + .orphan_blocks_broker + .remove_blocks_by_parent(&leader_hash); + if descendants.is_empty() { + error!( + "leader {} does not have any descendants, this shouldn't happen", + leader_hash + ); + continue; + } + self.descendant_processor.accept_descendants(descendants); + } + } + + fn process_lonely_block(&self, lonely_block: LonelyBlockWithCallback) { + let parent_hash = lonely_block.block().parent_hash(); + let parent_status = self.shared.get_block_status(&parent_hash); + if parent_status.contains(BlockStatus::BLOCK_PARTIAL_STORED) { + debug!( + "parent has stored, processing descendant directly {}", + lonely_block.block().hash() + ); + 
self.descendant_processor.process_descendant(lonely_block); + } else { + self.orphan_blocks_broker.insert(lonely_block); + } + self.search_orphan_pool() + } } From 184ccc09420716e6e9cfd0a5d8a5069cd27e465e Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 30 Nov 2023 14:02:30 +0800 Subject: [PATCH 239/357] Public ConsumeDescendantProcessor struct fields --- chain/src/consume_orphan.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/chain/src/consume_orphan.rs b/chain/src/consume_orphan.rs index d804af1c5c..883ba7b0d8 100644 --- a/chain/src/consume_orphan.rs +++ b/chain/src/consume_orphan.rs @@ -18,10 +18,10 @@ use ckb_verification::InvalidParentError; use std::sync::Arc; pub(crate) struct ConsumeDescendantProcessor { - shared: Shared, - unverified_blocks_tx: Sender, + pub shared: Shared, + pub unverified_blocks_tx: Sender, - verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, + pub verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, } impl ConsumeDescendantProcessor { @@ -152,7 +152,7 @@ impl ConsumeDescendantProcessor { Ok((parent_header, cannon_total_difficulty)) } - fn process_descendant(&self, lonely_block: LonelyBlockWithCallback) { + pub(crate) fn process_descendant(&self, lonely_block: LonelyBlockWithCallback) { match self.accept_descendant(lonely_block.block().to_owned()) { Ok((parent_header, total_difficulty)) => { let unverified_block: UnverifiedBlock = From 88d0b7c22456761766d08f7987318afc61714f96 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 30 Nov 2023 14:02:48 +0800 Subject: [PATCH 240/357] Fix ckb-chain find_fork related Unit Test --- chain/src/tests/find_fork.rs | 99 ++++++++++++++++++++++++------------ 1 file changed, 67 insertions(+), 32 deletions(-) diff --git a/chain/src/tests/find_fork.rs b/chain/src/tests/find_fork.rs index f3ed6e8d66..3cdb57c50f 100644 --- a/chain/src/tests/find_fork.rs +++ b/chain/src/tests/find_fork.rs @@ -1,9 +1,7 @@ +use crate::consume_orphan::ConsumeDescendantProcessor; 
use crate::consume_unverified::ConsumeUnverifiedBlockProcessor; use crate::utils::forkchanges::ForkChanges; -use crate::{ - start_chain_services, LonelyBlock, LonelyBlockWithCallback, UnverifiedBlock, - VerifyFailedBlockInfo, -}; +use crate::{start_chain_services, LonelyBlock, UnverifiedBlock, VerifyFailedBlockInfo}; use ckb_chain_spec::consensus::{Consensus, ProposalWindow}; use ckb_proposal_table::ProposalTable; use ckb_shared::SharedBuilder; @@ -17,33 +15,36 @@ use ckb_types::{ U256, }; use ckb_verification_traits::Switch; +use crossbeam::channel; use std::collections::HashSet; use std::sync::Arc; -fn consume_unverified_block( - processor: &mut ConsumeUnverifiedBlockProcessor, +fn process_block( + consume_descendant_processor: &ConsumeDescendantProcessor, + consume_unverified_block_processor: &mut ConsumeUnverifiedBlockProcessor, blk: &BlockView, switch: Switch, ) { + let lonely_block = LonelyBlock { + block: Arc::new(blk.to_owned()), + peer_id_with_msg_bytes: None, + switch: Some(switch), + }; + + consume_descendant_processor.process_descendant(lonely_block.clone().without_callback()); + let parent_hash = blk.data().header().raw().parent_hash(); - let parent_header = processor + let parent_header = consume_descendant_processor .shared .store() .get_block_header(&parent_hash) .unwrap(); let unverified_block = UnverifiedBlock { - unverified_block: LonelyBlockWithCallback { - lonely_block: LonelyBlock { - block: Arc::new(blk.to_owned()), - peer_id_with_msg_bytes: None, - switch: Some(switch), - }, - verify_callback: None, - }, + unverified_block: lonely_block.without_callback(), parent_header, }; - processor.consume_unverified_blocks(unverified_block); + consume_unverified_block_processor.consume_unverified_blocks(unverified_block); } // 0--1--2--3--4 @@ -76,7 +77,12 @@ fn test_find_fork_case1() { let (verify_failed_blocks_tx, _verify_failed_blocks_rx) = tokio::sync::mpsc::unbounded_channel::(); - + let (unverified_blocks_tx, _unverified_blocks_rx) = 
channel::unbounded::(); + let consume_descendant_processor = ConsumeDescendantProcessor { + shared: shared.clone(), + unverified_blocks_tx, + verify_failed_blocks_tx: verify_failed_blocks_tx.clone(), + }; let mut consume_unverified_block_processor = ConsumeUnverifiedBlockProcessor { shared: shared.clone(), proposal_table, @@ -85,7 +91,8 @@ fn test_find_fork_case1() { // fork1 total_difficulty 400 for blk in fork1.blocks() { - consume_unverified_block( + process_block( + &consume_descendant_processor, &mut consume_unverified_block_processor, blk, Switch::DISABLE_ALL, @@ -94,7 +101,8 @@ fn test_find_fork_case1() { // fork2 total_difficulty 270 for blk in fork2.blocks() { - consume_unverified_block( + process_block( + &consume_descendant_processor, &mut consume_unverified_block_processor, blk, Switch::DISABLE_ALL, @@ -161,7 +169,12 @@ fn test_find_fork_case2() { let proposal_table = ProposalTable::new(consensus.tx_proposal_window()); let (verify_failed_blocks_tx, _verify_failed_blocks_rx) = tokio::sync::mpsc::unbounded_channel::(); - + let (unverified_blocks_tx, _unverified_blocks_rx) = channel::unbounded::(); + let consume_descendant_processor = ConsumeDescendantProcessor { + shared: shared.clone(), + unverified_blocks_tx, + verify_failed_blocks_tx: verify_failed_blocks_tx.clone(), + }; let mut consume_unverified_block_processor = ConsumeUnverifiedBlockProcessor { shared: shared.clone(), proposal_table, @@ -170,7 +183,8 @@ fn test_find_fork_case2() { // fork1 total_difficulty 400 for blk in fork1.blocks() { - consume_unverified_block( + process_block( + &consume_descendant_processor, &mut consume_unverified_block_processor, blk, Switch::DISABLE_ALL, @@ -179,7 +193,8 @@ fn test_find_fork_case2() { // fork2 total_difficulty 280 for blk in fork2.blocks() { - consume_unverified_block( + process_block( + &consume_descendant_processor, &mut consume_unverified_block_processor, blk, Switch::DISABLE_ALL, @@ -247,16 +262,21 @@ fn test_find_fork_case3() { let proposal_table = 
ProposalTable::new(consensus.tx_proposal_window()); let (verify_failed_blocks_tx, _verify_failed_blocks_rx) = tokio::sync::mpsc::unbounded_channel::(); - + let (unverified_blocks_tx, _unverified_blocks_rx) = channel::unbounded::(); + let consume_descendant_processor = ConsumeDescendantProcessor { + shared: shared.clone(), + unverified_blocks_tx, + verify_failed_blocks_tx: verify_failed_blocks_tx.clone(), + }; let mut consume_unverified_block_processor = ConsumeUnverifiedBlockProcessor { shared: shared.clone(), proposal_table, verify_failed_blocks_tx, }; - // fork1 total_difficulty 240 for blk in fork1.blocks() { - consume_unverified_block( + process_block( + &consume_descendant_processor, &mut consume_unverified_block_processor, blk, Switch::DISABLE_ALL, @@ -265,7 +285,8 @@ fn test_find_fork_case3() { // fork2 total_difficulty 200 for blk in fork2.blocks() { - consume_unverified_block( + process_block( + &consume_descendant_processor, &mut consume_unverified_block_processor, blk, Switch::DISABLE_ALL, @@ -332,7 +353,12 @@ fn test_find_fork_case4() { let proposal_table = ProposalTable::new(consensus.tx_proposal_window()); let (verify_failed_blocks_tx, _verify_failed_blocks_rx) = tokio::sync::mpsc::unbounded_channel::(); - + let (unverified_blocks_tx, _unverified_blocks_rx) = channel::unbounded::(); + let consume_descendant_processor = ConsumeDescendantProcessor { + shared: shared.clone(), + unverified_blocks_tx, + verify_failed_blocks_tx: verify_failed_blocks_tx.clone(), + }; let mut consume_unverified_block_processor = ConsumeUnverifiedBlockProcessor { shared: shared.clone(), proposal_table, @@ -341,7 +367,8 @@ fn test_find_fork_case4() { // fork1 total_difficulty 200 for blk in fork1.blocks() { - consume_unverified_block( + process_block( + &consume_descendant_processor, &mut consume_unverified_block_processor, blk, Switch::DISABLE_ALL, @@ -350,7 +377,8 @@ fn test_find_fork_case4() { // fork2 total_difficulty 160 for blk in fork2.blocks() { - 
consume_unverified_block( + process_block( + &consume_descendant_processor, &mut consume_unverified_block_processor, blk, Switch::DISABLE_ALL, @@ -417,7 +445,12 @@ fn repeatedly_switch_fork() { let proposal_table = ProposalTable::new(consensus.tx_proposal_window()); let (verify_failed_blocks_tx, _verify_failed_blocks_rx) = tokio::sync::mpsc::unbounded_channel::(); - + let (unverified_blocks_tx, _unverified_blocks_rx) = channel::unbounded::(); + let consume_descendant_processor = ConsumeDescendantProcessor { + shared: shared.clone(), + unverified_blocks_tx, + verify_failed_blocks_tx: verify_failed_blocks_tx.clone(), + }; let mut consume_unverified_block_processor = ConsumeUnverifiedBlockProcessor { shared: shared.clone(), proposal_table, @@ -425,7 +458,8 @@ fn repeatedly_switch_fork() { }; for blk in fork1.blocks() { - consume_unverified_block( + process_block( + &consume_descendant_processor, &mut consume_unverified_block_processor, blk, Switch::DISABLE_ALL, @@ -433,7 +467,8 @@ fn repeatedly_switch_fork() { } for blk in fork2.blocks() { - consume_unverified_block( + process_block( + &consume_descendant_processor, &mut consume_unverified_block_processor, blk, Switch::DISABLE_ALL, From 82aa3f04bea76ea2f0e2c9dc2c38fd62d747eae6 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 30 Nov 2023 15:02:55 +0800 Subject: [PATCH 241/357] Fix Cargo fmt --- chain/src/tests/delay_verify.rs | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/chain/src/tests/delay_verify.rs b/chain/src/tests/delay_verify.rs index e9fcb3a2aa..e2b01f4700 100644 --- a/chain/src/tests/delay_verify.rs +++ b/chain/src/tests/delay_verify.rs @@ -346,7 +346,10 @@ fn test_full_dead_transaction() { .build() }; chain_controller - .blocking_process_block_with_switch(Arc::new(new_block.clone()), Switch::DISABLE_EPOCH) + .blocking_process_block_with_switch( + Arc::new(new_block.clone()), + Switch::DISABLE_EPOCH, + ) .expect("process block ok"); mock_store.insert_block(&new_block, 
&epoch); parent = new_block.header().to_owned(); @@ -426,7 +429,10 @@ fn test_full_dead_transaction() { .build() }; chain_controller - .blocking_process_block_with_switch(Arc::new(new_block.clone()), Switch::DISABLE_EPOCH) + .blocking_process_block_with_switch( + Arc::new(new_block.clone()), + Switch::DISABLE_EPOCH, + ) .expect("process block ok"); mock_store.insert_block(&new_block, &epoch); parent = new_block.header().to_owned(); @@ -495,7 +501,10 @@ fn test_full_dead_transaction() { .build() }; chain_controller - .blocking_process_block_with_switch(Arc::new(new_block.clone()), Switch::DISABLE_EPOCH) + .blocking_process_block_with_switch( + Arc::new(new_block.clone()), + Switch::DISABLE_EPOCH, + ) .expect("process block ok"); mock_store.insert_block(&new_block, &epoch); parent = new_block.header().to_owned(); From 360cb04894232271993f7b0cb35902f8943a72cb Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 30 Nov 2023 15:20:38 +0800 Subject: [PATCH 242/357] Update sync_state RPC document --- chain/src/chain_service.rs | 2 +- rpc/src/module/net.rs | 4 ++++ util/jsonrpc-types/src/net.rs | 3 ++- 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/chain/src/chain_service.rs b/chain/src/chain_service.rs index e9597ba256..8ecf9f4104 100644 --- a/chain/src/chain_service.rs +++ b/chain/src/chain_service.rs @@ -31,7 +31,7 @@ const ORPHAN_BLOCK_SIZE: usize = (BLOCK_DOWNLOAD_WINDOW * 2) as usize; /// /// The controller is internally reference-counted and can be freely cloned. /// -/// A controller can invoke [`ChainService`] methods. +/// A controller can invoke ChainService methods. 
#[cfg_attr(feature = "mock", faux::create)] #[derive(Clone)] pub struct ChainController { diff --git a/rpc/src/module/net.rs b/rpc/src/module/net.rs index be994cd5f9..95830ea26c 100644 --- a/rpc/src/module/net.rs +++ b/rpc/src/module/net.rs @@ -371,6 +371,10 @@ pub trait NetRpc { /// "low_time": "0x5dc", /// "normal_time": "0x4e2", /// "orphan_blocks_count": "0x0" + /// "tip_hash": String("0xa5f5c85987a15de25661e5a214f2c1449cd803f071acc7999820f25246471f40"), + /// "tip_number": String("0x400"), + /// "unverified_tip_hash": String("0xa5f5c85987a15de25661e5a214f2c1449cd803f071acc7999820f25246471f40"), + /// "unverified_tip_number": String("0x400"), /// } /// } /// ``` diff --git a/util/jsonrpc-types/src/net.rs b/util/jsonrpc-types/src/net.rs index 9c01b41cbb..8751621985 100644 --- a/util/jsonrpc-types/src/net.rs +++ b/util/jsonrpc-types/src/net.rs @@ -283,8 +283,9 @@ pub struct SyncState { pub unverified_tip_number: BlockNumber, /// The block hash of current unverified tip block pub unverified_tip_hash: H256, - + /// The block number of current tip block pub tip_number: BlockNumber, + /// The block hash of current tip block pub tip_hash: H256, /// The download scheduler's time analysis data, the fast is the 1/3 of the cut-off point, unit ms pub fast_time: Uint64, From 732a7ab81ed0ab2d30c4446e67a248d3553081e2 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 30 Nov 2023 17:10:52 +0800 Subject: [PATCH 243/357] Add docs for ckb-shared, fix clippy docs warnings --- shared/src/block_status.rs | 2 ++ shared/src/chain_services_builder.rs | 2 ++ shared/src/shared.rs | 3 ++- shared/src/types/mod.rs | 1 + 4 files changed, 7 insertions(+), 1 deletion(-) diff --git a/shared/src/block_status.rs b/shared/src/block_status.rs index 60dd37115d..db3060b8bc 100644 --- a/shared/src/block_status.rs +++ b/shared/src/block_status.rs @@ -1,3 +1,5 @@ +//! Provide BlockStatus +#![allow(missing_docs)] use bitflags::bitflags; bitflags! 
{ pub struct BlockStatus: u32 { diff --git a/shared/src/chain_services_builder.rs b/shared/src/chain_services_builder.rs index a6ee4a76e1..a8c5f08591 100644 --- a/shared/src/chain_services_builder.rs +++ b/shared/src/chain_services_builder.rs @@ -1,3 +1,5 @@ +//! chain_services_builder provide ChainServicesBuilder to build Chain Services +#![allow(missing_docs)] use crate::types::VerifyFailedBlockInfo; use crate::Shared; use ckb_proposal_table::ProposalTable; diff --git a/shared/src/shared.rs b/shared/src/shared.rs index 647cd0eebe..98fa44e215 100644 --- a/shared/src/shared.rs +++ b/shared/src/shared.rs @@ -1,4 +1,5 @@ -//! TODO(doc): @quake +//! Provide Shared +#![allow(missing_docs)] use crate::block_status::BlockStatus; use crate::{HeaderMap, Snapshot, SnapshotMgr}; use arc_swap::{ArcSwap, Guard}; diff --git a/shared/src/types/mod.rs b/shared/src/types/mod.rs index ea3fb0f648..45e6125b06 100644 --- a/shared/src/types/mod.rs +++ b/shared/src/types/mod.rs @@ -1,3 +1,4 @@ +#![allow(missing_docs)] use ckb_network::PeerIndex; use ckb_types::core::{BlockNumber, EpochNumberWithFraction}; use ckb_types::packed::Byte32; From cf06dd60871dc19391b52a66a616517fbd432868 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 30 Nov 2023 17:27:56 +0800 Subject: [PATCH 244/357] Add docs for ckb-chain, fix clippy warnings --- chain/src/chain_service.rs | 43 ++++++++++++++-------------- chain/src/consume_orphan.rs | 8 +++--- chain/src/consume_unverified.rs | 43 ++++++++++++---------------- chain/src/lib.rs | 42 ++++++++++++++++----------- chain/src/utils/orphan_block_pool.rs | 2 +- 5 files changed, 71 insertions(+), 67 deletions(-) diff --git a/chain/src/chain_service.rs b/chain/src/chain_service.rs index 8ecf9f4104..3056e06411 100644 --- a/chain/src/chain_service.rs +++ b/chain/src/chain_service.rs @@ -131,12 +131,13 @@ impl ChainController { let (verify_result_tx, verify_result_rx) = ckb_channel::oneshot::channel::(); let verify_callback = { - move |result: VerifyResult| match 
verify_result_tx.send(result) { - Err(err) => error!( - "blocking send verify_result failed: {}, this shouldn't happen", - err - ), - _ => {} + move |result: VerifyResult| { + if let Err(err) = verify_result_tx.send(result) { + error!( + "blocking send verify_result failed: {}, this shouldn't happen", + err + ) + } } }; @@ -208,7 +209,7 @@ pub fn start_chain_services(builder: ChainServicesBuilder) -> ChainController { let search_orphan_pool_thread = thread::Builder::new() .name("consume_orphan_blocks".into()) .spawn({ - let orphan_blocks_broker = orphan_blocks_broker.clone(); + let orphan_blocks_broker = Arc::clone(&orphan_blocks_broker); let shared = builder.shared.clone(); use crate::consume_orphan::ConsumeOrphan; let verify_failed_block_tx = builder.verify_failed_blocks_tx.clone(); @@ -295,7 +296,8 @@ impl ChainService { Ok(Request { responder, arguments: lonely_block }) => { // asynchronous_process_block doesn't interact with tx-pool, // no need to pause tx-pool's chunk_process here. 
- let _ = responder.send(self.asynchronous_process_block(lonely_block)); + self.asynchronous_process_block(lonely_block); + let _ = responder.send(()); }, _ => { error!("process_block_receiver closed"); @@ -340,20 +342,17 @@ impl ChainService { if lonely_block.switch().is_none() || matches!(lonely_block.switch(), Some(switch) if !switch.disable_non_contextual()) { - let result = self.non_contextual_verify(&lonely_block.block()); - match result { - Err(err) => { - tell_synchronizer_to_punish_the_bad_peer( - self.verify_failed_blocks_tx.clone(), - lonely_block.peer_id_with_msg_bytes(), - lonely_block.block().hash(), - &err, - ); - - lonely_block.execute_callback(Err(err)); - return; - } - _ => {} + let result = self.non_contextual_verify(lonely_block.block()); + if let Err(err) = result { + tell_synchronizer_to_punish_the_bad_peer( + self.verify_failed_blocks_tx.clone(), + lonely_block.peer_id_with_msg_bytes(), + lonely_block.block().hash(), + &err, + ); + + lonely_block.execute_callback(Err(err)); + return; } } diff --git a/chain/src/consume_orphan.rs b/chain/src/consume_orphan.rs index 883ba7b0d8..7d35145280 100644 --- a/chain/src/consume_orphan.rs +++ b/chain/src/consume_orphan.rs @@ -40,9 +40,9 @@ impl ConsumeDescendantProcessor { Err(SendError(unverified_block)) => { error!("send unverified_block_tx failed, the receiver has been closed"); let err: Error = InternalErrorKind::System - .other(format!( - "send unverified_block_tx failed, the receiver have been close" - )) + .other( + "send unverified_block_tx failed, the receiver have been close".to_string(), + ) .into(); let verify_result: VerifyResult = Err(err); @@ -56,7 +56,7 @@ impl ConsumeDescendantProcessor { if total_difficulty.gt(self.shared.get_unverified_tip().total_difficulty()) { self.shared.set_unverified_tip(ckb_shared::HeaderIndex::new( - block_number.clone(), + block_number, block_hash.clone(), total_difficulty, )); diff --git a/chain/src/consume_unverified.rs b/chain/src/consume_unverified.rs index 
e9274a21ff..26394a42c5 100644 --- a/chain/src/consume_unverified.rs +++ b/chain/src/consume_unverified.rs @@ -197,7 +197,7 @@ impl ConsumeUnverifiedBlockProcessor { Some(ref target) => { // if the target has been reached, delete it if target - == &ckb_types::prelude::Unpack::::unpack(&BlockView::hash(&block)) + == &ckb_types::prelude::Unpack::::unpack(&BlockView::hash(block)) { assume_valid_target.take(); Switch::NONE @@ -216,25 +216,20 @@ impl ConsumeUnverifiedBlockProcessor { .expect("parent should be stored already"); if let Some(ext) = self.shared.store().get_block_ext(&block.hash()) { - match ext.verified { - Some(verified) => { - debug!( - "block {}-{} has been verified, previously verified result: {}", - block.number(), - block.hash(), - verified - ); - return if verified { - Ok(VerifiedBlockStatus::PreviouslySeenAndVerified) - } else { - Err(InternalErrorKind::Other - .other("block previously verified failed") - .into()) - }; - } - _ => { - // we didn't verify this block, going on verify now - } + if let Some(verified) = ext.verified { + debug!( + "block {}-{} has been verified, previously verified result: {}", + block.number(), + block.hash(), + verified + ); + return if verified { + Ok(VerifiedBlockStatus::PreviouslySeenAndVerified) + } else { + Err(InternalErrorKind::Other + .other("block previously verified failed") + .into()) + }; } } @@ -271,7 +266,7 @@ impl ConsumeUnverifiedBlockProcessor { let next_block_epoch = self .shared .consensus() - .next_epoch_ext(&parent_header, &self.shared.store().borrow_as_data_loader()) + .next_epoch_ext(parent_header, &self.shared.store().borrow_as_data_loader()) .expect("epoch should be stored"); let new_epoch = next_block_epoch.is_head(); let epoch = next_block_epoch.epoch(); @@ -285,7 +280,7 @@ impl ConsumeUnverifiedBlockProcessor { &cannon_total_difficulty - ¤t_total_difficulty, self.shared.get_unverified_tip().number(), ); - self.find_fork(&mut fork, current_tip_header.number(), &block, ext); + self.find_fork(&mut 
fork, current_tip_header.number(), block, ext); self.rollback(&fork, &db_txn)?; // update and verify chain root @@ -341,7 +336,7 @@ impl ConsumeUnverifiedBlockProcessor { } } - let block_ref: &BlockView = █ + let block_ref: &BlockView = block; self.shared .notify_controller() .notify_new_block(block_ref.clone()); @@ -366,7 +361,7 @@ impl ConsumeUnverifiedBlockProcessor { let tx_pool_controller = self.shared.tx_pool_controller(); if tx_pool_controller.service_started() { - let block_ref: &BlockView = █ + let block_ref: &BlockView = block; if let Err(e) = tx_pool_controller.notify_new_uncle(block_ref.as_uncle()) { error!("[verify block] notify new_uncle error {}", e); } diff --git a/chain/src/lib.rs b/chain/src/lib.rs index dcba6911ba..d1b5df1c1e 100644 --- a/chain/src/lib.rs +++ b/chain/src/lib.rs @@ -5,7 +5,6 @@ //! //! [`ChainService`]: chain/struct.ChainService.html //! [`ChainController`]: chain/struct.ChainController.html - use ckb_error::{is_internal_db_error, Error}; use ckb_logger::{debug, error}; use ckb_network::PeerIndex; @@ -27,33 +26,41 @@ pub use chain_service::{start_chain_services, ChainController}; type ProcessBlockRequest = Request; type TruncateRequest = Request>; +/// VerifyResult is the result type to represent the result of block verification pub type VerifyResult = Result; +/// VerifyCallback is the callback type to be called after block verification pub type VerifyCallback = Box; /// VerifiedBlockStatus is #[derive(Debug, Clone, PartialEq)] pub enum VerifiedBlockStatus { - // The block is being seen for the first time, and VM have verified it + /// The block is being seen for the first time, and VM have verified it FirstSeenAndVerified, - // The block is being seen for the first time - // but VM have not verified it since its a uncle block + /// The block is being seen for the first time + /// but VM have not verified it since its a uncle block UncleBlockNotVerified, - // The block has been verified before. 
+ /// The block has been verified before. PreviouslySeenAndVerified, } +/// LonelyBlock is the block which we have not check weather its parent is stored yet #[derive(Clone)] pub struct LonelyBlock { + /// block pub block: Arc, + /// This block is received from which peer, and the message bytes size pub peer_id_with_msg_bytes: Option<(PeerIndex, u64)>, + + /// The Switch to control the verification process pub switch: Option, } impl LonelyBlock { + /// Combine with verify_callback, convert it to LonelyBlockWithCallback pub fn with_callback(self, verify_callback: Option) -> LonelyBlockWithCallback { LonelyBlockWithCallback { lonely_block: self, @@ -61,32 +68,38 @@ impl LonelyBlock { } } + /// Combine with empty verify_callback, convert it to LonelyBlockWithCallback pub fn without_callback(self) -> LonelyBlockWithCallback { self.with_callback(None) } } +/// LonelyBlockWithCallback Combine LonelyBlock with an optional verify_callback pub struct LonelyBlockWithCallback { + /// The LonelyBlock pub lonely_block: LonelyBlock, + /// The optional verify_callback pub verify_callback: Option, } impl LonelyBlockWithCallback { pub(crate) fn execute_callback(self, verify_result: VerifyResult) { - match self.verify_callback { - Some(verify_callback) => { - verify_callback(verify_result); - } - None => {} + if let Some(verify_callback) = self.verify_callback { + verify_callback(verify_result); } } + /// Get reference to block pub fn block(&self) -> &Arc { &self.lonely_block.block } + + /// get peer_id and msg_bytes pub fn peer_id_with_msg_bytes(&self) -> Option<(PeerIndex, u64)> { self.lonely_block.peer_id_with_msg_bytes } + + /// get switch param pub fn switch(&self) -> Option { self.lonely_block.switch } @@ -147,7 +160,7 @@ pub(crate) fn tell_synchronizer_to_punish_the_bad_peer( block_hash: Byte32, err: &Error, ) { - let is_internal_db_error = is_internal_db_error(&err); + let is_internal_db_error = is_internal_db_error(err); match peer_id_with_msg_bytes { Some((peer_id, 
msg_bytes)) => { let verify_failed_block_info = VerifyFailedBlockInfo { @@ -157,11 +170,8 @@ pub(crate) fn tell_synchronizer_to_punish_the_bad_peer( reason: err.to_string(), is_internal_db_error, }; - match verify_failed_blocks_tx.send(verify_failed_block_info) { - Err(_err) => { - error!("ChainService failed to send verify failed block info to Synchronizer, the receiver side may have been closed, this shouldn't happen") - } - _ => {} + if let Err(_err) = verify_failed_blocks_tx.send(verify_failed_block_info) { + error!("ChainService failed to send verify failed block info to Synchronizer, the receiver side may have been closed, this shouldn't happen") } } _ => { diff --git a/chain/src/utils/orphan_block_pool.rs b/chain/src/utils/orphan_block_pool.rs index 94be59f140..6a6701c93a 100644 --- a/chain/src/utils/orphan_block_pool.rs +++ b/chain/src/utils/orphan_block_pool.rs @@ -95,7 +95,7 @@ impl InnerPool { self.blocks.get(parent_hash).and_then(|blocks| { blocks .get(hash) - .map(|lonely_block| lonely_block.block().clone()) + .map(|lonely_block| Arc::clone(lonely_block.block())) }) }) } From 8abc4f07499faf083d6a1b80dcb334657d2d1b40 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 30 Nov 2023 17:35:08 +0800 Subject: [PATCH 245/357] Fix cargo clippy warnings for ckb-benches --- benches/benches/benchmarks/overall.rs | 2 +- benches/benches/benchmarks/resolve.rs | 2 +- benches/benches/benchmarks/util.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/benches/benches/benchmarks/overall.rs b/benches/benches/benchmarks/overall.rs index 8ac94a2149..d7c34e1996 100644 --- a/benches/benches/benchmarks/overall.rs +++ b/benches/benches/benchmarks/overall.rs @@ -1,7 +1,7 @@ use crate::benchmarks::util::{create_2out_transaction, create_secp_tx, secp_cell}; use ckb_app_config::NetworkConfig; use ckb_app_config::{BlockAssemblerConfig, TxPoolConfig}; -use ckb_chain::ChainController; +use ckb_chain::{start_chain_services, ChainController}; use 
ckb_chain_spec::consensus::{ConsensusBuilder, ProposalWindow}; use ckb_dao_utils::genesis_dao_data; use ckb_jsonrpc_types::JsonBytes; diff --git a/benches/benches/benchmarks/resolve.rs b/benches/benches/benchmarks/resolve.rs index 65f19dd741..37ec9d11c3 100644 --- a/benches/benches/benchmarks/resolve.rs +++ b/benches/benches/benchmarks/resolve.rs @@ -1,6 +1,6 @@ use crate::benchmarks::util::create_2out_transaction; use ckb_app_config::{BlockAssemblerConfig, TxPoolConfig}; -use ckb_chain::ChainController; +use ckb_chain::{start_chain_services, ChainController}; use ckb_chain_spec::{ChainSpec, IssuedCell}; use ckb_jsonrpc_types::JsonBytes; use ckb_resource::Resource; diff --git a/benches/benches/benchmarks/util.rs b/benches/benches/benchmarks/util.rs index 557885635f..3e91d27e35 100644 --- a/benches/benches/benchmarks/util.rs +++ b/benches/benches/benchmarks/util.rs @@ -1,4 +1,4 @@ -use ckb_chain::ChainController; +use ckb_chain::{start_chain_services, ChainController}; use ckb_chain_spec::consensus::{ConsensusBuilder, ProposalWindow}; use ckb_crypto::secp::Privkey; use ckb_dao::DaoCalculator; From 20bda7f7f6cec2f59a1dd777b0015c25c560764e Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 30 Nov 2023 17:36:29 +0800 Subject: [PATCH 246/357] Fix cargo clippy warnings for ckb-sync --- sync/src/relayer/mod.rs | 8 ++------ sync/src/synchronizer/block_fetcher.rs | 4 ++-- sync/src/types/mod.rs | 5 ++++- 3 files changed, 8 insertions(+), 9 deletions(-) diff --git a/sync/src/relayer/mod.rs b/sync/src/relayer/mod.rs index 098663cc0f..5d4702c99d 100644 --- a/sync/src/relayer/mod.rs +++ b/sync/src/relayer/mod.rs @@ -315,17 +315,13 @@ impl Relayer { let verify_success_callback = { let broadcast_compact_block_tx = self.broadcast_compact_block_tx.clone(); let block = Arc::clone(&block); - let peer = peer.clone(); move |result: VerifyResult| match result { Ok(verified_block_status) => match verified_block_status { VerifiedBlockStatus::FirstSeenAndVerified => { - match 
broadcast_compact_block_tx.send((block, peer)) { - Err(_) => { - error!( + if broadcast_compact_block_tx.send((block, peer)).is_err() { + error!( "send block to broadcast_compact_block_tx failed, this shouldn't happen", ); - } - _ => {} } } _ => {} diff --git a/sync/src/synchronizer/block_fetcher.rs b/sync/src/synchronizer/block_fetcher.rs index d053558489..39d8275d73 100644 --- a/sync/src/synchronizer/block_fetcher.rs +++ b/sync/src/synchronizer/block_fetcher.rs @@ -255,8 +255,8 @@ impl BlockFetcher { *state.read_inflight_blocks() ); } else { - let fetch_head = fetch.first().map_or(0_u64.into(), |v| v.number()); - let fetch_last = fetch.last().map_or(0_u64.into(), |v| v.number()); + let fetch_head = fetch.first().map_or(0_u64, |v| v.number()); + let fetch_last = fetch.last().map_or(0_u64, |v| v.number()); let inflight_peer_count = state.read_inflight_blocks().peer_inflight_count(self.peer); let inflight_total_count = state.read_inflight_blocks().total_inflight_count(); debug!( diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index 7b26064301..49a2b9e865 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -992,6 +992,7 @@ pub struct SyncShared { } impl SyncShared { + /// Create a SyncShared pub fn new( shared: Shared, sync_config: SyncConfig, @@ -1058,6 +1059,7 @@ impl SyncShared { self.shared.consensus() } + /// Insert new block with callback pub fn insert_new_block_with_callback( &self, chain: &ChainController, @@ -1269,6 +1271,7 @@ impl SyncShared { self.store().get_block_epoch(hash) } + /// Insert peer's unknown_header_list pub fn insert_peer_unknown_header_list(&self, pi: PeerIndex, header_list: Vec) { // update peer's unknown_header_list only once if self.state().peers.unknown_header_list_is_empty(pi) { @@ -1287,7 +1290,7 @@ impl SyncShared { } } - // Return true when the block is that we have requested and received first time. + /// Return true when the block is that we have requested and received first time. 
pub fn new_block_received(&self, block: &core::BlockView) -> bool { if !self .state() From 04126cea19d304f695ddf4b3f1e6854844d34c27 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 30 Nov 2023 17:39:11 +0800 Subject: [PATCH 247/357] Broadcast compact block when VerifiedStatus is FirstSeenAndVerified or UncleBlockNotVerified --- rpc/src/module/miner.rs | 6 +++++- sync/src/relayer/mod.rs | 3 ++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/rpc/src/module/miner.rs b/rpc/src/module/miner.rs index 9921cd08bd..cb88da52b1 100644 --- a/rpc/src/module/miner.rs +++ b/rpc/src/module/miner.rs @@ -278,7 +278,11 @@ impl MinerRpc for MinerRpcImpl { let verify_result: VerifyResult = self.chain.blocking_process_block(Arc::clone(&block)); // TODO: review this logic - let is_new = matches!(verify_result, Ok(VerifiedBlockStatus::FirstSeenAndVerified)); + let is_new = matches!( + verify_result, + Ok(VerifiedBlockStatus::FirstSeenAndVerified + | VerifiedBlockStatus::UncleBlockNotVerified) + ); // Announce only new block if is_new { diff --git a/sync/src/relayer/mod.rs b/sync/src/relayer/mod.rs index 5d4702c99d..a5f677a4a3 100644 --- a/sync/src/relayer/mod.rs +++ b/sync/src/relayer/mod.rs @@ -317,7 +317,8 @@ impl Relayer { let block = Arc::clone(&block); move |result: VerifyResult| match result { Ok(verified_block_status) => match verified_block_status { - VerifiedBlockStatus::FirstSeenAndVerified => { + VerifiedBlockStatus::FirstSeenAndVerified + | VerifiedBlockStatus::UncleBlockNotVerified => { if broadcast_compact_block_tx.send((block, peer)).is_err() { error!( "send block to broadcast_compact_block_tx failed, this shouldn't happen", From 740b0c14cdafbfcff982bd283454dd73f4de22d5 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 30 Nov 2023 17:49:45 +0800 Subject: [PATCH 248/357] Add extra comma for tip_hash in rpc example, execute make gen-rpc-doc --- rpc/README.md | 8 ++++++-- rpc/src/module/net.rs | 10 +++++----- 2 files changed, 11 insertions(+), 7 
deletions(-) diff --git a/rpc/README.md b/rpc/README.md index 03fbac6f0b..e2d5d102fa 100644 --- a/rpc/README.md +++ b/rpc/README.md @@ -4089,7 +4089,11 @@ Response "inflight_blocks_count": "0x0", "low_time": "0x5dc", "normal_time": "0x4e2", - "orphan_blocks_count": "0x0" + "orphan_blocks_count": "0x0", + "tip_hash": "0xa5f5c85987a15de25661e5a214f2c1449cd803f071acc7999820f25246471f40", + "tip_number": "0x400", + "unverified_tip_hash": "0xa5f5c85987a15de25661e5a214f2c1449cd803f071acc7999820f25246471f40", + "unverified_tip_number": "0x400" } } ``` @@ -6507,4 +6511,4 @@ For example, a cellbase transaction is not allowed in `send_transaction` RPC. ### ERROR `PoolRejectedRBF` (-1111): The transaction is rejected for RBF checking. ### ERROR `Indexer` -(-1200): The indexer error. \ No newline at end of file +(-1200): The indexer error. diff --git a/rpc/src/module/net.rs b/rpc/src/module/net.rs index 95830ea26c..5e631de21b 100644 --- a/rpc/src/module/net.rs +++ b/rpc/src/module/net.rs @@ -370,11 +370,11 @@ pub trait NetRpc { /// "inflight_blocks_count": "0x0", /// "low_time": "0x5dc", /// "normal_time": "0x4e2", - /// "orphan_blocks_count": "0x0" - /// "tip_hash": String("0xa5f5c85987a15de25661e5a214f2c1449cd803f071acc7999820f25246471f40"), - /// "tip_number": String("0x400"), - /// "unverified_tip_hash": String("0xa5f5c85987a15de25661e5a214f2c1449cd803f071acc7999820f25246471f40"), - /// "unverified_tip_number": String("0x400"), + /// "orphan_blocks_count": "0x0", + /// "tip_hash": "0xa5f5c85987a15de25661e5a214f2c1449cd803f071acc7999820f25246471f40", + /// "tip_number": "0x400", + /// "unverified_tip_hash": "0xa5f5c85987a15de25661e5a214f2c1449cd803f071acc7999820f25246471f40", + /// "unverified_tip_number": "0x400" /// } /// } /// ``` From f89849249b23aad9e38b3add59c2a016b8b89646 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Fri, 1 Dec 2023 00:56:02 +0800 Subject: [PATCH 249/357] Add `init_for_test` logger helper, only used by unit test --- util/logger-service/src/lib.rs | 
25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/util/logger-service/src/lib.rs b/util/logger-service/src/lib.rs index 3c87957c35..48500e736e 100644 --- a/util/logger-service/src/lib.rs +++ b/util/logger-service/src/lib.rs @@ -527,3 +527,28 @@ fn setup_panic_logger() { }; panic::set_hook(Box::new(panic_logger)); } + +/// Only used by unit test +/// Initializes the [Logger](struct.Logger.html) and run the logging service. +#[cfg(test)] +pub fn init_for_test(filter: &str) -> Result { + setup_panic_logger(); + let config: Config = Config { + filter: Some(filter.to_string()), + color: true, + log_to_stdout: true, + log_to_file: false, + + emit_sentry_breadcrumbs: None, + file: Default::default(), + log_dir: Default::default(), + extra: Default::default(), + }; + + let logger = Logger::new(None, config); + let filter = logger.filter(); + log::set_boxed_logger(Box::new(logger)).map(|_| { + log::set_max_level(filter); + LoggerInitGuard + }) +} From ba11b60b2fb2e4e0a272e092489653b416673440 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Fri, 1 Dec 2023 00:57:16 +0800 Subject: [PATCH 250/357] Fix unit test for synchronizer::basic_sync --- sync/src/tests/synchronizer/basic_sync.rs | 29 ++++++++++++++++------- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/sync/src/tests/synchronizer/basic_sync.rs b/sync/src/tests/synchronizer/basic_sync.rs index 33c7987649..3765e79d55 100644 --- a/sync/src/tests/synchronizer/basic_sync.rs +++ b/sync/src/tests/synchronizer/basic_sync.rs @@ -9,6 +9,7 @@ use ckb_chain_spec::consensus::ConsensusBuilder; use ckb_channel::bounded; use ckb_dao::DaoCalculator; use ckb_dao_utils::genesis_dao_data; +use ckb_logger::info; use ckb_network::SupportProtocols; use ckb_reward_calculator::RewardCalculator; use ckb_shared::{Shared, SharedBuilder}; @@ -37,9 +38,13 @@ fn basic_sync() { let thread_name = "fake_time=0".to_string(); let (mut node1, shared1) = setup_node(1); + info!("finished setup node1"); let (mut node2, 
shared2) = setup_node(3); + info!("finished setup node2"); + info!("connnectiong node1 and node2"); node1.connect(&mut node2, SupportProtocols::Sync.protocol_id()); + info!("node1 and node2 connected"); let (signal_tx1, signal_rx1) = bounded(DEFAULT_CHANNEL); node1.start(thread_name.clone(), signal_tx1, |data| { @@ -61,14 +66,22 @@ fn basic_sync() { // Wait node1 receive block from node2 let _ = signal_rx1.recv(); - node1.stop(); - node2.stop(); - - assert_eq!(shared1.snapshot().tip_number(), 3); - assert_eq!( - shared1.snapshot().tip_number(), - shared2.snapshot().tip_number() - ); + let test_start = std::time::Instant::now(); + while test_start.elapsed().as_secs() < 3 { + info!("node1 tip_number: {}", shared1.snapshot().tip_number()); + if shared1.snapshot().tip_number() == 3 { + assert_eq!(shared1.snapshot().tip_number(), 3); + assert_eq!( + shared1.snapshot().tip_number(), + shared2.snapshot().tip_number() + ); + + node1.stop(); + node2.stop(); + return; + } + } + panic!("node1 and node2 should sync in 3 seconds"); } fn setup_node(height: u64) -> (TestNode, Shared) { From 139dd13f74b1b9a00f7760a3378817b60c827b71 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 21 Dec 2023 13:55:54 +0800 Subject: [PATCH 251/357] Improve sync chart drawer more friendly --- devtools/block_sync/draw_sync_chart.py | 83 +++++++++++++++++++++----- 1 file changed, 69 insertions(+), 14 deletions(-) diff --git a/devtools/block_sync/draw_sync_chart.py b/devtools/block_sync/draw_sync_chart.py index e95e50f629..b2159d4740 100755 --- a/devtools/block_sync/draw_sync_chart.py +++ b/devtools/block_sync/draw_sync_chart.py @@ -7,7 +7,6 @@ from matplotlib.ticker import MultipleLocator - def parse_sync_statics(log_file): """ parse sync statics from log file @@ -23,9 +22,9 @@ def parse_sync_statics(log_file): print("total lines: ", total_lines) with open(log_file, 'r') as f: - pbar = tqdm.tqdm(total=total_lines) + # pbar = tqdm.tqdm(total=total_lines) for line_idx, line in enumerate(f): - 
pbar.update(1) + # pbar.update(1) if line_idx == 0: timestamp_str = re.search(r'^(\S+ \S+)', line).group(1) # Extract the timestamp string timestamp = datetime.datetime.strptime(timestamp_str, "%Y-%m-%d %H:%M:%S.%f").timestamp() @@ -43,7 +42,7 @@ def parse_sync_statics(log_file): duration.append(timestamp / 60 / 60) height.append(block_number) - pbar.close() + # pbar.close() return duration, height @@ -68,25 +67,59 @@ def parse_sync_statics(log_file): fig, ax = plt.subplots(1, 1, figsize=(10, 8)) lgs = [] -for ckb_log_file, label in tasks: + +def process_task(task): + ckb_log_file, label = task print("ckb_log_file: ", ckb_log_file) print("label: ", label) duration, height = parse_sync_statics(ckb_log_file) + return (duration, height, label) + + +tasks = [(ckb_log_file, label) for ckb_log_file, label in tasks] + + +import multiprocessing +with multiprocessing.Pool() as pool: + results = pool.map(process_task, tasks) + +alabels = [] + +import matplotlib.ticker as ticker + +for duration, height, label in results: +# for ckb_log_file, label in tasks: +# print("ckb_log_file: ", ckb_log_file) +# print("label: ", label) +# duration, height = parse_sync_statics(ckb_log_file) + lg = ax.scatter(duration, height, s=1, label=label) ax.plot(duration, height, label=label) + lgs.append(lg) for i, h in enumerate(height): if h % 1_000_000 == 0: ax.vlines([duration[i]], 0, h, colors="gray", linestyles="dashed") - ax.annotate(str(round(duration[i], 1)), - xy=(duration[i], 0), - xycoords='axes fraction', - xytext=(duration[i], -0.05), - arrowprops=dict(arrowstyle="->", color='b') - ) + + if h == 10_000_000: + alabels.append(((duration[i],h),label)) + + if h == 10_000_000 or h == 11_000_000: + ax.vlines([duration[i]], 0, h, colors="black", linestyles="dashed") + voff=-60 + if h == 11_000_000: + voff=-75 + ax.annotate(round(duration[i],1), + fontsize=8, + xy=(duration[i], 0), xycoords='data', + xytext=(0, voff), textcoords='offset points', + bbox=dict(boxstyle="round", fc="0.9"), + 
arrowprops=dict(arrowstyle="-"), + horizontalalignment='center', verticalalignment='bottom') + ax.get_yaxis().get_major_formatter().set_scientific(False) ax.get_yaxis().get_major_formatter().set_useOffset(False) @@ -105,13 +138,35 @@ def parse_sync_statics(log_file): ax.xaxis.set_minor_locator(xminorLocator) yminorLocator = MultipleLocator(1_000_000) - ax.yaxis.set_minor_locator(yminorLocator) + ax.yaxis.set_major_locator(yminorLocator) + # plt.xticks(ax.get_xticks(), ax.get_xticklabels(which='both')) # plt.setp(ax.get_xticklabels(which='both'), rotation=30, horizontalalignment='right') -plt.legend(tuple(lgs), tuple(args.label), loc='upper left', shadow=True) +# sort alabsle by .0.1 +alabels.sort(key=lambda x: x[0][0]) + +lheight=80 +loffset=-40 +count=len(alabels) +for (duration,h), label in alabels: + + ax.annotate(label, + fontsize=8, + xy=(duration, h), xycoords='data', + xytext=(loffset, lheight), textcoords='offset points', + bbox=dict(boxstyle="round", fc="0.9"), + arrowprops=dict(arrowstyle="->"), + horizontalalignment='center', verticalalignment='bottom') + loffset += round(80/count,0) + if loffset <0: + lheight += 20 + elif loffset > 0: + lheight -= 20 + +# plt.legend(tuple(lgs), tuple(args.label), loc='upper left', shadow=True) plt.title('CKB Block Sync progress Chart') plt.xlabel('Timecost (hours)') plt.ylabel('Block Height') -plt.savefig(result_path) +plt.savefig(result_path, bbox_inches='tight', dpi=300) From 4c6b6216e9989aab4f9b6291388a7657bce8a27b Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 26 Dec 2023 17:30:41 +0800 Subject: [PATCH 252/357] Fix ckb workspace members crate version to 0.114.0-pre --- Cargo.lock | 5 ++--- chain/Cargo.toml | 8 ++++---- rpc/Cargo.toml | 2 +- shared/Cargo.toml | 4 ++-- 4 files changed, 9 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d7b1edf127..8f94f9ee77 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -691,7 +691,6 @@ dependencies = [ "ckb-chain-spec", "ckb-channel", "ckb-constant", - 
"ckb-dao", "ckb-dao-utils", "ckb-error", "ckb-jsonrpc-types", @@ -1980,7 +1979,7 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2801af0d36612ae591caa9568261fddce32ce6e08a7275ea334a06a4ad021a2c" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "crossbeam-channel", "crossbeam-deque", "crossbeam-epoch", @@ -2028,7 +2027,7 @@ version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "crossbeam-utils", ] diff --git a/chain/Cargo.toml b/chain/Cargo.toml index 67b88a9af8..ab7ed48001 100644 --- a/chain/Cargo.toml +++ b/chain/Cargo.toml @@ -27,12 +27,12 @@ ckb-rust-unstable-port = { path = "../util/rust-unstable-port", version = "= 0.1 ckb-channel = { path = "../util/channel", version = "= 0.114.0-pre" } faux = { version = "^0.1", optional = true } ckb-merkle-mountain-range = "0.5.2" -ckb-constant = { path = "../util/constant", version = "= 0.113.0-pre" } -ckb-util = { path = "../util", version = "= 0.113.0-pre" } +ckb-constant = { path = "../util/constant", version = "= 0.114.0-pre" } +ckb-util = { path = "../util", version = "= 0.114.0-pre" } crossbeam = "0.8.2" -ckb-network = { path = "../network", version = "= 0.113.0-pre" } +ckb-network = { path = "../network", version = "= 0.114.0-pre" } tokio = { version = "1", features = ["sync"] } -ckb-tx-pool = { path = "../tx-pool", version = "= 0.113.0-pre"} +ckb-tx-pool = { path = "../tx-pool", version = "= 0.114.0-pre"} [dev-dependencies] ckb-test-chain-utils = { path = "../util/test-chain-utils", version = "= 0.114.0-pre" } diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 64bfeb4f48..617ee95c76 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -50,7 +50,7 @@ async-stream = "0.3.3" ckb-async-runtime = { path = "../util/runtime", version = "= 0.114.0-pre" } # issue tracking: https://github.com/GREsau/schemars/pull/251 schemars = 
{ version = "0.8.16", package = "ckb_schemars" } -ckb-channel = { path = "../util/channel", version = "= 0.113.0-pre" } +ckb-channel = { path = "../util/channel", version = "= 0.114.0-pre" } [dev-dependencies] reqwest = { version = "=0.11.20", features = ["blocking", "json"] } diff --git a/shared/Cargo.toml b/shared/Cargo.toml index 44816cfff1..50b6eb1680 100644 --- a/shared/Cargo.toml +++ b/shared/Cargo.toml @@ -30,8 +30,8 @@ ckb-channel = { path = "../util/channel", version = "= 0.114.0-pre" } ckb-app-config = {path = "../util/app-config", version = "= 0.114.0-pre"} ckb-migrate = { path = "../util/migrate", version = "= 0.114.0-pre" } once_cell = "1.8.0" -ckb-network = { path = "../network", version = "= 0.113.0-pre" } -ckb-util = { path = "../util", version = "= 0.113.0-pre" } +ckb-network = { path = "../network", version = "= 0.114.0-pre" } +ckb-util = { path = "../util", version = "= 0.114.0-pre" } bitflags = "1.0" tokio = { version = "1", features = ["sync"] } tempfile.workspace = true From c64c2103b8a5ec4cc29dbd89239b44e9dabd3077 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 26 Dec 2023 17:51:59 +0800 Subject: [PATCH 253/357] Fix unit test for ckb-rpc, use blocking_process_block --- rpc/src/tests/mod.rs | 2 +- rpc/src/tests/setup.rs | 15 +++++++++------ 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/rpc/src/tests/mod.rs b/rpc/src/tests/mod.rs index 1d3ed34261..5b3017d5d5 100644 --- a/rpc/src/tests/mod.rs +++ b/rpc/src/tests/mod.rs @@ -1,4 +1,4 @@ -use ckb_chain::{start_chain_services, ChainController}; +use ckb_chain::ChainController; use ckb_chain_spec::consensus::Consensus; use ckb_dao::DaoCalculator; use ckb_reward_calculator::RewardCalculator; diff --git a/rpc/src/tests/setup.rs b/rpc/src/tests/setup.rs index 76535ed091..4680229531 100644 --- a/rpc/src/tests/setup.rs +++ b/rpc/src/tests/setup.rs @@ -5,7 +5,7 @@ use crate::{ use ckb_app_config::{ BlockAssemblerConfig, NetworkAlertConfig, NetworkConfig, RpcConfig, RpcModule, }; -use 
ckb_chain::chain::ChainService; +use ckb_chain::start_chain_services; use ckb_chain_spec::consensus::{Consensus, ConsensusBuilder}; use ckb_chain_spec::versionbits::{ActiveMode, Deployment, DeploymentPos}; use ckb_dao_utils::genesis_dao_data; @@ -87,8 +87,7 @@ pub(crate) fn setup_rpc_test_suite(height: u64, consensus: Option) -> })) .build() .unwrap(); - let chain_controller = - ChainService::new(shared.clone(), pack.take_proposal_table()).start::<&str>(None); + let chain_controller = start_chain_services(pack.take_chain_services_builder()); // Start network services let temp_dir = tempfile::tempdir().expect("create tmp_dir failed"); @@ -131,7 +130,7 @@ pub(crate) fn setup_rpc_test_suite(height: u64, consensus: Option) -> for _ in 0..height { let block = next_block(&shared, &parent.header()); chain_controller - .process_block(Arc::new(block.clone())) + .blocking_process_block(Arc::new(block.clone())) .expect("processing new block should be ok"); parent = block; } @@ -206,7 +205,11 @@ pub(crate) fn setup_rpc_test_suite(height: u64, consensus: Option) -> chain_controller.clone(), true, ) - .enable_net(network_controller.clone(), sync_shared) + .enable_net( + network_controller.clone(), + sync_shared, + Arc::new(chain_controller.clone()), + ) .enable_stats(shared.clone(), Arc::clone(&alert_notifier)) .enable_experiment(shared.clone()) .enable_integration_test( @@ -256,7 +259,7 @@ pub(crate) fn setup_rpc_test_suite(height: u64, consensus: Option) -> ) .build(); chain_controller - .process_block(Arc::new(fork_block)) + .blocking_process_block(Arc::new(fork_block)) .expect("processing new block should be ok"); } From 02eeaba13c114a335bc56c3dcefe6769dcea60f5 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 9 Jan 2024 15:53:52 +0800 Subject: [PATCH 254/357] Fix BlockFetcher fetch should not use unverified_tip to change last_common --- sync/src/synchronizer/block_fetcher.rs | 28 ++++++++++++++++---------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git 
a/sync/src/synchronizer/block_fetcher.rs b/sync/src/synchronizer/block_fetcher.rs index 39d8275d73..8bc25e7493 100644 --- a/sync/src/synchronizer/block_fetcher.rs +++ b/sync/src/synchronizer/block_fetcher.rs @@ -58,15 +58,10 @@ impl BlockFetcher { { header } else { - let unverified_tip_header = self.sync_shared.shared().get_unverified_tip(); - if best_known.number() < unverified_tip_header.number() { - (best_known.number(), best_known.hash()).into() - } else { - (unverified_tip_header.number(), unverified_tip_header.hash()).into() - } - // let guess_number = min(tip_header.number(), best_known.number()); - // let guess_hash = self.active_chain.get_block_hash(guess_number)?; - // (guess_number, guess_hash).into() + let tip_header = self.active_chain.tip_header(); + let guess_number = min(tip_header.number(), best_known.number()); + let guess_hash = self.active_chain.get_block_hash(guess_number)?; + (guess_number, guess_hash).into() }; // If the peer reorganized, our previous last_common_header may not be an ancestor @@ -141,7 +136,7 @@ impl BlockFetcher { // last_common_header, is expected to provide a more realistic picture. Hence here we // specially advance this peer's last_common_header at the case of both us on the same // active chain. 
- if self.active_chain.is_unverified_chain(&best_known.hash()) { + if self.active_chain.is_main_chain(&best_known.hash()) { self.sync_shared .state() .peers() @@ -157,8 +152,19 @@ impl BlockFetcher { return None; } + if best_known.number() <= self.sync_shared.shared().get_unverified_tip().number() { + debug!( + "Peer {}'s best known: {} is less or equal than unverified_tip : {}", + self.peer, + best_known.number(), + self.sync_shared.shared().get_unverified_tip().number() + ); + return None; + } + let state = self.sync_shared.state(); - let mut start = last_common.number() + 1; + + let mut start = self.sync_shared.shared().get_unverified_tip().number() + 1; let mut end = min(best_known.number(), start + BLOCK_DOWNLOAD_WINDOW); let n_fetch = min( end.saturating_sub(start) as usize + 1, From c50197e69193e69118189c77c0eddba187e90c73 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 9 Jan 2024 15:55:19 +0800 Subject: [PATCH 255/357] Fix unit test: `test_switch_valid_fork` --- sync/src/tests/sync_shared.rs | 275 +++++++++++++++++----------------- sync/src/types/mod.rs | 60 +------- 2 files changed, 145 insertions(+), 190 deletions(-) diff --git a/sync/src/tests/sync_shared.rs b/sync/src/tests/sync_shared.rs index 4bfc2c0f5a..e0880d2024 100644 --- a/sync/src/tests/sync_shared.rs +++ b/sync/src/tests/sync_shared.rs @@ -1,5 +1,6 @@ #![allow(unused_imports)] #![allow(dead_code)] + use crate::tests::util::{build_chain, inherit_block}; use crate::SyncShared; use ckb_chain::{start_chain_services, VerifiedBlockStatus}; @@ -54,137 +55,143 @@ fn test_insert_invalid_block() { .is_err(),); } -// #[test] -// fn test_insert_parent_unknown_block() { -// let (shared1, _) = build_chain(2); -// let (shared, chain) = { -// let (shared, mut pack) = SharedBuilder::with_temp_db() -// .consensus(shared1.consensus().clone()) -// .build() -// .unwrap(); -// let chain_controller = start_chain_services(pack.take_chain_services_builder()); -// ( -// SyncShared::new(shared, Default::default(), 
pack.take_relay_tx_receiver()), -// chain_controller, -// ) -// }; -// -// let block = shared1 -// .store() -// .get_block(&shared1.active_chain().tip_header().hash()) -// .unwrap(); -// let parent = { -// let parent = shared1 -// .store() -// .get_block(&block.header().parent_hash()) -// .unwrap(); -// Arc::new(parent) -// }; -// let invalid_orphan = { -// let invalid_orphan = block -// .as_advanced_builder() -// .header(block.header()) -// .number(1000.pack()) -// .build(); -// -// Arc::new(invalid_orphan) -// }; -// let valid_orphan = Arc::new(block); -// let valid_hash = valid_orphan.header().hash(); -// let invalid_hash = invalid_orphan.header().hash(); -// let parent_hash = parent.header().hash(); -// -// assert!(!shared -// .insert_new_block(&chain, Arc::clone(&valid_orphan)) -// .expect("insert orphan block"),); -// assert!(!shared -// .insert_new_block(&chain, Arc::clone(&invalid_orphan)) -// .expect("insert orphan block"),); -// assert_eq!( -// shared.active_chain().get_block_status(&valid_hash), -// BlockStatus::BLOCK_RECEIVED -// ); -// assert_eq!( -// shared.active_chain().get_block_status(&invalid_hash), -// BlockStatus::BLOCK_RECEIVED -// ); -// -// // After inserting parent of an orphan block -// assert!(shared -// .insert_new_block(&chain, Arc::clone(&parent)) -// .expect("insert parent of orphan block"),); -// assert_eq!( -// shared.active_chain().get_block_status(&valid_hash), -// BlockStatus::BLOCK_VALID -// ); -// assert_eq!( -// shared.active_chain().get_block_status(&invalid_hash), -// BlockStatus::BLOCK_INVALID -// ); -// assert_eq!( -// shared.active_chain().get_block_status(&parent_hash), -// BlockStatus::BLOCK_VALID -// ); -// } - -// #[test] -// fn test_switch_valid_fork() { -// let (shared, chain) = build_chain(4); -// let make_valid_block = |shared, parent_hash| -> BlockView { -// let header = inherit_block(shared, &parent_hash).build().header(); -// let timestamp = header.timestamp() + 3; -// let cellbase = inherit_block(shared, 
&parent_hash).build().transactions()[0].clone(); -// BlockBuilder::default() -// .header(header) -// .timestamp(timestamp.pack()) -// .transaction(cellbase) -// .build() -// }; -// -// // Insert the valid fork. The fork blocks would not been verified until the fork switches as -// // the main chain. And `block_status_map` would mark the fork blocks as `BLOCK_STORED` -// let block_number = 1; -// let mut parent_hash = shared.store().get_block_hash(block_number).unwrap(); -// for number in 0..=block_number { -// let block_hash = shared.store().get_block_hash(number).unwrap(); -// shared.store().get_block(&block_hash).unwrap(); -// } -// let mut valid_fork = Vec::new(); -// for _ in 2..shared.active_chain().tip_number() { -// let block = make_valid_block(shared.shared(), parent_hash.clone()); -// assert!(shared -// .insert_new_block(&chain, Arc::new(block.clone())) -// .expect("insert fork"),); -// -// parent_hash = block.header().hash(); -// valid_fork.push(block); -// } -// for block in valid_fork.iter() { -// assert_eq!( -// shared -// .active_chain() -// .get_block_status(&block.header().hash()), -// BlockStatus::BLOCK_STORED, -// ); -// } -// -// let tip_number = shared.active_chain().tip_number(); -// // Make the fork switch as the main chain. 
-// for _ in tip_number..tip_number + 2 { -// let block = inherit_block(shared.shared(), &parent_hash.clone()).build(); -// assert!(shared -// .insert_new_block(&chain, Arc::new(block.clone())) -// .expect("insert fork"),); -// -// parent_hash = block.header().hash(); -// valid_fork.push(block); -// } -// for block in valid_fork.iter() { -// assert_eq!( -// shared -// .active_chain() -// .get_block_status(&block.header().hash()), -// BlockStatus::BLOCK_VALID, -// ); -// } -// } +#[test] +fn test_insert_parent_unknown_block() { + let (shared1, _) = build_chain(2); + let (shared, chain) = { + let (shared, mut pack) = SharedBuilder::with_temp_db() + .consensus(shared1.consensus().clone()) + .build() + .unwrap(); + let chain_controller = start_chain_services(pack.take_chain_services_builder()); + ( + SyncShared::new(shared, Default::default(), pack.take_relay_tx_receiver()), + chain_controller, + ) + }; + + let block = shared1 + .store() + .get_block(&shared1.active_chain().tip_header().hash()) + .unwrap(); + let parent = { + let parent = shared1 + .store() + .get_block(&block.header().parent_hash()) + .unwrap(); + Arc::new(parent) + }; + let invalid_orphan = { + let invalid_orphan = block + .as_advanced_builder() + .header(block.header()) + .number(1000.pack()) + .build(); + + Arc::new(invalid_orphan) + }; + let valid_orphan = Arc::new(block); + let valid_hash = valid_orphan.header().hash(); + let invalid_hash = invalid_orphan.header().hash(); + let parent_hash = parent.header().hash(); + shared.accept_block(&chain, Arc::clone(&valid_orphan), None, None); + shared.accept_block(&chain, Arc::clone(&invalid_orphan), None, None); + + assert_eq!( + shared.active_chain().get_block_status(&valid_hash), + BlockStatus::BLOCK_RECEIVED + ); + assert_eq!( + shared.active_chain().get_block_status(&invalid_hash), + BlockStatus::BLOCK_RECEIVED + ); + + // After inserting parent of an orphan block + + assert!(matches!( + shared + .blocking_insert_new_block(&chain, 
Arc::clone(&parent)) + .expect("insert parent of orphan block"), + VerifiedBlockStatus::FirstSeenAndVerified | VerifiedBlockStatus::UncleBlockNotVerified, + )); + assert_eq!( + shared.active_chain().get_block_status(&valid_hash), + BlockStatus::BLOCK_VALID + ); + assert_eq!( + shared.active_chain().get_block_status(&invalid_hash), + BlockStatus::BLOCK_INVALID + ); + assert_eq!( + shared.active_chain().get_block_status(&parent_hash), + BlockStatus::BLOCK_VALID + ); +} + +#[test] +fn test_switch_valid_fork() { + let (shared, chain) = build_chain(4); + let make_valid_block = |shared, parent_hash| -> BlockView { + let header = inherit_block(shared, &parent_hash).build().header(); + let timestamp = header.timestamp() + 3; + let cellbase = inherit_block(shared, &parent_hash).build().transactions()[0].clone(); + BlockBuilder::default() + .header(header) + .timestamp(timestamp.pack()) + .transaction(cellbase) + .build() + }; + + // Insert the valid fork. The fork blocks would not been verified until the fork switches as + // the main chain. 
And `block_status_map` would mark the fork blocks as `BLOCK_STORED` + let block_number = 1; + let mut parent_hash = shared.store().get_block_hash(block_number).unwrap(); + for number in 0..=block_number { + let block_hash = shared.store().get_block_hash(number).unwrap(); + shared.store().get_block(&block_hash).unwrap(); + } + let mut valid_fork = Vec::new(); + for _ in 2..shared.active_chain().tip_number() { + let block = make_valid_block(shared.shared(), parent_hash.clone()); + assert_eq!( + shared + .blocking_insert_new_block(&chain, Arc::new(block.clone())) + .expect("insert fork"), + VerifiedBlockStatus::UncleBlockNotVerified + ); + + parent_hash = block.header().hash(); + valid_fork.push(block); + } + for block in valid_fork.iter() { + assert_eq!( + shared + .active_chain() + .get_block_status(&block.header().hash()), + BlockStatus::BLOCK_STORED, + ); + } + + let tip_number = shared.active_chain().tip_number(); + // Make the fork switch as the main chain. + for _ in tip_number..tip_number + 2 { + let block = inherit_block(shared.shared(), &parent_hash.clone()).build(); + assert!(matches!( + shared + .blocking_insert_new_block(&chain, Arc::new(block.clone())) + .expect("insert fork"), + VerifiedBlockStatus::FirstSeenAndVerified | VerifiedBlockStatus::UncleBlockNotVerified, + )); + + parent_hash = block.header().hash(); + valid_fork.push(block); + } + for block in valid_fork.iter() { + assert_eq!( + shared + .active_chain() + .get_block_status(&block.header().hash()), + BlockStatus::BLOCK_VALID, + ); + } +} diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index 49a2b9e865..774670d42c 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -1070,7 +1070,7 @@ impl SyncShared { self.accept_block( chain, Arc::clone(&block), - peer_id_with_msg_bytes, + Some(peer_id_with_msg_bytes), Some(verify_success_callback), ) } @@ -1086,63 +1086,11 @@ impl SyncShared { self.accept_block( chain, Arc::clone(&block), - (peer_id, message_bytes), + Some((peer_id, 
message_bytes)), None::, ); } - /// Try to find blocks from the orphan block pool that may no longer be orphan - // pub fn try_search_orphan_pool(&self, chain: &ChainController) { - // let leaders = self.state.orphan_pool().clone_leaders(); - // debug!("orphan pool leader parents hash len: {}", leaders.len()); - // - // for hash in leaders { - // if self.state.orphan_pool().is_empty() { - // break; - // } - // if self.is_stored(&hash) { - // let descendants = self.state.remove_orphan_by_parent(&hash); - // debug!( - // "try accepting {} descendant orphan blocks by exist parents hash", - // descendants.len() - // ); - // for block in descendants { - // // If we can not find the block's parent in database, that means it was failed to accept - // // its parent, so we treat it as an invalid block as well. - // if !self.is_stored(&block.parent_hash()) { - // debug!( - // "parent-unknown orphan block, block: {}, {}, parent: {}", - // block.header().number(), - // block.header().hash(), - // block.header().parent_hash(), - // ); - // continue; - // } - // - // let block = Arc::new(block); - // if let Err(err) = self.accept_block(chain, Arc::clone(&block)) { - // debug!( - // "accept descendant orphan block {} error {:?}", - // block.header().hash(), - // err - // ); - // } - // } - // } - // } - // } - // - /// Cleanup orphan_pool, - /// Remove blocks whose epoch is 6 (EXPIRED_EPOCH) epochs behind the current epoch. 
- // pub(crate) fn periodic_clean_orphan_pool(&self) { - // let hashes = self - // .state - // .clean_expired_blocks(self.active_chain().epoch_ext().number()); - // for hash in hashes { - // self.shared().remove_header_view(&hash); - // } - // } - // Only used by unit test // Blocking insert a new block, return the verify result #[cfg(test)] @@ -1174,12 +1122,12 @@ impl SyncShared { &self, chain: &ChainController, block: Arc, - peer_id_with_msg_bytes: (PeerIndex, u64), + peer_id_with_msg_bytes: Option<(PeerIndex, u64)>, verify_callback: Option, ) { let lonely_block_with_callback = LonelyBlock { block, - peer_id_with_msg_bytes: Some(peer_id_with_msg_bytes), + peer_id_with_msg_bytes, switch: None, } .with_callback(verify_callback); From 20fa1031cb7926a4021fd52f479e03acf3331ef5 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 9 Jan 2024 16:39:05 +0800 Subject: [PATCH 256/357] BlockFetcher should only set_last_common_header when status is BLOCK_STORED --- sync/src/synchronizer/block_fetcher.rs | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/sync/src/synchronizer/block_fetcher.rs b/sync/src/synchronizer/block_fetcher.rs index 8bc25e7493..fcf05efda0 100644 --- a/sync/src/synchronizer/block_fetcher.rs +++ b/sync/src/synchronizer/block_fetcher.rs @@ -194,12 +194,15 @@ impl BlockFetcher { let hash = header.hash(); if status.contains(BlockStatus::BLOCK_PARTIAL_STORED) { - // If the block is stored, its ancestor must on store - // So we can skip the search of this space directly - self.sync_shared - .state() - .peers() - .set_last_common_header(self.peer, header.number_and_hash()); + if status.contains(BlockStatus::BLOCK_STORED) { + // If the block is stored, its ancestor must on store + // So we can skip the search of this space directly + self.sync_shared + .state() + .peers() + .set_last_common_header(self.peer, header.number_and_hash()); + } + end = min(best_known.number(), header.number() + BLOCK_DOWNLOAD_WINDOW); break; } else if 
status.contains(BlockStatus::BLOCK_RECEIVED) { From 8da8c901b00758eb9525a4eb907b34287507c163 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 11 Jan 2024 11:00:27 +0800 Subject: [PATCH 257/357] Simplify `ConsumeDescendantProcessor` unverified_blocks_tx send code --- chain/src/consume_orphan.rs | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/chain/src/consume_orphan.rs b/chain/src/consume_orphan.rs index 7d35145280..00c9538621 100644 --- a/chain/src/consume_orphan.rs +++ b/chain/src/consume_orphan.rs @@ -29,13 +29,12 @@ impl ConsumeDescendantProcessor { let block_number = unverified_block.block().number(); let block_hash = unverified_block.block().hash(); - let send_success = match self.unverified_blocks_tx.send(unverified_block) { + match self.unverified_blocks_tx.send(unverified_block) { Ok(_) => { debug!( "process desendant block success {}-{}", block_number, block_hash ); - true } Err(SendError(unverified_block)) => { error!("send unverified_block_tx failed, the receiver has been closed"); @@ -47,12 +46,9 @@ impl ConsumeDescendantProcessor { let verify_result: VerifyResult = Err(err); unverified_block.execute_callback(verify_result); - false + return; } }; - if !send_success { - return; - } if total_difficulty.gt(self.shared.get_unverified_tip().total_difficulty()) { self.shared.set_unverified_tip(ckb_shared::HeaderIndex::new( From 5552e68e089104823ad04738994855d675a910d7 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 11 Jan 2024 11:02:43 +0800 Subject: [PATCH 258/357] Fix matches result did not assert its result --- Makefile | 2 +- sync/src/tests/sync_shared.rs | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Makefile b/Makefile index c419d05bcf..1605b81da9 100644 --- a/Makefile +++ b/Makefile @@ -127,7 +127,7 @@ profiling: ## Build binary with for profiling without debug symbols. .PHONY: profiling-with-debug-symbols build-for-profiling: ## Build binary with for profiling. 
- devtools/release/make-with-debug-symbols profilling + devtools/release/make-with-debug-symbols profiling .PHONY: prod prod: ## Build binary for production release. diff --git a/sync/src/tests/sync_shared.rs b/sync/src/tests/sync_shared.rs index e0880d2024..1e3d99d77d 100644 --- a/sync/src/tests/sync_shared.rs +++ b/sync/src/tests/sync_shared.rs @@ -21,18 +21,18 @@ fn test_insert_new_block() { Arc::new(next_block) }; - matches!( + assert!(matches!( shared .blocking_insert_new_block(&chain, Arc::clone(&new_block)) .expect("insert valid block"), VerifiedBlockStatus::FirstSeenAndVerified, - ); - matches!( + )); + assert!(matches!( shared .blocking_insert_new_block(&chain, Arc::clone(&new_block)) .expect("insert duplicated valid block"), VerifiedBlockStatus::PreviouslySeenAndVerified, - ); + )); } #[test] From ef845c42fc397e27c577cb6335f87aad558e7bde Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 11 Jan 2024 11:05:29 +0800 Subject: [PATCH 259/357] Split get_ancestor's logic to unverified_tip and tip Signed-off-by: Eval EXEC --- sync/src/types/mod.rs | 65 ++++++++++++++++++++++++++++++++----------- 1 file changed, 48 insertions(+), 17 deletions(-) diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index 774670d42c..2f94dd9ca3 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -1781,25 +1781,56 @@ impl ActiveChain { } pub fn get_ancestor(&self, base: &Byte32, number: BlockNumber) -> Option { - let unverified_tip_number = self.unverified_tip_number(); + self.get_ancestor_internal(base, number, false) + } + + pub fn get_ancestor_with_unverified( + &self, + base: &Byte32, + number: BlockNumber, + ) -> Option { + self.get_ancestor_internal(base, number, true) + } + + fn get_ancestor_internal( + &self, + base: &Byte32, + number: BlockNumber, + with_unverified: bool, + ) -> Option { + let tip_number = { + if with_unverified { + self.unverified_tip_number() + } else { + self.tip_number() + } + }; + + let block_is_on_chain_fn = |hash: &Byte32| { + if 
with_unverified { + self.is_unverified_chain(hash) + } else { + self.is_main_chain(hash) + } + }; + + let get_header_view_fn: fn(&Byte32, bool) -> Option = + |hash, store_first| self.shared.get_header_index_view(hash, store_first); + + let fast_scanner_fn: fn(BlockNumber, BlockNumberAndHash) -> Option = + |number, current| { + // shortcut to return an ancestor block + if current.number <= tip_number && block_is_on_chain_fn(¤t.hash) { + self.get_block_hash(number) + .and_then(|hash| self.shared.get_header_index_view(&hash, true)) + } else { + None + } + }; + self.shared .get_header_index_view(base, false)? - .get_ancestor( - unverified_tip_number, - number, - |hash, store_first| self.shared.get_header_index_view(hash, store_first), - |number, current| { - // shortcut to return an ancestor block - if current.number <= unverified_tip_number - && self.is_unverified_chain(¤t.hash) - { - self.get_block_hash(number) - .and_then(|hash| self.shared.get_header_index_view(&hash, true)) - } else { - None - } - }, - ) + .get_ancestor(tip_number, number, get_header_view_fn, fast_scanner_fn) } pub fn get_locator(&self, start: BlockNumberAndHash) -> Vec { From c6d1d49fa5019d83238eda5abfba08d73946459e Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 11 Jan 2024 11:12:17 +0800 Subject: [PATCH 260/357] BlockFetcher get ancestor with unverified_tip --- sync/src/synchronizer/block_fetcher.rs | 2 +- sync/src/types/mod.rs | 25 ++++++++++++------------- 2 files changed, 13 insertions(+), 14 deletions(-) diff --git a/sync/src/synchronizer/block_fetcher.rs b/sync/src/synchronizer/block_fetcher.rs index fcf05efda0..cd2ba1bbef 100644 --- a/sync/src/synchronizer/block_fetcher.rs +++ b/sync/src/synchronizer/block_fetcher.rs @@ -185,7 +185,7 @@ impl BlockFetcher { // Iterate in range `[start, start+span)` and consider as the next to-fetch candidates. 
let mut header = self .active_chain - .get_ancestor(&best_known.hash(), start + span - 1)?; + .get_ancestor_with_unverified(&best_known.hash(), start + span - 1)?; let mut status = self.sync_shared.shared().get_block_status(&header.hash()); // Judge whether we should fetch the target block, neither stored nor in-flighted diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index 2f94dd9ca3..c68752e46e 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -1814,19 +1814,18 @@ impl ActiveChain { } }; - let get_header_view_fn: fn(&Byte32, bool) -> Option = - |hash, store_first| self.shared.get_header_index_view(hash, store_first); - - let fast_scanner_fn: fn(BlockNumber, BlockNumberAndHash) -> Option = - |number, current| { - // shortcut to return an ancestor block - if current.number <= tip_number && block_is_on_chain_fn(¤t.hash) { - self.get_block_hash(number) - .and_then(|hash| self.shared.get_header_index_view(&hash, true)) - } else { - None - } - }; + let get_header_view_fn = + |hash: &Byte32, store_first: bool| self.shared.get_header_index_view(hash, store_first); + + let fast_scanner_fn = |number: BlockNumber, current: BlockNumberAndHash| { + // shortcut to return an ancestor block + if current.number <= tip_number && block_is_on_chain_fn(¤t.hash) { + self.get_block_hash(number) + .and_then(|hash| self.shared.get_header_index_view(&hash, true)) + } else { + None + } + }; self.shared .get_header_index_view(base, false)? 
From 7ef89dfa60ed7468940de8e70fe0ea34b9a15e90 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 11 Jan 2024 11:45:19 +0800 Subject: [PATCH 261/357] Clean expired orphan blocks Signed-off-by: Eval EXEC --- chain/src/consume_orphan.rs | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/chain/src/consume_orphan.rs b/chain/src/consume_orphan.rs index 00c9538621..6804371db2 100644 --- a/chain/src/consume_orphan.rs +++ b/chain/src/consume_orphan.rs @@ -12,7 +12,7 @@ use ckb_shared::types::VerifyFailedBlockInfo; use ckb_shared::Shared; use ckb_store::ChainStore; use ckb_systemtime::unix_time_as_millis; -use ckb_types::core::{BlockExt, BlockView, HeaderView}; +use ckb_types::core::{BlockExt, BlockView, EpochNumber, EpochNumberWithFraction, HeaderView}; use ckb_types::U256; use ckb_verification::InvalidParentError; use std::sync::Arc; @@ -217,11 +217,19 @@ impl ConsumeOrphan { } pub(crate) fn start(&self) { + let mut last_check_expired_orphans_epoch: EpochNumber = 0; loop { select! 
{ recv(self.lonely_blocks_rx) -> msg => match msg { Ok(lonely_block) => { + let lonely_block_epoch: EpochNumberWithFraction = lonely_block.block().epoch(); + self.process_lonely_block(lonely_block); + + if lonely_block_epoch.number() > last_check_expired_orphans_epoch { + self.clean_expired_orphan_blocks(); + last_check_expired_orphans_epoch = lonely_block_epoch.number(); + } }, Err(err) => { error!("lonely_block_rx err: {}", err); @@ -236,6 +244,21 @@ impl ConsumeOrphan { } } + fn clean_expired_orphan_blocks(&self) { + let epoch = self.shared.snapshot().tip_header().epoch(); + let expired_blocks = self + .orphan_blocks_broker + .clean_expired_blocks(epoch.number()); + if expired_blocks.is_empty() { + return; + } + let expired_blocks_count = expired_blocks.len(); + for block_hash in expired_blocks { + self.shared.remove_header_view(&block_hash); + } + debug!("cleaned {} expired orphan blocks", expired_blocks_count); + } + fn search_orphan_pool(&self) { for leader_hash in self.orphan_blocks_broker.clone_leaders() { if !self From 1486dac0fed0625e71cbd89325b119455139def1 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 11 Jan 2024 12:25:13 +0800 Subject: [PATCH 262/357] Split ChainController out chain_service.rs Signed-off-by: Eval EXEC --- chain/src/chain_controller.rs | 162 ++++++++++++++++++++++++++++++++++ chain/src/chain_service.rs | 156 +------------------------------- chain/src/lib.rs | 4 +- 3 files changed, 169 insertions(+), 153 deletions(-) create mode 100644 chain/src/chain_controller.rs diff --git a/chain/src/chain_controller.rs b/chain/src/chain_controller.rs new file mode 100644 index 0000000000..89ace1f46d --- /dev/null +++ b/chain/src/chain_controller.rs @@ -0,0 +1,162 @@ +//! CKB chain controller. 
+#![allow(missing_docs)] + +use crate::utils::orphan_block_pool::OrphanBlockPool; +use crate::{ + LonelyBlock, LonelyBlockWithCallback, ProcessBlockRequest, TruncateRequest, VerifyCallback, + VerifyResult, +}; +use ckb_channel::Sender; +use ckb_error::{Error, InternalErrorKind}; +use ckb_logger::{self, error}; +use ckb_types::{ + core::{service::Request, BlockView}, + packed::Byte32, +}; +use ckb_verification_traits::{Switch, Verifier}; +use std::sync::Arc; + +/// Controller to the chain service. +/// +/// The controller is internally reference-counted and can be freely cloned. +/// +/// A controller can invoke ChainService methods. +#[cfg_attr(feature = "mock", faux::create)] +#[derive(Clone)] +pub struct ChainController { + process_block_sender: Sender, + truncate_sender: Sender, + orphan_block_broker: Arc, +} + +#[cfg_attr(feature = "mock", faux::methods)] +impl ChainController { + pub(crate) fn new( + process_block_sender: Sender, + truncate_sender: Sender, + orphan_block_broker: Arc, + ) -> Self { + ChainController { + process_block_sender, + truncate_sender, + orphan_block_broker, + } + } + + pub fn asynchronous_process_block_with_switch(&self, block: Arc, switch: Switch) { + self.asynchronous_process_lonely_block(LonelyBlock { + block, + peer_id_with_msg_bytes: None, + switch: Some(switch), + }) + } + + pub fn asynchronous_process_block(&self, block: Arc) { + self.asynchronous_process_lonely_block_with_callback( + LonelyBlock { + block, + peer_id_with_msg_bytes: None, + switch: None, + } + .without_callback(), + ) + } + + pub fn asynchronous_process_block_with_callback( + &self, + block: Arc, + verify_callback: VerifyCallback, + ) { + self.asynchronous_process_lonely_block_with_callback( + LonelyBlock { + block, + peer_id_with_msg_bytes: None, + switch: None, + } + .with_callback(Some(verify_callback)), + ) + } + + pub fn asynchronous_process_lonely_block(&self, lonely_block: LonelyBlock) { + let lonely_block_without_callback: LonelyBlockWithCallback = + 
lonely_block.without_callback(); + + self.asynchronous_process_lonely_block_with_callback(lonely_block_without_callback); + } + + /// Internal method insert block for test + /// + /// switch bit flags for particular verify, make easier to generating test data + pub fn asynchronous_process_lonely_block_with_callback( + &self, + lonely_block_with_callback: LonelyBlockWithCallback, + ) { + if Request::call(&self.process_block_sender, lonely_block_with_callback).is_none() { + error!("Chain service has gone") + } + } + + pub fn blocking_process_block(&self, block: Arc) -> VerifyResult { + self.blocking_process_lonely_block(LonelyBlock { + block, + peer_id_with_msg_bytes: None, + switch: None, + }) + } + + pub fn blocking_process_block_with_switch( + &self, + block: Arc, + switch: Switch, + ) -> VerifyResult { + self.blocking_process_lonely_block(LonelyBlock { + block, + peer_id_with_msg_bytes: None, + switch: Some(switch), + }) + } + + pub fn blocking_process_lonely_block(&self, lonely_block: LonelyBlock) -> VerifyResult { + let (verify_result_tx, verify_result_rx) = ckb_channel::oneshot::channel::(); + + let verify_callback = { + move |result: VerifyResult| { + if let Err(err) = verify_result_tx.send(result) { + error!( + "blocking send verify_result failed: {}, this shouldn't happen", + err + ) + } + } + }; + + let lonely_block_with_callback = + lonely_block.with_callback(Some(Box::new(verify_callback))); + self.asynchronous_process_lonely_block_with_callback(lonely_block_with_callback); + verify_result_rx.recv().unwrap_or_else(|err| { + Err(InternalErrorKind::System + .other(format!("blocking recv verify_result failed: {}", err)) + .into()) + }) + } + + /// Truncate chain to specified target + /// + /// Should use for testing only + pub fn truncate(&self, target_tip_hash: Byte32) -> Result<(), Error> { + Request::call(&self.truncate_sender, target_tip_hash).unwrap_or_else(|| { + Err(InternalErrorKind::System + .other("Chain service has gone") + .into()) + }) + } + + 
// Relay need this + pub fn get_orphan_block(&self, hash: &Byte32) -> Option> { + self.orphan_block_broker.get_block(hash) + } + + pub fn orphan_blocks_len(&self) -> usize { + self.orphan_block_broker.len() + } +} diff --git a/chain/src/chain_service.rs b/chain/src/chain_service.rs index 3056e06411..9e0e252765 100644 --- a/chain/src/chain_service.rs +++ b/chain/src/chain_service.rs @@ -4,8 +4,8 @@ use crate::consume_unverified::ConsumeUnverifiedBlocks; use crate::utils::orphan_block_pool::OrphanBlockPool; use crate::{ - tell_synchronizer_to_punish_the_bad_peer, LonelyBlock, LonelyBlockWithCallback, - ProcessBlockRequest, TruncateRequest, UnverifiedBlock, VerifyCallback, VerifyResult, + tell_synchronizer_to_punish_the_bad_peer, ChainController, LonelyBlockWithCallback, + ProcessBlockRequest, UnverifiedBlock, }; use ckb_channel::{self as channel, select, Receiver, SendError, Sender}; use ckb_constant::sync::BLOCK_DOWNLOAD_WINDOW; @@ -16,162 +16,14 @@ use ckb_shared::shared::Shared; use ckb_shared::types::VerifyFailedBlockInfo; use ckb_shared::ChainServicesBuilder; use ckb_stop_handler::{new_crossbeam_exit_rx, register_thread}; -use ckb_types::{ - core::{service::Request, BlockView}, - packed::Byte32, -}; +use ckb_types::core::{service::Request, BlockView}; use ckb_verification::{BlockVerifier, NonContextualBlockTxsVerifier}; -use ckb_verification_traits::{Switch, Verifier}; +use ckb_verification_traits::Verifier; use std::sync::Arc; use std::thread; const ORPHAN_BLOCK_SIZE: usize = (BLOCK_DOWNLOAD_WINDOW * 2) as usize; -/// Controller to the chain service. -/// -/// The controller is internally reference-counted and can be freely cloned. -/// -/// A controller can invoke ChainService methods. 
-#[cfg_attr(feature = "mock", faux::create)] -#[derive(Clone)] -pub struct ChainController { - process_block_sender: Sender, - truncate_sender: Sender, - orphan_block_broker: Arc, -} - -#[cfg_attr(feature = "mock", faux::methods)] -impl ChainController { - fn new( - process_block_sender: Sender, - truncate_sender: Sender, - orphan_block_broker: Arc, - ) -> Self { - ChainController { - process_block_sender, - truncate_sender, - orphan_block_broker, - } - } - - pub fn asynchronous_process_block_with_switch(&self, block: Arc, switch: Switch) { - self.asynchronous_process_lonely_block(LonelyBlock { - block, - peer_id_with_msg_bytes: None, - switch: Some(switch), - }) - } - - pub fn asynchronous_process_block(&self, block: Arc) { - self.asynchronous_process_lonely_block_with_callback( - LonelyBlock { - block, - peer_id_with_msg_bytes: None, - switch: None, - } - .without_callback(), - ) - } - - pub fn asynchronous_process_block_with_callback( - &self, - block: Arc, - verify_callback: VerifyCallback, - ) { - self.asynchronous_process_lonely_block_with_callback( - LonelyBlock { - block, - peer_id_with_msg_bytes: None, - switch: None, - } - .with_callback(Some(verify_callback)), - ) - } - - pub fn asynchronous_process_lonely_block(&self, lonely_block: LonelyBlock) { - let lonely_block_without_callback: LonelyBlockWithCallback = - lonely_block.without_callback(); - - self.asynchronous_process_lonely_block_with_callback(lonely_block_without_callback); - } - - /// Internal method insert block for test - /// - /// switch bit flags for particular verify, make easier to generating test data - pub fn asynchronous_process_lonely_block_with_callback( - &self, - lonely_block_with_callback: LonelyBlockWithCallback, - ) { - if Request::call(&self.process_block_sender, lonely_block_with_callback).is_none() { - error!("Chain service has gone") - } - } - - pub fn blocking_process_block(&self, block: Arc) -> VerifyResult { - self.blocking_process_lonely_block(LonelyBlock { - block, - 
peer_id_with_msg_bytes: None, - switch: None, - }) - } - - pub fn blocking_process_block_with_switch( - &self, - block: Arc, - switch: Switch, - ) -> VerifyResult { - self.blocking_process_lonely_block(LonelyBlock { - block, - peer_id_with_msg_bytes: None, - switch: Some(switch), - }) - } - - pub fn blocking_process_lonely_block(&self, lonely_block: LonelyBlock) -> VerifyResult { - let (verify_result_tx, verify_result_rx) = ckb_channel::oneshot::channel::(); - - let verify_callback = { - move |result: VerifyResult| { - if let Err(err) = verify_result_tx.send(result) { - error!( - "blocking send verify_result failed: {}, this shouldn't happen", - err - ) - } - } - }; - - let lonely_block_with_callback = - lonely_block.with_callback(Some(Box::new(verify_callback))); - self.asynchronous_process_lonely_block_with_callback(lonely_block_with_callback); - verify_result_rx.recv().unwrap_or_else(|err| { - Err(InternalErrorKind::System - .other(format!("blocking recv verify_result failed: {}", err)) - .into()) - }) - } - - /// Truncate chain to specified target - /// - /// Should use for testing only - pub fn truncate(&self, target_tip_hash: Byte32) -> Result<(), Error> { - Request::call(&self.truncate_sender, target_tip_hash).unwrap_or_else(|| { - Err(InternalErrorKind::System - .other("Chain service has gone") - .into()) - }) - } - - // Relay need this - pub fn get_orphan_block(&self, hash: &Byte32) -> Option> { - self.orphan_block_broker.get_block(hash) - } - - pub fn orphan_blocks_len(&self) -> usize { - self.orphan_block_broker.len() - } -} - pub fn start_chain_services(builder: ChainServicesBuilder) -> ChainController { let orphan_blocks_broker = Arc::new(OrphanBlockPool::with_capacity(ORPHAN_BLOCK_SIZE)); diff --git a/chain/src/lib.rs b/chain/src/lib.rs index d1b5df1c1e..b8ed3f6053 100644 --- a/chain/src/lib.rs +++ b/chain/src/lib.rs @@ -14,6 +14,7 @@ use ckb_types::core::{BlockNumber, BlockView, HeaderView}; use ckb_types::packed::Byte32; use 
ckb_verification_traits::Switch; use std::sync::Arc; +mod chain_controller; mod chain_service; mod consume_orphan; mod consume_unverified; @@ -21,7 +22,8 @@ mod consume_unverified; mod tests; mod utils; -pub use chain_service::{start_chain_services, ChainController}; +pub use chain_controller::ChainController; +pub use chain_service::start_chain_services; type ProcessBlockRequest = Request; type TruncateRequest = Request>; From f0c2b66f191e0a5cb3426bd2c5856287f1490df4 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 11 Jan 2024 13:48:47 +0800 Subject: [PATCH 263/357] ChainService should mark block as BLOCK_INVALID if it does not pass non_contextual_verify --- chain/src/chain_controller.rs | 2 +- chain/src/chain_service.rs | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/chain/src/chain_controller.rs b/chain/src/chain_controller.rs index 89ace1f46d..3b410601c4 100644 --- a/chain/src/chain_controller.rs +++ b/chain/src/chain_controller.rs @@ -13,7 +13,7 @@ use ckb_types::{ core::{service::Request, BlockView}, packed::Byte32, }; -use ckb_verification_traits::{Switch, Verifier}; +use ckb_verification_traits::Switch; use std::sync::Arc; /// Controller to the chain service. 
diff --git a/chain/src/chain_service.rs b/chain/src/chain_service.rs index 9e0e252765..447ca811fd 100644 --- a/chain/src/chain_service.rs +++ b/chain/src/chain_service.rs @@ -12,6 +12,7 @@ use ckb_constant::sync::BLOCK_DOWNLOAD_WINDOW; use ckb_error::{Error, InternalErrorKind}; use ckb_logger::{self, debug, error, info, warn}; use ckb_network::tokio; +use ckb_shared::block_status::BlockStatus; use ckb_shared::shared::Shared; use ckb_shared::types::VerifyFailedBlockInfo; use ckb_shared::ChainServicesBuilder; @@ -196,6 +197,12 @@ impl ChainService { { let result = self.non_contextual_verify(lonely_block.block()); if let Err(err) = result { + error!( + "block {}-{} verify failed: {:?}", + block_number, block_hash, err + ); + self.shared + .insert_block_status(lonely_block.block().hash(), BlockStatus::BLOCK_INVALID); tell_synchronizer_to_punish_the_bad_peer( self.verify_failed_blocks_tx.clone(), lonely_block.peer_id_with_msg_bytes(), From fd6514372c589e0c93fd9d8f8bba37026a4b9cd3 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 11 Jan 2024 16:26:24 +0800 Subject: [PATCH 264/357] Change `VerifyResult` to `Result` --- chain/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chain/src/lib.rs b/chain/src/lib.rs index b8ed3f6053..fa0e31c701 100644 --- a/chain/src/lib.rs +++ b/chain/src/lib.rs @@ -29,7 +29,7 @@ type ProcessBlockRequest = Request; type TruncateRequest = Request>; /// VerifyResult is the result type to represent the result of block verification -pub type VerifyResult = Result; +pub type VerifyResult = Result; /// VerifyCallback is the callback type to be called after block verification pub type VerifyCallback = Box; From eb10c416908ed41a89f98918595324291c7cc357 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 11 Jan 2024 16:26:59 +0800 Subject: [PATCH 265/357] ConsumeUnverified do not need VerifiedBlockStatus enum type anymore --- chain/src/consume_unverified.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff 
--git a/chain/src/consume_unverified.rs b/chain/src/consume_unverified.rs index 26394a42c5..f98c2b3a3b 100644 --- a/chain/src/consume_unverified.rs +++ b/chain/src/consume_unverified.rs @@ -224,7 +224,7 @@ impl ConsumeUnverifiedBlockProcessor { verified ); return if verified { - Ok(VerifiedBlockStatus::PreviouslySeenAndVerified) + Ok(true) } else { Err(InternalErrorKind::Other .other("block previously verified failed") @@ -346,8 +346,6 @@ impl ConsumeUnverifiedBlockProcessor { if let Some(metrics) = ckb_metrics::handle() { metrics.ckb_chain_tip.set(block.header().number() as i64); } - - Ok(VerifiedBlockStatus::FirstSeenAndVerified) } else { self.shared.refresh_snapshot(); info!( @@ -366,8 +364,8 @@ impl ConsumeUnverifiedBlockProcessor { error!("[verify block] notify new_uncle error {}", e); } } - Ok(VerifiedBlockStatus::UncleBlockNotVerified) } + Ok(true) } pub(crate) fn update_proposal_table(&mut self, fork: &ForkChanges) { From 4556e1aa5673ad6310169411190d24e151a2ebad Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 11 Jan 2024 16:32:45 +0800 Subject: [PATCH 266/357] Do not need VerifiedBlockStatus in ckb-rpc and ckb-sync --- rpc/src/module/miner.rs | 13 +++++-------- sync/src/relayer/mod.rs | 27 +++++++++++++++++---------- sync/src/synchronizer/mod.rs | 20 ++++++-------------- 3 files changed, 28 insertions(+), 32 deletions(-) diff --git a/rpc/src/module/miner.rs b/rpc/src/module/miner.rs index cb88da52b1..f4a97aa254 100644 --- a/rpc/src/module/miner.rs +++ b/rpc/src/module/miner.rs @@ -275,14 +275,11 @@ impl MinerRpc for MinerRpcImpl { .verify(&header) .map_err(|err| handle_submit_error(&work_id, &err))?; - let verify_result: VerifyResult = self.chain.blocking_process_block(Arc::clone(&block)); - - // TODO: review this logic - let is_new = matches!( - verify_result, - Ok(VerifiedBlockStatus::FirstSeenAndVerified - | VerifiedBlockStatus::UncleBlockNotVerified) - ); + // Verify and insert block + let is_new = self + .chain + 
.blocking_process_block(Arc::clone(&block)) + .map_err(|err| handle_submit_error(&work_id, &err))?; // Announce only new block if is_new { diff --git a/sync/src/relayer/mod.rs b/sync/src/relayer/mod.rs index a5f677a4a3..b3facdcef7 100644 --- a/sync/src/relayer/mod.rs +++ b/sync/src/relayer/mod.rs @@ -26,9 +26,11 @@ use crate::utils::{ }; use crate::{Status, StatusCode}; use ckb_chain::ChainController; -use ckb_chain::{VerifiedBlockStatus, VerifyResult}; +use ckb_chain::VerifyResult; use ckb_constant::sync::BAD_MESSAGE_BAN_TIME; -use ckb_logger::{debug_target, error, error_target, info_target, trace_target, warn_target}; +use ckb_logger::{ + debug, debug_target, error, error_target, info_target, trace_target, warn_target, +}; use ckb_network::{ async_trait, bytes::Bytes, tokio, CKBProtocolContext, CKBProtocolHandler, PeerIndex, SupportProtocols, TargetSession, @@ -316,17 +318,22 @@ impl Relayer { let broadcast_compact_block_tx = self.broadcast_compact_block_tx.clone(); let block = Arc::clone(&block); move |result: VerifyResult| match result { - Ok(verified_block_status) => match verified_block_status { - VerifiedBlockStatus::FirstSeenAndVerified - | VerifiedBlockStatus::UncleBlockNotVerified => { - if broadcast_compact_block_tx.send((block, peer)).is_err() { - error!( + Ok(verified) => { + if !verified { + debug!( + "block {}-{} has verified already, won't build compact block and broadcast it", + block.number(), + block.hash() + ); + return; + } + + if broadcast_compact_block_tx.send((block, peer)).is_err() { + error!( "send block to broadcast_compact_block_tx failed, this shouldn't happen", ); - } } - _ => {} - }, + } Err(err) => { error!( "verify block {}-{} failed: {:?}, won't build compact block and broadcast it", diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index bd6e10c217..763c773820 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -392,20 +392,12 @@ impl Synchronizer { error!("block {} already 
partial stored", block_hash); Ok(false) } else if status.contains(BlockStatus::HEADER_VALID) { - self.shared - .blocking_insert_new_block_with_verbose_info( - &self.chain, - Arc::new(block), - peer_id, - message_bytes, - ) - .map(|v| { - matches!( - v, - ckb_chain::VerifiedBlockStatus::FirstSeenAndVerified - | ckb_chain::VerifiedBlockStatus::UncleBlockNotVerified - ) - }) + self.shared.blocking_insert_new_block_with_verbose_info( + &self.chain, + Arc::new(block), + peer_id, + message_bytes, + ) } else { debug!( "Synchronizer process_new_block unexpected status {:?} {}", From e03d3c04728924346eb22352e24bf22700913ad3 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 11 Jan 2024 16:33:02 +0800 Subject: [PATCH 267/357] Fix unit test: do not need VerifiedBlockStatus --- chain/src/tests/basic.rs | 18 +++----- sync/src/tests/sync_shared.rs | 45 +++++++------------ .../src/tests/utils/chain.rs | 5 +-- 3 files changed, 22 insertions(+), 46 deletions(-) diff --git a/chain/src/tests/basic.rs b/chain/src/tests/basic.rs index b264bb0204..d57269a2f6 100644 --- a/chain/src/tests/basic.rs +++ b/chain/src/tests/basic.rs @@ -34,12 +34,9 @@ fn repeat_process_block() { chain.gen_empty_block_with_nonce(100u128, &mock_store); let block = Arc::new(chain.blocks().last().unwrap().clone()); - assert_eq!( - chain_controller - .blocking_process_block(Arc::clone(&block)) - .expect("process block ok"), - VerifiedBlockStatus::FirstSeenAndVerified - ); + assert!(chain_controller + .blocking_process_block(Arc::clone(&block)) + .expect("process block ok")); assert_eq!( shared .store() @@ -49,12 +46,9 @@ fn repeat_process_block() { Some(true) ); - assert_ne!( - chain_controller - .blocking_process_block(Arc::clone(&block)) - .expect("process block ok"), - VerifiedBlockStatus::FirstSeenAndVerified - ); + assert!(!chain_controller + .blocking_process_block(Arc::clone(&block)) + .expect("process block ok")); assert_eq!( shared .store() diff --git a/sync/src/tests/sync_shared.rs 
b/sync/src/tests/sync_shared.rs index 1e3d99d77d..7d1a0d435e 100644 --- a/sync/src/tests/sync_shared.rs +++ b/sync/src/tests/sync_shared.rs @@ -21,18 +21,12 @@ fn test_insert_new_block() { Arc::new(next_block) }; - assert!(matches!( - shared - .blocking_insert_new_block(&chain, Arc::clone(&new_block)) - .expect("insert valid block"), - VerifiedBlockStatus::FirstSeenAndVerified, - )); - assert!(matches!( - shared - .blocking_insert_new_block(&chain, Arc::clone(&new_block)) - .expect("insert duplicated valid block"), - VerifiedBlockStatus::PreviouslySeenAndVerified, - )); + assert!(shared + .blocking_insert_new_block(&chain, Arc::clone(&new_block)) + .expect("insert valid block")); + assert!(!shared + .blocking_insert_new_block(&chain, Arc::clone(&new_block)) + .expect("insert duplicated valid block"),); } #[test] @@ -108,12 +102,9 @@ fn test_insert_parent_unknown_block() { // After inserting parent of an orphan block - assert!(matches!( - shared - .blocking_insert_new_block(&chain, Arc::clone(&parent)) - .expect("insert parent of orphan block"), - VerifiedBlockStatus::FirstSeenAndVerified | VerifiedBlockStatus::UncleBlockNotVerified, - )); + assert!(shared + .blocking_insert_new_block(&chain, Arc::clone(&parent)) + .expect("insert parent of orphan block")); assert_eq!( shared.active_chain().get_block_status(&valid_hash), BlockStatus::BLOCK_VALID @@ -153,12 +144,9 @@ fn test_switch_valid_fork() { let mut valid_fork = Vec::new(); for _ in 2..shared.active_chain().tip_number() { let block = make_valid_block(shared.shared(), parent_hash.clone()); - assert_eq!( - shared - .blocking_insert_new_block(&chain, Arc::new(block.clone())) - .expect("insert fork"), - VerifiedBlockStatus::UncleBlockNotVerified - ); + assert!(shared + .blocking_insert_new_block(&chain, Arc::new(block.clone())) + .expect("insert fork")); parent_hash = block.header().hash(); valid_fork.push(block); @@ -176,12 +164,9 @@ fn test_switch_valid_fork() { // Make the fork switch as the main chain. 
for _ in tip_number..tip_number + 2 { let block = inherit_block(shared.shared(), &parent_hash.clone()).build(); - assert!(matches!( - shared - .blocking_insert_new_block(&chain, Arc::new(block.clone())) - .expect("insert fork"), - VerifiedBlockStatus::FirstSeenAndVerified | VerifiedBlockStatus::UncleBlockNotVerified, - )); + assert!(shared + .blocking_insert_new_block(&chain, Arc::new(block.clone())) + .expect("insert fork")); parent_hash = block.header().hash(); valid_fork.push(block); diff --git a/util/light-client-protocol-server/src/tests/utils/chain.rs b/util/light-client-protocol-server/src/tests/utils/chain.rs index c9d4cd00ad..d8d4f0e276 100644 --- a/util/light-client-protocol-server/src/tests/utils/chain.rs +++ b/util/light-client-protocol-server/src/tests/utils/chain.rs @@ -147,10 +147,7 @@ impl MockChain { .blocking_process_block(Arc::new(block)) .expect("process block"); assert!( - matches!( - verified_block_status, - VerifiedBlockStatus::FirstSeenAndVerified - ), + verified_block_status, "failed to process block {block_number}" ); while self From cf0222651b6145d18320727a870545f2275d59ee Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 11 Jan 2024 16:42:33 +0800 Subject: [PATCH 268/357] Remove VerifiedBlockStatus enum type and related imports statements --- chain/src/consume_unverified.rs | 3 +-- chain/src/lib.rs | 14 -------------- chain/src/tests/basic.rs | 3 +-- docs/ckb_async_block_sync.mermaid | 2 +- rpc/src/module/miner.rs | 2 +- sync/src/tests/sync_shared.rs | 2 +- .../src/tests/utils/chain.rs | 1 - 7 files changed, 5 insertions(+), 22 deletions(-) diff --git a/chain/src/consume_unverified.rs b/chain/src/consume_unverified.rs index f98c2b3a3b..3f973c5702 100644 --- a/chain/src/consume_unverified.rs +++ b/chain/src/consume_unverified.rs @@ -1,7 +1,6 @@ use crate::{ tell_synchronizer_to_punish_the_bad_peer, utils::forkchanges::ForkChanges, GlobalIndex, - LonelyBlock, LonelyBlockWithCallback, TruncateRequest, UnverifiedBlock, VerifiedBlockStatus, 
- VerifyResult, + LonelyBlock, LonelyBlockWithCallback, TruncateRequest, UnverifiedBlock, VerifyResult, }; use ckb_channel::{select, Receiver}; use ckb_error::{Error, InternalErrorKind}; diff --git a/chain/src/lib.rs b/chain/src/lib.rs index fa0e31c701..1230f9962d 100644 --- a/chain/src/lib.rs +++ b/chain/src/lib.rs @@ -34,20 +34,6 @@ pub type VerifyResult = Result; /// VerifyCallback is the callback type to be called after block verification pub type VerifyCallback = Box; -/// VerifiedBlockStatus is -#[derive(Debug, Clone, PartialEq)] -pub enum VerifiedBlockStatus { - /// The block is being seen for the first time, and VM have verified it - FirstSeenAndVerified, - - /// The block is being seen for the first time - /// but VM have not verified it since its a uncle block - UncleBlockNotVerified, - - /// The block has been verified before. - PreviouslySeenAndVerified, -} - /// LonelyBlock is the block which we have not check weather its parent is stored yet #[derive(Clone)] pub struct LonelyBlock { diff --git a/chain/src/tests/basic.rs b/chain/src/tests/basic.rs index d57269a2f6..273cd9c9b8 100644 --- a/chain/src/tests/basic.rs +++ b/chain/src/tests/basic.rs @@ -1,6 +1,5 @@ -use crate::chain::ChainController; use crate::tests::util::start_chain; -use crate::VerifiedBlockStatus; +use crate::ChainController; use ckb_chain_spec::consensus::{Consensus, ConsensusBuilder}; use ckb_dao_utils::genesis_dao_data; use ckb_error::assert_error_eq; diff --git a/docs/ckb_async_block_sync.mermaid b/docs/ckb_async_block_sync.mermaid index bad6ef2efc..cef652da5d 100644 --- a/docs/ckb_async_block_sync.mermaid +++ b/docs/ckb_async_block_sync.mermaid @@ -69,7 +69,7 @@ sequenceDiagram Note over Sp: call nc.ban_peer() to punish the malicious peer end opt Execute Callback - Note over CV: callback: Box) + Send + Sync> + Note over CV: callback: Box) + Send + Sync> end end diff --git a/rpc/src/module/miner.rs b/rpc/src/module/miner.rs index f4a97aa254..d844e33f4d 100644 --- 
a/rpc/src/module/miner.rs +++ b/rpc/src/module/miner.rs @@ -1,6 +1,6 @@ use crate::error::RPCError; use async_trait::async_trait; -use ckb_chain::{ChainController, VerifiedBlockStatus, VerifyResult}; +use ckb_chain::ChainController; use ckb_jsonrpc_types::{Block, BlockTemplate, Uint64, Version}; use ckb_logger::{debug, error, warn}; use ckb_network::{NetworkController, PeerIndex, SupportProtocols, TargetSession}; diff --git a/sync/src/tests/sync_shared.rs b/sync/src/tests/sync_shared.rs index 7d1a0d435e..bdd069ac9a 100644 --- a/sync/src/tests/sync_shared.rs +++ b/sync/src/tests/sync_shared.rs @@ -3,7 +3,7 @@ use crate::tests::util::{build_chain, inherit_block}; use crate::SyncShared; -use ckb_chain::{start_chain_services, VerifiedBlockStatus}; +use ckb_chain::start_chain_services; use ckb_shared::block_status::BlockStatus; use ckb_shared::SharedBuilder; use ckb_store::{self, ChainStore}; diff --git a/util/light-client-protocol-server/src/tests/utils/chain.rs b/util/light-client-protocol-server/src/tests/utils/chain.rs index d8d4f0e276..4c906dbc4c 100644 --- a/util/light-client-protocol-server/src/tests/utils/chain.rs +++ b/util/light-client-protocol-server/src/tests/utils/chain.rs @@ -4,7 +4,6 @@ use std::{ }; use ckb_app_config::{BlockAssemblerConfig, NetworkConfig}; -use ckb_chain::VerifiedBlockStatus; use ckb_chain::{start_chain_services, ChainController}; use ckb_chain_spec::consensus::{build_genesis_epoch_ext, ConsensusBuilder}; use ckb_dao_utils::genesis_dao_data; From 9e6badf79377e8737088cebeef03bd21c1029d01 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Fri, 12 Jan 2024 00:03:20 +0800 Subject: [PATCH 269/357] Remove useless crate dependencies, fix warnings of check-cargotoml.sh --- Cargo.lock | 3 --- rpc/Cargo.toml | 1 - sync/Cargo.toml | 1 - util/launcher/Cargo.toml | 1 - 4 files changed, 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8f94f9ee77..4c03a2f3ac 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -972,7 +972,6 @@ dependencies = [ 
"ckb-logger", "ckb-network", "ckb-network-alert", - "ckb-proposal-table", "ckb-resource", "ckb-rpc", "ckb-shared", @@ -1352,7 +1351,6 @@ dependencies = [ "ckb-async-runtime", "ckb-chain", "ckb-chain-spec", - "ckb-channel", "ckb-constant", "ckb-dao", "ckb-dao-utils", @@ -1529,7 +1527,6 @@ name = "ckb-sync" version = "0.114.0-pre" dependencies = [ "ckb-app-config", - "ckb-async-runtime", "ckb-chain", "ckb-chain-spec", "ckb-channel", diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 617ee95c76..9493c673bb 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -50,7 +50,6 @@ async-stream = "0.3.3" ckb-async-runtime = { path = "../util/runtime", version = "= 0.114.0-pre" } # issue tracking: https://github.com/GREsau/schemars/pull/251 schemars = { version = "0.8.16", package = "ckb_schemars" } -ckb-channel = { path = "../util/channel", version = "= 0.114.0-pre" } [dev-dependencies] reqwest = { version = "=0.11.20", features = ["blocking", "json"] } diff --git a/sync/Cargo.toml b/sync/Cargo.toml index a64b886021..0fc96fedba 100644 --- a/sync/Cargo.toml +++ b/sync/Cargo.toml @@ -27,7 +27,6 @@ ckb-error = {path = "../error", version = "= 0.114.0-pre"} ckb-tx-pool = { path = "../tx-pool", version = "= 0.114.0-pre" } sentry = { version = "0.26.0", optional = true } ckb-constant = { path = "../util/constant", version = "= 0.114.0-pre" } -ckb-async-runtime = { path = "../util/runtime", version = "= 0.114.0-pre" } ckb-stop-handler = { path = "../util/stop-handler", version = "= 0.114.0-pre" } tokio = { version = "1", features = ["sync"] } lru = "0.7.1" diff --git a/util/launcher/Cargo.toml b/util/launcher/Cargo.toml index fd2fd8b1c0..7bc96c3b94 100644 --- a/util/launcher/Cargo.toml +++ b/util/launcher/Cargo.toml @@ -27,7 +27,6 @@ ckb-sync = { path = "../../sync", version = "= 0.114.0-pre"} ckb-verification = { path = "../../verification", version = "= 0.114.0-pre" } ckb-verification-traits = { path = "../../verification/traits", version = "= 0.114.0-pre" } ckb-async-runtime = { 
path = "../runtime", version = "= 0.114.0-pre" } -ckb-proposal-table = { path = "../proposal-table", version = "= 0.114.0-pre" } ckb-channel = { path = "../channel", version = "= 0.114.0-pre" } ckb-tx-pool = { path = "../../tx-pool", version = "= 0.114.0-pre" } ckb-light-client-protocol-server = { path = "../light-client-protocol-server", version = "= 0.114.0-pre" } From 4dcea975d23204f69d9e45f50d9cb0d90fbde5e9 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Fri, 12 Jan 2024 00:38:18 +0800 Subject: [PATCH 270/357] Fix ConsumeUnverified: should return `Ok(false)` if it's a block which has been verified before --- chain/src/consume_unverified.rs | 2 +- chain/src/lib.rs | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/chain/src/consume_unverified.rs b/chain/src/consume_unverified.rs index 3f973c5702..4dc65d5938 100644 --- a/chain/src/consume_unverified.rs +++ b/chain/src/consume_unverified.rs @@ -223,7 +223,7 @@ impl ConsumeUnverifiedBlockProcessor { verified ); return if verified { - Ok(true) + Ok(false) } else { Err(InternalErrorKind::Other .other("block previously verified failed") diff --git a/chain/src/lib.rs b/chain/src/lib.rs index 1230f9962d..89537a5d38 100644 --- a/chain/src/lib.rs +++ b/chain/src/lib.rs @@ -29,6 +29,10 @@ type ProcessBlockRequest = Request; type TruncateRequest = Request>; /// VerifyResult is the result type to represent the result of block verification +/// +/// Ok(true) : it's a newly verified block +/// Ok(false): it's a block which has been verified before +/// Err(err) : it's a block which failed to verify pub type VerifyResult = Result; /// VerifyCallback is the callback type to be called after block verification From c2fc589a400f2db08d41bbc8b160068965c87008 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Fri, 12 Jan 2024 00:40:56 +0800 Subject: [PATCH 271/357] Fix lint, remove whitespace --- devtools/block_sync/draw_sync_chart.py | 10 +++++----- docs/ckb_sync.mermaid | 8 ++++---- 2 files changed, 9 insertions(+), 9 
deletions(-) diff --git a/devtools/block_sync/draw_sync_chart.py b/devtools/block_sync/draw_sync_chart.py index b2159d4740..e932b7414a 100755 --- a/devtools/block_sync/draw_sync_chart.py +++ b/devtools/block_sync/draw_sync_chart.py @@ -29,8 +29,8 @@ def parse_sync_statics(log_file): timestamp_str = re.search(r'^(\S+ \S+)', line).group(1) # Extract the timestamp string timestamp = datetime.datetime.strptime(timestamp_str, "%Y-%m-%d %H:%M:%S.%f").timestamp() base_timestamp = timestamp - - + + if line.find('INFO ckb_chain::chain block: ') != -1: block_number = int(re.search(r'block: (\d+)', line).group(1)) # Extract the block number using regex @@ -77,7 +77,7 @@ def process_task(task): tasks = [(ckb_log_file, label) for ckb_log_file, label in tasks] - + import multiprocessing with multiprocessing.Pool() as pool: @@ -123,7 +123,7 @@ def process_task(task): ax.get_yaxis().get_major_formatter().set_scientific(False) ax.get_yaxis().get_major_formatter().set_useOffset(False) - + ax.margins(0) ax.set_axisbelow(True) @@ -133,7 +133,7 @@ def process_task(task): ax.xaxis.grid(color='gray', linestyle='dashed', which='minor') ax.yaxis.grid(color='gray', linestyle='dashed', which='minor') - + xminorLocator = MultipleLocator(1.0) ax.xaxis.set_minor_locator(xminorLocator) diff --git a/docs/ckb_sync.mermaid b/docs/ckb_sync.mermaid index 7fa807f337..c24a7f0640 100644 --- a/docs/ckb_sync.mermaid +++ b/docs/ckb_sync.mermaid @@ -15,9 +15,9 @@ sequenceDiagram box crate:ckb_chain participant C end - + Note left of S: synchronizer received
Block(122) from remote peer - + Note over S: try_process SyncMessageUnionReader::SendBlock @@ -27,7 +27,7 @@ sequenceDiagram Note over C: insert_block(Block(122)) C->>-BP: return result of process_block(Block(122)) BP->>-S: return result of BlockProcess::execute(Block(122)) - + alt block is Valid Note over S: going on else block is Invalid @@ -42,7 +42,7 @@ sequenceDiagram Note over C: insert_block(Block(123)) C->>-BP: return result of process_block(Block(123)) BP->>-S: return result of BlockProcess::execute(Block(123)) - + alt block is Valid Note over S: going on else block is Invalid From 0e262b3ef69d91aadd2fc285ecedfd8da139031a Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 15 Jan 2024 10:59:20 +0800 Subject: [PATCH 272/357] BlockFetcher calculate `start` and ancestor header should aware IBDState --- sync/src/synchronizer/block_fetcher.rs | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/sync/src/synchronizer/block_fetcher.rs b/sync/src/synchronizer/block_fetcher.rs index cd2ba1bbef..f6c48a1a00 100644 --- a/sync/src/synchronizer/block_fetcher.rs +++ b/sync/src/synchronizer/block_fetcher.rs @@ -164,7 +164,12 @@ impl BlockFetcher { let state = self.sync_shared.state(); - let mut start = self.sync_shared.shared().get_unverified_tip().number() + 1; + let mut start = { + match self.ibd { + IBDState::In => self.sync_shared.shared().get_unverified_tip().number() + 1, + IBDState::Out => last_common.number() + 1, + } + }; let mut end = min(best_known.number(), start + BLOCK_DOWNLOAD_WINDOW); let n_fetch = min( end.saturating_sub(start) as usize + 1, @@ -183,9 +188,17 @@ impl BlockFetcher { let span = min(end - start + 1, (n_fetch - fetch.len()) as u64); // Iterate in range `[start, start+span)` and consider as the next to-fetch candidates. 
- let mut header = self - .active_chain - .get_ancestor_with_unverified(&best_known.hash(), start + span - 1)?; + let mut header: HeaderIndexView = { + match self.ibd { + IBDState::In => self + .active_chain + .get_ancestor_with_unverified(&best_known.hash(), start + span - 1), + IBDState::Out => self + .active_chain + .get_ancestor(&best_known.hash(), start + span - 1), + } + }?; + let mut status = self.sync_shared.shared().get_block_status(&header.hash()); // Judge whether we should fetch the target block, neither stored nor in-flighted From cc688cd23a92a0a08a2305a7215a481bdb52d20f Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 15 Jan 2024 11:11:33 +0800 Subject: [PATCH 273/357] BlockFetcher check whether to request block from a peer should aware IBDState --- sync/src/synchronizer/block_fetcher.rs | 34 ++++++++++++++++++-------- 1 file changed, 24 insertions(+), 10 deletions(-) diff --git a/sync/src/synchronizer/block_fetcher.rs b/sync/src/synchronizer/block_fetcher.rs index f6c48a1a00..e8ff0725df 100644 --- a/sync/src/synchronizer/block_fetcher.rs +++ b/sync/src/synchronizer/block_fetcher.rs @@ -151,16 +151,30 @@ impl BlockFetcher { if last_common == best_known { return None; } - - if best_known.number() <= self.sync_shared.shared().get_unverified_tip().number() { - debug!( - "Peer {}'s best known: {} is less or equal than unverified_tip : {}", - self.peer, - best_known.number(), - self.sync_shared.shared().get_unverified_tip().number() - ); - return None; - } + match self.ibd { + IBDState::In => { + if last_common.number() <= self.active_chain.unverified_tip_number() { + debug!( + "In IBD mode, Peer {}'s last common: {} is less or equal than unverified_tip : {}, won't request block from this peer", + self.peer, + last_common.number(), + self.active_chain.unverified_tip_number() + ); + return None; + } + } + IBDState::Out => { + if last_common.number() <= self.active_chain.tip_number() { + debug!( + "Out IBD mode, Peer {}'s last common: {} is less or equal 
than tip : {}, won't request block from this peer", + self.peer, + last_common.number(), + self.active_chain.tip_number() + ); + return None; + } + } + }; let state = self.sync_shared.state(); From b40d9316ff74ab1069122fbfad163bfc1a7dfbcc Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 15 Jan 2024 12:08:29 +0800 Subject: [PATCH 274/357] BlockFetcher should only return if best_known <= unverified_tip in IBD mode Signed-off-by: Eval EXEC --- sync/src/synchronizer/block_fetcher.rs | 25 ++++++------------------- 1 file changed, 6 insertions(+), 19 deletions(-) diff --git a/sync/src/synchronizer/block_fetcher.rs b/sync/src/synchronizer/block_fetcher.rs index e8ff0725df..50f8203348 100644 --- a/sync/src/synchronizer/block_fetcher.rs +++ b/sync/src/synchronizer/block_fetcher.rs @@ -151,28 +151,15 @@ impl BlockFetcher { if last_common == best_known { return None; } - match self.ibd { - IBDState::In => { - if last_common.number() <= self.active_chain.unverified_tip_number() { - debug!( - "In IBD mode, Peer {}'s last common: {} is less or equal than unverified_tip : {}, won't request block from this peer", + + if matches!(self.ibd, IBDState::In) { + if best_known.number() <= self.active_chain.unverified_tip_number() { + debug!("In IBD mode, Peer {}'s best_known: {} is less or equal than unverified_tip : {}, won't request block from this peer", self.peer, - last_common.number(), + best_known.number(), self.active_chain.unverified_tip_number() ); - return None; - } - } - IBDState::Out => { - if last_common.number() <= self.active_chain.tip_number() { - debug!( - "Out IBD mode, Peer {}'s last common: {} is less or equal than tip : {}, won't request block from this peer", - self.peer, - last_common.number(), - self.active_chain.tip_number() - ); - return None; - } + return None; } }; From 261a90070cc567c8f36788d98114d2e944e8ea23 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 15 Jan 2024 23:06:04 +0800 Subject: [PATCH 275/357] SyncShared::accept_block will mark the block 
as BLOCK_RECEIVED if its block status is Entry::Vacant --- sync/src/types/mod.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index c68752e46e..fa33a81eae 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -1125,6 +1125,16 @@ impl SyncShared { peer_id_with_msg_bytes: Option<(PeerIndex, u64)>, verify_callback: Option, ) { + { + let entry = self + .shared() + .block_status_map() + .entry(block.header().hash()); + if let dashmap::mapref::entry::Entry::Vacant(entry) = entry { + entry.insert(BlockStatus::BLOCK_RECEIVED); + } + } + let lonely_block_with_callback = LonelyBlock { block, peer_id_with_msg_bytes, From 70fac5d137e8c43bc826d3d910cd76f6382be3cb Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 15 Jan 2024 23:07:29 +0800 Subject: [PATCH 276/357] Fix ckb-sync test_insert_parent_unknown_block --- sync/src/tests/sync_shared.rs | 41 ++++++++++++++++++++++++++--------- 1 file changed, 31 insertions(+), 10 deletions(-) diff --git a/sync/src/tests/sync_shared.rs b/sync/src/tests/sync_shared.rs index bdd069ac9a..bc3c383e9a 100644 --- a/sync/src/tests/sync_shared.rs +++ b/sync/src/tests/sync_shared.rs @@ -9,6 +9,7 @@ use ckb_shared::SharedBuilder; use ckb_store::{self, ChainStore}; use ckb_test_chain_utils::always_success_cellbase; use ckb_types::core::{BlockBuilder, BlockView, Capacity}; +use ckb_types::packed::Byte32; use ckb_types::prelude::*; use std::sync::Arc; @@ -91,13 +92,32 @@ fn test_insert_parent_unknown_block() { shared.accept_block(&chain, Arc::clone(&valid_orphan), None, None); shared.accept_block(&chain, Arc::clone(&invalid_orphan), None, None); + let wait_for_block_status_match = |hash: &Byte32, expect_status: BlockStatus| -> bool { + let mut status_match = false; + let now = std::time::Instant::now(); + while now.elapsed().as_secs() < 2 { + if shared.active_chain().get_block_status(hash) == expect_status { + status_match = true; + break; + } + 
std::thread::sleep(std::time::Duration::from_micros(100)); + } + status_match + }; + assert_eq!( shared.active_chain().get_block_status(&valid_hash), BlockStatus::BLOCK_RECEIVED ); + + if shared.active_chain().get_block_status(&invalid_hash) == BlockStatus::BLOCK_RECEIVED { + wait_for_block_status_match(&invalid_hash, BlockStatus::BLOCK_INVALID); + } + + // This block won't pass non_contextual_check, and will be BLOCK_INVALID immediately assert_eq!( shared.active_chain().get_block_status(&invalid_hash), - BlockStatus::BLOCK_RECEIVED + BlockStatus::BLOCK_INVALID ); // After inserting parent of an orphan block @@ -105,18 +125,19 @@ fn test_insert_parent_unknown_block() { assert!(shared .blocking_insert_new_block(&chain, Arc::clone(&parent)) .expect("insert parent of orphan block")); - assert_eq!( - shared.active_chain().get_block_status(&valid_hash), + + assert!(wait_for_block_status_match( + &valid_hash, BlockStatus::BLOCK_VALID - ); - assert_eq!( - shared.active_chain().get_block_status(&invalid_hash), + )); + assert!(wait_for_block_status_match( + &invalid_hash, BlockStatus::BLOCK_INVALID - ); - assert_eq!( - shared.active_chain().get_block_status(&parent_hash), + )); + assert!(wait_for_block_status_match( + &parent_hash, BlockStatus::BLOCK_VALID - ); + )); } #[test] From cd3caebf7997163d2c22d8e02fb303a57c8211cf Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 16 Jan 2024 01:31:01 +0800 Subject: [PATCH 277/357] ConsumeOrphan should mark the block as PARTIAL_STORED before send it to consume_unverified thread --- chain/src/consume_orphan.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/chain/src/consume_orphan.rs b/chain/src/consume_orphan.rs index 6804371db2..9e21474379 100644 --- a/chain/src/consume_orphan.rs +++ b/chain/src/consume_orphan.rs @@ -71,9 +71,6 @@ impl ConsumeDescendantProcessor { self.shared.get_unverified_tip().hash(), ); } - - self.shared - .insert_block_status(block_hash, BlockStatus::BLOCK_PARTIAL_STORED); } fn 
accept_descendant(&self, block: Arc) -> Result<(HeaderView, U256), Error> { @@ -151,6 +148,11 @@ impl ConsumeDescendantProcessor { pub(crate) fn process_descendant(&self, lonely_block: LonelyBlockWithCallback) { match self.accept_descendant(lonely_block.block().to_owned()) { Ok((parent_header, total_difficulty)) => { + self.shared.insert_block_status( + lonely_block.block().hash(), + BlockStatus::BLOCK_PARTIAL_STORED, + ); + let unverified_block: UnverifiedBlock = lonely_block.combine_parent_header(parent_header); From 0a80a78030fafb3a4fb9a79bf7c3ab1bc411880c Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 16 Jan 2024 09:11:24 +0800 Subject: [PATCH 278/357] Add more log message for debug --- Cargo.lock | 1 + chain/src/consume_orphan.rs | 5 ++++- sync/Cargo.toml | 1 + sync/src/synchronizer/block_fetcher.rs | 10 ++++----- sync/src/tests/sync_shared.rs | 27 +++++++++++++++++++++++ sync/src/tests/synchronizer/basic_sync.rs | 10 ++++++++- util/logger-service/src/lib.rs | 1 - 7 files changed, 47 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4c03a2f3ac..1e537eb52c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1535,6 +1535,7 @@ dependencies = [ "ckb-dao-utils", "ckb-error", "ckb-logger", + "ckb-logger-service", "ckb-metrics", "ckb-network", "ckb-proposal-table", diff --git a/chain/src/consume_orphan.rs b/chain/src/consume_orphan.rs index 9e21474379..1585d2a536 100644 --- a/chain/src/consume_orphan.rs +++ b/chain/src/consume_orphan.rs @@ -290,7 +290,10 @@ impl ConsumeOrphan { let parent_status = self.shared.get_block_status(&parent_hash); if parent_status.contains(BlockStatus::BLOCK_PARTIAL_STORED) { debug!( - "parent has stored, processing descendant directly {}", + "parent {} has stored: {:?}, processing descendant directly {}-{}", + parent_hash, + parent_status, + lonely_block.block().number(), lonely_block.block().hash() ); self.descendant_processor.process_descendant(lonely_block); diff --git a/sync/Cargo.toml b/sync/Cargo.toml index 
0fc96fedba..6495c07890 100644 --- a/sync/Cargo.toml +++ b/sync/Cargo.toml @@ -49,6 +49,7 @@ faux = "^0.1" once_cell = "1.8.0" ckb-systemtime = { path = "../util/systemtime", version = "= 0.114.0-pre" , features = ["enable_faketime"]} ckb-proposal-table = { path = "../util/proposal-table", version = "= 0.114.0-pre" } +ckb-logger-service = { path = "../util/logger-service", version = "= 0.114.0-pre" } [features] default = [] diff --git a/sync/src/synchronizer/block_fetcher.rs b/sync/src/synchronizer/block_fetcher.rs index 50f8203348..484d0977f1 100644 --- a/sync/src/synchronizer/block_fetcher.rs +++ b/sync/src/synchronizer/block_fetcher.rs @@ -152,15 +152,15 @@ impl BlockFetcher { return None; } - if matches!(self.ibd, IBDState::In) { - if best_known.number() <= self.active_chain.unverified_tip_number() { - debug!("In IBD mode, Peer {}'s best_known: {} is less or equal than unverified_tip : {}, won't request block from this peer", + if matches!(self.ibd, IBDState::In) + && best_known.number() <= self.active_chain.unverified_tip_number() + { + debug!("In IBD mode, Peer {}'s best_known: {} is less or equal than unverified_tip : {}, won't request block from this peer", self.peer, best_known.number(), self.active_chain.unverified_tip_number() ); - return None; - } + return None; }; let state = self.sync_shared.state(); diff --git a/sync/src/tests/sync_shared.rs b/sync/src/tests/sync_shared.rs index bc3c383e9a..23effa2114 100644 --- a/sync/src/tests/sync_shared.rs +++ b/sync/src/tests/sync_shared.rs @@ -4,6 +4,8 @@ use crate::tests::util::{build_chain, inherit_block}; use crate::SyncShared; use ckb_chain::start_chain_services; +use ckb_logger::info; +use ckb_logger_service::LoggerInitGuard; use ckb_shared::block_status::BlockStatus; use ckb_shared::SharedBuilder; use ckb_store::{self, ChainStore}; @@ -11,6 +13,7 @@ use ckb_test_chain_utils::always_success_cellbase; use ckb_types::core::{BlockBuilder, BlockView, Capacity}; use ckb_types::packed::Byte32; use 
ckb_types::prelude::*; +use std::fmt::format; use std::sync::Arc; #[test] @@ -142,6 +145,8 @@ fn test_insert_parent_unknown_block() { #[test] fn test_switch_valid_fork() { + let _log_guard: LoggerInitGuard = + ckb_logger_service::init_for_test("info,ckb_chain=debug").expect("init log"); let (shared, chain) = build_chain(4); let make_valid_block = |shared, parent_hash| -> BlockView { let header = inherit_block(shared, &parent_hash).build().header(); @@ -162,9 +167,20 @@ fn test_switch_valid_fork() { let block_hash = shared.store().get_block_hash(number).unwrap(); shared.store().get_block(&block_hash).unwrap(); } + + info!( + "chain tip is {}={}", + shared.active_chain().tip_number(), + shared.active_chain().tip_hash() + ); let mut valid_fork = Vec::new(); for _ in 2..shared.active_chain().tip_number() { let block = make_valid_block(shared.shared(), parent_hash.clone()); + info!( + "blocking insert valid fork: {}-{}", + block.number(), + block.hash() + ); assert!(shared .blocking_insert_new_block(&chain, Arc::new(block.clone())) .expect("insert fork")); @@ -178,6 +194,9 @@ fn test_switch_valid_fork() { .active_chain() .get_block_status(&block.header().hash()), BlockStatus::BLOCK_STORED, + "block {}-{} should be BLOCK_STORED", + block.number(), + block.hash() ); } @@ -185,6 +204,11 @@ fn test_switch_valid_fork() { // Make the fork switch as the main chain. 
for _ in tip_number..tip_number + 2 { let block = inherit_block(shared.shared(), &parent_hash.clone()).build(); + info!( + "blocking insert fork block: {}-{}", + block.number(), + block.hash() + ); assert!(shared .blocking_insert_new_block(&chain, Arc::new(block.clone())) .expect("insert fork")); @@ -198,6 +222,9 @@ fn test_switch_valid_fork() { .active_chain() .get_block_status(&block.header().hash()), BlockStatus::BLOCK_VALID, + "block {}-{} should be BLOCK_VALID", + block.number(), + block.hash() ); } } diff --git a/sync/src/tests/synchronizer/basic_sync.rs b/sync/src/tests/synchronizer/basic_sync.rs index 3765e79d55..79b9069319 100644 --- a/sync/src/tests/synchronizer/basic_sync.rs +++ b/sync/src/tests/synchronizer/basic_sync.rs @@ -10,6 +10,7 @@ use ckb_channel::bounded; use ckb_dao::DaoCalculator; use ckb_dao_utils::genesis_dao_data; use ckb_logger::info; +use ckb_logger_service::LoggerInitGuard; use ckb_network::SupportProtocols; use ckb_reward_calculator::RewardCalculator; use ckb_shared::{Shared, SharedBuilder}; @@ -33,6 +34,7 @@ const DEFAULT_CHANNEL: usize = 128; #[test] fn basic_sync() { + let _log_guard: LoggerInitGuard = ckb_logger_service::init_for_test("debug").expect("init log"); let _faketime_guard = ckb_systemtime::faketime(); _faketime_guard.set_faketime(0); let thread_name = "fake_time=0".to_string(); @@ -46,11 +48,17 @@ fn basic_sync() { node1.connect(&mut node2, SupportProtocols::Sync.protocol_id()); info!("node1 and node2 connected"); + let now = std::time::Instant::now(); let (signal_tx1, signal_rx1) = bounded(DEFAULT_CHANNEL); - node1.start(thread_name.clone(), signal_tx1, |data| { + node1.start(thread_name.clone(), signal_tx1, move |data| { let msg = packed::SyncMessage::from_slice(&data) .expect("sync message") .to_enum(); + + assert!( + now.elapsed().as_secs() <= 10, + "node1 should got block(3)'s SendBlock message within 10 seconds" + ); // terminate thread after 3 blocks if let packed::SyncMessageUnionReader::SendBlock(reader) = 
msg.as_reader() { let block = reader.block().to_entity().into_view(); diff --git a/util/logger-service/src/lib.rs b/util/logger-service/src/lib.rs index 48500e736e..37c7eb2684 100644 --- a/util/logger-service/src/lib.rs +++ b/util/logger-service/src/lib.rs @@ -530,7 +530,6 @@ fn setup_panic_logger() { /// Only used by unit test /// Initializes the [Logger](struct.Logger.html) and run the logging service. -#[cfg(test)] pub fn init_for_test(filter: &str) -> Result { setup_panic_logger(); let config: Config = Config { From 8a11ecef4dab48c77e887be400635d839c7864ae Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 16 Jan 2024 17:55:03 +0800 Subject: [PATCH 279/357] Shared provide generic version of get_block_status for Snapshot and store --- shared/src/shared.rs | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/shared/src/shared.rs b/shared/src/shared.rs index 98fa44e215..c0fe60d9bf 100644 --- a/shared/src/shared.rs +++ b/shared/src/shared.rs @@ -418,34 +418,37 @@ impl Shared { pub fn block_status_map(&self) -> &DashMap { &self.block_status_map } - pub fn get_block_status(&self, block_hash: &Byte32) -> BlockStatus { - match self.block_status_map.get(block_hash) { + + pub fn get_block_status(&self, store: &T, block_hash: &Byte32) -> BlockStatus { + match self.block_status_map().get(block_hash) { Some(status_ref) => *status_ref.value(), None => { - if self.header_map.contains_key(block_hash) { + if self.header_map().contains_key(block_hash) { BlockStatus::HEADER_VALID } else { - let verified = self - .store() + let verified = store .get_block_ext(block_hash) .map(|block_ext| block_ext.verified); match verified { + None => BlockStatus::UNKNOWN, + Some(None) => BlockStatus::BLOCK_STORED, Some(Some(true)) => BlockStatus::BLOCK_VALID, Some(Some(false)) => BlockStatus::BLOCK_INVALID, - Some(None) => BlockStatus::BLOCK_STORED, - None => { - if self.store().get_block_header(block_hash).is_some() { - 
BlockStatus::BLOCK_PARTIAL_STORED - } else { - BlockStatus::UNKNOWN - } - } } } } } } + pub fn contains_block_status( + &self, + store: &T, + block_hash: &Byte32, + status: BlockStatus, + ) -> bool { + self.get_block_status(store, block_hash).contains(status) + } + pub fn insert_block_status(&self, block_hash: Byte32, status: BlockStatus) { self.block_status_map.insert(block_hash, status); } @@ -460,9 +463,6 @@ impl Shared { log_now.elapsed() ); } - pub fn contains_block_status(&self, block_hash: &Byte32, status: BlockStatus) -> bool { - self.get_block_status(block_hash).contains(status) - } pub fn assume_valid_target(&self) -> MutexGuard> { self.assume_valid_target.lock() From 8da906c045e3615400e67f5e8f37519c799149c6 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 16 Jan 2024 17:55:46 +0800 Subject: [PATCH 280/357] ckb-chain load get_block_status from shared.store() --- chain/src/consume_orphan.rs | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/chain/src/consume_orphan.rs b/chain/src/consume_orphan.rs index 1585d2a536..6770bf60ce 100644 --- a/chain/src/consume_orphan.rs +++ b/chain/src/consume_orphan.rs @@ -263,10 +263,11 @@ impl ConsumeOrphan { fn search_orphan_pool(&self) { for leader_hash in self.orphan_blocks_broker.clone_leaders() { - if !self - .shared - .contains_block_status(&leader_hash, BlockStatus::BLOCK_PARTIAL_STORED) - { + if !self.shared.contains_block_status( + self.shared.store(), + &leader_hash, + BlockStatus::BLOCK_PARTIAL_STORED, + ) { trace!("orphan leader: {} not partial stored", leader_hash); continue; } @@ -287,7 +288,9 @@ impl ConsumeOrphan { fn process_lonely_block(&self, lonely_block: LonelyBlockWithCallback) { let parent_hash = lonely_block.block().parent_hash(); - let parent_status = self.shared.get_block_status(&parent_hash); + let parent_status = self + .shared + .get_block_status(self.shared.store(), &parent_hash); if parent_status.contains(BlockStatus::BLOCK_PARTIAL_STORED) { debug!( "parent {} 
has stored: {:?}, processing descendant directly {}-{}", From 621bee8e9321f150057330dd23943439454ec402 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 16 Jan 2024 18:02:49 +0800 Subject: [PATCH 281/357] ActiveChain get_block_status from snapshot() --- sync/src/types/mod.rs | 20 +------------------- 1 file changed, 1 insertion(+), 19 deletions(-) diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index fa33a81eae..9725a28071 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -2037,25 +2037,7 @@ impl ActiveChain { } pub fn get_block_status(&self, block_hash: &Byte32) -> BlockStatus { - match self.shared().shared().block_status_map().get(block_hash) { - Some(status_ref) => *status_ref.value(), - None => { - if self.shared().shared().header_map().contains_key(block_hash) { - BlockStatus::HEADER_VALID - } else { - let verified = self - .snapshot - .get_block_ext(block_hash) - .map(|block_ext| block_ext.verified); - match verified { - None => BlockStatus::UNKNOWN, - Some(None) => BlockStatus::BLOCK_STORED, - Some(Some(true)) => BlockStatus::BLOCK_VALID, - Some(Some(false)) => BlockStatus::BLOCK_INVALID, - } - } - } - } + self.shared().get_block_status(self.snapshot(), block_hash) } pub fn contains_block_status(&self, block_hash: &Byte32, status: BlockStatus) -> bool { From aebac498bbb9f3e5dc7f40428fc33bb450927e84 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 16 Jan 2024 18:03:33 +0800 Subject: [PATCH 282/357] Fix SyncShared's field name in BlockFetcher and HeadersProcess --- rpc/src/module/net.rs | 4 +- sync/src/synchronizer/block_fetcher.rs | 10 ++- sync/src/synchronizer/headers_process.rs | 21 +++--- sync/src/types/mod.rs | 85 +++++++++--------------- 4 files changed, 49 insertions(+), 71 deletions(-) diff --git a/rpc/src/module/net.rs b/rpc/src/module/net.rs index 5e631de21b..28022e304c 100644 --- a/rpc/src/module/net.rs +++ b/rpc/src/module/net.rs @@ -722,8 +722,8 @@ impl NetRpc for NetRpcImpl { fn sync_state(&self) -> Result { let 
chain = self.sync_shared.active_chain(); - let shared = chain.shared().shared(); - let state = chain.shared().state(); + let shared = chain.shared(); + let state = chain.state(); let (fast_time, normal_time, low_time) = state.read_inflight_blocks().division_point(); let best_known = state.shared_best_header(); let unverified_tip = shared.get_unverified_tip(); diff --git a/sync/src/synchronizer/block_fetcher.rs b/sync/src/synchronizer/block_fetcher.rs index 484d0977f1..1a021f3983 100644 --- a/sync/src/synchronizer/block_fetcher.rs +++ b/sync/src/synchronizer/block_fetcher.rs @@ -200,7 +200,10 @@ impl BlockFetcher { } }?; - let mut status = self.sync_shared.shared().get_block_status(&header.hash()); + let mut status = self + .sync_shared + .active_chain() + .get_block_status(&header.hash()); // Judge whether we should fetch the target block, neither stored nor in-flighted for _ in 0..span { @@ -236,7 +239,10 @@ impl BlockFetcher { fetch.push(header) } - status = self.sync_shared.shared().get_block_status(&parent_hash); + status = self + .sync_shared + .active_chain() + .get_block_status(&parent_hash); header = self .sync_shared .get_header_index_view(&parent_hash, false)?; diff --git a/sync/src/synchronizer/headers_process.rs b/sync/src/synchronizer/headers_process.rs index 9da100a77c..a4ba60a98f 100644 --- a/sync/src/synchronizer/headers_process.rs +++ b/sync/src/synchronizer/headers_process.rs @@ -281,14 +281,15 @@ impl<'a, DL: HeaderFieldsProvider> HeaderAcceptor<'a, DL> { pub fn accept(&self) -> ValidationResult { let mut result = ValidationResult::default(); - let shared = self.active_chain.shared(); - let state = shared.state(); + let sync_shared = self.active_chain.sync_shared(); + let state = self.active_chain.state(); + let shared = sync_shared.shared(); // FIXME If status == BLOCK_INVALID then return early. But which error // type should we return? 
let status = self.active_chain.get_block_status(&self.header.hash()); if status.contains(BlockStatus::HEADER_VALID) { - let header_index = shared + let header_index = sync_shared .get_header_index_view( &self.header.hash(), status.contains(BlockStatus::BLOCK_STORED), @@ -307,9 +308,7 @@ impl<'a, DL: HeaderFieldsProvider> HeaderAcceptor<'a, DL> { self.header.number(), self.header.hash(), ); - shared - .shared() - .insert_block_status(self.header.hash(), BlockStatus::BLOCK_INVALID); + shared.insert_block_status(self.header.hash(), BlockStatus::BLOCK_INVALID); return result; } @@ -320,9 +319,7 @@ impl<'a, DL: HeaderFieldsProvider> HeaderAcceptor<'a, DL> { self.header.hash(), ); if is_invalid { - shared - .shared() - .insert_block_status(self.header.hash(), BlockStatus::BLOCK_INVALID); + shared.insert_block_status(self.header.hash(), BlockStatus::BLOCK_INVALID); } return result; } @@ -333,13 +330,11 @@ impl<'a, DL: HeaderFieldsProvider> HeaderAcceptor<'a, DL> { self.header.number(), self.header.hash(), ); - shared - .shared() - .insert_block_status(self.header.hash(), BlockStatus::BLOCK_INVALID); + shared.insert_block_status(self.header.hash(), BlockStatus::BLOCK_INVALID); return result; } - shared.insert_valid_header(self.peer, self.header); + sync_shared.insert_valid_header(self.peer, self.header); result } } diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index 9725a28071..631650b48b 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -1039,7 +1039,7 @@ impl SyncShared { /// Get snapshot with current chain pub fn active_chain(&self) -> ActiveChain { ActiveChain { - shared: self.clone(), + sync_shared: self.clone(), snapshot: Arc::clone(&self.shared.snapshot()), } } @@ -1258,7 +1258,7 @@ impl SyncShared { return false; } - let status = self.shared().get_block_status(&block.hash()); + let status = self.active_chain().get_block_status(&block.hash()); debug!( "new_block_received {}-{}, status: {:?}", block.number(), @@ -1633,24 +1633,6 @@ impl 
SyncState { self.inflight_proposals.contains_key(proposal_id) } - // pub fn insert_orphan_block(&self, block: core::BlockView) { - // self.insert_block_status(block.hash(), BlockStatus::BLOCK_RECEIVED); - // self.orphan_block_pool.insert(block); - // } - // - // pub fn remove_orphan_by_parent(&self, parent_hash: &Byte32) -> Vec { - // let blocks = self.orphan_block_pool.remove_blocks_by_parent(parent_hash); - // blocks.iter().for_each(|block| { - // self.block_status_map.remove(&block.hash()); - // }); - // shrink_to_fit!(self.block_status_map, SHRINK_THRESHOLD); - // blocks - // } - // - // pub fn orphan_pool(&self) -> &OrphanBlockPool { - // &self.orphan_block_pool - // } - pub fn drain_get_block_proposals( &self, ) -> DashMap> { @@ -1682,27 +1664,31 @@ impl SyncState { } self.peers().disconnected(pi); } - - // pub fn get_orphan_block(&self, block_hash: &Byte32) -> Option { - // self.orphan_block_pool.get_block(block_hash) - // } - // - // pub fn clean_expired_blocks(&self, epoch: EpochNumber) -> Vec { - // self.orphan_block_pool.clean_expired_blocks(epoch) - // } } /** ActiveChain captures a point-in-time view of indexed chain of blocks. 
*/ #[derive(Clone)] pub struct ActiveChain { - shared: SyncShared, + sync_shared: SyncShared, snapshot: Arc, } #[doc(hidden)] impl ActiveChain { + pub(crate) fn sync_shared(&self) -> &SyncShared { + &self.sync_shared + } + + pub fn shared(&self) -> &Shared { + self.sync_shared.shared() + } + fn store(&self) -> &ChainDB { - self.shared.store() + self.sync_shared.store() + } + + pub fn state(&self) -> &SyncState { + self.sync_shared.state() } fn snapshot(&self) -> &Snapshot { @@ -1740,10 +1726,6 @@ impl ActiveChain { .unwrap_or_default() } - pub fn shared(&self) -> &SyncShared { - &self.shared - } - pub fn total_difficulty(&self) -> &U256 { self.snapshot.total_difficulty() } @@ -1768,18 +1750,14 @@ impl ActiveChain { self.snapshot.is_main_chain(hash) } pub fn is_unverified_chain(&self, hash: &packed::Byte32) -> bool { - self.shared() - .shared() - .store() - .get_block_epoch_index(hash) - .is_some() + self.store().get_block_epoch_index(hash).is_some() } pub fn is_initial_block_download(&self) -> bool { - self.shared.shared().is_initial_block_download() + self.shared().is_initial_block_download() } pub fn unverified_tip_header(&self) -> HeaderIndex { - self.shared.shared.get_unverified_tip() + self.shared().get_unverified_tip() } pub fn unverified_tip_hash(&self) -> Byte32 { @@ -1824,20 +1802,21 @@ impl ActiveChain { } }; - let get_header_view_fn = - |hash: &Byte32, store_first: bool| self.shared.get_header_index_view(hash, store_first); + let get_header_view_fn = |hash: &Byte32, store_first: bool| { + self.sync_shared.get_header_index_view(hash, store_first) + }; let fast_scanner_fn = |number: BlockNumber, current: BlockNumberAndHash| { // shortcut to return an ancestor block if current.number <= tip_number && block_is_on_chain_fn(¤t.hash) { self.get_block_hash(number) - .and_then(|hash| self.shared.get_header_index_view(&hash, true)) + .and_then(|hash| self.sync_shared.get_header_index_view(&hash, true)) } else { None } }; - self.shared + self.sync_shared 
.get_header_index_view(base, false)? .get_ancestor(tip_number, number, get_header_view_fn, fast_scanner_fn) } @@ -1885,7 +1864,7 @@ impl ActiveChain { } // always include genesis hash if index != 0 { - locator.push(self.shared.consensus().genesis_hash()); + locator.push(self.sync_shared.consensus().genesis_hash()); } break; } @@ -1935,7 +1914,7 @@ impl ActiveChain { } let locator_hash = locator.last().expect("empty checked"); - if locator_hash != &self.shared.consensus().genesis_hash() { + if locator_hash != &self.sync_shared.consensus().genesis_hash() { return None; } @@ -1953,11 +1932,11 @@ impl ActiveChain { if let Some(header) = locator .get(index - 1) - .and_then(|hash| self.shared.store().get_block_header(hash)) + .and_then(|hash| self.sync_shared.store().get_block_header(hash)) { let mut block_hash = header.data().raw().parent_hash(); loop { - let block_header = match self.shared.store().get_block_header(&block_hash) { + let block_header = match self.sync_shared.store().get_block_header(&block_hash) { None => break latest_common, Some(block_header) => block_header, }; @@ -1986,7 +1965,7 @@ impl ActiveChain { (block_number + 1..max_height) .filter_map(|block_number| self.snapshot.get_block_hash(block_number)) .take_while(|block_hash| block_hash != hash_stop) - .filter_map(|block_hash| self.shared.store().get_block_header(&block_hash)) + .filter_map(|block_hash| self.sync_shared.store().get_block_header(&block_hash)) .collect() } @@ -1997,8 +1976,7 @@ impl ActiveChain { block_number_and_hash: BlockNumberAndHash, ) { if let Some(last_time) = self - .shared() - .state + .state() .pending_get_headers .write() .get(&(peer, block_number_and_hash.hash())) @@ -2016,8 +1994,7 @@ impl ActiveChain { ); } } - self.shared() - .state() + self.state() .pending_get_headers .write() .put((peer, block_number_and_hash.hash()), Instant::now()); From 5910012828d873151338b011bccb95392a0a62ab Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 16 Jan 2024 18:04:57 +0800 Subject: 
[PATCH 283/357] Remove BlockStatus::BLOCK_PARTIAL_STORED Signed-off-by: Eval EXEC --- shared/src/block_status.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/shared/src/block_status.rs b/shared/src/block_status.rs index db3060b8bc..751d38bfd8 100644 --- a/shared/src/block_status.rs +++ b/shared/src/block_status.rs @@ -7,8 +7,7 @@ bitflags! { const HEADER_VALID = 1; const BLOCK_RECEIVED = 1 | Self::HEADER_VALID.bits << 1; - const BLOCK_PARTIAL_STORED = 1 | Self::BLOCK_RECEIVED.bits << 1; - const BLOCK_STORED = 1 | Self::BLOCK_PARTIAL_STORED.bits << 1; + const BLOCK_STORED = 1 | Self::BLOCK_RECEIVED.bits << 1; const BLOCK_VALID = 1 | Self::BLOCK_STORED.bits << 1; const BLOCK_INVALID = 1 << 12; From 291ed19cf4e2aae45b18bef3f435151673f64090 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 16 Jan 2024 20:13:39 +0800 Subject: [PATCH 284/357] Change BLOCK_PARTIAL_STORED to BLOCK_STORED --- chain/src/consume_orphan.rs | 12 +++++------- sync/src/synchronizer/block_fetcher.rs | 4 ++-- sync/src/synchronizer/mod.rs | 8 ++++---- 3 files changed, 11 insertions(+), 13 deletions(-) diff --git a/chain/src/consume_orphan.rs b/chain/src/consume_orphan.rs index 6770bf60ce..6550e54616 100644 --- a/chain/src/consume_orphan.rs +++ b/chain/src/consume_orphan.rs @@ -148,10 +148,8 @@ impl ConsumeDescendantProcessor { pub(crate) fn process_descendant(&self, lonely_block: LonelyBlockWithCallback) { match self.accept_descendant(lonely_block.block().to_owned()) { Ok((parent_header, total_difficulty)) => { - self.shared.insert_block_status( - lonely_block.block().hash(), - BlockStatus::BLOCK_PARTIAL_STORED, - ); + self.shared + .insert_block_status(lonely_block.block().hash(), BlockStatus::BLOCK_STORED); let unverified_block: UnverifiedBlock = lonely_block.combine_parent_header(parent_header); @@ -266,9 +264,9 @@ impl ConsumeOrphan { if !self.shared.contains_block_status( self.shared.store(), &leader_hash, - BlockStatus::BLOCK_PARTIAL_STORED, + 
BlockStatus::BLOCK_STORED, ) { - trace!("orphan leader: {} not partial stored", leader_hash); + trace!("orphan leader: {} not stored", leader_hash); continue; } @@ -291,7 +289,7 @@ impl ConsumeOrphan { let parent_status = self .shared .get_block_status(self.shared.store(), &parent_hash); - if parent_status.contains(BlockStatus::BLOCK_PARTIAL_STORED) { + if parent_status.contains(BlockStatus::BLOCK_STORED) { debug!( "parent {} has stored: {:?}, processing descendant directly {}-{}", parent_hash, diff --git a/sync/src/synchronizer/block_fetcher.rs b/sync/src/synchronizer/block_fetcher.rs index 1a021f3983..c91a005c4d 100644 --- a/sync/src/synchronizer/block_fetcher.rs +++ b/sync/src/synchronizer/block_fetcher.rs @@ -210,8 +210,8 @@ impl BlockFetcher { let parent_hash = header.parent_hash(); let hash = header.hash(); - if status.contains(BlockStatus::BLOCK_PARTIAL_STORED) { - if status.contains(BlockStatus::BLOCK_STORED) { + if status.contains(BlockStatus::BLOCK_STORED) { + if status.contains(BlockStatus::BLOCK_VALID) { // If the block is stored, its ancestor must on store // So we can skip the search of this space directly self.sync_shared diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index 763c773820..d3ff3a9a4e 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -363,8 +363,8 @@ impl Synchronizer { let status = self.shared.active_chain().get_block_status(&block_hash); // NOTE: Filtering `BLOCK_STORED` but not `BLOCK_RECEIVED`, is for avoiding // stopping synchronization even when orphan_pool maintains dirty items by bugs. 
- if status.contains(BlockStatus::BLOCK_PARTIAL_STORED) { - error!("Block {} already partial stored", block_hash); + if status.contains(BlockStatus::BLOCK_STORED) { + error!("Block {} already stored", block_hash); } else if status.contains(BlockStatus::HEADER_VALID) { self.shared .insert_new_block(&self.chain, Arc::new(block), peer_id, message_bytes); @@ -388,8 +388,8 @@ impl Synchronizer { let status = self.shared.active_chain().get_block_status(&block_hash); // NOTE: Filtering `BLOCK_STORED` but not `BLOCK_RECEIVED`, is for avoiding // stopping synchronization even when orphan_pool maintains dirty items by bugs. - if status.contains(BlockStatus::BLOCK_PARTIAL_STORED) { - error!("block {} already partial stored", block_hash); + if status.contains(BlockStatus::BLOCK_STORED) { + error!("block {} already stored", block_hash); Ok(false) } else if status.contains(BlockStatus::HEADER_VALID) { self.shared.blocking_insert_new_block_with_verbose_info( From 85885f1ae518fd383afb2ad35ee4fd91e8252e88 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 17 Jan 2024 16:03:42 +0800 Subject: [PATCH 285/357] Improve draw chart script add more major and minor ticks on x/yaxis --- devtools/block_sync/draw_sync_chart.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/devtools/block_sync/draw_sync_chart.py b/devtools/block_sync/draw_sync_chart.py index e932b7414a..e325d48293 100755 --- a/devtools/block_sync/draw_sync_chart.py +++ b/devtools/block_sync/draw_sync_chart.py @@ -100,6 +100,8 @@ def process_task(task): lgs.append(lg) + ax.hlines([11_500_000], 0, max(duration), colors="gray", linestyles="dashed") + for i, h in enumerate(height): if h % 1_000_000 == 0: ax.vlines([duration[i]], 0, h, colors="gray", linestyles="dashed") @@ -107,7 +109,7 @@ def process_task(task): if h == 10_000_000: alabels.append(((duration[i],h),label)) - if h == 10_000_000 or h == 11_000_000: + if h == 11_000_000 or h == 11_500_000: ax.vlines([duration[i]], 0, h, colors="black", 
linestyles="dashed") voff=-60 if h == 11_000_000: @@ -135,9 +137,9 @@ def process_task(task): ax.yaxis.grid(color='gray', linestyle='dashed', which='minor') xminorLocator = MultipleLocator(1.0) - ax.xaxis.set_minor_locator(xminorLocator) + ax.xaxis.set_major_locator(xminorLocator) - yminorLocator = MultipleLocator(1_000_000) + yminorLocator = MultipleLocator(500_000) ax.yaxis.set_major_locator(yminorLocator) @@ -165,6 +167,9 @@ def process_task(task): elif loffset > 0: lheight -= 20 + +plt.axhline(y=11_500_000, color='blue', linestyle='--') + # plt.legend(tuple(lgs), tuple(args.label), loc='upper left', shadow=True) plt.title('CKB Block Sync progress Chart') plt.xlabel('Timecost (hours)') From 1f73f466c5d92fe45cbd21a954b8d4e66a9942bd Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 17 Jan 2024 17:27:42 +0800 Subject: [PATCH 286/357] Add log for `MinerRpcImpl::submit_block` new_block check Signed-off-by: Eval EXEC --- rpc/src/module/miner.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/rpc/src/module/miner.rs b/rpc/src/module/miner.rs index d844e33f4d..4b1f9add74 100644 --- a/rpc/src/module/miner.rs +++ b/rpc/src/module/miner.rs @@ -2,7 +2,7 @@ use crate::error::RPCError; use async_trait::async_trait; use ckb_chain::ChainController; use ckb_jsonrpc_types::{Block, BlockTemplate, Uint64, Version}; -use ckb_logger::{debug, error, warn}; +use ckb_logger::{debug, error, info, warn}; use ckb_network::{NetworkController, PeerIndex, SupportProtocols, TargetSession}; use ckb_shared::{shared::Shared, Snapshot}; use ckb_systemtime::unix_time_as_millis; @@ -280,6 +280,13 @@ impl MinerRpc for MinerRpcImpl { .chain .blocking_process_block(Arc::clone(&block)) .map_err(|err| handle_submit_error(&work_id, &err))?; + info!( + "end to submit block, work_id = {}, is_new = {}, block = #{}({})", + work_id, + is_new, + block.number(), + block.hash() + ); // Announce only new block if is_new { From 6730effb6da99c0b201b46dac3e1bbbd4e9fd7df Mon Sep 17 00:00:00 
2001 From: Eval EXEC Date: Wed, 17 Jan 2024 17:54:48 +0800 Subject: [PATCH 287/357] Comment out shrink_to_fit for header_map's MemoryMap --- shared/src/types/header_map/memory.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/shared/src/types/header_map/memory.rs b/shared/src/types/header_map/memory.rs index 0bf62d50f4..ebad478089 100644 --- a/shared/src/types/header_map/memory.rs +++ b/shared/src/types/header_map/memory.rs @@ -101,7 +101,7 @@ impl MemoryMap { pub(crate) fn remove(&self, key: &Byte32) -> Option { let mut guard = self.0.write(); let ret = guard.remove(key); - // shrink_to_fit!(guard, SHRINK_THRESHOLD); + shrink_to_fit!(guard, SHRINK_THRESHOLD); ret.map(|inner| (key.clone(), inner).into()) } From 18b2afab1b952cb7821596b4d164bc2a67411016 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 17 Jan 2024 18:05:13 +0800 Subject: [PATCH 288/357] ChainService should not punish remote peer if failed to send block to orphan pool due to channel close --- chain/src/chain_service.rs | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/chain/src/chain_service.rs b/chain/src/chain_service.rs index 447ca811fd..b28e49212d 100644 --- a/chain/src/chain_service.rs +++ b/chain/src/chain_service.rs @@ -218,19 +218,12 @@ impl ChainService { match self.lonely_block_tx.send(lonely_block) { Ok(_) => {} Err(SendError(lonely_block)) => { - error!("failed to notify new block to orphan pool"); + error!("Failed to notify new block to orphan pool, It seems that the orphan pool has exited."); let err: Error = InternalErrorKind::System .other("OrphanBlock broker disconnected") .into(); - tell_synchronizer_to_punish_the_bad_peer( - self.verify_failed_blocks_tx.clone(), - lonely_block.peer_id_with_msg_bytes(), - lonely_block.block().hash(), - &err, - ); - let verify_result = Err(err); lonely_block.execute_callback(verify_result); return; From 8b9111dbb977bdf0767c5e2b082519998f4e8a81 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 18 Jan 2024 
10:39:36 +0800 Subject: [PATCH 289/357] Remove meaning less TODO note in test_internal_db_error, improve draw_sync_chart.py --- devtools/block_sync/draw_sync_chart.py | 4 ++-- sync/src/tests/synchronizer/functions.rs | 4 ---- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/devtools/block_sync/draw_sync_chart.py b/devtools/block_sync/draw_sync_chart.py index e325d48293..5ff8dad18d 100755 --- a/devtools/block_sync/draw_sync_chart.py +++ b/devtools/block_sync/draw_sync_chart.py @@ -106,7 +106,7 @@ def process_task(task): if h % 1_000_000 == 0: ax.vlines([duration[i]], 0, h, colors="gray", linestyles="dashed") - if h == 10_000_000: + if i == len(height) -1 : alabels.append(((duration[i],h),label)) if h == 11_000_000 or h == 11_500_000: @@ -149,7 +149,7 @@ def process_task(task): # sort alabsle by .0.1 alabels.sort(key=lambda x: x[0][0]) -lheight=80 +lheight=40 loffset=-40 count=len(alabels) for (duration,h), label in alabels: diff --git a/sync/src/tests/synchronizer/functions.rs b/sync/src/tests/synchronizer/functions.rs index 71a63d8e84..4b81982054 100644 --- a/sync/src/tests/synchronizer/functions.rs +++ b/sync/src/tests/synchronizer/functions.rs @@ -1212,10 +1212,6 @@ fn test_internal_db_error() { let (shared, mut pack) = builder.build().unwrap(); - // TODO fix later - // let chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); - // let _chain_controller = chain_service.start::<&str>(None); - let sync_shared = Arc::new(SyncShared::new( shared, Default::default(), From fdfd725b2f46e8316dfa8ac0915f52788594a6e7 Mon Sep 17 00:00:00 2001 From: YI Date: Tue, 16 Jan 2024 12:24:31 +0800 Subject: [PATCH 290/357] test: randomly kill and restart node --- test/src/main.rs | 1 + test/src/specs/fault_injection/mod.rs | 3 ++ .../specs/fault_injection/randomly_kill.rs | 31 +++++++++++++++++++ test/src/specs/mod.rs | 2 ++ 4 files changed, 37 insertions(+) create mode 100644 test/src/specs/fault_injection/mod.rs create mode 100644 
test/src/specs/fault_injection/randomly_kill.rs diff --git a/test/src/main.rs b/test/src/main.rs index 97317f1fc5..2d4e5232fa 100644 --- a/test/src/main.rs +++ b/test/src/main.rs @@ -584,6 +584,7 @@ fn all_specs() -> Vec> { Box::new(CheckVmVersion1), Box::new(CheckVmVersion2), Box::new(CheckVmBExtension), + Box::new(RandomlyKill), ]; specs.shuffle(&mut thread_rng()); specs diff --git a/test/src/specs/fault_injection/mod.rs b/test/src/specs/fault_injection/mod.rs new file mode 100644 index 0000000000..aa54ea05d4 --- /dev/null +++ b/test/src/specs/fault_injection/mod.rs @@ -0,0 +1,3 @@ +mod randomly_kill; + +pub use randomly_kill::*; diff --git a/test/src/specs/fault_injection/randomly_kill.rs b/test/src/specs/fault_injection/randomly_kill.rs new file mode 100644 index 0000000000..4bb0033734 --- /dev/null +++ b/test/src/specs/fault_injection/randomly_kill.rs @@ -0,0 +1,31 @@ +use crate::{Node, Spec}; + +use ckb_logger::info; +use rand::{thread_rng, Rng}; + +pub struct RandomlyKill; + +impl Spec for RandomlyKill { + crate::setup!(num_nodes: 1); + + fn run(&self, nodes: &mut Vec) { + let mut rng = thread_rng(); + let node = &mut nodes[0]; + for _ in 0..rng.gen_range(10, 20) { + let n = rng.gen_range(0, 10); + // TODO: the kill of child process and mining are actually sequential here + // We need to find some way to do these two things in parallel. + // It would be great if we can kill and start the node externally (instead of writing + // rust code to manage all the nodes, because in that case we will have to fight + // ownership rules, and monitor node). 
+ if n != 0 { + info!("Mining {} blocks", n); + node.mine(n); + } + info!("Stop the node"); + node.stop(); + info!("Start the node"); + node.start(); + } + } +} diff --git a/test/src/specs/mod.rs b/test/src/specs/mod.rs index 5e9d9fc569..f16e4fc849 100644 --- a/test/src/specs/mod.rs +++ b/test/src/specs/mod.rs @@ -8,6 +8,7 @@ mod relay; mod rpc; mod sync; mod tx_pool; +mod fault_injection; pub use alert::*; pub use consensus::*; @@ -19,6 +20,7 @@ pub use relay::*; pub use rpc::*; pub use sync::*; pub use tx_pool::*; +pub use fault_injection::*; use crate::Node; use ckb_app_config::CKBAppConfig; From 013104e1b2b664aedb605f9ebb07b160076adfd7 Mon Sep 17 00:00:00 2001 From: YI Date: Wed, 17 Jan 2024 14:34:53 +0800 Subject: [PATCH 291/357] add integration test for sync with churn nodes --- test/src/main.rs | 1 + test/src/node.rs | 50 +++++++++++++++++++++++++++---- test/src/specs/sync/mod.rs | 2 ++ test/src/specs/sync/sync_churn.rs | 40 +++++++++++++++++++++++++ 4 files changed, 87 insertions(+), 6 deletions(-) create mode 100644 test/src/specs/sync/sync_churn.rs diff --git a/test/src/main.rs b/test/src/main.rs index 2d4e5232fa..55bf55c2e0 100644 --- a/test/src/main.rs +++ b/test/src/main.rs @@ -398,6 +398,7 @@ fn all_specs() -> Vec> { Box::new(BlockSyncNonAncestorBestBlocks), Box::new(RequestUnverifiedBlocks), Box::new(SyncTimeout), + Box::new(SyncChurn), Box::new(GetBlockFilterCheckPoints), Box::new(GetBlockFilterHashes), Box::new(GetBlockFilters), diff --git a/test/src/node.rs b/test/src/node.rs index 6b095b547b..ddb2e8a85f 100644 --- a/test/src/node.rs +++ b/test/src/node.rs @@ -8,7 +8,8 @@ use ckb_chain_spec::ChainSpec; use ckb_error::AnyError; use ckb_jsonrpc_types::{BlockFilter, BlockTemplate, TxPoolInfo}; use ckb_jsonrpc_types::{PoolTxDetailInfo, TxStatus}; -use ckb_logger::{debug, error}; +use ckb_logger::{debug, error, info}; +use ckb_network::multiaddr::Multiaddr; use ckb_resource::Resource; use ckb_types::{ bytes, @@ -19,8 +20,8 @@ use ckb_types::{ 
packed::{Block, Byte32, CellDep, CellInput, CellOutput, CellOutputBuilder, OutPoint, Script}, prelude::*, }; -use std::borrow::Borrow; -use std::collections::HashSet; +use std::borrow::{Borrow, BorrowMut}; +use std::collections::{HashMap, HashSet}; use std::convert::Into; use std::fs; use std::path::PathBuf; @@ -749,11 +750,11 @@ pub fn connect_all(nodes: &[Node]) { } // TODO it will be removed out later, in another PR -pub fn disconnect_all(nodes: &[Node]) { +pub fn disconnect_all>(nodes: &[N]) { for node_a in nodes.iter() { for node_b in nodes.iter() { - if node_a.p2p_address() != node_b.p2p_address() { - node_a.disconnect(node_b); + if node_a.borrow().p2p_address() != node_b.borrow().p2p_address() { + node_a.borrow().disconnect(node_b.borrow()); } } } @@ -779,3 +780,40 @@ pub fn waiting_for_sync>(nodes: &[N]) { node.borrow().wait_for_tx_pool(); } } + +// TODO it will be removed out later, in another PR +pub fn make_bootnodes_for_all>(nodes: &mut [N]) { + let node_multiaddrs: HashMap = nodes + .iter() + .map(|n| { + ( + n.borrow().node_id().to_owned(), + n.borrow().p2p_address().try_into().unwrap(), + ) + }) + .collect(); + let other_node_addrs: Vec> = node_multiaddrs + .iter() + .map(|(id, _)| { + let addrs = node_multiaddrs + .iter() + .filter(|(other_id, _)| other_id.as_str() != id.as_str()) + .map(|(_, addr)| addr.to_owned()) + .collect::>(); + addrs + }) + .collect(); + for (i, node) in nodes.iter_mut().enumerate() { + node.borrow_mut() + .modify_app_config(|config: &mut CKBAppConfig| { + info!("Setting bootnodes to {:?}", other_node_addrs[i]); + config.network.bootnodes = other_node_addrs[i].clone(); + }) + } + // Restart nodes to make bootnodes work + for node in nodes.iter_mut() { + node.borrow_mut().stop(); + node.borrow_mut().start(); + info!("Restarted node {:?}", node.borrow_mut().node_id()); + } +} diff --git a/test/src/specs/sync/mod.rs b/test/src/specs/sync/mod.rs index 52c2fe5997..4246e3416e 100644 --- a/test/src/specs/sync/mod.rs +++ 
b/test/src/specs/sync/mod.rs @@ -8,6 +8,7 @@ mod invalid_locator_size; mod last_common_header; mod sync_and_mine; mod sync_timeout; +mod sync_churn; pub use block_filter::*; pub use block_sync::*; @@ -19,3 +20,4 @@ pub use invalid_locator_size::*; pub use last_common_header::*; pub use sync_and_mine::*; pub use sync_timeout::*; +pub use sync_churn::*; diff --git a/test/src/specs/sync/sync_churn.rs b/test/src/specs/sync/sync_churn.rs new file mode 100644 index 0000000000..9b1e16930e --- /dev/null +++ b/test/src/specs/sync/sync_churn.rs @@ -0,0 +1,40 @@ +use crate::node::{make_bootnodes_for_all, waiting_for_sync}; +use crate::{Node, Spec}; +use ckb_logger::info; +use rand::Rng; + +fn select_random_node<'a, R: Rng>(rng: &mut R, nodes: &'a mut [Node]) -> &'a mut Node { + let index = rng.gen_range(0, nodes.len()); + &mut nodes[index] +} + +fn randomly_restart(rng: &mut R, restart_probilibity: f64, node: &mut Node) { + let should_restart = rng.gen_bool(restart_probilibity); + if should_restart { + node.stop(); + node.start(); + } +} + +pub struct SyncChurn; + +impl Spec for SyncChurn { + crate::setup!(num_nodes: 5); + + fn run(&self, nodes: &mut Vec) { + make_bootnodes_for_all(nodes); + + let mut rng = rand::thread_rng(); + let (mining_nodes, churn_nodes) = nodes.split_at_mut(1); + for _ in 0..1000 { + const RESTART_PROBABILITY: f64 = 0.1; + let mining_node = select_random_node(&mut rng, mining_nodes); + mining_node.mine(1); + let node = select_random_node(&mut rng, churn_nodes); + randomly_restart(&mut rng, RESTART_PROBABILITY, node); + } + + info!("Waiting for all nodes sync"); + waiting_for_sync(&nodes); + } +} From 15d59bc86ebc789cf9733ba23ed79e709bb35d12 Mon Sep 17 00:00:00 2001 From: YI Date: Wed, 17 Jan 2024 16:26:48 +0800 Subject: [PATCH 292/357] test: make Node struct clonable --- test/src/net.rs | 4 +- test/src/node.rs | 79 ++++++++++++++++++++++----------- test/src/specs/p2p/whitelist.rs | 4 +- 3 files changed, 58 insertions(+), 29 deletions(-) diff --git 
a/test/src/net.rs b/test/src/net.rs index 56c4f5676e..4863c46792 100644 --- a/test/src/net.rs +++ b/test/src/net.rs @@ -140,7 +140,7 @@ impl Net { let protocol_id = protocol.protocol_id(); let peer_index = self .receivers - .get(node_id) + .get(&node_id) .map(|(peer_index, _)| *peer_index) .unwrap_or_else(|| panic!("not connected peer {}", node.p2p_address())); self.controller() @@ -156,7 +156,7 @@ impl Net { let node_id = node.node_id(); let (peer_index, receiver) = self .receivers - .get(node_id) + .get(&node_id) .unwrap_or_else(|| panic!("not connected peer {}", node.p2p_address())); let net_message = receiver.recv_timeout(timeout)?; info!( diff --git a/test/src/node.rs b/test/src/node.rs index ddb2e8a85f..3ad5d1e6b4 100644 --- a/test/src/node.rs +++ b/test/src/node.rs @@ -26,10 +26,11 @@ use std::convert::Into; use std::fs; use std::path::PathBuf; use std::process::{Child, Command, Stdio}; +use std::sync::{Arc, RwLock}; use std::thread::sleep; use std::time::{Duration, Instant}; -struct ProcessGuard { +pub(crate) struct ProcessGuard { pub name: String, pub child: Child, pub killed: bool, @@ -47,7 +48,12 @@ impl Drop for ProcessGuard { } } +#[derive(Clone)] pub struct Node { + inner: Arc, +} + +pub struct InnerNode { spec_node_name: String, working_dir: PathBuf, consensus: Consensus, @@ -55,8 +61,8 @@ pub struct Node { rpc_client: RpcClient, rpc_listen: String, - node_id: Option, // initialize when starts node - guard: Option, // initialize when starts node + node_id: RwLock>, // initialize when starts node + guard: RwLock>, // initialize when starts node } impl Node { @@ -106,7 +112,7 @@ impl Node { modifier(&mut app_config); fs::write(&app_config_path, toml::to_string(&app_config).unwrap()).unwrap(); - *self = Self::init(self.working_dir(), self.spec_node_name.clone()); + *self = Self::init(self.working_dir(), self.inner.spec_node_name.clone()); } pub fn modify_chain_spec(&mut self, modifier: M) @@ -119,7 +125,7 @@ impl Node { modifier(&mut chain_spec); 
fs::write(&chain_spec_path, toml::to_string(&chain_spec).unwrap()).unwrap(); - *self = Self::init(self.working_dir(), self.spec_node_name.clone()); + *self = Self::init(self.working_dir(), self.inner.spec_node_name.clone()); } // Initialize Node instance based on working directory @@ -151,44 +157,51 @@ impl Node { chain_spec.build_consensus().unwrap() }; Self { - spec_node_name, - working_dir, - consensus, - p2p_listen, - rpc_client, - rpc_listen, - node_id: None, - guard: None, + inner: Arc::new(InnerNode { + spec_node_name, + working_dir, + consensus, + p2p_listen, + rpc_client, + rpc_listen, + node_id: RwLock::new(None), + guard: RwLock::new(None), + }), } } pub fn rpc_client(&self) -> &RpcClient { - &self.rpc_client + &self.inner.rpc_client } pub fn working_dir(&self) -> PathBuf { - self.working_dir.clone() + self.inner.working_dir.clone() } pub fn log_path(&self) -> PathBuf { self.working_dir().join("data/logs/run.log") } - pub fn node_id(&self) -> &str { + pub fn node_id(&self) -> String { // peer_id.to_base58() - self.node_id.as_ref().expect("uninitialized node_id") + self.inner + .node_id + .read() + .expect("read locked node_id") + .clone() + .expect("uninitialized node_id") } pub fn consensus(&self) -> &Consensus { - &self.consensus + &self.inner.consensus } pub fn p2p_listen(&self) -> String { - self.p2p_listen.clone() + self.inner.p2p_listen.clone() } pub fn rpc_listen(&self) -> String { - self.rpc_listen.clone() + self.inner.rpc_listen.clone() } pub fn p2p_address(&self) -> String { @@ -679,21 +692,37 @@ impl Node { self.wait_tx_pool_ready(); - self.guard = Some(ProcessGuard { - name: self.spec_node_name.clone(), + self.set_process_guard(ProcessGuard { + name: self.inner.spec_node_name.clone(), child: child_process, killed: false, }); - self.node_id = Some(node_info.node_id); + self.set_node_id(node_info.node_id.as_str()); + } + + pub(crate) fn set_process_guard(&mut self, guard: ProcessGuard) { + let mut g = self.inner.guard.write().unwrap(); + *g = 
Some(guard); + } + + pub(crate) fn set_node_id(&mut self, node_id: &str) { + let mut n = self.inner.node_id.write().unwrap(); + *n = Some(node_id.to_owned()); + } + + pub(crate) fn take_guard(&mut self) -> Option { + let mut g = self.inner.guard.write().unwrap(); + g.take() } pub fn stop(&mut self) { - drop(self.guard.take()) + drop(self.take_guard()); } #[cfg(not(target_os = "windows"))] pub fn stop_gracefully(&mut self) { - if let Some(mut guard) = self.guard.take() { + let guard = self.take_guard(); + if let Some(mut guard) = guard { if !guard.killed { // send SIGINT to the child nix::sys::signal::kill( diff --git a/test/src/specs/p2p/whitelist.rs b/test/src/specs/p2p/whitelist.rs index 12bd86b06a..4009a54ead 100644 --- a/test/src/specs/p2p/whitelist.rs +++ b/test/src/specs/p2p/whitelist.rs @@ -49,7 +49,7 @@ impl Spec for WhitelistOnSessionLimit { peers.len() == 2 && peers .into_iter() - .all(|node| id_set.contains(&node.node_id.as_str())) + .all(|node| id_set.contains(&node.node_id)) }); if !is_connect_peer_num_eq_2 { @@ -81,7 +81,7 @@ impl Spec for WhitelistOnSessionLimit { peers.len() == 3 && peers .into_iter() - .all(|node| id_set.contains(&node.node_id.as_str())) + .all(|node| id_set.contains(&node.node_id)) }); if !is_connect_peer_num_eq_3 { From d51b4f9363235a7d47eeb0304070116663be5c5d Mon Sep 17 00:00:00 2001 From: YI Date: Wed, 17 Jan 2024 20:07:37 +0800 Subject: [PATCH 293/357] test: run SyncChurn mining and restart in different threads --- test/src/node.rs | 1 - test/src/specs/sync/sync_churn.rs | 49 ++++++++++++++++++++----------- 2 files changed, 32 insertions(+), 18 deletions(-) diff --git a/test/src/node.rs b/test/src/node.rs index 3ad5d1e6b4..0777cedffd 100644 --- a/test/src/node.rs +++ b/test/src/node.rs @@ -810,7 +810,6 @@ pub fn waiting_for_sync>(nodes: &[N]) { } } -// TODO it will be removed out later, in another PR pub fn make_bootnodes_for_all>(nodes: &mut [N]) { let node_multiaddrs: HashMap = nodes .iter() diff --git 
a/test/src/specs/sync/sync_churn.rs b/test/src/specs/sync/sync_churn.rs index 9b1e16930e..030b609558 100644 --- a/test/src/specs/sync/sync_churn.rs +++ b/test/src/specs/sync/sync_churn.rs @@ -2,20 +2,14 @@ use crate::node::{make_bootnodes_for_all, waiting_for_sync}; use crate::{Node, Spec}; use ckb_logger::info; use rand::Rng; +use std::sync::mpsc; +use std::thread; fn select_random_node<'a, R: Rng>(rng: &mut R, nodes: &'a mut [Node]) -> &'a mut Node { let index = rng.gen_range(0, nodes.len()); &mut nodes[index] } -fn randomly_restart(rng: &mut R, restart_probilibity: f64, node: &mut Node) { - let should_restart = rng.gen_bool(restart_probilibity); - if should_restart { - node.stop(); - node.start(); - } -} - pub struct SyncChurn; impl Spec for SyncChurn { @@ -24,15 +18,36 @@ impl Spec for SyncChurn { fn run(&self, nodes: &mut Vec) { make_bootnodes_for_all(nodes); - let mut rng = rand::thread_rng(); - let (mining_nodes, churn_nodes) = nodes.split_at_mut(1); - for _ in 0..1000 { - const RESTART_PROBABILITY: f64 = 0.1; - let mining_node = select_random_node(&mut rng, mining_nodes); - mining_node.mine(1); - let node = select_random_node(&mut rng, churn_nodes); - randomly_restart(&mut rng, RESTART_PROBABILITY, node); - } + let mut mining_nodes = nodes.clone(); + let mut churn_nodes = mining_nodes.split_off(1); + + let (restart_stopped_tx, restart_stopped_rx) = mpsc::channel(); + + let mining_thread = thread::spawn(move || { + let mut rng = rand::thread_rng(); + loop { + let mining_node = select_random_node(&mut rng, &mut mining_nodes); + mining_node.mine(1); + waiting_for_sync(&mining_nodes); + if restart_stopped_rx.try_recv().is_ok() { + break; + } + } + }); + + let restart_thread = thread::spawn(move || { + let mut rng = rand::thread_rng(); + for _ in 0..100 { + let node = select_random_node(&mut rng, &mut churn_nodes); + info!("Restarting node {}", node.node_id()); + node.stop(); + node.start(); + } + restart_stopped_tx.send(()).unwrap(); + }); + + 
mining_thread.join().unwrap(); + restart_thread.join().unwrap(); info!("Waiting for all nodes sync"); waiting_for_sync(&nodes); From 580b16711bccf7ebdc1ab932a13c66db7ba534c4 Mon Sep 17 00:00:00 2001 From: YI Date: Thu, 18 Jan 2024 13:03:01 +0800 Subject: [PATCH 294/357] test: cargo {clippy,fmt} --- test/src/node.rs | 4 ++-- test/src/specs/mod.rs | 4 ++-- test/src/specs/p2p/whitelist.rs | 10 ++-------- test/src/specs/sync/mod.rs | 4 ++-- test/src/specs/sync/sync_churn.rs | 2 +- 5 files changed, 9 insertions(+), 15 deletions(-) diff --git a/test/src/node.rs b/test/src/node.rs index 0777cedffd..309014f249 100644 --- a/test/src/node.rs +++ b/test/src/node.rs @@ -821,8 +821,8 @@ pub fn make_bootnodes_for_all>(nodes: &mut [N]) { }) .collect(); let other_node_addrs: Vec> = node_multiaddrs - .iter() - .map(|(id, _)| { + .keys() + .map(|id| { let addrs = node_multiaddrs .iter() .filter(|(other_id, _)| other_id.as_str() != id.as_str()) diff --git a/test/src/specs/mod.rs b/test/src/specs/mod.rs index f16e4fc849..d981a242a2 100644 --- a/test/src/specs/mod.rs +++ b/test/src/specs/mod.rs @@ -1,6 +1,7 @@ mod alert; mod consensus; mod dao; +mod fault_injection; mod hardfork; mod mining; mod p2p; @@ -8,11 +9,11 @@ mod relay; mod rpc; mod sync; mod tx_pool; -mod fault_injection; pub use alert::*; pub use consensus::*; pub use dao::*; +pub use fault_injection::*; pub use hardfork::*; pub use mining::*; pub use p2p::*; @@ -20,7 +21,6 @@ pub use relay::*; pub use rpc::*; pub use sync::*; pub use tx_pool::*; -pub use fault_injection::*; use crate::Node; use ckb_app_config::CKBAppConfig; diff --git a/test/src/specs/p2p/whitelist.rs b/test/src/specs/p2p/whitelist.rs index 4009a54ead..5141528e19 100644 --- a/test/src/specs/p2p/whitelist.rs +++ b/test/src/specs/p2p/whitelist.rs @@ -46,10 +46,7 @@ impl Spec for WhitelistOnSessionLimit { let rpc_client0 = node0.rpc_client(); let is_connect_peer_num_eq_2 = wait_until(10, || { let peers = rpc_client0.get_peers(); - peers.len() == 2 - && peers - 
.into_iter() - .all(|node| id_set.contains(&node.node_id)) + peers.len() == 2 && peers.into_iter().all(|node| id_set.contains(&node.node_id)) }); if !is_connect_peer_num_eq_2 { @@ -78,10 +75,7 @@ impl Spec for WhitelistOnSessionLimit { let rpc_client0 = node0.rpc_client(); let is_connect_peer_num_eq_3 = wait_until(10, || { let peers = rpc_client0.get_peers(); - peers.len() == 3 - && peers - .into_iter() - .all(|node| id_set.contains(&node.node_id)) + peers.len() == 3 && peers.into_iter().all(|node| id_set.contains(&node.node_id)) }); if !is_connect_peer_num_eq_3 { diff --git a/test/src/specs/sync/mod.rs b/test/src/specs/sync/mod.rs index 4246e3416e..0c9d9ec231 100644 --- a/test/src/specs/sync/mod.rs +++ b/test/src/specs/sync/mod.rs @@ -7,8 +7,8 @@ mod invalid_block; mod invalid_locator_size; mod last_common_header; mod sync_and_mine; -mod sync_timeout; mod sync_churn; +mod sync_timeout; pub use block_filter::*; pub use block_sync::*; @@ -19,5 +19,5 @@ pub use invalid_block::*; pub use invalid_locator_size::*; pub use last_common_header::*; pub use sync_and_mine::*; -pub use sync_timeout::*; pub use sync_churn::*; +pub use sync_timeout::*; diff --git a/test/src/specs/sync/sync_churn.rs b/test/src/specs/sync/sync_churn.rs index 030b609558..8ac49b2cb8 100644 --- a/test/src/specs/sync/sync_churn.rs +++ b/test/src/specs/sync/sync_churn.rs @@ -50,6 +50,6 @@ impl Spec for SyncChurn { restart_thread.join().unwrap(); info!("Waiting for all nodes sync"); - waiting_for_sync(&nodes); + waiting_for_sync(nodes); } } From bf5b98f4eae47b091ca6b903e4d1964b1df1bdd3 Mon Sep 17 00:00:00 2001 From: YI Date: Thu, 18 Jan 2024 13:09:33 +0800 Subject: [PATCH 295/357] test: document what SyncChurn does and its weakness --- test/src/specs/sync/sync_churn.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/test/src/specs/sync/sync_churn.rs b/test/src/specs/sync/sync_churn.rs index 8ac49b2cb8..aad8e2530c 100644 --- a/test/src/specs/sync/sync_churn.rs +++ 
b/test/src/specs/sync/sync_churn.rs @@ -12,6 +12,17 @@ fn select_random_node<'a, R: Rng>(rng: &mut R, nodes: &'a mut [Node]) -> &'a mut pub struct SyncChurn; +/// This test will start 5 nodes, and randomly restart 4 nodes in the middle of mining. +/// After all nodes are synced, the test is considered successful. +/// This test is used to test the robustness of the sync protocol. +/// If the sync protocol is not robust enough, the test will fail. +/// But this test is not a complete test, it can only test the robustness of the sync protocol to a certain extent. +/// Some weaknesses of this test: +/// 1. This test only consider the simple case of some nodes restarting in the middle of mining, +/// while other nodes are always mining correctly. +/// 2. This fault injection of restarting nodes is not comprehensive enough. +/// 3. Even if the test fails, we can't deterministically reproduce the same error. +/// We may need some foundationdb-like tools to deterministically reproduce the same error. 
impl Spec for SyncChurn { crate::setup!(num_nodes: 5); From 3b773351376da227f2c941dc7147fd0ecf2e87f3 Mon Sep 17 00:00:00 2001 From: YI Date: Thu, 18 Jan 2024 14:56:11 +0800 Subject: [PATCH 296/357] test: make two mining nodes in SyncChurn --- test/src/specs/sync/sync_churn.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/test/src/specs/sync/sync_churn.rs b/test/src/specs/sync/sync_churn.rs index aad8e2530c..63a309f8af 100644 --- a/test/src/specs/sync/sync_churn.rs +++ b/test/src/specs/sync/sync_churn.rs @@ -1,4 +1,5 @@ use crate::node::{make_bootnodes_for_all, waiting_for_sync}; +use crate::util::mining::out_ibd_mode; use crate::{Node, Spec}; use ckb_logger::info; use rand::Rng; @@ -28,9 +29,10 @@ impl Spec for SyncChurn { fn run(&self, nodes: &mut Vec) { make_bootnodes_for_all(nodes); + out_ibd_mode(nodes); let mut mining_nodes = nodes.clone(); - let mut churn_nodes = mining_nodes.split_off(1); + let mut churn_nodes = mining_nodes.split_off(2); let (restart_stopped_tx, restart_stopped_rx) = mpsc::channel(); From d26c625e1c86158a3a261a7ac9962b6367bf1233 Mon Sep 17 00:00:00 2001 From: YI Date: Thu, 18 Jan 2024 15:43:30 +0800 Subject: [PATCH 297/357] test: don't mine too many blocks in SyncChurn --- test/src/specs/sync/sync_churn.rs | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/test/src/specs/sync/sync_churn.rs b/test/src/specs/sync/sync_churn.rs index 63a309f8af..002cfa8e52 100644 --- a/test/src/specs/sync/sync_churn.rs +++ b/test/src/specs/sync/sync_churn.rs @@ -41,16 +41,24 @@ impl Spec for SyncChurn { loop { let mining_node = select_random_node(&mut rng, &mut mining_nodes); mining_node.mine(1); - waiting_for_sync(&mining_nodes); - if restart_stopped_rx.try_recv().is_ok() { + // Because the test that waiting for nodes to sync has a implicit maximum waiting time + // (currently 60 seconds, we can sync about 200 blocks per second, so a maxium blocks of 10000 is reasonable) + // and the implicit waiting time 
is not long enough when there are too many blocks to sync, + // so we stop mining when the tip block number is greater than 15000. + // Otherwise nodes may not be able to sync within the implicit waiting time. + let too_many_blocks = mining_node.get_tip_block_number() > 10000; + if too_many_blocks || restart_stopped_rx.try_recv().is_ok() { break; } + waiting_for_sync(&mining_nodes); } }); let restart_thread = thread::spawn(move || { let mut rng = rand::thread_rng(); - for _ in 0..100 { + // It takes about 1 second to restart a node. So restarting nodes 100 times takes about 100 seconds. + let num_restarts = 100; + for _ in 0..num_restarts { let node = select_random_node(&mut rng, &mut churn_nodes); info!("Restarting node {}", node.node_id()); node.stop(); From e47684c084003d2886271658775833cd1133ca3e Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Fri, 19 Jan 2024 19:47:34 +0800 Subject: [PATCH 298/357] Improve draw chart script, no vertial line label overcover --- devtools/block_sync/draw_sync_chart.py | 33 ++++++++++++++------------ 1 file changed, 18 insertions(+), 15 deletions(-) diff --git a/devtools/block_sync/draw_sync_chart.py b/devtools/block_sync/draw_sync_chart.py index 5ff8dad18d..b983bbc148 100755 --- a/devtools/block_sync/draw_sync_chart.py +++ b/devtools/block_sync/draw_sync_chart.py @@ -87,6 +87,8 @@ def process_task(task): import matplotlib.ticker as ticker +vlabels = [] + for duration, height, label in results: # for ckb_log_file, label in tasks: # print("ckb_log_file: ", ckb_log_file) @@ -103,24 +105,11 @@ def process_task(task): ax.hlines([11_500_000], 0, max(duration), colors="gray", linestyles="dashed") for i, h in enumerate(height): - if h % 1_000_000 == 0: - ax.vlines([duration[i]], 0, h, colors="gray", linestyles="dashed") - if i == len(height) -1 : alabels.append(((duration[i],h),label)) - if h == 11_000_000 or h == 11_500_000: - ax.vlines([duration[i]], 0, h, colors="black", linestyles="dashed") - voff=-60 - if h == 11_000_000: - voff=-75 
- ax.annotate(round(duration[i],1), - fontsize=8, - xy=(duration[i], 0), xycoords='data', - xytext=(0, voff), textcoords='offset points', - bbox=dict(boxstyle="round", fc="0.9"), - arrowprops=dict(arrowstyle="-"), - horizontalalignment='center', verticalalignment='bottom') + if h == 11_500_000: + vlabels.append((duration[i],h)) ax.get_yaxis().get_major_formatter().set_scientific(False) @@ -148,6 +137,7 @@ def process_task(task): # sort alabsle by .0.1 alabels.sort(key=lambda x: x[0][0]) +vlabels.sort(key=lambda x: x[0]) lheight=40 loffset=-40 @@ -167,6 +157,19 @@ def process_task(task): elif loffset > 0: lheight -= 20 +for index, (duration, h) in enumerate(vlabels): + ax.vlines([duration], 0, h, colors="black", linestyles="dashed") + voff=-60 + if index % 2 == 0: + voff=-75 + ax.annotate(round(duration, 1), + fontsize=8, + xy=(duration, 0), xycoords='data', + xytext=(0, voff), textcoords='offset points', + bbox=dict(boxstyle="round", fc="0.9"), + arrowprops=dict(arrowstyle="-"), + horizontalalignment='center', verticalalignment='bottom') + plt.axhline(y=11_500_000, color='blue', linestyle='--') From cfff87ba3b5ce97f9f04fff1a7e6ef58a66a7562 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 22 Jan 2024 11:42:59 +0800 Subject: [PATCH 299/357] Add unverified tip related metrics to ckb_metrics --- util/metrics/src/lib.rs | 35 +++++++++++++++++++++++++++++++++-- 1 file changed, 33 insertions(+), 2 deletions(-) diff --git a/util/metrics/src/lib.rs b/util/metrics/src/lib.rs index 64e06afbc5..c524806c3c 100644 --- a/util/metrics/src/lib.rs +++ b/util/metrics/src/lib.rs @@ -7,8 +7,9 @@ //! 
[`ckb-metrics-service`]: ../ckb_metrics_service/index.html use prometheus::{ - register_histogram, register_histogram_vec, register_int_counter, register_int_gauge, - register_int_gauge_vec, Histogram, HistogramVec, IntCounter, IntGauge, IntGaugeVec, + register_gauge, register_histogram, register_histogram_vec, register_int_counter, + register_int_gauge, register_int_gauge_vec, Gauge, Histogram, HistogramVec, IntCounter, + IntGauge, IntGaugeVec, }; use prometheus_static_metric::make_static_metric; use std::cell::Cell; @@ -51,6 +52,16 @@ make_static_metric! { pub struct Metrics { /// Gauge metric for CKB chain tip header number pub ckb_chain_tip: IntGauge, + /// CKB chain unverified tip header number + pub ckb_chain_unverified_tip: IntGauge, + /// ckb_chain asynchronous_process duration sum (seconds) + pub ckb_chain_async_process_block_duration_sum: Gauge, + /// ckb_chain consume_orphan thread's process_lonely_block duration sum (seconds) + pub ckb_chain_process_lonely_block_duration_sum: Gauge, + /// ckb_chain consume_unverified thread's consume_unverified_block duration sum (seconds) + pub ckb_chain_consume_unverified_block_duration_sum: Gauge, + /// ckb_chain consume_unverified thread's consume_unverified_block waiting for block duration sum (seconds) + pub ckb_chain_consume_unverified_block_waiting_block_duration_sum: Gauge, /// Gauge for tracking the size of all frozen data pub ckb_freezer_size: IntGauge, /// Counter for measuring the effective amount of data read @@ -87,6 +98,26 @@ pub struct Metrics { static METRICS: once_cell::sync::Lazy = once_cell::sync::Lazy::new(|| Metrics { ckb_chain_tip: register_int_gauge!("ckb_chain_tip", "The CKB chain tip header number").unwrap(), + ckb_chain_unverified_tip: register_int_gauge!( + "ckb_chain_unverified_tip", + "The CKB chain unverified tip header number" + ) + .unwrap(), + ckb_chain_async_process_block_duration_sum: register_gauge!( + "ckb_chain_async_process_block_duration", + "The CKB chain 
asynchronous_process_block duration sum" + ) + .unwrap(), + ckb_chain_process_lonely_block_duration_sum: register_gauge!( + "ckb_chain_process_lonely_block_duration", + "The CKB chain consume_orphan thread's process_lonely_block duration sum" + ) + .unwrap(), + ckb_chain_consume_unverified_block_duration_sum: register_gauge!( + "ckb_chain_consume_unverified_block_duration", + "The CKB chain consume_unverified thread's consume_unverified_block duration sum" + ) + .unwrap(), ckb_freezer_size: register_int_gauge!("ckb_freezer_size", "The CKB freezer size").unwrap(), ckb_freezer_read: register_int_counter!("ckb_freezer_read", "The CKB freezer read").unwrap(), ckb_relay_transaction_short_id_collide: register_int_counter!( From 44792e7870d77475f291f283f298273d95ed6a6a Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 22 Jan 2024 11:44:30 +0800 Subject: [PATCH 300/357] Add minstant to ckb_chain dependency --- Cargo.lock | 28 ++++++++++++++++++++++++++++ chain/Cargo.toml | 1 + 2 files changed, 29 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 1e537eb52c..7b3b6e6d67 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -715,6 +715,7 @@ dependencies = [ "crossbeam", "faux", "lazy_static", + "minstant", "tempfile", "tokio", ] @@ -2055,6 +2056,16 @@ dependencies = [ "typenum", ] +[[package]] +name = "ctor" +version = "0.1.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" +dependencies = [ + "quote", + "syn 1.0.109", +] + [[package]] name = "ctrlc" version = "3.4.1" @@ -3348,6 +3359,17 @@ dependencies = [ "adler", ] +[[package]] +name = "minstant" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8dfc09c8abbe145769b6d51fd03f84fdd459906cbd6ac54e438708f016b40bd" +dependencies = [ + "ctor", + "libc", + "wasi 0.7.0", +] + [[package]] name = "mio" version = "0.8.9" @@ -5610,6 +5632,12 @@ dependencies = [ "try-lock", ] +[[package]] 
+name = "wasi" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b89c3ce4ce14bdc6fb6beaf9ec7928ca331de5df7e5ea278375642a2f478570d" + [[package]] name = "wasi" version = "0.9.0+wasi-snapshot-preview1" diff --git a/chain/Cargo.toml b/chain/Cargo.toml index ab7ed48001..92ee3b3399 100644 --- a/chain/Cargo.toml +++ b/chain/Cargo.toml @@ -33,6 +33,7 @@ crossbeam = "0.8.2" ckb-network = { path = "../network", version = "= 0.114.0-pre" } tokio = { version = "1", features = ["sync"] } ckb-tx-pool = { path = "../tx-pool", version = "= 0.114.0-pre"} +minstant = "0.1.4" [dev-dependencies] ckb-test-chain-utils = { path = "../util/test-chain-utils", version = "= 0.114.0-pre" } From f8155268a8a4ae2ad029ef789f5f5e1f92a3613a Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 22 Jan 2024 11:44:36 +0800 Subject: [PATCH 301/357] Collect ckb_chain timecost and unverified_tip metrics --- chain/src/chain_service.rs | 4 ++++ chain/src/consume_orphan.rs | 7 +++++++ chain/src/consume_unverified.rs | 12 ++++++++++-- 3 files changed, 21 insertions(+), 2 deletions(-) diff --git a/chain/src/chain_service.rs b/chain/src/chain_service.rs index b28e49212d..2f54edf7bb 100644 --- a/chain/src/chain_service.rs +++ b/chain/src/chain_service.rs @@ -149,7 +149,11 @@ impl ChainService { Ok(Request { responder, arguments: lonely_block }) => { // asynchronous_process_block doesn't interact with tx-pool, // no need to pause tx-pool's chunk_process here. 
+ let _trace_now = minstant::Instant::now(); self.asynchronous_process_block(lonely_block); + if let Some(handle) = ckb_metrics::handle(){ + handle.ckb_chain_async_process_block_duration_sum.add(_trace_now.elapsed().as_secs_f64()) + } let _ = responder.send(()); }, _ => { diff --git a/chain/src/consume_orphan.rs b/chain/src/consume_orphan.rs index 6550e54616..e89cc759e2 100644 --- a/chain/src/consume_orphan.rs +++ b/chain/src/consume_orphan.rs @@ -56,6 +56,9 @@ impl ConsumeDescendantProcessor { block_hash.clone(), total_difficulty, )); + if let Some(handle) = ckb_metrics::handle() { + handle.ckb_chain_unverified_tip.set(block_number as i64); + } debug!( "set unverified_tip to {}-{}, while unverified_tip - verified_tip = {}", block_number.clone(), @@ -224,7 +227,11 @@ impl ConsumeOrphan { Ok(lonely_block) => { let lonely_block_epoch: EpochNumberWithFraction = lonely_block.block().epoch(); + let _trace_now = minstant::Instant::now(); self.process_lonely_block(lonely_block); + if let Some(handle) = ckb_metrics::handle() { + handle.ckb_chain_process_lonely_block_duration_sum.add(_trace_now.elapsed().as_secs_f64()) + } if lonely_block_epoch.number() > last_check_expired_orphans_epoch { self.clean_expired_orphan_blocks(); diff --git a/chain/src/consume_unverified.rs b/chain/src/consume_unverified.rs index 4dc65d5938..9e36d769e2 100644 --- a/chain/src/consume_unverified.rs +++ b/chain/src/consume_unverified.rs @@ -70,16 +70,24 @@ impl ConsumeUnverifiedBlocks { pub(crate) fn start(mut self) { loop { - let begin_loop = std::time::Instant::now(); + let _trace_begin_loop = minstant::Instant::now(); select! 
{ recv(self.unverified_block_rx) -> msg => match msg { Ok(unverified_task) => { // process this unverified block trace!("got an unverified block, wait cost: {:?}", begin_loop.elapsed()); + if let Some(handle) = ckb_metrics::handle() { + handle.ckb_chain_consume_unverified_block_waiting_block_duration_sum.add(_trace_begin_loop.elapsed().as_secs_f64()) + } let _ = self.tx_pool_controller.suspend_chunk_process(); + + let _trace_now = minstant::Instant::now(); self.processor.consume_unverified_blocks(unverified_task); + if let Some(handle) = ckb_metrics::handle() { + handle.ckb_chain_consume_unverified_block_duration_sum.add(_trace_now.elapsed().as_secs_f64()) + } + let _ = self.tx_pool_controller.continue_chunk_process(); - trace!("consume_unverified_blocks cost: {:?}", begin_loop.elapsed()); }, Err(err) => { error!("unverified_block_rx err: {}", err); From dd67707c81c0126db2fd0ee2cf2e7b0e08f43044 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 22 Jan 2024 13:30:36 +0800 Subject: [PATCH 302/357] Collect execute_callback timecost for metrics --- chain/src/consume_unverified.rs | 1 - chain/src/lib.rs | 8 +++ util/metrics/src/lib.rs | 110 ++++++++++++++++++-------------- 3 files changed, 69 insertions(+), 50 deletions(-) diff --git a/chain/src/consume_unverified.rs b/chain/src/consume_unverified.rs index 9e36d769e2..363bc3a47e 100644 --- a/chain/src/consume_unverified.rs +++ b/chain/src/consume_unverified.rs @@ -75,7 +75,6 @@ impl ConsumeUnverifiedBlocks { recv(self.unverified_block_rx) -> msg => match msg { Ok(unverified_task) => { // process this unverified block - trace!("got an unverified block, wait cost: {:?}", begin_loop.elapsed()); if let Some(handle) = ckb_metrics::handle() { handle.ckb_chain_consume_unverified_block_waiting_block_duration_sum.add(_trace_begin_loop.elapsed().as_secs_f64()) } diff --git a/chain/src/lib.rs b/chain/src/lib.rs index 89537a5d38..9ee595fb7a 100644 --- a/chain/src/lib.rs +++ b/chain/src/lib.rs @@ -77,7 +77,15 @@ pub struct 
LonelyBlockWithCallback { impl LonelyBlockWithCallback { pub(crate) fn execute_callback(self, verify_result: VerifyResult) { if let Some(verify_callback) = self.verify_callback { + let _trace_now = minstant::Instant::now(); + verify_callback(verify_result); + + if let Some(handle) = ckb_metrics::handle() { + handle + .ckb_chain_execute_callback_duration_sum + .add(_trace_now.elapsed().as_secs_f64()) + } } } diff --git a/util/metrics/src/lib.rs b/util/metrics/src/lib.rs index c524806c3c..f4544a7efd 100644 --- a/util/metrics/src/lib.rs +++ b/util/metrics/src/lib.rs @@ -62,6 +62,8 @@ pub struct Metrics { pub ckb_chain_consume_unverified_block_duration_sum: Gauge, /// ckb_chain consume_unverified thread's consume_unverified_block waiting for block duration sum (seconds) pub ckb_chain_consume_unverified_block_waiting_block_duration_sum: Gauge, + /// ckb_chain execute_callback duration sum (seconds) + pub ckb_chain_execute_callback_duration_sum: Gauge, /// Gauge for tracking the size of all frozen data pub ckb_freezer_size: IntGauge, /// Counter for measuring the effective amount of data read @@ -96,95 +98,104 @@ pub struct Metrics { pub ckb_network_ban_peer: IntCounter, } -static METRICS: once_cell::sync::Lazy = once_cell::sync::Lazy::new(|| Metrics { - ckb_chain_tip: register_int_gauge!("ckb_chain_tip", "The CKB chain tip header number").unwrap(), - ckb_chain_unverified_tip: register_int_gauge!( +static METRICS: once_cell::sync::Lazy = once_cell::sync::Lazy::new(|| { + Metrics { + ckb_chain_tip: register_int_gauge!("ckb_chain_tip", "The CKB chain tip header number").unwrap(), + ckb_chain_unverified_tip: register_int_gauge!( "ckb_chain_unverified_tip", "The CKB chain unverified tip header number" ) - .unwrap(), - ckb_chain_async_process_block_duration_sum: register_gauge!( - "ckb_chain_async_process_block_duration", + .unwrap(), + ckb_chain_async_process_block_duration_sum: register_gauge!( + "ckb_chain_async_process_block_duration_sum", "The CKB chain 
asynchronous_process_block duration sum" ) - .unwrap(), - ckb_chain_process_lonely_block_duration_sum: register_gauge!( - "ckb_chain_process_lonely_block_duration", + .unwrap(), + ckb_chain_process_lonely_block_duration_sum: register_gauge!( + "ckb_chain_process_lonely_block_duration_sum", "The CKB chain consume_orphan thread's process_lonely_block duration sum" ) - .unwrap(), - ckb_chain_consume_unverified_block_duration_sum: register_gauge!( - "ckb_chain_consume_unverified_block_duration", + .unwrap(), + ckb_chain_consume_unverified_block_duration_sum: register_gauge!( + "ckb_chain_consume_unverified_block_duration_sum", "The CKB chain consume_unverified thread's consume_unverified_block duration sum" ) - .unwrap(), - ckb_freezer_size: register_int_gauge!("ckb_freezer_size", "The CKB freezer size").unwrap(), - ckb_freezer_read: register_int_counter!("ckb_freezer_read", "The CKB freezer read").unwrap(), - ckb_relay_transaction_short_id_collide: register_int_counter!( + .unwrap(), + ckb_chain_consume_unverified_block_waiting_block_duration_sum: register_gauge!( + "ckb_chain_consume_unverified_block_waiting_block_duration_sum", + "The CKB chain consume_unverified thread's consume_unverified_block waiting for block duration sum" + ).unwrap(), + ckb_chain_execute_callback_duration_sum: register_gauge!( + "ckb_chain_execute_callback_duration_sum", + "The CKB chain execute_callback duration sum" + ).unwrap(), + ckb_freezer_size: register_int_gauge!("ckb_freezer_size", "The CKB freezer size").unwrap(), + ckb_freezer_read: register_int_counter!("ckb_freezer_read", "The CKB freezer read").unwrap(), + ckb_relay_transaction_short_id_collide: register_int_counter!( "ckb_relay_transaction_short_id_collide", "The CKB relay transaction short id collide" ) - .unwrap(), - ckb_relay_cb_verify_duration: register_histogram!( + .unwrap(), + ckb_relay_cb_verify_duration: register_histogram!( "ckb_relay_cb_verify_duration", "The CKB relay compact block verify duration" ) - .unwrap(), - 
ckb_block_process_duration: register_histogram!( + .unwrap(), + ckb_block_process_duration: register_histogram!( "ckb_block_process_duration", "The CKB block process duration" ) - .unwrap(), - ckb_relay_cb_transaction_count: register_int_counter!( + .unwrap(), + ckb_relay_cb_transaction_count: register_int_counter!( "ckb_relay_cb_transaction_count", "The CKB relay compact block transaction count" ) - .unwrap(), - ckb_relay_cb_reconstruct_ok: register_int_counter!( + .unwrap(), + ckb_relay_cb_reconstruct_ok: register_int_counter!( "ckb_relay_cb_reconstruct_ok", "The CKB relay compact block reconstruct ok count" ) - .unwrap(), - ckb_relay_cb_fresh_tx_cnt: register_int_counter!( + .unwrap(), + ckb_relay_cb_fresh_tx_cnt: register_int_counter!( "ckb_relay_cb_fresh_tx_cnt", "The CKB relay compact block fresh tx count" ) - .unwrap(), - ckb_relay_cb_reconstruct_fail: register_int_counter!( + .unwrap(), + ckb_relay_cb_reconstruct_fail: register_int_counter!( "ckb_relay_cb_reconstruct_fail", "The CKB relay compact block reconstruct fail count" ) - .unwrap(), - ckb_shared_best_number: register_int_gauge!( + .unwrap(), + ckb_shared_best_number: register_int_gauge!( "ckb_shared_best_number", "The CKB shared best header number" ) - .unwrap(), - ckb_sys_mem_process: CkbSysMemProcessStatistics::from( - ®ister_int_gauge_vec!( + .unwrap(), + ckb_sys_mem_process: CkbSysMemProcessStatistics::from( + ®ister_int_gauge_vec!( "ckb_sys_mem_process", "CKB system memory for process statistics", &["type"] ) - .unwrap(), - ), - ckb_sys_mem_jemalloc: CkbSysMemJemallocStatistics::from( - ®ister_int_gauge_vec!( + .unwrap(), + ), + ckb_sys_mem_jemalloc: CkbSysMemJemallocStatistics::from( + ®ister_int_gauge_vec!( "ckb_sys_mem_jemalloc", "CKB system memory for jemalloc statistics", &["type"] ) - .unwrap(), - ), - ckb_tx_pool_entry: CkbTxPoolEntryStatistics::from( - ®ister_int_gauge_vec!( + .unwrap(), + ), + ckb_tx_pool_entry: CkbTxPoolEntryStatistics::from( + ®ister_int_gauge_vec!( 
"ckb_tx_pool_entry", "CKB tx-pool entry status statistics", &["type"] ) - .unwrap(), - ), - ckb_message_bytes: register_histogram_vec!( + .unwrap(), + ), + ckb_message_bytes: register_histogram_vec!( "ckb_message_bytes", "The CKB message bytes", &["direction", "protocol_name", "msg_item_name", "status_code"], @@ -192,19 +203,20 @@ static METRICS: once_cell::sync::Lazy = once_cell::sync::Lazy::new(|| M 500.0, 1000.0, 2000.0, 5000.0, 10000.0, 20000.0, 50000.0, 100000.0, 200000.0, 500000.0 ] ) - .unwrap(), + .unwrap(), - ckb_sys_mem_rocksdb: register_int_gauge_vec!( + ckb_sys_mem_rocksdb: register_int_gauge_vec!( "ckb_sys_mem_rocksdb", "CKB system memory for rocksdb statistics", &["type", "cf"] ) - .unwrap(), - ckb_network_ban_peer: register_int_counter!( + .unwrap(), + ckb_network_ban_peer: register_int_counter!( "ckb_network_ban_peer", "CKB network baned peer count" ) - .unwrap(), + .unwrap(), + } }); /// Indicate whether the metrics service is enabled. From 8d1e48cf50b498189ab37df150bea2fc4f2cd69c Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 17 Jan 2024 19:02:47 +0800 Subject: [PATCH 303/357] Unverified block in db Signed-off-by: Eval EXEC --- chain/src/chain_service.rs | 4 +-- chain/src/consume_orphan.rs | 23 +++++++++---- chain/src/consume_unverified.rs | 46 ++++++++++++++++++++++--- chain/src/lib.rs | 61 ++++++++++++++++++++++++++++++++- 4 files changed, 119 insertions(+), 15 deletions(-) diff --git a/chain/src/chain_service.rs b/chain/src/chain_service.rs index 2f54edf7bb..b436117a37 100644 --- a/chain/src/chain_service.rs +++ b/chain/src/chain_service.rs @@ -5,7 +5,7 @@ use crate::consume_unverified::ConsumeUnverifiedBlocks; use crate::utils::orphan_block_pool::OrphanBlockPool; use crate::{ tell_synchronizer_to_punish_the_bad_peer, ChainController, LonelyBlockWithCallback, - ProcessBlockRequest, UnverifiedBlock, + ProcessBlockRequest, UnverifiedBlock, UnverifiedBlockHash, }; use ckb_channel::{self as channel, select, Receiver, SendError, Sender}; use 
ckb_constant::sync::BLOCK_DOWNLOAD_WINDOW; @@ -32,7 +32,7 @@ pub fn start_chain_services(builder: ChainServicesBuilder) -> ChainController { let (unverified_queue_stop_tx, unverified_queue_stop_rx) = ckb_channel::bounded::<()>(1); let (unverified_tx, unverified_rx) = - channel::bounded::(BLOCK_DOWNLOAD_WINDOW as usize * 3); + channel::bounded::(BLOCK_DOWNLOAD_WINDOW as usize * 512); let consumer_unverified_thread = thread::Builder::new() .name("consume_unverified_blocks".into()) diff --git a/chain/src/consume_orphan.rs b/chain/src/consume_orphan.rs index e89cc759e2..54dd3b7140 100644 --- a/chain/src/consume_orphan.rs +++ b/chain/src/consume_orphan.rs @@ -1,7 +1,7 @@ use crate::utils::orphan_block_pool::OrphanBlockPool; use crate::{ tell_synchronizer_to_punish_the_bad_peer, LonelyBlockWithCallback, UnverifiedBlock, - VerifyResult, + UnverifiedBlockHash, VerifyResult, }; use ckb_channel::{select, Receiver, SendError, Sender}; use ckb_error::{Error, InternalErrorKind}; @@ -19,15 +19,23 @@ use std::sync::Arc; pub(crate) struct ConsumeDescendantProcessor { pub shared: Shared, - pub unverified_blocks_tx: Sender, + pub unverified_blocks_tx: Sender, pub verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, } impl ConsumeDescendantProcessor { - fn send_unverified_block(&self, unverified_block: UnverifiedBlock, total_difficulty: U256) { - let block_number = unverified_block.block().number(); - let block_hash = unverified_block.block().hash(); + fn send_unverified_block(&self, unverified_block: UnverifiedBlockHash, total_difficulty: U256) { + let block_number = unverified_block + .unverified_block + .lonely_block + .block_number_and_hash + .number(); + let block_hash = unverified_block + .unverified_block + .lonely_block + .block_number_and_hash + .hash(); match self.unverified_blocks_tx.send(unverified_block) { Ok(_) => { @@ -157,7 +165,8 @@ impl ConsumeDescendantProcessor { let unverified_block: UnverifiedBlock = lonely_block.combine_parent_header(parent_header); - 
self.send_unverified_block(unverified_block, total_difficulty) + let unverified_block_hash: UnverifiedBlockHash = unverified_block.into(); + self.send_unverified_block(unverified_block_hash, total_difficulty) } Err(err) => { @@ -201,7 +210,7 @@ impl ConsumeOrphan { pub(crate) fn new( shared: Shared, orphan_block_pool: Arc, - unverified_blocks_tx: Sender, + unverified_blocks_tx: Sender, lonely_blocks_rx: Receiver, verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, stop_rx: Receiver<()>, diff --git a/chain/src/consume_unverified.rs b/chain/src/consume_unverified.rs index 363bc3a47e..d178bb8a4b 100644 --- a/chain/src/consume_unverified.rs +++ b/chain/src/consume_unverified.rs @@ -1,6 +1,7 @@ use crate::{ tell_synchronizer_to_punish_the_bad_peer, utils::forkchanges::ForkChanges, GlobalIndex, - LonelyBlock, LonelyBlockWithCallback, TruncateRequest, UnverifiedBlock, VerifyResult, + LonelyBlock, LonelyBlockWithCallback, TruncateRequest, UnverifiedBlock, UnverifiedBlockHash, + VerifyResult, }; use ckb_channel::{select, Receiver}; use ckb_error::{Error, InternalErrorKind}; @@ -10,7 +11,7 @@ use ckb_logger::{debug, error, info, log_enabled_target, trace_target}; use ckb_merkle_mountain_range::leaf_index_to_mmr_size; use ckb_proposal_table::ProposalTable; use ckb_shared::block_status::BlockStatus; -use ckb_shared::types::VerifyFailedBlockInfo; +use ckb_shared::types::{BlockNumberAndHash, VerifyFailedBlockInfo}; use ckb_shared::Shared; use ckb_store::{attach_block_cell, detach_block_cell, ChainStore, StoreTransaction}; use ckb_systemtime::unix_time_as_millis; @@ -39,7 +40,7 @@ pub(crate) struct ConsumeUnverifiedBlockProcessor { pub(crate) struct ConsumeUnverifiedBlocks { tx_pool_controller: TxPoolController, - unverified_block_rx: Receiver, + unverified_block_rx: Receiver, truncate_block_rx: Receiver, stop_rx: Receiver<()>, @@ -49,7 +50,7 @@ pub(crate) struct ConsumeUnverifiedBlocks { impl ConsumeUnverifiedBlocks { pub(crate) fn new( shared: Shared, - 
unverified_blocks_rx: Receiver, + unverified_blocks_rx: Receiver, truncate_block_rx: Receiver, proposal_table: ProposalTable, verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, @@ -115,7 +116,42 @@ impl ConsumeUnverifiedBlocks { } impl ConsumeUnverifiedBlockProcessor { - pub(crate) fn consume_unverified_blocks(&mut self, unverified_block: UnverifiedBlock) { + fn load_full_unverified_block(&self, unverified_block: UnverifiedBlockHash) -> UnverifiedBlock { + let block_view = self + .shared + .store() + .get_block( + &unverified_block + .unverified_block + .lonely_block + .block_number_and_hash + .hash(), + ) + .expect("block stored"); + let parent_header_view = self + .shared + .store() + .get_block_header(&block_view.data().header().raw().parent_hash()) + .expect("parent header stored"); + + UnverifiedBlock { + unverified_block: LonelyBlockWithCallback { + lonely_block: LonelyBlock { + block: Arc::new(block_view), + peer_id_with_msg_bytes: unverified_block + .unverified_block + .lonely_block + .peer_id_with_msg_bytes, + switch: unverified_block.unverified_block.lonely_block.switch, + }, + verify_callback: unverified_block.unverified_block.verify_callback, + }, + parent_header: parent_header_view, + } + } + + pub(crate) fn consume_unverified_blocks(&mut self, unverified_block_hash: UnverifiedBlockHash) { + let unverified_block = self.load_full_unverified_block(unverified_block_hash); // process this unverified block let verify_result = self.verify_block(&unverified_block); match &verify_result { diff --git a/chain/src/lib.rs b/chain/src/lib.rs index 9ee595fb7a..3ed896374b 100644 --- a/chain/src/lib.rs +++ b/chain/src/lib.rs @@ -8,7 +8,7 @@ use ckb_error::{is_internal_db_error, Error}; use ckb_logger::{debug, error}; use ckb_network::PeerIndex; -use ckb_shared::types::VerifyFailedBlockInfo; +use ckb_shared::types::{BlockNumberAndHash, VerifyFailedBlockInfo}; use ckb_types::core::service::Request; use ckb_types::core::{BlockNumber, BlockView, HeaderView}; use 
ckb_types::packed::Byte32; @@ -66,6 +66,35 @@ impl LonelyBlock { } } +/// LonelyBlock is the block which we have not check weather its parent is stored yet +#[derive(Clone)] +pub struct LonelyBlockHash { + /// block + pub block_number_and_hash: BlockNumberAndHash, + + /// This block is received from which peer, and the message bytes size + pub peer_id_with_msg_bytes: Option<(PeerIndex, u64)>, + + /// The Switch to control the verification process + pub switch: Option, +} + +/// LonelyBlockWithCallback Combine LonelyBlock with an optional verify_callback +pub struct LonelyBlockHashWithCallback { + /// The LonelyBlock + pub lonely_block: LonelyBlockHash, + /// The optional verify_callback + pub verify_callback: Option, +} + +impl LonelyBlockHashWithCallback { + pub(crate) fn execute_callback(self, verify_result: VerifyResult) { + if let Some(verify_callback) = self.verify_callback { + verify_callback(verify_result); + } + } +} + /// LonelyBlockWithCallback Combine LonelyBlock with an optional verify_callback pub struct LonelyBlockWithCallback { /// The LonelyBlock @@ -114,6 +143,36 @@ impl LonelyBlockWithCallback { } } +pub(crate) struct UnverifiedBlockHash { + pub unverified_block: LonelyBlockHashWithCallback, + pub parent_header: HeaderView, +} + +impl UnverifiedBlockHash { + fn execute_callback(self, verify_result: VerifyResult) { + self.unverified_block.execute_callback(verify_result) + } +} + +impl From for UnverifiedBlockHash { + fn from(value: UnverifiedBlock) -> Self { + Self { + unverified_block: LonelyBlockHashWithCallback { + lonely_block: LonelyBlockHash { + block_number_and_hash: BlockNumberAndHash { + number: value.block().number(), + hash: value.block().hash(), + }, + peer_id_with_msg_bytes: value.peer_id_with_msg_bytes(), + switch: value.unverified_block.switch(), + }, + verify_callback: value.unverified_block.verify_callback, + }, + parent_header: value.parent_header, + } + } +} + pub(crate) struct UnverifiedBlock { pub unverified_block: 
LonelyBlockWithCallback, pub parent_header: HeaderView, From d753cdcea6fa11fca545d232e463acd8c032bfbd Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 18 Jan 2024 10:51:02 +0800 Subject: [PATCH 304/357] UnverifiedBlockHash size to 3 --- chain/src/chain_service.rs | 2 +- chain/src/consume_unverified.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/chain/src/chain_service.rs b/chain/src/chain_service.rs index b436117a37..a7fa7915e8 100644 --- a/chain/src/chain_service.rs +++ b/chain/src/chain_service.rs @@ -32,7 +32,7 @@ pub fn start_chain_services(builder: ChainServicesBuilder) -> ChainController { let (unverified_queue_stop_tx, unverified_queue_stop_rx) = ckb_channel::bounded::<()>(1); let (unverified_tx, unverified_rx) = - channel::bounded::(BLOCK_DOWNLOAD_WINDOW as usize * 512); + channel::bounded::(BLOCK_DOWNLOAD_WINDOW as usize * 3); let consumer_unverified_thread = thread::Builder::new() .name("consume_unverified_blocks".into()) diff --git a/chain/src/consume_unverified.rs b/chain/src/consume_unverified.rs index d178bb8a4b..5b56a4bbfb 100644 --- a/chain/src/consume_unverified.rs +++ b/chain/src/consume_unverified.rs @@ -315,7 +315,7 @@ impl ConsumeUnverifiedBlockProcessor { let db_txn = Arc::new(self.shared.store().begin_transaction()); if new_best_block { - debug!( + info!( "[verify block] new best block found: {} => {:#x}, difficulty diff = {:#x}, unverified_tip: {}", block.header().number(), block.header().hash(), From 49853e9bff04aee6f97d70c1b9ff28c4a8a98cc6 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 23 Jan 2024 17:35:41 +0800 Subject: [PATCH 305/357] Comment MemoryMap remove shrink_to_fit --- shared/src/types/header_map/memory.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/shared/src/types/header_map/memory.rs b/shared/src/types/header_map/memory.rs index ebad478089..1fac4cbd04 100644 --- a/shared/src/types/header_map/memory.rs +++ b/shared/src/types/header_map/memory.rs @@ -101,7 +101,9 @@ impl 
MemoryMap { pub(crate) fn remove(&self, key: &Byte32) -> Option { let mut guard = self.0.write(); let ret = guard.remove(key); - shrink_to_fit!(guard, SHRINK_THRESHOLD); + + // TODO: @eval-exec call shrink_to_fit only when CKB is in non-IBD mode + // shrink_to_fit!(guard, SHRINK_THRESHOLD); ret.map(|inner| (key.clone(), inner).into()) } From d20cf5058f090787f816d5da0dac583815ce34cf Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 23 Jan 2024 17:41:01 +0800 Subject: [PATCH 306/357] Remove UnverifiedBlockHash since consume_unverified can load parent_header from db --- chain/src/lib.rs | 55 ++++++++++++++---------------------------------- 1 file changed, 16 insertions(+), 39 deletions(-) diff --git a/chain/src/lib.rs b/chain/src/lib.rs index 3ed896374b..ab00fc8927 100644 --- a/chain/src/lib.rs +++ b/chain/src/lib.rs @@ -95,6 +95,22 @@ impl LonelyBlockHashWithCallback { } } +impl Into for LonelyBlockWithCallback { + fn into(self) -> LonelyBlockHashWithCallback { + LonelyBlockHashWithCallback { + lonely_block: LonelyBlockHash { + block_number_and_hash: BlockNumberAndHash { + number: self.lonely_block.block.number(), + hash: self.lonely_block.block.hash(), + }, + peer_id_with_msg_bytes: self.lonely_block.peer_id_with_msg_bytes, + switch: self.lonely_block.switch, + }, + verify_callback: self.verify_callback, + } + } +} + /// LonelyBlockWithCallback Combine LonelyBlock with an optional verify_callback pub struct LonelyBlockWithCallback { /// The LonelyBlock @@ -134,45 +150,6 @@ impl LonelyBlockWithCallback { } } -impl LonelyBlockWithCallback { - pub(crate) fn combine_parent_header(self, parent_header: HeaderView) -> UnverifiedBlock { - UnverifiedBlock { - unverified_block: self, - parent_header, - } - } -} - -pub(crate) struct UnverifiedBlockHash { - pub unverified_block: LonelyBlockHashWithCallback, - pub parent_header: HeaderView, -} - -impl UnverifiedBlockHash { - fn execute_callback(self, verify_result: VerifyResult) { - 
self.unverified_block.execute_callback(verify_result) - } -} - -impl From for UnverifiedBlockHash { - fn from(value: UnverifiedBlock) -> Self { - Self { - unverified_block: LonelyBlockHashWithCallback { - lonely_block: LonelyBlockHash { - block_number_and_hash: BlockNumberAndHash { - number: value.block().number(), - hash: value.block().hash(), - }, - peer_id_with_msg_bytes: value.peer_id_with_msg_bytes(), - switch: value.unverified_block.switch(), - }, - verify_callback: value.unverified_block.verify_callback, - }, - parent_header: value.parent_header, - } - } -} - pub(crate) struct UnverifiedBlock { pub unverified_block: LonelyBlockWithCallback, pub parent_header: HeaderView, From 2680ca7e6317dd37d1e9750a169776671aee0b17 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 23 Jan 2024 17:41:32 +0800 Subject: [PATCH 307/357] ConsumeOrphan only need pass LonelyBlockHashWithCallback to ConsumeUnverified --- chain/src/chain_service.rs | 6 ++--- chain/src/consume_orphan.rs | 43 ++++++++++++++------------------- chain/src/consume_unverified.rs | 39 ++++++++++++++---------------- 3 files changed, 39 insertions(+), 49 deletions(-) diff --git a/chain/src/chain_service.rs b/chain/src/chain_service.rs index a7fa7915e8..48d21c060b 100644 --- a/chain/src/chain_service.rs +++ b/chain/src/chain_service.rs @@ -4,8 +4,8 @@ use crate::consume_unverified::ConsumeUnverifiedBlocks; use crate::utils::orphan_block_pool::OrphanBlockPool; use crate::{ - tell_synchronizer_to_punish_the_bad_peer, ChainController, LonelyBlockWithCallback, - ProcessBlockRequest, UnverifiedBlock, UnverifiedBlockHash, + tell_synchronizer_to_punish_the_bad_peer, ChainController, LonelyBlockHashWithCallback, + LonelyBlockWithCallback, ProcessBlockRequest, }; use ckb_channel::{self as channel, select, Receiver, SendError, Sender}; use ckb_constant::sync::BLOCK_DOWNLOAD_WINDOW; @@ -32,7 +32,7 @@ pub fn start_chain_services(builder: ChainServicesBuilder) -> ChainController { let (unverified_queue_stop_tx, 
unverified_queue_stop_rx) = ckb_channel::bounded::<()>(1); let (unverified_tx, unverified_rx) = - channel::bounded::(BLOCK_DOWNLOAD_WINDOW as usize * 3); + channel::bounded::(BLOCK_DOWNLOAD_WINDOW as usize * 3); let consumer_unverified_thread = thread::Builder::new() .name("consume_unverified_blocks".into()) diff --git a/chain/src/consume_orphan.rs b/chain/src/consume_orphan.rs index 54dd3b7140..1ac81803d9 100644 --- a/chain/src/consume_orphan.rs +++ b/chain/src/consume_orphan.rs @@ -1,7 +1,7 @@ use crate::utils::orphan_block_pool::OrphanBlockPool; use crate::{ - tell_synchronizer_to_punish_the_bad_peer, LonelyBlockWithCallback, UnverifiedBlock, - UnverifiedBlockHash, VerifyResult, + tell_synchronizer_to_punish_the_bad_peer, LonelyBlockHashWithCallback, LonelyBlockWithCallback, + VerifyResult, }; use ckb_channel::{select, Receiver, SendError, Sender}; use ckb_error::{Error, InternalErrorKind}; @@ -19,32 +19,28 @@ use std::sync::Arc; pub(crate) struct ConsumeDescendantProcessor { pub shared: Shared, - pub unverified_blocks_tx: Sender, + pub unverified_blocks_tx: Sender, pub verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, } impl ConsumeDescendantProcessor { - fn send_unverified_block(&self, unverified_block: UnverifiedBlockHash, total_difficulty: U256) { - let block_number = unverified_block - .unverified_block - .lonely_block - .block_number_and_hash - .number(); - let block_hash = unverified_block - .unverified_block - .lonely_block - .block_number_and_hash - .hash(); - - match self.unverified_blocks_tx.send(unverified_block) { + fn send_unverified_block( + &self, + lonely_block: LonelyBlockHashWithCallback, + total_difficulty: U256, + ) { + let block_number = lonely_block.lonely_block.block_number_and_hash.number(); + let block_hash = lonely_block.lonely_block.block_number_and_hash.hash(); + + match self.unverified_blocks_tx.send(lonely_block) { Ok(_) => { debug!( "process desendant block success {}-{}", block_number, block_hash ); } - 
Err(SendError(unverified_block)) => { + Err(SendError(lonely_block)) => { error!("send unverified_block_tx failed, the receiver has been closed"); let err: Error = InternalErrorKind::System .other( @@ -53,7 +49,7 @@ impl ConsumeDescendantProcessor { .into(); let verify_result: VerifyResult = Err(err); - unverified_block.execute_callback(verify_result); + lonely_block.execute_callback(verify_result); return; } }; @@ -158,15 +154,12 @@ impl ConsumeDescendantProcessor { pub(crate) fn process_descendant(&self, lonely_block: LonelyBlockWithCallback) { match self.accept_descendant(lonely_block.block().to_owned()) { - Ok((parent_header, total_difficulty)) => { + Ok((_parent_header, total_difficulty)) => { self.shared .insert_block_status(lonely_block.block().hash(), BlockStatus::BLOCK_STORED); + let lonely_block_hash = lonely_block.into(); - let unverified_block: UnverifiedBlock = - lonely_block.combine_parent_header(parent_header); - - let unverified_block_hash: UnverifiedBlockHash = unverified_block.into(); - self.send_unverified_block(unverified_block_hash, total_difficulty) + self.send_unverified_block(lonely_block_hash, total_difficulty) } Err(err) => { @@ -210,7 +203,7 @@ impl ConsumeOrphan { pub(crate) fn new( shared: Shared, orphan_block_pool: Arc, - unverified_blocks_tx: Sender, + unverified_blocks_tx: Sender, lonely_blocks_rx: Receiver, verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, stop_rx: Receiver<()>, diff --git a/chain/src/consume_unverified.rs b/chain/src/consume_unverified.rs index 5b56a4bbfb..8c1d4689ee 100644 --- a/chain/src/consume_unverified.rs +++ b/chain/src/consume_unverified.rs @@ -1,7 +1,7 @@ +use crate::LonelyBlockHashWithCallback; use crate::{ tell_synchronizer_to_punish_the_bad_peer, utils::forkchanges::ForkChanges, GlobalIndex, - LonelyBlock, LonelyBlockWithCallback, TruncateRequest, UnverifiedBlock, UnverifiedBlockHash, - VerifyResult, + LonelyBlock, LonelyBlockWithCallback, TruncateRequest, UnverifiedBlock, VerifyResult, }; 
use ckb_channel::{select, Receiver}; use ckb_error::{Error, InternalErrorKind}; @@ -11,7 +11,7 @@ use ckb_logger::{debug, error, info, log_enabled_target, trace_target}; use ckb_merkle_mountain_range::leaf_index_to_mmr_size; use ckb_proposal_table::ProposalTable; use ckb_shared::block_status::BlockStatus; -use ckb_shared::types::{BlockNumberAndHash, VerifyFailedBlockInfo}; +use ckb_shared::types::VerifyFailedBlockInfo; use ckb_shared::Shared; use ckb_store::{attach_block_cell, detach_block_cell, ChainStore, StoreTransaction}; use ckb_systemtime::unix_time_as_millis; @@ -40,7 +40,7 @@ pub(crate) struct ConsumeUnverifiedBlockProcessor { pub(crate) struct ConsumeUnverifiedBlocks { tx_pool_controller: TxPoolController, - unverified_block_rx: Receiver, + unverified_block_rx: Receiver, truncate_block_rx: Receiver, stop_rx: Receiver<()>, @@ -50,7 +50,7 @@ pub(crate) struct ConsumeUnverifiedBlocks { impl ConsumeUnverifiedBlocks { pub(crate) fn new( shared: Shared, - unverified_blocks_rx: Receiver, + unverified_blocks_rx: Receiver, truncate_block_rx: Receiver, proposal_table: ProposalTable, verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, @@ -116,17 +116,14 @@ impl ConsumeUnverifiedBlocks { } impl ConsumeUnverifiedBlockProcessor { - fn load_full_unverified_block(&self, unverified_block: UnverifiedBlockHash) -> UnverifiedBlock { + fn load_full_unverified_block( + &self, + lonely_block: LonelyBlockHashWithCallback, + ) -> UnverifiedBlock { let block_view = self .shared .store() - .get_block( - &unverified_block - .unverified_block - .lonely_block - .block_number_and_hash - .hash(), - ) + .get_block(&lonely_block.lonely_block.block_number_and_hash.hash()) .expect("block stored"); let parent_header_view = self .shared @@ -138,20 +135,20 @@ impl ConsumeUnverifiedBlockProcessor { unverified_block: LonelyBlockWithCallback { lonely_block: LonelyBlock { block: Arc::new(block_view), - peer_id_with_msg_bytes: unverified_block - .unverified_block - .lonely_block - 
.peer_id_with_msg_bytes, - switch: unverified_block.unverified_block.lonely_block.switch, + peer_id_with_msg_bytes: lonely_block.lonely_block.peer_id_with_msg_bytes, + switch: lonely_block.lonely_block.switch, }, - verify_callback: unverified_block.unverified_block.verify_callback, + verify_callback: lonely_block.verify_callback, }, parent_header: parent_header_view, } } - pub(crate) fn consume_unverified_blocks(&mut self, unverified_block_hash: UnverifiedBlockHash) { - let unverified_block = self.load_full_unverified_block(unverified_block_hash); + pub(crate) fn consume_unverified_blocks( + &mut self, + lonely_block_hash: LonelyBlockHashWithCallback, + ) { + let unverified_block = self.load_full_unverified_block(lonely_block_hash); // process this unverified block let verify_result = self.verify_block(&unverified_block); match &verify_result { From 89f01c77b5e0d7d3f2f69248c1bbfb88de26ff0b Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 23 Jan 2024 17:54:10 +0800 Subject: [PATCH 308/357] Replace the `Into` implementation LonelyBlockWithCallback with `From` form --- chain/src/lib.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/chain/src/lib.rs b/chain/src/lib.rs index ab00fc8927..ce170d8691 100644 --- a/chain/src/lib.rs +++ b/chain/src/lib.rs @@ -95,18 +95,18 @@ impl LonelyBlockHashWithCallback { } } -impl Into for LonelyBlockWithCallback { - fn into(self) -> LonelyBlockHashWithCallback { +impl From for LonelyBlockHashWithCallback { + fn from(val: LonelyBlockWithCallback) -> Self { LonelyBlockHashWithCallback { lonely_block: LonelyBlockHash { block_number_and_hash: BlockNumberAndHash { - number: self.lonely_block.block.number(), - hash: self.lonely_block.block.hash(), + number: val.lonely_block.block.number(), + hash: val.lonely_block.block.hash(), }, - peer_id_with_msg_bytes: self.lonely_block.peer_id_with_msg_bytes, - switch: self.lonely_block.switch, + peer_id_with_msg_bytes: val.lonely_block.peer_id_with_msg_bytes, + 
switch: val.lonely_block.switch, }, - verify_callback: self.verify_callback, + verify_callback: val.verify_callback, } } } From dd960bc9dec63f50964a8af7d1152ff740bd4331 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 23 Jan 2024 17:55:19 +0800 Subject: [PATCH 309/357] Fix ConsumeUnverified thread need LonelyBlockHashWithCallback in find_fork.rs --- chain/src/tests/find_fork.rs | 49 ++++++++++++++++++++++-------------- 1 file changed, 30 insertions(+), 19 deletions(-) diff --git a/chain/src/tests/find_fork.rs b/chain/src/tests/find_fork.rs index 3cdb57c50f..dbbaabddb0 100644 --- a/chain/src/tests/find_fork.rs +++ b/chain/src/tests/find_fork.rs @@ -1,9 +1,13 @@ use crate::consume_orphan::ConsumeDescendantProcessor; use crate::consume_unverified::ConsumeUnverifiedBlockProcessor; use crate::utils::forkchanges::ForkChanges; -use crate::{start_chain_services, LonelyBlock, UnverifiedBlock, VerifyFailedBlockInfo}; +use crate::{ + start_chain_services, LonelyBlock, LonelyBlockHash, LonelyBlockHashWithCallback, + LonelyBlockWithCallback, VerifyFailedBlockInfo, +}; use ckb_chain_spec::consensus::{Consensus, ProposalWindow}; use ckb_proposal_table::ProposalTable; +use ckb_shared::types::BlockNumberAndHash; use ckb_shared::SharedBuilder; use ckb_store::ChainStore; use ckb_systemtime::unix_time_as_millis; @@ -25,26 +29,28 @@ fn process_block( blk: &BlockView, switch: Switch, ) { - let lonely_block = LonelyBlock { - block: Arc::new(blk.to_owned()), + let lonely_block_hash = LonelyBlockHash { peer_id_with_msg_bytes: None, switch: Some(switch), + block_number_and_hash: BlockNumberAndHash::new(blk.number(), blk.hash()), }; - consume_descendant_processor.process_descendant(lonely_block.clone().without_callback()); + let lonely_block = LonelyBlock { + peer_id_with_msg_bytes: None, + switch: Some(switch), + block: Arc::new(blk.to_owned()), + }; - let parent_hash = blk.data().header().raw().parent_hash(); - let parent_header = consume_descendant_processor - .shared - .store() - 
.get_block_header(&parent_hash) - .unwrap(); + consume_descendant_processor.process_descendant(LonelyBlockWithCallback { + verify_callback: None, + lonely_block, + }); - let unverified_block = UnverifiedBlock { - unverified_block: lonely_block.without_callback(), - parent_header, + let lonely_block_hash = LonelyBlockHashWithCallback { + verify_callback: None, + lonely_block: lonely_block_hash, }; - consume_unverified_block_processor.consume_unverified_blocks(unverified_block); + consume_unverified_block_processor.consume_unverified_blocks(lonely_block_hash); } // 0--1--2--3--4 @@ -77,7 +83,8 @@ fn test_find_fork_case1() { let (verify_failed_blocks_tx, _verify_failed_blocks_rx) = tokio::sync::mpsc::unbounded_channel::(); - let (unverified_blocks_tx, _unverified_blocks_rx) = channel::unbounded::(); + let (unverified_blocks_tx, _unverified_blocks_rx) = + channel::unbounded::(); let consume_descendant_processor = ConsumeDescendantProcessor { shared: shared.clone(), unverified_blocks_tx, @@ -169,7 +176,8 @@ fn test_find_fork_case2() { let proposal_table = ProposalTable::new(consensus.tx_proposal_window()); let (verify_failed_blocks_tx, _verify_failed_blocks_rx) = tokio::sync::mpsc::unbounded_channel::(); - let (unverified_blocks_tx, _unverified_blocks_rx) = channel::unbounded::(); + let (unverified_blocks_tx, _unverified_blocks_rx) = + channel::unbounded::(); let consume_descendant_processor = ConsumeDescendantProcessor { shared: shared.clone(), unverified_blocks_tx, @@ -262,7 +270,8 @@ fn test_find_fork_case3() { let proposal_table = ProposalTable::new(consensus.tx_proposal_window()); let (verify_failed_blocks_tx, _verify_failed_blocks_rx) = tokio::sync::mpsc::unbounded_channel::(); - let (unverified_blocks_tx, _unverified_blocks_rx) = channel::unbounded::(); + let (unverified_blocks_tx, _unverified_blocks_rx) = + channel::unbounded::(); let consume_descendant_processor = ConsumeDescendantProcessor { shared: shared.clone(), unverified_blocks_tx, @@ -353,7 +362,8 @@ fn 
test_find_fork_case4() { let proposal_table = ProposalTable::new(consensus.tx_proposal_window()); let (verify_failed_blocks_tx, _verify_failed_blocks_rx) = tokio::sync::mpsc::unbounded_channel::(); - let (unverified_blocks_tx, _unverified_blocks_rx) = channel::unbounded::(); + let (unverified_blocks_tx, _unverified_blocks_rx) = + channel::unbounded::(); let consume_descendant_processor = ConsumeDescendantProcessor { shared: shared.clone(), unverified_blocks_tx, @@ -445,7 +455,8 @@ fn repeatedly_switch_fork() { let proposal_table = ProposalTable::new(consensus.tx_proposal_window()); let (verify_failed_blocks_tx, _verify_failed_blocks_rx) = tokio::sync::mpsc::unbounded_channel::(); - let (unverified_blocks_tx, _unverified_blocks_rx) = channel::unbounded::(); + let (unverified_blocks_tx, _unverified_blocks_rx) = + channel::unbounded::(); let consume_descendant_processor = ConsumeDescendantProcessor { shared: shared.clone(), unverified_blocks_tx, From 35a01d126fd89b369b88e785ad138578eacad582 Mon Sep 17 00:00:00 2001 From: YI Date: Mon, 22 Jan 2024 17:07:01 +0800 Subject: [PATCH 310/357] Consider the edge case of processing genesis block --- chain/src/chain_service.rs | 26 ++++++++++++++++++- chain/src/tests/basic.rs | 53 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 78 insertions(+), 1 deletion(-) diff --git a/chain/src/chain_service.rs b/chain/src/chain_service.rs index 48d21c060b..a3ff0036d4 100644 --- a/chain/src/chain_service.rs +++ b/chain/src/chain_service.rs @@ -192,8 +192,32 @@ impl ChainService { fn asynchronous_process_block(&self, lonely_block: LonelyBlockWithCallback) { let block_number = lonely_block.block().number(); let block_hash = lonely_block.block().hash(); + // Skip verifying a genesis block if its hash is equal to our genesis hash, + // otherwise, return error and ban peer. 
if block_number < 1 { - warn!("receive 0 number block: 0-{}", block_hash); + if self.shared.genesis_hash() != block_hash { + warn!( + "receive 0 number block: 0-{}, expect genesis hash: {}", + block_hash, + self.shared.genesis_hash() + ); + self.shared + .insert_block_status(lonely_block.block().hash(), BlockStatus::BLOCK_INVALID); + let error = InternalErrorKind::System + .other("Invalid genesis block received") + .into(); + tell_synchronizer_to_punish_the_bad_peer( + self.verify_failed_blocks_tx.clone(), + lonely_block.peer_id_with_msg_bytes(), + lonely_block.block().hash(), + &error, + ); + lonely_block.execute_callback(Err(error)); + } else { + warn!("receive 0 number block: 0-{}", block_hash); + lonely_block.execute_callback(Ok(false)); + } + return; } if lonely_block.switch().is_none() diff --git a/chain/src/tests/basic.rs b/chain/src/tests/basic.rs index 273cd9c9b8..be8f139572 100644 --- a/chain/src/tests/basic.rs +++ b/chain/src/tests/basic.rs @@ -58,6 +58,59 @@ fn repeat_process_block() { ); } +#[test] +fn process_genesis_block() { + let tx = TransactionBuilder::default() + .witness(Script::default().into_witness()) + .input(CellInput::new(OutPoint::null(), 0)) + .outputs(vec![ + CellOutputBuilder::default() + .capacity(capacity_bytes!(100_000_000).pack()) + .build(); + 100 + ]) + .outputs_data(vec![Bytes::new(); 100].pack()) + .build(); + let always_success_tx = create_always_success_tx(); + + let dao = genesis_dao_data(vec![&tx, &always_success_tx]).unwrap(); + + let genesis_block = BlockBuilder::default() + .transaction(tx.clone()) + .transaction(always_success_tx.clone()) + .compact_target(difficulty_to_compact(U256::from(1000u64)).pack()) + .dao(dao.clone()) + .build(); + + let consensus = ConsensusBuilder::default() + .genesis_block(genesis_block) + .build(); + let (chain_controller, shared, _parent) = start_chain(Some(consensus)); + + let block = Arc::new(shared.consensus().genesis_block().clone()); + + let result = 
chain_controller.blocking_process_block(Arc::clone(&block)); + assert!(!result.expect("process block ok")); + assert_eq!( + shared + .store() + .get_block_ext(&block.header().hash()) + .unwrap() + .verified, + Some(true) + ); + + let different_genesis_block = BlockBuilder::default() + .transaction(tx) + .transaction(always_success_tx) + // Difficulty is changed here + .compact_target(difficulty_to_compact(U256::from(999u64)).pack()) + .dao(dao) + .build(); + let result = chain_controller.blocking_process_block(Arc::new(different_genesis_block)); + assert!(result.is_err()); +} + #[test] fn test_genesis_transaction_spend() { // let data: Vec = ; From c1729f73fec0d931186796feb49645533869a11d Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 24 Jan 2024 12:19:23 +0800 Subject: [PATCH 311/357] Copy ibd_finished field from shared to HeaderMapKernal --- shared/src/types/header_map/kernel_lru.rs | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/shared/src/types/header_map/kernel_lru.rs b/shared/src/types/header_map/kernel_lru.rs index 7471128513..fd1ecac840 100644 --- a/shared/src/types/header_map/kernel_lru.rs +++ b/shared/src/types/header_map/kernel_lru.rs @@ -18,6 +18,8 @@ where pub(crate) backend: Backend, // Configuration memory_limit: usize, + // if ckb is in IBD mode, don't shrink memory map + ibd_finished: Arc, // Statistics #[cfg(feature = "stats")] stats: Mutex, @@ -43,7 +45,11 @@ impl HeaderMapKernel where Backend: KeyValueBackend, { - pub(crate) fn new

(tmpdir: Option

, memory_limit: usize) -> Self + pub(crate) fn new

( + tmpdir: Option

, + memory_limit: usize, + ibd_finished: Arc, + ) -> Self where P: AsRef, { @@ -56,6 +62,7 @@ where memory, backend, memory_limit, + ibd_finished, } } @@ -65,6 +72,7 @@ where memory, backend, memory_limit, + ibd_finished, stats: Mutex::new(HeaderMapKernelStats::new(50_000)), } } From cbee77f98b15058831228a8dae8ab475006a8219 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 24 Jan 2024 12:21:44 +0800 Subject: [PATCH 312/357] HeaderMap do not shrink_to_fit in IBD mode --- shared/src/shared_builder.rs | 4 +++- shared/src/types/header_map/kernel_lru.rs | 11 +++++++++-- shared/src/types/header_map/memory.rs | 13 ++++++++----- shared/src/types/header_map/mod.rs | 10 ++++++++-- 4 files changed, 28 insertions(+), 10 deletions(-) diff --git a/shared/src/shared_builder.rs b/shared/src/shared_builder.rs index 9d74cf280b..fb23ec7e0e 100644 --- a/shared/src/shared_builder.rs +++ b/shared/src/shared_builder.rs @@ -360,10 +360,13 @@ impl SharedBuilder { let header_map_memory_limit = header_map_memory_limit .unwrap_or(HeaderMapConfig::default().memory_limit.as_u64() as usize); + let ibd_finished = Arc::new(AtomicBool::new(false)); + let header_map = Arc::new(HeaderMap::new( header_map_tmp_dir, header_map_memory_limit, &async_handle.clone(), + Arc::clone(&ibd_finished), )); let tx_pool_config = tx_pool_config.unwrap_or_default(); @@ -405,7 +408,6 @@ impl SharedBuilder { let block_status_map = Arc::new(DashMap::new()); let assume_valid_target = Arc::new(Mutex::new(sync_config.assume_valid_target)); - let ibd_finished = Arc::new(AtomicBool::new(false)); let shared = Shared::new( store, tx_pool_controller, diff --git a/shared/src/types/header_map/kernel_lru.rs b/shared/src/types/header_map/kernel_lru.rs index fd1ecac840..d3e463c65f 100644 --- a/shared/src/types/header_map/kernel_lru.rs +++ b/shared/src/types/header_map/kernel_lru.rs @@ -1,4 +1,6 @@ use std::path; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::Arc; #[cfg(feature = "stats")] use ckb_logger::info; @@ 
-138,7 +140,9 @@ where self.trace(); self.stats().tick_primary_delete(); } - self.memory.remove(hash); + // If IBD is not finished, don't shrink memory map + let allow_shrink_to_fit = self.ibd_finished.load(Ordering::Relaxed); + self.memory.remove(hash, allow_shrink_to_fit); if self.backend.is_empty() { return; } @@ -150,8 +154,11 @@ where tokio::task::block_in_place(|| { self.backend.insert_batch(&values); }); + + // If IBD is not finished, don't shrink memory map + let allow_shrink_to_fit = self.ibd_finished.load(Ordering::Relaxed); self.memory - .remove_batch(values.iter().map(|value| value.hash())); + .remove_batch(values.iter().map(|value| value.hash()), allow_shrink_to_fit); } } diff --git a/shared/src/types/header_map/memory.rs b/shared/src/types/header_map/memory.rs index 1fac4cbd04..e7664c1f8f 100644 --- a/shared/src/types/header_map/memory.rs +++ b/shared/src/types/header_map/memory.rs @@ -98,12 +98,13 @@ impl MemoryMap { guard.insert(key, value).map(|_| ()) } - pub(crate) fn remove(&self, key: &Byte32) -> Option { + pub(crate) fn remove(&self, key: &Byte32, shrink_to_fit: bool) -> Option { let mut guard = self.0.write(); let ret = guard.remove(key); - // TODO: @eval-exec call shrink_to_fit only when CKB is in non-IBD mode - // shrink_to_fit!(guard, SHRINK_THRESHOLD); + if shrink_to_fit { + shrink_to_fit!(guard, SHRINK_THRESHOLD); + } ret.map(|inner| (key.clone(), inner).into()) } @@ -124,11 +125,13 @@ impl MemoryMap { } } - pub(crate) fn remove_batch(&self, keys: impl Iterator) { + pub(crate) fn remove_batch(&self, keys: impl Iterator, shrink_to_fit: bool) { let mut guard = self.0.write(); for key in keys { guard.remove(&key); } - shrink_to_fit!(guard, SHRINK_THRESHOLD); + if shrink_to_fit { + shrink_to_fit!(guard, SHRINK_THRESHOLD); + } } } diff --git a/shared/src/types/header_map/mod.rs b/shared/src/types/header_map/mod.rs index 40554afb34..731e898a6e 100644 --- a/shared/src/types/header_map/mod.rs +++ b/shared/src/types/header_map/mod.rs @@ -2,6 +2,7 
@@ use ckb_async_runtime::Handle; use ckb_logger::{debug, info}; use ckb_stop_handler::{new_tokio_exit_rx, CancellationToken}; use ckb_types::packed::Byte32; +use std::sync::atomic::AtomicBool; use std::sync::Arc; use std::time::Duration; use std::{mem::size_of, path}; @@ -29,7 +30,12 @@ const ITEM_BYTES_SIZE: usize = size_of::(); const WARN_THRESHOLD: usize = ITEM_BYTES_SIZE * 100_000; impl HeaderMap { - pub fn new

(tmpdir: Option

, memory_limit: usize, async_handle: &Handle) -> Self + pub fn new

( + tmpdir: Option

, + memory_limit: usize, + async_handle: &Handle, + ibd_finished: Arc, + ) -> Self where P: AsRef, { @@ -43,7 +49,7 @@ impl HeaderMap { ); } let size_limit = memory_limit / ITEM_BYTES_SIZE; - let inner = Arc::new(HeaderMapKernel::new(tmpdir, size_limit)); + let inner = Arc::new(HeaderMapKernel::new(tmpdir, size_limit, ibd_finished)); let map = Arc::clone(&inner); let stop_rx: CancellationToken = new_tokio_exit_rx(); From e46325e71e3b041159e99ac64c49f611747fa2d5 Mon Sep 17 00:00:00 2001 From: YI Date: Wed, 24 Jan 2024 14:22:04 +0800 Subject: [PATCH 313/357] test inserting block with stored but unverified parent chore: rename store_block to store_unverified_block --- chain/src/consume_orphan.rs | 151 +++++++++++++++++----------------- chain/src/lib.rs | 1 + sync/src/tests/sync_shared.rs | 72 +++++++++++++++- 3 files changed, 147 insertions(+), 77 deletions(-) diff --git a/chain/src/consume_orphan.rs b/chain/src/consume_orphan.rs index 1ac81803d9..82d5b87643 100644 --- a/chain/src/consume_orphan.rs +++ b/chain/src/consume_orphan.rs @@ -24,6 +24,77 @@ pub(crate) struct ConsumeDescendantProcessor { pub verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, } +// Store the an unverified block to the database. We may usually do this +// for an orphan block with unknown parent. But this function is also useful in testing. 
+pub fn store_unverified_block(shared: &Shared, block: Arc) -> Result<(HeaderView, U256), Error> { + let (block_number, block_hash) = (block.number(), block.hash()); + + let parent_header = shared + .store() + .get_block_header(&block.data().header().raw().parent_hash()) + .expect("parent already store"); + + if let Some(ext) = shared.store().get_block_ext(&block.hash()) { + debug!("block {}-{} has stored BlockExt", block_number, block_hash); + return Ok((parent_header, ext.total_difficulty)); + } + + trace!("begin accept block: {}-{}", block.number(), block.hash()); + + let parent_ext = shared + .store() + .get_block_ext(&block.data().header().raw().parent_hash()) + .expect("parent already store"); + + if parent_ext.verified == Some(false) { + return Err(InvalidParentError { + parent_hash: parent_header.hash(), + } + .into()); + } + + let cannon_total_difficulty = + parent_ext.total_difficulty.to_owned() + block.header().difficulty(); + + let db_txn = Arc::new(shared.store().begin_transaction()); + + let txn_snapshot = db_txn.get_snapshot(); + let _snapshot_block_ext = db_txn.get_update_for_block_ext(&block.hash(), &txn_snapshot); + + db_txn.insert_block(block.as_ref())?; + + let next_block_epoch = shared + .consensus() + .next_epoch_ext(&parent_header, &db_txn.borrow_as_data_loader()) + .expect("epoch should be stored"); + let new_epoch = next_block_epoch.is_head(); + let epoch = next_block_epoch.epoch(); + + db_txn.insert_block_epoch_index( + &block.header().hash(), + &epoch.last_block_hash_in_previous_epoch(), + )?; + if new_epoch { + db_txn.insert_epoch_ext(&epoch.last_block_hash_in_previous_epoch(), &epoch)?; + } + + let ext = BlockExt { + received_at: unix_time_as_millis(), + total_difficulty: cannon_total_difficulty.clone(), + total_uncles_count: parent_ext.total_uncles_count + block.data().uncles().len() as u64, + verified: None, + txs_fees: vec![], + cycles: None, + txs_sizes: None, + }; + + db_txn.insert_block_ext(&block.header().hash(), &ext)?; + + 
db_txn.commit()?; + + Ok((parent_header, cannon_total_difficulty)) +} + impl ConsumeDescendantProcessor { fn send_unverified_block( &self, @@ -80,84 +151,12 @@ impl ConsumeDescendantProcessor { } } - fn accept_descendant(&self, block: Arc) -> Result<(HeaderView, U256), Error> { - let (block_number, block_hash) = (block.number(), block.hash()); - - let parent_header = self - .shared - .store() - .get_block_header(&block.data().header().raw().parent_hash()) - .expect("parent already store"); - - if let Some(ext) = self.shared.store().get_block_ext(&block.hash()) { - debug!("block {}-{} has stored BlockExt", block_number, block_hash); - return Ok((parent_header, ext.total_difficulty)); - } - - trace!("begin accept block: {}-{}", block.number(), block.hash()); - - let parent_ext = self - .shared - .store() - .get_block_ext(&block.data().header().raw().parent_hash()) - .expect("parent already store"); - - if parent_ext.verified == Some(false) { - return Err(InvalidParentError { - parent_hash: parent_header.hash(), - } - .into()); - } - - let cannon_total_difficulty = - parent_ext.total_difficulty.to_owned() + block.header().difficulty(); - - let db_txn = Arc::new(self.shared.store().begin_transaction()); - - let txn_snapshot = db_txn.get_snapshot(); - let _snapshot_block_ext = db_txn.get_update_for_block_ext(&block.hash(), &txn_snapshot); - - db_txn.insert_block(block.as_ref())?; - - let next_block_epoch = self - .shared - .consensus() - .next_epoch_ext(&parent_header, &db_txn.borrow_as_data_loader()) - .expect("epoch should be stored"); - let new_epoch = next_block_epoch.is_head(); - let epoch = next_block_epoch.epoch(); - - db_txn.insert_block_epoch_index( - &block.header().hash(), - &epoch.last_block_hash_in_previous_epoch(), - )?; - if new_epoch { - db_txn.insert_epoch_ext(&epoch.last_block_hash_in_previous_epoch(), &epoch)?; - } - - let ext = BlockExt { - received_at: unix_time_as_millis(), - total_difficulty: cannon_total_difficulty.clone(), - total_uncles_count: 
parent_ext.total_uncles_count + block.data().uncles().len() as u64, - verified: None, - txs_fees: vec![], - cycles: None, - txs_sizes: None, - }; - - db_txn.insert_block_ext(&block.header().hash(), &ext)?; - - db_txn.commit()?; - - Ok((parent_header, cannon_total_difficulty)) - } - pub(crate) fn process_descendant(&self, lonely_block: LonelyBlockWithCallback) { - match self.accept_descendant(lonely_block.block().to_owned()) { + match store_unverified_block(&self.shared, lonely_block.block().to_owned()) { Ok((_parent_header, total_difficulty)) => { - self.shared - .insert_block_status(lonely_block.block().hash(), BlockStatus::BLOCK_STORED); - let lonely_block_hash = lonely_block.into(); + self.shared.insert_block_status(lonely_block.block().hash(), BlockStatus::BLOCK_STORED); + + let lonely_block_hash: LonelyBlockHashWithCallback = lonely_block.into(); self.send_unverified_block(lonely_block_hash, total_difficulty) } diff --git a/chain/src/lib.rs b/chain/src/lib.rs index ce170d8691..b3d15af220 100644 --- a/chain/src/lib.rs +++ b/chain/src/lib.rs @@ -24,6 +24,7 @@ mod utils; pub use chain_controller::ChainController; pub use chain_service::start_chain_services; +pub use consume_orphan::store_unverified_block; type ProcessBlockRequest = Request; type TruncateRequest = Request>; diff --git a/sync/src/tests/sync_shared.rs b/sync/src/tests/sync_shared.rs index 23effa2114..8674c7cf28 100644 --- a/sync/src/tests/sync_shared.rs +++ b/sync/src/tests/sync_shared.rs @@ -3,7 +3,7 @@ use crate::tests::util::{build_chain, inherit_block}; use crate::SyncShared; -use ckb_chain::start_chain_services; +use ckb_chain::{start_chain_services, store_unverified_block}; use ckb_logger::info; use ckb_logger_service::LoggerInitGuard; use ckb_shared::block_status::BlockStatus; @@ -16,6 +16,22 @@ use ckb_types::prelude::*; use std::fmt::format; use std::sync::Arc; +fn wait_for_expected_block_status( + shared: &SyncShared, + hash: &Byte32, + expect_status: BlockStatus, +) -> bool { + let now = 
std::time::Instant::now(); + while now.elapsed().as_secs() < 2 { + let current_status = shared.active_chain().get_block_status(hash); + if current_status == expect_status { + return true; + } + std::thread::sleep(std::time::Duration::from_micros(100)); + } + return false; +} + #[test] fn test_insert_new_block() { let (shared, chain) = build_chain(2); @@ -143,6 +159,60 @@ fn test_insert_parent_unknown_block() { )); } +#[test] +fn test_insert_child_block_with_stored_but_unverified_parent() { + let (shared1, _) = build_chain(2); + let (shared, chain) = { + let (shared, mut pack) = SharedBuilder::with_temp_db() + .consensus(shared1.consensus().clone()) + .build() + .unwrap(); + let chain_controller = start_chain_services(pack.take_chain_services_builder()); + ( + SyncShared::new(shared, Default::default(), pack.take_relay_tx_receiver()), + chain_controller, + ) + }; + + let block = shared1 + .store() + .get_block(&shared1.active_chain().tip_header().hash()) + .unwrap(); + let parent = { + let parent = shared1 + .store() + .get_block(&block.header().parent_hash()) + .unwrap(); + Arc::new(parent) + }; + let parent_hash = parent.header().hash(); + let child = Arc::new(block); + let child_hash = child.header().hash(); + + store_unverified_block(shared.shared(), Arc::clone(&parent)).expect("store parent block"); + + // Note that we will not find the block status obtained from + // shared.active_chain().get_block_status(&parent_hash) to be BLOCK_STORED, + // because `get_block_status` does not read the block status from the database, + // it use snapshot to get the block status, and the snapshot is not updated. 
+ assert!(shared.store().get_block_ext(&parent_hash).is_some(), "parent block should be stored"); + + assert!(shared + .blocking_insert_new_block(&chain, Arc::clone(&child)) + .expect("insert child block")); + + assert!(wait_for_expected_block_status( + &shared, + &child_hash, + BlockStatus::BLOCK_VALID + )); + assert!(wait_for_expected_block_status( + &shared, + &parent_hash, + BlockStatus::BLOCK_VALID + )); +} + #[test] fn test_switch_valid_fork() { let _log_guard: LoggerInitGuard = From 4979c4b6394988aaf93781f2336b945007225594 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 25 Jan 2024 12:46:24 +0800 Subject: [PATCH 314/357] ConsumeUnverified should call get_update_for_tip_hash after begin_transaction, keep coincident with develop branch --- chain/src/consume_orphan.rs | 3 --- chain/src/consume_unverified.rs | 3 +++ store/src/transaction.rs | 25 ------------------------- 3 files changed, 3 insertions(+), 28 deletions(-) diff --git a/chain/src/consume_orphan.rs b/chain/src/consume_orphan.rs index 82d5b87643..8b74b95db9 100644 --- a/chain/src/consume_orphan.rs +++ b/chain/src/consume_orphan.rs @@ -58,9 +58,6 @@ pub fn store_unverified_block(shared: &Shared, block: Arc) -> Result< let db_txn = Arc::new(shared.store().begin_transaction()); - let txn_snapshot = db_txn.get_snapshot(); - let _snapshot_block_ext = db_txn.get_update_for_block_ext(&block.hash(), &txn_snapshot); - db_txn.insert_block(block.as_ref())?; let next_block_epoch = shared diff --git a/chain/src/consume_unverified.rs b/chain/src/consume_unverified.rs index 8c1d4689ee..9eaa0cc33e 100644 --- a/chain/src/consume_unverified.rs +++ b/chain/src/consume_unverified.rs @@ -311,6 +311,9 @@ impl ConsumeUnverifiedBlockProcessor { let epoch = next_block_epoch.epoch(); let db_txn = Arc::new(self.shared.store().begin_transaction()); + let txn_snapshot = db_txn.get_snapshot(); + let _snapshot_tip_hash = db_txn.get_update_for_tip_hash(&txn_snapshot); + if new_best_block { info!( "[verify block] new best block 
found: {} => {:#x}, difficulty diff = {:#x}, unverified_tip: {}", diff --git a/store/src/transaction.rs b/store/src/transaction.rs index 48ef652a95..62ba110b0f 100644 --- a/store/src/transaction.rs +++ b/store/src/transaction.rs @@ -165,31 +165,6 @@ impl StoreTransaction { .map(|slice| packed::Byte32Reader::from_slice_should_be_ok(slice.as_ref()).to_entity()) } - /// TODO(doc): @eval-exec - pub fn get_update_for_block_ext( - &self, - hash: &packed::Byte32, - snapshot: &StoreTransactionSnapshot<'_>, - ) -> Option { - self.inner - .get_for_update(COLUMN_BLOCK_EXT, hash.as_slice(), &snapshot.inner) - .expect("db operation should be ok") - .map(|slice| { - let reader = - packed::BlockExtReader::from_compatible_slice_should_be_ok(slice.as_ref()); - match reader.count_extra_fields() { - 0 => reader.unpack(), - 2 => packed::BlockExtV1Reader::from_slice_should_be_ok(slice.as_ref()).unpack(), - _ => { - panic!( - "BlockExt storage field count doesn't match, expect 7 or 5, actual {}", - reader.field_count() - ) - } - } - }) - } - /// TODO(doc): @quake pub fn insert_tip_header(&self, h: &HeaderView) -> Result<(), Error> { self.insert_raw(COLUMN_META, META_TIP_HEADER_KEY, h.hash().as_slice()) From 71e3715df05a5df74782171c2e8bd2d88a661bbe Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 25 Jan 2024 12:49:24 +0800 Subject: [PATCH 315/357] Cargo fmt, apply check-whitespaces --- chain/src/consume_orphan.rs | 8 ++++++-- devtools/block_sync/draw_sync_chart.py | 10 +++++----- sync/src/tests/sync_shared.rs | 7 +++++-- 3 files changed, 16 insertions(+), 9 deletions(-) diff --git a/chain/src/consume_orphan.rs b/chain/src/consume_orphan.rs index 8b74b95db9..d7ac2e57e6 100644 --- a/chain/src/consume_orphan.rs +++ b/chain/src/consume_orphan.rs @@ -26,7 +26,10 @@ pub(crate) struct ConsumeDescendantProcessor { // Store the an unverified block to the database. We may usually do this // for an orphan block with unknown parent. But this function is also useful in testing. 
-pub fn store_unverified_block(shared: &Shared, block: Arc) -> Result<(HeaderView, U256), Error> { +pub fn store_unverified_block( + shared: &Shared, + block: Arc, +) -> Result<(HeaderView, U256), Error> { let (block_number, block_hash) = (block.number(), block.hash()); let parent_header = shared @@ -151,7 +154,8 @@ impl ConsumeDescendantProcessor { pub(crate) fn process_descendant(&self, lonely_block: LonelyBlockWithCallback) { match store_unverified_block(&self.shared, lonely_block.block().to_owned()) { Ok((_parent_header, total_difficulty)) => { - self.shared.insert_block_status(lonely_block.block().hash(), BlockStatus::BLOCK_STORED); + self.shared + .insert_block_status(lonely_block.block().hash(), BlockStatus::BLOCK_STORED); let lonely_block_hash: LonelyBlockHashWithCallback = lonely_block.into(); diff --git a/devtools/block_sync/draw_sync_chart.py b/devtools/block_sync/draw_sync_chart.py index b983bbc148..e9b164a440 100755 --- a/devtools/block_sync/draw_sync_chart.py +++ b/devtools/block_sync/draw_sync_chart.py @@ -29,8 +29,8 @@ def parse_sync_statics(log_file): timestamp_str = re.search(r'^(\S+ \S+)', line).group(1) # Extract the timestamp string timestamp = datetime.datetime.strptime(timestamp_str, "%Y-%m-%d %H:%M:%S.%f").timestamp() base_timestamp = timestamp - - + + if line.find('INFO ckb_chain::chain block: ') != -1: block_number = int(re.search(r'block: (\d+)', line).group(1)) # Extract the block number using regex @@ -77,7 +77,7 @@ def process_task(task): tasks = [(ckb_log_file, label) for ckb_log_file, label in tasks] - + import multiprocessing with multiprocessing.Pool() as pool: @@ -114,7 +114,7 @@ def process_task(task): ax.get_yaxis().get_major_formatter().set_scientific(False) ax.get_yaxis().get_major_formatter().set_useOffset(False) - + ax.margins(0) ax.set_axisbelow(True) @@ -124,7 +124,7 @@ def process_task(task): ax.xaxis.grid(color='gray', linestyle='dashed', which='minor') ax.yaxis.grid(color='gray', linestyle='dashed', which='minor') - + 
xminorLocator = MultipleLocator(1.0) ax.xaxis.set_major_locator(xminorLocator) diff --git a/sync/src/tests/sync_shared.rs b/sync/src/tests/sync_shared.rs index 8674c7cf28..a25060165e 100644 --- a/sync/src/tests/sync_shared.rs +++ b/sync/src/tests/sync_shared.rs @@ -191,11 +191,14 @@ fn test_insert_child_block_with_stored_but_unverified_parent() { store_unverified_block(shared.shared(), Arc::clone(&parent)).expect("store parent block"); - // Note that we will not find the block status obtained from + // Note that we will not find the block status obtained from // shared.active_chain().get_block_status(&parent_hash) to be BLOCK_STORED, // because `get_block_status` does not read the block status from the database, // it use snapshot to get the block status, and the snapshot is not updated. - assert!(shared.store().get_block_ext(&parent_hash).is_some(), "parent block should be stored"); + assert!( + shared.store().get_block_ext(&parent_hash).is_some(), + "parent block should be stored" + ); assert!(shared .blocking_insert_new_block(&chain, Arc::clone(&child)) From c166fce50def3e95367f54e9dd346baa5c9e9f54 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 25 Jan 2024 13:28:53 +0800 Subject: [PATCH 316/357] Change process duration from Gauge to Histogram, add metrics for ckb-sync proc timecost Signed-off-by: Eval EXEC --- util/metrics/src/lib.rs | 70 ++++++++++++++++++++++++----------------- 1 file changed, 42 insertions(+), 28 deletions(-) diff --git a/util/metrics/src/lib.rs b/util/metrics/src/lib.rs index f4544a7efd..72f25e3aaf 100644 --- a/util/metrics/src/lib.rs +++ b/util/metrics/src/lib.rs @@ -7,13 +7,14 @@ //! 
[`ckb-metrics-service`]: ../ckb_metrics_service/index.html use prometheus::{ - register_gauge, register_histogram, register_histogram_vec, register_int_counter, - register_int_gauge, register_int_gauge_vec, Gauge, Histogram, HistogramVec, IntCounter, - IntGauge, IntGaugeVec, + register_histogram, register_histogram_vec, register_int_counter, register_int_gauge, + register_int_gauge_vec, Histogram, HistogramVec, IntCounter, IntGauge, IntGaugeVec, }; use prometheus_static_metric::make_static_metric; use std::cell::Cell; +pub use prometheus::*; + pub fn gather() -> Vec { prometheus::gather() } @@ -54,16 +55,20 @@ pub struct Metrics { pub ckb_chain_tip: IntGauge, /// CKB chain unverified tip header number pub ckb_chain_unverified_tip: IntGauge, - /// ckb_chain asynchronous_process duration sum (seconds) - pub ckb_chain_async_process_block_duration_sum: Gauge, - /// ckb_chain consume_orphan thread's process_lonely_block duration sum (seconds) - pub ckb_chain_process_lonely_block_duration_sum: Gauge, - /// ckb_chain consume_unverified thread's consume_unverified_block duration sum (seconds) - pub ckb_chain_consume_unverified_block_duration_sum: Gauge, - /// ckb_chain consume_unverified thread's consume_unverified_block waiting for block duration sum (seconds) - pub ckb_chain_consume_unverified_block_waiting_block_duration_sum: Gauge, - /// ckb_chain execute_callback duration sum (seconds) - pub ckb_chain_execute_callback_duration_sum: Gauge, + /// ckb_chain asynchronous_process duration (seconds) + pub ckb_chain_async_process_block_duration: Histogram, + /// ckb_chain consume_orphan thread's process_lonely_block duration (seconds) + pub ckb_chain_process_lonely_block_duration: Histogram, + /// ckb_chain consume_unverified thread's consume_unverified_block duration (seconds) + pub ckb_chain_consume_unverified_block_duration: Histogram, + /// ckb_chain consume_unverified thread's consume_unverified_block waiting for block duration (seconds) + pub 
ckb_chain_consume_unverified_block_waiting_block_duration: Histogram, + /// ckb_chain execute_callback duration (seconds) + pub ckb_chain_execute_callback_duration: Histogram, + /// ckb_sync_msg_process duration (seconds) + pub ckb_sync_msg_process_duration: HistogramVec, + /// ckb_sync_block_fetch duraiton (seconds) + pub ckb_sync_block_fetch_duration: Histogram, /// Gauge for tracking the size of all frozen data pub ckb_freezer_size: IntGauge, /// Counter for measuring the effective amount of data read @@ -106,28 +111,37 @@ static METRICS: once_cell::sync::Lazy = once_cell::sync::Lazy::new(|| { "The CKB chain unverified tip header number" ) .unwrap(), - ckb_chain_async_process_block_duration_sum: register_gauge!( - "ckb_chain_async_process_block_duration_sum", - "The CKB chain asynchronous_process_block duration sum" + ckb_chain_async_process_block_duration: register_histogram!( + "ckb_chain_async_process_block_duration", + "The CKB chain asynchronous_process_block duration (seconds)" ) .unwrap(), - ckb_chain_process_lonely_block_duration_sum: register_gauge!( - "ckb_chain_process_lonely_block_duration_sum", - "The CKB chain consume_orphan thread's process_lonely_block duration sum" + ckb_chain_process_lonely_block_duration: register_histogram!( + "ckb_chain_process_lonely_block_duration", + "The CKB chain consume_orphan thread's process_lonely_block duration (seconds)" ) .unwrap(), - ckb_chain_consume_unverified_block_duration_sum: register_gauge!( - "ckb_chain_consume_unverified_block_duration_sum", - "The CKB chain consume_unverified thread's consume_unverified_block duration sum" + ckb_chain_consume_unverified_block_duration: register_histogram!( + "ckb_chain_consume_unverified_block_duration", + "The CKB chain consume_unverified thread's consume_unverified_block duration (seconds)" ) .unwrap(), - ckb_chain_consume_unverified_block_waiting_block_duration_sum: register_gauge!( - "ckb_chain_consume_unverified_block_waiting_block_duration_sum", - "The CKB chain 
consume_unverified thread's consume_unverified_block waiting for block duration sum" + ckb_chain_consume_unverified_block_waiting_block_duration: register_histogram!( + "ckb_chain_consume_unverified_block_waiting_block_duration", + "The CKB chain consume_unverified thread's consume_unverified_block waiting for block duration (seconds)" ).unwrap(), - ckb_chain_execute_callback_duration_sum: register_gauge!( - "ckb_chain_execute_callback_duration_sum", - "The CKB chain execute_callback duration sum" + ckb_chain_execute_callback_duration: register_histogram!( + "ckb_chain_execute_callback_duration", + "The CKB chain execute_callback duration (seconds)" + ).unwrap(), + ckb_sync_msg_process_duration: register_histogram_vec!( + "ckb_sync_msg_process_duration", + "The CKB sync message process duration (seconds)", + &["msg_type"], + ).unwrap(), + ckb_sync_block_fetch_duration: register_histogram!( + "ckb_sync_block_fetch_duration", + "The CKB sync block fetch duration (seconds)" ).unwrap(), ckb_freezer_size: register_int_gauge!("ckb_freezer_size", "The CKB freezer size").unwrap(), ckb_freezer_read: register_int_counter!("ckb_freezer_read", "The CKB freezer read").unwrap(), From 46dceed5c065c38231c94372cdd13eb7f412f9c7 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 25 Jan 2024 13:34:55 +0800 Subject: [PATCH 317/357] Collect ckb-chain and ckb-sync timecost Histogram metrics --- chain/src/chain_service.rs | 2 +- chain/src/consume_orphan.rs | 2 +- chain/src/consume_unverified.rs | 4 ++-- chain/src/lib.rs | 4 ++-- sync/src/synchronizer/block_fetcher.rs | 11 ++++++----- sync/src/synchronizer/mod.rs | 10 ++++++++++ 6 files changed, 22 insertions(+), 11 deletions(-) diff --git a/chain/src/chain_service.rs b/chain/src/chain_service.rs index a3ff0036d4..2ae833bb4e 100644 --- a/chain/src/chain_service.rs +++ b/chain/src/chain_service.rs @@ -152,7 +152,7 @@ impl ChainService { let _trace_now = minstant::Instant::now(); self.asynchronous_process_block(lonely_block); if let 
Some(handle) = ckb_metrics::handle(){ - handle.ckb_chain_async_process_block_duration_sum.add(_trace_now.elapsed().as_secs_f64()) + handle.ckb_chain_async_process_block_duration.observe(_trace_now.elapsed().as_secs_f64()) } let _ = responder.send(()); }, diff --git a/chain/src/consume_orphan.rs b/chain/src/consume_orphan.rs index d7ac2e57e6..c225899a3b 100644 --- a/chain/src/consume_orphan.rs +++ b/chain/src/consume_orphan.rs @@ -232,7 +232,7 @@ impl ConsumeOrphan { let _trace_now = minstant::Instant::now(); self.process_lonely_block(lonely_block); if let Some(handle) = ckb_metrics::handle() { - handle.ckb_chain_process_lonely_block_duration_sum.add(_trace_now.elapsed().as_secs_f64()) + handle.ckb_chain_process_lonely_block_duration.observe(_trace_now.elapsed().as_secs_f64()) } if lonely_block_epoch.number() > last_check_expired_orphans_epoch { diff --git a/chain/src/consume_unverified.rs b/chain/src/consume_unverified.rs index 9eaa0cc33e..8fefec2216 100644 --- a/chain/src/consume_unverified.rs +++ b/chain/src/consume_unverified.rs @@ -77,14 +77,14 @@ impl ConsumeUnverifiedBlocks { Ok(unverified_task) => { // process this unverified block if let Some(handle) = ckb_metrics::handle() { - handle.ckb_chain_consume_unverified_block_waiting_block_duration_sum.add(_trace_begin_loop.elapsed().as_secs_f64()) + handle.ckb_chain_consume_unverified_block_waiting_block_duration.observe(_trace_begin_loop.elapsed().as_secs_f64()) } let _ = self.tx_pool_controller.suspend_chunk_process(); let _trace_now = minstant::Instant::now(); self.processor.consume_unverified_blocks(unverified_task); if let Some(handle) = ckb_metrics::handle() { - handle.ckb_chain_consume_unverified_block_duration_sum.add(_trace_now.elapsed().as_secs_f64()) + handle.ckb_chain_consume_unverified_block_duration.observe(_trace_now.elapsed().as_secs_f64()) } let _ = self.tx_pool_controller.continue_chunk_process(); diff --git a/chain/src/lib.rs b/chain/src/lib.rs index b3d15af220..6c39c196a0 100644 --- 
a/chain/src/lib.rs +++ b/chain/src/lib.rs @@ -129,8 +129,8 @@ impl LonelyBlockWithCallback { if let Some(handle) = ckb_metrics::handle() { handle - .ckb_chain_execute_callback_duration_sum - .add(_trace_now.elapsed().as_secs_f64()) + .ckb_chain_execute_callback_duration + .observe(_trace_now.elapsed().as_secs_f64()) } } } diff --git a/sync/src/synchronizer/block_fetcher.rs b/sync/src/synchronizer/block_fetcher.rs index c91a005c4d..c74eccb044 100644 --- a/sync/src/synchronizer/block_fetcher.rs +++ b/sync/src/synchronizer/block_fetcher.rs @@ -4,6 +4,7 @@ use ckb_constant::sync::{ BLOCK_DOWNLOAD_WINDOW, CHECK_POINT_WINDOW, INIT_BLOCKS_IN_TRANSIT_PER_PEER, }; use ckb_logger::{debug, trace}; +use ckb_metrics::HistogramTimer; use ckb_network::PeerIndex; use ckb_shared::block_status::BlockStatus; use ckb_shared::types::{BlockNumberAndHash, HeaderIndex, HeaderIndexView}; @@ -90,7 +91,9 @@ impl BlockFetcher { } pub fn fetch(self) -> Option>> { - let trace_timecost_now = std::time::Instant::now(); + let _trace_timecost: Option = { + ckb_metrics::handle().map(|handle| handle.ckb_sync_block_fetch_duration.start_timer()) + }; if self.reached_inflight_limit() { trace!( @@ -269,14 +272,13 @@ impl BlockFetcher { if fetch.is_empty() { debug!( "[block fetch empty] peer-{}, fixed_last_common_header = {} \ - best_known_header = {}, [tip/unverified_tip]: [{}/{}], inflight_len = {}, time_cost: {:?}", + best_known_header = {}, [tip/unverified_tip]: [{}/{}], inflight_len = {}", self.peer, last_common.number(), best_known.number(), tip, unverified_tip, state.read_inflight_blocks().total_inflight_count(), - trace_timecost_now.elapsed(), ); trace!( "[block fetch empty] peer-{}, inflight_state = {:?}", @@ -289,7 +291,7 @@ impl BlockFetcher { let inflight_peer_count = state.read_inflight_blocks().peer_inflight_count(self.peer); let inflight_total_count = state.read_inflight_blocks().total_inflight_count(); debug!( - "request peer-{} for batch blocks: [{}-{}], batch len:{}, 
[tip/unverified_tip]: [{}/{}], [peer/total inflight count]: [{} / {}], timecost: {:?}, blocks: {}", + "request peer-{} for batch blocks: [{}-{}], batch len:{}, [tip/unverified_tip]: [{}/{}], [peer/total inflight count]: [{} / {}], blocks: {}", self.peer, fetch_head, fetch_last, @@ -298,7 +300,6 @@ impl BlockFetcher { self.sync_shared.shared().get_unverified_tip().number(), inflight_peer_count, inflight_total_count, - trace_timecost_now.elapsed(), fetch.iter().map(|h| h.number().to_string()).collect::>().join(","), ); } diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index d3ff3a9a4e..5afa2e1368 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -33,6 +33,7 @@ use ckb_constant::sync::{ INIT_BLOCKS_IN_TRANSIT_PER_PEER, MAX_TIP_AGE, }; use ckb_logger::{debug, error, info, trace, warn}; +use ckb_metrics::HistogramTimer; use ckb_network::{ async_trait, bytes::Bytes, tokio, CKBProtocolContext, CKBProtocolHandler, PeerIndex, ServiceControl, SupportProtocols, @@ -265,6 +266,15 @@ impl Synchronizer { peer: PeerIndex, message: packed::SyncMessageUnionReader<'_>, ) -> Status { + let _trace_timecost: Option = { + ckb_metrics::handle().map(|handle| { + handle + .ckb_sync_msg_process_duration + .with_label_values(&[message.item_name()]) + .start_timer() + }) + }; + match message { packed::SyncMessageUnionReader::GetHeaders(reader) => { GetHeadersProcess::new(reader, self, peer, nc).execute() From 8b8157902093d595669ca553f7b3f8931e1d65c0 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 25 Jan 2024 17:12:10 +0800 Subject: [PATCH 318/357] Add orphan blocks count metric --- chain/src/consume_orphan.rs | 8 +++++++- util/metrics/src/lib.rs | 6 ++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/chain/src/consume_orphan.rs b/chain/src/consume_orphan.rs index c225899a3b..1d9a83e125 100644 --- a/chain/src/consume_orphan.rs +++ b/chain/src/consume_orphan.rs @@ -310,6 +310,12 @@ impl ConsumeOrphan { } else { 
self.orphan_blocks_broker.insert(lonely_block); } - self.search_orphan_pool() + self.search_orphan_pool(); + + ckb_metrics::handle().map(|handle| { + handle + .ckb_chain_orphan_count + .set(self.orphan_blocks_broker.len() as i64) + }); } } diff --git a/util/metrics/src/lib.rs b/util/metrics/src/lib.rs index 72f25e3aaf..0be5693187 100644 --- a/util/metrics/src/lib.rs +++ b/util/metrics/src/lib.rs @@ -65,6 +65,8 @@ pub struct Metrics { pub ckb_chain_consume_unverified_block_waiting_block_duration: Histogram, /// ckb_chain execute_callback duration (seconds) pub ckb_chain_execute_callback_duration: Histogram, + /// ckb_chain orphan blocks count + pub ckb_chain_orphan_count: IntGauge, /// ckb_sync_msg_process duration (seconds) pub ckb_sync_msg_process_duration: HistogramVec, /// ckb_sync_block_fetch duraiton (seconds) @@ -134,6 +136,10 @@ static METRICS: once_cell::sync::Lazy = once_cell::sync::Lazy::new(|| { "ckb_chain_execute_callback_duration", "The CKB chain execute_callback duration (seconds)" ).unwrap(), + ckb_chain_orphan_count: register_int_gauge!( + "ckb_chain_orphan_count", + "The CKB chain orphan blocks count", + ).unwrap(), ckb_sync_msg_process_duration: register_histogram_vec!( "ckb_sync_msg_process_duration", "The CKB sync message process duration (seconds)", From 2436cb52eced0465d577f08b1179ac70604730d3 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Fri, 26 Jan 2024 13:14:47 +0800 Subject: [PATCH 319/357] Add header_map limit_memory and operation duration metric --- util/metrics/src/lib.rs | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/util/metrics/src/lib.rs b/util/metrics/src/lib.rs index 0be5693187..f7243753a0 100644 --- a/util/metrics/src/lib.rs +++ b/util/metrics/src/lib.rs @@ -71,6 +71,10 @@ pub struct Metrics { pub ckb_sync_msg_process_duration: HistogramVec, /// ckb_sync_block_fetch duraiton (seconds) pub ckb_sync_block_fetch_duration: Histogram, + // ckb_header_map_limit_memory duration (seconds) + pub 
ckb_header_map_limit_memory_duration: Histogram, + // ckb_header_map_limit_memory operation duration (seconds) + pub ckb_header_map_ops_duration: HistogramVec, /// Gauge for tracking the size of all frozen data pub ckb_freezer_size: IntGauge, /// Counter for measuring the effective amount of data read @@ -149,6 +153,15 @@ static METRICS: once_cell::sync::Lazy = once_cell::sync::Lazy::new(|| { "ckb_sync_block_fetch_duration", "The CKB sync block fetch duration (seconds)" ).unwrap(), + ckb_header_map_limit_memory_duration: register_histogram!( + "ckb_header_map_limit_memory_duration", + "The CKB header map limit_memory job duration (seconds)" + ).unwrap(), + ckb_header_map_ops_duration: register_histogram_vec!( + "ckb_header_map_ops_duration", + "The CKB header map operation duration (seconds)", + &["operation"], + ).unwrap(), ckb_freezer_size: register_int_gauge!("ckb_freezer_size", "The CKB freezer size").unwrap(), ckb_freezer_read: register_int_counter!("ckb_freezer_read", "The CKB freezer read").unwrap(), ckb_relay_transaction_short_id_collide: register_int_counter!( From bf1bf2215781ee83a3b95a74ac8eedec0586eb35 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Fri, 26 Jan 2024 13:15:15 +0800 Subject: [PATCH 320/357] Collect header_map limit_memory and operations timecost --- Cargo.lock | 1 + shared/Cargo.toml | 1 + shared/src/types/header_map/kernel_lru.rs | 4 +++ shared/src/types/header_map/mod.rs | 32 ++++++++++++++++++++--- 4 files changed, 35 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7b3b6e6d67..91a8d28225 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1450,6 +1450,7 @@ dependencies = [ "ckb-db-schema", "ckb-error", "ckb-logger", + "ckb-metrics", "ckb-migrate", "ckb-network", "ckb-notify", diff --git a/shared/Cargo.toml b/shared/Cargo.toml index 50b6eb1680..c81e3051a8 100644 --- a/shared/Cargo.toml +++ b/shared/Cargo.toml @@ -32,6 +32,7 @@ ckb-migrate = { path = "../util/migrate", version = "= 0.114.0-pre" } once_cell = "1.8.0" 
ckb-network = { path = "../network", version = "= 0.114.0-pre" } ckb-util = { path = "../util", version = "= 0.114.0-pre" } +ckb-metrics = { path = "../util/metrics", version = "= 0.114.0-pre" } bitflags = "1.0" tokio = { version = "1", features = ["sync"] } tempfile.workspace = true diff --git a/shared/src/types/header_map/kernel_lru.rs b/shared/src/types/header_map/kernel_lru.rs index d3e463c65f..ae00494bd8 100644 --- a/shared/src/types/header_map/kernel_lru.rs +++ b/shared/src/types/header_map/kernel_lru.rs @@ -4,6 +4,7 @@ use std::sync::Arc; #[cfg(feature = "stats")] use ckb_logger::info; +use ckb_metrics::HistogramTimer; #[cfg(feature = "stats")] use ckb_util::{Mutex, MutexGuard}; @@ -150,6 +151,9 @@ where } pub(crate) fn limit_memory(&self) { + let _trace_timer: Option = ckb_metrics::handle() + .map(|handle| handle.ckb_header_map_limit_memory_duration.start_timer()); + if let Some(values) = self.memory.front_n(self.memory_limit) { tokio::task::block_in_place(|| { self.backend.insert_batch(&values); diff --git a/shared/src/types/header_map/mod.rs b/shared/src/types/header_map/mod.rs index 731e898a6e..e7536e5cf2 100644 --- a/shared/src/types/header_map/mod.rs +++ b/shared/src/types/header_map/mod.rs @@ -1,5 +1,5 @@ use ckb_async_runtime::Handle; -use ckb_logger::{debug, info}; +use ckb_logger::info; use ckb_stop_handler::{new_tokio_exit_rx, CancellationToken}; use ckb_types::packed::Byte32; use std::sync::atomic::AtomicBool; @@ -7,6 +7,7 @@ use std::sync::Arc; use std::time::Duration; use std::{mem::size_of, path}; +use ckb_metrics::HistogramTimer; use tokio::time::MissedTickBehavior; mod backend; @@ -59,9 +60,7 @@ impl HeaderMap { loop { tokio::select! 
{ _ = interval.tick() => { - let now = std::time::Instant::now(); map.limit_memory(); - debug!("HeaderMap limit_memory cost: {:?}", now.elapsed()); } _ = stop_rx.cancelled() => { info!("HeaderMap limit_memory received exit signal, exit now"); @@ -75,18 +74,45 @@ impl HeaderMap { } pub fn contains_key(&self, hash: &Byte32) -> bool { + let _trace_timer: Option = ckb_metrics::handle().map(|metric| { + metric + .ckb_header_map_ops_duration + .with_label_values(&["contains_key"]) + .start_timer() + }); + self.inner.contains_key(hash) } pub fn get(&self, hash: &Byte32) -> Option { + let _trace_timer: Option = ckb_metrics::handle().map(|metric| { + metric + .ckb_header_map_ops_duration + .with_label_values(&["get"]) + .start_timer() + }); self.inner.get(hash) } pub fn insert(&self, view: HeaderIndexView) -> Option<()> { + let _trace_timer: Option = ckb_metrics::handle().map(|metric| { + metric + .ckb_header_map_ops_duration + .with_label_values(&["insert"]) + .start_timer() + }); + self.inner.insert(view) } pub fn remove(&self, hash: &Byte32) { + let _trace_timer: Option = ckb_metrics::handle().map(|metric| { + metric + .ckb_header_map_ops_duration + .with_label_values(&["remove"]) + .start_timer() + }); + self.inner.remove(hash) } } From c0d15391eacc54b93bfe2028b75004cea50b2364 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 29 Jan 2024 11:47:52 +0800 Subject: [PATCH 321/357] ConsumeOrphan should not insert a block to orphan pool if its parent is invalid --- chain/src/consume_orphan.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/chain/src/consume_orphan.rs b/chain/src/consume_orphan.rs index 1d9a83e125..63c002e8fd 100644 --- a/chain/src/consume_orphan.rs +++ b/chain/src/consume_orphan.rs @@ -307,6 +307,14 @@ impl ConsumeOrphan { lonely_block.block().hash() ); self.descendant_processor.process_descendant(lonely_block); + } else if parent_status.eq(&BlockStatus::BLOCK_INVALID) { + // ignore this block, because parent block is invalid + info!( + "parent: 
{} is INVALID, ignore this block {}-{}", + parent_hash, + lonely_block.block().number(), + lonely_block.block().hash() + ); } else { self.orphan_blocks_broker.insert(lonely_block); } From 38116d2d7c6b3e5e50baf573f340a12c4169aa43 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 29 Jan 2024 17:46:06 +0800 Subject: [PATCH 322/357] Remove `ChainController`'s useless methods, fix method comments --- chain/src/chain_controller.rs | 52 ++++------------------------------- chain/src/chain_service.rs | 9 +++--- 2 files changed, 9 insertions(+), 52 deletions(-) diff --git a/chain/src/chain_controller.rs b/chain/src/chain_controller.rs index 3b410601c4..be2a688d38 100644 --- a/chain/src/chain_controller.rs +++ b/chain/src/chain_controller.rs @@ -3,8 +3,7 @@ use crate::utils::orphan_block_pool::OrphanBlockPool; use crate::{ - LonelyBlock, LonelyBlockWithCallback, ProcessBlockRequest, TruncateRequest, VerifyCallback, - VerifyResult, + LonelyBlock, LonelyBlockWithCallback, ProcessBlockRequest, TruncateRequest, VerifyResult, }; use ckb_channel::Sender; use ckb_error::{Error, InternalErrorKind}; @@ -43,50 +42,6 @@ impl ChainController { } } - pub fn asynchronous_process_block_with_switch(&self, block: Arc, switch: Switch) { - self.asynchronous_process_lonely_block(LonelyBlock { - block, - peer_id_with_msg_bytes: None, - switch: Some(switch), - }) - } - - pub fn asynchronous_process_block(&self, block: Arc) { - self.asynchronous_process_lonely_block_with_callback( - LonelyBlock { - block, - peer_id_with_msg_bytes: None, - switch: None, - } - .without_callback(), - ) - } - - pub fn asynchronous_process_block_with_callback( - &self, - block: Arc, - verify_callback: VerifyCallback, - ) { - self.asynchronous_process_lonely_block_with_callback( - LonelyBlock { - block, - peer_id_with_msg_bytes: None, - switch: None, - } - .with_callback(Some(verify_callback)), - ) - } - - pub fn asynchronous_process_lonely_block(&self, lonely_block: LonelyBlock) { - let lonely_block_without_callback: 
LonelyBlockWithCallback = - lonely_block.without_callback(); - - self.asynchronous_process_lonely_block_with_callback(lonely_block_without_callback); - } - - /// Internal method insert block for test - /// - /// switch bit flags for particular verify, make easier to generating test data pub fn asynchronous_process_lonely_block_with_callback( &self, lonely_block_with_callback: LonelyBlockWithCallback, @@ -96,6 +51,7 @@ impl ChainController { } } + /// MinerRpc::submit_block and `ckb import` need this blocking way to process block pub fn blocking_process_block(&self, block: Arc) -> VerifyResult { self.blocking_process_lonely_block(LonelyBlock { block, @@ -104,6 +60,7 @@ impl ChainController { }) } + /// `IntegrationTestRpcImpl::process_block_without_verify` need this pub fn blocking_process_block_with_switch( &self, block: Arc, @@ -151,11 +108,12 @@ impl ChainController { }) } - // Relay need this + /// `Relayer::reconstruct_block` need this pub fn get_orphan_block(&self, hash: &Byte32) -> Option> { self.orphan_block_broker.get_block(hash) } + /// `NetRpcImpl::sync_state` rpc need this pub fn orphan_blocks_len(&self) -> usize { self.orphan_block_broker.len() } diff --git a/chain/src/chain_service.rs b/chain/src/chain_service.rs index 2ae833bb4e..97ccf1d808 100644 --- a/chain/src/chain_service.rs +++ b/chain/src/chain_service.rs @@ -111,9 +111,7 @@ pub fn start_chain_services(builder: ChainServicesBuilder) -> ChainController { ChainController::new(process_block_tx, truncate_block_tx, orphan_blocks_broker) } -/// Chain background service -/// -/// The ChainService provides a single-threaded background executor. 
+/// Chain background service to receive LonelyBlock and only do `non_contextual_verify` #[derive(Clone)] pub(crate) struct ChainService { shared: Shared, @@ -124,7 +122,7 @@ pub(crate) struct ChainService { verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, } impl ChainService { - /// Create a new ChainService instance with shared and initial proposal_table. + /// Create a new ChainService instance with shared. pub(crate) fn new( shared: Shared, process_block_rx: Receiver, @@ -140,6 +138,7 @@ impl ChainService { } } + /// Receive block from `process_block_rx` and do `non_contextual_verify` pub(crate) fn start_process_block(self) { let signal_receiver = new_crossbeam_exit_rx(); @@ -188,7 +187,7 @@ impl ChainService { .map(|_| ()) } - // make block IO and verify asynchronize + // `self.non_contextual_verify` is very fast. fn asynchronous_process_block(&self, lonely_block: LonelyBlockWithCallback) { let block_number = lonely_block.block().number(); let block_hash = lonely_block.block().hash(); From e94b143d381a826be87564dfb72c477cdacb1f3f Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 29 Jan 2024 18:17:10 +0800 Subject: [PATCH 323/357] Add HeaderMap memory count and cache hit/miss count metrics --- util/metrics/src/lib.rs | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/util/metrics/src/lib.rs b/util/metrics/src/lib.rs index f7243753a0..78c544fcb5 100644 --- a/util/metrics/src/lib.rs +++ b/util/metrics/src/lib.rs @@ -48,6 +48,13 @@ make_static_metric! { proposed, }, } + + struct CkbHeaderMapMemoryHitMissStatistics: IntCounter{ + "type" => { + hit, + miss, + }, + } } pub struct Metrics { @@ -75,6 +82,10 @@ pub struct Metrics { pub ckb_header_map_limit_memory_duration: Histogram, // ckb_header_map_limit_memory operation duration (seconds) pub ckb_header_map_ops_duration: HistogramVec, + // how many headers in the HeaderMap's memory map? 
+ pub ckb_header_map_memory_count: IntGauge, + // how many times the HeaderMap's memory map is hit? + pub ckb_header_map_memory_hit_miss_count: CkbHeaderMapMemoryHitMissStatistics, /// Gauge for tracking the size of all frozen data pub ckb_freezer_size: IntGauge, /// Counter for measuring the effective amount of data read @@ -162,6 +173,18 @@ static METRICS: once_cell::sync::Lazy = once_cell::sync::Lazy::new(|| { "The CKB header map operation duration (seconds)", &["operation"], ).unwrap(), + ckb_header_map_memory_count: register_int_gauge!( + "ckb_header_map_memory_count", + "The CKB HeaderMap memory count", + ).unwrap(), + ckb_header_map_memory_hit_miss_count: CkbHeaderMapMemoryHitMissStatistics::from( + ®ister_int_counter_vec!( + "ckb_header_map_memory_hit_miss_count", + "The CKB HeaderMap memory hit count", + &["type"] + ) + .unwrap() + ), ckb_freezer_size: register_int_gauge!("ckb_freezer_size", "The CKB freezer size").unwrap(), ckb_freezer_read: register_int_counter!("ckb_freezer_read", "The CKB freezer read").unwrap(), ckb_relay_transaction_short_id_collide: register_int_counter!( From 6560feb96bfef6f11535d194d369220ca4785221 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 29 Jan 2024 18:17:47 +0800 Subject: [PATCH 324/357] Collect HeaderMap MemoryMap cache hit/miss count and total count --- shared/src/types/header_map/kernel_lru.rs | 6 ++++++ shared/src/types/header_map/memory.rs | 9 +++++++++ 2 files changed, 15 insertions(+) diff --git a/shared/src/types/header_map/kernel_lru.rs b/shared/src/types/header_map/kernel_lru.rs index ae00494bd8..bec90314a6 100644 --- a/shared/src/types/header_map/kernel_lru.rs +++ b/shared/src/types/header_map/kernel_lru.rs @@ -105,8 +105,14 @@ where self.stats().tick_primary_select(); } if let Some(view) = self.memory.get_refresh(hash) { + ckb_metrics::handle() + .map(|metrics| metrics.ckb_header_map_memory_hit_miss_count.hit.inc()); return Some(view); } + + ckb_metrics::handle() + .map(|metrics| 
metrics.ckb_header_map_memory_hit_miss_count.miss.inc()); + if self.backend.is_empty() { return None; } diff --git a/shared/src/types/header_map/memory.rs b/shared/src/types/header_map/memory.rs index e7664c1f8f..3def8951d3 100644 --- a/shared/src/types/header_map/memory.rs +++ b/shared/src/types/header_map/memory.rs @@ -93,12 +93,16 @@ impl MemoryMap { } pub(crate) fn insert(&self, header: HeaderIndexView) -> Option<()> { + ckb_metrics::handle().map(|metrics| metrics.ckb_header_map_memory_count.inc()); + let mut guard = self.0.write(); let (key, value) = header.into(); guard.insert(key, value).map(|_| ()) } pub(crate) fn remove(&self, key: &Byte32, shrink_to_fit: bool) -> Option { + ckb_metrics::handle().map(|metrics| metrics.ckb_header_map_memory_count.dec()); + let mut guard = self.0.write(); let ret = guard.remove(key); @@ -127,9 +131,14 @@ impl MemoryMap { pub(crate) fn remove_batch(&self, keys: impl Iterator, shrink_to_fit: bool) { let mut guard = self.0.write(); + let mut keys_count = 0; for key in keys { guard.remove(&key); + keys_count += 1; } + + ckb_metrics::handle().map(|metrics| metrics.ckb_header_map_memory_count.sub(keys_count)); + if shrink_to_fit { shrink_to_fit!(guard, SHRINK_THRESHOLD); } From 02ff2e8ef1f24cbca48167fc35baad4ae74ce10e Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 29 Jan 2024 21:42:16 +0800 Subject: [PATCH 325/357] ConsumeUnverified do not realy need pass whole UnverifiedBlock to fn verify_block --- chain/src/consume_unverified.rs | 33 ++++++++++++++------------------- 1 file changed, 14 insertions(+), 19 deletions(-) diff --git a/chain/src/consume_unverified.rs b/chain/src/consume_unverified.rs index 8fefec2216..869f1ae136 100644 --- a/chain/src/consume_unverified.rs +++ b/chain/src/consume_unverified.rs @@ -150,7 +150,11 @@ impl ConsumeUnverifiedBlockProcessor { ) { let unverified_block = self.load_full_unverified_block(lonely_block_hash); // process this unverified block - let verify_result = 
self.verify_block(&unverified_block); + let verify_result = self.verify_block( + unverified_block.block(), + &unverified_block.parent_header, + unverified_block.unverified_block.switch(), + ); match &verify_result { Ok(_) => { let log_now = std::time::Instant::now(); @@ -215,21 +219,12 @@ impl ConsumeUnverifiedBlockProcessor { unverified_block.execute_callback(verify_result); } - fn verify_block(&mut self, unverified_block: &UnverifiedBlock) -> VerifyResult { - let UnverifiedBlock { - unverified_block: - LonelyBlockWithCallback { - lonely_block: - LonelyBlock { - block, - peer_id_with_msg_bytes: _peer_id_with_msg_bytes, - switch, - }, - verify_callback: _verify_callback, - }, - parent_header, - } = unverified_block; - + fn verify_block( + &mut self, + block: &BlockView, + parent_header: &HeaderView, + switch: Option, + ) -> VerifyResult { let switch: Switch = switch.unwrap_or_else(|| { let mut assume_valid_target = self.shared.assume_valid_target(); match *assume_valid_target { @@ -322,7 +317,7 @@ impl ConsumeUnverifiedBlockProcessor { &cannon_total_difficulty - ¤t_total_difficulty, self.shared.get_unverified_tip().number(), ); - self.find_fork(&mut fork, current_tip_header.number(), block, ext); + self.find_fork(&mut fork, current_tip_header.number(), &block, ext); self.rollback(&fork, &db_txn)?; // update and verify chain root @@ -378,7 +373,7 @@ impl ConsumeUnverifiedBlockProcessor { } } - let block_ref: &BlockView = block; + let block_ref: &BlockView = █ self.shared .notify_controller() .notify_new_block(block_ref.clone()); @@ -401,7 +396,7 @@ impl ConsumeUnverifiedBlockProcessor { let tx_pool_controller = self.shared.tx_pool_controller(); if tx_pool_controller.service_started() { - let block_ref: &BlockView = block; + let block_ref: &BlockView = █ if let Err(e) = tx_pool_controller.notify_new_uncle(block_ref.as_uncle()) { error!("[verify block] notify new_uncle error {}", e); } From 54aa62c89080f06c1a44d11539f8037bcaa12492 Mon Sep 17 00:00:00 2001 From: Eval 
EXEC Date: Mon, 29 Jan 2024 22:50:42 +0800 Subject: [PATCH 326/357] Remove msg_bytes from LonelyBlock --- chain/src/chain_controller.rs | 4 ++-- chain/src/chain_service.rs | 4 ++-- chain/src/consume_orphan.rs | 2 +- chain/src/consume_unverified.rs | 6 +++--- chain/src/lib.rs | 26 +++++++++++++------------- chain/src/tests/find_fork.rs | 4 ++-- chain/src/tests/orphan_block_pool.rs | 4 ++-- 7 files changed, 25 insertions(+), 25 deletions(-) diff --git a/chain/src/chain_controller.rs b/chain/src/chain_controller.rs index be2a688d38..a6a71dbeca 100644 --- a/chain/src/chain_controller.rs +++ b/chain/src/chain_controller.rs @@ -55,7 +55,7 @@ impl ChainController { pub fn blocking_process_block(&self, block: Arc) -> VerifyResult { self.blocking_process_lonely_block(LonelyBlock { block, - peer_id_with_msg_bytes: None, + peer_id: None, switch: None, }) } @@ -68,7 +68,7 @@ impl ChainController { ) -> VerifyResult { self.blocking_process_lonely_block(LonelyBlock { block, - peer_id_with_msg_bytes: None, + peer_id: None, switch: Some(switch), }) } diff --git a/chain/src/chain_service.rs b/chain/src/chain_service.rs index 97ccf1d808..4838b2d873 100644 --- a/chain/src/chain_service.rs +++ b/chain/src/chain_service.rs @@ -207,7 +207,7 @@ impl ChainService { .into(); tell_synchronizer_to_punish_the_bad_peer( self.verify_failed_blocks_tx.clone(), - lonely_block.peer_id_with_msg_bytes(), + lonely_block.peer_id(), lonely_block.block().hash(), &error, ); @@ -232,7 +232,7 @@ impl ChainService { .insert_block_status(lonely_block.block().hash(), BlockStatus::BLOCK_INVALID); tell_synchronizer_to_punish_the_bad_peer( self.verify_failed_blocks_tx.clone(), - lonely_block.peer_id_with_msg_bytes(), + lonely_block.peer_id(), lonely_block.block().hash(), &err, ); diff --git a/chain/src/consume_orphan.rs b/chain/src/consume_orphan.rs index 63c002e8fd..6c1694a9f3 100644 --- a/chain/src/consume_orphan.rs +++ b/chain/src/consume_orphan.rs @@ -165,7 +165,7 @@ impl ConsumeDescendantProcessor { Err(err) 
=> { tell_synchronizer_to_punish_the_bad_peer( self.verify_failed_blocks_tx.clone(), - lonely_block.peer_id_with_msg_bytes(), + lonely_block.peer_id(), lonely_block.block().hash(), &err, ); diff --git a/chain/src/consume_unverified.rs b/chain/src/consume_unverified.rs index 869f1ae136..db612abdb1 100644 --- a/chain/src/consume_unverified.rs +++ b/chain/src/consume_unverified.rs @@ -135,7 +135,7 @@ impl ConsumeUnverifiedBlockProcessor { unverified_block: LonelyBlockWithCallback { lonely_block: LonelyBlock { block: Arc::new(block_view), - peer_id_with_msg_bytes: lonely_block.lonely_block.peer_id_with_msg_bytes, + peer_id: lonely_block.lonely_block.peer_id, switch: lonely_block.lonely_block.switch, }, verify_callback: lonely_block.verify_callback, @@ -173,7 +173,7 @@ impl ConsumeUnverifiedBlockProcessor { Err(err) => { error!( "verify [{:?}]'s block {} failed: {}", - unverified_block.peer_id_with_msg_bytes(), + unverified_block.peer_id(), unverified_block.block().hash(), err ); @@ -209,7 +209,7 @@ impl ConsumeUnverifiedBlockProcessor { tell_synchronizer_to_punish_the_bad_peer( self.verify_failed_blocks_tx.clone(), - unverified_block.peer_id_with_msg_bytes(), + unverified_block.peer_id(), unverified_block.block().hash(), err, ); diff --git a/chain/src/lib.rs b/chain/src/lib.rs index 6c39c196a0..81023b9e3d 100644 --- a/chain/src/lib.rs +++ b/chain/src/lib.rs @@ -14,6 +14,7 @@ use ckb_types::core::{BlockNumber, BlockView, HeaderView}; use ckb_types::packed::Byte32; use ckb_verification_traits::Switch; use std::sync::Arc; + mod chain_controller; mod chain_service; mod consume_orphan; @@ -45,8 +46,8 @@ pub struct LonelyBlock { /// block pub block: Arc, - /// This block is received from which peer, and the message bytes size - pub peer_id_with_msg_bytes: Option<(PeerIndex, u64)>, + /// This block is received from which peer + pub peer_id: Option, /// The Switch to control the verification process pub switch: Option, @@ -73,8 +74,8 @@ pub struct LonelyBlockHash { /// block 
pub block_number_and_hash: BlockNumberAndHash, - /// This block is received from which peer, and the message bytes size - pub peer_id_with_msg_bytes: Option<(PeerIndex, u64)>, + /// This block is received from which peer + pub peer_id: Option, /// The Switch to control the verification process pub switch: Option, @@ -104,7 +105,7 @@ impl From for LonelyBlockHashWithCallback { number: val.lonely_block.block.number(), hash: val.lonely_block.block.hash(), }, - peer_id_with_msg_bytes: val.lonely_block.peer_id_with_msg_bytes, + peer_id: val.lonely_block.peer_id, switch: val.lonely_block.switch, }, verify_callback: val.verify_callback, @@ -141,8 +142,8 @@ impl LonelyBlockWithCallback { } /// get peer_id and msg_bytes - pub fn peer_id_with_msg_bytes(&self) -> Option<(PeerIndex, u64)> { - self.lonely_block.peer_id_with_msg_bytes + pub fn peer_id(&self) -> Option { + self.lonely_block.peer_id } /// get switch param @@ -161,8 +162,8 @@ impl UnverifiedBlock { self.unverified_block.block() } - pub fn peer_id_with_msg_bytes(&self) -> Option<(PeerIndex, u64)> { - self.unverified_block.peer_id_with_msg_bytes() + pub fn peer_id(&self) -> Option { + self.unverified_block.peer_id() } pub fn execute_callback(self, verify_result: VerifyResult) { @@ -193,17 +194,16 @@ impl GlobalIndex { pub(crate) fn tell_synchronizer_to_punish_the_bad_peer( verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, - peer_id_with_msg_bytes: Option<(PeerIndex, u64)>, + peer_id: Option, block_hash: Byte32, err: &Error, ) { let is_internal_db_error = is_internal_db_error(err); - match peer_id_with_msg_bytes { - Some((peer_id, msg_bytes)) => { + match peer_id { + Some(peer_id) => { let verify_failed_block_info = VerifyFailedBlockInfo { block_hash, peer_id, - msg_bytes, reason: err.to_string(), is_internal_db_error, }; diff --git a/chain/src/tests/find_fork.rs b/chain/src/tests/find_fork.rs index dbbaabddb0..cf2538a6a2 100644 --- a/chain/src/tests/find_fork.rs +++ b/chain/src/tests/find_fork.rs @@ -30,13 
+30,13 @@ fn process_block( switch: Switch, ) { let lonely_block_hash = LonelyBlockHash { - peer_id_with_msg_bytes: None, + peer_id: None, switch: Some(switch), block_number_and_hash: BlockNumberAndHash::new(blk.number(), blk.hash()), }; let lonely_block = LonelyBlock { - peer_id_with_msg_bytes: None, + peer_id: None, switch: Some(switch), block: Arc::new(blk.to_owned()), }; diff --git a/chain/src/tests/orphan_block_pool.rs b/chain/src/tests/orphan_block_pool.rs index a0f25f1a90..2974852483 100644 --- a/chain/src/tests/orphan_block_pool.rs +++ b/chain/src/tests/orphan_block_pool.rs @@ -21,7 +21,7 @@ fn gen_lonely_block(parent_header: &HeaderView) -> LonelyBlock { .build(); LonelyBlock { block: Arc::new(block), - peer_id_with_msg_bytes: None, + peer_id: None, switch: None, } } @@ -163,7 +163,7 @@ fn test_remove_expired_blocks() { let lonely_block_with_callback = LonelyBlockWithCallback { lonely_block: LonelyBlock { block: Arc::new(new_block), - peer_id_with_msg_bytes: None, + peer_id: None, switch: None, }, verify_callback: None, From c133520af98c197ee1cb84cb450e753c444a1427 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 29 Jan 2024 22:51:03 +0800 Subject: [PATCH 327/357] Remove msg_bytes from VerifyFailedBlockInfo --- shared/src/types/mod.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/shared/src/types/mod.rs b/shared/src/types/mod.rs index 45e6125b06..ca848229ed 100644 --- a/shared/src/types/mod.rs +++ b/shared/src/types/mod.rs @@ -311,7 +311,6 @@ pub const SHRINK_THRESHOLD: usize = 300; pub struct VerifyFailedBlockInfo { pub block_hash: Byte32, pub peer_id: PeerIndex, - pub msg_bytes: u64, pub reason: String, pub is_internal_db_error: bool, } From d645bb6908b45ef5a7d6fe41f90a43a6abdf75ba Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 29 Jan 2024 22:52:01 +0800 Subject: [PATCH 328/357] ckb-sync do not need pass msg_bytes to ckb-chain --- sync/src/relayer/mod.rs | 2 +- sync/src/synchronizer/block_process.rs | 15 +++++------- 
sync/src/synchronizer/mod.rs | 33 ++++++++++---------------- sync/src/types/mod.rs | 13 +++++----- 4 files changed, 25 insertions(+), 38 deletions(-) diff --git a/sync/src/relayer/mod.rs b/sync/src/relayer/mod.rs index b3facdcef7..981a59c5b4 100644 --- a/sync/src/relayer/mod.rs +++ b/sync/src/relayer/mod.rs @@ -348,7 +348,7 @@ impl Relayer { self.shared().insert_new_block_with_callback( &self.chain, Arc::clone(&block), - (peer, 0), + peer, Box::new(verify_success_callback), ); } diff --git a/sync/src/synchronizer/block_process.rs b/sync/src/synchronizer/block_process.rs index b97bbe1251..7b3bb52912 100644 --- a/sync/src/synchronizer/block_process.rs +++ b/sync/src/synchronizer/block_process.rs @@ -7,7 +7,6 @@ pub struct BlockProcess<'a> { message: packed::SendBlockReader<'a>, synchronizer: &'a Synchronizer, peer: PeerIndex, - message_bytes: u64, } impl<'a> BlockProcess<'a> { @@ -15,17 +14,15 @@ impl<'a> BlockProcess<'a> { message: packed::SendBlockReader<'a>, synchronizer: &'a Synchronizer, peer: PeerIndex, - message_bytes: u64, ) -> Self { BlockProcess { message, synchronizer, peer, - message_bytes, } } - pub fn execute(self) { + pub fn execute(self) -> crate::Status { let block = self.message.block().to_entity().into_view(); debug!( "BlockProcess received block {} {}", @@ -35,12 +32,12 @@ impl<'a> BlockProcess<'a> { let shared = self.synchronizer.shared(); if shared.new_block_received(&block) { - self.synchronizer.asynchronous_process_new_block( - block.clone(), - self.peer, - self.message_bytes, - ); + self.synchronizer + .asynchronous_process_new_block(block.clone(), self.peer); } + + // block process is asynchronous, so we only return ignored here + crate::Status::ignored() } #[cfg(test)] diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index 5afa2e1368..d33e2cf6af 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -287,9 +287,7 @@ impl Synchronizer { } packed::SyncMessageUnionReader::SendBlock(reader) => { 
if reader.check_data() { - BlockProcess::new(reader, self, peer, message.as_slice().len() as u64) - .execute(); - Status::ignored() + BlockProcess::new(reader, self, peer).execute() } else { StatusCode::ProtocolMessageIsMalformed.with_context("SendBlock is invalid") } @@ -308,16 +306,6 @@ impl Synchronizer { let item_bytes = message.as_slice().len() as u64; let status = self.try_process(nc, peer, message); - Self::post_sync_process(nc, peer, item_name, item_bytes, status); - } - - fn post_sync_process( - nc: &dyn CKBProtocolContext, - peer: PeerIndex, - item_name: &str, - item_bytes: u64, - status: Status, - ) { metric_ckb_message_bytes( MetricDirection::In, &SupportProtocols::Sync.name(), @@ -326,6 +314,15 @@ impl Synchronizer { item_bytes, ); + Self::post_sync_process(nc, peer, item_name, status); + } + + fn post_sync_process( + nc: &dyn CKBProtocolContext, + peer: PeerIndex, + item_name: &str, + status: Status, + ) { if let Some(ban_time) = status.should_ban() { error!( "Receive {} from {}. 
Ban {:?} for {}", @@ -363,12 +360,7 @@ impl Synchronizer { /// Process a new block sync from other peer //TODO: process block which we don't request - pub fn asynchronous_process_new_block( - &self, - block: core::BlockView, - peer_id: PeerIndex, - message_bytes: u64, - ) { + pub fn asynchronous_process_new_block(&self, block: core::BlockView, peer_id: PeerIndex) { let block_hash = block.hash(); let status = self.shared.active_chain().get_block_status(&block_hash); // NOTE: Filtering `BLOCK_STORED` but not `BLOCK_RECEIVED`, is for avoiding @@ -377,7 +369,7 @@ impl Synchronizer { error!("Block {} already stored", block_hash); } else if status.contains(BlockStatus::HEADER_VALID) { self.shared - .insert_new_block(&self.chain, Arc::new(block), peer_id, message_bytes); + .insert_new_block(&self.chain, Arc::new(block), peer_id); } else { debug!( "Synchronizer process_new_block unexpected status {:?} {}", @@ -936,7 +928,6 @@ impl CKBProtocolHandler for Synchronizer { nc.as_ref(), malformed_peer_info.peer_id, "SendBlock", - malformed_peer_info.msg_bytes, StatusCode::BlockIsInvalid.with_context(format!( "block {} is invalid, reason: {}", malformed_peer_info.block_hash, malformed_peer_info.reason diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index 631650b48b..c11d42215a 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -1064,13 +1064,13 @@ impl SyncShared { &self, chain: &ChainController, block: Arc, - peer_id_with_msg_bytes: (PeerIndex, u64), + peer_id: PeerIndex, verify_success_callback: VerifyCallback, ) { self.accept_block( chain, Arc::clone(&block), - Some(peer_id_with_msg_bytes), + Some(peer_id), Some(verify_success_callback), ) } @@ -1081,12 +1081,11 @@ impl SyncShared { chain: &ChainController, block: Arc, peer_id: PeerIndex, - message_bytes: u64, ) { self.accept_block( chain, Arc::clone(&block), - Some((peer_id, message_bytes)), + Some(peer_id), None::, ); } @@ -1112,7 +1111,7 @@ impl SyncShared { ) -> VerifyResult { let lonely_block: 
LonelyBlock = LonelyBlock { block, - peer_id_with_msg_bytes: Some((peer_id, message_bytes)), + peer_id: Some((peer_id, message_bytes)), switch: None, }; chain.blocking_process_lonely_block(lonely_block) @@ -1122,7 +1121,7 @@ impl SyncShared { &self, chain: &ChainController, block: Arc, - peer_id_with_msg_bytes: Option<(PeerIndex, u64)>, + peer_id: Option, verify_callback: Option, ) { { @@ -1137,7 +1136,7 @@ impl SyncShared { let lonely_block_with_callback = LonelyBlock { block, - peer_id_with_msg_bytes, + peer_id, switch: None, } .with_callback(verify_callback); From 67d07254e8a6b0aaeb32b4288ac46e01f8444c45 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 29 Jan 2024 22:55:24 +0800 Subject: [PATCH 329/357] `ckb-sync`'s unit test won't need `msg_bytes` anymore Signed-off-by: Eval EXEC --- sync/src/synchronizer/block_process.rs | 9 ++++----- sync/src/synchronizer/mod.rs | 2 -- sync/src/tests/synchronizer/functions.rs | 2 +- sync/src/types/mod.rs | 3 +-- 4 files changed, 6 insertions(+), 10 deletions(-) diff --git a/sync/src/synchronizer/block_process.rs b/sync/src/synchronizer/block_process.rs index 7b3bb52912..b9f3fe6cab 100644 --- a/sync/src/synchronizer/block_process.rs +++ b/sync/src/synchronizer/block_process.rs @@ -51,11 +51,10 @@ impl<'a> BlockProcess<'a> { let shared = self.synchronizer.shared(); if shared.new_block_received(&block) { - if let Err(err) = self.synchronizer.blocking_process_new_block( - block.clone(), - self.peer, - self.message_bytes, - ) { + if let Err(err) = self + .synchronizer + .blocking_process_new_block(block.clone(), self.peer) + { if !ckb_error::is_internal_db_error(&err) { return crate::StatusCode::BlockIsInvalid.with_context(format!( "{}, error: {}", diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index d33e2cf6af..5729ec5f0f 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -384,7 +384,6 @@ impl Synchronizer { &self, block: core::BlockView, peer_id: PeerIndex, - 
message_bytes: u64, ) -> Result { let block_hash = block.hash(); let status = self.shared.active_chain().get_block_status(&block_hash); @@ -398,7 +397,6 @@ impl Synchronizer { &self.chain, Arc::new(block), peer_id, - message_bytes, ) } else { debug!( diff --git a/sync/src/tests/synchronizer/functions.rs b/sync/src/tests/synchronizer/functions.rs index 4b81982054..2b1b12c497 100644 --- a/sync/src/tests/synchronizer/functions.rs +++ b/sync/src/tests/synchronizer/functions.rs @@ -663,7 +663,7 @@ fn test_sync_process() { for block in &fetched_blocks { let block = SendBlockBuilder::default().block(block.data()).build(); assert_eq!( - BlockProcess::new(block.as_reader(), &synchronizer1, peer1, 0).blocking_execute(), + BlockProcess::new(block.as_reader(), &synchronizer1, peer1).blocking_execute(), Status::ok(), ); } diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index c11d42215a..f6b466c887 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -1107,11 +1107,10 @@ impl SyncShared { chain: &ChainController, block: Arc, peer_id: PeerIndex, - message_bytes: u64, ) -> VerifyResult { let lonely_block: LonelyBlock = LonelyBlock { block, - peer_id: Some((peer_id, message_bytes)), + peer_id: Some(peer_id), switch: None, }; chain.blocking_process_lonely_block(lonely_block) From 72c48cf15d7f658863ef6e82e9257dbca3908b33 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 29 Jan 2024 23:26:25 +0800 Subject: [PATCH 330/357] `blocking_process_block` returns a bool, use a proper name --- util/light-client-protocol-server/src/tests/utils/chain.rs | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/util/light-client-protocol-server/src/tests/utils/chain.rs b/util/light-client-protocol-server/src/tests/utils/chain.rs index 4c906dbc4c..03e37e704b 100644 --- a/util/light-client-protocol-server/src/tests/utils/chain.rs +++ b/util/light-client-protocol-server/src/tests/utils/chain.rs @@ -141,14 +141,11 @@ impl MockChain { let block: packed::Block = 
block_template.into(); let block = build(block); let block_number = block.number(); - let verified_block_status = self + let is_ok = self .controller() .blocking_process_block(Arc::new(block)) .expect("process block"); - assert!( - verified_block_status, - "failed to process block {block_number}" - ); + assert!(is_ok, "failed to process block {block_number}"); while self .tx_pool() .get_tx_pool_info() From 571255ebdbbe674a3e403dc18152351f1469ab9e Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 30 Jan 2024 22:22:55 +0800 Subject: [PATCH 331/357] Metrics: header_map hit/miss should contains `contains_key` --- shared/src/types/header_map/kernel_lru.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/shared/src/types/header_map/kernel_lru.rs b/shared/src/types/header_map/kernel_lru.rs index bec90314a6..07dbb3d440 100644 --- a/shared/src/types/header_map/kernel_lru.rs +++ b/shared/src/types/header_map/kernel_lru.rs @@ -87,8 +87,13 @@ where self.stats().tick_primary_contain(); } if self.memory.contains_key(hash) { + ckb_metrics::handle() + .map(|metrics| metrics.ckb_header_map_memory_hit_miss_count.hit.inc()); return true; } + ckb_metrics::handle() + .map(|metrics| metrics.ckb_header_map_memory_hit_miss_count.miss.inc()); + if self.backend.is_empty() { return false; } From 1df318cf1068ee80d4b5fc680cb0a55e171f5165 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 31 Jan 2024 10:18:36 +0800 Subject: [PATCH 332/357] Add `struct RemoteBlock` for ckb-chain, let ckb-sync and ckb-relayer use RemoteBlock --- chain/src/lib.rs | 107 ++++++++++++++++++----------------------------- 1 file changed, 40 insertions(+), 67 deletions(-) diff --git a/chain/src/lib.rs b/chain/src/lib.rs index 81023b9e3d..62d9b206bb 100644 --- a/chain/src/lib.rs +++ b/chain/src/lib.rs @@ -27,7 +27,7 @@ pub use chain_controller::ChainController; pub use chain_service::start_chain_services; pub use consume_orphan::store_unverified_block; -type ProcessBlockRequest = Request; +type 
ProcessBlockRequest = Request; type TruncateRequest = Request>; /// VerifyResult is the result type to represent the result of block verification @@ -40,8 +40,16 @@ pub type VerifyResult = Result; /// VerifyCallback is the callback type to be called after block verification pub type VerifyCallback = Box; +/// RemoteBlock is received from ckb-sync and ckb-relayer +pub struct RemoteBlock { + /// block + pub block: Arc, + + /// This block is received from which peer + pub peer_id: PeerIndex, +} + /// LonelyBlock is the block which we have not check weather its parent is stored yet -#[derive(Clone)] pub struct LonelyBlock { /// block pub block: Arc, @@ -51,25 +59,12 @@ pub struct LonelyBlock { /// The Switch to control the verification process pub switch: Option, -} - -impl LonelyBlock { - /// Combine with verify_callback, convert it to LonelyBlockWithCallback - pub fn with_callback(self, verify_callback: Option) -> LonelyBlockWithCallback { - LonelyBlockWithCallback { - lonely_block: self, - verify_callback, - } - } - /// Combine with empty verify_callback, convert it to LonelyBlockWithCallback - pub fn without_callback(self) -> LonelyBlockWithCallback { - self.with_callback(None) - } + /// The optional verify_callback + pub verify_callback: Option, } /// LonelyBlock is the block which we have not check weather its parent is stored yet -#[derive(Clone)] pub struct LonelyBlockHash { /// block pub block_number_and_hash: BlockNumberAndHash, @@ -79,17 +74,12 @@ pub struct LonelyBlockHash { /// The Switch to control the verification process pub switch: Option, -} -/// LonelyBlockWithCallback Combine LonelyBlock with an optional verify_callback -pub struct LonelyBlockHashWithCallback { - /// The LonelyBlock - pub lonely_block: LonelyBlockHash, /// The optional verify_callback pub verify_callback: Option, } -impl LonelyBlockHashWithCallback { +impl LonelyBlockHash { pub(crate) fn execute_callback(self, verify_result: VerifyResult) { if let Some(verify_callback) = 
self.verify_callback { verify_callback(verify_result); @@ -97,77 +87,60 @@ impl LonelyBlockHashWithCallback { } } -impl From for LonelyBlockHashWithCallback { - fn from(val: LonelyBlockWithCallback) -> Self { - LonelyBlockHashWithCallback { - lonely_block: LonelyBlockHash { - block_number_and_hash: BlockNumberAndHash { - number: val.lonely_block.block.number(), - hash: val.lonely_block.block.hash(), - }, - peer_id: val.lonely_block.peer_id, - switch: val.lonely_block.switch, +impl From for LonelyBlockHash { + fn from(val: LonelyBlock) -> Self { + LonelyBlockHash { + block_number_and_hash: BlockNumberAndHash { + number: val.block.number(), + hash: val.block.hash(), }, + peer_id: val.peer_id, + switch: val.switch, verify_callback: val.verify_callback, } } } -/// LonelyBlockWithCallback Combine LonelyBlock with an optional verify_callback -pub struct LonelyBlockWithCallback { - /// The LonelyBlock - pub lonely_block: LonelyBlock, - /// The optional verify_callback - pub verify_callback: Option, -} - -impl LonelyBlockWithCallback { - pub(crate) fn execute_callback(self, verify_result: VerifyResult) { - if let Some(verify_callback) = self.verify_callback { - let _trace_now = minstant::Instant::now(); - - verify_callback(verify_result); - - if let Some(handle) = ckb_metrics::handle() { - handle - .ckb_chain_execute_callback_duration - .observe(_trace_now.elapsed().as_secs_f64()) - } - } - } - - /// Get reference to block - pub fn block(&self) -> &Arc { - &self.lonely_block.block +impl LonelyBlock { + pub(crate) fn block(&self) -> &Arc { + &self.block } - /// get peer_id and msg_bytes pub fn peer_id(&self) -> Option { - self.lonely_block.peer_id + self.peer_id } - /// get switch param pub fn switch(&self) -> Option { - self.lonely_block.switch + self.switch + } + + pub fn execute_callback(self, verify_result: VerifyResult) { + if let Some(verify_callback) = self.verify_callback { + verify_callback(verify_result); + } } } pub(crate) struct UnverifiedBlock { - pub 
unverified_block: LonelyBlockWithCallback, + pub lonely_block: LonelyBlock, pub parent_header: HeaderView, } impl UnverifiedBlock { pub(crate) fn block(&self) -> &Arc { - self.unverified_block.block() + self.lonely_block.block() } pub fn peer_id(&self) -> Option { - self.unverified_block.peer_id() + self.lonely_block.peer_id() + } + + pub fn switch(&self) -> Option { + self.lonely_block.switch() } pub fn execute_callback(self, verify_result: VerifyResult) { - self.unverified_block.execute_callback(verify_result) + self.lonely_block.execute_callback(verify_result) } } From fe1836db34519cf9a8ebab147d293b2461ff13f1 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 31 Jan 2024 10:19:35 +0800 Subject: [PATCH 333/357] Modify ckb-chain use RemoteBlock, drop LonelyBlockWithCallback --- chain/src/chain_controller.rs | 48 +++++++++++++++++----------- chain/src/chain_service.rs | 14 ++++---- chain/src/consume_orphan.rs | 33 ++++++++----------- chain/src/consume_unverified.rs | 32 +++++++------------ chain/src/utils/orphan_block_pool.rs | 20 ++++-------- 5 files changed, 69 insertions(+), 78 deletions(-) diff --git a/chain/src/chain_controller.rs b/chain/src/chain_controller.rs index a6a71dbeca..d07872ad8d 100644 --- a/chain/src/chain_controller.rs +++ b/chain/src/chain_controller.rs @@ -3,7 +3,7 @@ use crate::utils::orphan_block_pool::OrphanBlockPool; use crate::{ - LonelyBlock, LonelyBlockWithCallback, ProcessBlockRequest, TruncateRequest, VerifyResult, + LonelyBlock, ProcessBlockRequest, RemoteBlock, TruncateRequest, VerifyCallback, VerifyResult, }; use ckb_channel::Sender; use ckb_error::{Error, InternalErrorKind}; @@ -42,22 +42,29 @@ impl ChainController { } } - pub fn asynchronous_process_lonely_block_with_callback( + pub fn asynchronous_process_remote_block( &self, - lonely_block_with_callback: LonelyBlockWithCallback, + remote_block: RemoteBlock, + verify_callback: Option, ) { - if Request::call(&self.process_block_sender, lonely_block_with_callback).is_none() { + 
let lonely_block = LonelyBlock { + block: remote_block.block, + peer_id: Some(remote_block.peer_id), + switch: None, + verify_callback, + }; + self.asynchronous_process_lonely_block(lonely_block); + } + + fn asynchronous_process_lonely_block(&self, lonely_block: LonelyBlock) { + if Request::call(&self.process_block_sender, lonely_block).is_none() { error!("Chain service has gone") } } /// MinerRpc::submit_block and `ckb import` need this blocking way to process block pub fn blocking_process_block(&self, block: Arc) -> VerifyResult { - self.blocking_process_lonely_block(LonelyBlock { - block, - peer_id: None, - switch: None, - }) + self.blocking_process_block_with_opt_switch(block, None) } /// `IntegrationTestRpcImpl::process_block_without_verify` need this @@ -66,14 +73,14 @@ impl ChainController { block: Arc, switch: Switch, ) -> VerifyResult { - self.blocking_process_lonely_block(LonelyBlock { - block, - peer_id: None, - switch: Some(switch), - }) + self.blocking_process_block_with_opt_switch(block, Some(switch)) } - pub fn blocking_process_lonely_block(&self, lonely_block: LonelyBlock) -> VerifyResult { + pub fn blocking_process_block_with_opt_switch( + &self, + block: Arc, + switch: Option, + ) -> VerifyResult { let (verify_result_tx, verify_result_rx) = ckb_channel::oneshot::channel::(); let verify_callback = { @@ -87,9 +94,14 @@ impl ChainController { } }; - let lonely_block_with_callback = - lonely_block.with_callback(Some(Box::new(verify_callback))); - self.asynchronous_process_lonely_block_with_callback(lonely_block_with_callback); + let lonely_block = LonelyBlock { + block, + peer_id: None, + switch, + verify_callback: Some(Box::new(verify_callback)), + }; + + self.asynchronous_process_lonely_block(lonely_block); verify_result_rx.recv().unwrap_or_else(|err| { Err(InternalErrorKind::System .other(format!("blocking recv verify_result failed: {}", err)) diff --git a/chain/src/chain_service.rs b/chain/src/chain_service.rs index 4838b2d873..1dd355a0b7 100644 
--- a/chain/src/chain_service.rs +++ b/chain/src/chain_service.rs @@ -4,8 +4,8 @@ use crate::consume_unverified::ConsumeUnverifiedBlocks; use crate::utils::orphan_block_pool::OrphanBlockPool; use crate::{ - tell_synchronizer_to_punish_the_bad_peer, ChainController, LonelyBlockHashWithCallback, - LonelyBlockWithCallback, ProcessBlockRequest, + tell_synchronizer_to_punish_the_bad_peer, ChainController, LonelyBlock, LonelyBlockHash, + ProcessBlockRequest, }; use ckb_channel::{self as channel, select, Receiver, SendError, Sender}; use ckb_constant::sync::BLOCK_DOWNLOAD_WINDOW; @@ -32,7 +32,7 @@ pub fn start_chain_services(builder: ChainServicesBuilder) -> ChainController { let (unverified_queue_stop_tx, unverified_queue_stop_rx) = ckb_channel::bounded::<()>(1); let (unverified_tx, unverified_rx) = - channel::bounded::(BLOCK_DOWNLOAD_WINDOW as usize * 3); + channel::bounded::(BLOCK_DOWNLOAD_WINDOW as usize * 3); let consumer_unverified_thread = thread::Builder::new() .name("consume_unverified_blocks".into()) @@ -55,7 +55,7 @@ pub fn start_chain_services(builder: ChainServicesBuilder) -> ChainController { .expect("start unverified_queue consumer thread should ok"); let (lonely_block_tx, lonely_block_rx) = - channel::bounded::(BLOCK_DOWNLOAD_WINDOW as usize); + channel::bounded::(BLOCK_DOWNLOAD_WINDOW as usize); let (search_orphan_pool_stop_tx, search_orphan_pool_stop_rx) = ckb_channel::bounded::<()>(1); @@ -118,7 +118,7 @@ pub(crate) struct ChainService { process_block_rx: Receiver, - lonely_block_tx: Sender, + lonely_block_tx: Sender, verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, } impl ChainService { @@ -127,7 +127,7 @@ impl ChainService { shared: Shared, process_block_rx: Receiver, - lonely_block_tx: Sender, + lonely_block_tx: Sender, verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, ) -> ChainService { ChainService { @@ -188,7 +188,7 @@ impl ChainService { } // `self.non_contextual_verify` is very fast. 
- fn asynchronous_process_block(&self, lonely_block: LonelyBlockWithCallback) { + fn asynchronous_process_block(&self, lonely_block: LonelyBlock) { let block_number = lonely_block.block().number(); let block_hash = lonely_block.block().hash(); // Skip verifying a genesis block if its hash is equal to our genesis hash, diff --git a/chain/src/consume_orphan.rs b/chain/src/consume_orphan.rs index 6c1694a9f3..6cfed7d7e9 100644 --- a/chain/src/consume_orphan.rs +++ b/chain/src/consume_orphan.rs @@ -1,8 +1,5 @@ use crate::utils::orphan_block_pool::OrphanBlockPool; -use crate::{ - tell_synchronizer_to_punish_the_bad_peer, LonelyBlockHashWithCallback, LonelyBlockWithCallback, - VerifyResult, -}; +use crate::{tell_synchronizer_to_punish_the_bad_peer, LonelyBlock, LonelyBlockHash, VerifyResult}; use ckb_channel::{select, Receiver, SendError, Sender}; use ckb_error::{Error, InternalErrorKind}; use ckb_logger::internal::trace; @@ -19,7 +16,7 @@ use std::sync::Arc; pub(crate) struct ConsumeDescendantProcessor { pub shared: Shared, - pub unverified_blocks_tx: Sender, + pub unverified_blocks_tx: Sender, pub verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, } @@ -96,13 +93,9 @@ pub fn store_unverified_block( } impl ConsumeDescendantProcessor { - fn send_unverified_block( - &self, - lonely_block: LonelyBlockHashWithCallback, - total_difficulty: U256, - ) { - let block_number = lonely_block.lonely_block.block_number_and_hash.number(); - let block_hash = lonely_block.lonely_block.block_number_and_hash.hash(); + fn send_unverified_block(&self, lonely_block: LonelyBlockHash, total_difficulty: U256) { + let block_number = lonely_block.block_number_and_hash.number(); + let block_hash = lonely_block.block_number_and_hash.hash(); match self.unverified_blocks_tx.send(lonely_block) { Ok(_) => { @@ -151,13 +144,13 @@ impl ConsumeDescendantProcessor { } } - pub(crate) fn process_descendant(&self, lonely_block: LonelyBlockWithCallback) { + pub(crate) fn process_descendant(&self, 
lonely_block: LonelyBlock) { match store_unverified_block(&self.shared, lonely_block.block().to_owned()) { Ok((_parent_header, total_difficulty)) => { self.shared .insert_block_status(lonely_block.block().hash(), BlockStatus::BLOCK_STORED); - let lonely_block_hash: LonelyBlockHashWithCallback = lonely_block.into(); + let lonely_block_hash: LonelyBlockHash = lonely_block.into(); self.send_unverified_block(lonely_block_hash, total_difficulty) } @@ -181,7 +174,7 @@ impl ConsumeDescendantProcessor { } } - fn accept_descendants(&self, descendants: Vec) { + fn accept_descendants(&self, descendants: Vec) { for descendant_block in descendants { self.process_descendant(descendant_block); } @@ -194,7 +187,7 @@ pub(crate) struct ConsumeOrphan { descendant_processor: ConsumeDescendantProcessor, orphan_blocks_broker: Arc, - lonely_blocks_rx: Receiver, + lonely_blocks_rx: Receiver, stop_rx: Receiver<()>, } @@ -203,8 +196,8 @@ impl ConsumeOrphan { pub(crate) fn new( shared: Shared, orphan_block_pool: Arc, - unverified_blocks_tx: Sender, - lonely_blocks_rx: Receiver, + unverified_blocks_tx: Sender, + lonely_blocks_rx: Receiver, verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, stop_rx: Receiver<()>, ) -> ConsumeOrphan { @@ -279,7 +272,7 @@ impl ConsumeOrphan { continue; } - let descendants: Vec = self + let descendants: Vec = self .orphan_blocks_broker .remove_blocks_by_parent(&leader_hash); if descendants.is_empty() { @@ -293,7 +286,7 @@ impl ConsumeOrphan { } } - fn process_lonely_block(&self, lonely_block: LonelyBlockWithCallback) { + fn process_lonely_block(&self, lonely_block: LonelyBlock) { let parent_hash = lonely_block.block().parent_hash(); let parent_status = self .shared diff --git a/chain/src/consume_unverified.rs b/chain/src/consume_unverified.rs index db612abdb1..6dab14213d 100644 --- a/chain/src/consume_unverified.rs +++ b/chain/src/consume_unverified.rs @@ -1,7 +1,7 @@ -use crate::LonelyBlockHashWithCallback; +use crate::LonelyBlockHash; use crate::{ 
tell_synchronizer_to_punish_the_bad_peer, utils::forkchanges::ForkChanges, GlobalIndex, - LonelyBlock, LonelyBlockWithCallback, TruncateRequest, UnverifiedBlock, VerifyResult, + LonelyBlock, TruncateRequest, UnverifiedBlock, VerifyResult, }; use ckb_channel::{select, Receiver}; use ckb_error::{Error, InternalErrorKind}; @@ -40,7 +40,7 @@ pub(crate) struct ConsumeUnverifiedBlockProcessor { pub(crate) struct ConsumeUnverifiedBlocks { tx_pool_controller: TxPoolController, - unverified_block_rx: Receiver, + unverified_block_rx: Receiver, truncate_block_rx: Receiver, stop_rx: Receiver<()>, @@ -50,7 +50,7 @@ pub(crate) struct ConsumeUnverifiedBlocks { impl ConsumeUnverifiedBlocks { pub(crate) fn new( shared: Shared, - unverified_blocks_rx: Receiver, + unverified_blocks_rx: Receiver, truncate_block_rx: Receiver, proposal_table: ProposalTable, verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, @@ -116,14 +116,11 @@ impl ConsumeUnverifiedBlocks { } impl ConsumeUnverifiedBlockProcessor { - fn load_full_unverified_block( - &self, - lonely_block: LonelyBlockHashWithCallback, - ) -> UnverifiedBlock { + fn load_full_unverified_block(&self, lonely_block: LonelyBlockHash) -> UnverifiedBlock { let block_view = self .shared .store() - .get_block(&lonely_block.lonely_block.block_number_and_hash.hash()) + .get_block(&lonely_block.block_number_and_hash.hash()) .expect("block stored"); let parent_header_view = self .shared @@ -132,28 +129,23 @@ impl ConsumeUnverifiedBlockProcessor { .expect("parent header stored"); UnverifiedBlock { - unverified_block: LonelyBlockWithCallback { - lonely_block: LonelyBlock { - block: Arc::new(block_view), - peer_id: lonely_block.lonely_block.peer_id, - switch: lonely_block.lonely_block.switch, - }, + lonely_block: LonelyBlock { + block: Arc::new(block_view), + peer_id: lonely_block.peer_id, + switch: lonely_block.switch, verify_callback: lonely_block.verify_callback, }, parent_header: parent_header_view, } } - pub(crate) fn 
consume_unverified_blocks( - &mut self, - lonely_block_hash: LonelyBlockHashWithCallback, - ) { + pub(crate) fn consume_unverified_blocks(&mut self, lonely_block_hash: LonelyBlockHash) { let unverified_block = self.load_full_unverified_block(lonely_block_hash); // process this unverified block let verify_result = self.verify_block( unverified_block.block(), &unverified_block.parent_header, - unverified_block.unverified_block.switch(), + unverified_block.switch(), ); match &verify_result { Ok(_) => { diff --git a/chain/src/utils/orphan_block_pool.rs b/chain/src/utils/orphan_block_pool.rs index 6a6701c93a..7556f6d6c7 100644 --- a/chain/src/utils/orphan_block_pool.rs +++ b/chain/src/utils/orphan_block_pool.rs @@ -1,5 +1,5 @@ #![allow(dead_code)] -use crate::LonelyBlockWithCallback; +use crate::LonelyBlock; use ckb_logger::debug; use ckb_types::core::{BlockView, EpochNumber}; use ckb_types::packed; @@ -15,7 +15,7 @@ const EXPIRED_EPOCH: u64 = 6; #[derive(Default)] struct InnerPool { // Group by blocks in the pool by the parent hash. - blocks: HashMap>, + blocks: HashMap>, // The map tells the parent hash when given the hash of a block in the pool. // // The block is in the orphan pool if and only if the block hash exists as a key in this map. 
@@ -33,7 +33,7 @@ impl InnerPool { } } - fn insert(&mut self, lonely_block: LonelyBlockWithCallback) { + fn insert(&mut self, lonely_block: LonelyBlock) { let hash = lonely_block.block().header().hash(); let parent_hash = lonely_block.block().data().header().raw().parent_hash(); self.blocks @@ -53,10 +53,7 @@ impl InnerPool { self.parents.insert(hash, parent_hash); } - pub fn remove_blocks_by_parent( - &mut self, - parent_hash: &ParentHash, - ) -> Vec { + pub fn remove_blocks_by_parent(&mut self, parent_hash: &ParentHash) -> Vec { // try remove leaders first if !self.leaders.remove(parent_hash) { return Vec::new(); @@ -65,7 +62,7 @@ impl InnerPool { let mut queue: VecDeque = VecDeque::new(); queue.push_back(parent_hash.to_owned()); - let mut removed: Vec = Vec::new(); + let mut removed: Vec = Vec::new(); while let Some(parent_hash) = queue.pop_front() { if let Some(orphaned) = self.blocks.remove(&parent_hash) { let (hashes, blocks): (Vec<_>, Vec<_>) = orphaned.into_iter().unzip(); @@ -151,14 +148,11 @@ impl OrphanBlockPool { } /// Insert orphaned block, for which we have already requested its parent block - pub fn insert(&self, lonely_block: LonelyBlockWithCallback) { + pub fn insert(&self, lonely_block: LonelyBlock) { self.inner.write().insert(lonely_block); } - pub fn remove_blocks_by_parent( - &self, - parent_hash: &ParentHash, - ) -> Vec { + pub fn remove_blocks_by_parent(&self, parent_hash: &ParentHash) -> Vec { self.inner.write().remove_blocks_by_parent(parent_hash) } From c96293bfc7e67f9bacb9953e0c3890c0f4ed1e1e Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 31 Jan 2024 10:20:10 +0800 Subject: [PATCH 334/357] ckb-sync use accept_remote_block to process remote block --- sync/src/relayer/mod.rs | 17 +++++---- sync/src/synchronizer/block_process.rs | 8 +++- sync/src/synchronizer/mod.rs | 10 ++--- sync/src/types/mod.rs | 51 +++----------------------- 4 files changed, 28 insertions(+), 58 deletions(-) diff --git a/sync/src/relayer/mod.rs 
b/sync/src/relayer/mod.rs index 981a59c5b4..a66d575cef 100644 --- a/sync/src/relayer/mod.rs +++ b/sync/src/relayer/mod.rs @@ -25,8 +25,8 @@ use crate::utils::{ is_internal_db_error, metric_ckb_message_bytes, send_message_to, MetricDirection, }; use crate::{Status, StatusCode}; -use ckb_chain::ChainController; use ckb_chain::VerifyResult; +use ckb_chain::{ChainController, RemoteBlock}; use ckb_constant::sync::BAD_MESSAGE_BAN_TIME; use ckb_logger::{ debug, debug_target, error, error_target, info_target, trace_target, warn_target, @@ -301,7 +301,7 @@ impl Relayer { pub fn accept_block( &self, _nc: &dyn CKBProtocolContext, - peer: PeerIndex, + peer_id: PeerIndex, block: core::BlockView, ) -> Status { if self @@ -313,6 +313,10 @@ impl Relayer { } let block = Arc::new(block); + let remote_block = RemoteBlock { + block: Arc::clone(&block), + peer_id, + }; let verify_success_callback = { let broadcast_compact_block_tx = self.broadcast_compact_block_tx.clone(); @@ -328,7 +332,7 @@ impl Relayer { return; } - if broadcast_compact_block_tx.send((block, peer)).is_err() { + if broadcast_compact_block_tx.send((block, peer_id)).is_err() { error!( "send block to broadcast_compact_block_tx failed, this shouldn't happen", ); @@ -345,11 +349,10 @@ impl Relayer { } }; - self.shared().insert_new_block_with_callback( + self.shared.accept_remote_block( &self.chain, - Arc::clone(&block), - peer, - Box::new(verify_success_callback), + remote_block, + Some(Box::new(verify_success_callback)), ); } diff --git a/sync/src/synchronizer/block_process.rs b/sync/src/synchronizer/block_process.rs index b9f3fe6cab..76cec28376 100644 --- a/sync/src/synchronizer/block_process.rs +++ b/sync/src/synchronizer/block_process.rs @@ -1,7 +1,9 @@ use crate::synchronizer::Synchronizer; +use ckb_chain::RemoteBlock; use ckb_logger::debug; use ckb_network::PeerIndex; use ckb_types::{packed, prelude::*}; +use std::sync::Arc; pub struct BlockProcess<'a> { message: packed::SendBlockReader<'a>, @@ -32,8 +34,12 @@ 
impl<'a> BlockProcess<'a> { let shared = self.synchronizer.shared(); if shared.new_block_received(&block) { + let remote_block = RemoteBlock { + block: Arc::new(block), + peer_id: self.peer, + }; self.synchronizer - .asynchronous_process_new_block(block.clone(), self.peer); + .asynchronous_process_remote_block(remote_block); } // block process is asynchronous, so we only return ignored here diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index 5729ec5f0f..f24fa0ab0d 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -25,7 +25,7 @@ use crate::utils::{metric_ckb_message_bytes, send_message_to, MetricDirection}; use crate::{Status, StatusCode}; use ckb_shared::block_status::BlockStatus; -use ckb_chain::ChainController; +use ckb_chain::{ChainController, RemoteBlock}; use ckb_channel as channel; use ckb_channel::{select, Receiver}; use ckb_constant::sync::{ @@ -42,7 +42,7 @@ use ckb_shared::types::{HeaderIndexView, VerifyFailedBlockInfo}; use ckb_stop_handler::{new_crossbeam_exit_rx, register_thread}; use ckb_systemtime::unix_time_as_millis; use ckb_types::{ - core::{self, BlockNumber}, + core::BlockNumber, packed::{self, Byte32}, prelude::*, }; @@ -360,8 +360,8 @@ impl Synchronizer { /// Process a new block sync from other peer //TODO: process block which we don't request - pub fn asynchronous_process_new_block(&self, block: core::BlockView, peer_id: PeerIndex) { - let block_hash = block.hash(); + pub fn asynchronous_process_remote_block(&self, remote_block: RemoteBlock) { + let block_hash = remote_block.block.hash(); let status = self.shared.active_chain().get_block_status(&block_hash); // NOTE: Filtering `BLOCK_STORED` but not `BLOCK_RECEIVED`, is for avoiding // stopping synchronization even when orphan_pool maintains dirty items by bugs. 
@@ -369,7 +369,7 @@ impl Synchronizer { error!("Block {} already stored", block_hash); } else if status.contains(BlockStatus::HEADER_VALID) { self.shared - .insert_new_block(&self.chain, Arc::new(block), peer_id); + .accept_remote_block(&self.chain, remote_block, None); } else { debug!( "Synchronizer process_new_block unexpected status {:?} {}", diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index f6b466c887..f42974ea73 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -1,9 +1,9 @@ use crate::{Status, StatusCode, FAST_INDEX, LOW_INDEX, NORMAL_INDEX, TIME_TRACE_SIZE}; use ckb_app_config::SyncConfig; -use ckb_chain::ChainController; +use ckb_chain::VerifyCallback; #[cfg(test)] use ckb_chain::VerifyResult; -use ckb_chain::{LonelyBlock, VerifyCallback}; +use ckb_chain::{ChainController, RemoteBlock}; use ckb_chain_spec::consensus::{Consensus, MAX_BLOCK_INTERVAL, MIN_BLOCK_INTERVAL}; use ckb_channel::Receiver; use ckb_constant::sync::{ @@ -1059,37 +1059,6 @@ impl SyncShared { self.shared.consensus() } - /// Insert new block with callback - pub fn insert_new_block_with_callback( - &self, - chain: &ChainController, - block: Arc, - peer_id: PeerIndex, - verify_success_callback: VerifyCallback, - ) { - self.accept_block( - chain, - Arc::clone(&block), - Some(peer_id), - Some(verify_success_callback), - ) - } - - /// Insert new block to chain store - pub fn insert_new_block( - &self, - chain: &ChainController, - block: Arc, - peer_id: PeerIndex, - ) { - self.accept_block( - chain, - Arc::clone(&block), - Some(peer_id), - None::, - ); - } - // Only used by unit test // Blocking insert a new block, return the verify result #[cfg(test)] @@ -1116,31 +1085,23 @@ impl SyncShared { chain.blocking_process_lonely_block(lonely_block) } - pub(crate) fn accept_block( + pub(crate) fn accept_remote_block( &self, chain: &ChainController, - block: Arc, - peer_id: Option, + remote_block: RemoteBlock, verify_callback: Option, ) { { let entry = self .shared() 
.block_status_map() - .entry(block.header().hash()); + .entry(remote_block.block.header().hash()); if let dashmap::mapref::entry::Entry::Vacant(entry) = entry { entry.insert(BlockStatus::BLOCK_RECEIVED); } } - let lonely_block_with_callback = LonelyBlock { - block, - peer_id, - switch: None, - } - .with_callback(verify_callback); - - chain.asynchronous_process_lonely_block_with_callback(lonely_block_with_callback); + chain.asynchronous_process_remote_block(remote_block, verify_callback) } /// Sync a new valid header, try insert to sync state From c063800916bd91afb2838ea0944260d88388ad60 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 31 Jan 2024 10:43:25 +0800 Subject: [PATCH 335/357] Modify unit test in ckb-sync and ckb-chain to use RemoteBlock related API --- chain/src/chain_controller.rs | 14 +++-- chain/src/tests/find_fork.rs | 31 +++-------- chain/src/tests/orphan_block_pool.rs | 82 +++++++++++++++++++--------- sync/src/synchronizer/mod.rs | 11 ++-- sync/src/tests/sync_shared.rs | 20 ++++++- sync/src/types/mod.rs | 15 ----- 6 files changed, 99 insertions(+), 74 deletions(-) diff --git a/chain/src/chain_controller.rs b/chain/src/chain_controller.rs index d07872ad8d..550f8cc945 100644 --- a/chain/src/chain_controller.rs +++ b/chain/src/chain_controller.rs @@ -8,6 +8,7 @@ use crate::{ use ckb_channel::Sender; use ckb_error::{Error, InternalErrorKind}; use ckb_logger::{self, error}; +use ckb_network::PeerIndex; use ckb_types::{ core::{service::Request, BlockView}, packed::Byte32, @@ -64,7 +65,11 @@ impl ChainController { /// MinerRpc::submit_block and `ckb import` need this blocking way to process block pub fn blocking_process_block(&self, block: Arc) -> VerifyResult { - self.blocking_process_block_with_opt_switch(block, None) + self.blocking_process_block_internal(block, None, None) + } + + pub fn blocking_process_remote_block(&self, remote_block: RemoteBlock) -> VerifyResult { + self.blocking_process_block_internal(remote_block.block, 
Some(remote_block.peer_id), None) } /// `IntegrationTestRpcImpl::process_block_without_verify` need this @@ -73,12 +78,13 @@ impl ChainController { block: Arc, switch: Switch, ) -> VerifyResult { - self.blocking_process_block_with_opt_switch(block, Some(switch)) + self.blocking_process_block_internal(block, None, Some(switch)) } - pub fn blocking_process_block_with_opt_switch( + fn blocking_process_block_internal( &self, block: Arc, + peer_id: Option, switch: Option, ) -> VerifyResult { let (verify_result_tx, verify_result_rx) = ckb_channel::oneshot::channel::(); @@ -96,7 +102,7 @@ impl ChainController { let lonely_block = LonelyBlock { block, - peer_id: None, + peer_id, switch, verify_callback: Some(Box::new(verify_callback)), }; diff --git a/chain/src/tests/find_fork.rs b/chain/src/tests/find_fork.rs index cf2538a6a2..309fb86853 100644 --- a/chain/src/tests/find_fork.rs +++ b/chain/src/tests/find_fork.rs @@ -1,10 +1,7 @@ use crate::consume_orphan::ConsumeDescendantProcessor; use crate::consume_unverified::ConsumeUnverifiedBlockProcessor; use crate::utils::forkchanges::ForkChanges; -use crate::{ - start_chain_services, LonelyBlock, LonelyBlockHash, LonelyBlockHashWithCallback, - LonelyBlockWithCallback, VerifyFailedBlockInfo, -}; +use crate::{start_chain_services, LonelyBlock, LonelyBlockHash, VerifyFailedBlockInfo}; use ckb_chain_spec::consensus::{Consensus, ProposalWindow}; use ckb_proposal_table::ProposalTable; use ckb_shared::types::BlockNumberAndHash; @@ -33,23 +30,18 @@ fn process_block( peer_id: None, switch: Some(switch), block_number_and_hash: BlockNumberAndHash::new(blk.number(), blk.hash()), + verify_callback: None, }; let lonely_block = LonelyBlock { peer_id: None, switch: Some(switch), block: Arc::new(blk.to_owned()), + verify_callback: None, }; - consume_descendant_processor.process_descendant(LonelyBlockWithCallback { - verify_callback: None, - lonely_block, - }); + consume_descendant_processor.process_descendant(lonely_block); - let 
lonely_block_hash = LonelyBlockHashWithCallback { - verify_callback: None, - lonely_block: lonely_block_hash, - }; consume_unverified_block_processor.consume_unverified_blocks(lonely_block_hash); } @@ -83,8 +75,7 @@ fn test_find_fork_case1() { let (verify_failed_blocks_tx, _verify_failed_blocks_rx) = tokio::sync::mpsc::unbounded_channel::(); - let (unverified_blocks_tx, _unverified_blocks_rx) = - channel::unbounded::(); + let (unverified_blocks_tx, _unverified_blocks_rx) = channel::unbounded::(); let consume_descendant_processor = ConsumeDescendantProcessor { shared: shared.clone(), unverified_blocks_tx, @@ -176,8 +167,7 @@ fn test_find_fork_case2() { let proposal_table = ProposalTable::new(consensus.tx_proposal_window()); let (verify_failed_blocks_tx, _verify_failed_blocks_rx) = tokio::sync::mpsc::unbounded_channel::(); - let (unverified_blocks_tx, _unverified_blocks_rx) = - channel::unbounded::(); + let (unverified_blocks_tx, _unverified_blocks_rx) = channel::unbounded::(); let consume_descendant_processor = ConsumeDescendantProcessor { shared: shared.clone(), unverified_blocks_tx, @@ -270,8 +260,7 @@ fn test_find_fork_case3() { let proposal_table = ProposalTable::new(consensus.tx_proposal_window()); let (verify_failed_blocks_tx, _verify_failed_blocks_rx) = tokio::sync::mpsc::unbounded_channel::(); - let (unverified_blocks_tx, _unverified_blocks_rx) = - channel::unbounded::(); + let (unverified_blocks_tx, _unverified_blocks_rx) = channel::unbounded::(); let consume_descendant_processor = ConsumeDescendantProcessor { shared: shared.clone(), unverified_blocks_tx, @@ -362,8 +351,7 @@ fn test_find_fork_case4() { let proposal_table = ProposalTable::new(consensus.tx_proposal_window()); let (verify_failed_blocks_tx, _verify_failed_blocks_rx) = tokio::sync::mpsc::unbounded_channel::(); - let (unverified_blocks_tx, _unverified_blocks_rx) = - channel::unbounded::(); + let (unverified_blocks_tx, _unverified_blocks_rx) = channel::unbounded::(); let 
consume_descendant_processor = ConsumeDescendantProcessor { shared: shared.clone(), unverified_blocks_tx, @@ -455,8 +443,7 @@ fn repeatedly_switch_fork() { let proposal_table = ProposalTable::new(consensus.tx_proposal_window()); let (verify_failed_blocks_tx, _verify_failed_blocks_rx) = tokio::sync::mpsc::unbounded_channel::(); - let (unverified_blocks_tx, _unverified_blocks_rx) = - channel::unbounded::(); + let (unverified_blocks_tx, _unverified_blocks_rx) = channel::unbounded::(); let consume_descendant_processor = ConsumeDescendantProcessor { shared: shared.clone(), unverified_blocks_tx, diff --git a/chain/src/tests/orphan_block_pool.rs b/chain/src/tests/orphan_block_pool.rs index 2974852483..f73319495f 100644 --- a/chain/src/tests/orphan_block_pool.rs +++ b/chain/src/tests/orphan_block_pool.rs @@ -1,5 +1,5 @@ #![allow(dead_code)] -use crate::{LonelyBlock, LonelyBlockWithCallback}; +use crate::LonelyBlock; use ckb_chain_spec::consensus::ConsensusBuilder; use ckb_systemtime::unix_time_as_millis; use ckb_types::core::{BlockBuilder, BlockView, EpochNumberWithFraction, HeaderView}; @@ -23,13 +23,10 @@ fn gen_lonely_block(parent_header: &HeaderView) -> LonelyBlock { block: Arc::new(block), peer_id: None, switch: None, + verify_callback: None, } } -fn gen_lonely_block_with_callback(parent_header: &HeaderView) -> LonelyBlockWithCallback { - gen_lonely_block(parent_header).without_callback() -} - #[test] fn test_remove_blocks_by_parent() { let consensus = ConsensusBuilder::default().build(); @@ -39,8 +36,13 @@ fn test_remove_blocks_by_parent() { let pool = OrphanBlockPool::with_capacity(200); for _ in 1..block_number { let lonely_block = gen_lonely_block(&parent); - let new_block_clone = lonely_block.clone().without_callback(); - let new_block = lonely_block.without_callback(); + let new_block_clone = lonely_block.block().clone(); + let new_block = LonelyBlock { + block: new_block_clone.clone(), + peer_id: None, + switch: None, + verify_callback: None, + }; 
blocks.push(new_block_clone); parent = new_block.block().header(); @@ -48,8 +50,8 @@ fn test_remove_blocks_by_parent() { } let orphan = pool.remove_blocks_by_parent(&consensus.genesis_block().hash()); - let orphan_set: HashSet<_> = orphan.into_iter().map(|b| b.lonely_block.block).collect(); - let blocks_set: HashSet<_> = blocks.into_iter().map(|b| b.lonely_block.block).collect(); + let orphan_set: HashSet<_> = orphan.into_iter().map(|b| b.block).collect(); + let blocks_set: HashSet<_> = blocks.into_iter().map(|b| b.to_owned()).collect(); assert_eq!(orphan_set, blocks_set) } @@ -61,10 +63,15 @@ fn test_remove_blocks_by_parent_and_get_block_should_not_deadlock() { let mut hashes = Vec::new(); for _ in 1..1024 { let lonely_block = gen_lonely_block(&header); - let new_block = lonely_block.clone().without_callback(); - let new_block_clone = lonely_block.without_callback(); + let new_block = lonely_block.block(); + let new_block_clone = LonelyBlock { + block: Arc::clone(new_block), + peer_id: None, + switch: None, + verify_callback: None, + }; pool.insert(new_block_clone); - header = new_block.block().header(); + header = new_block.header(); hashes.push(header.hash()); } @@ -91,7 +98,12 @@ fn test_leaders() { let pool = OrphanBlockPool::with_capacity(20); for i in 0..block_number - 1 { let lonely_block = gen_lonely_block(&parent); - let new_block = lonely_block.clone().without_callback(); + let new_block = LonelyBlock { + block: Arc::clone(lonely_block.block()), + peer_id: None, + switch: None, + verify_callback: None, + }; blocks.push(lonely_block); parent = new_block.block().header(); if i % 5 != 0 { @@ -102,11 +114,21 @@ fn test_leaders() { assert_eq!(pool.len(), 15); assert_eq!(pool.leaders_len(), 4); - pool.insert(blocks[5].clone().without_callback()); + pool.insert(LonelyBlock { + block: blocks[5].block().clone(), + peer_id: None, + switch: None, + verify_callback: None, + }); assert_eq!(pool.len(), 16); assert_eq!(pool.leaders_len(), 3); - 
pool.insert(blocks[10].clone().without_callback()); + pool.insert(LonelyBlock { + block: blocks[10].block().clone(), + peer_id: None, + switch: None, + verify_callback: None, + }); assert_eq!(pool.len(), 17); assert_eq!(pool.leaders_len(), 2); @@ -116,7 +138,12 @@ fn test_leaders() { assert_eq!(pool.len(), 17); assert_eq!(pool.leaders_len(), 2); - pool.insert(blocks[0].clone().without_callback()); + pool.insert(LonelyBlock { + block: blocks[0].block().clone(), + peer_id: None, + switch: None, + verify_callback: None, + }); assert_eq!(pool.len(), 18); assert_eq!(pool.leaders_len(), 2); @@ -124,7 +151,12 @@ fn test_leaders() { assert_eq!(pool.len(), 3); assert_eq!(pool.leaders_len(), 1); - pool.insert(blocks[15].clone().without_callback()); + pool.insert(LonelyBlock { + block: blocks[15].block().clone(), + peer_id: None, + switch: None, + verify_callback: None, + }); assert_eq!(pool.len(), 4); assert_eq!(pool.leaders_len(), 1); @@ -132,8 +164,8 @@ fn test_leaders() { let orphan_set: HashSet> = orphan .into_iter() - .map(|b| b.lonely_block.block) - .chain(orphan_1.into_iter().map(|b| b.lonely_block.block)) + .map(|b| b.block) + .chain(orphan_1.into_iter().map(|b| b.block)) .collect(); let blocks_set: HashSet> = blocks.into_iter().map(|b| b.block).collect(); assert_eq!(orphan_set, blocks_set); @@ -160,15 +192,13 @@ fn test_remove_expired_blocks() { .build(); parent = new_block.header(); - let lonely_block_with_callback = LonelyBlockWithCallback { - lonely_block: LonelyBlock { - block: Arc::new(new_block), - peer_id: None, - switch: None, - }, + let lonely_block = LonelyBlock { + block: Arc::new(new_block), + peer_id: None, + switch: None, verify_callback: None, }; - pool.insert(lonely_block_with_callback); + pool.insert(lonely_block); } assert_eq!(pool.leaders_len(), 1); diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index f24fa0ab0d..3b72f57605 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -41,6 +41,9 @@ use 
ckb_network::{ use ckb_shared::types::{HeaderIndexView, VerifyFailedBlockInfo}; use ckb_stop_handler::{new_crossbeam_exit_rx, register_thread}; use ckb_systemtime::unix_time_as_millis; + +#[cfg(test)] +use ckb_types::core; use ckb_types::{ core::BlockNumber, packed::{self, Byte32}, @@ -393,11 +396,11 @@ impl Synchronizer { error!("block {} already stored", block_hash); Ok(false) } else if status.contains(BlockStatus::HEADER_VALID) { - self.shared.blocking_insert_new_block_with_verbose_info( - &self.chain, - Arc::new(block), + let remote_block = RemoteBlock { + block: Arc::new(block), peer_id, - ) + }; + self.chain.blocking_process_remote_block(remote_block) } else { debug!( "Synchronizer process_new_block unexpected status {:?} {}", diff --git a/sync/src/tests/sync_shared.rs b/sync/src/tests/sync_shared.rs index a25060165e..456ecb70bc 100644 --- a/sync/src/tests/sync_shared.rs +++ b/sync/src/tests/sync_shared.rs @@ -3,7 +3,7 @@ use crate::tests::util::{build_chain, inherit_block}; use crate::SyncShared; -use ckb_chain::{start_chain_services, store_unverified_block}; +use ckb_chain::{start_chain_services, store_unverified_block, RemoteBlock}; use ckb_logger::info; use ckb_logger_service::LoggerInitGuard; use ckb_shared::block_status::BlockStatus; @@ -108,8 +108,22 @@ fn test_insert_parent_unknown_block() { let valid_hash = valid_orphan.header().hash(); let invalid_hash = invalid_orphan.header().hash(); let parent_hash = parent.header().hash(); - shared.accept_block(&chain, Arc::clone(&valid_orphan), None, None); - shared.accept_block(&chain, Arc::clone(&invalid_orphan), None, None); + shared.accept_remote_block( + &chain, + RemoteBlock { + block: Arc::clone(&valid_orphan), + peer_id: Default::default(), + }, + None, + ); + shared.accept_remote_block( + &chain, + RemoteBlock { + block: Arc::clone(&invalid_orphan), + peer_id: Default::default(), + }, + None, + ); let wait_for_block_status_match = |hash: &Byte32, expect_status: BlockStatus| -> bool { let mut 
status_match = false; diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index f42974ea73..2766ddc723 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -1070,21 +1070,6 @@ impl SyncShared { chain.blocking_process_block(block) } - #[cfg(test)] - pub(crate) fn blocking_insert_new_block_with_verbose_info( - &self, - chain: &ChainController, - block: Arc, - peer_id: PeerIndex, - ) -> VerifyResult { - let lonely_block: LonelyBlock = LonelyBlock { - block, - peer_id: Some(peer_id), - switch: None, - }; - chain.blocking_process_lonely_block(lonely_block) - } - pub(crate) fn accept_remote_block( &self, chain: &ChainController, From 1b9b72b330866688e83ceb986a95859e32d981a7 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 1 Feb 2024 11:10:19 +0800 Subject: [PATCH 336/357] Fix HeaderMap memory count --- shared/src/types/header_map/memory.rs | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/shared/src/types/header_map/memory.rs b/shared/src/types/header_map/memory.rs index 3def8951d3..b88a504256 100644 --- a/shared/src/types/header_map/memory.rs +++ b/shared/src/types/header_map/memory.rs @@ -93,23 +93,27 @@ impl MemoryMap { } pub(crate) fn insert(&self, header: HeaderIndexView) -> Option<()> { - ckb_metrics::handle().map(|metrics| metrics.ckb_header_map_memory_count.inc()); - let mut guard = self.0.write(); let (key, value) = header.into(); - guard.insert(key, value).map(|_| ()) + let ret = guard.insert(key, value); + if ret.is_none() { + ckb_metrics::handle().map(|metrics| metrics.ckb_header_map_memory_count.inc()); + } + ret.map(|_| ()) } pub(crate) fn remove(&self, key: &Byte32, shrink_to_fit: bool) -> Option { - ckb_metrics::handle().map(|metrics| metrics.ckb_header_map_memory_count.dec()); - let mut guard = self.0.write(); let ret = guard.remove(key); if shrink_to_fit { shrink_to_fit!(guard, SHRINK_THRESHOLD); } - ret.map(|inner| (key.clone(), inner).into()) + ret.map(|inner| { + 
ckb_metrics::handle().map(|metrics| metrics.ckb_header_map_memory_count.dec()); + + (key.clone(), inner).into() + }) } pub(crate) fn front_n(&self, size_limit: usize) -> Option> { @@ -133,8 +137,9 @@ impl MemoryMap { let mut guard = self.0.write(); let mut keys_count = 0; for key in keys { - guard.remove(&key); - keys_count += 1; + if let Some(_old_value) = guard.remove(&key) { + keys_count += 1; + } } ckb_metrics::handle().map(|metrics| metrics.ckb_header_map_memory_count.sub(keys_count)); From 7afa494e6e3a97b102a2db913c15f48d2d220520 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Fri, 2 Feb 2024 16:26:45 +0800 Subject: [PATCH 337/357] Fix rebase conflicts with develop: https://github.com/nervosnetwork/ckb/tree/dfa4f3753862261818eb93c540e7a3679ef4acc9 --- sync/src/relayer/block_transactions_process.rs | 5 ++--- sync/src/relayer/compact_block_process.rs | 5 ++--- sync/src/relayer/mod.rs | 8 +++----- 3 files changed, 7 insertions(+), 11 deletions(-) diff --git a/sync/src/relayer/block_transactions_process.rs b/sync/src/relayer/block_transactions_process.rs index 6b1161b36e..7c8487c94c 100644 --- a/sync/src/relayer/block_transactions_process.rs +++ b/sync/src/relayer/block_transactions_process.rs @@ -116,10 +116,9 @@ impl<'a> BlockTransactionsProcess<'a> { match ret { ReconstructionResult::Block(block) => { pending.remove(); - let status = self - .relayer + self.relayer .accept_block(self.nc.as_ref(), self.peer, block); - return status; + return Status::ok(); } ReconstructionResult::Missing(transactions, uncles) => { // We need to get all transactions and uncles that do not exist locally diff --git a/sync/src/relayer/compact_block_process.rs b/sync/src/relayer/compact_block_process.rs index 426b38da42..30b255c658 100644 --- a/sync/src/relayer/compact_block_process.rs +++ b/sync/src/relayer/compact_block_process.rs @@ -117,8 +117,7 @@ impl<'a> CompactBlockProcess<'a> { >= block.epoch().number() }); shrink_to_fit!(pending_compact_blocks, 20); - let status = self - 
.relayer + self.relayer .accept_block(self.nc.as_ref(), self.peer, block); if let Some(metrics) = ckb_metrics::handle() { @@ -126,7 +125,7 @@ impl<'a> CompactBlockProcess<'a> { .ckb_relay_cb_verify_duration .observe(instant.elapsed().as_secs_f64()); } - status + Status::ok() } ReconstructionResult::Missing(transactions, uncles) => { let missing_transactions: Vec = diff --git a/sync/src/relayer/mod.rs b/sync/src/relayer/mod.rs index a66d575cef..dd9644fbd2 100644 --- a/sync/src/relayer/mod.rs +++ b/sync/src/relayer/mod.rs @@ -21,9 +21,7 @@ use self::get_transactions_process::GetTransactionsProcess; use self::transaction_hashes_process::TransactionHashesProcess; use self::transactions_process::TransactionsProcess; use crate::types::{ActiveChain, SyncShared}; -use crate::utils::{ - is_internal_db_error, metric_ckb_message_bytes, send_message_to, MetricDirection, -}; +use crate::utils::{metric_ckb_message_bytes, send_message_to, MetricDirection}; use crate::{Status, StatusCode}; use ckb_chain::VerifyResult; use ckb_chain::{ChainController, RemoteBlock}; @@ -303,13 +301,13 @@ impl Relayer { _nc: &dyn CKBProtocolContext, peer_id: PeerIndex, block: core::BlockView, - ) -> Status { + ) { if self .shared() .active_chain() .contains_block_status(&block.hash(), BlockStatus::BLOCK_STORED) { - return Status::ok(); + return; } let block = Arc::new(block); From cc6fab7b83adf22af02ecc2345614ebbcfea9694 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Sun, 4 Feb 2024 15:18:47 +0800 Subject: [PATCH 338/357] Fix chain service builder for test_accept_block --- .../relayer/tests/compact_block_process.rs | 26 ++++++++++++------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/sync/src/relayer/tests/compact_block_process.rs b/sync/src/relayer/tests/compact_block_process.rs index 78a27a6128..1e69b94d46 100644 --- a/sync/src/relayer/tests/compact_block_process.rs +++ b/sync/src/relayer/tests/compact_block_process.rs @@ -3,9 +3,10 @@ use crate::relayer::tests::helper::{ 
build_chain, gen_block, new_header_builder, MockProtocolContext, }; use crate::{Status, StatusCode}; -use ckb_chain::chain::ChainService; +use ckb_chain::start_chain_services; use ckb_network::{PeerIndex, SupportProtocols}; use ckb_shared::block_status::BlockStatus; +use ckb_shared::ChainServicesBuilder; use ckb_store::ChainStore; use ckb_systemtime::unix_time_as_millis; use ckb_tx_pool::{PlugTarget, TxEntry}; @@ -377,16 +378,23 @@ fn test_accept_block() { ); } + let (verify_failed_blocks_tx, _verify_failed_blocks_rx) = + tokio::sync::mpsc::unbounded_channel(); { - let chain_controller = { - let proposal_window = ckb_proposal_table::ProposalTable::new( - relayer.shared().shared().consensus().tx_proposal_window(), - ); - let chain_service = - ChainService::new(relayer.shared().shared().to_owned(), proposal_window); - chain_service.start::<&str>(None) + let proposal_table = ckb_proposal_table::ProposalTable::new( + relayer.shared().shared().consensus().tx_proposal_window(), + ); + let chain_service_builder = ChainServicesBuilder { + shared: relayer.shared().shared().to_owned(), + proposal_table, + verify_failed_blocks_tx, }; - chain_controller.process_block(Arc::new(uncle)).unwrap(); + + let chain_controller = start_chain_services(chain_service_builder); + + chain_controller + .blocking_process_block(Arc::new(uncle)) + .unwrap(); } let mut prefilled_transactions_indexes = HashSet::new(); From 79b70900b3b932aa376e40fffde1fbd0c85431c4 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Sun, 4 Feb 2024 15:57:08 +0800 Subject: [PATCH 339/357] add inflight count and inflight timeout count for ckb-metrics --- sync/src/synchronizer/block_fetcher.rs | 10 ++++++++-- sync/src/types/mod.rs | 4 ++++ util/metrics/src/lib.rs | 14 +++++++++++++- 3 files changed, 25 insertions(+), 3 deletions(-) diff --git a/sync/src/synchronizer/block_fetcher.rs b/sync/src/synchronizer/block_fetcher.rs index c74eccb044..7dc2c241ee 100644 --- a/sync/src/synchronizer/block_fetcher.rs +++ 
b/sync/src/synchronizer/block_fetcher.rs @@ -269,6 +269,13 @@ impl BlockFetcher { .mark_slow_block(unverified_tip); } + let inflight_total_count = state.read_inflight_blocks().total_inflight_count(); + ckb_metrics::handle().map(|metrics| { + metrics + .ckb_inflight_blocks_count + .set(inflight_total_count as i64); + }); + if fetch.is_empty() { debug!( "[block fetch empty] peer-{}, fixed_last_common_header = {} \ @@ -278,7 +285,7 @@ impl BlockFetcher { best_known.number(), tip, unverified_tip, - state.read_inflight_blocks().total_inflight_count(), + inflight_total_count, ); trace!( "[block fetch empty] peer-{}, inflight_state = {:?}", @@ -289,7 +296,6 @@ impl BlockFetcher { let fetch_head = fetch.first().map_or(0_u64, |v| v.number()); let fetch_last = fetch.last().map_or(0_u64, |v| v.number()); let inflight_peer_count = state.read_inflight_blocks().peer_inflight_count(self.peer); - let inflight_total_count = state.read_inflight_blocks().total_inflight_count(); debug!( "request peer-{} for batch blocks: [{}-{}], batch len:{}, [tip/unverified_tip]: [{}/{}], [peer/total inflight count]: [{} / {}], blocks: {}", self.peer, diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index 2766ddc723..eca7e1a50f 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -677,6 +677,10 @@ impl InflightBlocks { "prune: remove InflightState: remove {}-{} from {}", key.number, key.hash, value.peer ); + + ckb_metrics::handle().map(|metrics| { + metrics.ckb_inflight_timeout_count.inc(); + }); } } diff --git a/util/metrics/src/lib.rs b/util/metrics/src/lib.rs index 78c544fcb5..760d7480c6 100644 --- a/util/metrics/src/lib.rs +++ b/util/metrics/src/lib.rs @@ -118,6 +118,8 @@ pub struct Metrics { pub ckb_sys_mem_rocksdb: IntGaugeVec, /// Counter for CKB network ban peers pub ckb_network_ban_peer: IntCounter, + pub ckb_inflight_blocks_count: IntGauge, + pub ckb_inflight_timeout_count: IntCounter, } static METRICS: once_cell::sync::Lazy = once_cell::sync::Lazy::new(|| { @@ 
-183,7 +185,7 @@ static METRICS: once_cell::sync::Lazy = once_cell::sync::Lazy::new(|| { "The CKB HeaderMap memory hit count", &["type"] ) - .unwrap() + .unwrap() ), ckb_freezer_size: register_int_gauge!("ckb_freezer_size", "The CKB freezer size").unwrap(), ckb_freezer_read: register_int_counter!("ckb_freezer_read", "The CKB freezer read").unwrap(), @@ -270,6 +272,16 @@ static METRICS: once_cell::sync::Lazy = once_cell::sync::Lazy::new(|| { ckb_network_ban_peer: register_int_counter!( "ckb_network_ban_peer", "CKB network baned peer count" + ) + .unwrap(), + ckb_inflight_blocks_count: register_int_gauge!( + "ckb_inflight_blocks_count", + "The CKB inflight blocks count" + ) + .unwrap(), + ckb_inflight_timeout_count: register_int_counter!( + "ckb_inflight_timeout_count", + "The CKB inflight timeout count" ) .unwrap(), } From 3d8a24edaab93b6491e3490ec76d8e3310c428b8 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Sun, 4 Feb 2024 16:09:44 +0800 Subject: [PATCH 340/357] Add lonely_block channel and unverified_block channel length --- chain/src/chain_service.rs | 6 ++++++ chain/src/consume_orphan.rs | 5 +++++ util/metrics/src/lib.rs | 10 ++++++++++ 3 files changed, 21 insertions(+) diff --git a/chain/src/chain_service.rs b/chain/src/chain_service.rs index 1dd355a0b7..478ac39ec7 100644 --- a/chain/src/chain_service.rs +++ b/chain/src/chain_service.rs @@ -242,6 +242,12 @@ impl ChainService { } } + ckb_metrics::handle().map(|metrics| { + metrics + .ckb_chain_lonely_block_ch_len + .set(self.lonely_block_tx.len() as i64) + }); + match self.lonely_block_tx.send(lonely_block) { Ok(_) => {} Err(SendError(lonely_block)) => { diff --git a/chain/src/consume_orphan.rs b/chain/src/consume_orphan.rs index 6cfed7d7e9..8d4de4c8d5 100644 --- a/chain/src/consume_orphan.rs +++ b/chain/src/consume_orphan.rs @@ -96,6 +96,11 @@ impl ConsumeDescendantProcessor { fn send_unverified_block(&self, lonely_block: LonelyBlockHash, total_difficulty: U256) { let block_number = 
lonely_block.block_number_and_hash.number(); let block_hash = lonely_block.block_number_and_hash.hash(); + ckb_metrics::handle().map(|metrics| { + metrics + .ckb_chain_unverified_block_ch_len + .set(self.unverified_blocks_tx.len() as i64) + }); match self.unverified_blocks_tx.send(lonely_block) { Ok(_) => { diff --git a/util/metrics/src/lib.rs b/util/metrics/src/lib.rs index 760d7480c6..990a8b0800 100644 --- a/util/metrics/src/lib.rs +++ b/util/metrics/src/lib.rs @@ -74,6 +74,8 @@ pub struct Metrics { pub ckb_chain_execute_callback_duration: Histogram, /// ckb_chain orphan blocks count pub ckb_chain_orphan_count: IntGauge, + pub ckb_chain_lonely_block_ch_len: IntGauge, + pub ckb_chain_unverified_block_ch_len: IntGauge, /// ckb_sync_msg_process duration (seconds) pub ckb_sync_msg_process_duration: HistogramVec, /// ckb_sync_block_fetch duraiton (seconds) @@ -157,6 +159,14 @@ static METRICS: once_cell::sync::Lazy = once_cell::sync::Lazy::new(|| { "ckb_chain_orphan_count", "The CKB chain orphan blocks count", ).unwrap(), + ckb_chain_lonely_block_ch_len: register_int_gauge!( + "ckb_chain_lonely_block_ch_len", + "The CKB chain lonely block channel length", + ).unwrap(), + ckb_chain_unverified_block_ch_len: register_int_gauge!( + "ckb_chain_unverified_block_ch_len", + "The CKB chain unverified block channel length", + ).unwrap(), ckb_sync_msg_process_duration: register_histogram_vec!( "ckb_sync_msg_process_duration", "The CKB sync message process duration (seconds)", From 650ea900c38f1e40d34560589398b47a28bde284 Mon Sep 17 00:00:00 2001 From: YI Date: Mon, 5 Feb 2024 12:37:29 +0800 Subject: [PATCH 341/357] Add test to ensure the results of remove_blocks_by_parent are sorted --- chain/src/tests/orphan_block_pool.rs | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/chain/src/tests/orphan_block_pool.rs b/chain/src/tests/orphan_block_pool.rs index f73319495f..4f1b9a5bb2 100644 --- a/chain/src/tests/orphan_block_pool.rs +++ 
b/chain/src/tests/orphan_block_pool.rs @@ -50,6 +50,28 @@ fn test_remove_blocks_by_parent() { } let orphan = pool.remove_blocks_by_parent(&consensus.genesis_block().hash()); + + let mut parent_hash = consensus.genesis_block().hash(); + assert_eq!(orphan[0].block.header().parent_hash(), parent_hash); + let mut windows = orphan.windows(2); + // Orphans are sorted in a BFS manner. We iterate through them and check that this is the case. + // The `parent_or_sibling` may be a sibling or child of current `parent_hash`, + // and `child_or_sibling` may be a sibling or child of `parent_or_sibling`. + while let Some([parent_or_sibling, child_or_sibling]) = windows.next() { + // `parent_or_sibling` is a child of the block with current `parent_hash`. + // Make `parent_or_sibling`'s parent the current `parent_hash`. + if parent_or_sibling.block.header().parent_hash() != parent_hash { + parent_hash = parent_or_sibling.block.header().parent_hash(); + } + + // If `child_or_sibling`'s parent is not the current `parent_hash`, i.e. it is not a sibling of + // `parent_or_sibling`, then it must be a child of `parent_or_sibling`. + if child_or_sibling.block.header().parent_hash() != parent_hash { + // Move `parent_hash` forward. 
+ parent_hash = child_or_sibling.block.header().parent_hash(); + assert_eq!(child_or_sibling.block.header().parent_hash(), parent_hash); + } + } let orphan_set: HashSet<_> = orphan.into_iter().map(|b| b.block).collect(); let blocks_set: HashSet<_> = blocks.into_iter().map(|b| b.to_owned()).collect(); assert_eq!(orphan_set, blocks_set) From 3dea464acc6d8ddbcfc8d917320369fb0b89b369 Mon Sep 17 00:00:00 2001 From: YI Date: Mon, 5 Feb 2024 13:11:30 +0800 Subject: [PATCH 342/357] Add test to ensure that leader always have children --- chain/src/tests/orphan_block_pool.rs | 66 +++++++++++++++++++--------- 1 file changed, 45 insertions(+), 21 deletions(-) diff --git a/chain/src/tests/orphan_block_pool.rs b/chain/src/tests/orphan_block_pool.rs index 4f1b9a5bb2..186d484b25 100644 --- a/chain/src/tests/orphan_block_pool.rs +++ b/chain/src/tests/orphan_block_pool.rs @@ -27,6 +27,41 @@ fn gen_lonely_block(parent_header: &HeaderView) -> LonelyBlock { } } +fn assert_leaders_have_children(pool: &OrphanBlockPool) { + for leader in pool.clone_leaders() { + let children = pool.remove_blocks_by_parent(&leader); + assert!(!children.is_empty()); + // `remove_blocks_by_parent` will remove all children from the pool, + // so we need to put them back here. + for child in children { + pool.insert(child); + } + } +} + +fn assert_blocks_are_sorted(blocks: &[LonelyBlock]) { + let mut parent_hash = blocks[0].block.header().parent_hash(); + let mut windows = blocks.windows(2); + // Orphans are sorted in a BFS manner. We iterate through them and check that this is the case. + // The `parent_or_sibling` may be a sibling or child of current `parent_hash`, + // and `child_or_sibling` may be a sibling or child of `parent_or_sibling`. + while let Some([parent_or_sibling, child_or_sibling]) = windows.next() { + // `parent_or_sibling` is a child of the block with current `parent_hash`. + // Make `parent_or_sibling`'s parent the current `parent_hash`. 
+ if parent_or_sibling.block.header().parent_hash() != parent_hash { + parent_hash = parent_or_sibling.block.header().parent_hash(); + } + + // If `child_or_sibling`'s parent is not the current `parent_hash`, i.e. it is not a sibling of + // `parent_or_sibling`, then it must be a child of `parent_or_sibling`. + if child_or_sibling.block.header().parent_hash() != parent_hash { + // Move `parent_hash` forward. + parent_hash = child_or_sibling.block.header().parent_hash(); + assert_eq!(child_or_sibling.block.header().parent_hash(), parent_hash); + } + } +} + #[test] fn test_remove_blocks_by_parent() { let consensus = ConsensusBuilder::default().build(); @@ -51,27 +86,12 @@ fn test_remove_blocks_by_parent() { let orphan = pool.remove_blocks_by_parent(&consensus.genesis_block().hash()); - let mut parent_hash = consensus.genesis_block().hash(); - assert_eq!(orphan[0].block.header().parent_hash(), parent_hash); - let mut windows = orphan.windows(2); - // Orphans are sorted in a BFS manner. We iterate through them and check that this is the case. - // The `parent_or_sibling` may be a sibling or child of current `parent_hash`, - // and `child_or_sibling` may be a sibling or child of `parent_or_sibling`. - while let Some([parent_or_sibling, child_or_sibling]) = windows.next() { - // `parent_or_sibling` is a child of the block with current `parent_hash`. - // Make `parent_or_sibling`'s parent the current `parent_hash`. - if parent_or_sibling.block.header().parent_hash() != parent_hash { - parent_hash = parent_or_sibling.block.header().parent_hash(); - } + assert_eq!( + orphan[0].block.header().parent_hash(), + consensus.genesis_block().hash() + ); + assert_blocks_are_sorted(orphan.as_slice()); - // If `child_or_sibling`'s parent is not the current `parent_hash`, i.e. it is not a sibling of - // `parent_or_sibling`, then it must be a child of `parent_or_sibling`. - if child_or_sibling.block.header().parent_hash() != parent_hash { - // Move `parent_hash` forward. 
- parent_hash = child_or_sibling.block.header().parent_hash(); - assert_eq!(child_or_sibling.block.header().parent_hash(), parent_hash); - } - } let orphan_set: HashSet<_> = orphan.into_iter().map(|b| b.block).collect(); let blocks_set: HashSet<_> = blocks.into_iter().map(|b| b.to_owned()).collect(); assert_eq!(orphan_set, blocks_set) @@ -132,7 +152,7 @@ fn test_leaders() { pool.insert(new_block); } } - + assert_leaders_have_children(&pool); assert_eq!(pool.len(), 15); assert_eq!(pool.leaders_len(), 4); @@ -142,6 +162,7 @@ fn test_leaders() { switch: None, verify_callback: None, }); + assert_leaders_have_children(&pool); assert_eq!(pool.len(), 16); assert_eq!(pool.leaders_len(), 3); @@ -151,6 +172,7 @@ fn test_leaders() { switch: None, verify_callback: None, }); + assert_leaders_have_children(&pool); assert_eq!(pool.len(), 17); assert_eq!(pool.leaders_len(), 2); @@ -166,6 +188,7 @@ fn test_leaders() { switch: None, verify_callback: None, }); + assert_leaders_have_children(&pool); assert_eq!(pool.len(), 18); assert_eq!(pool.leaders_len(), 2); @@ -179,6 +202,7 @@ fn test_leaders() { switch: None, verify_callback: None, }); + assert_leaders_have_children(&pool); assert_eq!(pool.len(), 4); assert_eq!(pool.leaders_len(), 1); From a579e4b38c09c9ce369b38fb5124521d2e6d2f94 Mon Sep 17 00:00:00 2001 From: YI Date: Mon, 5 Feb 2024 13:27:24 +0800 Subject: [PATCH 343/357] Fix blocks_are_sorted checking logic error --- chain/src/tests/orphan_block_pool.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/chain/src/tests/orphan_block_pool.rs b/chain/src/tests/orphan_block_pool.rs index 186d484b25..cf9861fb27 100644 --- a/chain/src/tests/orphan_block_pool.rs +++ b/chain/src/tests/orphan_block_pool.rs @@ -42,7 +42,8 @@ fn assert_leaders_have_children(pool: &OrphanBlockPool) { fn assert_blocks_are_sorted(blocks: &[LonelyBlock]) { let mut parent_hash = blocks[0].block.header().parent_hash(); let mut windows = blocks.windows(2); - // Orphans are sorted in a BFS 
manner. We iterate through them and check that this is the case. + // Orphans are sorted in a breadth-first search manner. We iterate through them and + // check that this is the case. // The `parent_or_sibling` may be a sibling or child of current `parent_hash`, // and `child_or_sibling` may be a sibling or child of `parent_or_sibling`. while let Some([parent_or_sibling, child_or_sibling]) = windows.next() { @@ -55,9 +56,9 @@ fn assert_blocks_are_sorted(blocks: &[LonelyBlock]) { // If `child_or_sibling`'s parent is not the current `parent_hash`, i.e. it is not a sibling of // `parent_or_sibling`, then it must be a child of `parent_or_sibling`. if child_or_sibling.block.header().parent_hash() != parent_hash { + assert_eq!(child_or_sibling.block.header().parent_hash(), parent_or_sibling.block.header().hash()); // Move `parent_hash` forward. parent_hash = child_or_sibling.block.header().parent_hash(); - assert_eq!(child_or_sibling.block.header().parent_hash(), parent_hash); } } } From 3e16b59a81d810b1aaf8b13414cb28636b569194 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 6 Feb 2024 09:50:12 +0800 Subject: [PATCH 344/357] Remove tell_synchronizer_to_punish_the_bad_peer from ckb-chain --- chain/src/chain_controller.rs | 24 ++++------------- chain/src/chain_service.rs | 18 +------------ chain/src/consume_orphan.rs | 9 +------ chain/src/consume_unverified.rs | 15 +++-------- chain/src/lib.rs | 48 +++------------------------------ 5 files changed, 13 insertions(+), 101 deletions(-) diff --git a/chain/src/chain_controller.rs b/chain/src/chain_controller.rs index 550f8cc945..48902434e5 100644 --- a/chain/src/chain_controller.rs +++ b/chain/src/chain_controller.rs @@ -2,13 +2,10 @@ #![allow(missing_docs)] use crate::utils::orphan_block_pool::OrphanBlockPool; -use crate::{ - LonelyBlock, ProcessBlockRequest, RemoteBlock, TruncateRequest, VerifyCallback, VerifyResult, -}; +use crate::{LonelyBlock, ProcessBlockRequest, RemoteBlock, TruncateRequest, VerifyResult}; use 
ckb_channel::Sender; use ckb_error::{Error, InternalErrorKind}; use ckb_logger::{self, error}; -use ckb_network::PeerIndex; use ckb_types::{ core::{service::Request, BlockView}, packed::Byte32, @@ -43,16 +40,11 @@ impl ChainController { } } - pub fn asynchronous_process_remote_block( - &self, - remote_block: RemoteBlock, - verify_callback: Option, - ) { + pub fn asynchronous_process_remote_block(&self, remote_block: RemoteBlock) { let lonely_block = LonelyBlock { block: remote_block.block, - peer_id: Some(remote_block.peer_id), + verify_callback: Some(remote_block.verify_callback), switch: None, - verify_callback, }; self.asynchronous_process_lonely_block(lonely_block); } @@ -65,11 +57,7 @@ impl ChainController { /// MinerRpc::submit_block and `ckb import` need this blocking way to process block pub fn blocking_process_block(&self, block: Arc) -> VerifyResult { - self.blocking_process_block_internal(block, None, None) - } - - pub fn blocking_process_remote_block(&self, remote_block: RemoteBlock) -> VerifyResult { - self.blocking_process_block_internal(remote_block.block, Some(remote_block.peer_id), None) + self.blocking_process_block_internal(block, None) } /// `IntegrationTestRpcImpl::process_block_without_verify` need this @@ -78,13 +66,12 @@ impl ChainController { block: Arc, switch: Switch, ) -> VerifyResult { - self.blocking_process_block_internal(block, None, Some(switch)) + self.blocking_process_block_internal(block, Some(switch)) } fn blocking_process_block_internal( &self, block: Arc, - peer_id: Option, switch: Option, ) -> VerifyResult { let (verify_result_tx, verify_result_rx) = ckb_channel::oneshot::channel::(); @@ -102,7 +89,6 @@ impl ChainController { let lonely_block = LonelyBlock { block, - peer_id, switch, verify_callback: Some(Box::new(verify_callback)), }; diff --git a/chain/src/chain_service.rs b/chain/src/chain_service.rs index 478ac39ec7..186768293c 100644 --- a/chain/src/chain_service.rs +++ b/chain/src/chain_service.rs @@ -3,10 +3,7 @@ use 
crate::consume_unverified::ConsumeUnverifiedBlocks; use crate::utils::orphan_block_pool::OrphanBlockPool; -use crate::{ - tell_synchronizer_to_punish_the_bad_peer, ChainController, LonelyBlock, LonelyBlockHash, - ProcessBlockRequest, -}; +use crate::{ChainController, LonelyBlock, LonelyBlockHash, ProcessBlockRequest}; use ckb_channel::{self as channel, select, Receiver, SendError, Sender}; use ckb_constant::sync::BLOCK_DOWNLOAD_WINDOW; use ckb_error::{Error, InternalErrorKind}; @@ -205,12 +202,6 @@ impl ChainService { let error = InternalErrorKind::System .other("Invalid genesis block received") .into(); - tell_synchronizer_to_punish_the_bad_peer( - self.verify_failed_blocks_tx.clone(), - lonely_block.peer_id(), - lonely_block.block().hash(), - &error, - ); lonely_block.execute_callback(Err(error)); } else { warn!("receive 0 number block: 0-{}", block_hash); @@ -230,13 +221,6 @@ impl ChainService { ); self.shared .insert_block_status(lonely_block.block().hash(), BlockStatus::BLOCK_INVALID); - tell_synchronizer_to_punish_the_bad_peer( - self.verify_failed_blocks_tx.clone(), - lonely_block.peer_id(), - lonely_block.block().hash(), - &err, - ); - lonely_block.execute_callback(Err(err)); return; } diff --git a/chain/src/consume_orphan.rs b/chain/src/consume_orphan.rs index 8d4de4c8d5..dde75d268c 100644 --- a/chain/src/consume_orphan.rs +++ b/chain/src/consume_orphan.rs @@ -1,5 +1,5 @@ use crate::utils::orphan_block_pool::OrphanBlockPool; -use crate::{tell_synchronizer_to_punish_the_bad_peer, LonelyBlock, LonelyBlockHash, VerifyResult}; +use crate::{LonelyBlock, LonelyBlockHash, VerifyResult}; use ckb_channel::{select, Receiver, SendError, Sender}; use ckb_error::{Error, InternalErrorKind}; use ckb_logger::internal::trace; @@ -161,13 +161,6 @@ impl ConsumeDescendantProcessor { } Err(err) => { - tell_synchronizer_to_punish_the_bad_peer( - self.verify_failed_blocks_tx.clone(), - lonely_block.peer_id(), - lonely_block.block().hash(), - &err, - ); - error!( "accept block {} 
failed: {}", lonely_block.block().hash(), diff --git a/chain/src/consume_unverified.rs b/chain/src/consume_unverified.rs index 6dab14213d..a368c706a4 100644 --- a/chain/src/consume_unverified.rs +++ b/chain/src/consume_unverified.rs @@ -1,7 +1,7 @@ use crate::LonelyBlockHash; use crate::{ - tell_synchronizer_to_punish_the_bad_peer, utils::forkchanges::ForkChanges, GlobalIndex, - LonelyBlock, TruncateRequest, UnverifiedBlock, VerifyResult, + utils::forkchanges::ForkChanges, GlobalIndex, LonelyBlock, TruncateRequest, UnverifiedBlock, + VerifyResult, }; use ckb_channel::{select, Receiver}; use ckb_error::{Error, InternalErrorKind}; @@ -131,7 +131,6 @@ impl ConsumeUnverifiedBlockProcessor { UnverifiedBlock { lonely_block: LonelyBlock { block: Arc::new(block_view), - peer_id: lonely_block.peer_id, switch: lonely_block.switch, verify_callback: lonely_block.verify_callback, }, @@ -164,8 +163,7 @@ impl ConsumeUnverifiedBlockProcessor { } Err(err) => { error!( - "verify [{:?}]'s block {} failed: {}", - unverified_block.peer_id(), + "verify block {} failed: {}", unverified_block.block().hash(), err ); @@ -198,13 +196,6 @@ impl ConsumeUnverifiedBlockProcessor { unverified_block.block().hash(), err ); - - tell_synchronizer_to_punish_the_bad_peer( - self.verify_failed_blocks_tx.clone(), - unverified_block.peer_id(), - unverified_block.block().hash(), - err, - ); } } diff --git a/chain/src/lib.rs b/chain/src/lib.rs index 62d9b206bb..9e14d70622 100644 --- a/chain/src/lib.rs +++ b/chain/src/lib.rs @@ -5,9 +5,7 @@ //! //! [`ChainService`]: chain/struct.ChainService.html //! 
[`ChainController`]: chain/struct.ChainController.html -use ckb_error::{is_internal_db_error, Error}; -use ckb_logger::{debug, error}; -use ckb_network::PeerIndex; +use ckb_error::Error; use ckb_shared::types::{BlockNumberAndHash, VerifyFailedBlockInfo}; use ckb_types::core::service::Request; use ckb_types::core::{BlockNumber, BlockView, HeaderView}; @@ -45,8 +43,8 @@ pub struct RemoteBlock { /// block pub block: Arc, - /// This block is received from which peer - pub peer_id: PeerIndex, + /// Relayer and Synchronizer will have callback to ban peer + pub verify_callback: VerifyCallback, } /// LonelyBlock is the block which we have not check weather its parent is stored yet @@ -54,9 +52,6 @@ pub struct LonelyBlock { /// block pub block: Arc, - /// This block is received from which peer - pub peer_id: Option, - /// The Switch to control the verification process pub switch: Option, @@ -69,9 +64,6 @@ pub struct LonelyBlockHash { /// block pub block_number_and_hash: BlockNumberAndHash, - /// This block is received from which peer - pub peer_id: Option, - /// The Switch to control the verification process pub switch: Option, @@ -94,7 +86,6 @@ impl From for LonelyBlockHash { number: val.block.number(), hash: val.block.hash(), }, - peer_id: val.peer_id, switch: val.switch, verify_callback: val.verify_callback, } @@ -106,10 +97,6 @@ impl LonelyBlock { &self.block } - pub fn peer_id(&self) -> Option { - self.peer_id - } - pub fn switch(&self) -> Option { self.switch } @@ -131,10 +118,6 @@ impl UnverifiedBlock { self.lonely_block.block() } - pub fn peer_id(&self) -> Option { - self.lonely_block.peer_id() - } - pub fn switch(&self) -> Option { self.lonely_block.switch() } @@ -164,28 +147,3 @@ impl GlobalIndex { self.hash = hash; } } - -pub(crate) fn tell_synchronizer_to_punish_the_bad_peer( - verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, - peer_id: Option, - block_hash: Byte32, - err: &Error, -) { - let is_internal_db_error = is_internal_db_error(err); - match 
peer_id { - Some(peer_id) => { - let verify_failed_block_info = VerifyFailedBlockInfo { - block_hash, - peer_id, - reason: err.to_string(), - is_internal_db_error, - }; - if let Err(_err) = verify_failed_blocks_tx.send(verify_failed_block_info) { - error!("ChainService failed to send verify failed block info to Synchronizer, the receiver side may have been closed, this shouldn't happen") - } - } - _ => { - debug!("Don't know which peer to punish, or don't have a channel Sender to Synchronizer, skip it") - } - } -} From 9233d6a5e45242655781d73317174e952c4ed764 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 6 Feb 2024 09:55:12 +0800 Subject: [PATCH 345/357] Use verify_callback to handle peer punish and compact block broadcast --- .../src/relayer/block_transactions_process.rs | 6 +- sync/src/relayer/compact_block_process.rs | 8 +- sync/src/relayer/mod.rs | 235 +++++++++--------- sync/src/synchronizer/block_process.rs | 45 +++- sync/src/synchronizer/mod.rs | 80 ++---- sync/src/types/mod.rs | 31 ++- 6 files changed, 203 insertions(+), 202 deletions(-) diff --git a/sync/src/relayer/block_transactions_process.rs b/sync/src/relayer/block_transactions_process.rs index 7c8487c94c..fa5522e349 100644 --- a/sync/src/relayer/block_transactions_process.rs +++ b/sync/src/relayer/block_transactions_process.rs @@ -23,7 +23,7 @@ use std::sync::Arc; pub struct BlockTransactionsProcess<'a> { message: packed::BlockTransactionsReader<'a>, relayer: &'a Relayer, - nc: Arc, + nc: Arc, peer: PeerIndex, } @@ -31,7 +31,7 @@ impl<'a> BlockTransactionsProcess<'a> { pub fn new( message: packed::BlockTransactionsReader<'a>, relayer: &'a Relayer, - nc: Arc, + nc: Arc, peer: PeerIndex, ) -> Self { BlockTransactionsProcess { @@ -117,7 +117,7 @@ impl<'a> BlockTransactionsProcess<'a> { ReconstructionResult::Block(block) => { pending.remove(); self.relayer - .accept_block(self.nc.as_ref(), self.peer, block); + .accept_block(self.nc, self.peer, block, "BlockTransactions"); return Status::ok(); }
ReconstructionResult::Missing(transactions, uncles) => { diff --git a/sync/src/relayer/compact_block_process.rs b/sync/src/relayer/compact_block_process.rs index 30b255c658..b46dcca1ef 100644 --- a/sync/src/relayer/compact_block_process.rs +++ b/sync/src/relayer/compact_block_process.rs @@ -35,7 +35,7 @@ use std::time::Instant; pub struct CompactBlockProcess<'a> { message: packed::CompactBlockReader<'a>, relayer: &'a Relayer, - nc: Arc, + nc: Arc, peer: PeerIndex, } @@ -43,7 +43,7 @@ impl<'a> CompactBlockProcess<'a> { pub fn new( message: packed::CompactBlockReader<'a>, relayer: &'a Relayer, - nc: Arc, + nc: Arc, peer: PeerIndex, ) -> Self { CompactBlockProcess { @@ -118,7 +118,7 @@ impl<'a> CompactBlockProcess<'a> { }); shrink_to_fit!(pending_compact_blocks, 20); self.relayer - .accept_block(self.nc.as_ref(), self.peer, block); + .accept_block(Arc::clone(&self.nc), self.peer, block, "CompactBlock"); if let Some(metrics) = ckb_metrics::handle() { metrics @@ -231,7 +231,7 @@ fn contextual_check( compact_block_header: &HeaderView, shared: &Arc, active_chain: &ActiveChain, - nc: &Arc, + nc: &Arc, peer: PeerIndex, ) -> Status { let block_hash = compact_block_header.hash(); diff --git a/sync/src/relayer/mod.rs b/sync/src/relayer/mod.rs index dd9644fbd2..c49b9fb8bb 100644 --- a/sync/src/relayer/mod.rs +++ b/sync/src/relayer/mod.rs @@ -20,12 +20,13 @@ use self::get_block_transactions_process::GetBlockTransactionsProcess; use self::get_transactions_process::GetTransactionsProcess; use self::transaction_hashes_process::TransactionHashesProcess; use self::transactions_process::TransactionsProcess; -use crate::types::{ActiveChain, SyncShared}; +use crate::types::{post_sync_process, ActiveChain, SyncShared}; use crate::utils::{metric_ckb_message_bytes, send_message_to, MetricDirection}; use crate::{Status, StatusCode}; use ckb_chain::VerifyResult; use ckb_chain::{ChainController, RemoteBlock}; use ckb_constant::sync::BAD_MESSAGE_BAN_TIME; +use ckb_error::is_internal_db_error; 
use ckb_logger::{ debug, debug_target, error, error_target, info_target, trace_target, warn_target, }; @@ -298,9 +299,10 @@ impl Relayer { #[allow(clippy::needless_collect)] pub fn accept_block( &self, - _nc: &dyn CKBProtocolContext, + nc: Arc, peer_id: PeerIndex, block: core::BlockView, + msg_name: &str, ) { if self .shared() @@ -311,15 +313,13 @@ impl Relayer { } let block = Arc::new(block); - let remote_block = RemoteBlock { - block: Arc::clone(&block), - peer_id, - }; - let verify_success_callback = { - let broadcast_compact_block_tx = self.broadcast_compact_block_tx.clone(); + let verify_callback = { + let nc: Arc = Arc::clone(&nc); let block = Arc::clone(&block); - move |result: VerifyResult| match result { + let shared = Arc::clone(self.shared()); + let msg_name = msg_name.to_owned(); + Box::new(move |result: VerifyResult| match result { Ok(verified) => { if !verified { debug!( @@ -330,11 +330,7 @@ impl Relayer { return; } - if broadcast_compact_block_tx.send((block, peer_id)).is_err() { - error!( - "send block to broadcast_compact_block_tx failed, this shouldn't happen", - ); - } + build_and_broadcast_compact_block(nc.as_ref(), shared.shared(), peer_id, block); } Err(err) => { error!( @@ -343,101 +339,33 @@ impl Relayer { block.hash(), err ); - } - } - }; - - self.shared.accept_remote_block( - &self.chain, - remote_block, - Some(Box::new(verify_success_callback)), - ); - } - fn build_and_broadcast_compact_block( - nc: &dyn CKBProtocolContext, - shared: &Shared, - peer: PeerIndex, - block: Arc, - ) { - debug_target!( - crate::LOG_TARGET_RELAY, - "[block_relay] relayer accept_block {} {}", - block.header().hash(), - unix_time_as_millis() - ); - let block_hash = block.hash(); - shared.remove_header_view(&block_hash); - let cb = packed::CompactBlock::build_from_block(&block, &HashSet::new()); - let message = packed::RelayMessage::new_builder().set(cb).build(); - - let selected_peers: Vec = nc - .connected_peers() - .into_iter() - .filter(|target_peer| peer != 
*target_peer) - .take(MAX_RELAY_PEERS) - .collect(); - if let Err(err) = nc.quick_filter_broadcast( - TargetSession::Multi(Box::new(selected_peers.into_iter())), - message.as_bytes(), - ) { - debug_target!( - crate::LOG_TARGET_RELAY, - "relayer send block when accept block error: {:?}", - err, - ); - } - - if let Some(p2p_control) = nc.p2p_control() { - let snapshot = shared.snapshot(); - let parent_chain_root = { - let mmr = snapshot.chain_root_mmr(block.header().number() - 1); - match mmr.get_root() { - Ok(root) => root, - Err(err) => { - error_target!( - crate::LOG_TARGET_RELAY, - "Generate last state to light client failed: {:?}", - err - ); + let is_internal_db_error = is_internal_db_error(&err); + if is_internal_db_error { return; } + + // punish the malicious peer + post_sync_process( + nc.as_ref(), + peer_id, + &msg_name, + StatusCode::BlockIsInvalid.with_context(format!( + "block {} is invalid, reason: {}", + block.hash(), + err.to_string() + )), + ); } - }; + }) + }; - let tip_header = packed::VerifiableHeader::new_builder() - .header(block.header().data()) - .uncles_hash(block.calc_uncles_hash()) - .extension(Pack::pack(&block.extension())) - .parent_chain_root(parent_chain_root) - .build(); - let light_client_message = { - let content = packed::SendLastState::new_builder() - .last_header(tip_header) - .build(); - packed::LightClientMessage::new_builder() - .set(content) - .build() - }; - let light_client_peers: HashSet = nc - .connected_peers() - .into_iter() - .filter_map(|index| nc.get_peer(index).map(|peer| (index, peer))) - .filter(|(_id, peer)| peer.if_lightclient_subscribed) - .map(|(id, _)| id) - .collect(); - if let Err(err) = p2p_control.filter_broadcast( - TargetSession::Filter(Box::new(move |id| light_client_peers.contains(id))), - SupportProtocols::LightClient.protocol_id(), - light_client_message.as_bytes(), - ) { - debug_target!( - crate::LOG_TARGET_RELAY, - "relayer send last state to light client when accept block, error: {:?}", - err, - 
); - } - } + let remote_block = RemoteBlock { + block, + verify_callback, + }; + + self.shared.accept_remote_block(&self.chain, remote_block); } /// Reorganize the full block according to the compact block/txs/uncles @@ -808,6 +736,92 @@ impl Relayer { } } +fn build_and_broadcast_compact_block( + nc: &dyn CKBProtocolContext, + shared: &Shared, + peer: PeerIndex, + block: Arc, +) { + debug_target!( + crate::LOG_TARGET_RELAY, + "[block_relay] relayer accept_block {} {}", + block.header().hash(), + unix_time_as_millis() + ); + let block_hash = block.hash(); + shared.remove_header_view(&block_hash); + let cb = packed::CompactBlock::build_from_block(&block, &HashSet::new()); + let message = packed::RelayMessage::new_builder().set(cb).build(); + + let selected_peers: Vec = nc + .connected_peers() + .into_iter() + .filter(|target_peer| peer != *target_peer) + .take(MAX_RELAY_PEERS) + .collect(); + if let Err(err) = nc.quick_filter_broadcast( + TargetSession::Multi(Box::new(selected_peers.into_iter())), + message.as_bytes(), + ) { + debug_target!( + crate::LOG_TARGET_RELAY, + "relayer send block when accept block error: {:?}", + err, + ); + } + + if let Some(p2p_control) = nc.p2p_control() { + let snapshot = shared.snapshot(); + let parent_chain_root = { + let mmr = snapshot.chain_root_mmr(block.header().number() - 1); + match mmr.get_root() { + Ok(root) => root, + Err(err) => { + error_target!( + crate::LOG_TARGET_RELAY, + "Generate last state to light client failed: {:?}", + err + ); + return; + } + } + }; + + let tip_header = packed::VerifiableHeader::new_builder() + .header(block.header().data()) + .uncles_hash(block.calc_uncles_hash()) + .extension(Pack::pack(&block.extension())) + .parent_chain_root(parent_chain_root) + .build(); + let light_client_message = { + let content = packed::SendLastState::new_builder() + .last_header(tip_header) + .build(); + packed::LightClientMessage::new_builder() + .set(content) + .build() + }; + let light_client_peers: HashSet = nc + 
.connected_peers() + .into_iter() + .filter_map(|index| nc.get_peer(index).map(|peer| (index, peer))) + .filter(|(_id, peer)| peer.if_lightclient_subscribed) + .map(|(id, _)| id) + .collect(); + if let Err(err) = p2p_control.filter_broadcast( + TargetSession::Filter(Box::new(move |id| light_client_peers.contains(id))), + SupportProtocols::LightClient.protocol_id(), + light_client_message.as_bytes(), + ) { + debug_target!( + crate::LOG_TARGET_RELAY, + "relayer send last state to light client when accept block, error: {:?}", + err, + ); + } + } +} + #[async_trait] impl CKBProtocolHandler for Relayer { async fn init(&mut self, nc: Arc) { @@ -996,19 +1010,6 @@ impl CKBProtocolHandler for Relayer { Instant::now().saturating_duration_since(start_time) ); } - - async fn poll(&mut self, nc: Arc) -> Option<()> { - if let Some((block, peer)) = self.broadcast_compact_block_rx.recv().await { - Self::build_and_broadcast_compact_block( - nc.as_ref(), - self.shared().shared(), - peer, - block, - ); - return Some(()); - } - None - } } #[derive(Copy, Clone, Debug)] diff --git a/sync/src/synchronizer/block_process.rs b/sync/src/synchronizer/block_process.rs index 76cec28376..089895dbd1 100644 --- a/sync/src/synchronizer/block_process.rs +++ b/sync/src/synchronizer/block_process.rs @@ -1,7 +1,11 @@ use crate::synchronizer::Synchronizer; +use crate::types::post_sync_process; +use crate::StatusCode; use ckb_chain::RemoteBlock; -use ckb_logger::debug; -use ckb_network::PeerIndex; +use ckb_error::is_internal_db_error; +use ckb_logger::{debug, info}; +use ckb_network::{CKBProtocolContext, PeerIndex}; +use ckb_types::packed::Byte32; use ckb_types::{packed, prelude::*}; use std::sync::Arc; @@ -9,6 +13,7 @@ pub struct BlockProcess<'a> { message: packed::SendBlockReader<'a>, synchronizer: &'a Synchronizer, peer: PeerIndex, + nc: Arc, } impl<'a> BlockProcess<'a> { @@ -16,16 +21,18 @@ impl<'a> BlockProcess<'a> { message: packed::SendBlockReader<'a>, synchronizer: &'a Synchronizer, peer: 
PeerIndex, + nc: Arc, ) -> Self { BlockProcess { message, synchronizer, peer, + nc, } } pub fn execute(self) -> crate::Status { - let block = self.message.block().to_entity().into_view(); + let block = Arc::new(self.message.block().to_entity().into_view()); debug!( "BlockProcess received block {} {}", block.number(), @@ -34,9 +41,37 @@ impl<'a> BlockProcess<'a> { let shared = self.synchronizer.shared(); if shared.new_block_received(&block) { + let verify_callback = { + let nc: Arc = Arc::clone(&self.nc); + let peer_id: PeerIndex = self.peer; + let block_hash: Byte32 = block.hash(); + Box::new(move |verify_result: Result| { + match verify_result { + Ok(_) => {} + Err(err) => { + let is_internal_db_error = is_internal_db_error(&err); + if is_internal_db_error { + return; + } + + // punish the malicious peer + post_sync_process( + nc.as_ref(), + peer_id, + "SendBlock", + StatusCode::BlockIsInvalid.with_context(format!( + "block {} is invalid, reason: {}", + block_hash, + err.to_string() + )), + ); + } + }; + }) + }; let remote_block = RemoteBlock { - block: Arc::new(block), - peer_id: self.peer, + block, + verify_callback, }; self.synchronizer .asynchronous_process_remote_block(remote_block); diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index 3b72f57605..ab281a3ae3 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -20,7 +20,7 @@ pub(crate) use self::get_headers_process::GetHeadersProcess; pub(crate) use self::headers_process::HeadersProcess; pub(crate) use self::in_ibd_process::InIBDProcess; -use crate::types::{HeadersSyncController, IBDState, Peers, SyncShared}; +use crate::types::{post_sync_process, HeadersSyncController, IBDState, Peers, SyncShared}; use crate::utils::{metric_ckb_message_bytes, send_message_to, MetricDirection}; use crate::{Status, StatusCode}; use ckb_shared::block_status::BlockStatus; @@ -32,7 +32,7 @@ use ckb_constant::sync::{ BAD_MESSAGE_BAN_TIME, CHAIN_SYNC_TIMEOUT, 
EVICTION_HEADERS_RESPONSE_TIME, INIT_BLOCKS_IN_TRANSIT_PER_PEER, MAX_TIP_AGE, }; -use ckb_logger::{debug, error, info, trace, warn}; +use ckb_logger::{debug, error, info, trace}; use ckb_metrics::HistogramTimer; use ckb_network::{ async_trait, bytes::Bytes, tokio, CKBProtocolContext, CKBProtocolHandler, PeerIndex, @@ -265,7 +265,7 @@ impl Synchronizer { fn try_process( &self, - nc: &dyn CKBProtocolContext, + nc: Arc, peer: PeerIndex, message: packed::SyncMessageUnionReader<'_>, ) -> Status { @@ -280,34 +280,36 @@ impl Synchronizer { match message { packed::SyncMessageUnionReader::GetHeaders(reader) => { - GetHeadersProcess::new(reader, self, peer, nc).execute() + GetHeadersProcess::new(reader, self, peer, nc.as_ref()).execute() } packed::SyncMessageUnionReader::SendHeaders(reader) => { - HeadersProcess::new(reader, self, peer, nc).execute() + HeadersProcess::new(reader, self, peer, nc.as_ref()).execute() } packed::SyncMessageUnionReader::GetBlocks(reader) => { - GetBlocksProcess::new(reader, self, peer, nc).execute() + GetBlocksProcess::new(reader, self, peer, nc.as_ref()).execute() } packed::SyncMessageUnionReader::SendBlock(reader) => { if reader.check_data() { - BlockProcess::new(reader, self, peer).execute() + BlockProcess::new(reader, self, peer, nc).execute() } else { StatusCode::ProtocolMessageIsMalformed.with_context("SendBlock is invalid") } } - packed::SyncMessageUnionReader::InIBD(_) => InIBDProcess::new(self, peer, nc).execute(), + packed::SyncMessageUnionReader::InIBD(_) => { + InIBDProcess::new(self, peer, nc.as_ref()).execute() + } } } fn process( &self, - nc: &dyn CKBProtocolContext, + nc: Arc, peer: PeerIndex, message: packed::SyncMessageUnionReader<'_>, ) { let item_name = message.item_name(); let item_bytes = message.as_slice().len() as u64; - let status = self.try_process(nc, peer, message); + let status = self.try_process(Arc::clone(&nc), peer, message); metric_ckb_message_bytes( MetricDirection::In, @@ -317,26 +319,7 @@ impl Synchronizer { 
item_bytes, ); - Self::post_sync_process(nc, peer, item_name, status); - } - - fn post_sync_process( - nc: &dyn CKBProtocolContext, - peer: PeerIndex, - item_name: &str, - status: Status, - ) { - if let Some(ban_time) = status.should_ban() { - error!( - "Receive {} from {}. Ban {:?} for {}", - item_name, peer, ban_time, status - ); - nc.ban_peer(peer, ban_time, status.to_string()); - } else if status.should_warn() { - warn!("Receive {} from {}, {}", item_name, peer, status); - } else if !status.is_ok() { - debug!("Receive {} from {}, {}", item_name, peer, status); - } + post_sync_process(nc.as_ref(), peer, item_name, status); } /// Get peers info @@ -371,8 +354,7 @@ impl Synchronizer { if status.contains(BlockStatus::BLOCK_STORED) { error!("Block {} already stored", block_hash); } else if status.contains(BlockStatus::HEADER_VALID) { - self.shared - .accept_remote_block(&self.chain, remote_block, None); + self.shared.accept_remote_block(&self.chain, remote_block); } else { debug!( "Synchronizer process_new_block unexpected status {:?} {}", @@ -396,11 +378,7 @@ impl Synchronizer { error!("block {} already stored", block_hash); Ok(false) } else if status.contains(BlockStatus::HEADER_VALID) { - let remote_block = RemoteBlock { - block: Arc::new(block), - peer_id, - }; - self.chain.blocking_process_remote_block(remote_block) + self.chain.blocking_process_block(Arc::new(block)) } else { debug!( "Synchronizer process_new_block unexpected status {:?} {}", @@ -844,7 +822,7 @@ impl CKBProtocolHandler for Synchronizer { } let start_time = Instant::now(); - tokio::task::block_in_place(|| self.process(nc.as_ref(), peer_index, msg)); + tokio::task::block_in_place(|| self.process(nc, peer_index, msg)); debug!( "Process message={}, peer={}, cost={:?}", msg.item_name(), @@ -915,30 +893,4 @@ impl CKBProtocolHandler for Synchronizer { debug!("No peers connected"); } } - - async fn poll(&mut self, nc: Arc) -> Option<()> { - let mut have_malformed_peers = false; - while let 
Some(malformed_peer_info) = self.verify_failed_blocks_rx.recv().await { - have_malformed_peers = true; - if malformed_peer_info.is_internal_db_error { - // we shouldn't ban that peer if it's an internal db error - continue; - } - - Self::post_sync_process( - nc.as_ref(), - malformed_peer_info.peer_id, - "SendBlock", - StatusCode::BlockIsInvalid.with_context(format!( - "block {} is invalid, reason: {}", - malformed_peer_info.block_hash, malformed_peer_info.reason - )), - ); - } - - if have_malformed_peers { - return Some(()); - } - None - } } diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index eca7e1a50f..f38e545824 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -1,6 +1,5 @@ use crate::{Status, StatusCode, FAST_INDEX, LOW_INDEX, NORMAL_INDEX, TIME_TRACE_SIZE}; use ckb_app_config::SyncConfig; -use ckb_chain::VerifyCallback; #[cfg(test)] use ckb_chain::VerifyResult; use ckb_chain::{ChainController, RemoteBlock}; @@ -13,7 +12,7 @@ use ckb_constant::sync::{ MAX_UNKNOWN_TX_HASHES_SIZE, MAX_UNKNOWN_TX_HASHES_SIZE_PER_PEER, POW_INTERVAL, RETRY_ASK_TX_TIMEOUT_INCREASE, SUSPEND_SYNC_TIME, }; -use ckb_logger::{debug, trace}; +use ckb_logger::{debug, error, trace, warn}; use ckb_network::{CKBProtocolContext, PeerIndex, SupportProtocols}; use ckb_shared::{ block_status::BlockStatus, @@ -1074,12 +1073,7 @@ impl SyncShared { chain.blocking_process_block(block) } - pub(crate) fn accept_remote_block( - &self, - chain: &ChainController, - remote_block: RemoteBlock, - verify_callback: Option, - ) { + pub(crate) fn accept_remote_block(&self, chain: &ChainController, remote_block: RemoteBlock) { { let entry = self .shared() @@ -1090,7 +1084,7 @@ impl SyncShared { } } - chain.asynchronous_process_remote_block(remote_block, verify_callback) + chain.asynchronous_process_remote_block(remote_block) } /// Sync a new valid header, try insert to sync state @@ -1996,3 +1990,22 @@ impl From for bool { } } } + +pub(crate) fn post_sync_process( + nc: &dyn 
CKBProtocolContext, + peer: PeerIndex, + item_name: &str, + status: Status, +) { + if let Some(ban_time) = status.should_ban() { + error!( + "Receive {} from {}. Ban {:?} for {}", + item_name, peer, ban_time, status + ); + nc.ban_peer(peer, ban_time, status.to_string()); + } else if status.should_warn() { + warn!("Receive {} from {}, {}", item_name, peer, status); + } else if !status.is_ok() { + debug!("Receive {} from {}, {}", item_name, peer, status); + } +} From e22d9e4358192b3c69a669d1e894af9dd87080de Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 6 Feb 2024 09:55:34 +0800 Subject: [PATCH 346/357] Fix sync and relayer unit test by verify_callback --- chain/src/tests/find_fork.rs | 2 -- chain/src/tests/orphan_block_pool.rs | 9 --------- sync/src/tests/sync_shared.rs | 9 ++++----- sync/src/tests/synchronizer/functions.rs | 6 ++++-- 4 files changed, 8 insertions(+), 18 deletions(-) diff --git a/chain/src/tests/find_fork.rs b/chain/src/tests/find_fork.rs index 309fb86853..4004652d78 100644 --- a/chain/src/tests/find_fork.rs +++ b/chain/src/tests/find_fork.rs @@ -27,14 +27,12 @@ fn process_block( switch: Switch, ) { let lonely_block_hash = LonelyBlockHash { - peer_id: None, switch: Some(switch), block_number_and_hash: BlockNumberAndHash::new(blk.number(), blk.hash()), verify_callback: None, }; let lonely_block = LonelyBlock { - peer_id: None, switch: Some(switch), block: Arc::new(blk.to_owned()), verify_callback: None, diff --git a/chain/src/tests/orphan_block_pool.rs b/chain/src/tests/orphan_block_pool.rs index cf9861fb27..3a441e3bae 100644 --- a/chain/src/tests/orphan_block_pool.rs +++ b/chain/src/tests/orphan_block_pool.rs @@ -21,7 +21,6 @@ fn gen_lonely_block(parent_header: &HeaderView) -> LonelyBlock { .build(); LonelyBlock { block: Arc::new(block), - peer_id: None, switch: None, verify_callback: None, } @@ -75,7 +74,6 @@ fn test_remove_blocks_by_parent() { let new_block_clone = lonely_block.block().clone(); let new_block = LonelyBlock { block: 
new_block_clone.clone(), - peer_id: None, switch: None, verify_callback: None, }; @@ -109,7 +107,6 @@ fn test_remove_blocks_by_parent_and_get_block_should_not_deadlock() { let new_block = lonely_block.block(); let new_block_clone = LonelyBlock { block: Arc::clone(new_block), - peer_id: None, switch: None, verify_callback: None, }; @@ -143,7 +140,6 @@ fn test_leaders() { let lonely_block = gen_lonely_block(&parent); let new_block = LonelyBlock { block: Arc::clone(lonely_block.block()), - peer_id: None, switch: None, verify_callback: None, }; @@ -159,7 +155,6 @@ fn test_leaders() { pool.insert(LonelyBlock { block: blocks[5].block().clone(), - peer_id: None, switch: None, verify_callback: None, }); @@ -169,7 +164,6 @@ fn test_leaders() { pool.insert(LonelyBlock { block: blocks[10].block().clone(), - peer_id: None, switch: None, verify_callback: None, }); @@ -185,7 +179,6 @@ fn test_leaders() { pool.insert(LonelyBlock { block: blocks[0].block().clone(), - peer_id: None, switch: None, verify_callback: None, }); @@ -199,7 +192,6 @@ fn test_leaders() { pool.insert(LonelyBlock { block: blocks[15].block().clone(), - peer_id: None, switch: None, verify_callback: None, }); @@ -241,7 +233,6 @@ fn test_remove_expired_blocks() { parent = new_block.header(); let lonely_block = LonelyBlock { block: Arc::new(new_block), - peer_id: None, switch: None, verify_callback: None, }; diff --git a/sync/src/tests/sync_shared.rs b/sync/src/tests/sync_shared.rs index 456ecb70bc..04d79e700d 100644 --- a/sync/src/tests/sync_shared.rs +++ b/sync/src/tests/sync_shared.rs @@ -3,7 +3,7 @@ use crate::tests::util::{build_chain, inherit_block}; use crate::SyncShared; -use ckb_chain::{start_chain_services, store_unverified_block, RemoteBlock}; +use ckb_chain::{start_chain_services, store_unverified_block, RemoteBlock, VerifyResult}; use ckb_logger::info; use ckb_logger_service::LoggerInitGuard; use ckb_shared::block_status::BlockStatus; @@ -112,17 +112,16 @@ fn test_insert_parent_unknown_block() { 
&chain, RemoteBlock { block: Arc::clone(&valid_orphan), - peer_id: Default::default(), + + verify_callback: Box::new(|_: VerifyResult| {}), }, - None, ); shared.accept_remote_block( &chain, RemoteBlock { block: Arc::clone(&invalid_orphan), - peer_id: Default::default(), + verify_callback: Box::new(|_: VerifyResult| {}), }, - None, ); let wait_for_block_status_match = |hash: &Byte32, expect_status: BlockStatus| -> bool { diff --git a/sync/src/tests/synchronizer/functions.rs b/sync/src/tests/synchronizer/functions.rs index 2b1b12c497..3ad00c64a1 100644 --- a/sync/src/tests/synchronizer/functions.rs +++ b/sync/src/tests/synchronizer/functions.rs @@ -5,7 +5,7 @@ use ckb_dao::DaoCalculator; use ckb_error::InternalErrorKind; use ckb_network::{ async_trait, bytes::Bytes, Behaviour, CKBProtocolContext, Peer, PeerId, PeerIndex, ProtocolId, - SessionType, TargetSession, + SessionType, SupportProtocols, TargetSession, }; use ckb_reward_calculator::RewardCalculator; use ckb_shared::types::HeaderIndex; @@ -662,8 +662,10 @@ fn test_sync_process() { for block in &fetched_blocks { let block = SendBlockBuilder::default().block(block.data()).build(); + + let nc = Arc::new(mock_network_context(1)); assert_eq!( - BlockProcess::new(block.as_reader(), &synchronizer1, peer1).blocking_execute(), + BlockProcess::new(block.as_reader(), &synchronizer1, peer1, nc).blocking_execute(), Status::ok(), ); } From 71ea04b4a767b9ecab950a2354e99e0277329b6c Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 6 Feb 2024 10:02:26 +0800 Subject: [PATCH 347/357] Remove VerifyFailedInfo, remove verify_failed_block channel --- chain/src/chain_service.rs | 17 ++------------ chain/src/consume_orphan.rs | 5 ----- chain/src/consume_unverified.rs | 4 ---- chain/src/lib.rs | 2 +- chain/src/tests/find_fork.rs | 22 +------------------ ckb-bin/src/subcommand/run.rs | 1 - shared/src/chain_services_builder.rs | 9 +------- shared/src/shared_builder.rs | 20 ++--------------- shared/src/types/mod.rs | 9 -------- 
.../relayer/tests/compact_block_process.rs | 3 --- sync/src/synchronizer/block_process.rs | 2 +- sync/src/synchronizer/mod.rs | 11 ++-------- sync/src/tests/synchronizer/basic_sync.rs | 6 +---- sync/src/tests/synchronizer/functions.rs | 14 +++--------- util/launcher/src/lib.rs | 8 +------ 15 files changed, 15 insertions(+), 118 deletions(-) diff --git a/chain/src/chain_service.rs b/chain/src/chain_service.rs index 186768293c..711331823d 100644 --- a/chain/src/chain_service.rs +++ b/chain/src/chain_service.rs @@ -8,10 +8,8 @@ use ckb_channel::{self as channel, select, Receiver, SendError, Sender}; use ckb_constant::sync::BLOCK_DOWNLOAD_WINDOW; use ckb_error::{Error, InternalErrorKind}; use ckb_logger::{self, debug, error, info, warn}; -use ckb_network::tokio; use ckb_shared::block_status::BlockStatus; use ckb_shared::shared::Shared; -use ckb_shared::types::VerifyFailedBlockInfo; use ckb_shared::ChainServicesBuilder; use ckb_stop_handler::{new_crossbeam_exit_rx, register_thread}; use ckb_types::core::{service::Request, BlockView}; @@ -35,14 +33,12 @@ pub fn start_chain_services(builder: ChainServicesBuilder) -> ChainController { .name("consume_unverified_blocks".into()) .spawn({ let shared = builder.shared.clone(); - let verify_failed_blocks_tx = builder.verify_failed_blocks_tx.clone(); move || { let consume_unverified = ConsumeUnverifiedBlocks::new( shared, unverified_rx, truncate_block_rx, builder.proposal_table, - verify_failed_blocks_tx, unverified_queue_stop_rx, ); @@ -62,14 +58,12 @@ pub fn start_chain_services(builder: ChainServicesBuilder) -> ChainController { let orphan_blocks_broker = Arc::clone(&orphan_blocks_broker); let shared = builder.shared.clone(); use crate::consume_orphan::ConsumeOrphan; - let verify_failed_block_tx = builder.verify_failed_blocks_tx.clone(); move || { let consume_orphan = ConsumeOrphan::new( shared, orphan_blocks_broker, unverified_tx, lonely_block_rx, - verify_failed_block_tx, search_orphan_pool_stop_rx, ); consume_orphan.start(); 
@@ -79,12 +73,8 @@ pub fn start_chain_services(builder: ChainServicesBuilder) -> ChainController { let (process_block_tx, process_block_rx) = channel::bounded(BLOCK_DOWNLOAD_WINDOW as usize); - let chain_service: ChainService = ChainService::new( - builder.shared, - process_block_rx, - lonely_block_tx, - builder.verify_failed_blocks_tx, - ); + let chain_service: ChainService = + ChainService::new(builder.shared, process_block_rx, lonely_block_tx); let chain_service_thread = thread::Builder::new() .name("ChainService".into()) .spawn({ @@ -116,7 +106,6 @@ pub(crate) struct ChainService { process_block_rx: Receiver, lonely_block_tx: Sender, - verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, } impl ChainService { /// Create a new ChainService instance with shared. @@ -125,13 +114,11 @@ impl ChainService { process_block_rx: Receiver, lonely_block_tx: Sender, - verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, ) -> ChainService { ChainService { shared, process_block_rx, lonely_block_tx, - verify_failed_blocks_tx, } } diff --git a/chain/src/consume_orphan.rs b/chain/src/consume_orphan.rs index dde75d268c..43ca96a8b4 100644 --- a/chain/src/consume_orphan.rs +++ b/chain/src/consume_orphan.rs @@ -5,7 +5,6 @@ use ckb_error::{Error, InternalErrorKind}; use ckb_logger::internal::trace; use ckb_logger::{debug, error, info}; use ckb_shared::block_status::BlockStatus; -use ckb_shared::types::VerifyFailedBlockInfo; use ckb_shared::Shared; use ckb_store::ChainStore; use ckb_systemtime::unix_time_as_millis; @@ -17,8 +16,6 @@ use std::sync::Arc; pub(crate) struct ConsumeDescendantProcessor { pub shared: Shared, pub unverified_blocks_tx: Sender, - - pub verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, } // Store the an unverified block to the database. 
We may usually do this @@ -196,7 +193,6 @@ impl ConsumeOrphan { orphan_block_pool: Arc, unverified_blocks_tx: Sender, lonely_blocks_rx: Receiver, - verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, stop_rx: Receiver<()>, ) -> ConsumeOrphan { ConsumeOrphan { @@ -204,7 +200,6 @@ impl ConsumeOrphan { descendant_processor: ConsumeDescendantProcessor { shared, unverified_blocks_tx, - verify_failed_blocks_tx, }, orphan_blocks_broker: orphan_block_pool, lonely_blocks_rx, diff --git a/chain/src/consume_unverified.rs b/chain/src/consume_unverified.rs index a368c706a4..9506c26baf 100644 --- a/chain/src/consume_unverified.rs +++ b/chain/src/consume_unverified.rs @@ -11,7 +11,6 @@ use ckb_logger::{debug, error, info, log_enabled_target, trace_target}; use ckb_merkle_mountain_range::leaf_index_to_mmr_size; use ckb_proposal_table::ProposalTable; use ckb_shared::block_status::BlockStatus; -use ckb_shared::types::VerifyFailedBlockInfo; use ckb_shared::Shared; use ckb_store::{attach_block_cell, detach_block_cell, ChainStore, StoreTransaction}; use ckb_systemtime::unix_time_as_millis; @@ -34,7 +33,6 @@ use std::sync::Arc; pub(crate) struct ConsumeUnverifiedBlockProcessor { pub(crate) shared: Shared, pub(crate) proposal_table: ProposalTable, - pub(crate) verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, } pub(crate) struct ConsumeUnverifiedBlocks { @@ -53,7 +51,6 @@ impl ConsumeUnverifiedBlocks { unverified_blocks_rx: Receiver, truncate_block_rx: Receiver, proposal_table: ProposalTable, - verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, stop_rx: Receiver<()>, ) -> Self { ConsumeUnverifiedBlocks { @@ -64,7 +61,6 @@ impl ConsumeUnverifiedBlocks { processor: ConsumeUnverifiedBlockProcessor { shared, proposal_table, - verify_failed_blocks_tx, }, } } diff --git a/chain/src/lib.rs b/chain/src/lib.rs index 9e14d70622..26513cec6f 100644 --- a/chain/src/lib.rs +++ b/chain/src/lib.rs @@ -6,7 +6,7 @@ //! [`ChainService`]: chain/struct.ChainService.html //! 
[`ChainController`]: chain/struct.ChainController.html use ckb_error::Error; -use ckb_shared::types::{BlockNumberAndHash, VerifyFailedBlockInfo}; +use ckb_shared::types::BlockNumberAndHash; use ckb_types::core::service::Request; use ckb_types::core::{BlockNumber, BlockView, HeaderView}; use ckb_types::packed::Byte32; diff --git a/chain/src/tests/find_fork.rs b/chain/src/tests/find_fork.rs index 4004652d78..dfe71e52eb 100644 --- a/chain/src/tests/find_fork.rs +++ b/chain/src/tests/find_fork.rs @@ -1,7 +1,7 @@ use crate::consume_orphan::ConsumeDescendantProcessor; use crate::consume_unverified::ConsumeUnverifiedBlockProcessor; use crate::utils::forkchanges::ForkChanges; -use crate::{start_chain_services, LonelyBlock, LonelyBlockHash, VerifyFailedBlockInfo}; +use crate::{start_chain_services, LonelyBlock, LonelyBlockHash}; use ckb_chain_spec::consensus::{Consensus, ProposalWindow}; use ckb_proposal_table::ProposalTable; use ckb_shared::types::BlockNumberAndHash; @@ -71,18 +71,14 @@ fn test_find_fork_case1() { fork2.gen_empty_block_with_diff(90u64, &mock_store); } - let (verify_failed_blocks_tx, _verify_failed_blocks_rx) = - tokio::sync::mpsc::unbounded_channel::(); let (unverified_blocks_tx, _unverified_blocks_rx) = channel::unbounded::(); let consume_descendant_processor = ConsumeDescendantProcessor { shared: shared.clone(), unverified_blocks_tx, - verify_failed_blocks_tx: verify_failed_blocks_tx.clone(), }; let mut consume_unverified_block_processor = ConsumeUnverifiedBlockProcessor { shared: shared.clone(), proposal_table, - verify_failed_blocks_tx, }; // fork1 total_difficulty 400 @@ -163,18 +159,14 @@ fn test_find_fork_case2() { fork2.gen_empty_block_with_diff(90u64, &mock_store); } let proposal_table = ProposalTable::new(consensus.tx_proposal_window()); - let (verify_failed_blocks_tx, _verify_failed_blocks_rx) = - tokio::sync::mpsc::unbounded_channel::(); let (unverified_blocks_tx, _unverified_blocks_rx) = channel::unbounded::(); let consume_descendant_processor 
= ConsumeDescendantProcessor { shared: shared.clone(), unverified_blocks_tx, - verify_failed_blocks_tx: verify_failed_blocks_tx.clone(), }; let mut consume_unverified_block_processor = ConsumeUnverifiedBlockProcessor { shared: shared.clone(), proposal_table, - verify_failed_blocks_tx, }; // fork1 total_difficulty 400 @@ -256,18 +248,14 @@ fn test_find_fork_case3() { fork2.gen_empty_block_with_diff(40u64, &mock_store) } let proposal_table = ProposalTable::new(consensus.tx_proposal_window()); - let (verify_failed_blocks_tx, _verify_failed_blocks_rx) = - tokio::sync::mpsc::unbounded_channel::(); let (unverified_blocks_tx, _unverified_blocks_rx) = channel::unbounded::(); let consume_descendant_processor = ConsumeDescendantProcessor { shared: shared.clone(), unverified_blocks_tx, - verify_failed_blocks_tx: verify_failed_blocks_tx.clone(), }; let mut consume_unverified_block_processor = ConsumeUnverifiedBlockProcessor { shared: shared.clone(), proposal_table, - verify_failed_blocks_tx, }; // fork1 total_difficulty 240 for blk in fork1.blocks() { @@ -347,18 +335,14 @@ fn test_find_fork_case4() { fork2.gen_empty_block_with_diff(80u64, &mock_store); } let proposal_table = ProposalTable::new(consensus.tx_proposal_window()); - let (verify_failed_blocks_tx, _verify_failed_blocks_rx) = - tokio::sync::mpsc::unbounded_channel::(); let (unverified_blocks_tx, _unverified_blocks_rx) = channel::unbounded::(); let consume_descendant_processor = ConsumeDescendantProcessor { shared: shared.clone(), unverified_blocks_tx, - verify_failed_blocks_tx: verify_failed_blocks_tx.clone(), }; let mut consume_unverified_block_processor = ConsumeUnverifiedBlockProcessor { shared: shared.clone(), proposal_table, - verify_failed_blocks_tx, }; // fork1 total_difficulty 200 @@ -439,18 +423,14 @@ fn repeatedly_switch_fork() { fork2.gen_empty_block_with_nonce(2u128, &mock_store); } let proposal_table = ProposalTable::new(consensus.tx_proposal_window()); - let (verify_failed_blocks_tx, 
_verify_failed_blocks_rx) = - tokio::sync::mpsc::unbounded_channel::(); let (unverified_blocks_tx, _unverified_blocks_rx) = channel::unbounded::(); let consume_descendant_processor = ConsumeDescendantProcessor { shared: shared.clone(), unverified_blocks_tx, - verify_failed_blocks_tx: verify_failed_blocks_tx.clone(), }; let mut consume_unverified_block_processor = ConsumeUnverifiedBlockProcessor { shared: shared.clone(), proposal_table, - verify_failed_blocks_tx, }; for blk in fork1.blocks() { diff --git a/ckb-bin/src/subcommand/run.rs b/ckb-bin/src/subcommand/run.rs index f678855304..7c2a639349 100644 --- a/ckb-bin/src/subcommand/run.rs +++ b/ckb-bin/src/subcommand/run.rs @@ -51,7 +51,6 @@ pub fn run(args: RunArgs, version: Version, async_handle: Handle) -> Result<(), chain_controller.clone(), miner_enable, pack.take_relay_tx_receiver(), - pack.take_verify_failed_block_rx(), ); let tx_pool_builder = pack.take_tx_pool_builder(); diff --git a/shared/src/chain_services_builder.rs b/shared/src/chain_services_builder.rs index a8c5f08591..3260971157 100644 --- a/shared/src/chain_services_builder.rs +++ b/shared/src/chain_services_builder.rs @@ -1,25 +1,18 @@ //! 
chain_services_builder provide ChainServicesBuilder to build Chain Services #![allow(missing_docs)] -use crate::types::VerifyFailedBlockInfo; use crate::Shared; use ckb_proposal_table::ProposalTable; pub struct ChainServicesBuilder { pub shared: Shared, pub proposal_table: ProposalTable, - pub verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, } impl ChainServicesBuilder { - pub fn new( - shared: Shared, - proposal_table: ProposalTable, - verify_failed_blocks_tx: tokio::sync::mpsc::UnboundedSender, - ) -> Self { + pub fn new(shared: Shared, proposal_table: ProposalTable) -> Self { ChainServicesBuilder { shared, proposal_table, - verify_failed_blocks_tx, } } } diff --git a/shared/src/shared_builder.rs b/shared/src/shared_builder.rs index fb23ec7e0e..3698361d4b 100644 --- a/shared/src/shared_builder.rs +++ b/shared/src/shared_builder.rs @@ -1,6 +1,6 @@ //! shared_builder provide SharedBuilder and SharedPacakge use crate::ChainServicesBuilder; -use crate::{types::VerifyFailedBlockInfo, HeaderMap, Shared}; +use crate::{HeaderMap, Shared}; use ckb_app_config::{ BlockAssemblerConfig, DBConfig, ExitCode, HeaderMapConfig, NotifyConfig, StoreConfig, SyncConfig, TxPoolConfig, @@ -422,17 +422,12 @@ impl SharedBuilder { block_status_map, ); - let (verify_failed_block_tx, verify_failed_block_rx) = - tokio::sync::mpsc::unbounded_channel::(); - - let chain_services_builder = - ChainServicesBuilder::new(shared.clone(), table, verify_failed_block_tx); + let chain_services_builder = ChainServicesBuilder::new(shared.clone(), table); let pack = SharedPackage { chain_services_builder: Some(chain_services_builder), tx_pool_builder: Some(tx_pool_builder), relay_tx_receiver: Some(receiver), - verify_failed_block_rx: Some(verify_failed_block_rx), }; Ok((shared, pack)) @@ -445,8 +440,6 @@ pub struct SharedPackage { chain_services_builder: Option, tx_pool_builder: Option, relay_tx_receiver: Option>, - - verify_failed_block_rx: Option>, } impl SharedPackage { @@ -468,15 +461,6 @@ 
impl SharedPackage { .take() .expect("take relay_tx_receiver") } - - /// Takes the verify_failed_block_rx out of the package, leaving a None in its place. - pub fn take_verify_failed_block_rx( - &mut self, - ) -> tokio::sync::mpsc::UnboundedReceiver { - self.verify_failed_block_rx - .take() - .expect("take verify_failed_block_rx") - } } fn start_notify_service(notify_config: NotifyConfig, handle: Handle) -> NotifyController { diff --git a/shared/src/types/mod.rs b/shared/src/types/mod.rs index ca848229ed..22653eff68 100644 --- a/shared/src/types/mod.rs +++ b/shared/src/types/mod.rs @@ -1,5 +1,4 @@ #![allow(missing_docs)] -use ckb_network::PeerIndex; use ckb_types::core::{BlockNumber, EpochNumberWithFraction}; use ckb_types::packed::Byte32; use ckb_types::prelude::{Entity, FromSliceShouldBeOk, Reader}; @@ -306,11 +305,3 @@ fn get_skip_height(height: BlockNumber) -> BlockNumber { } pub const SHRINK_THRESHOLD: usize = 300; - -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct VerifyFailedBlockInfo { - pub block_hash: Byte32, - pub peer_id: PeerIndex, - pub reason: String, - pub is_internal_db_error: bool, -} diff --git a/sync/src/relayer/tests/compact_block_process.rs b/sync/src/relayer/tests/compact_block_process.rs index 1e69b94d46..09025013fb 100644 --- a/sync/src/relayer/tests/compact_block_process.rs +++ b/sync/src/relayer/tests/compact_block_process.rs @@ -378,8 +378,6 @@ fn test_accept_block() { ); } - let (verify_failed_blocks_tx, _verify_failed_blocks_rx) = - tokio::sync::mpsc::unbounded_channel(); { let proposal_table = ckb_proposal_table::ProposalTable::new( relayer.shared().shared().consensus().tx_proposal_window(), @@ -387,7 +385,6 @@ fn test_accept_block() { let chain_service_builder = ChainServicesBuilder { shared: relayer.shared().shared().to_owned(), proposal_table, - verify_failed_blocks_tx, }; let chain_controller = start_chain_services(chain_service_builder); diff --git a/sync/src/synchronizer/block_process.rs 
b/sync/src/synchronizer/block_process.rs index 089895dbd1..3be0e42221 100644 --- a/sync/src/synchronizer/block_process.rs +++ b/sync/src/synchronizer/block_process.rs @@ -3,7 +3,7 @@ use crate::types::post_sync_process; use crate::StatusCode; use ckb_chain::RemoteBlock; use ckb_error::is_internal_db_error; -use ckb_logger::{debug, info}; +use ckb_logger::debug; use ckb_network::{CKBProtocolContext, PeerIndex}; use ckb_types::packed::Byte32; use ckb_types::{packed, prelude::*}; diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index ab281a3ae3..390fa5890a 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -38,7 +38,7 @@ use ckb_network::{ async_trait, bytes::Bytes, tokio, CKBProtocolContext, CKBProtocolHandler, PeerIndex, ServiceControl, SupportProtocols, }; -use ckb_shared::types::{HeaderIndexView, VerifyFailedBlockInfo}; +use ckb_shared::types::HeaderIndexView; use ckb_stop_handler::{new_crossbeam_exit_rx, register_thread}; use ckb_systemtime::unix_time_as_millis; @@ -237,24 +237,17 @@ pub struct Synchronizer { /// Sync shared state pub shared: Arc, fetch_channel: Option>, - - pub(crate) verify_failed_blocks_rx: tokio::sync::mpsc::UnboundedReceiver, } impl Synchronizer { /// Init sync protocol handle /// /// This is a runtime sync protocol shared state, and any Sync protocol messages will be processed and forwarded by it - pub fn new( - chain: ChainController, - shared: Arc, - verify_failed_blocks_rx: tokio::sync::mpsc::UnboundedReceiver, - ) -> Synchronizer { + pub fn new(chain: ChainController, shared: Arc) -> Synchronizer { Synchronizer { chain, shared, fetch_channel: None, - verify_failed_blocks_rx, } } diff --git a/sync/src/tests/synchronizer/basic_sync.rs b/sync/src/tests/synchronizer/basic_sync.rs index 79b9069319..ec6514453b 100644 --- a/sync/src/tests/synchronizer/basic_sync.rs +++ b/sync/src/tests/synchronizer/basic_sync.rs @@ -189,11 +189,7 @@ fn setup_node(height: u64) -> (TestNode, Shared) { 
Default::default(), pack.take_relay_tx_receiver(), )); - let synchronizer = Synchronizer::new( - chain_controller, - sync_shared, - pack.take_verify_failed_block_rx(), - ); + let synchronizer = Synchronizer::new(chain_controller, sync_shared); let mut node = TestNode::new(); let protocol = Arc::new(RwLock::new(synchronizer)) as Arc<_>; node.add_protocol( diff --git a/sync/src/tests/synchronizer/functions.rs b/sync/src/tests/synchronizer/functions.rs index 3ad00c64a1..99a1d4c119 100644 --- a/sync/src/tests/synchronizer/functions.rs +++ b/sync/src/tests/synchronizer/functions.rs @@ -5,7 +5,7 @@ use ckb_dao::DaoCalculator; use ckb_error::InternalErrorKind; use ckb_network::{ async_trait, bytes::Bytes, Behaviour, CKBProtocolContext, Peer, PeerId, PeerIndex, ProtocolId, - SessionType, SupportProtocols, TargetSession, + SessionType, TargetSession, }; use ckb_reward_calculator::RewardCalculator; use ckb_shared::types::HeaderIndex; @@ -56,11 +56,7 @@ fn start_chain(consensus: Option) -> (ChainController, Shared, Synchr Default::default(), pack.take_relay_tx_receiver(), )); - let synchronizer = Synchronizer::new( - chain_controller.clone(), - sync_shared, - pack.take_verify_failed_block_rx(), - ); + let synchronizer = Synchronizer::new(chain_controller.clone(), sync_shared); (chain_controller, shared, synchronizer) } @@ -1228,11 +1224,7 @@ fn test_internal_db_error() { InternalErrorKind::Database.other("mocked db error").into(), )); - let synchronizer = Synchronizer::new( - chain_controller, - sync_shared, - pack.take_verify_failed_block_rx(), - ); + let synchronizer = Synchronizer::new(chain_controller, sync_shared); let status = synchronizer .shared() diff --git a/util/launcher/src/lib.rs b/util/launcher/src/lib.rs index 1e105c42e6..862d8857c6 100644 --- a/util/launcher/src/lib.rs +++ b/util/launcher/src/lib.rs @@ -23,7 +23,6 @@ use ckb_rpc::{RpcServer, ServiceBuilder}; use ckb_shared::{ChainServicesBuilder, Shared}; use ckb_shared::shared_builder::{SharedBuilder, 
SharedPackage}; -use ckb_shared::types::VerifyFailedBlockInfo; use ckb_store::{ChainDB, ChainStore}; use ckb_sync::{BlockFilter, NetTimeProtocol, Relayer, SyncShared, Synchronizer}; use ckb_tx_pool::service::TxVerificationResult; @@ -264,7 +263,6 @@ impl Launcher { chain_controller: ChainController, miner_enable: bool, relay_tx_receiver: Receiver, - verify_failed_block_rx: tokio::sync::mpsc::UnboundedReceiver, ) -> NetworkController { let sync_shared = Arc::new(SyncShared::new( shared.clone(), @@ -286,11 +284,7 @@ impl Launcher { ); // Sync is a core protocol, user cannot disable it via config - let synchronizer = Synchronizer::new( - chain_controller.clone(), - Arc::clone(&sync_shared), - verify_failed_block_rx, - ); + let synchronizer = Synchronizer::new(chain_controller.clone(), Arc::clone(&sync_shared)); let mut protocols = vec![CKBProtocol::new_with_support_protocol( SupportProtocols::Sync, Box::new(synchronizer), From e0d006e14d4c9a024ad653dceb38cae610109c94 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 6 Feb 2024 10:04:52 +0800 Subject: [PATCH 348/357] Remove broadcast_compact_block channel --- sync/src/relayer/mod.rs | 13 ------------- sync/src/synchronizer/mod.rs | 2 +- 2 files changed, 1 insertion(+), 14 deletions(-) diff --git a/sync/src/relayer/mod.rs b/sync/src/relayer/mod.rs index c49b9fb8bb..4597c83722 100644 --- a/sync/src/relayer/mod.rs +++ b/sync/src/relayer/mod.rs @@ -72,19 +72,12 @@ pub enum ReconstructionResult { Error(Status), } -type BroadcastCompactBlockType = (Arc, PeerIndex); - /// Relayer protocol handle pub struct Relayer { chain: ChainController, pub(crate) shared: Arc, rate_limiter: Arc>>, v3: bool, - - pub(crate) broadcast_compact_block_tx: - tokio::sync::mpsc::UnboundedSender, - pub(crate) broadcast_compact_block_rx: - tokio::sync::mpsc::UnboundedReceiver, } impl Relayer { @@ -97,17 +90,11 @@ impl Relayer { let quota = governor::Quota::per_second(std::num::NonZeroU32::new(30).unwrap()); let rate_limiter = 
Arc::new(Mutex::new(RateLimiter::keyed(quota))); - let (broadcast_compact_block_tx, broadcast_compact_block_rx) = - tokio::sync::mpsc::unbounded_channel::(); - Relayer { chain, shared, rate_limiter, v3: false, - - broadcast_compact_block_tx, - broadcast_compact_block_rx, } } diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index 390fa5890a..2d2af690c5 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -361,7 +361,7 @@ impl Synchronizer { pub fn blocking_process_new_block( &self, block: core::BlockView, - peer_id: PeerIndex, + _peer_id: PeerIndex, ) -> Result { let block_hash = block.hash(); let status = self.shared.active_chain().get_block_status(&block_hash); From 490c64b1785001c6bd5138827bfc6dbb24feca69 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 6 Feb 2024 19:55:27 +0800 Subject: [PATCH 349/357] Add debug log for block_fetcher->get_ancestor --- sync/src/synchronizer/block_fetcher.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/sync/src/synchronizer/block_fetcher.rs b/sync/src/synchronizer/block_fetcher.rs index 7dc2c241ee..d4ca821dd6 100644 --- a/sync/src/synchronizer/block_fetcher.rs +++ b/sync/src/synchronizer/block_fetcher.rs @@ -202,6 +202,14 @@ impl BlockFetcher { .get_ancestor(&best_known.hash(), start + span - 1), } }?; + debug!( + "get_ancestor({}, {}) -> {}-{}; IBD: {:?}", + best_known.hash(), + start + span - 1, + header.number(), + header.hash(), + self.ibd, + ); let mut status = self .sync_shared From 2511ec61abe95a5f71ef5bc804766c63b4738bbe Mon Sep 17 00:00:00 2001 From: quake Date: Tue, 6 Feb 2024 11:49:14 +0800 Subject: [PATCH 350/357] chore: remove channel send error callback --- chain/src/chain_service.rs | 27 ++++++++++----------------- chain/src/consume_orphan.rs | 16 ++++------------ chain/src/lib.rs | 8 -------- chain/src/tests/orphan_block_pool.rs | 5 ++++- 4 files changed, 18 insertions(+), 38 deletions(-) diff --git a/chain/src/chain_service.rs 
b/chain/src/chain_service.rs index 711331823d..cb89a78f66 100644 --- a/chain/src/chain_service.rs +++ b/chain/src/chain_service.rs @@ -220,25 +220,18 @@ impl ChainService { }); match self.lonely_block_tx.send(lonely_block) { - Ok(_) => {} - Err(SendError(lonely_block)) => { + Ok(_) => { + debug!( + "processing block: {}-{}, (tip:unverified_tip):({}:{})", + block_number, + block_hash, + self.shared.snapshot().tip_number(), + self.shared.get_unverified_tip().number(), + ); + } + Err(_) => { error!("Failed to notify new block to orphan pool, It seems that the orphan pool has exited."); - - let err: Error = InternalErrorKind::System - .other("OrphanBlock broker disconnected") - .into(); - - let verify_result = Err(err); - lonely_block.execute_callback(verify_result); - return; } } - debug!( - "processing block: {}-{}, (tip:unverified_tip):({}:{})", - block_number, - block_hash, - self.shared.snapshot().tip_number(), - self.shared.get_unverified_tip().number(), - ); } } diff --git a/chain/src/consume_orphan.rs b/chain/src/consume_orphan.rs index 43ca96a8b4..4f9bbb30d3 100644 --- a/chain/src/consume_orphan.rs +++ b/chain/src/consume_orphan.rs @@ -1,7 +1,7 @@ use crate::utils::orphan_block_pool::OrphanBlockPool; -use crate::{LonelyBlock, LonelyBlockHash, VerifyResult}; -use ckb_channel::{select, Receiver, SendError, Sender}; -use ckb_error::{Error, InternalErrorKind}; +use crate::{LonelyBlock, LonelyBlockHash}; +use ckb_channel::{select, Receiver, Sender}; +use ckb_error::Error; use ckb_logger::internal::trace; use ckb_logger::{debug, error, info}; use ckb_shared::block_status::BlockStatus; @@ -106,16 +106,8 @@ impl ConsumeDescendantProcessor { block_number, block_hash ); } - Err(SendError(lonely_block)) => { + Err(_) => { error!("send unverified_block_tx failed, the receiver has been closed"); - let err: Error = InternalErrorKind::System - .other( - "send unverified_block_tx failed, the receiver have been close".to_string(), - ) - .into(); - - let verify_result: 
VerifyResult = Err(err); - lonely_block.execute_callback(verify_result); return; } }; diff --git a/chain/src/lib.rs b/chain/src/lib.rs index 26513cec6f..7bb06fa456 100644 --- a/chain/src/lib.rs +++ b/chain/src/lib.rs @@ -71,14 +71,6 @@ pub struct LonelyBlockHash { pub verify_callback: Option, } -impl LonelyBlockHash { - pub(crate) fn execute_callback(self, verify_result: VerifyResult) { - if let Some(verify_callback) = self.verify_callback { - verify_callback(verify_result); - } - } -} - impl From for LonelyBlockHash { fn from(val: LonelyBlock) -> Self { LonelyBlockHash { diff --git a/chain/src/tests/orphan_block_pool.rs b/chain/src/tests/orphan_block_pool.rs index 3a441e3bae..db094e478d 100644 --- a/chain/src/tests/orphan_block_pool.rs +++ b/chain/src/tests/orphan_block_pool.rs @@ -55,7 +55,10 @@ fn assert_blocks_are_sorted(blocks: &[LonelyBlock]) { // If `child_or_sibling`'s parent is not the current `parent_hash`, i.e. it is not a sibling of // `parent_or_sibling`, then it must be a child of `parent_or_sibling`. if child_or_sibling.block.header().parent_hash() != parent_hash { - assert_eq!(child_or_sibling.block.header().parent_hash(), parent_or_sibling.block.header().hash()); + assert_eq!( + child_or_sibling.block.header().parent_hash(), + parent_or_sibling.block.header().hash() + ); // Move `parent_hash` forward. 
parent_hash = child_or_sibling.block.header().parent_hash(); } From 7ed75fd2fa9f3e5f715a9d6ded19a33635081b9d Mon Sep 17 00:00:00 2001 From: quake Date: Tue, 6 Feb 2024 15:20:44 +0800 Subject: [PATCH 351/357] chore: remove UnverifiedBlock --- chain/src/consume_unverified.rs | 55 +++++++++++++++------------------ chain/src/lib.rs | 21 +------------ 2 files changed, 26 insertions(+), 50 deletions(-) diff --git a/chain/src/consume_unverified.rs b/chain/src/consume_unverified.rs index 9506c26baf..1e643c8a6f 100644 --- a/chain/src/consume_unverified.rs +++ b/chain/src/consume_unverified.rs @@ -1,6 +1,6 @@ use crate::LonelyBlockHash; use crate::{ - utils::forkchanges::ForkChanges, GlobalIndex, LonelyBlock, TruncateRequest, UnverifiedBlock, + utils::forkchanges::ForkChanges, GlobalIndex, TruncateRequest, VerifyResult, }; use ckb_channel::{select, Receiver}; @@ -112,11 +112,14 @@ impl ConsumeUnverifiedBlocks { } impl ConsumeUnverifiedBlockProcessor { - fn load_full_unverified_block(&self, lonely_block: LonelyBlockHash) -> UnverifiedBlock { + fn load_unverified_block_and_parent_header( + &self, + block_hash: &Byte32, + ) -> (BlockView, HeaderView) { let block_view = self .shared .store() - .get_block(&lonely_block.block_number_and_hash.hash()) + .get_block(block_hash) .expect("block stored"); let parent_header_view = self .shared @@ -124,35 +127,28 @@ impl ConsumeUnverifiedBlockProcessor { .get_block_header(&block_view.data().header().raw().parent_hash()) .expect("parent header stored"); - UnverifiedBlock { - lonely_block: LonelyBlock { - block: Arc::new(block_view), - switch: lonely_block.switch, - verify_callback: lonely_block.verify_callback, - }, - parent_header: parent_header_view, - } + (block_view, parent_header_view) } pub(crate) fn consume_unverified_blocks(&mut self, lonely_block_hash: LonelyBlockHash) { - let unverified_block = self.load_full_unverified_block(lonely_block_hash); + let LonelyBlockHash { + block_number_and_hash, + switch, + verify_callback, + } = 
lonely_block_hash; + let (unverified_block, parent_header) = + self.load_unverified_block_and_parent_header(&block_number_and_hash.hash); // process this unverified block - let verify_result = self.verify_block( - unverified_block.block(), - &unverified_block.parent_header, - unverified_block.switch(), - ); + let verify_result = self.verify_block(&unverified_block, &parent_header, switch); match &verify_result { Ok(_) => { let log_now = std::time::Instant::now(); - self.shared - .remove_block_status(&unverified_block.block().hash()); + self.shared.remove_block_status(&block_number_and_hash.hash); let log_elapsed_remove_block_status = log_now.elapsed(); - self.shared - .remove_header_view(&unverified_block.block().hash()); + self.shared.remove_header_view(&block_number_and_hash.hash); debug!( "block {} remove_block_status cost: {:?}, and header_view cost: {:?}", - unverified_block.block().hash(), + block_number_and_hash.hash, log_elapsed_remove_block_status, log_now.elapsed() ); @@ -160,8 +156,7 @@ impl ConsumeUnverifiedBlockProcessor { Err(err) => { error!( "verify block {} failed: {}", - unverified_block.block().hash(), - err + block_number_and_hash.hash, err ); let tip = self @@ -181,21 +176,21 @@ impl ConsumeUnverifiedBlockProcessor { tip_ext.total_difficulty, )); - self.shared.insert_block_status( - unverified_block.block().hash(), - BlockStatus::BLOCK_INVALID, - ); + self.shared + .insert_block_status(block_number_and_hash.hash(), BlockStatus::BLOCK_INVALID); error!( "set_unverified tip to {}-{}, because verify {} failed: {}", tip.number(), tip.hash(), - unverified_block.block().hash(), + block_number_and_hash.hash, err ); } } - unverified_block.execute_callback(verify_result); + if let Some(callback) = verify_callback { + callback(verify_result); + } } fn verify_block( diff --git a/chain/src/lib.rs b/chain/src/lib.rs index 7bb06fa456..5f98d77557 100644 --- a/chain/src/lib.rs +++ b/chain/src/lib.rs @@ -8,7 +8,7 @@ use ckb_error::Error; use 
ckb_shared::types::BlockNumberAndHash; use ckb_types::core::service::Request; -use ckb_types::core::{BlockNumber, BlockView, HeaderView}; +use ckb_types::core::{BlockNumber, BlockView}; use ckb_types::packed::Byte32; use ckb_verification_traits::Switch; use std::sync::Arc; @@ -100,25 +100,6 @@ impl LonelyBlock { } } -pub(crate) struct UnverifiedBlock { - pub lonely_block: LonelyBlock, - pub parent_header: HeaderView, -} - -impl UnverifiedBlock { - pub(crate) fn block(&self) -> &Arc { - self.lonely_block.block() - } - - pub fn switch(&self) -> Option { - self.lonely_block.switch() - } - - pub fn execute_callback(self, verify_result: VerifyResult) { - self.lonely_block.execute_callback(verify_result) - } -} - pub(crate) struct GlobalIndex { pub(crate) number: BlockNumber, pub(crate) hash: Byte32, From 5e0d3725a5d76b39ff7f58a0b0bb0cf05995bb03 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 7 Feb 2024 20:46:47 +0800 Subject: [PATCH 352/357] Find and verify unverified blocks on ckb startup --- Cargo.lock | 2 + chain/Cargo.toml | 2 + chain/src/chain_controller.rs | 12 ++- chain/src/chain_service.rs | 96 ++------------------- chain/src/init.rs | 126 +++++++++++++++++++++++++++ chain/src/init_load_unverified.rs | 139 ++++++++++++++++++++++++++++++ chain/src/lib.rs | 4 +- sync/src/synchronizer/mod.rs | 7 ++ 8 files changed, 295 insertions(+), 93 deletions(-) create mode 100644 chain/src/init.rs create mode 100644 chain/src/init_load_unverified.rs diff --git a/Cargo.lock b/Cargo.lock index 91a8d28225..5e689dcaf8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -692,6 +692,8 @@ dependencies = [ "ckb-channel", "ckb-constant", "ckb-dao-utils", + "ckb-db", + "ckb-db-schema", "ckb-error", "ckb-jsonrpc-types", "ckb-logger", diff --git a/chain/Cargo.toml b/chain/Cargo.toml index 92ee3b3399..91ac5cccea 100644 --- a/chain/Cargo.toml +++ b/chain/Cargo.toml @@ -25,6 +25,8 @@ ckb-error = { path = "../error", version = "= 0.114.0-pre" } ckb-app-config = { path = "../util/app-config", 
version = "= 0.114.0-pre" } ckb-rust-unstable-port = { path = "../util/rust-unstable-port", version = "= 0.114.0-pre" } ckb-channel = { path = "../util/channel", version = "= 0.114.0-pre" } +ckb-db = { path = "../db", version = "= 0.114.0-pre" } +ckb-db-schema = { path = "../db-schema", version = "= 0.114.0-pre" } faux = { version = "^0.1", optional = true } ckb-merkle-mountain-range = "0.5.2" ckb-constant = { path = "../util/constant", version = "= 0.114.0-pre" } diff --git a/chain/src/chain_controller.rs b/chain/src/chain_controller.rs index 48902434e5..89cfb68146 100644 --- a/chain/src/chain_controller.rs +++ b/chain/src/chain_controller.rs @@ -11,6 +11,7 @@ use ckb_types::{ packed::Byte32, }; use ckb_verification_traits::Switch; +use std::sync::atomic::AtomicBool; use std::sync::Arc; /// Controller to the chain service. @@ -24,6 +25,8 @@ pub struct ChainController { process_block_sender: Sender, truncate_sender: Sender, orphan_block_broker: Arc, + + is_verifying_unverified_blocks_on_startup: Arc, } #[cfg_attr(feature = "mock", faux::methods)] @@ -32,14 +35,21 @@ impl ChainController { process_block_sender: Sender, truncate_sender: Sender, orphan_block_broker: Arc, + is_verifying_unverified_blocks_on_startup: Arc, ) -> Self { ChainController { process_block_sender, truncate_sender, orphan_block_broker, + is_verifying_unverified_blocks_on_startup, } } + pub fn is_verifying_unverified_blocks_on_startup(&self) -> bool { + self.is_verifying_unverified_blocks_on_startup + .load(std::sync::atomic::Ordering::Relaxed) + } + pub fn asynchronous_process_remote_block(&self, remote_block: RemoteBlock) { let lonely_block = LonelyBlock { block: remote_block.block, @@ -49,7 +59,7 @@ impl ChainController { self.asynchronous_process_lonely_block(lonely_block); } - fn asynchronous_process_lonely_block(&self, lonely_block: LonelyBlock) { + pub fn asynchronous_process_lonely_block(&self, lonely_block: LonelyBlock) { if Request::call(&self.process_block_sender, 
lonely_block).is_none() { error!("Chain service has gone") } diff --git a/chain/src/chain_service.rs b/chain/src/chain_service.rs index cb89a78f66..e60effadc8 100644 --- a/chain/src/chain_service.rs +++ b/chain/src/chain_service.rs @@ -1,102 +1,16 @@ //! CKB chain service. #![allow(missing_docs)] -use crate::consume_unverified::ConsumeUnverifiedBlocks; -use crate::utils::orphan_block_pool::OrphanBlockPool; -use crate::{ChainController, LonelyBlock, LonelyBlockHash, ProcessBlockRequest}; -use ckb_channel::{self as channel, select, Receiver, SendError, Sender}; -use ckb_constant::sync::BLOCK_DOWNLOAD_WINDOW; +use crate::{LonelyBlock, ProcessBlockRequest}; +use ckb_channel::{select, Receiver, Sender}; use ckb_error::{Error, InternalErrorKind}; use ckb_logger::{self, debug, error, info, warn}; use ckb_shared::block_status::BlockStatus; use ckb_shared::shared::Shared; -use ckb_shared::ChainServicesBuilder; -use ckb_stop_handler::{new_crossbeam_exit_rx, register_thread}; +use ckb_stop_handler::new_crossbeam_exit_rx; use ckb_types::core::{service::Request, BlockView}; use ckb_verification::{BlockVerifier, NonContextualBlockTxsVerifier}; use ckb_verification_traits::Verifier; -use std::sync::Arc; -use std::thread; - -const ORPHAN_BLOCK_SIZE: usize = (BLOCK_DOWNLOAD_WINDOW * 2) as usize; - -pub fn start_chain_services(builder: ChainServicesBuilder) -> ChainController { - let orphan_blocks_broker = Arc::new(OrphanBlockPool::with_capacity(ORPHAN_BLOCK_SIZE)); - - let (truncate_block_tx, truncate_block_rx) = channel::bounded(1); - - let (unverified_queue_stop_tx, unverified_queue_stop_rx) = ckb_channel::bounded::<()>(1); - let (unverified_tx, unverified_rx) = - channel::bounded::(BLOCK_DOWNLOAD_WINDOW as usize * 3); - - let consumer_unverified_thread = thread::Builder::new() - .name("consume_unverified_blocks".into()) - .spawn({ - let shared = builder.shared.clone(); - move || { - let consume_unverified = ConsumeUnverifiedBlocks::new( - shared, - unverified_rx, - 
truncate_block_rx, - builder.proposal_table, - unverified_queue_stop_rx, - ); - - consume_unverified.start(); - } - }) - .expect("start unverified_queue consumer thread should ok"); - - let (lonely_block_tx, lonely_block_rx) = - channel::bounded::(BLOCK_DOWNLOAD_WINDOW as usize); - - let (search_orphan_pool_stop_tx, search_orphan_pool_stop_rx) = ckb_channel::bounded::<()>(1); - - let search_orphan_pool_thread = thread::Builder::new() - .name("consume_orphan_blocks".into()) - .spawn({ - let orphan_blocks_broker = Arc::clone(&orphan_blocks_broker); - let shared = builder.shared.clone(); - use crate::consume_orphan::ConsumeOrphan; - move || { - let consume_orphan = ConsumeOrphan::new( - shared, - orphan_blocks_broker, - unverified_tx, - lonely_block_rx, - search_orphan_pool_stop_rx, - ); - consume_orphan.start(); - } - }) - .expect("start search_orphan_pool thread should ok"); - - let (process_block_tx, process_block_rx) = channel::bounded(BLOCK_DOWNLOAD_WINDOW as usize); - - let chain_service: ChainService = - ChainService::new(builder.shared, process_block_rx, lonely_block_tx); - let chain_service_thread = thread::Builder::new() - .name("ChainService".into()) - .spawn({ - move || { - chain_service.start_process_block(); - - if let Err(SendError(_)) = search_orphan_pool_stop_tx.send(()) { - warn!("trying to notify search_orphan_pool thread to stop, but search_orphan_pool_stop_tx already closed") - } - let _ = search_orphan_pool_thread.join(); - - if let Err(SendError(_))= unverified_queue_stop_tx.send(()){ - warn!("trying to notify consume unverified thread to stop, but unverified_queue_stop_tx already closed"); - } - let _ = consumer_unverified_thread.join(); - } - }) - .expect("start chain_service thread should ok"); - register_thread("ChainServices", chain_service_thread); - - ChainController::new(process_block_tx, truncate_block_tx, orphan_blocks_broker) -} /// Chain background service to receive LonelyBlock and only do `non_contextual_verify` #[derive(Clone)] @@ 
-213,11 +127,11 @@ impl ChainService { } } - ckb_metrics::handle().map(|metrics| { + if let Some(metrics) = ckb_metrics::handle() { metrics .ckb_chain_lonely_block_ch_len .set(self.lonely_block_tx.len() as i64) - }); + } match self.lonely_block_tx.send(lonely_block) { Ok(_) => { diff --git a/chain/src/init.rs b/chain/src/init.rs new file mode 100644 index 0000000000..89223275af --- /dev/null +++ b/chain/src/init.rs @@ -0,0 +1,126 @@ +#![allow(missing_docs)] + +//! Bootstrap ChainService, ConsumeOrphan and ConsumeUnverified threads. +use crate::chain_service::ChainService; +use crate::consume_unverified::ConsumeUnverifiedBlocks; +use crate::init_load_unverified::InitLoadUnverified; +use crate::utils::orphan_block_pool::OrphanBlockPool; +use crate::{ChainController, LonelyBlock, LonelyBlockHash}; +use ckb_channel::{self as channel, SendError}; +use ckb_constant::sync::BLOCK_DOWNLOAD_WINDOW; +use ckb_logger::warn; +use ckb_shared::ChainServicesBuilder; +use ckb_stop_handler::{new_crossbeam_exit_rx, register_thread}; +use std::sync::atomic::AtomicBool; +use std::sync::Arc; +use std::thread; + +const ORPHAN_BLOCK_SIZE: usize = (BLOCK_DOWNLOAD_WINDOW * 2) as usize; + +pub fn start_chain_services(builder: ChainServicesBuilder) -> ChainController { + let orphan_blocks_broker = Arc::new(OrphanBlockPool::with_capacity(ORPHAN_BLOCK_SIZE)); + + let (truncate_block_tx, truncate_block_rx) = channel::bounded(1); + + let (unverified_queue_stop_tx, unverified_queue_stop_rx) = ckb_channel::bounded::<()>(1); + let (unverified_tx, unverified_rx) = + channel::bounded::(BLOCK_DOWNLOAD_WINDOW as usize * 3); + + let consumer_unverified_thread = thread::Builder::new() + .name("consume_unverified_blocks".into()) + .spawn({ + let shared = builder.shared.clone(); + move || { + let consume_unverified = ConsumeUnverifiedBlocks::new( + shared, + unverified_rx, + truncate_block_rx, + builder.proposal_table, + unverified_queue_stop_rx, + ); + + consume_unverified.start(); + } + }) + .expect("start 
unverified_queue consumer thread should ok"); + + let (lonely_block_tx, lonely_block_rx) = + channel::bounded::(BLOCK_DOWNLOAD_WINDOW as usize); + + let (search_orphan_pool_stop_tx, search_orphan_pool_stop_rx) = ckb_channel::bounded::<()>(1); + + let search_orphan_pool_thread = thread::Builder::new() + .name("consume_orphan_blocks".into()) + .spawn({ + let orphan_blocks_broker = Arc::clone(&orphan_blocks_broker); + let shared = builder.shared.clone(); + use crate::consume_orphan::ConsumeOrphan; + move || { + let consume_orphan = ConsumeOrphan::new( + shared, + orphan_blocks_broker, + unverified_tx, + lonely_block_rx, + search_orphan_pool_stop_rx, + ); + consume_orphan.start(); + } + }) + .expect("start search_orphan_pool thread should ok"); + + let (process_block_tx, process_block_rx) = channel::bounded(BLOCK_DOWNLOAD_WINDOW as usize); + + let is_verifying_unverified_blocks_on_startup = Arc::new(AtomicBool::new(true)); + + let chain_controller = ChainController::new( + process_block_tx, + truncate_block_tx, + orphan_blocks_broker, + Arc::clone(&is_verifying_unverified_blocks_on_startup), + ); + + let init_load_unverified_thread = thread::Builder::new() + .name("init_load_unverified_blocks".into()) + .spawn({ + let chain_controller = chain_controller.clone(); + let signal_receiver = new_crossbeam_exit_rx(); + let shared = builder.shared.clone(); + + move || { + let init_load_unverified: InitLoadUnverified = InitLoadUnverified::new( + shared, + chain_controller, + signal_receiver, + is_verifying_unverified_blocks_on_startup, + ); + init_load_unverified.start(); + } + }) + .expect("start unverified_queue consumer thread should ok"); + + let chain_service: ChainService = + ChainService::new(builder.shared, process_block_rx, lonely_block_tx); + let chain_service_thread = thread::Builder::new() + .name("ChainService".into()) + .spawn({ + move || { + chain_service.start_process_block(); + + let _ = init_load_unverified_thread.join(); + + if let Err(SendError(_)) = 
search_orphan_pool_stop_tx.send(()) { + warn!("trying to notify search_orphan_pool thread to stop, but search_orphan_pool_stop_tx already closed") + } + let _ = search_orphan_pool_thread.join(); + + if let Err(SendError(_)) = unverified_queue_stop_tx.send(()) { + warn!("trying to notify consume unverified thread to stop, but unverified_queue_stop_tx already closed"); + } + let _ = consumer_unverified_thread.join(); + } + }) + .expect("start chain_service thread should ok"); + register_thread("ChainServices", chain_service_thread); + + chain_controller +} diff --git a/chain/src/init_load_unverified.rs b/chain/src/init_load_unverified.rs new file mode 100644 index 0000000000..af85925197 --- /dev/null +++ b/chain/src/init_load_unverified.rs @@ -0,0 +1,139 @@ +use crate::{ChainController, LonelyBlock}; +use ckb_channel::{select, Receiver}; +use ckb_db::{Direction, IteratorMode}; +use ckb_db_schema::COLUMN_NUMBER_HASH; +use ckb_logger::info; +use ckb_shared::Shared; +use ckb_store::ChainStore; +use ckb_types::core::{BlockNumber, BlockView}; +use ckb_types::packed; +use ckb_types::prelude::{Entity, FromSliceShouldBeOk, Pack, Reader}; +use std::sync::atomic::AtomicBool; +use std::sync::Arc; + +pub(crate) struct InitLoadUnverified { + shared: Shared, + chain_controller: ChainController, + is_verifying_unverified_blocks_on_startup: Arc, + + stop_rx: Receiver<()>, +} + +impl InitLoadUnverified { + pub(crate) fn new( + shared: Shared, + chain_controller: ChainController, + stop_rx: Receiver<()>, + is_verifying_unverified_blocks_on_startup: Arc, + ) -> Self { + InitLoadUnverified { + shared, + chain_controller, + is_verifying_unverified_blocks_on_startup, + stop_rx, + } + } + fn print_unverified_blocks_count(&self) { + let tip_number: BlockNumber = self.shared.snapshot().tip_number(); + let mut check_unverified_number = tip_number + 1; + let mut unverified_block_count = 0; + loop { + // start checking `check_unverified_number` have COLUMN_NUMBER_HASH value in db? 
+ let unverified_hashes: Vec = + self.find_unverified_block_hashes(check_unverified_number); + unverified_block_count += unverified_hashes.len(); + if unverified_hashes.is_empty() { + info!( + "found {} unverified blocks, verifying...", + unverified_block_count + ); + break; + } + check_unverified_number += 1; + } + } + + fn find_unverified_block_hashes(&self, check_unverified_number: u64) -> Vec { + let pack_number: packed::Uint64 = check_unverified_number.pack(); + let prefix = pack_number.as_slice(); + + let unverified_hashes: Vec = self + .shared + .store() + .get_iter( + COLUMN_NUMBER_HASH, + IteratorMode::From(prefix, Direction::Forward), + ) + .take_while(|(key, _)| key.starts_with(prefix)) + .map(|(key_number_hash, _v)| { + let reader = + packed::NumberHashReader::from_slice_should_be_ok(key_number_hash.as_ref()); + let unverified_block_hash = reader.block_hash().to_entity(); + unverified_block_hash + }) + .collect::>(); + unverified_hashes + } + + pub(crate) fn start(&self) { + info!( + "finding unverified blocks, current tip: {}-{}", + self.shared.snapshot().tip_number(), + self.shared.snapshot().tip_hash() + ); + self.print_unverified_blocks_count(); + + self.find_and_verify_unverified_blocks(); + + self.is_verifying_unverified_blocks_on_startup + .store(false, std::sync::atomic::Ordering::Relaxed); + } + + fn find_and_verify_unverified_blocks(&self) { + let tip_number: BlockNumber = self.shared.snapshot().tip_number(); + let mut check_unverified_number = tip_number + 1; + + loop { + select! { + recv(self.stop_rx) -> _msg => { + info!("init_unverified_blocks thread received exit signal, exit now"); + break; + }, + default => {} + } + + // start checking `check_unverified_number` have COLUMN_NUMBER_HASH value in db? 
+ let unverified_hashes: Vec = + self.find_unverified_block_hashes(check_unverified_number); + + if unverified_hashes.is_empty() { + if check_unverified_number == tip_number + 1 { + info!("no unverified blocks found."); + } else { + info!( + "found and verify unverified blocks finish, current tip: {}-{}", + self.shared.snapshot().tip_number(), + self.shared.snapshot().tip_header() + ); + } + return; + } + + for unverified_hash in unverified_hashes { + let unverified_block: BlockView = self + .shared + .store() + .get_block(&unverified_hash) + .expect("unverified block must be in db"); + self.chain_controller + .asynchronous_process_lonely_block(LonelyBlock { + block: Arc::new(unverified_block), + switch: None, + verify_callback: None, + }); + } + + check_unverified_number += 1; + } + } +} diff --git a/chain/src/lib.rs b/chain/src/lib.rs index 5f98d77557..a35a12ff4d 100644 --- a/chain/src/lib.rs +++ b/chain/src/lib.rs @@ -17,13 +17,15 @@ mod chain_controller; mod chain_service; mod consume_orphan; mod consume_unverified; +mod init; +mod init_load_unverified; #[cfg(test)] mod tests; mod utils; pub use chain_controller::ChainController; -pub use chain_service::start_chain_services; pub use consume_orphan::store_unverified_block; +pub use init::start_chain_services; type ProcessBlockRequest = Request; type TruncateRequest = Request>; diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index 2d2af690c5..8e334d07a1 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -608,6 +608,13 @@ impl Synchronizer { } fn find_blocks_to_fetch(&mut self, nc: &dyn CKBProtocolContext, ibd: IBDState) { + if self.chain.is_verifying_unverified_blocks_on_startup() { + trace!( + "skip find_blocks_to_fetch, ckb_chain is verifying unverified blocks on startup" + ); + return; + } + let unverified_tip = self.shared.active_chain().unverified_tip_number(); let disconnect_list = { From 2479ff4ac243f2e8877bc00ebf9fd72d49a6a93a Mon Sep 17 00:00:00 2001 
From: Eval EXEC Date: Wed, 7 Feb 2024 20:55:04 +0800 Subject: [PATCH 353/357] Fix cargo clippy warnings Signed-off-by: Eval EXEC --- Cargo.lock | 3 --- chain/Cargo.toml | 1 - chain/src/consume_orphan.rs | 12 +++++++----- chain/src/consume_unverified.rs | 13 ++++--------- chain/src/lib.rs | 2 ++ chain/src/tests/orphan_block_pool.rs | 12 ++++++------ shared/Cargo.toml | 1 - shared/src/types/header_map/kernel_lru.rs | 20 ++++++++++++-------- shared/src/types/header_map/memory.rs | 12 +++++++++--- sync/src/relayer/mod.rs | 2 +- sync/src/synchronizer/block_fetcher.rs | 4 ++-- sync/src/synchronizer/block_process.rs | 3 +-- sync/src/tests/sync_shared.rs | 2 +- sync/src/types/mod.rs | 4 ++-- util/launcher/Cargo.toml | 1 - 15 files changed, 47 insertions(+), 45 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5e689dcaf8..0f5e0578b8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -719,7 +719,6 @@ dependencies = [ "lazy_static", "minstant", "tempfile", - "tokio", ] [[package]] @@ -984,7 +983,6 @@ dependencies = [ "ckb-types", "ckb-verification", "ckb-verification-traits", - "tokio", ] [[package]] @@ -1454,7 +1452,6 @@ dependencies = [ "ckb-logger", "ckb-metrics", "ckb-migrate", - "ckb-network", "ckb-notify", "ckb-proposal-table", "ckb-snapshot", diff --git a/chain/Cargo.toml b/chain/Cargo.toml index 91ac5cccea..6ac25957fc 100644 --- a/chain/Cargo.toml +++ b/chain/Cargo.toml @@ -33,7 +33,6 @@ ckb-constant = { path = "../util/constant", version = "= 0.114.0-pre" } ckb-util = { path = "../util", version = "= 0.114.0-pre" } crossbeam = "0.8.2" ckb-network = { path = "../network", version = "= 0.114.0-pre" } -tokio = { version = "1", features = ["sync"] } ckb-tx-pool = { path = "../tx-pool", version = "= 0.114.0-pre"} minstant = "0.1.4" diff --git a/chain/src/consume_orphan.rs b/chain/src/consume_orphan.rs index 4f9bbb30d3..82e10b7388 100644 --- a/chain/src/consume_orphan.rs +++ b/chain/src/consume_orphan.rs @@ -1,3 +1,5 @@ +#![allow(missing_docs)] + use 
crate::utils::orphan_block_pool::OrphanBlockPool; use crate::{LonelyBlock, LonelyBlockHash}; use ckb_channel::{select, Receiver, Sender}; @@ -93,11 +95,11 @@ impl ConsumeDescendantProcessor { fn send_unverified_block(&self, lonely_block: LonelyBlockHash, total_difficulty: U256) { let block_number = lonely_block.block_number_and_hash.number(); let block_hash = lonely_block.block_number_and_hash.hash(); - ckb_metrics::handle().map(|metrics| { + if let Some(metrics) = ckb_metrics::handle() { metrics .ckb_chain_unverified_block_ch_len .set(self.unverified_blocks_tx.len() as i64) - }); + }; match self.unverified_blocks_tx.send(lonely_block) { Ok(_) => { @@ -298,10 +300,10 @@ impl ConsumeOrphan { } self.search_orphan_pool(); - ckb_metrics::handle().map(|handle| { - handle + if let Some(metrics) = ckb_metrics::handle() { + metrics .ckb_chain_orphan_count .set(self.orphan_blocks_broker.len() as i64) - }); + }; } } diff --git a/chain/src/consume_unverified.rs b/chain/src/consume_unverified.rs index 1e643c8a6f..2fb0be6088 100644 --- a/chain/src/consume_unverified.rs +++ b/chain/src/consume_unverified.rs @@ -1,8 +1,5 @@ use crate::LonelyBlockHash; -use crate::{ - utils::forkchanges::ForkChanges, GlobalIndex, TruncateRequest, - VerifyResult, -}; +use crate::{utils::forkchanges::ForkChanges, GlobalIndex, TruncateRequest, VerifyResult}; use ckb_channel::{select, Receiver}; use ckb_error::{Error, InternalErrorKind}; use ckb_logger::internal::{log_enabled, trace}; @@ -291,7 +288,7 @@ impl ConsumeUnverifiedBlockProcessor { &cannon_total_difficulty - ¤t_total_difficulty, self.shared.get_unverified_tip().number(), ); - self.find_fork(&mut fork, current_tip_header.number(), &block, ext); + self.find_fork(&mut fork, current_tip_header.number(), block, ext); self.rollback(&fork, &db_txn)?; // update and verify chain root @@ -347,10 +344,9 @@ impl ConsumeUnverifiedBlockProcessor { } } - let block_ref: &BlockView = █ self.shared .notify_controller() - .notify_new_block(block_ref.clone()); 
+ .notify_new_block(block.to_owned()); if log_enabled!(ckb_logger::Level::Trace) { self.print_chain(10); } @@ -370,8 +366,7 @@ impl ConsumeUnverifiedBlockProcessor { let tx_pool_controller = self.shared.tx_pool_controller(); if tx_pool_controller.service_started() { - let block_ref: &BlockView = █ - if let Err(e) = tx_pool_controller.notify_new_uncle(block_ref.as_uncle()) { + if let Err(e) = tx_pool_controller.notify_new_uncle(block.as_uncle()) { error!("[verify block] notify new_uncle error {}", e); } } diff --git a/chain/src/lib.rs b/chain/src/lib.rs index a35a12ff4d..d656a1ba1d 100644 --- a/chain/src/lib.rs +++ b/chain/src/lib.rs @@ -1,3 +1,5 @@ +#![allow(missing_docs)] + //! CKB chain service. //! //! [`ChainService`] background base on database, handle block importing, diff --git a/chain/src/tests/orphan_block_pool.rs b/chain/src/tests/orphan_block_pool.rs index db094e478d..3c14890fba 100644 --- a/chain/src/tests/orphan_block_pool.rs +++ b/chain/src/tests/orphan_block_pool.rs @@ -74,9 +74,9 @@ fn test_remove_blocks_by_parent() { let pool = OrphanBlockPool::with_capacity(200); for _ in 1..block_number { let lonely_block = gen_lonely_block(&parent); - let new_block_clone = lonely_block.block().clone(); + let new_block_clone = Arc::clone(lonely_block.block()); let new_block = LonelyBlock { - block: new_block_clone.clone(), + block: Arc::clone(&new_block_clone), switch: None, verify_callback: None, }; @@ -157,7 +157,7 @@ fn test_leaders() { assert_eq!(pool.leaders_len(), 4); pool.insert(LonelyBlock { - block: blocks[5].block().clone(), + block: Arc::clone(blocks[5].block()), switch: None, verify_callback: None, }); @@ -166,7 +166,7 @@ fn test_leaders() { assert_eq!(pool.leaders_len(), 3); pool.insert(LonelyBlock { - block: blocks[10].block().clone(), + block: Arc::clone(blocks[10].block()), switch: None, verify_callback: None, }); @@ -181,7 +181,7 @@ fn test_leaders() { assert_eq!(pool.leaders_len(), 2); pool.insert(LonelyBlock { - block: 
blocks[0].block().clone(), + block: Arc::clone(blocks[0].block()), switch: None, verify_callback: None, }); @@ -194,7 +194,7 @@ fn test_leaders() { assert_eq!(pool.leaders_len(), 1); pool.insert(LonelyBlock { - block: blocks[15].block().clone(), + block: Arc::clone(blocks[15].block()), switch: None, verify_callback: None, }); diff --git a/shared/Cargo.toml b/shared/Cargo.toml index c81e3051a8..e30690a448 100644 --- a/shared/Cargo.toml +++ b/shared/Cargo.toml @@ -30,7 +30,6 @@ ckb-channel = { path = "../util/channel", version = "= 0.114.0-pre" } ckb-app-config = {path = "../util/app-config", version = "= 0.114.0-pre"} ckb-migrate = { path = "../util/migrate", version = "= 0.114.0-pre" } once_cell = "1.8.0" -ckb-network = { path = "../network", version = "= 0.114.0-pre" } ckb-util = { path = "../util", version = "= 0.114.0-pre" } ckb-metrics = { path = "../util/metrics", version = "= 0.114.0-pre" } bitflags = "1.0" diff --git a/shared/src/types/header_map/kernel_lru.rs b/shared/src/types/header_map/kernel_lru.rs index 07dbb3d440..46dba8eb35 100644 --- a/shared/src/types/header_map/kernel_lru.rs +++ b/shared/src/types/header_map/kernel_lru.rs @@ -87,12 +87,14 @@ where self.stats().tick_primary_contain(); } if self.memory.contains_key(hash) { - ckb_metrics::handle() - .map(|metrics| metrics.ckb_header_map_memory_hit_miss_count.hit.inc()); + if let Some(metrics) = ckb_metrics::handle() { + metrics.ckb_header_map_memory_hit_miss_count.hit.inc() + } return true; } - ckb_metrics::handle() - .map(|metrics| metrics.ckb_header_map_memory_hit_miss_count.miss.inc()); + if let Some(metrics) = ckb_metrics::handle() { + metrics.ckb_header_map_memory_hit_miss_count.miss.inc(); + } if self.backend.is_empty() { return false; @@ -110,13 +112,15 @@ where self.stats().tick_primary_select(); } if let Some(view) = self.memory.get_refresh(hash) { - ckb_metrics::handle() - .map(|metrics| metrics.ckb_header_map_memory_hit_miss_count.hit.inc()); + if let Some(metrics) = ckb_metrics::handle() 
{ + metrics.ckb_header_map_memory_hit_miss_count.hit.inc(); + } return Some(view); } - ckb_metrics::handle() - .map(|metrics| metrics.ckb_header_map_memory_hit_miss_count.miss.inc()); + if let Some(metrics) = ckb_metrics::handle() { + metrics.ckb_header_map_memory_hit_miss_count.miss.inc(); + } if self.backend.is_empty() { return None; diff --git a/shared/src/types/header_map/memory.rs b/shared/src/types/header_map/memory.rs index b88a504256..7a01b83891 100644 --- a/shared/src/types/header_map/memory.rs +++ b/shared/src/types/header_map/memory.rs @@ -97,7 +97,9 @@ impl MemoryMap { let (key, value) = header.into(); let ret = guard.insert(key, value); if ret.is_none() { - ckb_metrics::handle().map(|metrics| metrics.ckb_header_map_memory_count.inc()); + if let Some(metrics) = ckb_metrics::handle() { + metrics.ckb_header_map_memory_count.inc(); + } } ret.map(|_| ()) } @@ -110,7 +112,9 @@ impl MemoryMap { shrink_to_fit!(guard, SHRINK_THRESHOLD); } ret.map(|inner| { - ckb_metrics::handle().map(|metrics| metrics.ckb_header_map_memory_count.dec()); + if let Some(metrics) = ckb_metrics::handle() { + metrics.ckb_header_map_memory_count.dec(); + } (key.clone(), inner).into() }) @@ -142,7 +146,9 @@ impl MemoryMap { } } - ckb_metrics::handle().map(|metrics| metrics.ckb_header_map_memory_count.sub(keys_count)); + if let Some(metrics) = ckb_metrics::handle() { + metrics.ckb_header_map_memory_count.sub(keys_count) + } if shrink_to_fit { shrink_to_fit!(guard, SHRINK_THRESHOLD); diff --git a/sync/src/relayer/mod.rs b/sync/src/relayer/mod.rs index 4597c83722..bd139f7c86 100644 --- a/sync/src/relayer/mod.rs +++ b/sync/src/relayer/mod.rs @@ -340,7 +340,7 @@ impl Relayer { StatusCode::BlockIsInvalid.with_context(format!( "block {} is invalid, reason: {}", block.hash(), - err.to_string() + err )), ); } diff --git a/sync/src/synchronizer/block_fetcher.rs b/sync/src/synchronizer/block_fetcher.rs index d4ca821dd6..c2c4ce0eb0 100644 --- a/sync/src/synchronizer/block_fetcher.rs +++ 
b/sync/src/synchronizer/block_fetcher.rs @@ -278,11 +278,11 @@ impl BlockFetcher { } let inflight_total_count = state.read_inflight_blocks().total_inflight_count(); - ckb_metrics::handle().map(|metrics| { + if let Some(metrics) = ckb_metrics::handle() { metrics .ckb_inflight_blocks_count .set(inflight_total_count as i64); - }); + } if fetch.is_empty() { debug!( diff --git a/sync/src/synchronizer/block_process.rs b/sync/src/synchronizer/block_process.rs index 3be0e42221..074f0ac4d9 100644 --- a/sync/src/synchronizer/block_process.rs +++ b/sync/src/synchronizer/block_process.rs @@ -61,8 +61,7 @@ impl<'a> BlockProcess<'a> { "SendBlock", StatusCode::BlockIsInvalid.with_context(format!( "block {} is invalid, reason: {}", - block_hash, - err.to_string() + block_hash, err )), ); } diff --git a/sync/src/tests/sync_shared.rs b/sync/src/tests/sync_shared.rs index 04d79e700d..875a2dfa39 100644 --- a/sync/src/tests/sync_shared.rs +++ b/sync/src/tests/sync_shared.rs @@ -29,7 +29,7 @@ fn wait_for_expected_block_status( } std::thread::sleep(std::time::Duration::from_micros(100)); } - return false; + false } #[test] diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index f38e545824..eb8625726f 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -677,9 +677,9 @@ impl InflightBlocks { key.number, key.hash, value.peer ); - ckb_metrics::handle().map(|metrics| { + if let Some(metrics) = ckb_metrics::handle() { metrics.ckb_inflight_timeout_count.inc(); - }); + } } } diff --git a/util/launcher/Cargo.toml b/util/launcher/Cargo.toml index 7bc96c3b94..44223eb004 100644 --- a/util/launcher/Cargo.toml +++ b/util/launcher/Cargo.toml @@ -31,7 +31,6 @@ ckb-channel = { path = "../channel", version = "= 0.114.0-pre" } ckb-tx-pool = { path = "../../tx-pool", version = "= 0.114.0-pre" } ckb-light-client-protocol-server = { path = "../light-client-protocol-server", version = "= 0.114.0-pre" } ckb-block-filter = { path = "../../block-filter", version = "= 0.114.0-pre" } -tokio = 
{ version = "1", features = ["sync"] } [features] with_sentry = [ "ckb-sync/with_sentry", "ckb-network/with_sentry", "ckb-app-config/with_sentry" ] From 5123fb85024bdaf0488e19c44a32f0c5e849aa13 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 8 Feb 2024 16:28:49 +0800 Subject: [PATCH 354/357] Use `Release` to `store`, `Acquire` to `read` for atomic: `is_verifying_unverified_blocks_on_startup` Signed-off-by: Eval EXEC --- chain/src/chain_controller.rs | 2 +- chain/src/init_load_unverified.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/chain/src/chain_controller.rs b/chain/src/chain_controller.rs index 89cfb68146..fa3e6c10d1 100644 --- a/chain/src/chain_controller.rs +++ b/chain/src/chain_controller.rs @@ -47,7 +47,7 @@ impl ChainController { pub fn is_verifying_unverified_blocks_on_startup(&self) -> bool { self.is_verifying_unverified_blocks_on_startup - .load(std::sync::atomic::Ordering::Relaxed) + .load(std::sync::atomic::Ordering::Acquire) } pub fn asynchronous_process_remote_block(&self, remote_block: RemoteBlock) { diff --git a/chain/src/init_load_unverified.rs b/chain/src/init_load_unverified.rs index af85925197..4d02b7dfc2 100644 --- a/chain/src/init_load_unverified.rs +++ b/chain/src/init_load_unverified.rs @@ -86,7 +86,7 @@ impl InitLoadUnverified { self.find_and_verify_unverified_blocks(); self.is_verifying_unverified_blocks_on_startup - .store(false, std::sync::atomic::Ordering::Relaxed); + .store(false, std::sync::atomic::Ordering::Release); } fn find_and_verify_unverified_blocks(&self) { From b9fba578b51c658a0b3e0fb467895711d48daa5c Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 21 Feb 2024 11:42:42 +0800 Subject: [PATCH 355/357] Do not accept descendants if parent is invalid Signed-off-by: Eval EXEC --- chain/src/consume_orphan.rs | 110 +++++++++++++++++++++++++++-------- chain/src/tests/find_fork.rs | 4 +- 2 files changed, 89 insertions(+), 25 deletions(-) diff --git a/chain/src/consume_orphan.rs 
b/chain/src/consume_orphan.rs index 82e10b7388..51a1ee30ba 100644 --- a/chain/src/consume_orphan.rs +++ b/chain/src/consume_orphan.rs @@ -34,8 +34,24 @@ pub fn store_unverified_block( .expect("parent already store"); if let Some(ext) = shared.store().get_block_ext(&block.hash()) { - debug!("block {}-{} has stored BlockExt", block_number, block_hash); - return Ok((parent_header, ext.total_difficulty)); + debug!( + "block {}-{} has stored BlockExt: {:?}", + block_number, block_hash, ext + ); + match ext.verified { + Some(true) => { + return Ok((parent_header, ext.total_difficulty)); + } + Some(false) => { + return Err(InvalidParentError { + parent_hash: parent_header.hash(), + } + .into()); + } + None => { + // continue to process + } + } } trace!("begin accept block: {}-{}", block.number(), block.hash()); @@ -140,32 +156,67 @@ impl ConsumeDescendantProcessor { } } - pub(crate) fn process_descendant(&self, lonely_block: LonelyBlock) { - match store_unverified_block(&self.shared, lonely_block.block().to_owned()) { + pub(crate) fn process_descendant(&self, lonely_block: LonelyBlock) -> Result<(), Error> { + return match store_unverified_block(&self.shared, lonely_block.block().to_owned()) { Ok((_parent_header, total_difficulty)) => { self.shared .insert_block_status(lonely_block.block().hash(), BlockStatus::BLOCK_STORED); let lonely_block_hash: LonelyBlockHash = lonely_block.into(); - self.send_unverified_block(lonely_block_hash, total_difficulty) + self.send_unverified_block(lonely_block_hash, total_difficulty); + Ok(()) } Err(err) => { + if let Some(_invalid_parent_err) = err.downcast_ref::() { + self.shared + .block_status_map() + .insert(lonely_block.block().hash(), BlockStatus::BLOCK_INVALID); + } + + lonely_block.execute_callback(Err(err.clone())); + Err(err) + } + }; + } + + fn accept_descendants(&self, descendants: Vec) { + let mut has_parent_invalid_error = false; + for descendant_block in descendants { + let block_number = descendant_block.block().number(); + 
let block_hash = descendant_block.block().hash(); + + if has_parent_invalid_error { + self.shared + .block_status_map() + .insert(block_hash.clone(), BlockStatus::BLOCK_INVALID); + let err = Err(InvalidParentError { + parent_hash: descendant_block.block().parent_hash(), + } + .into()); + error!( - "accept block {} failed: {}", - lonely_block.block().hash(), + "process descendant {}-{}, failed {:?}", + block_number, + block_hash.clone(), err ); - lonely_block.execute_callback(Err(err)); + descendant_block.execute_callback(err); + continue; } - } - } - fn accept_descendants(&self, descendants: Vec) { - for descendant_block in descendants { - self.process_descendant(descendant_block); + if let Err(err) = self.process_descendant(descendant_block) { + error!( + "process descendant {}-{}, failed {:?}", + block_number, block_hash, err + ); + + if let Some(_invalid_parent_err) = err.downcast_ref::() { + has_parent_invalid_error = true; + } + } } } } @@ -275,26 +326,37 @@ impl ConsumeOrphan { fn process_lonely_block(&self, lonely_block: LonelyBlock) { let parent_hash = lonely_block.block().parent_hash(); + let block_hash = lonely_block.block().hash(); + let block_number = lonely_block.block().number(); let parent_status = self .shared .get_block_status(self.shared.store(), &parent_hash); if parent_status.contains(BlockStatus::BLOCK_STORED) { debug!( "parent {} has stored: {:?}, processing descendant directly {}-{}", - parent_hash, - parent_status, - lonely_block.block().number(), - lonely_block.block().hash() + parent_hash, parent_status, block_number, block_hash, ); - self.descendant_processor.process_descendant(lonely_block); + + if let Err(err) = self.descendant_processor.process_descendant(lonely_block) { + error!( + "process descendant {}-{}, failed {:?}", + block_number, block_hash, err + ); + } } else if parent_status.eq(&BlockStatus::BLOCK_INVALID) { - // ignore this block, because parent block is invalid - info!( - "parent: {} is INVALID, ignore this block {}-{}", 
- parent_hash, - lonely_block.block().number(), - lonely_block.block().hash() + // don't accept this block, because parent block is invalid + error!( + "parent: {} is INVALID, won't accept this block {}-{}", + parent_hash, block_number, block_hash, ); + self.shared + .block_status_map() + .insert(lonely_block.block().hash(), BlockStatus::BLOCK_INVALID); + let err = Err(InvalidParentError { + parent_hash: parent_hash.clone(), + } + .into()); + lonely_block.execute_callback(err); } else { self.orphan_blocks_broker.insert(lonely_block); } diff --git a/chain/src/tests/find_fork.rs b/chain/src/tests/find_fork.rs index dfe71e52eb..b07f2a3725 100644 --- a/chain/src/tests/find_fork.rs +++ b/chain/src/tests/find_fork.rs @@ -38,7 +38,9 @@ fn process_block( verify_callback: None, }; - consume_descendant_processor.process_descendant(lonely_block); + consume_descendant_processor + .process_descendant(lonely_block) + .unwrap(); consume_unverified_block_processor.consume_unverified_blocks(lonely_block_hash); } From cfd7aed632f55b6f34bc6d6eb74669482d5a1e0f Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Fri, 23 Feb 2024 11:23:38 +0800 Subject: [PATCH 356/357] Update rpc docs, fix CI test Signed-off-by: Eval EXEC --- chain/src/init.rs | 2 +- docs/ckb_rpc_openrpc | 2 +- rpc/README.md | 52 +++++++++++-------- util/app-config/src/tests/export_import.bats | 4 +- .../src/tests/graceful_shutdown.bats | 4 +- 5 files changed, 37 insertions(+), 27 deletions(-) diff --git a/chain/src/init.rs b/chain/src/init.rs index 89223275af..2759a75cb4 100644 --- a/chain/src/init.rs +++ b/chain/src/init.rs @@ -120,7 +120,7 @@ pub fn start_chain_services(builder: ChainServicesBuilder) -> ChainController { } }) .expect("start chain_service thread should ok"); - register_thread("ChainServices", chain_service_thread); + register_thread("ChainService", chain_service_thread); chain_controller } diff --git a/docs/ckb_rpc_openrpc b/docs/ckb_rpc_openrpc index 5d696307ed..1faaef7135 160000 --- 
a/docs/ckb_rpc_openrpc +++ b/docs/ckb_rpc_openrpc @@ -1 +1 @@ -Subproject commit 5d696307edb59dfa198fb78800ae14588d4bafd8 +Subproject commit 1faaef7135c8190bba0a2d65cf51d7b311897c5c diff --git a/rpc/README.md b/rpc/README.md index e2d5d102fa..820cddeab7 100644 --- a/rpc/README.md +++ b/rpc/README.md @@ -31,9 +31,9 @@ The crate `ckb-rpc`'s minimum supported rustc version is 1.71.1. * [RPC Methods](#rpc-methods) - * [Module Alert](#module-alert) [👉 OpenRPC spec](http://playground.open-rpc.org/?uiSchema[appBar][ui:title]=CKB-Alert&uiSchema[appBar][ui:splitView]=false&uiSchema[appBar][ui:examplesDropdown]=false&uiSchema[appBar][ui:logoUrl]=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/main/ckb-logo.jpg&schemaUrl=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/5d696307edb59dfa198fb78800ae14588d4bafd8/json/alert_rpc_doc.json) + * [Module Alert](#module-alert) [👉 OpenRPC spec](http://playground.open-rpc.org/?uiSchema[appBar][ui:title]=CKB-Alert&uiSchema[appBar][ui:splitView]=false&uiSchema[appBar][ui:examplesDropdown]=false&uiSchema[appBar][ui:logoUrl]=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/main/ckb-logo.jpg&schemaUrl=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/1faaef7135c8190bba0a2d65cf51d7b311897c5c/json/alert_rpc_doc.json) * [Method `send_alert`](#alert-send_alert) - * [Module Chain](#module-chain) [👉 OpenRPC spec](http://playground.open-rpc.org/?uiSchema[appBar][ui:title]=CKB-Chain&uiSchema[appBar][ui:splitView]=false&uiSchema[appBar][ui:examplesDropdown]=false&uiSchema[appBar][ui:logoUrl]=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/main/ckb-logo.jpg&schemaUrl=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/5d696307edb59dfa198fb78800ae14588d4bafd8/json/chain_rpc_doc.json) + * [Module Chain](#module-chain) [👉 OpenRPC 
spec](http://playground.open-rpc.org/?uiSchema[appBar][ui:title]=CKB-Chain&uiSchema[appBar][ui:splitView]=false&uiSchema[appBar][ui:examplesDropdown]=false&uiSchema[appBar][ui:logoUrl]=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/main/ckb-logo.jpg&schemaUrl=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/1faaef7135c8190bba0a2d65cf51d7b311897c5c/json/chain_rpc_doc.json) * [Method `get_block`](#chain-get_block) * [Method `get_block_by_number`](#chain-get_block_by_number) * [Method `get_header`](#chain-get_header) @@ -57,19 +57,19 @@ The crate `ckb-rpc`'s minimum supported rustc version is 1.71.1. * [Method `estimate_cycles`](#chain-estimate_cycles) * [Method `get_fee_rate_statics`](#chain-get_fee_rate_statics) * [Method `get_fee_rate_statistics`](#chain-get_fee_rate_statistics) - * [Module Debug](#module-debug) [👉 OpenRPC spec](http://playground.open-rpc.org/?uiSchema[appBar][ui:title]=CKB-Debug&uiSchema[appBar][ui:splitView]=false&uiSchema[appBar][ui:examplesDropdown]=false&uiSchema[appBar][ui:logoUrl]=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/main/ckb-logo.jpg&schemaUrl=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/5d696307edb59dfa198fb78800ae14588d4bafd8/json/debug_rpc_doc.json) + * [Module Debug](#module-debug) [👉 OpenRPC spec](http://playground.open-rpc.org/?uiSchema[appBar][ui:title]=CKB-Debug&uiSchema[appBar][ui:splitView]=false&uiSchema[appBar][ui:examplesDropdown]=false&uiSchema[appBar][ui:logoUrl]=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/main/ckb-logo.jpg&schemaUrl=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/1faaef7135c8190bba0a2d65cf51d7b311897c5c/json/debug_rpc_doc.json) * [Method `jemalloc_profiling_dump`](#debug-jemalloc_profiling_dump) * [Method `update_main_logger`](#debug-update_main_logger) * [Method `set_extra_logger`](#debug-set_extra_logger) - * [Module Experiment](#module-experiment) [👉 OpenRPC 
spec](http://playground.open-rpc.org/?uiSchema[appBar][ui:title]=CKB-Experiment&uiSchema[appBar][ui:splitView]=false&uiSchema[appBar][ui:examplesDropdown]=false&uiSchema[appBar][ui:logoUrl]=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/main/ckb-logo.jpg&schemaUrl=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/5d696307edb59dfa198fb78800ae14588d4bafd8/json/experiment_rpc_doc.json) + * [Module Experiment](#module-experiment) [👉 OpenRPC spec](http://playground.open-rpc.org/?uiSchema[appBar][ui:title]=CKB-Experiment&uiSchema[appBar][ui:splitView]=false&uiSchema[appBar][ui:examplesDropdown]=false&uiSchema[appBar][ui:logoUrl]=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/main/ckb-logo.jpg&schemaUrl=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/1faaef7135c8190bba0a2d65cf51d7b311897c5c/json/experiment_rpc_doc.json) * [Method `dry_run_transaction`](#experiment-dry_run_transaction) * [Method `calculate_dao_maximum_withdraw`](#experiment-calculate_dao_maximum_withdraw) - * [Module Indexer](#module-indexer) [👉 OpenRPC spec](http://playground.open-rpc.org/?uiSchema[appBar][ui:title]=CKB-Indexer&uiSchema[appBar][ui:splitView]=false&uiSchema[appBar][ui:examplesDropdown]=false&uiSchema[appBar][ui:logoUrl]=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/main/ckb-logo.jpg&schemaUrl=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/5d696307edb59dfa198fb78800ae14588d4bafd8/json/indexer_rpc_doc.json) + * [Module Indexer](#module-indexer) [👉 OpenRPC spec](http://playground.open-rpc.org/?uiSchema[appBar][ui:title]=CKB-Indexer&uiSchema[appBar][ui:splitView]=false&uiSchema[appBar][ui:examplesDropdown]=false&uiSchema[appBar][ui:logoUrl]=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/main/ckb-logo.jpg&schemaUrl=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/1faaef7135c8190bba0a2d65cf51d7b311897c5c/json/indexer_rpc_doc.json) * [Method `get_indexer_tip`](#indexer-get_indexer_tip) * 
[Method `get_cells`](#indexer-get_cells) * [Method `get_transactions`](#indexer-get_transactions) * [Method `get_cells_capacity`](#indexer-get_cells_capacity) - * [Module Integration_test](#module-integration_test) [👉 OpenRPC spec](http://playground.open-rpc.org/?uiSchema[appBar][ui:title]=CKB-Integration_test&uiSchema[appBar][ui:splitView]=false&uiSchema[appBar][ui:examplesDropdown]=false&uiSchema[appBar][ui:logoUrl]=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/main/ckb-logo.jpg&schemaUrl=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/5d696307edb59dfa198fb78800ae14588d4bafd8/json/integration_test_rpc_doc.json) + * [Module Integration_test](#module-integration_test) [👉 OpenRPC spec](http://playground.open-rpc.org/?uiSchema[appBar][ui:title]=CKB-Integration_test&uiSchema[appBar][ui:splitView]=false&uiSchema[appBar][ui:examplesDropdown]=false&uiSchema[appBar][ui:logoUrl]=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/main/ckb-logo.jpg&schemaUrl=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/1faaef7135c8190bba0a2d65cf51d7b311897c5c/json/integration_test_rpc_doc.json) * [Method `process_block_without_verify`](#integration_test-process_block_without_verify) * [Method `truncate`](#integration_test-truncate) * [Method `generate_block`](#integration_test-generate_block) @@ -77,10 +77,10 @@ The crate `ckb-rpc`'s minimum supported rustc version is 1.71.1. 
* [Method `notify_transaction`](#integration_test-notify_transaction) * [Method `generate_block_with_template`](#integration_test-generate_block_with_template) * [Method `calculate_dao_field`](#integration_test-calculate_dao_field) - * [Module Miner](#module-miner) [👉 OpenRPC spec](http://playground.open-rpc.org/?uiSchema[appBar][ui:title]=CKB-Miner&uiSchema[appBar][ui:splitView]=false&uiSchema[appBar][ui:examplesDropdown]=false&uiSchema[appBar][ui:logoUrl]=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/main/ckb-logo.jpg&schemaUrl=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/5d696307edb59dfa198fb78800ae14588d4bafd8/json/miner_rpc_doc.json) + * [Module Miner](#module-miner) [👉 OpenRPC spec](http://playground.open-rpc.org/?uiSchema[appBar][ui:title]=CKB-Miner&uiSchema[appBar][ui:splitView]=false&uiSchema[appBar][ui:examplesDropdown]=false&uiSchema[appBar][ui:logoUrl]=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/main/ckb-logo.jpg&schemaUrl=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/1faaef7135c8190bba0a2d65cf51d7b311897c5c/json/miner_rpc_doc.json) * [Method `get_block_template`](#miner-get_block_template) * [Method `submit_block`](#miner-submit_block) - * [Module Net](#module-net) [👉 OpenRPC spec](http://playground.open-rpc.org/?uiSchema[appBar][ui:title]=CKB-Net&uiSchema[appBar][ui:splitView]=false&uiSchema[appBar][ui:examplesDropdown]=false&uiSchema[appBar][ui:logoUrl]=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/main/ckb-logo.jpg&schemaUrl=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/5d696307edb59dfa198fb78800ae14588d4bafd8/json/net_rpc_doc.json) + * [Module Net](#module-net) [👉 OpenRPC 
spec](http://playground.open-rpc.org/?uiSchema[appBar][ui:title]=CKB-Net&uiSchema[appBar][ui:splitView]=false&uiSchema[appBar][ui:examplesDropdown]=false&uiSchema[appBar][ui:logoUrl]=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/main/ckb-logo.jpg&schemaUrl=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/1faaef7135c8190bba0a2d65cf51d7b311897c5c/json/net_rpc_doc.json) * [Method `local_node_info`](#net-local_node_info) * [Method `get_peers`](#net-get_peers) * [Method `get_banned_addresses`](#net-get_banned_addresses) @@ -91,7 +91,7 @@ The crate `ckb-rpc`'s minimum supported rustc version is 1.71.1. * [Method `add_node`](#net-add_node) * [Method `remove_node`](#net-remove_node) * [Method `ping_peers`](#net-ping_peers) - * [Module Pool](#module-pool) [👉 OpenRPC spec](http://playground.open-rpc.org/?uiSchema[appBar][ui:title]=CKB-Pool&uiSchema[appBar][ui:splitView]=false&uiSchema[appBar][ui:examplesDropdown]=false&uiSchema[appBar][ui:logoUrl]=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/main/ckb-logo.jpg&schemaUrl=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/5d696307edb59dfa198fb78800ae14588d4bafd8/json/pool_rpc_doc.json) + * [Module Pool](#module-pool) [👉 OpenRPC spec](http://playground.open-rpc.org/?uiSchema[appBar][ui:title]=CKB-Pool&uiSchema[appBar][ui:splitView]=false&uiSchema[appBar][ui:examplesDropdown]=false&uiSchema[appBar][ui:logoUrl]=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/main/ckb-logo.jpg&schemaUrl=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/1faaef7135c8190bba0a2d65cf51d7b311897c5c/json/pool_rpc_doc.json) * [Method `send_transaction`](#pool-send_transaction) * [Method `remove_transaction`](#pool-remove_transaction) * [Method `tx_pool_info`](#pool-tx_pool_info) @@ -99,10 +99,10 @@ The crate `ckb-rpc`'s minimum supported rustc version is 1.71.1. 
* [Method `get_raw_tx_pool`](#pool-get_raw_tx_pool) * [Method `get_pool_tx_detail_info`](#pool-get_pool_tx_detail_info) * [Method `tx_pool_ready`](#pool-tx_pool_ready) - * [Module Stats](#module-stats) [👉 OpenRPC spec](http://playground.open-rpc.org/?uiSchema[appBar][ui:title]=CKB-Stats&uiSchema[appBar][ui:splitView]=false&uiSchema[appBar][ui:examplesDropdown]=false&uiSchema[appBar][ui:logoUrl]=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/main/ckb-logo.jpg&schemaUrl=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/5d696307edb59dfa198fb78800ae14588d4bafd8/json/stats_rpc_doc.json) + * [Module Stats](#module-stats) [👉 OpenRPC spec](http://playground.open-rpc.org/?uiSchema[appBar][ui:title]=CKB-Stats&uiSchema[appBar][ui:splitView]=false&uiSchema[appBar][ui:examplesDropdown]=false&uiSchema[appBar][ui:logoUrl]=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/main/ckb-logo.jpg&schemaUrl=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/1faaef7135c8190bba0a2d65cf51d7b311897c5c/json/stats_rpc_doc.json) * [Method `get_blockchain_info`](#stats-get_blockchain_info) * [Method `get_deployments_info`](#stats-get_deployments_info) - * [Module Subscription](#module-subscription) [👉 OpenRPC spec](http://playground.open-rpc.org/?uiSchema[appBar][ui:title]=CKB-Subscription&uiSchema[appBar][ui:splitView]=false&uiSchema[appBar][ui:examplesDropdown]=false&uiSchema[appBar][ui:logoUrl]=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/main/ckb-logo.jpg&schemaUrl=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/5d696307edb59dfa198fb78800ae14588d4bafd8/json/subscription_rpc_doc.json) + * [Module Subscription](#module-subscription) [👉 OpenRPC 
spec](http://playground.open-rpc.org/?uiSchema[appBar][ui:title]=CKB-Subscription&uiSchema[appBar][ui:splitView]=false&uiSchema[appBar][ui:examplesDropdown]=false&uiSchema[appBar][ui:logoUrl]=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/main/ckb-logo.jpg&schemaUrl=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/1faaef7135c8190bba0a2d65cf51d7b311897c5c/json/subscription_rpc_doc.json) * [Method `subscribe`](#subscription-subscribe) * [Method `unsubscribe`](#subscription-unsubscribe) * [RPC Types](#rpc-types) @@ -207,7 +207,7 @@ The crate `ckb-rpc`'s minimum supported rustc version is 1.71.1. ## RPC Modules ### Module `Alert` -- [👉 OpenRPC spec](http://playground.open-rpc.org/?uiSchema[appBar][ui:title]=CKB-Alert&uiSchema[appBar][ui:splitView]=false&uiSchema[appBar][ui:examplesDropdown]=false&uiSchema[appBar][ui:logoUrl]=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/main/ckb-logo.jpg&schemaUrl=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/5d696307edb59dfa198fb78800ae14588d4bafd8/json/alert_rpc_doc.json) +- [👉 OpenRPC spec](http://playground.open-rpc.org/?uiSchema[appBar][ui:title]=CKB-Alert&uiSchema[appBar][ui:splitView]=false&uiSchema[appBar][ui:examplesDropdown]=false&uiSchema[appBar][ui:logoUrl]=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/main/ckb-logo.jpg&schemaUrl=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/1faaef7135c8190bba0a2d65cf51d7b311897c5c/json/alert_rpc_doc.json) RPC Module Alert for network alerts. 
@@ -273,7 +273,7 @@ Response ``` ### Module `Chain` -- [👉 OpenRPC spec](http://playground.open-rpc.org/?uiSchema[appBar][ui:title]=CKB-Chain&uiSchema[appBar][ui:splitView]=false&uiSchema[appBar][ui:examplesDropdown]=false&uiSchema[appBar][ui:logoUrl]=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/main/ckb-logo.jpg&schemaUrl=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/5d696307edb59dfa198fb78800ae14588d4bafd8/json/chain_rpc_doc.json) +- [👉 OpenRPC spec](http://playground.open-rpc.org/?uiSchema[appBar][ui:title]=CKB-Chain&uiSchema[appBar][ui:splitView]=false&uiSchema[appBar][ui:examplesDropdown]=false&uiSchema[appBar][ui:logoUrl]=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/main/ckb-logo.jpg&schemaUrl=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/1faaef7135c8190bba0a2d65cf51d7b311897c5c/json/chain_rpc_doc.json) RPC Module Chain for methods related to the canonical chain. @@ -1913,7 +1913,7 @@ Response ``` ### Module `Debug` -- [👉 OpenRPC spec](http://playground.open-rpc.org/?uiSchema[appBar][ui:title]=CKB-Debug&uiSchema[appBar][ui:splitView]=false&uiSchema[appBar][ui:examplesDropdown]=false&uiSchema[appBar][ui:logoUrl]=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/main/ckb-logo.jpg&schemaUrl=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/5d696307edb59dfa198fb78800ae14588d4bafd8/json/debug_rpc_doc.json) +- [👉 OpenRPC spec](http://playground.open-rpc.org/?uiSchema[appBar][ui:title]=CKB-Debug&uiSchema[appBar][ui:splitView]=false&uiSchema[appBar][ui:examplesDropdown]=false&uiSchema[appBar][ui:logoUrl]=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/main/ckb-logo.jpg&schemaUrl=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/1faaef7135c8190bba0a2d65cf51d7b311897c5c/json/debug_rpc_doc.json) RPC Module Debug for internal RPC methods. @@ -1959,7 +1959,7 @@ they only append logs to their log files. Removes the logger when this is null. 
### Module `Experiment` -- [👉 OpenRPC spec](http://playground.open-rpc.org/?uiSchema[appBar][ui:title]=CKB-Experiment&uiSchema[appBar][ui:splitView]=false&uiSchema[appBar][ui:examplesDropdown]=false&uiSchema[appBar][ui:logoUrl]=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/main/ckb-logo.jpg&schemaUrl=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/5d696307edb59dfa198fb78800ae14588d4bafd8/json/experiment_rpc_doc.json) +- [👉 OpenRPC spec](http://playground.open-rpc.org/?uiSchema[appBar][ui:title]=CKB-Experiment&uiSchema[appBar][ui:splitView]=false&uiSchema[appBar][ui:examplesDropdown]=false&uiSchema[appBar][ui:logoUrl]=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/main/ckb-logo.jpg&schemaUrl=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/1faaef7135c8190bba0a2d65cf51d7b311897c5c/json/experiment_rpc_doc.json) RPC Module Experiment for experimenting methods. @@ -2113,7 +2113,7 @@ Response ``` ### Module `Indexer` -- [👉 OpenRPC spec](http://playground.open-rpc.org/?uiSchema[appBar][ui:title]=CKB-Indexer&uiSchema[appBar][ui:splitView]=false&uiSchema[appBar][ui:examplesDropdown]=false&uiSchema[appBar][ui:logoUrl]=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/main/ckb-logo.jpg&schemaUrl=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/5d696307edb59dfa198fb78800ae14588d4bafd8/json/indexer_rpc_doc.json) +- [👉 OpenRPC spec](http://playground.open-rpc.org/?uiSchema[appBar][ui:title]=CKB-Indexer&uiSchema[appBar][ui:splitView]=false&uiSchema[appBar][ui:examplesDropdown]=false&uiSchema[appBar][ui:logoUrl]=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/main/ckb-logo.jpg&schemaUrl=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/1faaef7135c8190bba0a2d65cf51d7b311897c5c/json/indexer_rpc_doc.json) RPC Module Indexer. 
@@ -2995,7 +2995,7 @@ Response ``` ### Module `Integration_test` -- [👉 OpenRPC spec](http://playground.open-rpc.org/?uiSchema[appBar][ui:title]=CKB-Integration_test&uiSchema[appBar][ui:splitView]=false&uiSchema[appBar][ui:examplesDropdown]=false&uiSchema[appBar][ui:logoUrl]=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/main/ckb-logo.jpg&schemaUrl=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/5d696307edb59dfa198fb78800ae14588d4bafd8/json/integration_test_rpc_doc.json) +- [👉 OpenRPC spec](http://playground.open-rpc.org/?uiSchema[appBar][ui:title]=CKB-Integration_test&uiSchema[appBar][ui:splitView]=false&uiSchema[appBar][ui:examplesDropdown]=false&uiSchema[appBar][ui:logoUrl]=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/main/ckb-logo.jpg&schemaUrl=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/1faaef7135c8190bba0a2d65cf51d7b311897c5c/json/integration_test_rpc_doc.json) RPC for Integration Test. @@ -3496,7 +3496,7 @@ Response ``` ### Module `Miner` -- [👉 OpenRPC spec](http://playground.open-rpc.org/?uiSchema[appBar][ui:title]=CKB-Miner&uiSchema[appBar][ui:splitView]=false&uiSchema[appBar][ui:examplesDropdown]=false&uiSchema[appBar][ui:logoUrl]=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/main/ckb-logo.jpg&schemaUrl=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/5d696307edb59dfa198fb78800ae14588d4bafd8/json/miner_rpc_doc.json) +- [👉 OpenRPC spec](http://playground.open-rpc.org/?uiSchema[appBar][ui:title]=CKB-Miner&uiSchema[appBar][ui:splitView]=false&uiSchema[appBar][ui:examplesDropdown]=false&uiSchema[appBar][ui:logoUrl]=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/main/ckb-logo.jpg&schemaUrl=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/1faaef7135c8190bba0a2d65cf51d7b311897c5c/json/miner_rpc_doc.json) RPC Module Miner for miners. 
@@ -3712,7 +3712,7 @@ Response ``` ### Module `Net` -- [👉 OpenRPC spec](http://playground.open-rpc.org/?uiSchema[appBar][ui:title]=CKB-Net&uiSchema[appBar][ui:splitView]=false&uiSchema[appBar][ui:examplesDropdown]=false&uiSchema[appBar][ui:logoUrl]=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/main/ckb-logo.jpg&schemaUrl=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/5d696307edb59dfa198fb78800ae14588d4bafd8/json/net_rpc_doc.json) +- [👉 OpenRPC spec](http://playground.open-rpc.org/?uiSchema[appBar][ui:title]=CKB-Net&uiSchema[appBar][ui:splitView]=false&uiSchema[appBar][ui:examplesDropdown]=false&uiSchema[appBar][ui:logoUrl]=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/main/ckb-logo.jpg&schemaUrl=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/1faaef7135c8190bba0a2d65cf51d7b311897c5c/json/net_rpc_doc.json) RPC Module Net for P2P network. @@ -4273,7 +4273,7 @@ Response ``` ### Module `Pool` -- [👉 OpenRPC spec](http://playground.open-rpc.org/?uiSchema[appBar][ui:title]=CKB-Pool&uiSchema[appBar][ui:splitView]=false&uiSchema[appBar][ui:examplesDropdown]=false&uiSchema[appBar][ui:logoUrl]=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/main/ckb-logo.jpg&schemaUrl=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/5d696307edb59dfa198fb78800ae14588d4bafd8/json/pool_rpc_doc.json) +- [👉 OpenRPC spec](http://playground.open-rpc.org/?uiSchema[appBar][ui:title]=CKB-Pool&uiSchema[appBar][ui:splitView]=false&uiSchema[appBar][ui:examplesDropdown]=false&uiSchema[appBar][ui:logoUrl]=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/main/ckb-logo.jpg&schemaUrl=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/1faaef7135c8190bba0a2d65cf51d7b311897c5c/json/pool_rpc_doc.json) RPC Module Pool for transaction memory pool. 
@@ -4616,7 +4616,7 @@ Response ``` ### Module `Stats` -- [👉 OpenRPC spec](http://playground.open-rpc.org/?uiSchema[appBar][ui:title]=CKB-Stats&uiSchema[appBar][ui:splitView]=false&uiSchema[appBar][ui:examplesDropdown]=false&uiSchema[appBar][ui:logoUrl]=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/main/ckb-logo.jpg&schemaUrl=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/5d696307edb59dfa198fb78800ae14588d4bafd8/json/stats_rpc_doc.json) +- [👉 OpenRPC spec](http://playground.open-rpc.org/?uiSchema[appBar][ui:title]=CKB-Stats&uiSchema[appBar][ui:splitView]=false&uiSchema[appBar][ui:examplesDropdown]=false&uiSchema[appBar][ui:logoUrl]=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/main/ckb-logo.jpg&schemaUrl=https://raw.githubusercontent.com/cryptape/ckb-rpc-resources/1faaef7135c8190bba0a2d65cf51d7b311897c5c/json/stats_rpc_doc.json) RPC Module Stats for getting various statistic data. @@ -6111,6 +6111,14 @@ The overall chain synchronization state of this local node. If this number is too high, it indicates that block download has stuck at some block. +* `tip_hash`: [`H256`](#type-h256) - The block hash of current tip block + +* `tip_number`: [`Uint64`](#type-uint64) - The block number of current tip block + +* `unverified_tip_hash`: [`H256`](#type-h256) - The block hash of current unverified tip block + +* `unverified_tip_number`: [`Uint64`](#type-uint64) - The block number of current unverified tip block + ### Type `Transaction` The transaction. @@ -6511,4 +6519,4 @@ For example, a cellbase transaction is not allowed in `send_transaction` RPC. ### ERROR `PoolRejectedRBF` (-1111): The transaction is rejected for RBF checking. ### ERROR `Indexer` -(-1200): The indexer error. +(-1200): The indexer error. 
\ No newline at end of file diff --git a/util/app-config/src/tests/export_import.bats b/util/app-config/src/tests/export_import.bats index 555ce26402..1c53da1f9d 100644 --- a/util/app-config/src/tests/export_import.bats +++ b/util/app-config/src/tests/export_import.bats @@ -13,7 +13,8 @@ function export { #@test } _import() { - bash -c "ckb import -C ${CKB_DIRNAME} ${TMP_DIR}/ckb*.json" + bash -c "ckb init -C ${TMP_DIR}/import" + bash -c "ckb import -C ${TMP_DIR}/import ${TMP_DIR}/ckb*.json" } function ckb_import { #@test @@ -27,4 +28,5 @@ setup_file() { teardown_file() { rm -f ${TMP_DIR}/ckb*.json + rm -rvf ${TMP_DIR}/import } diff --git a/util/app-config/src/tests/graceful_shutdown.bats b/util/app-config/src/tests/graceful_shutdown.bats index 17ac2661b3..368a8afd07 100644 --- a/util/app-config/src/tests/graceful_shutdown.bats +++ b/util/app-config/src/tests/graceful_shutdown.bats @@ -21,7 +21,7 @@ function ckb_graceful_shutdown { #@test [ "$status" -eq 0 ] assert_output --regexp "INFO ckb_bin::subcommand::run Trapped exit signal, exiting..." - assert_output --regexp "INFO ckb_chain::chain ChainService received exit signal, exit now" + assert_output --regexp "INFO ckb_chain::chain_service ChainService received exit signal, exit now" assert_output --regexp "INFO ckb_sync::synchronizer BlockDownload received exit signal, exit now" assert_output --regexp "INFO ckb_tx_pool::chunk_process TxPool chunk_command service received exit signal, exit now" assert_output --regexp "INFO ckb_tx_pool::service TxPool is saving, please wait..." 
@@ -29,7 +29,7 @@ function ckb_graceful_shutdown { #@test assert_output --regexp "INFO ckb_indexer::service Indexer received exit signal, exit now" assert_output --regexp "INFO ckb_notify NotifyService received exit signal, exit now" assert_output --regexp "INFO ckb_block_filter::filter BlockFilter received exit signal, exit now" - assert_output --regexp "INFO ckb_sync::types::header_map HeaderMap limit_memory received exit signal, exit now" + assert_output --regexp "INFO ckb_shared::types::header_map HeaderMap limit_memory received exit signal, exit now" assert_output --regexp "INFO ckb_network::network NetworkService receive exit signal, start shutdown..." assert_output --regexp "INFO ckb_network::network NetworkService shutdown now" assert_output --regexp "INFO ckb_tx_pool::process TxPool saved successfully" From ab07eae802be62540ba74754bbb7b821dbe563a2 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 26 Feb 2024 10:03:16 +0800 Subject: [PATCH 357/357] Release ckb-async-download rc0