diff --git a/Cargo.lock b/Cargo.lock index e21da639c..835a1bb30 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5022,6 +5022,8 @@ dependencies = [ "clap", "common", "crypto", + "csv", + "ctor", "directories", "file-rotate", "fs4 0.12.0", @@ -5032,11 +5034,13 @@ dependencies = [ "paste", "randomness", "rpc", + "rstest", "serde", "storage-lmdb", "subsystem", "tempfile", "test-rpc-functions", + "thiserror 1.0.69", "tokio", "toml 0.8.22", "utils", diff --git a/Cargo.toml b/Cargo.toml index c42867fbc..6bd181da2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -149,6 +149,7 @@ cfg-if = "1.0" chacha20poly1305 = "0.10" chrono = "0.4" clap = "4.5" +csv = "1.3" ctor = "0.2" criterion = "0.5" crossterm = "0.28" diff --git a/blockprod/src/lib.rs b/blockprod/src/lib.rs index 522040194..958196ee9 100644 --- a/blockprod/src/lib.rs +++ b/blockprod/src/lib.rs @@ -274,6 +274,7 @@ mod tests { max_db_commit_attempts: Default::default(), max_orphan_blocks: Default::default(), min_max_bootstrap_import_buffer_sizes: Default::default(), + allow_checkpoints_mismatch: Default::default(), }; let mempool_config = MempoolConfig::new(); diff --git a/build-tools/docker/build.py b/build-tools/docker/build.py index 04324cf3a..527c7b5ca 100644 --- a/build-tools/docker/build.py +++ b/build-tools/docker/build.py @@ -1,7 +1,12 @@ -import toml +import argparse import os +import pathlib import subprocess -import argparse +import toml + + +ROOT_DIR = pathlib.Path(__file__).resolve().parent.parent.parent +ROOT_CARGO_TOML = ROOT_DIR.joinpath("Cargo.toml") def get_cargo_version(cargo_toml_path): @@ -44,7 +49,7 @@ def build_docker_image(dockerfile_path, image_name, tags, num_jobs=None): try: # Run the command - subprocess.check_call(command, shell=True) + subprocess.check_call(command, shell=True, cwd=ROOT_DIR) print(f"Built {image_name} successfully (the tags are: {full_tags}).") except subprocess.CalledProcessError as error: print(f"Failed to build {image_name}: {error}") @@ -121,7 +126,7 @@ def main(): parser.add_argument('--local_tags', nargs='*', help='Additional tags to apply (these won\'t be pushed)', default=[]) args = parser.parse_args() - version = args.version if args.version else get_cargo_version("Cargo.toml") + version = args.version if args.version else get_cargo_version(ROOT_CARGO_TOML) # Note: the CI currently takes the version from the release tag, so it always starts with "v", # but the version from Cargo.toml doesn't have this prefix. 
version = version.removeprefix("v") diff --git a/build-tools/docker/example-mainnet/docker-compose.yml index ccce4bc9e..27571eb55 100644 --- a/build-tools/docker/example-mainnet/docker-compose.yml +++ b/build-tools/docker/example-mainnet/docker-compose.yml @@ -65,6 +65,7 @@ services: - node-daemon environment: <<: *ml-common-env + ML_API_SCANNER_DAEMON_NETWORK: mainnet ML_API_SCANNER_DAEMON_POSTGRES_HOST: api-postgres-db ML_API_SCANNER_DAEMON_POSTGRES_USER: $API_SERVER_POSTGRES_USER ML_API_SCANNER_DAEMON_POSTGRES_PASSWORD: $API_SERVER_POSTGRES_PASSWORD @@ -83,6 +84,7 @@ services: - node-daemon environment: <<: *ml-common-env + ML_API_WEB_SRV_NETWORK: mainnet ML_API_WEB_SRV_BIND_ADDRESS: 0.0.0.0:3000 ML_API_WEB_SRV_POSTGRES_HOST: api-postgres-db ML_API_WEB_SRV_POSTGRES_USER: $API_SERVER_POSTGRES_USER diff --git a/chainstate/src/config.rs index 5412603af..a68e091a5 100644 --- a/chainstate/src/config.rs +++ b/chainstate/src/config.rs @@ -38,17 +38,24 @@ make_config_setting!(MaxTipAge, Duration, Duration::from_secs(60 * 60 * 24)); pub struct ChainstateConfig { /// The number of maximum attempts to process a block. pub max_db_commit_attempts: MaxDbCommitAttempts, + /// The maximum capacity of the orphan blocks pool. pub max_orphan_blocks: MaxOrphanBlocks, + /// When importing bootstrap file, this controls the buffer sizes (min, max) /// (see bootstrap import function for more information) pub min_max_bootstrap_import_buffer_sizes: MinMaxBootstrapImportBufferSizes, + /// The initial block download is finished if the difference between the current time and the /// tip time is less than this value. pub max_tip_age: MaxTipAge, + /// If true, additional computationally-expensive consistency checks will be performed by /// the chainstate. The default value depends on the chain type. pub enable_heavy_checks: Option<bool>, + + /// If true, blocks and block headers will not be rejected if a checkpoint mismatch is detected. + pub allow_checkpoints_mismatch: Option<bool>, } impl ChainstateConfig { @@ -90,4 +97,8 @@ impl ChainstateConfig { ChainType::Regtest => true, } } + + pub fn checkpoints_mismatch_allowed(&self) -> bool { + self.allow_checkpoints_mismatch.unwrap_or(false) + } } diff --git a/chainstate/src/detail/ban_score.rs index 4e70c75a9..5023477cf 100644 --- a/chainstate/src/detail/ban_score.rs +++ b/chainstate/src/detail/ban_score.rs @@ -317,13 +317,13 @@ impl BanScore for CheckBlockError { CheckBlockError::MerkleRootCalculationFailed(_, _) => 100, CheckBlockError::BlockRewardMaturityError(err) => err.ban_score(), CheckBlockError::PropertyQueryError(_) => 100, - CheckBlockError::CheckpointMismatch(_, _) => 100, - CheckBlockError::ParentCheckpointMismatch(_, _, _) => 100, + CheckBlockError::CheckpointMismatch { .. } => 100, CheckBlockError::GetAncestorError(_) => 100, - CheckBlockError::AttemptedToAddBlockBeforeReorgLimit(_, _, _) => 100, + CheckBlockError::AttemptedToAddBlockBeforeReorgLimit { .. } => 100, CheckBlockError::EpochSealError(err) => err.ban_score(), CheckBlockError::InvalidParent { ..
} => 100, CheckBlockError::InMemoryReorgFailed(err) => err.ban_score(), + CheckBlockError::InvalidBlockAlreadyProcessed(_) => 100, } } } diff --git a/chainstate/src/detail/chainstateref/mod.rs index 63aa5ba39..392521cb9 100644 --- a/chainstate/src/detail/chainstateref/mod.rs +++ b/chainstate/src/detail/chainstateref/mod.rs @@ -76,7 +76,7 @@ pub use in_memory_reorg::InMemoryReorgError; pub struct ChainstateRef<'a, S, V> { chain_config: &'a ChainConfig, - _chainstate_config: &'a ChainstateConfig, + chainstate_config: &'a ChainstateConfig, tx_verification_strategy: &'a V, db_tx: S, time_getter: &'a TimeGetter, @@ -141,7 +141,7 @@ impl<'a, S: BlockchainStorageRead, V: TransactionVerificationStrategy> Chainstat ) -> Self { ChainstateRef { chain_config, - _chainstate_config: chainstate_config, + chainstate_config, db_tx, tx_verification_strategy, time_getter, @@ -157,7 +157,7 @@ impl<'a, S: BlockchainStorageRead, V: TransactionVerificationStrategy> Chainstat ) -> Self { ChainstateRef { chain_config, - _chainstate_config: chainstate_config, + chainstate_config, db_tx, tx_verification_strategy, time_getter, @@ -457,6 +457,34 @@ impl<'a, S: BlockchainStorageRead, V: TransactionVerificationStrategy> Chainstat Ok(result) } + fn enforce_checkpoint_impl( + &self, + height: BlockHeight, + expected: &Id<GenBlock>, + given: &Id<GenBlock>, + ) -> Result<(), CheckBlockError> { + if given != expected { + // Note: we only log the mismatch if we're going to ignore it (because if it's + // not ignored, we'll log the error anyway). + if self.chainstate_config.checkpoints_mismatch_allowed() { + log::warn!( + "Checkpoint mismatch at height {}, expected: {:x}, actual: {:x}", + height, + expected, + given, + ); + } else { + return Err(CheckBlockError::CheckpointMismatch { + height, + expected: *expected, + given: *given, + }); + } + } + + Ok(()) + } + // If the header height is at an exact checkpoint height, check that the block id matches the checkpoint id. // Return true if the header height is at an exact checkpoint height.
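+ // For example, given a checkpoint (1000 -> A): a header at height 1000 whose id is A passes, while a header with some other id B either fails with CheckpointMismatch or, when allow_checkpoints_mismatch is set, is accepted with only a warning logged.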
fn enforce_exact_checkpoint_assuming_height( @@ -464,15 +492,10 @@ impl<'a, S: BlockchainStorageRead, V: TransactionVerificationStrategy> Chainstat header: &SignedBlockHeader, header_height: BlockHeight, ) -> Result<bool, CheckBlockError> { - if let Some(e) = self.chain_config.height_checkpoints().checkpoint_at_height(&header_height) + if let Some(expected_id) = + self.chain_config.height_checkpoints().checkpoint_at_height(&header_height) { - let expected_id = Id::<Block>::new(e.to_hash()); - if expected_id != header.get_id() { - return Err(CheckBlockError::CheckpointMismatch( - expected_id, - header.get_id(), - )); - } + self.enforce_checkpoint_impl(header_height, expected_id, &header.get_id().into())?; Ok(true) } else { Ok(false) @@ -499,17 +522,13 @@ impl<'a, S: BlockchainStorageRead, V: TransactionVerificationStrategy> Chainstat let parent_checkpoint_block_index = self.get_ancestor(&prev_block_index, expected_checkpoint_height)?; let parent_checkpoint_id = parent_checkpoint_block_index.block_id(); - if parent_checkpoint_id != expected_checkpoint_id { - return Err(CheckBlockError::ParentCheckpointMismatch( - expected_checkpoint_height, - expected_checkpoint_id, - parent_checkpoint_id, - )); - } - + self.enforce_checkpoint_impl( + expected_checkpoint_height, + &expected_checkpoint_id, + &parent_checkpoint_id, + )?; Ok(()) } @@ -564,11 +583,11 @@ impl<'a, S: BlockchainStorageRead, V: TransactionVerificationStrategy> Chainstat if common_ancestor_height < min_allowed_height { let tip_block_height = self.get_best_block_index()?.block_height(); - return Err(CheckBlockError::AttemptedToAddBlockBeforeReorgLimit( + return Err(CheckBlockError::AttemptedToAddBlockBeforeReorgLimit { common_ancestor_height, tip_block_height, min_allowed_height, - )); + }); } Ok(()) } @@ -599,8 +618,45 @@ impl<'a, S: BlockchainStorageRead, V: TransactionVerificationStrategy> Chainstat Ok(parent_block_index) } + /// This function is intended to be used in check_block and check_block_header. + /// + /// Return true if the block already exists in the chainstate and has an "ok" status + /// with the validation stage CheckBlockOk or later. + /// If it has a non-"ok" status, return an error. + /// If the block is new, or if its validation stage is below CheckBlockOk (i.e. it's Unchecked), + /// return false. + fn skip_check_block_because_block_exists_and_is_checked( + &self, + block_id: &Id<Block>, + ) -> Result<bool, CheckBlockError> { + if let Some(block_index) = self.get_block_index(block_id)? { + let status = block_index.status(); + + if status.is_ok() { + let checked = status.last_valid_stage() >= BlockValidationStage::CheckBlockOk; + Ok(checked) + } else { + Err(CheckBlockError::InvalidBlockAlreadyProcessed(*block_id)) + } + } else { + Ok(false) + } + } + #[log_error] pub fn check_block_header(&self, header: &SignedBlockHeader) -> Result<(), CheckBlockError> { + let header = WithId::new(header); + if self.skip_check_block_because_block_exists_and_is_checked(&WithId::id(&header))?
{ + return Ok(()); + } + + self.check_block_header_impl(&header) + } + + fn check_block_header_impl( + &self, + header: &WithId<&SignedBlockHeader>, + ) -> Result<(), CheckBlockError> { let parent_block_index = self.check_block_parent(header)?; self.check_header_size(header)?; self.enforce_checkpoints(header)?; @@ -662,7 +718,7 @@ impl<'a, S: BlockchainStorageRead, V: TransactionVerificationStrategy> Chainstat ensure!( block_timestamp.as_duration_since_epoch() <= current_time_as_secs + max_future_offset, CheckBlockError::BlockFromTheFuture { - block_id: header.block_id(), + block_id: WithId::id(header), block_timestamp, current_time }, @@ -833,7 +889,14 @@ impl<'a, S: BlockchainStorageRead, V: TransactionVerificationStrategy> Chainstat #[log_error] pub fn check_block(&self, block: &WithId<Block>) -> Result<(), CheckBlockError> { - self.check_block_header(block.header())?; + let header_with_id = WithId::as_sub_obj(block); + if self + .skip_check_block_because_block_exists_and_is_checked(&WithId::id(&header_with_id))? + { + return Ok(()); + } + + self.check_block_header_impl(&header_with_id)?; self.check_block_size(block).map_err(CheckBlockError::BlockSizeError)?; diff --git a/chainstate/src/detail/error.rs index f9523b3fc..4d78d9206 100644 --- a/chainstate/src/detail/error.rs +++ b/chainstate/src/detail/error.rs @@ -159,14 +159,25 @@ pub enum CheckBlockError { InvalidBlockRewardOutputType(Id<Block>), #[error("Block reward maturity error: {0}")] BlockRewardMaturityError(#[from] tx_verifier::timelock_check::OutputMaturityError), - #[error("Checkpoint mismatch: expected {0} vs given {1}")] - CheckpointMismatch(Id<Block>, Id<Block>), - #[error("Parent checkpoint mismatch at height {0}: expected {1} vs given {2}")] - ParentCheckpointMismatch(BlockHeight, Id<GenBlock>, Id<GenBlock>), + #[error("Checkpoint mismatch at height {height}: expected {expected:x}, given {given:x}")] + CheckpointMismatch { + height: BlockHeight, + expected: Id<GenBlock>, + given: Id<GenBlock>, + }, #[error("CRITICAL: Failed to retrieve ancestor of submitted block: {0}")] GetAncestorError(#[from] GetAncestorError), - #[error("Attempted to add a block before reorg limit (attempted at height: {0} while current height is: {1} and min allowed is: {2})")] - AttemptedToAddBlockBeforeReorgLimit(BlockHeight, BlockHeight, BlockHeight), + #[error( + "Attempted to add a block before reorg limit (attempted at height: {} while current height is: {} and min allowed is: {})", + common_ancestor_height, + tip_block_height, + min_allowed_height + )] + AttemptedToAddBlockBeforeReorgLimit { + common_ancestor_height: BlockHeight, + tip_block_height: BlockHeight, + min_allowed_height: BlockHeight, + }, #[error("TransactionVerifier error: {0}")] TransactionVerifierError(#[from] TransactionVerifierStorageError), #[error("Error during sealing an epoch: {0}")] @@ -178,6 +189,8 @@ pub enum CheckBlockError { }, #[error("In-memory reorg failed: {0}")] InMemoryReorgFailed(#[from] InMemoryReorgError), + #[error("Block {0} has already been processed and marked as invalid")] + InvalidBlockAlreadyProcessed(Id<Block>), } #[derive(Error, Debug, PartialEq, Eq, Clone)] diff --git a/chainstate/src/detail/error_classification.rs index ad4326a84..7952d85bb 100644 --- a/chainstate/src/detail/error_classification.rs +++ b/chainstate/src/detail/error_classification.rs @@ -169,10 +169,12 @@ impl BlockProcessingErrorClassification for CheckBlockError { | CheckBlockError::ParentBlockMissing { ..
} | CheckBlockError::BlockTimeOrderInvalid(_, _) | CheckBlockError::InvalidBlockRewardOutputType(_) - | CheckBlockError::CheckpointMismatch(_, _) - | CheckBlockError::ParentCheckpointMismatch(_, _, _) - | CheckBlockError::AttemptedToAddBlockBeforeReorgLimit(_, _, _) - | CheckBlockError::InvalidParent { .. } => BlockProcessingErrorClass::BadBlock, + | CheckBlockError::CheckpointMismatch { .. } + | CheckBlockError::AttemptedToAddBlockBeforeReorgLimit { .. } + | CheckBlockError::InvalidParent { .. } + | CheckBlockError::InvalidBlockAlreadyProcessed(_) => { + BlockProcessingErrorClass::BadBlock + } CheckBlockError::BlockFromTheFuture { .. } => { BlockProcessingErrorClass::TemporarilyBadBlock diff --git a/chainstate/src/detail/mod.rs index a4702e004..33d0178e2 100644 --- a/chainstate/src/detail/mod.rs +++ b/chainstate/src/detail/mod.rs @@ -94,6 +94,11 @@ type ChainstateEventHandler = EventHandler<ChainstateEvent>; pub type OrphanErrorHandler = dyn Fn(&BlockError) + Send + Sync; +/// A tracing target that either forces full block ids to be printed where they're normally +/// printed in the abbreviated form, or just makes block ids be printed where they normally +/// wouldn't be. +pub const CHAINSTATE_TRACING_TARGET_VERBOSE_BLOCK_IDS: &str = "chainstate_verbose_block_ids"; + #[must_use] pub struct Chainstate<S, V> { chain_config: Arc<ChainConfig>, @@ -261,15 +266,13 @@ impl Chainstate Ok(()) } - fn broadcast_new_tip_event(&mut self, new_block_index: &Option<BlockIndex>) { - if let Some(new_block_index) = new_block_index { - let new_height = new_block_index.block_height(); - let new_id = *new_block_index.block_id(); - let event = ChainstateEvent::NewTip(new_id, new_height); + fn broadcast_new_tip_event(&mut self, new_block_index: &BlockIndex) { + let new_height = new_block_index.block_height(); + let new_id = *new_block_index.block_id(); + let event = ChainstateEvent::NewTip(new_id, new_height); - self.rpc_events.broadcast(&event); - self.subsystem_events.broadcast(event); - } + self.rpc_events.broadcast(&event); + self.subsystem_events.broadcast(event); } /// Create a read-write transaction, call `main_action` on it and commit.
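Aside: a minimal sketch of the consumer side of this event, based only on the `ChainstateEvent::NewTip(id, height)` shape visible in this diff; the subscription plumbing and the logging macro are assumptions, not something the diff shows:

```rust
use chainstate::ChainstateEvent;

// Hypothetical handler, mirroring how the mempool reacts to the same event below.
fn handle_chainstate_event(event: ChainstateEvent) {
    match event {
        ChainstateEvent::NewTip(block_id, block_height) => {
            // The id/height pair is taken from the new tip's BlockIndex by
            // broadcast_new_tip_event before being broadcast to subscribers.
            log::debug!("New block tip: {block_id:x} at height {block_height}");
        }
    }
}
```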
@@ -609,9 +612,9 @@ impl Chainstate None => result, }; - self.broadcast_new_tip_event(&result); + if let Some(bi) = &result { + self.broadcast_new_tip_event(bi); - if let Some(ref bi) = result { let compact_target = match bi.block_header().consensus_data() { common::chain::block::ConsensusData::None => Compact::from(Uint256::ZERO), common::chain::block::ConsensusData::PoW(data) => data.bits(), @@ -632,6 +635,11 @@ impl Chainstate self.update_initial_block_download_flag() .map_err(BlockError::BestBlockIdQueryError)?; + } else { + tracing::debug!( + target: CHAINSTATE_TRACING_TARGET_VERBOSE_BLOCK_IDS, + "Stale block received: {block_id}" + ); } Ok(result) diff --git a/chainstate/src/interface/chainstate_interface_impl.rs b/chainstate/src/interface/chainstate_interface_impl.rs index c58953c25..95998ad41 100644 --- a/chainstate/src/interface/chainstate_interface_impl.rs +++ b/chainstate/src/interface/chainstate_interface_impl.rs @@ -23,7 +23,7 @@ use crate::{ bootstrap::{export_bootstrap_stream, import_bootstrap_stream}, calculate_median_time_past, tx_verification_strategy::TransactionVerificationStrategy, - BlockSource, OrphanBlocksRef, + BlockSource, OrphanBlocksRef, CHAINSTATE_TRACING_TARGET_VERBOSE_BLOCK_IDS, }, ChainInfo, ChainstateConfig, ChainstateError, ChainstateEvent, ChainstateInterface, Locator, NonZeroPoolBalances, @@ -71,7 +71,20 @@ where self.chainstate.subscribe_to_event_broadcast() } - #[tracing::instrument(skip_all, fields(block_id = %block.get_id()))] + // Note: in this and some other functions below (in particular, in those that are called from + // p2p when processing blocks coming from peers) we add an additional DEBUG span that prints + // the block via `format!("{:x}")`. This is because the other span prints the id via Display + // (due to the '%' sigil), in which case it is shortened, e.g. "778b…b100". + // Always printing the full id would clutter the log, so we don't want to do that. + // So we add an additional span for the cases when the full id is needed. + // Also note that we add the extra span first, but in the output it will be printed after + // the normal one. 
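+ // For example, assuming a directive-based log filter in the style of tracing_subscriber's + // EnvFilter (an assumption, not something shown in this change), the verbose spans could be + // enabled with a filter like `info,chainstate_verbose_block_ids=debug`.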
+ #[tracing::instrument( + skip_all, level = tracing::Level::DEBUG, name = "", + fields(id = format!("{:x}", block.get_id())), + target = CHAINSTATE_TRACING_TARGET_VERBOSE_BLOCK_IDS + )] + #[tracing::instrument(skip_all, fields(id = %block.get_id()))] fn process_block( &mut self, block: Block, @@ -82,23 +95,34 @@ where .map_err(ChainstateError::ProcessBlockError) } - #[tracing::instrument(skip_all, fields(block_id = %block_id))] + #[tracing::instrument(skip_all, fields(id = %block_id))] fn invalidate_block(&mut self, block_id: &Id<Block>) -> Result<(), ChainstateError> { self.chainstate .invalidate_block(block_id) .map_err(ChainstateError::BlockInvalidatorError) } - #[tracing::instrument(skip_all, fields(block_id = %block_id))] + #[tracing::instrument(skip_all, fields(id = %block_id))] fn reset_block_failure_flags(&mut self, block_id: &Id<Block>) -> Result<(), ChainstateError> { BlockInvalidator::new(&mut self.chainstate) .reset_block_failure_flags(block_id) .map_err(ChainstateError::BlockInvalidatorError) } + #[tracing::instrument( + skip_all, level = tracing::Level::DEBUG, name = "", + fields(first_id = + if let Some(first_header) = headers.first() { + format!("{:x}", first_header.get_id()) + } else { + "None".to_owned() + } + ), + target = CHAINSTATE_TRACING_TARGET_VERBOSE_BLOCK_IDS + )] #[tracing::instrument( skip_all, - fields(first_block_id = %headers.first().map(|header| header.get_id()).as_displayable()) + fields(first_id = %headers.first().map(|header| header.get_id()).as_displayable()) )] fn preliminary_headers_check( &self, @@ -109,7 +133,12 @@ where .map_err(ChainstateError::ProcessBlockError) } - #[tracing::instrument(skip_all, fields(block_id = %block.get_id()))] + #[tracing::instrument( + skip_all, level = tracing::Level::DEBUG, name = "", + fields(id = format!("{:x}", block.get_id())), + target = CHAINSTATE_TRACING_TARGET_VERBOSE_BLOCK_IDS + )] + #[tracing::instrument(skip_all, fields(id = %block.get_id()))] fn preliminary_block_check(&self, block: Block) -> Result<Block, ChainstateError> { let block = BlockChecker::new(&self.chainstate) .preliminary_block_check(block.into()) @@ -126,7 +155,7 @@ where .map_err(ChainstateError::FailedToReadProperty) } - #[tracing::instrument(skip_all, fields(block_id = %block_id))] + #[tracing::instrument(skip_all, fields(id = %block_id))] fn is_block_in_main_chain(&self, block_id: &Id<GenBlock>) -> Result<bool, ChainstateError> { self.chainstate .query() @@ -144,7 +173,7 @@ where .map_err(ChainstateError::FailedToReadProperty) } - #[tracing::instrument(skip_all, fields(block_id = %block_id))] + #[tracing::instrument(skip_all, fields(id = %block_id))] fn get_block_height_in_main_chain( &self, block_id: &Id<GenBlock>, @@ -168,7 +197,7 @@ where .map_err(ChainstateError::FailedToReadProperty) } - #[tracing::instrument(skip_all, fields(block_id = %block_id))] + #[tracing::instrument(skip_all, fields(id = %block_id))] fn get_block(&self, block_id: Id<Block>) -> Result<Option<Block>, ChainstateError> { self.chainstate .query() @@ -190,7 +219,7 @@ where .map_err(ChainstateError::FailedToReadProperty) } - #[tracing::instrument(skip_all, fields(block_id = %block_id))] + #[tracing::instrument(skip_all, fields(id = %block_id))] fn get_block_header( &self, block_id: Id<Block>, @@ -311,7 +340,7 @@ where .map_err(ChainstateError::FailedToReadProperty) } - #[tracing::instrument(skip_all, fields(block_id = %block_id))] + #[tracing::instrument(skip_all, fields(id = %block_id))] fn get_block_index_for_persisted_block( &self, block_id: &Id<Block>, @@ -323,7 +352,7 @@ where .map_err(ChainstateError::FailedToReadProperty) } - #[tracing::instrument(skip_all, fields(block_id = 
%block_id))] + #[tracing::instrument(skip_all, fields(id = %block_id))] fn get_block_index_for_any_block( &self, block_id: &Id<Block>, @@ -389,7 +418,7 @@ where Ok(calculate_median_time_past(&dbtx, starting_block)) } - #[tracing::instrument(skip_all, fields(block_id = %block_id))] + #[tracing::instrument(skip_all, fields(id = %block_id))] fn is_already_an_orphan(&self, block_id: &Id<Block>) -> bool { self.chainstate.orphan_blocks_pool().is_already_an_orphan(block_id) } @@ -401,7 +430,7 @@ where #[tracing::instrument( skip_all, - fields(block_id = %block_index.block_id(), ancestor_height = %ancestor_height) + fields(id = %block_index.block_id(), ancestor_height = %ancestor_height) )] fn get_ancestor( &self, @@ -420,8 +449,8 @@ where #[tracing::instrument( skip_all, fields( - first_block_id = %first_block_index.block_id(), - second_block_id = %second_block_index.block_id() + first_id = %first_block_index.block_id(), + second_id = %second_block_index.block_id() ) )] fn last_common_ancestor( @@ -461,7 +490,7 @@ where } } - #[tracing::instrument(skip_all, fields(block_id = %block_index.block_id()))] + #[tracing::instrument(skip_all, fields(id = %block_index.block_id()))] fn get_block_reward( &self, block_index: &BlockIndex, diff --git a/chainstate/src/interface/chainstate_interface_impl_delegation.rs index 3ad9f8e14..b5742ec68 100644 --- a/chainstate/src/interface/chainstate_interface_impl_delegation.rs +++ b/chainstate/src/interface/chainstate_interface_impl_delegation.rs @@ -480,6 +480,7 @@ mod tests { min_max_bootstrap_import_buffer_sizes: Default::default(), max_tip_age: Default::default(), enable_heavy_checks: Some(true), + allow_checkpoints_mismatch: Default::default(), }; let chainstate_storage = Store::new_empty().unwrap(); diff --git a/chainstate/test-suite/src/tests/syncing_tests.rs index 0431e32c9..9a8fb6c50 100644 --- a/chainstate/test-suite/src/tests/syncing_tests.rs +++ b/chainstate/test-suite/src/tests/syncing_tests.rs @@ -17,20 +17,32 @@ use std::{collections::BTreeMap, iter, num::NonZeroUsize, time::Duration}; use rstest::rstest; -use chainstate::{BlockSource, ChainstateConfig, ChainstateError, CheckBlockError}; +use chainstate::{ + BlockError, BlockSource, ChainstateConfig, ChainstateError, CheckBlockError, + CheckBlockTransactionsError, +}; use chainstate_test_framework::TestFramework; -use chainstate_types::PropertyQueryError; +use chainstate_types::{BlockStatus, BlockValidationStage, PropertyQueryError}; use common::{ chain::{ self, block::{signed_block_header::SignedBlockHeader, timestamp::BlockTimestamp}, - GenBlock, + Block, GenBlock, }, primitives::{BlockDistance, BlockHeight, Id, Idable, H256}, Uint256, }; +use logging::log; use randomness::Rng; -use test_utils::random::{make_seedable_rng, Seed}; +use test_utils::{ + assert_matches, + random::{make_seedable_rng, Seed}, +}; +use tx_verifier::CheckTransactionError; + +use crate::tests::helpers::{ + block_creation_helpers::build_block_with_empty_tx, block_status_helpers::get_block_status, +}; #[rstest] #[trace] @@ -228,8 +240,8 @@ fn get_headers_branching_chains(#[case] seed: Seed) { let mut tf = TestFramework::builder(&mut rng) .with_chain_config( - common::chain::config::Builder::new(common::chain::config::ChainType::Regtest) - .consensus_upgrades(common::chain::NetUpgrades::unit_tests()) + chain::config::Builder::new(chain::config::ChainType::Regtest) + .consensus_upgrades(chain::NetUpgrades::unit_tests()) 
.max_depth_for_reorg(BlockDistance::new(5000)) .build(), ) @@ -432,8 +444,8 @@ fn try_reorg_past_limit(#[case] seed: Seed) { let mut tf = TestFramework::builder(&mut rng) .with_chain_config( - common::chain::config::Builder::new(common::chain::config::ChainType::Regtest) - .consensus_upgrades(common::chain::NetUpgrades::unit_tests()) + chain::config::Builder::new(chain::config::ChainType::Regtest) + .consensus_upgrades(chain::NetUpgrades::unit_tests()) .max_depth_for_reorg(BlockDistance::new(1)) .build(), ) @@ -444,12 +456,12 @@ fn try_reorg_past_limit(#[case] seed: Seed) { let res = tf.create_chain(&common_block_id, 1, &mut rng).unwrap_err(); assert_eq!( res, - ChainstateError::ProcessBlockError(chainstate::BlockError::CheckBlockFailed( - chainstate::CheckBlockError::AttemptedToAddBlockBeforeReorgLimit( - BlockHeight::new(0), - BlockHeight::new(2), - BlockHeight::new(1) - ) + ChainstateError::ProcessBlockError(BlockError::CheckBlockFailed( + CheckBlockError::AttemptedToAddBlockBeforeReorgLimit { + common_ancestor_height: BlockHeight::new(0), + tip_block_height: BlockHeight::new(2), + min_allowed_height: BlockHeight::new(1), + } )) ) }); @@ -458,14 +470,14 @@ fn try_reorg_past_limit(#[case] seed: Seed) { #[rstest] #[trace] #[case(Seed::from_entropy())] -fn otry_reorg_past_limit_in_fork(#[case] seed: Seed) { +fn try_reorg_past_limit_in_fork(#[case] seed: Seed) { utils::concurrency::model(move || { let mut rng = make_seedable_rng(seed); let mut tf = TestFramework::builder(&mut rng) .with_chain_config( - common::chain::config::Builder::new(common::chain::config::ChainType::Regtest) - .consensus_upgrades(common::chain::NetUpgrades::unit_tests()) + chain::config::Builder::new(chain::config::ChainType::Regtest) + .consensus_upgrades(chain::NetUpgrades::unit_tests()) .max_depth_for_reorg(BlockDistance::new(2)) .build(), ) @@ -482,12 +494,12 @@ fn otry_reorg_past_limit_in_fork(#[case] seed: Seed) { let res = tf.create_chain(&fork_tip_id, 1, &mut rng).unwrap_err(); assert_eq!( res, - ChainstateError::ProcessBlockError(chainstate::BlockError::CheckBlockFailed( - chainstate::CheckBlockError::AttemptedToAddBlockBeforeReorgLimit( - BlockHeight::new(0), - BlockHeight::new(3), - BlockHeight::new(1) - ) + ChainstateError::ProcessBlockError(BlockError::CheckBlockFailed( + CheckBlockError::AttemptedToAddBlockBeforeReorgLimit { + common_ancestor_height: BlockHeight::new(0), + tip_block_height: BlockHeight::new(3), + min_allowed_height: BlockHeight::new(1), + } )) ) }); @@ -640,6 +652,7 @@ fn initial_block_download(#[case] seed: Seed) { min_max_bootstrap_import_buffer_sizes: Default::default(), max_tip_age: Duration::from_secs(1).into(), enable_heavy_checks: Some(true), + allow_checkpoints_mismatch: Default::default(), }) .with_initial_time_since_genesis(2) .build(); @@ -695,8 +708,8 @@ fn header_check_for_orphan(#[case] seed: Seed) { .unwrap_err(); assert_eq!( err, - ChainstateError::ProcessBlockError(chainstate::BlockError::CheckBlockFailed( - chainstate::CheckBlockError::ParentBlockMissing { + ChainstateError::ProcessBlockError(BlockError::CheckBlockFailed( + CheckBlockError::ParentBlockMissing { block_id: block.get_id(), parent_block_id: block.prev_block_id(), }, @@ -706,8 +719,8 @@ fn header_check_for_orphan(#[case] seed: Seed) { let err = tf.chainstate.preliminary_block_check(block.clone()).unwrap_err(); assert_eq!( err, - ChainstateError::ProcessBlockError(chainstate::BlockError::CheckBlockFailed( - chainstate::CheckBlockError::ParentBlockMissing { + 
ChainstateError::ProcessBlockError(BlockError::CheckBlockFailed( + CheckBlockError::ParentBlockMissing { block_id: block.get_id(), parent_block_id: block.prev_block_id(), }, @@ -717,9 +730,7 @@ fn header_check_for_orphan(#[case] seed: Seed) { let err = tf.chainstate.process_block(block, BlockSource::Peer).unwrap_err(); assert_eq!( err, - ChainstateError::ProcessBlockError( - chainstate::BlockError::PrevBlockNotFoundForNewBlock(block_id) - ) + ChainstateError::ProcessBlockError(BlockError::PrevBlockNotFoundForNewBlock(block_id)) ); }); } @@ -798,8 +809,9 @@ fn headers_check_with_checkpoints(#[case] seed: Seed) { .enumerate() .map(|(idx, header)| (BlockHeight::new(idx as u64 + 2), header.block_id().into())) .collect::<BTreeMap<_, _>>(); + let bad_checkpoint_height = BlockHeight::new(5); let good_block_id = Id::new(Uint256::from_u64(12345).into()); - let bad_block_id = checkpoints.insert(BlockHeight::new(5), good_block_id).unwrap(); + let bad_block_id = checkpoints.insert(bad_checkpoint_height, good_block_id).unwrap(); let mut tf = TestFramework::builder(&mut rng) .with_chain_config( @@ -813,11 +825,12 @@ fn headers_check_with_checkpoints(#[case] seed: Seed) { let err = tf.chainstate.preliminary_headers_check(&block_headers).unwrap_err(); assert_eq!( err, - ChainstateError::ProcessBlockError(chainstate::BlockError::CheckBlockFailed( - CheckBlockError::CheckpointMismatch( - tf.to_chain_block_id(&good_block_id), - tf.to_chain_block_id(&bad_block_id) - ) + ChainstateError::ProcessBlockError(BlockError::CheckBlockFailed( + CheckBlockError::CheckpointMismatch { + height: bad_checkpoint_height, + expected: good_block_id, + given: bad_block_id + } )) ); } @@ -826,9 +839,10 @@ fn headers_check_with_checkpoints(#[case] seed: Seed) { { let good_block_id = Id::new(Uint256::from_u64(12345).into()); let bad_block_id = block_headers[5].block_id().into(); + let bad_checkpoint_height = BlockHeight::new(7); let checkpoints = [ (BlockHeight::new(3), block_headers[1].block_id().into()), - (BlockHeight::new(7), good_block_id), + (bad_checkpoint_height, good_block_id), ] .into_iter() .collect::<BTreeMap<_, _>>(); @@ -845,11 +859,12 @@ fn headers_check_with_checkpoints(#[case] seed: Seed) { let err = tf.chainstate.preliminary_headers_check(&block_headers).unwrap_err(); assert_eq!( err, - ChainstateError::ProcessBlockError(chainstate::BlockError::CheckBlockFailed( - CheckBlockError::CheckpointMismatch( - tf.to_chain_block_id(&good_block_id), - tf.to_chain_block_id(&bad_block_id) - ) + ChainstateError::ProcessBlockError(BlockError::CheckBlockFailed( + CheckBlockError::CheckpointMismatch { + height: bad_checkpoint_height, + expected: good_block_id, + given: bad_block_id + } )) ); } @@ -942,3 +957,269 @@ fn get_block_ids_as_checkpoints(#[case] seed: Seed) { assert_eq!(result, []); }); } + +// Check that preliminary_block_check and preliminary_headers_check take into account whether +// the block already exists: +// 1) If the block has an "ok" status and the validation stage is CheckBlockOk or later, they succeed. +// Important special case: the block height is below the reorg limit. +// 2) If the block has a non-"ok" status, they fail with a specific error. +// For consistency, we'll be checking all combinations - ok/non-ok above/below the reorg limit.
+#[rstest] +#[trace] +#[case(Seed::from_entropy())] +fn preliminary_checks_for_existing_block(#[case] seed: Seed) { + utils::concurrency::model(move || { + let mut rng = make_seedable_rng(seed); + let max_reorg_limit: usize = rng.gen_range(10..20); + + let mut tf = TestFramework::builder(&mut rng) + .with_chain_config( + chain::config::create_unit_test_config_builder() + .max_depth_for_reorg(BlockDistance::new(max_reorg_limit as i64)) + .build(), + ) + .build(); + let genesis_id = tf.genesis().get_id().into(); + + // This will be the full chain length. + let chain_len = rng.gen_range(max_reorg_limit + 1..max_reorg_limit * 2); + // This is the lowest-height parent block, whose children will still be considered + // above the reorg limit after the full chain is constructed. + let first_parent_height_above_reorg_limit = chain_len - max_reorg_limit; + // The height of the parent block, whose children will be considered below the + // reorg limit (after the full chain is constructed). + let parent_height_below_reorg_limit = + rng.gen_range(0..first_parent_height_above_reorg_limit); + + // Create the first part of the chain - until height_below_reorg_limit. + let parent_below_reorg_limit_id = if parent_height_below_reorg_limit != 0 { + tf.create_chain(&genesis_id, parent_height_below_reorg_limit, &mut rng).unwrap() + } else { + genesis_id + }; + // Sanity check + assert_eq!( + tf.best_block_height(), + BlockHeight::new(parent_height_below_reorg_limit as u64) + ); + + let assert_empty_tx_error = |err: &ChainstateError| { + assert_matches!( + err, + ChainstateError::ProcessBlockError(BlockError::CheckBlockFailed( + CheckBlockError::CheckTransactionFailed( + CheckBlockTransactionsError::CheckTransactionError( + CheckTransactionError::EmptyInputsInTransaction(_) + ) + ) + )) + ); + }; + + // Create a bad block at a height that will become below the reorg limit. + let bad_block_below_reorg_limit = + build_block_with_empty_tx(&mut rng, &mut tf, &parent_below_reorg_limit_id); + let bad_block_below_reorg_limit_id = bad_block_below_reorg_limit.get_id(); + let err = tf + .process_block(bad_block_below_reorg_limit.clone(), BlockSource::Local) + .unwrap_err(); + assert_empty_tx_error(&err); + // Sanity check + { + let status = get_block_status(&tf, &bad_block_below_reorg_limit_id); + assert!(!status.is_ok()); + } + + // Now create a good block at a height that will become below the reorg limit. + // We want it to be on a stale chain, so that its BlockValidationStage is not FullyChecked. + + // So first we extend the mainchain. + let latest_mainchain_block_id = + tf.create_chain(&parent_below_reorg_limit_id, 1, &mut rng).unwrap(); + + // And now we can create the good stale block + let good_block_below_reorg_limit_id = + tf.create_chain(&parent_below_reorg_limit_id, 1, &mut rng).unwrap(); + let good_block_below_reorg_limit_id = + tf.to_chain_block_id(&good_block_below_reorg_limit_id); + let good_block_below_reorg_limit = tf.block(good_block_below_reorg_limit_id); + // Some sanity checks + { + assert_eq!(tf.best_block_id(), latest_mainchain_block_id); + + let status = tf.block_index(&good_block_below_reorg_limit_id).status(); + assert!(status.is_ok()); + assert_eq!( + status.last_valid_stage(), + BlockValidationStage::CheckBlockOk + ); + } + + // Create the rest of the chain + let latest_mainchain_block_id = tf + .create_chain( + &latest_mainchain_block_id, + // -1 because we've created an extra block above. 
+ chain_len - parent_height_below_reorg_limit - 1, + &mut rng, + ) + .unwrap(); + // Sanity check + assert_eq!(tf.best_block_height(), BlockHeight::new(chain_len as u64)); + + // Expected error when adding a child on top of parent_below_reorg_limit. + let block_reorg_limit_error = ChainstateError::ProcessBlockError( + BlockError::CheckBlockFailed(CheckBlockError::AttemptedToAddBlockBeforeReorgLimit { + common_ancestor_height: BlockHeight::new(parent_height_below_reorg_limit as u64), + tip_block_height: BlockHeight::new(chain_len as u64), + min_allowed_height: BlockHeight::new(first_parent_height_above_reorg_limit as u64), + }), + ); + + // Sanity checks - parent_below_reorg_limit is indeed below the reorg limit, + // and first_parent_height_above_reorg_limit is above it + { + let err = tf.create_chain(&parent_below_reorg_limit_id, 1, &mut rng).unwrap_err(); + assert_eq!(err, block_reorg_limit_error); + + let first_parent_above_reorg_limit_id = + tf.block_id(first_parent_height_above_reorg_limit as u64); + tf.create_chain(&first_parent_above_reorg_limit_id, 1, &mut rng).unwrap(); + } + + // Note: the height is below the tip height, since we want to get a child of this block. + let parent_height_above_reorg_limit = + rng.gen_range(first_parent_height_above_reorg_limit..chain_len); + let parent_above_reorg_limit_id = tf.block_id(parent_height_above_reorg_limit as u64); + + // Create a bad block at a height above the reorg limit. + let bad_block_above_reorg_limit = + build_block_with_empty_tx(&mut rng, &mut tf, &parent_above_reorg_limit_id); + let bad_block_above_reorg_limit_id = bad_block_above_reorg_limit.get_id(); + let err = tf + .process_block(bad_block_above_reorg_limit.clone(), BlockSource::Local) + .unwrap_err(); + assert_empty_tx_error(&err); + // Sanity check + { + let status = get_block_status(&tf, &bad_block_above_reorg_limit_id); + assert!(!status.is_ok()); + } + + // Create a good block at a height above the reorg limit. + let good_block_above_reorg_limit_id = + tf.create_chain(&parent_above_reorg_limit_id, 1, &mut rng).unwrap(); + let good_block_above_reorg_limit_id = + tf.to_chain_block_id(&good_block_above_reorg_limit_id); + let good_block_above_reorg_limit = tf.block(good_block_above_reorg_limit_id); + // Some sanity checks + { + assert_eq!(tf.best_block_id(), latest_mainchain_block_id); + + let status = tf.block_index(&good_block_above_reorg_limit_id).status(); + assert!(status.is_ok()); + assert_eq!( + status.last_valid_stage(), + BlockValidationStage::CheckBlockOk + ); + } + + // Currently our good blocks are at the CheckBlockOk validation stage. + // Optionally, force set it to FullyChecked, the expected results remain the same. 
+ if rng.gen_bool(0.5) { + log::debug!("Resetting good block statuses to fully checked"); + + tf.set_block_status( + &good_block_below_reorg_limit_id, + BlockStatus::new_fully_checked(), + ); + tf.set_block_status( + &good_block_above_reorg_limit_id, + BlockStatus::new_fully_checked(), + ); + } + + // Now we can actually do the checks + + // Good block below the reorg limit + tf.chainstate + .preliminary_headers_check(std::slice::from_ref(good_block_below_reorg_limit.header())) + .unwrap(); + let _: Block = tf.chainstate.preliminary_block_check(good_block_below_reorg_limit).unwrap(); + + // Good block above the reorg limit + tf.chainstate + .preliminary_headers_check(std::slice::from_ref(good_block_above_reorg_limit.header())) + .unwrap(); + let _: Block = tf.chainstate.preliminary_block_check(good_block_above_reorg_limit).unwrap(); + + // Bad block below the reorg limit + let err = tf + .chainstate + .preliminary_headers_check(std::slice::from_ref(bad_block_below_reorg_limit.header())) + .unwrap_err(); + assert_eq!( + err, + ChainstateError::ProcessBlockError(BlockError::CheckBlockFailed( + CheckBlockError::InvalidBlockAlreadyProcessed(bad_block_below_reorg_limit_id) + )) + ); + let err = tf + .chainstate + .preliminary_block_check(bad_block_below_reorg_limit.clone()) + .unwrap_err(); + assert_eq!( + err, + ChainstateError::ProcessBlockError(BlockError::CheckBlockFailed( + CheckBlockError::InvalidBlockAlreadyProcessed(bad_block_below_reorg_limit_id) + )) + ); + + // Bad block above the reorg limit + let err = tf + .chainstate + .preliminary_headers_check(std::slice::from_ref(bad_block_above_reorg_limit.header())) + .unwrap_err(); + assert_eq!( + err, + ChainstateError::ProcessBlockError(BlockError::CheckBlockFailed( + CheckBlockError::InvalidBlockAlreadyProcessed(bad_block_above_reorg_limit_id) + )) + ); + let err = tf + .chainstate + .preliminary_block_check(bad_block_above_reorg_limit.clone()) + .unwrap_err(); + assert_eq!( + err, + ChainstateError::ProcessBlockError(BlockError::CheckBlockFailed( + CheckBlockError::InvalidBlockAlreadyProcessed(bad_block_above_reorg_limit_id) + )) + ); + + // Now reset the statuses of the bad blocks, so that they're now "ok" and "Unchecked". + // The preliminary_xxx_check functions should now do the actual check. + + tf.set_block_status(&bad_block_below_reorg_limit_id, BlockStatus::new()); + tf.set_block_status(&bad_block_above_reorg_limit_id, BlockStatus::new()); + + // Bad block below the reorg limit - this now produces the error about reorg limit violation. + let err = tf + .chainstate + .preliminary_headers_check(std::slice::from_ref(bad_block_below_reorg_limit.header())) + .unwrap_err(); + assert_eq!(err, block_reorg_limit_error); + let err = tf.chainstate.preliminary_block_check(bad_block_below_reorg_limit).unwrap_err(); + assert_eq!(err, block_reorg_limit_error); + + // Bad block above the reorg limit - here preliminary_headers_check will succeed (because + // there is nothing wrong at the header level), and preliminary_block_check will fail + // with the error about block's empty tx. 
+ tf.chainstate + .preliminary_headers_check(std::slice::from_ref(bad_block_above_reorg_limit.header())) + .unwrap(); + let err = tf.chainstate.preliminary_block_check(bad_block_above_reorg_limit).unwrap_err(); + assert_empty_tx_error(&err); + }); +} diff --git a/common/src/chain/block/mod.rs index 0f273d1f1..81652da01 100644 --- a/common/src/chain/block/mod.rs +++ b/common/src/chain/block/mod.rs @@ -38,7 +38,10 @@ use utils::ensure; use crate::{ chain::block::{block_size::BlockSize, block_v1::BlockV1, timestamp::BlockTimestamp}, - primitives::{id::WithId, Id, Idable, VersionTag, H256}, + primitives::{ + id::{HasSubObjWithSameId, WithId}, + Id, Idable, VersionTag, H256, + }, }; use self::{ @@ -212,6 +215,12 @@ impl Idable for Block { } } +impl HasSubObjWithSameId<SignedBlockHeader> for Block { + fn get_sub_obj(&self) -> &SignedBlockHeader { + self.header() + } +} + impl PartialEq for WithId<Block> { fn eq(&self, other: &Self) -> bool { self.get_id() == other.get_id() diff --git a/common/src/chain/config/mod.rs index 0ab57c10d..e5c368b30 100644 --- a/common/src/chain/config/mod.rs +++ b/common/src/chain/config/mod.rs @@ -63,12 +63,10 @@ use super::{ TokensFeeVersion, }; -use self::{ - checkpoints::Checkpoints, - emission_schedule::{CoinUnit, DEFAULT_INITIAL_MINT}, -}; +use self::emission_schedule::{CoinUnit, DEFAULT_INITIAL_MINT}; pub use builder::Builder; +pub use checkpoints::Checkpoints; pub use emission_schedule::{EmissionSchedule, EmissionScheduleFn, EmissionScheduleTabular}; const DEFAULT_MAX_FUTURE_BLOCK_TIME_OFFSET_V1: Duration = Duration::from_secs(120); diff --git a/common/src/primitives/id/mod.rs index 88e8f0fe8..2072cd0a9 100644 --- a/common/src/primitives/id/mod.rs +++ b/common/src/primitives/id/mod.rs @@ -253,6 +253,18 @@ impl Idable for &T { } } +/// Implementing this trait for some type `T` means that: +/// 1) `T` has a sub-object of type `SubObj`. +/// 2) Id of `SubObj` is the same as id of `T`. +/// +/// Example: `Block` contains `SignedBlockHeader` and the block id is the same as its header's. +pub trait HasSubObjWithSameId<SubObj>: Idable +where + SubObj: Idable<Tag = <Self as Idable>::Tag>, +{ + fn get_sub_obj(&self) -> &SubObj; +} + // we use a cropping stream (64 => 32) because // we want a hash result to H256 and a byte array // of the hash to be identical, while benefiting diff --git a/common/src/primitives/id/with_id.rs index ef92d42dc..a8f3bde96 100644 --- a/common/src/primitives/id/with_id.rs +++ b/common/src/primitives/id/with_id.rs @@ -15,9 +15,10 @@ //! ID caching mechanism -use super::{Id, Idable}; use serialization::{WrapperTypeDecode, WrapperTypeEncode}; +use super::{HasSubObjWithSameId, Id, Idable}; + /// An object together with its pre-calculated ID. /// /// This only allows immutable access to the underlying object to prevent it from going out of sync @@ -43,6 +44,18 @@ impl WithId { pub fn take(this: Self) -> T { this.object } + + /// Convert this `WithId<T>` into a `WithId<&SubObj>` for a sub-object, without recalculating the id.
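+ /// + /// Usage sketch (assuming `block: &WithId<Block>`): `WithId::as_sub_obj(block)` yields a + /// `WithId<&SignedBlockHeader>` that reuses the block's already-computed id.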
+ pub fn as_sub_obj<SubObj>(this: &Self) -> WithId<&SubObj> + where + T: HasSubObjWithSameId<SubObj>, + SubObj: Idable<Tag = <T as Idable>::Tag>, + { + WithId::<&SubObj> { + id: this.id, + object: this.object.get_sub_obj(), + } + } } impl WithId { diff --git a/mempool/src/pool/mod.rs index b58f047c7..e50fefa67 100644 --- a/mempool/src/pool/mod.rs +++ b/mempool/src/pool/mod.rs @@ -220,7 +220,7 @@ impl Mempool { &mut self, evt: ChainstateEvent, ) -> Result<(), ChainstateEventError> { - log::debug!("mempool: Processing chainstate event {evt:?}"); + log::debug!("Processing chainstate event {evt:?}"); match evt { ChainstateEvent::NewTip(block_id, height) => self.on_new_tip(block_id, height)?, }; @@ -228,7 +228,7 @@ impl Mempool { } fn on_new_tip(&mut self, block_id: Id<Block>, height: BlockHeight) -> Result<(), ReorgError> { - log::info!("New block tip: {block_id:?} at height {height}"); + log::debug!("New block tip: {block_id:x} at height {height}"); let mut finalizer = TxFinalizer::new( &mut self.orphans, diff --git a/node-daemon/docs/RPC.md index e5882a82a..f5a1f7755 100644 --- a/node-daemon/docs/RPC.md +++ b/node-daemon/docs/RPC.md @@ -1089,6 +1089,9 @@ Returns: "ping_min": EITHER OF 1) number 2) null, + "last_tip_block_time": EITHER OF + 1) number + 2) null, }, .. ] ``` diff --git a/node-lib/Cargo.toml index 3c32847be..7a75a7c90 100644 --- a/node-lib/Cargo.toml +++ b/node-lib/Cargo.toml @@ -24,17 +24,21 @@ utils-networking = { path = "../utils/networking" } anyhow.workspace = true clap = { workspace = true, features = ["derive"] } +csv.workspace = true +directories.workspace = true file-rotate.workspace = true +fs4.workspace = true jsonrpsee = { workspace = true, features = ["macros"] } -tokio = { workspace = true, default-features = false } +paste.workspace = true serde = { workspace = true, features = ["derive"] } +thiserror.workspace = true +tokio = { workspace = true, default-features = false } toml.workspace = true -directories.workspace = true -paste.workspace = true -fs4.workspace = true [dev-dependencies] crypto = { path = "../crypto" } randomness = { path = "../randomness" } +ctor.workspace = true +rstest.workspace = true tempfile.workspace = true diff --git a/node-lib/src/checkpoints_from_file.rs new file mode 100644 index 000000000..1f84b3850 --- /dev/null +++ b/node-lib/src/checkpoints_from_file.rs @@ -0,0 +1,225 @@ +// Copyright (c) 2025 RBB S.r.l +// opensource@mintlayer.org +// SPDX-License-Identifier: MIT +// Licensed under the MIT License; +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://github.com/mintlayer/mintlayer-core/blob/master/LICENSE +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
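+ +//! Reading custom block checkpoints from a CSV file. +//! +//! The record format, as exercised by the tests below, is two fields per line, +//! `<block height>, <block id as hex>`, e.g. +//! `500, C91C3DB7DFDCC296010546EC38F48A557D035DD0B34260BD6C5174709F8A7EB0`.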
+ + use std::{ + collections::BTreeMap, + path::{Path, PathBuf}, + str::FromStr as _, +}; + +use common::{ + chain::GenBlock, + primitives::{BlockHeight, Id, H256}, +}; +use utils::ensure; + +pub fn read_checkpoints_from_csv_file( + csv_file: &Path, +) -> Result<BTreeMap<BlockHeight, Id<GenBlock>>, CheckpointsFromCsvReadError> { + let file = + std::fs::File::open(csv_file).map_err(|err| CheckpointsFromCsvReadError::FileOpenError { + file: csv_file.to_owned(), + error: err.to_string(), + })?; + + read_checkpoints_from_csv(file) +} + +pub fn read_checkpoints_from_csv( + csv: impl std::io::Read, +) -> Result<BTreeMap<BlockHeight, Id<GenBlock>>, CheckpointsFromCsvReadError> { + // Note: flexible(true) means that lines with different field counts are allowed. + // Our field count is fixed at 2; we only enable this to simplify the tests, where + // we check for specific errors. + let mut reader = csv::ReaderBuilder::new().has_headers(false).flexible(true).from_reader(csv); + let expected_fields_count = 2; + + let mut checkpoints = BTreeMap::new(); + + for (record_idx, result) in reader.records().enumerate() { + let record = result.map_err(|err| CheckpointsFromCsvReadError::RecordReadError { + error: err.to_string(), + })?; + + ensure!( + record.len() == expected_fields_count, + CheckpointsFromCsvReadError::UnexpectedFieldsCount { + record_idx, + actual_fields_count: record.len(), + expected_fields_count + } + ); + + let block_height = record + .get(0) + .expect("field is known to be present") + .parse::<u64>() + .map_err(|_| CheckpointsFromCsvReadError::BadBlockHeight { record_idx })?; + + let block_id = H256::from_str(record.get(1).expect("field is known to be present")) + .map_err(|_| CheckpointsFromCsvReadError::BadBlockId { record_idx })?; + + let already_existed = + checkpoints.insert(BlockHeight::new(block_height), Id::new(block_id)).is_some(); + ensure!( + !already_existed, + CheckpointsFromCsvReadError::DuplicateCheckpoint { + height: block_height + } + ); + } + + Ok(checkpoints) +} + +#[derive(thiserror::Error, Clone, Debug, Eq, PartialEq)] +pub enum CheckpointsFromCsvReadError { + #[error("Cannot open file '{file}': {error}")] + FileOpenError { file: PathBuf, error: String }, + + #[error("Error reading a record: {error}")] + RecordReadError { error: String }, + + #[error("Unexpected fields count in record {record_idx}: expected {expected_fields_count}, got {actual_fields_count}")] + UnexpectedFieldsCount { + record_idx: usize, + actual_fields_count: usize, + expected_fields_count: usize, + }, + + #[error("Bad block height in record {record_idx}")] + BadBlockHeight { record_idx: usize }, + + #[error("Bad block id in record {record_idx}")] + BadBlockId { record_idx: usize }, + + #[error("Duplicate checkpoint at height {height}")] + DuplicateCheckpoint { height: u64 }, +} + +#[cfg(test)] +mod tests { + use utils::concatln; + + use super::*; + + #[test] + fn correct_read() { + let mk_id = |id_str| Id::new(H256::from_str(id_str).unwrap()); + let data = concatln!( + "500, C91C3DB7DFDCC296010546EC38F48A557D035DD0B34260BD6C5174709F8A7EB0", + "1000, 1DCFB22374DA757882EEF26AF2B2D3ABDD1A4887C744346F6413C8D0B51DEBDF", + "1500, 3F81279C128FF628C8F4055DF89173DDAA6597DAB7636E8B12CA386E7864DFE9" + ); + let expected_checkpoints = BTreeMap::from([ + ( + BlockHeight::new(500), + mk_id("C91C3DB7DFDCC296010546EC38F48A557D035DD0B34260BD6C5174709F8A7EB0"), + ), + ( + BlockHeight::new(1000), + mk_id("1DCFB22374DA757882EEF26AF2B2D3ABDD1A4887C744346F6413C8D0B51DEBDF"), + ), + ( + BlockHeight::new(1500), + mk_id("3F81279C128FF628C8F4055DF89173DDAA6597DAB7636E8B12CA386E7864DFE9"), + ), + ]);
+ + let checkpoints = read_checkpoints_from_csv(data.as_bytes()).unwrap(); + assert_eq!(checkpoints, expected_checkpoints); + + // Now write the csv to file and read it via `read_checkpoints_from_csv_file`. + let temp_file = tempfile::NamedTempFile::new().unwrap(); + std::fs::write(temp_file.path(), data.as_bytes()).unwrap(); + let checkpoints_from_file = read_checkpoints_from_csv_file(temp_file.path()).unwrap(); + assert_eq!(checkpoints_from_file, expected_checkpoints); + } + + #[test] + fn bad_fields_count() { + let data1 = concatln!( + "500, C91C3DB7DFDCC296010546EC38F48A557D035DD0B34260BD6C5174709F8A7EB0", + "1000, 1DCFB22374DA757882EEF26AF2B2D3ABDD1A4887C744346F6413C8D0B51DEBDF, 111", + "1500, 3F81279C128FF628C8F4055DF89173DDAA6597DAB7636E8B12CA386E7864DFE9" + ); + let err = read_checkpoints_from_csv(data1.as_bytes()).unwrap_err(); + assert_eq!( + err, + CheckpointsFromCsvReadError::UnexpectedFieldsCount { + record_idx: 1, + actual_fields_count: 3, + expected_fields_count: 2 + } + ); + + let data1 = concatln!( + "500, C91C3DB7DFDCC296010546EC38F48A557D035DD0B34260BD6C5174709F8A7EB0", + "1000", + "1500, 3F81279C128FF628C8F4055DF89173DDAA6597DAB7636E8B12CA386E7864DFE9" + ); + let err = read_checkpoints_from_csv(data1.as_bytes()).unwrap_err(); + assert_eq!( + err, + CheckpointsFromCsvReadError::UnexpectedFieldsCount { + record_idx: 1, + actual_fields_count: 1, + expected_fields_count: 2 + } + ); + } + + #[test] + fn bad_block_height() { + let data = concatln!( + "500, C91C3DB7DFDCC296010546EC38F48A557D035DD0B34260BD6C5174709F8A7EB0", + "X000, 1DCFB22374DA757882EEF26AF2B2D3ABDD1A4887C744346F6413C8D0B51DEBDF", + "1500, 3F81279C128FF628C8F4055DF89173DDAA6597DAB7636E8B12CA386E7864DFE9" + ); + let err = read_checkpoints_from_csv(data.as_bytes()).unwrap_err(); + assert_eq!( + err, + CheckpointsFromCsvReadError::BadBlockHeight { record_idx: 1 } + ); + } + + #[test] + fn bad_block_id() { + let data = concatln!( + "500, C91C3DB7DFDCC296010546EC38F48A557D035DD0B34260BD6C5174709F8A7EB0", + "1000, XDCFB22374DA757882EEF26AF2B2D3ABDD1A4887C744346F6413C8D0B51DEBDF", + "1500, 3F81279C128FF628C8F4055DF89173DDAA6597DAB7636E8B12CA386E7864DFE9" + ); + let err = read_checkpoints_from_csv(data.as_bytes()).unwrap_err(); + assert_eq!( + err, + CheckpointsFromCsvReadError::BadBlockId { record_idx: 1 } + ); + } + + #[test] + fn duplicate_checkpoint() { + let data = concatln!( + "500, C91C3DB7DFDCC296010546EC38F48A557D035DD0B34260BD6C5174709F8A7EB0", + "500, 1DCFB22374DA757882EEF26AF2B2D3ABDD1A4887C744346F6413C8D0B51DEBDF", + "1500, 3F81279C128FF628C8F4055DF89173DDAA6597DAB7636E8B12CA386E7864DFE9" + ); + let err = read_checkpoints_from_csv(data.as_bytes()).unwrap_err(); + assert_eq!( + err, + CheckpointsFromCsvReadError::DuplicateCheckpoint { height: 500 } + ); + } +} diff --git a/node-lib/src/config_files/chainstate/mod.rs index d5809e2f4..f40b7552d 100644 --- a/node-lib/src/config_files/chainstate/mod.rs +++ b/node-lib/src/config_files/chainstate/mod.rs @@ -24,18 +24,25 @@ use chainstate::ChainstateConfig; pub struct ChainstateConfigFile { /// The number of maximum attempts to process a block. pub max_db_commit_attempts: Option<usize>, + /// The maximum capacity of the orphan blocks pool. pub max_orphan_blocks: Option<usize>, + /// When importing bootstrap file, this controls the buffer sizes (min, max) /// (see bootstrap import function for more information) pub min_max_bootstrap_import_buffer_sizes: Option<(usize, usize)>, +
/// /// The initial block download is finished if the difference between the current time and the /// tip time is less than this value. pub max_tip_age: Option, + /// If true, additional computationally-expensive consistency checks will be performed by the chainstate. pub enable_heavy_checks: Option, + + /// If true, blocks and block headers will not be rejected if checkpoints mismatch is detected. + pub allow_checkpoints_mismatch: Option, } impl From for ChainstateConfig { @@ -46,6 +53,7 @@ impl From for ChainstateConfig { min_max_bootstrap_import_buffer_sizes, max_tip_age, enable_heavy_checks, + allow_checkpoints_mismatch, } = config_file; ChainstateConfig { @@ -54,6 +62,7 @@ impl From for ChainstateConfig { min_max_bootstrap_import_buffer_sizes: min_max_bootstrap_import_buffer_sizes.into(), max_tip_age: max_tip_age.map(Duration::from_secs).into(), enable_heavy_checks, + allow_checkpoints_mismatch, } } } diff --git a/node-lib/src/config_files/mod.rs b/node-lib/src/config_files/mod.rs index 70228abb8..3cff99202 100644 --- a/node-lib/src/config_files/mod.rs +++ b/node-lib/src/config_files/mod.rs @@ -152,6 +152,7 @@ fn chainstate_config( min_max_bootstrap_import_buffer_sizes, max_tip_age, enable_heavy_checks, + allow_checkpoints_mismatch, } = chainstate_config; let storage_backend = options.storage_backend.clone().unwrap_or(storage_backend); @@ -159,6 +160,8 @@ fn chainstate_config( let max_orphan_blocks = options.max_orphan_blocks.or(max_orphan_blocks); let max_tip_age = options.max_tip_age.or(max_tip_age); let enable_heavy_checks = options.enable_chainstate_heavy_checks.or(enable_heavy_checks); + let allow_checkpoints_mismatch = + options.allow_checkpoints_mismatch.or(allow_checkpoints_mismatch); let chainstate_config = ChainstateConfigFile { max_db_commit_attempts, @@ -166,6 +169,7 @@ fn chainstate_config( min_max_bootstrap_import_buffer_sizes, max_tip_age, enable_heavy_checks, + allow_checkpoints_mismatch, }; ChainstateLauncherConfigFile { storage_backend, diff --git a/node-lib/src/lib.rs b/node-lib/src/lib.rs index ef289c161..1250332c7 100644 --- a/node-lib/src/lib.rs +++ b/node-lib/src/lib.rs @@ -15,6 +15,7 @@ //! 
 
     let chainstate_config = ChainstateConfigFile {
         max_db_commit_attempts,
@@ -166,6 +169,7 @@ fn chainstate_config(
         min_max_bootstrap_import_buffer_sizes,
         max_tip_age,
         enable_heavy_checks,
+        allow_checkpoints_mismatch,
     };
 
     ChainstateLauncherConfigFile {
         storage_backend,
diff --git a/node-lib/src/lib.rs b/node-lib/src/lib.rs
index ef289c161..1250332c7 100644
--- a/node-lib/src/lib.rs
+++ b/node-lib/src/lib.rs
@@ -15,6 +15,7 @@
 //!
 //! Top-level node runner as a library
 
+mod checkpoints_from_file;
 mod config_files;
 mod mock_time;
 pub mod node_controller;
@@ -43,3 +44,11 @@ pub fn default_rpc_config(chain_config: &ChainConfig) -> RpcConfigFile {
 pub fn init_logging(_opts: &Options) {
     logging::init_logging()
 }
+
+#[cfg(test)]
+mod tests {
+    #[ctor::ctor]
+    fn init() {
+        logging::init_logging();
+    }
+}
diff --git a/node-lib/src/options.rs b/node-lib/src/options.rs
index 2a71c8865..e946704a7 100644
--- a/node-lib/src/options.rs
+++ b/node-lib/src/options.rs
@@ -19,22 +19,28 @@ use std::{
     ffi::OsString,
     net::{IpAddr, SocketAddr},
     num::NonZeroU64,
-    path::PathBuf,
+    path::{Path, PathBuf},
 };
 
 use clap::{Args, Parser, Subcommand};
 
 use chainstate_launcher::ChainConfig;
-use common::chain::config::{
-    regtest_options::{regtest_chain_config, ChainConfigOptions},
-    ChainType,
+use common::chain::{
+    self,
+    config::{
+        regtest_options::{regtest_chain_config_builder, ChainConfigOptions},
+        ChainType,
+    },
 };
 use utils::{
     clap_utils, default_data_dir::default_data_dir_common, root_user::ForceRunAsRootOptions,
 };
 use utils_networking::IpOrSocketAddress;
 
-use crate::config_files::{NodeTypeConfigFile, StorageBackendConfigFile};
+use crate::{
+    checkpoints_from_file::read_checkpoints_from_csv_file,
+    config_files::{NodeTypeConfigFile, StorageBackendConfigFile},
+};
 
 const CONFIG_NAME: &str = "config.toml";
@@ -147,15 +153,25 @@ impl Command {
     }
 
     pub fn create_chain_config(&self) -> anyhow::Result<ChainConfig> {
-        let chain_config = match self {
-            Command::Mainnet(_) => common::chain::config::create_mainnet(),
-            Command::Testnet(_) => common::chain::config::create_testnet(),
-            Command::Regtest(regtest_options) => {
-                regtest_chain_config(&regtest_options.chain_config)?
+        let (mut chain_config_builder, run_options) = match self {
+            Command::Mainnet(run_options) => {
+                (chain::config::Builder::new(ChainType::Mainnet), run_options)
+            }
+            Command::Testnet(run_options) => {
+                (chain::config::Builder::new(ChainType::Testnet), run_options)
             }
+            Command::Regtest(regtest_options) => (
+                regtest_chain_config_builder(&regtest_options.chain_config)?,
+                &regtest_options.run_options,
+            ),
         };
-        Ok(chain_config)
+
+        if let Some(csv_file) = &run_options.custom_checkpoints_csv_file {
+            let checkpoints = read_checkpoints_from_csv_file(Path::new(csv_file))?;
+            chain_config_builder = chain_config_builder.checkpoints(checkpoints);
+        }
+
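+        // For illustration, assuming a CSV file with records of the form
+        //   500, C91C3DB7DFDCC296010546EC38F48A557D035DD0B34260BD6C5174709F8A7EB0
+        // the checkpoints can be overridden at startup via the hidden flag, e.g.
+        //   <node binary> testnet --custom-checkpoints-csv-file /path/to/checkpoints.csv
+        // after which the loaded checkpoints replace the predefined ones in the
+        // resulting ChainConfig.
+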
+        Ok(chain_config_builder.build())
     }
 
     pub fn chain_type(&self) -> ChainType {
@@ -343,8 +359,140 @@ pub struct RunOptions {
     /// Defaults to true for regtest and false in other cases.
     #[clap(long, value_name = "VAL")]
     pub enable_chainstate_heavy_checks: Option<bool>,
+
+    /// If true, blocks and block headers will not be rejected if a checkpoint mismatch is detected.
+    #[clap(long, action = clap::ArgAction::SetTrue, hide = true)]
+    pub allow_checkpoints_mismatch: Option<bool>,
+
+    /// Path to a CSV file with custom checkpoints that must be used instead of the predefined ones.
+    #[clap(long, hide = true)]
+    pub custom_checkpoints_csv_file: Option<PathBuf>,
 }
 
 pub fn default_data_dir(chain_type: ChainType) -> PathBuf {
     default_data_dir_common().join(chain_type.name())
 }
+
+#[cfg(test)]
+mod tests {
+    use std::{collections::BTreeMap, str::FromStr as _};
+
+    use rstest::rstest;
+
+    use common::{
+        chain::config::Checkpoints,
+        primitives::{BlockHeight, Id, Idable as _, H256},
+    };
+    use utils::concatln;
+
+    use super::*;
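+
+    // Note: rstest's `#[values]` attribute below parameterizes the test over the
+    // chain type, so the test body runs once for each of Mainnet, Testnet and Regtest.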
+ mk_id("CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC"), + ), + ]), + genesis_id, + ) + .unwrap(); + + let temp_file = tempfile::NamedTempFile::new().unwrap(); + std::fs::write(temp_file.path(), data.as_bytes()).unwrap(); + + let run_options = make_run_options(Some(temp_file.path().to_path_buf())); + let cmd = make_cmd(run_options); + let chain_config = cmd.create_chain_config().unwrap(); + assert_eq!( + chain_config.height_checkpoints(), + &expected_loaded_checkpoints + ); + } + + // Default checkpoints + { + let cmd = make_cmd(make_run_options(None)); + let chain_config = cmd.create_chain_config().unwrap(); + assert_eq!(chain_config.height_checkpoints(), &default_checkpoints); + } + } +} diff --git a/node-lib/tests/cli.rs b/node-lib/tests/cli.rs index 0ed1303aa..fea328037 100644 --- a/node-lib/tests/cli.rs +++ b/node-lib/tests/cli.rs @@ -123,6 +123,7 @@ fn read_config_override_values() { let rpc_cookie_file = "cookie_file"; let min_tx_relay_fee_rate = 321; let enable_chainstate_heavy_checks = true; + let allow_checkpoints_mismatch = true; let options = RunOptions { blockprod_min_peers_to_produce_blocks: Some(blockprod_min_peers_to_produce_blocks), @@ -161,34 +162,38 @@ fn read_config_override_values() { min_tx_relay_fee_rate: Some(min_tx_relay_fee_rate), force_allow_run_as_root_outer: Default::default(), enable_chainstate_heavy_checks: Some(enable_chainstate_heavy_checks), + allow_checkpoints_mismatch: Some(allow_checkpoints_mismatch), + // Note: there is no correspondence to this option inside NodeConfigFile; + // the contents of the csv file will become part of ChainConfig. + custom_checkpoints_csv_file: Some("foo.csv".to_owned().into()), }; let config = NodeConfigFile::read(&chain_config, &config_path, &options).unwrap(); assert_eq!( - config.blockprod.clone().unwrap().min_peers_to_produce_blocks, + config.blockprod.as_ref().unwrap().min_peers_to_produce_blocks, Some(blockprod_min_peers_to_produce_blocks), ); assert_eq!( - config.blockprod.clone().unwrap().skip_ibd_check, + config.blockprod.as_ref().unwrap().skip_ibd_check, Some(blockprod_skip_ibd_check) ); assert_eq!( - config.blockprod.clone().unwrap().use_current_time_if_non_pos, + config.blockprod.as_ref().unwrap().use_current_time_if_non_pos, Some(blockprod_use_current_time_if_non_pos) ); assert_eq!( - config.chainstate.clone().unwrap().chainstate_config.max_db_commit_attempts, + config.chainstate.as_ref().unwrap().chainstate_config.max_db_commit_attempts, Some(max_db_commit_attempts) ); assert_eq!( - config.chainstate.clone().unwrap().chainstate_config.max_orphan_blocks, + config.chainstate.as_ref().unwrap().chainstate_config.max_orphan_blocks, Some(max_orphan_blocks) ); assert_eq!( - config.chainstate.clone().unwrap().chainstate_config.max_tip_age, + config.chainstate.as_ref().unwrap().chainstate_config.max_tip_age, Some(max_tip_age) ); @@ -198,69 +203,74 @@ fn read_config_override_values() { ); assert_eq!( - config.chainstate.clone().unwrap().chainstate_config.enable_heavy_checks, + config.chainstate.as_ref().unwrap().chainstate_config.enable_heavy_checks, Some(enable_chainstate_heavy_checks) ); assert_eq!( - config.p2p.clone().unwrap().networking_enabled, + config.chainstate.as_ref().unwrap().chainstate_config.allow_checkpoints_mismatch, + Some(allow_checkpoints_mismatch) + ); + + assert_eq!( + config.p2p.as_ref().unwrap().networking_enabled, Some(p2p_networking_enabled) ); assert_eq!( - config.p2p.clone().unwrap().bind_addresses, + config.p2p.as_ref().unwrap().bind_addresses, Some(vec!(p2p_bind_addr)) ); 
     assert_eq!(
-        config.blockprod.clone().unwrap().skip_ibd_check,
+        config.blockprod.as_ref().unwrap().skip_ibd_check,
         Some(blockprod_skip_ibd_check)
     );
     assert_eq!(
-        config.blockprod.clone().unwrap().use_current_time_if_non_pos,
+        config.blockprod.as_ref().unwrap().use_current_time_if_non_pos,
         Some(blockprod_use_current_time_if_non_pos)
     );
     assert_eq!(
-        config.chainstate.clone().unwrap().chainstate_config.max_db_commit_attempts,
+        config.chainstate.as_ref().unwrap().chainstate_config.max_db_commit_attempts,
        Some(max_db_commit_attempts)
     );
     assert_eq!(
-        config.chainstate.clone().unwrap().chainstate_config.max_orphan_blocks,
+        config.chainstate.as_ref().unwrap().chainstate_config.max_orphan_blocks,
         Some(max_orphan_blocks)
     );
     assert_eq!(
-        config.chainstate.clone().unwrap().chainstate_config.max_tip_age,
+        config.chainstate.as_ref().unwrap().chainstate_config.max_tip_age,
         Some(max_tip_age)
     );
@@ -198,69 +203,74 @@ fn read_config_override_values() {
     );
 
     assert_eq!(
-        config.chainstate.clone().unwrap().chainstate_config.enable_heavy_checks,
+        config.chainstate.as_ref().unwrap().chainstate_config.enable_heavy_checks,
         Some(enable_chainstate_heavy_checks)
     );
 
     assert_eq!(
-        config.p2p.clone().unwrap().networking_enabled,
+        config.chainstate.as_ref().unwrap().chainstate_config.allow_checkpoints_mismatch,
+        Some(allow_checkpoints_mismatch)
+    );
+
+    assert_eq!(
+        config.p2p.as_ref().unwrap().networking_enabled,
         Some(p2p_networking_enabled)
     );
     assert_eq!(
-        config.p2p.clone().unwrap().bind_addresses,
+        config.p2p.as_ref().unwrap().bind_addresses,
         Some(vec!(p2p_bind_addr))
     );
     assert_eq!(
-        config.p2p.clone().unwrap().socks5_proxy,
+        config.p2p.as_ref().unwrap().socks5_proxy,
         Some(p2p_socks5_proxy.to_owned())
     );
     assert_eq!(
-        config.p2p.clone().unwrap().disable_noise,
+        config.p2p.as_ref().unwrap().disable_noise,
         Some(p2p_disable_noise)
     );
     assert_eq!(
-        config.p2p.clone().unwrap().boot_nodes,
+        config.p2p.as_ref().unwrap().boot_nodes,
         Some(vec!(p2p_boot_node))
     );
     assert_eq!(
-        config.p2p.clone().unwrap().reserved_nodes,
+        config.p2p.as_ref().unwrap().reserved_nodes,
         Some(vec!(p2p_reserved_node))
     );
     assert_eq!(
-        config.p2p.clone().unwrap().max_inbound_connections,
+        config.p2p.as_ref().unwrap().max_inbound_connections,
         Some(p2p_max_inbound_connections)
     );
     assert_eq!(
-        config.p2p.clone().unwrap().discouragement_threshold,
+        config.p2p.as_ref().unwrap().discouragement_threshold,
         Some(p2p_discouragement_threshold)
     );
     assert_eq!(
-        config.p2p.clone().unwrap().discouragement_duration,
+        config.p2p.as_ref().unwrap().discouragement_duration,
         Some(p2p_discouragement_duration)
     );
     assert_eq!(
-        config.p2p.clone().unwrap().outbound_connection_timeout,
+        config.p2p.as_ref().unwrap().outbound_connection_timeout,
         Some(p2p_timeout)
     );
     assert_eq!(
-        config.p2p.clone().unwrap().ping_check_period,
+        config.p2p.as_ref().unwrap().ping_check_period,
         Some(p2p_ping_check_period)
     );
     assert_eq!(
-        config.p2p.clone().unwrap().ping_timeout,
+        config.p2p.as_ref().unwrap().ping_timeout,
         Some(p2p_ping_timeout)
     );
     assert_eq!(
-        config.p2p.clone().unwrap().sync_stalling_timeout,
+        config.p2p.as_ref().unwrap().sync_stalling_timeout,
         Some(p2p_sync_stalling_timeout)
     );
     assert_eq!(
-        config.p2p.clone().unwrap().max_clock_diff,
+        config.p2p.as_ref().unwrap().max_clock_diff,
         Some(p2p_max_clock_diff)
     );
-    assert_eq!(config.p2p.clone().unwrap().node_type, Some(node_type));
+    assert_eq!(config.p2p.as_ref().unwrap().node_type, Some(node_type));
     assert_eq!(
-        config.p2p.clone().unwrap().force_dns_query_if_no_global_addresses_known,
+        config.p2p.as_ref().unwrap().force_dns_query_if_no_global_addresses_known,
         Some(p2p_force_dns_query_if_no_global_addresses_known)
     );
diff --git a/p2p/src/disconnection_reason.rs b/p2p/src/disconnection_reason.rs
index 8b3d67b0d..1a1baf311 100644
--- a/p2p/src/disconnection_reason.rs
+++ b/p2p/src/disconnection_reason.rs
@@ -28,6 +28,7 @@ use crate::{
 ///
 /// Note: we derive `thiserror::Error` here just for the convenience of implementing `Display`.
 /// But conceptually this enum is not an error and it's not supposed to be used with `Result`.
+// TODO: use `derive_more::Display` instead of `thiserror::Error`.
 #[derive(Error, Debug, Clone, PartialEq, Eq)]
 pub enum DisconnectionReason {
     #[error("Your address is banned")]
@@ -57,7 +58,7 @@ pub enum DisconnectionReason {
         remote_time: Time,
         accepted_peer_time: std::ops::RangeInclusive<Time>