diff --git a/Cargo.lock b/Cargo.lock index fdc2aa985..531ff0a7e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2557,7 +2557,7 @@ dependencies = [ [[package]] name = "integritee-cli" -version = "0.15.5" +version = "0.15.6" dependencies = [ "array-bytes 6.1.0", "base58", @@ -2610,7 +2610,7 @@ dependencies = [ [[package]] name = "integritee-service" -version = "0.15.5" +version = "0.15.6" dependencies = [ "anyhow", "async-trait", diff --git a/cli/Cargo.toml b/cli/Cargo.toml index f583a8bdd..e72e9c16e 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "integritee-cli" -version = "0.15.5" +version = "0.15.6" authors = ["Integritee AG "] edition = "2021" diff --git a/core-primitives/settings/src/lib.rs b/core-primitives/settings/src/lib.rs index 9d8abbd81..84e543fc9 100644 --- a/core-primitives/settings/src/lib.rs +++ b/core-primitives/settings/src/lib.rs @@ -37,7 +37,7 @@ pub mod files { pub static SHIELDING_KEY_FILE: &str = "enclave-shielding-pubkey.json"; pub static SIGNING_KEY_FILE: &str = "enclave-signing-pubkey.bin"; /// sidechain database path - pub static SIDECHAIN_STORAGE_PATH: &str = "sidechain_db"; + pub static SIDECHAIN_BLOCKS_DB_STORAGE_PATH: &str = "sidechain_db"; pub static SIDECHAIN_PURGE_INTERVAL: u64 = 7200; // purge sidechain every .. s pub static SIDECHAIN_PURGE_LIMIT: u64 = 100; // keep the last.. 
sidechainblocks when purging diff --git a/enclave-runtime/Cargo.lock b/enclave-runtime/Cargo.lock index aceaf1905..cf9049bcd 100644 --- a/enclave-runtime/Cargo.lock +++ b/enclave-runtime/Cargo.lock @@ -771,7 +771,7 @@ dependencies = [ [[package]] name = "enclave-runtime" -version = "0.15.5" +version = "0.15.6" dependencies = [ "array-bytes 6.2.2", "cid", diff --git a/enclave-runtime/Cargo.toml b/enclave-runtime/Cargo.toml index cf277a263..97bafd23c 100644 --- a/enclave-runtime/Cargo.toml +++ b/enclave-runtime/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "enclave-runtime" -version = "0.15.5" +version = "0.15.6" authors = ["Integritee AG "] edition = "2021" diff --git a/service/Cargo.toml b/service/Cargo.toml index 8e104b693..00195d575 100644 --- a/service/Cargo.toml +++ b/service/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "integritee-service" -version = "0.15.5" +version = "0.15.6" authors = ["Integritee AG "] build = "build.rs" edition = "2021" diff --git a/service/src/config.rs b/service/src/config.rs index c8219d0fd..99d1d8a42 100644 --- a/service/src/config.rs +++ b/service/src/config.rs @@ -202,6 +202,12 @@ impl Config { pub fn try_parse_untrusted_http_server_port(&self) -> Option { self.untrusted_http_port.parse::().ok() } + + pub fn with_test_data_dir(&self) -> Self { + let mut new = self.clone(); + new.data_dir.push("test"); + new + } } impl From<&ArgMatches<'_>> for Config { diff --git a/service/src/main_impl.rs b/service/src/main_impl.rs index a486a87c8..bffa918c6 100644 --- a/service/src/main_impl.rs +++ b/service/src/main_impl.rs @@ -85,6 +85,7 @@ use ita_parentchain_interface::{ }; use itc_parentchain::primitives::ParentchainId; use itp_node_api::api_client::ChainApi; +use itp_settings::files::SHARDS_PATH; use itp_types::parentchain::{AccountId, Balance, Index}; use sp_core::crypto::{AccountId32, Ss58Codec}; use sp_keyring::AccountKeyring; @@ -148,7 +149,10 @@ pub(crate) fn main() { let clean_reset = matches.is_present("clean-reset"); if clean_reset { - 
crate::setup::purge_files_from_dir(config.data_dir()).unwrap(); + println!("[+] Performing a clean reset of the worker"); + setup::purge_integritee_lcdb_unless_protected(config.data_dir()).unwrap(); + setup::purge_target_a_lcdb_unless_protected(config.data_dir()).unwrap(); + setup::purge_target_b_lcdb_unless_protected(config.data_dir()).unwrap(); } // build the entire dependency tree @@ -227,11 +231,18 @@ pub(crate) fn main() { }; if let Some(run_config) = config.run_config() { - let shard = extract_shard(run_config.shard(), enclave.as_ref()); - println!("Worker Config: {:?}", config); - if clean_reset { + let shard = extract_shard(run_config.shard(), enclave.as_ref()); + + let mut shard_path = PathBuf::from(config.data_dir()); + shard_path.push(SHARDS_PATH); + shard_path.push(shard.encode().to_base58()); + println!("Worker Shard Path: {:?}", shard_path); + if clean_reset || std::fs::metadata(shard_path).is_err() { + // we default to purge here because we don't want to leave behind blocks + // for deprecated shards in the sidechain_db + setup::purge_shards_unless_protected(config.data_dir()).unwrap(); setup::initialize_shard_and_keys(enclave.as_ref(), &shard).unwrap(); } diff --git a/service/src/setup.rs b/service/src/setup.rs index f45bab61c..321194283 100644 --- a/service/src/setup.rs +++ b/service/src/setup.rs @@ -18,15 +18,17 @@ use crate::error::{Error, ServiceResult}; use itp_settings::files::{ - INTEGRITEE_PARENTCHAIN_LIGHT_CLIENT_DB_PATH, SHARDS_PATH, SIDECHAIN_STORAGE_PATH, + INTEGRITEE_PARENTCHAIN_LIGHT_CLIENT_DB_PATH, SHARDS_PATH, SIDECHAIN_BLOCKS_DB_STORAGE_PATH, TARGET_A_PARENTCHAIN_LIGHT_CLIENT_DB_PATH, TARGET_B_PARENTCHAIN_LIGHT_CLIENT_DB_PATH, }; -use std::{fs, path::Path}; - #[cfg(feature = "link-binary")] pub(crate) use needs_enclave::{ generate_shielding_key_file, generate_signing_key_file, init_shard, initialize_shard_and_keys, }; +use std::{ + fs, + path::{Path, PathBuf}, +}; #[cfg(feature = "link-binary")] mod needs_enclave { @@ -35,8 
@@ mod needs_enclave { use itp_enclave_api::{enclave_base::EnclaveBase, Enclave}; use itp_settings::files::{ INTEGRITEE_PARENTCHAIN_LIGHT_CLIENT_DB_PATH, SHARDS_PATH, SHIELDING_KEY_FILE, - SIDECHAIN_STORAGE_PATH, SIGNING_KEY_FILE, TARGET_A_PARENTCHAIN_LIGHT_CLIENT_DB_PATH, - TARGET_B_PARENTCHAIN_LIGHT_CLIENT_DB_PATH, + SIDECHAIN_BLOCKS_DB_STORAGE_PATH, SIGNING_KEY_FILE, + TARGET_A_PARENTCHAIN_LIGHT_CLIENT_DB_PATH, TARGET_B_PARENTCHAIN_LIGHT_CLIENT_DB_PATH, }; use itp_types::ShardIdentifier; use log::*; @@ -47,7 +49,7 @@ mod needs_enclave { enclave: &Enclave, shard_identifier: &ShardIdentifier, ) -> ServiceResult<()> { - println!("[+] Initialize the shard"); + println!("[+] Initialize the shard: {:?}", shard_identifier); init_shard(enclave, shard_identifier); let pubkey = enclave.get_ecc_signing_pubkey().unwrap(); @@ -103,25 +105,53 @@ mod needs_enclave { } } -/// Purge all worker files from `dir`. -pub(crate) fn purge_files_from_dir(dir: &Path) -> ServiceResult<()> { - println!("[+] Performing a clean reset of the worker"); - - println!("[+] Purge all files from previous runs"); - purge_files(dir)?; - +/// Purge all worker files in a given path. +pub(crate) fn purge_shards_unless_protected(root_directory: &Path) -> ServiceResult<()> { + let mut protectfile = PathBuf::from(root_directory); + protectfile.push("shards.protect"); + if fs::metadata(protectfile.clone()).is_ok() { + println!(" all shards and sidechain db are protected by {:?}", protectfile); + } else { + println!("[+] Purge all shards and sidechain blocks from previous runs"); + remove_dir_if_it_exists(root_directory, SHARDS_PATH)?; + remove_dir_if_it_exists(root_directory, SIDECHAIN_BLOCKS_DB_STORAGE_PATH)?; + } Ok(()) } -/// Purge all worker files in a given path. 
-fn purge_files(root_directory: &Path) -> ServiceResult<()> { - remove_dir_if_it_exists(root_directory, SHARDS_PATH)?; - remove_dir_if_it_exists(root_directory, SIDECHAIN_STORAGE_PATH)?; +pub(crate) fn purge_integritee_lcdb_unless_protected(root_directory: &Path) -> ServiceResult<()> { + let mut protectfile = PathBuf::from(root_directory); + protectfile.push("integritee_lcdb.protect"); + if fs::metadata(protectfile.clone()).is_ok() { + println!(" Integritee light-client dB is protected by {:?}", protectfile); + } else { + println!("[+] Purge Integritee light-client db from previous runs"); + remove_dir_if_it_exists(root_directory, INTEGRITEE_PARENTCHAIN_LIGHT_CLIENT_DB_PATH)?; + } + Ok(()) +} - remove_dir_if_it_exists(root_directory, INTEGRITEE_PARENTCHAIN_LIGHT_CLIENT_DB_PATH)?; - remove_dir_if_it_exists(root_directory, TARGET_A_PARENTCHAIN_LIGHT_CLIENT_DB_PATH)?; - remove_dir_if_it_exists(root_directory, TARGET_B_PARENTCHAIN_LIGHT_CLIENT_DB_PATH)?; +pub(crate) fn purge_target_a_lcdb_unless_protected(root_directory: &Path) -> ServiceResult<()> { + let mut protectfile = PathBuf::from(root_directory); + protectfile.push("target_a_lcdb.protect"); + if fs::metadata(protectfile.clone()).is_ok() { + println!(" TargetA light-client dB is protected by {:?}", protectfile); + } else { + println!("[+] Purge TargetA light-client db from previous runs"); + remove_dir_if_it_exists(root_directory, TARGET_A_PARENTCHAIN_LIGHT_CLIENT_DB_PATH)?; + } + Ok(()) +} +pub(crate) fn purge_target_b_lcdb_unless_protected(root_directory: &Path) -> ServiceResult<()> { + let mut protectfile = PathBuf::from(root_directory); + protectfile.push("target_b_lcdb.protect"); + if fs::metadata(protectfile.clone()).is_ok() { + println!(" TargetB light-client dB is protected by {:?}", protectfile); + } else { + println!("[+] Purge TargetB light-client db from previous runs"); + remove_dir_if_it_exists(root_directory, TARGET_B_PARENTCHAIN_LIGHT_CLIENT_DB_PATH)?; + } Ok(()) } @@ -150,7 +180,7 @@ mod tests { 
fs::File::create(&shards_path.join("state_1.bin")).unwrap(); fs::File::create(&shards_path.join("state_2.bin")).unwrap(); - let sidechain_db_path = root_directory.join(SIDECHAIN_STORAGE_PATH); + let sidechain_db_path = root_directory.join(SIDECHAIN_BLOCKS_DB_STORAGE_PATH); fs::create_dir_all(&sidechain_db_path).unwrap(); fs::File::create(&sidechain_db_path.join("sidechain_db_1.bin")).unwrap(); fs::File::create(&sidechain_db_path.join("sidechain_db_2.bin")).unwrap(); @@ -163,12 +193,14 @@ mod tests { fs::create_dir_all(&root_directory.join(TARGET_B_PARENTCHAIN_LIGHT_CLIENT_DB_PATH)) .unwrap(); - purge_files(&root_directory).unwrap(); - + purge_shards_unless_protected(&root_directory).unwrap(); assert!(!shards_path.exists()); assert!(!sidechain_db_path.exists()); + purge_integritee_lcdb_unless_protected(&root_directory).unwrap(); assert!(!root_directory.join(INTEGRITEE_PARENTCHAIN_LIGHT_CLIENT_DB_PATH).exists()); + purge_target_a_lcdb_unless_protected(&root_directory).unwrap(); assert!(!root_directory.join(TARGET_A_PARENTCHAIN_LIGHT_CLIENT_DB_PATH).exists()); + purge_target_b_lcdb_unless_protected(&root_directory).unwrap(); assert!(!root_directory.join(TARGET_B_PARENTCHAIN_LIGHT_CLIENT_DB_PATH).exists()); } @@ -179,9 +211,103 @@ mod tests { )); let root_directory = test_directory_handle.path(); - assert!(purge_files(&root_directory).is_ok()); + assert!(purge_shards_unless_protected(&root_directory).is_ok()); + assert!(purge_integritee_lcdb_unless_protected(&root_directory).is_ok()); + assert!(purge_target_a_lcdb_unless_protected(&root_directory).is_ok()); + assert!(purge_target_b_lcdb_unless_protected(&root_directory).is_ok()); } + #[test] + fn purge_shards_protect_file_respected() { + let test_directory_handle = TestDirectoryHandle::new(PathBuf::from("test_protect_shard")); + let root_directory = test_directory_handle.path(); + + let shards_path = root_directory.join(SHARDS_PATH); + fs::create_dir_all(&shards_path).unwrap(); + 
fs::File::create(&shards_path.join("state_1.bin")).unwrap(); + fs::File::create(&shards_path.join("state_2.bin")).unwrap(); + + let sidechain_db_path = root_directory.join(SIDECHAIN_BLOCKS_DB_STORAGE_PATH); + fs::create_dir_all(&sidechain_db_path).unwrap(); + fs::File::create(&sidechain_db_path.join("sidechain_db_1.bin")).unwrap(); + fs::File::create(&sidechain_db_path.join("sidechain_db_2.bin")).unwrap(); + fs::File::create(&sidechain_db_path.join("sidechain_db_3.bin")).unwrap(); + + let protector_path = root_directory.join("shards.protect"); + fs::File::create(&protector_path).unwrap(); + + purge_shards_unless_protected(&root_directory).unwrap(); + assert!(shards_path.exists()); + assert!(sidechain_db_path.exists()); + + fs::remove_file(&protector_path).unwrap(); + while protector_path.exists() { + std::thread::sleep(std::time::Duration::from_millis(100)); + } + purge_shards_unless_protected(&root_directory).unwrap(); + assert!(!shards_path.exists()); + assert!(!sidechain_db_path.exists()); + } + + #[test] + fn purge_integritee_lcdb_protect_file_respected() { + let test_directory_handle = + TestDirectoryHandle::new(PathBuf::from("test_protect_integritee_lcdb")); + let root_directory = test_directory_handle.path(); + + let lcdb_path = root_directory.join(INTEGRITEE_PARENTCHAIN_LIGHT_CLIENT_DB_PATH); + fs::create_dir_all(&lcdb_path).unwrap(); + + let protector_path = root_directory.join("integritee_lcdb.protect"); + fs::File::create(&protector_path).unwrap(); + + purge_integritee_lcdb_unless_protected(&root_directory).unwrap(); + assert!(lcdb_path.exists()); + + fs::remove_file(&protector_path).unwrap(); + purge_integritee_lcdb_unless_protected(&root_directory).unwrap(); + assert!(!lcdb_path.exists()); + } + + #[test] + fn purge_target_a_lcdb_protect_file_respected() { + let test_directory_handle = + TestDirectoryHandle::new(PathBuf::from("test_protect_target_a_lcdb")); + let root_directory = test_directory_handle.path(); + + let lcdb_path = 
root_directory.join(TARGET_A_PARENTCHAIN_LIGHT_CLIENT_DB_PATH); + fs::create_dir_all(&lcdb_path).unwrap(); + + let protector_path = root_directory.join("target_a_lcdb.protect"); + fs::File::create(&protector_path).unwrap(); + + purge_target_a_lcdb_unless_protected(&root_directory).unwrap(); + assert!(lcdb_path.exists()); + + fs::remove_file(&protector_path).unwrap(); + purge_target_a_lcdb_unless_protected(&root_directory).unwrap(); + assert!(!lcdb_path.exists()); + } + + #[test] + fn purge_target_b_lcdb_protect_file_respected() { + let test_directory_handle = + TestDirectoryHandle::new(PathBuf::from("test_protect_target_b_lcdb")); + let root_directory = test_directory_handle.path(); + + let lcdb_path = root_directory.join(TARGET_B_PARENTCHAIN_LIGHT_CLIENT_DB_PATH); + fs::create_dir_all(&lcdb_path).unwrap(); + + let protector_path = root_directory.join("target_b_lcdb.protect"); + fs::File::create(&protector_path).unwrap(); + + purge_target_b_lcdb_unless_protected(&root_directory).unwrap(); + assert!(lcdb_path.exists()); + + fs::remove_file(&protector_path).unwrap(); + purge_target_b_lcdb_unless_protected(&root_directory).unwrap(); + assert!(!lcdb_path.exists()); + } /// Directory handle to automatically initialize a directory /// and upon dropping the reference, removing it again. 
struct TestDirectoryHandle { diff --git a/service/src/tests/mod.rs b/service/src/tests/mod.rs index 0ef2c4f25..663306cdb 100644 --- a/service/src/tests/mod.rs +++ b/service/src/tests/mod.rs @@ -34,8 +34,14 @@ pub fn run_enclave_tests(matches: &ArgMatches) { use itp_enclave_api::enclave_test::EnclaveTest; println!("*** Starting Test enclave"); - let config = Config::from(matches); - setup::purge_files_from_dir(config.data_dir()).unwrap(); + let mut config = Config::from(matches).with_test_data_dir(); + println!(" creating temporary working dir for tests: {:?}", config.data_dir()); + std::fs::create_dir_all(config.data_dir()).unwrap(); + setup::purge_shards_unless_protected(config.data_dir()).unwrap(); + setup::purge_integritee_lcdb_unless_protected(config.data_dir()).unwrap(); + setup::purge_target_a_lcdb_unless_protected(config.data_dir()).unwrap(); + setup::purge_target_b_lcdb_unless_protected(config.data_dir()).unwrap(); + let enclave = enclave_init(&config).unwrap(); if matches.is_present("all") || matches.is_present("unit") { @@ -43,6 +49,7 @@ pub fn run_enclave_tests(matches: &ArgMatches) { enclave.test_main_entrance().unwrap(); println!("[+] unit_test ended!"); } - + // clean up test directory + std::fs::remove_dir_all(config.data_dir()).unwrap(); println!("[+] All tests ended!"); } diff --git a/sidechain/storage/src/storage.rs b/sidechain/storage/src/storage.rs index 0313abfee..cca5c8584 100644 --- a/sidechain/storage/src/storage.rs +++ b/sidechain/storage/src/storage.rs @@ -17,7 +17,7 @@ use super::{db::SidechainDB, Error, Result}; use codec::{Decode, Encode}; -use itp_settings::files::SIDECHAIN_STORAGE_PATH; +use itp_settings::files::SIDECHAIN_BLOCKS_DB_STORAGE_PATH; use its_primitives::{ traits::{Block as BlockTrait, Header as HeaderTrait, SignedBlock as SignedBlockT}, types::{BlockHash, BlockNumber}, @@ -63,7 +63,7 @@ impl SidechainStorage { /// Loads existing shards and their last blocks in memory for better performance. 
pub fn load_from_base_path(base_path: PathBuf) -> Result> { // load db - let db = SidechainDB::open_default(base_path.join(SIDECHAIN_STORAGE_PATH))?; + let db = SidechainDB::open_default(base_path.join(SIDECHAIN_BLOCKS_DB_STORAGE_PATH))?; let mut storage = SidechainStorage { db, shards: vec![], last_blocks: HashMap::new() }; storage.shards = storage.load_shards_from_db()?; // get last block of each shard