diff --git a/script/src/lib.rs b/script/src/lib.rs index 3158a281b4..2016aeb3b1 100644 --- a/script/src/lib.rs +++ b/script/src/lib.rs @@ -10,9 +10,10 @@ mod verify_env; pub use crate::error::{ScriptError, TransactionScriptError}; pub use crate::scheduler::{Scheduler, ROOT_VM_ID}; +pub use crate::syscalls::generator::generate_ckb_syscalls; pub use crate::types::{ ChunkCommand, CoreMachine, DataPieceId, RunMode, ScriptGroup, ScriptGroupType, ScriptVersion, TransactionState, TxData, VerifyResult, VmIsa, VmState, VmVersion, }; -pub use crate::verify::{TransactionScriptsSyscallsGenerator, TransactionScriptsVerifier}; +pub use crate::verify::TransactionScriptsVerifier; pub use crate::verify_env::TxVerifyEnv; diff --git a/script/src/scheduler.rs b/script/src/scheduler.rs index b83b1b5b9d..348b5caeaa 100644 --- a/script/src/scheduler.rs +++ b/script/src/scheduler.rs @@ -1,15 +1,13 @@ use crate::cost_model::transferred_byte_cycles; use crate::syscalls::{ - EXEC_LOAD_ELF_V2_CYCLES_BASE, INVALID_FD, MAX_FDS_CREATED, MAX_VMS_SPAWNED, OTHER_END_CLOSED, - SPAWN_EXTRA_CYCLES_BASE, SUCCESS, WAIT_FAILURE, + generator::generate_ckb_syscalls, EXEC_LOAD_ELF_V2_CYCLES_BASE, INVALID_FD, MAX_FDS_CREATED, + MAX_VMS_SPAWNED, OTHER_END_CLOSED, SPAWN_EXTRA_CYCLES_BASE, SUCCESS, WAIT_FAILURE, }; -use crate::types::MachineContext; -use crate::verify::TransactionScriptsSyscallsGenerator; -use crate::ScriptVersion; use crate::types::{ - CoreMachineType, DataLocation, DataPieceId, Fd, FdArgs, FullSuspendedState, Machine, Message, - ReadState, RunMode, TxData, VmId, VmState, WriteState, FIRST_FD_SLOT, FIRST_VM_ID, + CoreMachineType, DataLocation, DataPieceId, DebugContext, Fd, FdArgs, FullSuspendedState, + Machine, Message, ReadState, RunMode, SgData, VmContext, VmId, VmState, WriteState, + FIRST_FD_SLOT, FIRST_VM_ID, }; use ckb_traits::{CellDataProvider, ExtensionProvider, HeaderProvider}; use ckb_types::core::Cycle; @@ -25,7 +23,10 @@ use ckb_vm::{ Error, FlattenedArgsReader, Register, }; use 
std::collections::{BTreeMap, HashMap}; -use std::sync::{Arc, Mutex}; +use std::sync::{ + atomic::{AtomicU64, Ordering}, + Arc, Mutex, +}; /// Root process's id. pub const ROOT_VM_ID: VmId = FIRST_VM_ID; @@ -44,23 +45,45 @@ pub const MAX_FDS: u64 = 64; /// of the core for IO operations. pub struct Scheduler
where - DL: CellDataProvider + HeaderProvider + ExtensionProvider + Send + Sync + Clone + 'static, + DL: CellDataProvider, { - /// Context data for current running transaction & script. - pub tx_data: TxData
, - /// In fact, Scheduler here has the potential to totally replace - /// TransactionScriptsVerifier, nonetheless much of current syscall - /// implementation is strictly tied to TransactionScriptsVerifier, we - /// are using it here to save some extra code. - pub script_version: ScriptVersion, - /// Generate system calls. - pub syscalls_generator: TransactionScriptsSyscallsGenerator
, - - /// Total cycles. - pub total_cycles: Cycle, - /// Current iteration cycles. This value is periodically added to - /// total_cycles and cleared - pub current_iteration_cycles: Cycle, + /// Immutable context data for current running transaction & script. + pub sg_data: SgData
, + + /// Mutable context data used by current scheduler + pub debug_context: DebugContext, + + /// Total cycles. When a scheduler executes, there are 3 variables + /// that might all contain charged cycles: +total_cycles+, + /// +iteration_cycles+ and +machine.cycles()+ from the current + /// executing virtual machine. At any given time, the sum of all 3 + /// variables here, represents the total consumed cycles by the current + /// scheduler. + /// But there are also exceptions: at certain period of time, the cycles + /// stored in `machine.cycles()` are moved over to +iteration_cycles+, + /// the cycles stored in +iteration_cycles+ would also be moved over to + /// +total_cycles+: + /// + /// * The current running virtual machine would contain consumed + /// cycles in its own machine.cycles() structure. + /// * +iteration_cycles+ holds the current consumed cycles each time + /// we executed a virtual machine(also named an iteration). It will + /// always be zero before each iteration(i.e., before each VM starts + /// execution). When a virtual machine finishes execution, the cycles + /// stored in `machine.cycles()` will be moved over to +iteration_cycles+. + /// `machine.cycles()` will then be reset to zero. + /// * Processing messages in the message box would also charge cycles + /// for operations, such as suspending/resuming VMs, transferring data + /// etc. Those cycles were added to +iteration_cycles+ directly. When all + /// postprocessing work is completed, the cycles consumed in + /// +iteration_cycles+ will then be moved to +total_cycles+. + /// +iteration_cycles+ will then be reset to zero. + /// + /// One can consider that +total_cycles+ contains the total cycles + /// consumed in current scheduler, when the scheduler is not busy executing. + pub total_cycles: Arc, + /// Iteration cycles, see +total_cycles+ on its usage + pub iteration_cycles: Cycle, /// Next vm id used by spawn. pub next_vm_id: VmId, /// Next fd used by pipe. 
@@ -72,7 +95,7 @@ where /// Verify the VM's inherited fd list. pub inherited_fd: BTreeMap>, /// Instantiated vms. - pub instantiated: BTreeMap, Machine)>, + pub instantiated: BTreeMap, Machine)>, /// Suspended vms. pub suspended: BTreeMap>, /// Terminated vms. @@ -88,18 +111,12 @@ where DL: CellDataProvider + HeaderProvider + ExtensionProvider + Send + Sync + Clone + 'static, { /// Create a new scheduler from empty state - pub fn new( - tx_data: TxData
, - script_version: ScriptVersion, - syscalls_generator: TransactionScriptsSyscallsGenerator
, - ) -> Self { - let message_box = Arc::clone(&syscalls_generator.message_box); + pub fn new(sg_data: SgData
, debug_context: DebugContext) -> Self { Self { - tx_data, - script_version, - syscalls_generator, - total_cycles: 0, - current_iteration_cycles: 0, + sg_data, + debug_context, + total_cycles: Arc::new(AtomicU64::new(0)), + iteration_cycles: 0, next_vm_id: FIRST_VM_ID, next_fd_slot: FIRST_FD_SLOT, states: BTreeMap::default(), @@ -107,39 +124,39 @@ where inherited_fd: BTreeMap::default(), instantiated: BTreeMap::default(), suspended: BTreeMap::default(), - message_box, + message_box: Arc::new(Mutex::new(Vec::new())), terminated_vms: BTreeMap::default(), } } /// Return total cycles. pub fn consumed_cycles(&self) -> Cycle { - self.total_cycles + self.total_cycles.load(Ordering::Acquire) } /// Add cycles to total cycles. - pub fn consumed_cycles_add(&mut self, cycles: Cycle) -> Result<(), Error> { - self.total_cycles = self + pub fn consume_cycles(&mut self, cycles: Cycle) -> Result<(), Error> { + match self .total_cycles - .checked_add(cycles) - .ok_or(Error::CyclesExceeded)?; - Ok(()) + .fetch_update(Ordering::AcqRel, Ordering::Acquire, |total_cycles| { + total_cycles.checked_add(cycles) + }) { + Ok(_) => Ok(()), + Err(_) => Err(Error::CyclesExceeded), + } } /// Resume a previously suspended scheduler state pub fn resume( - tx_data: TxData
, - script_version: ScriptVersion, - syscalls_generator: TransactionScriptsSyscallsGenerator
, + sg_data: SgData
, + debug_context: DebugContext, full: FullSuspendedState, ) -> Self { - let message_box = Arc::clone(&syscalls_generator.message_box); let mut scheduler = Self { - tx_data, - script_version, - syscalls_generator, - total_cycles: full.total_cycles, - current_iteration_cycles: 0, + sg_data, + debug_context, + total_cycles: Arc::new(AtomicU64::new(full.total_cycles)), + iteration_cycles: 0, next_vm_id: full.next_vm_id, next_fd_slot: full.next_fd_slot, states: full @@ -155,12 +172,16 @@ where .into_iter() .map(|(id, _, snapshot)| (id, snapshot)) .collect(), - message_box, + message_box: Arc::new(Mutex::new(Vec::new())), terminated_vms: full.terminated_vms.into_iter().collect(), }; scheduler .ensure_vms_instantiated(&full.instantiated_ids) .unwrap(); + // NOTE: suspending/resuming a scheduler is part of CKB's implementation + // details. It is not part of execution consensus. We should not charge + // cycles for them. + scheduler.iteration_cycles = 0; scheduler } @@ -180,7 +201,11 @@ where vms.push((id, state, snapshot)); } Ok(FullSuspendedState { - total_cycles: self.total_cycles, + // NOTE: suspending a scheduler is actually part of CKB's + // internal execution logic, it does not belong to VM execution + // consensus. We are not charging cycles for suspending + // a VM in the process of suspending the whole scheduler. + total_cycles: self.total_cycles.load(Ordering::Acquire), next_vm_id: self.next_vm_id, next_fd_slot: self.next_fd_slot, vms, @@ -207,10 +232,11 @@ where pub fn run(&mut self, mode: RunMode) -> Result<(i8, Cycle), Error> { if self.states.is_empty() { // Booting phase, we will need to initialize the first VM. 
+ let program_id = self.sg_data.sg_info.program_data_piece_id.clone(); assert_eq!( self.boot_vm( &DataLocation { - data_piece_id: DataPieceId::Program, + data_piece_id: program_id, offset: 0, length: u64::MAX, }, @@ -227,26 +253,24 @@ where }; while self.states[&ROOT_VM_ID] != VmState::Terminated { - self.current_iteration_cycles = 0; + assert_eq!(self.iteration_cycles, 0); let iterate_return = self.iterate(pause.clone(), limit_cycles); - self.consumed_cycles_add(self.current_iteration_cycles)?; + self.consume_cycles(self.iteration_cycles)?; limit_cycles = limit_cycles - .checked_sub(self.current_iteration_cycles) + .checked_sub(self.iteration_cycles) .ok_or(Error::CyclesExceeded)?; + // Clear iteration cycles intentionally after each run + self.iteration_cycles = 0; iterate_return?; } // At this point, root VM cannot be suspended let root_vm = &self.instantiated[&ROOT_VM_ID]; - Ok((root_vm.1.machine.exit_code(), self.total_cycles)) + Ok((root_vm.1.machine.exit_code(), self.consumed_cycles())) } /// Returns the machine that needs to be executed in the current iterate. - pub fn iterate_prepare_machine( - &mut self, - pause: Pause, - limit_cycles: Cycle, - ) -> Result<(u64, &mut Machine), Error> { + pub fn iterate_prepare_machine(&mut self) -> Result<(u64, &mut Machine), Error> { // Process all pending VM reads & writes. self.process_io()?; // Find a runnable VM that has the largest ID. 
@@ -260,11 +284,7 @@ where let vm_id_to_run = vm_id_to_run.ok_or_else(|| { Error::Unexpected("A deadlock situation has been reached!".to_string()) })?; - let total_cycles = self.total_cycles; - let (context, machine) = self.ensure_get_instantiated(&vm_id_to_run)?; - context.set_base_cycles(total_cycles); - machine.set_max_cycles(limit_cycles); - machine.machine.set_pause(pause); + let (_context, machine) = self.ensure_get_instantiated(&vm_id_to_run)?; Ok((vm_id_to_run, machine)) } @@ -273,12 +293,7 @@ where &mut self, vm_id_to_run: u64, result: Result, - cycles: u64, ) -> Result<(), Error> { - self.current_iteration_cycles = self - .current_iteration_cycles - .checked_add(cycles) - .ok_or(Error::CyclesOverflow)?; // Process message box, update VM states accordingly self.process_message_box()?; assert!(self.message_box.lock().expect("lock").is_empty()); @@ -336,11 +351,23 @@ where // Here both pause signal and limit_cycles are provided so as to simplify // branches. fn iterate(&mut self, pause: Pause, limit_cycles: Cycle) -> Result<(), Error> { - let (id, vm) = self.iterate_prepare_machine(pause, limit_cycles)?; - let result = vm.run(); - let cycles = vm.machine.cycles(); - vm.machine.set_cycles(0); - self.iterate_process_results(id, result, cycles) + // Execute the VM for real, consumed cycles in the virtual machine is + // moved over to +iteration_cycles+, then we reset virtual machine's own + // cycle count to zero. 
+ let (id, result, cycles) = { + let (id, vm) = self.iterate_prepare_machine()?; + vm.set_max_cycles(limit_cycles); + vm.machine.set_pause(pause); + let result = vm.run(); + let cycles = vm.machine.cycles(); + vm.machine.set_cycles(0); + (id, result, cycles) + }; + self.iteration_cycles = self + .iteration_cycles + .checked_add(cycles) + .ok_or(Error::CyclesExceeded)?; + self.iterate_process_results(id, result) } fn process_message_box(&mut self) -> Result<(), Error> { @@ -358,7 +385,7 @@ where let old_cycles = old_machine.machine.cycles(); let max_cycles = old_machine.machine.max_cycles(); let program = { - let mut sc = old_context.snapshot2_context().lock().expect("lock"); + let mut sc = old_context.snapshot2_context.lock().expect("lock"); sc.load_data( &args.location.data_piece_id, args.location.offset, @@ -731,7 +758,7 @@ where fn ensure_get_instantiated( &mut self, id: &VmId, - ) -> Result<&mut (MachineContext
, Machine), Error> { + ) -> Result<&mut (VmContext
, Machine), Error> { self.ensure_vms_instantiated(&[*id])?; self.instantiated .get_mut(id) @@ -744,13 +771,13 @@ where return Err(Error::Unexpected(format!("VM {:?} is not suspended!", id))); } let snapshot = &self.suspended[id]; - self.current_iteration_cycles = self - .current_iteration_cycles + self.iteration_cycles = self + .iteration_cycles .checked_add(SPAWN_EXTRA_CYCLES_BASE) .ok_or(Error::CyclesExceeded)?; let (context, mut machine) = self.create_dummy_vm(id)?; { - let mut sc = context.snapshot2_context().lock().expect("lock"); + let mut sc = context.snapshot2_context.lock().expect("lock"); sc.resume(&mut machine.machine, snapshot)?; } self.instantiated.insert(*id, (context, machine)); @@ -766,8 +793,8 @@ where id ))); } - self.current_iteration_cycles = self - .current_iteration_cycles + self.iteration_cycles = self + .iteration_cycles .checked_add(SPAWN_EXTRA_CYCLES_BASE) .ok_or(Error::CyclesExceeded)?; let (context, machine) = self @@ -775,7 +802,7 @@ where .get_mut(id) .ok_or_else(|| Error::Unexpected("Unable to find VM Id".to_string()))?; let snapshot = { - let sc = context.snapshot2_context().lock().expect("lock"); + let sc = context.snapshot2_context.lock().expect("lock"); sc.make_snapshot(&mut machine.machine)? }; self.suspended.insert(*id, snapshot); @@ -793,7 +820,7 @@ where self.next_vm_id += 1; let (context, mut machine) = self.create_dummy_vm(&id)?; let (program, _) = { - let mut sc = context.snapshot2_context().lock().expect("lock"); + let mut sc = context.snapshot2_context.lock().expect("lock"); sc.load_data(&location.data_piece_id, location.offset, location.length)? }; self.load_vm_program(&context, &mut machine, location, program, args)?; @@ -807,6 +834,7 @@ where .key(); self.suspend_vm(&id)?; } + self.instantiated.insert(id, (context, machine)); self.states.insert(id, VmState::Runnable); @@ -816,7 +844,7 @@ where // Load the program into an empty vm. fn load_vm_program( &mut self, - context: &MachineContext
, + context: &VmContext
, machine: &mut Machine, location: &DataLocation, program: Bytes, @@ -831,7 +859,7 @@ where } None => machine.load_program_with_metadata(&program, &metadata, vec![].into_iter())?, }; - let mut sc = context.snapshot2_context().lock().expect("lock"); + let mut sc = context.snapshot2_context.lock().expect("lock"); sc.mark_program( &mut machine.machine, &metadata, @@ -845,35 +873,30 @@ where } // Create a new VM instance with syscalls attached - fn create_dummy_vm(&self, id: &VmId) -> Result<(MachineContext
, Machine), Error> { + fn create_dummy_vm(&self, id: &VmId) -> Result<(VmContext
, Machine), Error> { // The code here looks slightly weird, since I don't want to copy over all syscall // impls here again. Ideally, this scheduler package should be merged with ckb-script, // or simply replace ckb-script. That way, the quirks here will be eliminated. - let version = self.script_version; + let version = &self.sg_data.sg_info.script_version; let core_machine = CoreMachineType::new( version.vm_isa(), version.vm_version(), // We will update max_cycles for each machine when it gets a chance to run u64::MAX, ); - let snapshot2_context = Arc::new(Mutex::new(Snapshot2Context::new(self.tx_data.clone()))); - let mut syscalls_generator = self.syscalls_generator.clone(); - syscalls_generator.vm_id = *id; - let mut machine_context = MachineContext::new(self.tx_data.clone()); - machine_context.base_cycles = Arc::clone(&self.syscalls_generator.base_cycles); - machine_context.snapshot2_context = Arc::clone(&snapshot2_context); + let vm_context = VmContext { + base_cycles: Arc::clone(&self.total_cycles), + message_box: Arc::clone(&self.message_box), + snapshot2_context: Arc::new(Mutex::new(Snapshot2Context::new(self.sg_data.clone()))), + }; let machine_builder = DefaultMachineBuilder::new(core_machine) .instruction_cycle_func(Box::new(estimate_cycles)); - let machine_builder = syscalls_generator - .generate_syscalls( - version, - &self.tx_data.script_group, - Arc::clone(&snapshot2_context), - ) - .into_iter() - .fold(machine_builder, |builder, syscall| builder.syscall(syscall)); + let machine_builder = + generate_ckb_syscalls(id, &self.sg_data, &vm_context, &self.debug_context) + .into_iter() + .fold(machine_builder, |builder, syscall| builder.syscall(syscall)); let default_machine = machine_builder.build(); - Ok((machine_context, Machine::new(default_machine))) + Ok((vm_context, Machine::new(default_machine))) } } diff --git a/script/src/syscalls/close.rs b/script/src/syscalls/close.rs index 1e359095be..536465b9da 100644 --- a/script/src/syscalls/close.rs +++ 
b/script/src/syscalls/close.rs @@ -1,5 +1,6 @@ use crate::syscalls::{CLOSE, SPAWN_YIELD_CYCLES_BASE}; -use crate::types::{Fd, Message, VmId}; +use crate::types::{Fd, Message, VmContext, VmId}; +use ckb_traits::{CellDataProvider, ExtensionProvider, HeaderProvider}; use ckb_vm::{ registers::{A0, A7}, Error as VMError, Register, SupportMachine, Syscalls, @@ -13,8 +14,14 @@ pub struct Close { } impl Close { - pub fn new(id: VmId, message_box: Arc>>) -> Self { - Self { id, message_box } + pub fn new
(vm_id: &VmId, vm_context: &VmContext
) -> Self + where + DL: CellDataProvider + HeaderProvider + ExtensionProvider + Send + Sync + Clone + 'static, + { + Self { + id: *vm_id, + message_box: Arc::clone(&vm_context.message_box), + } } } diff --git a/script/src/syscalls/current_cycles.rs b/script/src/syscalls/current_cycles.rs index a791cef7a4..14e13da807 100644 --- a/script/src/syscalls/current_cycles.rs +++ b/script/src/syscalls/current_cycles.rs @@ -1,18 +1,27 @@ -use crate::syscalls::CURRENT_CYCLES; +use crate::{syscalls::CURRENT_CYCLES, types::VmContext}; +use ckb_traits::{CellDataProvider, ExtensionProvider, HeaderProvider}; use ckb_vm::{ registers::{A0, A7}, Error as VMError, Register, SupportMachine, Syscalls, }; -use std::sync::{Arc, Mutex}; +use std::sync::{ + atomic::{AtomicU64, Ordering}, + Arc, +}; #[derive(Debug, Default)] pub struct CurrentCycles { - base: Arc>, + base: Arc, } impl CurrentCycles { - pub fn new(base: Arc>) -> Self { - Self { base } + pub fn new
(vm_context: &VmContext
) -> Self + where + DL: CellDataProvider + HeaderProvider + ExtensionProvider + Send + Sync + Clone + 'static, + { + Self { + base: Arc::clone(&vm_context.base_cycles), + } } } @@ -27,8 +36,7 @@ impl Syscalls for CurrentCycles { } let cycles = self .base - .lock() - .map_err(|e| VMError::Unexpected(e.to_string()))? + .load(Ordering::Acquire) .checked_add(machine.cycles()) .ok_or(VMError::CyclesOverflow)?; machine.set_register(A0, Mac::REG::from_u64(cycles)); diff --git a/script/src/syscalls/debugger.rs b/script/src/syscalls/debugger.rs index 1b2158a7e7..75d2771638 100644 --- a/script/src/syscalls/debugger.rs +++ b/script/src/syscalls/debugger.rs @@ -1,19 +1,24 @@ -use crate::types::DebugPrinter; +use crate::types::{ + DebugContext, DebugPrinter, {SgData, SgInfo}, +}; use crate::{cost_model::transferred_byte_cycles, syscalls::DEBUG_PRINT_SYSCALL_NUMBER}; -use ckb_types::packed::Byte32; use ckb_vm::{ registers::{A0, A7}, Error as VMError, Memory, Register, SupportMachine, Syscalls, }; +use std::sync::Arc; pub struct Debugger { - hash: Byte32, + sg_info: Arc, printer: DebugPrinter, } impl Debugger { - pub fn new(hash: Byte32, printer: DebugPrinter) -> Debugger { - Debugger { hash, printer } + pub fn new
(sg_data: &SgData
, debug_context: &DebugContext) -> Debugger { + Debugger { + sg_info: Arc::clone(&sg_data.sg_info), + printer: Arc::clone(&debug_context.debug_printer), + } } } @@ -46,7 +51,7 @@ impl Syscalls for Debugger { machine.add_cycles_no_checking(transferred_byte_cycles(buffer.len() as u64))?; let s = String::from_utf8(buffer) .map_err(|e| VMError::External(format!("String from buffer {e:?}")))?; - (self.printer)(&self.hash, s.as_str()); + (self.printer)(&self.sg_info.script_hash, s.as_str()); Ok(true) } diff --git a/script/src/syscalls/exec.rs b/script/src/syscalls/exec.rs index 1e934fddf8..ac1ce8cb70 100644 --- a/script/src/syscalls/exec.rs +++ b/script/src/syscalls/exec.rs @@ -3,9 +3,9 @@ use crate::syscalls::{ Place, Source, SourceEntry, EXEC, INDEX_OUT_OF_BOUND, MAX_ARGV_LENGTH, SLICE_OUT_OF_BOUND, WRONG_FORMAT, }; -use crate::types::Indices; +use crate::types::SgData; use ckb_traits::CellDataProvider; -use ckb_types::core::cell::{CellMeta, ResolvedTransaction}; +use ckb_types::core::cell::CellMeta; use ckb_types::core::error::ARGV_TOO_LONG_TEXT; use ckb_types::packed::{Bytes as PackedBytes, BytesVec}; use ckb_vm::memory::load_c_string_byte_by_byte; @@ -15,62 +15,49 @@ use ckb_vm::{ Error as VMError, Register, SupportMachine, Syscalls, }; use ckb_vm::{DEFAULT_STACK_SIZE, RISCV_MAX_MEMORY}; -use std::sync::Arc; #[derive(Debug)] pub struct Exec
{ - data_loader: DL, - rtx: Arc, - outputs: Arc>, - group_inputs: Indices, - group_outputs: Indices, + sg_data: SgData
, } -impl Exec
{ - pub fn new( - data_loader: DL, - rtx: Arc, - outputs: Arc>, - group_inputs: Indices, - group_outputs: Indices, - ) -> Exec
{ +impl Exec
{ + pub fn new(sg_data: &SgData
) -> Exec
{ Exec { - data_loader, - rtx, - outputs, - group_inputs, - group_outputs, + sg_data: sg_data.clone(), } } #[inline] fn resolved_inputs(&self) -> &Vec { - &self.rtx.resolved_inputs + &self.sg_data.rtx.resolved_inputs } #[inline] fn resolved_cell_deps(&self) -> &Vec { - &self.rtx.resolved_cell_deps + &self.sg_data.rtx.resolved_cell_deps } #[inline] fn witnesses(&self) -> BytesVec { - self.rtx.transaction.witnesses() + self.sg_data.rtx.transaction.witnesses() } fn fetch_cell(&self, source: Source, index: usize) -> Result<&CellMeta, u8> { let cell_opt = match source { Source::Transaction(SourceEntry::Input) => self.resolved_inputs().get(index), - Source::Transaction(SourceEntry::Output) => self.outputs.get(index), + Source::Transaction(SourceEntry::Output) => self.sg_data.outputs().get(index), Source::Transaction(SourceEntry::CellDep) => self.resolved_cell_deps().get(index), Source::Group(SourceEntry::Input) => self - .group_inputs + .sg_data + .group_inputs() .get(index) .and_then(|actual_index| self.resolved_inputs().get(*actual_index)), Source::Group(SourceEntry::Output) => self - .group_outputs + .sg_data + .group_outputs() .get(index) - .and_then(|actual_index| self.outputs.get(*actual_index)), + .and_then(|actual_index| self.sg_data.outputs().get(*actual_index)), Source::Transaction(SourceEntry::HeaderDep) | Source::Group(SourceEntry::CellDep) | Source::Group(SourceEntry::HeaderDep) => { @@ -84,11 +71,13 @@ impl Exec
{ fn fetch_witness(&self, source: Source, index: usize) -> Result { let witness_opt = match source { Source::Group(SourceEntry::Input) => self - .group_inputs + .sg_data + .group_inputs() .get(index) .and_then(|actual_index| self.witnesses().get(*actual_index)), Source::Group(SourceEntry::Output) => self - .group_outputs + .sg_data + .group_outputs() .get(index) .and_then(|actual_index| self.witnesses().get(*actual_index)), Source::Transaction(SourceEntry::Input) => self.witnesses().get(index), @@ -102,7 +91,7 @@ impl Exec
{ } } -impl Syscalls for Exec
{ +impl Syscalls for Exec
{ fn initialize(&mut self, _machine: &mut Mac) -> Result<(), VMError> { Ok(()) } @@ -126,12 +115,15 @@ impl Syscalls for return Ok(true); } let cell = cell.unwrap(); - self.data_loader.load_cell_data(cell).ok_or_else(|| { - VMError::Unexpected(format!( - "Unexpected load_cell_data failed {}", - cell.out_point, - )) - })? + self.sg_data + .data_loader() + .load_cell_data(cell) + .ok_or_else(|| { + VMError::Unexpected(format!( + "Unexpected load_cell_data failed {}", + cell.out_point, + )) + })? } Place::Witness => { let witness = self.fetch_witness(source, index as usize); diff --git a/script/src/syscalls/exec_v2.rs b/script/src/syscalls/exec_v2.rs index ab5b2facce..5f08ab39a9 100644 --- a/script/src/syscalls/exec_v2.rs +++ b/script/src/syscalls/exec_v2.rs @@ -1,5 +1,6 @@ use crate::syscalls::{EXEC, INDEX_OUT_OF_BOUND}; -use crate::types::{DataLocation, DataPieceId, ExecV2Args, Message, VmId}; +use crate::types::{DataLocation, DataPieceId, ExecV2Args, Message, VmContext, VmId}; +use ckb_traits::CellDataProvider; use ckb_vm::{ registers::{A0, A1, A2, A3, A4, A5, A7}, Error as VMError, Register, SupportMachine, Syscalls, @@ -12,8 +13,11 @@ pub struct ExecV2 { } impl ExecV2 { - pub fn new(id: VmId, message_box: Arc>>) -> ExecV2 { - ExecV2 { id, message_box } + pub fn new(vm_id: &VmId, vm_context: &VmContext
) -> ExecV2 { + ExecV2 { + id: *vm_id, + message_box: Arc::clone(&vm_context.message_box), + } } } diff --git a/script/src/syscalls/generator.rs b/script/src/syscalls/generator.rs new file mode 100644 index 0000000000..98e8e7c1d5 --- /dev/null +++ b/script/src/syscalls/generator.rs @@ -0,0 +1,62 @@ +use crate::{ + syscalls::{ + Close, CurrentCycles, Debugger, Exec, ExecV2, InheritedFd, LoadBlockExtension, LoadCell, + LoadCellData, LoadHeader, LoadInput, LoadScript, LoadScriptHash, LoadTx, LoadWitness, Pipe, + ProcessID, Read, Spawn, VMVersion, Wait, Write, + }, + types::{CoreMachine, DebugContext, ScriptVersion, SgData, VmContext, VmId}, +}; +use ckb_traits::{CellDataProvider, ExtensionProvider, HeaderProvider}; +use ckb_vm::Syscalls; + +/// Generate RISC-V syscalls in CKB environment +pub fn generate_ckb_syscalls
( + vm_id: &VmId, + sg_data: &SgData
, + vm_context: &VmContext
, + debug_context: &DebugContext, +) -> Vec)>> +where + DL: CellDataProvider + HeaderProvider + ExtensionProvider + Send + Sync + Clone + 'static, +{ + let mut syscalls: Vec)>> = vec![ + Box::new(LoadScriptHash::new(sg_data)), + Box::new(LoadTx::new(sg_data)), + Box::new(LoadCell::new(sg_data)), + Box::new(LoadInput::new(sg_data)), + Box::new(LoadHeader::new(sg_data)), + Box::new(LoadWitness::new(sg_data)), + Box::new(LoadScript::new(sg_data)), + Box::new(LoadCellData::new(vm_context)), + Box::new(Debugger::new(sg_data, debug_context)), + ]; + let script_version = &sg_data.sg_info.script_version; + if script_version >= &ScriptVersion::V1 { + syscalls.append(&mut vec![ + Box::new(VMVersion::new()), + Box::new(CurrentCycles::new(vm_context)), + ]); + } + if script_version == &ScriptVersion::V1 { + syscalls.push(Box::new(Exec::new(sg_data))); + } + if script_version >= &ScriptVersion::V2 { + syscalls.append(&mut vec![ + Box::new(ExecV2::new(vm_id, vm_context)), + Box::new(LoadBlockExtension::new(sg_data)), + Box::new(Spawn::new(vm_id, vm_context)), + Box::new(ProcessID::new(vm_id)), + Box::new(Pipe::new(vm_id, vm_context)), + Box::new(Wait::new(vm_id, vm_context)), + Box::new(Write::new(vm_id, vm_context)), + Box::new(Read::new(vm_id, vm_context)), + Box::new(InheritedFd::new(vm_id, vm_context)), + Box::new(Close::new(vm_id, vm_context)), + ]); + } + #[cfg(test)] + syscalls.push(Box::new(crate::syscalls::Pause::new( + std::sync::Arc::clone(&debug_context.skip_pause), + ))); + syscalls +} diff --git a/script/src/syscalls/inherited_fd.rs b/script/src/syscalls/inherited_fd.rs index 4d041a0ba1..510b82969c 100644 --- a/script/src/syscalls/inherited_fd.rs +++ b/script/src/syscalls/inherited_fd.rs @@ -1,5 +1,6 @@ use crate::syscalls::{INHERITED_FD, SPAWN_YIELD_CYCLES_BASE}; -use crate::types::{Fd, FdArgs, Message, VmId}; +use crate::types::{Fd, FdArgs, Message, VmContext, VmId}; +use ckb_traits::{CellDataProvider, ExtensionProvider, HeaderProvider}; use ckb_vm::{ 
registers::{A0, A1, A7}, Error as VMError, Register, SupportMachine, Syscalls, @@ -13,8 +14,14 @@ pub struct InheritedFd { } impl InheritedFd { - pub fn new(id: VmId, message_box: Arc>>) -> Self { - Self { id, message_box } + pub fn new
(vm_id: &VmId, vm_context: &VmContext
) -> Self + where + DL: CellDataProvider + HeaderProvider + ExtensionProvider + Send + Sync + Clone + 'static, + { + Self { + id: *vm_id, + message_box: Arc::clone(&vm_context.message_box), + } } } diff --git a/script/src/syscalls/load_block_extension.rs b/script/src/syscalls/load_block_extension.rs index 061bb7c4c8..d38cf4b8cc 100644 --- a/script/src/syscalls/load_block_extension.rs +++ b/script/src/syscalls/load_block_extension.rs @@ -1,13 +1,12 @@ -use crate::types::Indices; use crate::{ cost_model::transferred_byte_cycles, syscalls::{ utils::store_data, Source, SourceEntry, INDEX_OUT_OF_BOUND, ITEM_MISSING, LOAD_BLOCK_EXTENSION, SUCCESS, }, + types::SgData, }; use ckb_traits::ExtensionProvider; -use ckb_types::core::cell::ResolvedTransaction; use ckb_types::{ core::cell::CellMeta, packed::{self, Byte32Vec}, @@ -16,41 +15,32 @@ use ckb_vm::{ registers::{A0, A3, A4, A7}, Error as VMError, Register, SupportMachine, Syscalls, }; -use std::sync::Arc; #[derive(Debug)] pub struct LoadBlockExtension
{ - data_loader: DL, - rtx: Arc, - group_inputs: Indices, + sg_data: SgData
, } -impl LoadBlockExtension
{ - pub fn new( - data_loader: DL, - rtx: Arc, - group_inputs: Indices, - ) -> LoadBlockExtension
{ +impl LoadBlockExtension
{ + pub fn new(sg_data: &SgData
) -> LoadBlockExtension
{ LoadBlockExtension { - data_loader, - rtx, - group_inputs, + sg_data: sg_data.clone(), } } #[inline] fn header_deps(&self) -> Byte32Vec { - self.rtx.transaction.header_deps() + self.sg_data.rtx.transaction.header_deps() } #[inline] fn resolved_inputs(&self) -> &Vec { - &self.rtx.resolved_inputs + &self.sg_data.rtx.resolved_inputs } #[inline] fn resolved_cell_deps(&self) -> &Vec { - &self.rtx.resolved_cell_deps + &self.sg_data.rtx.resolved_cell_deps } fn load_block_extension(&self, cell_meta: &CellMeta) -> Option { @@ -64,7 +54,7 @@ impl LoadBlockExtension
{ .into_iter() .any(|hash| &hash == block_hash) { - self.data_loader.get_block_extension(block_hash) + self.sg_data.data_loader().get_block_extension(block_hash) } else { None } @@ -88,12 +78,14 @@ impl LoadBlockExtension
{ .get(index) .ok_or(INDEX_OUT_OF_BOUND) .and_then(|block_hash| { - self.data_loader + self.sg_data + .data_loader() .get_block_extension(&block_hash) .ok_or(ITEM_MISSING) }), Source::Group(SourceEntry::Input) => self - .group_inputs + .sg_data + .group_inputs() .get(index) .ok_or(INDEX_OUT_OF_BOUND) .and_then(|actual_index| { @@ -109,7 +101,7 @@ impl LoadBlockExtension
{ } } -impl Syscalls +impl Syscalls for LoadBlockExtension
{ fn initialize(&mut self, _machine: &mut Mac) -> Result<(), VMError> { diff --git a/script/src/syscalls/load_cell.rs b/script/src/syscalls/load_cell.rs index 0dee77937f..3c6212b1bd 100644 --- a/script/src/syscalls/load_cell.rs +++ b/script/src/syscalls/load_cell.rs @@ -1,18 +1,15 @@ -use crate::types::Indices; use crate::{ cost_model::transferred_byte_cycles, syscalls::{ utils::store_data, CellField, Source, SourceEntry, INDEX_OUT_OF_BOUND, ITEM_MISSING, LOAD_CELL_BY_FIELD_SYSCALL_NUMBER, LOAD_CELL_SYSCALL_NUMBER, SUCCESS, }, + types::{SgData, TxInfo}, }; use byteorder::{LittleEndian, WriteBytesExt}; use ckb_traits::CellDataProvider; use ckb_types::{ - core::{ - cell::{CellMeta, ResolvedTransaction}, - Capacity, - }, + core::{cell::CellMeta, Capacity}, packed::CellOutput, prelude::*, }; @@ -20,41 +17,31 @@ use ckb_vm::{ registers::{A0, A3, A4, A5, A7}, Error as VMError, Register, SupportMachine, Syscalls, }; -use std::sync::Arc; pub struct LoadCell
{ - data_loader: DL, - rtx: Arc, - outputs: Arc>, - group_inputs: Indices, - group_outputs: Indices, + sg_data: SgData
, } -impl LoadCell
{ - pub fn new( - data_loader: DL, - rtx: Arc, - outputs: Arc>, - group_inputs: Indices, - group_outputs: Indices, - ) -> LoadCell
{ +impl LoadCell
{ + pub fn new(sg_data: &SgData
) -> LoadCell
{ LoadCell { - data_loader, - rtx, - outputs, - group_inputs, - group_outputs, + sg_data: sg_data.clone(), } } + #[inline] + fn tx_info(&self) -> &TxInfo
{ + &self.sg_data.tx_info + } + #[inline] fn resolved_inputs(&self) -> &Vec { - &self.rtx.resolved_inputs + &self.sg_data.rtx.resolved_inputs } #[inline] fn resolved_cell_deps(&self) -> &Vec { - &self.rtx.resolved_cell_deps + &self.sg_data.rtx.resolved_cell_deps } fn fetch_cell(&self, source: Source, index: usize) -> Result<&CellMeta, u8> { @@ -63,7 +50,7 @@ impl LoadCell
{ self.resolved_inputs().get(index).ok_or(INDEX_OUT_OF_BOUND) } Source::Transaction(SourceEntry::Output) => { - self.outputs.get(index).ok_or(INDEX_OUT_OF_BOUND) + self.tx_info().outputs.get(index).ok_or(INDEX_OUT_OF_BOUND) } Source::Transaction(SourceEntry::CellDep) => self .resolved_cell_deps() @@ -71,7 +58,8 @@ impl LoadCell
{ .ok_or(INDEX_OUT_OF_BOUND), Source::Transaction(SourceEntry::HeaderDep) => Err(INDEX_OUT_OF_BOUND), Source::Group(SourceEntry::Input) => self - .group_inputs + .sg_data + .group_inputs() .get(index) .ok_or(INDEX_OUT_OF_BOUND) .and_then(|actual_index| { @@ -80,10 +68,16 @@ impl LoadCell
{ .ok_or(INDEX_OUT_OF_BOUND) }), Source::Group(SourceEntry::Output) => self - .group_outputs + .sg_data + .group_outputs() .get(index) .ok_or(INDEX_OUT_OF_BOUND) - .and_then(|actual_index| self.outputs.get(*actual_index).ok_or(INDEX_OUT_OF_BOUND)), + .and_then(|actual_index| { + self.tx_info() + .outputs + .get(*actual_index) + .ok_or(INDEX_OUT_OF_BOUND) + }), Source::Group(SourceEntry::CellDep) => Err(INDEX_OUT_OF_BOUND), Source::Group(SourceEntry::HeaderDep) => Err(INDEX_OUT_OF_BOUND), } @@ -115,7 +109,7 @@ impl LoadCell
{ (SUCCESS, store_data(machine, &buffer)?) } CellField::DataHash => { - if let Some(bytes) = self.data_loader.load_cell_data_hash(cell) { + if let Some(bytes) = self.tx_info().data_loader.load_cell_data_hash(cell) { (SUCCESS, store_data(machine, &bytes.as_bytes())?) } else { (ITEM_MISSING, 0) @@ -165,7 +159,9 @@ impl LoadCell
{ } } -impl Syscalls for LoadCell
{ +impl Syscalls + for LoadCell
+{ fn initialize(&mut self, _machine: &mut Mac) -> Result<(), VMError> { Ok(()) } diff --git a/script/src/syscalls/load_cell_data.rs b/script/src/syscalls/load_cell_data.rs index d9ee50be4b..3aa6b24645 100644 --- a/script/src/syscalls/load_cell_data.rs +++ b/script/src/syscalls/load_cell_data.rs @@ -1,4 +1,4 @@ -use crate::types::{DataPieceId, TxData}; +use crate::types::{DataPieceId, SgData, VmContext}; use crate::{ cost_model::transferred_byte_cycles, syscalls::{ @@ -20,17 +20,17 @@ pub struct LoadCellData
where DL: CellDataProvider + HeaderProvider + ExtensionProvider + Send + Sync + Clone + 'static, { - snapshot2_context: Arc>>>, + snapshot2_context: Arc>>>, } impl
LoadCellData
where DL: CellDataProvider + HeaderProvider + ExtensionProvider + Send + Sync + Clone + 'static, { - pub fn new( - snapshot2_context: Arc>>>, - ) -> LoadCellData
{ - LoadCellData { snapshot2_context } + pub fn new(vm_context: &VmContext
) -> LoadCellData
{ + LoadCellData { + snapshot2_context: Arc::clone(&vm_context.snapshot2_context), + } } fn load_data(&self, machine: &mut Mac) -> Result<(), VMError> { diff --git a/script/src/syscalls/load_header.rs b/script/src/syscalls/load_header.rs index 41db861e7b..10629c844c 100644 --- a/script/src/syscalls/load_header.rs +++ b/script/src/syscalls/load_header.rs @@ -1,4 +1,3 @@ -use crate::types::Indices; use crate::{ cost_model::transferred_byte_cycles, syscalls::{ @@ -6,9 +5,9 @@ use crate::{ HeaderField, Source, SourceEntry, INDEX_OUT_OF_BOUND, ITEM_MISSING, LOAD_HEADER_BY_FIELD_SYSCALL_NUMBER, LOAD_HEADER_SYSCALL_NUMBER, SUCCESS, }, + types::SgData, }; use ckb_traits::HeaderProvider; -use ckb_types::core::cell::ResolvedTransaction; use ckb_types::{ core::{cell::CellMeta, HeaderView}, packed::Byte32Vec, @@ -18,45 +17,40 @@ use ckb_vm::{ registers::{A0, A3, A4, A5, A7}, Error as VMError, Register, SupportMachine, Syscalls, }; -use std::sync::Arc; - #[derive(Debug)] pub struct LoadHeader
{ - data_loader: DL, - rtx: Arc, - // This can only be used for liner search - // header_deps: Byte32Vec, - // resolved_inputs: &'a [CellMeta], - // resolved_cell_deps: &'a [CellMeta], - group_inputs: Indices, + sg_data: SgData
, } -impl LoadHeader
{ - pub fn new( - data_loader: DL, - rtx: Arc, - group_inputs: Indices, - ) -> LoadHeader
{ +impl LoadHeader
{ + pub fn new(sg_data: &SgData
) -> LoadHeader
{ LoadHeader { - data_loader, - rtx, - group_inputs, + sg_data: sg_data.clone(), } } + // This can only be used for liner search + // header_deps: Byte32Vec, + // resolved_inputs: &'a [CellMeta], + // resolved_cell_deps: &'a [CellMeta], + #[inline] + fn group_inputs(&self) -> &[usize] { + self.sg_data.group_inputs() + } + #[inline] fn header_deps(&self) -> Byte32Vec { - self.rtx.transaction.header_deps() + self.sg_data.rtx.transaction.header_deps() } #[inline] fn resolved_inputs(&self) -> &Vec { - &self.rtx.resolved_inputs + &self.sg_data.rtx.resolved_inputs } #[inline] fn resolved_cell_deps(&self) -> &Vec { - &self.rtx.resolved_cell_deps + &self.sg_data.rtx.resolved_cell_deps } fn load_header(&self, cell_meta: &CellMeta) -> Option { @@ -70,7 +64,7 @@ impl LoadHeader
{ .into_iter() .any(|hash| &hash == block_hash) { - self.data_loader.get_header(block_hash) + self.sg_data.tx_info.data_loader.get_header(block_hash) } else { None } @@ -94,10 +88,14 @@ impl LoadHeader
{ .get(index) .ok_or(INDEX_OUT_OF_BOUND) .and_then(|block_hash| { - self.data_loader.get_header(&block_hash).ok_or(ITEM_MISSING) + self.sg_data + .tx_info + .data_loader + .get_header(&block_hash) + .ok_or(ITEM_MISSING) }), Source::Group(SourceEntry::Input) => self - .group_inputs + .group_inputs() .get(index) .ok_or(INDEX_OUT_OF_BOUND) .and_then(|actual_index| { @@ -146,7 +144,9 @@ impl LoadHeader
{ } } -impl Syscalls for LoadHeader
{ +impl Syscalls + for LoadHeader
+{ fn initialize(&mut self, _machine: &mut Mac) -> Result<(), VMError> { Ok(()) } diff --git a/script/src/syscalls/load_input.rs b/script/src/syscalls/load_input.rs index 8f19c0d6c9..2d1f415f32 100644 --- a/script/src/syscalls/load_input.rs +++ b/script/src/syscalls/load_input.rs @@ -1,13 +1,12 @@ -use crate::types::Indices; use crate::{ cost_model::transferred_byte_cycles, syscalls::{ utils::store_data, InputField, Source, SourceEntry, INDEX_OUT_OF_BOUND, LOAD_INPUT_BY_FIELD_SYSCALL_NUMBER, LOAD_INPUT_SYSCALL_NUMBER, SUCCESS, }, + types::SgData, }; use byteorder::{LittleEndian, WriteBytesExt}; -use ckb_types::core::cell::ResolvedTransaction; use ckb_types::{ packed::{CellInput, CellInputVec}, prelude::*, @@ -16,22 +15,22 @@ use ckb_vm::{ registers::{A0, A3, A4, A5, A7}, Error as VMError, Register, SupportMachine, Syscalls, }; -use std::sync::Arc; #[derive(Debug)] -pub struct LoadInput { - rtx: Arc, - group_inputs: Indices, +pub struct LoadInput
{ + sg_data: SgData
, } -impl LoadInput { - pub fn new(rtx: Arc, group_inputs: Indices) -> LoadInput { - LoadInput { rtx, group_inputs } +impl LoadInput
{ + pub fn new(sg_data: &SgData
) -> Self { + LoadInput { + sg_data: sg_data.clone(), + } } #[inline] fn inputs(&self) -> CellInputVec { - self.rtx.transaction.inputs() + self.sg_data.rtx.transaction.inputs() } fn fetch_input(&self, source: Source, index: usize) -> Result { @@ -43,7 +42,8 @@ impl LoadInput { Source::Transaction(SourceEntry::CellDep) => Err(INDEX_OUT_OF_BOUND), Source::Transaction(SourceEntry::HeaderDep) => Err(INDEX_OUT_OF_BOUND), Source::Group(SourceEntry::Input) => self - .group_inputs + .sg_data + .group_inputs() .get(index) .ok_or(INDEX_OUT_OF_BOUND) .and_then(|actual_index| { @@ -87,7 +87,7 @@ impl LoadInput { } } -impl Syscalls for LoadInput { +impl Syscalls for LoadInput
{ fn initialize(&mut self, _machine: &mut Mac) -> Result<(), VMError> { Ok(()) } diff --git a/script/src/syscalls/load_script.rs b/script/src/syscalls/load_script.rs index 6fa9f1f8d9..545e8e7492 100644 --- a/script/src/syscalls/load_script.rs +++ b/script/src/syscalls/load_script.rs @@ -1,21 +1,25 @@ use crate::{ cost_model::transferred_byte_cycles, syscalls::{utils::store_data, LOAD_SCRIPT_SYSCALL_NUMBER, SUCCESS}, + types::{SgData, SgInfo}, }; -use ckb_types::{packed::Script, prelude::*}; +use ckb_types::prelude::*; use ckb_vm::{ registers::{A0, A7}, Error as VMError, Register, SupportMachine, Syscalls, }; +use std::sync::Arc; #[derive(Debug)] pub struct LoadScript { - script: Script, + sg_info: Arc, } impl LoadScript { - pub fn new(script: Script) -> Self { - Self { script } + pub fn new
(sg_data: &SgData
) -> Self { + Self { + sg_info: Arc::clone(&sg_data.sg_info), + } } } @@ -29,7 +33,7 @@ impl Syscalls for LoadScript { return Ok(false); } - let data = self.script.as_slice(); + let data = self.sg_info.script_group.script.as_slice(); let wrote_size = store_data(machine, data)?; machine.add_cycles_no_checking(transferred_byte_cycles(wrote_size))?; diff --git a/script/src/syscalls/load_script_hash.rs b/script/src/syscalls/load_script_hash.rs index d097cf0810..0fe93e2f30 100644 --- a/script/src/syscalls/load_script_hash.rs +++ b/script/src/syscalls/load_script_hash.rs @@ -1,21 +1,24 @@ use crate::{ cost_model::transferred_byte_cycles, syscalls::{utils::store_data, LOAD_SCRIPT_HASH_SYSCALL_NUMBER, SUCCESS}, + types::{SgData, SgInfo}, }; -use ckb_types::packed::Byte32; use ckb_vm::{ registers::{A0, A7}, Error as VMError, Register, SupportMachine, Syscalls, }; +use std::sync::Arc; #[derive(Debug)] pub struct LoadScriptHash { - hash: Byte32, + sg_info: Arc, } impl LoadScriptHash { - pub fn new(hash: Byte32) -> LoadScriptHash { - LoadScriptHash { hash } + pub fn new
(sg_data: &SgData
) -> LoadScriptHash { + LoadScriptHash { + sg_info: Arc::clone(&sg_data.sg_info), + } } } @@ -29,7 +32,7 @@ impl Syscalls for LoadScriptHash { return Ok(false); } - let data = self.hash.as_reader().raw_data(); + let data = self.sg_info.script_hash.as_reader().raw_data(); let wrote_size = store_data(machine, data)?; machine.add_cycles_no_checking(transferred_byte_cycles(wrote_size))?; diff --git a/script/src/syscalls/load_tx.rs b/script/src/syscalls/load_tx.rs index 5e933848fd..d0db1a3ce0 100644 --- a/script/src/syscalls/load_tx.rs +++ b/script/src/syscalls/load_tx.rs @@ -3,6 +3,7 @@ use crate::{ syscalls::{ utils::store_data, LOAD_TRANSACTION_SYSCALL_NUMBER, LOAD_TX_HASH_SYSCALL_NUMBER, SUCCESS, }, + types::SgData, }; use ckb_types::{core::cell::ResolvedTransaction, prelude::*}; use ckb_vm::{ @@ -17,8 +18,10 @@ pub struct LoadTx { } impl LoadTx { - pub fn new(rtx: Arc) -> LoadTx { - LoadTx { rtx } + pub fn new
(sg_data: &SgData
) -> LoadTx { + LoadTx { + rtx: Arc::clone(&sg_data.rtx), + } } } diff --git a/script/src/syscalls/load_witness.rs b/script/src/syscalls/load_witness.rs index b5039be5c4..3ff4863199 100644 --- a/script/src/syscalls/load_witness.rs +++ b/script/src/syscalls/load_witness.rs @@ -4,51 +4,41 @@ use crate::{ utils::store_data, Source, SourceEntry, INDEX_OUT_OF_BOUND, LOAD_WITNESS_SYSCALL_NUMBER, SUCCESS, }, - types::Indices, -}; -use ckb_types::{ - core::cell::ResolvedTransaction, - packed::{Bytes, BytesVec}, + types::SgData, }; +use ckb_types::packed::{Bytes, BytesVec}; use ckb_vm::{ registers::{A0, A3, A4, A7}, Error as VMError, Register, SupportMachine, Syscalls, }; -use std::sync::Arc; #[derive(Debug)] -pub struct LoadWitness { - rtx: Arc, - group_inputs: Indices, - group_outputs: Indices, +pub struct LoadWitness
{ + sg_data: SgData
, } -impl LoadWitness { - pub fn new( - rtx: Arc, - group_inputs: Indices, - group_outputs: Indices, - ) -> LoadWitness { +impl LoadWitness
{ + pub fn new(sg_data: &SgData
) -> Self { LoadWitness { - rtx, - group_inputs, - group_outputs, + sg_data: sg_data.clone(), } } #[inline] fn witnesses(&self) -> BytesVec { - self.rtx.transaction.witnesses() + self.sg_data.rtx.transaction.witnesses() } fn fetch_witness(&self, source: Source, index: usize) -> Option { match source { Source::Group(SourceEntry::Input) => self - .group_inputs + .sg_data + .group_inputs() .get(index) .and_then(|actual_index| self.witnesses().get(*actual_index)), Source::Group(SourceEntry::Output) => self - .group_outputs + .sg_data + .group_outputs() .get(index) .and_then(|actual_index| self.witnesses().get(*actual_index)), Source::Transaction(SourceEntry::Input) => self.witnesses().get(index), @@ -58,7 +48,7 @@ impl LoadWitness { } } -impl Syscalls for LoadWitness { +impl Syscalls for LoadWitness
{ fn initialize(&mut self, _machine: &mut Mac) -> Result<(), VMError> { Ok(()) } diff --git a/script/src/syscalls/mod.rs b/script/src/syscalls/mod.rs index 24a4ddcef3..a0ed55c168 100644 --- a/script/src/syscalls/mod.rs +++ b/script/src/syscalls/mod.rs @@ -28,6 +28,8 @@ mod pause; #[cfg(test)] mod tests; +pub mod generator; + pub use self::close::Close; pub use self::current_cycles::CurrentCycles; pub use self::debugger::Debugger; diff --git a/script/src/syscalls/pipe.rs b/script/src/syscalls/pipe.rs index 3bb61ba22d..db7004b548 100644 --- a/script/src/syscalls/pipe.rs +++ b/script/src/syscalls/pipe.rs @@ -1,5 +1,6 @@ use crate::syscalls::{PIPE, SPAWN_YIELD_CYCLES_BASE}; -use crate::types::{Message, PipeArgs, VmId}; +use crate::types::{Message, PipeArgs, VmContext, VmId}; +use ckb_traits::{CellDataProvider, ExtensionProvider, HeaderProvider}; use ckb_vm::{ registers::{A0, A7}, Error as VMError, Register, SupportMachine, Syscalls, @@ -13,8 +14,14 @@ pub struct Pipe { } impl Pipe { - pub fn new(id: VmId, message_box: Arc>>) -> Self { - Self { id, message_box } + pub fn new
(vm_id: &VmId, vm_context: &VmContext
) -> Self + where + DL: CellDataProvider + HeaderProvider + ExtensionProvider + Send + Sync + Clone + 'static, + { + Self { + id: *vm_id, + message_box: Arc::clone(&vm_context.message_box), + } } } diff --git a/script/src/syscalls/process_id.rs b/script/src/syscalls/process_id.rs index 55114ae18a..37b4df21e3 100644 --- a/script/src/syscalls/process_id.rs +++ b/script/src/syscalls/process_id.rs @@ -1,4 +1,5 @@ use crate::syscalls::PROCESS_ID; +use crate::types::VmId; use ckb_vm::{ registers::{A0, A7}, Error as VMError, Register, SupportMachine, Syscalls, @@ -10,8 +11,8 @@ pub struct ProcessID { } impl ProcessID { - pub fn new(id: u64) -> Self { - Self { id } + pub fn new(vm_id: &VmId) -> Self { + Self { id: *vm_id } } } diff --git a/script/src/syscalls/read.rs b/script/src/syscalls/read.rs index 63976ef096..7c96c7125b 100644 --- a/script/src/syscalls/read.rs +++ b/script/src/syscalls/read.rs @@ -1,5 +1,6 @@ use crate::syscalls::{INVALID_FD, READ, SPAWN_YIELD_CYCLES_BASE}; -use crate::types::{Fd, FdArgs, Message, VmId}; +use crate::types::{Fd, FdArgs, Message, VmContext, VmId}; +use ckb_traits::{CellDataProvider, ExtensionProvider, HeaderProvider}; use ckb_vm::{ registers::{A0, A1, A2, A7}, Error as VMError, Memory, Register, SupportMachine, Syscalls, @@ -13,8 +14,14 @@ pub struct Read { } impl Read { - pub fn new(id: VmId, message_box: Arc>>) -> Self { - Self { id, message_box } + pub fn new
(vm_id: &VmId, vm_context: &VmContext
) -> Self + where + DL: CellDataProvider + HeaderProvider + ExtensionProvider + Send + Sync + Clone + 'static, + { + Self { + id: *vm_id, + message_box: Arc::clone(&vm_context.message_box), + } } } diff --git a/script/src/syscalls/spawn.rs b/script/src/syscalls/spawn.rs index f4c7e82b87..a90ed3082a 100644 --- a/script/src/syscalls/spawn.rs +++ b/script/src/syscalls/spawn.rs @@ -2,7 +2,7 @@ use crate::syscalls::{ Source, INDEX_OUT_OF_BOUND, SLICE_OUT_OF_BOUND, SOURCE_ENTRY_MASK, SOURCE_GROUP_FLAG, SPAWN, SPAWN_EXTRA_CYCLES_BASE, SPAWN_YIELD_CYCLES_BASE, }; -use crate::types::{DataLocation, DataPieceId, Fd, Message, SpawnArgs, TxData, VmId}; +use crate::types::{DataLocation, DataPieceId, Fd, Message, SgData, SpawnArgs, VmContext, VmId}; use ckb_traits::{CellDataProvider, ExtensionProvider, HeaderProvider}; use ckb_vm::{ machine::SupportMachine, @@ -20,22 +20,18 @@ where { id: VmId, message_box: Arc>>, - snapshot2_context: Arc>>>, + snapshot2_context: Arc>>>, } impl
Spawn
where DL: CellDataProvider + HeaderProvider + ExtensionProvider + Send + Sync + Clone + 'static, { - pub fn new( - id: VmId, - message_box: Arc>>, - snapshot2_context: Arc>>>, - ) -> Self { + pub fn new(vm_id: &VmId, vm_context: &VmContext
) -> Self { Self { - id, - message_box, - snapshot2_context, + id: *vm_id, + message_box: Arc::clone(&vm_context.message_box), + snapshot2_context: Arc::clone(&vm_context.snapshot2_context), } } } diff --git a/script/src/syscalls/tests/utils.rs b/script/src/syscalls/tests/utils.rs index de9ca6f542..4f2da451e9 100644 --- a/script/src/syscalls/tests/utils.rs +++ b/script/src/syscalls/tests/utils.rs @@ -1,11 +1,22 @@ +use crate::{ + types::{ + DataPieceId, ScriptGroup, ScriptGroupType, ScriptVersion, SgData, SgInfo, TxData, TxInfo, + }, + verify_env::TxVerifyEnv, +}; +use ckb_chain_spec::consensus::ConsensusBuilder; use ckb_traits::{CellDataProvider, ExtensionProvider, HeaderProvider}; use ckb_types::{ bytes::Bytes, - core::{cell::CellMeta, Capacity, HeaderView}, - packed::{self, Byte32, CellOutput, OutPoint}, + core::{ + cell::{CellMeta, ResolvedTransaction}, + Capacity, HeaderBuilder, HeaderView, + }, + packed::{self, Byte32, CellOutput, OutPoint, Script}, prelude::*, }; -use std::collections::HashMap; +use std::collections::{BTreeMap, HashMap}; +use std::sync::Arc; #[derive(Default, Clone)] pub(crate) struct MockDataLoader { @@ -52,3 +63,79 @@ pub(crate) fn build_cell_meta(capacity_bytes: usize, data: Bytes) -> CellMeta { mem_cell_data_hash: Some(data_hash), } } + +fn build_tx_data_with_loader( + rtx: Arc, + data_loader: MockDataLoader, +) -> TxData { + let consensus = ConsensusBuilder::default().build(); + let tx_env = TxVerifyEnv::new_commit(&HeaderBuilder::default().build()); + + TxData { + rtx, + info: Arc::new(TxInfo { + data_loader, + consensus: Arc::new(consensus), + tx_env: Arc::new(tx_env), + binaries_by_data_hash: HashMap::default(), + binaries_by_type_hash: HashMap::default(), + lock_groups: BTreeMap::default(), + type_groups: BTreeMap::default(), + outputs: Vec::new(), + }), + } +} + +pub(crate) fn build_sg_data( + rtx: Arc, + input_indices: Vec, + output_indices: Vec, +) -> SgData { + build_sg_data_with_loader(rtx, new_mock_data_loader(), 
input_indices, output_indices) +} + +pub(crate) fn build_sg_data_with_loader( + rtx: Arc, + data_loader: MockDataLoader, + input_indices: Vec, + output_indices: Vec, +) -> SgData { + let tx_data = build_tx_data_with_loader(rtx, data_loader); + let script_group = ScriptGroup { + script: Script::default(), + group_type: ScriptGroupType::Lock, + input_indices, + output_indices, + }; + let script_hash = script_group.script.calc_script_hash(); + SgData { + rtx: tx_data.rtx, + tx_info: tx_data.info, + sg_info: Arc::new(SgInfo { + script_version: ScriptVersion::latest(), + script_group, + script_hash, + program_data_piece_id: DataPieceId::CellDep(0), + }), + } +} + +pub(crate) fn update_tx_info)>( + mut sg_data: SgData, + f: F, +) -> SgData { + let mut tx_info = sg_data.tx_info.as_ref().clone(); + f(&mut tx_info); + sg_data.tx_info = Arc::new(tx_info); + sg_data +} + +pub(crate) fn update_sg_info( + mut sg_data: SgData, + f: F, +) -> SgData { + let mut sg_info = sg_data.sg_info.as_ref().clone(); + f(&mut sg_info); + sg_data.sg_info = Arc::new(sg_info); + sg_data +} diff --git a/script/src/syscalls/tests/vm_latest/syscalls_1.rs b/script/src/syscalls/tests/vm_latest/syscalls_1.rs index f7ca26ecac..98f7843dcc 100644 --- a/script/src/syscalls/tests/vm_latest/syscalls_1.rs +++ b/script/src/syscalls/tests/vm_latest/syscalls_1.rs @@ -1,3 +1,4 @@ +use crate::types::VmContext; use byteorder::{ByteOrder, LittleEndian, WriteBytesExt}; use ckb_hash::blake2b_256; use ckb_types::{ @@ -14,7 +15,6 @@ use ckb_types::{ use ckb_vm::{ memory::{FLAG_DIRTY, FLAG_EXECUTABLE, FLAG_FREEZED, FLAG_WRITABLE}, registers::{A0, A1, A2, A3, A4, A5, A7}, - snapshot2::Snapshot2Context, CoreMachine, Error as VMError, Memory, Syscalls, RISCV_PAGESIZE, }; use proptest::{collection::size_range, prelude::*}; @@ -23,8 +23,6 @@ use std::sync::{Arc, Mutex}; use super::SCRIPT_VERSION; use crate::syscalls::{tests::utils::*, *}; -use crate::types::TxData; -use crate::types::{ScriptGroup, ScriptGroupType}; fn 
_test_load_cell_not_exist(data: &[u8]) -> Result<(), TestCaseError> { let mut machine = SCRIPT_VERSION.init_core_machine_without_limit(); @@ -47,10 +45,6 @@ fn _test_load_cell_not_exist(data: &[u8]) -> Result<(), TestCaseError> { let output = build_cell_meta(100, output_cell_data); let input_cell_data: Bytes = data.iter().rev().cloned().collect(); let input_cell = build_cell_meta(100, input_cell_data); - let outputs = Arc::new(vec![output]); - let group_inputs = Arc::new(vec![]); - let group_outputs = Arc::new(vec![]); - let data_loader = new_mock_data_loader(); let rtx = Arc::new(ResolvedTransaction { transaction: TransactionBuilder::default().build(), @@ -59,7 +53,10 @@ fn _test_load_cell_not_exist(data: &[u8]) -> Result<(), TestCaseError> { resolved_dep_groups: vec![], }); - let mut load_cell = LoadCell::new(data_loader, rtx, outputs, group_inputs, group_outputs); + let sg_data = build_sg_data(rtx, vec![], vec![]); + let sg_data = update_tx_info(sg_data, |tx_info| tx_info.outputs = vec![output.clone()]); + + let mut load_cell = LoadCell::new(&sg_data); prop_assert!(load_cell.ecall(&mut machine).is_ok()); prop_assert_eq!(machine.registers()[A0], u64::from(INDEX_OUT_OF_BOUND)); @@ -89,10 +86,6 @@ fn _test_load_cell_all(data: &[u8]) -> Result<(), TestCaseError> { let output = build_cell_meta(100, output_cell_data); let input_cell_data: Bytes = data.iter().rev().cloned().collect(); let input_cell = build_cell_meta(100, input_cell_data); - let outputs = Arc::new(vec![output.clone()]); - let group_inputs = Arc::new(vec![]); - let group_outputs = Arc::new(vec![]); - let data_loader = new_mock_data_loader(); let rtx = Arc::new(ResolvedTransaction { transaction: TransactionBuilder::default().build(), @@ -101,7 +94,10 @@ fn _test_load_cell_all(data: &[u8]) -> Result<(), TestCaseError> { resolved_dep_groups: vec![], }); - let mut load_cell = LoadCell::new(data_loader, rtx, outputs, group_inputs, group_outputs); + let sg_data = build_sg_data(rtx, vec![], vec![]); + let 
sg_data = update_tx_info(sg_data, |tx_info| tx_info.outputs = vec![output.clone()]); + + let mut load_cell = LoadCell::new(&sg_data); let input_correct_data = input_cell.cell_output.as_slice(); let output_correct_data = output.cell_output.as_slice(); @@ -179,10 +175,6 @@ fn _test_load_cell_from_group(data: &[u8], source: SourceEntry) -> Result<(), Te let output = build_cell_meta(100, output_cell_data); let input_cell_data: Bytes = data.iter().rev().cloned().collect(); let input_cell = build_cell_meta(100, input_cell_data); - let outputs = Arc::new(vec![output.clone()]); - let group_inputs = Arc::new(vec![0]); - let group_outputs = Arc::new(vec![0]); - let data_loader = new_mock_data_loader(); let rtx = Arc::new(ResolvedTransaction { transaction: TransactionBuilder::default().build(), @@ -191,7 +183,10 @@ fn _test_load_cell_from_group(data: &[u8], source: SourceEntry) -> Result<(), Te resolved_dep_groups: vec![], }); - let mut load_cell = LoadCell::new(data_loader, rtx, outputs, group_inputs, group_outputs); + let sg_data = build_sg_data(rtx, vec![0], vec![0]); + let sg_data = update_tx_info(sg_data, |tx_info| tx_info.outputs = vec![output.clone()]); + + let mut load_cell = LoadCell::new(&sg_data); let input_correct_data = input_cell.cell_output.as_slice(); let output_correct_data = output.cell_output.as_slice(); @@ -267,11 +262,6 @@ fn _test_load_cell_out_of_bound(index: u64, source: u64) -> Result<(), TestCaseE let input_cell = build_cell_meta(100, data); - let outputs = Arc::new(vec![output]); - let group_inputs = Arc::new(vec![0]); - let group_outputs = Arc::new(vec![0]); - let data_loader = new_mock_data_loader(); - let rtx = Arc::new(ResolvedTransaction { transaction: TransactionBuilder::default().build(), resolved_cell_deps: vec![], @@ -279,7 +269,11 @@ fn _test_load_cell_out_of_bound(index: u64, source: u64) -> Result<(), TestCaseE resolved_dep_groups: vec![], }); - let mut load_cell = LoadCell::new(data_loader, rtx, outputs, group_inputs, group_outputs); + 
let sg_data = build_sg_data(rtx, vec![0], vec![0]); + let sg_data = update_tx_info(sg_data, |tx_info| tx_info.outputs = vec![output.clone()]); + + let mut load_cell = LoadCell::new(&sg_data); + prop_assert!(load_cell.ecall(&mut machine).is_ok()); prop_assert_eq!(machine.registers()[A0], u64::from(INDEX_OUT_OF_BOUND)); Ok(()) @@ -325,11 +319,6 @@ fn _test_load_cell_length(data: &[u8]) -> Result<(), TestCaseError> { let input_cell_data: Bytes = data.iter().rev().cloned().collect(); let input_cell = build_cell_meta(100, input_cell_data); - let outputs = Arc::new(vec![output]); - let group_inputs = Arc::new(vec![]); - let group_outputs = Arc::new(vec![]); - let data_loader = new_mock_data_loader(); - let rtx = Arc::new(ResolvedTransaction { transaction: TransactionBuilder::default().build(), resolved_cell_deps: vec![], @@ -337,7 +326,10 @@ fn _test_load_cell_length(data: &[u8]) -> Result<(), TestCaseError> { resolved_dep_groups: vec![], }); - let mut load_cell = LoadCell::new(data_loader, rtx, outputs, group_inputs, group_outputs); + let sg_data = build_sg_data(rtx, vec![], vec![]); + let sg_data = update_tx_info(sg_data, |tx_info| tx_info.outputs = vec![output.clone()]); + + let mut load_cell = LoadCell::new(&sg_data); let input_correct_data = input_cell.cell_output.as_slice(); @@ -378,11 +370,6 @@ fn _test_load_cell_partial(data: &[u8], offset: u64) -> Result<(), TestCaseError let input_cell_data: Bytes = data.iter().rev().cloned().collect(); let input_cell = build_cell_meta(100, input_cell_data); - let outputs = Arc::new(vec![output]); - let group_inputs = Arc::new(vec![]); - let group_outputs = Arc::new(vec![]); - let data_loader = new_mock_data_loader(); - let rtx = Arc::new(ResolvedTransaction { transaction: TransactionBuilder::default().build(), resolved_cell_deps: vec![], @@ -390,7 +377,10 @@ fn _test_load_cell_partial(data: &[u8], offset: u64) -> Result<(), TestCaseError resolved_dep_groups: vec![], }); - let mut load_cell = LoadCell::new(data_loader, rtx, 
outputs, group_inputs, group_outputs); + let sg_data = build_sg_data(rtx, vec![], vec![]); + let sg_data = update_tx_info(sg_data, |tx_info| tx_info.outputs = vec![output.clone()]); + + let mut load_cell = LoadCell::new(&sg_data); let input_correct_data = input_cell.cell_output.as_slice(); @@ -444,11 +434,6 @@ fn _test_load_cell_capacity(capacity: Capacity) -> Result<(), TestCaseError> { mem_cell_data_hash: Some(data_hash), }; - let outputs = Arc::new(vec![]); - let group_inputs = Arc::new(vec![]); - let group_outputs = Arc::new(vec![]); - let data_loader = new_mock_data_loader(); - let rtx = Arc::new(ResolvedTransaction { transaction: TransactionBuilder::default().build(), resolved_cell_deps: vec![], @@ -456,7 +441,9 @@ fn _test_load_cell_capacity(capacity: Capacity) -> Result<(), TestCaseError> { resolved_dep_groups: vec![], }); - let mut load_cell = LoadCell::new(data_loader, rtx, outputs, group_inputs, group_outputs); + let sg_data = build_sg_data(rtx, vec![], vec![]); + + let mut load_cell = LoadCell::new(&sg_data); prop_assert!(machine.memory_mut().store64(&size_addr, &16).is_ok()); @@ -505,11 +492,6 @@ fn _test_load_cell_occupied_capacity(data: &[u8]) -> Result<(), TestCaseError> { mem_cell_data_hash: Some(data_hash), }; - let outputs = Arc::new(vec![]); - let group_inputs = Arc::new(vec![]); - let group_outputs = Arc::new(vec![]); - let data_loader = new_mock_data_loader(); - let rtx = Arc::new(ResolvedTransaction { transaction: TransactionBuilder::default().build(), resolved_cell_deps: vec![], @@ -517,7 +499,9 @@ fn _test_load_cell_occupied_capacity(data: &[u8]) -> Result<(), TestCaseError> { resolved_dep_groups: vec![], }); - let mut load_cell = LoadCell::new(data_loader, rtx, outputs, group_inputs, group_outputs); + let sg_data = build_sg_data(rtx, vec![], vec![]); + + let mut load_cell = LoadCell::new(&sg_data); prop_assert!(machine.memory_mut().store64(&size_addr, &16).is_ok()); @@ -566,10 +550,6 @@ fn test_load_missing_data_hash() { mem_cell_data: 
None, mem_cell_data_hash: None, }; - let outputs = Arc::new(vec![]); - let group_inputs = Arc::new(vec![]); - let group_outputs = Arc::new(vec![]); - let data_loader = new_mock_data_loader(); let rtx = Arc::new(ResolvedTransaction { transaction: TransactionBuilder::default().build(), @@ -578,7 +558,9 @@ fn test_load_missing_data_hash() { resolved_dep_groups: vec![], }); - let mut load_cell = LoadCell::new(data_loader, rtx, outputs, group_inputs, group_outputs); + let sg_data = build_sg_data(rtx, vec![], vec![]); + + let mut load_cell = LoadCell::new(&sg_data); assert!(machine.memory_mut().store64(&size_addr, &100).is_ok()); @@ -612,10 +594,6 @@ fn _test_load_missing_contract(field: CellField) { machine.set_register(A7, LOAD_CELL_BY_FIELD_SYSCALL_NUMBER); // syscall number let output_cell = build_cell_meta(100, Bytes::new()); - let outputs = Arc::new(vec![output_cell]); - let group_inputs = Arc::new(vec![]); - let group_outputs = Arc::new(vec![]); - let data_loader = new_mock_data_loader(); let rtx = Arc::new(ResolvedTransaction { transaction: TransactionBuilder::default().build(), @@ -624,7 +602,12 @@ fn _test_load_missing_contract(field: CellField) { resolved_dep_groups: vec![], }); - let mut load_cell = LoadCell::new(data_loader, rtx, outputs, group_inputs, group_outputs); + let sg_data = build_sg_data(rtx, vec![], vec![]); + let sg_data = update_tx_info(sg_data, |tx_info| { + tx_info.outputs = vec![output_cell.clone()] + }); + + let mut load_cell = LoadCell::new(&sg_data); assert!(machine.memory_mut().store64(&size_addr, &100).is_ok()); @@ -683,7 +666,6 @@ fn _test_load_header( headers, ..Default::default() }; - let group_inputs = Arc::new(vec![0]); let rtx = Arc::new(ResolvedTransaction { transaction: TransactionBuilder::default() @@ -694,7 +676,9 @@ fn _test_load_header( resolved_dep_groups: vec![], }); - let mut load_header = LoadHeader::new(data_loader, rtx, group_inputs); + let sg_data = build_sg_data_with_loader(rtx, data_loader, vec![0], vec![]); + + let 
mut load_header = LoadHeader::new(&sg_data); prop_assert!(machine .memory_mut() @@ -800,7 +784,6 @@ fn _test_load_header_by_field(data: &[u8], field: HeaderField) -> Result<(), Tes headers, ..Default::default() }; - let group_inputs = Arc::new(vec![]); let rtx = Arc::new(ResolvedTransaction { transaction: TransactionBuilder::default() .header_dep(header.hash()) @@ -810,7 +793,9 @@ fn _test_load_header_by_field(data: &[u8], field: HeaderField) -> Result<(), Tes resolved_dep_groups: vec![], }); - let mut load_header = LoadHeader::new(data_loader, rtx, group_inputs); + let sg_data = build_sg_data_with_loader(rtx, data_loader, vec![], vec![]); + + let mut load_header = LoadHeader::new(&sg_data); prop_assert!(machine .memory_mut() @@ -860,7 +845,9 @@ fn _test_load_tx_hash(data: &[u8]) -> Result<(), TestCaseError> { resolved_dep_groups: vec![], }); - let mut load_tx = LoadTx::new(rtx); + let sg_data = build_sg_data(rtx, vec![], vec![]); + + let mut load_tx = LoadTx::new(&sg_data); prop_assert!(machine .memory_mut() @@ -911,7 +898,9 @@ fn _test_load_tx(data: &[u8]) -> Result<(), TestCaseError> { resolved_dep_groups: vec![], }); - let mut load_tx = LoadTx::new(rtx); + let sg_data = build_sg_data(rtx, vec![], vec![]); + + let mut load_tx = LoadTx::new(&sg_data); prop_assert!(machine .memory_mut() @@ -955,7 +944,22 @@ fn _test_load_current_script_hash(data: &[u8]) -> Result<(), TestCaseError> { .build(); let hash = script.calc_script_hash(); let data = hash.raw_data(); - let mut load_script_hash = LoadScriptHash::new(hash); + + let rtx = Arc::new(ResolvedTransaction { + transaction: TransactionBuilder::default().build(), + resolved_cell_deps: vec![], + resolved_inputs: vec![], + resolved_dep_groups: vec![], + }); + + let sg_data = build_sg_data(rtx, vec![], vec![]); + // Swap the internal script in VmData + let sg_data = update_sg_info(sg_data, |sg_info| { + sg_info.script_hash = script.calc_script_hash(); + sg_info.script_group.script = script.clone(); + }); + + let mut 
load_script_hash = LoadScriptHash::new(&sg_data); prop_assert!(machine.memory_mut().store64(&size_addr, &64).is_ok()); @@ -1020,11 +1024,6 @@ fn _test_load_input_lock_script_hash(data: &[u8]) -> Result<(), TestCaseError> { .build(); input_cell.cell_output = output_with_lock; - let outputs = Arc::new(vec![]); - let group_inputs = Arc::new(vec![]); - let group_outputs = Arc::new(vec![]); - let data_loader = new_mock_data_loader(); - let rtx = Arc::new(ResolvedTransaction { transaction: TransactionBuilder::default().build(), resolved_cell_deps: vec![], @@ -1032,7 +1031,9 @@ fn _test_load_input_lock_script_hash(data: &[u8]) -> Result<(), TestCaseError> { resolved_dep_groups: vec![], }); - let mut load_cell = LoadCell::new(data_loader, rtx, outputs, group_inputs, group_outputs); + let sg_data = build_sg_data(rtx, vec![], vec![]); + + let mut load_cell = LoadCell::new(&sg_data); prop_assert!(machine.memory_mut().store64(&size_addr, &64).is_ok()); @@ -1085,11 +1086,6 @@ fn _test_load_input_lock_script(data: &[u8]) -> Result<(), TestCaseError> { .build(); input_cell.cell_output = output_with_lock; - let outputs = Arc::new(vec![]); - let group_inputs = Arc::new(vec![]); - let group_outputs = Arc::new(vec![]); - let data_loader = new_mock_data_loader(); - let rtx = Arc::new(ResolvedTransaction { transaction: TransactionBuilder::default().build(), resolved_cell_deps: vec![], @@ -1097,7 +1093,9 @@ fn _test_load_input_lock_script(data: &[u8]) -> Result<(), TestCaseError> { resolved_dep_groups: vec![], }); - let mut load_cell = LoadCell::new(data_loader, rtx, outputs, group_inputs, group_outputs); + let sg_data = build_sg_data(rtx, vec![], vec![]); + + let mut load_cell = LoadCell::new(&sg_data); prop_assert!(machine .memory_mut() @@ -1153,11 +1151,6 @@ fn _test_load_input_type_script(data: &[u8]) -> Result<(), TestCaseError> { .build(); input_cell.cell_output = output_with_type; - let outputs = Arc::new(vec![]); - let group_inputs = Arc::new(vec![]); - let group_outputs = 
Arc::new(vec![]); - let data_loader = new_mock_data_loader(); - let rtx = Arc::new(ResolvedTransaction { transaction: TransactionBuilder::default().build(), resolved_cell_deps: vec![], @@ -1165,7 +1158,9 @@ fn _test_load_input_type_script(data: &[u8]) -> Result<(), TestCaseError> { resolved_dep_groups: vec![], }); - let mut load_cell = LoadCell::new(data_loader, rtx, outputs, group_inputs, group_outputs); + let sg_data = build_sg_data(rtx, vec![], vec![]); + + let mut load_cell = LoadCell::new(&sg_data); prop_assert!(machine .memory_mut() @@ -1222,10 +1217,6 @@ fn _test_load_input_type_script_hash(data: &[u8]) -> Result<(), TestCaseError> { .type_(Some(script).pack()) .build(); input_cell.cell_output = output_with_type; - let outputs = Arc::new(vec![]); - let group_inputs = Arc::new(vec![]); - let group_outputs = Arc::new(vec![]); - let data_loader = new_mock_data_loader(); let rtx = Arc::new(ResolvedTransaction { transaction: TransactionBuilder::default().build(), @@ -1234,7 +1225,9 @@ fn _test_load_input_type_script_hash(data: &[u8]) -> Result<(), TestCaseError> { resolved_dep_groups: vec![], }); - let mut load_cell = LoadCell::new(data_loader, rtx, outputs, group_inputs, group_outputs); + let sg_data = build_sg_data(rtx, vec![], vec![]); + + let mut load_cell = LoadCell::new(&sg_data); prop_assert!(machine .memory_mut() @@ -1279,8 +1272,6 @@ fn _test_load_witness(data: &[u8], source: SourceEntry) -> Result<(), TestCaseEr let witness_correct_data = witness.raw_data(); let witnesses = vec![witness]; - let group_inputs = Arc::new(vec![]); - let group_outputs = Arc::new(vec![]); let rtx = Arc::new(ResolvedTransaction { transaction: TransactionBuilder::default() .witnesses(witnesses.pack()) @@ -1290,7 +1281,9 @@ fn _test_load_witness(data: &[u8], source: SourceEntry) -> Result<(), TestCaseEr resolved_dep_groups: vec![], }); - let mut load_witness = LoadWitness::new(rtx, group_inputs, group_outputs); + let sg_data = build_sg_data(rtx, vec![], vec![]); + + let mut 
load_witness = LoadWitness::new(&sg_data); prop_assert!(machine .memory_mut() @@ -1344,8 +1337,6 @@ fn _test_load_group_witness(data: &[u8], source: SourceEntry) -> Result<(), Test let dummy_witness = Bytes::default().pack(); let witnesses = vec![dummy_witness, witness]; - let group_inputs = Arc::new(vec![1]); - let group_outputs = Arc::new(vec![1]); let rtx = Arc::new(ResolvedTransaction { transaction: TransactionBuilder::default() .witnesses(witnesses.pack()) @@ -1354,7 +1345,10 @@ fn _test_load_group_witness(data: &[u8], source: SourceEntry) -> Result<(), Test resolved_inputs: vec![], resolved_dep_groups: vec![], }); - let mut load_witness = LoadWitness::new(rtx, group_inputs, group_outputs); + + let sg_data = build_sg_data(rtx, vec![1], vec![1]); + + let mut load_witness = LoadWitness::new(&sg_data); prop_assert!(machine .memory_mut() @@ -1401,7 +1395,21 @@ fn _test_load_script(data: &[u8]) -> Result<(), TestCaseError> { .build(); let script_correct_data = script.as_slice(); - let mut load_script = LoadScript::new(script.clone()); + let rtx = Arc::new(ResolvedTransaction { + transaction: TransactionBuilder::default().build(), + resolved_cell_deps: vec![], + resolved_inputs: vec![], + resolved_dep_groups: vec![], + }); + + let sg_data = build_sg_data(rtx, vec![], vec![]); + // Swap the internal script in VmData + let sg_data = update_sg_info(sg_data, |sg_info| { + sg_info.script_hash = script.calc_script_hash(); + sg_info.script_group.script = script.clone(); + }); + + let mut load_script = LoadScript::new(&sg_data); prop_assert!(machine .memory_mut() @@ -1455,7 +1463,6 @@ fn _test_load_cell_data_as_code( let dep_cell = build_cell_meta(10000, data.clone()); let input_cell = build_cell_meta(100, data.clone()); - let data_loader = new_mock_data_loader(); let rtx = Arc::new(ResolvedTransaction { transaction: TransactionBuilder::default() .output_data(data.pack()) @@ -1464,17 +1471,12 @@ fn _test_load_cell_data_as_code( resolved_inputs: vec![input_cell], 
resolved_dep_groups: vec![], }); - let mut load_code = LoadCellData::new(Arc::new(Mutex::new(Snapshot2Context::new(TxData { - rtx, - data_loader, - program: Bytes::new(), - script_group: Arc::new(ScriptGroup { - script: Default::default(), - group_type: ScriptGroupType::Lock, - input_indices: vec![0], - output_indices: vec![0], - }), - })))); + + let sg_data = build_sg_data(rtx, vec![0], vec![0]); + + let vm_context = VmContext::new(&sg_data, &Arc::new(Mutex::new(Vec::new()))); + + let mut load_code = LoadCellData::new(&vm_context); prop_assert!(machine.memory_mut().store_byte(addr, addr_size, 1).is_ok()); @@ -1527,7 +1529,6 @@ fn _test_load_cell_data( let data = Bytes::from(data.to_owned()); let dep_cell = build_cell_meta(10000, data.clone()); let input_cell = build_cell_meta(100, data.clone()); - let data_loader = new_mock_data_loader(); let rtx = Arc::new(ResolvedTransaction { transaction: TransactionBuilder::default() @@ -1537,17 +1538,12 @@ fn _test_load_cell_data( resolved_inputs: vec![input_cell], resolved_dep_groups: vec![], }); - let mut load_code = LoadCellData::new(Arc::new(Mutex::new(Snapshot2Context::new(TxData { - rtx, - data_loader, - program: Bytes::new(), - script_group: Arc::new(ScriptGroup { - script: Default::default(), - group_type: ScriptGroupType::Lock, - input_indices: vec![0], - output_indices: vec![0], - }), - })))); + + let sg_data = build_sg_data(rtx, vec![0], vec![0]); + + let vm_context = VmContext::new(&sg_data, &Arc::new(Mutex::new(Vec::new()))); + + let mut load_code = LoadCellData::new(&vm_context); prop_assert!(load_code.ecall(&mut machine).is_ok()); @@ -1638,8 +1634,6 @@ fn test_load_overflowed_cell_data_as_code() { let dep_cell_data = Bytes::from(data); let dep_cell = build_cell_meta(10000, dep_cell_data); - let data_loader = new_mock_data_loader(); - let rtx = Arc::new(ResolvedTransaction { transaction: TransactionBuilder::default().build(), resolved_cell_deps: vec![dep_cell], @@ -1647,17 +1641,11 @@ fn 
test_load_overflowed_cell_data_as_code() { resolved_dep_groups: vec![], }); - let mut load_code = LoadCellData::new(Arc::new(Mutex::new(Snapshot2Context::new(TxData { - rtx, - data_loader, - program: Bytes::new(), - script_group: Arc::new(ScriptGroup { - script: Default::default(), - group_type: ScriptGroupType::Lock, - input_indices: Default::default(), - output_indices: Default::default(), - }), - })))); + let sg_data = build_sg_data(rtx, vec![], vec![]); + + let vm_context = VmContext::new(&sg_data, &Arc::new(Mutex::new(Vec::new()))); + + let mut load_code = LoadCellData::new(&vm_context); assert!(machine.memory_mut().store_byte(addr, addr_size, 1).is_ok()); @@ -1690,8 +1678,6 @@ fn _test_load_cell_data_on_freezed_memory(data: &[u8]) -> Result<(), TestCaseErr let dep_cell_data = Bytes::from(data.to_owned()); let dep_cell = build_cell_meta(10000, dep_cell_data); - let data_loader = new_mock_data_loader(); - let rtx = Arc::new(ResolvedTransaction { transaction: TransactionBuilder::default().build(), resolved_cell_deps: vec![dep_cell], @@ -1699,17 +1685,11 @@ fn _test_load_cell_data_on_freezed_memory(data: &[u8]) -> Result<(), TestCaseErr resolved_dep_groups: vec![], }); - let mut load_code = LoadCellData::new(Arc::new(Mutex::new(Snapshot2Context::new(TxData { - rtx, - data_loader, - program: Bytes::new(), - script_group: Arc::new(ScriptGroup { - script: Default::default(), - group_type: ScriptGroupType::Lock, - input_indices: Default::default(), - output_indices: Default::default(), - }), - })))); + let sg_data = build_sg_data(rtx, vec![], vec![]); + + let vm_context = VmContext::new(&sg_data, &Arc::new(Mutex::new(Vec::new()))); + + let mut load_code = LoadCellData::new(&vm_context); prop_assert!(load_code.ecall(&mut machine).is_err()); @@ -1740,8 +1720,6 @@ fn _test_load_cell_data_as_code_on_freezed_memory(data: &[u8]) -> Result<(), Tes let dep_cell_data = Bytes::from(data.to_owned()); let dep_cell = build_cell_meta(10000, dep_cell_data); - let data_loader = 
new_mock_data_loader(); - let rtx = Arc::new(ResolvedTransaction { transaction: TransactionBuilder::default().build(), resolved_cell_deps: vec![dep_cell], @@ -1749,17 +1727,11 @@ fn _test_load_cell_data_as_code_on_freezed_memory(data: &[u8]) -> Result<(), Tes resolved_dep_groups: vec![], }); - let mut load_code = LoadCellData::new(Arc::new(Mutex::new(Snapshot2Context::new(TxData { - rtx, - data_loader, - program: Bytes::new(), - script_group: Arc::new(ScriptGroup { - script: Default::default(), - group_type: ScriptGroupType::Lock, - input_indices: Default::default(), - output_indices: Default::default(), - }), - })))); + let sg_data = build_sg_data(rtx, vec![], vec![]); + + let vm_context = VmContext::new(&sg_data, &Arc::new(Mutex::new(Vec::new()))); + + let mut load_code = LoadCellData::new(&vm_context); prop_assert!(load_code.ecall(&mut machine).is_err()); @@ -1801,8 +1773,6 @@ fn test_load_code_unaligned_error() { let dep_cell_data = Bytes::from(data.to_vec()); let dep_cell = build_cell_meta(10000, dep_cell_data); - let data_loader = new_mock_data_loader(); - let rtx = Arc::new(ResolvedTransaction { transaction: TransactionBuilder::default().build(), resolved_cell_deps: vec![dep_cell], @@ -1810,17 +1780,11 @@ fn test_load_code_unaligned_error() { resolved_dep_groups: vec![], }); - let mut load_code = LoadCellData::new(Arc::new(Mutex::new(Snapshot2Context::new(TxData { - rtx, - data_loader, - program: Bytes::new(), - script_group: Arc::new(ScriptGroup { - script: Default::default(), - group_type: ScriptGroupType::Lock, - input_indices: Default::default(), - output_indices: Default::default(), - }), - })))); + let sg_data = build_sg_data(rtx, vec![], vec![]); + + let vm_context = VmContext::new(&sg_data, &Arc::new(Mutex::new(Vec::new()))); + + let mut load_code = LoadCellData::new(&vm_context); assert!(machine.memory_mut().store_byte(addr, addr_size, 1).is_ok()); @@ -1849,8 +1813,6 @@ fn test_load_code_slice_out_of_bound_error() { let dep_cell_data = 
Bytes::from(data.to_vec()); let dep_cell = build_cell_meta(10000, dep_cell_data); - let data_loader = new_mock_data_loader(); - let rtx = Arc::new(ResolvedTransaction { transaction: TransactionBuilder::default().build(), resolved_cell_deps: vec![dep_cell], @@ -1858,17 +1820,11 @@ fn test_load_code_slice_out_of_bound_error() { resolved_dep_groups: vec![], }); - let mut load_code = LoadCellData::new(Arc::new(Mutex::new(Snapshot2Context::new(TxData { - rtx, - data_loader, - program: Bytes::new(), - script_group: Arc::new(ScriptGroup { - script: Default::default(), - group_type: ScriptGroupType::Lock, - input_indices: Default::default(), - output_indices: Default::default(), - }), - })))); + let sg_data = build_sg_data(rtx, vec![], vec![]); + + let vm_context = VmContext::new(&sg_data, &Arc::new(Mutex::new(Vec::new()))); + + let mut load_code = LoadCellData::new(&vm_context); assert!(machine.memory_mut().store_byte(addr, addr_size, 1).is_ok()); @@ -1900,8 +1856,6 @@ fn test_load_code_not_enough_space_error() { let dep_cell_data = Bytes::from(data); let dep_cell = build_cell_meta(10000, dep_cell_data); - let data_loader = new_mock_data_loader(); - let rtx = Arc::new(ResolvedTransaction { transaction: TransactionBuilder::default().build(), resolved_cell_deps: vec![dep_cell], @@ -1909,17 +1863,11 @@ fn test_load_code_not_enough_space_error() { resolved_dep_groups: vec![], }); - let mut load_code = LoadCellData::new(Arc::new(Mutex::new(Snapshot2Context::new(TxData { - rtx, - data_loader, - program: Bytes::new(), - script_group: Arc::new(ScriptGroup { - script: Default::default(), - group_type: ScriptGroupType::Lock, - input_indices: Default::default(), - output_indices: Default::default(), - }), - })))); + let sg_data = build_sg_data(rtx, vec![], vec![]); + + let vm_context = VmContext::new(&sg_data, &Arc::new(Mutex::new(Vec::new()))); + + let mut load_code = LoadCellData::new(&vm_context); assert!(machine.memory_mut().store_byte(addr, addr_size, 1).is_ok()); @@ -1980,8 
+1928,10 @@ fn _test_load_input( resolved_inputs: vec![], resolved_dep_groups: vec![], }); - let group_inputs = Arc::new(vec![0]); - let mut load_input = LoadInput::new(rtx, group_inputs); + + let sg_data = build_sg_data(rtx, vec![0], vec![]); + + let mut load_input = LoadInput::new(&sg_data); let mut buffer = vec![]; let expect = if let Some(field) = field { @@ -2111,25 +2061,19 @@ fn test_load_cell_data_size_zero() { let dep_cell = build_cell_meta(10000, data.clone()); let input_cell = build_cell_meta(100, data); - let data_loader = new_mock_data_loader(); - let rtx = Arc::new(ResolvedTransaction { transaction: TransactionBuilder::default().build(), resolved_cell_deps: vec![dep_cell], resolved_inputs: vec![input_cell], resolved_dep_groups: vec![], }); - let mut load_code = LoadCellData::new(Arc::new(Mutex::new(Snapshot2Context::new(TxData { - rtx, - data_loader, - program: Bytes::new(), - script_group: Arc::new(ScriptGroup { - script: Default::default(), - group_type: ScriptGroupType::Lock, - input_indices: vec![0], - output_indices: vec![0], - }), - })))); + + let sg_data = build_sg_data(rtx, vec![0], vec![0]); + + let vm_context = VmContext::new(&sg_data, &Arc::new(Mutex::new(Vec::new()))); + + let mut load_code = LoadCellData::new(&vm_context); + load_code.ecall(&mut machine).unwrap(); assert_eq!(machine.registers()[A0], u64::from(SUCCESS)); assert_eq!(machine.memory_mut().load64(&size_addr).unwrap(), 256); @@ -2156,25 +2100,19 @@ fn test_load_cell_data_size_zero_index_out_of_bound() { let dep_cell = build_cell_meta(10000, data.clone()); let input_cell = build_cell_meta(100, data); - let data_loader = new_mock_data_loader(); - let rtx = Arc::new(ResolvedTransaction { transaction: TransactionBuilder::default().build(), resolved_cell_deps: vec![dep_cell], resolved_inputs: vec![input_cell], resolved_dep_groups: vec![], }); - let mut load_code = LoadCellData::new(Arc::new(Mutex::new(Snapshot2Context::new(TxData { - rtx, - data_loader, - program: Bytes::new(), - 
script_group: Arc::new(ScriptGroup { - script: Default::default(), - group_type: ScriptGroupType::Lock, - input_indices: vec![0], - output_indices: vec![0], - }), - })))); + + let sg_data = build_sg_data(rtx, vec![0], vec![0]); + + let vm_context = VmContext::new(&sg_data, &Arc::new(Mutex::new(Vec::new()))); + + let mut load_code = LoadCellData::new(&vm_context); + load_code.ecall(&mut machine).unwrap(); assert_eq!(machine.registers()[A0], u64::from(INDEX_OUT_OF_BOUND)); } diff --git a/script/src/syscalls/tests/vm_latest/syscalls_2.rs b/script/src/syscalls/tests/vm_latest/syscalls_2.rs index 4cac872f56..0f40dc1e51 100644 --- a/script/src/syscalls/tests/vm_latest/syscalls_2.rs +++ b/script/src/syscalls/tests/vm_latest/syscalls_2.rs @@ -1,4 +1,4 @@ -use crate::syscalls::tests::utils::MockDataLoader; +use crate::{syscalls::tests::utils::*, types::VmContext}; use ckb_types::{ bytes::Bytes, core::{ @@ -53,7 +53,18 @@ fn test_current_cycles() { machine.set_cycles(cycles); - let result = CurrentCycles::new(Arc::new(Mutex::new(0))).ecall(&mut machine); + let rtx = Arc::new(ResolvedTransaction { + transaction: TransactionBuilder::default().build(), + resolved_cell_deps: vec![], + resolved_inputs: vec![], + resolved_dep_groups: vec![], + }); + + let sg_data = build_sg_data(rtx, vec![], vec![]); + + let vm_context = VmContext::new(&sg_data, &Arc::new(Mutex::new(Vec::new()))); + + let result = CurrentCycles::new(&vm_context).ecall(&mut machine); assert!(result.unwrap()); assert_eq!(machine.registers()[A0], cycles); @@ -99,7 +110,6 @@ fn _test_load_extension( extensions, ..Default::default() }; - let group_inputs = Arc::new(vec![0]); let rtx = Arc::new(ResolvedTransaction { transaction: TransactionBuilder::default() @@ -110,8 +120,9 @@ fn _test_load_extension( resolved_dep_groups: vec![], }); - let mut load_block_extension: LoadBlockExtension = - LoadBlockExtension::new(data_loader, rtx, group_inputs); + let sg_data = build_sg_data_with_loader(rtx, data_loader, vec![0], 
vec![]); + + let mut load_block_extension = LoadBlockExtension::new(&sg_data); prop_assert!(machine .memory_mut() diff --git a/script/src/syscalls/wait.rs b/script/src/syscalls/wait.rs index c9a49566aa..11ae3693e3 100644 --- a/script/src/syscalls/wait.rs +++ b/script/src/syscalls/wait.rs @@ -1,5 +1,6 @@ use crate::syscalls::{SPAWN_YIELD_CYCLES_BASE, WAIT}; -use crate::types::{Message, VmId, WaitArgs}; +use crate::types::{Message, VmContext, VmId, WaitArgs}; +use ckb_traits::{CellDataProvider, ExtensionProvider, HeaderProvider}; use ckb_vm::{ registers::{A0, A1, A7}, Error as VMError, Register, SupportMachine, Syscalls, @@ -13,8 +14,14 @@ pub struct Wait { } impl Wait { - pub fn new(id: VmId, message_box: Arc>>) -> Self { - Self { id, message_box } + pub fn new
(vm_id: &VmId, vm_context: &VmContext
) -> Self + where + DL: CellDataProvider + HeaderProvider + ExtensionProvider + Send + Sync + Clone + 'static, + { + Self { + id: *vm_id, + message_box: Arc::clone(&vm_context.message_box), + } } } diff --git a/script/src/syscalls/write.rs b/script/src/syscalls/write.rs index a193f5589b..2784abbeb4 100644 --- a/script/src/syscalls/write.rs +++ b/script/src/syscalls/write.rs @@ -1,5 +1,6 @@ use crate::syscalls::{INVALID_FD, SPAWN_YIELD_CYCLES_BASE, WRITE}; -use crate::types::{Fd, FdArgs, Message, VmId}; +use crate::types::{Fd, FdArgs, Message, VmContext, VmId}; +use ckb_traits::{CellDataProvider, ExtensionProvider, HeaderProvider}; use ckb_vm::{ registers::{A0, A1, A2, A7}, Error as VMError, Memory, Register, SupportMachine, Syscalls, @@ -13,8 +14,14 @@ pub struct Write { } impl Write { - pub fn new(id: VmId, message_box: Arc>>) -> Self { - Self { id, message_box } + pub fn new
(vm_id: &VmId, vm_context: &VmContext
) -> Self + where + DL: CellDataProvider + HeaderProvider + ExtensionProvider + Send + Sync + Clone + 'static, + { + Self { + id: *vm_id, + message_box: Arc::clone(&vm_context.message_box), + } } } diff --git a/script/src/types.rs b/script/src/types.rs index 8f25a98799..a7c9cdfbac 100644 --- a/script/src/types.rs +++ b/script/src/types.rs @@ -1,14 +1,24 @@ +use crate::{error::ScriptError, verify_env::TxVerifyEnv}; +use ckb_chain_spec::consensus::Consensus; use ckb_types::{ - core::{Cycle, ScriptHashType}, - packed::{Byte32, Script}, + core::{ + cell::{CellMeta, ResolvedTransaction}, + Cycle, ScriptHashType, + }, + packed::{Byte32, CellOutput, OutPoint, Script}, + prelude::*, }; use ckb_vm::{ machine::{VERSION0, VERSION1, VERSION2}, ISA_B, ISA_IMC, ISA_MOP, }; use serde::{Deserialize, Serialize}; +use std::collections::{BTreeMap, HashMap}; use std::fmt; -use std::sync::{Arc, Mutex}; +use std::sync::{ + atomic::{AtomicU64, Ordering}, + Arc, Mutex, RwLock, +}; #[cfg(has_asm)] use ckb_vm::machine::asm::{AsmCoreMachine, AsmMachine}; @@ -16,10 +26,9 @@ use ckb_vm::machine::asm::{AsmCoreMachine, AsmMachine}; #[cfg(not(has_asm))] use ckb_vm::{DefaultCoreMachine, TraceMachine, WXorXMemory}; -use ckb_traits::{CellDataProvider, ExtensionProvider, HeaderProvider}; +use ckb_traits::CellDataProvider; use ckb_vm::snapshot2::Snapshot2Context; -use ckb_types::core::cell::ResolvedTransaction; use ckb_vm::{ bytes::Bytes, machine::Pause, @@ -54,10 +63,14 @@ pub(crate) type Machine = AsmMachine; #[cfg(not(has_asm))] pub(crate) type Machine = TraceMachine; -pub(crate) type Indices = Arc>; - pub(crate) type DebugPrinter = Arc; +pub struct DebugContext { + pub debug_printer: DebugPrinter, + #[cfg(test)] + pub skip_pause: Arc, +} + /// The version of CKB Script Verifier. 
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] pub enum ScriptVersion { @@ -126,7 +139,7 @@ impl ScriptVersion { /// A script group will only be executed once per transaction, the /// script itself should check against all inputs/outputs in its group /// if needed. -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct ScriptGroup { /// The script. /// @@ -140,9 +153,19 @@ pub struct ScriptGroup { pub output_indices: Vec, } +/// The methods included here are defected in a way: all construction +/// methods here create ScriptGroup without any `input_indices` or +/// `output_indices` filled. One has to manually fill them later(or forgot +/// about this). +/// As a result, we are marking them as crate-only methods for now. This +/// forces users to one of the following 2 solutions: +/// * Call `groups()` on `TxData` so they can fetch `ScriptGroup` data with +/// all correct data filled. +/// * Manually construct the struct where they have to think what shall be +/// used for `input_indices` and `output_indices`. impl ScriptGroup { /// Creates a new script group struct. - pub fn new(script: &Script, group_type: ScriptGroupType) -> Self { + pub(crate) fn new(script: &Script, group_type: ScriptGroupType) -> Self { Self { group_type, script: script.to_owned(), @@ -152,12 +175,12 @@ impl ScriptGroup { } /// Creates a lock script group. - pub fn from_lock_script(script: &Script) -> Self { + pub(crate) fn from_lock_script(script: &Script) -> Self { Self::new(script, ScriptGroupType::Lock) } /// Creates a type script group. - pub fn from_type_script(script: &Script) -> Self { + pub(crate) fn from_type_script(script: &Script) -> Self { Self::new(script, ScriptGroupType::Type) } } @@ -186,6 +209,7 @@ impl fmt::Display for ScriptGroupType { /// Struct specifies which script has verified so far. 
/// State is lifetime free, but capture snapshot need heavy memory copy +#[derive(Clone)] pub struct TransactionState { /// current suspended script index pub current: usize, @@ -257,34 +281,6 @@ pub enum ChunkCommand { Stop, } -#[derive(Clone)] -pub struct MachineContext< - DL: CellDataProvider + HeaderProvider + ExtensionProvider + Send + Sync + Clone + 'static, -> { - pub(crate) base_cycles: Arc>, - pub(crate) snapshot2_context: Arc>>>, -} - -impl
MachineContext
-where - DL: CellDataProvider + HeaderProvider + ExtensionProvider + Send + Sync + Clone + 'static, -{ - pub fn new(tx_data: TxData
) -> Self { - Self { - base_cycles: Arc::new(Mutex::new(0)), - snapshot2_context: Arc::new(Mutex::new(Snapshot2Context::new(tx_data))), - } - } - - pub fn snapshot2_context(&self) -> &Arc>>> { - &self.snapshot2_context - } - - pub fn set_base_cycles(&mut self, base_cycles: u64) { - *self.base_cycles.lock().expect("lock") = base_cycles; - } -} - pub type VmId = u64; pub const FIRST_VM_ID: VmId = 0; @@ -408,8 +404,6 @@ pub enum Message { /// A pointer to the data that is part of the transaction. #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub enum DataPieceId { - /// Target program. Usually located in cell data. - Program, /// The nth input cell data. Input(u32), /// The nth output data. @@ -495,38 +489,466 @@ impl FullSuspendedState { } } -/// Context data for current running transaction & script -#[derive(Clone)] +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum DataGuard { + NotLoaded(OutPoint), + Loaded(Bytes), +} + +/// LazyData wrapper make sure not-loaded data will be loaded only after one access +#[derive(Debug, Clone)] +pub struct LazyData(Arc>); + +impl LazyData { + fn from_cell_meta(cell_meta: &CellMeta) -> LazyData { + match &cell_meta.mem_cell_data { + Some(data) => LazyData(Arc::new(RwLock::new(DataGuard::Loaded(data.to_owned())))), + None => LazyData(Arc::new(RwLock::new(DataGuard::NotLoaded( + cell_meta.out_point.clone(), + )))), + } + } + + fn access(&self, data_loader: &DL) -> Result { + let guard = self + .0 + .read() + .map_err(|_| ScriptError::Other("RwLock poisoned".into()))? 
+ .to_owned(); + match guard { + DataGuard::NotLoaded(out_point) => { + let data = data_loader + .get_cell_data(&out_point) + .ok_or(ScriptError::Other("cell data not found".into()))?; + let mut write_guard = self + .0 + .write() + .map_err(|_| ScriptError::Other("RwLock poisoned".into()))?; + *write_guard = DataGuard::Loaded(data.clone()); + Ok(data) + } + DataGuard::Loaded(bytes) => Ok(bytes), + } + } +} + +#[derive(Debug, Clone)] +pub enum Binaries { + Unique(Byte32, usize, LazyData), + Duplicate(Byte32, usize, LazyData), + Multiple, +} + +impl Binaries { + fn new(data_hash: Byte32, dep_index: usize, data: LazyData) -> Self { + Self::Unique(data_hash, dep_index, data) + } + + fn merge(&mut self, data_hash: &Byte32) { + match self { + Self::Unique(ref hash, dep_index, data) + | Self::Duplicate(ref hash, dep_index, data) => { + if hash != data_hash { + *self = Self::Multiple; + } else { + *self = Self::Duplicate(hash.to_owned(), *dep_index, data.to_owned()); + } + } + Self::Multiple => {} + } + } +} + +/// Immutable context data at transaction level +#[derive(Clone, Debug)] pub struct TxData
{ /// ResolvedTransaction. pub rtx: Arc, + + /// Passed & derived information. + pub info: Arc>, +} + +/// Information that is either passed as the context of the transaction, +/// or can be derived from the transaction. +#[derive(Clone, Debug)] +pub struct TxInfo
{ /// Data loader. pub data_loader: DL, - /// Ideally one might not want to keep program here, since program is totally - /// deducible from rtx + data_loader, however, for a demo here, program - /// does help us save some extra coding. - pub program: Bytes, - /// The script group to which the current program belongs. - pub script_group: Arc, + /// Chain consensus parameters + pub consensus: Arc, + /// Transaction verification environment + pub tx_env: Arc, + + /// Potential binaries in current transaction indexed by data hash + pub binaries_by_data_hash: HashMap, + /// Potential binaries in current transaction indexed by type script hash + pub binaries_by_type_hash: HashMap, + /// Lock script groups, orders here are important + pub lock_groups: BTreeMap, + /// Type script groups, orders here are important + pub type_groups: BTreeMap, + /// Output cells in current transaction reorganized in CellMeta format + pub outputs: Vec, +} + +impl
TxData
+where + DL: CellDataProvider, +{ + /// Creates a new TxData structure + pub fn new( + rtx: Arc, + data_loader: DL, + consensus: Arc, + tx_env: Arc, + ) -> Self { + let tx_hash = rtx.transaction.hash(); + let resolved_cell_deps = &rtx.resolved_cell_deps; + let resolved_inputs = &rtx.resolved_inputs; + let outputs = rtx + .transaction + .outputs_with_data_iter() + .enumerate() + .map(|(index, (cell_output, data))| { + let out_point = OutPoint::new_builder() + .tx_hash(tx_hash.clone()) + .index(index.pack()) + .build(); + let data_hash = CellOutput::calc_data_hash(&data); + CellMeta { + cell_output, + out_point, + transaction_info: None, + data_bytes: data.len() as u64, + mem_cell_data: Some(data), + mem_cell_data_hash: Some(data_hash), + } + }) + .collect(); + + let mut binaries_by_data_hash: HashMap = HashMap::default(); + let mut binaries_by_type_hash: HashMap = HashMap::default(); + for (i, cell_meta) in resolved_cell_deps.iter().enumerate() { + let data_hash = data_loader + .load_cell_data_hash(cell_meta) + .expect("cell data hash"); + let lazy = LazyData::from_cell_meta(cell_meta); + binaries_by_data_hash.insert(data_hash.to_owned(), (i, lazy.to_owned())); + + if let Some(t) = &cell_meta.cell_output.type_().to_opt() { + binaries_by_type_hash + .entry(t.calc_script_hash()) + .and_modify(|bin| bin.merge(&data_hash)) + .or_insert_with(|| Binaries::new(data_hash.to_owned(), i, lazy.to_owned())); + } + } + + let mut lock_groups = BTreeMap::default(); + let mut type_groups = BTreeMap::default(); + for (i, cell_meta) in resolved_inputs.iter().enumerate() { + // here we are only pre-processing the data, verify method validates + // each input has correct script setup. 
+ let output = &cell_meta.cell_output; + let lock_group_entry = lock_groups + .entry(output.calc_lock_hash()) + .or_insert_with(|| ScriptGroup::from_lock_script(&output.lock())); + lock_group_entry.input_indices.push(i); + if let Some(t) = &output.type_().to_opt() { + let type_group_entry = type_groups + .entry(t.calc_script_hash()) + .or_insert_with(|| ScriptGroup::from_type_script(t)); + type_group_entry.input_indices.push(i); + } + } + for (i, output) in rtx.transaction.outputs().into_iter().enumerate() { + if let Some(t) = &output.type_().to_opt() { + let type_group_entry = type_groups + .entry(t.calc_script_hash()) + .or_insert_with(|| ScriptGroup::from_type_script(t)); + type_group_entry.output_indices.push(i); + } + } + + Self { + rtx, + info: Arc::new(TxInfo { + data_loader, + consensus, + tx_env, + binaries_by_data_hash, + binaries_by_type_hash, + lock_groups, + type_groups, + outputs, + }), + } + } + + #[inline] + /// Extracts actual script binary either in dep cells. + pub fn extract_script(&self, script: &Script) -> Result { + self.info.extract_script(script) + } } -impl
DataSource for TxData
+impl
TxInfo
where - DL: CellDataProvider + HeaderProvider + ExtensionProvider + Send + Sync + Clone + 'static, + DL: CellDataProvider, +{ + #[inline] + /// Extracts actual script binary either in dep cells. + pub fn extract_script(&self, script: &Script) -> Result { + let (lazy, _) = self.extract_script_and_dep_index(script)?; + lazy.access(&self.data_loader) + } +} + +impl
TxData
{ + #[inline] + /// Calculates transaction hash + pub fn tx_hash(&self) -> Byte32 { + self.rtx.transaction.hash() + } + + #[inline] + /// Extracts the index of the script binary in dep cells + pub fn extract_referenced_dep_index(&self, script: &Script) -> Result { + self.info.extract_referenced_dep_index(script) + } + + #[inline] + /// Finds the script group from cell deps. + pub fn find_script_group( + &self, + script_group_type: ScriptGroupType, + script_hash: &Byte32, + ) -> Option<&ScriptGroup> { + self.info.find_script_group(script_group_type, script_hash) + } + + #[inline] + /// Returns the version of the machine based on the script and the consensus rules. + pub fn select_version(&self, script: &Script) -> Result { + self.info.select_version(script) + } + + #[inline] + /// Returns all script groups. + pub fn groups(&self) -> impl Iterator { + self.info.groups() + } + + #[inline] + /// Returns all script groups with type. + pub fn groups_with_type( + &self, + ) -> impl Iterator { + self.info.groups_with_type() + } +} + +impl
TxInfo
{ + #[inline] + /// Extracts the index of the script binary in dep cells + pub fn extract_referenced_dep_index(&self, script: &Script) -> Result { + let (_, dep_index) = self.extract_script_and_dep_index(script)?; + Ok(*dep_index) + } + + fn extract_script_and_dep_index( + &self, + script: &Script, + ) -> Result<(&LazyData, &usize), ScriptError> { + let script_hash_type = ScriptHashType::try_from(script.hash_type()) + .map_err(|err| ScriptError::InvalidScriptHashType(err.to_string()))?; + match script_hash_type { + ScriptHashType::Data | ScriptHashType::Data1 | ScriptHashType::Data2 => { + if let Some((dep_index, lazy)) = self.binaries_by_data_hash.get(&script.code_hash()) + { + Ok((lazy, dep_index)) + } else { + Err(ScriptError::ScriptNotFound(script.code_hash())) + } + } + ScriptHashType::Type => { + if let Some(ref bin) = self.binaries_by_type_hash.get(&script.code_hash()) { + match bin { + Binaries::Unique(_, dep_index, ref lazy) => Ok((lazy, dep_index)), + Binaries::Duplicate(_, dep_index, ref lazy) => Ok((lazy, dep_index)), + Binaries::Multiple => Err(ScriptError::MultipleMatches), + } + } else { + Err(ScriptError::ScriptNotFound(script.code_hash())) + } + } + } + } + + /// Finds the script group from cell deps. + pub fn find_script_group( + &self, + script_group_type: ScriptGroupType, + script_hash: &Byte32, + ) -> Option<&ScriptGroup> { + match script_group_type { + ScriptGroupType::Lock => self.lock_groups.get(script_hash), + ScriptGroupType::Type => self.type_groups.get(script_hash), + } + } + + fn is_vm_version_1_and_syscalls_2_enabled(&self) -> bool { + // If the proposal window is allowed to prejudge on the vm version, + // it will cause proposal tx to start a new vm in the blocks before hardfork, + // destroying the assumption that the transaction execution only uses the old vm + // before hardfork, leading to unexpected network splits. 
+ let epoch_number = self.tx_env.epoch_number_without_proposal_window(); + let hardfork_switch = self.consensus.hardfork_switch(); + hardfork_switch + .ckb2021 + .is_vm_version_1_and_syscalls_2_enabled(epoch_number) + } + + fn is_vm_version_2_and_syscalls_3_enabled(&self) -> bool { + // If the proposal window is allowed to prejudge on the vm version, + // it will cause proposal tx to start a new vm in the blocks before hardfork, + // destroying the assumption that the transaction execution only uses the old vm + // before hardfork, leading to unexpected network splits. + let epoch_number = self.tx_env.epoch_number_without_proposal_window(); + let hardfork_switch = self.consensus.hardfork_switch(); + hardfork_switch + .ckb2023 + .is_vm_version_2_and_syscalls_3_enabled(epoch_number) + } + + /// Returns the version of the machine based on the script and the consensus rules. + pub fn select_version(&self, script: &Script) -> Result { + let is_vm_version_2_and_syscalls_3_enabled = self.is_vm_version_2_and_syscalls_3_enabled(); + let is_vm_version_1_and_syscalls_2_enabled = self.is_vm_version_1_and_syscalls_2_enabled(); + let script_hash_type = ScriptHashType::try_from(script.hash_type()) + .map_err(|err| ScriptError::InvalidScriptHashType(err.to_string()))?; + match script_hash_type { + ScriptHashType::Data => Ok(ScriptVersion::V0), + ScriptHashType::Data1 => { + if is_vm_version_1_and_syscalls_2_enabled { + Ok(ScriptVersion::V1) + } else { + Err(ScriptError::InvalidVmVersion(1)) + } + } + ScriptHashType::Data2 => { + if is_vm_version_2_and_syscalls_3_enabled { + Ok(ScriptVersion::V2) + } else { + Err(ScriptError::InvalidVmVersion(2)) + } + } + ScriptHashType::Type => { + if is_vm_version_2_and_syscalls_3_enabled { + Ok(ScriptVersion::V2) + } else if is_vm_version_1_and_syscalls_2_enabled { + Ok(ScriptVersion::V1) + } else { + Ok(ScriptVersion::V0) + } + } + } + } + + /// Returns all script groups. 
+ pub fn groups(&self) -> impl Iterator { + self.lock_groups.iter().chain(self.type_groups.iter()) + } + + /// Returns all script groups with type. + pub fn groups_with_type( + &self, + ) -> impl Iterator { + self.lock_groups + .iter() + .map(|(hash, group)| (ScriptGroupType::Lock, hash, group)) + .chain( + self.type_groups + .iter() + .map(|(hash, group)| (ScriptGroupType::Type, hash, group)), + ) + } +} + +/// Immutable context data at script group level +#[derive(Clone, Debug)] +pub struct SgData
{ + /// ResolvedTransaction. + pub rtx: Arc, + + /// Passed & derived information at transaction level. + pub tx_info: Arc>, + + /// Passed & derived information at script group level. + pub sg_info: Arc, +} + +/// Script group level derived information. +#[derive(Clone, Debug)] +pub struct SgInfo { + /// Currently executed script version + pub script_version: ScriptVersion, + /// Currently executed script group + pub script_group: ScriptGroup, + /// Currently executed script hash + pub script_hash: Byte32, + /// DataPieceId for the root program + pub program_data_piece_id: DataPieceId, +} + +impl
SgData
{ + pub fn new(tx_data: &TxData
, script_group: &ScriptGroup) -> Result { + let script_hash = script_group.script.calc_script_hash(); + let script_version = tx_data.select_version(&script_group.script)?; + let dep_index = tx_data + .extract_referenced_dep_index(&script_group.script)? + .try_into() + .map_err(|_| ScriptError::Other("u32 overflow".to_string()))?; + Ok(Self { + rtx: Arc::clone(&tx_data.rtx), + tx_info: Arc::clone(&tx_data.info), + sg_info: Arc::new(SgInfo { + script_version, + script_hash, + script_group: script_group.clone(), + program_data_piece_id: DataPieceId::CellDep(dep_index), + }), + }) + } + + pub fn data_loader(&self) -> &DL { + &self.tx_info.data_loader + } + + pub fn group_inputs(&self) -> &[usize] { + &self.sg_info.script_group.input_indices + } + + pub fn group_outputs(&self) -> &[usize] { + &self.sg_info.script_group.output_indices + } + + pub fn outputs(&self) -> &[CellMeta] { + &self.tx_info.outputs + } +} + +impl
DataSource for SgData
+where + DL: CellDataProvider, { fn load_data(&self, id: &DataPieceId, offset: u64, length: u64) -> Option<(Bytes, u64)> { match id { - DataPieceId::Program => { - // This is just a shortcut so we don't have to copy over the logic in extract_script, - // ideally you can also only define the rest 5, then figure out a way to convert - // script group to the actual cell dep index. - Some(self.program.clone()) - } DataPieceId::Input(i) => self .rtx .resolved_inputs .get(*i as usize) - .and_then(|cell| self.data_loader.load_cell_data(cell)), + .and_then(|cell| self.data_loader().load_cell_data(cell)), DataPieceId::Output(i) => self .rtx .transaction @@ -537,14 +959,16 @@ where .rtx .resolved_cell_deps .get(*i as usize) - .and_then(|cell| self.data_loader.load_cell_data(cell)), + .and_then(|cell| self.data_loader().load_cell_data(cell)), DataPieceId::GroupInput(i) => self + .sg_info .script_group .input_indices .get(*i as usize) .and_then(|gi| self.rtx.resolved_inputs.get(*gi)) - .and_then(|cell| self.data_loader.load_cell_data(cell)), + .and_then(|cell| self.data_loader().load_cell_data(cell)), DataPieceId::GroupOutput(i) => self + .sg_info .script_group .output_indices .get(*i as usize) @@ -557,12 +981,14 @@ where .get(*i as usize) .map(|data| data.raw_data()), DataPieceId::WitnessGroupInput(i) => self + .sg_info .script_group .input_indices .get(*i as usize) .and_then(|gi| self.rtx.transaction.witnesses().get(*gi)) .map(|data| data.raw_data()), DataPieceId::WitnessGroupOutput(i) => self + .sg_info .script_group .output_indices .get(*i as usize) @@ -582,6 +1008,38 @@ where } } +/// Mutable data at virtual machine level +#[derive(Clone)] +pub struct VmContext
+where + DL: CellDataProvider, +{ + pub(crate) base_cycles: Arc, + /// A mutable reference to scheduler's message box + pub(crate) message_box: Arc>>, + pub(crate) snapshot2_context: Arc>>>, +} + +impl
VmContext
+where + DL: CellDataProvider + Clone, +{ + /// Creates a new VM context. It is by design that parameters to this function + /// are references. It is a reminder that the inputs are designed to be shared + /// among different entities. + pub fn new(sg_data: &SgData
, message_box: &Arc>>) -> Self { + Self { + base_cycles: Arc::new(AtomicU64::new(0)), + message_box: Arc::clone(message_box), + snapshot2_context: Arc::new(Mutex::new(Snapshot2Context::new(sg_data.clone()))), + } + } + + pub fn set_base_cycles(&mut self, base_cycles: u64) { + self.base_cycles.store(base_cycles, Ordering::Release); + } +} + /// The scheduler's running mode. #[derive(Clone)] pub enum RunMode { diff --git a/script/src/verify.rs b/script/src/verify.rs index a9315e3a79..aaa5fd41d0 100644 --- a/script/src/verify.rs +++ b/script/src/verify.rs @@ -1,21 +1,12 @@ use crate::scheduler::Scheduler; -#[cfg(test)] -use crate::syscalls::Pause; -use crate::syscalls::{InheritedFd, ProcessID}; -use crate::types::{DataPieceId, FullSuspendedState, Message, RunMode, TxData, VmId, FIRST_VM_ID}; #[cfg(not(target_family = "wasm"))] use crate::ChunkCommand; use crate::{ error::{ScriptError, TransactionScriptError}, - syscalls::{ - Close, CurrentCycles, Debugger, Exec, ExecV2, LoadBlockExtension, LoadCell, LoadCellData, - LoadHeader, LoadInput, LoadScript, LoadScriptHash, LoadTx, LoadWitness, Pipe, Read, Spawn, - VMVersion, Wait, Write, - }, type_id::TypeIdSystemScript, types::{ - CoreMachine, DebugPrinter, Indices, ScriptGroup, ScriptGroupType, ScriptVersion, - TransactionState, VerifyResult, + DebugContext, DebugPrinter, FullSuspendedState, RunMode, ScriptGroup, ScriptGroupType, + ScriptVersion, SgData, TransactionState, TxData, VerifyResult, }, verify_env::TxVerifyEnv, }; @@ -26,21 +17,14 @@ use ckb_logger::{debug, info}; use ckb_traits::{CellDataProvider, ExtensionProvider, HeaderProvider}; use ckb_types::{ bytes::Bytes, - core::{ - cell::{CellMeta, ResolvedTransaction}, - Cycle, ScriptHashType, - }, - packed::{Byte32, CellOutput, OutPoint, Script}, + core::{cell::ResolvedTransaction, Cycle, ScriptHashType}, + packed::{Byte32, Script}, prelude::*, }; #[cfg(not(target_family = "wasm"))] use ckb_vm::machine::Pause as VMPause; -use ckb_vm::{snapshot2::Snapshot2Context, 
Error as VMInternalError, Syscalls}; -use std::sync::{Arc, Mutex}; -use std::{ - collections::{BTreeMap, HashMap}, - sync::RwLock, -}; +use ckb_vm::Error as VMInternalError; +use std::sync::Arc; #[cfg(not(target_family = "wasm"))] use tokio::sync::{ oneshot, @@ -69,317 +53,13 @@ impl ChunkState { } } -#[derive(Debug, PartialEq, Eq, Clone)] -enum DataGuard { - NotLoaded(OutPoint), - Loaded(Bytes), -} - -/// LazyData wrapper make sure not-loaded data will be loaded only after one access -#[derive(Debug, Clone)] -struct LazyData(Arc>); - -impl LazyData { - fn from_cell_meta(cell_meta: &CellMeta) -> LazyData { - match &cell_meta.mem_cell_data { - Some(data) => LazyData(Arc::new(RwLock::new(DataGuard::Loaded(data.to_owned())))), - None => LazyData(Arc::new(RwLock::new(DataGuard::NotLoaded( - cell_meta.out_point.clone(), - )))), - } - } - - fn access(&self, data_loader: &DL) -> Result { - let guard = self - .0 - .read() - .map_err(|_| ScriptError::Other("RwLock poisoned".into()))? - .to_owned(); - match guard { - DataGuard::NotLoaded(out_point) => { - let data = data_loader - .get_cell_data(&out_point) - .ok_or(ScriptError::Other("cell data not found".into()))?; - let mut write_guard = self - .0 - .write() - .map_err(|_| ScriptError::Other("RwLock poisoned".into()))?; - *write_guard = DataGuard::Loaded(data.clone()); - Ok(data) - } - DataGuard::Loaded(bytes) => Ok(bytes), - } - } -} - -#[derive(Debug, Clone)] -enum Binaries { - Unique(Byte32, LazyData), - Duplicate(Byte32, LazyData), - Multiple, -} - -impl Binaries { - fn new(data_hash: Byte32, data: LazyData) -> Self { - Self::Unique(data_hash, data) - } - - fn merge(&mut self, data_hash: &Byte32) { - match self { - Self::Unique(ref hash, data) | Self::Duplicate(ref hash, data) => { - if hash != data_hash { - *self = Self::Multiple; - } else { - *self = Self::Duplicate(hash.to_owned(), data.to_owned()); - } - } - Self::Multiple => {} - } - } -} - -/// Syscalls can be generated individually by 
TransactionScriptsSyscallsGenerator. -/// -/// TransactionScriptsSyscallsGenerator can be cloned. -#[derive(Clone)] -pub struct TransactionScriptsSyscallsGenerator
-where - DL: CellDataProvider + HeaderProvider + ExtensionProvider + Send + Sync + Clone + 'static, -{ - pub(crate) base_cycles: Arc>, - pub(crate) data_loader: DL, - pub(crate) debug_printer: DebugPrinter, - pub(crate) message_box: Arc>>, - pub(crate) outputs: Arc>, - pub(crate) rtx: Arc, - #[cfg(test)] - pub(crate) skip_pause: Arc, - pub(crate) vm_id: VmId, -} - -impl
TransactionScriptsSyscallsGenerator
-where - DL: CellDataProvider + HeaderProvider + ExtensionProvider + Send + Sync + Clone + 'static, -{ - /// Build syscall: current_cycles - pub fn build_current_cycles(&self) -> CurrentCycles { - CurrentCycles::new(Arc::clone(&self.base_cycles)) - } - - /// Build syscall: vm_version - pub fn build_vm_version(&self) -> VMVersion { - VMVersion::new() - } - - /// Build syscall: exec - pub fn build_exec(&self, group_inputs: Indices, group_outputs: Indices) -> Exec
{ - Exec::new( - self.data_loader.clone(), - Arc::clone(&self.rtx), - Arc::clone(&self.outputs), - group_inputs, - group_outputs, - ) - } - - /// Build syscall: exec. When script version >= V2, this exec implementation is used. - pub fn build_exec_v2(&self) -> ExecV2 { - ExecV2::new(self.vm_id, Arc::clone(&self.message_box)) - } - - /// Build syscall: load_tx - pub fn build_load_tx(&self) -> LoadTx { - LoadTx::new(Arc::clone(&self.rtx)) - } - - /// Build syscall: load_cell - pub fn build_load_cell(&self, group_inputs: Indices, group_outputs: Indices) -> LoadCell
{ - LoadCell::new( - self.data_loader.clone(), - Arc::clone(&self.rtx), - Arc::clone(&self.outputs), - group_inputs, - group_outputs, - ) - } - - /// Build syscall: load_cell_data - pub fn build_load_cell_data( - &self, - snapshot2_context: Arc>>>, - ) -> LoadCellData
{ - LoadCellData::new(snapshot2_context) - } - - ///Build syscall: load_input - pub fn build_load_input(&self, group_inputs: Indices) -> LoadInput { - LoadInput::new(Arc::clone(&self.rtx), group_inputs) - } - - /// Build syscall: load_script_hash - pub fn build_load_script_hash(&self, hash: Byte32) -> LoadScriptHash { - LoadScriptHash::new(hash) - } - - /// Build syscall: load_header - pub fn build_load_header(&self, group_inputs: Indices) -> LoadHeader
{ - LoadHeader::new( - self.data_loader.clone(), - Arc::clone(&self.rtx), - group_inputs, - ) - } - - /// Build syscall: load_block_extension - pub fn build_load_block_extension(&self, group_inputs: Indices) -> LoadBlockExtension
{ - LoadBlockExtension::new( - self.data_loader.clone(), - Arc::clone(&self.rtx), - group_inputs, - ) - } - - /// Build syscall: load_witness - pub fn build_load_witness(&self, group_inputs: Indices, group_outputs: Indices) -> LoadWitness { - LoadWitness::new(Arc::clone(&self.rtx), group_inputs, group_outputs) - } - - /// Build syscall: load_script - pub fn build_load_script(&self, script: Script) -> LoadScript { - LoadScript::new(script) - } - - /// Build syscall: spawn - pub fn build_spawn( - &self, - snapshot2_context: Arc>>>, - ) -> Spawn
{ - Spawn::new(self.vm_id, Arc::clone(&self.message_box), snapshot2_context) - } - - /// Build syscall: wait - pub fn build_wait(&self) -> Wait { - Wait::new(self.vm_id, Arc::clone(&self.message_box)) - } - - /// Build syscall: process_id - pub fn build_process_id(&self) -> ProcessID { - ProcessID::new(self.vm_id) - } - - /// Build syscall: pipe - pub fn build_pipe(&self) -> Pipe { - Pipe::new(self.vm_id, Arc::clone(&self.message_box)) - } - - /// Build syscall: write - pub fn build_write(&self) -> Write { - Write::new(self.vm_id, Arc::clone(&self.message_box)) - } - - /// Build syscall: read - pub fn build_read(&self) -> Read { - Read::new(self.vm_id, Arc::clone(&self.message_box)) - } - - /// Build syscall: inherited_fd - pub fn inherited_fd(&self) -> InheritedFd { - InheritedFd::new(self.vm_id, Arc::clone(&self.message_box)) - } - - /// Build syscall: close - pub fn close(&self) -> Close { - Close::new(self.vm_id, Arc::clone(&self.message_box)) - } - - /// Generate syscalls. - pub fn generate_syscalls( - &self, - script_version: ScriptVersion, - script_group: &ScriptGroup, - snapshot2_context: Arc>>>, - ) -> Vec)>> { - let current_script_hash = script_group.script.calc_script_hash(); - let script_group_input_indices = Arc::new(script_group.input_indices.clone()); - let script_group_output_indices = Arc::new(script_group.output_indices.clone()); - let mut syscalls: Vec)>> = vec![ - Box::new(self.build_load_script_hash(current_script_hash.clone())), - Box::new(self.build_load_tx()), - Box::new(self.build_load_cell( - Arc::clone(&script_group_input_indices), - Arc::clone(&script_group_output_indices), - )), - Box::new(self.build_load_input(Arc::clone(&script_group_input_indices))), - Box::new(self.build_load_header(Arc::clone(&script_group_input_indices))), - Box::new(self.build_load_witness( - Arc::clone(&script_group_input_indices), - Arc::clone(&script_group_output_indices), - )), - Box::new(self.build_load_script(script_group.script.clone())), - 
Box::new(self.build_load_cell_data(Arc::clone(&snapshot2_context))), - Box::new(Debugger::new( - current_script_hash, - Arc::clone(&self.debug_printer), - )), - ]; - if script_version >= ScriptVersion::V1 { - syscalls.append(&mut vec![ - Box::new(self.build_vm_version()), - if script_version >= ScriptVersion::V2 { - Box::new(self.build_exec_v2()) - } else { - Box::new(self.build_exec( - Arc::clone(&script_group_input_indices), - Arc::clone(&script_group_output_indices), - )) - }, - Box::new(self.build_current_cycles()), - ]); - } - if script_version >= ScriptVersion::V2 { - syscalls.append(&mut vec![ - Box::new(self.build_load_block_extension(Arc::clone(&script_group_input_indices))), - Box::new(self.build_spawn(Arc::clone(&snapshot2_context))), - Box::new(self.build_process_id()), - Box::new(self.build_pipe()), - Box::new(self.build_wait()), - Box::new(self.build_write()), - Box::new(self.build_read()), - Box::new(self.inherited_fd()), - Box::new(self.close()), - ]); - } - #[cfg(test)] - syscalls.push(Box::new(Pause::new(Arc::clone(&self.skip_pause)))); - syscalls - } -} - /// This struct leverages CKB VM to verify transaction inputs. -/// -/// FlatBufferBuilder owned `Vec` that grows as needed, in the -/// future, we might refactor this to share buffer to achieve zero-copy -pub struct TransactionScriptsVerifier
-where - DL: CellDataProvider + HeaderProvider + ExtensionProvider + Send + Sync + Clone + 'static, -{ - data_loader: DL, - - rtx: Arc, - - binaries_by_data_hash: HashMap, - binaries_by_type_hash: HashMap, - - lock_groups: BTreeMap, - type_groups: BTreeMap, +pub struct TransactionScriptsVerifier
{ + tx_data: Arc>, + debug_printer: DebugPrinter, #[cfg(test)] skip_pause: Arc, - - consensus: Arc, - tx_env: Arc, - - syscalls_generator: TransactionScriptsSyscallsGenerator
, } impl
TransactionScriptsVerifier
@@ -398,73 +78,7 @@ where consensus: Arc, tx_env: Arc, ) -> TransactionScriptsVerifier
{ - let tx_hash = rtx.transaction.hash(); - let resolved_cell_deps = &rtx.resolved_cell_deps; - let resolved_inputs = &rtx.resolved_inputs; - let outputs = Arc::new( - rtx.transaction - .outputs_with_data_iter() - .enumerate() - .map(|(index, (cell_output, data))| { - let out_point = OutPoint::new_builder() - .tx_hash(tx_hash.clone()) - .index(index.pack()) - .build(); - let data_hash = CellOutput::calc_data_hash(&data); - CellMeta { - cell_output, - out_point, - transaction_info: None, - data_bytes: data.len() as u64, - mem_cell_data: Some(data), - mem_cell_data_hash: Some(data_hash), - } - }) - .collect(), - ); - - let mut binaries_by_data_hash: HashMap = HashMap::default(); - let mut binaries_by_type_hash: HashMap = HashMap::default(); - for cell_meta in resolved_cell_deps { - let data_hash = data_loader - .load_cell_data_hash(cell_meta) - .expect("cell data hash"); - let lazy = LazyData::from_cell_meta(cell_meta); - binaries_by_data_hash.insert(data_hash.to_owned(), lazy.to_owned()); - - if let Some(t) = &cell_meta.cell_output.type_().to_opt() { - binaries_by_type_hash - .entry(t.calc_script_hash()) - .and_modify(|bin| bin.merge(&data_hash)) - .or_insert_with(|| Binaries::new(data_hash.to_owned(), lazy.to_owned())); - } - } - - let mut lock_groups = BTreeMap::default(); - let mut type_groups = BTreeMap::default(); - for (i, cell_meta) in resolved_inputs.iter().enumerate() { - // here we are only pre-processing the data, verify method validates - // each input has correct script setup. 
- let output = &cell_meta.cell_output; - let lock_group_entry = lock_groups - .entry(output.calc_lock_hash()) - .or_insert_with(|| ScriptGroup::from_lock_script(&output.lock())); - lock_group_entry.input_indices.push(i); - if let Some(t) = &output.type_().to_opt() { - let type_group_entry = type_groups - .entry(t.calc_script_hash()) - .or_insert_with(|| ScriptGroup::from_type_script(t)); - type_group_entry.input_indices.push(i); - } - } - for (i, output) in rtx.transaction.outputs().into_iter().enumerate() { - if let Some(t) = &output.type_().to_opt() { - let type_group_entry = type_groups - .entry(t.calc_script_hash()) - .or_insert_with(|| ScriptGroup::from_type_script(t)); - type_group_entry.output_indices.push(i); - } - } + let tx_data = Arc::new(TxData::new(rtx, data_loader, consensus, tx_env)); let debug_printer: DebugPrinter = Arc::new( #[allow(unused_variables)] @@ -473,33 +87,15 @@ where debug!("script group: {} DEBUG OUTPUT: {}", hash, message); }, ); + #[cfg(test)] let skip_pause = Arc::new(AtomicBool::new(false)); - let syscalls_generator = TransactionScriptsSyscallsGenerator { - base_cycles: Arc::new(Mutex::new(0)), - data_loader: data_loader.clone(), - debug_printer: Arc::clone(&debug_printer), - message_box: Arc::new(Mutex::new(Vec::new())), - outputs: Arc::clone(&outputs), - rtx: Arc::clone(&rtx), - #[cfg(test)] - skip_pause: Arc::clone(&skip_pause), - vm_id: FIRST_VM_ID, - }; - TransactionScriptsVerifier { - data_loader, - binaries_by_data_hash, - binaries_by_type_hash, - rtx, - lock_groups, - type_groups, + tx_data, + debug_printer, #[cfg(test)] skip_pause, - consensus, - tx_env, - syscalls_generator, } } @@ -513,7 +109,7 @@ where /// * `hash: &Byte32`: this is the script hash of currently running script group. /// * `message: &str`: message passed to the debug syscall. 
pub fn set_debug_printer(&mut self, func: F) { - self.syscalls_generator.debug_printer = Arc::new(func); + self.debug_printer = Arc::new(func); } #[cfg(test)] @@ -521,96 +117,54 @@ where self.skip_pause.store(skip_pause, Ordering::SeqCst); } + ////////////////////////////////////////////////////////////////// + // Functions below have been moved from verifier struct to TxData, + // however we still preserve all the public APIs by delegating + // them to TxData. + ////////////////////////////////////////////////////////////////// + #[inline] #[allow(dead_code)] fn hash(&self) -> Byte32 { - self.rtx.transaction.hash() + self.tx_data.tx_hash() } /// Extracts actual script binary either in dep cells. pub fn extract_script(&self, script: &Script) -> Result { - let script_hash_type = ScriptHashType::try_from(script.hash_type()) - .map_err(|err| ScriptError::InvalidScriptHashType(err.to_string()))?; - match script_hash_type { - ScriptHashType::Data | ScriptHashType::Data1 | ScriptHashType::Data2 => { - if let Some(lazy) = self.binaries_by_data_hash.get(&script.code_hash()) { - Ok(lazy.access(&self.data_loader)?) - } else { - Err(ScriptError::ScriptNotFound(script.code_hash())) - } - } - ScriptHashType::Type => { - if let Some(ref bin) = self.binaries_by_type_hash.get(&script.code_hash()) { - match bin { - Binaries::Unique(_, ref lazy) => Ok(lazy.access(&self.data_loader)?), - Binaries::Duplicate(_, ref lazy) => Ok(lazy.access(&self.data_loader)?), - Binaries::Multiple => Err(ScriptError::MultipleMatches), - } - } else { - Err(ScriptError::ScriptNotFound(script.code_hash())) - } - } - } + self.tx_data.extract_script(script) + } + + /// Returns the version of the machine based on the script and the consensus rules. 
+ pub fn select_version(&self, script: &Script) -> Result { + self.tx_data.select_version(script) } - fn is_vm_version_1_and_syscalls_2_enabled(&self) -> bool { - // If the proposal window is allowed to prejudge on the vm version, - // it will cause proposal tx to start a new vm in the blocks before hardfork, - // destroying the assumption that the transaction execution only uses the old vm - // before hardfork, leading to unexpected network splits. - let epoch_number = self.tx_env.epoch_number_without_proposal_window(); - let hardfork_switch = self.consensus.hardfork_switch(); - hardfork_switch - .ckb2021 - .is_vm_version_1_and_syscalls_2_enabled(epoch_number) + /// Returns all script groups. + pub fn groups(&self) -> impl Iterator { + self.tx_data.groups() } - fn is_vm_version_2_and_syscalls_3_enabled(&self) -> bool { - // If the proposal window is allowed to prejudge on the vm version, - // it will cause proposal tx to start a new vm in the blocks before hardfork, - // destroying the assumption that the transaction execution only uses the old vm - // before hardfork, leading to unexpected network splits. - let epoch_number = self.tx_env.epoch_number_without_proposal_window(); - let hardfork_switch = self.consensus.hardfork_switch(); - hardfork_switch - .ckb2023 - .is_vm_version_2_and_syscalls_3_enabled(epoch_number) + /// Returns all script groups with type. + pub fn groups_with_type( + &self, + ) -> impl Iterator { + self.tx_data.groups_with_type() } - /// Returns the version of the machine based on the script and the consensus rules. 
- pub fn select_version(&self, script: &Script) -> Result { - let is_vm_version_2_and_syscalls_3_enabled = self.is_vm_version_2_and_syscalls_3_enabled(); - let is_vm_version_1_and_syscalls_2_enabled = self.is_vm_version_1_and_syscalls_2_enabled(); - let script_hash_type = ScriptHashType::try_from(script.hash_type()) - .map_err(|err| ScriptError::InvalidScriptHashType(err.to_string()))?; - match script_hash_type { - ScriptHashType::Data => Ok(ScriptVersion::V0), - ScriptHashType::Data1 => { - if is_vm_version_1_and_syscalls_2_enabled { - Ok(ScriptVersion::V1) - } else { - Err(ScriptError::InvalidVmVersion(1)) - } - } - ScriptHashType::Data2 => { - if is_vm_version_2_and_syscalls_3_enabled { - Ok(ScriptVersion::V2) - } else { - Err(ScriptError::InvalidVmVersion(2)) - } - } - ScriptHashType::Type => { - if is_vm_version_2_and_syscalls_3_enabled { - Ok(ScriptVersion::V2) - } else if is_vm_version_1_and_syscalls_2_enabled { - Ok(ScriptVersion::V1) - } else { - Ok(ScriptVersion::V0) - } - } - } + /// Finds the script group from cell deps. + pub fn find_script_group( + &self, + script_group_type: ScriptGroupType, + script_hash: &Byte32, + ) -> Option<&ScriptGroup> { + self.tx_data + .find_script_group(script_group_type, script_hash) } + ////////////////////////////////////////////////////////////////// + // This marks the end of delegated functions. + ////////////////////////////////////////////////////////////////// + /// Verifies the transaction by running scripts. /// /// ## Params @@ -724,77 +278,6 @@ where Ok(cycles) } - /// Resuming an suspended verify from snapshot - /// - /// ## Params - /// - /// * `snap` - Captured transaction verification state. - /// - /// * `limit_cycles` - Maximum allowed cycles to run the scripts. The verification quits early - /// when the consumed cycles exceed the limit. - /// - /// ## Returns - /// - /// It returns the total consumed cycles if verification completed, - /// If verify is suspended, a borrowed state will returned. 
- pub fn resume_from_snap( - &self, - snap: &TransactionState, - limit_cycles: Cycle, - ) -> Result { - let mut cycles = snap.current_cycles; - let mut current_used = 0; - - let (_hash, current_group) = self.groups().nth(snap.current).ok_or_else(|| { - ScriptError::Other(format!("snapshot group missing {:?}", snap.current)) - .unknown_source() - })?; - - // continue snapshot current script - match self.verify_group_with_chunk(current_group, limit_cycles, &snap.state) { - Ok(ChunkState::Completed(used_cycles, consumed_cycles)) => { - current_used = wrapping_cycles_add(current_used, consumed_cycles, current_group)?; - cycles = wrapping_cycles_add(cycles, used_cycles, current_group)?; - } - Ok(ChunkState::Suspended(state)) => { - let current = snap.current; - let state = TransactionState::new(state, current, cycles, limit_cycles); - return Ok(VerifyResult::Suspended(state)); - } - Err(e) => { - #[cfg(feature = "logging")] - logging::on_script_error(_hash, &self.hash(), &e); - return Err(e.source(current_group).into()); - } - } - - for (idx, (_hash, group)) in self.groups().enumerate().skip(snap.current + 1) { - let remain_cycles = limit_cycles.checked_sub(current_used).ok_or_else(|| { - ScriptError::Other(format!("expect invalid cycles {limit_cycles} {cycles}")) - .source(group) - })?; - - match self.verify_group_with_chunk(group, remain_cycles, &None) { - Ok(ChunkState::Completed(used_cycles, consumed_cycles)) => { - current_used = wrapping_cycles_add(current_used, consumed_cycles, group)?; - cycles = wrapping_cycles_add(cycles, used_cycles, group)?; - } - Ok(ChunkState::Suspended(state)) => { - let current = idx; - let state = TransactionState::new(state, current, cycles, remain_cycles); - return Ok(VerifyResult::Suspended(state)); - } - Err(e) => { - #[cfg(feature = "logging")] - logging::on_script_error(_hash, &self.hash(), &e); - return Err(e.source(group).into()); - } - } - } - - Ok(VerifyResult::Completed(cycles)) - } - /// Resuming an suspended verify from vm 
state /// /// ## Params @@ -810,7 +293,7 @@ where /// If verify is suspended, a borrowed state will returned. pub fn resume_from_state( &self, - state: TransactionState, + state: &TransactionState, limit_cycles: Cycle, ) -> Result { let TransactionState { @@ -821,14 +304,14 @@ where } = state; let mut current_used = 0; - let mut cycles = current_cycles; + let mut cycles = *current_cycles; - let (_hash, current_group) = self.groups().nth(current).ok_or_else(|| { + let (_hash, current_group) = self.groups().nth(*current).ok_or_else(|| { ScriptError::Other(format!("snapshot group missing {current:?}")).unknown_source() })?; let resumed_script_result = - self.verify_group_with_chunk(current_group, limit_cycles, &state); + self.verify_group_with_chunk(current_group, limit_cycles, state); match resumed_script_result { Ok(ChunkState::Completed(used_cycles, consumed_cycles)) => { @@ -836,7 +319,7 @@ where cycles = wrapping_cycles_add(cycles, used_cycles, current_group)?; } Ok(ChunkState::Suspended(state)) => { - let state = TransactionState::new(state, current, cycles, limit_cycles); + let state = TransactionState::new(state, *current, cycles, limit_cycles); return Ok(VerifyResult::Suspended(state)); } Err(e) => { @@ -968,7 +451,7 @@ where && Into::::into(group.script.hash_type()) == Into::::into(ScriptHashType::Type) { let verifier = TypeIdSystemScript { - rtx: &self.rtx, + rtx: &self.tx_data.rtx, script_group: group, max_cycles, }; @@ -977,24 +460,6 @@ where self.run(group, max_cycles) } } - /// Returns all script groups. - pub fn groups(&self) -> impl Iterator { - self.lock_groups.iter().chain(self.type_groups.iter()) - } - - /// Returns all script groups with type. 
- pub fn groups_with_type( - &self, - ) -> impl Iterator { - self.lock_groups - .iter() - .map(|(hash, group)| (ScriptGroupType::Lock, hash, group)) - .chain( - self.type_groups - .iter() - .map(|(hash, group)| (ScriptGroupType::Type, hash, group)), - ) - } fn verify_group_with_chunk( &self, @@ -1006,7 +471,7 @@ where && Into::::into(group.script.hash_type()) == Into::::into(ScriptHashType::Type) { let verifier = TypeIdSystemScript { - rtx: &self.rtx, + rtx: &self.tx_data.rtx, script_group: group, max_cycles, }; @@ -1026,24 +491,11 @@ where max_cycles: Cycle, state: &Option, ) -> Result { - let program = self.extract_script(&script_group.script)?; - let tx_data = TxData { - rtx: Arc::clone(&self.rtx), - data_loader: self.data_loader.clone(), - program, - script_group: Arc::new(script_group.clone()), - }; - let version = self.select_version(&script_group.script)?; let mut scheduler = if let Some(state) = state { - Scheduler::resume( - tx_data, - version, - self.syscalls_generator.clone(), - state.clone(), - ) + self.resume_scheduler(script_group, state) } else { - Scheduler::new(tx_data, version, self.syscalls_generator.clone()) - }; + self.create_scheduler(script_group) + }?; let previous_cycles = scheduler.consumed_cycles(); let res = scheduler.run(RunMode::LimitCycles(max_cycles)); match res { @@ -1083,7 +535,7 @@ where && Into::::into(group.script.hash_type()) == Into::::into(ScriptHashType::Type) { let verifier = TypeIdSystemScript { - rtx: &self.rtx, + rtx: &self.tx_data.rtx, script_group: group, max_cycles, }; @@ -1094,47 +546,33 @@ where } } - /// Finds the script group from cell deps. - pub fn find_script_group( - &self, - script_group_type: ScriptGroupType, - script_hash: &Byte32, - ) -> Option<&ScriptGroup> { - match script_group_type { - ScriptGroupType::Lock => self.lock_groups.get(script_hash), - ScriptGroupType::Type => self.type_groups.get(script_hash), - } - } - - /// Prepares syscalls. 
- pub fn generate_syscalls( + /// Create a scheduler to manage virtual machine instances. + pub fn create_scheduler( &self, - script_version: ScriptVersion, script_group: &ScriptGroup, - snapshot2_context: Arc>>>, - ) -> Vec)>> { - self.syscalls_generator - .generate_syscalls(script_version, script_group, snapshot2_context) + ) -> Result, ScriptError> { + let sg_data = SgData::new(&self.tx_data, script_group)?; + let debug_context = DebugContext { + debug_printer: Arc::clone(&self.debug_printer), + #[cfg(test)] + skip_pause: Arc::clone(&self.skip_pause), + }; + Ok(Scheduler::new(sg_data, debug_context)) } - /// Create a scheduler to manage virtual machine instances. - pub fn create_scheduler( + /// Resumes a scheduler from a previous state. + pub fn resume_scheduler( &self, script_group: &ScriptGroup, + state: &FullSuspendedState, ) -> Result, ScriptError> { - let program = self.extract_script(&script_group.script)?; - let tx_data = TxData { - rtx: Arc::clone(&self.rtx), - data_loader: self.data_loader.clone(), - program, - script_group: Arc::new(script_group.clone()), + let sg_data = SgData::new(&self.tx_data, script_group)?; + let debug_context = DebugContext { + debug_printer: Arc::clone(&self.debug_printer), + #[cfg(test)] + skip_pause: Arc::clone(&self.skip_pause), }; - let version = self.select_version(&script_group.script)?; - Ok(Scheduler::new( - tx_data, - version, - self.syscalls_generator.clone(), - )) + Ok(Scheduler::resume(sg_data, debug_context, state.clone())) } /// Runs a single program, then returns the exit code together with the entire @@ -1175,15 +613,7 @@ where max_cycles: Cycle, signal: &mut Receiver, ) -> Result { - let program = self.extract_script(&script_group.script)?; - let tx_data = TxData { - rtx: Arc::clone(&self.rtx), - data_loader: self.data_loader.clone(), - program, - script_group: Arc::new(script_group.clone()), - }; - let version = self.select_version(&script_group.script)?; - let mut scheduler = Scheduler::new(tx_data, version, 
self.syscalls_generator.clone()); + let mut scheduler = self.create_scheduler(script_group)?; let mut pause = VMPause::new(); let child_pause = pause.clone(); let (finish_tx, mut finish_rx) = oneshot::channel::>(); diff --git a/script/src/verify/tests/ckb_latest/features_since_v2019.rs b/script/src/verify/tests/ckb_latest/features_since_v2019.rs index a69c887575..74eccdef75 100644 --- a/script/src/verify/tests/ckb_latest/features_since_v2019.rs +++ b/script/src/verify/tests/ckb_latest/features_since_v2019.rs @@ -1193,7 +1193,7 @@ fn _check_typical_secp256k1_blake160_2_in_2_out_resume_load_cycles(step_cycles: loop { let state = init_state.take().unwrap(); let (limit_cycles, _last) = state.next_limit_cycles(step_cycles, TWO_IN_TWO_OUT_CYCLES); - match verifier.resume_from_state(state, limit_cycles).unwrap() { + match verifier.resume_from_state(&state, limit_cycles).unwrap() { VerifyResult::Suspended(state) => init_state = Some(state), VerifyResult::Completed(cycle) => { cycles = cycle; diff --git a/script/src/verify/tests/ckb_latest/features_since_v2021.rs b/script/src/verify/tests/ckb_latest/features_since_v2021.rs index 4d274616fe..32a3196dc2 100644 --- a/script/src/verify/tests/ckb_latest/features_since_v2021.rs +++ b/script/src/verify/tests/ckb_latest/features_since_v2021.rs @@ -729,7 +729,7 @@ fn _check_type_id_one_in_one_out_resume_with_state( loop { times += 1; let state = init_state.take().unwrap(); - match verifier.resume_from_state(state, limit).unwrap() { + match verifier.resume_from_state(&state, limit).unwrap() { VerifyResult::Suspended(state) => { init_state = Some(state); limit *= 2; @@ -860,7 +860,7 @@ fn _check_typical_secp256k1_blake160_2_in_2_out_tx_with_state(step_cycles: Cycle loop { let state = init_state.take().unwrap(); let (limit_cycles, _last) = state.next_limit_cycles(step_cycles, TWO_IN_TWO_OUT_CYCLES); - match verifier.resume_from_state(state, limit_cycles).unwrap() { + match verifier.resume_from_state(&state, limit_cycles).unwrap() { 
VerifyResult::Suspended(state) => init_state = Some(state), VerifyResult::Completed(cycle) => { cycles = cycle; @@ -927,7 +927,7 @@ fn _check_typical_secp256k1_blake160_2_in_2_out_tx_with_snap(step_cycles: Cycle) let snap = init_snap.take().unwrap(); let (limit_cycles, _last) = snap.next_limit_cycles(step_cycles, TWO_IN_TWO_OUT_CYCLES); - match verifier.resume_from_snap(&snap, limit_cycles).unwrap() { + match verifier.resume_from_state(&snap, limit_cycles).unwrap() { VerifyResult::Suspended(state) => { if count % 500 == 0 { init_snap = Some(state); @@ -944,7 +944,7 @@ fn _check_typical_secp256k1_blake160_2_in_2_out_tx_with_snap(step_cycles: Cycle) let state = init_state.take().unwrap(); let (limit_cycles, _last) = state.next_limit_cycles(step_cycles, TWO_IN_TWO_OUT_CYCLES); - match verifier.resume_from_state(state, limit_cycles).unwrap() { + match verifier.resume_from_state(&state, limit_cycles).unwrap() { VerifyResult::Suspended(state) => { if count % 500 == 0 { init_snap = Some(state); @@ -1020,7 +1020,7 @@ fn check_typical_secp256k1_blake160_2_in_2_out_tx_with_complete() { let snap = init_snap.take().unwrap(); let (limit_cycles, _last) = snap.next_limit_cycles(TWO_IN_TWO_OUT_CYCLES / 10, TWO_IN_TWO_OUT_CYCLES); - match verifier.resume_from_snap(&snap, limit_cycles).unwrap() { + match verifier.resume_from_state(&snap, limit_cycles).unwrap() { VerifyResult::Suspended(state) => init_snap = Some(state), VerifyResult::Completed(_) => { unreachable!() @@ -1164,7 +1164,7 @@ fn load_code_with_snapshot() { } let snap = init_snap.take().unwrap(); - let result = verifier.resume_from_snap(&snap, max_cycles); + let result = verifier.resume_from_state(&snap, max_cycles); match result.unwrap() { VerifyResult::Suspended(state) => { @@ -1264,7 +1264,7 @@ fn load_code_with_snapshot_more_times() { loop { let snap = init_snap.take().unwrap(); - let result = verifier.resume_from_snap(&snap, max_cycles); + let result = verifier.resume_from_state(&snap, max_cycles); match 
result.unwrap() { VerifyResult::Suspended(state) => { diff --git a/script/src/verify/tests/ckb_latest/features_since_v2023.rs b/script/src/verify/tests/ckb_latest/features_since_v2023.rs index 7ab47dfc9a..389fcd1600 100644 --- a/script/src/verify/tests/ckb_latest/features_since_v2023.rs +++ b/script/src/verify/tests/ckb_latest/features_since_v2023.rs @@ -8,6 +8,7 @@ use ckb_types::{ }; use proptest::prelude::*; use proptest::proptest; +use std::collections::{BTreeMap, HashMap}; fn simple_spawn_test(bin_path: &str, args: &[u8]) -> Result { let script_version = SCRIPT_VERSION; @@ -598,7 +599,7 @@ fn check_spawn_state() { loop { times += 1; let state: TransactionState = init_state.take().unwrap(); - match verifier.resume_from_snap(&state, max_cycles).unwrap() { + match verifier.resume_from_state(&state, max_cycles).unwrap() { VerifyResult::Suspended(state) => { init_state = Some(state); } diff --git a/script/src/verify/tests/utils.rs b/script/src/verify/tests/utils.rs index 035b8c3da9..1ebd0d0b8a 100644 --- a/script/src/verify/tests/utils.rs +++ b/script/src/verify/tests/utils.rs @@ -15,14 +15,14 @@ use ckb_test_chain_utils::{ use ckb_types::{ core::{ capacity_bytes, - cell::CellMetaBuilder, + cell::{CellMeta, CellMetaBuilder}, hardfork::{HardForks, CKB2021, CKB2023}, Capacity, Cycle, DepType, EpochNumber, EpochNumberWithFraction, HeaderView, ScriptHashType, TransactionBuilder, TransactionInfo, }, h256, packed::{ - Byte32, CellDep, CellInput, OutPoint, Script, TransactionInfoBuilder, + Byte32, CellDep, CellInput, CellOutput, OutPoint, Script, TransactionInfoBuilder, TransactionKeyBuilder, WitnessArgs, }, H256, @@ -246,7 +246,7 @@ impl TransactionScriptsVerifierWithEnv { loop { times += 1; let snap = init_snap.take().unwrap(); - match verifier.resume_from_snap(&snap, max_cycles) { + match verifier.resume_from_state(&snap, max_cycles) { Ok(VerifyResult::Suspended(state)) => { init_snap = Some(state); }