Use AtomicU64 for base cycles instead of Mutex<Cycle>
xxuejie committed Feb 11, 2025
1 parent 29ebdd2 commit 3595333
Showing 3 changed files with 30 additions and 19 deletions.
28 changes: 17 additions & 11 deletions script/src/scheduler.rs
@@ -23,7 +23,10 @@ use ckb_vm::{
     Error, FlattenedArgsReader, Register,
 };
 use std::collections::{BTreeMap, HashMap};
-use std::sync::{Arc, Mutex};
+use std::sync::{
+    atomic::{AtomicU64, Ordering},
+    Arc, Mutex,
+};
 
 /// Root process's id.
 pub const ROOT_VM_ID: VmId = FIRST_VM_ID;
@@ -78,7 +81,7 @@ where
     ///
     /// One can consider that +total_cycles+ contains the total cycles
     /// consumed in current scheduler, when the scheduler is not busy executing.
-    pub total_cycles: Arc<Mutex<Cycle>>,
+    pub total_cycles: Arc<AtomicU64>,
     /// Iteration cycles, see +total_cycles+ on its usage
     pub iteration_cycles: Cycle,
     /// Next vm id used by spawn.
@@ -112,7 +115,7 @@
         Self {
             sg_data: Arc::new(sg_data),
             debug_context,
-            total_cycles: Arc::new(Mutex::new(0)),
+            total_cycles: Arc::new(AtomicU64::new(0)),
             iteration_cycles: 0,
             next_vm_id: FIRST_VM_ID,
             next_fd_slot: FIRST_FD_SLOT,
@@ -128,16 +131,19 @@
 
     /// Return total cycles.
     pub fn consumed_cycles(&self) -> Cycle {
-        *self.total_cycles.lock().expect("mutex")
+        self.total_cycles.load(Ordering::Acquire)
     }
 
     /// Add cycles to total cycles.
    pub fn consume_cycles(&mut self, cycles: Cycle) -> Result<(), Error> {
-        let mut total_cycles = self.total_cycles.lock().expect("mutex");
-        *total_cycles = (*total_cycles)
-            .checked_add(cycles)
-            .ok_or(Error::CyclesExceeded)?;
-        Ok(())
+        match self
+            .total_cycles
+            .fetch_update(Ordering::AcqRel, Ordering::Acquire, |total_cycles| {
+                total_cycles.checked_add(cycles)
+            }) {
+            Ok(_) => Ok(()),
+            Err(_) => Err(Error::CyclesExceeded),
+        }
    }
 
     /// Resume a previously suspended scheduler state
@@ -149,7 +155,7 @@
         let mut scheduler = Self {
             sg_data: Arc::new(sg_data),
             debug_context,
-            total_cycles: Arc::new(Mutex::new(full.total_cycles)),
+            total_cycles: Arc::new(AtomicU64::new(full.total_cycles)),
             iteration_cycles: 0,
             next_vm_id: full.next_vm_id,
             next_fd_slot: full.next_fd_slot,
@@ -199,7 +205,7 @@
             // internal execution logic, it does not belong to VM execution
             // consensus. We are not charging cycles for suspending
             // a VM in the process of suspending the whole scheduler.
-            total_cycles: *self.total_cycles.lock().expect("mutex"),
+            total_cycles: self.total_cycles.load(Ordering::Acquire),
             next_vm_id: self.next_vm_id,
             next_fd_slot: self.next_fd_slot,
             vms,
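
The key change above is in consume_cycles: fetch_update re-runs its closure whenever another thread races in between the load and the compare-and-swap, and aborts the update when the closure returns None, so the checked_add overflow guard from the Mutex version carries over without a lock. A minimal standalone sketch of that pattern follows; Cycle and CyclesExceeded here are stand-ins for the crate's own types, which are not reproduced in this diff.

use std::sync::atomic::{AtomicU64, Ordering};

type Cycle = u64; // stand-in: mirrors ckb_vm's Cycle alias

#[derive(Debug)]
struct CyclesExceeded; // stand-in for Error::CyclesExceeded

// Add `cycles` to `total`, failing atomically on u64 overflow.
// Returning None from the closure aborts the update, leaving the
// counter unchanged and yielding Err with the previous value.
fn consume(total: &AtomicU64, cycles: Cycle) -> Result<(), CyclesExceeded> {
    total
        .fetch_update(Ordering::AcqRel, Ordering::Acquire, |t| t.checked_add(cycles))
        .map(|_| ())
        .map_err(|_| CyclesExceeded)
}

fn main() {
    let total = AtomicU64::new(u64::MAX - 10);
    assert!(consume(&total, 10).is_ok()); // lands exactly on u64::MAX
    assert!(consume(&total, 1).is_err()); // would overflow: rejected
    assert_eq!(total.load(Ordering::Acquire), u64::MAX); // untouched by the failed add
}
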
10 changes: 6 additions & 4 deletions script/src/syscalls/current_cycles.rs
@@ -4,11 +4,14 @@ use ckb_vm::{
     registers::{A0, A7},
     Error as VMError, Register, SupportMachine, Syscalls,
 };
-use std::sync::{Arc, Mutex};
+use std::sync::{
+    atomic::{AtomicU64, Ordering},
+    Arc,
+};
 
 #[derive(Debug, Default)]
 pub struct CurrentCycles {
-    base: Arc<Mutex<u64>>,
+    base: Arc<AtomicU64>,
 }
 
 impl CurrentCycles {
@@ -33,8 +36,7 @@ impl<Mac: SupportMachine> Syscalls<Mac> for CurrentCycles {
         }
         let cycles = self
             .base
-            .lock()
-            .map_err(|e| VMError::Unexpected(e.to_string()))?
+            .load(Ordering::Acquire)
             .checked_add(machine.cycles())
             .ok_or(VMError::CyclesOverflow)?;
         machine.set_register(A0, Mac::REG::from_u64(cycles));
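
Beyond the data-structure swap, this hunk simplifies the error surface: a poisoned Mutex used to surface as VMError::Unexpected, whereas an atomic load cannot fail, leaving overflow as the syscall's only fallible step. A sketch of the resulting arithmetic, with VmError standing in for ckb_vm's error enum:

use std::sync::{
    atomic::{AtomicU64, Ordering},
    Arc,
};

#[derive(Debug, PartialEq)]
enum VmError {
    CyclesOverflow, // stand-in for VMError::CyclesOverflow
}

// Mirror of the syscall's computation: the base is read atomically
// (no lock, hence no poisoning error path) and only the overflow
// check can fail.
fn current_cycles(base: &Arc<AtomicU64>, consumed: u64) -> Result<u64, VmError> {
    base.load(Ordering::Acquire)
        .checked_add(consumed)
        .ok_or(VmError::CyclesOverflow)
}

fn main() {
    let base = Arc::new(AtomicU64::new(1_000));
    assert_eq!(current_cycles(&base, 24), Ok(1_024));
    assert_eq!(current_cycles(&base, u64::MAX), Err(VmError::CyclesOverflow));
}
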
11 changes: 7 additions & 4 deletions script/src/types.rs
@@ -15,7 +15,10 @@ use ckb_vm::{
 use serde::{Deserialize, Serialize};
 use std::collections::{BTreeMap, HashMap};
 use std::fmt;
-use std::sync::{Arc, Mutex, RwLock};
+use std::sync::{
+    atomic::{AtomicU64, Ordering},
+    Arc, Mutex, RwLock,
+};
 
 #[cfg(has_asm)]
 use ckb_vm::machine::asm::{AsmCoreMachine, AsmMachine};
@@ -963,7 +966,7 @@ pub struct VmContext<DL>
 where
     DL: CellDataProvider,
 {
-    pub(crate) base_cycles: Arc<Mutex<u64>>,
+    pub(crate) base_cycles: Arc<AtomicU64>,
     /// A mutable reference to scheduler's message box
     pub(crate) message_box: Arc<Mutex<Vec<Message>>>,
     pub(crate) snapshot2_context: Arc<Mutex<Snapshot2Context<DataPieceId, Arc<VmData<DL>>>>>,
@@ -978,14 +981,14 @@
     /// among different entities.
     pub fn new(vm_data: &Arc<VmData<DL>>, message_box: &Arc<Mutex<Vec<Message>>>) -> Self {
         Self {
-            base_cycles: Arc::new(Mutex::new(0)),
+            base_cycles: Arc::new(AtomicU64::new(0)),
             message_box: Arc::clone(message_box),
             snapshot2_context: Arc::new(Mutex::new(Snapshot2Context::new(Arc::clone(vm_data)))),
         }
     }
 
     pub fn set_base_cycles(&mut self, base_cycles: u64) {
-        *self.base_cycles.lock().expect("lock") = base_cycles;
+        self.base_cycles.store(base_cycles, Ordering::Release);
     }
 }
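
Note the memory-ordering pairing across the three files: set_base_cycles stores with Release while the readers (consumed_cycles and the CurrentCycles syscall) load with Acquire, so a reader that observes the new counter value also observes everything the writer did before the store. A minimal illustration of that pairing, not taken from the diff itself:

use std::sync::{
    atomic::{AtomicU64, Ordering},
    Arc,
};
use std::thread;

fn main() {
    // One counter, two handles: a scheduler-like writer and a
    // syscall-like reader sharing it through Arc.
    let base = Arc::new(AtomicU64::new(0));
    let writer = Arc::clone(&base);

    let handle = thread::spawn(move || {
        // Release: everything written before this store is visible
        // to any thread whose Acquire load sees the value 500.
        writer.store(500, Ordering::Release);
    });
    handle.join().unwrap();

    assert_eq!(base.load(Ordering::Acquire), 500);
}
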
