diff --git a/src/memory_manager.rs b/src/memory_manager.rs index 54df61cce4..c4e6331766 100644 --- a/src/memory_manager.rs +++ b/src/memory_manager.rs @@ -17,8 +17,9 @@ use crate::plan::AllocationSemantics; use crate::plan::{Mutator, MutatorContext}; use crate::scheduler::WorkBucketStage; use crate::scheduler::{GCWork, GCWorker}; +use crate::util::alloc::allocator::AllocationOptions; use crate::util::alloc::allocators::AllocatorSelector; -use crate::util::constants::{LOG_BYTES_IN_PAGE, MIN_OBJECT_SIZE}; +use crate::util::constants::LOG_BYTES_IN_PAGE; use crate::util::heap::layout::vm_layout::vm_layout; use crate::util::opaque_pointer::*; use crate::util::{Address, ObjectReference}; @@ -139,11 +140,28 @@ pub fn flush_mutator(mutator: &mut Mutator) { mutator.flush() } -/// Allocate memory for an object. For performance reasons, a VM should -/// implement the allocation fast-path on their side rather than just calling this function. +/// Allocate memory for an object. /// -/// If the VM provides a non-zero `offset` parameter, then the returned address will be -/// such that the `RETURNED_ADDRESS + offset` is aligned to the `align` parameter. +/// When the allocation is successful, it returns the starting address of the new object. The +/// memory range for the new object is `size` bytes starting from the returned address, and +/// `RETURNED_ADDRESS + offset` is guaranteed to be aligned to the `align` parameter. The returned +/// address of a successful allocation will never be zero. +/// +/// If MMTk fails to allocate memory, it will attempt a GC to free up some memory and retry the +/// allocation. After triggering GC, it will call [`crate::vm::Collection::block_for_gc`] to suspend +/// the current thread that is allocating. Callers of `alloc` must be aware of this behavior. +/// For example, JIT compilers that support +/// precise stack scanning need to make the call site of `alloc` a GC-safe point by generating stack maps. See +/// [`alloc_with_options`] if it is undesirable to trigger GC at this allocation site. +/// +/// If MMTk has attempted at least one GC, and still cannot free up enough memory, it will call +/// [`crate::vm::Collection::out_of_memory`] to inform the binding. The VM binding +/// can implement that method to handle the out-of-memory event in a VM-specific way, including but +/// not limited to throwing exceptions or errors. If [`crate::vm::Collection::out_of_memory`] returns +/// normally without panicking or throwing exceptions, this function will return zero. +/// +/// For performance reasons, a VM should implement the allocation fast-path on their side rather +/// than just calling this function. /// /// Arguments: /// * `mutator`: The mutator to perform this allocation request. @@ -158,24 +176,46 @@ pub fn alloc( offset: usize, semantics: AllocationSemantics, ) -> Address { - // MMTk has assumptions about minimal object size. - // We need to make sure that all allocations comply with the min object size. - // Ideally, we check the allocation size, and if it is smaller, we transparently allocate the min - // object size (the VM does not need to know this). However, for the VM bindings we support at the moment, - // their object sizes are all larger than MMTk's min object size, so we simply put an assertion here. - // If you plan to use MMTk with a VM with its object size smaller than MMTk's min object size, you should - // meet the min object size in the fastpath. 
-    debug_assert!(size >= MIN_OBJECT_SIZE);
-    // Assert alignment
-    debug_assert!(align >= VM::MIN_ALIGNMENT);
-    debug_assert!(align <= VM::MAX_ALIGNMENT);
-    // Assert offset
-    debug_assert!(VM::USE_ALLOCATION_OFFSET || offset == 0);
+    #[cfg(debug_assertions)]
+    crate::util::alloc::allocator::assert_allocation_args::<VM>(size, align, offset);
 
     mutator.alloc(size, align, offset, semantics)
 }
 
-/// Invoke the allocation slow path. This is only intended for use when a binding implements the fastpath on
+/// Allocate memory for an object.
+///
+/// This allocation function allows the allocation behavior to be changed, as specified by
+/// [`crate::util::alloc::AllocationOptions`]. For example, one can allow the allocation to
+/// overcommit memory beyond the heap size without triggering a GC. This function can be
+/// used in cases where the runtime needs an allocation behavior different from
+/// what the default [`alloc`] provides.
+///
+/// Arguments:
+/// * `mutator`: The mutator to perform this allocation request.
+/// * `size`: The number of bytes required for the object.
+/// * `align`: Required alignment for the object.
+/// * `offset`: Offset associated with the alignment.
+/// * `semantics`: The allocation semantic required for the allocation.
+/// * `options`: The allocation options to change the default allocation behavior for this request.
+pub fn alloc_with_options<VM: VMBinding>(
+    mutator: &mut Mutator<VM>,
+    size: usize,
+    align: usize,
+    offset: usize,
+    semantics: AllocationSemantics,
+    options: crate::util::alloc::allocator::AllocationOptions,
+) -> Address {
+    #[cfg(debug_assertions)]
+    crate::util::alloc::allocator::assert_allocation_args::<VM>(size, align, offset);
+
+    mutator.alloc_with_options(size, align, offset, semantics, options)
+}
+
+/// Invoke the allocation slow path of [`alloc`].
+/// Like [`alloc`], this function may trigger GC and call [`crate::vm::Collection::block_for_gc`] or
+/// [`crate::vm::Collection::out_of_memory`]. The caller needs to be aware of that.
+///
+/// *Note*: This is only intended for use when a binding implements the fastpath on
 /// the binding side. When the binding handles fast path allocation and the fast path fails, it can use this
 /// method for slow path allocation. Calling before exhausting fast path allocaiton buffer will lead to bad
 /// performance.
@@ -196,6 +236,34 @@ pub fn alloc_slow<VM: VMBinding>(
     mutator.alloc_slow(size, align, offset, semantics)
 }
 
+/// Invoke the allocation slow path of [`alloc_with_options`].
+///
+/// Like [`alloc_with_options`], this function allows the allocation behavior to be changed, as specified by
+/// [`crate::util::alloc::AllocationOptions`]. For example, one can allow the allocation to
+/// overcommit memory beyond the heap size without triggering a GC. This function can be
+/// used in cases where the runtime needs an allocation behavior different from
+/// what the default [`alloc`] provides.
+///
+/// Like [`alloc_slow`], this function is also only intended for use when a binding implements the
+/// fastpath on the binding side.
+///
+/// Arguments:
+/// * `mutator`: The mutator to perform this allocation request.
+/// * `size`: The number of bytes required for the object.
+/// * `align`: Required alignment for the object.
+/// * `offset`: Offset associated with the alignment.
+/// * `semantics`: The allocation semantic required for the allocation.
+pub fn alloc_slow_with_options( + mutator: &mut Mutator, + size: usize, + align: usize, + offset: usize, + semantics: AllocationSemantics, + options: AllocationOptions, +) -> Address { + mutator.alloc_slow_with_options(size, align, offset, semantics, options) +} + /// Perform post-allocation actions, usually initializing object metadata. For many allocators none are /// required. For performance reasons, a VM should implement the post alloc fast-path on their side /// rather than just calling this function. diff --git a/src/plan/mutator_context.rs b/src/plan/mutator_context.rs index 4ed41190cd..58c2c4868d 100644 --- a/src/plan/mutator_context.rs +++ b/src/plan/mutator_context.rs @@ -4,6 +4,7 @@ use crate::plan::barriers::Barrier; use crate::plan::global::Plan; use crate::plan::AllocationSemantics; use crate::policy::space::Space; +use crate::util::alloc::allocator::AllocationOptions; use crate::util::alloc::allocators::{AllocatorSelector, Allocators}; use crate::util::alloc::Allocator; use crate::util::{Address, ObjectReference}; @@ -112,11 +113,30 @@ impl MutatorContext for Mutator { offset: usize, allocator: AllocationSemantics, ) -> Address { - unsafe { + let allocator = unsafe { self.allocators .get_allocator_mut(self.config.allocator_mapping[allocator]) - } - .alloc(size, align, offset) + }; + // The value should be default/unset at the beginning of an allocation request. + debug_assert!(allocator.get_context().get_alloc_options().is_default()); + allocator.alloc(size, align, offset) + } + + fn alloc_with_options( + &mut self, + size: usize, + align: usize, + offset: usize, + allocator: AllocationSemantics, + options: AllocationOptions, + ) -> Address { + let allocator = unsafe { + self.allocators + .get_allocator_mut(self.config.allocator_mapping[allocator]) + }; + // The value should be default/unset at the beginning of an allocation request. + debug_assert!(allocator.get_context().get_alloc_options().is_default()); + allocator.alloc_with_options(size, align, offset, options) } fn alloc_slow( @@ -126,11 +146,30 @@ impl MutatorContext for Mutator { offset: usize, allocator: AllocationSemantics, ) -> Address { - unsafe { + let allocator = unsafe { self.allocators .get_allocator_mut(self.config.allocator_mapping[allocator]) - } - .alloc_slow(size, align, offset) + }; + // The value should be default/unset at the beginning of an allocation request. + debug_assert!(allocator.get_context().get_alloc_options().is_default()); + allocator.alloc_slow(size, align, offset) + } + + fn alloc_slow_with_options( + &mut self, + size: usize, + align: usize, + offset: usize, + allocator: AllocationSemantics, + options: AllocationOptions, + ) -> Address { + let allocator = unsafe { + self.allocators + .get_allocator_mut(self.config.allocator_mapping[allocator]) + }; + // The value should be default/unset at the beginning of an allocation request. + debug_assert!(allocator.get_context().get_alloc_options().is_default()); + allocator.alloc_slow_with_options(size, align, offset, options) } // Note that this method is slow, and we expect VM bindings that care about performance to implement allocation fastpath sequence in their bindings. @@ -262,7 +301,7 @@ pub trait MutatorContext: Send + 'static { fn prepare(&mut self, tls: VMWorkerThread); /// Do the release work for this mutator. fn release(&mut self, tls: VMWorkerThread); - /// Allocate memory for an object. + /// Allocate memory for an object. This function will trigger a GC on failed allocation. 
/// /// Arguments: /// * `size`: the number of bytes required for the object. @@ -276,7 +315,25 @@ pub trait MutatorContext: Send + 'static { offset: usize, allocator: AllocationSemantics, ) -> Address; - /// The slow path allocation. This is only useful when the binding + /// Allocate memory for an object with more options to control this allocation request, e.g. not triggering a GC on fail. + /// + /// Arguments: + /// * `size`: the number of bytes required for the object. + /// * `align`: required alignment for the object. + /// * `offset`: offset associated with the alignment. The result plus the offset will be aligned to the given alignment. + /// * `allocator`: the allocation semantic used for this object. + /// * `options`: the allocation options to change the default allocation behavior for this request. + fn alloc_with_options( + &mut self, + size: usize, + align: usize, + offset: usize, + allocator: AllocationSemantics, + options: AllocationOptions, + ) -> Address; + /// The slow path allocation for [`MutatorContext::alloc`]. This function will trigger a GC on failed allocation. + /// + /// This is only useful when the binding /// implements the fast path allocation, and would like to explicitly /// call the slow path after the fast path allocation fails. fn alloc_slow( @@ -286,6 +343,19 @@ pub trait MutatorContext: Send + 'static { offset: usize, allocator: AllocationSemantics, ) -> Address; + /// The slow path allocation for [`MutatorContext::alloc_with_options`]. + /// + /// This is only useful when the binding + /// implements the fast path allocation, and would like to explicitly + /// call the slow path after the fast path allocation fails. + fn alloc_slow_with_options( + &mut self, + size: usize, + align: usize, + offset: usize, + allocator: AllocationSemantics, + options: AllocationOptions, + ) -> Address; /// Perform post-allocation actions. For many allocators none are /// required. /// diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs index 331600bc91..f946dbd7ff 100644 --- a/src/policy/immix/immixspace.rs +++ b/src/policy/immix/immixspace.rs @@ -7,6 +7,7 @@ use crate::policy::sft::GCWorkerMutRef; use crate::policy::sft::SFT; use crate::policy::sft_map::SFTMap; use crate::policy::space::{CommonSpace, Space}; +use crate::util::alloc::allocator::AllocationOptions; use crate::util::alloc::allocator::AllocatorContext; use crate::util::constants::LOG_BYTES_IN_PAGE; use crate::util::heap::chunk_map::*; @@ -516,8 +517,13 @@ impl ImmixSpace { } /// Allocate a clean block. 
- pub fn get_clean_block(&self, tls: VMThread, copy: bool) -> Option { - let block_address = self.acquire(tls, Block::PAGES); + pub fn get_clean_block( + &self, + tls: VMThread, + copy: bool, + alloc_options: AllocationOptions, + ) -> Option { + let block_address = self.acquire(tls, Block::PAGES, alloc_options); if block_address.is_zero() { return None; } diff --git a/src/policy/largeobjectspace.rs b/src/policy/largeobjectspace.rs index cdeb87a9c0..2fe78c8779 100644 --- a/src/policy/largeobjectspace.rs +++ b/src/policy/largeobjectspace.rs @@ -5,6 +5,7 @@ use crate::plan::VectorObjectQueue; use crate::policy::sft::GCWorkerMutRef; use crate::policy::sft::SFT; use crate::policy::space::{CommonSpace, Space}; +use crate::util::alloc::allocator::AllocationOptions; use crate::util::constants::BYTES_IN_PAGE; use crate::util::heap::{FreeListPageResource, PageResource}; use crate::util::metadata; @@ -303,8 +304,13 @@ impl LargeObjectSpace { } /// Allocate an object - pub fn allocate_pages(&self, tls: VMThread, pages: usize) -> Address { - self.acquire(tls, pages) + pub fn allocate_pages( + &self, + tls: VMThread, + pages: usize, + alloc_options: AllocationOptions, + ) -> Address { + self.acquire(tls, pages, alloc_options) } /// Test if the object's mark bit is the same as the given value. If it is not the same, diff --git a/src/policy/lockfreeimmortalspace.rs b/src/policy/lockfreeimmortalspace.rs index 7a7160d8bc..76e44c55ee 100644 --- a/src/policy/lockfreeimmortalspace.rs +++ b/src/policy/lockfreeimmortalspace.rs @@ -8,6 +8,7 @@ use crate::policy::sft::SFT; use crate::policy::space::{CommonSpace, Space}; use crate::util::address::Address; +use crate::util::alloc::allocator::AllocationOptions; use crate::util::conversions; use crate::util::heap::gc_trigger::GCTrigger; use crate::util::heap::layout::vm_layout::vm_layout; @@ -140,7 +141,7 @@ impl Space for LockFreeImmortalSpace { data_pages + meta_pages } - fn acquire(&self, _tls: VMThread, pages: usize) -> Address { + fn acquire(&self, _tls: VMThread, pages: usize, alloc_options: AllocationOptions) -> Address { trace!("LockFreeImmortalSpace::acquire"); let bytes = conversions::pages_to_bytes(pages); let start = self @@ -150,7 +151,11 @@ impl Space for LockFreeImmortalSpace { }) .expect("update cursor failed"); if start + bytes > self.limit { - panic!("OutOfMemory") + if alloc_options.on_fail.allow_oom_call() { + panic!("OutOfMemory"); + } else { + return Address::ZERO; + } } if self.slow_path_zeroing { crate::util::memory::zero(start, bytes); diff --git a/src/policy/marksweepspace/native_ms/global.rs b/src/policy/marksweepspace/native_ms/global.rs index f39b051156..3c46df3f7a 100644 --- a/src/policy/marksweepspace/native_ms/global.rs +++ b/src/policy/marksweepspace/native_ms/global.rs @@ -24,6 +24,7 @@ use crate::plan::ObjectQueue; use crate::plan::VectorObjectQueue; use crate::policy::sft::SFT; use crate::policy::space::{CommonSpace, Space}; +use crate::util::alloc::allocator::AllocationOptions; use crate::util::constants::LOG_BYTES_IN_PAGE; use crate::util::heap::chunk_map::*; use crate::util::linear_scan::Region; @@ -454,7 +455,13 @@ impl MarkSweepSpace { crate::util::metadata::vo_bit::bzero_vo_bit(block.start(), Block::BYTES); } - pub fn acquire_block(&self, tls: VMThread, size: usize, align: usize) -> BlockAcquireResult { + pub fn acquire_block( + &self, + tls: VMThread, + size: usize, + align: usize, + alloc_options: AllocationOptions, + ) -> BlockAcquireResult { { let mut abandoned = self.abandoned.lock().unwrap(); let bin = mi_bin::(size, 
align); @@ -476,7 +483,7 @@ impl MarkSweepSpace { } } - let acquired = self.acquire(tls, Block::BYTES >> LOG_BYTES_IN_PAGE); + let acquired = self.acquire(tls, Block::BYTES >> LOG_BYTES_IN_PAGE, alloc_options); if acquired.is_zero() { BlockAcquireResult::Exhausted } else { diff --git a/src/policy/space.rs b/src/policy/space.rs index 3ce1ee3fe1..d300efbbf9 100644 --- a/src/policy/space.rs +++ b/src/policy/space.rs @@ -22,6 +22,7 @@ use crate::mmtk::SFT_MAP; #[cfg(debug_assertions)] use crate::policy::sft::EMPTY_SFT_NAME; use crate::policy::sft::SFT; +use crate::util::alloc::allocator::AllocationOptions; use crate::util::copy::*; use crate::util::heap::gc_trigger::GCTrigger; use crate::util::heap::layout::vm_layout::BYTES_IN_CHUNK; @@ -67,35 +68,57 @@ pub trait Space: 'static + SFT + Sync + Downcast { /// avoid arithmatic overflow. If we have to do computation in the allocation fastpath and /// overflow happens there, there is nothing we can do about it. /// Return a boolean to indicate if we will be out of memory, determined by the check. - fn will_oom_on_acquire(&self, tls: VMThread, size: usize) -> bool { + fn will_oom_on_acquire(&self, size: usize) -> bool { let max_pages = self.get_gc_trigger().policy.get_max_heap_size_in_pages(); let requested_pages = size >> LOG_BYTES_IN_PAGE; - if requested_pages > max_pages { - VM::VMCollection::out_of_memory( - tls, - crate::util::alloc::AllocationError::HeapOutOfMemory, - ); + requested_pages > max_pages + } + + /// Check if the requested `size` is an obvious out-of-memory case using + /// [`Self::will_oom_on_acquire`] and, if it is, call `Collection::out_of_memory`. Return the + /// result of `will_oom_on_acquire`. + fn handle_obvious_oom_request( + &self, + tls: VMThread, + size: usize, + alloc_options: AllocationOptions, + ) -> bool { + if self.will_oom_on_acquire(size) { + if alloc_options.on_fail.allow_oom_call() { + VM::VMCollection::out_of_memory( + tls, + crate::util::alloc::AllocationError::HeapOutOfMemory, + ); + } return true; } false } - fn acquire(&self, tls: VMThread, pages: usize) -> Address { - trace!("Space.acquire, tls={:?}", tls); + fn acquire(&self, tls: VMThread, pages: usize, alloc_options: AllocationOptions) -> Address { + trace!( + "Space.acquire, tls={:?}, alloc_options={:?}", + tls, + alloc_options + ); debug_assert!( - !self.will_oom_on_acquire(tls, pages << LOG_BYTES_IN_PAGE), + !self.will_oom_on_acquire(pages << LOG_BYTES_IN_PAGE), "The requested pages is larger than the max heap size. Is will_go_oom_on_acquire used before acquring memory?" ); // Should we poll to attempt to GC? // - If tls is collector, we cannot attempt a GC. // - If gc is disabled, we cannot attempt a GC. - let should_poll = - VM::VMActivePlan::is_mutator(tls) && VM::VMCollection::is_collection_enabled(); + // - If overcommit is allowed, we don't attempt a GC. + let should_poll = VM::VMActivePlan::is_mutator(tls) + && VM::VMCollection::is_collection_enabled() + && !alloc_options.on_fail.allow_overcommit(); // Is a GC allowed here? If we should poll but are not allowed to poll, we will panic. // initialize_collection() has to be called so we know GC is initialized. 
- let allow_gc = should_poll && self.common().global_state.is_initialized(); + let allow_gc = should_poll + && self.common().global_state.is_initialized() + && alloc_options.on_fail.allow_gc(); trace!("Reserving pages"); let pr = self.get_page_resource(); @@ -104,12 +127,19 @@ pub trait Space: 'static + SFT + Sync + Downcast { trace!("Polling .."); if should_poll && self.get_gc_trigger().poll(false, Some(self.as_space())) { + // Clear the request + pr.clear_request(pages_reserved); + + // If we do not want GC on fail, just return zero. + if !alloc_options.on_fail.allow_gc() { + return Address::ZERO; + } + + // Otherwise do GC here debug!("Collection required"); assert!(allow_gc, "GC is not allowed here: collection is not initialized (did you call initialize_collection()?)."); - // Clear the request, and inform GC trigger about the pending allocation. - pr.clear_request(pages_reserved); - + // Inform GC trigger about the pending allocation. let meta_pages_reserved = self.estimate_side_meta_pages(pages_reserved); let total_pages_reserved = pages_reserved + meta_pages_reserved; self.get_gc_trigger() @@ -117,7 +147,9 @@ pub trait Space: 'static + SFT + Sync + Downcast { .on_pending_allocation(total_pages_reserved); VM::VMCollection::block_for_gc(VMMutatorThread(tls)); // We have checked that this is mutator - unsafe { Address::zero() } + + // Return zero -- the caller will handle re-attempting allocation + Address::ZERO } else { debug!("Collection not required"); @@ -221,6 +253,14 @@ pub trait Space: 'static + SFT + Sync + Downcast { Err(_) => { drop(lock); // drop the lock immediately + // Clear the request + pr.clear_request(pages_reserved); + + // If we do not want GC on fail, just return zero. + if !alloc_options.on_fail.allow_gc() { + return Address::ZERO; + } + // We thought we had memory to allocate, but somehow failed the allocation. Will force a GC. assert!( allow_gc, @@ -230,14 +270,13 @@ pub trait Space: 'static + SFT + Sync + Downcast { let gc_performed = self.get_gc_trigger().poll(true, Some(self.as_space())); debug_assert!(gc_performed, "GC not performed when forced."); - // Clear the request, and inform GC trigger about the pending allocation. - pr.clear_request(pages_reserved); + // Inform GC trigger about the pending allocation. self.get_gc_trigger() .policy .on_pending_allocation(pages_reserved); VM::VMCollection::block_for_gc(VMMutatorThread(tls)); // We asserted that this is mutator. 
- unsafe { Address::zero() } + Address::ZERO } } } diff --git a/src/policy/vmspace.rs b/src/policy/vmspace.rs index c19e3a516b..99dcd9852c 100644 --- a/src/policy/vmspace.rs +++ b/src/policy/vmspace.rs @@ -4,6 +4,7 @@ use crate::policy::sft::GCWorkerMutRef; use crate::policy::sft::SFT; use crate::policy::space::{CommonSpace, Space}; use crate::util::address::Address; +use crate::util::alloc::allocator::AllocationOptions; use crate::util::constants::BYTES_IN_PAGE; use crate::util::heap::externalpageresource::{ExternalPageResource, ExternalPages}; use crate::util::heap::layout::vm_layout::BYTES_IN_CHUNK; @@ -136,7 +137,7 @@ impl Space for VMSpace { unreachable!() } - fn acquire(&self, _tls: VMThread, _pages: usize) -> Address { + fn acquire(&self, _tls: VMThread, _pages: usize, _alloc_options: AllocationOptions) -> Address { unreachable!() } diff --git a/src/util/alloc/allocator.rs b/src/util/alloc/allocator.rs index a60d26935c..4e53ab953d 100644 --- a/src/util/alloc/allocator.rs +++ b/src/util/alloc/allocator.rs @@ -6,6 +6,7 @@ use crate::util::heap::gc_trigger::GCTrigger; use crate::util::options::Options; use crate::MMTK; +use atomic::Atomic; use std::sync::atomic::Ordering; use std::sync::Arc; @@ -27,6 +28,47 @@ pub enum AllocationError { MmapOutOfMemory, } +/// Behavior when an allocation fails, and a GC is expected. +#[repr(u8)] +#[derive(Copy, Clone, Default, PartialEq, bytemuck::NoUninit, Debug)] +pub enum OnAllocationFail { + /// Request the GC. This is the default behavior. + #[default] + RequestGC, + /// Instead of requesting GC, the allocation request returns with a failure value. + ReturnFailure, + /// Instead of requesting GC, the allocation request simply overcommits the memory, + /// and return a valid result at its best efforts. + OverCommit, +} + +impl OnAllocationFail { + pub(crate) fn allow_oom_call(&self) -> bool { + *self == Self::RequestGC + } + pub(crate) fn allow_gc(&self) -> bool { + *self == Self::RequestGC + } + pub(crate) fn allow_overcommit(&self) -> bool { + *self == Self::OverCommit + } +} + +/// Allow specifying different behaviors with [`Allocator::alloc_with_options`]. +#[repr(C)] +#[derive(Copy, Clone, Default, PartialEq, bytemuck::NoUninit, Debug)] +pub struct AllocationOptions { + /// When the allocation fails and a GC is originally expected, on_fail + /// allows a different behavior to avoid the GC. + pub on_fail: OnAllocationFail, +} + +impl AllocationOptions { + pub(crate) fn is_default(&self) -> bool { + *self == AllocationOptions::default() + } +} + pub fn align_allocation_no_fill( region: Address, alignment: usize, @@ -130,8 +172,26 @@ pub fn get_maximum_aligned_size_inner( } } +#[cfg(debug_assertions)] +pub(crate) fn assert_allocation_args(size: usize, align: usize, offset: usize) { + // MMTk has assumptions about minimal object size. + // We need to make sure that all allocations comply with the min object size. + // Ideally, we check the allocation size, and if it is smaller, we transparently allocate the min + // object size (the VM does not need to know this). However, for the VM bindings we support at the moment, + // their object sizes are all larger than MMTk's min object size, so we simply put an assertion here. + // If you plan to use MMTk with a VM with its object size smaller than MMTk's min object size, you should + // meet the min object size in the fastpath. 
+ debug_assert!(size >= MIN_OBJECT_SIZE); + // Assert alignment + debug_assert!(align >= VM::MIN_ALIGNMENT); + debug_assert!(align <= VM::MAX_ALIGNMENT); + // Assert offset + debug_assert!(VM::USE_ALLOCATION_OFFSET || offset == 0); +} + /// The context an allocator needs to access in order to perform allocation. pub struct AllocatorContext { + pub alloc_options: Atomic, pub state: Arc, pub options: Arc, pub gc_trigger: Arc>, @@ -142,6 +202,7 @@ pub struct AllocatorContext { impl AllocatorContext { pub fn new(mmtk: &MMTK) -> Self { Self { + alloc_options: Atomic::new(AllocationOptions::default()), state: mmtk.state.clone(), options: mmtk.options.clone(), gc_trigger: mmtk.gc_trigger.clone(), @@ -149,6 +210,19 @@ impl AllocatorContext { analysis_manager: mmtk.analysis_manager.clone(), } } + + pub fn set_alloc_options(&self, options: AllocationOptions) { + self.alloc_options.store(options, Ordering::Relaxed); + } + + pub fn clear_alloc_options(&self) { + self.alloc_options + .store(AllocationOptions::default(), Ordering::Relaxed); + } + + pub fn get_alloc_options(&self) -> AllocationOptions { + self.alloc_options.load(Ordering::Relaxed) + } } /// A trait which implements allocation routines. Every allocator needs to implements this trait. @@ -180,9 +254,13 @@ pub trait Allocator: Downcast { /// If an allocator supports thread local allocations, then the allocation will be serviced /// from its TLAB, otherwise it will default to using the slowpath, i.e. [`alloc_slow`](Allocator::alloc_slow). /// + /// If the heap is full, we trigger a GC and attempt to free up + /// more memory, and re-attempt the allocation. + /// /// Note that in the case where the VM is out of memory, we invoke /// [`Collection::out_of_memory`] to inform the binding and then return a null pointer back to /// it. We have no assumptions on whether the VM will continue executing or abort immediately. + /// If the VM continues execution, the function will return a null address. /// /// An allocator needs to make sure the object reference for the returned address is in the same /// chunk as the returned address (so the side metadata and the SFT for an object reference is valid). @@ -194,6 +272,26 @@ pub trait Allocator: Downcast { /// * `offset` the required offset in bytes. fn alloc(&mut self, size: usize, align: usize, offset: usize) -> Address; + /// An allocation attempt. The allocation options may specify different behaviors for this allocation request. + /// + /// Arguments: + /// * `size`: the allocation size in bytes. + /// * `align`: the required alignment in bytes. + /// * `offset` the required offset in bytes. + /// * `options`: the allocation options to change the default allocation behavior for this request. + fn alloc_with_options( + &mut self, + size: usize, + align: usize, + offset: usize, + alloc_options: AllocationOptions, + ) -> Address { + self.get_context().set_alloc_options(alloc_options); + let ret = self.alloc(size, align, offset); + self.get_context().clear_alloc_options(); + ret + } + /// Slowpath allocation attempt. This function is explicitly not inlined for performance /// considerations. /// @@ -206,6 +304,30 @@ pub trait Allocator: Downcast { self.alloc_slow_inline(size, align, offset) } + /// Slowpath allocation attempt. Mostly the same as [`Allocator::alloc_slow`], except that the allocation options + /// may specify different behaviors for this allocation request. + /// + /// This function is not used internally. It is mostly for the bindings. 
+ /// [`Allocator::alloc_with_options`] still calls the normal [`Allocator::alloc_slow`]. + /// + /// Arguments: + /// * `size`: the allocation size in bytes. + /// * `align`: the required alignment in bytes. + /// * `offset` the required offset in bytes. + fn alloc_slow_with_options( + &mut self, + size: usize, + align: usize, + offset: usize, + alloc_options: AllocationOptions, + ) -> Address { + // The function is not used internally. We won't set no_gc_on_fail redundantly. + self.get_context().set_alloc_options(alloc_options); + let ret = self.alloc_slow(size, align, offset); + self.get_context().clear_alloc_options(); + ret + } + /// Slowpath allocation attempt. This function executes the actual slowpath allocation. A /// slowpath allocation in MMTk attempts to allocate the object using the per-allocator /// definition of [`alloc_slow_once`](Allocator::alloc_slow_once). This function also accounts for increasing the @@ -256,6 +378,12 @@ pub trait Allocator: Downcast { return result; } + if result.is_zero() + && self.get_context().get_alloc_options().on_fail == OnAllocationFail::ReturnFailure + { + return result; + } + if !result.is_zero() { // Report allocation success to assist OutOfMemory handling. if !self diff --git a/src/util/alloc/bumpallocator.rs b/src/util/alloc/bumpallocator.rs index 76cc628c89..db832eb677 100644 --- a/src/util/alloc/bumpallocator.rs +++ b/src/util/alloc/bumpallocator.rs @@ -194,12 +194,20 @@ impl BumpAllocator { offset: usize, stress_test: bool, ) -> Address { - if self.space.will_oom_on_acquire(self.tls, size) { + if self.space.handle_obvious_oom_request( + self.tls, + size, + self.get_context().get_alloc_options(), + ) { return Address::ZERO; } let block_size = (size + BLOCK_MASK) & (!BLOCK_MASK); - let acquired_start = self.space.acquire(self.tls, bytes_to_pages_up(block_size)); + let acquired_start = self.space.acquire( + self.tls, + bytes_to_pages_up(block_size), + self.get_context().get_alloc_options(), + ); if acquired_start.is_zero() { trace!("Failed to acquire a new block"); acquired_start diff --git a/src/util/alloc/free_list_allocator.rs b/src/util/alloc/free_list_allocator.rs index 28958a1759..b443fe0c6a 100644 --- a/src/util/alloc/free_list_allocator.rs +++ b/src/util/alloc/free_list_allocator.rs @@ -296,7 +296,7 @@ impl FreeListAllocator { ) -> Option { let bin = mi_bin::(size, align); loop { - match self.space.acquire_block(self.tls, size, align) { + match self.space.acquire_block(self.tls, size, align, self.get_context().get_alloc_options()) { crate::policy::marksweepspace::native_ms::BlockAcquireResult::Exhausted => { debug!("Acquire global block: None"); // GC diff --git a/src/util/alloc/immix_allocator.rs b/src/util/alloc/immix_allocator.rs index 4ebeb69e7c..807ddded90 100644 --- a/src/util/alloc/immix_allocator.rs +++ b/src/util/alloc/immix_allocator.rs @@ -289,7 +289,11 @@ impl ImmixAllocator { // Get a clean block from ImmixSpace. 
    fn acquire_clean_block(&mut self, size: usize, align: usize, offset: usize) -> Address {
-        match self.immix_space().get_clean_block(self.tls, self.copy) {
+        match self.immix_space().get_clean_block(
+            self.tls,
+            self.copy,
+            self.get_context().get_alloc_options(),
+        ) {
             None => Address::ZERO,
             Some(block) => {
                 trace!(
diff --git a/src/util/alloc/large_object_allocator.rs b/src/util/alloc/large_object_allocator.rs
index baf0738274..89b5526c2e 100644
--- a/src/util/alloc/large_object_allocator.rs
+++ b/src/util/alloc/large_object_allocator.rs
@@ -49,13 +49,18 @@ impl<VM: VMBinding> Allocator<VM> for LargeObjectAllocator<VM> {
     }
 
     fn alloc_slow_once(&mut self, size: usize, align: usize, _offset: usize) -> Address {
-        if self.space.will_oom_on_acquire(self.tls, size) {
+        if self.space.handle_obvious_oom_request(
+            self.tls,
+            size,
+            self.get_context().get_alloc_options(),
+        ) {
             return Address::ZERO;
         }
 
         let maxbytes = allocator::get_maximum_aligned_size::<VM>(size, align);
         let pages = crate::util::conversions::bytes_to_pages_up(maxbytes);
-        self.space.allocate_pages(self.tls, pages)
+        self.space
+            .allocate_pages(self.tls, pages, self.get_context().get_alloc_options())
     }
 }
diff --git a/src/util/alloc/mod.rs b/src/util/alloc/mod.rs
index 5f6453b6fe..d3148dceed 100644
--- a/src/util/alloc/mod.rs
+++ b/src/util/alloc/mod.rs
@@ -4,7 +4,9 @@ pub(crate) mod allocator;
 pub use allocator::fill_alignment_gap;
 pub use allocator::AllocationError;
+pub use allocator::AllocationOptions;
 pub use allocator::Allocator;
+pub use allocator::OnAllocationFail;
 
 /// A list of all the allocators, embedded in Mutator
 pub(crate) mod allocators;
diff --git a/src/vm/tests/mock_tests/mock_test_allocate_no_gc_oom_on_acquire.rs b/src/vm/tests/mock_tests/mock_test_allocate_no_gc_oom_on_acquire.rs
new file mode 100644
index 0000000000..6aa6a3f207
--- /dev/null
+++ b/src/vm/tests/mock_tests/mock_test_allocate_no_gc_oom_on_acquire.rs
@@ -0,0 +1,39 @@
+use super::mock_test_prelude::*;
+
+use crate::util::alloc::allocator::{AllocationOptions, OnAllocationFail};
+use crate::AllocationSemantics;
+
+/// This test will allocate an object that is larger than the heap size. The call will fail.
+#[test]
+pub fn allocate_no_gc_oom_on_acquire() {
+    // 1KB heap
+    with_mockvm(
+        default_setup,
+        || {
+            const KB: usize = 1024;
+            let mut fixture = MutatorFixture::create_with_heapsize(KB);
+
+            // Attempt to allocate an object that is larger than the heap size.
+            let addr = memory_manager::alloc_with_options(
+                &mut fixture.mutator,
+                1024 * 10,
+                8,
+                0,
+                AllocationSemantics::Default,
+                AllocationOptions {
+                    on_fail: OnAllocationFail::ReturnFailure,
+                },
+            );
+            // We should get zero.
+            assert!(addr.is_zero());
+            // block_for_gc and out_of_memory won't be called.
+            read_mockvm(|mock| {
+                assert!(!mock.block_for_gc.is_called());
+            });
+            read_mockvm(|mock| {
+                assert!(!mock.out_of_memory.is_called());
+            });
+        },
+        no_cleanup,
+    )
+}
diff --git a/src/vm/tests/mock_tests/mock_test_allocate_no_gc_simple.rs b/src/vm/tests/mock_tests/mock_test_allocate_no_gc_simple.rs
new file mode 100644
index 0000000000..f9492c7e01
--- /dev/null
+++ b/src/vm/tests/mock_tests/mock_test_allocate_no_gc_simple.rs
@@ -0,0 +1,47 @@
+use super::mock_test_prelude::*;
+
+use crate::util::alloc::allocator::{AllocationOptions, OnAllocationFail};
+use crate::AllocationSemantics;
+
+/// This test calls alloc_with_options in a loop, and eventually fills up the heap.
+/// As we require alloc_with_options not to trigger a GC, we expect to see a return value of zero, and no GC is triggered.
+#[test]
+pub fn allocate_no_gc_simple() {
+    // 1MB heap
+    with_mockvm(
+        default_setup,
+        || {
+            const MB: usize = 1024 * 1024;
+            let mut fixture = MutatorFixture::create_with_heapsize(MB);
+
+            let mut last_result = crate::util::Address::MAX;
+
+            // Attempt allocation: allocate 1024 bytes. We should fill up the heap with 1024 allocations or fewer (some plans reserve more memory, such as semispace and generational GCs).
+            // Run a few more iterations to test if we set/unset the allocation options properly.
+            for _ in 0..1100 {
+                last_result = memory_manager::alloc_with_options(
+                    &mut fixture.mutator,
+                    1024,
+                    8,
+                    0,
+                    AllocationSemantics::Default,
+                    AllocationOptions {
+                        on_fail: OnAllocationFail::ReturnFailure,
+                    },
+                );
+                if last_result.is_zero() {
+                    read_mockvm(|mock| {
+                        assert!(!mock.block_for_gc.is_called());
+                    });
+                    read_mockvm(|mock| {
+                        assert!(!mock.out_of_memory.is_called());
+                    });
+                }
+            }
+
+            // The allocations should consume all the heap, and the last result should be zero (failure).
+            assert!(last_result.is_zero());
+        },
+        no_cleanup,
+    )
+}
diff --git a/src/vm/tests/mock_tests/mock_test_allocate_overcommit.rs b/src/vm/tests/mock_tests/mock_test_allocate_overcommit.rs
new file mode 100644
index 0000000000..6f41413116
--- /dev/null
+++ b/src/vm/tests/mock_tests/mock_test_allocate_overcommit.rs
@@ -0,0 +1,46 @@
+use super::mock_test_prelude::*;
+
+use crate::util::alloc::allocator::{AllocationOptions, OnAllocationFail};
+use crate::AllocationSemantics;
+
+/// This test calls alloc_with_options in a loop, and eventually fills up the heap.
+/// As we allow alloc_with_options to overcommit, we expect to see valid return values, and no GC is triggered.
+#[test]
+pub fn allocate_overcommit() {
+    // 1MB heap
+    with_mockvm(
+        default_setup,
+        || {
+            const MB: usize = 1024 * 1024;
+            let mut fixture = MutatorFixture::create_with_heapsize(MB);
+
+            let mut last_result = crate::util::Address::MAX;
+
+            // Attempt allocation: allocate 1024 bytes. We should fill up the heap with 1024 allocations or fewer (some plans reserve more memory, such as semispace and generational GCs).
+            // Run a few more iterations to test if we set/unset the allocation options properly.
+            for _ in 0..1100 {
+                last_result = memory_manager::alloc_with_options(
+                    &mut fixture.mutator,
+                    1024,
+                    8,
+                    0,
+                    AllocationSemantics::Default,
+                    AllocationOptions {
+                        on_fail: OnAllocationFail::OverCommit,
+                    },
+                );
+                assert!(!last_result.is_zero());
+                read_mockvm(|mock| {
+                    assert!(!mock.block_for_gc.is_called());
+                });
+                read_mockvm(|mock| {
+                    assert!(!mock.out_of_memory.is_called());
+                });
+            }
+
+            // The allocations exceed the heap size, but since we allow overcommit, the last result should not be zero (the allocation succeeds).
+            assert!(!last_result.is_zero());
+        },
+        no_cleanup,
+    )
+}
diff --git a/src/vm/tests/mock_tests/mod.rs b/src/vm/tests/mock_tests/mod.rs
index 114d8f1859..71f60f2be9 100644
--- a/src/vm/tests/mock_tests/mod.rs
+++ b/src/vm/tests/mock_tests/mod.rs
@@ -24,6 +24,9 @@ pub(crate) mod mock_test_prelude {
 }
 
 mod mock_test_allocate_align_offset;
+mod mock_test_allocate_no_gc_oom_on_acquire;
+mod mock_test_allocate_no_gc_simple;
+mod mock_test_allocate_overcommit;
 mod mock_test_allocate_with_disable_collection;
 mod mock_test_allocate_with_initialize_collection;
 mod mock_test_allocate_with_re_enable_collection;
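
For reference, the following is a minimal usage sketch (not part of the diff) of how a VM binding might call the new `alloc_with_options` API from outside the crate. It assumes the binding already holds a `&mut Mutator<VM>` obtained from `bind_mutator`, and it relies on the `AllocationOptions`/`OnAllocationFail` re-exports added in `src/util/alloc/mod.rs` above. The helper name `try_alloc_without_gc` and the chosen alignment are illustrative only.

use mmtk::memory_manager;
use mmtk::util::alloc::{AllocationOptions, OnAllocationFail};
use mmtk::util::Address;
use mmtk::vm::VMBinding;
use mmtk::{AllocationSemantics, Mutator};

/// Attempt an allocation that reports failure instead of triggering a GC.
/// Returns `None` if MMTk could not satisfy the request without a GC.
fn try_alloc_without_gc<VM: VMBinding>(mutator: &mut Mutator<VM>, size: usize) -> Option<Address> {
    let addr = memory_manager::alloc_with_options(
        mutator,
        size,
        8, // alignment (illustrative)
        0, // offset
        AllocationSemantics::Default,
        AllocationOptions {
            on_fail: OnAllocationFail::ReturnFailure,
        },
    );
    // A zero address means the allocation failed and no GC was requested.
    // Passing `OnAllocationFail::OverCommit` instead would allocate past the
    // heap limit without triggering a GC.
    (!addr.is_zero()).then_some(addr)
}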