
Commit 03dfc5a

alloc_no_gc will not call out_of_memory when allocating an erroneously
large object. Minor fixes for comments
1 parent c120f11 commit 03dfc5a

9 files changed, +62 −26 lines changed

src/memory_manager.rs  (+2 −8)

@@ -160,12 +160,6 @@ pub fn flush_mutator<VM: VMBinding>(mutator: &mut Mutator<VM>) {
 /// not limited to throwing exceptions or errors. If [`crate::vm::Collection::out_of_memory`] returns
 /// normally without panicking or throwing exceptions, this function will return zero.
 ///
-/// This function in most cases returns a valid memory address.
-/// This function may return a zero address iif 1. MMTk attempts at least one GC,
-/// 2. the GC does not free up enough memory, 3. MMTk calls [`crate::vm::Collection::out_of_memory`]
-/// to the binding, and 4. the binding returns from [`crate::vm::Collection::out_of_memory`]
-/// instead of throwing an exception/error.
-///
 /// For performance reasons, a VM should implement the allocation fast-path on their side rather
 /// than just calling this function.
 ///
@@ -183,7 +177,7 @@ pub fn alloc<VM: VMBinding>(
     semantics: AllocationSemantics,
 ) -> Address {
     #[cfg(debug_assertions)]
-    crate::util::alloc::allocator::asset_allocation_args::<VM>(size, align, offset);
+    crate::util::alloc::allocator::assert_allocation_args::<VM>(size, align, offset);

     mutator.alloc(size, align, offset, semantics)
 }
@@ -212,7 +206,7 @@ pub fn alloc_no_gc<VM: VMBinding>(
     semantics: AllocationSemantics,
 ) -> Address {
     #[cfg(debug_assertions)]
-    crate::util::alloc::allocator::asset_allocation_args::<VM>(size, align, offset);
+    crate::util::alloc::allocator::assert_allocation_args::<VM>(size, align, offset);

     mutator.alloc_no_gc(size, align, offset, semantics)
 }
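
For orientation, `alloc_no_gc` (touched above and in the allocators below) reports failure by returning a zero address rather than triggering a GC. A minimal binding-side sketch of consuming that contract; the helper name and the fixed alignment are assumptions for illustration, not part of this commit:

    use mmtk::util::Address;
    use mmtk::vm::VMBinding;
    use mmtk::{memory_manager, AllocationSemantics, Mutator};

    // Hypothetical binding-side helper, for illustration only.
    fn try_alloc_no_gc<VM: VMBinding>(mutator: &mut Mutator<VM>, size: usize) -> Option<Address> {
        let addr = memory_manager::alloc_no_gc(
            mutator,
            size,
            8, // alignment (assumed)
            0, // offset
            AllocationSemantics::Default,
        );
        // alloc_no_gc never triggers a GC; a zero address means the allocation failed.
        if addr.is_zero() {
            None
        } else {
            Some(addr)
        }
    }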

src/plan/mutator_context.rs  (+4 −6)

@@ -300,7 +300,7 @@ pub trait MutatorContext<VM: VMBinding>: Send + 'static {
     fn prepare(&mut self, tls: VMWorkerThread);
     /// Do the release work for this mutator.
     fn release(&mut self, tls: VMWorkerThread);
-    /// Allocate memory for an object. This is a GC safepoint. GC will be triggered if the allocation fails.
+    /// Allocate memory for an object. This function will trigger a GC on failed allocation.
     ///
     /// Arguments:
     /// * `size`: the number of bytes required for the object.
@@ -314,8 +314,7 @@ pub trait MutatorContext<VM: VMBinding>: Send + 'static {
         offset: usize,
         allocator: AllocationSemantics,
     ) -> Address;
-    /// Allocate memory for an object. This is NOT a GC safepoint. If the allocation fails,
-    /// the function returns a zero value without triggering a GC.
+    /// Allocate memory for an object. This function will not trigger a GC on failed allocation.
    ///
     /// Arguments:
     /// * `size`: the number of bytes required for the object.
@@ -329,7 +328,7 @@ pub trait MutatorContext<VM: VMBinding>: Send + 'static {
         offset: usize,
         allocator: AllocationSemantics,
     ) -> Address;
-    /// The slow path allocation. This is a GC safepoint. GC will be triggered if the allocation fails.
+    /// The slow path allocation for [`MutatorContext::alloc`]. This function will trigger a GC on failed allocation.
     ///
     /// This is only useful when the binding
     /// implements the fast path allocation, and would like to explicitly
@@ -341,8 +340,7 @@ pub trait MutatorContext<VM: VMBinding>: Send + 'static {
         offset: usize,
         allocator: AllocationSemantics,
     ) -> Address;
-    /// The slow path allocation. This is NOT a GC safepoint. If the allocation fails,
-    /// the function returns a zero value without triggering a GC.
+    /// The slow path allocation for [`MutatorContext::alloc_no_gc`]. This function will not trigger a GC on failed allocation.
     ///
     /// This is only useful when the binding
     /// implements the fast path allocation, and would like to explicitly
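
The "slow path allocation" methods above are meant for bindings that inline their own allocation fast path and only call into MMTk when it fails. A rough, hypothetical sketch of that shape, assuming a simple thread-local bump buffer (none of these names come from this commit, and alignment handling is omitted for brevity):

    use mmtk::util::Address;
    use mmtk::vm::VMBinding;
    use mmtk::{AllocationSemantics, Mutator, MutatorContext};

    // Hypothetical, simplified binding-side fast path over a thread-local buffer.
    fn fast_alloc<VM: VMBinding>(
        mutator: &mut Mutator<VM>,
        cursor: &mut Address,
        limit: Address,
        size: usize,
    ) -> Address {
        let result = *cursor;
        let new_cursor = result + size;
        if new_cursor > limit {
            // Fast path exhausted: explicitly take MMTk's slow path, which may trigger a GC.
            return mutator.alloc_slow(size, 8, 0, AllocationSemantics::Default);
        }
        *cursor = new_cursor;
        result
    }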

src/policy/space.rs  (+8 −6)

@@ -67,14 +67,16 @@ pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
     /// avoid arithmetic overflow. If we have to do computation in the allocation fastpath and
     /// overflow happens there, there is nothing we can do about it.
     /// Return a boolean to indicate if we will be out of memory, determined by the check.
-    fn will_oom_on_acquire(&self, tls: VMThread, size: usize) -> bool {
+    fn will_oom_on_acquire(&self, tls: VMThread, size: usize, allow_oom_call: bool) -> bool {
         let max_pages = self.get_gc_trigger().policy.get_max_heap_size_in_pages();
         let requested_pages = size >> LOG_BYTES_IN_PAGE;
         if requested_pages > max_pages {
-            VM::VMCollection::out_of_memory(
-                tls,
-                crate::util::alloc::AllocationError::HeapOutOfMemory,
-            );
+            if allow_oom_call {
+                VM::VMCollection::out_of_memory(
+                    tls,
+                    crate::util::alloc::AllocationError::HeapOutOfMemory,
+                );
+            }
             return true;
         }
         false
@@ -88,7 +90,7 @@ pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
         );

         debug_assert!(
-            !self.will_oom_on_acquire(tls, pages << LOG_BYTES_IN_PAGE),
+            !self.will_oom_on_acquire(tls, pages << LOG_BYTES_IN_PAGE, !no_gc_on_fail),
             "The requested pages are larger than the max heap size. Is will_oom_on_acquire used before acquiring memory?"
         );
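
To make the arithmetic concrete: the request is converted to pages and compared against the maximum heap size in pages, and the new `allow_oom_call` flag only gates the `out_of_memory` upcall, not the early `true` return. A standalone illustration with assumed values (4 KiB pages and the 1 KiB heap used by the new test further below):

    // Standalone sketch mirroring the shape of the will_oom_on_acquire check;
    // the constants are assumptions, not MMTk's actual GC-trigger bookkeeping.
    fn main() {
        const LOG_BYTES_IN_PAGE: usize = 12; // 4 KiB pages (assumption)
        let max_pages = 1024 >> LOG_BYTES_IN_PAGE; // 1 KiB heap -> 0 pages
        let requested_pages = (10 * 1024) >> LOG_BYTES_IN_PAGE; // 10 KiB request -> 2 pages
        // The request alone exceeds the whole heap, so the acquire is doomed to fail.
        assert!(requested_pages > max_pages);
        // With allow_oom_call == false (the alloc_no_gc path), Collection::out_of_memory
        // is not called; the caller just sees `true` and returns Address::ZERO.
    }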

src/util/alloc/allocator.rs  (+2 −2)

@@ -132,7 +132,7 @@ pub fn get_maximum_aligned_size_inner<VM: VMBinding>(
 }

 #[cfg(debug_assertions)]
-pub(crate) fn asset_allocation_args<VM: VMBinding>(size: usize, align: usize, offset: usize) {
+pub(crate) fn assert_allocation_args<VM: VMBinding>(size: usize, align: usize, offset: usize) {
     // MMTk has assumptions about minimal object size.
     // We need to make sure that all allocations comply with the min object size.
     // Ideally, we check the allocation size, and if it is smaller, we transparently allocate the min
@@ -150,7 +150,7 @@ pub(crate) fn asset_allocation_args<VM: VMBinding>(size: usize, align: usize, of

 /// The context an allocator needs to access in order to perform allocation.
 pub struct AllocatorContext<VM: VMBinding> {
-    /// Whether we should trigger a GC when the current alloccation fails.
+    /// Whether we should avoid triggering a GC when the current allocation fails.
     /// The default value is `false`. This value is only set to `true` when
     /// the binding requests a 'no-gc' alloc, and will be set back to `false`
     /// when the allocation is done (successfully or not).

src/util/alloc/bumpallocator.rs  (+4 −1)

@@ -194,7 +194,10 @@ impl<VM: VMBinding> BumpAllocator<VM> {
         offset: usize,
         stress_test: bool,
     ) -> Address {
-        if self.space.will_oom_on_acquire(self.tls, size) {
+        if self
+            .space
+            .will_oom_on_acquire(self.tls, size, !self.get_context().is_no_gc_on_fail())
+        {
             return Address::ZERO;
         }

src/util/alloc/large_object_allocator.rs  (+4 −1)

@@ -49,7 +49,10 @@ impl<VM: VMBinding> Allocator<VM> for LargeObjectAllocator<VM> {
     }

     fn alloc_slow_once(&mut self, size: usize, align: usize, _offset: usize) -> Address {
-        if self.space.will_oom_on_acquire(self.tls, size) {
+        if self
+            .space
+            .will_oom_on_acquire(self.tls, size, !self.get_context().is_no_gc_on_fail())
+        {
             return Address::ZERO;
         }

src/vm/tests/mock_tests/mock_test_allocate_no_gc_oom_on_acquire.rs  (new file, +35 −0)

@@ -0,0 +1,35 @@
+use super::mock_test_prelude::*;
+
+use crate::AllocationSemantics;
+
+/// This test will allocate an object that is larger than the heap size. The call will fail.
+#[test]
+pub fn allocate_no_gc_oom_on_acquire() {
+    // 1KB heap
+    with_mockvm(
+        default_setup,
+        || {
+            const KB: usize = 1024;
+            let mut fixture = MutatorFixture::create_with_heapsize(KB);
+
+            // Attempt to allocate an object that is larger than the heap size.
+            let addr = memory_manager::alloc_no_gc(
+                &mut fixture.mutator,
+                1024 * 10,
+                8,
+                0,
+                AllocationSemantics::Default,
+            );
+            // We should get zero.
+            assert!(addr.is_zero());
+            // block_for_gc and out_of_memory won't be called.
+            read_mockvm(|mock| {
+                assert!(!mock.block_for_gc.is_called());
+            });
+            read_mockvm(|mock| {
+                assert!(!mock.out_of_memory.is_called());
+            });
+        },
+        no_cleanup,
+    )
+}

src/vm/tests/mock_tests/mock_test_allocate_no_gc.rs → src/vm/tests/mock_tests/mock_test_allocate_no_gc_simple.rs  (+1 −1)

@@ -5,7 +5,7 @@ use crate::AllocationSemantics;
 /// This test will do alloc_no_gc in a loop, and eventually fill up the heap.
 /// As alloc_no_gc will not trigger a GC, we expect to see a return value of zero, and no GC is triggered.
 #[test]
-pub fn allocate_no_gc() {
+pub fn allocate_no_gc_simple() {
     // 1MB heap
     with_mockvm(
         default_setup,

src/vm/tests/mock_tests/mod.rs  (+2 −1)

@@ -24,7 +24,8 @@ pub(crate) mod mock_test_prelude {
 }

 mod mock_test_allocate_align_offset;
-mod mock_test_allocate_no_gc;
+mod mock_test_allocate_no_gc_oom_on_acquire;
+mod mock_test_allocate_no_gc_simple;
 mod mock_test_allocate_with_disable_collection;
 mod mock_test_allocate_with_initialize_collection;
 mod mock_test_allocate_with_re_enable_collection;
