diff --git a/julia/mmtk_julia.c b/julia/mmtk_julia.c
index 3b730474..bf5cfd00 100644
--- a/julia/mmtk_julia.c
+++ b/julia/mmtk_julia.c
@@ -35,25 +35,20 @@ JL_DLLEXPORT void (jl_mmtk_harness_end)(void)
     harness_end();
 }
 
-JL_DLLEXPORT jl_value_t *jl_mmtk_gc_alloc_default_llvm(int pool_offset, int osize)
+JL_DLLEXPORT jl_value_t *jl_mmtk_gc_alloc_default_llvm(int pool_offset, size_t osize)
 {
     jl_ptls_t ptls = (jl_ptls_t)jl_get_ptls_states();
 
     // safepoint
-    if (__unlikely(jl_atomic_load(&jl_gc_running))) {
-        int8_t old_state = ptls->gc_state;
-        jl_atomic_store_release(&ptls->gc_state, JL_GC_STATE_WAITING);
-        jl_safepoint_wait_gc();
-        jl_atomic_store_release(&ptls->gc_state, old_state);
-    }
+    jl_gc_safepoint_(ptls);
 
     jl_value_t *v;
+    jl_taggedvalue_t *v_tagged;
 
+    // v needs to be 16 byte aligned, therefore v_tagged needs to be offset accordingly to consider the size of header
     ptls->mmtk_mutator_ptr->allocators.immix[0].cursor = ptls->cursor;
-    // v needs to be 16 byte aligned, therefore v_tagged needs to be offset accordingly to consider the size of header
-    jl_taggedvalue_t *v_tagged =
-        (jl_taggedvalue_t *) alloc(ptls->mmtk_mutator_ptr, osize, 16, 8, 0);
+    v_tagged = (jl_taggedvalue_t *) alloc(ptls->mmtk_mutator_ptr, osize, 16, 8, 0);
 
     ptls->cursor = ptls->mmtk_mutator_ptr->allocators.immix[0].cursor;
     ptls->limit = ptls->mmtk_mutator_ptr->allocators.immix[0].limit;
@@ -84,16 +79,10 @@ STATIC_INLINE void* alloc_default_object(jl_ptls_t ptls, size_t size, int offset
     }
 }
 
-JL_DLLEXPORT jl_value_t *jl_mmtk_gc_alloc_default(jl_ptls_t ptls, int pool_offset,
-                                                  int osize, void *ty)
+JL_DLLEXPORT jl_value_t *jl_mmtk_gc_alloc_default(jl_ptls_t ptls, size_t osize, void *ty)
 {
     // safepoint
-    if (__unlikely(jl_atomic_load(&jl_gc_running))) {
-        int8_t old_state = ptls->gc_state;
-        jl_atomic_store_release(&ptls->gc_state, JL_GC_STATE_WAITING);
-        jl_safepoint_wait_gc();
-        jl_atomic_store_release(&ptls->gc_state, old_state);
-    }
+    jl_gc_safepoint_(ptls);
 
     jl_value_t *v;
     if ((uintptr_t)ty != jl_buff_tag) {
@@ -119,12 +108,7 @@ JL_DLLEXPORT jl_value_t *jl_mmtk_gc_alloc_default(jl_ptls_t ptls, int pool_offse
 JL_DLLEXPORT jl_value_t *jl_mmtk_gc_alloc_big(jl_ptls_t ptls, size_t sz)
 {
     // safepoint
-    if (__unlikely(jl_atomic_load(&jl_gc_running))) {
-        int8_t old_state = ptls->gc_state;
-        jl_atomic_store_release(&ptls->gc_state, JL_GC_STATE_WAITING);
-        jl_safepoint_wait_gc();
-        jl_atomic_store_release(&ptls->gc_state, old_state);
-    }
+    jl_gc_safepoint_(ptls);
 
     size_t offs = offsetof(bigval_t, header);
     assert(sz >= sizeof(jl_taggedvalue_t) && "sz must include tag");
@@ -326,112 +310,50 @@ size_t get_so_size(void* obj)
         jl_array_t* a = (jl_array_t*) obj;
         if (a->flags.how == 0) {
             int ndimwords = jl_array_ndimwords(jl_array_ndims(a));
-            int tsz = sizeof(jl_array_t) + ndimwords*sizeof(size_t);
+            size_t tsz = sizeof(jl_array_t) + ndimwords*sizeof(size_t);
             if (object_is_managed_by_mmtk(a->data)) {
                 size_t pre_data_bytes = ((size_t)a->data - a->offset*a->elsize) - (size_t)a;
                 if (pre_data_bytes > 0) { // a->data is allocated after a
                     tsz = ((size_t)a->data - a->offset*a->elsize) - (size_t)a;
                     tsz += jl_array_nbytes(a);
                 }
-                if (tsz + sizeof(jl_taggedvalue_t) > 2032) { // if it's too large to be inlined (a->data and a are disjoint objects)
-                    tsz = sizeof(jl_array_t) + ndimwords*sizeof(size_t); // simply keep the info before data
-                }
             }
-            if (tsz + sizeof(jl_taggedvalue_t) > 2032) {
-                printf("size greater than minimum!\n");
-                runtime_panic();
+            if (a->flags.pooled && tsz > 2032) { // a->data is actually a separate object and not inlined
+                tsz = sizeof(jl_array_t) + ndimwords*sizeof(size_t);
             }
-            int pool_id = jl_gc_szclass(tsz + sizeof(jl_taggedvalue_t));
-            int osize = jl_gc_sizeclasses[pool_id];
-            return osize;
+
+            return tsz + sizeof(jl_taggedvalue_t);
         } else if (a->flags.how == 1) {
             int ndimwords = jl_array_ndimwords(jl_array_ndims(a));
-            int tsz = sizeof(jl_array_t) + ndimwords*sizeof(size_t);
-            if (tsz + sizeof(jl_taggedvalue_t) > 2032) {
-                printf("size greater than minimum!\n");
-                runtime_panic();
-            }
-            int pool_id = jl_gc_szclass(tsz + sizeof(jl_taggedvalue_t));
-            int osize = jl_gc_sizeclasses[pool_id];
-
-            return osize;
+            size_t tsz = sizeof(jl_array_t) + ndimwords*sizeof(size_t);
+            return tsz + sizeof(jl_taggedvalue_t);
         } else if (a->flags.how == 2) {
             int ndimwords = jl_array_ndimwords(jl_array_ndims(a));
-            int tsz = sizeof(jl_array_t) + ndimwords*sizeof(size_t);
-            if (tsz + sizeof(jl_taggedvalue_t) > 2032) {
-                printf("size greater than minimum!\n");
-                runtime_panic();
-            }
-            int pool_id = jl_gc_szclass(tsz + sizeof(jl_taggedvalue_t));
-            int osize = jl_gc_sizeclasses[pool_id];
-
-            return osize;
+            size_t tsz = sizeof(jl_array_t) + ndimwords*sizeof(size_t);
+            return tsz + sizeof(jl_taggedvalue_t);
        } else if (a->flags.how == 3) {
             int ndimwords = jl_array_ndimwords(jl_array_ndims(a));
-            int tsz = sizeof(jl_array_t) + ndimwords * sizeof(size_t) + sizeof(void*);
-            if (tsz + sizeof(jl_taggedvalue_t) > 2032) {
-                printf("size greater than minimum!\n");
-                runtime_panic();
-            }
-            int pool_id = jl_gc_szclass(tsz + sizeof(jl_taggedvalue_t));
-            int osize = jl_gc_sizeclasses[pool_id];
-            return osize;
+            size_t tsz = sizeof(jl_array_t) + ndimwords * sizeof(size_t) + sizeof(void*);
+            return tsz + sizeof(jl_taggedvalue_t);
         }
     } else if (vt == jl_simplevector_type) {
         size_t l = jl_svec_len(obj);
-        if (l * sizeof(void*) + sizeof(jl_svec_t) + sizeof(jl_taggedvalue_t) > 2032) {
-            printf("size greater than minimum!\n");
-            runtime_panic();
-        }
-        int pool_id = jl_gc_szclass(l * sizeof(void*) + sizeof(jl_svec_t) + sizeof(jl_taggedvalue_t));
-        int osize = jl_gc_sizeclasses[pool_id];
-        return osize;
+        return l * sizeof(void*) + sizeof(jl_svec_t) + sizeof(jl_taggedvalue_t);
     } else if (vt == jl_module_type) {
         size_t dtsz = sizeof(jl_module_t);
-        if (dtsz + sizeof(jl_taggedvalue_t) > 2032) {
-            printf("size greater than minimum!\n");
-            runtime_panic();
-        }
-        int pool_id = jl_gc_szclass(dtsz + sizeof(jl_taggedvalue_t));
-        int osize = jl_gc_sizeclasses[pool_id];
-        return osize;
+        return dtsz + sizeof(jl_taggedvalue_t);
     } else if (vt == jl_task_type) {
         size_t dtsz = sizeof(jl_task_t);
-        if (dtsz + sizeof(jl_taggedvalue_t) > 2032) {
-            printf("size greater than minimum!\n");
-            runtime_panic();
-        }
-        int pool_id = jl_gc_szclass(dtsz + sizeof(jl_taggedvalue_t));
-        int osize = jl_gc_sizeclasses[pool_id];
-        return osize;
+        return dtsz + sizeof(jl_taggedvalue_t);
     } else if (vt == jl_string_type) {
         size_t dtsz = jl_string_len(obj) + sizeof(size_t) + 1;
-        if (dtsz + sizeof(jl_taggedvalue_t) > 2032) {
-            printf("size greater than minimum!\n");
-            runtime_panic();
-        }
-        int pool_id = jl_gc_szclass_align8(dtsz + sizeof(jl_taggedvalue_t));
-        int osize = jl_gc_sizeclasses[pool_id];
-        return osize;
+        return dtsz + sizeof(jl_taggedvalue_t);
     } else if (vt == jl_method_type) {
         size_t dtsz = sizeof(jl_method_t);
-        if (dtsz + sizeof(jl_taggedvalue_t) > 2032) {
-            printf("size greater than minimum!\n");
-            runtime_panic();
-        }
-        int pool_id = jl_gc_szclass(dtsz + sizeof(jl_taggedvalue_t));
-
-        int osize = jl_gc_sizeclasses[pool_id];
-        return osize;
+        return dtsz + sizeof(jl_taggedvalue_t);
     } else {
         size_t dtsz = jl_datatype_size(vt);
-        if (dtsz + sizeof(jl_taggedvalue_t) > 2032) {
-            printf("size greater than minimum!\n");
-            runtime_panic();
-        }
-        int pool_id = jl_gc_szclass(dtsz + sizeof(jl_taggedvalue_t));
-        int osize = jl_gc_sizeclasses[pool_id];
-        return osize;
+        return dtsz + sizeof(jl_taggedvalue_t);
     }
 }
diff --git a/mmtk/julia b/mmtk/julia
deleted file mode 160000
index f3792d56..00000000
--- a/mmtk/julia
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit f3792d56fcfaf8d3180a60fabadcccb042dee262
diff --git a/mmtk/src/api.rs b/mmtk/src/api.rs
index 2e0df1a1..a4eaa32d 100644
--- a/mmtk/src/api.rs
+++ b/mmtk/src/api.rs
@@ -26,6 +26,9 @@ use std::ffi::CStr;
 use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::RwLockWriteGuard;
 
+#[cfg(feature = "immix")]
+use crate::MAX_STANDARD_OBJECT_SIZE;
+
 #[no_mangle]
 pub extern "C" fn gc_init(
     min_heap_size: usize,
@@ -136,6 +139,30 @@ pub extern "C" fn destroy_mutator(mutator: *mut Mutator<JuliaVM>) {
     memory_manager::destroy_mutator(unsafe { &mut *mutator })
 }
 
+#[cfg(feature = "immix")]
+#[no_mangle]
+pub extern "C" fn alloc(
+    mutator: *mut Mutator<JuliaVM>,
+    size: usize,
+    align: usize,
+    offset: isize,
+    semantics: AllocationSemantics,
+) -> Address {
+    if size >= MAX_STANDARD_OBJECT_SIZE {
+        // MAX_IMMIX_OBJECT_SIZE
+        memory_manager::alloc::<JuliaVM>(
+            unsafe { &mut *mutator },
+            size,
+            64,
+            offset,
+            AllocationSemantics::Los,
+        )
+    } else {
+        memory_manager::alloc::<JuliaVM>(unsafe { &mut *mutator }, size, align, offset, semantics)
+    }
+}
+
+#[cfg(not(feature = "immix"))]
 #[no_mangle]
 pub extern "C" fn alloc(
     mutator: *mut Mutator<JuliaVM>,
@@ -163,6 +190,7 @@ pub extern "C" fn alloc_large(
     )
 }
 
+#[cfg(feature = "immix")]
 #[no_mangle]
 pub extern "C" fn post_alloc(
     mutator: *mut Mutator<JuliaVM>,
@@ -170,16 +198,30 @@ pub extern "C" fn post_alloc(
     bytes: usize,
     semantics: AllocationSemantics,
 ) {
-    match semantics {
-        AllocationSemantics::Los => {
-            memory_manager::post_alloc::<JuliaVM>(unsafe { &mut *mutator }, refer, bytes, semantics)
-        }
-        _ => {
-            memory_manager::post_alloc::<JuliaVM>(unsafe { &mut *mutator }, refer, bytes, semantics)
-        }
+    if bytes >= MAX_STANDARD_OBJECT_SIZE {
+        // MAX_IMMIX_OBJECT_SIZE
+        memory_manager::post_alloc::<JuliaVM>(
+            unsafe { &mut *mutator },
+            refer,
+            bytes,
+            AllocationSemantics::Los,
+        )
+    } else {
+        memory_manager::post_alloc::<JuliaVM>(unsafe { &mut *mutator }, refer, bytes, semantics)
     }
 }
 
+#[cfg(not(feature = "immix"))]
+#[no_mangle]
+pub extern "C" fn post_alloc(
+    mutator: *mut Mutator<JuliaVM>,
+    refer: ObjectReference,
+    bytes: usize,
+    semantics: AllocationSemantics,
+) {
+    memory_manager::post_alloc::<JuliaVM>(unsafe { &mut *mutator }, refer, bytes, semantics)
+}
+
 #[no_mangle]
 pub extern "C" fn will_never_move(object: ObjectReference) -> bool {
     !object.is_movable()
diff --git a/mmtk/src/collection.rs b/mmtk/src/collection.rs
index 4e9c6e31..d90de371 100644
--- a/mmtk/src/collection.rs
+++ b/mmtk/src/collection.rs
@@ -181,10 +181,9 @@ pub extern "C" fn mmtk_run_finalizers(at_exit: bool) {
                 {
                     // if the finalizer function triggers GC you don't want the objects to be GC-ed
                     let mut fin_roots = FINALIZER_ROOTS.write().unwrap();
-
+
                     let inserted = fin_roots.insert(obj);
                     assert!(inserted);
-
                 }
                 unsafe { ((*UPCALLS).run_finalizer_function)(obj.0, obj.1, obj.2) }
                 {
@@ -192,7 +191,6 @@ pub extern "C" fn mmtk_run_finalizers(at_exit: bool) {
                     let removed = fin_roots.remove(&obj);
                     assert!(removed);
                 }
-
             }
             None => break,
         }
diff --git a/mmtk/src/lib.rs b/mmtk/src/lib.rs
index 71ff205d..02294013 100644
--- a/mmtk/src/lib.rs
+++ b/mmtk/src/lib.rs
@@ -75,6 +75,16 @@ extern "C" {
     pub static BI_METADATA_END_ALIGNED_UP: usize;
 }
 
+#[cfg(feature = "immix")]
+#[no_mangle]
+pub static MAX_STANDARD_OBJECT_SIZE: usize =
+    mmtk::plan::IMMIX_CONSTRAINTS.max_non_los_default_alloc_bytes;
+
+#[cfg(not(feature = "immix"))]
+#[no_mangle]
+pub static MAX_STANDARD_OBJECT_SIZE: usize = // default to size of Julia's max size class
+    2032 - std::mem::size_of::<usize>();
+
 #[no_mangle]
 pub static BLOCK_FOR_GC: AtomicBool = AtomicBool::new(false);