@@ -19,6 +19,9 @@
 #include "absl/base/internal/low_level_alloc.h"
 
+#include <type_traits>
+
+#include "absl/base/call_once.h"
 #include "absl/base/config.h"
 #include "absl/base/internal/scheduling_mode.h"
 #include "absl/base/macros.h"
@@ -194,42 +197,79 @@ static void LLA_SkiplistDelete(AllocList *head, AllocList *e,
 // ---------------------------------------------------------------------------
 // Arena implementation
 
+// Metadata for an LowLevelAlloc arena instance.
 struct LowLevelAlloc::Arena {
-  // This constructor does nothing, and relies on zero-initialization to get
-  // the proper initial state.
-  Arena() : mu(base_internal::kLinkerInitialized) {}  // NOLINT
-  explicit Arena(int)  // NOLINT(readability/casting)
-      :  // Avoid recursive cooperative scheduling w/ kernel scheduling.
-        mu(base_internal::SCHEDULE_KERNEL_ONLY),
-        // Set pagesize to zero explicitly for non-static init.
-        pagesize(0),
-        random(0) {}
-
-  base_internal::SpinLock mu;   // protects freelist, allocation_count,
-                                // pagesize, roundup, min_size
-  AllocList freelist;           // head of free list; sorted by addr (under mu)
-  int32_t allocation_count;     // count of allocated blocks (under mu)
-  std::atomic<uint32_t> flags;  // flags passed to NewArena (ro after init)
-  size_t pagesize;              // ==getpagesize()  (init under mu, then ro)
-  size_t roundup;               // lowest 2^n >= max(16,sizeof (AllocList))
-                                // (init under mu, then ro)
-  size_t min_size;              // smallest allocation block size
-                                // (init under mu, then ro)
-  uint32_t random;              // PRNG state
+  // Constructs an arena with the given LowLevelAlloc flags.
+  explicit Arena(uint32_t flags_value);
+
+  base_internal::SpinLock mu;
+  // Head of free list, sorted by address
+  AllocList freelist GUARDED_BY(mu);
+  // Count of allocated blocks
+  int32_t allocation_count GUARDED_BY(mu);
+  // flags passed to NewArena
+  const uint32_t flags;
+  // Result of getpagesize()
+  const size_t pagesize;
+  // Lowest power of two >= max(16, sizeof(AllocList))
+  const size_t roundup;
+  // Smallest allocation block size
+  const size_t min_size;
+  // PRNG state
+  uint32_t random GUARDED_BY(mu);
 };
 
-// The default arena, which is used when 0 is passed instead of an Arena
-// pointer.
-static struct LowLevelAlloc::Arena default_arena;  // NOLINT
+namespace {
+using ArenaStorage = std::aligned_storage<sizeof(LowLevelAlloc::Arena),
+                                          alignof(LowLevelAlloc::Arena)>::type;
+
+// Static storage space for the lazily-constructed, default global arena
+// instances.  We require this space because the whole point of LowLevelAlloc
+// is to avoid relying on malloc/new.
+ArenaStorage default_arena_storage;
+ArenaStorage unhooked_arena_storage;
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+ArenaStorage unhooked_async_sig_safe_arena_storage;
+#endif
 
-// Non-malloc-hooked arenas: used only to allocate metadata for arenas that
-// do not want malloc hook reporting, so that for them there's no malloc hook
-// reporting even during arena creation.
-static struct LowLevelAlloc::Arena unhooked_arena;  // NOLINT
+// We must use LowLevelCallOnce here to construct the global arenas, rather than
+// using function-level statics, to avoid recursively invoking the scheduler.
+absl::once_flag create_globals_once;
+
+void CreateGlobalArenas() {
+  new (&default_arena_storage)
+      LowLevelAlloc::Arena(LowLevelAlloc::kCallMallocHook);
+  new (&unhooked_arena_storage) LowLevelAlloc::Arena(0);
 #ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
-static struct LowLevelAlloc::Arena unhooked_async_sig_safe_arena;  // NOLINT
+  new (&unhooked_async_sig_safe_arena_storage)
+      LowLevelAlloc::Arena(LowLevelAlloc::kAsyncSignalSafe);
 #endif
+}
+
+// Returns a global arena that does not call into hooks.  Used by NewArena()
+// when kCallMallocHook is not set.
+LowLevelAlloc::Arena *UnhookedArena() {
+  base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
+  return reinterpret_cast<LowLevelAlloc::Arena *>(&unhooked_arena_storage);
+}
+
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+// Returns a global arena that is async-signal safe.  Used by NewArena() when
+// kAsyncSignalSafe is set.
+LowLevelAlloc::Arena *UnhookedAsyncSigSafeArena() {
+  base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
+  return reinterpret_cast<LowLevelAlloc::Arena *>(
+      &unhooked_async_sig_safe_arena_storage);
+}
+#endif
+
+}  // namespace
+
+// Returns the default arena, as used by LowLevelAlloc::Alloc() and friends.
+LowLevelAlloc::Arena *LowLevelAlloc::DefaultArena() {
+  base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
+  return reinterpret_cast<LowLevelAlloc::Arena *>(&default_arena_storage);
+}
 
 // magic numbers to identify allocated and unallocated blocks
 static const uintptr_t kMagicAllocated = 0x4c833e95U;
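Note on the hunk above: the three zero-initialized global Arena objects are replaced by raw aligned storage plus placement new, constructed exactly once through LowLevelCallOnce, so the globals get a real constructor while still avoiding dynamic initializers and malloc. A minimal standalone sketch of the same pattern, with a hypothetical Widget type and the standard std::call_once standing in for absl::once_flag / base_internal::LowLevelCallOnce:

    #include <mutex>
    #include <new>
    #include <type_traits>

    struct Widget {
      explicit Widget(int flags) : flags(flags) {}
      int flags;
    };

    // Constant-initialized raw storage: no dynamic initializer, no heap.
    std::aligned_storage<sizeof(Widget), alignof(Widget)>::type widget_storage;
    std::once_flag widget_once;

    Widget *GetWidget() {
      // Construct in place exactly once, then hand out the typed pointer.
      std::call_once(widget_once, [] { new (&widget_storage) Widget(0); });
      return reinterpret_cast<Widget *>(&widget_storage);
    }

Per the comment in the hunk, the real code avoids function-level statics because they could recursively invoke the scheduler; LowLevelCallOnce provides the same once-only construction without that hazard.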
@@ -242,9 +282,7 @@ class SCOPED_LOCKABLE ArenaLock {
       EXCLUSIVE_LOCK_FUNCTION(arena->mu)
       : arena_(arena) {
 #ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
-    if (arena == &unhooked_async_sig_safe_arena ||
-        (arena->flags.load(std::memory_order_relaxed) &
-         LowLevelAlloc::kAsyncSignalSafe) != 0) {
+    if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
       sigset_t all;
       sigfillset(&all);
       mask_valid_ = pthread_sigmask(SIG_BLOCK, &all, &mask_) == 0;
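Note on the hunk above: for arenas created with kAsyncSignalSafe, ArenaLock blocks every signal before taking the spinlock and later restores the saved mask, so no signal handler can run on a thread that holds the arena lock. A simplified, hypothetical POSIX sketch of that save/block/restore pattern (not the class itself):

    #include <pthread.h>
    #include <signal.h>

    void RunWithAllSignalsBlocked(void (*critical_section)()) {
      sigset_t all, saved;
      sigfillset(&all);
      // Block everything, remembering the previous mask so it can be restored.
      bool mask_valid = pthread_sigmask(SIG_BLOCK, &all, &saved) == 0;
      critical_section();  // e.g. acquire the spinlock, touch the freelist
      if (mask_valid) {
        pthread_sigmask(SIG_SETMASK, &saved, nullptr);
      }
    }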
@@ -281,84 +319,73 @@ inline static uintptr_t Magic(uintptr_t magic, AllocList::Header *ptr) {
   return magic ^ reinterpret_cast<uintptr_t>(ptr);
 }
 
-// Initialize the fields of an Arena
-static void ArenaInit(LowLevelAlloc::Arena *arena) {
-  if (arena->pagesize == 0) {
+namespace {
+size_t GetPageSize() {
 #ifdef _WIN32
-    SYSTEM_INFO system_info;
-    GetSystemInfo(&system_info);
-    arena->pagesize = std::max(system_info.dwPageSize,
-                               system_info.dwAllocationGranularity);
+  SYSTEM_INFO system_info;
+  GetSystemInfo(&system_info);
+  return std::max(system_info.dwPageSize, system_info.dwAllocationGranularity);
 #else
-    arena->pagesize = getpagesize();
+  return getpagesize();
 #endif
-    // Round up block sizes to a power of two close to the header size.
-    arena->roundup = 16;
-    while (arena->roundup < sizeof(arena->freelist.header)) {
-      arena->roundup += arena->roundup;
-    }
-    // Don't allocate blocks less than twice the roundup size to avoid tiny
-    // free blocks.
-    arena->min_size = 2 * arena->roundup;
-    arena->freelist.header.size = 0;
-    arena->freelist.header.magic =
-        Magic(kMagicUnallocated, &arena->freelist.header);
-    arena->freelist.header.arena = arena;
-    arena->freelist.levels = 0;
-    memset(arena->freelist.next, 0, sizeof(arena->freelist.next));
-    arena->allocation_count = 0;
-    if (arena == &default_arena) {
-      // Default arena should be hooked, e.g. for heap-checker to trace
-      // pointer chains through objects in the default arena.
-      arena->flags.store(LowLevelAlloc::kCallMallocHook,
-                         std::memory_order_relaxed);
-    }
-#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
-    else if (arena ==  // NOLINT(readability/braces)
-             &unhooked_async_sig_safe_arena) {
-      arena->flags.store(LowLevelAlloc::kAsyncSignalSafe,
-                         std::memory_order_relaxed);
-    }
-#endif
-    else {  // NOLINT(readability/braces)
-      // other arenas' flags may be overridden by client,
-      // but unhooked_arena will have 0 in 'flags'.
-      arena->flags.store(0, std::memory_order_relaxed);
-    }
-  }
+}
+
+size_t RoundedUpBlockSize() {
+  // Round up block sizes to a power of two close to the header size.
+  size_t roundup = 16;
+  while (roundup < sizeof(AllocList::Header)) {
+    roundup += roundup;
+  }
+  return roundup;
+}
+
+}  // namespace
+
+LowLevelAlloc::Arena::Arena(uint32_t flags_value)
+    : mu(base_internal::SCHEDULE_KERNEL_ONLY),
+      allocation_count(0),
+      flags(flags_value),
+      pagesize(GetPageSize()),
+      roundup(RoundedUpBlockSize()),
+      min_size(2 * roundup),
+      random(0) {
+  freelist.header.size = 0;
+  freelist.header.magic =
+      Magic(kMagicUnallocated, &freelist.header);
+  freelist.header.arena = this;
+  freelist.levels = 0;
+  memset(freelist.next, 0, sizeof(freelist.next));
 }
 
 // L < meta_data_arena->mu
 LowLevelAlloc::Arena *LowLevelAlloc::NewArena(int32_t flags,
                                               Arena *meta_data_arena) {
   ABSL_RAW_CHECK(meta_data_arena != nullptr, "must pass a valid arena");
-  if (meta_data_arena == &default_arena) {
+  if (meta_data_arena == DefaultArena()) {
 #ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
     if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
-      meta_data_arena = &unhooked_async_sig_safe_arena;
+      meta_data_arena = UnhookedAsyncSigSafeArena();
     } else  // NOLINT(readability/braces)
 #endif
     if ((flags & LowLevelAlloc::kCallMallocHook) == 0) {
-      meta_data_arena = &unhooked_arena;
+      meta_data_arena = UnhookedArena();
     }
   }
-  // Arena(0) uses the constructor for non-static contexts
   Arena *result =
-      new (AllocWithArena(sizeof(*result), meta_data_arena)) Arena(0);
-  ArenaInit(result);
-  result->flags.store(flags, std::memory_order_relaxed);
+      new (AllocWithArena(sizeof(*result), meta_data_arena)) Arena(flags);
   return result;
 }
 
 // L < arena->mu, L < arena->arena->mu
 bool LowLevelAlloc::DeleteArena(Arena *arena) {
   ABSL_RAW_CHECK(
-      arena != nullptr && arena != &default_arena && arena != &unhooked_arena,
+      arena != nullptr && arena != DefaultArena() && arena != UnhookedArena(),
       "may not delete default arena");
   ArenaLock section(arena);
-  bool empty = (arena->allocation_count == 0);
-  section.Leave();
-  if (empty) {
+  if (arena->allocation_count != 0) {
+    section.Leave();
+    return false;
+  }
   while (arena->freelist.next[0] != nullptr) {
     AllocList *region = arena->freelist.next[0];
     size_t size = region->header.size;
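Note on RoundedUpBlockSize() above: it starts at 16 and doubles until it covers the header, so it returns the lowest power of two >= max(16, sizeof(AllocList::Header)), and the constructor then sets min_size = 2 * roundup to avoid tiny free blocks. A tiny standalone check of the doubling loop, using hypothetical header sizes:

    #include <cstddef>
    #include <cstdio>

    // Same doubling loop as RoundedUpBlockSize(), parameterized on the size.
    size_t RoundedUp(size_t header_size) {
      size_t roundup = 16;
      while (roundup < header_size) roundup += roundup;
      return roundup;
    }

    int main() {
      std::printf("%zu\n", RoundedUp(24));  // 32: lowest power of two >= 24
      std::printf("%zu\n", RoundedUp(8));   // 16: never drops below 16
      std::printf("%zu\n", RoundedUp(64));  // 64: already a power of two
    }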
@@ -378,8 +405,7 @@ bool LowLevelAlloc::DeleteArena(Arena *arena) {
     ABSL_RAW_CHECK(munmap_result != 0,
                    "LowLevelAlloc::DeleteArena: VitualFree failed");
 #else
-    if ((arena->flags.load(std::memory_order_relaxed) &
-         LowLevelAlloc::kAsyncSignalSafe) == 0) {
+    if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) == 0) {
       munmap_result = munmap(region, size);
     } else {
       munmap_result = MallocHook::UnhookedMUnmap(region, size);
@@ -390,9 +416,10 @@ bool LowLevelAlloc::DeleteArena(Arena *arena) {
     }
 #endif
   }
+  section.Leave();
+  arena->~Arena();
   Free(arena);
-  }
-  return empty;
+  return true;
 }
// ---------------------------------------------------------------------------
// ---------------------------------------------------------------------------
@ -479,7 +506,7 @@ void LowLevelAlloc::Free(void *v) {
ABSL_RAW_CHECK ( f - > header . magic = = Magic ( kMagicAllocated , & f - > header ) ,
ABSL_RAW_CHECK ( f - > header . magic = = Magic ( kMagicAllocated , & f - > header ) ,
" bad magic number in Free() " ) ;
" bad magic number in Free() " ) ;
LowLevelAlloc : : Arena * arena = f - > header . arena ;
LowLevelAlloc : : Arena * arena = f - > header . arena ;
if ( ( arena - > flags . load ( std : : memory_order_relaxed ) & kCallMallocHook ) ! = 0 ) {
if ( ( arena - > flags & kCallMallocHook ) ! = 0 ) {
MallocHook : : InvokeDeleteHook ( v ) ;
MallocHook : : InvokeDeleteHook ( v ) ;
}
}
ArenaLock section ( arena ) ;
ArenaLock section ( arena ) ;
@@ -497,7 +524,6 @@ static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
   if (request != 0) {
     AllocList *s;       // will point to region that satisfies request
     ArenaLock section(arena);
-    ArenaInit(arena);
     // round up with header
     size_t req_rnd = RoundUp(CheckedAdd(request, sizeof(s->header)),
                              arena->roundup);
@@ -526,8 +552,7 @@ static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
                                MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
       ABSL_RAW_CHECK(new_pages != nullptr, "VirtualAlloc failed");
 #else
-      if ((arena->flags.load(std::memory_order_relaxed) &
-           LowLevelAlloc::kAsyncSignalSafe) != 0) {
+      if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
         new_pages = MallocHook::UnhookedMMap(nullptr, new_pages_size,
             PROT_WRITE | PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
       } else {
@@ -570,20 +595,18 @@ static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
 }
 
 void *LowLevelAlloc::Alloc(size_t request) {
-  void *result = DoAllocWithArena(request, &default_arena);
-  if ((default_arena.flags.load(std::memory_order_relaxed) &
-       kCallMallocHook) != 0) {
-    // this call must be directly in the user-called allocator function
-    // for MallocHook::GetCallerStackTrace to work properly
-    MallocHook::InvokeNewHook(result, request);
-  }
+  void *result = DoAllocWithArena(request, DefaultArena());
+  // The default arena always calls the malloc hook.
+  // This call must be directly in the user-called allocator function
+  // for MallocHook::GetCallerStackTrace to work properly
+  MallocHook::InvokeNewHook(result, request);
   return result;
 }
 
 void *LowLevelAlloc::AllocWithArena(size_t request, Arena *arena) {
   ABSL_RAW_CHECK(arena != nullptr, "must pass a valid arena");
   void *result = DoAllocWithArena(request, arena);
-  if ((arena->flags.load(std::memory_order_relaxed) & kCallMallocHook) != 0) {
+  if ((arena->flags & kCallMallocHook) != 0) {
     // this call must be directly in the user-called allocator function
     // for MallocHook::GetCallerStackTrace to work properly
     MallocHook::InvokeNewHook(result, request);
@@ -591,10 +614,6 @@ void *LowLevelAlloc::AllocWithArena(size_t request, Arena *arena) {
   return result;
 }
 
-LowLevelAlloc::Arena *LowLevelAlloc::DefaultArena() {
-  return &default_arena;
-}
-
 }  // namespace base_internal
 }  // namespace absl
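Usage sketch (not part of the patch, assuming only the declarations visible in the hunks above): NewArena still takes the flags plus a metadata arena, and DeleteArena now runs the Arena destructor before releasing its storage. A typical calling pattern looks roughly like this:

    #include "absl/base/internal/low_level_alloc.h"

    using absl::base_internal::LowLevelAlloc;

    void Demo() {
      // Metadata for the new arena is carved out of the default arena.
      LowLevelAlloc::Arena *arena = LowLevelAlloc::NewArena(
          LowLevelAlloc::kCallMallocHook, LowLevelAlloc::DefaultArena());
      void *p = LowLevelAlloc::AllocWithArena(128, arena);
      LowLevelAlloc::Free(p);  // the arena must be empty before deletion
      bool deleted = LowLevelAlloc::DeleteArena(arena);  // true once empty
      (void)deleted;
    }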