Changes imported from Abseil "staging" branch:

- 8320b38cd9f4f271fb6b278bd1e10d93f6ac3856 Use overloads for int32/int64/uint32/uint64 rather than i... by Jorg Brown <jorg@google.com>
  - f8b582b8deb3f78a3c6de2114b3ec4640f5427dd Internal change by Juemin Yang <jueminyang@google.com>
  - 240ff55ebf493ab1233ebe6976853a5fa2b3ec46 Remove the internal LowLevelAlloc's dependence on kLinker... by Greg Falcon <gfalcon@google.com>

GitOrigin-RevId: 8320b38cd9f4f271fb6b278bd1e10d93f6ac3856
Change-Id: If5004efa2b43856948390ab357b8e9403e4461b4
pull/62/head
Authored by Abseil Team (7 years ago), committed by Titus Winters
parent 720c017e30
commit 6280bddf55
Files changed:
  1. absl/base/attributes.h (3)
  2. absl/base/internal/low_level_alloc.cc (213)
  3. absl/strings/numbers.cc (28)
  4. absl/strings/numbers.h (26)
  5. absl/strings/numbers_test.cc (74)

absl/base/attributes.h

@@ -305,6 +305,7 @@
   __attribute__((section(#name))) __attribute__((noinline))
 #endif

 // ABSL_ATTRIBUTE_SECTION_VARIABLE
 //
 // Tells the compiler/linker to put a given variable into a section and define
@@ -344,6 +345,7 @@
   (reinterpret_cast<void *>(__start_##name))
 #define ABSL_ATTRIBUTE_SECTION_STOP(name) \
   (reinterpret_cast<void *>(__stop_##name))

 #else  // !ABSL_HAVE_ATTRIBUTE_SECTION
 #define ABSL_HAVE_ATTRIBUTE_SECTION 0
@@ -356,6 +358,7 @@
 #define ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name)
 #define ABSL_ATTRIBUTE_SECTION_START(name) (reinterpret_cast<void *>(0))
 #define ABSL_ATTRIBUTE_SECTION_STOP(name) (reinterpret_cast<void *>(0))
 #endif  // ABSL_ATTRIBUTE_SECTION

 // ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC
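These macros wrap GCC/Clang's section attribute together with the __start_<name> and __stop_<name> symbols that GNU ld synthesizes for sections whose names are valid C identifiers. As a rough, ELF-only sketch of that underlying mechanism (the section name my_fns, RegisteredFn, and the two functions below are illustrative, not part of Abseil):

// Illustrative ELF/GNU-ld example of the mechanism these macros wrap.
#include <cstdio>

using RegisteredFn = void (*)();

void PrintA() { std::puts("a"); }
void PrintB() { std::puts("b"); }

// Each pointer is placed into the dedicated "my_fns" linker section.
__attribute__((section("my_fns"))) RegisteredFn fn_a = &PrintA;
__attribute__((section("my_fns"))) RegisteredFn fn_b = &PrintB;

// For a section whose name is a valid C identifier, GNU ld provides
// __start_<name> and __stop_<name> symbols bracketing its contents.
extern RegisteredFn __start_my_fns[];
extern RegisteredFn __stop_my_fns[];

int main() {
  for (RegisteredFn* fn = __start_my_fns; fn != __stop_my_fns; ++fn) {
    (*fn)();  // walks every pointer registered in the section
  }
}

When ABSL_HAVE_ATTRIBUTE_SECTION is 0, the fallback macros above collapse the start and stop addresses to null pointers, so callers see an empty range.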

absl/base/internal/low_level_alloc.cc

@@ -19,6 +19,9 @@
 #include "absl/base/internal/low_level_alloc.h"
+#include <type_traits>
+#include "absl/base/call_once.h"
 #include "absl/base/config.h"
 #include "absl/base/internal/scheduling_mode.h"
 #include "absl/base/macros.h"
@@ -194,42 +197,79 @@ static void LLA_SkiplistDelete(AllocList *head, AllocList *e,
 // ---------------------------------------------------------------------------
 // Arena implementation

+// Metadata for an LowLevelAlloc arena instance.
 struct LowLevelAlloc::Arena {
-  // This constructor does nothing, and relies on zero-initialization to get
-  // the proper initial state.
-  Arena() : mu(base_internal::kLinkerInitialized) {}  // NOLINT
-  explicit Arena(int)  // NOLINT(readability/casting)
-      :  // Avoid recursive cooperative scheduling w/ kernel scheduling.
-        mu(base_internal::SCHEDULE_KERNEL_ONLY),
-        // Set pagesize to zero explicitly for non-static init.
-        pagesize(0),
-        random(0) {}
-
-  base_internal::SpinLock mu;   // protects freelist, allocation_count,
-                                // pagesize, roundup, min_size
-  AllocList freelist;           // head of free list; sorted by addr (under mu)
-  int32_t allocation_count;     // count of allocated blocks (under mu)
-  std::atomic<uint32_t> flags;  // flags passed to NewArena (ro after init)
-  size_t pagesize;              // ==getpagesize()  (init under mu, then ro)
-  size_t roundup;               // lowest 2^n >= max(16,sizeof (AllocList))
-                                // (init under mu, then ro)
-  size_t min_size;              // smallest allocation block size
-                                // (init under mu, then ro)
-  uint32_t random;              // PRNG state
+  // Constructs an arena with the given LowLevelAlloc flags.
+  explicit Arena(uint32_t flags_value);
+
+  base_internal::SpinLock mu;
+  // Head of free list, sorted by address
+  AllocList freelist GUARDED_BY(mu);
+  // Count of allocated blocks
+  int32_t allocation_count GUARDED_BY(mu);
+  // flags passed to NewArena
+  const uint32_t flags;
+  // Result of getpagesize()
+  const size_t pagesize;
+  // Lowest power of two >= max(16, sizeof(AllocList))
+  const size_t roundup;
+  // Smallest allocation block size
+  const size_t min_size;
+  // PRNG state
+  uint32_t random GUARDED_BY(mu);
 };

-// The default arena, which is used when 0 is passed instead of an Arena
-// pointer.
-static struct LowLevelAlloc::Arena default_arena;  // NOLINT
-
-// Non-malloc-hooked arenas: used only to allocate metadata for arenas that
-// do not want malloc hook reporting, so that for them there's no malloc hook
-// reporting even during arena creation.
-static struct LowLevelAlloc::Arena unhooked_arena;  // NOLINT
-
-#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
-static struct LowLevelAlloc::Arena unhooked_async_sig_safe_arena;  // NOLINT
-#endif
+namespace {
+using ArenaStorage = std::aligned_storage<sizeof(LowLevelAlloc::Arena),
+                                          alignof(LowLevelAlloc::Arena)>::type;
+
+// Static storage space for the lazily-constructed, default global arena
+// instances. We require this space because the whole point of LowLevelAlloc
+// is to avoid relying on malloc/new.
+ArenaStorage default_arena_storage;
+ArenaStorage unhooked_arena_storage;
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+ArenaStorage unhooked_async_sig_safe_arena_storage;
+#endif
+
+// We must use LowLevelCallOnce here to construct the global arenas, rather than
+// using function-level statics, to avoid recursively invoking the scheduler.
+absl::once_flag create_globals_once;
+
+void CreateGlobalArenas() {
+  new (&default_arena_storage)
+      LowLevelAlloc::Arena(LowLevelAlloc::kCallMallocHook);
+  new (&unhooked_arena_storage) LowLevelAlloc::Arena(0);
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+  new (&unhooked_async_sig_safe_arena_storage)
+      LowLevelAlloc::Arena(LowLevelAlloc::kAsyncSignalSafe);
+#endif
+}
+
+// Returns a global arena that does not call into hooks. Used by NewArena()
+// when kCallMallocHook is not set.
+LowLevelAlloc::Arena* UnhookedArena() {
+  base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
+  return reinterpret_cast<LowLevelAlloc::Arena*>(&unhooked_arena_storage);
+}
+
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+// Returns a global arena that is async-signal safe. Used by NewArena() when
+// kAsyncSignalSafe is set.
+LowLevelAlloc::Arena *UnhookedAsyncSigSafeArena() {
+  base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
+  return reinterpret_cast<LowLevelAlloc::Arena *>(
+      &unhooked_async_sig_safe_arena_storage);
+}
+#endif
+
+}  // namespace
+
+// Returns the default arena, as used by LowLevelAlloc::Alloc() and friends.
+LowLevelAlloc::Arena *LowLevelAlloc::DefaultArena() {
+  base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
+  return reinterpret_cast<LowLevelAlloc::Arena*>(&default_arena_storage);
+}

 // magic numbers to identify allocated and unallocated blocks
 static const uintptr_t kMagicAllocated = 0x4c833e95U;
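The added code replaces zero-initialized static Arena objects with lazily constructed ones: raw aligned storage plus a once-flag, with placement new running the constructor on first use. A minimal standalone sketch of that pattern, using std::call_once in place of Abseil's internal LowLevelCallOnce (the Widget type and names are illustrative, not Abseil's):

#include <mutex>        // std::once_flag, std::call_once
#include <new>          // placement new
#include <type_traits>  // std::aligned_storage

struct Widget {  // stand-in for LowLevelAlloc::Arena
  explicit Widget(int flags) : flags(flags) {}
  int flags;
};

// Raw, constant-initialized storage: no dynamic initializer, no malloc.
std::aligned_storage<sizeof(Widget), alignof(Widget)>::type widget_storage;
std::once_flag widget_once;

Widget* DefaultWidget() {
  // The first caller constructs the object in place; later callers reuse it.
  std::call_once(widget_once, [] { new (&widget_storage) Widget(/*flags=*/0); });
  return reinterpret_cast<Widget*>(&widget_storage);
}

int main() { return DefaultWidget()->flags; }

As the comment in the diff notes, LowLevelAlloc cannot rely on a function-local static here because the compiler-generated initialization guard could recursively invoke the scheduler; the explicit once-flag avoids that.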
@@ -242,9 +282,7 @@ class SCOPED_LOCKABLE ArenaLock {
       EXCLUSIVE_LOCK_FUNCTION(arena->mu)
       : arena_(arena) {
 #ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
-    if (arena == &unhooked_async_sig_safe_arena ||
-        (arena->flags.load(std::memory_order_relaxed) &
-         LowLevelAlloc::kAsyncSignalSafe) != 0) {
+    if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
       sigset_t all;
       sigfillset(&all);
       mask_valid_ = pthread_sigmask(SIG_BLOCK, &all, &mask_) == 0;
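For async-signal-safe arenas, ArenaLock blocks every signal for the duration of the critical section so a signal handler cannot re-enter the allocator while its lock is held. A rough, standalone sketch of that pthread_sigmask save/block/restore pattern (the guard class is hypothetical, not part of Abseil):

#include <pthread.h>
#include <signal.h>

// Blocks all signals on construction and restores the previous mask on
// destruction, mirroring what ArenaLock does around the arena spinlock.
class SignalBlockGuard {
 public:
  SignalBlockGuard() {
    sigset_t all;
    sigfillset(&all);
    mask_valid_ = pthread_sigmask(SIG_BLOCK, &all, &saved_) == 0;
  }
  ~SignalBlockGuard() {
    if (mask_valid_) pthread_sigmask(SIG_SETMASK, &saved_, nullptr);
  }

 private:
  sigset_t saved_;
  bool mask_valid_ = false;
};

int main() {
  SignalBlockGuard guard;  // all signals are blocked here
  // ... touch state that a signal handler might also touch ...
  return 0;
}  // previous signal mask restored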
@@ -281,84 +319,73 @@ inline static uintptr_t Magic(uintptr_t magic, AllocList::Header *ptr) {
   return magic ^ reinterpret_cast<uintptr_t>(ptr);
 }

-// Initialize the fields of an Arena
-static void ArenaInit(LowLevelAlloc::Arena *arena) {
-  if (arena->pagesize == 0) {
-#ifdef _WIN32
-    SYSTEM_INFO system_info;
-    GetSystemInfo(&system_info);
-    arena->pagesize = std::max(system_info.dwPageSize,
-                               system_info.dwAllocationGranularity);
-#else
-    arena->pagesize = getpagesize();
-#endif
-    // Round up block sizes to a power of two close to the header size.
-    arena->roundup = 16;
-    while (arena->roundup < sizeof (arena->freelist.header)) {
-      arena->roundup += arena->roundup;
-    }
-    // Don't allocate blocks less than twice the roundup size to avoid tiny
-    // free blocks.
-    arena->min_size = 2 * arena->roundup;
-    arena->freelist.header.size = 0;
-    arena->freelist.header.magic =
-        Magic(kMagicUnallocated, &arena->freelist.header);
-    arena->freelist.header.arena = arena;
-    arena->freelist.levels = 0;
-    memset(arena->freelist.next, 0, sizeof (arena->freelist.next));
-    arena->allocation_count = 0;
-    if (arena == &default_arena) {
-      // Default arena should be hooked, e.g. for heap-checker to trace
-      // pointer chains through objects in the default arena.
-      arena->flags.store(LowLevelAlloc::kCallMallocHook,
-                         std::memory_order_relaxed);
-    }
-#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
-    else if (arena ==  // NOLINT(readability/braces)
-             &unhooked_async_sig_safe_arena) {
-      arena->flags.store(LowLevelAlloc::kAsyncSignalSafe,
-                         std::memory_order_relaxed);
-    }
-#endif
-    else {  // NOLINT(readability/braces)
-      // other arenas' flags may be overridden by client,
-      // but unhooked_arena will have 0 in 'flags'.
-      arena->flags.store(0, std::memory_order_relaxed);
-    }
-  }
-}
+namespace {
+size_t GetPageSize() {
+#ifdef _WIN32
+  SYSTEM_INFO system_info;
+  GetSystemInfo(&system_info);
+  return std::max(system_info.dwPageSize, system_info.dwAllocationGranularity);
+#else
+  return getpagesize();
+#endif
+}
+
+size_t RoundedUpBlockSize() {
+  // Round up block sizes to a power of two close to the header size.
+  size_t roundup = 16;
+  while (roundup < sizeof(AllocList::Header)) {
+    roundup += roundup;
+  }
+  return roundup;
+}
+}  // namespace
+
+LowLevelAlloc::Arena::Arena(uint32_t flags_value)
+    : mu(base_internal::SCHEDULE_KERNEL_ONLY),
+      allocation_count(0),
+      flags(flags_value),
+      pagesize(GetPageSize()),
+      roundup(RoundedUpBlockSize()),
+      min_size(2 * roundup),
+      random(0) {
+  freelist.header.size = 0;
+  freelist.header.magic =
+      Magic(kMagicUnallocated, &freelist.header);
+  freelist.header.arena = this;
+  freelist.levels = 0;
+  memset(freelist.next, 0, sizeof(freelist.next));
+}

 // L < meta_data_arena->mu
 LowLevelAlloc::Arena *LowLevelAlloc::NewArena(int32_t flags,
                                               Arena *meta_data_arena) {
   ABSL_RAW_CHECK(meta_data_arena != nullptr, "must pass a valid arena");
-  if (meta_data_arena == &default_arena) {
+  if (meta_data_arena == DefaultArena()) {
 #ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
     if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
-      meta_data_arena = &unhooked_async_sig_safe_arena;
+      meta_data_arena = UnhookedAsyncSigSafeArena();
     } else  // NOLINT(readability/braces)
 #endif
         if ((flags & LowLevelAlloc::kCallMallocHook) == 0) {
-      meta_data_arena = &unhooked_arena;
+      meta_data_arena = UnhookedArena();
     }
   }
-  // Arena(0) uses the constructor for non-static contexts
   Arena *result =
-    new (AllocWithArena(sizeof (*result), meta_data_arena)) Arena(0);
-  ArenaInit(result);
-  result->flags.store(flags, std::memory_order_relaxed);
+    new (AllocWithArena(sizeof (*result), meta_data_arena)) Arena(flags);
   return result;
 }

 // L < arena->mu, L < arena->arena->mu
 bool LowLevelAlloc::DeleteArena(Arena *arena) {
   ABSL_RAW_CHECK(
-      arena != nullptr && arena != &default_arena && arena != &unhooked_arena,
+      arena != nullptr && arena != DefaultArena() && arena != UnhookedArena(),
       "may not delete default arena");
   ArenaLock section(arena);
-  bool empty = (arena->allocation_count == 0);
-  section.Leave();
-  if (empty) {
+  if (arena->allocation_count != 0) {
+    section.Leave();
+    return false;
+  }
   while (arena->freelist.next[0] != nullptr) {
     AllocList *region = arena->freelist.next[0];
     size_t size = region->header.size;
@@ -378,8 +405,7 @@ bool LowLevelAlloc::DeleteArena(Arena *arena) {
     ABSL_RAW_CHECK(munmap_result != 0,
                    "LowLevelAlloc::DeleteArena: VitualFree failed");
 #else
-    if ((arena->flags.load(std::memory_order_relaxed) &
-         LowLevelAlloc::kAsyncSignalSafe) == 0) {
+    if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) == 0) {
       munmap_result = munmap(region, size);
     } else {
       munmap_result = MallocHook::UnhookedMUnmap(region, size);
@@ -390,9 +416,10 @@ bool LowLevelAlloc::DeleteArena(Arena *arena) {
     }
 #endif
   }
+  section.Leave();
+  arena->~Arena();
   Free(arena);
-  }
-  return empty;
+  return true;
 }

 // ---------------------------------------------------------------------------
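As a usage sketch of the arena API touched by this change (hypothetical caller code, not from the Abseil sources): NewArena carves its metadata out of the given metadata arena, and DeleteArena succeeds only once every block allocated from the arena has been freed.

#include "absl/base/internal/low_level_alloc.h"

using absl::base_internal::LowLevelAlloc;

void Demo() {
  // Metadata for the new arena is allocated via the default arena.
  LowLevelAlloc::Arena* arena =
      LowLevelAlloc::NewArena(/*flags=*/0, LowLevelAlloc::DefaultArena());

  void* block = LowLevelAlloc::AllocWithArena(128, arena);
  // ... use block ...
  LowLevelAlloc::Free(block);

  // Returns true only if the arena has no live allocations left.
  bool deleted = LowLevelAlloc::DeleteArena(arena);
  (void)deleted;
}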
@@ -479,7 +506,7 @@ void LowLevelAlloc::Free(void *v) {
     ABSL_RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
                    "bad magic number in Free()");
     LowLevelAlloc::Arena *arena = f->header.arena;
-    if ((arena->flags.load(std::memory_order_relaxed) & kCallMallocHook) != 0) {
+    if ((arena->flags & kCallMallocHook) != 0) {
       MallocHook::InvokeDeleteHook(v);
     }
     ArenaLock section(arena);
@@ -497,7 +524,6 @@ static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
   if (request != 0) {
     AllocList *s;       // will point to region that satisfies request
     ArenaLock section(arena);
-    ArenaInit(arena);
     // round up with header
     size_t req_rnd = RoundUp(CheckedAdd(request, sizeof (s->header)),
                              arena->roundup);
@@ -526,8 +552,7 @@ static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
                                 MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
       ABSL_RAW_CHECK(new_pages != nullptr, "VirtualAlloc failed");
 #else
-      if ((arena->flags.load(std::memory_order_relaxed) &
-           LowLevelAlloc::kAsyncSignalSafe) != 0) {
+      if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
         new_pages = MallocHook::UnhookedMMap(nullptr, new_pages_size,
             PROT_WRITE|PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
       } else {
@@ -570,20 +595,18 @@ static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
 }

 void *LowLevelAlloc::Alloc(size_t request) {
-  void *result = DoAllocWithArena(request, &default_arena);
-  if ((default_arena.flags.load(std::memory_order_relaxed) &
-       kCallMallocHook) != 0) {
-    // this call must be directly in the user-called allocator function
-    // for MallocHook::GetCallerStackTrace to work properly
-    MallocHook::InvokeNewHook(result, request);
-  }
+  void *result = DoAllocWithArena(request, DefaultArena());
+  // The default arena always calls the malloc hook.
+  // This call must be directly in the user-called allocator function
+  // for MallocHook::GetCallerStackTrace to work properly
+  MallocHook::InvokeNewHook(result, request);
   return result;
 }

 void *LowLevelAlloc::AllocWithArena(size_t request, Arena *arena) {
   ABSL_RAW_CHECK(arena != nullptr, "must pass a valid arena");
   void *result = DoAllocWithArena(request, arena);
-  if ((arena->flags.load(std::memory_order_relaxed) & kCallMallocHook) != 0) {
+  if ((arena->flags & kCallMallocHook) != 0) {
     // this call must be directly in the user-called allocator function
     // for MallocHook::GetCallerStackTrace to work properly
     MallocHook::InvokeNewHook(result, request);
@@ -591,10 +614,6 @@ void *LowLevelAlloc::AllocWithArena(size_t request, Arena *arena) {
   return result;
 }

-LowLevelAlloc::Arena *LowLevelAlloc::DefaultArena() {
-  return &default_arena;
-}
-
 }  // namespace base_internal
 }  // namespace absl

absl/strings/numbers.cc

@@ -135,16 +135,12 @@ bool SimpleAtob(absl::string_view str, bool* value) {
 }

 // ----------------------------------------------------------------------
-// FastInt32ToBuffer()
-// FastUInt32ToBuffer()
-// FastInt64ToBuffer()
-// FastUInt64ToBuffer()
+// FastIntToBuffer() overloads
 //
 // Like the Fast*ToBuffer() functions above, these are intended for speed.
 // Unlike the Fast*ToBuffer() functions, however, these functions write
-// their output to the beginning of the buffer (hence the name, as the
-// output is left-aligned). The caller is responsible for ensuring that
-// the buffer has enough space to hold the output.
+// their output to the beginning of the buffer. The caller is responsible
+// for ensuring that the buffer has enough space to hold the output.
 //
 // Returns a pointer to the end of the std::string (i.e. the null character
 // terminating the std::string).
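These functions write the decimal form of the value at the start of the caller's buffer and return a pointer to the terminating '\0', which makes length computation and chaining cheap. A small usage sketch, assuming the post-change overload set (numbers_internal is Abseil's internal namespace; it is used here only to illustrate the contract):

#include <cstdio>
#include "absl/strings/numbers.h"

int main() {
  char buf[absl::numbers_internal::kFastToBufferSize];
  // Writes "12345" at the start of buf and returns a pointer to the '\0'.
  char* end = absl::numbers_internal::FastIntToBuffer(12345, buf);
  std::printf("%s has %zu digits\n", buf, static_cast<size_t>(end - buf));
  return 0;
}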
@@ -160,7 +156,7 @@ const char one_ASCII_final_digits[10][2] {

 }  // namespace

-char* numbers_internal::FastUInt32ToBuffer(uint32_t i, char* buffer) {
+char* numbers_internal::FastIntToBuffer(uint32_t i, char* buffer) {
   uint32_t digits;
   // The idea of this implementation is to trim the number of divides to as few
   // as possible, and also reducing memory stores and branches, by going in
@@ -230,7 +226,7 @@ char* numbers_internal::FastUInt32ToBuffer(uint32_t i, char* buffer) {
   goto lt100_000_000;
 }

-char* numbers_internal::FastInt32ToBuffer(int32_t i, char* buffer) {
+char* numbers_internal::FastIntToBuffer(int32_t i, char* buffer) {
   uint32_t u = i;
   if (i < 0) {
     *buffer++ = '-';
@@ -239,12 +235,12 @@ char* numbers_internal::FastInt32ToBuffer(int32_t i, char* buffer) {
     // we write the equivalent expression "0 - u" instead.
     u = 0 - u;
   }
-  return numbers_internal::FastUInt32ToBuffer(u, buffer);
+  return numbers_internal::FastIntToBuffer(u, buffer);
 }

-char* numbers_internal::FastUInt64ToBuffer(uint64_t i, char* buffer) {
+char* numbers_internal::FastIntToBuffer(uint64_t i, char* buffer) {
   uint32_t u32 = static_cast<uint32_t>(i);
-  if (u32 == i) return numbers_internal::FastUInt32ToBuffer(u32, buffer);
+  if (u32 == i) return numbers_internal::FastIntToBuffer(u32, buffer);

   // Here we know i has at least 10 decimal digits.
   uint64_t top_1to11 = i / 1000000000;
@@ -252,12 +248,12 @@ char* numbers_internal::FastUInt64ToBuffer(uint64_t i, char* buffer) {
   uint32_t top_1to11_32 = static_cast<uint32_t>(top_1to11);

   if (top_1to11_32 == top_1to11) {
-    buffer = numbers_internal::FastUInt32ToBuffer(top_1to11_32, buffer);
+    buffer = numbers_internal::FastIntToBuffer(top_1to11_32, buffer);
   } else {
     // top_1to11 has more than 32 bits too; print it in two steps.
     uint32_t top_8to9 = static_cast<uint32_t>(top_1to11 / 100);
     uint32_t mid_2 = static_cast<uint32_t>(top_1to11 - top_8to9 * 100);
-    buffer = numbers_internal::FastUInt32ToBuffer(top_8to9, buffer);
+    buffer = numbers_internal::FastIntToBuffer(top_8to9, buffer);
     PutTwoDigits(mid_2, buffer);
     buffer += 2;
   }
@@ -283,13 +279,13 @@ char* numbers_internal::FastUInt64ToBuffer(uint64_t i, char* buffer) {
   return buffer + 1;
 }

-char* numbers_internal::FastInt64ToBuffer(int64_t i, char* buffer) {
+char* numbers_internal::FastIntToBuffer(int64_t i, char* buffer) {
   uint64_t u = i;
   if (i < 0) {
     *buffer++ = '-';
     u = 0 - u;
   }
-  return numbers_internal::FastUInt64ToBuffer(u, buffer);
+  return numbers_internal::FastIntToBuffer(u, buffer);
 }

 // Returns the number of leading 0 bits in a 64-bit value.

absl/strings/numbers.h

@@ -81,14 +81,6 @@ bool safe_strto64_base(absl::string_view text, int64_t* value, int base);
 bool safe_strtou32_base(absl::string_view text, uint32_t* value, int base);
 bool safe_strtou64_base(absl::string_view text, uint64_t* value, int base);

-// These functions are intended for speed. All functions take an output buffer
-// as an argument and return a pointer to the last byte they wrote, which is the
-// terminating '\0'. At most `kFastToBufferSize` bytes are written.
-char* FastInt32ToBuffer(int32_t i, char* buffer);
-char* FastUInt32ToBuffer(uint32_t i, char* buffer);
-char* FastInt64ToBuffer(int64_t i, char* buffer);
-char* FastUInt64ToBuffer(uint64_t i, char* buffer);
-
 static const int kFastToBufferSize = 32;
 static const int kSixDigitsToBufferSize = 16;

@@ -100,6 +92,16 @@ static const int kSixDigitsToBufferSize = 16;
 // Required buffer size is `kSixDigitsToBufferSize`.
 size_t SixDigitsToBuffer(double d, char* buffer);

+// These functions are intended for speed. All functions take an output buffer
+// as an argument and return a pointer to the last byte they wrote, which is the
+// terminating '\0'. At most `kFastToBufferSize` bytes are written.
+char* FastIntToBuffer(int32_t, char*);
+char* FastIntToBuffer(uint32_t, char*);
+char* FastIntToBuffer(int64_t, char*);
+char* FastIntToBuffer(uint64_t, char*);
+
+// For enums and integer types that are not an exact match for the types above,
+// use templates to call the appropriate one of the four overloads above.
 template <typename int_type>
 char* FastIntToBuffer(int_type i, char* buffer) {
   static_assert(sizeof(i) <= 64 / 8,
@@ -109,15 +111,15 @@ char* FastIntToBuffer(int_type i, char* buffer) {
   // If one day something like std::is_signed<enum E> works, switch to it.
   if (static_cast<int_type>(1) - 2 < 0) {  // Signed
     if (sizeof(i) > 32 / 8) {              // 33-bit to 64-bit
-      return numbers_internal::FastInt64ToBuffer(i, buffer);
+      return FastIntToBuffer(static_cast<int64_t>(i), buffer);
     } else {                               // 32-bit or less
-      return numbers_internal::FastInt32ToBuffer(i, buffer);
+      return FastIntToBuffer(static_cast<int32_t>(i), buffer);
     }
   } else {                                 // Unsigned
     if (sizeof(i) > 32 / 8) {              // 33-bit to 64-bit
-      return numbers_internal::FastUInt64ToBuffer(i, buffer);
+      return FastIntToBuffer(static_cast<uint64_t>(i), buffer);
     } else {                               // 32-bit or less
-      return numbers_internal::FastUInt32ToBuffer(i, buffer);
+      return FastIntToBuffer(static_cast<uint32_t>(i), buffer);
     }
   }
 }
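The template forwards any integral or unscoped enum argument to one of the four explicit overloads based on its size and signedness, so call sites no longer have to pick a width-specific function. A hedged usage sketch (the enum and values below are made up, and numbers_internal is an internal namespace):

#include <cstdio>
#include "absl/strings/numbers.h"

enum Status { kOk = 0, kFailed = 7 };  // hypothetical unscoped enum

int main() {
  char buf[absl::numbers_internal::kFastToBufferSize];

  short narrow = -42;  // no exact overload; template dispatches to int32_t
  absl::numbers_internal::FastIntToBuffer(narrow, buf);
  std::printf("%s\n", buf);  // prints -42

  absl::numbers_internal::FastIntToBuffer(kFailed, buf);  // enum -> int32_t
  std::printf("%s\n", buf);  // prints 7
  return 0;
}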

absl/strings/numbers_test.cc

@@ -110,13 +110,38 @@ TEST(ToString, PerfectDtoa) {
   }
 }

+template <typename integer>
+struct MyInteger {
+  integer i;
+  explicit constexpr MyInteger(integer i) : i(i) {}
+  constexpr operator integer() const { return i; }
+
+  constexpr MyInteger operator+(MyInteger other) const { return i + other.i; }
+  constexpr MyInteger operator-(MyInteger other) const { return i - other.i; }
+  constexpr MyInteger operator*(MyInteger other) const { return i * other.i; }
+  constexpr MyInteger operator/(MyInteger other) const { return i / other.i; }
+
+  constexpr bool operator<(MyInteger other) const { return i < other.i; }
+  constexpr bool operator<=(MyInteger other) const { return i <= other.i; }
+  constexpr bool operator==(MyInteger other) const { return i == other.i; }
+  constexpr bool operator>=(MyInteger other) const { return i >= other.i; }
+  constexpr bool operator>(MyInteger other) const { return i > other.i; }
+  constexpr bool operator!=(MyInteger other) const { return i != other.i; }
+
+  integer as_integer() const { return i; }
+};
+
+typedef MyInteger<int64_t> MyInt64;
+typedef MyInteger<uint64_t> MyUInt64;
+
 void CheckInt32(int32_t x) {
   char buffer[absl::numbers_internal::kFastToBufferSize];
-  char* actual = absl::numbers_internal::FastInt32ToBuffer(x, buffer);
+  char* actual = absl::numbers_internal::FastIntToBuffer(x, buffer);
   std::string expected = std::to_string(x);
-  ASSERT_TRUE(expected == std::string(buffer, actual))
-      << "Expected \"" << expected << "\", Actual \"" << actual << "\", Input "
-      << x;
+  EXPECT_EQ(expected, std::string(buffer, actual)) << " Input " << x;
+
+  char* generic_actual = absl::numbers_internal::FastIntToBuffer(x, buffer);
+  EXPECT_EQ(expected, std::string(buffer, generic_actual)) << " Input " << x;
 }

 void CheckInt64(int64_t x) {
@@ -124,40 +149,47 @@ void CheckInt64(int64_t x) {
   buffer[0] = '*';
   buffer[23] = '*';
   buffer[24] = '*';
-  char* actual = absl::numbers_internal::FastInt64ToBuffer(x, &buffer[1]);
+  char* actual = absl::numbers_internal::FastIntToBuffer(x, &buffer[1]);
   std::string expected = std::to_string(x);
-  ASSERT_TRUE(expected == std::string(&buffer[1], actual))
-      << "Expected \"" << expected << "\", Actual \"" << actual << "\", Input "
-      << x;
-  ASSERT_EQ(buffer[0], '*');
-  ASSERT_EQ(buffer[23], '*');
-  ASSERT_EQ(buffer[24], '*');
+  EXPECT_EQ(expected, std::string(&buffer[1], actual)) << " Input " << x;
+  EXPECT_EQ(buffer[0], '*');
+  EXPECT_EQ(buffer[23], '*');
+  EXPECT_EQ(buffer[24], '*');
+
+  char* my_actual =
+      absl::numbers_internal::FastIntToBuffer(MyInt64(x), &buffer[1]);
+  EXPECT_EQ(expected, std::string(&buffer[1], my_actual)) << " Input " << x;
 }

 void CheckUInt32(uint32_t x) {
   char buffer[absl::numbers_internal::kFastToBufferSize];
-  char* actual = absl::numbers_internal::FastUInt32ToBuffer(x, buffer);
+  char* actual = absl::numbers_internal::FastIntToBuffer(x, buffer);
   std::string expected = std::to_string(x);
-  ASSERT_TRUE(expected == std::string(buffer, actual))
-      << "Expected \"" << expected << "\", Actual \"" << actual << "\", Input "
-      << x;
+  EXPECT_EQ(expected, std::string(buffer, actual)) << " Input " << x;
+
+  char* generic_actual = absl::numbers_internal::FastIntToBuffer(x, buffer);
+  EXPECT_EQ(expected, std::string(buffer, generic_actual)) << " Input " << x;
 }

 void CheckUInt64(uint64_t x) {
   char buffer[absl::numbers_internal::kFastToBufferSize + 1];
-  char* actual = absl::numbers_internal::FastUInt64ToBuffer(x, &buffer[1]);
+  char* actual = absl::numbers_internal::FastIntToBuffer(x, &buffer[1]);
   std::string expected = std::to_string(x);
-  ASSERT_TRUE(expected == std::string(&buffer[1], actual))
-      << "Expected \"" << expected << "\", Actual \"" << actual << "\", Input "
-      << x;
+  EXPECT_EQ(expected, std::string(&buffer[1], actual)) << " Input " << x;
+
+  char* generic_actual = absl::numbers_internal::FastIntToBuffer(x, &buffer[1]);
+  EXPECT_EQ(expected, std::string(&buffer[1], generic_actual)) << " Input " << x;
+
+  char* my_actual =
+      absl::numbers_internal::FastIntToBuffer(MyUInt64(x), &buffer[1]);
+  EXPECT_EQ(expected, std::string(&buffer[1], my_actual)) << " Input " << x;
 }

 void CheckHex64(uint64_t v) {
   char expected[16 + 1];
   std::string actual = absl::StrCat(absl::Hex(v, absl::kZeroPad16));
   snprintf(expected, sizeof(expected), "%016" PRIx64, static_cast<uint64_t>(v));
-  ASSERT_TRUE(expected == actual)
-      << "Expected \"" << expected << "\", Actual \"" << actual << "\"";
+  EXPECT_EQ(expected, actual) << " Input " << v;
 }

 TEST(Numbers, TestFastPrints) {
