Auto-generate files after cl/712751905

pull/19886/head
Protobuf Team Bot 2 months ago
parent 301275dea4
commit c197570cc1
1. php/ext/google/protobuf/php-upb.c (94)
2. php/ext/google/protobuf/php-upb.h (37)
3. ruby/ext/google/protobuf_c/ruby-upb.c (94)
4. ruby/ext/google/protobuf_c/ruby-upb.h (37)

@@ -326,8 +326,14 @@ Error, UINTPTR_MAX is undefined
#else
#define UPB_CLANG_ASAN 0
#endif
#if __has_feature(thread_sanitizer)
#define UPB_CLANG_TSAN 1
#else
#define UPB_CLANG_TSAN 0
#endif
#else
#define UPB_CLANG_ASAN 0
#define UPB_CLANG_TSAN 0
#endif
#if defined(__SANITIZE_ADDRESS__) || UPB_CLANG_ASAN
@@ -354,6 +360,21 @@ void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
((void)(addr), (void)(size))
#endif
#if defined(__SANITIZE_THREAD__) || UPB_CLANG_TSAN
#define UPB_TSAN_PUBLISHED_MEMBER uintptr_t upb_tsan_safely_published;
#define UPB_TSAN_INIT_PUBLISHED(ptr) (ptr)->upb_tsan_safely_published = 0x5AFE
#define UPB_TSAN_CHECK_PUBLISHED(ptr) \
UPB_ASSERT((ptr)->upb_tsan_safely_published == 0x5AFE)
#define UPB_TSAN_PUBLISH 1
#else
#define UPB_TSAN_PUBLISHED_MEMBER
#define UPB_TSAN_INIT_PUBLISHED(ptr)
#define UPB_TSAN_CHECK_PUBLISHED(ptr) \
do { \
} while (false && (ptr))
#define UPB_TSAN_PUBLISH 0
#endif
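
The macros above implement a lightweight publication check: the marker member is the last field written during initialization, and every site that expects to receive a safely published pointer asserts that the marker still holds 0x5AFE. Under TSAN, reading the marker without a proper release/acquire edge is itself a data race, so a missing publication becomes a sanitizer report instead of silent corruption. A minimal sketch of the pattern in plain C, using standard assert() in place of UPB_ASSERT; the struct and helper names are illustrative, not upb API:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

typedef struct node {
  int payload;
  uintptr_t safely_published;  /* plays the role of UPB_TSAN_PUBLISHED_MEMBER */
} node;

static node* node_create(void) {
  node* n = malloc(sizeof(node));
  n->payload = 42;
  n->safely_published = 0x5AFE;  /* UPB_TSAN_INIT_PUBLISHED: last init write */
  return n;
}

static int node_use(node* n) {
  /* UPB_TSAN_CHECK_PUBLISHED: under TSAN this read races with the init
   * write above unless the pointer was handed over with release/acquire. */
  assert(n->safely_published == 0x5AFE);
  return n->payload;
}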
/* Disable proto2 arena behavior (TEMPORARY) **********************************/
#ifdef UPB_DISABLE_CLOSED_ENUM_CHECKING
@@ -2944,6 +2965,8 @@ typedef struct upb_ArenaInternal {
// Total space allocated in blocks, atomic only for SpaceAllocated
UPB_ATOMIC(size_t) space_allocated;
UPB_TSAN_PUBLISHED_MEMBER
} upb_ArenaInternal;
// All public + private state for an arena.
@@ -3047,6 +3070,7 @@ static upb_ArenaRoot _upb_Arena_FindRoot(const upb_Arena* a) {
uintptr_t poc = upb_Atomic_Load(&ai->parent_or_count, memory_order_acquire);
while (_upb_Arena_IsTaggedPointer(poc)) {
upb_ArenaInternal* next = _upb_Arena_PointerFromTagged(poc);
UPB_TSAN_CHECK_PUBLISHED(next);
UPB_ASSERT(ai != next);
uintptr_t next_poc =
upb_Atomic_Load(&next->parent_or_count, memory_order_acquire);
@@ -3057,22 +3081,8 @@ static upb_ArenaRoot _upb_Arena_FindRoot(const upb_Arena* a) {
//
// Path splitting keeps time complexity down, see:
// https://en.wikipedia.org/wiki/Disjoint-set_data_structure
//
// We can safely use a relaxed atomic here because all threads doing this
// will converge on the same value and we don't need memory orderings to
// be visible.
//
// This is true because:
// - If no fuses occur, this will eventually become the root.
// - If fuses are actively occurring, the root may change, but the
// invariant is that `parent_or_count` merely points to *a* parent.
//
// In other words, it is moving towards "the" root, and that root may move
// further away over time, but the path towards that root will continue to
// be valid and the creation of the path carries all the memory orderings
// required.
UPB_ASSERT(ai != _upb_Arena_PointerFromTagged(next_poc));
upb_Atomic_Store(&ai->parent_or_count, next_poc, memory_order_relaxed);
upb_Atomic_Store(&ai->parent_or_count, next_poc, memory_order_release);
}
ai = next;
poc = next_poc;
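
For context on why the store above moved from relaxed to release: other threads walk this same chain, loading parent_or_count with acquire and then dereferencing the resulting pointer, so the store that publishes a shortened parent link must carry release semantics. A self-contained C11 illustration of that pairing; not upb code, and the names are invented:

#include <stdatomic.h>
#include <stdint.h>

typedef struct stub {
  _Atomic(uintptr_t) parent_or_count;  /* assume initialized to 0 */
  int state;  /* stand-in for everything written before publication */
} stub;

static void publish_parent(stub* child, stub* parent) {
  parent->state = 1;  /* plain write that readers of the link must see */
  /* release: orders the write to state before the pointer becomes visible */
  atomic_store_explicit(&child->parent_or_count, (uintptr_t)parent,
                        memory_order_release);
}

static int follow_parent(stub* child) {
  uintptr_t poc =
      atomic_load_explicit(&child->parent_or_count, memory_order_acquire);
  stub* parent = (stub*)poc;
  return parent ? parent->state : 0;  /* reads 1 whenever parent != NULL */
}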
@@ -3086,8 +3096,11 @@ size_t upb_Arena_SpaceAllocated(upb_Arena* arena, size_t* fused_count) {
size_t local_fused_count = 0;
while (ai != NULL) {
UPB_TSAN_CHECK_PUBLISHED(ai);
// Relaxed is safe - no subsequent reads depend on this one.
memsize += upb_Atomic_Load(&ai->space_allocated, memory_order_relaxed);
ai = upb_Atomic_Load(&ai->next, memory_order_relaxed);
ai = upb_Atomic_Load(&ai->next, memory_order_acquire);
local_fused_count++;
}
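
The traversal above mixes orderings deliberately: next is loaded with acquire so that the node it reveals is fully constructed before we touch it, while space_allocated stays relaxed because no other memory depends on the counter's value. Reduced to a sketch in plain C11 atomics (illustrative types, not upb's):

#include <stdatomic.h>
#include <stddef.h>

typedef struct link {
  _Atomic(size_t) space;
  _Atomic(struct link*) next;
} link;

static size_t total_space(link* head) {
  size_t sum = 0;
  for (link* l = head; l != NULL;
       l = atomic_load_explicit(&l->next, memory_order_acquire)) {
    /* relaxed: the sum is a statistic, nothing is ordered against it */
    sum += atomic_load_explicit(&l->space, memory_order_relaxed);
  }
  return sum;
}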
@@ -3097,11 +3110,11 @@ size_t upb_Arena_SpaceAllocated(upb_Arena* arena, size_t* fused_count) {
uint32_t upb_Arena_DebugRefCount(upb_Arena* a) {
upb_ArenaInternal* ai = upb_Arena_Internal(a);
// These loads could probably be relaxed, but given that this is debug-only,
// it's not worth introducing a new variant for it.
UPB_TSAN_CHECK_PUBLISHED(ai);
uintptr_t poc = upb_Atomic_Load(&ai->parent_or_count, memory_order_acquire);
while (_upb_Arena_IsTaggedPointer(poc)) {
ai = _upb_Arena_PointerFromTagged(poc);
UPB_TSAN_CHECK_PUBLISHED(ai);
poc = upb_Atomic_Load(&ai->parent_or_count, memory_order_acquire);
}
return _upb_Arena_RefCountFromTagged(poc);
@@ -3150,7 +3163,8 @@ static bool _upb_Arena_AllocBlock(upb_Arena* a, size_t size) {
_upb_Arena_AddBlock(a, block, block_size);
// Atomic add not required here, as threads won't race allocating blocks, plus
// atomic fetch-add is slower than load/add/store on arm devices compiled
// targeting pre-v8.1.
// targeting pre-v8.1. Relaxed order is safe, as nothing depends on the order
// of the size accounting.
size_t old_space_allocated =
upb_Atomic_Load(&ai->space_allocated, memory_order_relaxed);
upb_Atomic_Store(&ai->space_allocated, old_space_allocated + block_size,
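
The pattern above relies on space_allocated having a single writer: only the thread that owns this arena allocates blocks for it, so a relaxed load/add/store cannot lose an update, and it avoids the atomic read-modify-write that compiles to an LL/SC loop on ARM cores without v8.1 LSE atomics. As a sketch, assuming the single-writer invariant holds (names are illustrative):

#include <stdatomic.h>
#include <stddef.h>

/* Safe only if no other thread ever writes *space concurrently; racing
 * readers may observe either the old or the new value, which is fine here. */
static void add_space_single_writer(_Atomic(size_t)* space, size_t delta) {
  size_t old = atomic_load_explicit(space, memory_order_relaxed);
  atomic_store_explicit(space, old + delta, memory_order_relaxed);
}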
@@ -3186,6 +3200,7 @@ static upb_Arena* _upb_Arena_InitSlow(upb_alloc* alloc) {
upb_Atomic_Init(&a->body.space_allocated, n);
a->body.blocks = NULL;
a->body.upb_alloc_cleanup = NULL;
UPB_TSAN_INIT_PUBLISHED(&a->body);
_upb_Arena_AddBlock(&a->head, mem, n);
@@ -3226,10 +3241,10 @@ upb_Arena* upb_Arena_Init(void* mem, size_t n, upb_alloc* alloc) {
upb_Atomic_Init(&a->body.space_allocated, 0);
a->body.blocks = NULL;
a->body.upb_alloc_cleanup = NULL;
a->body.block_alloc = _upb_Arena_MakeBlockAlloc(alloc, 1);
a->head.UPB_PRIVATE(ptr) = mem;
a->head.UPB_PRIVATE(end) = UPB_PTR_AT(mem, n - sizeof(upb_ArenaState), char);
UPB_TSAN_INIT_PUBLISHED(&a->body);
#ifdef UPB_TRACING_ENABLED
upb_Arena_LogInit(&a->head, n);
#endif
@@ -3239,9 +3254,15 @@ upb_Arena* upb_Arena_Init(void* mem, size_t n, upb_alloc* alloc) {
static void _upb_Arena_DoFree(upb_ArenaInternal* ai) {
UPB_ASSERT(_upb_Arena_RefCountFromTagged(ai->parent_or_count) == 1);
while (ai != NULL) {
UPB_TSAN_CHECK_PUBLISHED(ai);
// Load first since arena itself is likely from one of its blocks.
upb_ArenaInternal* next_arena =
(upb_ArenaInternal*)upb_Atomic_Load(&ai->next, memory_order_acquire);
// Freeing may have memory barriers that confuse tsan, so assert immediately
// after the load here.
if (next_arena) {
UPB_TSAN_CHECK_PUBLISHED(next_arena);
}
upb_alloc* block_alloc = _upb_ArenaInternal_BlockAlloc(ai);
upb_MemBlock* block = ai->blocks;
upb_AllocCleanupFunc* alloc_cleanup = *ai->upb_alloc_cleanup;
@@ -3264,6 +3285,7 @@ void upb_Arena_Free(upb_Arena* a) {
retry:
while (_upb_Arena_IsTaggedPointer(poc)) {
ai = _upb_Arena_PointerFromTagged(poc);
UPB_TSAN_CHECK_PUBLISHED(ai);
poc = upb_Atomic_Load(&ai->parent_or_count, memory_order_acquire);
}
@@ -3293,29 +3315,33 @@ retry:
static void _upb_Arena_DoFuseArenaLists(upb_ArenaInternal* const parent,
upb_ArenaInternal* child) {
UPB_TSAN_CHECK_PUBLISHED(parent);
upb_ArenaInternal* parent_tail =
upb_Atomic_Load(&parent->tail, memory_order_relaxed);
upb_Atomic_Load(&parent->tail, memory_order_acquire);
do {
UPB_TSAN_CHECK_PUBLISHED(parent_tail);
// Our tail might be stale, but it will always converge to the true tail.
upb_ArenaInternal* parent_tail_next =
upb_Atomic_Load(&parent_tail->next, memory_order_relaxed);
upb_Atomic_Load(&parent_tail->next, memory_order_acquire);
while (parent_tail_next != NULL) {
parent_tail = parent_tail_next;
UPB_TSAN_CHECK_PUBLISHED(parent_tail);
parent_tail_next =
upb_Atomic_Load(&parent_tail->next, memory_order_relaxed);
upb_Atomic_Load(&parent_tail->next, memory_order_acquire);
}
UPB_TSAN_CHECK_PUBLISHED(child);
upb_ArenaInternal* displaced =
upb_Atomic_Exchange(&parent_tail->next, child, memory_order_relaxed);
parent_tail = upb_Atomic_Load(&child->tail, memory_order_relaxed);
upb_Atomic_Exchange(&parent_tail->next, child, memory_order_acq_rel);
parent_tail = upb_Atomic_Load(&child->tail, memory_order_acquire);
// If we displaced something that got installed racily, we can simply
// reinstall it on our new tail.
child = displaced;
} while (child != NULL);
upb_Atomic_Store(&parent->tail, parent_tail, memory_order_relaxed);
upb_Atomic_Store(&parent->tail, parent_tail, memory_order_release);
}
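
The function above is a small lock-free append: a fuser may install its child list at a tail pointer that has since gone stale, but the atomic exchange guarantees it learns exactly what it displaced, and the displaced sublist is simply re-appended at the new tail, so nothing is ever lost. The same loop reduced to a self-contained C11 sketch (invented names, not upb code):

#include <stdatomic.h>
#include <stddef.h>

typedef struct anode {
  _Atomic(struct anode*) next;
} anode;

static void append_list(anode* tail, anode* child) {
  while (child != NULL) {
    /* Walk to the current true tail; our starting point may be stale. */
    anode* next;
    while ((next = atomic_load_explicit(&tail->next,
                                        memory_order_acquire)) != NULL) {
      tail = next;
    }
    /* Try to install child; if a racer beat us, child now holds whatever
     * we displaced, and the outer loop re-appends it further down. */
    child = atomic_exchange_explicit(&tail->next, child,
                                     memory_order_acq_rel);
  }
}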
void upb_Arena_SetAllocCleanup(upb_Arena* a, upb_AllocCleanupFunc* func) {
@@ -3387,11 +3413,23 @@ static upb_ArenaInternal* _upb_Arena_DoFuse(const upb_Arena* a1,
static bool _upb_Arena_FixupRefs(upb_ArenaInternal* new_root,
uintptr_t ref_delta) {
if (ref_delta == 0) return true; // No fixup required.
// Relaxed order is safe here: if the value is a pointer, we don't dereference
// it or publish it anywhere else. The refcount does provide memory ordering
// between allocations on arenas and the eventual free, and thus normally
// requires acquire/release; but in this case any edges provided by the refs
// we are cleaning up were already provided by the fuse operation itself. It's
// not valid for a decrement that could cause the overall fused arena to reach
// a zero refcount to race with this function, as that could result in a
// use-after-free anyway.
uintptr_t poc =
upb_Atomic_Load(&new_root->parent_or_count, memory_order_relaxed);
if (_upb_Arena_IsTaggedPointer(poc)) return false;
uintptr_t with_refs = poc - ref_delta;
UPB_ASSERT(!_upb_Arena_IsTaggedPointer(with_refs));
// Relaxed order on success is safe here, for the same reasons as the relaxed
// read above. Relaxed order is safe on failure because the updated value is
// stored in a local variable which goes immediately out of scope; the retry
// loop will reread what it needs with proper memory order.
return upb_Atomic_CompareExchangeStrong(&new_root->parent_or_count, &poc,
with_refs, memory_order_relaxed,
memory_order_relaxed);
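
The compare-exchange above follows the usual C11 shape: on failure the observed value is written back into poc, but this caller simply returns false and lets the outer retry loop re-read with the ordering it needs, which is why relaxed suffices on both the success and failure paths. A minimal sketch of that shape; the low-bit tag test is a stand-in for illustration, not upb's actual pointer encoding:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static bool try_sub_refs(_Atomic(uintptr_t)* poc_atomic, uintptr_t delta) {
  uintptr_t poc = atomic_load_explicit(poc_atomic, memory_order_relaxed);
  if (poc & 1) return false;  /* hypothetical "is tagged pointer" test */
  return atomic_compare_exchange_strong_explicit(
      poc_atomic, &poc, poc - delta,
      memory_order_relaxed, memory_order_relaxed);
}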
@@ -17390,6 +17428,10 @@ upb_ServiceDef* _upb_ServiceDefs_New(upb_DefBuilder* ctx, int n,
#undef UPB_ASAN
#undef UPB_ASAN_GUARD_SIZE
#undef UPB_CLANG_ASAN
#undef UPB_TSAN_PUBLISHED_MEMBER
#undef UPB_TSAN_INIT_PUBLISHED
#undef UPB_TSAN_CHECK_PUBLISHED
#undef UPB_TSAN_PUBLISH
#undef UPB_TREAT_CLOSED_ENUMS_LIKE_OPEN
#undef UPB_DEPRECATED
#undef UPB_GNUC_MIN

@@ -325,8 +325,14 @@ Error, UINTPTR_MAX is undefined
#else
#define UPB_CLANG_ASAN 0
#endif
#if __has_feature(thread_sanitizer)
#define UPB_CLANG_TSAN 1
#else
#define UPB_CLANG_TSAN 0
#endif
#else
#define UPB_CLANG_ASAN 0
#define UPB_CLANG_TSAN 0
#endif
#if defined(__SANITIZE_ADDRESS__) || UPB_CLANG_ASAN
@@ -353,6 +359,21 @@ void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
((void)(addr), (void)(size))
#endif
#if defined(__SANITIZE_THREAD__) || UPB_CLANG_TSAN
#define UPB_TSAN_PUBLISHED_MEMBER uintptr_t upb_tsan_safely_published;
#define UPB_TSAN_INIT_PUBLISHED(ptr) (ptr)->upb_tsan_safely_published = 0x5AFE
#define UPB_TSAN_CHECK_PUBLISHED(ptr) \
UPB_ASSERT((ptr)->upb_tsan_safely_published == 0x5AFE)
#define UPB_TSAN_PUBLISH 1
#else
#define UPB_TSAN_PUBLISHED_MEMBER
#define UPB_TSAN_INIT_PUBLISHED(ptr)
#define UPB_TSAN_CHECK_PUBLISHED(ptr) \
do { \
} while (false && (ptr))
#define UPB_TSAN_PUBLISH 0
#endif
/* Disable proto2 arena behavior (TEMPORARY) **********************************/
#ifdef UPB_DISABLE_CLOSED_ENUM_CHECKING
@@ -623,7 +644,7 @@ UPB_INLINE void upb_gfree(void* ptr) { upb_free(&upb_alloc_global, ptr); }
//
// We need this because the decoder inlines a upb_Arena for performance but
// the full struct is not visible outside of arena.c. Yes, I know, it's awful.
#define UPB_ARENA_SIZE_HACK 9
#define UPB_ARENA_SIZE_HACK (9 + UPB_TSAN_PUBLISH)
// LINT.IfChange(upb_Arena)
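
Because the full arena struct is visible only inside arena.c, this header mirrors its size as a bare word count, and the TSAN marker member added above must be reflected here too, hence 9 + UPB_TSAN_PUBLISH. A hedged sketch of the kind of compile-time guard that keeps such a duplicated constant honest; the stub struct and the pointer-sized-word assumption are invented for illustration and may not match upb's real check:

#include <stdint.h>

/* Stand-in for the real (hidden) upb_ArenaState layout. */
typedef struct { uintptr_t words[9 + UPB_TSAN_PUBLISH]; } arena_state_stub;

_Static_assert((9 + UPB_TSAN_PUBLISH) * sizeof(uintptr_t) >=
                   sizeof(arena_state_stub),
               "size constant must track the hidden struct layout");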
@@ -732,15 +753,25 @@ UPB_API void upb_Arena_Free(upb_Arena* a);
// freed.
UPB_API void upb_Arena_SetAllocCleanup(upb_Arena* a,
upb_AllocCleanupFunc* func);
// Fuses the lifetimes of two arenas, such that no arenas that have been
// transitively fused together will be freed until all of them have reached a
// zero refcount. This operation is safe to use concurrently from multiple
// threads.
UPB_API bool upb_Arena_Fuse(const upb_Arena* a, const upb_Arena* b);
// This operation is safe to use concurrently from multiple threads.
UPB_API bool upb_Arena_IsFused(const upb_Arena* a, const upb_Arena* b);
// Returns the upb_alloc used by the arena.
UPB_API upb_alloc* upb_Arena_GetUpbAlloc(upb_Arena* a);
// This operation is safe to use concurrently from multiple threads.
bool upb_Arena_IncRefFor(const upb_Arena* a, const void* owner);
// This operation is safe to use concurrently from multiple threads.
void upb_Arena_DecRefFor(const upb_Arena* a, const void* owner);
// This operation is safe to use concurrently from multiple threads.
size_t upb_Arena_SpaceAllocated(upb_Arena* a, size_t* fused_count);
uint32_t upb_Arena_DebugRefCount(upb_Arena* a);
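
A short usage sketch for the fuse API documented above. upb_Arena_New, upb_Arena_Fuse, and upb_Arena_Free are real upb entry points; the include path and the failure handling are assumptions that may vary by version:

#include "upb/mem/arena.h"

static void fuse_example(void) {
  upb_Arena* a = upb_Arena_New();
  upb_Arena* b = upb_Arena_New();
  if (upb_Arena_Fuse(a, b)) {
    /* Lifetimes are now joined: memory allocated from `a` remains valid
     * after this free, because `b` is still alive. */
    upb_Arena_Free(a);
    upb_Arena_Free(b);  /* last reference drops; everything is reclaimed */
  } else {
    /* Fuse failed; each arena keeps its own independent lifetime. */
    upb_Arena_Free(a);
    upb_Arena_Free(b);
  }
}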
@@ -15782,6 +15813,10 @@ upb_MethodDef* _upb_MethodDefs_New(upb_DefBuilder* ctx, int n,
#undef UPB_ASAN
#undef UPB_ASAN_GUARD_SIZE
#undef UPB_CLANG_ASAN
#undef UPB_TSAN_PUBLISHED_MEMBER
#undef UPB_TSAN_INIT_PUBLISHED
#undef UPB_TSAN_CHECK_PUBLISHED
#undef UPB_TSAN_PUBLISH
#undef UPB_TREAT_CLOSED_ENUMS_LIKE_OPEN
#undef UPB_DEPRECATED
#undef UPB_GNUC_MIN

@@ -326,8 +326,14 @@ Error, UINTPTR_MAX is undefined
#else
#define UPB_CLANG_ASAN 0
#endif
#if __has_feature(thread_sanitizer)
#define UPB_CLANG_TSAN 1
#else
#define UPB_CLANG_TSAN 0
#endif
#else
#define UPB_CLANG_ASAN 0
#define UPB_CLANG_TSAN 0
#endif
#if defined(__SANITIZE_ADDRESS__) || UPB_CLANG_ASAN
@@ -354,6 +360,21 @@ void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
((void)(addr), (void)(size))
#endif
#if defined(__SANITIZE_THREAD__) || UPB_CLANG_TSAN
#define UPB_TSAN_PUBLISHED_MEMBER uintptr_t upb_tsan_safely_published;
#define UPB_TSAN_INIT_PUBLISHED(ptr) (ptr)->upb_tsan_safely_published = 0x5AFE
#define UPB_TSAN_CHECK_PUBLISHED(ptr) \
UPB_ASSERT((ptr)->upb_tsan_safely_published == 0x5AFE)
#define UPB_TSAN_PUBLISH 1
#else
#define UPB_TSAN_PUBLISHED_MEMBER
#define UPB_TSAN_INIT_PUBLISHED(ptr)
#define UPB_TSAN_CHECK_PUBLISHED(ptr) \
do { \
} while (false && (ptr))
#define UPB_TSAN_PUBLISH 0
#endif
/* Disable proto2 arena behavior (TEMPORARY) **********************************/
#ifdef UPB_DISABLE_CLOSED_ENUM_CHECKING
@@ -2944,6 +2965,8 @@ typedef struct upb_ArenaInternal {
// Total space allocated in blocks, atomic only for SpaceAllocated
UPB_ATOMIC(size_t) space_allocated;
UPB_TSAN_PUBLISHED_MEMBER
} upb_ArenaInternal;
// All public + private state for an arena.
@@ -3047,6 +3070,7 @@ static upb_ArenaRoot _upb_Arena_FindRoot(const upb_Arena* a) {
uintptr_t poc = upb_Atomic_Load(&ai->parent_or_count, memory_order_acquire);
while (_upb_Arena_IsTaggedPointer(poc)) {
upb_ArenaInternal* next = _upb_Arena_PointerFromTagged(poc);
UPB_TSAN_CHECK_PUBLISHED(next);
UPB_ASSERT(ai != next);
uintptr_t next_poc =
upb_Atomic_Load(&next->parent_or_count, memory_order_acquire);
@@ -3057,22 +3081,8 @@ static upb_ArenaRoot _upb_Arena_FindRoot(const upb_Arena* a) {
//
// Path splitting keeps time complexity down, see:
// https://en.wikipedia.org/wiki/Disjoint-set_data_structure
//
// We can safely use a relaxed atomic here because all threads doing this
// will converge on the same value and we don't need memory orderings to
// be visible.
//
// This is true because:
// - If no fuses occur, this will eventually become the root.
// - If fuses are actively occurring, the root may change, but the
// invariant is that `parent_or_count` merely points to *a* parent.
//
// In other words, it is moving towards "the" root, and that root may move
// further away over time, but the path towards that root will continue to
// be valid and the creation of the path carries all the memory orderings
// required.
UPB_ASSERT(ai != _upb_Arena_PointerFromTagged(next_poc));
upb_Atomic_Store(&ai->parent_or_count, next_poc, memory_order_relaxed);
upb_Atomic_Store(&ai->parent_or_count, next_poc, memory_order_release);
}
ai = next;
poc = next_poc;
@@ -3086,8 +3096,11 @@ size_t upb_Arena_SpaceAllocated(upb_Arena* arena, size_t* fused_count) {
size_t local_fused_count = 0;
while (ai != NULL) {
UPB_TSAN_CHECK_PUBLISHED(ai);
// Relaxed is safe - no subsequent reads depend on this one.
memsize += upb_Atomic_Load(&ai->space_allocated, memory_order_relaxed);
ai = upb_Atomic_Load(&ai->next, memory_order_relaxed);
ai = upb_Atomic_Load(&ai->next, memory_order_acquire);
local_fused_count++;
}
@@ -3097,11 +3110,11 @@ size_t upb_Arena_SpaceAllocated(upb_Arena* arena, size_t* fused_count) {
uint32_t upb_Arena_DebugRefCount(upb_Arena* a) {
upb_ArenaInternal* ai = upb_Arena_Internal(a);
// These loads could probably be relaxed, but given that this is debug-only,
// it's not worth introducing a new variant for it.
UPB_TSAN_CHECK_PUBLISHED(ai);
uintptr_t poc = upb_Atomic_Load(&ai->parent_or_count, memory_order_acquire);
while (_upb_Arena_IsTaggedPointer(poc)) {
ai = _upb_Arena_PointerFromTagged(poc);
UPB_TSAN_CHECK_PUBLISHED(ai);
poc = upb_Atomic_Load(&ai->parent_or_count, memory_order_acquire);
}
return _upb_Arena_RefCountFromTagged(poc);
@@ -3150,7 +3163,8 @@ static bool _upb_Arena_AllocBlock(upb_Arena* a, size_t size) {
_upb_Arena_AddBlock(a, block, block_size);
// Atomic add not required here, as threads won't race allocating blocks, plus
// atomic fetch-add is slower than load/add/store on arm devices compiled
// targeting pre-v8.1.
// targeting pre-v8.1. Relaxed order is safe, as nothing depends on the order
// of the size accounting.
size_t old_space_allocated =
upb_Atomic_Load(&ai->space_allocated, memory_order_relaxed);
upb_Atomic_Store(&ai->space_allocated, old_space_allocated + block_size,
@@ -3186,6 +3200,7 @@ static upb_Arena* _upb_Arena_InitSlow(upb_alloc* alloc) {
upb_Atomic_Init(&a->body.space_allocated, n);
a->body.blocks = NULL;
a->body.upb_alloc_cleanup = NULL;
UPB_TSAN_INIT_PUBLISHED(&a->body);
_upb_Arena_AddBlock(&a->head, mem, n);
@@ -3226,10 +3241,10 @@ upb_Arena* upb_Arena_Init(void* mem, size_t n, upb_alloc* alloc) {
upb_Atomic_Init(&a->body.space_allocated, 0);
a->body.blocks = NULL;
a->body.upb_alloc_cleanup = NULL;
a->body.block_alloc = _upb_Arena_MakeBlockAlloc(alloc, 1);
a->head.UPB_PRIVATE(ptr) = mem;
a->head.UPB_PRIVATE(end) = UPB_PTR_AT(mem, n - sizeof(upb_ArenaState), char);
UPB_TSAN_INIT_PUBLISHED(&a->body);
#ifdef UPB_TRACING_ENABLED
upb_Arena_LogInit(&a->head, n);
#endif
@@ -3239,9 +3254,15 @@ upb_Arena* upb_Arena_Init(void* mem, size_t n, upb_alloc* alloc) {
static void _upb_Arena_DoFree(upb_ArenaInternal* ai) {
UPB_ASSERT(_upb_Arena_RefCountFromTagged(ai->parent_or_count) == 1);
while (ai != NULL) {
UPB_TSAN_CHECK_PUBLISHED(ai);
// Load first since arena itself is likely from one of its blocks.
upb_ArenaInternal* next_arena =
(upb_ArenaInternal*)upb_Atomic_Load(&ai->next, memory_order_acquire);
// Freeing may have memory barriers that confuse tsan, so assert immediately
// after the load here.
if (next_arena) {
UPB_TSAN_CHECK_PUBLISHED(next_arena);
}
upb_alloc* block_alloc = _upb_ArenaInternal_BlockAlloc(ai);
upb_MemBlock* block = ai->blocks;
upb_AllocCleanupFunc* alloc_cleanup = *ai->upb_alloc_cleanup;
@@ -3264,6 +3285,7 @@ void upb_Arena_Free(upb_Arena* a) {
retry:
while (_upb_Arena_IsTaggedPointer(poc)) {
ai = _upb_Arena_PointerFromTagged(poc);
UPB_TSAN_CHECK_PUBLISHED(ai);
poc = upb_Atomic_Load(&ai->parent_or_count, memory_order_acquire);
}
@@ -3293,29 +3315,33 @@ retry:
static void _upb_Arena_DoFuseArenaLists(upb_ArenaInternal* const parent,
upb_ArenaInternal* child) {
UPB_TSAN_CHECK_PUBLISHED(parent);
upb_ArenaInternal* parent_tail =
upb_Atomic_Load(&parent->tail, memory_order_relaxed);
upb_Atomic_Load(&parent->tail, memory_order_acquire);
do {
UPB_TSAN_CHECK_PUBLISHED(parent_tail);
// Our tail might be stale, but it will always converge to the true tail.
upb_ArenaInternal* parent_tail_next =
upb_Atomic_Load(&parent_tail->next, memory_order_relaxed);
upb_Atomic_Load(&parent_tail->next, memory_order_acquire);
while (parent_tail_next != NULL) {
parent_tail = parent_tail_next;
UPB_TSAN_CHECK_PUBLISHED(parent_tail);
parent_tail_next =
upb_Atomic_Load(&parent_tail->next, memory_order_relaxed);
upb_Atomic_Load(&parent_tail->next, memory_order_acquire);
}
UPB_TSAN_CHECK_PUBLISHED(child);
upb_ArenaInternal* displaced =
upb_Atomic_Exchange(&parent_tail->next, child, memory_order_relaxed);
parent_tail = upb_Atomic_Load(&child->tail, memory_order_relaxed);
upb_Atomic_Exchange(&parent_tail->next, child, memory_order_acq_rel);
parent_tail = upb_Atomic_Load(&child->tail, memory_order_acquire);
// If we displaced something that got installed racily, we can simply
// reinstall it on our new tail.
child = displaced;
} while (child != NULL);
upb_Atomic_Store(&parent->tail, parent_tail, memory_order_relaxed);
upb_Atomic_Store(&parent->tail, parent_tail, memory_order_release);
}
void upb_Arena_SetAllocCleanup(upb_Arena* a, upb_AllocCleanupFunc* func) {
@@ -3387,11 +3413,23 @@ static upb_ArenaInternal* _upb_Arena_DoFuse(const upb_Arena* a1,
static bool _upb_Arena_FixupRefs(upb_ArenaInternal* new_root,
uintptr_t ref_delta) {
if (ref_delta == 0) return true; // No fixup required.
// Relaxed order is safe here: if the value is a pointer, we don't dereference
// it or publish it anywhere else. The refcount does provide memory ordering
// between allocations on arenas and the eventual free, and thus normally
// requires acquire/release; but in this case any edges provided by the refs
// we are cleaning up were already provided by the fuse operation itself. It's
// not valid for a decrement that could cause the overall fused arena to reach
// a zero refcount to race with this function, as that could result in a
// use-after-free anyway.
uintptr_t poc =
upb_Atomic_Load(&new_root->parent_or_count, memory_order_relaxed);
if (_upb_Arena_IsTaggedPointer(poc)) return false;
uintptr_t with_refs = poc - ref_delta;
UPB_ASSERT(!_upb_Arena_IsTaggedPointer(with_refs));
// Relaxed order on success is safe here, for the same reasons as the relaxed
// read above. Relaxed order is safe on failure because the updated value is
// stored in a local variable which goes immediately out of scope; the retry
// loop will reread what it needs with proper memory order.
return upb_Atomic_CompareExchangeStrong(&new_root->parent_or_count, &poc,
with_refs, memory_order_relaxed,
memory_order_relaxed);
@@ -16877,6 +16915,10 @@ upb_ServiceDef* _upb_ServiceDefs_New(upb_DefBuilder* ctx, int n,
#undef UPB_ASAN
#undef UPB_ASAN_GUARD_SIZE
#undef UPB_CLANG_ASAN
#undef UPB_TSAN_PUBLISHED_MEMBER
#undef UPB_TSAN_INIT_PUBLISHED
#undef UPB_TSAN_CHECK_PUBLISHED
#undef UPB_TSAN_PUBLISH
#undef UPB_TREAT_CLOSED_ENUMS_LIKE_OPEN
#undef UPB_DEPRECATED
#undef UPB_GNUC_MIN

@@ -327,8 +327,14 @@ Error, UINTPTR_MAX is undefined
#else
#define UPB_CLANG_ASAN 0
#endif
#if __has_feature(thread_sanitizer)
#define UPB_CLANG_TSAN 1
#else
#define UPB_CLANG_TSAN 0
#endif
#else
#define UPB_CLANG_ASAN 0
#define UPB_CLANG_TSAN 0
#endif
#if defined(__SANITIZE_ADDRESS__) || UPB_CLANG_ASAN
@@ -355,6 +361,21 @@ void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
((void)(addr), (void)(size))
#endif
#if defined(__SANITIZE_THREAD__) || UPB_CLANG_TSAN
#define UPB_TSAN_PUBLISHED_MEMBER uintptr_t upb_tsan_safely_published;
#define UPB_TSAN_INIT_PUBLISHED(ptr) (ptr)->upb_tsan_safely_published = 0x5AFE
#define UPB_TSAN_CHECK_PUBLISHED(ptr) \
UPB_ASSERT((ptr)->upb_tsan_safely_published == 0x5AFE)
#define UPB_TSAN_PUBLISH 1
#else
#define UPB_TSAN_PUBLISHED_MEMBER
#define UPB_TSAN_INIT_PUBLISHED(ptr)
#define UPB_TSAN_CHECK_PUBLISHED(ptr) \
do { \
} while (false && (ptr))
#define UPB_TSAN_PUBLISH 0
#endif
/* Disable proto2 arena behavior (TEMPORARY) **********************************/
#ifdef UPB_DISABLE_CLOSED_ENUM_CHECKING
@@ -625,7 +646,7 @@ UPB_INLINE void upb_gfree(void* ptr) { upb_free(&upb_alloc_global, ptr); }
//
// We need this because the decoder inlines a upb_Arena for performance but
// the full struct is not visible outside of arena.c. Yes, I know, it's awful.
#define UPB_ARENA_SIZE_HACK 9
#define UPB_ARENA_SIZE_HACK (9 + UPB_TSAN_PUBLISH)
// LINT.IfChange(upb_Arena)
@@ -734,15 +755,25 @@ UPB_API void upb_Arena_Free(upb_Arena* a);
// freed.
UPB_API void upb_Arena_SetAllocCleanup(upb_Arena* a,
upb_AllocCleanupFunc* func);
// Fuses the lifetimes of two arenas, such that no arenas that have been
// transitively fused together will be freed until all of them have reached a
// zero refcount. This operation is safe to use concurrently from multiple
// threads.
UPB_API bool upb_Arena_Fuse(const upb_Arena* a, const upb_Arena* b);
// This operation is safe to use concurrently from multiple threads.
UPB_API bool upb_Arena_IsFused(const upb_Arena* a, const upb_Arena* b);
// Returns the upb_alloc used by the arena.
UPB_API upb_alloc* upb_Arena_GetUpbAlloc(upb_Arena* a);
// This operation is safe to use concurrently from multiple threads.
bool upb_Arena_IncRefFor(const upb_Arena* a, const void* owner);
// This operation is safe to use concurrently from multiple threads.
void upb_Arena_DecRefFor(const upb_Arena* a, const void* owner);
// This operation is safe to use concurrently from multiple threads.
size_t upb_Arena_SpaceAllocated(upb_Arena* a, size_t* fused_count);
uint32_t upb_Arena_DebugRefCount(upb_Arena* a);
@@ -15595,6 +15626,10 @@ upb_MethodDef* _upb_MethodDefs_New(upb_DefBuilder* ctx, int n,
#undef UPB_ASAN
#undef UPB_ASAN_GUARD_SIZE
#undef UPB_CLANG_ASAN
#undef UPB_TSAN_PUBLISHED_MEMBER
#undef UPB_TSAN_INIT_PUBLISHED
#undef UPB_TSAN_CHECK_PUBLISHED
#undef UPB_TSAN_PUBLISH
#undef UPB_TREAT_CLOSED_ENUMS_LIKE_OPEN
#undef UPB_DEPRECATED
#undef UPB_GNUC_MIN
