Protocol Buffers - Google's data interchange format (a gRPC dependency)
https://developers.google.com/protocol-buffers/

/*
 * Copyright (c) 2009-2021, Google LLC
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Google LLC nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Google LLC BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* upb_Arena is a specific allocator implementation that uses arena allocation.
 * The user provides an allocator that will be used to allocate the underlying
 * arena blocks. Arenas by nature do not require the individual allocations
 * to be freed. However, the arena does allow users to register cleanup
 * functions that will run when the arena is destroyed.
 *
 * A upb_Arena is *not* thread-safe.
 *
 * You could write a thread-safe arena allocator that satisfies the
 * upb_alloc interface, but it would not be as efficient for the
 * single-threaded case. */
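
/* A minimal usage sketch (illustrative only; MyMsg and my_cleanup are
 * hypothetical and not part of this header):
 *
 *   static void my_cleanup(void* context) { fclose((FILE*)context); }
 *
 *   FILE* f = fopen("data.bin", "rb");
 *   upb_Arena* arena = upb_Arena_New();
 *   MyMsg* msg = (MyMsg*)upb_Arena_Malloc(arena, sizeof(MyMsg));
 *   upb_Arena_AddCleanup(arena, f, my_cleanup);
 *   ...
 *   upb_Arena_Free(arena);  // releases every block and runs my_cleanup(f)
 */
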
#ifndef UPB_MEM_ARENA_H_
#define UPB_MEM_ARENA_H_

#include <stdint.h>  // uint32_t, uintptr_t (used below)
#include <string.h>

#include "upb/mem/alloc.h"

// Must be last.
#include "upb/port/def.inc"

typedef struct upb_Arena upb_Arena;

typedef void upb_CleanupFunc(void* context);

typedef struct {
  char *ptr, *end;
} _upb_ArenaHead;

#ifdef __cplusplus
extern "C" {
#endif

// Creates an arena from the given initial block (if any -- n may be 0).
// Additional blocks will be allocated from |alloc|. If |alloc| is NULL, this
// is a fixed-size arena and cannot grow.
UPB_API upb_Arena* upb_Arena_Init(void* mem, size_t n, upb_alloc* alloc);
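
/* A sketch of a fixed-size arena over a stack buffer (illustrative; part of
 * |buf| is consumed by the arena's own bookkeeping, and once the buffer is
 * exhausted further allocations fail because the arena cannot grow):
 *
 *   char buf[4096];
 *   upb_Arena* arena = upb_Arena_Init(buf, sizeof(buf), NULL);
 *   void* p = upb_Arena_Malloc(arena, 128);  // NULL once |buf| runs out
 *   ...
 *   upb_Arena_Free(arena);  // runs cleanups; |buf| itself is caller-owned
 */
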
// Frees the arena, releasing its blocks and running any registered cleanup
// functions.
UPB_API void upb_Arena_Free(upb_Arena* a);

// Registers |func| to be called with |ud| when the arena is destroyed.
// Returns false if the cleanup entry could not be allocated.
UPB_API bool upb_Arena_AddCleanup(upb_Arena* a, void* ud,
                                  upb_CleanupFunc* func);

// Fuses the lifetimes of |a| and |b|: neither arena's memory is released
// until both arenas have been freed. Returns false if the fuse failed.
UPB_API bool upb_Arena_Fuse(upb_Arena* a, upb_Arena* b);
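
/* A fuse sketch (illustrative): after a successful fuse, objects on one
 * arena may safely point at memory from the other, since no blocks are
 * released until both arenas have been freed:
 *
 *   upb_Arena* a = upb_Arena_New();
 *   upb_Arena* b = upb_Arena_New();
 *   if (upb_Arena_Fuse(a, b)) {
 *     upb_Arena_Free(a);  // memory from |a| remains valid for |b|
 *     upb_Arena_Free(b);  // now all blocks are released
 *   }
 */
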
void* _upb_Arena_SlowMalloc(upb_Arena* a, size_t size);
size_t upb_Arena_SpaceAllocated(upb_Arena* arena);
uint32_t upb_Arena_DebugRefCount(upb_Arena* arena);

// Returns how many bytes the current block can still serve without falling
// back to _upb_Arena_SlowMalloc().
UPB_INLINE size_t _upb_ArenaHas(upb_Arena* a) {
  _upb_ArenaHead* h = (_upb_ArenaHead*)a;
  return (size_t)(h->end - h->ptr);
}

UPB_API_INLINE void* upb_Arena_Malloc(upb_Arena* a, size_t size) {
  size = UPB_ALIGN_MALLOC(size);
  if (UPB_UNLIKELY(_upb_ArenaHas(a) < size)) {
    return _upb_Arena_SlowMalloc(a, size);
  }

  // We have enough space to do a fast malloc.
  _upb_ArenaHead* h = (_upb_ArenaHead*)a;
  void* ret = h->ptr;
  UPB_ASSERT(UPB_ALIGN_MALLOC((uintptr_t)ret) == (uintptr_t)ret);
  UPB_ASSERT(UPB_ALIGN_MALLOC(size) == size);
  UPB_UNPOISON_MEMORY_REGION(ret, size);

  h->ptr += size;

#if UPB_ASAN
  {
    // Leave a poisoned guard region between allocations so ASAN can catch
    // out-of-bounds writes off the end of |ret|.
    size_t guard_size = 32;
    if (_upb_ArenaHas(a) >= guard_size) {
      h->ptr += guard_size;
    } else {
      h->ptr = h->end;
    }
  }
#endif

  return ret;
}

// Shrinks the last alloc from arena.
// REQUIRES: (ptr, oldsize) was the last malloc/realloc from this arena.
// We could also add a upb_Arena_TryShrinkLast() which is simply a no-op if
// this was not the last alloc.
UPB_API_INLINE void upb_Arena_ShrinkLast(upb_Arena* a, void* ptr,
                                         size_t oldsize, size_t size) {
  _upb_ArenaHead* h = (_upb_ArenaHead*)a;
  oldsize = UPB_ALIGN_MALLOC(oldsize);
  size = UPB_ALIGN_MALLOC(size);
  UPB_ASSERT((char*)ptr + oldsize == h->ptr);  // Must be the last alloc.
  UPB_ASSERT(size <= oldsize);
  h->ptr = (char*)ptr + size;
}
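
/* A sketch of trimming a worst-case allocation (illustrative; kMaxSize and
 * encode_into are hypothetical):
 *
 *   char* buf = (char*)upb_Arena_Malloc(arena, kMaxSize);
 *   size_t used = encode_into(buf, kMaxSize);
 *   upb_Arena_ShrinkLast(arena, buf, kMaxSize, used);  // return the tail
 */
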
UPB_API_INLINE void* upb_Arena_Realloc(upb_Arena* a, void* ptr, size_t oldsize,
                                       size_t size) {
  _upb_ArenaHead* h = (_upb_ArenaHead*)a;
  oldsize = UPB_ALIGN_MALLOC(oldsize);
  size = UPB_ALIGN_MALLOC(size);
  bool is_most_recent_alloc = (uintptr_t)ptr + oldsize == (uintptr_t)h->ptr;

  if (is_most_recent_alloc) {
    // This was the most recent alloc: grow or shrink it in place if the
    // current block has room.
    ptrdiff_t diff = size - oldsize;
    if ((ptrdiff_t)_upb_ArenaHas(a) >= diff) {
      h->ptr += diff;
      return ptr;
    }
  } else if (size <= oldsize) {
    return ptr;
  }

  // Otherwise take a fresh allocation and copy the old contents into it.
  void* ret = upb_Arena_Malloc(a, size);

  if (ret && oldsize > 0) {
    memcpy(ret, ptr, UPB_MIN(oldsize, size));
  }

  return ret;
}
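
/* A sketch of growing a dynamic array (illustrative): if |arr| was the most
 * recent allocation and the current block has room, the grow happens in
 * place; otherwise new memory is allocated and the contents are copied:
 *
 *   size_t cap = 16;
 *   int* arr = (int*)upb_Arena_Malloc(arena, cap * sizeof(int));
 *   ...
 *   arr = (int*)upb_Arena_Realloc(arena, arr, cap * sizeof(int),
 *                                 2 * cap * sizeof(int));
 *   cap *= 2;
 */
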
// Allocates a fresh, growable arena using the global allocator.
UPB_API_INLINE upb_Arena* upb_Arena_New(void) {
  return upb_Arena_Init(NULL, 0, &upb_alloc_global);
}

#ifdef __cplusplus
} /* extern "C" */
#endif

#include "upb/port/undef.inc"

#endif /* UPB_MEM_ARENA_H_ */