Merge tag 'refs/tags/sync-piper' into sync-stage

pull/10360/head
Matt Kulukundis 2 years ago
commit 7513a1743f
15 changed files (lines changed in parentheses):

    python/google/protobuf/descriptor.py (8)
    python/google/protobuf/internal/message_test.py (13)
    src/google/protobuf/arenaz_sampler.cc (46)
    src/google/protobuf/arenaz_sampler.h (29)
    src/google/protobuf/arenaz_sampler_test.cc (147)
    src/google/protobuf/compiler/command_line_interface.cc (3)
    src/google/protobuf/compiler/cpp/options.h (2)
    src/google/protobuf/compiler/java/file.cc (3)
    src/google/protobuf/compiler/python/generator.cc (32)
    src/google/protobuf/compiler/python/generator.h (1)
    src/google/protobuf/descriptor.h (6)
    src/google/protobuf/message.h (5)
    src/google/protobuf/message_unittest.inc (8)
    src/google/protobuf/repeated_field.h (53)
    src/google/protobuf/repeated_field_unittest.cc (54)

@@ -1021,13 +1021,7 @@ class FileDescriptor(DescriptorBase):
       # FileDescriptor() is called from various places, not only from generated
       # files, to register dynamic proto files and messages.
       # pylint: disable=g-explicit-bool-comparison
-      if serialized_pb == b'':
-        # Cpp generated code must be linked in if serialized_pb is ''
-        try:
-          return _message.default_pool.FindFileByName(name)
-        except KeyError:
-          raise RuntimeError('Please link in cpp generated lib for %s' % (name))
-      elif serialized_pb:
+      if serialized_pb:
         return _message.default_pool.AddSerializedFile(serialized_pb)
       else:
         return super(FileDescriptor, cls).__new__(cls)

@@ -1768,6 +1768,19 @@ class Proto3Test(unittest.TestCase):
     with self.assertRaises(TypeError):
       123 in msg.map_string_string

+  def testScalarMapComparison(self):
+    msg1 = map_unittest_pb2.TestMap()
+    msg2 = map_unittest_pb2.TestMap()
+    self.assertEqual(msg1.map_int32_int32, msg2.map_int32_int32)
+
+  def testMessageMapComparison(self):
+    msg1 = map_unittest_pb2.TestMap()
+    msg2 = map_unittest_pb2.TestMap()
+    self.assertEqual(msg1.map_int32_foreign_message,
+                     msg2.map_int32_foreign_message)
+
   def testMapGet(self):
     # Need to test that get() properly returns the default, even though the dict
     # has defaultdict-like semantics.

@@ -33,6 +33,7 @@
 #include <atomic>
 #include <cstdint>
 #include <limits>
+#include <utility>

 // Must be included last.
@@ -74,11 +75,15 @@ PROTOBUF_THREAD_LOCAL SamplingState global_sampling_state = {

 ThreadSafeArenaStats::ThreadSafeArenaStats() { PrepareForSampling(0); }
 ThreadSafeArenaStats::~ThreadSafeArenaStats() = default;

-void ThreadSafeArenaStats::PrepareForSampling(int64_t stride) {
+void ThreadSafeArenaStats::BlockStats::PrepareForSampling() {
   num_allocations.store(0, std::memory_order_relaxed);
-  bytes_used.store(0, std::memory_order_relaxed);
   bytes_allocated.store(0, std::memory_order_relaxed);
+  bytes_used.store(0, std::memory_order_relaxed);
   bytes_wasted.store(0, std::memory_order_relaxed);
+}
+
+void ThreadSafeArenaStats::PrepareForSampling(int64_t stride) {
+  for (auto& blockstats : block_histogram) blockstats.PrepareForSampling();
   max_block_size.store(0, std::memory_order_relaxed);
   thread_ids.store(0, std::memory_order_relaxed);
   weight = stride;
@@ -88,12 +93,41 @@ void ThreadSafeArenaStats::PrepareForSampling(int64_t stride) {
   depth = absl::GetStackTrace(stack, kMaxStackDepth, /* skip_count= */ 0);
 }

+size_t ThreadSafeArenaStats::FindBin(size_t bytes) {
+  if (bytes <= kMaxSizeForBinZero) return 0;
+  if (bytes <= kMaxSizeForPenultimateBin) {
+    // absl::bit_width() returns one plus the base-2 logarithm of x, with any
+    // fractional part discarded.
+    return absl::bit_width(absl::bit_ceil(bytes)) - kLogMaxSizeForBinZero - 1;
+  }
+  return kBlockHistogramBins - 1;
+}
+
+std::pair<size_t, size_t> ThreadSafeArenaStats::MinMaxBlockSizeForBin(
+    size_t bin) {
+  ABSL_ASSERT(bin < kBlockHistogramBins);
+  if (bin == 0) return {1, kMaxSizeForBinZero};
+  if (bin < kBlockHistogramBins - 1) {
+    return {(1 << (kLogMaxSizeForBinZero + bin - 1)) + 1,
+            1 << (kLogMaxSizeForBinZero + bin)};
+  }
+  return {kMaxSizeForPenultimateBin + 1, std::numeric_limits<size_t>::max()};
+}
+
 void RecordAllocateSlow(ThreadSafeArenaStats* info, size_t used,
                         size_t allocated, size_t wasted) {
-  info->num_allocations.fetch_add(1, std::memory_order_relaxed);
-  info->bytes_used.fetch_add(used, std::memory_order_relaxed);
-  info->bytes_allocated.fetch_add(allocated, std::memory_order_relaxed);
-  info->bytes_wasted.fetch_add(wasted, std::memory_order_relaxed);
+  // Update the allocated bytes for the current block.
+  ThreadSafeArenaStats::BlockStats& curr =
+      info->block_histogram[ThreadSafeArenaStats::FindBin(allocated)];
+  curr.bytes_allocated.fetch_add(allocated, std::memory_order_relaxed);
+  curr.num_allocations.fetch_add(1, std::memory_order_relaxed);
+  // Update the used and wasted bytes for the previous block.
+  ThreadSafeArenaStats::BlockStats& prev =
+      info->block_histogram[ThreadSafeArenaStats::FindBin(used + wasted)];
+  prev.bytes_used.fetch_add(used, std::memory_order_relaxed);
+  prev.bytes_wasted.fetch_add(wasted, std::memory_order_relaxed);
   if (info->max_block_size.load(std::memory_order_relaxed) < allocated) {
     info->max_block_size.store(allocated, std::memory_order_relaxed);
   }
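
The new histogram replaces the four flat counters: each allocation is credited to the bin of the freshly allocated block, while the bytes actually used and wasted are credited to the bin of the block being retired (whose size is used + wasted). For reference, the bin math can be exercised in isolation; the sketch below is an illustration only (not part of this commit), using C++20 <bit> in place of absl::bit_width()/absl::bit_ceil(), with the constants copied from the arenaz_sampler.h change further down:

#include <bit>
#include <cstddef>
#include <cstdio>

constexpr size_t kBins = 15;        // kBlockHistogramBins in the patch
constexpr size_t kLogBinZero = 7;   // kLogMaxSizeForBinZero
constexpr size_t kMaxBinZero = 1 << kLogBinZero;                    // 128
constexpr size_t kMaxPenultimate = 1 << (kLogBinZero + kBins - 2);  // 1 MiB

size_t FindBinSketch(size_t bytes) {
  if (bytes <= kMaxBinZero) return 0;
  if (bytes <= kMaxPenultimate) {
    // bit_width(bit_ceil(x)) is 1 + ceil(log2(x)), so this maps
    // (128, 256] -> 1, (256, 512] -> 2, and so on.
    return std::bit_width(std::bit_ceil(bytes)) - kLogBinZero - 1;
  }
  return kBins - 1;
}

int main() {
  std::printf("%zu %zu %zu %zu\n",
              FindBinSketch(128),       // 0: inside [1, 128]
              FindBinSketch(129),       // 1: first size in (128, 256]
              FindBinSketch(256),       // 1: last size in (128, 256]
              FindBinSketch(1 << 21));  // 14: past the penultimate bin
  return 0;
}

Each bin past zero covers one doubling of block size, so a single bit_width subtraction recovers the bin index without a loop.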

@@ -31,9 +31,11 @@
 #ifndef GOOGLE_PROTOBUF_SRC_GOOGLE_PROTOBUF_ARENAZ_SAMPLER_H__
 #define GOOGLE_PROTOBUF_SRC_GOOGLE_PROTOBUF_ARENAZ_SAMPLER_H__

+#include <array>
 #include <atomic>
 #include <cstddef>
 #include <cstdint>
+#include <utility>

 // Must be included last.
@@ -65,10 +67,28 @@ struct ThreadSafeArenaStats

   // These fields are mutated by the various Record* APIs and need to be
   // thread-safe.
-  std::atomic<int> num_allocations;
-  std::atomic<size_t> bytes_used;
-  std::atomic<size_t> bytes_allocated;
-  std::atomic<size_t> bytes_wasted;
+  struct BlockStats {
+    std::atomic<int> num_allocations;
+    std::atomic<size_t> bytes_allocated;
+    std::atomic<size_t> bytes_used;
+    std::atomic<size_t> bytes_wasted;
+
+    void PrepareForSampling();
+  };
+
+  // block_histogram is a kBlockHistogramBins sized histogram. The zeroth bin
+  // stores info about blocks of size \in [1, 1 << kLogMaxSizeForBinZero]. Bin
+  // i, where i > 0, stores info for blocks of size \in (max_size_bin (i-1),
+  // 1 << (kLogMaxSizeForBinZero + i)]. The final bin stores info about blocks
+  // of size \in [kMaxSizeForPenultimateBin + 1,
+  // std::numeric_limits<size_t>::max()].
+  static constexpr size_t kBlockHistogramBins = 15;
+  static constexpr size_t kLogMaxSizeForBinZero = 7;
+  static constexpr size_t kMaxSizeForBinZero = (1 << kLogMaxSizeForBinZero);
+  static constexpr size_t kMaxSizeForPenultimateBin =
+      1 << (kLogMaxSizeForBinZero + kBlockHistogramBins - 2);
+  std::array<BlockStats, kBlockHistogramBins> block_histogram;
+
   // Records the largest block allocated for the arena.
   std::atomic<size_t> max_block_size;
   // Bit `i` is set to 1 indicates that a thread with `tid % 63 = i` accessed
@@ -90,6 +110,13 @@ struct ThreadSafeArenaStats
     if (PROTOBUF_PREDICT_TRUE(info == nullptr)) return;
     RecordAllocateSlow(info, used, allocated, wasted);
   }
+
+  // Returns the bin for the provided size.
+  static size_t FindBin(size_t bytes);
+
+  // Returns the min and max bytes that can be stored in the histogram for
+  // blocks in the provided bin.
+  static std::pair<size_t, size_t> MinMaxBlockSizeForBin(size_t bin);
 };

 struct SamplingState {
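
Spelled out, these constants give bin 0 the range [1, 128], bin i for 0 < i < 14 the doubling range (2^(6+i), 2^(7+i)], and bin 14 everything above 1 MiB. A couple of static_asserts (illustrative only, not in the commit) make the boundaries concrete, complementing the FindBin sketch earlier:

static_assert(ThreadSafeArenaStats::kMaxSizeForBinZero == 128,
              "bin 0 covers [1, 128]");
static_assert(ThreadSafeArenaStats::kMaxSizeForPenultimateBin == (1 << 20),
              "bins 1..13 each cover one doubling, ending at 1 MiB; bin 14 "
              "covers [1 MiB + 1, SIZE_MAX]");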

@@ -30,6 +30,8 @@

 #include <google/protobuf/arenaz_sampler.h>

+#include <atomic>
+#include <limits>
 #include <memory>
 #include <random>
 #include <utility>
@@ -37,7 +39,6 @@

 #include <gmock/gmock.h>
 #include <gtest/gtest.h>
-#include <google/protobuf/stubs/strutil.h>

 // Must be included last.
@@ -57,10 +58,17 @@ class ThreadSafeArenaStatsHandlePeer {
     return h->info_;
   }
 };

 std::vector<size_t> GetBytesAllocated(ThreadSafeArenazSampler* s) {
   std::vector<size_t> res;
   s->Iterate([&](const ThreadSafeArenaStats& info) {
-    res.push_back(info.bytes_allocated.load(std::memory_order_acquire));
+    for (const auto& block_stats : info.block_histogram) {
+      size_t bytes_allocated =
+          block_stats.bytes_allocated.load(std::memory_order_acquire);
+      if (bytes_allocated != 0) {
+        res.push_back(bytes_allocated);
+      }
+    }
   });
   return res;
 }
@@ -69,7 +77,8 @@ ThreadSafeArenaStats* Register(ThreadSafeArenazSampler* s, size_t size,
                                int64_t stride) {
   auto* info = s->Register(stride);
   assert(info != nullptr);
-  info->bytes_allocated.store(size);
+  info->block_histogram[0].bytes_allocated.store(size,
+                                                 std::memory_order_relaxed);
   return info;
 }
@@ -85,46 +94,100 @@ TEST(ThreadSafeArenaStatsTest, PrepareForSampling) {
   MutexLock l(&info.init_mu);
   info.PrepareForSampling(kTestStride);

-  EXPECT_EQ(info.num_allocations.load(), 0);
-  EXPECT_EQ(info.bytes_used.load(), 0);
-  EXPECT_EQ(info.bytes_allocated.load(), 0);
-  EXPECT_EQ(info.bytes_wasted.load(), 0);
-  EXPECT_EQ(info.max_block_size.load(), 0);
+  for (const auto& block_stats : info.block_histogram) {
+    EXPECT_EQ(block_stats.num_allocations.load(std::memory_order_relaxed), 0);
+    EXPECT_EQ(block_stats.bytes_used.load(std::memory_order_relaxed), 0);
+    EXPECT_EQ(block_stats.bytes_allocated.load(std::memory_order_relaxed), 0);
+    EXPECT_EQ(block_stats.bytes_wasted.load(std::memory_order_relaxed), 0);
+  }
+  EXPECT_EQ(info.max_block_size.load(std::memory_order_relaxed), 0);
   EXPECT_EQ(info.weight, kTestStride);

-  info.num_allocations.store(1, std::memory_order_relaxed);
-  info.bytes_used.store(1, std::memory_order_relaxed);
-  info.bytes_allocated.store(1, std::memory_order_relaxed);
-  info.bytes_wasted.store(1, std::memory_order_relaxed);
+  for (auto& block_stats : info.block_histogram) {
+    block_stats.num_allocations.store(1, std::memory_order_relaxed);
+    block_stats.bytes_used.store(1, std::memory_order_relaxed);
+    block_stats.bytes_allocated.store(1, std::memory_order_relaxed);
+    block_stats.bytes_wasted.store(1, std::memory_order_relaxed);
+  }
   info.max_block_size.store(1, std::memory_order_relaxed);

   info.PrepareForSampling(2 * kTestStride);
-  EXPECT_EQ(info.num_allocations.load(), 0);
-  EXPECT_EQ(info.bytes_used.load(), 0);
-  EXPECT_EQ(info.bytes_allocated.load(), 0);
-  EXPECT_EQ(info.bytes_wasted.load(), 0);
-  EXPECT_EQ(info.max_block_size.load(), 0);
+  for (auto& block_stats : info.block_histogram) {
+    EXPECT_EQ(block_stats.num_allocations.load(std::memory_order_relaxed), 0);
+    EXPECT_EQ(block_stats.bytes_used.load(std::memory_order_relaxed), 0);
+    EXPECT_EQ(block_stats.bytes_allocated.load(std::memory_order_relaxed), 0);
+    EXPECT_EQ(block_stats.bytes_wasted.load(std::memory_order_relaxed), 0);
+  }
+  EXPECT_EQ(info.max_block_size.load(std::memory_order_relaxed), 0);
   EXPECT_EQ(info.weight, 2 * kTestStride);
 }
+TEST(ThreadSafeArenaStatsTest, FindBin) {
+  size_t current_bin = 0;
+  size_t bytes = 1;
+  while (current_bin < ThreadSafeArenaStats::kBlockHistogramBins - 1) {
+    size_t next_bin = ThreadSafeArenaStats::FindBin(bytes);
+    if (next_bin != current_bin) {
+      // Test the bins increase linearly.
+      EXPECT_EQ(next_bin, current_bin + 1);
+      // Test the bins change only at values of the form 2^k + 1.
+      EXPECT_EQ(absl::popcount(bytes - 1), 1);
+      current_bin = next_bin;
+    }
+    ++bytes;
+  }
+}
+
+TEST(ThreadSafeArenaStatsTest, MinMaxBlockSizeForBin) {
+  std::pair<size_t, size_t> current_limits =
+      ThreadSafeArenaStats::MinMaxBlockSizeForBin(0);
+  EXPECT_EQ(current_limits.first, 1);
+  EXPECT_LT(current_limits.first, current_limits.second);
+  for (size_t i = 1; i < ThreadSafeArenaStats::kBlockHistogramBins; ++i) {
+    std::pair<size_t, size_t> next_limits =
+        ThreadSafeArenaStats::MinMaxBlockSizeForBin(i);
+    EXPECT_LT(next_limits.first, next_limits.second);
+    // Test the limits do not have gaps.
+    EXPECT_EQ(next_limits.first, current_limits.second + 1);
+    if (i != ThreadSafeArenaStats::kBlockHistogramBins - 1) {
+      EXPECT_EQ(next_limits.second, 2 * current_limits.second);
+    }
+    current_limits = next_limits;
+  }
+  // Test the limits cover the entire range possible.
+  EXPECT_EQ(current_limits.second, std::numeric_limits<size_t>::max());
+}
+
 TEST(ThreadSafeArenaStatsTest, RecordAllocateSlow) {
   ThreadSafeArenaStats info;
   constexpr int64_t kTestStride = 458;
   MutexLock l(&info.init_mu);
   info.PrepareForSampling(kTestStride);
-  RecordAllocateSlow(&info, /*requested=*/100, /*allocated=*/128, /*wasted=*/0);
-  EXPECT_EQ(info.num_allocations.load(), 1);
-  EXPECT_EQ(info.bytes_used.load(), 100);
-  EXPECT_EQ(info.bytes_allocated.load(), 128);
-  EXPECT_EQ(info.bytes_wasted.load(), 0);
-  EXPECT_EQ(info.max_block_size.load(), 128);
+  RecordAllocateSlow(&info, /*requested=*/0, /*allocated=*/128, /*wasted=*/0);
+  EXPECT_EQ(
+      info.block_histogram[0].num_allocations.load(std::memory_order_relaxed),
+      1);
+  EXPECT_EQ(info.block_histogram[0].bytes_used.load(std::memory_order_relaxed),
+            0);
+  EXPECT_EQ(
+      info.block_histogram[0].bytes_allocated.load(std::memory_order_relaxed),
+      128);
+  EXPECT_EQ(
+      info.block_histogram[0].bytes_wasted.load(std::memory_order_relaxed), 0);
+  EXPECT_EQ(info.max_block_size.load(std::memory_order_relaxed), 128);
   RecordAllocateSlow(&info, /*requested=*/100, /*allocated=*/256,
                      /*wasted=*/28);
-  EXPECT_EQ(info.num_allocations.load(), 2);
-  EXPECT_EQ(info.bytes_used.load(), 200);
-  EXPECT_EQ(info.bytes_allocated.load(), 384);
-  EXPECT_EQ(info.bytes_wasted.load(), 28);
-  EXPECT_EQ(info.max_block_size.load(), 256);
+  EXPECT_EQ(info.block_histogram[0].bytes_used.load(std::memory_order_relaxed),
+            100);
+  EXPECT_EQ(
+      info.block_histogram[0].bytes_wasted.load(std::memory_order_relaxed), 28);
+  EXPECT_EQ(
+      info.block_histogram[1].num_allocations.load(std::memory_order_relaxed),
+      1);
+  EXPECT_EQ(
+      info.block_histogram[1].bytes_allocated.load(std::memory_order_relaxed),
+      256);
+  EXPECT_EQ(info.max_block_size.load(std::memory_order_relaxed), 256);
 }

 TEST(ThreadSafeArenaStatsTest, RecordAllocateSlowMaxBlockSizeTest) {
@@ -133,13 +196,13 @@ TEST(ThreadSafeArenaStatsTest, RecordAllocateSlowMaxBlockSizeTest) {
   MutexLock l(&info.init_mu);
   info.PrepareForSampling(kTestStride);
   RecordAllocateSlow(&info, /*requested=*/100, /*allocated=*/128, /*wasted=*/0);
-  EXPECT_EQ(info.max_block_size.load(), 128);
+  EXPECT_EQ(info.max_block_size.load(std::memory_order_relaxed), 128);
   RecordAllocateSlow(&info, /*requested=*/100, /*allocated=*/256,
                      /*wasted=*/28);
-  EXPECT_EQ(info.max_block_size.load(), 256);
+  EXPECT_EQ(info.max_block_size.load(std::memory_order_relaxed), 256);
   RecordAllocateSlow(&info, /*requested=*/100, /*allocated=*/128,
                      /*wasted=*/28);
-  EXPECT_EQ(info.max_block_size.load(), 256);
+  EXPECT_EQ(info.max_block_size.load(std::memory_order_relaxed), 256);
 }

 TEST(ThreadSafeArenazSamplerTest, SamplingCorrectness) {
@@ -212,12 +275,15 @@ TEST(ThreadSafeArenazSamplerTest, Handle) {
   constexpr int64_t kTestStride = 17;
   ThreadSafeArenaStatsHandle h(sampler.Register(kTestStride));
   auto* info = ThreadSafeArenaStatsHandlePeer::GetInfo(&h);
-  info->bytes_allocated.store(0x12345678, std::memory_order_relaxed);
+  info->block_histogram[0].bytes_allocated.store(0x12345678,
+                                                 std::memory_order_relaxed);

   bool found = false;
   sampler.Iterate([&](const ThreadSafeArenaStats& h) {
     if (&h == info) {
-      EXPECT_EQ(h.bytes_allocated.load(), 0x12345678);
+      EXPECT_EQ(
+          h.block_histogram[0].bytes_allocated.load(std::memory_order_relaxed),
+          0x12345678);
       EXPECT_EQ(h.weight, kTestStride);
       found = true;
     }
@@ -230,7 +296,8 @@ TEST(ThreadSafeArenazSamplerTest, Handle) {
     if (&h == info) {
       // this will only happen if some other thread has resurrected the info
       // the old handle was using.
-      if (h.bytes_allocated.load() == 0x12345678) {
+      if (h.block_histogram[0].bytes_allocated.load(
+              std::memory_order_relaxed) == 0x12345678) {
         found = true;
       }
     }
@@ -246,7 +313,7 @@ TEST(ThreadSafeArenazSamplerTest, Registration) {
   auto* info2 = Register(&sampler, 2, kTestStride);
   EXPECT_THAT(GetBytesAllocated(&sampler), UnorderedElementsAre(1, 2));

-  info1->bytes_allocated.store(3);
+  info1->block_histogram[0].bytes_allocated.store(3, std::memory_order_relaxed);
   EXPECT_THAT(GetBytesAllocated(&sampler), UnorderedElementsAre(3, 2));

   sampler.Unregister(info1);
@@ -258,18 +325,18 @@ TEST(ThreadSafeArenazSamplerTest, Unregistration) {
   std::vector<ThreadSafeArenaStats*> infos;
   constexpr int64_t kTestStride = 200;
   for (size_t i = 0; i < 3; ++i) {
-    infos.push_back(Register(&sampler, i, kTestStride));
+    infos.push_back(Register(&sampler, i + 1, kTestStride));
   }
-  EXPECT_THAT(GetBytesAllocated(&sampler), UnorderedElementsAre(0, 1, 2));
+  EXPECT_THAT(GetBytesAllocated(&sampler), UnorderedElementsAre(1, 2, 3));

   sampler.Unregister(infos[1]);
-  EXPECT_THAT(GetBytesAllocated(&sampler), UnorderedElementsAre(0, 2));
+  EXPECT_THAT(GetBytesAllocated(&sampler), UnorderedElementsAre(1, 3));

   infos.push_back(Register(&sampler, 3, kTestStride));
   infos.push_back(Register(&sampler, 4, kTestStride));
-  EXPECT_THAT(GetBytesAllocated(&sampler), UnorderedElementsAre(0, 2, 3, 4));
+  EXPECT_THAT(GetBytesAllocated(&sampler), UnorderedElementsAre(1, 3, 3, 4));
   sampler.Unregister(infos[3]);
-  EXPECT_THAT(GetBytesAllocated(&sampler), UnorderedElementsAre(0, 2, 4));
+  EXPECT_THAT(GetBytesAllocated(&sampler), UnorderedElementsAre(1, 3, 4));

   sampler.Unregister(infos[0]);
   sampler.Unregister(infos[2]);

@@ -2124,7 +2124,8 @@ bool CommandLineInterface::EnforceProto3OptionalSupport(
           << codegen_name
           << " hasn't been updated to support optional fields in "
              "proto3. Please ask the owner of this code generator to "
-             "support proto3 optional.";
+             "support proto3 optional."
+          << std::endl;
       return false;
     }
   }

@@ -85,7 +85,7 @@ struct Options {
   bool profile_driven_inline_string = true;
   bool message_owned_arena_trial = false;
   bool force_split = false;
-  bool profile_driven_split = false;
+  bool profile_driven_split = true;
 #ifdef PROTOBUF_STABLE_EXPERIMENTS
   bool force_eagerly_verified_lazy = true;
   bool force_inline_string = true;

@@ -708,6 +708,9 @@ void FileGenerator::GenerateKotlinSiblings(
       "// source: $filename$\n"
       "\n",
       "filename", descriptor->file()->name());
+  printer.Print(
+      "// Generated files should ignore deprecation warnings\n"
+      "@file:Suppress(\"DEPRECATION\")");
   if (!java_package_.empty()) {
     printer.Print(
         "package $package$;\n"

@@ -217,15 +217,12 @@ bool Generator::Generate(const FileDescriptor* file,
                          GeneratorContext* context, std::string* error) const {
   // -----------------------------------------------------------------
   // parse generator options
-  bool cpp_generated_lib_linked = false;

   std::vector<std::pair<std::string, std::string> > options;
   ParseGeneratorParameter(parameter, &options);

   for (int i = 0; i < options.size(); i++) {
-    if (options[i].first == "cpp_generated_lib_linked") {
-      cpp_generated_lib_linked = true;
-    } else if (options[i].first == "pyi_out") {
+    if (options[i].first == "pyi_out") {
       python::PyiGenerator pyi_generator;
       if (!pyi_generator.Generate(file, "", context, error)) {
         return false;
@@ -247,10 +244,6 @@ bool Generator::Generate(const FileDescriptor* file,
   file_ = file;

   std::string filename = GetFileName(file, ".py");
-  pure_python_workable_ = !cpp_generated_lib_linked;
-  if (HasPrefixString(file->name(), "google/protobuf/")) {
-    pure_python_workable_ = true;
-  }

   FileDescriptorProto fdp;
   file_->CopyTo(&fdp);
@@ -263,11 +256,8 @@ bool Generator::Generate(const FileDescriptor* file,
   printer_ = &printer;

   PrintTopBoilerplate(printer_, file_, GeneratingDescriptorProto());
-  if (pure_python_workable_) {
     PrintImports();
-  }
   PrintFileDescriptor();
-  if (pure_python_workable_) {
     if (GeneratingDescriptorProto()) {
       printer_->Print("if _descriptor._USE_C_DESCRIPTORS == False:\n");
       printer_->Indent();
@@ -286,13 +276,11 @@ bool Generator::Generate(const FileDescriptor* file,
     if (GeneratingDescriptorProto()) {
       printer_->Outdent();
     }
-  }

   std::string module_name = ModuleName(file->name());
   printer_->Print(
       "_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, '$module_name$', "
       "globals())\n",
       "module_name", module_name);
-  if (pure_python_workable_) {
     printer.Print("if _descriptor._USE_C_DESCRIPTORS == False:\n");
     printer_->Indent();
@@ -311,7 +299,6 @@ bool Generator::Generate(const FileDescriptor* file,
     SetSerializedPbInterval();

     printer_->Outdent();
-  }

   if (HasGenericServices(file)) {
     printer_->Print(
         "_builder.BuildServices(DESCRIPTOR, '$module_name$', globals())\n",
@@ -389,7 +376,6 @@ void Generator::PrintFileDescriptor() const {
       "  create_key=_descriptor._internal_create_key,\n";
   printer_->Print(m, file_descriptor_template);
   printer_->Indent();
-  if (pure_python_workable_) {
     printer_->Print("serialized_pb=b'$value$'\n", "value",
                     strings::CHexEscape(file_descriptor_serialized_));
     if (file_->dependency_count() != 0) {
@@ -411,9 +397,6 @@ void Generator::PrintFileDescriptor() const {
       }
       printer_->Print("]");
     }
-  } else {
-    printer_->Print("serialized_pb=''\n");
-  }

   // TODO(falk): Also print options and fix the message_type, enum_type,
   // service and extension later in the generation.
@@ -467,12 +450,10 @@ void Generator::PrintEnum(const EnumDescriptor& enum_descriptor) const {
   printer_->Indent();
   printer_->Indent();

-  if (pure_python_workable_) {
     for (int i = 0; i < enum_descriptor.value_count(); ++i) {
       PrintEnumValueDescriptor(*enum_descriptor.value(i));
       printer_->Print(",\n");
     }
-  }

   printer_->Outdent();
   printer_->Print("],\n");
@@ -482,10 +463,8 @@ void Generator::PrintEnum(const EnumDescriptor& enum_descriptor) const {
   EnumDescriptorProto edp;
   printer_->Outdent();
   printer_->Print(")\n");
-  if (pure_python_workable_) {
     printer_->Print("_sym_db.RegisterEnumDescriptor($name$)\n", "name",
                     module_level_descriptor_name);
-  }
   printer_->Print("\n");
 }
@@ -535,10 +514,6 @@ void Generator::PrintServiceDescriptor(
 void Generator::PrintDescriptorKeyAndModuleName(
     const ServiceDescriptor& descriptor) const {
   std::string name = ModuleLevelServiceDescriptorName(descriptor);
-  if (!pure_python_workable_) {
-    name = "_descriptor.ServiceDescriptor(full_name='" +
-           descriptor.full_name() + "')";
-  }
   printer_->Print("$descriptor_key$ = $descriptor_name$,\n", "descriptor_key",
                   kDescriptorKey, "descriptor_name", name);
   std::string module_name = ModuleName(file_->name());
@@ -728,12 +703,7 @@ void Generator::PrintMessage(const Descriptor& message_descriptor,
   PrintNestedMessages(message_descriptor, qualified_name, to_register);
   std::map<std::string, std::string> m;
   m["descriptor_key"] = kDescriptorKey;
-  if (pure_python_workable_) {
     m["descriptor_name"] = ModuleLevelDescriptorName(message_descriptor);
-  } else {
-    m["descriptor_name"] = "_descriptor.Descriptor(full_name='" +
-                           message_descriptor.full_name() + "')";
-  }
   printer_->Print(m, "'$descriptor_key$' : $descriptor_name$,\n");
   std::string module_name = ModuleName(file_->name());
   printer_->Print("'__module__' : '$module_name$'\n", "module_name",

@@ -174,7 +174,6 @@ class PROTOC_EXPORT Generator : public CodeGenerator {
   mutable const FileDescriptor* file_;  // Set in Generate(). Under mutex_.
   mutable std::string file_descriptor_serialized_;
   mutable io::Printer* printer_;  // Set in Generate(). Under mutex_.
-  mutable bool pure_python_workable_;

   bool opensource_runtime_ = true;

@@ -1577,7 +1577,11 @@ class PROTOBUF_EXPORT FileDescriptor : private internal::SymbolBase {
   const FileOptions& options() const;

   // Syntax of this file.
-  enum Syntax {
+  enum Syntax
+#ifndef SWIG
+      : int
+#endif  // !SWIG
+  {
     SYNTAX_UNKNOWN = 0,
     SYNTAX_PROTO2 = 2,
     SYNTAX_PROTO3 = 3,
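
The #ifndef SWIG guard exists because giving an unscoped enum a fixed underlying type is what makes opaque declarations of it legal elsewhere, while SWIG's parser presumably does not accept the ':' syntax. A minimal illustration, hypothetical code rather than anything from this commit:

enum Syntax : int;  // opaque enum declaration: legal once the type is fixed
// enum Syntax;     // ill-formed: an unscoped enum needs a fixed underlying
//                  // type to be forward-declared (C++11 [dcl.enum])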

@@ -485,6 +485,11 @@ class PROTOBUF_EXPORT Reflection final {
     return internal::ToIntSize(SpaceUsedLong(message));
   }

+  // Returns true if the given message is a default message instance.
+  bool IsDefaultInstance(const Message& message) const {
+    return schema_.IsDefaultInstance(message);
+  }
+
   // Check if the given non-repeated field is set.
   bool HasField(const Message& message, const FieldDescriptor* field) const;

@@ -1343,5 +1343,13 @@ TEST(MESSAGE_TEST_NAME, TestBoolParsers) {
   }
 }

+TEST(MESSAGE_TEST_NAME, IsDefaultInstance) {
+  UNITTEST::TestAllTypes msg;
+  const auto& default_msg = UNITTEST::TestAllTypes::default_instance();
+  const auto* r = msg.GetReflection();
+  EXPECT_TRUE(r->IsDefaultInstance(default_msg));
+  EXPECT_FALSE(r->IsDefaultInstance(msg));
+}
+
 }  // namespace protobuf
 }  // namespace google

@@ -94,43 +94,30 @@ constexpr int RepeatedFieldLowerClampLimit() {
 constexpr int kRepeatedFieldUpperClampLimit =
     (std::numeric_limits<int>::max() / 2) + 1;

-// Swaps two blocks of memory of size sizeof(T).
-template <typename T>
-inline void SwapBlock(char* p, char* q) {
-  T tmp;
-  memcpy(&tmp, p, sizeof(T));
-  memcpy(p, q, sizeof(T));
-  memcpy(q, &tmp, sizeof(T));
-}
-
 // Swaps two blocks of memory of size kSize:
-//  template <int kSize> void memswap(char* p, char* q);
-template <int kSize>
-inline typename std::enable_if<(kSize == 0), void>::type memswap(char*, char*) {
-}
-
-#define PROTO_MEMSWAP_DEF_SIZE(reg_type, max_size)                           \
-  template <int kSize>                                                       \
-  typename std::enable_if<(kSize >= sizeof(reg_type) && kSize < (max_size)), \
-                          void>::type                                        \
-  memswap(char* p, char* q) {                                                \
-    SwapBlock<reg_type>(p, q);                                               \
-    memswap<kSize - sizeof(reg_type)>(p + sizeof(reg_type),                  \
-                                      q + sizeof(reg_type));                 \
-  }
-
-PROTO_MEMSWAP_DEF_SIZE(uint8_t, 2)
-PROTO_MEMSWAP_DEF_SIZE(uint16_t, 4)
-PROTO_MEMSWAP_DEF_SIZE(uint32_t, 8)
-
-#ifdef __SIZEOF_INT128__
-PROTO_MEMSWAP_DEF_SIZE(uint64_t, 16)
-PROTO_MEMSWAP_DEF_SIZE(__uint128_t, (1u << 31))
-#else
-PROTO_MEMSWAP_DEF_SIZE(uint64_t, (1u << 31))
-#endif
-
-#undef PROTO_MEMSWAP_DEF_SIZE
+template <size_t kSize>
+void memswap(char* a, char* b) {
+#if __SIZEOF_INT128__
+  using Buffer = __uint128_t;
+#else
+  using Buffer = uint64_t;
+#endif
+
+  constexpr size_t kBlockSize = sizeof(Buffer);
+  Buffer buf;
+  for (size_t i = 0; i < kSize / kBlockSize; ++i) {
+    memcpy(&buf, a, kBlockSize);
+    memcpy(a, b, kBlockSize);
+    memcpy(b, &buf, kBlockSize);
+    a += kBlockSize;
+    b += kBlockSize;
+  }
+
+  // Swap the leftover bytes, could be zero.
+  memcpy(&buf, a, kSize % kBlockSize);
+  memcpy(a, b, kSize % kBlockSize);
+  memcpy(b, &buf, kSize % kBlockSize);
+}

 template <typename Element>
 class RepeatedIterator;
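
The rewrite drops the recursive, macro-generated overload set in favor of a single loop: whole Buffer-sized blocks are exchanged through memcpy (which compilers typically lower to plain register moves), and one tail copy handles the remaining kSize % kBlockSize bytes. A self-contained sketch of the same shape, renamed memswap_sketch to avoid implying protobuf's internal API and pinned to a uint64_t buffer for portability:

#include <cstdint>
#include <cstdio>
#include <cstring>

template <size_t kSize>
void memswap_sketch(char* a, char* b) {
  using Buffer = uint64_t;  // the patch prefers __uint128_t when available
  constexpr size_t kBlockSize = sizeof(Buffer);
  Buffer buf;
  // Swap whole blocks through a temporary; memcpy sidesteps alignment and
  // strict-aliasing concerns.
  for (size_t i = 0; i < kSize / kBlockSize; ++i) {
    std::memcpy(&buf, a, kBlockSize);
    std::memcpy(a, b, kBlockSize);
    std::memcpy(b, &buf, kBlockSize);
    a += kBlockSize;
    b += kBlockSize;
  }
  // Swap the leftover bytes (possibly zero).
  std::memcpy(&buf, a, kSize % kBlockSize);
  std::memcpy(a, b, kSize % kBlockSize);
  std::memcpy(b, &buf, kSize % kBlockSize);
}

int main() {
  char x[] = "abcdefghi";
  char y[] = "ABCDEFGHI";
  memswap_sketch<sizeof(x)>(x, y);  // 10 bytes: one 8-byte block + 2-byte tail
  std::printf("%s %s\n", x, y);     // prints: ABCDEFGHI abcdefghi
  return 0;
}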

@@ -305,6 +305,60 @@ TEST(RepeatedField, SwapLargeLarge) {
   }
 }

+template <int kSize>
+void TestMemswap() {
+  SCOPED_TRACE(kSize);
+
+  const auto a_char = [](int i) -> char { return (i % ('z' - 'a')) + 'a'; };
+  const auto b_char = [](int i) -> char { return (i % ('Z' - 'A')) + 'A'; };
+  std::string a, b;
+  for (int i = 0; i < kSize; ++i) {
+    a += a_char(i);
+    b += b_char(i);
+  }
+  // We will not swap these.
+  a += "+";
+  b += "-";
+
+  std::string expected_a = b, expected_b = a;
+  expected_a.back() = '+';
+  expected_b.back() = '-';
+
+  internal::memswap<kSize>(&a[0], &b[0]);
+
+  // ODR use the functions in a way that forces the linker to keep them. That
+  // way we can see their generated code.
+  volatile auto odr_use_for_asm_dump = &internal::memswap<kSize>;
+  (void)odr_use_for_asm_dump;
+
+  EXPECT_EQ(expected_a, a);
+  EXPECT_EQ(expected_b, b);
+}
+
+TEST(Memswap, VerifyWithSmallAndLargeSizes) {
+  // Arbitrary sizes
+  TestMemswap<0>();
+  TestMemswap<1>();
+  TestMemswap<10>();
+  TestMemswap<100>();
+  TestMemswap<1000>();
+  TestMemswap<10000>();
+  TestMemswap<100000>();
+  TestMemswap<1000000>();
+
+  // Pointer aligned sizes
+  TestMemswap<sizeof(void*) * 1>();
+  TestMemswap<sizeof(void*) * 7>();
+  TestMemswap<sizeof(void*) * 17>();
+  TestMemswap<sizeof(void*) * 27>();
+
+  // Test also just the block size and no leftover.
+  TestMemswap<64 * 1>();
+  TestMemswap<64 * 2>();
+  TestMemswap<64 * 3>();
+  TestMemswap<64 * 4>();
+}
+
 // Determines how much space was reserved by the given field by adding elements
 // to it until it re-allocates its space.
 static int ReservedSpace(RepeatedField<int>* field) {
