Merge pull request #8076 from haberman/sync-stage

Integrate from Piper for C++, Java, and Python
pull/7180/head^2
Joshua Haberman 4 years ago committed by GitHub
commit bd9a7104e1
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
  1. 26
      CHANGES.txt
  2. 12
      conformance/conformance_cpp.cc
  3. 42
      java/core/src/main/java/com/google/protobuf/FieldSet.java
  4. 25
      java/core/src/main/java/com/google/protobuf/TextFormat.java
  5. 2
      java/core/src/main/java/com/google/protobuf/UnsafeUtil.java
  6. 24
      java/core/src/test/java/com/google/protobuf/MapForProto2LiteTest.java
  7. 86
      java/core/src/test/java/com/google/protobuf/MapForProto2Test.java
  8. 24
      java/core/src/test/java/com/google/protobuf/MapLiteTest.java
  9. 24
      java/core/src/test/java/com/google/protobuf/MapTest.java
  10. 4
      java/core/src/test/java/com/google/protobuf/TestUtil.java
  11. 4
      java/core/src/test/java/com/google/protobuf/TestUtilLite.java
  12. 29
      java/util/src/main/java/com/google/protobuf/util/Durations.java
  13. 28
      java/util/src/main/java/com/google/protobuf/util/Timestamps.java
  14. 3
      python/google/protobuf/internal/containers.py
  15. 65
      python/google/protobuf/internal/reflection_test.py
  16. 251
      python/google/protobuf/pyext/repeated_composite_container.cc
  17. 413
      python/google/protobuf/pyext/repeated_scalar_container.cc
  18. 10
      src/google/protobuf/any.cc
  19. 17
      src/google/protobuf/any.h
  20. 4
      src/google/protobuf/any.pb.cc
  21. 16
      src/google/protobuf/any.pb.h
  22. 4
      src/google/protobuf/any_lite.cc
  23. 11
      src/google/protobuf/any_test.cc
  24. 3
      src/google/protobuf/any_test.proto
  25. 4
      src/google/protobuf/api.pb.cc
  26. 593
      src/google/protobuf/arena.cc
  27. 216
      src/google/protobuf/arena.h
  28. 358
      src/google/protobuf/arena_impl.h
  29. 10
      src/google/protobuf/arena_unittest.cc
  30. 2
      src/google/protobuf/arenastring.cc
  31. 2
      src/google/protobuf/arenastring_unittest.cc
  32. 237
      src/google/protobuf/compiler/command_line_interface.cc
  33. 4
      src/google/protobuf/compiler/cpp/cpp_extension.cc
  34. 11
      src/google/protobuf/compiler/cpp/cpp_file.cc
  35. 8
      src/google/protobuf/compiler/cpp/cpp_helpers.cc
  36. 4
      src/google/protobuf/compiler/cpp/cpp_helpers.h
  37. 53
      src/google/protobuf/compiler/cpp/cpp_message.cc
  38. 12
      src/google/protobuf/compiler/java/java_helpers.cc
  39. 1
      src/google/protobuf/compiler/java/java_helpers.h
  40. 1
      src/google/protobuf/compiler/java/java_name_resolver.cc
  41. 6
      src/google/protobuf/compiler/js/js_generator.cc
  42. 4
      src/google/protobuf/compiler/plugin.pb.cc
  43. 18
      src/google/protobuf/descriptor.cc
  44. 3
      src/google/protobuf/descriptor.h
  45. 4
      src/google/protobuf/descriptor.pb.cc
  46. 8
      src/google/protobuf/descriptor_database.cc
  47. 17
      src/google/protobuf/descriptor_unittest.cc
  48. 4
      src/google/protobuf/duration.pb.cc
  49. 2
      src/google/protobuf/dynamic_message.cc
  50. 4
      src/google/protobuf/empty.pb.cc
  51. 3
      src/google/protobuf/extension_set.cc
  52. 4
      src/google/protobuf/extension_set.h
  53. 2
      src/google/protobuf/extension_set_heavy.cc
  54. 2
      src/google/protobuf/extension_set_unittest.cc
  55. 4
      src/google/protobuf/field_mask.pb.cc
  56. 2
      src/google/protobuf/generated_enum_util.cc
  57. 43
      src/google/protobuf/generated_message_reflection.cc
  58. 11
      src/google/protobuf/generated_message_reflection.h
  59. 2
      src/google/protobuf/generated_message_table_driven_lite.h
  60. 18
      src/google/protobuf/generated_message_util.cc
  61. 2
      src/google/protobuf/implicit_weak_message.cc
  62. 8
      src/google/protobuf/io/coded_stream.cc
  63. 2
      src/google/protobuf/io/coded_stream_unittest.cc
  64. 2
      src/google/protobuf/io/gzip_stream.cc
  65. 8
      src/google/protobuf/io/printer.cc
  66. 10
      src/google/protobuf/io/tokenizer.cc
  67. 2
      src/google/protobuf/io/zero_copy_stream_impl_lite.cc
  68. 2
      src/google/protobuf/map.h
  69. 2
      src/google/protobuf/map_field.cc
  70. 22
      src/google/protobuf/map_field.h
  71. 2
      src/google/protobuf/map_field_test.cc
  72. 2
      src/google/protobuf/map_test.cc
  73. 2
      src/google/protobuf/message.cc
  74. 3
      src/google/protobuf/message.h
  75. 6
      src/google/protobuf/message_lite.cc
  76. 2
      src/google/protobuf/message_lite.h
  77. 4
      src/google/protobuf/message_unittest.inc
  78. 2
      src/google/protobuf/parse_context.cc
  79. 4
      src/google/protobuf/port.h
  80. 69
      src/google/protobuf/port_def.inc
  81. 10
      src/google/protobuf/port_undef.inc
  82. 2
      src/google/protobuf/reflection_ops.cc
  83. 8
      src/google/protobuf/repeated_field.cc
  84. 6
      src/google/protobuf/repeated_field.h
  85. 2
      src/google/protobuf/repeated_field_unittest.cc
  86. 4
      src/google/protobuf/source_context.pb.cc
  87. 4
      src/google/protobuf/struct.pb.cc
  88. 2
      src/google/protobuf/stubs/common.cc
  89. 2
      src/google/protobuf/stubs/int128.cc
  90. 2
      src/google/protobuf/stubs/int128_unittest.cc
  91. 33
      src/google/protobuf/stubs/mutex.h
  92. 10
      src/google/protobuf/stubs/port.h
  93. 15
      src/google/protobuf/text_format.cc
  94. 2
      src/google/protobuf/text_format_unittest.cc
  95. 4
      src/google/protobuf/timestamp.pb.cc
  96. 4
      src/google/protobuf/type.pb.cc
  97. 8
      src/google/protobuf/unknown_field_set.cc
  98. 6
      src/google/protobuf/util/delimited_message_util.cc
  99. 29
      src/google/protobuf/util/delimited_message_util_test.cc
  100. 22
      src/google/protobuf/util/message_differencer.cc
  101. Some files were not shown because too many files have changed in this diff Show More

@ -1,3 +1,29 @@
Unreleased Changes (C++/Java/Python/PHP/Objective-C/C#/Ruby/JavaScript)
C++
* MessageDifferencer: fixed bug when using custom ignore with multiple
unknown fields
* Use init_seg in MSVC to push initialization to an earlier phase.
* Runtime no longer triggers -Wsign-compare warnings.
* Fixed -Wtautological-constant-out-of-range-compare warning.
* DynamicCastToGenerated works for nullptr input even if RTTI is disabled
* Arena is refactored and optimized.
* Clarified/specified that the exact value of Arena::SpaceAllocated() is an
implementation detail users must not rely on. It should not be used in
unit tests.
* Change the signature of Any::PackFrom() to return false on error.
Java
* Avoid possible UnsupportedOperationException when using CodedInputStream
with a direct ByteBuffer.
* Make Durations.comparator() and Timestamps.comparator() Serializable.
* Add more detailed error information for dynamic message field type
validation failure
Python
* Provided an override for the reverse() method that will reverse the internal
collection directly instead of using the other methods of the BaseContainer.
2020-11-11 version 3.14.0 (C++/Java/Python/PHP/Objective-C/C#/Ruby/JavaScript)
Protocol Compiler

@ -36,6 +36,7 @@
#include <google/protobuf/text_format.h> #include <google/protobuf/text_format.h>
#include <google/protobuf/util/json_util.h> #include <google/protobuf/util/json_util.h>
#include <google/protobuf/util/type_resolver_util.h> #include <google/protobuf/util/type_resolver_util.h>
#include <google/protobuf/stubs/status.h>
#include "conformance.pb.h" #include "conformance.pb.h"
#include <google/protobuf/test_messages_proto2.pb.h> #include <google/protobuf/test_messages_proto2.pb.h>
#include <google/protobuf/test_messages_proto3.pb.h> #include <google/protobuf/test_messages_proto3.pb.h>
@ -125,9 +126,9 @@ void DoTest(const ConformanceRequest& request, ConformanceResponse* response) {
options.ignore_unknown_fields = options.ignore_unknown_fields =
(request.test_category() == (request.test_category() ==
conformance::JSON_IGNORE_UNKNOWN_PARSING_TEST); conformance::JSON_IGNORE_UNKNOWN_PARSING_TEST);
Status status = JsonToBinaryString(type_resolver, *type_url, util::Status status =
request.json_payload(), &proto_binary, JsonToBinaryString(type_resolver, *type_url, request.json_payload(),
options); &proto_binary, options);
if (!status.ok()) { if (!status.ok()) {
response->set_parse_error(string("Parse error: ") + response->set_parse_error(string("Parse error: ") +
std::string(status.error_message())); std::string(status.error_message()));
@ -179,8 +180,9 @@ void DoTest(const ConformanceRequest& request, ConformanceResponse* response) {
case conformance::JSON: { case conformance::JSON: {
string proto_binary; string proto_binary;
GOOGLE_CHECK(test_message->SerializeToString(&proto_binary)); GOOGLE_CHECK(test_message->SerializeToString(&proto_binary));
Status status = BinaryToJsonString(type_resolver, *type_url, proto_binary, util::Status status =
response->mutable_json_payload()); BinaryToJsonString(type_resolver, *type_url, proto_binary,
response->mutable_json_payload());
if (!status.ok()) { if (!status.ok()) {
response->set_serialize_error( response->set_serialize_error(
string("Failed to serialize JSON output: ") + string("Failed to serialize JSON output: ") +

@ -119,7 +119,6 @@ final class FieldSet<T extends FieldSet.FieldDescriptorLite<T>> {
} }
/** Make this FieldSet immutable from this point forward. */ /** Make this FieldSet immutable from this point forward. */
@SuppressWarnings("unchecked")
public void makeImmutable() { public void makeImmutable() {
if (isImmutable) { if (isImmutable) {
return; return;
@ -286,11 +285,11 @@ final class FieldSet<T extends FieldSet.FieldDescriptorLite<T>> {
final List newList = new ArrayList(); final List newList = new ArrayList();
newList.addAll((List) value); newList.addAll((List) value);
for (final Object element : newList) { for (final Object element : newList) {
verifyType(descriptor.getLiteType(), element); verifyType(descriptor, element);
} }
value = newList; value = newList;
} else { } else {
verifyType(descriptor.getLiteType(), value); verifyType(descriptor, value);
} }
if (value instanceof LazyField) { if (value instanceof LazyField) {
@ -354,7 +353,7 @@ final class FieldSet<T extends FieldSet.FieldDescriptorLite<T>> {
throw new IndexOutOfBoundsException(); throw new IndexOutOfBoundsException();
} }
verifyType(descriptor.getLiteType(), value); verifyType(descriptor, value);
((List<Object>) list).set(index, value); ((List<Object>) list).set(index, value);
} }
@ -369,7 +368,7 @@ final class FieldSet<T extends FieldSet.FieldDescriptorLite<T>> {
"addRepeatedField() can only be called on repeated fields."); "addRepeatedField() can only be called on repeated fields.");
} }
verifyType(descriptor.getLiteType(), value); verifyType(descriptor, value);
final Object existingValue = getField(descriptor); final Object existingValue = getField(descriptor);
List<Object> list; List<Object> list;
@ -390,8 +389,8 @@ final class FieldSet<T extends FieldSet.FieldDescriptorLite<T>> {
* *
* @throws IllegalArgumentException The value is not of the right type. * @throws IllegalArgumentException The value is not of the right type.
*/ */
private void verifyType(final WireFormat.FieldType type, final Object value) { private void verifyType(final T descriptor, final Object value) {
if (!isValidType(type, value)) { if (!isValidType(descriptor.getLiteType(), value)) {
// TODO(kenton): When chaining calls to setField(), it can be hard to // TODO(kenton): When chaining calls to setField(), it can be hard to
// tell from the stack trace which exact call failed, since the whole // tell from the stack trace which exact call failed, since the whole
// chain is considered one line of code. It would be nice to print // chain is considered one line of code. It would be nice to print
@ -400,10 +399,16 @@ final class FieldSet<T extends FieldSet.FieldDescriptorLite<T>> {
// isn't a big deal, though, since it would only really apply when using // isn't a big deal, though, since it would only really apply when using
// reflection and generally people don't chain reflection setters. // reflection and generally people don't chain reflection setters.
throw new IllegalArgumentException( throw new IllegalArgumentException(
"Wrong object type used with protocol message reflection."); String.format(
"Wrong object type used with protocol message reflection.\n"
+ "Field number: %d, field java type: %s, value type: %s\n",
descriptor.getNumber(),
descriptor.getLiteType().getJavaType(),
value.getClass().getName()));
} }
} }
private static boolean isValidType(final WireFormat.FieldType type, final Object value) { private static boolean isValidType(final WireFormat.FieldType type, final Object value) {
checkNotNull(value); checkNotNull(value);
switch (type.getJavaType()) { switch (type.getJavaType()) {
@ -1081,12 +1086,12 @@ final class FieldSet<T extends FieldSet.FieldDescriptorLite<T>> {
final List newList = new ArrayList(); final List newList = new ArrayList();
newList.addAll((List) value); newList.addAll((List) value);
for (final Object element : newList) { for (final Object element : newList) {
verifyType(descriptor.getLiteType(), element); verifyType(descriptor, element);
hasNestedBuilders = hasNestedBuilders || element instanceof MessageLite.Builder; hasNestedBuilders = hasNestedBuilders || element instanceof MessageLite.Builder;
} }
value = newList; value = newList;
} else { } else {
verifyType(descriptor.getLiteType(), value); verifyType(descriptor, value);
} }
if (value instanceof LazyField) { if (value instanceof LazyField) {
@ -1172,7 +1177,7 @@ final class FieldSet<T extends FieldSet.FieldDescriptorLite<T>> {
throw new IndexOutOfBoundsException(); throw new IndexOutOfBoundsException();
} }
verifyType(descriptor.getLiteType(), value); verifyType(descriptor, value);
((List<Object>) list).set(index, value); ((List<Object>) list).set(index, value);
} }
@ -1190,7 +1195,7 @@ final class FieldSet<T extends FieldSet.FieldDescriptorLite<T>> {
hasNestedBuilders = hasNestedBuilders || value instanceof MessageLite.Builder; hasNestedBuilders = hasNestedBuilders || value instanceof MessageLite.Builder;
verifyType(descriptor.getLiteType(), value); verifyType(descriptor, value);
final Object existingValue = getField(descriptor); final Object existingValue = getField(descriptor);
List<Object> list; List<Object> list;
@ -1211,15 +1216,20 @@ final class FieldSet<T extends FieldSet.FieldDescriptorLite<T>> {
* *
* @throws IllegalArgumentException The value is not of the right type. * @throws IllegalArgumentException The value is not of the right type.
*/ */
private static void verifyType(final WireFormat.FieldType type, final Object value) { private void verifyType(final T descriptor, final Object value) {
if (!FieldSet.isValidType(type, value)) { if (!FieldSet.isValidType(descriptor.getLiteType(), value)) {
// Builder can accept Message.Builder values even though FieldSet will reject. // Builder can accept Message.Builder values even though FieldSet will reject.
if (type.getJavaType() == WireFormat.JavaType.MESSAGE if (descriptor.getLiteType().getJavaType() == WireFormat.JavaType.MESSAGE
&& value instanceof MessageLite.Builder) { && value instanceof MessageLite.Builder) {
return; return;
} }
throw new IllegalArgumentException( throw new IllegalArgumentException(
"Wrong object type used with protocol message reflection."); String.format(
"Wrong object type used with protocol message reflection.\n"
+ "Field number: %d, field java type: %s, value type: %s\n",
descriptor.getNumber(),
descriptor.getLiteType().getJavaType(),
value.getClass().getName()));
} }
} }

@ -61,6 +61,7 @@ public final class TextFormat {
private static final Logger logger = Logger.getLogger(TextFormat.class.getName()); private static final Logger logger = Logger.getLogger(TextFormat.class.getName());
/** /**
* Outputs a textual representation of the Protocol Message supplied into the parameter output. * Outputs a textual representation of the Protocol Message supplied into the parameter output.
* (This representation is the new version of the classic "ProtocolPrinter" output from the * (This representation is the new version of the classic "ProtocolPrinter" output from the
@ -727,9 +728,9 @@ public final class TextFormat {
// Groups must be serialized with their original capitalization. // Groups must be serialized with their original capitalization.
generator.print(field.getMessageType().getName()); generator.print(field.getMessageType().getName());
} else { } else {
generator.print(field.getName()); generator.print(field.getName());
}
} }
}
if (field.getJavaType() == FieldDescriptor.JavaType.MESSAGE) { if (field.getJavaType() == FieldDescriptor.JavaType.MESSAGE) {
generator.print(" {"); generator.print(" {");
@ -1811,16 +1812,16 @@ public final class TextFormat {
extension = target.findExtensionByName(extensionRegistry, name.toString()); extension = target.findExtensionByName(extensionRegistry, name.toString());
if (extension == null) { if (extension == null) {
String message = String message =
(tokenizer.getPreviousLine() + 1) (tokenizer.getPreviousLine() + 1)
+ ":" + ":"
+ (tokenizer.getPreviousColumn() + 1) + (tokenizer.getPreviousColumn() + 1)
+ ":\t" + ":\t"
+ type.getFullName() + type.getFullName()
+ ".[" + ".["
+ name + name
+ "]"; + "]";
unknownFields.add(new UnknownField(message, UnknownField.Type.EXTENSION)); unknownFields.add(new UnknownField(message, UnknownField.Type.EXTENSION));
} else { } else {
if (extension.descriptor.getContainingType() != type) { if (extension.descriptor.getContainingType() != type) {
throw tokenizer.parseExceptionPreviousToken( throw tokenizer.parseExceptionPreviousToken(

@ -388,7 +388,7 @@ final class UnsafeUtil {
} }
if (Android.isOnAndroidDevice()) { if (Android.isOnAndroidDevice()) {
return true; return false;
} }
clazz.getMethod("getByte", long.class); clazz.getMethod("getByte", long.class);
clazz.getMethod("putByte", long.class, byte.class); clazz.getMethod("putByte", long.class, byte.class);

@ -184,17 +184,17 @@ public final class MapForProto2LiteTest extends TestCase {
} }
private void assertMapValuesCleared(TestMapOrBuilder testMapOrBuilder) { private void assertMapValuesCleared(TestMapOrBuilder testMapOrBuilder) {
assertEquals(0, testMapOrBuilder.getInt32ToInt32Field().size()); assertEquals(0, testMapOrBuilder.getInt32ToInt32FieldMap().size());
assertEquals(0, testMapOrBuilder.getInt32ToInt32FieldCount()); assertEquals(0, testMapOrBuilder.getInt32ToInt32FieldCount());
assertEquals(0, testMapOrBuilder.getInt32ToStringField().size()); assertEquals(0, testMapOrBuilder.getInt32ToStringFieldMap().size());
assertEquals(0, testMapOrBuilder.getInt32ToStringFieldCount()); assertEquals(0, testMapOrBuilder.getInt32ToStringFieldCount());
assertEquals(0, testMapOrBuilder.getInt32ToBytesField().size()); assertEquals(0, testMapOrBuilder.getInt32ToBytesFieldMap().size());
assertEquals(0, testMapOrBuilder.getInt32ToBytesFieldCount()); assertEquals(0, testMapOrBuilder.getInt32ToBytesFieldCount());
assertEquals(0, testMapOrBuilder.getInt32ToEnumField().size()); assertEquals(0, testMapOrBuilder.getInt32ToEnumFieldMap().size());
assertEquals(0, testMapOrBuilder.getInt32ToEnumFieldCount()); assertEquals(0, testMapOrBuilder.getInt32ToEnumFieldCount());
assertEquals(0, testMapOrBuilder.getInt32ToMessageField().size()); assertEquals(0, testMapOrBuilder.getInt32ToMessageFieldMap().size());
assertEquals(0, testMapOrBuilder.getInt32ToMessageFieldCount()); assertEquals(0, testMapOrBuilder.getInt32ToMessageFieldCount());
assertEquals(0, testMapOrBuilder.getStringToInt32Field().size()); assertEquals(0, testMapOrBuilder.getStringToInt32FieldMap().size());
assertEquals(0, testMapOrBuilder.getStringToInt32FieldCount()); assertEquals(0, testMapOrBuilder.getStringToInt32FieldCount());
} }
@ -226,13 +226,13 @@ public final class MapForProto2LiteTest extends TestCase {
} }
private void assertMapsAreImmutable(TestMapOrBuilder testMapOrBuilder) { private void assertMapsAreImmutable(TestMapOrBuilder testMapOrBuilder) {
assertImmutable(testMapOrBuilder.getInt32ToInt32Field(), 1, 2); assertImmutable(testMapOrBuilder.getInt32ToInt32FieldMap(), 1, 2);
assertImmutable(testMapOrBuilder.getInt32ToStringField(), 1, "2"); assertImmutable(testMapOrBuilder.getInt32ToStringFieldMap(), 1, "2");
assertImmutable(testMapOrBuilder.getInt32ToBytesField(), 1, TestUtil.toBytes("2")); assertImmutable(testMapOrBuilder.getInt32ToBytesFieldMap(), 1, TestUtil.toBytes("2"));
assertImmutable(testMapOrBuilder.getInt32ToEnumField(), 1, TestMap.EnumValue.FOO); assertImmutable(testMapOrBuilder.getInt32ToEnumFieldMap(), 1, TestMap.EnumValue.FOO);
assertImmutable( assertImmutable(
testMapOrBuilder.getInt32ToMessageField(), 1, MessageValue.getDefaultInstance()); testMapOrBuilder.getInt32ToMessageFieldMap(), 1, MessageValue.getDefaultInstance());
assertImmutable(testMapOrBuilder.getStringToInt32Field(), "1", 2); assertImmutable(testMapOrBuilder.getStringToInt32FieldMap(), "1", 2);
} }
private <K, V> void assertImmutable(Map<K, V> map, K key, V value) { private <K, V> void assertImmutable(Map<K, V> map, K key, V value) {

@ -128,35 +128,35 @@ public class MapForProto2Test extends TestCase {
} }
private void assertMapValuesSet(TestMapOrBuilder message) { private void assertMapValuesSet(TestMapOrBuilder message) {
assertEquals(3, message.getInt32ToInt32Field().size()); assertEquals(3, message.getInt32ToInt32FieldMap().size());
assertEquals(11, message.getInt32ToInt32Field().get(1).intValue()); assertEquals(11, message.getInt32ToInt32FieldMap().get(1).intValue());
assertEquals(22, message.getInt32ToInt32Field().get(2).intValue()); assertEquals(22, message.getInt32ToInt32FieldMap().get(2).intValue());
assertEquals(33, message.getInt32ToInt32Field().get(3).intValue()); assertEquals(33, message.getInt32ToInt32FieldMap().get(3).intValue());
assertEquals(3, message.getInt32ToStringField().size()); assertEquals(3, message.getInt32ToStringFieldMap().size());
assertEquals("11", message.getInt32ToStringField().get(1)); assertEquals("11", message.getInt32ToStringFieldMap().get(1));
assertEquals("22", message.getInt32ToStringField().get(2)); assertEquals("22", message.getInt32ToStringFieldMap().get(2));
assertEquals("33", message.getInt32ToStringField().get(3)); assertEquals("33", message.getInt32ToStringFieldMap().get(3));
assertEquals(3, message.getInt32ToBytesField().size()); assertEquals(3, message.getInt32ToBytesFieldMap().size());
assertEquals(TestUtil.toBytes("11"), message.getInt32ToBytesField().get(1)); assertEquals(TestUtil.toBytes("11"), message.getInt32ToBytesFieldMap().get(1));
assertEquals(TestUtil.toBytes("22"), message.getInt32ToBytesField().get(2)); assertEquals(TestUtil.toBytes("22"), message.getInt32ToBytesFieldMap().get(2));
assertEquals(TestUtil.toBytes("33"), message.getInt32ToBytesField().get(3)); assertEquals(TestUtil.toBytes("33"), message.getInt32ToBytesFieldMap().get(3));
assertEquals(3, message.getInt32ToEnumField().size()); assertEquals(3, message.getInt32ToEnumFieldMap().size());
assertEquals(TestMap.EnumValue.FOO, message.getInt32ToEnumField().get(1)); assertEquals(TestMap.EnumValue.FOO, message.getInt32ToEnumFieldMap().get(1));
assertEquals(TestMap.EnumValue.BAR, message.getInt32ToEnumField().get(2)); assertEquals(TestMap.EnumValue.BAR, message.getInt32ToEnumFieldMap().get(2));
assertEquals(TestMap.EnumValue.BAZ, message.getInt32ToEnumField().get(3)); assertEquals(TestMap.EnumValue.BAZ, message.getInt32ToEnumFieldMap().get(3));
assertEquals(3, message.getInt32ToMessageField().size()); assertEquals(3, message.getInt32ToMessageFieldMap().size());
assertEquals(11, message.getInt32ToMessageField().get(1).getValue()); assertEquals(11, message.getInt32ToMessageFieldMap().get(1).getValue());
assertEquals(22, message.getInt32ToMessageField().get(2).getValue()); assertEquals(22, message.getInt32ToMessageFieldMap().get(2).getValue());
assertEquals(33, message.getInt32ToMessageField().get(3).getValue()); assertEquals(33, message.getInt32ToMessageFieldMap().get(3).getValue());
assertEquals(3, message.getStringToInt32Field().size()); assertEquals(3, message.getStringToInt32FieldMap().size());
assertEquals(11, message.getStringToInt32Field().get("1").intValue()); assertEquals(11, message.getStringToInt32FieldMap().get("1").intValue());
assertEquals(22, message.getStringToInt32Field().get("2").intValue()); assertEquals(22, message.getStringToInt32FieldMap().get("2").intValue());
assertEquals(33, message.getStringToInt32Field().get("3").intValue()); assertEquals(33, message.getStringToInt32FieldMap().get("3").intValue());
} }
private void updateMapValuesUsingMutableMap(TestMap.Builder builder) { private void updateMapValuesUsingMutableMap(TestMap.Builder builder) {
@ -268,17 +268,17 @@ public class MapForProto2Test extends TestCase {
} }
private void assertMapValuesCleared(TestMapOrBuilder testMapOrBuilder) { private void assertMapValuesCleared(TestMapOrBuilder testMapOrBuilder) {
assertEquals(0, testMapOrBuilder.getInt32ToInt32Field().size()); assertEquals(0, testMapOrBuilder.getInt32ToInt32FieldMap().size());
assertEquals(0, testMapOrBuilder.getInt32ToInt32FieldCount()); assertEquals(0, testMapOrBuilder.getInt32ToInt32FieldCount());
assertEquals(0, testMapOrBuilder.getInt32ToStringField().size()); assertEquals(0, testMapOrBuilder.getInt32ToStringFieldMap().size());
assertEquals(0, testMapOrBuilder.getInt32ToStringFieldCount()); assertEquals(0, testMapOrBuilder.getInt32ToStringFieldCount());
assertEquals(0, testMapOrBuilder.getInt32ToBytesField().size()); assertEquals(0, testMapOrBuilder.getInt32ToBytesFieldMap().size());
assertEquals(0, testMapOrBuilder.getInt32ToBytesFieldCount()); assertEquals(0, testMapOrBuilder.getInt32ToBytesFieldCount());
assertEquals(0, testMapOrBuilder.getInt32ToEnumField().size()); assertEquals(0, testMapOrBuilder.getInt32ToEnumFieldMap().size());
assertEquals(0, testMapOrBuilder.getInt32ToEnumFieldCount()); assertEquals(0, testMapOrBuilder.getInt32ToEnumFieldCount());
assertEquals(0, testMapOrBuilder.getInt32ToMessageField().size()); assertEquals(0, testMapOrBuilder.getInt32ToMessageFieldMap().size());
assertEquals(0, testMapOrBuilder.getInt32ToMessageFieldCount()); assertEquals(0, testMapOrBuilder.getInt32ToMessageFieldCount());
assertEquals(0, testMapOrBuilder.getStringToInt32Field().size()); assertEquals(0, testMapOrBuilder.getStringToInt32FieldMap().size());
assertEquals(0, testMapOrBuilder.getStringToInt32FieldCount()); assertEquals(0, testMapOrBuilder.getStringToInt32FieldCount());
} }
@ -293,13 +293,13 @@ public class MapForProto2Test extends TestCase {
} }
private void assertMapsAreImmutable(TestMapOrBuilder testMapOrBuilder) { private void assertMapsAreImmutable(TestMapOrBuilder testMapOrBuilder) {
assertImmutable(testMapOrBuilder.getInt32ToInt32Field(), 1, 2); assertImmutable(testMapOrBuilder.getInt32ToInt32FieldMap(), 1, 2);
assertImmutable(testMapOrBuilder.getInt32ToStringField(), 1, "2"); assertImmutable(testMapOrBuilder.getInt32ToStringFieldMap(), 1, "2");
assertImmutable(testMapOrBuilder.getInt32ToBytesField(), 1, TestUtil.toBytes("2")); assertImmutable(testMapOrBuilder.getInt32ToBytesFieldMap(), 1, TestUtil.toBytes("2"));
assertImmutable(testMapOrBuilder.getInt32ToEnumField(), 1, TestMap.EnumValue.FOO); assertImmutable(testMapOrBuilder.getInt32ToEnumFieldMap(), 1, TestMap.EnumValue.FOO);
assertImmutable( assertImmutable(
testMapOrBuilder.getInt32ToMessageField(), 1, MessageValue.getDefaultInstance()); testMapOrBuilder.getInt32ToMessageFieldMap(), 1, MessageValue.getDefaultInstance());
assertImmutable(testMapOrBuilder.getStringToInt32Field(), "1", 2); assertImmutable(testMapOrBuilder.getStringToInt32FieldMap(), "1", 2);
} }
private <K, V> void assertImmutable(Map<K, V> map, K key, V value) { private <K, V> void assertImmutable(Map<K, V> map, K key, V value) {
@ -874,8 +874,8 @@ public class MapForProto2Test extends TestCase {
ByteString data = builder.build().toByteString(); ByteString data = builder.build().toByteString();
TestRecursiveMap message = TestRecursiveMap.parseFrom(data); TestRecursiveMap message = TestRecursiveMap.parseFrom(data);
assertEquals(2, message.getRecursiveMapField().get(1).getValue()); assertEquals(2, message.getRecursiveMapFieldMap().get(1).getValue());
assertEquals(4, message.getRecursiveMapField().get(3).getValue()); assertEquals(4, message.getRecursiveMapFieldMap().get(3).getValue());
} }
public void testIterationOrder() throws Exception { public void testIterationOrder() throws Exception {

@ -191,17 +191,17 @@ public final class MapLiteTest extends TestCase {
} }
private void assertMapValuesCleared(TestMapOrBuilder testMapOrBuilder) { private void assertMapValuesCleared(TestMapOrBuilder testMapOrBuilder) {
assertEquals(0, testMapOrBuilder.getInt32ToInt32Field().size()); assertEquals(0, testMapOrBuilder.getInt32ToInt32FieldMap().size());
assertEquals(0, testMapOrBuilder.getInt32ToInt32FieldCount()); assertEquals(0, testMapOrBuilder.getInt32ToInt32FieldCount());
assertEquals(0, testMapOrBuilder.getInt32ToStringField().size()); assertEquals(0, testMapOrBuilder.getInt32ToStringFieldMap().size());
assertEquals(0, testMapOrBuilder.getInt32ToStringFieldCount()); assertEquals(0, testMapOrBuilder.getInt32ToStringFieldCount());
assertEquals(0, testMapOrBuilder.getInt32ToBytesField().size()); assertEquals(0, testMapOrBuilder.getInt32ToBytesFieldMap().size());
assertEquals(0, testMapOrBuilder.getInt32ToBytesFieldCount()); assertEquals(0, testMapOrBuilder.getInt32ToBytesFieldCount());
assertEquals(0, testMapOrBuilder.getInt32ToEnumField().size()); assertEquals(0, testMapOrBuilder.getInt32ToEnumFieldMap().size());
assertEquals(0, testMapOrBuilder.getInt32ToEnumFieldCount()); assertEquals(0, testMapOrBuilder.getInt32ToEnumFieldCount());
assertEquals(0, testMapOrBuilder.getInt32ToMessageField().size()); assertEquals(0, testMapOrBuilder.getInt32ToMessageFieldMap().size());
assertEquals(0, testMapOrBuilder.getInt32ToMessageFieldCount()); assertEquals(0, testMapOrBuilder.getInt32ToMessageFieldCount());
assertEquals(0, testMapOrBuilder.getStringToInt32Field().size()); assertEquals(0, testMapOrBuilder.getStringToInt32FieldMap().size());
assertEquals(0, testMapOrBuilder.getStringToInt32FieldCount()); assertEquals(0, testMapOrBuilder.getStringToInt32FieldCount());
} }
@ -232,13 +232,13 @@ public final class MapLiteTest extends TestCase {
} }
private void assertMapsAreImmutable(TestMapOrBuilder testMapOrBuilder) { private void assertMapsAreImmutable(TestMapOrBuilder testMapOrBuilder) {
assertImmutable(testMapOrBuilder.getInt32ToInt32Field(), 1, 2); assertImmutable(testMapOrBuilder.getInt32ToInt32FieldMap(), 1, 2);
assertImmutable(testMapOrBuilder.getInt32ToStringField(), 1, "2"); assertImmutable(testMapOrBuilder.getInt32ToStringFieldMap(), 1, "2");
assertImmutable(testMapOrBuilder.getInt32ToBytesField(), 1, TestUtil.toBytes("2")); assertImmutable(testMapOrBuilder.getInt32ToBytesFieldMap(), 1, TestUtil.toBytes("2"));
assertImmutable(testMapOrBuilder.getInt32ToEnumField(), 1, TestMap.EnumValue.FOO); assertImmutable(testMapOrBuilder.getInt32ToEnumFieldMap(), 1, TestMap.EnumValue.FOO);
assertImmutable( assertImmutable(
testMapOrBuilder.getInt32ToMessageField(), 1, MessageValue.getDefaultInstance()); testMapOrBuilder.getInt32ToMessageFieldMap(), 1, MessageValue.getDefaultInstance());
assertImmutable(testMapOrBuilder.getStringToInt32Field(), "1", 2); assertImmutable(testMapOrBuilder.getStringToInt32FieldMap(), "1", 2);
} }
private <K, V> void assertImmutable(Map<K, V> map, K key, V value) { private <K, V> void assertImmutable(Map<K, V> map, K key, V value) {

@ -271,17 +271,17 @@ public class MapTest extends TestCase {
} }
private void assertMapValuesCleared(TestMapOrBuilder testMapOrBuilder) { private void assertMapValuesCleared(TestMapOrBuilder testMapOrBuilder) {
assertEquals(0, testMapOrBuilder.getInt32ToInt32Field().size()); assertEquals(0, testMapOrBuilder.getInt32ToInt32FieldMap().size());
assertEquals(0, testMapOrBuilder.getInt32ToInt32FieldCount()); assertEquals(0, testMapOrBuilder.getInt32ToInt32FieldCount());
assertEquals(0, testMapOrBuilder.getInt32ToStringField().size()); assertEquals(0, testMapOrBuilder.getInt32ToStringFieldMap().size());
assertEquals(0, testMapOrBuilder.getInt32ToStringFieldCount()); assertEquals(0, testMapOrBuilder.getInt32ToStringFieldCount());
assertEquals(0, testMapOrBuilder.getInt32ToBytesField().size()); assertEquals(0, testMapOrBuilder.getInt32ToBytesFieldMap().size());
assertEquals(0, testMapOrBuilder.getInt32ToBytesFieldCount()); assertEquals(0, testMapOrBuilder.getInt32ToBytesFieldCount());
assertEquals(0, testMapOrBuilder.getInt32ToEnumField().size()); assertEquals(0, testMapOrBuilder.getInt32ToEnumFieldMap().size());
assertEquals(0, testMapOrBuilder.getInt32ToEnumFieldCount()); assertEquals(0, testMapOrBuilder.getInt32ToEnumFieldCount());
assertEquals(0, testMapOrBuilder.getInt32ToMessageField().size()); assertEquals(0, testMapOrBuilder.getInt32ToMessageFieldMap().size());
assertEquals(0, testMapOrBuilder.getInt32ToMessageFieldCount()); assertEquals(0, testMapOrBuilder.getInt32ToMessageFieldCount());
assertEquals(0, testMapOrBuilder.getStringToInt32Field().size()); assertEquals(0, testMapOrBuilder.getStringToInt32FieldMap().size());
assertEquals(0, testMapOrBuilder.getStringToInt32FieldCount()); assertEquals(0, testMapOrBuilder.getStringToInt32FieldCount());
} }
@ -296,13 +296,13 @@ public class MapTest extends TestCase {
} }
private void assertMapsAreImmutable(TestMapOrBuilder testMapOrBuilder) { private void assertMapsAreImmutable(TestMapOrBuilder testMapOrBuilder) {
assertImmutable(testMapOrBuilder.getInt32ToInt32Field(), 1, 2); assertImmutable(testMapOrBuilder.getInt32ToInt32FieldMap(), 1, 2);
assertImmutable(testMapOrBuilder.getInt32ToStringField(), 1, "2"); assertImmutable(testMapOrBuilder.getInt32ToStringFieldMap(), 1, "2");
assertImmutable(testMapOrBuilder.getInt32ToBytesField(), 1, TestUtil.toBytes("2")); assertImmutable(testMapOrBuilder.getInt32ToBytesFieldMap(), 1, TestUtil.toBytes("2"));
assertImmutable(testMapOrBuilder.getInt32ToEnumField(), 1, TestMap.EnumValue.FOO); assertImmutable(testMapOrBuilder.getInt32ToEnumFieldMap(), 1, TestMap.EnumValue.FOO);
assertImmutable( assertImmutable(
testMapOrBuilder.getInt32ToMessageField(), 1, MessageValue.getDefaultInstance()); testMapOrBuilder.getInt32ToMessageFieldMap(), 1, MessageValue.getDefaultInstance());
assertImmutable(testMapOrBuilder.getStringToInt32Field(), "1", 2); assertImmutable(testMapOrBuilder.getStringToInt32FieldMap(), "1", 2);
} }
private <K, V> void assertImmutable(Map<K, V> map, K key, V value) { private <K, V> void assertImmutable(Map<K, V> map, K key, V value) {

@ -260,8 +260,8 @@ public final class TestUtil {
TestRequired.newBuilder().setA(1).setB(2).setC(3).build(); TestRequired.newBuilder().setA(1).setB(2).setC(3).build();
/** Helper to convert a String to ByteString. */ /** Helper to convert a String to ByteString. */
static ByteString toBytes(String str) { public static ByteString toBytes(String str) {
return ByteString.copyFrom(str.getBytes(Internal.UTF_8)); return ByteString.copyFromUtf8(str);
} }
// BEGIN FULL-RUNTIME // BEGIN FULL-RUNTIME

@ -142,8 +142,8 @@ public final class TestUtilLite {
private TestUtilLite() {} private TestUtilLite() {}
/** Helper to convert a String to ByteString. */ /** Helper to convert a String to ByteString. */
static ByteString toBytes(String str) { public static ByteString toBytes(String str) {
return ByteString.copyFrom(str.getBytes(Internal.UTF_8)); return ByteString.copyFromUtf8(str);
} }
/** /**

@ -44,6 +44,7 @@ import static com.google.protobuf.util.Timestamps.NANOS_PER_SECOND;
import com.google.errorprone.annotations.CanIgnoreReturnValue; import com.google.errorprone.annotations.CanIgnoreReturnValue;
import com.google.protobuf.Duration; import com.google.protobuf.Duration;
import java.io.Serializable;
import java.text.ParseException; import java.text.ParseException;
import java.util.Comparator; import java.util.Comparator;
@ -72,23 +73,25 @@ public final class Durations {
private Durations() {} private Durations() {}
private static final Comparator<Duration> COMPARATOR = private static enum DurationComparator implements Comparator<Duration>, Serializable {
new Comparator<Duration>() { INSTANCE;
@Override
public int compare(Duration d1, Duration d2) { @Override
checkValid(d1); public int compare(Duration d1, Duration d2) {
checkValid(d2); checkValid(d1);
int secDiff = Long.compare(d1.getSeconds(), d2.getSeconds()); checkValid(d2);
return (secDiff != 0) ? secDiff : Integer.compare(d1.getNanos(), d2.getNanos()); int secDiff = Long.compare(d1.getSeconds(), d2.getSeconds());
} return (secDiff != 0) ? secDiff : Integer.compare(d1.getNanos(), d2.getNanos());
}; }
}
/** /**
* Returns a {@link Comparator} for {@link Duration}s which sorts in increasing chronological * Returns a {@link Comparator} for {@link Duration}s which sorts in increasing chronological
* order. Nulls and invalid {@link Duration}s are not allowed (see {@link #isValid}). * order. Nulls and invalid {@link Duration}s are not allowed (see {@link #isValid}). The returned
* comparator is serializable.
*/ */
public static Comparator<Duration> comparator() { public static Comparator<Duration> comparator() {
return COMPARATOR; return DurationComparator.INSTANCE;
} }
/** /**
@ -99,7 +102,7 @@ public final class Durations {
* and a value greater than {@code 0} if {@code x > y} * and a value greater than {@code 0} if {@code x > y}
*/ */
public static int compare(Duration x, Duration y) { public static int compare(Duration x, Duration y) {
return COMPARATOR.compare(x, y); return DurationComparator.INSTANCE.compare(x, y);
} }
/** /**

@ -39,6 +39,7 @@ import static com.google.common.math.LongMath.checkedSubtract;
import com.google.errorprone.annotations.CanIgnoreReturnValue; import com.google.errorprone.annotations.CanIgnoreReturnValue;
import com.google.protobuf.Duration; import com.google.protobuf.Duration;
import com.google.protobuf.Timestamp; import com.google.protobuf.Timestamp;
import java.io.Serializable;
import java.text.ParseException; import java.text.ParseException;
import java.text.SimpleDateFormat; import java.text.SimpleDateFormat;
import java.util.Comparator; import java.util.Comparator;
@ -100,24 +101,25 @@ public final class Timestamps {
private Timestamps() {} private Timestamps() {}
private static final Comparator<Timestamp> COMPARATOR = private static enum TimestampComparator implements Comparator<Timestamp>, Serializable {
new Comparator<Timestamp>() { INSTANCE;
@Override
public int compare(Timestamp t1, Timestamp t2) { @Override
checkValid(t1); public int compare(Timestamp t1, Timestamp t2) {
checkValid(t2); checkValid(t1);
int secDiff = Long.compare(t1.getSeconds(), t2.getSeconds()); checkValid(t2);
return (secDiff != 0) ? secDiff : Integer.compare(t1.getNanos(), t2.getNanos()); int secDiff = Long.compare(t1.getSeconds(), t2.getSeconds());
} return (secDiff != 0) ? secDiff : Integer.compare(t1.getNanos(), t2.getNanos());
}; }
}
/** /**
* Returns a {@link Comparator} for {@link Timestamp Timestamps} which sorts in increasing * Returns a {@link Comparator} for {@link Timestamp Timestamps} which sorts in increasing
* chronological order. Nulls and invalid {@link Timestamp Timestamps} are not allowed (see * chronological order. Nulls and invalid {@link Timestamp Timestamps} are not allowed (see
* {@link #isValid}). * {@link #isValid}). The returned comparator is serializable.
*/ */
public static Comparator<Timestamp> comparator() { public static Comparator<Timestamp> comparator() {
return COMPARATOR; return TimestampComparator.INSTANCE;
} }
/** /**
@ -128,7 +130,7 @@ public final class Timestamps {
* and a value greater than {@code 0} if {@code x > y} * and a value greater than {@code 0} if {@code x > y}
*/ */
public static int compare(Timestamp x, Timestamp y) { public static int compare(Timestamp x, Timestamp y) {
return COMPARATOR.compare(x, y); return TimestampComparator.INSTANCE.compare(x, y);
} }
/** /**

@ -231,6 +231,9 @@ class BaseContainer(object):
kwargs['cmp'] = kwargs.pop('sort_function') kwargs['cmp'] = kwargs.pop('sort_function')
self._values.sort(*args, **kwargs) self._values.sort(*args, **kwargs)
def reverse(self):
self._values.reverse()
collections_abc.MutableSequence.register(BaseContainer) collections_abc.MutableSequence.register(BaseContainer)

@ -1311,6 +1311,38 @@ class Proto2ReflectionTest(unittest.TestCase):
# Remove a non-existent element. # Remove a non-existent element.
self.assertRaises(ValueError, proto.repeated_int32.remove, 123) self.assertRaises(ValueError, proto.repeated_int32.remove, 123)
def testRepeatedScalarsReverse_Empty(self):
proto = unittest_pb2.TestAllTypes()
self.assertFalse(proto.repeated_int32)
self.assertEqual(0, len(proto.repeated_int32))
self.assertIsNone(proto.repeated_int32.reverse())
self.assertFalse(proto.repeated_int32)
self.assertEqual(0, len(proto.repeated_int32))
def testRepeatedScalarsReverse_NonEmpty(self):
proto = unittest_pb2.TestAllTypes()
self.assertFalse(proto.repeated_int32)
self.assertEqual(0, len(proto.repeated_int32))
proto.repeated_int32.append(1)
proto.repeated_int32.append(2)
proto.repeated_int32.append(3)
proto.repeated_int32.append(4)
self.assertEqual(4, len(proto.repeated_int32))
self.assertIsNone(proto.repeated_int32.reverse())
self.assertEqual(4, len(proto.repeated_int32))
self.assertEqual(4, proto.repeated_int32[0])
self.assertEqual(3, proto.repeated_int32[1])
self.assertEqual(2, proto.repeated_int32[2])
self.assertEqual(1, proto.repeated_int32[3])
def testRepeatedComposites(self): def testRepeatedComposites(self):
proto = unittest_pb2.TestAllTypes() proto = unittest_pb2.TestAllTypes()
self.assertFalse(proto.repeated_nested_message) self.assertFalse(proto.repeated_nested_message)
@ -1423,6 +1455,35 @@ class Proto2ReflectionTest(unittest.TestCase):
self.assertEqual(1, len(proto.repeated_nested_message)) self.assertEqual(1, len(proto.repeated_nested_message))
self.assertEqual(m1, proto.repeated_nested_message[0]) self.assertEqual(m1, proto.repeated_nested_message[0])
def testRepeatedCompositeReverse_Empty(self):
proto = unittest_pb2.TestAllTypes()
self.assertFalse(proto.repeated_nested_message)
self.assertEqual(0, len(proto.repeated_nested_message))
self.assertIsNone(proto.repeated_nested_message.reverse())
self.assertFalse(proto.repeated_nested_message)
self.assertEqual(0, len(proto.repeated_nested_message))
def testRepeatedCompositeReverse_NonEmpty(self):
proto = unittest_pb2.TestAllTypes()
self.assertFalse(proto.repeated_nested_message)
self.assertEqual(0, len(proto.repeated_nested_message))
m0 = proto.repeated_nested_message.add()
m0.bb = len(proto.repeated_nested_message)
m1 = proto.repeated_nested_message.add()
m1.bb = len(proto.repeated_nested_message)
m2 = proto.repeated_nested_message.add()
m2.bb = len(proto.repeated_nested_message)
self.assertListsEqual([m0, m1, m2], proto.repeated_nested_message)
self.assertIsNone(proto.repeated_nested_message.reverse())
self.assertListsEqual([m2, m1, m0], proto.repeated_nested_message)
def testHandWrittenReflection(self): def testHandWrittenReflection(self):
# Hand written extensions are only supported by the pure-Python # Hand written extensions are only supported by the pure-Python
# implementation of the API. # implementation of the API.
@ -3061,10 +3122,10 @@ class SerializationTest(unittest.TestCase):
unittest_pb2.ForeignMessage.c.__get__(msg) unittest_pb2.ForeignMessage.c.__get__(msg)
except TypeError: except TypeError:
pass # The cpp implementation cannot mix fields from other messages. pass # The cpp implementation cannot mix fields from other messages.
# This test exercises a specific check that avoids a crash. # This test exercises a specific check that avoids a crash.
else: else:
pass # The python implementation allows fields from other messages. pass # The python implementation allows fields from other messages.
# This is useless, but works. # This is useless, but works.
def testInitKwargs(self): def testInitKwargs(self):
proto = unittest_pb2.TestAllTypes( proto = unittest_pb2.TestAllTypes(

@ -77,8 +77,7 @@ static Py_ssize_t Length(PyObject* pself) {
PyObject* Add(RepeatedCompositeContainer* self, PyObject* args, PyObject* Add(RepeatedCompositeContainer* self, PyObject* args,
PyObject* kwargs) { PyObject* kwargs) {
if (cmessage::AssureWritable(self->parent) == -1) if (cmessage::AssureWritable(self->parent) == -1) return nullptr;
return NULL;
Message* message = self->parent->message; Message* message = self->parent->message;
Message* sub_message = Message* sub_message =
@ -93,7 +92,7 @@ PyObject* Add(RepeatedCompositeContainer* self, PyObject* args,
message->GetReflection()->RemoveLast( message->GetReflection()->RemoveLast(
message, self->parent_field_descriptor); message, self->parent_field_descriptor);
Py_DECREF(cmsg); Py_DECREF(cmsg);
return NULL; return nullptr;
} }
return cmsg->AsPyObject(); return cmsg->AsPyObject();
@ -172,28 +171,28 @@ static PyObject* Insert(PyObject* pself, PyObject* args) {
PyObject* Extend(RepeatedCompositeContainer* self, PyObject* value) { PyObject* Extend(RepeatedCompositeContainer* self, PyObject* value) {
cmessage::AssureWritable(self->parent); cmessage::AssureWritable(self->parent);
ScopedPyObjectPtr iter(PyObject_GetIter(value)); ScopedPyObjectPtr iter(PyObject_GetIter(value));
if (iter == NULL) { if (iter == nullptr) {
PyErr_SetString(PyExc_TypeError, "Value must be iterable"); PyErr_SetString(PyExc_TypeError, "Value must be iterable");
return NULL; return nullptr;
} }
ScopedPyObjectPtr next; ScopedPyObjectPtr next;
while ((next.reset(PyIter_Next(iter.get()))) != NULL) { while ((next.reset(PyIter_Next(iter.get()))) != nullptr) {
if (!PyObject_TypeCheck(next.get(), CMessage_Type)) { if (!PyObject_TypeCheck(next.get(), CMessage_Type)) {
PyErr_SetString(PyExc_TypeError, "Not a cmessage"); PyErr_SetString(PyExc_TypeError, "Not a cmessage");
return NULL; return nullptr;
} }
ScopedPyObjectPtr new_message(Add(self, NULL, NULL)); ScopedPyObjectPtr new_message(Add(self, nullptr, nullptr));
if (new_message == NULL) { if (new_message == nullptr) {
return NULL; return nullptr;
} }
CMessage* new_cmessage = reinterpret_cast<CMessage*>(new_message.get()); CMessage* new_cmessage = reinterpret_cast<CMessage*>(new_message.get());
if (ScopedPyObjectPtr(cmessage::MergeFrom(new_cmessage, next.get())) == if (ScopedPyObjectPtr(cmessage::MergeFrom(new_cmessage, next.get())) ==
NULL) { nullptr) {
return NULL; return nullptr;
} }
} }
if (PyErr_Occurred()) { if (PyErr_Occurred()) {
return NULL; return nullptr;
} }
Py_RETURN_NONE; Py_RETURN_NONE;
} }
@ -220,7 +219,7 @@ static PyObject* GetItem(RepeatedCompositeContainer* self, Py_ssize_t index,
} }
if (index < 0 || index >= length) { if (index < 0 || index >= length) {
PyErr_Format(PyExc_IndexError, "list index (%zd) out of range", index); PyErr_Format(PyExc_IndexError, "list index (%zd) out of range", index);
return NULL; return nullptr;
} }
Message* message = self->parent->message; Message* message = self->parent->message;
Message* sub_message = message->GetReflection()->MutableRepeatedMessage( Message* sub_message = message->GetReflection()->MutableRepeatedMessage(
@ -240,7 +239,7 @@ PyObject* Subscript(RepeatedCompositeContainer* self, PyObject* item) {
if (PyIndex_Check(item)) { if (PyIndex_Check(item)) {
Py_ssize_t index; Py_ssize_t index;
index = PyNumber_AsSsize_t(item, PyExc_IndexError); index = PyNumber_AsSsize_t(item, PyExc_IndexError);
if (index == -1 && PyErr_Occurred()) return NULL; if (index == -1 && PyErr_Occurred()) return nullptr;
if (index < 0) index += length; if (index < 0) index += length;
return GetItem(self, index, length); return GetItem(self, index, length);
} else if (PySlice_Check(item)) { } else if (PySlice_Check(item)) {
@ -254,14 +253,14 @@ PyObject* Subscript(RepeatedCompositeContainer* self, PyObject* item) {
if (PySlice_GetIndicesEx(reinterpret_cast<PySliceObject*>(item), if (PySlice_GetIndicesEx(reinterpret_cast<PySliceObject*>(item),
length, &from, &to, &step, &slicelength) == -1) { length, &from, &to, &step, &slicelength) == -1) {
#endif #endif
return NULL; return nullptr;
} }
if (slicelength <= 0) { if (slicelength <= 0) {
return PyList_New(0); return PyList_New(0);
} else { } else {
result = PyList_New(slicelength); result = PyList_New(slicelength);
if (!result) return NULL; if (!result) return nullptr;
for (cur = from, i = 0; i < slicelength; cur += step, i++) { for (cur = from, i = 0; i < slicelength; cur += step, i++) {
PyList_SET_ITEM(result, i, GetItem(self, cur, length)); PyList_SET_ITEM(result, i, GetItem(self, cur, length));
@ -272,7 +271,7 @@ PyObject* Subscript(RepeatedCompositeContainer* self, PyObject* item) {
} else { } else {
PyErr_Format(PyExc_TypeError, "indices must be integers, not %.200s", PyErr_Format(PyExc_TypeError, "indices must be integers, not %.200s",
item->ob_type->tp_name); item->ob_type->tp_name);
return NULL; return nullptr;
} }
} }
@ -283,7 +282,7 @@ static PyObject* SubscriptMethod(PyObject* self, PyObject* slice) {
int AssignSubscript(RepeatedCompositeContainer* self, int AssignSubscript(RepeatedCompositeContainer* self,
PyObject* slice, PyObject* slice,
PyObject* value) { PyObject* value) {
if (value != NULL) { if (value != nullptr) {
PyErr_SetString(PyExc_TypeError, "does not support assignment"); PyErr_SetString(PyExc_TypeError, "does not support assignment");
return -1; return -1;
} }
@ -305,23 +304,23 @@ static PyObject* Remove(PyObject* pself, PyObject* value) {
for (Py_ssize_t i = 0; i < len; i++) { for (Py_ssize_t i = 0; i < len; i++) {
ScopedPyObjectPtr item(GetItem(self, i, len)); ScopedPyObjectPtr item(GetItem(self, i, len));
if (item == NULL) { if (item == nullptr) {
return NULL; return nullptr;
} }
int result = PyObject_RichCompareBool(item.get(), value, Py_EQ); int result = PyObject_RichCompareBool(item.get(), value, Py_EQ);
if (result < 0) { if (result < 0) {
return NULL; return nullptr;
} }
if (result) { if (result) {
ScopedPyObjectPtr py_index(PyLong_FromSsize_t(i)); ScopedPyObjectPtr py_index(PyLong_FromSsize_t(i));
if (AssignSubscript(self, py_index.get(), NULL) < 0) { if (AssignSubscript(self, py_index.get(), nullptr) < 0) {
return NULL; return nullptr;
} }
Py_RETURN_NONE; Py_RETURN_NONE;
} }
} }
PyErr_SetString(PyExc_ValueError, "Item to delete not in list"); PyErr_SetString(PyExc_ValueError, "Item to delete not in list");
return NULL; return nullptr;
} }
static PyObject* RichCompare(PyObject* pself, PyObject* other, int opid) { static PyObject* RichCompare(PyObject* pself, PyObject* other, int opid) {
@ -332,23 +331,23 @@ static PyObject* RichCompare(PyObject* pself, PyObject* other, int opid) {
PyErr_SetString(PyExc_TypeError, PyErr_SetString(PyExc_TypeError,
"Can only compare repeated composite fields " "Can only compare repeated composite fields "
"against other repeated composite fields."); "against other repeated composite fields.");
return NULL; return nullptr;
} }
if (opid == Py_EQ || opid == Py_NE) { if (opid == Py_EQ || opid == Py_NE) {
// TODO(anuraag): Don't make new lists just for this... // TODO(anuraag): Don't make new lists just for this...
ScopedPyObjectPtr full_slice(PySlice_New(NULL, NULL, NULL)); ScopedPyObjectPtr full_slice(PySlice_New(nullptr, nullptr, nullptr));
if (full_slice == NULL) { if (full_slice == nullptr) {
return NULL; return nullptr;
} }
ScopedPyObjectPtr list(Subscript(self, full_slice.get())); ScopedPyObjectPtr list(Subscript(self, full_slice.get()));
if (list == NULL) { if (list == nullptr) {
return NULL; return nullptr;
} }
ScopedPyObjectPtr other_list( ScopedPyObjectPtr other_list(
Subscript(reinterpret_cast<RepeatedCompositeContainer*>(other), Subscript(reinterpret_cast<RepeatedCompositeContainer*>(other),
full_slice.get())); full_slice.get()));
if (other_list == NULL) { if (other_list == nullptr) {
return NULL; return nullptr;
} }
return PyObject_RichCompare(list.get(), other_list.get(), opid); return PyObject_RichCompare(list.get(), other_list.get(), opid);
} else { } else {
@ -358,14 +357,14 @@ static PyObject* RichCompare(PyObject* pself, PyObject* other, int opid) {
} }
static PyObject* ToStr(PyObject* pself) { static PyObject* ToStr(PyObject* pself) {
ScopedPyObjectPtr full_slice(PySlice_New(NULL, NULL, NULL)); ScopedPyObjectPtr full_slice(PySlice_New(nullptr, nullptr, nullptr));
if (full_slice == NULL) { if (full_slice == nullptr) {
return NULL; return nullptr;
} }
ScopedPyObjectPtr list(Subscript( ScopedPyObjectPtr list(Subscript(
reinterpret_cast<RepeatedCompositeContainer*>(pself), full_slice.get())); reinterpret_cast<RepeatedCompositeContainer*>(pself), full_slice.get()));
if (list == NULL) { if (list == nullptr) {
return NULL; return nullptr;
} }
return PyObject_Repr(list.get()); return PyObject_Repr(list.get());
} }
@ -400,13 +399,12 @@ static int SortPythonMessages(RepeatedCompositeContainer* self,
PyObject* kwds) { PyObject* kwds) {
ScopedPyObjectPtr child_list( ScopedPyObjectPtr child_list(
PySequence_List(reinterpret_cast<PyObject*>(self))); PySequence_List(reinterpret_cast<PyObject*>(self)));
if (child_list == NULL) { if (child_list == nullptr) {
return -1; return -1;
} }
ScopedPyObjectPtr m(PyObject_GetAttrString(child_list.get(), "sort")); ScopedPyObjectPtr m(PyObject_GetAttrString(child_list.get(), "sort"));
if (m == NULL) if (m == nullptr) return -1;
return -1; if (ScopedPyObjectPtr(PyObject_Call(m.get(), args, kwds)) == nullptr)
if (ScopedPyObjectPtr(PyObject_Call(m.get(), args, kwds)) == NULL)
return -1; return -1;
ReorderAttached(self, child_list.get()); ReorderAttached(self, child_list.get());
return 0; return 0;
@ -418,9 +416,9 @@ static PyObject* Sort(PyObject* pself, PyObject* args, PyObject* kwds) {
// Support the old sort_function argument for backwards // Support the old sort_function argument for backwards
// compatibility. // compatibility.
if (kwds != NULL) { if (kwds != nullptr) {
PyObject* sort_func = PyDict_GetItemString(kwds, "sort_function"); PyObject* sort_func = PyDict_GetItemString(kwds, "sort_function");
if (sort_func != NULL) { if (sort_func != nullptr) {
// Must set before deleting as sort_func is a borrowed reference // Must set before deleting as sort_func is a borrowed reference
// and kwds might be the only thing keeping it alive. // and kwds might be the only thing keeping it alive.
PyDict_SetItemString(kwds, "cmp", sort_func); PyDict_SetItemString(kwds, "cmp", sort_func);
@ -429,7 +427,35 @@ static PyObject* Sort(PyObject* pself, PyObject* args, PyObject* kwds) {
} }
if (SortPythonMessages(self, args, kwds) < 0) { if (SortPythonMessages(self, args, kwds) < 0) {
return NULL; return nullptr;
}
Py_RETURN_NONE;
}
// ---------------------------------------------------------------------
// reverse()
// Returns 0 if successful; returns -1 and sets an exception if
// unsuccessful.
static int ReversePythonMessages(RepeatedCompositeContainer* self) {
ScopedPyObjectPtr child_list(
PySequence_List(reinterpret_cast<PyObject*>(self)));
if (child_list == nullptr) {
return -1;
}
if (ScopedPyObjectPtr(
PyObject_CallMethod(child_list.get(), "reverse", nullptr)) == nullptr)
return -1;
ReorderAttached(self, child_list.get());
return 0;
}
static PyObject* Reverse(PyObject* pself) {
RepeatedCompositeContainer* self =
reinterpret_cast<RepeatedCompositeContainer*>(pself);
if (ReversePythonMessages(self) < 0) {
return nullptr;
} }
Py_RETURN_NONE; Py_RETURN_NONE;
} }
@ -448,17 +474,17 @@ static PyObject* Pop(PyObject* pself, PyObject* args) {
Py_ssize_t index = -1; Py_ssize_t index = -1;
if (!PyArg_ParseTuple(args, "|n", &index)) { if (!PyArg_ParseTuple(args, "|n", &index)) {
return NULL; return nullptr;
} }
Py_ssize_t length = Length(pself); Py_ssize_t length = Length(pself);
if (index < 0) index += length; if (index < 0) index += length;
PyObject* item = GetItem(self, index, length); PyObject* item = GetItem(self, index, length);
if (item == NULL) { if (item == nullptr) {
return NULL; return nullptr;
} }
ScopedPyObjectPtr py_index(PyLong_FromSsize_t(index)); ScopedPyObjectPtr py_index(PyLong_FromSsize_t(index));
if (AssignSubscript(self, py_index.get(), NULL) < 0) { if (AssignSubscript(self, py_index.get(), nullptr) < 0) {
return NULL; return nullptr;
} }
return item; return item;
} }
@ -473,14 +499,14 @@ RepeatedCompositeContainer *NewContainer(
const FieldDescriptor* parent_field_descriptor, const FieldDescriptor* parent_field_descriptor,
CMessageClass* child_message_class) { CMessageClass* child_message_class) {
if (!CheckFieldBelongsToMessage(parent_field_descriptor, parent->message)) { if (!CheckFieldBelongsToMessage(parent_field_descriptor, parent->message)) {
return NULL; return nullptr;
} }
RepeatedCompositeContainer* self = RepeatedCompositeContainer* self =
reinterpret_cast<RepeatedCompositeContainer*>( reinterpret_cast<RepeatedCompositeContainer*>(
PyType_GenericAlloc(&RepeatedCompositeContainer_Type, 0)); PyType_GenericAlloc(&RepeatedCompositeContainer_Type, 0));
if (self == NULL) { if (self == nullptr) {
return NULL; return nullptr;
} }
Py_INCREF(parent); Py_INCREF(parent);
@ -500,10 +526,10 @@ static void Dealloc(PyObject* pself) {
} }
static PySequenceMethods SqMethods = { static PySequenceMethods SqMethods = {
Length, /* sq_length */ Length, /* sq_length */
0, /* sq_concat */ nullptr, /* sq_concat */
0, /* sq_repeat */ nullptr, /* sq_repeat */
Item /* sq_item */ Item /* sq_item */
}; };
static PyMappingMethods MpMethods = { static PyMappingMethods MpMethods = {
@ -513,66 +539,65 @@ static PyMappingMethods MpMethods = {
}; };
static PyMethodDef Methods[] = { static PyMethodDef Methods[] = {
{ "__deepcopy__", DeepCopy, METH_VARARGS, {"__deepcopy__", DeepCopy, METH_VARARGS, "Makes a deep copy of the class."},
"Makes a deep copy of the class." }, {"add", reinterpret_cast<PyCFunction>(AddMethod),
{ "add", (PyCFunction)AddMethod, METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS, "Adds an object to the repeated container."},
"Adds an object to the repeated container." }, {"append", AppendMethod, METH_O,
{ "append", AppendMethod, METH_O, "Appends a message to the end of the repeated container."},
"Appends a message to the end of the repeated container."}, {"insert", Insert, METH_VARARGS,
{ "insert", Insert, METH_VARARGS, "Inserts a message before the specified index."},
"Inserts a message before the specified index." }, {"extend", ExtendMethod, METH_O, "Adds objects to the repeated container."},
{ "extend", ExtendMethod, METH_O, {"pop", Pop, METH_VARARGS,
"Adds objects to the repeated container." }, "Removes an object from the repeated container and returns it."},
{ "pop", Pop, METH_VARARGS, {"remove", Remove, METH_O,
"Removes an object from the repeated container and returns it." }, "Removes an object from the repeated container."},
{ "remove", Remove, METH_O, {"sort", reinterpret_cast<PyCFunction>(Sort), METH_VARARGS | METH_KEYWORDS,
"Removes an object from the repeated container." }, "Sorts the repeated container."},
{ "sort", (PyCFunction)Sort, METH_VARARGS | METH_KEYWORDS, {"reverse", reinterpret_cast<PyCFunction>(Reverse), METH_NOARGS,
"Sorts the repeated container." }, "Reverses elements order of the repeated container."},
{ "MergeFrom", MergeFromMethod, METH_O, {"MergeFrom", MergeFromMethod, METH_O,
"Adds objects to the repeated container." }, "Adds objects to the repeated container."},
{ NULL, NULL } {nullptr, nullptr}};
};
} // namespace repeated_composite_container } // namespace repeated_composite_container
PyTypeObject RepeatedCompositeContainer_Type = { PyTypeObject RepeatedCompositeContainer_Type = {
PyVarObject_HEAD_INIT(&PyType_Type, 0) PyVarObject_HEAD_INIT(&PyType_Type, 0) FULL_MODULE_NAME
FULL_MODULE_NAME ".RepeatedCompositeContainer", // tp_name ".RepeatedCompositeContainer", // tp_name
sizeof(RepeatedCompositeContainer), // tp_basicsize sizeof(RepeatedCompositeContainer), // tp_basicsize
0, // tp_itemsize 0, // tp_itemsize
repeated_composite_container::Dealloc, // tp_dealloc repeated_composite_container::Dealloc, // tp_dealloc
0, // tp_print 0, // tp_print, in Python >=3.8: Py_ssize_t tp_vectorcall_offset
0, // tp_getattr nullptr, // tp_getattr
0, // tp_setattr nullptr, // tp_setattr
0, // tp_compare nullptr, // tp_compare
repeated_composite_container::ToStr, // tp_repr repeated_composite_container::ToStr, // tp_repr
0, // tp_as_number nullptr, // tp_as_number
&repeated_composite_container::SqMethods, // tp_as_sequence &repeated_composite_container::SqMethods, // tp_as_sequence
&repeated_composite_container::MpMethods, // tp_as_mapping &repeated_composite_container::MpMethods, // tp_as_mapping
PyObject_HashNotImplemented, // tp_hash PyObject_HashNotImplemented, // tp_hash
0, // tp_call nullptr, // tp_call
0, // tp_str nullptr, // tp_str
0, // tp_getattro nullptr, // tp_getattro
0, // tp_setattro nullptr, // tp_setattro
0, // tp_as_buffer nullptr, // tp_as_buffer
Py_TPFLAGS_DEFAULT, // tp_flags Py_TPFLAGS_DEFAULT, // tp_flags
"A Repeated scalar container", // tp_doc "A Repeated scalar container", // tp_doc
0, // tp_traverse nullptr, // tp_traverse
0, // tp_clear nullptr, // tp_clear
repeated_composite_container::RichCompare, // tp_richcompare repeated_composite_container::RichCompare, // tp_richcompare
0, // tp_weaklistoffset 0, // tp_weaklistoffset
0, // tp_iter nullptr, // tp_iter
0, // tp_iternext nullptr, // tp_iternext
repeated_composite_container::Methods, // tp_methods repeated_composite_container::Methods, // tp_methods
0, // tp_members nullptr, // tp_members
0, // tp_getset nullptr, // tp_getset
0, // tp_base nullptr, // tp_base
0, // tp_dict nullptr, // tp_dict
0, // tp_descr_get nullptr, // tp_descr_get
0, // tp_descr_set nullptr, // tp_descr_set
0, // tp_dictoffset 0, // tp_dictoffset
0, // tp_init nullptr, // tp_init
}; };
} // namespace python } // namespace python

@ -46,13 +46,13 @@
#include <google/protobuf/pyext/scoped_pyobject_ptr.h> #include <google/protobuf/pyext/scoped_pyobject_ptr.h>
#if PY_MAJOR_VERSION >= 3 #if PY_MAJOR_VERSION >= 3
#define PyInt_FromLong PyLong_FromLong #define PyInt_FromLong PyLong_FromLong
#if PY_VERSION_HEX < 0x03030000 #if PY_VERSION_HEX < 0x03030000
#error "Python 3.0 - 3.2 are not supported." #error "Python 3.0 - 3.2 are not supported."
#else #else
#define PyString_AsString(ob) \ #define PyString_AsString(ob) \
(PyUnicode_Check(ob)? PyUnicode_AsUTF8(ob): PyBytes_AsString(ob)) (PyUnicode_Check(ob) ? PyUnicode_AsUTF8(ob) : PyBytes_AsString(ob))
#endif #endif
#endif #endif
namespace google { namespace google {
@ -61,13 +61,13 @@ namespace python {
namespace repeated_scalar_container { namespace repeated_scalar_container {
static int InternalAssignRepeatedField( static int InternalAssignRepeatedField(RepeatedScalarContainer* self,
RepeatedScalarContainer* self, PyObject* list) { PyObject* list) {
Message* message = self->parent->message; Message* message = self->parent->message;
message->GetReflection()->ClearField(message, self->parent_field_descriptor); message->GetReflection()->ClearField(message, self->parent_field_descriptor);
for (Py_ssize_t i = 0; i < PyList_GET_SIZE(list); ++i) { for (Py_ssize_t i = 0; i < PyList_GET_SIZE(list); ++i) {
PyObject* value = PyList_GET_ITEM(list, i); PyObject* value = PyList_GET_ITEM(list, i);
if (ScopedPyObjectPtr(Append(self, value)) == NULL) { if (ScopedPyObjectPtr(Append(self, value)) == nullptr) {
return -1; return -1;
} }
} }
@ -96,13 +96,12 @@ static int AssignItem(PyObject* pself, Py_ssize_t index, PyObject* arg) {
index = field_size + index; index = field_size + index;
} }
if (index < 0 || index >= field_size) { if (index < 0 || index >= field_size) {
PyErr_Format(PyExc_IndexError, PyErr_Format(PyExc_IndexError, "list assignment index (%d) out of range",
"list assignment index (%d) out of range",
static_cast<int>(index)); static_cast<int>(index));
return -1; return -1;
} }
if (arg == NULL) { if (arg == nullptr) {
ScopedPyObjectPtr py_index(PyLong_FromLong(index)); ScopedPyObjectPtr py_index(PyLong_FromLong(index));
return cmessage::DeleteRepeatedField(self->parent, field_descriptor, return cmessage::DeleteRepeatedField(self->parent, field_descriptor,
py_index.get()); py_index.get());
@ -150,8 +149,8 @@ static int AssignItem(PyObject* pself, Py_ssize_t index, PyObject* arg) {
break; break;
} }
case FieldDescriptor::CPPTYPE_STRING: { case FieldDescriptor::CPPTYPE_STRING: {
if (!CheckAndSetString( if (!CheckAndSetString(arg, message, field_descriptor, reflection, false,
arg, message, field_descriptor, reflection, false, index)) { index)) {
return -1; return -1;
} }
break; break;
@ -165,12 +164,12 @@ static int AssignItem(PyObject* pself, Py_ssize_t index, PyObject* arg) {
const EnumDescriptor* enum_descriptor = field_descriptor->enum_type(); const EnumDescriptor* enum_descriptor = field_descriptor->enum_type();
const EnumValueDescriptor* enum_value = const EnumValueDescriptor* enum_value =
enum_descriptor->FindValueByNumber(value); enum_descriptor->FindValueByNumber(value);
if (enum_value != NULL) { if (enum_value != nullptr) {
reflection->SetRepeatedEnum(message, field_descriptor, index, reflection->SetRepeatedEnum(message, field_descriptor, index,
enum_value); enum_value);
} else { } else {
ScopedPyObjectPtr s(PyObject_Str(arg)); ScopedPyObjectPtr s(PyObject_Str(arg));
if (s != NULL) { if (s != nullptr) {
PyErr_Format(PyExc_ValueError, "Unknown enum value: %s", PyErr_Format(PyExc_ValueError, "Unknown enum value: %s",
PyString_AsString(s.get())); PyString_AsString(s.get()));
} }
@ -180,9 +179,9 @@ static int AssignItem(PyObject* pself, Py_ssize_t index, PyObject* arg) {
break; break;
} }
default: default:
PyErr_Format( PyErr_Format(PyExc_SystemError,
PyExc_SystemError, "Adding value to a field of unknown type %d", "Adding value to a field of unknown type %d",
field_descriptor->cpp_type()); field_descriptor->cpp_type());
return -1; return -1;
} }
return 0; return 0;
@ -201,60 +200,58 @@ static PyObject* Item(PyObject* pself, Py_ssize_t index) {
index = field_size + index; index = field_size + index;
} }
if (index < 0 || index >= field_size) { if (index < 0 || index >= field_size) {
PyErr_Format(PyExc_IndexError, PyErr_Format(PyExc_IndexError, "list index (%zd) out of range", index);
"list index (%zd) out of range", return nullptr;
index);
return NULL;
} }
PyObject* result = NULL; PyObject* result = nullptr;
switch (field_descriptor->cpp_type()) { switch (field_descriptor->cpp_type()) {
case FieldDescriptor::CPPTYPE_INT32: { case FieldDescriptor::CPPTYPE_INT32: {
int32 value = reflection->GetRepeatedInt32( int32 value =
*message, field_descriptor, index); reflection->GetRepeatedInt32(*message, field_descriptor, index);
result = PyInt_FromLong(value); result = PyInt_FromLong(value);
break; break;
} }
case FieldDescriptor::CPPTYPE_INT64: { case FieldDescriptor::CPPTYPE_INT64: {
int64 value = reflection->GetRepeatedInt64( int64 value =
*message, field_descriptor, index); reflection->GetRepeatedInt64(*message, field_descriptor, index);
result = PyLong_FromLongLong(value); result = PyLong_FromLongLong(value);
break; break;
} }
case FieldDescriptor::CPPTYPE_UINT32: { case FieldDescriptor::CPPTYPE_UINT32: {
uint32 value = reflection->GetRepeatedUInt32( uint32 value =
*message, field_descriptor, index); reflection->GetRepeatedUInt32(*message, field_descriptor, index);
result = PyLong_FromLongLong(value); result = PyLong_FromLongLong(value);
break; break;
} }
case FieldDescriptor::CPPTYPE_UINT64: { case FieldDescriptor::CPPTYPE_UINT64: {
uint64 value = reflection->GetRepeatedUInt64( uint64 value =
*message, field_descriptor, index); reflection->GetRepeatedUInt64(*message, field_descriptor, index);
result = PyLong_FromUnsignedLongLong(value); result = PyLong_FromUnsignedLongLong(value);
break; break;
} }
case FieldDescriptor::CPPTYPE_FLOAT: { case FieldDescriptor::CPPTYPE_FLOAT: {
float value = reflection->GetRepeatedFloat( float value =
*message, field_descriptor, index); reflection->GetRepeatedFloat(*message, field_descriptor, index);
result = PyFloat_FromDouble(value); result = PyFloat_FromDouble(value);
break; break;
} }
case FieldDescriptor::CPPTYPE_DOUBLE: { case FieldDescriptor::CPPTYPE_DOUBLE: {
double value = reflection->GetRepeatedDouble( double value =
*message, field_descriptor, index); reflection->GetRepeatedDouble(*message, field_descriptor, index);
result = PyFloat_FromDouble(value); result = PyFloat_FromDouble(value);
break; break;
} }
case FieldDescriptor::CPPTYPE_BOOL: { case FieldDescriptor::CPPTYPE_BOOL: {
bool value = reflection->GetRepeatedBool( bool value =
*message, field_descriptor, index); reflection->GetRepeatedBool(*message, field_descriptor, index);
result = PyBool_FromLong(value ? 1 : 0); result = PyBool_FromLong(value ? 1 : 0);
break; break;
} }
case FieldDescriptor::CPPTYPE_ENUM: { case FieldDescriptor::CPPTYPE_ENUM: {
const EnumValueDescriptor* enum_value = const EnumValueDescriptor* enum_value =
message->GetReflection()->GetRepeatedEnum( message->GetReflection()->GetRepeatedEnum(*message, field_descriptor,
*message, field_descriptor, index); index);
result = PyInt_FromLong(enum_value->number()); result = PyInt_FromLong(enum_value->number());
break; break;
} }
@ -266,10 +263,9 @@ static PyObject* Item(PyObject* pself, Py_ssize_t index) {
break; break;
} }
default: default:
PyErr_Format( PyErr_Format(PyExc_SystemError,
PyExc_SystemError, "Getting value from a repeated field of unknown type %d",
"Getting value from a repeated field of unknown type %d", field_descriptor->cpp_type());
field_descriptor->cpp_type());
} }
return result; return result;
@ -287,23 +283,23 @@ static PyObject* Subscript(PyObject* pself, PyObject* slice) {
from = to = PyInt_AsLong(slice); from = to = PyInt_AsLong(slice);
} else // NOLINT } else // NOLINT
#endif #endif
if (PyLong_Check(slice)) { if (PyLong_Check(slice)) {
from = to = PyLong_AsLong(slice); from = to = PyLong_AsLong(slice);
} else if (PySlice_Check(slice)) { } else if (PySlice_Check(slice)) {
length = Len(pself); length = Len(pself);
#if PY_MAJOR_VERSION >= 3 #if PY_MAJOR_VERSION >= 3
if (PySlice_GetIndicesEx(slice, if (PySlice_GetIndicesEx(slice, length, &from, &to, &step, &slicelength) ==
length, &from, &to, &step, &slicelength) == -1) { -1) {
#else #else
if (PySlice_GetIndicesEx(reinterpret_cast<PySliceObject*>(slice), if (PySlice_GetIndicesEx(reinterpret_cast<PySliceObject*>(slice), length,
length, &from, &to, &step, &slicelength) == -1) { &from, &to, &step, &slicelength) == -1) {
#endif #endif
return NULL; return nullptr;
} }
return_list = true; return_list = true;
} else { } else {
PyErr_SetString(PyExc_TypeError, "list indices must be integers"); PyErr_SetString(PyExc_TypeError, "list indices must be integers");
return NULL; return nullptr;
} }
if (!return_list) { if (!return_list) {
@ -311,8 +307,8 @@ static PyObject* Subscript(PyObject* pself, PyObject* slice) {
} }
PyObject* list = PyList_New(0); PyObject* list = PyList_New(0);
if (list == NULL) { if (list == nullptr) {
return NULL; return nullptr;
} }
if (from <= to) { if (from <= to) {
if (step < 0) { if (step < 0) {
@ -348,73 +344,73 @@ PyObject* Append(RepeatedScalarContainer* self, PyObject* item) {
const Reflection* reflection = message->GetReflection(); const Reflection* reflection = message->GetReflection();
switch (field_descriptor->cpp_type()) { switch (field_descriptor->cpp_type()) {
case FieldDescriptor::CPPTYPE_INT32: { case FieldDescriptor::CPPTYPE_INT32: {
GOOGLE_CHECK_GET_INT32(item, value, NULL); GOOGLE_CHECK_GET_INT32(item, value, nullptr);
reflection->AddInt32(message, field_descriptor, value); reflection->AddInt32(message, field_descriptor, value);
break; break;
} }
case FieldDescriptor::CPPTYPE_INT64: { case FieldDescriptor::CPPTYPE_INT64: {
GOOGLE_CHECK_GET_INT64(item, value, NULL); GOOGLE_CHECK_GET_INT64(item, value, nullptr);
reflection->AddInt64(message, field_descriptor, value); reflection->AddInt64(message, field_descriptor, value);
break; break;
} }
case FieldDescriptor::CPPTYPE_UINT32: { case FieldDescriptor::CPPTYPE_UINT32: {
GOOGLE_CHECK_GET_UINT32(item, value, NULL); GOOGLE_CHECK_GET_UINT32(item, value, nullptr);
reflection->AddUInt32(message, field_descriptor, value); reflection->AddUInt32(message, field_descriptor, value);
break; break;
} }
case FieldDescriptor::CPPTYPE_UINT64: { case FieldDescriptor::CPPTYPE_UINT64: {
GOOGLE_CHECK_GET_UINT64(item, value, NULL); GOOGLE_CHECK_GET_UINT64(item, value, nullptr);
reflection->AddUInt64(message, field_descriptor, value); reflection->AddUInt64(message, field_descriptor, value);
break; break;
} }
case FieldDescriptor::CPPTYPE_FLOAT: { case FieldDescriptor::CPPTYPE_FLOAT: {
GOOGLE_CHECK_GET_FLOAT(item, value, NULL); GOOGLE_CHECK_GET_FLOAT(item, value, nullptr);
reflection->AddFloat(message, field_descriptor, value); reflection->AddFloat(message, field_descriptor, value);
break; break;
} }
case FieldDescriptor::CPPTYPE_DOUBLE: { case FieldDescriptor::CPPTYPE_DOUBLE: {
GOOGLE_CHECK_GET_DOUBLE(item, value, NULL); GOOGLE_CHECK_GET_DOUBLE(item, value, nullptr);
reflection->AddDouble(message, field_descriptor, value); reflection->AddDouble(message, field_descriptor, value);
break; break;
} }
case FieldDescriptor::CPPTYPE_BOOL: { case FieldDescriptor::CPPTYPE_BOOL: {
GOOGLE_CHECK_GET_BOOL(item, value, NULL); GOOGLE_CHECK_GET_BOOL(item, value, nullptr);
reflection->AddBool(message, field_descriptor, value); reflection->AddBool(message, field_descriptor, value);
break; break;
} }
case FieldDescriptor::CPPTYPE_STRING: { case FieldDescriptor::CPPTYPE_STRING: {
if (!CheckAndSetString( if (!CheckAndSetString(item, message, field_descriptor, reflection, true,
item, message, field_descriptor, reflection, true, -1)) { -1)) {
return NULL; return nullptr;
} }
break; break;
} }
case FieldDescriptor::CPPTYPE_ENUM: { case FieldDescriptor::CPPTYPE_ENUM: {
GOOGLE_CHECK_GET_INT32(item, value, NULL); GOOGLE_CHECK_GET_INT32(item, value, nullptr);
if (reflection->SupportsUnknownEnumValues()) { if (reflection->SupportsUnknownEnumValues()) {
reflection->AddEnumValue(message, field_descriptor, value); reflection->AddEnumValue(message, field_descriptor, value);
} else { } else {
const EnumDescriptor* enum_descriptor = field_descriptor->enum_type(); const EnumDescriptor* enum_descriptor = field_descriptor->enum_type();
const EnumValueDescriptor* enum_value = const EnumValueDescriptor* enum_value =
enum_descriptor->FindValueByNumber(value); enum_descriptor->FindValueByNumber(value);
if (enum_value != NULL) { if (enum_value != nullptr) {
reflection->AddEnum(message, field_descriptor, enum_value); reflection->AddEnum(message, field_descriptor, enum_value);
} else { } else {
ScopedPyObjectPtr s(PyObject_Str(item)); ScopedPyObjectPtr s(PyObject_Str(item));
if (s != NULL) { if (s != nullptr) {
PyErr_Format(PyExc_ValueError, "Unknown enum value: %s", PyErr_Format(PyExc_ValueError, "Unknown enum value: %s",
PyString_AsString(s.get())); PyString_AsString(s.get()));
} }
return NULL; return nullptr;
} }
} }
break; break;
} }
default: default:
PyErr_Format( PyErr_Format(PyExc_SystemError,
PyExc_SystemError, "Adding value to a field of unknown type %d", "Adding value to a field of unknown type %d",
field_descriptor->cpp_type()); field_descriptor->cpp_type());
return NULL; return nullptr;
} }
Py_RETURN_NONE; Py_RETURN_NONE;
@ -437,25 +433,24 @@ static int AssSubscript(PyObject* pself, PyObject* slice, PyObject* value) {
cmessage::AssureWritable(self->parent); cmessage::AssureWritable(self->parent);
Message* message = self->parent->message; Message* message = self->parent->message;
const FieldDescriptor* field_descriptor = const FieldDescriptor* field_descriptor = self->parent_field_descriptor;
self->parent_field_descriptor;
#if PY_MAJOR_VERSION < 3 #if PY_MAJOR_VERSION < 3
if (PyInt_Check(slice)) { if (PyInt_Check(slice)) {
from = to = PyInt_AsLong(slice); from = to = PyInt_AsLong(slice);
} else // NOLINT } else // NOLINT
#endif #endif
if (PyLong_Check(slice)) { if (PyLong_Check(slice)) {
from = to = PyLong_AsLong(slice); from = to = PyLong_AsLong(slice);
} else if (PySlice_Check(slice)) { } else if (PySlice_Check(slice)) {
const Reflection* reflection = message->GetReflection(); const Reflection* reflection = message->GetReflection();
length = reflection->FieldSize(*message, field_descriptor); length = reflection->FieldSize(*message, field_descriptor);
#if PY_MAJOR_VERSION >= 3 #if PY_MAJOR_VERSION >= 3
if (PySlice_GetIndicesEx(slice, if (PySlice_GetIndicesEx(slice, length, &from, &to, &step, &slicelength) ==
length, &from, &to, &step, &slicelength) == -1) { -1) {
#else #else
if (PySlice_GetIndicesEx(reinterpret_cast<PySliceObject*>(slice), if (PySlice_GetIndicesEx(reinterpret_cast<PySliceObject*>(slice), length,
length, &from, &to, &step, &slicelength) == -1) { &from, &to, &step, &slicelength) == -1) {
#endif #endif
return -1; return -1;
} }
@ -465,7 +460,7 @@ static int AssSubscript(PyObject* pself, PyObject* slice, PyObject* value) {
return -1; return -1;
} }
if (value == NULL) { if (value == nullptr) {
return cmessage::DeleteRepeatedField(self->parent, field_descriptor, slice); return cmessage::DeleteRepeatedField(self->parent, field_descriptor, slice);
} }
@ -473,12 +468,12 @@ static int AssSubscript(PyObject* pself, PyObject* slice, PyObject* value) {
return AssignItem(pself, from, value); return AssignItem(pself, from, value);
} }
ScopedPyObjectPtr full_slice(PySlice_New(NULL, NULL, NULL)); ScopedPyObjectPtr full_slice(PySlice_New(nullptr, nullptr, nullptr));
if (full_slice == NULL) { if (full_slice == nullptr) {
return -1; return -1;
} }
ScopedPyObjectPtr new_list(Subscript(pself, full_slice.get())); ScopedPyObjectPtr new_list(Subscript(pself, full_slice.get()));
if (new_list == NULL) { if (new_list == nullptr) {
return -1; return -1;
} }
if (PySequence_SetSlice(new_list.get(), from, to, value) < 0) { if (PySequence_SetSlice(new_list.get(), from, to, value) < 0) {
@ -495,23 +490,23 @@ PyObject* Extend(RepeatedScalarContainer* self, PyObject* value) {
if (value == Py_None) { if (value == Py_None) {
Py_RETURN_NONE; Py_RETURN_NONE;
} }
if ((Py_TYPE(value)->tp_as_sequence == NULL) && PyObject_Not(value)) { if ((Py_TYPE(value)->tp_as_sequence == nullptr) && PyObject_Not(value)) {
Py_RETURN_NONE; Py_RETURN_NONE;
} }
ScopedPyObjectPtr iter(PyObject_GetIter(value)); ScopedPyObjectPtr iter(PyObject_GetIter(value));
if (iter == NULL) { if (iter == nullptr) {
PyErr_SetString(PyExc_TypeError, "Value must be iterable"); PyErr_SetString(PyExc_TypeError, "Value must be iterable");
return NULL; return nullptr;
} }
ScopedPyObjectPtr next; ScopedPyObjectPtr next;
while ((next.reset(PyIter_Next(iter.get()))) != NULL) { while ((next.reset(PyIter_Next(iter.get()))) != nullptr) {
if (ScopedPyObjectPtr(Append(self, next.get())) == NULL) { if (ScopedPyObjectPtr(Append(self, next.get())) == nullptr) {
return NULL; return nullptr;
} }
} }
if (PyErr_Occurred()) { if (PyErr_Occurred()) {
return NULL; return nullptr;
} }
Py_RETURN_NONE; Py_RETURN_NONE;
} }
@ -523,16 +518,16 @@ static PyObject* Insert(PyObject* pself, PyObject* args) {
Py_ssize_t index; Py_ssize_t index;
PyObject* value; PyObject* value;
if (!PyArg_ParseTuple(args, "lO", &index, &value)) { if (!PyArg_ParseTuple(args, "lO", &index, &value)) {
return NULL; return nullptr;
} }
ScopedPyObjectPtr full_slice(PySlice_New(NULL, NULL, NULL)); ScopedPyObjectPtr full_slice(PySlice_New(nullptr, nullptr, nullptr));
ScopedPyObjectPtr new_list(Subscript(pself, full_slice.get())); ScopedPyObjectPtr new_list(Subscript(pself, full_slice.get()));
if (PyList_Insert(new_list.get(), index, value) < 0) { if (PyList_Insert(new_list.get(), index, value) < 0) {
return NULL; return nullptr;
} }
int ret = InternalAssignRepeatedField(self, new_list.get()); int ret = InternalAssignRepeatedField(self, new_list.get());
if (ret < 0) { if (ret < 0) {
return NULL; return nullptr;
} }
Py_RETURN_NONE; Py_RETURN_NONE;
} }
@ -548,10 +543,10 @@ static PyObject* Remove(PyObject* pself, PyObject* value) {
} }
if (match_index == -1) { if (match_index == -1) {
PyErr_SetString(PyExc_ValueError, "remove(x): x not in container"); PyErr_SetString(PyExc_ValueError, "remove(x): x not in container");
return NULL; return nullptr;
} }
if (AssignItem(pself, match_index, NULL) < 0) { if (AssignItem(pself, match_index, nullptr) < 0) {
return NULL; return nullptr;
} }
Py_RETURN_NONE; Py_RETURN_NONE;
} }
@ -570,9 +565,9 @@ static PyObject* RichCompare(PyObject* pself, PyObject* other, int opid) {
// also a repeated scalar container, into Python lists so we can delegate // also a repeated scalar container, into Python lists so we can delegate
// to the list's compare method. // to the list's compare method.
ScopedPyObjectPtr full_slice(PySlice_New(NULL, NULL, NULL)); ScopedPyObjectPtr full_slice(PySlice_New(nullptr, nullptr, nullptr));
if (full_slice == NULL) { if (full_slice == nullptr) {
return NULL; return nullptr;
} }
ScopedPyObjectPtr other_list_deleter; ScopedPyObjectPtr other_list_deleter;
@ -582,54 +577,72 @@ static PyObject* RichCompare(PyObject* pself, PyObject* other, int opid) {
} }
ScopedPyObjectPtr list(Subscript(pself, full_slice.get())); ScopedPyObjectPtr list(Subscript(pself, full_slice.get()));
if (list == NULL) { if (list == nullptr) {
return NULL; return nullptr;
} }
return PyObject_RichCompare(list.get(), other, opid); return PyObject_RichCompare(list.get(), other, opid);
} }
PyObject* Reduce(PyObject* unused_self, PyObject* unused_other) { PyObject* Reduce(PyObject* unused_self, PyObject* unused_other) {
PyErr_Format( PyErr_Format(PickleError_class,
PickleError_class, "can't pickle repeated message fields, convert to list first");
"can't pickle repeated message fields, convert to list first"); return nullptr;
return NULL;
} }
static PyObject* Sort(PyObject* pself, PyObject* args, PyObject* kwds) { static PyObject* Sort(PyObject* pself, PyObject* args, PyObject* kwds) {
// Support the old sort_function argument for backwards // Support the old sort_function argument for backwards
// compatibility. // compatibility.
if (kwds != NULL) { if (kwds != nullptr) {
PyObject* sort_func = PyDict_GetItemString(kwds, "sort_function"); PyObject* sort_func = PyDict_GetItemString(kwds, "sort_function");
if (sort_func != NULL) { if (sort_func != nullptr) {
// Must set before deleting as sort_func is a borrowed reference // Must set before deleting as sort_func is a borrowed reference
// and kwds might be the only thing keeping it alive. // and kwds might be the only thing keeping it alive.
if (PyDict_SetItemString(kwds, "cmp", sort_func) == -1) if (PyDict_SetItemString(kwds, "cmp", sort_func) == -1) return nullptr;
return NULL; if (PyDict_DelItemString(kwds, "sort_function") == -1) return nullptr;
if (PyDict_DelItemString(kwds, "sort_function") == -1)
return NULL;
} }
} }
ScopedPyObjectPtr full_slice(PySlice_New(NULL, NULL, NULL)); ScopedPyObjectPtr full_slice(PySlice_New(nullptr, nullptr, nullptr));
if (full_slice == NULL) { if (full_slice == nullptr) {
return NULL; return nullptr;
} }
ScopedPyObjectPtr list(Subscript(pself, full_slice.get())); ScopedPyObjectPtr list(Subscript(pself, full_slice.get()));
if (list == NULL) { if (list == nullptr) {
return NULL; return nullptr;
} }
ScopedPyObjectPtr m(PyObject_GetAttrString(list.get(), "sort")); ScopedPyObjectPtr m(PyObject_GetAttrString(list.get(), "sort"));
if (m == NULL) { if (m == nullptr) {
return NULL; return nullptr;
} }
ScopedPyObjectPtr res(PyObject_Call(m.get(), args, kwds)); ScopedPyObjectPtr res(PyObject_Call(m.get(), args, kwds));
if (res == NULL) { if (res == nullptr) {
return NULL; return nullptr;
}
int ret = InternalAssignRepeatedField(
reinterpret_cast<RepeatedScalarContainer*>(pself), list.get());
if (ret < 0) {
return nullptr;
}
Py_RETURN_NONE;
}
static PyObject* Reverse(PyObject* pself) {
ScopedPyObjectPtr full_slice(PySlice_New(nullptr, nullptr, nullptr));
if (full_slice == nullptr) {
return nullptr;
}
ScopedPyObjectPtr list(Subscript(pself, full_slice.get()));
if (list == nullptr) {
return nullptr;
}
ScopedPyObjectPtr res(PyObject_CallMethod(list.get(), "reverse", nullptr));
if (res == nullptr) {
return nullptr;
} }
int ret = InternalAssignRepeatedField( int ret = InternalAssignRepeatedField(
reinterpret_cast<RepeatedScalarContainer*>(pself), list.get()); reinterpret_cast<RepeatedScalarContainer*>(pself), list.get());
if (ret < 0) { if (ret < 0) {
return NULL; return nullptr;
} }
Py_RETURN_NONE; Py_RETURN_NONE;
} }
@ -637,27 +650,27 @@ static PyObject* Sort(PyObject* pself, PyObject* args, PyObject* kwds) {
static PyObject* Pop(PyObject* pself, PyObject* args) { static PyObject* Pop(PyObject* pself, PyObject* args) {
Py_ssize_t index = -1; Py_ssize_t index = -1;
if (!PyArg_ParseTuple(args, "|n", &index)) { if (!PyArg_ParseTuple(args, "|n", &index)) {
return NULL; return nullptr;
} }
PyObject* item = Item(pself, index); PyObject* item = Item(pself, index);
if (item == NULL) { if (item == nullptr) {
PyErr_Format(PyExc_IndexError, "list index (%zd) out of range", index); PyErr_Format(PyExc_IndexError, "list index (%zd) out of range", index);
return NULL; return nullptr;
} }
if (AssignItem(pself, index, NULL) < 0) { if (AssignItem(pself, index, nullptr) < 0) {
return NULL; return nullptr;
} }
return item; return item;
} }
static PyObject* ToStr(PyObject* pself) { static PyObject* ToStr(PyObject* pself) {
ScopedPyObjectPtr full_slice(PySlice_New(NULL, NULL, NULL)); ScopedPyObjectPtr full_slice(PySlice_New(nullptr, nullptr, nullptr));
if (full_slice == NULL) { if (full_slice == nullptr) {
return NULL; return nullptr;
} }
ScopedPyObjectPtr list(Subscript(pself, full_slice.get())); ScopedPyObjectPtr list(Subscript(pself, full_slice.get()));
if (list == NULL) { if (list == nullptr) {
return NULL; return nullptr;
} }
return PyObject_Repr(list.get()); return PyObject_Repr(list.get());
} }
@ -670,13 +683,13 @@ static PyObject* MergeFrom(PyObject* pself, PyObject* arg) {
RepeatedScalarContainer* NewContainer( RepeatedScalarContainer* NewContainer(
CMessage* parent, const FieldDescriptor* parent_field_descriptor) { CMessage* parent, const FieldDescriptor* parent_field_descriptor) {
if (!CheckFieldBelongsToMessage(parent_field_descriptor, parent->message)) { if (!CheckFieldBelongsToMessage(parent_field_descriptor, parent->message)) {
return NULL; return nullptr;
} }
RepeatedScalarContainer* self = reinterpret_cast<RepeatedScalarContainer*>( RepeatedScalarContainer* self = reinterpret_cast<RepeatedScalarContainer*>(
PyType_GenericAlloc(&RepeatedScalarContainer_Type, 0)); PyType_GenericAlloc(&RepeatedScalarContainer_Type, 0));
if (self == NULL) { if (self == nullptr) {
return NULL; return nullptr;
} }
Py_INCREF(parent); Py_INCREF(parent);
@ -696,81 +709,81 @@ static void Dealloc(PyObject* pself) {
} }
static PySequenceMethods SqMethods = { static PySequenceMethods SqMethods = {
Len, /* sq_length */ Len, /* sq_length */
0, /* sq_concat */ nullptr, /* sq_concat */
0, /* sq_repeat */ nullptr, /* sq_repeat */
Item, /* sq_item */ Item, /* sq_item */
0, /* sq_slice */ nullptr, /* sq_slice */
AssignItem /* sq_ass_item */ AssignItem /* sq_ass_item */
}; };
static PyMappingMethods MpMethods = { static PyMappingMethods MpMethods = {
Len, /* mp_length */ Len, /* mp_length */
Subscript, /* mp_subscript */ Subscript, /* mp_subscript */
AssSubscript, /* mp_ass_subscript */ AssSubscript, /* mp_ass_subscript */
}; };
static PyMethodDef Methods[] = { static PyMethodDef Methods[] = {
{ "__deepcopy__", DeepCopy, METH_VARARGS, {"__deepcopy__", DeepCopy, METH_VARARGS, "Makes a deep copy of the class."},
"Makes a deep copy of the class." }, {"__reduce__", Reduce, METH_NOARGS,
{ "__reduce__", Reduce, METH_NOARGS, "Outputs picklable representation of the repeated field."},
"Outputs picklable representation of the repeated field." }, {"append", AppendMethod, METH_O,
{ "append", AppendMethod, METH_O, "Appends an object to the repeated container."},
"Appends an object to the repeated container." }, {"extend", ExtendMethod, METH_O,
{ "extend", ExtendMethod, METH_O, "Appends objects to the repeated container."},
"Appends objects to the repeated container." }, {"insert", Insert, METH_VARARGS,
{ "insert", Insert, METH_VARARGS, "Inserts an object at the specified position in the container."},
"Inserts an object at the specified position in the container." }, {"pop", Pop, METH_VARARGS,
{ "pop", Pop, METH_VARARGS, "Removes an object from the repeated container and returns it."},
"Removes an object from the repeated container and returns it." }, {"remove", Remove, METH_O,
{ "remove", Remove, METH_O, "Removes an object from the repeated container."},
"Removes an object from the repeated container." }, {"sort", reinterpret_cast<PyCFunction>(Sort), METH_VARARGS | METH_KEYWORDS,
{ "sort", (PyCFunction)Sort, METH_VARARGS | METH_KEYWORDS, "Sorts the repeated container."},
"Sorts the repeated container."}, {"reverse", reinterpret_cast<PyCFunction>(Reverse), METH_NOARGS,
{ "MergeFrom", (PyCFunction)MergeFrom, METH_O, "Reverses elements order of the repeated container."},
"Merges a repeated container into the current container." }, {"MergeFrom", static_cast<PyCFunction>(MergeFrom), METH_O,
{ NULL, NULL } "Merges a repeated container into the current container."},
}; {nullptr, nullptr}};
} // namespace repeated_scalar_container } // namespace repeated_scalar_container
PyTypeObject RepeatedScalarContainer_Type = { PyTypeObject RepeatedScalarContainer_Type = {
PyVarObject_HEAD_INIT(&PyType_Type, 0) PyVarObject_HEAD_INIT(&PyType_Type, 0) FULL_MODULE_NAME
FULL_MODULE_NAME ".RepeatedScalarContainer", // tp_name ".RepeatedScalarContainer", // tp_name
sizeof(RepeatedScalarContainer), // tp_basicsize sizeof(RepeatedScalarContainer), // tp_basicsize
0, // tp_itemsize 0, // tp_itemsize
repeated_scalar_container::Dealloc, // tp_dealloc repeated_scalar_container::Dealloc, // tp_dealloc
0, // tp_print 0, // tp_print, in Python >=3.8: Py_ssize_t tp_vectorcall_offset
0, // tp_getattr nullptr, // tp_getattr
0, // tp_setattr nullptr, // tp_setattr
0, // tp_compare nullptr, // tp_compare
repeated_scalar_container::ToStr, // tp_repr repeated_scalar_container::ToStr, // tp_repr
0, // tp_as_number nullptr, // tp_as_number
&repeated_scalar_container::SqMethods, // tp_as_sequence &repeated_scalar_container::SqMethods, // tp_as_sequence
&repeated_scalar_container::MpMethods, // tp_as_mapping &repeated_scalar_container::MpMethods, // tp_as_mapping
PyObject_HashNotImplemented, // tp_hash PyObject_HashNotImplemented, // tp_hash
0, // tp_call nullptr, // tp_call
0, // tp_str nullptr, // tp_str
0, // tp_getattro nullptr, // tp_getattro
0, // tp_setattro nullptr, // tp_setattro
0, // tp_as_buffer nullptr, // tp_as_buffer
Py_TPFLAGS_DEFAULT, // tp_flags Py_TPFLAGS_DEFAULT, // tp_flags
"A Repeated scalar container", // tp_doc "A Repeated scalar container", // tp_doc
0, // tp_traverse nullptr, // tp_traverse
0, // tp_clear nullptr, // tp_clear
repeated_scalar_container::RichCompare, // tp_richcompare repeated_scalar_container::RichCompare, // tp_richcompare
0, // tp_weaklistoffset 0, // tp_weaklistoffset
0, // tp_iter nullptr, // tp_iter
0, // tp_iternext nullptr, // tp_iternext
repeated_scalar_container::Methods, // tp_methods repeated_scalar_container::Methods, // tp_methods
0, // tp_members nullptr, // tp_members
0, // tp_getset nullptr, // tp_getset
0, // tp_base nullptr, // tp_base
0, // tp_dict nullptr, // tp_dict
0, // tp_descr_get nullptr, // tp_descr_get
0, // tp_descr_set nullptr, // tp_descr_set
0, // tp_dictoffset 0, // tp_dictoffset
0, // tp_init nullptr, // tp_init
}; };
} // namespace python } // namespace python

@ -41,17 +41,17 @@ namespace google {
namespace protobuf { namespace protobuf {
namespace internal { namespace internal {
void AnyMetadata::PackFrom(const Message& message) { bool AnyMetadata::PackFrom(const Message& message) {
PackFrom(message, kTypeGoogleApisComPrefix); return PackFrom(message, kTypeGoogleApisComPrefix);
} }
void AnyMetadata::PackFrom(const Message& message, bool AnyMetadata::PackFrom(const Message& message,
StringPiece type_url_prefix) { StringPiece type_url_prefix) {
type_url_->Set( type_url_->Set(
&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString(), &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString(),
GetTypeUrl(message.GetDescriptor()->full_name(), type_url_prefix), GetTypeUrl(message.GetDescriptor()->full_name(), type_url_prefix),
nullptr); nullptr);
message.SerializeToString( return message.SerializeToString(
value_->Mutable(ArenaStringPtr::EmptyDefault{}, nullptr)); value_->Mutable(ArenaStringPtr::EmptyDefault{}, nullptr));
} }
@ -80,3 +80,5 @@ bool GetAnyFieldDescriptors(const Message& message,
} // namespace internal } // namespace internal
} // namespace protobuf } // namespace protobuf
} // namespace google } // namespace google
#include <google/protobuf/port_undef.inc>

@ -65,12 +65,14 @@ class PROTOBUF_EXPORT AnyMetadata {
// Packs a message using the default type URL prefix: "type.googleapis.com". // Packs a message using the default type URL prefix: "type.googleapis.com".
// The resulted type URL will be "type.googleapis.com/<message_full_name>". // The resulted type URL will be "type.googleapis.com/<message_full_name>".
// Returns false if serializing the message failed.
template <typename T> template <typename T>
void PackFrom(const T& message) { bool PackFrom(const T& message) {
InternalPackFrom(message, kTypeGoogleApisComPrefix, T::FullMessageName()); return InternalPackFrom(message, kTypeGoogleApisComPrefix,
T::FullMessageName());
} }
void PackFrom(const Message& message); bool PackFrom(const Message& message);
// Packs a message using the given type URL prefix. The type URL will be // Packs a message using the given type URL prefix. The type URL will be
// constructed by concatenating the message type's full name to the prefix // constructed by concatenating the message type's full name to the prefix
@ -78,12 +80,13 @@ class PROTOBUF_EXPORT AnyMetadata {
// For example, both PackFrom(message, "type.googleapis.com") and // For example, both PackFrom(message, "type.googleapis.com") and
// PackFrom(message, "type.googleapis.com/") yield the same result type // PackFrom(message, "type.googleapis.com/") yield the same result type
// URL: "type.googleapis.com/<message_full_name>". // URL: "type.googleapis.com/<message_full_name>".
// Returns false if serializing the message failed.
template <typename T> template <typename T>
void PackFrom(const T& message, StringPiece type_url_prefix) { bool PackFrom(const T& message, StringPiece type_url_prefix) {
InternalPackFrom(message, type_url_prefix, T::FullMessageName()); return InternalPackFrom(message, type_url_prefix, T::FullMessageName());
} }
void PackFrom(const Message& message, StringPiece type_url_prefix); bool PackFrom(const Message& message, StringPiece type_url_prefix);
// Unpacks the payload into the given message. Returns false if the message's // Unpacks the payload into the given message. Returns false if the message's
// type doesn't match the type specified in the type URL (i.e., the full // type doesn't match the type specified in the type URL (i.e., the full
@ -105,7 +108,7 @@ class PROTOBUF_EXPORT AnyMetadata {
} }
private: private:
void InternalPackFrom(const MessageLite& message, bool InternalPackFrom(const MessageLite& message,
StringPiece type_url_prefix, StringPiece type_url_prefix,
StringPiece type_name); StringPiece type_name);
bool InternalUnpackTo(StringPiece type_name, bool InternalUnpackTo(StringPiece type_name,

@ -14,6 +14,8 @@
#include <google/protobuf/wire_format.h> #include <google/protobuf/wire_format.h>
// @@protoc_insertion_point(includes) // @@protoc_insertion_point(includes)
#include <google/protobuf/port_def.inc> #include <google/protobuf/port_def.inc>
PROTOBUF_PRAGMA_INIT_SEG
PROTOBUF_NAMESPACE_OPEN PROTOBUF_NAMESPACE_OPEN
class AnyDefaultTypeInternal { class AnyDefaultTypeInternal {
public: public:
@ -76,7 +78,7 @@ const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_google
}; };
// Force running AddDescriptors() at dynamic initialization time. // Force running AddDescriptors() at dynamic initialization time.
static bool dynamic_init_dummy_google_2fprotobuf_2fany_2eproto = (static_cast<void>(::PROTOBUF_NAMESPACE_ID::internal::AddDescriptors(&descriptor_table_google_2fprotobuf_2fany_2eproto)), true); PROTOBUF_ATTRIBUTE_INIT_PRIORITY static ::PROTOBUF_NAMESPACE_ID::internal::AddDescriptorsRunner dynamic_init_dummy_google_2fprotobuf_2fany_2eproto(&descriptor_table_google_2fprotobuf_2fany_2eproto);
PROTOBUF_NAMESPACE_OPEN PROTOBUF_NAMESPACE_OPEN
// =================================================================== // ===================================================================

@ -110,12 +110,12 @@ class PROTOBUF_EXPORT Any PROTOBUF_FINAL :
// implements Any ----------------------------------------------- // implements Any -----------------------------------------------
void PackFrom(const ::PROTOBUF_NAMESPACE_ID::Message& message) { bool PackFrom(const ::PROTOBUF_NAMESPACE_ID::Message& message) {
_any_metadata_.PackFrom(message); return _any_metadata_.PackFrom(message);
} }
void PackFrom(const ::PROTOBUF_NAMESPACE_ID::Message& message, bool PackFrom(const ::PROTOBUF_NAMESPACE_ID::Message& message,
::PROTOBUF_NAMESPACE_ID::ConstStringParam type_url_prefix) { ::PROTOBUF_NAMESPACE_ID::ConstStringParam type_url_prefix) {
_any_metadata_.PackFrom(message, type_url_prefix); return _any_metadata_.PackFrom(message, type_url_prefix);
} }
bool UnpackTo(::PROTOBUF_NAMESPACE_ID::Message* message) const { bool UnpackTo(::PROTOBUF_NAMESPACE_ID::Message* message) const {
return _any_metadata_.UnpackTo(message); return _any_metadata_.UnpackTo(message);
@ -125,13 +125,13 @@ class PROTOBUF_EXPORT Any PROTOBUF_FINAL :
const ::PROTOBUF_NAMESPACE_ID::FieldDescriptor** type_url_field, const ::PROTOBUF_NAMESPACE_ID::FieldDescriptor** type_url_field,
const ::PROTOBUF_NAMESPACE_ID::FieldDescriptor** value_field); const ::PROTOBUF_NAMESPACE_ID::FieldDescriptor** value_field);
template <typename T, class = typename std::enable_if<!std::is_convertible<T, const ::PROTOBUF_NAMESPACE_ID::Message&>::value>::type> template <typename T, class = typename std::enable_if<!std::is_convertible<T, const ::PROTOBUF_NAMESPACE_ID::Message&>::value>::type>
void PackFrom(const T& message) { bool PackFrom(const T& message) {
_any_metadata_.PackFrom<T>(message); return _any_metadata_.PackFrom<T>(message);
} }
template <typename T, class = typename std::enable_if<!std::is_convertible<T, const ::PROTOBUF_NAMESPACE_ID::Message&>::value>::type> template <typename T, class = typename std::enable_if<!std::is_convertible<T, const ::PROTOBUF_NAMESPACE_ID::Message&>::value>::type>
void PackFrom(const T& message, bool PackFrom(const T& message,
::PROTOBUF_NAMESPACE_ID::ConstStringParam type_url_prefix) { ::PROTOBUF_NAMESPACE_ID::ConstStringParam type_url_prefix) {
_any_metadata_.PackFrom<T>(message, type_url_prefix);} return _any_metadata_.PackFrom<T>(message, type_url_prefix);}
template <typename T, class = typename std::enable_if<!std::is_convertible<T, const ::PROTOBUF_NAMESPACE_ID::Message&>::value>::type> template <typename T, class = typename std::enable_if<!std::is_convertible<T, const ::PROTOBUF_NAMESPACE_ID::Message&>::value>::type>
bool UnpackTo(T* message) const { bool UnpackTo(T* message) const {
return _any_metadata_.UnpackTo<T>(message); return _any_metadata_.UnpackTo<T>(message);

@ -53,12 +53,12 @@ const char kAnyFullTypeName[] = "google.protobuf.Any";
const char kTypeGoogleApisComPrefix[] = "type.googleapis.com/"; const char kTypeGoogleApisComPrefix[] = "type.googleapis.com/";
const char kTypeGoogleProdComPrefix[] = "type.googleprod.com/"; const char kTypeGoogleProdComPrefix[] = "type.googleprod.com/";
void AnyMetadata::InternalPackFrom(const MessageLite& message, bool AnyMetadata::InternalPackFrom(const MessageLite& message,
StringPiece type_url_prefix, StringPiece type_url_prefix,
StringPiece type_name) { StringPiece type_name) {
type_url_->Set(&::google::protobuf::internal::GetEmptyString(), type_url_->Set(&::google::protobuf::internal::GetEmptyString(),
GetTypeUrl(type_name, type_url_prefix), nullptr); GetTypeUrl(type_name, type_url_prefix), nullptr);
message.SerializeToString( return message.SerializeToString(
value_->Mutable(ArenaStringPtr::EmptyDefault{}, nullptr)); value_->Mutable(ArenaStringPtr::EmptyDefault{}, nullptr));
} }

@ -49,7 +49,7 @@ TEST(AnyTest, TestPackAndUnpack) {
protobuf_unittest::TestAny submessage; protobuf_unittest::TestAny submessage;
submessage.set_int32_value(12345); submessage.set_int32_value(12345);
protobuf_unittest::TestAny message; protobuf_unittest::TestAny message;
message.mutable_any_value()->PackFrom(submessage); ASSERT_TRUE(message.mutable_any_value()->PackFrom(submessage));
std::string data = message.SerializeAsString(); std::string data = message.SerializeAsString();
@ -60,6 +60,13 @@ TEST(AnyTest, TestPackAndUnpack) {
EXPECT_EQ(12345, submessage.int32_value()); EXPECT_EQ(12345, submessage.int32_value());
} }
TEST(AnyTest, TestPackFromSerializationExceedsSizeLimit) {
protobuf_unittest::TestAny submessage;
submessage.mutable_text()->resize(INT_MAX, 'a');
protobuf_unittest::TestAny message;
EXPECT_FALSE(message.mutable_any_value()->PackFrom(submessage));
}
TEST(AnyTest, TestUnpackWithTypeMismatch) { TEST(AnyTest, TestUnpackWithTypeMismatch) {
protobuf_unittest::TestAny payload; protobuf_unittest::TestAny payload;
payload.set_int32_value(13); payload.set_int32_value(13);
@ -173,3 +180,5 @@ TEST(AnyTest, MoveAssignment) {
} // namespace } // namespace
} // namespace protobuf } // namespace protobuf
} // namespace google } // namespace google
#include <google/protobuf/port_undef.inc>

@ -34,8 +34,11 @@ package protobuf_unittest;
import "google/protobuf/any.proto"; import "google/protobuf/any.proto";
option java_outer_classname = "TestAnyProto";
message TestAny { message TestAny {
int32 int32_value = 1; int32 int32_value = 1;
google.protobuf.Any any_value = 2; google.protobuf.Any any_value = 2;
repeated google.protobuf.Any repeated_any_value = 3; repeated google.protobuf.Any repeated_any_value = 3;
string text = 4;
} }

@ -14,6 +14,8 @@
#include <google/protobuf/wire_format.h> #include <google/protobuf/wire_format.h>
// @@protoc_insertion_point(includes) // @@protoc_insertion_point(includes)
#include <google/protobuf/port_def.inc> #include <google/protobuf/port_def.inc>
PROTOBUF_PRAGMA_INIT_SEG
extern PROTOBUF_INTERNAL_EXPORT_google_2fprotobuf_2fapi_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<1> scc_info_Method_google_2fprotobuf_2fapi_2eproto; extern PROTOBUF_INTERNAL_EXPORT_google_2fprotobuf_2fapi_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<1> scc_info_Method_google_2fprotobuf_2fapi_2eproto;
extern PROTOBUF_INTERNAL_EXPORT_google_2fprotobuf_2fapi_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_Mixin_google_2fprotobuf_2fapi_2eproto; extern PROTOBUF_INTERNAL_EXPORT_google_2fprotobuf_2fapi_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_Mixin_google_2fprotobuf_2fapi_2eproto;
extern PROTOBUF_INTERNAL_EXPORT_google_2fprotobuf_2ftype_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<1> scc_info_Option_google_2fprotobuf_2ftype_2eproto; extern PROTOBUF_INTERNAL_EXPORT_google_2fprotobuf_2ftype_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<1> scc_info_Option_google_2fprotobuf_2ftype_2eproto;
@ -164,7 +166,7 @@ const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_google
}; };
// Force running AddDescriptors() at dynamic initialization time. // Force running AddDescriptors() at dynamic initialization time.
static bool dynamic_init_dummy_google_2fprotobuf_2fapi_2eproto = (static_cast<void>(::PROTOBUF_NAMESPACE_ID::internal::AddDescriptors(&descriptor_table_google_2fprotobuf_2fapi_2eproto)), true); PROTOBUF_ATTRIBUTE_INIT_PRIORITY static ::PROTOBUF_NAMESPACE_ID::internal::AddDescriptorsRunner dynamic_init_dummy_google_2fprotobuf_2fapi_2eproto(&descriptor_table_google_2fprotobuf_2fapi_2eproto);
PROTOBUF_NAMESPACE_OPEN PROTOBUF_NAMESPACE_OPEN
// =================================================================== // ===================================================================

@ -32,10 +32,14 @@
#include <algorithm> #include <algorithm>
#include <atomic> #include <atomic>
#include <cstddef>
#include <cstdint>
#include <limits> #include <limits>
#include <typeinfo>
#include <google/protobuf/stubs/mutex.h> #include <google/protobuf/arena_impl.h>
#include <google/protobuf/stubs/mutex.h>
#ifdef ADDRESS_SANITIZER #ifdef ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h> #include <sanitizer/asan_interface.h>
#endif // ADDRESS_SANITIZER #endif // ADDRESS_SANITIZER
@ -47,370 +51,388 @@ static const size_t kMaxCleanupListElements = 64; // 1kB on 64-bit.
namespace google { namespace google {
namespace protobuf { namespace protobuf {
namespace internal {
PROTOBUF_EXPORT /*static*/ void* (*const ArenaOptions::kDefaultBlockAlloc)( static SerialArena::Memory AllocateMemory(const AllocationPolicy* policy_ptr,
size_t) = &::operator new; size_t last_size, size_t min_bytes) {
AllocationPolicy policy; // default policy
if (policy_ptr) policy = *policy_ptr;
size_t size;
if (last_size != 0) {
// Double the current block size, up to a limit.
auto max_size = policy.max_block_size;
size = std::min(2 * last_size, max_size);
} else {
size = policy.start_block_size;
}
// Verify that min_bytes + kBlockHeaderSize won't overflow.
GOOGLE_CHECK_LE(min_bytes,
std::numeric_limits<size_t>::max() - SerialArena::kBlockHeaderSize);
size = std::max(size, SerialArena::kBlockHeaderSize + min_bytes);
namespace internal { void* mem;
if (policy.block_alloc == nullptr) {
mem = ::operator new(size);
} else {
mem = policy.block_alloc(size);
}
return {mem, size};
}
class GetDeallocator {
public:
GetDeallocator(const AllocationPolicy* policy, size_t* space_allocated)
: dealloc_(policy ? policy->block_dealloc : nullptr),
space_allocated_(space_allocated) {}
void operator()(SerialArena::Memory mem) const {
#ifdef ADDRESS_SANITIZER
// This memory was provided by the underlying allocator as unpoisoned,
// so return it in an unpoisoned state.
ASAN_UNPOISON_MEMORY_REGION(mem.ptr, mem.size);
#endif // ADDRESS_SANITIZER
if (dealloc_) {
dealloc_(mem.ptr, mem.size);
} else {
#if defined(__GXX_DELETE_WITH_SIZE__) || defined(__cpp_sized_deallocation)
::operator delete(mem.ptr, mem.size);
#else
::operator delete(mem.ptr);
#endif
}
*space_allocated_ += mem.size;
}
private:
void (*dealloc_)(void*, size_t);
size_t* space_allocated_;
};
SerialArena::SerialArena(Block* b, void* owner) : space_allocated_(b->size) {
owner_ = owner;
head_ = b;
ptr_ = b->Pointer(kBlockHeaderSize + ThreadSafeArena::kSerialArenaSize);
limit_ = b->Pointer(b->size & static_cast<size_t>(-8));
}
SerialArena* SerialArena::New(Memory mem, void* owner) {
GOOGLE_DCHECK_LE(kBlockHeaderSize + ThreadSafeArena::kSerialArenaSize, mem.size);
auto b = new (mem.ptr) Block{nullptr, mem.size};
return new (b->Pointer(kBlockHeaderSize)) SerialArena(b, owner);
}
template <typename Deallocator>
SerialArena::Memory SerialArena::Free(Deallocator deallocator) {
Block* b = head_;
Memory mem = {b, b->size};
while (b->next) {
b = b->next; // We must first advance before deleting this block
deallocator(mem);
mem = {b, b->size};
}
return mem;
}
PROTOBUF_NOINLINE
std::pair<void*, SerialArena::CleanupNode*>
SerialArena::AllocateAlignedWithCleanupFallback(
size_t n, const AllocationPolicy* policy) {
AllocateNewBlock(n + kCleanupSize, policy);
return AllocateAlignedWithCleanup(n, policy);
}
PROTOBUF_NOINLINE
void* SerialArena::AllocateAlignedFallback(size_t n,
const AllocationPolicy* policy) {
AllocateNewBlock(n, policy);
return AllocateAligned(n, policy);
}
void SerialArena::AllocateNewBlock(size_t n, const AllocationPolicy* policy) {
// Sync limit to block
head_->start = reinterpret_cast<CleanupNode*>(limit_);
// Record how much used in this block.
space_used_ += ptr_ - head_->Pointer(kBlockHeaderSize);
auto mem = AllocateMemory(policy, head_->size, n);
// We don't want to emit an expensive RMW instruction that requires
// exclusive access to a cacheline. Hence we write it in terms of a
// regular add.
auto relaxed = std::memory_order_relaxed;
space_allocated_.store(space_allocated_.load(relaxed) + mem.size, relaxed);
head_ = new (mem.ptr) Block{head_, mem.size};
ptr_ = head_->Pointer(kBlockHeaderSize);
limit_ = head_->Pointer(head_->size);
#ifdef ADDRESS_SANITIZER
ASAN_POISON_MEMORY_REGION(ptr_, limit_ - ptr_);
#endif // ADDRESS_SANITIZER
}
uint64 SerialArena::SpaceUsed() const {
uint64 space_used = ptr_ - head_->Pointer(kBlockHeaderSize);
space_used += space_used_;
// Remove the overhead of the SerialArena itself.
space_used -= ThreadSafeArena::kSerialArenaSize;
return space_used;
}
void SerialArena::CleanupList() {
Block* b = head_;
b->start = reinterpret_cast<CleanupNode*>(limit_);
do {
auto* limit = reinterpret_cast<CleanupNode*>(
b->Pointer(b->size & static_cast<size_t>(-8)));
auto it = b->start;
auto num = limit - it;
if (num > 0) {
for (; it < limit; it++) {
it->cleanup(it->elem);
}
}
b = b->next;
} while (b);
}
ArenaImpl::CacheAlignedLifecycleIdGenerator ArenaImpl::lifecycle_id_generator_;
ThreadSafeArena::CacheAlignedLifecycleIdGenerator
ThreadSafeArena::lifecycle_id_generator_;
#if defined(GOOGLE_PROTOBUF_NO_THREADLOCAL) #if defined(GOOGLE_PROTOBUF_NO_THREADLOCAL)
ArenaImpl::ThreadCache& ArenaImpl::thread_cache() { ThreadSafeArena::ThreadCache& ThreadSafeArena::thread_cache() {
static internal::ThreadLocalStorage<ThreadCache>* thread_cache_ = static internal::ThreadLocalStorage<ThreadCache>* thread_cache_ =
new internal::ThreadLocalStorage<ThreadCache>(); new internal::ThreadLocalStorage<ThreadCache>();
return *thread_cache_->Get(); return *thread_cache_->Get();
} }
#elif defined(PROTOBUF_USE_DLLS) #elif defined(PROTOBUF_USE_DLLS)
ArenaImpl::ThreadCache& ArenaImpl::thread_cache() { ThreadSafeArena::ThreadCache& ThreadSafeArena::thread_cache() {
static PROTOBUF_THREAD_LOCAL ThreadCache thread_cache_ = { static PROTOBUF_THREAD_LOCAL ThreadCache thread_cache_ = {
0, static_cast<LifecycleIdAtomic>(-1), nullptr}; 0, static_cast<LifecycleIdAtomic>(-1), nullptr};
return thread_cache_; return thread_cache_;
} }
#else #else
PROTOBUF_THREAD_LOCAL ArenaImpl::ThreadCache ArenaImpl::thread_cache_ = { PROTOBUF_THREAD_LOCAL ThreadSafeArena::ThreadCache
0, static_cast<LifecycleIdAtomic>(-1), nullptr}; ThreadSafeArena::thread_cache_ = {0, static_cast<LifecycleIdAtomic>(-1),
nullptr};
#endif #endif
void ArenaFree(void* object, size_t size) { void ThreadSafeArena::InitializeFrom(void* mem, size_t size) {
#if defined(__GXX_DELETE_WITH_SIZE__) || defined(__cpp_sized_deallocation) GOOGLE_DCHECK_EQ(reinterpret_cast<uintptr_t>(mem) & 7, 0u);
::operator delete(object, size); Init(false);
#else
(void)size;
::operator delete(object);
#endif
}
ArenaImpl::ArenaImpl(const ArenaOptions& options) {
ArenaMetricsCollector* collector = nullptr;
bool record_allocs = false;
if (options.make_metrics_collector != nullptr) {
collector = (*options.make_metrics_collector)();
record_allocs = (collector && collector->RecordAllocs());
}
// Get memory where we can store non-default options if needed. // Ignore initial block if it is too small.
// Use supplied initial_block if it is large enough. if (mem != nullptr && size >= kBlockHeaderSize + kSerialArenaSize) {
size_t min_block_size = kOptionsSize + kBlockHeaderSize + kSerialArenaSize; alloc_policy_ |= kUserOwnedInitialBlock;
char* mem = options.initial_block; SetInitialBlock(mem, size);
size_t mem_size = options.initial_block_size;
GOOGLE_DCHECK_EQ(reinterpret_cast<uintptr_t>(mem) & 7, 0);
if (mem == nullptr || mem_size < min_block_size) {
// Supplied initial block is not big enough.
mem_size = std::max(min_block_size, options.start_block_size);
mem = reinterpret_cast<char*>((*options.block_alloc)(mem_size));
} }
}
// Create the special block. void ThreadSafeArena::InitializeWithPolicy(void* mem, size_t size,
const bool special = true; bool record_allocs,
const bool user_owned = (mem == options.initial_block); AllocationPolicy policy) {
auto block = GOOGLE_DCHECK_EQ(reinterpret_cast<uintptr_t>(mem) & 7, 0u);
new (mem) SerialArena::Block(mem_size, nullptr, special, user_owned);
// Options occupy the beginning of the initial block.
options_ = new (block->Pointer(block->pos())) Options;
#ifdef ADDRESS_SANITIZER
ASAN_UNPOISON_MEMORY_REGION(options_, kOptionsSize);
#endif // ADDRESS_SANITIZER
options_->start_block_size = options.start_block_size;
options_->max_block_size = options.max_block_size;
options_->block_alloc = options.block_alloc;
options_->block_dealloc = options.block_dealloc;
options_->metrics_collector = collector;
block->set_pos(block->pos() + kOptionsSize);
Init(record_allocs); Init(record_allocs);
SetInitialBlock(block);
// Ignore initial block if it is too small. We include an optional
// AllocationPolicy in this check, so that this can be allocated on the
// first block.
constexpr size_t kAPSize = internal::AlignUpTo8(sizeof(AllocationPolicy));
constexpr size_t kMinimumSize = kBlockHeaderSize + kSerialArenaSize + kAPSize;
if (mem != nullptr && size >= kMinimumSize) {
alloc_policy_ = kUserOwnedInitialBlock;
} else {
alloc_policy_ = 0;
auto tmp = AllocateMemory(&policy, 0, kMinimumSize);
mem = tmp.ptr;
size = tmp.size;
}
SetInitialBlock(mem, size);
auto sa = threads_.load(std::memory_order_relaxed);
// We ensured enough space so this cannot fail.
void* p;
if (!sa || !sa->MaybeAllocateAligned(kAPSize, &p)) {
GOOGLE_LOG(FATAL) << "MaybeAllocateAligned cannot fail here.";
return;
}
new (p) AllocationPolicy{policy};
alloc_policy_ |= reinterpret_cast<intptr_t>(p);
} }
void ArenaImpl::Init(bool record_allocs) { void ThreadSafeArena::Init(bool record_allocs) {
ThreadCache& tc = thread_cache(); ThreadCache& tc = thread_cache();
auto id = tc.next_lifecycle_id; auto id = tc.next_lifecycle_id;
constexpr uint64 kInc = ThreadCache::kPerThreadIds * 2; // We increment lifecycle_id's by multiples of two so we can use bit 0 as
// a tag.
constexpr uint64 kDelta = 2;
constexpr uint64 kInc = ThreadCache::kPerThreadIds * kDelta;
if (PROTOBUF_PREDICT_FALSE((id & (kInc - 1)) == 0)) { if (PROTOBUF_PREDICT_FALSE((id & (kInc - 1)) == 0)) {
if (sizeof(lifecycle_id_generator_.id) == 4) { constexpr auto relaxed = std::memory_order_relaxed;
// 2^32 is dangerous low to guarantee uniqueness. If we start dolling out // On platforms that don't support uint64 atomics we can certainly not
// unique id's in ranges of kInc it's unacceptably low. In this case // afford to increment by large intervals and expect uniqueness due to
// we increment by 1. The additional range of kPerThreadIds that are used // wrapping, hence we only add by 1.
// per thread effectively pushes the overflow time from weeks to years id = lifecycle_id_generator_.id.fetch_add(1, relaxed) * kInc;
// of continuous running.
id = lifecycle_id_generator_.id.fetch_add(1, std::memory_order_relaxed) *
kInc;
} else {
id =
lifecycle_id_generator_.id.fetch_add(kInc, std::memory_order_relaxed);
}
} }
tc.next_lifecycle_id = id + 2; tc.next_lifecycle_id = id + kDelta;
// We store "record_allocs" in the low bit of lifecycle_id_. tag_and_id_ = id | (record_allocs ? kRecordAllocs : 0);
lifecycle_id_ = id | (record_allocs ? 1 : 0);
hint_.store(nullptr, std::memory_order_relaxed); hint_.store(nullptr, std::memory_order_relaxed);
threads_.store(nullptr, std::memory_order_relaxed); threads_.store(nullptr, std::memory_order_relaxed);
space_allocated_.store(0, std::memory_order_relaxed);
} }
void ArenaImpl::SetInitialBlock(SerialArena::Block* block) { void ThreadSafeArena::SetInitialBlock(void* mem, size_t size) {
// Calling thread owns the first block. This allows the single-threaded case SerialArena* serial = SerialArena::New({mem, size}, &thread_cache());
// to allocate on the first block without having to perform atomic operations.
SerialArena* serial = SerialArena::New(block, &thread_cache(), this);
serial->set_next(NULL); serial->set_next(NULL);
threads_.store(serial, std::memory_order_relaxed); threads_.store(serial, std::memory_order_relaxed);
space_allocated_.store(block->size(), std::memory_order_relaxed);
CacheSerialArena(serial); CacheSerialArena(serial);
} }
ArenaImpl::~ArenaImpl() { ThreadSafeArena::~ThreadSafeArena() {
// Have to do this in a first pass, because some of the destructors might // Have to do this in a first pass, because some of the destructors might
// refer to memory in other blocks. // refer to memory in other blocks.
CleanupList(); CleanupList();
ArenaMetricsCollector* collector = nullptr; size_t space_allocated = 0;
auto deallocator = &ArenaFree; auto mem = Free(&space_allocated);
if (options_) {
collector = options_->metrics_collector;
deallocator = options_->block_dealloc;
}
PerBlock([deallocator](SerialArena::Block* b) { // Policy is about to get deleted.
#ifdef ADDRESS_SANITIZER auto p = AllocPolicy();
// This memory was provided by the underlying allocator as unpoisoned, so ArenaMetricsCollector* collector = p ? p->metrics_collector : nullptr;
// return it in an unpoisoned state.
ASAN_UNPOISON_MEMORY_REGION(b->Pointer(0), b->size());
#endif // ADDRESS_SANITIZER
if (!b->user_owned()) {
(*deallocator)(b, b->size());
}
});
if (collector) { if (alloc_policy_ & kUserOwnedInitialBlock) {
collector->OnDestroy(SpaceAllocated()); space_allocated += mem.size;
} else {
GetDeallocator(AllocPolicy(), &space_allocated)(mem);
} }
if (collector) collector->OnDestroy(space_allocated);
} }
uint64 ArenaImpl::Reset() { SerialArena::Memory ThreadSafeArena::Free(size_t* space_allocated) {
if (options_ && options_->metrics_collector) { SerialArena::Memory mem = {nullptr, 0};
options_->metrics_collector->OnReset(SpaceAllocated()); auto deallocator = GetDeallocator(AllocPolicy(), space_allocated);
} PerSerialArena([deallocator, &mem](SerialArena* a) {
if (mem.ptr) deallocator(mem);
mem = a->Free(deallocator);
});
return mem;
}
uint64 ThreadSafeArena::Reset() {
// Have to do this in a first pass, because some of the destructors might // Have to do this in a first pass, because some of the destructors might
// refer to memory in other blocks. // refer to memory in other blocks.
CleanupList(); CleanupList();
// Discard all blocks except the special block (if present). // Discard all blocks except the special block (if present).
uint64 space_allocated = 0; size_t space_allocated = 0;
SerialArena::Block* special_block = nullptr; auto mem = Free(&space_allocated);
auto deallocator = (options_ ? options_->block_dealloc : &ArenaFree);
PerBlock(
[&space_allocated, &special_block, deallocator](SerialArena::Block* b) {
space_allocated += b->size();
#ifdef ADDRESS_SANITIZER
// This memory was provided by the underlying allocator as unpoisoned,
// so return it in an unpoisoned state.
ASAN_UNPOISON_MEMORY_REGION(b->Pointer(0), b->size());
#endif // ADDRESS_SANITIZER
if (!b->special()) {
(*deallocator)(b, b->size());
} else {
// Prepare special block for reuse.
// Note: if options_ is present, it occupies the beginning of the
// block and therefore pos is advanced past it.
GOOGLE_DCHECK(special_block == nullptr);
special_block = b;
}
});
Init(record_allocs());
if (special_block != nullptr) {
// next() should still be nullptr since we are using a stack discipline, but
// clear it anyway to reduce fragility.
GOOGLE_DCHECK_EQ(special_block->next(), nullptr);
special_block->clear_next();
special_block->set_pos(kBlockHeaderSize + (options_ ? kOptionsSize : 0));
SetInitialBlock(special_block);
}
return space_allocated;
}
std::pair<void*, size_t> ArenaImpl::NewBuffer(size_t last_size, if (AllocPolicy()) {
size_t min_bytes) { auto saved_policy = *AllocPolicy();
size_t size; if (alloc_policy_ & kUserOwnedInitialBlock) {
if (last_size != -1) { space_allocated += mem.size;
// Double the current block size, up to a limit. } else {
auto max_size = options_ ? options_->max_block_size : kDefaultMaxBlockSize; GetDeallocator(AllocPolicy(), &space_allocated)(mem);
size = std::min(2 * last_size, max_size); mem.ptr = nullptr;
mem.size = 0;
}
ArenaMetricsCollector* collector = saved_policy.metrics_collector;
if (collector) collector->OnReset(space_allocated);
InitializeWithPolicy(mem.ptr, mem.size, ShouldRecordAlloc(), saved_policy);
} else { } else {
size = options_ ? options_->start_block_size : kDefaultStartBlockSize; // Nullptr policy
if (alloc_policy_ & kUserOwnedInitialBlock) {
space_allocated += mem.size;
InitializeFrom(mem.ptr, mem.size);
} else {
GetDeallocator(AllocPolicy(), &space_allocated)(mem);
Init(false);
}
} }
// Verify that min_bytes + kBlockHeaderSize won't overflow.
GOOGLE_CHECK_LE(min_bytes, std::numeric_limits<size_t>::max() - kBlockHeaderSize);
size = std::max(size, kBlockHeaderSize + min_bytes);
void* mem = options_ ? (*options_->block_alloc)(size) : ::operator new(size); return space_allocated;
space_allocated_.fetch_add(size, std::memory_order_relaxed);
return {mem, size};
}
SerialArena::Block* SerialArena::NewBlock(SerialArena::Block* last_block,
size_t min_bytes, ArenaImpl* arena) {
void* mem;
size_t size;
std::tie(mem, size) =
arena->NewBuffer(last_block ? last_block->size() : -1, min_bytes);
Block* b = new (mem) Block(size, last_block, false, false);
return b;
}
PROTOBUF_NOINLINE
void SerialArena::AddCleanupFallback(void* elem, void (*cleanup)(void*)) {
size_t size = cleanup_ ? cleanup_->size * 2 : kMinCleanupListElements;
size = std::min(size, kMaxCleanupListElements);
size_t bytes = internal::AlignUpTo8(CleanupChunk::SizeOf(size));
CleanupChunk* list = reinterpret_cast<CleanupChunk*>(AllocateAligned(bytes));
list->next = cleanup_;
list->size = size;
cleanup_ = list;
cleanup_ptr_ = &list->nodes[0];
cleanup_limit_ = &list->nodes[size];
AddCleanup(elem, cleanup);
} }
void* ArenaImpl::AllocateAlignedAndAddCleanup(size_t n, std::pair<void*, SerialArena::CleanupNode*>
void (*cleanup)(void*)) { ThreadSafeArena::AllocateAlignedWithCleanup(size_t n,
const std::type_info* type) {
SerialArena* arena; SerialArena* arena;
if (PROTOBUF_PREDICT_TRUE(GetSerialArenaFast(&arena))) { if (PROTOBUF_PREDICT_TRUE(GetSerialArenaFast(tag_and_id_, &arena))) {
return arena->AllocateAlignedAndAddCleanup(n, cleanup); return arena->AllocateAlignedWithCleanup(n, AllocPolicy());
} else { } else {
return AllocateAlignedAndAddCleanupFallback(n, cleanup); return AllocateAlignedWithCleanupFallback(n, type);
} }
} }
void ArenaImpl::AddCleanup(void* elem, void (*cleanup)(void*)) { void ThreadSafeArena::AddCleanup(void* elem, void (*cleanup)(void*)) {
SerialArena* arena; SerialArena* arena;
if (PROTOBUF_PREDICT_TRUE(GetSerialArenaFast(&arena))) { if (PROTOBUF_PREDICT_TRUE(GetSerialArenaFast(LifeCycleId(), &arena))) {
arena->AddCleanup(elem, cleanup); arena->AddCleanup(elem, cleanup, AllocPolicy());
} else { } else {
return AddCleanupFallback(elem, cleanup); return AddCleanupFallback(elem, cleanup);
} }
} }
PROTOBUF_NOINLINE PROTOBUF_NOINLINE
void* ArenaImpl::AllocateAlignedFallback(size_t n) { void* ThreadSafeArena::AllocateAlignedFallback(size_t n,
return GetSerialArenaFallback(&thread_cache())->AllocateAligned(n); const std::type_info* type) {
if (ShouldRecordAlloc()) {
RecordAlloc(type, n);
SerialArena* arena;
if (PROTOBUF_PREDICT_TRUE(GetSerialArenaFast(LifeCycleId(), &arena))) {
return arena->AllocateAligned(n, AllocPolicy());
}
}
return GetSerialArenaFallback(&thread_cache())
->AllocateAligned(n, AllocPolicy());
} }
PROTOBUF_NOINLINE PROTOBUF_NOINLINE
void* ArenaImpl::AllocateAlignedAndAddCleanupFallback(size_t n, std::pair<void*, SerialArena::CleanupNode*>
void (*cleanup)(void*)) { ThreadSafeArena::AllocateAlignedWithCleanupFallback(
return GetSerialArenaFallback( size_t n, const std::type_info* type) {
&thread_cache())->AllocateAlignedAndAddCleanup(n, cleanup); if (ShouldRecordAlloc()) {
RecordAlloc(type, n);
SerialArena* arena;
if (GetSerialArenaFast(LifeCycleId(), &arena)) {
return arena->AllocateAlignedWithCleanup(n, AllocPolicy());
}
}
return GetSerialArenaFallback(&thread_cache())
->AllocateAlignedWithCleanup(n, AllocPolicy());
} }
PROTOBUF_NOINLINE PROTOBUF_NOINLINE
void ArenaImpl::AddCleanupFallback(void* elem, void (*cleanup)(void*)) { void ThreadSafeArena::AddCleanupFallback(void* elem, void (*cleanup)(void*)) {
GetSerialArenaFallback(&thread_cache())->AddCleanup(elem, cleanup); GetSerialArenaFallback(&thread_cache())
->AddCleanup(elem, cleanup, AllocPolicy());
} }
PROTOBUF_NOINLINE uint64 ThreadSafeArena::SpaceAllocated() const {
void* SerialArena::AllocateAlignedFallback(size_t n) {
// Sync back to current's pos.
head_->set_pos(head_->size() - (limit_ - ptr_));
head_ = NewBlock(head_, n, arena_);
ptr_ = head_->Pointer(head_->pos());
limit_ = head_->Pointer(head_->size());
#ifdef ADDRESS_SANITIZER
ASAN_POISON_MEMORY_REGION(ptr_, limit_ - ptr_);
#endif // ADDRESS_SANITIZER
return AllocateAligned(n);
}
uint64 ArenaImpl::SpaceAllocated() const {
return space_allocated_.load(std::memory_order_relaxed);
}
uint64 ArenaImpl::SpaceUsed() const {
SerialArena* serial = threads_.load(std::memory_order_acquire); SerialArena* serial = threads_.load(std::memory_order_acquire);
uint64 space_used = 0; uint64 res = 0;
for (; serial; serial = serial->next()) { for (; serial; serial = serial->next()) {
space_used += serial->SpaceUsed(); res += serial->SpaceAllocated();
}
// Remove the overhead of Options structure, if any.
if (options_) {
space_used -= kOptionsSize;
} }
return space_used; return res;
} }
uint64 SerialArena::SpaceUsed() const { uint64 ThreadSafeArena::SpaceUsed() const {
// Get current block's size from ptr_ (since we can't trust head_->pos(). SerialArena* serial = threads_.load(std::memory_order_acquire);
uint64 space_used = ptr_ - head_->Pointer(kBlockHeaderSize); uint64 space_used = 0;
// Get subsequent block size from b->pos().
for (Block* b = head_->next(); b; b = b->next()) {
space_used += (b->pos() - kBlockHeaderSize);
}
// Remove the overhead of the SerialArena itself.
space_used -= ArenaImpl::kSerialArenaSize;
return space_used;
}
void ArenaImpl::CleanupList() {
// By omitting an Acquire barrier we ensure that any user code that doesn't
// properly synchronize Reset() or the destructor will throw a TSAN warning.
SerialArena* serial = threads_.load(std::memory_order_relaxed);
for (; serial; serial = serial->next()) { for (; serial; serial = serial->next()) {
serial->CleanupList(); space_used += serial->SpaceUsed();
}
}
void SerialArena::CleanupList() {
if (cleanup_ != NULL) {
CleanupListFallback();
} }
return space_used - (AllocPolicy() ? sizeof(AllocationPolicy) : 0);
} }
void SerialArena::CleanupListFallback() { void ThreadSafeArena::CleanupList() {
// The first chunk might be only partially full, so calculate its size PerSerialArena([](SerialArena* a) { a->CleanupList(); });
// from cleanup_ptr_. Subsequent chunks are always full, so use list->size.
size_t n = cleanup_ptr_ - &cleanup_->nodes[0];
CleanupChunk* list = cleanup_;
while (true) {
CleanupNode* node = &list->nodes[0];
// Cleanup newest elements first (allocated last).
for (size_t i = n; i > 0; i--) {
node[i - 1].cleanup(node[i - 1].elem);
}
list = list->next;
if (list == nullptr) {
break;
}
// All but the first chunk are always full.
n = list->size;
}
}
SerialArena* SerialArena::New(Block* b, void* owner, ArenaImpl* arena) {
auto pos = b->pos();
GOOGLE_DCHECK_LE(pos + ArenaImpl::kSerialArenaSize, b->size());
SerialArena* serial = reinterpret_cast<SerialArena*>(b->Pointer(pos));
b->set_pos(pos + ArenaImpl::kSerialArenaSize);
serial->arena_ = arena;
serial->owner_ = owner;
serial->head_ = b;
serial->ptr_ = b->Pointer(b->pos());
serial->limit_ = b->Pointer(b->size());
serial->cleanup_ = NULL;
serial->cleanup_ptr_ = NULL;
serial->cleanup_limit_ = NULL;
return serial;
} }
PROTOBUF_NOINLINE PROTOBUF_NOINLINE
SerialArena* ArenaImpl::GetSerialArenaFallback(void* me) { SerialArena* ThreadSafeArena::GetSerialArenaFallback(void* me) {
// Look for this SerialArena in our linked list. // Look for this SerialArena in our linked list.
SerialArena* serial = threads_.load(std::memory_order_acquire); SerialArena* serial = threads_.load(std::memory_order_acquire);
for (; serial; serial = serial->next()) { for (; serial; serial = serial->next()) {
@ -422,8 +444,8 @@ SerialArena* ArenaImpl::GetSerialArenaFallback(void* me) {
if (!serial) { if (!serial) {
// This thread doesn't have any SerialArena, which also means it doesn't // This thread doesn't have any SerialArena, which also means it doesn't
// have any blocks yet. So we'll allocate its first block now. // have any blocks yet. So we'll allocate its first block now.
SerialArena::Block* b = SerialArena::NewBlock(NULL, kSerialArenaSize, this); serial = SerialArena::New(
serial = SerialArena::New(b, me, this); AllocateMemory(AllocPolicy(), 0, kSerialArenaSize), me);
SerialArena* head = threads_.load(std::memory_order_relaxed); SerialArena* head = threads_.load(std::memory_order_relaxed);
do { do {
@ -436,14 +458,25 @@ SerialArena* ArenaImpl::GetSerialArenaFallback(void* me) {
return serial; return serial;
} }
ArenaMetricsCollector::~ArenaMetricsCollector() {}
} // namespace internal } // namespace internal
PROTOBUF_FUNC_ALIGN(32) PROTOBUF_FUNC_ALIGN(32)
void* Arena::AllocateAlignedNoHook(size_t n) { void* Arena::AllocateAlignedNoHook(size_t n) {
return impl_.AllocateAligned(n); return impl_.AllocateAligned(n, nullptr);
}
PROTOBUF_FUNC_ALIGN(32)
void* Arena::AllocateAlignedWithHook(size_t n, const std::type_info* type) {
return impl_.AllocateAligned(n, type);
}
PROTOBUF_FUNC_ALIGN(32)
std::pair<void*, internal::SerialArena::CleanupNode*>
Arena::AllocateAlignedWithCleanup(size_t n, const std::type_info* type) {
return impl_.AllocateAlignedWithCleanup(n, type);
} }
} // namespace protobuf } // namespace protobuf
} // namespace google } // namespace google
#include <google/protobuf/port_undef.inc>

@ -93,11 +93,28 @@ class EpsCopyInputStream; // defined in parse_context.h
template <typename Type> template <typename Type>
class GenericTypeHandler; // defined in repeated_field.h class GenericTypeHandler; // defined in repeated_field.h
PROTOBUF_ALWAYS_INLINE
inline void* AlignTo(void* ptr, size_t align) {
return reinterpret_cast<void*>(
(reinterpret_cast<uintptr_t>(ptr) + align - 1) & (~align + 1));
}
// Templated cleanup methods. // Templated cleanup methods.
template <typename T> template <typename T>
void arena_destruct_object(void* object) { void arena_destruct_object(void* object) {
reinterpret_cast<T*>(object)->~T(); reinterpret_cast<T*>(object)->~T();
} }
template <bool destructor_skippable, typename T>
struct ObjectDestructor {
constexpr static void (*destructor)(void*) = &arena_destruct_object<T>;
};
template <typename T>
struct ObjectDestructor<true, T> {
constexpr static void (*destructor)(void*) = nullptr;
};
template <typename T> template <typename T>
void arena_delete_object(void* object) { void arena_delete_object(void* object) {
delete reinterpret_cast<T*>(object); delete reinterpret_cast<T*>(object);
@ -138,34 +155,38 @@ struct ArenaOptions {
void (*block_dealloc)(void*, size_t); void (*block_dealloc)(void*, size_t);
ArenaOptions() ArenaOptions()
: start_block_size(kDefaultStartBlockSize), : start_block_size(internal::AllocationPolicy::kDefaultStartBlockSize),
max_block_size(kDefaultMaxBlockSize), max_block_size(internal::AllocationPolicy::kDefaultMaxBlockSize),
initial_block(NULL), initial_block(NULL),
initial_block_size(0), initial_block_size(0),
block_alloc(kDefaultBlockAlloc), block_alloc(nullptr),
block_dealloc(&internal::ArenaFree), block_dealloc(nullptr),
make_metrics_collector(nullptr) {} make_metrics_collector(nullptr) {}
PROTOBUF_EXPORT static void* (*const kDefaultBlockAlloc)(size_t);
private: private:
// If make_metrics_collector is not nullptr, it will be called at Arena init // If make_metrics_collector is not nullptr, it will be called at Arena init
// time. It may return a pointer to a collector instance that will be notified // time. It may return a pointer to a collector instance that will be notified
// of interesting events related to the arena. // of interesting events related to the arena.
internal::ArenaMetricsCollector* (*make_metrics_collector)(); internal::ArenaMetricsCollector* (*make_metrics_collector)();
// Constants define default starting block size and max block size for internal::ArenaMetricsCollector* MetricsCollector() const {
// arena allocator behavior -- see descriptions above. return make_metrics_collector ? (*make_metrics_collector)() : nullptr;
static const size_t kDefaultStartBlockSize = }
internal::ArenaImpl::kDefaultStartBlockSize;
static const size_t kDefaultMaxBlockSize = internal::AllocationPolicy AllocationPolicy() const {
internal::ArenaImpl::kDefaultMaxBlockSize; internal::AllocationPolicy res;
res.start_block_size = start_block_size;
res.max_block_size = max_block_size;
res.block_alloc = block_alloc;
res.block_dealloc = block_dealloc;
res.metrics_collector = MetricsCollector();
return res;
}
friend void arena_metrics::EnableArenaMetrics(ArenaOptions*); friend void arena_metrics::EnableArenaMetrics(ArenaOptions*);
friend class Arena; friend class Arena;
friend class ArenaOptionsTestFriend; friend class ArenaOptionsTestFriend;
friend class internal::ArenaImpl;
}; };
// Support for non-RTTI environments. (The metrics hooks API uses type // Support for non-RTTI environments. (The metrics hooks API uses type
@ -236,7 +257,9 @@ class PROTOBUF_EXPORT PROTOBUF_ALIGNAS(8) Arena final {
// Arena constructor taking custom options. See ArenaOptions above for // Arena constructor taking custom options. See ArenaOptions above for
// descriptions of the options available. // descriptions of the options available.
explicit Arena(const ArenaOptions& options) : impl_(options) {} explicit Arena(const ArenaOptions& options)
: impl_(options.initial_block, options.initial_block_size,
options.AllocationPolicy()) {}
// Block overhead. Use this as a guide for how much to over-allocate the // Block overhead. Use this as a guide for how much to over-allocate the
// initial block if you want an allocation of size N to fit inside it. // initial block if you want an allocation of size N to fit inside it.
@ -244,8 +267,9 @@ class PROTOBUF_EXPORT PROTOBUF_ALIGNAS(8) Arena final {
// WARNING: if you allocate multiple objects, it is difficult to guarantee // WARNING: if you allocate multiple objects, it is difficult to guarantee
// that a series of allocations will fit in the initial block, especially if // that a series of allocations will fit in the initial block, especially if
// Arena changes its alignment guarantees in the future! // Arena changes its alignment guarantees in the future!
static const size_t kBlockOverhead = internal::ArenaImpl::kBlockHeaderSize + static const size_t kBlockOverhead =
internal::ArenaImpl::kSerialArenaSize; internal::ThreadSafeArena::kBlockHeaderSize +
internal::ThreadSafeArena::kSerialArenaSize;
inline ~Arena() {} inline ~Arena() {}
@ -290,8 +314,16 @@ class PROTOBUF_EXPORT PROTOBUF_ALIGNAS(8) Arena final {
// is obtained from the arena). // is obtained from the arena).
template <typename T, typename... Args> template <typename T, typename... Args>
PROTOBUF_ALWAYS_INLINE static T* Create(Arena* arena, Args&&... args) { PROTOBUF_ALWAYS_INLINE static T* Create(Arena* arena, Args&&... args) {
return CreateNoMessage<T>(arena, is_arena_constructable<T>(), if (arena == NULL) {
std::forward<Args>(args)...); return new T(std::forward<Args>(args)...);
} else {
auto destructor =
internal::ObjectDestructor<std::is_trivially_destructible<T>::value,
T>::destructor;
return new (arena->AllocateInternal(sizeof(T), alignof(T), destructor,
RTTI_TYPE_ID(T)))
T(std::forward<Args>(args)...);
}
} }
// Create an array of object type T on the arena *without* invoking the // Create an array of object type T on the arena *without* invoking the
@ -316,9 +348,12 @@ class PROTOBUF_EXPORT PROTOBUF_ALIGNAS(8) Arena final {
} }
} }
// The following are routines are for monitoring. They will approximate the
// total sum allocated and used memory, but the exact value is an
// implementation deal. For instance allocated space depends on growth
// policies. Do not use these in unit tests.
// Returns the total space allocated by the arena, which is the sum of the // Returns the total space allocated by the arena, which is the sum of the
// sizes of the underlying blocks. This method is relatively fast; a counter // sizes of the underlying blocks.
// is kept as blocks are allocated.
uint64 SpaceAllocated() const { return impl_.SpaceAllocated(); } uint64 SpaceAllocated() const { return impl_.SpaceAllocated(); }
// Returns the total space used by the arena. Similar to SpaceAllocated but // Returns the total space used by the arena. Similar to SpaceAllocated but
// does not include free space and block overhead. The total space returned // does not include free space and block overhead. The total space returned
@ -336,8 +371,8 @@ class PROTOBUF_EXPORT PROTOBUF_ALIGNAS(8) Arena final {
// Adds |object| to a list of heap-allocated objects to be freed with |delete| // Adds |object| to a list of heap-allocated objects to be freed with |delete|
// when the arena is destroyed or reset. // when the arena is destroyed or reset.
template <typename T> template <typename T>
PROTOBUF_NOINLINE void Own(T* object) { PROTOBUF_ALWAYS_INLINE void Own(T* object) {
OwnInternal(object, std::is_convertible<T*, Message*>()); OwnInternal(object, std::is_convertible<T*, MessageLite*>());
} }
// Adds |object| to a list of objects whose destructors will be manually // Adds |object| to a list of objects whose destructors will be manually
@ -346,7 +381,7 @@ class PROTOBUF_EXPORT PROTOBUF_ALIGNAS(8) Arena final {
// normally only used for objects that are placement-newed into // normally only used for objects that are placement-newed into
// arena-allocated memory. // arena-allocated memory.
template <typename T> template <typename T>
PROTOBUF_NOINLINE void OwnDestructor(T* object) { PROTOBUF_ALWAYS_INLINE void OwnDestructor(T* object) {
if (object != NULL) { if (object != NULL) {
impl_.AddCleanup(object, &internal::arena_destruct_object<T>); impl_.AddCleanup(object, &internal::arena_destruct_object<T>);
} }
@ -356,8 +391,8 @@ class PROTOBUF_EXPORT PROTOBUF_ALIGNAS(8) Arena final {
// will be manually called when the arena is destroyed or reset. This differs // will be manually called when the arena is destroyed or reset. This differs
// from OwnDestructor() in that any member function may be specified, not only // from OwnDestructor() in that any member function may be specified, not only
// the class destructor. // the class destructor.
PROTOBUF_NOINLINE void OwnCustomDestructor(void* object, PROTOBUF_ALWAYS_INLINE void OwnCustomDestructor(void* object,
void (*destruct)(void*)) { void (*destruct)(void*)) {
impl_.AddCleanup(object, destruct); impl_.AddCleanup(object, destruct);
} }
@ -436,6 +471,8 @@ class PROTOBUF_EXPORT PROTOBUF_ALIGNAS(8) Arena final {
}; };
private: private:
internal::ThreadSafeArena impl_;
template <typename T> template <typename T>
struct has_get_arena : InternalHelper<T>::has_get_arena {}; struct has_get_arena : InternalHelper<T>::has_get_arena {};
@ -467,41 +504,26 @@ class PROTOBUF_EXPORT PROTOBUF_ALIGNAS(8) Arena final {
} }
} }
template <typename T, typename... Args>
PROTOBUF_ALWAYS_INLINE static T* CreateInternal(Arena* arena,
Args&&... args) {
if (arena == NULL) {
return new T(std::forward<Args>(args)...);
} else {
return arena->DoCreate<T>(std::is_trivially_destructible<T>::value,
std::forward<Args>(args)...);
}
}
inline void AllocHook(const std::type_info* allocated_type, size_t n) const {
impl_.RecordAlloc(allocated_type, n);
}
// Allocate and also optionally call collector with the allocated type info // Allocate and also optionally call collector with the allocated type info
// when allocation recording is enabled. // when allocation recording is enabled.
template <typename T> PROTOBUF_ALWAYS_INLINE void* AllocateInternal(size_t size, size_t align,
PROTOBUF_ALWAYS_INLINE void* AllocateInternal(bool skip_explicit_ownership) { void (*destructor)(void*),
const size_t n = internal::AlignUpTo8(sizeof(T)); const std::type_info* type) {
// Monitor allocation if needed. // Monitor allocation if needed.
impl_.RecordAlloc(RTTI_TYPE_ID(T), n); if (destructor == nullptr) {
if (skip_explicit_ownership) { return AllocateAlignedWithHook(size, align, type);
return AllocateAlignedTo<alignof(T)>(sizeof(T));
} else { } else {
if (alignof(T) <= 8) { if (align <= 8) {
return impl_.AllocateAlignedAndAddCleanup( auto res = AllocateAlignedWithCleanup(internal::AlignUpTo8(size), type);
n, &internal::arena_destruct_object<T>); res.second->elem = res.first;
res.second->cleanup = destructor;
return res.first;
} else { } else {
auto ptr = auto res = AllocateAlignedWithCleanup(size + align - 8, type);
reinterpret_cast<uintptr_t>(impl_.AllocateAlignedAndAddCleanup( auto ptr = internal::AlignTo(res.first, align);
sizeof(T) + alignof(T) - 8, res.second->elem = ptr;
&internal::arena_destruct_object<T>)); res.second->cleanup = destructor;
return reinterpret_cast<void*>((ptr + alignof(T) - 8) & return ptr;
(~alignof(T) + 1));
} }
} }
} }
@ -522,7 +544,7 @@ class PROTOBUF_EXPORT PROTOBUF_ALIGNAS(8) Arena final {
PROTOBUF_ALWAYS_INLINE static T* DoCreateMaybeMessage(Arena* arena, PROTOBUF_ALWAYS_INLINE static T* DoCreateMaybeMessage(Arena* arena,
std::false_type, std::false_type,
Args&&... args) { Args&&... args) {
return CreateInternal<T>(arena, std::forward<Args>(args)...); return Create<T>(arena, std::forward<Args>(args)...);
} }
template <typename T, typename... Args> template <typename T, typename... Args>
@ -532,25 +554,6 @@ class PROTOBUF_EXPORT PROTOBUF_ALIGNAS(8) Arena final {
std::forward<Args>(args)...); std::forward<Args>(args)...);
} }
template <typename T, typename... Args>
PROTOBUF_ALWAYS_INLINE static T* CreateNoMessage(Arena* arena, std::true_type,
Args&&... args) {
// User is constructing with Create() despite the fact that T supports arena
// construction. In this case we have to delegate to CreateInternal(), and
// we can't use any CreateMaybeMessage() specialization that may be defined.
return CreateInternal<T>(arena, std::forward<Args>(args)...);
}
template <typename T, typename... Args>
PROTOBUF_ALWAYS_INLINE static T* CreateNoMessage(Arena* arena,
std::false_type,
Args&&... args) {
// User is constructing with Create() and the type does not support arena
// construction. In this case we can delegate to CreateMaybeMessage() and
// use any specialization that may be available for that.
return CreateMaybeMessage<T>(arena, std::forward<Args>(args)...);
}
// Just allocate the required size for the given type assuming the // Just allocate the required size for the given type assuming the
// type has a trivial constructor. // type has a trivial constructor.
template <typename T> template <typename T>
@ -559,22 +562,19 @@ class PROTOBUF_EXPORT PROTOBUF_ALIGNAS(8) Arena final {
<< "Requested size is too large to fit into size_t."; << "Requested size is too large to fit into size_t.";
// We count on compiler to realize that if sizeof(T) is a multiple of // We count on compiler to realize that if sizeof(T) is a multiple of
// 8 AlignUpTo can be elided. // 8 AlignUpTo can be elided.
const size_t n = internal::AlignUpTo8(sizeof(T) * num_elements); const size_t n = sizeof(T) * num_elements;
// Monitor allocation if needed. return static_cast<T*>(
impl_.RecordAlloc(RTTI_TYPE_ID(T), n); AllocateAlignedWithHook(n, alignof(T), RTTI_TYPE_ID(T)));
return static_cast<T*>(AllocateAlignedTo<alignof(T)>(n));
} }
template <typename T, typename... Args>
PROTOBUF_ALWAYS_INLINE T* DoCreate(bool skip_explicit_ownership,
Args&&... args) {
return new (AllocateInternal<T>(skip_explicit_ownership))
T(std::forward<Args>(args)...);
}
template <typename T, typename... Args> template <typename T, typename... Args>
PROTOBUF_ALWAYS_INLINE T* DoCreateMessage(Args&&... args) { PROTOBUF_ALWAYS_INLINE T* DoCreateMessage(Args&&... args) {
return InternalHelper<T>::Construct( return InternalHelper<T>::Construct(
AllocateInternal<T>(InternalHelper<T>::is_destructor_skippable::value), AllocateInternal(sizeof(T), alignof(T),
internal::ObjectDestructor<
InternalHelper<T>::is_destructor_skippable::value,
T>::destructor,
RTTI_TYPE_ID(T)),
this, std::forward<Args>(args)...); this, std::forward<Args>(args)...);
} }
@ -619,7 +619,7 @@ class PROTOBUF_EXPORT PROTOBUF_ALIGNAS(8) Arena final {
template <typename T> template <typename T>
PROTOBUF_ALWAYS_INLINE void OwnInternal(T* object, std::true_type) { PROTOBUF_ALWAYS_INLINE void OwnInternal(T* object, std::true_type) {
if (object != NULL) { if (object != NULL) {
impl_.AddCleanup(object, &internal::arena_delete_object<Message>); impl_.AddCleanup(object, &internal::arena_delete_object<MessageLite>);
} }
} }
template <typename T> template <typename T>
@ -654,24 +654,38 @@ class PROTOBUF_EXPORT PROTOBUF_ALIGNAS(8) Arena final {
} }
// For friends of arena. // For friends of arena.
void* AllocateAligned(size_t n) { void* AllocateAligned(size_t n, size_t align = 8) {
return AllocateAlignedNoHook(internal::AlignUpTo8(n)); if (align <= 8) {
return AllocateAlignedNoHook(internal::AlignUpTo8(n));
} else {
// We are wasting space by over allocating align - 8 bytes. Compared
// to a dedicated function that takes current alignment in consideration.
// Such a scheme would only waste (align - 8)/2 bytes on average, but
// requires a dedicated function in the outline arena allocation
// functions. Possibly re-evaluate tradeoffs later.
return internal::AlignTo(AllocateAlignedNoHook(n + align - 8), align);
}
} }
template<size_t Align>
void* AllocateAlignedTo(size_t n) { void* AllocateAlignedWithHook(size_t n, size_t align,
static_assert(Align > 0, "Alignment must be greater than 0"); const std::type_info* type) {
static_assert((Align & (Align - 1)) == 0, "Alignment must be power of two"); if (align <= 8) {
if (Align <= 8) return AllocateAligned(n); return AllocateAlignedWithHook(internal::AlignUpTo8(n), type);
// TODO(b/151247138): if the pointer would have been aligned already, } else {
// this is wasting space. We should pass the alignment down. // We are wasting space by over allocating align - 8 bytes. Compared
uintptr_t ptr = reinterpret_cast<uintptr_t>(AllocateAligned(n + Align - 8)); // to a dedicated function that takes current alignment in consideration.
ptr = (ptr + Align - 1) & (~Align + 1); // Such a schemee would only waste (align - 8)/2 bytes on average, but
return reinterpret_cast<void*>(ptr); // requires a dedicated function in the outline arena allocation
// functions. Possibly re-evaluate tradeoffs later.
return internal::AlignTo(AllocateAlignedWithHook(n + align - 8, type),
align);
}
} }
void* AllocateAlignedNoHook(size_t n); void* AllocateAlignedNoHook(size_t n);
void* AllocateAlignedWithHook(size_t n, const std::type_info* type);
internal::ArenaImpl impl_; std::pair<void*, internal::SerialArena::CleanupNode*>
AllocateAlignedWithCleanup(size_t n, const std::type_info* type);
template <typename Type> template <typename Type>
friend class internal::GenericTypeHandler; friend class internal::GenericTypeHandler;

@ -35,6 +35,7 @@
#include <atomic> #include <atomic>
#include <limits> #include <limits>
#include <typeinfo>
#include <google/protobuf/stubs/common.h> #include <google/protobuf/stubs/common.h>
#include <google/protobuf/stubs/logging.h> #include <google/protobuf/stubs/logging.h>
@ -48,24 +49,19 @@
namespace google { namespace google {
namespace protobuf { namespace protobuf {
struct ArenaOptions;
namespace internal { namespace internal {
inline size_t AlignUpTo8(size_t n) { inline constexpr size_t AlignUpTo8(size_t n) {
// Align n to next multiple of 8 (from Hacker's Delight, Chapter 3.) // Align n to next multiple of 8 (from Hacker's Delight, Chapter 3.)
return (n + 7) & static_cast<size_t>(-8); return (n + 7) & static_cast<size_t>(-8);
} }
using LifecycleIdAtomic = uint64_t; using LifecycleIdAtomic = uint64_t;
void PROTOBUF_EXPORT ArenaFree(void* object, size_t size);
// MetricsCollector collects stats for a particular arena. // MetricsCollector collects stats for a particular arena.
class PROTOBUF_EXPORT ArenaMetricsCollector { class PROTOBUF_EXPORT ArenaMetricsCollector {
public: public:
virtual ~ArenaMetricsCollector(); ArenaMetricsCollector(bool record_allocs) : record_allocs_(record_allocs) {}
// Invoked when the arena is about to be destroyed. This method will // Invoked when the arena is about to be destroyed. This method will
// typically finalize any metric collection and delete the collector. // typically finalize any metric collection and delete the collector.
@ -76,10 +72,6 @@ class PROTOBUF_EXPORT ArenaMetricsCollector {
// space_allocated is the space used by the arena just before the reset. // space_allocated is the space used by the arena just before the reset.
virtual void OnReset(uint64 space_allocated) = 0; virtual void OnReset(uint64 space_allocated) = 0;
// Does OnAlloc() need to be called? If false, metric collection overhead
// will be reduced since we will not do extra work per allocation.
virtual bool RecordAllocs() = 0;
// OnAlloc is called when an allocation happens. // OnAlloc is called when an allocation happens.
// type_info is promised to be static - its lifetime extends to // type_info is promised to be static - its lifetime extends to
// match program's lifetime (It is given by typeid operator). // match program's lifetime (It is given by typeid operator).
@ -88,78 +80,79 @@ class PROTOBUF_EXPORT ArenaMetricsCollector {
// allocations for managing the arena) // allocations for managing the arena)
virtual void OnAlloc(const std::type_info* allocated_type, virtual void OnAlloc(const std::type_info* allocated_type,
uint64 alloc_size) = 0; uint64 alloc_size) = 0;
};
class ArenaImpl;
// A thread-unsafe Arena that can only be used within its owning thread.
class PROTOBUF_EXPORT SerialArena {
public:
// Blocks are variable length malloc-ed objects. The following structure
// describes the common header for all blocks.
class PROTOBUF_EXPORT Block {
public:
Block(size_t size, Block* next, bool special, bool user_owned)
: next_and_bits_(reinterpret_cast<uintptr_t>(next) | (special ? 1 : 0) |
(user_owned ? 2 : 0)),
pos_(kBlockHeaderSize),
size_(size) {
GOOGLE_DCHECK_EQ(reinterpret_cast<uintptr_t>(next) & 3, 0u);
}
char* Pointer(size_t n) {
GOOGLE_DCHECK(n <= size_);
return reinterpret_cast<char*>(this) + n;
}
// One of the blocks may be special. This is either a user-supplied // Does OnAlloc() need to be called? If false, metric collection overhead
// initial block, or a block we created at startup to hold Options info. // will be reduced since we will not do extra work per allocation.
// A special block is not deleted by Reset. bool RecordAllocs() { return record_allocs_; }
bool special() const { return (next_and_bits_ & 1) != 0; }
// Whether or not this current block is owned by the user.
// Only special blocks can be user_owned.
bool user_owned() const { return (next_and_bits_ & 2) != 0; }
Block* next() const { protected:
const uintptr_t bottom_bits = 3; // This class is destructed by the call to OnDestroy().
return reinterpret_cast<Block*>(next_and_bits_ & ~bottom_bits); ~ArenaMetricsCollector() = default;
} const bool record_allocs_;
};
void clear_next() { struct AllocationPolicy {
next_and_bits_ &= 3; // Set next to nullptr, preserve bottom bits. static constexpr size_t kDefaultStartBlockSize = 256;
} static constexpr size_t kDefaultMaxBlockSize = 8192;
size_t pos() const { return pos_; } size_t start_block_size = kDefaultStartBlockSize;
size_t size() const { return size_; } size_t max_block_size = kDefaultMaxBlockSize;
void set_pos(size_t pos) { pos_ = pos; } void* (*block_alloc)(size_t) = nullptr;
void (*block_dealloc)(void*, size_t) = nullptr;
ArenaMetricsCollector* metrics_collector = nullptr;
private: bool IsDefault() const {
// Holds pointer to next block for this thread + special/user_owned bits. return start_block_size == kDefaultMaxBlockSize &&
uintptr_t next_and_bits_; max_block_size == kDefaultMaxBlockSize && block_alloc == nullptr &&
block_dealloc == nullptr && metrics_collector == nullptr;
}
};
size_t pos_; // A simple arena allocator. Calls to allocate functions must be properly
size_t size_; // serialized by the caller, hence this class cannot be used as a general
// data follows // purpose allocator in a multi-threaded program. It serves as a building block
// for ThreadSafeArena, which provides a thread-safe arena allocator.
//
// This class manages
// 1) Arena bump allocation + owning memory blocks.
// 2) Maintaining a cleanup list.
// It delagetes the actual memory allocation back to ThreadSafeArena, which
// contains the information on block growth policy and backing memory allocation
// used.
class PROTOBUF_EXPORT SerialArena {
public:
struct Memory {
void* ptr;
size_t size;
}; };
// The allocate/free methods here are a little strange, since SerialArena is // Node contains the ptr of the object to be cleaned up and the associated
// allocated inside a Block which it also manages. This is to avoid doing // cleanup function ptr.
// an extra allocation for the SerialArena itself. struct CleanupNode {
void* elem; // Pointer to the object to be cleaned up.
void (*cleanup)(void*); // Function pointer to the destructor or deleter.
};
// Creates a new SerialArena inside Block* and returns it. // Creates a new SerialArena inside mem using the remaining memory as for
static SerialArena* New(Block* b, void* owner, ArenaImpl* arena); // future allocations.
static SerialArena* New(SerialArena::Memory mem, void* owner);
// Free SerialArena returning the memory passed in to New
template <typename Deallocator>
Memory Free(Deallocator deallocator);
void CleanupList(); void CleanupList();
uint64 SpaceAllocated() const {
return space_allocated_.load(std::memory_order_relaxed);
}
uint64 SpaceUsed() const; uint64 SpaceUsed() const;
bool HasSpace(size_t n) { return n <= static_cast<size_t>(limit_ - ptr_); } bool HasSpace(size_t n) { return n <= static_cast<size_t>(limit_ - ptr_); }
void* AllocateAligned(size_t n) { void* AllocateAligned(size_t n, const AllocationPolicy* policy) {
GOOGLE_DCHECK_EQ(internal::AlignUpTo8(n), n); // Must be already aligned. GOOGLE_DCHECK_EQ(internal::AlignUpTo8(n), n); // Must be already aligned.
GOOGLE_DCHECK_GE(limit_, ptr_); GOOGLE_DCHECK_GE(limit_, ptr_);
if (PROTOBUF_PREDICT_FALSE(!HasSpace(n))) { if (PROTOBUF_PREDICT_FALSE(!HasSpace(n))) {
return AllocateAlignedFallback(n); return AllocateAlignedFallback(n, policy);
} }
void* ret = ptr_; void* ret = ptr_;
ptr_ += n; ptr_ += n;
@ -183,51 +176,52 @@ class PROTOBUF_EXPORT SerialArena {
return true; return true;
} }
void AddCleanup(void* elem, void (*cleanup)(void*)) { std::pair<void*, CleanupNode*> AllocateAlignedWithCleanup(
if (PROTOBUF_PREDICT_FALSE(cleanup_ptr_ == cleanup_limit_)) { size_t n, const AllocationPolicy* policy) {
AddCleanupFallback(elem, cleanup); if (PROTOBUF_PREDICT_FALSE(!HasSpace(n + kCleanupSize))) {
return; return AllocateAlignedWithCleanupFallback(n, policy);
} }
cleanup_ptr_->elem = elem; void* ret = ptr_;
cleanup_ptr_->cleanup = cleanup; ptr_ += n;
cleanup_ptr_++; limit_ -= kCleanupSize;
#ifdef ADDRESS_SANITIZER
ASAN_UNPOISON_MEMORY_REGION(ret, n);
ASAN_UNPOISON_MEMORY_REGION(limit_, kCleanupSize);
#endif // ADDRESS_SANITIZER
return CreatePair(ret, reinterpret_cast<CleanupNode*>(limit_));
} }
void* AllocateAlignedAndAddCleanup(size_t n, void (*cleanup)(void*)) { void AddCleanup(void* elem, void (*cleanup)(void*),
void* ret = AllocateAligned(n); const AllocationPolicy* policy) {
AddCleanup(ret, cleanup); auto res = AllocateAlignedWithCleanup(0, policy);
return ret; res.second->elem = elem;
res.second->cleanup = cleanup;
} }
Block* head() const { return head_; }
void* owner() const { return owner_; } void* owner() const { return owner_; }
SerialArena* next() const { return next_; } SerialArena* next() const { return next_; }
void set_next(SerialArena* next) { next_ = next; } void set_next(SerialArena* next) { next_ = next; }
static Block* NewBlock(Block* last_block, size_t min_bytes, ArenaImpl* arena);
private: private:
// Node contains the ptr of the object to be cleaned up and the associated // Blocks are variable length malloc-ed objects. The following structure
// cleanup function ptr. // describes the common header for all blocks.
struct CleanupNode { struct Block {
void* elem; // Pointer to the object to be cleaned up. char* Pointer(size_t n) {
void (*cleanup)(void*); // Function pointer to the destructor or deleter. GOOGLE_DCHECK(n <= size);
}; return reinterpret_cast<char*>(this) + n;
// Cleanup uses a chunked linked list, to reduce pointer chasing.
struct CleanupChunk {
static size_t SizeOf(size_t i) {
return sizeof(CleanupChunk) + (sizeof(CleanupNode) * (i - 1));
} }
size_t size; // Total elements in the list.
CleanupChunk* next; // Next node in the list. Block* next;
CleanupNode nodes[1]; // True length is |size|. size_t size;
CleanupNode* start;
// data follows
}; };
ArenaImpl* arena_; // Containing arena.
void* owner_; // &ThreadCache of this thread; void* owner_; // &ThreadCache of this thread;
Block* head_; // Head of linked list of blocks. Block* head_; // Head of linked list of blocks.
CleanupChunk* cleanup_; // Head of cleanup list.
SerialArena* next_; // Next SerialArena in this linked list. SerialArena* next_; // Next SerialArena in this linked list.
size_t space_used_ = 0; // Necessary for metrics.
std::atomic<size_t> space_allocated_;
// Next pointer to allocate from. Always 8-byte aligned. Points inside // Next pointer to allocate from. Always 8-byte aligned. Points inside
// head_ (and head_->pos will always be non-canonical). We keep these // head_ (and head_->pos will always be non-canonical). We keep these
@ -235,17 +229,20 @@ class PROTOBUF_EXPORT SerialArena {
char* ptr_; char* ptr_;
char* limit_; char* limit_;
// Next CleanupList members to append to. These point inside cleanup_. // Constructor is private as only New() should be used.
CleanupNode* cleanup_ptr_; inline SerialArena(Block* b, void* owner);
CleanupNode* cleanup_limit_; void* AllocateAlignedFallback(size_t n, const AllocationPolicy* policy);
std::pair<void*, CleanupNode*> AllocateAlignedWithCleanupFallback(
size_t n, const AllocationPolicy* policy);
void AllocateNewBlock(size_t n, const AllocationPolicy* policy);
void* AllocateAlignedFallback(size_t n); std::pair<void*, CleanupNode*> CreatePair(void* ptr, CleanupNode* node) {
void AddCleanupFallback(void* elem, void (*cleanup)(void*)); return {ptr, node};
void CleanupListFallback(); }
public: public:
static constexpr size_t kBlockHeaderSize = static constexpr size_t kBlockHeaderSize = AlignUpTo8(sizeof(Block));
(sizeof(Block) + 7) & static_cast<size_t>(-8); static constexpr size_t kCleanupSize = AlignUpTo8(sizeof(CleanupNode));
}; };
// This class provides the core Arena memory allocation library. Different // This class provides the core Arena memory allocation library. Different
@ -254,42 +251,43 @@ class PROTOBUF_EXPORT SerialArena {
// in turn would be templates, which will/cannot happen. However separating // in turn would be templates, which will/cannot happen. However separating
// the memory allocation part from the cruft of the API users expect we can // the memory allocation part from the cruft of the API users expect we can
// use #ifdef the select the best implementation based on hardware / OS. // use #ifdef the select the best implementation based on hardware / OS.
class PROTOBUF_EXPORT ArenaImpl { class PROTOBUF_EXPORT ThreadSafeArena {
public: public:
static const size_t kDefaultStartBlockSize = 256; ThreadSafeArena() { Init(false); }
static const size_t kDefaultMaxBlockSize = 8192;
ArenaImpl() { Init(false); }
ArenaImpl(char* mem, size_t size) { ThreadSafeArena(char* mem, size_t size) { InitializeFrom(mem, size); }
GOOGLE_DCHECK_EQ(reinterpret_cast<uintptr_t>(mem) & 7, 0u);
Init(false);
// Ignore initial block if it is too small. explicit ThreadSafeArena(void* mem, size_t size,
if (mem != nullptr && size >= kBlockHeaderSize + kSerialArenaSize) { const AllocationPolicy& policy) {
SetInitialBlock(new (mem) SerialArena::Block(size, nullptr, true, true)); if (policy.IsDefault()) {
// Legacy code doesn't use the API above, but provides the initial block
// through ArenaOptions. I suspect most do not touch the allocation
// policy parameters.
InitializeFrom(mem, size);
} else {
auto collector = policy.metrics_collector;
bool record_allocs = collector && collector->RecordAllocs();
InitializeWithPolicy(mem, size, record_allocs, policy);
} }
} }
explicit ArenaImpl(const ArenaOptions& options);
// Destructor deletes all owned heap allocated objects, and destructs objects // Destructor deletes all owned heap allocated objects, and destructs objects
// that have non-trivial destructors, except for proto2 message objects whose // that have non-trivial destructors, except for proto2 message objects whose
// destructors can be skipped. Also, frees all blocks except the initial block // destructors can be skipped. Also, frees all blocks except the initial block
// if it was passed in. // if it was passed in.
~ArenaImpl(); ~ThreadSafeArena();
uint64 Reset(); uint64 Reset();
uint64 SpaceAllocated() const; uint64 SpaceAllocated() const;
uint64 SpaceUsed() const; uint64 SpaceUsed() const;
void* AllocateAligned(size_t n) { void* AllocateAligned(size_t n, const std::type_info* type) {
SerialArena* arena; SerialArena* arena;
if (PROTOBUF_PREDICT_TRUE(GetSerialArenaFast(&arena))) { if (PROTOBUF_PREDICT_TRUE(GetSerialArenaFast(tag_and_id_, &arena))) {
return arena->AllocateAligned(n); return arena->AllocateAligned(n, AllocPolicy());
} else { } else {
return AllocateAlignedFallback(n); return AllocateAlignedFallback(n, type);
} }
} }
@ -300,90 +298,74 @@ class PROTOBUF_EXPORT ArenaImpl {
// code for the happy path. // code for the happy path.
PROTOBUF_ALWAYS_INLINE bool MaybeAllocateAligned(size_t n, void** out) { PROTOBUF_ALWAYS_INLINE bool MaybeAllocateAligned(size_t n, void** out) {
SerialArena* a; SerialArena* a;
if (PROTOBUF_PREDICT_TRUE(GetSerialArenaFromThreadCache(&a))) { if (PROTOBUF_PREDICT_TRUE(GetSerialArenaFromThreadCache(tag_and_id_, &a))) {
return a->MaybeAllocateAligned(n, out); return a->MaybeAllocateAligned(n, out);
} }
return false; return false;
} }
void* AllocateAlignedAndAddCleanup(size_t n, void (*cleanup)(void*)); std::pair<void*, SerialArena::CleanupNode*> AllocateAlignedWithCleanup(
size_t n, const std::type_info* type);
// Add object pointer and cleanup function pointer to the list. // Add object pointer and cleanup function pointer to the list.
void AddCleanup(void* elem, void (*cleanup)(void*)); void AddCleanup(void* elem, void (*cleanup)(void*));
inline void RecordAlloc(const std::type_info* allocated_type, private:
size_t n) const { // Unique for each arena. Changes on Reset().
if (PROTOBUF_PREDICT_FALSE(record_allocs())) { uint64 tag_and_id_;
options_->metrics_collector->OnAlloc(allocated_type, n); // The LSB of tag_and_id_ indicates if allocs in this arena are recorded.
} enum { kRecordAllocs = 1 };
}
std::pair<void*, size_t> NewBuffer(size_t last_size, size_t min_bytes); intptr_t alloc_policy_ = 0; // Tagged pointer to AllocPolicy.
// The LSB of alloc_policy_ indicates if the user owns the initial block.
enum { kUserOwnedInitialBlock = 1 };
private:
// Pointer to a linked list of SerialArena. // Pointer to a linked list of SerialArena.
std::atomic<SerialArena*> threads_; std::atomic<SerialArena*> threads_;
std::atomic<SerialArena*> hint_; // Fast thread-local block access std::atomic<SerialArena*> hint_; // Fast thread-local block access
std::atomic<size_t> space_allocated_; // Total size of all allocated blocks.
// Unique for each arena. Changes on Reset().
// Least-significant-bit is 1 iff allocations should be recorded.
uint64 lifecycle_id_;
struct Options {
size_t start_block_size;
size_t max_block_size;
void* (*block_alloc)(size_t);
void (*block_dealloc)(void*, size_t);
ArenaMetricsCollector* metrics_collector;
};
Options* options_ = nullptr; const AllocationPolicy* AllocPolicy() const {
return reinterpret_cast<const AllocationPolicy*>(alloc_policy_ & -8);
void* AllocateAlignedFallback(size_t n); }
void* AllocateAlignedAndAddCleanupFallback(size_t n, void (*cleanup)(void*)); void InitializeFrom(void* mem, size_t size);
void InitializeWithPolicy(void* mem, size_t size, bool record_allocs,
AllocationPolicy policy);
void* AllocateAlignedFallback(size_t n, const std::type_info* type);
std::pair<void*, SerialArena::CleanupNode*>
AllocateAlignedWithCleanupFallback(size_t n, const std::type_info* type);
void AddCleanupFallback(void* elem, void (*cleanup)(void*)); void AddCleanupFallback(void* elem, void (*cleanup)(void*));
void Init(bool record_allocs); void Init(bool record_allocs);
void SetInitialBlock( void SetInitialBlock(void* mem, size_t size);
SerialArena::Block* block); // Can be called right after Init()
// Return true iff allocations should be recorded in a metrics collector. // Delete or Destruct all objects owned by the arena.
inline bool record_allocs() const { return lifecycle_id_ & 1; } void CleanupList();
// Invoke fn(b) for every Block* b. inline bool ShouldRecordAlloc() const { return tag_and_id_ & kRecordAllocs; }
template <typename Functor>
void PerBlock(Functor fn) { inline uint64 LifeCycleId() const {
// By omitting an Acquire barrier we ensure that any user code that doesn't return tag_and_id_ & (-kRecordAllocs - 1);
// properly synchronize Reset() or the destructor will throw a TSAN warning.
SerialArena* serial = threads_.load(std::memory_order_relaxed);
while (serial) {
// fn() may delete blocks and arenas, so fetch next pointers before fn();
SerialArena* cur = serial;
serial = serial->next();
for (auto* block = cur->head(); block != nullptr;) {
auto* b = block;
block = b->next();
fn(b);
}
}
} }
// Delete or Destruct all objects owned by the arena. inline void RecordAlloc(const std::type_info* allocated_type,
void CleanupList(); size_t n) const {
AllocPolicy()->metrics_collector->OnAlloc(allocated_type, n);
}
inline void CacheSerialArena(SerialArena* serial) { inline void CacheSerialArena(SerialArena* serial) {
thread_cache().last_serial_arena = serial; thread_cache().last_serial_arena = serial;
thread_cache().last_lifecycle_id_seen = lifecycle_id_; thread_cache().last_lifecycle_id_seen = LifeCycleId();
// TODO(haberman): evaluate whether we would gain efficiency by getting rid // TODO(haberman): evaluate whether we would gain efficiency by getting rid
// of hint_. It's the only write we do to ArenaImpl in the allocation path, // of hint_. It's the only write we do to ThreadSafeArena in the allocation
// which will dirty the cache line. // path, which will dirty the cache line.
hint_.store(serial, std::memory_order_release); hint_.store(serial, std::memory_order_release);
} }
PROTOBUF_ALWAYS_INLINE bool GetSerialArenaFast(SerialArena** arena) { PROTOBUF_ALWAYS_INLINE bool GetSerialArenaFast(uint64 lifecycle_id,
if (GetSerialArenaFromThreadCache(arena)) return true; SerialArena** arena) {
if (GetSerialArenaFromThreadCache(lifecycle_id, arena)) return true;
if (lifecycle_id & kRecordAllocs) return false;
// Check whether we own the last accessed SerialArena on this arena. This // Check whether we own the last accessed SerialArena on this arena. This
// fast path optimizes the case where a single thread uses multiple arenas. // fast path optimizes the case where a single thread uses multiple arenas.
@ -397,12 +379,12 @@ class PROTOBUF_EXPORT ArenaImpl {
} }
PROTOBUF_ALWAYS_INLINE bool GetSerialArenaFromThreadCache( PROTOBUF_ALWAYS_INLINE bool GetSerialArenaFromThreadCache(
SerialArena** arena) { uint64 lifecycle_id, SerialArena** arena) {
// If this thread already owns a block in this arena then try to use that. // If this thread already owns a block in this arena then try to use that.
// This fast path optimizes the case where multiple threads allocate from // This fast path optimizes the case where multiple threads allocate from
// the same arena. // the same arena.
ThreadCache* tc = &thread_cache(); ThreadCache* tc = &thread_cache();
if (PROTOBUF_PREDICT_TRUE(tc->last_lifecycle_id_seen == lifecycle_id_)) { if (PROTOBUF_PREDICT_TRUE(tc->last_lifecycle_id_seen == lifecycle_id)) {
*arena = tc->last_serial_arena; *arena = tc->last_serial_arena;
return true; return true;
} }
@ -410,6 +392,20 @@ class PROTOBUF_EXPORT ArenaImpl {
} }
SerialArena* GetSerialArenaFallback(void* me); SerialArena* GetSerialArenaFallback(void* me);
template <typename Functor>
void PerSerialArena(Functor fn) {
// By omitting an Acquire barrier we ensure that any user code that doesn't
// properly synchronize Reset() or the destructor will throw a TSAN warning.
SerialArena* serial = threads_.load(std::memory_order_relaxed);
for (; serial; serial = serial->next()) fn(serial);
}
// Releases all memory except the first block which it returns. The first
// block might be owned by the user and thus need some extra checks before
// deleting.
SerialArena::Memory Free(size_t* space_allocated);
#ifdef _MSC_VER #ifdef _MSC_VER
#pragma warning(disable : 4324) #pragma warning(disable : 4324)
#endif #endif
@ -462,11 +458,11 @@ class PROTOBUF_EXPORT ArenaImpl {
static ThreadCache& thread_cache() { return thread_cache_; } static ThreadCache& thread_cache() { return thread_cache_; }
#endif #endif
GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ArenaImpl); GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ThreadSafeArena);
// All protos have pointers back to the arena hence Arena must have // All protos have pointers back to the arena hence Arena must have
// pointer stability. // pointer stability.
ArenaImpl(ArenaImpl&&) = delete; ThreadSafeArena(ThreadSafeArena&&) = delete;
ArenaImpl& operator=(ArenaImpl&&) = delete; ThreadSafeArena& operator=(ThreadSafeArena&&) = delete;
public: public:
// kBlockHeaderSize is sizeof(Block), aligned up to the nearest multiple of 8 // kBlockHeaderSize is sizeof(Block), aligned up to the nearest multiple of 8
@ -474,8 +470,6 @@ class PROTOBUF_EXPORT ArenaImpl {
static constexpr size_t kBlockHeaderSize = SerialArena::kBlockHeaderSize; static constexpr size_t kBlockHeaderSize = SerialArena::kBlockHeaderSize;
static constexpr size_t kSerialArenaSize = static constexpr size_t kSerialArenaSize =
(sizeof(SerialArena) + 7) & static_cast<size_t>(-8); (sizeof(SerialArena) + 7) & static_cast<size_t>(-8);
static constexpr size_t kOptionsSize =
(sizeof(Options) + 7) & static_cast<size_t>(-8);
static_assert(kBlockHeaderSize % 8 == 0, static_assert(kBlockHeaderSize % 8 == 0,
"kBlockHeaderSize must be a multiple of 8."); "kBlockHeaderSize must be a multiple of 8.");
static_assert(kSerialArenaSize % 8 == 0, static_assert(kSerialArenaSize % 8 == 0,

@ -1331,7 +1331,7 @@ void ClearHookCounts() {
} // namespace } // namespace
// A helper utility class that handles arena callbacks. // A helper utility class that handles arena callbacks.
class ArenaOptionsTestFriend : public internal::ArenaMetricsCollector { class ArenaOptionsTestFriend final : public internal::ArenaMetricsCollector {
public: public:
static internal::ArenaMetricsCollector* NewWithAllocs() { static internal::ArenaMetricsCollector* NewWithAllocs() {
return new ArenaOptionsTestFriend(true); return new ArenaOptionsTestFriend(true);
@ -1352,7 +1352,7 @@ class ArenaOptionsTestFriend : public internal::ArenaMetricsCollector {
} }
explicit ArenaOptionsTestFriend(bool record_allocs) explicit ArenaOptionsTestFriend(bool record_allocs)
: record_allocs_(record_allocs) { : ArenaMetricsCollector(record_allocs) {
++hooks_num_init; ++hooks_num_init;
} }
void OnDestroy(uint64 space_allocated) override { void OnDestroy(uint64 space_allocated) override {
@ -1360,14 +1360,10 @@ class ArenaOptionsTestFriend : public internal::ArenaMetricsCollector {
delete this; delete this;
} }
void OnReset(uint64 space_allocated) override { ++hooks_num_reset; } void OnReset(uint64 space_allocated) override { ++hooks_num_reset; }
bool RecordAllocs() override { return record_allocs_; }
void OnAlloc(const std::type_info* allocated_type, void OnAlloc(const std::type_info* allocated_type,
uint64 alloc_size) override { uint64 alloc_size) override {
++hooks_num_allocations; ++hooks_num_allocations;
} }
private:
bool record_allocs_;
}; };
// Test the hooks are correctly called. // Test the hooks are correctly called.
@ -1408,3 +1404,5 @@ TEST(ArenaTest, ArenaHooksWhenAllocationsNotNeeded) {
} // namespace protobuf } // namespace protobuf
} // namespace google } // namespace google
#include <google/protobuf/port_undef.inc>

@ -252,3 +252,5 @@ void ArenaStringPtr::ClearToDefault(const LazyString& default_value,
} // namespace internal } // namespace internal
} // namespace protobuf } // namespace protobuf
} // namespace google } // namespace google
#include <google/protobuf/port_undef.inc>

@ -173,3 +173,5 @@ TEST(ArenaStringPtrTest, ArenaStringPtrOnArenaNoSSO) {
} // namespace protobuf } // namespace protobuf
} // namespace google } // namespace google
#include <google/protobuf/port_undef.inc>

@ -2018,130 +2018,76 @@ CommandLineInterface::InterpretArgument(const std::string& name,
void CommandLineInterface::PrintHelpText() { void CommandLineInterface::PrintHelpText() {
// Sorry for indentation here; line wrapping would be uglier. // Sorry for indentation here; line wrapping would be uglier.
std::cout std::cout << "Usage: " << executable_name_ << " [OPTION] PROTO_FILES";
<< std::cout << R"(
"Usage: " << executable_name_ Parse PROTO_FILES and generate output based on the options given:
<< " [OPTION] PROTO_FILES\n" -IPATH, --proto_path=PATH Specify the directory in which to search for
"Parse PROTO_FILES and generate output based on the options given:\n" imports. May be specified multiple times;
" -IPATH, --proto_path=PATH Specify the directory in which to " directories will be searched in order. If not
"search for\n" given, the current working directory is used.
" imports. May be specified multiple " If not found in any of the these directories,
"times;\n" the --descriptor_set_in descriptors will be
" directories will be searched in order. " checked for required proto file.
" If not\n" --version Show version info and exit.
" given, the current working directory " -h, --help Show this text and exit.
"is used.\n" --encode=MESSAGE_TYPE Read a text-format message of the given type
" If not found in any of the these " from standard input and write it in binary
"directories,\n" to standard output. The message type must
" the --descriptor_set_in descriptors " be defined in PROTO_FILES or their imports.
"will be\n" --deterministic_output When using --encode, ensure map fields are
" checked for required proto file.\n" deterministically ordered. Note that this order
" --version Show version info and exit.\n" is not canonical, and changes across builds or
" -h, --help Show this text and exit.\n" releases of protoc.
" --encode=MESSAGE_TYPE Read a text-format message of the " --decode=MESSAGE_TYPE Read a binary message of the given type from
"given type\n" standard input and write it in text format
" from standard input and write it in " to standard output. The message type must
"binary\n" be defined in PROTO_FILES or their imports.
" to standard output. The message type " --decode_raw Read an arbitrary protocol message from
"must\n" standard input and write the raw tag/value
" be defined in PROTO_FILES or their " pairs in text format to standard output. No
"imports.\n" PROTO_FILES should be given when using this
" --deterministic_output When using --encode, ensure map fields " flag.
"are\n" --descriptor_set_in=FILES Specifies a delimited list of FILES
" deterministically ordered. Note that" each containing a FileDescriptorSet (a
"this order is not\n" protocol buffer defined in descriptor.proto).
" canonical, and changes across builds" The FileDescriptor for each of the PROTO_FILES
"or releases of protoc.\n" provided will be loaded from these
" --decode=MESSAGE_TYPE Read a binary message of the given " FileDescriptorSets. If a FileDescriptor
"type from\n" appears multiple times, the first occurrence
" standard input and write it in text " will be used.
"format\n" -oFILE, Writes a FileDescriptorSet (a protocol buffer,
" to standard output. The message type " --descriptor_set_out=FILE defined in descriptor.proto) containing all of
"must\n" the input files to FILE.
" be defined in PROTO_FILES or their " --include_imports When using --descriptor_set_out, also include
"imports.\n" all dependencies of the input files in the
" --decode_raw Read an arbitrary protocol message " set, so that the set is self-contained.
"from\n" --include_source_info When using --descriptor_set_out, do not strip
" standard input and write the raw " SourceCodeInfo from the FileDescriptorProto.
"tag/value\n" This results in vastly larger descriptors that
" pairs in text format to standard " include information about the original
"output. No\n" location of each decl in the source file as
" PROTO_FILES should be given when using " well as surrounding comments.
"this\n" --dependency_out=FILE Write a dependency output file in the format
" flag.\n" expected by make. This writes the transitive
" --descriptor_set_in=FILES Specifies a delimited list of FILES\n" set of input file paths to FILE
" each containing a FileDescriptorSet " --error_format=FORMAT Set the format in which to print errors.
"(a\n" FORMAT may be 'gcc' (the default) or 'msvs'
" protocol buffer defined in " (Microsoft Visual Studio format).
"descriptor.proto).\n" --print_free_field_numbers Print the free field numbers of the messages
" The FileDescriptor for each of the " defined in the given proto files. Groups share
"PROTO_FILES\n" the same field number space with the parent
" provided will be loaded from these\n" message. Extension ranges are counted as
" FileDescriptorSets. If a " occupied fields numbers.)";
"FileDescriptor\n"
" appears multiple times, the first "
"occurrence\n"
" will be used.\n"
" -oFILE, Writes a FileDescriptorSet (a protocol "
"buffer,\n"
" --descriptor_set_out=FILE defined in descriptor.proto) "
"containing all of\n"
" the input files to FILE.\n"
" --include_imports When using --descriptor_set_out, also "
"include\n"
" all dependencies of the input files in "
"the\n"
" set, so that the set is "
"self-contained.\n"
" --include_source_info When using --descriptor_set_out, do "
"not strip\n"
" SourceCodeInfo from the "
"FileDescriptorProto.\n"
" This results in vastly larger "
"descriptors that\n"
" include information about the "
"original\n"
" location of each decl in the source "
"file as\n"
" well as surrounding comments.\n"
" --dependency_out=FILE Write a dependency output file in the "
"format\n"
" expected by make. This writes the "
"transitive\n"
" set of input file paths to FILE\n"
" --error_format=FORMAT Set the format in which to print "
"errors.\n"
" FORMAT may be 'gcc' (the default) or "
"'msvs'\n"
" (Microsoft Visual Studio format).\n"
" --print_free_field_numbers Print the free field numbers of the "
"messages\n"
" defined in the given proto files. "
"Groups share\n"
" the same field number space with the "
"parent \n"
" message. Extension ranges are counted "
"as \n"
" occupied fields numbers.\n"
<< std::endl;
if (!plugin_prefix_.empty()) { if (!plugin_prefix_.empty()) {
std::cout std::cout << R"(
<< " --plugin=EXECUTABLE Specifies a plugin executable to " --plugin=EXECUTABLE Specifies a plugin executable to use.
"use.\n" Normally, protoc searches the PATH for
" Normally, protoc searches the PATH " plugins, but you may specify additional
"for\n" executables not in the path using this flag.
" plugins, but you may specify " Additionally, EXECUTABLE may be of the form
"additional\n" NAME=PATH, in which case the given plugin name
" executables not in the path using " is mapped to the given executable even if
"this flag.\n" the executable's own name differs.)";
" Additionally, EXECUTABLE may be of "
"the form\n"
" NAME=PATH, in which case the given "
"plugin name\n"
" is mapped to the given executable "
"even if\n"
" the executable's own name differs."
<< std::endl;
} }
for (GeneratorMap::iterator iter = generators_by_flag_name_.begin(); for (GeneratorMap::iterator iter = generators_by_flag_name_.begin();
@ -2149,35 +2095,26 @@ void CommandLineInterface::PrintHelpText() {
// FIXME(kenton): If the text is long enough it will wrap, which is ugly, // FIXME(kenton): If the text is long enough it will wrap, which is ugly,
// but fixing this nicely (e.g. splitting on spaces) is probably more // but fixing this nicely (e.g. splitting on spaces) is probably more
// trouble than it's worth. // trouble than it's worth.
std::cout << " " << iter->first << "=OUT_DIR " std::cout << std::endl
<< " " << iter->first << "=OUT_DIR "
<< std::string(19 - iter->first.size(), << std::string(19 - iter->first.size(),
' ') // Spaces for alignment. ' ') // Spaces for alignment.
<< iter->second.help_text << std::endl; << iter->second.help_text;
} }
std::cout << " @<filename> Read options and filenames from " std::cout << R"(
"file. If a\n" @<filename> Read options and filenames from file. If a
" relative file path is specified, " relative file path is specified, the file
"the file\n" will be searched in the working directory.
" will be searched in the working " The --proto_path option will not affect how
"directory.\n" this argument file is searched. Content of
" The --proto_path option will not " the file will be expanded in the position of
"affect how\n" @<filename> as in the argument list. Note
" this argument file is searched. " that shell expansion is not applied to the
"Content of\n" content of the file (i.e., you cannot use
" the file will be expanded in the " quotes, wildcards, escapes, commands, etc.).
"position of\n" Each line corresponds to a single argument,
" @<filename> as in the argument " even if it contains spaces.)";
"list. Note\n" std::cout << std::endl;
" that shell expansion is not "
"applied to the\n"
" content of the file (i.e., you "
"cannot use\n"
" quotes, wildcards, escapes, "
"commands, etc.).\n"
" Each line corresponds to a "
"single argument,\n"
" even if it contains spaces."
<< std::endl;
} }
bool CommandLineInterface::EnforceProto3OptionalSupport( bool CommandLineInterface::EnforceProto3OptionalSupport(

@ -99,8 +99,7 @@ ExtensionGenerator::ExtensionGenerator(const FieldDescriptor* descriptor,
std::string scope = std::string scope =
IsScoped() ? ClassName(descriptor_->extension_scope(), false) + "::" : ""; IsScoped() ? ClassName(descriptor_->extension_scope(), false) + "::" : "";
variables_["scope"] = scope; variables_["scope"] = scope;
std::string scoped_name = scope + ResolveKeyword(name); variables_["scoped_name"] = ExtensionName(descriptor_);
variables_["scoped_name"] = scoped_name;
variables_["number"] = StrCat(descriptor_->number()); variables_["number"] = StrCat(descriptor_->number());
} }
@ -175,6 +174,7 @@ void ExtensionGenerator::GenerateDefinition(io::Printer* printer) {
} }
format( format(
"PROTOBUF_ATTRIBUTE_INIT_PRIORITY "
"::$proto_ns$::internal::ExtensionIdentifier< $extendee$,\n" "::$proto_ns$::internal::ExtensionIdentifier< $extendee$,\n"
" ::$proto_ns$::internal::$type_traits$, $field_type$, $packed$ >\n" " ::$proto_ns$::internal::$type_traits$, $field_type$, $packed$ >\n"
" $scoped_name$($constant_name$, $1$);\n", " $scoped_name$($constant_name$, $1$);\n",

@ -429,6 +429,12 @@ void FileGenerator::GenerateSourceIncludes(io::Printer* printer) {
format("// @@protoc_insertion_point(includes)\n"); format("// @@protoc_insertion_point(includes)\n");
IncludeFile("net/proto2/public/port_def.inc", printer); IncludeFile("net/proto2/public/port_def.inc", printer);
// For MSVC builds, we use #pragma init_seg to move the initialization of our
// libraries to happen before the user code.
// This worksaround the fact that MSVC does not do constant initializers when
// required by the standard.
format("\nPROTOBUF_PRAGMA_INIT_SEG\n");
} }
void FileGenerator::GenerateSourceDefaultInstance(int idx, void FileGenerator::GenerateSourceDefaultInstance(int idx,
@ -916,8 +922,9 @@ void FileGenerator::GenerateReflectionInitializationCode(io::Printer* printer) {
if (file_->name() != "net/proto2/proto/descriptor.proto") { if (file_->name() != "net/proto2/proto/descriptor.proto") {
format( format(
"// Force running AddDescriptors() at dynamic initialization time.\n" "// Force running AddDescriptors() at dynamic initialization time.\n"
"static bool $1$ = (static_cast<void>(" "PROTOBUF_ATTRIBUTE_INIT_PRIORITY "
"::$proto_ns$::internal::AddDescriptors(&$desc_table$)), true);\n", "static ::$proto_ns$::internal::AddDescriptorsRunner "
"$1$(&$desc_table$);\n",
UniqueName("dynamic_init_dummy", file_, options_)); UniqueName("dynamic_init_dummy", file_, options_));
} }
} }

@ -362,10 +362,16 @@ std::string QualifiedClassName(const EnumDescriptor* d) {
return QualifiedClassName(d, Options()); return QualifiedClassName(d, Options());
} }
std::string ExtensionName(const FieldDescriptor* d) {
if (const Descriptor* scope = d->extension_scope())
return StrCat(ClassName(scope), "::", ResolveKeyword(d->name()));
return ResolveKeyword(d->name());
}
std::string QualifiedExtensionName(const FieldDescriptor* d, std::string QualifiedExtensionName(const FieldDescriptor* d,
const Options& options) { const Options& options) {
GOOGLE_DCHECK(d->is_extension()); GOOGLE_DCHECK(d->is_extension());
return QualifiedFileLevelSymbol(d->file(), FieldName(d), options); return QualifiedFileLevelSymbol(d->file(), ExtensionName(d), options);
} }
std::string QualifiedExtensionName(const FieldDescriptor* d) { std::string QualifiedExtensionName(const FieldDescriptor* d) {

@ -134,6 +134,10 @@ inline std::string ClassName(const EnumDescriptor* descriptor, bool qualified) {
: ClassName(descriptor); : ClassName(descriptor);
} }
// Returns the extension name prefixed with the class name if nested but without
// the package name.
std::string ExtensionName(const FieldDescriptor* d);
std::string QualifiedExtensionName(const FieldDescriptor* d, std::string QualifiedExtensionName(const FieldDescriptor* d,
const Options& options); const Options& options);
std::string QualifiedExtensionName(const FieldDescriptor* d); std::string QualifiedExtensionName(const FieldDescriptor* d);

@ -308,12 +308,7 @@ bool ShouldMarkIsInitializedAsFinal(const Descriptor* descriptor,
bool ShouldMarkNewAsFinal(const Descriptor* descriptor, bool ShouldMarkNewAsFinal(const Descriptor* descriptor,
const Options& options) { const Options& options) {
static std::set<std::string> exclusions{ return true;
};
const std::string name = ClassName(descriptor, true);
return exclusions.find(name) == exclusions.end() ||
options.opensource_runtime;
} }
// Returns true to make the message serialize in order, decided by the following // Returns true to make the message serialize in order, decided by the following
@ -1232,13 +1227,13 @@ void MessageGenerator::GenerateClassDefinition(io::Printer* printer) {
"\n"); "\n");
if (HasDescriptorMethods(descriptor_->file(), options_)) { if (HasDescriptorMethods(descriptor_->file(), options_)) {
format( format(
"void PackFrom(const ::$proto_ns$::Message& message) {\n" "bool PackFrom(const ::$proto_ns$::Message& message) {\n"
" _any_metadata_.PackFrom(message);\n" " return _any_metadata_.PackFrom(message);\n"
"}\n" "}\n"
"void PackFrom(const ::$proto_ns$::Message& message,\n" "bool PackFrom(const ::$proto_ns$::Message& message,\n"
" ::PROTOBUF_NAMESPACE_ID::ConstStringParam " " ::PROTOBUF_NAMESPACE_ID::ConstStringParam "
"type_url_prefix) {\n" "type_url_prefix) {\n"
" _any_metadata_.PackFrom(message, type_url_prefix);\n" " return _any_metadata_.PackFrom(message, type_url_prefix);\n"
"}\n" "}\n"
"bool UnpackTo(::$proto_ns$::Message* message) const {\n" "bool UnpackTo(::$proto_ns$::Message* message) const {\n"
" return _any_metadata_.UnpackTo(message);\n" " return _any_metadata_.UnpackTo(message);\n"
@ -1250,16 +1245,16 @@ void MessageGenerator::GenerateClassDefinition(io::Printer* printer) {
"template <typename T, class = typename std::enable_if<" "template <typename T, class = typename std::enable_if<"
"!std::is_convertible<T, const ::$proto_ns$::Message&>" "!std::is_convertible<T, const ::$proto_ns$::Message&>"
"::value>::type>\n" "::value>::type>\n"
"void PackFrom(const T& message) {\n" "bool PackFrom(const T& message) {\n"
" _any_metadata_.PackFrom<T>(message);\n" " return _any_metadata_.PackFrom<T>(message);\n"
"}\n" "}\n"
"template <typename T, class = typename std::enable_if<" "template <typename T, class = typename std::enable_if<"
"!std::is_convertible<T, const ::$proto_ns$::Message&>" "!std::is_convertible<T, const ::$proto_ns$::Message&>"
"::value>::type>\n" "::value>::type>\n"
"void PackFrom(const T& message,\n" "bool PackFrom(const T& message,\n"
" ::PROTOBUF_NAMESPACE_ID::ConstStringParam " " ::PROTOBUF_NAMESPACE_ID::ConstStringParam "
"type_url_prefix) {\n" "type_url_prefix) {\n"
" _any_metadata_.PackFrom<T>(message, type_url_prefix);" " return _any_metadata_.PackFrom<T>(message, type_url_prefix);"
"}\n" "}\n"
"template <typename T, class = typename std::enable_if<" "template <typename T, class = typename std::enable_if<"
"!std::is_convertible<T, const ::$proto_ns$::Message&>" "!std::is_convertible<T, const ::$proto_ns$::Message&>"
@ -1270,14 +1265,14 @@ void MessageGenerator::GenerateClassDefinition(io::Printer* printer) {
} else { } else {
format( format(
"template <typename T>\n" "template <typename T>\n"
"void PackFrom(const T& message) {\n" "bool PackFrom(const T& message) {\n"
" _any_metadata_.PackFrom(message);\n" " return _any_metadata_.PackFrom(message);\n"
"}\n" "}\n"
"template <typename T>\n" "template <typename T>\n"
"void PackFrom(const T& message,\n" "bool PackFrom(const T& message,\n"
" ::PROTOBUF_NAMESPACE_ID::ConstStringParam " " ::PROTOBUF_NAMESPACE_ID::ConstStringParam "
"type_url_prefix) {\n" "type_url_prefix) {\n"
" _any_metadata_.PackFrom(message, type_url_prefix);\n" " return _any_metadata_.PackFrom(message, type_url_prefix);\n"
"}\n" "}\n"
"template <typename T>\n" "template <typename T>\n"
"bool UnpackTo(T* message) const {\n" "bool UnpackTo(T* message) const {\n"
@ -2464,7 +2459,8 @@ void MessageGenerator::GenerateConstructorBody(io::Printer* printer,
} else { } else {
pod_template = pod_template =
"::memset(reinterpret_cast<char*>(this) + static_cast<size_t>(\n" "::memset(reinterpret_cast<char*>(this) + static_cast<size_t>(\n"
" reinterpret_cast<char*>(&$first$_) - reinterpret_cast<char*>(this)),\n" " reinterpret_cast<char*>(&$first$_) - "
"reinterpret_cast<char*>(this)),\n"
" 0, static_cast<size_t>(reinterpret_cast<char*>(&$last$_) -\n" " 0, static_cast<size_t>(reinterpret_cast<char*>(&$last$_) -\n"
" reinterpret_cast<char*>(&$first$_)) + sizeof($last$_));\n"; " reinterpret_cast<char*>(&$first$_)) + sizeof($last$_));\n";
} }
@ -3603,29 +3599,17 @@ void MessageGenerator::GenerateSerializeWithCachedSizesBodyShuffled(
"_weak_field_map_);\n"); "_weak_field_map_);\n");
} }
format( format("for (int i = $1$; i >= 0; i-- ) {\n", num_fields - 1);
"static const int kStart = GetInvariantPerBuild($1$UL) % $2$;\n"
"bool first_pass = true;\n"
"for (int i = kStart; i != kStart || first_pass; i = ((i + $3$) % $2$)) "
"{\n",
0,
num_fields, kLargePrime);
format.Indent(); format.Indent();
format("switch(i) {\n"); format("switch(i) {\n");
format.Indent(); format.Indent();
bool first_pass_set = false;
int index = 0; int index = 0;
for (const auto* f : ordered_fields) { for (const auto* f : ordered_fields) {
format("case $1$: {\n", index++); format("case $1$: {\n", index++);
format.Indent(); format.Indent();
if (!first_pass_set) {
first_pass_set = true;
format("first_pass = false;\n");
}
GenerateSerializeOneField(printer, f, -1); GenerateSerializeOneField(printer, f, -1);
format("break;\n"); format("break;\n");
@ -3637,11 +3621,6 @@ void MessageGenerator::GenerateSerializeWithCachedSizesBodyShuffled(
format("case $1$: {\n", index++); format("case $1$: {\n", index++);
format.Indent(); format.Indent();
if (!first_pass_set) {
first_pass_set = true;
format("first_pass = false;\n");
}
GenerateSerializeOneExtensionRange(printer, r); GenerateSerializeOneExtensionRange(printer, r);
format("break;\n"); format("break;\n");

@ -89,6 +89,17 @@ const std::unordered_set<std::string>* kReservedNames =
"transient", "try", "void", "volatile", "while", "transient", "try", "void", "volatile", "while",
}); });
// Names that should be avoided as field names in Kotlin.
// All Kotlin hard keywords are in this list.
const std::unordered_set<std::string>* kKotlinForbiddenNames =
new std::unordered_set<std::string>({
"as", "as?", "break", "class", "continue", "do", "else",
"false", "for", "fun", "if", "in", "!in", "interface",
"is", "!is", "null", "object", "package", "return", "super",
"this", "throw", "true", "try", "typealias", "typeof", "val",
"var", "when", "while",
});
bool IsForbidden(const std::string& field_name) { bool IsForbidden(const std::string& field_name) {
for (int i = 0; i < GOOGLE_ARRAYSIZE(kForbiddenWordList); ++i) { for (int i = 0; i < GOOGLE_ARRAYSIZE(kForbiddenWordList); ++i) {
if (field_name == kForbiddenWordList[i]) { if (field_name == kForbiddenWordList[i]) {
@ -215,6 +226,7 @@ std::string UnderscoresToCamelCaseCheckReserved(const FieldDescriptor* field) {
return name; return name;
} }
std::string UniqueFileScopeIdentifier(const Descriptor* descriptor) { std::string UniqueFileScopeIdentifier(const Descriptor* descriptor) {
return "static_" + StringReplace(descriptor->full_name(), ".", "_", true); return "static_" + StringReplace(descriptor->full_name(), ".", "_", true);
} }

@ -51,6 +51,7 @@ namespace java {
extern const char kThickSeparator[]; extern const char kThickSeparator[];
extern const char kThinSeparator[]; extern const char kThinSeparator[];
// If annotation_file is non-empty, prints a javax.annotation.Generated // If annotation_file is non-empty, prints a javax.annotation.Generated
// annotation to the given Printer. annotation_file will be referenced in the // annotation to the given Printer. annotation_file will be referenced in the
// annotation's comments field. delimiter should be the Printer's delimiter // annotation's comments field. delimiter should be the Printer's delimiter

@ -68,6 +68,7 @@ std::string ClassNameWithoutPackage(const Descriptor* descriptor,
return StripPackageName(descriptor->full_name(), descriptor->file()); return StripPackageName(descriptor->full_name(), descriptor->file());
} }
// Get the name of an enum's Java class without package name prefix. // Get the name of an enum's Java class without package name prefix.
std::string ClassNameWithoutPackage(const EnumDescriptor* descriptor, std::string ClassNameWithoutPackage(const EnumDescriptor* descriptor,
bool immutable) { bool immutable) {

@ -3162,8 +3162,7 @@ void Generator::GenerateClassDeserializeBinaryField(
"fieldtype", "fieldtype",
JSFieldTypeAnnotation(options, field, false, true, JSFieldTypeAnnotation(options, field, false, true,
/* singular_if_not_packed */ false, BYTES_U8), /* singular_if_not_packed */ false, BYTES_U8),
"reader", "reader", JSBinaryReaderMethodType(field));
JSBinaryReaderMethodType(field));
} else { } else {
printer->Print( printer->Print(
" var value = /** @type {$fieldtype$} */ " " var value = /** @type {$fieldtype$} */ "
@ -3179,7 +3178,8 @@ void Generator::GenerateClassDeserializeBinaryField(
printer->Print( printer->Print(
" for (var i = 0; i < values.length; i++) {\n" " for (var i = 0; i < values.length; i++) {\n"
" msg.add$name$(values[i]);\n" " msg.add$name$(values[i]);\n"
" }\n", "name", " }\n",
"name",
JSGetterName(options, field, BYTES_DEFAULT, /* drop_list = */ true)); JSGetterName(options, field, BYTES_DEFAULT, /* drop_list = */ true));
} else if (field->is_repeated()) { } else if (field->is_repeated()) {
printer->Print( printer->Print(

@ -14,6 +14,8 @@
#include <google/protobuf/wire_format.h> #include <google/protobuf/wire_format.h>
// @@protoc_insertion_point(includes) // @@protoc_insertion_point(includes)
#include <google/protobuf/port_def.inc> #include <google/protobuf/port_def.inc>
PROTOBUF_PRAGMA_INIT_SEG
extern PROTOBUF_INTERNAL_EXPORT_google_2fprotobuf_2fdescriptor_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<6> scc_info_FileDescriptorProto_google_2fprotobuf_2fdescriptor_2eproto; extern PROTOBUF_INTERNAL_EXPORT_google_2fprotobuf_2fdescriptor_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<6> scc_info_FileDescriptorProto_google_2fprotobuf_2fdescriptor_2eproto;
extern PROTOBUF_INTERNAL_EXPORT_google_2fprotobuf_2fdescriptor_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<1> scc_info_GeneratedCodeInfo_google_2fprotobuf_2fdescriptor_2eproto; extern PROTOBUF_INTERNAL_EXPORT_google_2fprotobuf_2fdescriptor_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<1> scc_info_GeneratedCodeInfo_google_2fprotobuf_2fdescriptor_2eproto;
extern PROTOBUF_INTERNAL_EXPORT_google_2fprotobuf_2fcompiler_2fplugin_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<1> scc_info_CodeGeneratorResponse_File_google_2fprotobuf_2fcompiler_2fplugin_2eproto; extern PROTOBUF_INTERNAL_EXPORT_google_2fprotobuf_2fcompiler_2fplugin_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<1> scc_info_CodeGeneratorResponse_File_google_2fprotobuf_2fcompiler_2fplugin_2eproto;
@ -204,7 +206,7 @@ const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_google
}; };
// Force running AddDescriptors() at dynamic initialization time. // Force running AddDescriptors() at dynamic initialization time.
static bool dynamic_init_dummy_google_2fprotobuf_2fcompiler_2fplugin_2eproto = (static_cast<void>(::PROTOBUF_NAMESPACE_ID::internal::AddDescriptors(&descriptor_table_google_2fprotobuf_2fcompiler_2fplugin_2eproto)), true); PROTOBUF_ATTRIBUTE_INIT_PRIORITY static ::PROTOBUF_NAMESPACE_ID::internal::AddDescriptorsRunner dynamic_init_dummy_google_2fprotobuf_2fcompiler_2fplugin_2eproto(&descriptor_table_google_2fprotobuf_2fcompiler_2fplugin_2eproto);
PROTOBUF_NAMESPACE_OPEN PROTOBUF_NAMESPACE_OPEN
namespace compiler { namespace compiler {
const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* CodeGeneratorResponse_Feature_descriptor() { const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* CodeGeneratorResponse_Feature_descriptor() {

@ -855,15 +855,15 @@ void DescriptorPool::Tables::RollbackToLastCheckpoint() {
GOOGLE_DCHECK(!checkpoints_.empty()); GOOGLE_DCHECK(!checkpoints_.empty());
const CheckPoint& checkpoint = checkpoints_.back(); const CheckPoint& checkpoint = checkpoints_.back();
for (int i = checkpoint.pending_symbols_before_checkpoint; for (size_t i = checkpoint.pending_symbols_before_checkpoint;
i < symbols_after_checkpoint_.size(); i++) { i < symbols_after_checkpoint_.size(); i++) {
symbols_by_name_.erase(symbols_after_checkpoint_[i]); symbols_by_name_.erase(symbols_after_checkpoint_[i]);
} }
for (int i = checkpoint.pending_files_before_checkpoint; for (size_t i = checkpoint.pending_files_before_checkpoint;
i < files_after_checkpoint_.size(); i++) { i < files_after_checkpoint_.size(); i++) {
files_by_name_.erase(files_after_checkpoint_[i]); files_by_name_.erase(files_after_checkpoint_[i]);
} }
for (int i = checkpoint.pending_extensions_before_checkpoint; for (size_t i = checkpoint.pending_extensions_before_checkpoint;
i < extensions_after_checkpoint_.size(); i++) { i < extensions_after_checkpoint_.size(); i++) {
extensions_.erase(extensions_after_checkpoint_[i]); extensions_.erase(extensions_after_checkpoint_[i]);
} }
@ -4203,13 +4203,13 @@ void DescriptorBuilder::AllocateOptionsImpl(
void DescriptorBuilder::AddRecursiveImportError( void DescriptorBuilder::AddRecursiveImportError(
const FileDescriptorProto& proto, int from_here) { const FileDescriptorProto& proto, int from_here) {
std::string error_message("File recursively imports itself: "); std::string error_message("File recursively imports itself: ");
for (int i = from_here; i < tables_->pending_files_.size(); i++) { for (size_t i = from_here; i < tables_->pending_files_.size(); i++) {
error_message.append(tables_->pending_files_[i]); error_message.append(tables_->pending_files_[i]);
error_message.append(" -> "); error_message.append(" -> ");
} }
error_message.append(proto.name()); error_message.append(proto.name());
if (from_here < tables_->pending_files_.size() - 1) { if (static_cast<size_t>(from_here) < tables_->pending_files_.size() - 1) {
AddError(tables_->pending_files_[from_here + 1], proto, AddError(tables_->pending_files_[from_here + 1], proto,
DescriptorPool::ErrorCollector::IMPORT, error_message); DescriptorPool::ErrorCollector::IMPORT, error_message);
} else { } else {
@ -4282,7 +4282,7 @@ const FileDescriptor* DescriptorBuilder::BuildFile(
// mid-file, but that's pretty ugly, and I'm pretty sure there are // mid-file, but that's pretty ugly, and I'm pretty sure there are
// some languages out there that do not allow recursive dependencies // some languages out there that do not allow recursive dependencies
// at all. // at all.
for (int i = 0; i < tables_->pending_files_.size(); i++) { for (size_t i = 0; i < tables_->pending_files_.size(); i++) {
if (tables_->pending_files_[i] == proto.name()) { if (tables_->pending_files_[i] == proto.name()) {
AddRecursiveImportError(proto, i); AddRecursiveImportError(proto, i);
return nullptr; return nullptr;
@ -6702,10 +6702,10 @@ void DescriptorBuilder::OptionInterpreter::UpdateSourceCodeInfo(
if (matched) { if (matched) {
// see if this location is in the range to remove // see if this location is in the range to remove
bool loc_matches = true; bool loc_matches = true;
if (loc->path_size() < pathv.size()) { if (loc->path_size() < static_cast<int64>(pathv.size())) {
loc_matches = false; loc_matches = false;
} else { } else {
for (int j = 0; j < pathv.size(); j++) { for (size_t j = 0; j < pathv.size(); j++) {
if (loc->path(j) != pathv[j]) { if (loc->path(j) != pathv[j]) {
loc_matches = false; loc_matches = false;
break; break;
@ -7444,3 +7444,5 @@ void LazyDescriptor::OnceInternal() {
} // namespace protobuf } // namespace protobuf
} // namespace google } // namespace google
#include <google/protobuf/port_undef.inc>

@ -1491,7 +1491,8 @@ class PROTOBUF_EXPORT FileDescriptor {
Syntax syntax() const; Syntax syntax() const;
static const char* SyntaxName(Syntax syntax); static const char* SyntaxName(Syntax syntax);
// Find a top-level message type by name. Returns nullptr if not found. // Find a top-level message type by name (not full_name). Returns nullptr if
// not found.
const Descriptor* FindMessageTypeByName(ConstStringParam name) const; const Descriptor* FindMessageTypeByName(ConstStringParam name) const;
// Find a top-level enum type by name. Returns nullptr if not found. // Find a top-level enum type by name. Returns nullptr if not found.
const EnumDescriptor* FindEnumTypeByName(ConstStringParam name) const; const EnumDescriptor* FindEnumTypeByName(ConstStringParam name) const;

@ -14,6 +14,8 @@
#include <google/protobuf/wire_format.h> #include <google/protobuf/wire_format.h>
// @@protoc_insertion_point(includes) // @@protoc_insertion_point(includes)
#include <google/protobuf/port_def.inc> #include <google/protobuf/port_def.inc>
PROTOBUF_PRAGMA_INIT_SEG
extern PROTOBUF_INTERNAL_EXPORT_google_2fprotobuf_2fdescriptor_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<6> scc_info_DescriptorProto_google_2fprotobuf_2fdescriptor_2eproto; extern PROTOBUF_INTERNAL_EXPORT_google_2fprotobuf_2fdescriptor_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<6> scc_info_DescriptorProto_google_2fprotobuf_2fdescriptor_2eproto;
extern PROTOBUF_INTERNAL_EXPORT_google_2fprotobuf_2fdescriptor_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<1> scc_info_DescriptorProto_ExtensionRange_google_2fprotobuf_2fdescriptor_2eproto; extern PROTOBUF_INTERNAL_EXPORT_google_2fprotobuf_2fdescriptor_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<1> scc_info_DescriptorProto_ExtensionRange_google_2fprotobuf_2fdescriptor_2eproto;
extern PROTOBUF_INTERNAL_EXPORT_google_2fprotobuf_2fdescriptor_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_DescriptorProto_ReservedRange_google_2fprotobuf_2fdescriptor_2eproto; extern PROTOBUF_INTERNAL_EXPORT_google_2fprotobuf_2fdescriptor_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_DescriptorProto_ReservedRange_google_2fprotobuf_2fdescriptor_2eproto;
@ -1174,7 +1176,7 @@ const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_google
}; };
// Force running AddDescriptors() at dynamic initialization time. // Force running AddDescriptors() at dynamic initialization time.
static bool dynamic_init_dummy_google_2fprotobuf_2fdescriptor_2eproto = (static_cast<void>(::PROTOBUF_NAMESPACE_ID::internal::AddDescriptors(&descriptor_table_google_2fprotobuf_2fdescriptor_2eproto)), true); PROTOBUF_ATTRIBUTE_INIT_PRIORITY static ::PROTOBUF_NAMESPACE_ID::internal::AddDescriptorsRunner dynamic_init_dummy_google_2fprotobuf_2fdescriptor_2eproto(&descriptor_table_google_2fprotobuf_2fdescriptor_2eproto);
PROTOBUF_NAMESPACE_OPEN PROTOBUF_NAMESPACE_OPEN
const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* FieldDescriptorProto_Type_descriptor() { const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* FieldDescriptorProto_Type_descriptor() {
::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(&descriptor_table_google_2fprotobuf_2fdescriptor_2eproto); ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(&descriptor_table_google_2fprotobuf_2fdescriptor_2eproto);

@ -962,14 +962,14 @@ bool MergedDescriptorDatabase::FindFileByName(const std::string& filename,
bool MergedDescriptorDatabase::FindFileContainingSymbol( bool MergedDescriptorDatabase::FindFileContainingSymbol(
const std::string& symbol_name, FileDescriptorProto* output) { const std::string& symbol_name, FileDescriptorProto* output) {
for (int i = 0; i < sources_.size(); i++) { for (size_t i = 0; i < sources_.size(); i++) {
if (sources_[i]->FindFileContainingSymbol(symbol_name, output)) { if (sources_[i]->FindFileContainingSymbol(symbol_name, output)) {
// The symbol was found in source i. However, if one of the previous // The symbol was found in source i. However, if one of the previous
// sources defines a file with the same name (which presumably doesn't // sources defines a file with the same name (which presumably doesn't
// contain the symbol, since it wasn't found in that source), then we // contain the symbol, since it wasn't found in that source), then we
// must hide it from the caller. // must hide it from the caller.
FileDescriptorProto temp; FileDescriptorProto temp;
for (int j = 0; j < i; j++) { for (size_t j = 0; j < i; j++) {
if (sources_[j]->FindFileByName(output->name(), &temp)) { if (sources_[j]->FindFileByName(output->name(), &temp)) {
// Found conflicting file in a previous source. // Found conflicting file in a previous source.
return false; return false;
@ -984,7 +984,7 @@ bool MergedDescriptorDatabase::FindFileContainingSymbol(
bool MergedDescriptorDatabase::FindFileContainingExtension( bool MergedDescriptorDatabase::FindFileContainingExtension(
const std::string& containing_type, int field_number, const std::string& containing_type, int field_number,
FileDescriptorProto* output) { FileDescriptorProto* output) {
for (int i = 0; i < sources_.size(); i++) { for (size_t i = 0; i < sources_.size(); i++) {
if (sources_[i]->FindFileContainingExtension(containing_type, field_number, if (sources_[i]->FindFileContainingExtension(containing_type, field_number,
output)) { output)) {
// The symbol was found in source i. However, if one of the previous // The symbol was found in source i. However, if one of the previous
@ -992,7 +992,7 @@ bool MergedDescriptorDatabase::FindFileContainingExtension(
// contain the symbol, since it wasn't found in that source), then we // contain the symbol, since it wasn't found in that source), then we
// must hide it from the caller. // must hide it from the caller.
FileDescriptorProto temp; FileDescriptorProto temp;
for (int j = 0; j < i; j++) { for (size_t j = 0; j < i; j++) {
if (sources_[j]->FindFileByName(output->name(), &temp)) { if (sources_[j]->FindFileByName(output->name(), &temp)) {
// Found conflicting file in a previous source. // Found conflicting file in a previous source.
return false; return false;

@ -1851,12 +1851,17 @@ class ExtensionDescriptorTest : public testing::Test {
// extensions 10 to 19; // extensions 10 to 19;
// extensions 30 to 39; // extensions 30 to 39;
// } // }
// extends Foo with optional int32 foo_int32 = 10; // extend Foo {
// extends Foo with repeated TestEnum foo_enum = 19; // optional int32 foo_int32 = 10;
// }
// extend Foo {
// repeated TestEnum foo_enum = 19;
// }
// message Bar { // message Bar {
// extends Foo with optional Qux foo_message = 30; // extend Foo {
// // (using Qux as the group type) // optional Qux foo_message = 30;
// extends Foo with repeated group foo_group = 39; // repeated Qux foo_group = 39; // (but internally set to TYPE_GROUP)
// }
// } // }
FileDescriptorProto foo_file; FileDescriptorProto foo_file;
@ -8110,3 +8115,5 @@ TEST_F(LazilyBuildDependenciesTest, Dependency) {
} // namespace descriptor_unittest } // namespace descriptor_unittest
} // namespace protobuf } // namespace protobuf
} // namespace google } // namespace google
#include <google/protobuf/port_undef.inc>

@ -14,6 +14,8 @@
#include <google/protobuf/wire_format.h> #include <google/protobuf/wire_format.h>
// @@protoc_insertion_point(includes) // @@protoc_insertion_point(includes)
#include <google/protobuf/port_def.inc> #include <google/protobuf/port_def.inc>
PROTOBUF_PRAGMA_INIT_SEG
PROTOBUF_NAMESPACE_OPEN PROTOBUF_NAMESPACE_OPEN
class DurationDefaultTypeInternal { class DurationDefaultTypeInternal {
public: public:
@ -76,7 +78,7 @@ const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_google
}; };
// Force running AddDescriptors() at dynamic initialization time. // Force running AddDescriptors() at dynamic initialization time.
static bool dynamic_init_dummy_google_2fprotobuf_2fduration_2eproto = (static_cast<void>(::PROTOBUF_NAMESPACE_ID::internal::AddDescriptors(&descriptor_table_google_2fprotobuf_2fduration_2eproto)), true); PROTOBUF_ATTRIBUTE_INIT_PRIORITY static ::PROTOBUF_NAMESPACE_ID::internal::AddDescriptorsRunner dynamic_init_dummy_google_2fprotobuf_2fduration_2eproto(&descriptor_table_google_2fprotobuf_2fduration_2eproto);
PROTOBUF_NAMESPACE_OPEN PROTOBUF_NAMESPACE_OPEN
// =================================================================== // ===================================================================

@ -509,7 +509,7 @@ DynamicMessage::~DynamicMessage() {
void* field_ptr = void* field_ptr =
OffsetToPointer(type_info_->oneof_case_offset + OffsetToPointer(type_info_->oneof_case_offset +
sizeof(uint32) * field->containing_oneof()->index()); sizeof(uint32) * field->containing_oneof()->index());
if (*(reinterpret_cast<const uint32*>(field_ptr)) == field->number()) { if (*(reinterpret_cast<const int32*>(field_ptr)) == field->number()) {
field_ptr = OffsetToPointer( field_ptr = OffsetToPointer(
type_info_->offsets[descriptor->field_count() + type_info_->offsets[descriptor->field_count() +
field->containing_oneof()->index()]); field->containing_oneof()->index()]);

@ -14,6 +14,8 @@
#include <google/protobuf/wire_format.h> #include <google/protobuf/wire_format.h>
// @@protoc_insertion_point(includes) // @@protoc_insertion_point(includes)
#include <google/protobuf/port_def.inc> #include <google/protobuf/port_def.inc>
PROTOBUF_PRAGMA_INIT_SEG
PROTOBUF_NAMESPACE_OPEN PROTOBUF_NAMESPACE_OPEN
class EmptyDefaultTypeInternal { class EmptyDefaultTypeInternal {
public: public:
@ -73,7 +75,7 @@ const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_google
}; };
// Force running AddDescriptors() at dynamic initialization time. // Force running AddDescriptors() at dynamic initialization time.
static bool dynamic_init_dummy_google_2fprotobuf_2fempty_2eproto = (static_cast<void>(::PROTOBUF_NAMESPACE_ID::internal::AddDescriptors(&descriptor_table_google_2fprotobuf_2fempty_2eproto)), true); PROTOBUF_ATTRIBUTE_INIT_PRIORITY static ::PROTOBUF_NAMESPACE_ID::internal::AddDescriptorsRunner dynamic_init_dummy_google_2fprotobuf_2fempty_2eproto(&descriptor_table_google_2fprotobuf_2fempty_2eproto);
PROTOBUF_NAMESPACE_OPEN PROTOBUF_NAMESPACE_OPEN
// =================================================================== // ===================================================================

@ -180,7 +180,6 @@ void ExtensionSet::RegisterMessageExtension(const MessageLite* containing_type,
Register(containing_type, number, info); Register(containing_type, number, info);
} }
// =================================================================== // ===================================================================
// Constructors and basic methods. // Constructors and basic methods.
@ -2141,3 +2140,5 @@ size_t ExtensionSet::MessageSetByteSize() const {
} // namespace internal } // namespace internal
} // namespace protobuf } // namespace protobuf
} // namespace google } // namespace google
#include <google/protobuf/port_undef.inc>

@ -1328,7 +1328,9 @@ RepeatedMessageTypeTraits<Type>::GetDefaultRepeatedField() {
// ExtensionIdentifier // ExtensionIdentifier
// This is the type of actual extension objects. E.g. if you have: // This is the type of actual extension objects. E.g. if you have:
// extends Foo with optional int32 bar = 1234; // extend Foo {
// optional int32 bar = 1234;
// }
// then "bar" will be defined in C++ as: // then "bar" will be defined in C++ as:
// ExtensionIdentifier<Foo, PrimitiveTypeTraits<int32>, 5, false> bar(1234); // ExtensionIdentifier<Foo, PrimitiveTypeTraits<int32>, 5, false> bar(1234);
// //

@ -534,3 +534,5 @@ bool ExtensionSet::ParseMessageSetItem(io::CodedInputStream* input,
} // namespace internal } // namespace internal
} // namespace protobuf } // namespace protobuf
} // namespace google } // namespace google
#include <google/protobuf/port_undef.inc>

@ -1335,3 +1335,5 @@ TEST(ExtensionSetTest, ConstInit) {
} // namespace internal } // namespace internal
} // namespace protobuf } // namespace protobuf
} // namespace google } // namespace google
#include <google/protobuf/port_undef.inc>

@ -14,6 +14,8 @@
#include <google/protobuf/wire_format.h> #include <google/protobuf/wire_format.h>
// @@protoc_insertion_point(includes) // @@protoc_insertion_point(includes)
#include <google/protobuf/port_def.inc> #include <google/protobuf/port_def.inc>
PROTOBUF_PRAGMA_INIT_SEG
PROTOBUF_NAMESPACE_OPEN PROTOBUF_NAMESPACE_OPEN
class FieldMaskDefaultTypeInternal { class FieldMaskDefaultTypeInternal {
public: public:
@ -75,7 +77,7 @@ const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_google
}; };
// Force running AddDescriptors() at dynamic initialization time. // Force running AddDescriptors() at dynamic initialization time.
static bool dynamic_init_dummy_google_2fprotobuf_2ffield_5fmask_2eproto = (static_cast<void>(::PROTOBUF_NAMESPACE_ID::internal::AddDescriptors(&descriptor_table_google_2fprotobuf_2ffield_5fmask_2eproto)), true); PROTOBUF_ATTRIBUTE_INIT_PRIORITY static ::PROTOBUF_NAMESPACE_ID::internal::AddDescriptorsRunner dynamic_init_dummy_google_2fprotobuf_2ffield_5fmask_2eproto(&descriptor_table_google_2fprotobuf_2ffield_5fmask_2eproto);
PROTOBUF_NAMESPACE_OPEN PROTOBUF_NAMESPACE_OPEN
// =================================================================== // ===================================================================

@ -83,7 +83,7 @@ int LookUpEnumName(const EnumEntry* enums, const int* sorted_indices,
bool InitializeEnumStrings( bool InitializeEnumStrings(
const EnumEntry* enums, const int* sorted_indices, size_t size, const EnumEntry* enums, const int* sorted_indices, size_t size,
internal::ExplicitlyConstructed<std::string>* enum_strings) { internal::ExplicitlyConstructed<std::string>* enum_strings) {
for (int i = 0; i < size; ++i) { for (size_t i = 0; i < size; ++i) {
enum_strings[i].Construct(enums[sorted_indices[i]].name); enum_strings[i].Construct(enums[sorted_indices[i]].name);
internal::OnShutdownDestroyString(enum_strings[i].get_mutable()); internal::OnShutdownDestroyString(enum_strings[i].get_mutable());
} }

@ -836,7 +836,7 @@ void Reflection::ClearField(Message* message,
} }
case FieldDescriptor::CPPTYPE_MESSAGE: case FieldDescriptor::CPPTYPE_MESSAGE:
if (schema_.HasBitIndex(field) == -1) { if (schema_.HasBitIndex(field) == static_cast<uint32>(-1)) {
// Proto3 does not have has-bits and we need to set a message field // Proto3 does not have has-bits and we need to set a message field
// to nullptr in order to indicate its un-presence. // to nullptr in order to indicate its un-presence.
if (GetArena(message) == nullptr) { if (GetArena(message) == nullptr) {
@ -1046,7 +1046,8 @@ void Reflection::ListFieldsMayFailOnStripped(
schema_.HasHasbits() ? GetHasBits(message) : nullptr; schema_.HasHasbits() ? GetHasBits(message) : nullptr;
const uint32* const has_bits_indices = schema_.has_bit_indices_; const uint32* const has_bits_indices = schema_.has_bit_indices_;
output->reserve(descriptor_->field_count()); output->reserve(descriptor_->field_count());
for (int i = 0; i <= last_non_weak_field_index_; i++) { const int last_non_weak_field_index = last_non_weak_field_index_;
for (int i = 0; i <= last_non_weak_field_index; i++) {
const FieldDescriptor* field = descriptor_->field(i); const FieldDescriptor* field = descriptor_->field(i);
if (!should_fail && schema_.IsFieldStripped(field)) { if (!should_fail && schema_.IsFieldStripped(field)) {
continue; continue;
@ -1061,10 +1062,11 @@ void Reflection::ListFieldsMayFailOnStripped(
const uint32* const oneof_case_array = GetConstPointerAtOffset<uint32>( const uint32* const oneof_case_array = GetConstPointerAtOffset<uint32>(
&message, schema_.oneof_case_offset_); &message, schema_.oneof_case_offset_);
// Equivalent to: HasOneofField(message, field) // Equivalent to: HasOneofField(message, field)
if (oneof_case_array[containing_oneof->index()] == field->number()) { if (static_cast<int64>(oneof_case_array[containing_oneof->index()]) ==
field->number()) {
output->push_back(field); output->push_back(field);
} }
} else if (has_bits && has_bits_indices[i] != -1) { } else if (has_bits && has_bits_indices[i] != static_cast<uint32>(-1)) {
CheckInvalidAccess(schema_, field); CheckInvalidAccess(schema_, field);
// Equivalent to: HasBit(message, field) // Equivalent to: HasBit(message, field)
if (IsIndexInHasBitSet(has_bits, has_bits_indices[i])) { if (IsIndexInHasBitSet(has_bits, has_bits_indices[i])) {
@ -2005,7 +2007,7 @@ InternalMetadata* Reflection::MutableInternalMetadata(Message* message) const {
bool Reflection::HasBit(const Message& message, bool Reflection::HasBit(const Message& message,
const FieldDescriptor* field) const { const FieldDescriptor* field) const {
GOOGLE_DCHECK(!field->options().weak()); GOOGLE_DCHECK(!field->options().weak());
if (schema_.HasBitIndex(field) != -1) { if (schema_.HasBitIndex(field) != static_cast<uint32>(-1)) {
return IsIndexInHasBitSet(GetHasBits(message), schema_.HasBitIndex(field)); return IsIndexInHasBitSet(GetHasBits(message), schema_.HasBitIndex(field));
} }
@ -2064,7 +2066,7 @@ bool Reflection::HasBit(const Message& message,
void Reflection::SetBit(Message* message, const FieldDescriptor* field) const { void Reflection::SetBit(Message* message, const FieldDescriptor* field) const {
GOOGLE_DCHECK(!field->options().weak()); GOOGLE_DCHECK(!field->options().weak());
const uint32 index = schema_.HasBitIndex(field); const uint32 index = schema_.HasBitIndex(field);
if (index == -1) return; if (index == static_cast<uint32>(-1)) return;
MutableHasBits(message)[index / 32] |= MutableHasBits(message)[index / 32] |=
(static_cast<uint32>(1) << (index % 32)); (static_cast<uint32>(1) << (index % 32));
} }
@ -2073,7 +2075,7 @@ void Reflection::ClearBit(Message* message,
const FieldDescriptor* field) const { const FieldDescriptor* field) const {
GOOGLE_DCHECK(!field->options().weak()); GOOGLE_DCHECK(!field->options().weak());
const uint32 index = schema_.HasBitIndex(field); const uint32 index = schema_.HasBitIndex(field);
if (index == -1) return; if (index == static_cast<uint32>(-1)) return;
MutableHasBits(message)[index / 32] &= MutableHasBits(message)[index / 32] &=
~(static_cast<uint32>(1) << (index % 32)); ~(static_cast<uint32>(1) << (index % 32));
} }
@ -2107,7 +2109,8 @@ bool Reflection::HasOneof(const Message& message,
bool Reflection::HasOneofField(const Message& message, bool Reflection::HasOneofField(const Message& message,
const FieldDescriptor* field) const { const FieldDescriptor* field) const {
return (GetOneofCase(message, field->containing_oneof()) == field->number()); return (GetOneofCase(message, field->containing_oneof()) ==
static_cast<uint32>(field->number()));
} }
void Reflection::SetOneofCase(Message* message, void Reflection::SetOneofCase(Message* message,
@ -2416,6 +2419,8 @@ struct MetadataOwner {
std::vector<std::pair<const Metadata*, const Metadata*> > metadata_arrays_; std::vector<std::pair<const Metadata*, const Metadata*> > metadata_arrays_;
}; };
void AddDescriptors(const DescriptorTable* table);
void AssignDescriptorsImpl(const DescriptorTable* table, bool eager) { void AssignDescriptorsImpl(const DescriptorTable* table, bool eager) {
// Ensure the file descriptor is added to the pool. // Ensure the file descriptor is added to the pool.
{ {
@ -2493,6 +2498,16 @@ void AddDescriptorsImpl(const DescriptorTable* table) {
MessageFactory::InternalRegisterGeneratedFile(table); MessageFactory::InternalRegisterGeneratedFile(table);
} }
void AddDescriptors(const DescriptorTable* table) {
// AddDescriptors is not thread safe. Callers need to ensure calls are
// properly serialized. This function is only called pre-main by global
// descriptors and we can assume single threaded access or it's called
// by AssignDescriptorImpl which uses a mutex to sequence calls.
if (table->is_initialized) return;
table->is_initialized = true;
AddDescriptorsImpl(table);
}
} // namespace } // namespace
// Separate function because it needs to be a friend of // Separate function because it needs to be a friend of
@ -2513,14 +2528,8 @@ void AssignDescriptors(const DescriptorTable* table, bool eager) {
call_once(*table->once, AssignDescriptorsImpl, table, eager); call_once(*table->once, AssignDescriptorsImpl, table, eager);
} }
void AddDescriptors(const DescriptorTable* table) { AddDescriptorsRunner::AddDescriptorsRunner(const DescriptorTable* table) {
// AddDescriptors is not thread safe. Callers need to ensure calls are AddDescriptors(table);
// properly serialized. This function is only called pre-main by global
// descriptors and we can assume single threaded access or it's called
// by AssignDescriptorImpl which uses a mutex to sequence calls.
if (table->is_initialized) return;
table->is_initialized = true;
AddDescriptorsImpl(table);
} }
void RegisterFileLevelMetadata(const DescriptorTable* table) { void RegisterFileLevelMetadata(const DescriptorTable* table) {
@ -2544,3 +2553,5 @@ void UnknownFieldSetSerializer(const uint8* base, uint32 offset, uint32 tag,
} // namespace internal } // namespace internal
} // namespace protobuf } // namespace protobuf
} // namespace google } // namespace google
#include <google/protobuf/port_undef.inc>

@ -291,18 +291,15 @@ enum {
void PROTOBUF_EXPORT AssignDescriptors(const DescriptorTable* table, void PROTOBUF_EXPORT AssignDescriptors(const DescriptorTable* table,
bool eager = false); bool eager = false);
// AddDescriptors() is a file-level procedure which adds the encoded
// FileDescriptorProto for this .proto file to the global DescriptorPool for
// generated files (DescriptorPool::generated_pool()). It ordinarily runs at
// static initialization time, but is not used at all in LITE_RUNTIME mode.
// AddDescriptors() is *not* thread-safe.
void PROTOBUF_EXPORT AddDescriptors(const DescriptorTable* table);
// These cannot be in lite so we put them in the reflection. // These cannot be in lite so we put them in the reflection.
PROTOBUF_EXPORT void UnknownFieldSetSerializer(const uint8* base, uint32 offset, PROTOBUF_EXPORT void UnknownFieldSetSerializer(const uint8* base, uint32 offset,
uint32 tag, uint32 has_offset, uint32 tag, uint32 has_offset,
io::CodedOutputStream* output); io::CodedOutputStream* output);
struct PROTOBUF_EXPORT AddDescriptorsRunner {
explicit AddDescriptorsRunner(const DescriptorTable* table);
};
} // namespace internal } // namespace internal
} // namespace protobuf } // namespace protobuf
} // namespace google } // namespace google

@ -172,7 +172,7 @@ template <ProcessingType field_type>
inline void ResetOneofField(const ParseTable& table, int field_number, inline void ResetOneofField(const ParseTable& table, int field_number,
Arena* arena, MessageLite* msg, uint32* oneof_case, Arena* arena, MessageLite* msg, uint32* oneof_case,
int64 offset, const void* default_ptr) { int64 offset, const void* default_ptr) {
if (*oneof_case == field_number) { if (static_cast<int64>(*oneof_case) == field_number) {
// The oneof is already set to the right type, so there is no need to clear // The oneof is already set to the right type, so there is no need to clear
// it. // it.
return; return;

@ -52,10 +52,14 @@
#include <google/protobuf/message_lite.h> #include <google/protobuf/message_lite.h>
#include <google/protobuf/metadata_lite.h> #include <google/protobuf/metadata_lite.h>
#include <google/protobuf/stubs/mutex.h> #include <google/protobuf/stubs/mutex.h>
#include <google/protobuf/port_def.inc>
#include <google/protobuf/repeated_field.h> #include <google/protobuf/repeated_field.h>
#include <google/protobuf/wire_format_lite.h> #include <google/protobuf/wire_format_lite.h>
// Must be included last
#include <google/protobuf/port_def.inc>
PROTOBUF_PRAGMA_INIT_SEG
namespace google { namespace google {
namespace protobuf { namespace protobuf {
@ -68,8 +72,9 @@ void DestroyString(const void* s) {
static_cast<const std::string*>(s)->~basic_string(); static_cast<const std::string*>(s)->~basic_string();
} }
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT EmptyString PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT
fixed_address_empty_string; // NOLINT PROTOBUF_ATTRIBUTE_INIT_PRIORITY EmptyString
fixed_address_empty_string; // NOLINT
PROTOBUF_CONSTINIT std::atomic<bool> init_protobuf_defaults_state{false}; PROTOBUF_CONSTINIT std::atomic<bool> init_protobuf_defaults_state{false};
@ -90,6 +95,11 @@ void InitProtobufDefaultsSlow() {
static bool is_inited = InitProtobufDefaultsImpl(); static bool is_inited = InitProtobufDefaultsImpl();
(void)is_inited; (void)is_inited;
} }
// Force the initialization of the empty string.
// Normally, registration would do it, but we don't have any guarantee that
// there is any object with reflection.
PROTOBUF_ATTRIBUTE_INIT_PRIORITY static std::true_type init_empty_string =
(InitProtobufDefaultsSlow(), std::true_type{});
size_t StringSpaceUsedExcludingSelfLong(const std::string& str) { size_t StringSpaceUsedExcludingSelfLong(const std::string& str) {
const void* start = &str; const void* start = &str;
@ -802,3 +812,5 @@ void InitSCCImpl(SCCInfoBase* scc) {
} // namespace internal } // namespace internal
} // namespace protobuf } // namespace protobuf
} // namespace google } // namespace google
#include <google/protobuf/port_undef.inc>

@ -63,3 +63,5 @@ const ImplicitWeakMessage* ImplicitWeakMessage::default_instance() {
} // namespace internal } // namespace internal
} // namespace protobuf } // namespace protobuf
} // namespace google } // namespace google
#include <google/protobuf/port_undef.inc>

@ -312,7 +312,7 @@ bool CodedInputStream::ReadLittleEndian32Fallback(uint32* value) {
uint8 bytes[sizeof(*value)]; uint8 bytes[sizeof(*value)];
const uint8* ptr; const uint8* ptr;
if (BufferSize() >= sizeof(*value)) { if (BufferSize() >= static_cast<int64>(sizeof(*value))) {
// Fast path: Enough bytes in the buffer to read directly. // Fast path: Enough bytes in the buffer to read directly.
ptr = buffer_; ptr = buffer_;
Advance(sizeof(*value)); Advance(sizeof(*value));
@ -329,7 +329,7 @@ bool CodedInputStream::ReadLittleEndian64Fallback(uint64* value) {
uint8 bytes[sizeof(*value)]; uint8 bytes[sizeof(*value)];
const uint8* ptr; const uint8* ptr;
if (BufferSize() >= sizeof(*value)) { if (BufferSize() >= static_cast<int64>(sizeof(*value))) {
// Fast path: Enough bytes in the buffer to read directly. // Fast path: Enough bytes in the buffer to read directly.
ptr = buffer_; ptr = buffer_;
Advance(sizeof(*value)); Advance(sizeof(*value));
@ -351,7 +351,7 @@ template <size_t N>
const uint8* DecodeVarint64KnownSize(const uint8* buffer, uint64* value) { const uint8* DecodeVarint64KnownSize(const uint8* buffer, uint64* value) {
GOOGLE_DCHECK_GT(N, 0); GOOGLE_DCHECK_GT(N, 0);
uint64 result = static_cast<uint64>(buffer[N - 1]) << (7 * (N - 1)); uint64 result = static_cast<uint64>(buffer[N - 1]) << (7 * (N - 1));
for (int i = 0, offset = 0; i < N - 1; i++, offset += 7) { for (size_t i = 0, offset = 0; i < N - 1; i++, offset += 7) {
result += static_cast<uint64>(buffer[i] - 0x80) << offset; result += static_cast<uint64>(buffer[i] - 0x80) << offset;
} }
*value = result; *value = result;
@ -954,3 +954,5 @@ uint8* CodedOutputStream::WriteStringWithSizeToArray(const std::string& str,
} // namespace io } // namespace io
} // namespace protobuf } // namespace protobuf
} // namespace google } // namespace google
#include <google/protobuf/port_undef.inc>

@ -1344,3 +1344,5 @@ TEST_F(CodedStreamTest, InputOver2G) {
} // namespace io } // namespace io
} // namespace protobuf } // namespace protobuf
} // namespace google } // namespace google
#include <google/protobuf/port_undef.inc>

@ -298,7 +298,7 @@ bool GzipOutputStream::Next(void** data, int* size) {
return true; return true;
} }
void GzipOutputStream::BackUp(int count) { void GzipOutputStream::BackUp(int count) {
GOOGLE_CHECK_GE(zcontext_.avail_in, count); GOOGLE_CHECK_GE(zcontext_.avail_in, static_cast<uInt>(count));
zcontext_.avail_in -= count; zcontext_.avail_in -= count;
} }
int64_t GzipOutputStream::ByteCount() const { int64_t GzipOutputStream::ByteCount() const {

@ -296,7 +296,7 @@ void Printer::FormatInternal(const std::vector<std::string>& args,
} }
push_back(c); push_back(c);
} }
if (arg_index != args.size()) { if (arg_index != static_cast<int>(args.size())) {
GOOGLE_LOG(FATAL) << " Unused arguments. " << save; GOOGLE_LOG(FATAL) << " Unused arguments. " << save;
} }
if (!annotations.empty()) { if (!annotations.empty()) {
@ -324,7 +324,7 @@ const char* Printer::WriteVariable(
GOOGLE_CHECK(std::isdigit(start[1])); GOOGLE_CHECK(std::isdigit(start[1]));
GOOGLE_CHECK_EQ(end - start, 2); GOOGLE_CHECK_EQ(end - start, 2);
int idx = start[1] - '1'; int idx = start[1] - '1';
if (idx < 0 || idx >= args.size()) { if (idx < 0 || static_cast<size_t>(idx) >= args.size()) {
GOOGLE_LOG(FATAL) << "Annotation ${" << idx + 1 << "$ is out of bounds."; GOOGLE_LOG(FATAL) << "Annotation ${" << idx + 1 << "$ is out of bounds.";
} }
if (idx > *arg_index) { if (idx > *arg_index) {
@ -358,10 +358,10 @@ const char* Printer::WriteVariable(
start_var, static_cast<std::string::size_type>(end_var - start_var)}; start_var, static_cast<std::string::size_type>(end_var - start_var)};
std::string sub; std::string sub;
if (std::isdigit(var_name[0])) { if (std::isdigit(var_name[0])) {
GOOGLE_CHECK_EQ(var_name.size(), 1); // No need for multi-digits GOOGLE_CHECK_EQ(var_name.size(), 1U); // No need for multi-digits
int idx = var_name[0] - '1'; // Start counting at 1 int idx = var_name[0] - '1'; // Start counting at 1
GOOGLE_CHECK_GE(idx, 0); GOOGLE_CHECK_GE(idx, 0);
if (idx >= args.size()) { if (static_cast<size_t>(idx) >= args.size()) {
GOOGLE_LOG(FATAL) << "Argument $" << idx + 1 << "$ is out of bounds."; GOOGLE_LOG(FATAL) << "Argument $" << idx + 1 << "$ is out of bounds.";
} }
if (idx > *arg_index) { if (idx > *arg_index) {

@ -888,7 +888,8 @@ bool Tokenizer::ParseInteger(const std::string& text, uint64 max_value,
// token, but Tokenizer still think it's integer. // token, but Tokenizer still think it's integer.
return false; return false;
} }
if (digit > max_value || result > (max_value - digit) / base) { if (static_cast<uint64>(digit) > max_value ||
result > (max_value - digit) / base) {
// Overflow. // Overflow.
return false; return false;
} }
@ -918,7 +919,8 @@ double Tokenizer::ParseFloat(const std::string& text) {
++end; ++end;
} }
GOOGLE_LOG_IF(DFATAL, end - start != text.size() || *start == '-') GOOGLE_LOG_IF(DFATAL,
static_cast<size_t>(end - start) != text.size() || *start == '-')
<< " Tokenizer::ParseFloat() passed text that could not have been" << " Tokenizer::ParseFloat() passed text that could not have been"
" tokenized as a float: " " tokenized as a float: "
<< CEscape(text); << CEscape(text);
@ -1114,8 +1116,8 @@ void Tokenizer::ParseStringAppend(const std::string& text,
template <typename CharacterClass> template <typename CharacterClass>
static bool AllInClass(const std::string& s) { static bool AllInClass(const std::string& s) {
for (int i = 0; i < s.size(); ++i) { for (const char character : s) {
if (!CharacterClass::InClass(s[i])) return false; if (!CharacterClass::InClass(character)) return false;
} }
return true; return true;
} }

@ -168,7 +168,7 @@ bool StringOutputStream::Next(void** data, int* size) {
void StringOutputStream::BackUp(int count) { void StringOutputStream::BackUp(int count) {
GOOGLE_CHECK_GE(count, 0); GOOGLE_CHECK_GE(count, 0);
GOOGLE_CHECK(target_ != NULL); GOOGLE_CHECK(target_ != NULL);
GOOGLE_CHECK_LE(count, target_->size()); GOOGLE_CHECK_LE(static_cast<size_t>(count), target_->size());
target_->resize(target_->size() - count); target_->resize(target_->size() - count);
} }

@ -838,7 +838,7 @@ class Map {
// non-determinism to the map ordering. // non-determinism to the map ordering.
bool ShouldInsertAfterHead(void* node) { bool ShouldInsertAfterHead(void* node) {
#ifdef NDEBUG #ifdef NDEBUG
(void) node; (void)node;
return false; return false;
#else #else
// Doing modulo with a prime mixes the bits more. // Doing modulo with a prime mixes the bits more.

@ -601,3 +601,5 @@ size_t DynamicMapField::SpaceUsedExcludingSelfNoLock() const {
} // namespace internal } // namespace internal
} // namespace protobuf } // namespace protobuf
} // namespace google } // namespace google
#include <google/protobuf/port_undef.inc>

@ -72,8 +72,8 @@ class MapIterator;
// map key. // map key.
class PROTOBUF_EXPORT MapKey { class PROTOBUF_EXPORT MapKey {
public: public:
MapKey() : type_(0) {} MapKey() : type_() {}
MapKey(const MapKey& other) : type_(0) { CopyFrom(other); } MapKey(const MapKey& other) : type_() { CopyFrom(other); }
MapKey& operator=(const MapKey& other) { MapKey& operator=(const MapKey& other) {
CopyFrom(other); CopyFrom(other);
@ -87,12 +87,12 @@ class PROTOBUF_EXPORT MapKey {
} }
FieldDescriptor::CppType type() const { FieldDescriptor::CppType type() const {
if (type_ == 0) { if (type_ == FieldDescriptor::CppType()) {
GOOGLE_LOG(FATAL) << "Protocol Buffer map usage error:\n" GOOGLE_LOG(FATAL) << "Protocol Buffer map usage error:\n"
<< "MapKey::type MapKey is not initialized. " << "MapKey::type MapKey is not initialized. "
<< "Call set methods to initialize MapKey."; << "Call set methods to initialize MapKey.";
} }
return (FieldDescriptor::CppType)type_; return type_;
} }
void SetInt64Value(int64 value) { void SetInt64Value(int64 value) {
@ -261,7 +261,8 @@ class PROTOBUF_EXPORT MapKey {
} }
// type_ is 0 or a valid FieldDescriptor::CppType. // type_ is 0 or a valid FieldDescriptor::CppType.
int type_; // Use "CppType()" to indicate zero.
FieldDescriptor::CppType type_;
}; };
} // namespace protobuf } // namespace protobuf
@ -329,7 +330,7 @@ class PROTOBUF_EXPORT MapFieldBase {
// It uses a linker initialized mutex, so it is not compatible with regular // It uses a linker initialized mutex, so it is not compatible with regular
// runtime instances. // runtime instances.
// Except in MSVC, where we can't have a constinit mutex. // Except in MSVC, where we can't have a constinit mutex.
explicit PROTOBUF_MAYBE_CONSTEXPR MapFieldBase(ConstantInitialized) explicit constexpr MapFieldBase(ConstantInitialized)
: arena_(nullptr), : arena_(nullptr),
repeated_field_(nullptr), repeated_field_(nullptr),
mutex_(GOOGLE_PROTOBUF_LINKER_INITIALIZED), mutex_(GOOGLE_PROTOBUF_LINKER_INITIALIZED),
@ -678,7 +679,7 @@ class PROTOBUF_EXPORT DynamicMapField
// the map value. // the map value.
class PROTOBUF_EXPORT MapValueConstRef { class PROTOBUF_EXPORT MapValueConstRef {
public: public:
MapValueConstRef() : data_(nullptr), type_(0) {} MapValueConstRef() : data_(nullptr), type_() {}
int64 GetInt64Value() const { int64 GetInt64Value() const {
TYPE_CHECK(FieldDescriptor::CPPTYPE_INT64, TYPE_CHECK(FieldDescriptor::CPPTYPE_INT64,
@ -735,15 +736,16 @@ class PROTOBUF_EXPORT MapValueConstRef {
// own this value. // own this value.
void* data_; void* data_;
// type_ is 0 or a valid FieldDescriptor::CppType. // type_ is 0 or a valid FieldDescriptor::CppType.
int type_; // Use "CppType()" to indicate zero.
FieldDescriptor::CppType type_;
FieldDescriptor::CppType type() const { FieldDescriptor::CppType type() const {
if (type_ == 0 || data_ == nullptr) { if (type_ == FieldDescriptor::CppType() || data_ == nullptr) {
GOOGLE_LOG(FATAL) GOOGLE_LOG(FATAL)
<< "Protocol Buffer map usage error:\n" << "Protocol Buffer map usage error:\n"
<< "MapValueConstRef::type MapValueConstRef is not initialized."; << "MapValueConstRef::type MapValueConstRef is not initialized.";
} }
return static_cast<FieldDescriptor::CppType>(type_); return type_;
} }
private: private:

@ -501,3 +501,5 @@ TEST(MapFieldTest, ConstInit) {
} // namespace internal } // namespace internal
} // namespace protobuf } // namespace protobuf
} // namespace google } // namespace google
#include <google/protobuf/port_undef.inc>

@ -3770,3 +3770,5 @@ TEST(MoveTest, MoveAssignmentWorks) {
} // namespace internal } // namespace internal
} // namespace protobuf } // namespace protobuf
} // namespace google } // namespace google
#include <google/protobuf/port_undef.inc>

@ -363,3 +363,5 @@ PROTOBUF_NOINLINE
} // namespace protobuf } // namespace protobuf
} // namespace google } // namespace google
#include <google/protobuf/port_undef.inc>

@ -1252,7 +1252,8 @@ const T* DynamicCastToGenerated(const Message* from) {
#if PROTOBUF_RTTI #if PROTOBUF_RTTI
return dynamic_cast<const T*>(from); return dynamic_cast<const T*>(from);
#else #else
bool ok = T::default_instance().GetReflection() == from->GetReflection(); bool ok = from != nullptr &&
T::default_instance().GetReflection() == from->GetReflection();
return ok ? down_cast<const T*>(from) : nullptr; return ok ? down_cast<const T*>(from) : nullptr;
#endif #endif
} }

@ -389,7 +389,7 @@ bool MessageLite::SerializePartialToCodedStream(
} }
int final_byte_count = output->ByteCount(); int final_byte_count = output->ByteCount();
if (final_byte_count - original_byte_count != size) { if (final_byte_count - original_byte_count != static_cast<int64>(size)) {
ByteSizeConsistencyError(size, ByteSizeLong(), ByteSizeConsistencyError(size, ByteSizeLong(),
final_byte_count - original_byte_count, *this); final_byte_count - original_byte_count, *this);
} }
@ -488,7 +488,7 @@ bool MessageLite::SerializePartialToArray(void* data, int size) const {
<< " exceeded maximum protobuf size of 2GB: " << byte_size; << " exceeded maximum protobuf size of 2GB: " << byte_size;
return false; return false;
} }
if (size < byte_size) return false; if (size < static_cast<int64>(byte_size)) return false;
uint8* start = reinterpret_cast<uint8*>(data); uint8* start = reinterpret_cast<uint8*>(data);
SerializeToArrayImpl(*this, start, byte_size); SerializeToArrayImpl(*this, start, byte_size);
return true; return true;
@ -581,3 +581,5 @@ void ShutdownProtobufLibrary() {
} // namespace protobuf } // namespace protobuf
} // namespace google } // namespace google
#include <google/protobuf/port_undef.inc>

@ -151,7 +151,6 @@ class ExplicitlyConstructed {
} union_; } union_;
}; };
PROTOBUF_DISABLE_MSVC_UNION_WARNING
// We need a publicly accessible `value` object to allow constexpr // We need a publicly accessible `value` object to allow constexpr
// support in C++11. // support in C++11.
// A constexpr accessor does not work portably. // A constexpr accessor does not work portably.
@ -163,7 +162,6 @@ union EmptyString {
std::false_type dummy; std::false_type dummy;
std::string value; std::string value;
}; };
PROTOBUF_ENABLE_MSVC_UNION_WARNING
// Default empty string object. Don't use this directly. Instead, call // Default empty string object. Don't use this directly. Instead, call
// GetEmptyString() to get the reference. // GetEmptyString() to get the reference.

@ -235,6 +235,10 @@ TEST(MESSAGE_TEST_NAME, DynamicCastToGenerated) {
test_all_types_pointer_const)); test_all_types_pointer_const));
EXPECT_EQ(nullptr, DynamicCastToGenerated<const UNITTEST::TestRequired>( EXPECT_EQ(nullptr, DynamicCastToGenerated<const UNITTEST::TestRequired>(
test_all_types_pointer_const)); test_all_types_pointer_const));
Message* test_all_types_pointer_nullptr = nullptr;
EXPECT_EQ(nullptr, DynamicCastToGenerated<UNITTEST::TestAllTypes>(
test_all_types_pointer_nullptr));
} }
#ifdef PROTOBUF_HAS_DEATH_TEST // death tests do not work on Windows yet. #ifdef PROTOBUF_HAS_DEATH_TEST // death tests do not work on Windows yet.

@ -591,3 +591,5 @@ const char* UnknownFieldParse(uint32 tag, std::string* unknown, const char* ptr,
} // namespace internal } // namespace internal
} // namespace protobuf } // namespace protobuf
} // namespace google } // namespace google
#include <google/protobuf/port_undef.inc>

@ -39,5 +39,9 @@
#include <google/protobuf/stubs/port.h> #include <google/protobuf/stubs/port.h>
// Protobuf intends to move into the pb:: namespace.
namespace protobuf_future_namespace_placeholder {}
namespace pb = ::protobuf_future_namespace_placeholder;
#endif // GOOGLE_PROTOBUF_PORT_H__ #endif // GOOGLE_PROTOBUF_PORT_H__

@ -139,21 +139,21 @@
#ifdef PROTOBUF_FINAL #ifdef PROTOBUF_FINAL
#error PROTOBUF_FINAL was previously defined #error PROTOBUF_FINAL was previously defined
#endif #endif
#ifdef PROTOBUF_DISABLE_MSVC_UNION_WARNING
#error PROTOBUF_DISABLE_MSVC_UNION_WARNING was previously defined
#endif
#ifdef PROTOBUF_ENABLE_MSVC_UNION_WARNING #ifdef PROTOBUF_ENABLE_MSVC_UNION_WARNING
#error PROTOBUF_ENABLE_MSVC_UNION_WARNING was previously defined #error PROTOBUF_ENABLE_MSVC_UNION_WARNING was previously defined
#endif #endif
#ifdef PROTOBUF_CONSTINIT #ifdef PROTOBUF_CONSTINIT
#error PROTOBUF_CONSTINIT was previously defined #error PROTOBUF_CONSTINIT was previously defined
#endif #endif
#ifdef PROTOBUF_MAYBE_CONSTEXPR
#error PROTOBUF_MAYBE_CONSTEXPR was previously defined
#endif
#ifdef PROTOBUF_ATTRIBUTE_NO_DESTROY #ifdef PROTOBUF_ATTRIBUTE_NO_DESTROY
#error PROTOBUF_ATTRIBUTE_NO_DESTROY was previously defined #error PROTOBUF_ATTRIBUTE_NO_DESTROY was previously defined
#endif #endif
#ifdef PROTOBUF_ATTRIBUTE_INIT_PRIORITY
#error PROTOBUF_ATTRIBUTE_INIT_PRIORITY was previously defined
#endif
#ifdef PROTOBUF_PRAGMA_INIT_SEG
#error PROTOBUF_PRAGMA_INIT_SEG was previously defined
#endif
#define PROTOBUF_NAMESPACE "google::protobuf" #define PROTOBUF_NAMESPACE "google::protobuf"
@ -464,10 +464,6 @@
// name. // name.
#pragma push_macro("DEBUG") #pragma push_macro("DEBUG")
#undef DEBUG #undef DEBUG
#pragma push_macro("TRUE")
#undef TRUE
#pragma push_macro("FALSE")
#undef FALSE
#endif // defined(__clang__) || defined(__GNUC__) || defined(_MSC_VER) #endif // defined(__clang__) || defined(__GNUC__) || defined(_MSC_VER)
#if defined(__clang__) #if defined(__clang__)
@ -560,28 +556,6 @@
#define PROTOBUF_CONSTINIT #define PROTOBUF_CONSTINIT
#endif #endif
// Some constructors can't be constexpr under MSVC, but given that MSVC will not
// do constant initialization of globals anyway we can omit `constexpr` from
// them. These constructors are marked with PROTOBUF_MAYBE_CONSTEXPR
#if defined(_MSC_VER)
#define PROTOBUF_MAYBE_CONSTEXPR
#else
#define PROTOBUF_MAYBE_CONSTEXPR constexpr
#endif
#if _MSC_VER
#define PROTOBUF_DISABLE_MSVC_UNION_WARNING \
__pragma(warning(push)) \
__pragma(warning(disable : 4582)) \
__pragma(warning(disable : 4583))
#define PROTOBUF_ENABLE_MSVC_UNION_WARNING \
__pragma(warning(pop))
#else
#define PROTOBUF_DISABLE_MSVC_UNION_WARNING
#define PROTOBUF_ENABLE_MSVC_UNION_WARNING
#endif
#if defined(__cpp_constinit) #if defined(__cpp_constinit)
#define PROTOBUF_CONSTINIT constinit #define PROTOBUF_CONSTINIT constinit
#elif defined(__has_cpp_attribute) #elif defined(__has_cpp_attribute)
@ -604,3 +578,34 @@
#if !defined(PROTOBUF_ATTRIBUTE_NO_DESTROY) #if !defined(PROTOBUF_ATTRIBUTE_NO_DESTROY)
#define PROTOBUF_ATTRIBUTE_NO_DESTROY #define PROTOBUF_ATTRIBUTE_NO_DESTROY
#endif #endif
#if defined(__GNUC__)
// Protobuf extensions and reflection require registration of the protos linked
// in the binary. Not until everything is registered does the runtime have a
// complete view on all protos. When code is using reflection or extensions
// in between registration calls this can lead to surprising behavior. By
// having the registration run first we mitigate this scenario.
// Highest priority is 101. We use 102 to allow code that really wants to
// higher priority to still beat us.
#define PROTOBUF_ATTRIBUTE_INIT_PRIORITY __attribute__((init_priority((102))))
#else
#define PROTOBUF_ATTRIBUTE_INIT_PRIORITY
#endif
#if _MSC_VER
#define PROTOBUF_PRAGMA_INIT_SEG __pragma(init_seg(lib))
#else
#define PROTOBUF_PRAGMA_INIT_SEG
#endif
// Silence some MSVC warnings in all our code.
#if _MSC_VER
#pragma warning(push)
// For non-trivial unions
#pragma warning(disable : 4582)
#pragma warning(disable : 4583)
// For init_seg(lib)
#pragma warning(disable : 4073)
// To silence the fact that we will pop this push from another file
#pragma warning(disable : 5031)
#endif

@ -75,8 +75,9 @@
#undef PROTOBUF_DISABLE_MSVC_UNION_WARNING #undef PROTOBUF_DISABLE_MSVC_UNION_WARNING
#undef PROTOBUF_ENABLE_MSVC_UNION_WARNING #undef PROTOBUF_ENABLE_MSVC_UNION_WARNING
#undef PROTOBUF_CONSTINIT #undef PROTOBUF_CONSTINIT
#undef PROTOBUF_MAYBE_CONSTEXPR
#undef PROTOBUF_ATTRIBUTE_NO_DESTROY #undef PROTOBUF_ATTRIBUTE_NO_DESTROY
#undef PROTOBUF_ATTRIBUTE_INIT_PRIORITY
#undef PROTOBUF_PRAGMA_INIT_SEG
// Restore macro that may have been #undef'd in port_def.inc. // Restore macro that may have been #undef'd in port_def.inc.
#ifdef _MSC_VER #ifdef _MSC_VER
@ -105,8 +106,6 @@
#if defined(__clang__) || defined(__GNUC__) || defined(_MSC_VER) #if defined(__clang__) || defined(__GNUC__) || defined(_MSC_VER)
#pragma pop_macro("DEBUG") #pragma pop_macro("DEBUG")
#pragma pop_macro("TRUE")
#pragma pop_macro("FALSE")
#endif // defined(__clang__) || defined(__GNUC__) || defined(_MSC_VER) #endif // defined(__clang__) || defined(__GNUC__) || defined(_MSC_VER)
#if defined(__clang__) #if defined(__clang__)
@ -114,3 +113,8 @@
#elif defined(__GNUC__) #elif defined(__GNUC__)
#pragma GCC diagnostic pop #pragma GCC diagnostic pop
#endif #endif
// Pop the warning(push) from port_def.inc
#if _MSC_VER
#pragma warning(pop)
#endif

@ -436,3 +436,5 @@ void GenericSwap(Message* m1, Message* m2) {
} // namespace internal } // namespace internal
} // namespace protobuf } // namespace protobuf
} // namespace google } // namespace google
#include <google/protobuf/port_undef.inc>

@ -58,8 +58,10 @@ void** RepeatedPtrFieldBase::InternalExtend(int extend_amount) {
Arena* arena = GetArena(); Arena* arena = GetArena();
new_size = std::max(internal::kRepeatedFieldLowerClampLimit, new_size = std::max(internal::kRepeatedFieldLowerClampLimit,
std::max(total_size_ * 2, new_size)); std::max(total_size_ * 2, new_size));
GOOGLE_CHECK_LE(new_size, (std::numeric_limits<size_t>::max() - kRepHeaderSize) / GOOGLE_CHECK_LE(
sizeof(old_rep->elements[0])) static_cast<int64>(new_size),
static_cast<int64>((std::numeric_limits<size_t>::max() - kRepHeaderSize) /
sizeof(old_rep->elements[0])))
<< "Requested size is too large to fit into size_t."; << "Requested size is too large to fit into size_t.";
size_t bytes = kRepHeaderSize + sizeof(old_rep->elements[0]) * new_size; size_t bytes = kRepHeaderSize + sizeof(old_rep->elements[0]) * new_size;
if (arena == NULL) { if (arena == NULL) {
@ -134,3 +136,5 @@ template class PROTOBUF_EXPORT_TEMPLATE_DEFINE RepeatedPtrField<std::string>;
} // namespace protobuf } // namespace protobuf
} // namespace google } // namespace google
#include <google/protobuf/port_undef.inc>

@ -1206,6 +1206,12 @@ RepeatedField<Element>::RepeatedField(Iter begin, const Iter& end)
template <typename Element> template <typename Element>
RepeatedField<Element>::~RepeatedField() { RepeatedField<Element>::~RepeatedField() {
#ifndef NDEBUG
// Try to trigger segfault / asan failure in non-opt builds. If arena_
// lifetime has ended before the destructor.
auto arena = GetArena();
if (arena) (void)arena->SpaceAllocated();
#endif
if (total_size_ > 0) { if (total_size_ > 0) {
InternalDeallocate(rep(), total_size_); InternalDeallocate(rep(), total_size_);
} }

@ -2090,3 +2090,5 @@ TEST_F(RepeatedFieldInsertionIteratorsTest, MoveProtos) {
} // namespace protobuf } // namespace protobuf
} // namespace google } // namespace google
#include <google/protobuf/port_undef.inc>

@ -14,6 +14,8 @@
#include <google/protobuf/wire_format.h> #include <google/protobuf/wire_format.h>
// @@protoc_insertion_point(includes) // @@protoc_insertion_point(includes)
#include <google/protobuf/port_def.inc> #include <google/protobuf/port_def.inc>
PROTOBUF_PRAGMA_INIT_SEG
PROTOBUF_NAMESPACE_OPEN PROTOBUF_NAMESPACE_OPEN
class SourceContextDefaultTypeInternal { class SourceContextDefaultTypeInternal {
public: public:
@ -75,7 +77,7 @@ const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_google
}; };
// Force running AddDescriptors() at dynamic initialization time. // Force running AddDescriptors() at dynamic initialization time.
static bool dynamic_init_dummy_google_2fprotobuf_2fsource_5fcontext_2eproto = (static_cast<void>(::PROTOBUF_NAMESPACE_ID::internal::AddDescriptors(&descriptor_table_google_2fprotobuf_2fsource_5fcontext_2eproto)), true); PROTOBUF_ATTRIBUTE_INIT_PRIORITY static ::PROTOBUF_NAMESPACE_ID::internal::AddDescriptorsRunner dynamic_init_dummy_google_2fprotobuf_2fsource_5fcontext_2eproto(&descriptor_table_google_2fprotobuf_2fsource_5fcontext_2eproto);
PROTOBUF_NAMESPACE_OPEN PROTOBUF_NAMESPACE_OPEN
// =================================================================== // ===================================================================

@ -14,6 +14,8 @@
#include <google/protobuf/wire_format.h> #include <google/protobuf/wire_format.h>
// @@protoc_insertion_point(includes) // @@protoc_insertion_point(includes)
#include <google/protobuf/port_def.inc> #include <google/protobuf/port_def.inc>
PROTOBUF_PRAGMA_INIT_SEG
extern PROTOBUF_INTERNAL_EXPORT_google_2fprotobuf_2fstruct_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_ListValue_google_2fprotobuf_2fstruct_2eproto; extern PROTOBUF_INTERNAL_EXPORT_google_2fprotobuf_2fstruct_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_ListValue_google_2fprotobuf_2fstruct_2eproto;
PROTOBUF_NAMESPACE_OPEN PROTOBUF_NAMESPACE_OPEN
class Struct_FieldsEntry_DoNotUseDefaultTypeInternal { class Struct_FieldsEntry_DoNotUseDefaultTypeInternal {
@ -145,7 +147,7 @@ const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_google
}; };
// Force running AddDescriptors() at dynamic initialization time. // Force running AddDescriptors() at dynamic initialization time.
static bool dynamic_init_dummy_google_2fprotobuf_2fstruct_2eproto = (static_cast<void>(::PROTOBUF_NAMESPACE_ID::internal::AddDescriptors(&descriptor_table_google_2fprotobuf_2fstruct_2eproto)), true); PROTOBUF_ATTRIBUTE_INIT_PRIORITY static ::PROTOBUF_NAMESPACE_ID::internal::AddDescriptorsRunner dynamic_init_dummy_google_2fprotobuf_2fstruct_2eproto(&descriptor_table_google_2fprotobuf_2fstruct_2eproto);
PROTOBUF_NAMESPACE_OPEN PROTOBUF_NAMESPACE_OPEN
const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* NullValue_descriptor() { const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* NullValue_descriptor() {
::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(&descriptor_table_google_2fprotobuf_2fstruct_2eproto); ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(&descriptor_table_google_2fprotobuf_2fstruct_2eproto);

@ -327,3 +327,5 @@ const char* FatalException::what() const throw() {
} // namespace protobuf } // namespace protobuf
} // namespace google } // namespace google
#include <google/protobuf/port_undef.inc>

@ -190,3 +190,5 @@ std::ostream& operator<<(std::ostream& o, const uint128& b) {
} // namespace protobuf } // namespace protobuf
} // namespace google } // namespace google
#include <google/protobuf/port_undef.inc> // NOLINT

@ -515,3 +515,5 @@ TEST(Int128, OStream) {
} }
} // namespace protobuf } // namespace protobuf
} // namespace google } // namespace google
#include <google/protobuf/port_undef.inc>

@ -90,12 +90,33 @@ class PROTOBUF_EXPORT CriticalSectionLock {
#endif #endif
// In MSVC std::mutex does not have a constexpr constructor.
// This wrapper makes the constructor constexpr.
template <typename T>
class CallOnceInitializedMutex {
public:
constexpr CallOnceInitializedMutex() : flag_{}, buf_{} {}
~CallOnceInitializedMutex() { get().~T(); }
void lock() { get().lock(); }
void unlock() { get().unlock(); }
private:
T& get() {
std::call_once(flag_, [&] { ::new (static_cast<void*>(&buf_)) T(); });
return reinterpret_cast<T&>(buf_);
}
std::once_flag flag_;
alignas(T) char buf_[sizeof(T)];
};
// Mutex is a natural type to wrap. As both google and other organization have // Mutex is a natural type to wrap. As both google and other organization have
// specialized mutexes. gRPC also provides an injection mechanism for custom // specialized mutexes. gRPC also provides an injection mechanism for custom
// mutexes. // mutexes.
class GOOGLE_PROTOBUF_CAPABILITY("mutex") PROTOBUF_EXPORT WrappedMutex { class GOOGLE_PROTOBUF_CAPABILITY("mutex") PROTOBUF_EXPORT WrappedMutex {
public: public:
WrappedMutex() = default; constexpr WrappedMutex() = default;
void Lock() GOOGLE_PROTOBUF_ACQUIRE() { mu_.lock(); } void Lock() GOOGLE_PROTOBUF_ACQUIRE() { mu_.lock(); }
void Unlock() GOOGLE_PROTOBUF_RELEASE() { mu_.unlock(); } void Unlock() GOOGLE_PROTOBUF_RELEASE() { mu_.unlock(); }
// Crash if this Mutex is not held exclusively by this thread. // Crash if this Mutex is not held exclusively by this thread.
@ -103,11 +124,13 @@ class GOOGLE_PROTOBUF_CAPABILITY("mutex") PROTOBUF_EXPORT WrappedMutex {
void AssertHeld() const {} void AssertHeld() const {}
private: private:
#ifndef GOOGLE_PROTOBUF_SUPPORT_WINDOWS_XP #if defined(GOOGLE_PROTOBUF_SUPPORT_WINDOWS_XP)
CallOnceInitializedMutex<CriticalSectionLock> mu_;
#elif defined(_MSC_VER)
CallOnceInitializedMutex<std::mutex> mu_;
#else
std::mutex mu_; std::mutex mu_;
#else // ifndef GOOGLE_PROTOBUF_SUPPORT_WINDOWS_XP #endif
CriticalSectionLock mu_;
#endif // #ifndef GOOGLE_PROTOBUF_SUPPORT_WINDOWS_XP
}; };
using Mutex = WrappedMutex; using Mutex = WrappedMutex;

@ -51,11 +51,11 @@
#if !defined(PROTOBUF_DISABLE_LITTLE_ENDIAN_OPT_FOR_TEST) #if !defined(PROTOBUF_DISABLE_LITTLE_ENDIAN_OPT_FOR_TEST)
#define PROTOBUF_LITTLE_ENDIAN 1 #define PROTOBUF_LITTLE_ENDIAN 1
#endif #endif
#if defined(_MSC_VER) && _MSC_VER >= 1300 && !defined(__INTEL_COMPILER) #if defined(_MSC_VER) && _MSC_VER >= 1300 && !defined(__INTEL_COMPILER)
// If MSVC has "/RTCc" set, it will complain about truncating casts at // If MSVC has "/RTCc" set, it will complain about truncating casts at
// runtime. This file contains some intentional truncating casts. // runtime. This file contains some intentional truncating casts.
#pragma runtime_checks("c", off) #pragma runtime_checks("c", off)
#endif #endif
#else #else
#include <sys/param.h> // __BYTE_ORDER #include <sys/param.h> // __BYTE_ORDER
#if defined(__OpenBSD__) #if defined(__OpenBSD__)

@ -54,7 +54,6 @@
#include <google/protobuf/dynamic_message.h> #include <google/protobuf/dynamic_message.h>
#include <google/protobuf/map_field.h> #include <google/protobuf/map_field.h>
#include <google/protobuf/message.h> #include <google/protobuf/message.h>
#include <google/protobuf/port_def.inc>
#include <google/protobuf/repeated_field.h> #include <google/protobuf/repeated_field.h>
#include <google/protobuf/unknown_field_set.h> #include <google/protobuf/unknown_field_set.h>
#include <google/protobuf/wire_format_lite.h> #include <google/protobuf/wire_format_lite.h>
@ -63,6 +62,9 @@
#include <google/protobuf/stubs/map_util.h> #include <google/protobuf/stubs/map_util.h>
#include <google/protobuf/stubs/stl_util.h> #include <google/protobuf/stubs/stl_util.h>
// Must be included last.
#include <google/protobuf/port_def.inc>
namespace google { namespace google {
namespace protobuf { namespace protobuf {
@ -161,7 +163,7 @@ TextFormat::ParseLocation TextFormat::ParseInfoTree::GetLocation(
const std::vector<TextFormat::ParseLocation>* locations = const std::vector<TextFormat::ParseLocation>* locations =
FindOrNull(locations_, field); FindOrNull(locations_, field);
if (locations == nullptr || index >= locations->size()) { if (locations == nullptr || index >= static_cast<int64>(locations->size())) {
return TextFormat::ParseLocation(); return TextFormat::ParseLocation();
} }
@ -176,7 +178,7 @@ TextFormat::ParseInfoTree* TextFormat::ParseInfoTree::GetTreeForNested(
} }
auto it = nested_.find(field); auto it = nested_.find(field);
if (it == nested_.end() || index >= it->second.size()) { if (it == nested_.end() || index >= static_cast<int64>(it->second.size())) {
return nullptr; return nullptr;
} }
@ -1321,7 +1323,7 @@ class TextFormat::Printer::TextGenerator
if (failed_) return; if (failed_) return;
} }
while (size > buffer_size_) { while (static_cast<int64>(size) > buffer_size_) {
// Data exceeds space in the buffer. Copy what we can and request a // Data exceeds space in the buffer. Copy what we can and request a
// new buffer. // new buffer.
if (buffer_size_ > 0) { if (buffer_size_ > 0) {
@ -2393,7 +2395,8 @@ void TextFormat::Printer::PrintFieldValue(const Message& message,
const std::string* value_to_print = &value; const std::string* value_to_print = &value;
std::string truncated_value; std::string truncated_value;
if (truncate_string_field_longer_than_ > 0 && if (truncate_string_field_longer_than_ > 0 &&
truncate_string_field_longer_than_ < value.size()) { static_cast<size_t>(truncate_string_field_longer_than_) <
value.size()) {
truncated_value = value.substr(0, truncate_string_field_longer_than_) + truncated_value = value.substr(0, truncate_string_field_longer_than_) +
"...<truncated>..."; "...<truncated>...";
value_to_print = &truncated_value; value_to_print = &truncated_value;
@ -2576,3 +2579,5 @@ void TextFormat::Printer::PrintUnknownFields(
} // namespace protobuf } // namespace protobuf
} // namespace google } // namespace google
#include <google/protobuf/port_undef.inc>

@ -2160,3 +2160,5 @@ TEST(TextFormatUnknownFieldTest, TestUnknownExtension) {
} // namespace text_format_unittest } // namespace text_format_unittest
} // namespace protobuf } // namespace protobuf
} // namespace google } // namespace google
#include <google/protobuf/port_undef.inc>

@ -14,6 +14,8 @@
#include <google/protobuf/wire_format.h> #include <google/protobuf/wire_format.h>
// @@protoc_insertion_point(includes) // @@protoc_insertion_point(includes)
#include <google/protobuf/port_def.inc> #include <google/protobuf/port_def.inc>
PROTOBUF_PRAGMA_INIT_SEG
PROTOBUF_NAMESPACE_OPEN PROTOBUF_NAMESPACE_OPEN
class TimestampDefaultTypeInternal { class TimestampDefaultTypeInternal {
public: public:
@ -76,7 +78,7 @@ const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_google
}; };
// Force running AddDescriptors() at dynamic initialization time. // Force running AddDescriptors() at dynamic initialization time.
static bool dynamic_init_dummy_google_2fprotobuf_2ftimestamp_2eproto = (static_cast<void>(::PROTOBUF_NAMESPACE_ID::internal::AddDescriptors(&descriptor_table_google_2fprotobuf_2ftimestamp_2eproto)), true); PROTOBUF_ATTRIBUTE_INIT_PRIORITY static ::PROTOBUF_NAMESPACE_ID::internal::AddDescriptorsRunner dynamic_init_dummy_google_2fprotobuf_2ftimestamp_2eproto(&descriptor_table_google_2fprotobuf_2ftimestamp_2eproto);
PROTOBUF_NAMESPACE_OPEN PROTOBUF_NAMESPACE_OPEN
// =================================================================== // ===================================================================

@ -14,6 +14,8 @@
#include <google/protobuf/wire_format.h> #include <google/protobuf/wire_format.h>
// @@protoc_insertion_point(includes) // @@protoc_insertion_point(includes)
#include <google/protobuf/port_def.inc> #include <google/protobuf/port_def.inc>
PROTOBUF_PRAGMA_INIT_SEG
extern PROTOBUF_INTERNAL_EXPORT_google_2fprotobuf_2fany_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_Any_google_2fprotobuf_2fany_2eproto; extern PROTOBUF_INTERNAL_EXPORT_google_2fprotobuf_2fany_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_Any_google_2fprotobuf_2fany_2eproto;
extern PROTOBUF_INTERNAL_EXPORT_google_2fprotobuf_2ftype_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<1> scc_info_EnumValue_google_2fprotobuf_2ftype_2eproto; extern PROTOBUF_INTERNAL_EXPORT_google_2fprotobuf_2ftype_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<1> scc_info_EnumValue_google_2fprotobuf_2ftype_2eproto;
extern PROTOBUF_INTERNAL_EXPORT_google_2fprotobuf_2ftype_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<1> scc_info_Field_google_2fprotobuf_2ftype_2eproto; extern PROTOBUF_INTERNAL_EXPORT_google_2fprotobuf_2ftype_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<1> scc_info_Field_google_2fprotobuf_2ftype_2eproto;
@ -250,7 +252,7 @@ const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_google
}; };
// Force running AddDescriptors() at dynamic initialization time. // Force running AddDescriptors() at dynamic initialization time.
static bool dynamic_init_dummy_google_2fprotobuf_2ftype_2eproto = (static_cast<void>(::PROTOBUF_NAMESPACE_ID::internal::AddDescriptors(&descriptor_table_google_2fprotobuf_2ftype_2eproto)), true); PROTOBUF_ATTRIBUTE_INIT_PRIORITY static ::PROTOBUF_NAMESPACE_ID::internal::AddDescriptorsRunner dynamic_init_dummy_google_2fprotobuf_2ftype_2eproto(&descriptor_table_google_2fprotobuf_2ftype_2eproto);
PROTOBUF_NAMESPACE_OPEN PROTOBUF_NAMESPACE_OPEN
const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* Field_Kind_descriptor() { const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* Field_Kind_descriptor() {
::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(&descriptor_table_google_2fprotobuf_2ftype_2eproto); ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(&descriptor_table_google_2fprotobuf_2ftype_2eproto);

@ -183,7 +183,7 @@ void UnknownFieldSet::DeleteSubrange(int start, int num) {
(fields_)[i + start].Delete(); (fields_)[i + start].Delete();
} }
// Slide down the remaining fields. // Slide down the remaining fields.
for (int i = start + num; i < fields_.size(); ++i) { for (size_t i = start + num; i < fields_.size(); ++i) {
(fields_)[i - num] = (fields_)[i]; (fields_)[i - num] = (fields_)[i];
} }
// Pop off the # of deleted fields. // Pop off the # of deleted fields.
@ -193,8 +193,8 @@ void UnknownFieldSet::DeleteSubrange(int start, int num) {
} }
void UnknownFieldSet::DeleteByNumber(int number) { void UnknownFieldSet::DeleteByNumber(int number) {
int left = 0; // The number of fields left after deletion. size_t left = 0; // The number of fields left after deletion.
for (int i = 0; i < fields_.size(); ++i) { for (size_t i = 0; i < fields_.size(); ++i) {
UnknownField* field = &(fields_)[i]; UnknownField* field = &(fields_)[i];
if (field->number() == number) { if (field->number() == number) {
field->Delete(); field->Delete();
@ -324,3 +324,5 @@ const char* UnknownFieldParse(uint64 tag, UnknownFieldSet* unknown,
} // namespace internal } // namespace internal
} // namespace protobuf } // namespace protobuf
} // namespace google } // namespace google
#include <google/protobuf/port_undef.inc>

@ -74,12 +74,18 @@ bool ParseDelimitedFromCodedStream(MessageLite* message,
return false; return false;
} }
// Get the position after any size bytes have been read (and only the message
// itself remains).
int position_after_size = input->CurrentPosition();
// Tell the stream not to read beyond that size. // Tell the stream not to read beyond that size.
io::CodedInputStream::Limit limit = input->PushLimit(size); io::CodedInputStream::Limit limit = input->PushLimit(size);
// Parse the message. // Parse the message.
if (!message->MergeFromCodedStream(input)) return false; if (!message->MergeFromCodedStream(input)) return false;
if (!input->ConsumedEntireMessage()) return false; if (!input->ConsumedEntireMessage()) return false;
if (input->CurrentPosition() - position_after_size != static_cast<int>(size))
return false;
// Release the limit. // Release the limit.
input->PopLimit(limit); input->PopLimit(limit);

@ -82,6 +82,35 @@ TEST(DelimitedMessageUtilTest, DelimitedMessages) {
} }
} }
// A delimited message whose payload is cut short must fail to parse, and the
// failure must not be reported as a clean EOF (data was present but
// incomplete).
TEST(DelimitedMessageUtilTest, FailsAtEndOfStream) {
  std::stringstream complete_stream;
  std::stringstream truncated_stream;
  {
    // Serialize one delimited message, then keep only the size prefix plus
    // the first two of its payload bytes.
    protobuf_unittest::ForeignMessage original;
    original.set_c(42);
    original.set_d(24);
    EXPECT_TRUE(SerializeDelimitedToOstream(original, &complete_stream));

    std::string bytes = complete_stream.str();
    ASSERT_GT(bytes.size(), size_t{2});
    ASSERT_EQ(bytes[0], 4);  // varint size prefix: 4-byte payload follows

    truncated_stream << bytes[0] << bytes[1] << bytes[2];
  }
  {
    io::IstreamInputStream zstream(&truncated_stream);
    protobuf_unittest::ForeignMessage parsed;
    bool clean_eof = true;
    EXPECT_FALSE(ParseDelimitedFromZeroCopyStream(&parsed,
                                                  &zstream, &clean_eof));
    // Truncation mid-message is a parse failure, not a clean end-of-stream.
    EXPECT_FALSE(clean_eof);
  }
}
} // namespace util } // namespace util
} // namespace protobuf } // namespace protobuf
} // namespace google } // namespace google

@ -141,7 +141,7 @@ class MessageDifferencer::MultipleFieldsMapKeyComparator
int path_index) const { int path_index) const {
const FieldDescriptor* field = key_field_path[path_index]; const FieldDescriptor* field = key_field_path[path_index];
std::vector<SpecificField> current_parent_fields(parent_fields); std::vector<SpecificField> current_parent_fields(parent_fields);
if (path_index == key_field_path.size() - 1) { if (path_index == static_cast<int64>(key_field_path.size() - 1)) {
if (field->is_repeated()) { if (field->is_repeated()) {
if (!message_differencer_->CompareRepeatedField( if (!message_differencer_->CompareRepeatedField(
message1, message2, field, &current_parent_fields)) { message1, message2, field, &current_parent_fields)) {
@ -187,7 +187,7 @@ class MessageDifferencer::MultipleFieldsMapKeyComparator
void MatchIndicesPostProcessorForSmartList( void MatchIndicesPostProcessorForSmartList(
std::vector<int>* match_list1, std::vector<int>* match_list2) { std::vector<int>* match_list1, std::vector<int>* match_list2) {
int last_matched_index = -1; int last_matched_index = -1;
for (int i = 0; i < match_list1->size(); ++i) { for (size_t i = 0; i < match_list1->size(); ++i) {
if (match_list1->at(i) < 0) { if (match_list1->at(i) < 0) {
continue; continue;
} }
@ -395,7 +395,7 @@ void MessageDifferencer::TreatAsMapWithMultipleFieldPathsAsKey(
GOOGLE_CHECK_EQ(FieldDescriptor::CPPTYPE_MESSAGE, field->cpp_type()) GOOGLE_CHECK_EQ(FieldDescriptor::CPPTYPE_MESSAGE, field->cpp_type())
<< "Field has to be message type. Field name is: " << field->full_name(); << "Field has to be message type. Field name is: " << field->full_name();
for (const auto& key_field_path : key_field_paths) { for (const auto& key_field_path : key_field_paths) {
for (int j = 0; j < key_field_path.size(); ++j) { for (size_t j = 0; j < key_field_path.size(); ++j) {
const FieldDescriptor* parent_field = const FieldDescriptor* parent_field =
j == 0 ? field : key_field_path[j - 1]; j == 0 ? field : key_field_path[j - 1];
const FieldDescriptor* child_field = key_field_path[j]; const FieldDescriptor* child_field = key_field_path[j];
@ -669,8 +669,8 @@ bool MessageDifferencer::CompareRequestedFieldsUsingSettings(
FieldDescriptorArray MessageDifferencer::CombineFields( FieldDescriptorArray MessageDifferencer::CombineFields(
const FieldDescriptorArray& fields1, Scope fields1_scope, const FieldDescriptorArray& fields1, Scope fields1_scope,
const FieldDescriptorArray& fields2, Scope fields2_scope) { const FieldDescriptorArray& fields2, Scope fields2_scope) {
int index1 = 0; size_t index1 = 0;
int index2 = 0; size_t index2 = 0;
tmp_message_fields_.clear(); tmp_message_fields_.clear();
@ -1417,8 +1417,8 @@ bool MessageDifferencer::CompareUnknownFields(
// Now that we have two sorted lists, we can detect fields which appear only // Now that we have two sorted lists, we can detect fields which appear only
// in one list or the other by traversing them simultaneously. // in one list or the other by traversing them simultaneously.
int index1 = 0; size_t index1 = 0;
int index2 = 0; size_t index2 = 0;
while (index1 < fields1.size() || index2 < fields2.size()) { while (index1 < fields1.size() || index2 < fields2.size()) {
enum { enum {
ADDITION, ADDITION,
@ -1523,12 +1523,14 @@ bool MessageDifferencer::CompareUnknownFields(
if (IsUnknownFieldIgnored(message1, message2, specific_field, if (IsUnknownFieldIgnored(message1, message2, specific_field,
*parent_field)) { *parent_field)) {
if (reporter_ != NULL) { if (report_ignores_ && reporter_ != NULL) {
parent_field->push_back(specific_field); parent_field->push_back(specific_field);
reporter_->ReportUnknownFieldIgnored(message1, message2, *parent_field); reporter_->ReportUnknownFieldIgnored(message1, message2, *parent_field);
parent_field->pop_back(); parent_field->pop_back();
} }
return true; if (change_type != ADDITION) ++index1;
if (change_type != DELETION) ++index2;
continue;
} }
if (change_type == ADDITION || change_type == DELETION || if (change_type == ADDITION || change_type == DELETION ||
@ -1881,7 +1883,7 @@ MessageDifferencer::StreamReporter::~StreamReporter() {
void MessageDifferencer::StreamReporter::PrintPath( void MessageDifferencer::StreamReporter::PrintPath(
const std::vector<SpecificField>& field_path, bool left_side) { const std::vector<SpecificField>& field_path, bool left_side) {
for (int i = 0; i < field_path.size(); ++i) { for (size_t i = 0; i < field_path.size(); ++i) {
if (i > 0) { if (i > 0) {
printer_->Print("."); printer_->Print(".");
} }

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save