Merge tag 'refs/tags/sync-piper' into sync-stage

pull/8076/head
Joshua Haberman 4 years ago
commit 920803e18d
  100 changed files, with changed-line counts in parentheses:
  1. conformance/conformance_cpp.cc (12)
  2. java/core/src/main/java/com/google/protobuf/FieldSet.java (42)
  3. java/core/src/main/java/com/google/protobuf/TextFormat.java (25)
  4. java/core/src/main/java/com/google/protobuf/UnsafeUtil.java (2)
  5. java/core/src/test/java/com/google/protobuf/MapForProto2LiteTest.java (24)
  6. java/core/src/test/java/com/google/protobuf/MapForProto2Test.java (86)
  7. java/core/src/test/java/com/google/protobuf/MapLiteTest.java (24)
  8. java/core/src/test/java/com/google/protobuf/MapTest.java (24)
  9. java/core/src/test/java/com/google/protobuf/TestUtil.java (4)
  10. java/core/src/test/java/com/google/protobuf/TestUtilLite.java (4)
  11. java/util/src/main/java/com/google/protobuf/util/Durations.java (29)
  12. java/util/src/main/java/com/google/protobuf/util/Timestamps.java (28)
  13. python/google/protobuf/internal/containers.py (3)
  14. python/google/protobuf/internal/reflection_test.py (65)
  15. python/google/protobuf/pyext/repeated_composite_container.cc (251)
  16. python/google/protobuf/pyext/repeated_scalar_container.cc (413)
  17. python/setup.py (2)
  18. src/google/protobuf/any.cc (10)
  19. src/google/protobuf/any.h (17)
  20. src/google/protobuf/any.pb.cc (4)
  21. src/google/protobuf/any.pb.h (16)
  22. src/google/protobuf/any_lite.cc (4)
  23. src/google/protobuf/any_test.cc (11)
  24. src/google/protobuf/any_test.proto (3)
  25. src/google/protobuf/api.pb.cc (4)
  26. src/google/protobuf/arena.cc (593)
  27. src/google/protobuf/arena.h (216)
  28. src/google/protobuf/arena_impl.h (358)
  29. src/google/protobuf/arena_unittest.cc (10)
  30. src/google/protobuf/arenastring.cc (2)
  31. src/google/protobuf/arenastring_unittest.cc (2)
  32. src/google/protobuf/compiler/command_line_interface.cc (237)
  33. src/google/protobuf/compiler/cpp/cpp_extension.cc (4)
  34. src/google/protobuf/compiler/cpp/cpp_file.cc (11)
  35. src/google/protobuf/compiler/cpp/cpp_helpers.cc (8)
  36. src/google/protobuf/compiler/cpp/cpp_helpers.h (4)
  37. src/google/protobuf/compiler/cpp/cpp_message.cc (53)
  38. src/google/protobuf/compiler/java/java_helpers.cc (12)
  39. src/google/protobuf/compiler/java/java_helpers.h (1)
  40. src/google/protobuf/compiler/java/java_name_resolver.cc (1)
  41. src/google/protobuf/compiler/js/js_generator.cc (6)
  42. src/google/protobuf/compiler/plugin.pb.cc (4)
  43. src/google/protobuf/descriptor.cc (18)
  44. src/google/protobuf/descriptor.h (3)
  45. src/google/protobuf/descriptor.pb.cc (4)
  46. src/google/protobuf/descriptor_database.cc (8)
  47. src/google/protobuf/descriptor_unittest.cc (17)
  48. src/google/protobuf/duration.pb.cc (4)
  49. src/google/protobuf/dynamic_message.cc (2)
  50. src/google/protobuf/empty.pb.cc (4)
  51. src/google/protobuf/extension_set.cc (3)
  52. src/google/protobuf/extension_set.h (4)
  53. src/google/protobuf/extension_set_heavy.cc (2)
  54. src/google/protobuf/extension_set_unittest.cc (2)
  55. src/google/protobuf/field_mask.pb.cc (4)
  56. src/google/protobuf/generated_enum_util.cc (2)
  57. src/google/protobuf/generated_message_reflection.cc (43)
  58. src/google/protobuf/generated_message_reflection.h (11)
  59. src/google/protobuf/generated_message_table_driven_lite.h (2)
  60. src/google/protobuf/generated_message_util.cc (18)
  61. src/google/protobuf/implicit_weak_message.cc (2)
  62. src/google/protobuf/io/coded_stream.cc (8)
  63. src/google/protobuf/io/coded_stream_unittest.cc (2)
  64. src/google/protobuf/io/gzip_stream.cc (2)
  65. src/google/protobuf/io/printer.cc (8)
  66. src/google/protobuf/io/tokenizer.cc (10)
  67. src/google/protobuf/io/zero_copy_stream_impl_lite.cc (2)
  68. src/google/protobuf/map.h (2)
  69. src/google/protobuf/map_field.cc (2)
  70. src/google/protobuf/map_field.h (22)
  71. src/google/protobuf/map_field_test.cc (2)
  72. src/google/protobuf/map_test.cc (2)
  73. src/google/protobuf/message.cc (2)
  74. src/google/protobuf/message.h (3)
  75. src/google/protobuf/message_lite.cc (6)
  76. src/google/protobuf/message_lite.h (2)
  77. src/google/protobuf/message_unittest.inc (4)
  78. src/google/protobuf/parse_context.cc (2)
  79. src/google/protobuf/port.h (4)
  80. src/google/protobuf/port_def.inc (69)
  81. src/google/protobuf/port_undef.inc (10)
  82. src/google/protobuf/reflection_ops.cc (2)
  83. src/google/protobuf/repeated_field.cc (8)
  84. src/google/protobuf/repeated_field.h (6)
  85. src/google/protobuf/repeated_field_unittest.cc (2)
  86. src/google/protobuf/source_context.pb.cc (4)
  87. src/google/protobuf/struct.pb.cc (4)
  88. src/google/protobuf/stubs/common.cc (2)
  89. src/google/protobuf/stubs/int128.cc (2)
  90. src/google/protobuf/stubs/int128_unittest.cc (2)
  91. src/google/protobuf/stubs/mutex.h (33)
  92. src/google/protobuf/stubs/port.h (10)
  93. src/google/protobuf/text_format.cc (15)
  94. src/google/protobuf/text_format_unittest.cc (2)
  95. src/google/protobuf/timestamp.pb.cc (4)
  96. src/google/protobuf/type.pb.cc (4)
  97. src/google/protobuf/unknown_field_set.cc (8)
  98. src/google/protobuf/util/delimited_message_util.cc (6)
  99. src/google/protobuf/util/delimited_message_util_test.cc (29)
  100. src/google/protobuf/util/message_differencer.cc (22)
  Some files were not shown because too many files have changed in this diff.

@@ -36,6 +36,7 @@
#include <google/protobuf/text_format.h>
#include <google/protobuf/util/json_util.h>
#include <google/protobuf/util/type_resolver_util.h>
#include <google/protobuf/stubs/status.h>
#include "conformance.pb.h"
#include <google/protobuf/test_messages_proto2.pb.h>
#include <google/protobuf/test_messages_proto3.pb.h>
@@ -125,9 +126,9 @@ void DoTest(const ConformanceRequest& request, ConformanceResponse* response) {
options.ignore_unknown_fields =
(request.test_category() ==
conformance::JSON_IGNORE_UNKNOWN_PARSING_TEST);
Status status = JsonToBinaryString(type_resolver, *type_url,
request.json_payload(), &proto_binary,
options);
util::Status status =
JsonToBinaryString(type_resolver, *type_url, request.json_payload(),
&proto_binary, options);
if (!status.ok()) {
response->set_parse_error(string("Parse error: ") +
std::string(status.error_message()));
@@ -179,8 +180,9 @@ void DoTest(const ConformanceRequest& request, ConformanceResponse* response) {
case conformance::JSON: {
string proto_binary;
GOOGLE_CHECK(test_message->SerializeToString(&proto_binary));
Status status = BinaryToJsonString(type_resolver, *type_url, proto_binary,
response->mutable_json_payload());
util::Status status =
BinaryToJsonString(type_resolver, *type_url, proto_binary,
response->mutable_json_payload());
if (!status.ok()) {
response->set_serialize_error(
string("Failed to serialize JSON output: ") +

@@ -119,7 +119,6 @@ final class FieldSet<T extends FieldSet.FieldDescriptorLite<T>> {
}
/** Make this FieldSet immutable from this point forward. */
@SuppressWarnings("unchecked")
public void makeImmutable() {
if (isImmutable) {
return;
@@ -286,11 +285,11 @@ final class FieldSet<T extends FieldSet.FieldDescriptorLite<T>> {
final List newList = new ArrayList();
newList.addAll((List) value);
for (final Object element : newList) {
verifyType(descriptor.getLiteType(), element);
verifyType(descriptor, element);
}
value = newList;
} else {
verifyType(descriptor.getLiteType(), value);
verifyType(descriptor, value);
}
if (value instanceof LazyField) {
@@ -354,7 +353,7 @@ final class FieldSet<T extends FieldSet.FieldDescriptorLite<T>> {
throw new IndexOutOfBoundsException();
}
verifyType(descriptor.getLiteType(), value);
verifyType(descriptor, value);
((List<Object>) list).set(index, value);
}
@@ -369,7 +368,7 @@ final class FieldSet<T extends FieldSet.FieldDescriptorLite<T>> {
"addRepeatedField() can only be called on repeated fields.");
}
verifyType(descriptor.getLiteType(), value);
verifyType(descriptor, value);
final Object existingValue = getField(descriptor);
List<Object> list;
@@ -390,8 +389,8 @@ final class FieldSet<T extends FieldSet.FieldDescriptorLite<T>> {
*
* @throws IllegalArgumentException The value is not of the right type.
*/
private void verifyType(final WireFormat.FieldType type, final Object value) {
if (!isValidType(type, value)) {
private void verifyType(final T descriptor, final Object value) {
if (!isValidType(descriptor.getLiteType(), value)) {
// TODO(kenton): When chaining calls to setField(), it can be hard to
// tell from the stack trace which exact call failed, since the whole
// chain is considered one line of code. It would be nice to print
@@ -400,10 +399,16 @@ final class FieldSet<T extends FieldSet.FieldDescriptorLite<T>> {
// isn't a big deal, though, since it would only really apply when using
// reflection and generally people don't chain reflection setters.
throw new IllegalArgumentException(
"Wrong object type used with protocol message reflection.");
String.format(
"Wrong object type used with protocol message reflection.\n"
+ "Field number: %d, field java type: %s, value type: %s\n",
descriptor.getNumber(),
descriptor.getLiteType().getJavaType(),
value.getClass().getName()));
}
}
private static boolean isValidType(final WireFormat.FieldType type, final Object value) {
checkNotNull(value);
switch (type.getJavaType()) {
@@ -1081,12 +1086,12 @@ final class FieldSet<T extends FieldSet.FieldDescriptorLite<T>> {
final List newList = new ArrayList();
newList.addAll((List) value);
for (final Object element : newList) {
verifyType(descriptor.getLiteType(), element);
verifyType(descriptor, element);
hasNestedBuilders = hasNestedBuilders || element instanceof MessageLite.Builder;
}
value = newList;
} else {
verifyType(descriptor.getLiteType(), value);
verifyType(descriptor, value);
}
if (value instanceof LazyField) {
@@ -1172,7 +1177,7 @@ final class FieldSet<T extends FieldSet.FieldDescriptorLite<T>> {
throw new IndexOutOfBoundsException();
}
verifyType(descriptor.getLiteType(), value);
verifyType(descriptor, value);
((List<Object>) list).set(index, value);
}
@@ -1190,7 +1195,7 @@ final class FieldSet<T extends FieldSet.FieldDescriptorLite<T>> {
hasNestedBuilders = hasNestedBuilders || value instanceof MessageLite.Builder;
verifyType(descriptor.getLiteType(), value);
verifyType(descriptor, value);
final Object existingValue = getField(descriptor);
List<Object> list;
@@ -1211,15 +1216,20 @@ final class FieldSet<T extends FieldSet.FieldDescriptorLite<T>> {
*
* @throws IllegalArgumentException The value is not of the right type.
*/
private static void verifyType(final WireFormat.FieldType type, final Object value) {
if (!FieldSet.isValidType(type, value)) {
private void verifyType(final T descriptor, final Object value) {
if (!FieldSet.isValidType(descriptor.getLiteType(), value)) {
// Builder can accept Message.Builder values even though FieldSet will reject.
if (type.getJavaType() == WireFormat.JavaType.MESSAGE
if (descriptor.getLiteType().getJavaType() == WireFormat.JavaType.MESSAGE
&& value instanceof MessageLite.Builder) {
return;
}
throw new IllegalArgumentException(
"Wrong object type used with protocol message reflection.");
String.format(
"Wrong object type used with protocol message reflection.\n"
+ "Field number: %d, field java type: %s, value type: %s\n",
descriptor.getNumber(),
descriptor.getLiteType().getJavaType(),
value.getClass().getName()));
}
}

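The FieldSet change above passes the whole field descriptor into verifyType() rather than just its WireFormat.FieldType, so the IllegalArgumentException can now report the field number, the field's declared Java type, and the class of the rejected value. A minimal sketch of how the richer message surfaces; it goes through DynamicMessage, whose fields are stored in a FieldSet, and uses protobuf's unittest TestAllTypes message, so treat the import paths as illustrative:

```java
import com.google.protobuf.Descriptors.Descriptor;
import com.google.protobuf.Descriptors.FieldDescriptor;
import com.google.protobuf.DynamicMessage;
import protobuf_unittest.UnittestProto.TestAllTypes; // illustrative import path

public class VerifyTypeDemo {
  public static void main(String[] args) {
    Descriptor type = TestAllTypes.getDescriptor();
    FieldDescriptor field = type.findFieldByName("optional_int32");
    try {
      // Wrong Java type for an int32 field: a String instead of an Integer.
      DynamicMessage.newBuilder(type).setField(field, "not an int");
    } catch (IllegalArgumentException e) {
      // Previously: "Wrong object type used with protocol message reflection."
      // Now the message also carries the field number, the field's Java
      // type (INT), and the value's class (java.lang.String).
      System.err.println(e.getMessage());
    }
  }
}
```
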
@@ -61,6 +61,7 @@ public final class TextFormat {
private static final Logger logger = Logger.getLogger(TextFormat.class.getName());
/**
* Outputs a textual representation of the Protocol Message supplied into the parameter output.
* (This representation is the new version of the classic "ProtocolPrinter" output from the
@@ -727,9 +728,9 @@ public final class TextFormat {
// Groups must be serialized with their original capitalization.
generator.print(field.getMessageType().getName());
} else {
generator.print(field.getName());
generator.print(field.getName());
}
}
}
if (field.getJavaType() == FieldDescriptor.JavaType.MESSAGE) {
generator.print(" {");
@@ -1811,16 +1812,16 @@ public final class TextFormat {
extension = target.findExtensionByName(extensionRegistry, name.toString());
if (extension == null) {
String message =
(tokenizer.getPreviousLine() + 1)
+ ":"
+ (tokenizer.getPreviousColumn() + 1)
+ ":\t"
+ type.getFullName()
+ ".["
+ name
+ "]";
unknownFields.add(new UnknownField(message, UnknownField.Type.EXTENSION));
String message =
(tokenizer.getPreviousLine() + 1)
+ ":"
+ (tokenizer.getPreviousColumn() + 1)
+ ":\t"
+ type.getFullName()
+ ".["
+ name
+ "]";
unknownFields.add(new UnknownField(message, UnknownField.Type.EXTENSION));
} else {
if (extension.descriptor.getContainingType() != type) {
throw tokenizer.parseExceptionPreviousToken(

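One detail worth noting in the unknown-extension report in the TextFormat hunk above: the tokenizer's getPreviousLine() and getPreviousColumn() are 0-based, so both are incremented to build the 1-based line:column prefix humans expect. A tiny illustration; the type and extension names are made up:

```java
public class PositionDemo {
  public static void main(String[] args) {
    // Tokenizer coordinates are 0-based; the report converts them to the
    // 1-based line:column form. For an unknown extension at input start:
    int previousLine = 0;   // what tokenizer.getPreviousLine() would return
    int previousColumn = 0; // what tokenizer.getPreviousColumn() would return
    String position = (previousLine + 1) + ":" + (previousColumn + 1); // "1:1"
    System.out.println(position + ":\tprotobuf_unittest.TestAllTypes.[my.ext]");
  }
}
```
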
@@ -388,7 +388,7 @@ final class UnsafeUtil {
}
if (Android.isOnAndroidDevice()) {
return true;
return false;
}
clazz.getMethod("getByte", long.class);
clazz.getMethod("putByte", long.class, byte.class);

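The one-line UnsafeUtil flip above changes the Android branch from return true to return false: the raw-address sun.misc.Unsafe accessors probed just below it (getByte(long), putByte(long, byte)) are no longer assumed to work on Android runtimes. A minimal sketch of the probe pattern under that reading; names are simplified, and the Android detection shown is a stand-in for protobuf's Android.isOnAndroidDevice():

```java
public final class UnsafeProbe {
  // Stand-in for Android.isOnAndroidDevice(); the real detection differs.
  static boolean isOnAndroidDevice() {
    return System.getProperty("java.vendor", "").contains("Android");
  }

  static boolean supportsAddressOperations(Class<?> unsafeClass) {
    try {
      if (isOnAndroidDevice()) {
        return false; // was "return true" before this change
      }
      // Probe for the raw-address accessors; a missing method throws
      // NoSuchMethodException and lands in the catch below.
      unsafeClass.getMethod("getByte", long.class);
      unsafeClass.getMethod("putByte", long.class, byte.class);
      return true;
    } catch (Throwable t) {
      return false;
    }
  }
}
```
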
@@ -184,17 +184,17 @@ public final class MapForProto2LiteTest extends TestCase {
}
private void assertMapValuesCleared(TestMapOrBuilder testMapOrBuilder) {
assertEquals(0, testMapOrBuilder.getInt32ToInt32Field().size());
assertEquals(0, testMapOrBuilder.getInt32ToInt32FieldMap().size());
assertEquals(0, testMapOrBuilder.getInt32ToInt32FieldCount());
assertEquals(0, testMapOrBuilder.getInt32ToStringField().size());
assertEquals(0, testMapOrBuilder.getInt32ToStringFieldMap().size());
assertEquals(0, testMapOrBuilder.getInt32ToStringFieldCount());
assertEquals(0, testMapOrBuilder.getInt32ToBytesField().size());
assertEquals(0, testMapOrBuilder.getInt32ToBytesFieldMap().size());
assertEquals(0, testMapOrBuilder.getInt32ToBytesFieldCount());
assertEquals(0, testMapOrBuilder.getInt32ToEnumField().size());
assertEquals(0, testMapOrBuilder.getInt32ToEnumFieldMap().size());
assertEquals(0, testMapOrBuilder.getInt32ToEnumFieldCount());
assertEquals(0, testMapOrBuilder.getInt32ToMessageField().size());
assertEquals(0, testMapOrBuilder.getInt32ToMessageFieldMap().size());
assertEquals(0, testMapOrBuilder.getInt32ToMessageFieldCount());
assertEquals(0, testMapOrBuilder.getStringToInt32Field().size());
assertEquals(0, testMapOrBuilder.getStringToInt32FieldMap().size());
assertEquals(0, testMapOrBuilder.getStringToInt32FieldCount());
}
@@ -226,13 +226,13 @@ public final class MapForProto2LiteTest extends TestCase {
}
private void assertMapsAreImmutable(TestMapOrBuilder testMapOrBuilder) {
assertImmutable(testMapOrBuilder.getInt32ToInt32Field(), 1, 2);
assertImmutable(testMapOrBuilder.getInt32ToStringField(), 1, "2");
assertImmutable(testMapOrBuilder.getInt32ToBytesField(), 1, TestUtil.toBytes("2"));
assertImmutable(testMapOrBuilder.getInt32ToEnumField(), 1, TestMap.EnumValue.FOO);
assertImmutable(testMapOrBuilder.getInt32ToInt32FieldMap(), 1, 2);
assertImmutable(testMapOrBuilder.getInt32ToStringFieldMap(), 1, "2");
assertImmutable(testMapOrBuilder.getInt32ToBytesFieldMap(), 1, TestUtil.toBytes("2"));
assertImmutable(testMapOrBuilder.getInt32ToEnumFieldMap(), 1, TestMap.EnumValue.FOO);
assertImmutable(
testMapOrBuilder.getInt32ToMessageField(), 1, MessageValue.getDefaultInstance());
assertImmutable(testMapOrBuilder.getStringToInt32Field(), "1", 2);
testMapOrBuilder.getInt32ToMessageFieldMap(), 1, MessageValue.getDefaultInstance());
assertImmutable(testMapOrBuilder.getStringToInt32FieldMap(), "1", 2);
}
private <K, V> void assertImmutable(Map<K, V> map, K key, V value) {

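This test churn, repeated in MapForProto2Test, MapLiteTest, and MapTest below, tracks the generated Java API's map-accessor rename: getFooField() gives way to getFooFieldMap() plus getFooFieldCount(). A short sketch against the TestMap message these tests use; the import path and the put* mutator name follow the usual codegen conventions and are assumptions here:

```java
import java.util.Map;
import map_test.MapTestProto.TestMap; // assumed import path for the test proto

public class MapAccessorDemo {
  public static void main(String[] args) {
    TestMap message =
        TestMap.newBuilder()
            .putInt32ToStringField(1, "11") // generated put* mutator
            .build();

    // The accessors these tests now exercise:
    Map<Integer, String> view = message.getInt32ToStringFieldMap(); // read-only view
    int count = message.getInt32ToStringFieldCount();               // == view.size()
    System.out.println(count + " entry: " + view);

    // assertMapsAreImmutable() depends on the view being unmodifiable;
    // view.put(2, "22") would throw UnsupportedOperationException.
  }
}
```
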
@@ -128,35 +128,35 @@ public class MapForProto2Test extends TestCase {
}
private void assertMapValuesSet(TestMapOrBuilder message) {
assertEquals(3, message.getInt32ToInt32Field().size());
assertEquals(11, message.getInt32ToInt32Field().get(1).intValue());
assertEquals(22, message.getInt32ToInt32Field().get(2).intValue());
assertEquals(33, message.getInt32ToInt32Field().get(3).intValue());
assertEquals(3, message.getInt32ToStringField().size());
assertEquals("11", message.getInt32ToStringField().get(1));
assertEquals("22", message.getInt32ToStringField().get(2));
assertEquals("33", message.getInt32ToStringField().get(3));
assertEquals(3, message.getInt32ToBytesField().size());
assertEquals(TestUtil.toBytes("11"), message.getInt32ToBytesField().get(1));
assertEquals(TestUtil.toBytes("22"), message.getInt32ToBytesField().get(2));
assertEquals(TestUtil.toBytes("33"), message.getInt32ToBytesField().get(3));
assertEquals(3, message.getInt32ToEnumField().size());
assertEquals(TestMap.EnumValue.FOO, message.getInt32ToEnumField().get(1));
assertEquals(TestMap.EnumValue.BAR, message.getInt32ToEnumField().get(2));
assertEquals(TestMap.EnumValue.BAZ, message.getInt32ToEnumField().get(3));
assertEquals(3, message.getInt32ToMessageField().size());
assertEquals(11, message.getInt32ToMessageField().get(1).getValue());
assertEquals(22, message.getInt32ToMessageField().get(2).getValue());
assertEquals(33, message.getInt32ToMessageField().get(3).getValue());
assertEquals(3, message.getStringToInt32Field().size());
assertEquals(11, message.getStringToInt32Field().get("1").intValue());
assertEquals(22, message.getStringToInt32Field().get("2").intValue());
assertEquals(33, message.getStringToInt32Field().get("3").intValue());
assertEquals(3, message.getInt32ToInt32FieldMap().size());
assertEquals(11, message.getInt32ToInt32FieldMap().get(1).intValue());
assertEquals(22, message.getInt32ToInt32FieldMap().get(2).intValue());
assertEquals(33, message.getInt32ToInt32FieldMap().get(3).intValue());
assertEquals(3, message.getInt32ToStringFieldMap().size());
assertEquals("11", message.getInt32ToStringFieldMap().get(1));
assertEquals("22", message.getInt32ToStringFieldMap().get(2));
assertEquals("33", message.getInt32ToStringFieldMap().get(3));
assertEquals(3, message.getInt32ToBytesFieldMap().size());
assertEquals(TestUtil.toBytes("11"), message.getInt32ToBytesFieldMap().get(1));
assertEquals(TestUtil.toBytes("22"), message.getInt32ToBytesFieldMap().get(2));
assertEquals(TestUtil.toBytes("33"), message.getInt32ToBytesFieldMap().get(3));
assertEquals(3, message.getInt32ToEnumFieldMap().size());
assertEquals(TestMap.EnumValue.FOO, message.getInt32ToEnumFieldMap().get(1));
assertEquals(TestMap.EnumValue.BAR, message.getInt32ToEnumFieldMap().get(2));
assertEquals(TestMap.EnumValue.BAZ, message.getInt32ToEnumFieldMap().get(3));
assertEquals(3, message.getInt32ToMessageFieldMap().size());
assertEquals(11, message.getInt32ToMessageFieldMap().get(1).getValue());
assertEquals(22, message.getInt32ToMessageFieldMap().get(2).getValue());
assertEquals(33, message.getInt32ToMessageFieldMap().get(3).getValue());
assertEquals(3, message.getStringToInt32FieldMap().size());
assertEquals(11, message.getStringToInt32FieldMap().get("1").intValue());
assertEquals(22, message.getStringToInt32FieldMap().get("2").intValue());
assertEquals(33, message.getStringToInt32FieldMap().get("3").intValue());
}
private void updateMapValuesUsingMutableMap(TestMap.Builder builder) {
@@ -268,17 +268,17 @@ public class MapForProto2Test extends TestCase {
}
private void assertMapValuesCleared(TestMapOrBuilder testMapOrBuilder) {
assertEquals(0, testMapOrBuilder.getInt32ToInt32Field().size());
assertEquals(0, testMapOrBuilder.getInt32ToInt32FieldMap().size());
assertEquals(0, testMapOrBuilder.getInt32ToInt32FieldCount());
assertEquals(0, testMapOrBuilder.getInt32ToStringField().size());
assertEquals(0, testMapOrBuilder.getInt32ToStringFieldMap().size());
assertEquals(0, testMapOrBuilder.getInt32ToStringFieldCount());
assertEquals(0, testMapOrBuilder.getInt32ToBytesField().size());
assertEquals(0, testMapOrBuilder.getInt32ToBytesFieldMap().size());
assertEquals(0, testMapOrBuilder.getInt32ToBytesFieldCount());
assertEquals(0, testMapOrBuilder.getInt32ToEnumField().size());
assertEquals(0, testMapOrBuilder.getInt32ToEnumFieldMap().size());
assertEquals(0, testMapOrBuilder.getInt32ToEnumFieldCount());
assertEquals(0, testMapOrBuilder.getInt32ToMessageField().size());
assertEquals(0, testMapOrBuilder.getInt32ToMessageFieldMap().size());
assertEquals(0, testMapOrBuilder.getInt32ToMessageFieldCount());
assertEquals(0, testMapOrBuilder.getStringToInt32Field().size());
assertEquals(0, testMapOrBuilder.getStringToInt32FieldMap().size());
assertEquals(0, testMapOrBuilder.getStringToInt32FieldCount());
}
@@ -293,13 +293,13 @@ public class MapForProto2Test extends TestCase {
}
private void assertMapsAreImmutable(TestMapOrBuilder testMapOrBuilder) {
assertImmutable(testMapOrBuilder.getInt32ToInt32Field(), 1, 2);
assertImmutable(testMapOrBuilder.getInt32ToStringField(), 1, "2");
assertImmutable(testMapOrBuilder.getInt32ToBytesField(), 1, TestUtil.toBytes("2"));
assertImmutable(testMapOrBuilder.getInt32ToEnumField(), 1, TestMap.EnumValue.FOO);
assertImmutable(testMapOrBuilder.getInt32ToInt32FieldMap(), 1, 2);
assertImmutable(testMapOrBuilder.getInt32ToStringFieldMap(), 1, "2");
assertImmutable(testMapOrBuilder.getInt32ToBytesFieldMap(), 1, TestUtil.toBytes("2"));
assertImmutable(testMapOrBuilder.getInt32ToEnumFieldMap(), 1, TestMap.EnumValue.FOO);
assertImmutable(
testMapOrBuilder.getInt32ToMessageField(), 1, MessageValue.getDefaultInstance());
assertImmutable(testMapOrBuilder.getStringToInt32Field(), "1", 2);
testMapOrBuilder.getInt32ToMessageFieldMap(), 1, MessageValue.getDefaultInstance());
assertImmutable(testMapOrBuilder.getStringToInt32FieldMap(), "1", 2);
}
private <K, V> void assertImmutable(Map<K, V> map, K key, V value) {
@@ -874,8 +874,8 @@ public class MapForProto2Test extends TestCase {
ByteString data = builder.build().toByteString();
TestRecursiveMap message = TestRecursiveMap.parseFrom(data);
assertEquals(2, message.getRecursiveMapField().get(1).getValue());
assertEquals(4, message.getRecursiveMapField().get(3).getValue());
assertEquals(2, message.getRecursiveMapFieldMap().get(1).getValue());
assertEquals(4, message.getRecursiveMapFieldMap().get(3).getValue());
}
public void testIterationOrder() throws Exception {

@@ -191,17 +191,17 @@ public final class MapLiteTest extends TestCase {
}
private void assertMapValuesCleared(TestMapOrBuilder testMapOrBuilder) {
assertEquals(0, testMapOrBuilder.getInt32ToInt32Field().size());
assertEquals(0, testMapOrBuilder.getInt32ToInt32FieldMap().size());
assertEquals(0, testMapOrBuilder.getInt32ToInt32FieldCount());
assertEquals(0, testMapOrBuilder.getInt32ToStringField().size());
assertEquals(0, testMapOrBuilder.getInt32ToStringFieldMap().size());
assertEquals(0, testMapOrBuilder.getInt32ToStringFieldCount());
assertEquals(0, testMapOrBuilder.getInt32ToBytesField().size());
assertEquals(0, testMapOrBuilder.getInt32ToBytesFieldMap().size());
assertEquals(0, testMapOrBuilder.getInt32ToBytesFieldCount());
assertEquals(0, testMapOrBuilder.getInt32ToEnumField().size());
assertEquals(0, testMapOrBuilder.getInt32ToEnumFieldMap().size());
assertEquals(0, testMapOrBuilder.getInt32ToEnumFieldCount());
assertEquals(0, testMapOrBuilder.getInt32ToMessageField().size());
assertEquals(0, testMapOrBuilder.getInt32ToMessageFieldMap().size());
assertEquals(0, testMapOrBuilder.getInt32ToMessageFieldCount());
assertEquals(0, testMapOrBuilder.getStringToInt32Field().size());
assertEquals(0, testMapOrBuilder.getStringToInt32FieldMap().size());
assertEquals(0, testMapOrBuilder.getStringToInt32FieldCount());
}
@@ -232,13 +232,13 @@ public final class MapLiteTest extends TestCase {
}
private void assertMapsAreImmutable(TestMapOrBuilder testMapOrBuilder) {
assertImmutable(testMapOrBuilder.getInt32ToInt32Field(), 1, 2);
assertImmutable(testMapOrBuilder.getInt32ToStringField(), 1, "2");
assertImmutable(testMapOrBuilder.getInt32ToBytesField(), 1, TestUtil.toBytes("2"));
assertImmutable(testMapOrBuilder.getInt32ToEnumField(), 1, TestMap.EnumValue.FOO);
assertImmutable(testMapOrBuilder.getInt32ToInt32FieldMap(), 1, 2);
assertImmutable(testMapOrBuilder.getInt32ToStringFieldMap(), 1, "2");
assertImmutable(testMapOrBuilder.getInt32ToBytesFieldMap(), 1, TestUtil.toBytes("2"));
assertImmutable(testMapOrBuilder.getInt32ToEnumFieldMap(), 1, TestMap.EnumValue.FOO);
assertImmutable(
testMapOrBuilder.getInt32ToMessageField(), 1, MessageValue.getDefaultInstance());
assertImmutable(testMapOrBuilder.getStringToInt32Field(), "1", 2);
testMapOrBuilder.getInt32ToMessageFieldMap(), 1, MessageValue.getDefaultInstance());
assertImmutable(testMapOrBuilder.getStringToInt32FieldMap(), "1", 2);
}
private <K, V> void assertImmutable(Map<K, V> map, K key, V value) {

@@ -271,17 +271,17 @@ public class MapTest extends TestCase {
}
private void assertMapValuesCleared(TestMapOrBuilder testMapOrBuilder) {
assertEquals(0, testMapOrBuilder.getInt32ToInt32Field().size());
assertEquals(0, testMapOrBuilder.getInt32ToInt32FieldMap().size());
assertEquals(0, testMapOrBuilder.getInt32ToInt32FieldCount());
assertEquals(0, testMapOrBuilder.getInt32ToStringField().size());
assertEquals(0, testMapOrBuilder.getInt32ToStringFieldMap().size());
assertEquals(0, testMapOrBuilder.getInt32ToStringFieldCount());
assertEquals(0, testMapOrBuilder.getInt32ToBytesField().size());
assertEquals(0, testMapOrBuilder.getInt32ToBytesFieldMap().size());
assertEquals(0, testMapOrBuilder.getInt32ToBytesFieldCount());
assertEquals(0, testMapOrBuilder.getInt32ToEnumField().size());
assertEquals(0, testMapOrBuilder.getInt32ToEnumFieldMap().size());
assertEquals(0, testMapOrBuilder.getInt32ToEnumFieldCount());
assertEquals(0, testMapOrBuilder.getInt32ToMessageField().size());
assertEquals(0, testMapOrBuilder.getInt32ToMessageFieldMap().size());
assertEquals(0, testMapOrBuilder.getInt32ToMessageFieldCount());
assertEquals(0, testMapOrBuilder.getStringToInt32Field().size());
assertEquals(0, testMapOrBuilder.getStringToInt32FieldMap().size());
assertEquals(0, testMapOrBuilder.getStringToInt32FieldCount());
}
@@ -296,13 +296,13 @@ public class MapTest extends TestCase {
}
private void assertMapsAreImmutable(TestMapOrBuilder testMapOrBuilder) {
assertImmutable(testMapOrBuilder.getInt32ToInt32Field(), 1, 2);
assertImmutable(testMapOrBuilder.getInt32ToStringField(), 1, "2");
assertImmutable(testMapOrBuilder.getInt32ToBytesField(), 1, TestUtil.toBytes("2"));
assertImmutable(testMapOrBuilder.getInt32ToEnumField(), 1, TestMap.EnumValue.FOO);
assertImmutable(testMapOrBuilder.getInt32ToInt32FieldMap(), 1, 2);
assertImmutable(testMapOrBuilder.getInt32ToStringFieldMap(), 1, "2");
assertImmutable(testMapOrBuilder.getInt32ToBytesFieldMap(), 1, TestUtil.toBytes("2"));
assertImmutable(testMapOrBuilder.getInt32ToEnumFieldMap(), 1, TestMap.EnumValue.FOO);
assertImmutable(
testMapOrBuilder.getInt32ToMessageField(), 1, MessageValue.getDefaultInstance());
assertImmutable(testMapOrBuilder.getStringToInt32Field(), "1", 2);
testMapOrBuilder.getInt32ToMessageFieldMap(), 1, MessageValue.getDefaultInstance());
assertImmutable(testMapOrBuilder.getStringToInt32FieldMap(), "1", 2);
}
private <K, V> void assertImmutable(Map<K, V> map, K key, V value) {

@@ -260,8 +260,8 @@ public final class TestUtil {
TestRequired.newBuilder().setA(1).setB(2).setC(3).build();
/** Helper to convert a String to ByteString. */
static ByteString toBytes(String str) {
return ByteString.copyFrom(str.getBytes(Internal.UTF_8));
public static ByteString toBytes(String str) {
return ByteString.copyFromUtf8(str);
}
// BEGIN FULL-RUNTIME

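The toBytes() helper in TestUtil above and its twin in TestUtilLite below both become public and switch to ByteString.copyFromUtf8(), which encodes the string straight into the ByteString instead of going through a temporary byte[]. The two forms are byte-for-byte equivalent; StandardCharsets.UTF_8 stands in here for protobuf's Internal.UTF_8 constant:

```java
import com.google.protobuf.ByteString;
import java.nio.charset.StandardCharsets;

public class ToBytesDemo {
  public static void main(String[] args) {
    String str = "11";
    // Old form: encode into a temporary byte[] first.
    ByteString before = ByteString.copyFrom(str.getBytes(StandardCharsets.UTF_8));
    // New form: encode directly.
    ByteString after = ByteString.copyFromUtf8(str);
    System.out.println(before.equals(after)); // true: identical contents
  }
}
```
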
@@ -142,8 +142,8 @@ public final class TestUtilLite {
private TestUtilLite() {}
/** Helper to convert a String to ByteString. */
static ByteString toBytes(String str) {
return ByteString.copyFrom(str.getBytes(Internal.UTF_8));
public static ByteString toBytes(String str) {
return ByteString.copyFromUtf8(str);
}
/**

@@ -44,6 +44,7 @@ import static com.google.protobuf.util.Timestamps.NANOS_PER_SECOND;
import com.google.errorprone.annotations.CanIgnoreReturnValue;
import com.google.protobuf.Duration;
import java.io.Serializable;
import java.text.ParseException;
import java.util.Comparator;
@@ -72,23 +73,25 @@ public final class Durations {
private Durations() {}
private static final Comparator<Duration> COMPARATOR =
new Comparator<Duration>() {
@Override
public int compare(Duration d1, Duration d2) {
checkValid(d1);
checkValid(d2);
int secDiff = Long.compare(d1.getSeconds(), d2.getSeconds());
return (secDiff != 0) ? secDiff : Integer.compare(d1.getNanos(), d2.getNanos());
}
};
private static enum DurationComparator implements Comparator<Duration>, Serializable {
INSTANCE;
@Override
public int compare(Duration d1, Duration d2) {
checkValid(d1);
checkValid(d2);
int secDiff = Long.compare(d1.getSeconds(), d2.getSeconds());
return (secDiff != 0) ? secDiff : Integer.compare(d1.getNanos(), d2.getNanos());
}
}
/**
* Returns a {@link Comparator} for {@link Duration}s which sorts in increasing chronological
* order. Nulls and invalid {@link Duration}s are not allowed (see {@link #isValid}).
* order. Nulls and invalid {@link Duration}s are not allowed (see {@link #isValid}). The returned
* comparator is serializable.
*/
public static Comparator<Duration> comparator() {
return COMPARATOR;
return DurationComparator.INSTANCE;
}
/**
@@ -99,7 +102,7 @@ public final class Durations {
* and a value greater than {@code 0} if {@code x > y}
*/
public static int compare(Duration x, Duration y) {
return COMPARATOR.compare(x, y);
return DurationComparator.INSTANCE.compare(x, y);
}
/**

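Replacing the anonymous Comparator in Durations above with a single-element enum is the standard Java recipe for a serializable singleton: enum constants serialize by name and deserialize back to the same instance, which is what lets the javadoc now promise that the returned comparator is serializable. The pattern in isolation, with standalone types rather than protobuf's:

```java
import java.io.Serializable;
import java.util.Comparator;

// One constant, serializable through enum semantics; deserializing it is
// guaranteed to yield this same INSTANCE, so identity checks keep working.
enum LengthComparator implements Comparator<String>, Serializable {
  INSTANCE;

  @Override
  public int compare(String a, String b) {
    return Integer.compare(a.length(), b.length());
  }
}
```

(Enums are implicitly Serializable; declaring the interface explicitly, as the Durations and Timestamps changes do, just documents the intent.)
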
@@ -39,6 +39,7 @@ import static com.google.common.math.LongMath.checkedSubtract;
import com.google.errorprone.annotations.CanIgnoreReturnValue;
import com.google.protobuf.Duration;
import com.google.protobuf.Timestamp;
import java.io.Serializable;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Comparator;
@@ -100,24 +101,25 @@ public final class Timestamps {
private Timestamps() {}
private static final Comparator<Timestamp> COMPARATOR =
new Comparator<Timestamp>() {
@Override
public int compare(Timestamp t1, Timestamp t2) {
checkValid(t1);
checkValid(t2);
int secDiff = Long.compare(t1.getSeconds(), t2.getSeconds());
return (secDiff != 0) ? secDiff : Integer.compare(t1.getNanos(), t2.getNanos());
}
};
private static enum TimestampComparator implements Comparator<Timestamp>, Serializable {
INSTANCE;
@Override
public int compare(Timestamp t1, Timestamp t2) {
checkValid(t1);
checkValid(t2);
int secDiff = Long.compare(t1.getSeconds(), t2.getSeconds());
return (secDiff != 0) ? secDiff : Integer.compare(t1.getNanos(), t2.getNanos());
}
}
/**
* Returns a {@link Comparator} for {@link Timestamp Timestamps} which sorts in increasing
* chronological order. Nulls and invalid {@link Timestamp Timestamps} are not allowed (see
* {@link #isValid}).
* {@link #isValid}). The returned comparator is serializable.
*/
public static Comparator<Timestamp> comparator() {
return COMPARATOR;
return TimestampComparator.INSTANCE;
}
/**
@@ -128,7 +130,7 @@ public final class Timestamps {
* and a value greater than {@code 0} if {@code x > y}
*/
public static int compare(Timestamp x, Timestamp y) {
return COMPARATOR.compare(x, y);
return TimestampComparator.INSTANCE.compare(x, y);
}
/**

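The practical payoff of the Timestamps change above mirrors the Durations one: collections that capture a comparator, such as TreeSet or TreeMap, are only serializable if the comparator is. A quick usage sketch:

```java
import com.google.protobuf.Timestamp;
import com.google.protobuf.util.Timestamps;
import java.util.TreeSet;

public class SerializableComparatorDemo {
  public static void main(String[] args) {
    TreeSet<Timestamp> ordered = new TreeSet<>(Timestamps.comparator());
    ordered.add(Timestamps.fromMillis(10_000));
    ordered.add(Timestamps.fromMillis(5_000));
    System.out.println(Timestamps.toString(ordered.first())); // the 5 s timestamp

    // With the old anonymous-class comparator, writing `ordered` to an
    // ObjectOutputStream failed with NotSerializableException; the
    // enum-backed comparator round-trips cleanly.
  }
}
```
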
@@ -231,6 +231,9 @@ class BaseContainer(object):
kwargs['cmp'] = kwargs.pop('sort_function')
self._values.sort(*args, **kwargs)
def reverse(self):
self._values.reverse()
collections_abc.MutableSequence.register(BaseContainer)

@@ -1311,6 +1311,38 @@ class Proto2ReflectionTest(unittest.TestCase):
# Remove a non-existent element.
self.assertRaises(ValueError, proto.repeated_int32.remove, 123)
def testRepeatedScalarsReverse_Empty(self):
proto = unittest_pb2.TestAllTypes()
self.assertFalse(proto.repeated_int32)
self.assertEqual(0, len(proto.repeated_int32))
self.assertIsNone(proto.repeated_int32.reverse())
self.assertFalse(proto.repeated_int32)
self.assertEqual(0, len(proto.repeated_int32))
def testRepeatedScalarsReverse_NonEmpty(self):
proto = unittest_pb2.TestAllTypes()
self.assertFalse(proto.repeated_int32)
self.assertEqual(0, len(proto.repeated_int32))
proto.repeated_int32.append(1)
proto.repeated_int32.append(2)
proto.repeated_int32.append(3)
proto.repeated_int32.append(4)
self.assertEqual(4, len(proto.repeated_int32))
self.assertIsNone(proto.repeated_int32.reverse())
self.assertEqual(4, len(proto.repeated_int32))
self.assertEqual(4, proto.repeated_int32[0])
self.assertEqual(3, proto.repeated_int32[1])
self.assertEqual(2, proto.repeated_int32[2])
self.assertEqual(1, proto.repeated_int32[3])
def testRepeatedComposites(self):
proto = unittest_pb2.TestAllTypes()
self.assertFalse(proto.repeated_nested_message)
@@ -1423,6 +1455,35 @@ class Proto2ReflectionTest(unittest.TestCase):
self.assertEqual(1, len(proto.repeated_nested_message))
self.assertEqual(m1, proto.repeated_nested_message[0])
def testRepeatedCompositeReverse_Empty(self):
proto = unittest_pb2.TestAllTypes()
self.assertFalse(proto.repeated_nested_message)
self.assertEqual(0, len(proto.repeated_nested_message))
self.assertIsNone(proto.repeated_nested_message.reverse())
self.assertFalse(proto.repeated_nested_message)
self.assertEqual(0, len(proto.repeated_nested_message))
def testRepeatedCompositeReverse_NonEmpty(self):
proto = unittest_pb2.TestAllTypes()
self.assertFalse(proto.repeated_nested_message)
self.assertEqual(0, len(proto.repeated_nested_message))
m0 = proto.repeated_nested_message.add()
m0.bb = len(proto.repeated_nested_message)
m1 = proto.repeated_nested_message.add()
m1.bb = len(proto.repeated_nested_message)
m2 = proto.repeated_nested_message.add()
m2.bb = len(proto.repeated_nested_message)
self.assertListsEqual([m0, m1, m2], proto.repeated_nested_message)
self.assertIsNone(proto.repeated_nested_message.reverse())
self.assertListsEqual([m2, m1, m0], proto.repeated_nested_message)
def testHandWrittenReflection(self):
# Hand written extensions are only supported by the pure-Python
# implementation of the API.
@@ -3061,10 +3122,10 @@ class SerializationTest(unittest.TestCase):
unittest_pb2.ForeignMessage.c.__get__(msg)
except TypeError:
pass # The cpp implementation cannot mix fields from other messages.
# This test exercises a specific check that avoids a crash.
# This test exercises a specific check that avoids a crash.
else:
pass # The python implementation allows fields from other messages.
# This is useless, but works.
# This is useless, but works.
def testInitKwargs(self):
proto = unittest_pb2.TestAllTypes(

@@ -77,8 +77,7 @@ static Py_ssize_t Length(PyObject* pself) {
PyObject* Add(RepeatedCompositeContainer* self, PyObject* args,
PyObject* kwargs) {
if (cmessage::AssureWritable(self->parent) == -1)
return NULL;
if (cmessage::AssureWritable(self->parent) == -1) return nullptr;
Message* message = self->parent->message;
Message* sub_message =
@@ -93,7 +92,7 @@ PyObject* Add(RepeatedCompositeContainer* self, PyObject* args,
message->GetReflection()->RemoveLast(
message, self->parent_field_descriptor);
Py_DECREF(cmsg);
return NULL;
return nullptr;
}
return cmsg->AsPyObject();
@@ -172,28 +171,28 @@ static PyObject* Insert(PyObject* pself, PyObject* args) {
PyObject* Extend(RepeatedCompositeContainer* self, PyObject* value) {
cmessage::AssureWritable(self->parent);
ScopedPyObjectPtr iter(PyObject_GetIter(value));
if (iter == NULL) {
if (iter == nullptr) {
PyErr_SetString(PyExc_TypeError, "Value must be iterable");
return NULL;
return nullptr;
}
ScopedPyObjectPtr next;
while ((next.reset(PyIter_Next(iter.get()))) != NULL) {
while ((next.reset(PyIter_Next(iter.get()))) != nullptr) {
if (!PyObject_TypeCheck(next.get(), CMessage_Type)) {
PyErr_SetString(PyExc_TypeError, "Not a cmessage");
return NULL;
return nullptr;
}
ScopedPyObjectPtr new_message(Add(self, NULL, NULL));
if (new_message == NULL) {
return NULL;
ScopedPyObjectPtr new_message(Add(self, nullptr, nullptr));
if (new_message == nullptr) {
return nullptr;
}
CMessage* new_cmessage = reinterpret_cast<CMessage*>(new_message.get());
if (ScopedPyObjectPtr(cmessage::MergeFrom(new_cmessage, next.get())) ==
NULL) {
return NULL;
nullptr) {
return nullptr;
}
}
if (PyErr_Occurred()) {
return NULL;
return nullptr;
}
Py_RETURN_NONE;
}
@@ -220,7 +219,7 @@ static PyObject* GetItem(RepeatedCompositeContainer* self, Py_ssize_t index,
}
if (index < 0 || index >= length) {
PyErr_Format(PyExc_IndexError, "list index (%zd) out of range", index);
return NULL;
return nullptr;
}
Message* message = self->parent->message;
Message* sub_message = message->GetReflection()->MutableRepeatedMessage(
@@ -240,7 +239,7 @@ PyObject* Subscript(RepeatedCompositeContainer* self, PyObject* item) {
if (PyIndex_Check(item)) {
Py_ssize_t index;
index = PyNumber_AsSsize_t(item, PyExc_IndexError);
if (index == -1 && PyErr_Occurred()) return NULL;
if (index == -1 && PyErr_Occurred()) return nullptr;
if (index < 0) index += length;
return GetItem(self, index, length);
} else if (PySlice_Check(item)) {
@@ -254,14 +253,14 @@ PyObject* Subscript(RepeatedCompositeContainer* self, PyObject* item) {
if (PySlice_GetIndicesEx(reinterpret_cast<PySliceObject*>(item),
length, &from, &to, &step, &slicelength) == -1) {
#endif
return NULL;
return nullptr;
}
if (slicelength <= 0) {
return PyList_New(0);
} else {
result = PyList_New(slicelength);
if (!result) return NULL;
if (!result) return nullptr;
for (cur = from, i = 0; i < slicelength; cur += step, i++) {
PyList_SET_ITEM(result, i, GetItem(self, cur, length));
@@ -272,7 +271,7 @@ PyObject* Subscript(RepeatedCompositeContainer* self, PyObject* item) {
} else {
PyErr_Format(PyExc_TypeError, "indices must be integers, not %.200s",
item->ob_type->tp_name);
return NULL;
return nullptr;
}
}
@@ -283,7 +282,7 @@ static PyObject* SubscriptMethod(PyObject* self, PyObject* slice) {
int AssignSubscript(RepeatedCompositeContainer* self,
PyObject* slice,
PyObject* value) {
if (value != NULL) {
if (value != nullptr) {
PyErr_SetString(PyExc_TypeError, "does not support assignment");
return -1;
}
@@ -305,23 +304,23 @@ static PyObject* Remove(PyObject* pself, PyObject* value) {
for (Py_ssize_t i = 0; i < len; i++) {
ScopedPyObjectPtr item(GetItem(self, i, len));
if (item == NULL) {
return NULL;
if (item == nullptr) {
return nullptr;
}
int result = PyObject_RichCompareBool(item.get(), value, Py_EQ);
if (result < 0) {
return NULL;
return nullptr;
}
if (result) {
ScopedPyObjectPtr py_index(PyLong_FromSsize_t(i));
if (AssignSubscript(self, py_index.get(), NULL) < 0) {
return NULL;
if (AssignSubscript(self, py_index.get(), nullptr) < 0) {
return nullptr;
}
Py_RETURN_NONE;
}
}
PyErr_SetString(PyExc_ValueError, "Item to delete not in list");
return NULL;
return nullptr;
}
static PyObject* RichCompare(PyObject* pself, PyObject* other, int opid) {
@@ -332,23 +331,23 @@ static PyObject* RichCompare(PyObject* pself, PyObject* other, int opid) {
PyErr_SetString(PyExc_TypeError,
"Can only compare repeated composite fields "
"against other repeated composite fields.");
return NULL;
return nullptr;
}
if (opid == Py_EQ || opid == Py_NE) {
// TODO(anuraag): Don't make new lists just for this...
ScopedPyObjectPtr full_slice(PySlice_New(NULL, NULL, NULL));
if (full_slice == NULL) {
return NULL;
ScopedPyObjectPtr full_slice(PySlice_New(nullptr, nullptr, nullptr));
if (full_slice == nullptr) {
return nullptr;
}
ScopedPyObjectPtr list(Subscript(self, full_slice.get()));
if (list == NULL) {
return NULL;
if (list == nullptr) {
return nullptr;
}
ScopedPyObjectPtr other_list(
Subscript(reinterpret_cast<RepeatedCompositeContainer*>(other),
full_slice.get()));
if (other_list == NULL) {
return NULL;
if (other_list == nullptr) {
return nullptr;
}
return PyObject_RichCompare(list.get(), other_list.get(), opid);
} else {
@@ -358,14 +357,14 @@ static PyObject* RichCompare(PyObject* pself, PyObject* other, int opid) {
}
static PyObject* ToStr(PyObject* pself) {
ScopedPyObjectPtr full_slice(PySlice_New(NULL, NULL, NULL));
if (full_slice == NULL) {
return NULL;
ScopedPyObjectPtr full_slice(PySlice_New(nullptr, nullptr, nullptr));
if (full_slice == nullptr) {
return nullptr;
}
ScopedPyObjectPtr list(Subscript(
reinterpret_cast<RepeatedCompositeContainer*>(pself), full_slice.get()));
if (list == NULL) {
return NULL;
if (list == nullptr) {
return nullptr;
}
return PyObject_Repr(list.get());
}
@@ -400,13 +399,12 @@ static int SortPythonMessages(RepeatedCompositeContainer* self,
PyObject* kwds) {
ScopedPyObjectPtr child_list(
PySequence_List(reinterpret_cast<PyObject*>(self)));
if (child_list == NULL) {
if (child_list == nullptr) {
return -1;
}
ScopedPyObjectPtr m(PyObject_GetAttrString(child_list.get(), "sort"));
if (m == NULL)
return -1;
if (ScopedPyObjectPtr(PyObject_Call(m.get(), args, kwds)) == NULL)
if (m == nullptr) return -1;
if (ScopedPyObjectPtr(PyObject_Call(m.get(), args, kwds)) == nullptr)
return -1;
ReorderAttached(self, child_list.get());
return 0;
@@ -418,9 +416,9 @@ static PyObject* Sort(PyObject* pself, PyObject* args, PyObject* kwds) {
// Support the old sort_function argument for backwards
// compatibility.
if (kwds != NULL) {
if (kwds != nullptr) {
PyObject* sort_func = PyDict_GetItemString(kwds, "sort_function");
if (sort_func != NULL) {
if (sort_func != nullptr) {
// Must set before deleting as sort_func is a borrowed reference
// and kwds might be the only thing keeping it alive.
PyDict_SetItemString(kwds, "cmp", sort_func);
@@ -429,7 +427,35 @@ static PyObject* Sort(PyObject* pself, PyObject* args, PyObject* kwds) {
}
if (SortPythonMessages(self, args, kwds) < 0) {
return NULL;
return nullptr;
}
Py_RETURN_NONE;
}
// ---------------------------------------------------------------------
// reverse()
// Returns 0 if successful; returns -1 and sets an exception if
// unsuccessful.
static int ReversePythonMessages(RepeatedCompositeContainer* self) {
ScopedPyObjectPtr child_list(
PySequence_List(reinterpret_cast<PyObject*>(self)));
if (child_list == nullptr) {
return -1;
}
if (ScopedPyObjectPtr(
PyObject_CallMethod(child_list.get(), "reverse", nullptr)) == nullptr)
return -1;
ReorderAttached(self, child_list.get());
return 0;
}
static PyObject* Reverse(PyObject* pself) {
RepeatedCompositeContainer* self =
reinterpret_cast<RepeatedCompositeContainer*>(pself);
if (ReversePythonMessages(self) < 0) {
return nullptr;
}
Py_RETURN_NONE;
}
@@ -448,17 +474,17 @@ static PyObject* Pop(PyObject* pself, PyObject* args) {
Py_ssize_t index = -1;
if (!PyArg_ParseTuple(args, "|n", &index)) {
return NULL;
return nullptr;
}
Py_ssize_t length = Length(pself);
if (index < 0) index += length;
PyObject* item = GetItem(self, index, length);
if (item == NULL) {
return NULL;
if (item == nullptr) {
return nullptr;
}
ScopedPyObjectPtr py_index(PyLong_FromSsize_t(index));
if (AssignSubscript(self, py_index.get(), NULL) < 0) {
return NULL;
if (AssignSubscript(self, py_index.get(), nullptr) < 0) {
return nullptr;
}
return item;
}
@@ -473,14 +499,14 @@ RepeatedCompositeContainer *NewContainer(
const FieldDescriptor* parent_field_descriptor,
CMessageClass* child_message_class) {
if (!CheckFieldBelongsToMessage(parent_field_descriptor, parent->message)) {
return NULL;
return nullptr;
}
RepeatedCompositeContainer* self =
reinterpret_cast<RepeatedCompositeContainer*>(
PyType_GenericAlloc(&RepeatedCompositeContainer_Type, 0));
if (self == NULL) {
return NULL;
if (self == nullptr) {
return nullptr;
}
Py_INCREF(parent);
@@ -500,10 +526,10 @@ static void Dealloc(PyObject* pself) {
}
static PySequenceMethods SqMethods = {
Length, /* sq_length */
0, /* sq_concat */
0, /* sq_repeat */
Item /* sq_item */
Length, /* sq_length */
nullptr, /* sq_concat */
nullptr, /* sq_repeat */
Item /* sq_item */
};
static PyMappingMethods MpMethods = {
@@ -513,66 +539,65 @@ static PyMappingMethods MpMethods = {
};
static PyMethodDef Methods[] = {
{ "__deepcopy__", DeepCopy, METH_VARARGS,
"Makes a deep copy of the class." },
{ "add", (PyCFunction)AddMethod, METH_VARARGS | METH_KEYWORDS,
"Adds an object to the repeated container." },
{ "append", AppendMethod, METH_O,
"Appends a message to the end of the repeated container."},
{ "insert", Insert, METH_VARARGS,
"Inserts a message before the specified index." },
{ "extend", ExtendMethod, METH_O,
"Adds objects to the repeated container." },
{ "pop", Pop, METH_VARARGS,
"Removes an object from the repeated container and returns it." },
{ "remove", Remove, METH_O,
"Removes an object from the repeated container." },
{ "sort", (PyCFunction)Sort, METH_VARARGS | METH_KEYWORDS,
"Sorts the repeated container." },
{ "MergeFrom", MergeFromMethod, METH_O,
"Adds objects to the repeated container." },
{ NULL, NULL }
};
{"__deepcopy__", DeepCopy, METH_VARARGS, "Makes a deep copy of the class."},
{"add", reinterpret_cast<PyCFunction>(AddMethod),
METH_VARARGS | METH_KEYWORDS, "Adds an object to the repeated container."},
{"append", AppendMethod, METH_O,
"Appends a message to the end of the repeated container."},
{"insert", Insert, METH_VARARGS,
"Inserts a message before the specified index."},
{"extend", ExtendMethod, METH_O, "Adds objects to the repeated container."},
{"pop", Pop, METH_VARARGS,
"Removes an object from the repeated container and returns it."},
{"remove", Remove, METH_O,
"Removes an object from the repeated container."},
{"sort", reinterpret_cast<PyCFunction>(Sort), METH_VARARGS | METH_KEYWORDS,
"Sorts the repeated container."},
{"reverse", reinterpret_cast<PyCFunction>(Reverse), METH_NOARGS,
"Reverses elements order of the repeated container."},
{"MergeFrom", MergeFromMethod, METH_O,
"Adds objects to the repeated container."},
{nullptr, nullptr}};
} // namespace repeated_composite_container
PyTypeObject RepeatedCompositeContainer_Type = {
PyVarObject_HEAD_INIT(&PyType_Type, 0)
FULL_MODULE_NAME ".RepeatedCompositeContainer", // tp_name
sizeof(RepeatedCompositeContainer), // tp_basicsize
0, // tp_itemsize
repeated_composite_container::Dealloc, // tp_dealloc
0, // tp_print
0, // tp_getattr
0, // tp_setattr
0, // tp_compare
repeated_composite_container::ToStr, // tp_repr
0, // tp_as_number
&repeated_composite_container::SqMethods, // tp_as_sequence
&repeated_composite_container::MpMethods, // tp_as_mapping
PyObject_HashNotImplemented, // tp_hash
0, // tp_call
0, // tp_str
0, // tp_getattro
0, // tp_setattro
0, // tp_as_buffer
Py_TPFLAGS_DEFAULT, // tp_flags
"A Repeated scalar container", // tp_doc
0, // tp_traverse
0, // tp_clear
repeated_composite_container::RichCompare, // tp_richcompare
0, // tp_weaklistoffset
0, // tp_iter
0, // tp_iternext
repeated_composite_container::Methods, // tp_methods
0, // tp_members
0, // tp_getset
0, // tp_base
0, // tp_dict
0, // tp_descr_get
0, // tp_descr_set
0, // tp_dictoffset
0, // tp_init
PyVarObject_HEAD_INIT(&PyType_Type, 0) FULL_MODULE_NAME
".RepeatedCompositeContainer", // tp_name
sizeof(RepeatedCompositeContainer), // tp_basicsize
0, // tp_itemsize
repeated_composite_container::Dealloc, // tp_dealloc
nullptr, // tp_print
nullptr, // tp_getattr
nullptr, // tp_setattr
nullptr, // tp_compare
repeated_composite_container::ToStr, // tp_repr
nullptr, // tp_as_number
&repeated_composite_container::SqMethods, // tp_as_sequence
&repeated_composite_container::MpMethods, // tp_as_mapping
PyObject_HashNotImplemented, // tp_hash
nullptr, // tp_call
nullptr, // tp_str
nullptr, // tp_getattro
nullptr, // tp_setattro
nullptr, // tp_as_buffer
Py_TPFLAGS_DEFAULT, // tp_flags
"A Repeated scalar container", // tp_doc
nullptr, // tp_traverse
nullptr, // tp_clear
repeated_composite_container::RichCompare, // tp_richcompare
0, // tp_weaklistoffset
nullptr, // tp_iter
nullptr, // tp_iternext
repeated_composite_container::Methods, // tp_methods
nullptr, // tp_members
nullptr, // tp_getset
nullptr, // tp_base
nullptr, // tp_dict
nullptr, // tp_descr_get
nullptr, // tp_descr_set
0, // tp_dictoffset
nullptr, // tp_init
};
} // namespace python

@@ -46,13 +46,13 @@
#include <google/protobuf/pyext/scoped_pyobject_ptr.h>
#if PY_MAJOR_VERSION >= 3
#define PyInt_FromLong PyLong_FromLong
#if PY_VERSION_HEX < 0x03030000
#error "Python 3.0 - 3.2 are not supported."
#else
#define PyString_AsString(ob) \
(PyUnicode_Check(ob)? PyUnicode_AsUTF8(ob): PyBytes_AsString(ob))
#endif
#define PyInt_FromLong PyLong_FromLong
#if PY_VERSION_HEX < 0x03030000
#error "Python 3.0 - 3.2 are not supported."
#else
#define PyString_AsString(ob) \
(PyUnicode_Check(ob) ? PyUnicode_AsUTF8(ob) : PyBytes_AsString(ob))
#endif
#endif
namespace google {
@@ -61,13 +61,13 @@ namespace python {
namespace repeated_scalar_container {
static int InternalAssignRepeatedField(
RepeatedScalarContainer* self, PyObject* list) {
static int InternalAssignRepeatedField(RepeatedScalarContainer* self,
PyObject* list) {
Message* message = self->parent->message;
message->GetReflection()->ClearField(message, self->parent_field_descriptor);
for (Py_ssize_t i = 0; i < PyList_GET_SIZE(list); ++i) {
PyObject* value = PyList_GET_ITEM(list, i);
if (ScopedPyObjectPtr(Append(self, value)) == NULL) {
if (ScopedPyObjectPtr(Append(self, value)) == nullptr) {
return -1;
}
}
@@ -96,13 +96,12 @@ static int AssignItem(PyObject* pself, Py_ssize_t index, PyObject* arg) {
index = field_size + index;
}
if (index < 0 || index >= field_size) {
PyErr_Format(PyExc_IndexError,
"list assignment index (%d) out of range",
PyErr_Format(PyExc_IndexError, "list assignment index (%d) out of range",
static_cast<int>(index));
return -1;
}
if (arg == NULL) {
if (arg == nullptr) {
ScopedPyObjectPtr py_index(PyLong_FromLong(index));
return cmessage::DeleteRepeatedField(self->parent, field_descriptor,
py_index.get());
@@ -150,8 +149,8 @@ static int AssignItem(PyObject* pself, Py_ssize_t index, PyObject* arg) {
break;
}
case FieldDescriptor::CPPTYPE_STRING: {
if (!CheckAndSetString(
arg, message, field_descriptor, reflection, false, index)) {
if (!CheckAndSetString(arg, message, field_descriptor, reflection, false,
index)) {
return -1;
}
break;
@@ -165,12 +164,12 @@ static int AssignItem(PyObject* pself, Py_ssize_t index, PyObject* arg) {
const EnumDescriptor* enum_descriptor = field_descriptor->enum_type();
const EnumValueDescriptor* enum_value =
enum_descriptor->FindValueByNumber(value);
if (enum_value != NULL) {
if (enum_value != nullptr) {
reflection->SetRepeatedEnum(message, field_descriptor, index,
enum_value);
} else {
ScopedPyObjectPtr s(PyObject_Str(arg));
if (s != NULL) {
if (s != nullptr) {
PyErr_Format(PyExc_ValueError, "Unknown enum value: %s",
PyString_AsString(s.get()));
}
@@ -180,9 +179,9 @@ static int AssignItem(PyObject* pself, Py_ssize_t index, PyObject* arg) {
break;
}
default:
PyErr_Format(
PyExc_SystemError, "Adding value to a field of unknown type %d",
field_descriptor->cpp_type());
PyErr_Format(PyExc_SystemError,
"Adding value to a field of unknown type %d",
field_descriptor->cpp_type());
return -1;
}
return 0;
@@ -201,60 +200,58 @@ static PyObject* Item(PyObject* pself, Py_ssize_t index) {
index = field_size + index;
}
if (index < 0 || index >= field_size) {
PyErr_Format(PyExc_IndexError,
"list index (%zd) out of range",
index);
return NULL;
PyErr_Format(PyExc_IndexError, "list index (%zd) out of range", index);
return nullptr;
}
PyObject* result = NULL;
PyObject* result = nullptr;
switch (field_descriptor->cpp_type()) {
case FieldDescriptor::CPPTYPE_INT32: {
int32 value = reflection->GetRepeatedInt32(
*message, field_descriptor, index);
int32 value =
reflection->GetRepeatedInt32(*message, field_descriptor, index);
result = PyInt_FromLong(value);
break;
}
case FieldDescriptor::CPPTYPE_INT64: {
int64 value = reflection->GetRepeatedInt64(
*message, field_descriptor, index);
int64 value =
reflection->GetRepeatedInt64(*message, field_descriptor, index);
result = PyLong_FromLongLong(value);
break;
}
case FieldDescriptor::CPPTYPE_UINT32: {
uint32 value = reflection->GetRepeatedUInt32(
*message, field_descriptor, index);
uint32 value =
reflection->GetRepeatedUInt32(*message, field_descriptor, index);
result = PyLong_FromLongLong(value);
break;
}
case FieldDescriptor::CPPTYPE_UINT64: {
uint64 value = reflection->GetRepeatedUInt64(
*message, field_descriptor, index);
uint64 value =
reflection->GetRepeatedUInt64(*message, field_descriptor, index);
result = PyLong_FromUnsignedLongLong(value);
break;
}
case FieldDescriptor::CPPTYPE_FLOAT: {
float value = reflection->GetRepeatedFloat(
*message, field_descriptor, index);
float value =
reflection->GetRepeatedFloat(*message, field_descriptor, index);
result = PyFloat_FromDouble(value);
break;
}
case FieldDescriptor::CPPTYPE_DOUBLE: {
double value = reflection->GetRepeatedDouble(
*message, field_descriptor, index);
double value =
reflection->GetRepeatedDouble(*message, field_descriptor, index);
result = PyFloat_FromDouble(value);
break;
}
case FieldDescriptor::CPPTYPE_BOOL: {
bool value = reflection->GetRepeatedBool(
*message, field_descriptor, index);
bool value =
reflection->GetRepeatedBool(*message, field_descriptor, index);
result = PyBool_FromLong(value ? 1 : 0);
break;
}
case FieldDescriptor::CPPTYPE_ENUM: {
const EnumValueDescriptor* enum_value =
message->GetReflection()->GetRepeatedEnum(
*message, field_descriptor, index);
message->GetReflection()->GetRepeatedEnum(*message, field_descriptor,
index);
result = PyInt_FromLong(enum_value->number());
break;
}
@@ -266,10 +263,9 @@ static PyObject* Item(PyObject* pself, Py_ssize_t index) {
break;
}
default:
PyErr_Format(
PyExc_SystemError,
"Getting value from a repeated field of unknown type %d",
field_descriptor->cpp_type());
PyErr_Format(PyExc_SystemError,
"Getting value from a repeated field of unknown type %d",
field_descriptor->cpp_type());
}
return result;
@@ -287,23 +283,23 @@ static PyObject* Subscript(PyObject* pself, PyObject* slice) {
from = to = PyInt_AsLong(slice);
} else // NOLINT
#endif
if (PyLong_Check(slice)) {
if (PyLong_Check(slice)) {
from = to = PyLong_AsLong(slice);
} else if (PySlice_Check(slice)) {
length = Len(pself);
#if PY_MAJOR_VERSION >= 3
if (PySlice_GetIndicesEx(slice,
length, &from, &to, &step, &slicelength) == -1) {
if (PySlice_GetIndicesEx(slice, length, &from, &to, &step, &slicelength) ==
-1) {
#else
if (PySlice_GetIndicesEx(reinterpret_cast<PySliceObject*>(slice),
length, &from, &to, &step, &slicelength) == -1) {
if (PySlice_GetIndicesEx(reinterpret_cast<PySliceObject*>(slice), length,
&from, &to, &step, &slicelength) == -1) {
#endif
return NULL;
return nullptr;
}
return_list = true;
} else {
PyErr_SetString(PyExc_TypeError, "list indices must be integers");
return NULL;
return nullptr;
}
if (!return_list) {
@@ -311,8 +307,8 @@ static PyObject* Subscript(PyObject* pself, PyObject* slice) {
}
PyObject* list = PyList_New(0);
if (list == NULL) {
return NULL;
if (list == nullptr) {
return nullptr;
}
if (from <= to) {
if (step < 0) {
@@ -348,73 +344,73 @@ PyObject* Append(RepeatedScalarContainer* self, PyObject* item) {
const Reflection* reflection = message->GetReflection();
switch (field_descriptor->cpp_type()) {
case FieldDescriptor::CPPTYPE_INT32: {
GOOGLE_CHECK_GET_INT32(item, value, NULL);
GOOGLE_CHECK_GET_INT32(item, value, nullptr);
reflection->AddInt32(message, field_descriptor, value);
break;
}
case FieldDescriptor::CPPTYPE_INT64: {
GOOGLE_CHECK_GET_INT64(item, value, NULL);
GOOGLE_CHECK_GET_INT64(item, value, nullptr);
reflection->AddInt64(message, field_descriptor, value);
break;
}
case FieldDescriptor::CPPTYPE_UINT32: {
GOOGLE_CHECK_GET_UINT32(item, value, NULL);
GOOGLE_CHECK_GET_UINT32(item, value, nullptr);
reflection->AddUInt32(message, field_descriptor, value);
break;
}
case FieldDescriptor::CPPTYPE_UINT64: {
GOOGLE_CHECK_GET_UINT64(item, value, NULL);
GOOGLE_CHECK_GET_UINT64(item, value, nullptr);
reflection->AddUInt64(message, field_descriptor, value);
break;
}
case FieldDescriptor::CPPTYPE_FLOAT: {
GOOGLE_CHECK_GET_FLOAT(item, value, NULL);
GOOGLE_CHECK_GET_FLOAT(item, value, nullptr);
reflection->AddFloat(message, field_descriptor, value);
break;
}
case FieldDescriptor::CPPTYPE_DOUBLE: {
GOOGLE_CHECK_GET_DOUBLE(item, value, NULL);
GOOGLE_CHECK_GET_DOUBLE(item, value, nullptr);
reflection->AddDouble(message, field_descriptor, value);
break;
}
case FieldDescriptor::CPPTYPE_BOOL: {
GOOGLE_CHECK_GET_BOOL(item, value, NULL);
GOOGLE_CHECK_GET_BOOL(item, value, nullptr);
reflection->AddBool(message, field_descriptor, value);
break;
}
case FieldDescriptor::CPPTYPE_STRING: {
if (!CheckAndSetString(
item, message, field_descriptor, reflection, true, -1)) {
return NULL;
if (!CheckAndSetString(item, message, field_descriptor, reflection, true,
-1)) {
return nullptr;
}
break;
}
case FieldDescriptor::CPPTYPE_ENUM: {
GOOGLE_CHECK_GET_INT32(item, value, NULL);
GOOGLE_CHECK_GET_INT32(item, value, nullptr);
if (reflection->SupportsUnknownEnumValues()) {
reflection->AddEnumValue(message, field_descriptor, value);
} else {
const EnumDescriptor* enum_descriptor = field_descriptor->enum_type();
const EnumValueDescriptor* enum_value =
enum_descriptor->FindValueByNumber(value);
if (enum_value != nullptr) {
reflection->AddEnum(message, field_descriptor, enum_value);
} else {
ScopedPyObjectPtr s(PyObject_Str(item));
if (s != nullptr) {
PyErr_Format(PyExc_ValueError, "Unknown enum value: %s",
PyString_AsString(s.get()));
}
return nullptr;
}
}
break;
}
default:
PyErr_Format(PyExc_SystemError,
"Adding value to a field of unknown type %d",
field_descriptor->cpp_type());
return nullptr;
}
Py_RETURN_NONE;
@ -437,25 +433,24 @@ static int AssSubscript(PyObject* pself, PyObject* slice, PyObject* value) {
cmessage::AssureWritable(self->parent);
Message* message = self->parent->message;
const FieldDescriptor* field_descriptor = self->parent_field_descriptor;
#if PY_MAJOR_VERSION < 3
if (PyInt_Check(slice)) {
from = to = PyInt_AsLong(slice);
} else // NOLINT
#endif
if (PyLong_Check(slice)) {
from = to = PyLong_AsLong(slice);
} else if (PySlice_Check(slice)) {
const Reflection* reflection = message->GetReflection();
length = reflection->FieldSize(*message, field_descriptor);
#if PY_MAJOR_VERSION >= 3
if (PySlice_GetIndicesEx(slice, length, &from, &to, &step, &slicelength) ==
-1) {
#else
if (PySlice_GetIndicesEx(reinterpret_cast<PySliceObject*>(slice), length,
&from, &to, &step, &slicelength) == -1) {
#endif
return -1;
}
@ -465,7 +460,7 @@ static int AssSubscript(PyObject* pself, PyObject* slice, PyObject* value) {
return -1;
}
if (value == nullptr) {
return cmessage::DeleteRepeatedField(self->parent, field_descriptor, slice);
}
@ -473,12 +468,12 @@ static int AssSubscript(PyObject* pself, PyObject* slice, PyObject* value) {
return AssignItem(pself, from, value);
}
ScopedPyObjectPtr full_slice(PySlice_New(nullptr, nullptr, nullptr));
if (full_slice == nullptr) {
return -1;
}
ScopedPyObjectPtr new_list(Subscript(pself, full_slice.get()));
if (new_list == nullptr) {
return -1;
}
if (PySequence_SetSlice(new_list.get(), from, to, value) < 0) {
@ -495,23 +490,23 @@ PyObject* Extend(RepeatedScalarContainer* self, PyObject* value) {
if (value == Py_None) {
Py_RETURN_NONE;
}
if ((Py_TYPE(value)->tp_as_sequence == nullptr) && PyObject_Not(value)) {
Py_RETURN_NONE;
}
ScopedPyObjectPtr iter(PyObject_GetIter(value));
if (iter == nullptr) {
PyErr_SetString(PyExc_TypeError, "Value must be iterable");
return nullptr;
}
ScopedPyObjectPtr next;
while ((next.reset(PyIter_Next(iter.get()))) != nullptr) {
if (ScopedPyObjectPtr(Append(self, next.get())) == nullptr) {
return nullptr;
}
}
if (PyErr_Occurred()) {
return nullptr;
}
Py_RETURN_NONE;
}
@ -523,16 +518,16 @@ static PyObject* Insert(PyObject* pself, PyObject* args) {
Py_ssize_t index;
PyObject* value;
if (!PyArg_ParseTuple(args, "lO", &index, &value)) {
return nullptr;
}
ScopedPyObjectPtr full_slice(PySlice_New(nullptr, nullptr, nullptr));
ScopedPyObjectPtr new_list(Subscript(pself, full_slice.get()));
if (PyList_Insert(new_list.get(), index, value) < 0) {
return nullptr;
}
int ret = InternalAssignRepeatedField(self, new_list.get());
if (ret < 0) {
return nullptr;
}
Py_RETURN_NONE;
}
@ -548,10 +543,10 @@ static PyObject* Remove(PyObject* pself, PyObject* value) {
}
if (match_index == -1) {
PyErr_SetString(PyExc_ValueError, "remove(x): x not in container");
return nullptr;
}
if (AssignItem(pself, match_index, nullptr) < 0) {
return nullptr;
}
Py_RETURN_NONE;
}
@ -570,9 +565,9 @@ static PyObject* RichCompare(PyObject* pself, PyObject* other, int opid) {
// also a repeated scalar container, into Python lists so we can delegate
// to the list's compare method.
ScopedPyObjectPtr full_slice(PySlice_New(nullptr, nullptr, nullptr));
if (full_slice == nullptr) {
return nullptr;
}
ScopedPyObjectPtr other_list_deleter;
@ -582,54 +577,72 @@ static PyObject* RichCompare(PyObject* pself, PyObject* other, int opid) {
}
ScopedPyObjectPtr list(Subscript(pself, full_slice.get()));
if (list == nullptr) {
return nullptr;
}
return PyObject_RichCompare(list.get(), other, opid);
}
PyObject* Reduce(PyObject* unused_self, PyObject* unused_other) {
PyErr_Format(PickleError_class,
"can't pickle repeated message fields, convert to list first");
return nullptr;
}
static PyObject* Sort(PyObject* pself, PyObject* args, PyObject* kwds) {
// Support the old sort_function argument for backwards
// compatibility.
if (kwds != nullptr) {
PyObject* sort_func = PyDict_GetItemString(kwds, "sort_function");
if (sort_func != nullptr) {
// Must set before deleting as sort_func is a borrowed reference
// and kwds might be the only thing keeping it alive.
if (PyDict_SetItemString(kwds, "cmp", sort_func) == -1) return nullptr;
if (PyDict_DelItemString(kwds, "sort_function") == -1) return nullptr;
}
}
ScopedPyObjectPtr full_slice(PySlice_New(nullptr, nullptr, nullptr));
if (full_slice == nullptr) {
return nullptr;
}
ScopedPyObjectPtr list(Subscript(pself, full_slice.get()));
if (list == nullptr) {
return nullptr;
}
ScopedPyObjectPtr m(PyObject_GetAttrString(list.get(), "sort"));
if (m == nullptr) {
return nullptr;
}
ScopedPyObjectPtr res(PyObject_Call(m.get(), args, kwds));
if (res == nullptr) {
return nullptr;
}
int ret = InternalAssignRepeatedField(
reinterpret_cast<RepeatedScalarContainer*>(pself), list.get());
if (ret < 0) {
return nullptr;
}
Py_RETURN_NONE;
}
static PyObject* Reverse(PyObject* pself) {
ScopedPyObjectPtr full_slice(PySlice_New(nullptr, nullptr, nullptr));
if (full_slice == nullptr) {
return nullptr;
}
ScopedPyObjectPtr list(Subscript(pself, full_slice.get()));
if (list == nullptr) {
return nullptr;
}
ScopedPyObjectPtr res(PyObject_CallMethod(list.get(), "reverse", nullptr));
if (res == nullptr) {
return nullptr;
}
int ret = InternalAssignRepeatedField(
reinterpret_cast<RepeatedScalarContainer*>(pself), list.get());
if (ret < 0) {
return nullptr;
}
Py_RETURN_NONE;
}
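// [Editor's sketch, not part of this change] Insert(), Sort(), ToStr() and
// the new Reverse() all share one pattern: copy the repeated field into a
// real Python list via a full slice, mutate the list, then assign the result
// back in one step. A hypothetical helper showing the shape of that pattern,
// using only names already defined in this file (the `mutate` callback,
// e.g. &PyList_Reverse, is illustrative):
static PyObject* MutateViaList(PyObject* pself, int (*mutate)(PyObject*)) {
  ScopedPyObjectPtr full_slice(PySlice_New(nullptr, nullptr, nullptr));
  if (full_slice == nullptr) return nullptr;
  // Materialize the repeated field as a detached Python list (a [:] copy).
  ScopedPyObjectPtr list(Subscript(pself, full_slice.get()));
  if (list == nullptr) return nullptr;
  if (mutate(list.get()) < 0) return nullptr;
  // Write the mutated list back into the underlying repeated field.
  if (InternalAssignRepeatedField(
          reinterpret_cast<RepeatedScalarContainer*>(pself), list.get()) < 0) {
    return nullptr;
  }
  Py_RETURN_NONE;
}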
@ -637,27 +650,27 @@ static PyObject* Sort(PyObject* pself, PyObject* args, PyObject* kwds) {
static PyObject* Pop(PyObject* pself, PyObject* args) {
Py_ssize_t index = -1;
if (!PyArg_ParseTuple(args, "|n", &index)) {
return nullptr;
}
PyObject* item = Item(pself, index);
if (item == nullptr) {
PyErr_Format(PyExc_IndexError, "list index (%zd) out of range", index);
return nullptr;
}
if (AssignItem(pself, index, nullptr) < 0) {
return nullptr;
}
return item;
}
static PyObject* ToStr(PyObject* pself) {
ScopedPyObjectPtr full_slice(PySlice_New(nullptr, nullptr, nullptr));
if (full_slice == nullptr) {
return nullptr;
}
ScopedPyObjectPtr list(Subscript(pself, full_slice.get()));
if (list == nullptr) {
return nullptr;
}
return PyObject_Repr(list.get());
}
@ -670,13 +683,13 @@ static PyObject* MergeFrom(PyObject* pself, PyObject* arg) {
RepeatedScalarContainer* NewContainer(
CMessage* parent, const FieldDescriptor* parent_field_descriptor) {
if (!CheckFieldBelongsToMessage(parent_field_descriptor, parent->message)) {
return nullptr;
}
RepeatedScalarContainer* self = reinterpret_cast<RepeatedScalarContainer*>(
PyType_GenericAlloc(&RepeatedScalarContainer_Type, 0));
if (self == nullptr) {
return nullptr;
}
Py_INCREF(parent);
@ -696,81 +709,81 @@ static void Dealloc(PyObject* pself) {
}
static PySequenceMethods SqMethods = {
Len, /* sq_length */
nullptr, /* sq_concat */
nullptr, /* sq_repeat */
Item, /* sq_item */
nullptr, /* sq_slice */
AssignItem /* sq_ass_item */
};
static PyMappingMethods MpMethods = {
Len, /* mp_length */
Subscript, /* mp_subscript */
AssSubscript, /* mp_ass_subscript */
};
static PyMethodDef Methods[] = {
{ "__deepcopy__", DeepCopy, METH_VARARGS,
"Makes a deep copy of the class." },
{ "__reduce__", Reduce, METH_NOARGS,
"Outputs picklable representation of the repeated field." },
{ "append", AppendMethod, METH_O,
"Appends an object to the repeated container." },
{ "extend", ExtendMethod, METH_O,
"Appends objects to the repeated container." },
{ "insert", Insert, METH_VARARGS,
"Inserts an object at the specified position in the container." },
{ "pop", Pop, METH_VARARGS,
"Removes an object from the repeated container and returns it." },
{ "remove", Remove, METH_O,
"Removes an object from the repeated container." },
{ "sort", (PyCFunction)Sort, METH_VARARGS | METH_KEYWORDS,
"Sorts the repeated container."},
{ "MergeFrom", (PyCFunction)MergeFrom, METH_O,
"Merges a repeated container into the current container." },
{ NULL, NULL }
};
{"__deepcopy__", DeepCopy, METH_VARARGS, "Makes a deep copy of the class."},
{"__reduce__", Reduce, METH_NOARGS,
"Outputs picklable representation of the repeated field."},
{"append", AppendMethod, METH_O,
"Appends an object to the repeated container."},
{"extend", ExtendMethod, METH_O,
"Appends objects to the repeated container."},
{"insert", Insert, METH_VARARGS,
"Inserts an object at the specified position in the container."},
{"pop", Pop, METH_VARARGS,
"Removes an object from the repeated container and returns it."},
{"remove", Remove, METH_O,
"Removes an object from the repeated container."},
{"sort", reinterpret_cast<PyCFunction>(Sort), METH_VARARGS | METH_KEYWORDS,
"Sorts the repeated container."},
{"reverse", reinterpret_cast<PyCFunction>(Reverse), METH_NOARGS,
"Reverses elements order of the repeated container."},
{"MergeFrom", static_cast<PyCFunction>(MergeFrom), METH_O,
"Merges a repeated container into the current container."},
{nullptr, nullptr}};
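// [Editor's note, illustrative only] The casts in the table above are needed
// because a METH_VARARGS | METH_KEYWORDS handler such as Sort() takes a third
// kwds parameter; its real type is PyCFunctionWithKeywords, and CPython
// expects it stored in the table as a plain PyCFunction. A hypothetical entry:
static PyObject* WithKwds(PyObject* self, PyObject* args, PyObject* kwds) {
  Py_RETURN_NONE;  // placeholder body for illustration
}
static PyMethodDef kExampleMethods[] = {
    // CPython dispatches on the METH_KEYWORDS flag and calls the handler
    // with three arguments, so the cast is safe by contract.
    {"with_kwds", reinterpret_cast<PyCFunction>(WithKwds),
     METH_VARARGS | METH_KEYWORDS, "Hypothetical keyword-accepting method."},
    {nullptr, nullptr}};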
} // namespace repeated_scalar_container
PyTypeObject RepeatedScalarContainer_Type = {
PyVarObject_HEAD_INIT(&PyType_Type, 0) FULL_MODULE_NAME
".RepeatedScalarContainer", // tp_name
sizeof(RepeatedScalarContainer), // tp_basicsize
0, // tp_itemsize
repeated_scalar_container::Dealloc, // tp_dealloc
nullptr, // tp_print
nullptr, // tp_getattr
nullptr, // tp_setattr
nullptr, // tp_compare
repeated_scalar_container::ToStr, // tp_repr
nullptr, // tp_as_number
&repeated_scalar_container::SqMethods, // tp_as_sequence
&repeated_scalar_container::MpMethods, // tp_as_mapping
PyObject_HashNotImplemented, // tp_hash
nullptr, // tp_call
nullptr, // tp_str
nullptr, // tp_getattro
nullptr, // tp_setattro
nullptr, // tp_as_buffer
Py_TPFLAGS_DEFAULT, // tp_flags
"A Repeated scalar container", // tp_doc
nullptr, // tp_traverse
nullptr, // tp_clear
repeated_scalar_container::RichCompare, // tp_richcompare
0, // tp_weaklistoffset
nullptr, // tp_iter
nullptr, // tp_iternext
repeated_scalar_container::Methods, // tp_methods
nullptr, // tp_members
nullptr, // tp_getset
nullptr, // tp_base
nullptr, // tp_dict
nullptr, // tp_descr_get
nullptr, // tp_descr_set
0, // tp_dictoffset
nullptr, // tp_init
};
} // namespace python

@ -254,7 +254,7 @@ if __name__ == '__main__':
os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'cpp'
# Keep this list of dependencies in sync with tox.ini.
install_requires = ['six>=1.9', 'setuptools']
if sys.version_info <= (2,7):
install_requires.append('ordereddict')
install_requires.append('unittest2')

@ -41,17 +41,17 @@ namespace google {
namespace protobuf {
namespace internal {
bool AnyMetadata::PackFrom(const Message& message) {
return PackFrom(message, kTypeGoogleApisComPrefix);
}
bool AnyMetadata::PackFrom(const Message& message,
StringPiece type_url_prefix) {
type_url_->Set(
&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString(),
GetTypeUrl(message.GetDescriptor()->full_name(), type_url_prefix),
nullptr);
return message.SerializeToString(
value_->Mutable(ArenaStringPtr::EmptyDefault{}, nullptr));
}
@ -80,3 +80,5 @@ bool GetAnyFieldDescriptors(const Message& message,
} // namespace internal
} // namespace protobuf
} // namespace google
#include <google/protobuf/port_undef.inc>

@ -65,12 +65,14 @@ class PROTOBUF_EXPORT AnyMetadata {
// Packs a message using the default type URL prefix: "type.googleapis.com".
// The resulted type URL will be "type.googleapis.com/<message_full_name>".
// Returns false if serializing the message failed.
template <typename T>
bool PackFrom(const T& message) {
return InternalPackFrom(message, kTypeGoogleApisComPrefix,
T::FullMessageName());
}
bool PackFrom(const Message& message);
// Packs a message using the given type URL prefix. The type URL will be
// constructed by concatenating the message type's full name to the prefix
@ -78,12 +80,13 @@ class PROTOBUF_EXPORT AnyMetadata {
// For example, both PackFrom(message, "type.googleapis.com") and
// PackFrom(message, "type.googleapis.com/") yield the same result type
// URL: "type.googleapis.com/<message_full_name>".
// Returns false if serializing the message failed.
template <typename T>
bool PackFrom(const T& message, StringPiece type_url_prefix) {
return InternalPackFrom(message, type_url_prefix, T::FullMessageName());
}
bool PackFrom(const Message& message, StringPiece type_url_prefix);
// Unpacks the payload into the given message. Returns false if the message's
// type doesn't match the type specified in the type URL (i.e., the full
@ -105,7 +108,7 @@ class PROTOBUF_EXPORT AnyMetadata {
}
private:
bool InternalPackFrom(const MessageLite& message,
StringPiece type_url_prefix,
StringPiece type_name);
bool InternalUnpackTo(StringPiece type_name,

@ -14,6 +14,8 @@
#include <google/protobuf/wire_format.h>
// @@protoc_insertion_point(includes)
#include <google/protobuf/port_def.inc>
PROTOBUF_PRAGMA_INIT_SEG
PROTOBUF_NAMESPACE_OPEN
class AnyDefaultTypeInternal {
public:
@ -76,7 +78,7 @@ const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_google
};
// Force running AddDescriptors() at dynamic initialization time.
PROTOBUF_ATTRIBUTE_INIT_PRIORITY static ::PROTOBUF_NAMESPACE_ID::internal::AddDescriptorsRunner dynamic_init_dummy_google_2fprotobuf_2fany_2eproto(&descriptor_table_google_2fprotobuf_2fany_2eproto);
PROTOBUF_NAMESPACE_OPEN
// ===================================================================

@ -110,12 +110,12 @@ class PROTOBUF_EXPORT Any PROTOBUF_FINAL :
// implements Any -----------------------------------------------
bool PackFrom(const ::PROTOBUF_NAMESPACE_ID::Message& message) {
return _any_metadata_.PackFrom(message);
}
bool PackFrom(const ::PROTOBUF_NAMESPACE_ID::Message& message,
::PROTOBUF_NAMESPACE_ID::ConstStringParam type_url_prefix) {
return _any_metadata_.PackFrom(message, type_url_prefix);
}
bool UnpackTo(::PROTOBUF_NAMESPACE_ID::Message* message) const {
return _any_metadata_.UnpackTo(message);
@ -125,13 +125,13 @@ class PROTOBUF_EXPORT Any PROTOBUF_FINAL :
const ::PROTOBUF_NAMESPACE_ID::FieldDescriptor** type_url_field,
const ::PROTOBUF_NAMESPACE_ID::FieldDescriptor** value_field);
template <typename T, class = typename std::enable_if<!std::is_convertible<T, const ::PROTOBUF_NAMESPACE_ID::Message&>::value>::type>
bool PackFrom(const T& message) {
return _any_metadata_.PackFrom<T>(message);
}
template <typename T, class = typename std::enable_if<!std::is_convertible<T, const ::PROTOBUF_NAMESPACE_ID::Message&>::value>::type>
bool PackFrom(const T& message,
::PROTOBUF_NAMESPACE_ID::ConstStringParam type_url_prefix) {
return _any_metadata_.PackFrom<T>(message, type_url_prefix);
}
template <typename T, class = typename std::enable_if<!std::is_convertible<T, const ::PROTOBUF_NAMESPACE_ID::Message&>::value>::type>
bool UnpackTo(T* message) const {
return _any_metadata_.UnpackTo<T>(message);

@ -53,12 +53,12 @@ const char kAnyFullTypeName[] = "google.protobuf.Any";
const char kTypeGoogleApisComPrefix[] = "type.googleapis.com/";
const char kTypeGoogleProdComPrefix[] = "type.googleprod.com/";
bool AnyMetadata::InternalPackFrom(const MessageLite& message,
StringPiece type_url_prefix,
StringPiece type_name) {
type_url_->Set(&::google::protobuf::internal::GetEmptyString(),
GetTypeUrl(type_name, type_url_prefix), nullptr);
return message.SerializeToString(
value_->Mutable(ArenaStringPtr::EmptyDefault{}, nullptr));
}

@ -49,7 +49,7 @@ TEST(AnyTest, TestPackAndUnpack) {
protobuf_unittest::TestAny submessage;
submessage.set_int32_value(12345);
protobuf_unittest::TestAny message;
ASSERT_TRUE(message.mutable_any_value()->PackFrom(submessage));
std::string data = message.SerializeAsString();
@ -60,6 +60,13 @@ TEST(AnyTest, TestPackAndUnpack) {
EXPECT_EQ(12345, submessage.int32_value());
}
TEST(AnyTest, TestPackFromSerializationExceedsSizeLimit) {
protobuf_unittest::TestAny submessage;
submessage.mutable_text()->resize(INT_MAX, 'a');
protobuf_unittest::TestAny message;
EXPECT_FALSE(message.mutable_any_value()->PackFrom(submessage));
}
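// [Editor's sketch, not part of this change] With PackFrom() now returning
// bool, callers can detect serialization failure instead of silently storing
// an empty payload:
bool PackChecked(const google::protobuf::Message& payload,
                 google::protobuf::Any* any) {
  // False when serializing `payload` fails, e.g. for the oversized message
  // exercised by the test above.
  return any->PackFrom(payload);
}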
TEST(AnyTest, TestUnpackWithTypeMismatch) {
protobuf_unittest::TestAny payload;
payload.set_int32_value(13);
@ -173,3 +180,5 @@ TEST(AnyTest, MoveAssignment) {
} // namespace
} // namespace protobuf
} // namespace google
#include <google/protobuf/port_undef.inc>

@ -34,8 +34,11 @@ package protobuf_unittest;
import "google/protobuf/any.proto";
option java_outer_classname = "TestAnyProto";
message TestAny {
int32 int32_value = 1;
google.protobuf.Any any_value = 2;
repeated google.protobuf.Any repeated_any_value = 3;
string text = 4;
}

@ -14,6 +14,8 @@
#include <google/protobuf/wire_format.h>
// @@protoc_insertion_point(includes)
#include <google/protobuf/port_def.inc>
PROTOBUF_PRAGMA_INIT_SEG
extern PROTOBUF_INTERNAL_EXPORT_google_2fprotobuf_2fapi_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<1> scc_info_Method_google_2fprotobuf_2fapi_2eproto;
extern PROTOBUF_INTERNAL_EXPORT_google_2fprotobuf_2fapi_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_Mixin_google_2fprotobuf_2fapi_2eproto;
extern PROTOBUF_INTERNAL_EXPORT_google_2fprotobuf_2ftype_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<1> scc_info_Option_google_2fprotobuf_2ftype_2eproto;
@ -164,7 +166,7 @@ const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_google
};
// Force running AddDescriptors() at dynamic initialization time.
PROTOBUF_ATTRIBUTE_INIT_PRIORITY static ::PROTOBUF_NAMESPACE_ID::internal::AddDescriptorsRunner dynamic_init_dummy_google_2fprotobuf_2fapi_2eproto(&descriptor_table_google_2fprotobuf_2fapi_2eproto);
PROTOBUF_NAMESPACE_OPEN
// ===================================================================

@ -32,10 +32,14 @@
#include <algorithm>
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <typeinfo>
#include <google/protobuf/arena_impl.h>
#include <google/protobuf/stubs/mutex.h>
#ifdef ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h>
#endif // ADDRESS_SANITIZER
@ -47,370 +51,388 @@ static const size_t kMaxCleanupListElements = 64; // 1kB on 64-bit.
namespace google {
namespace protobuf {
namespace internal {
PROTOBUF_EXPORT /*static*/ void* (*const ArenaOptions::kDefaultBlockAlloc)(
size_t) = &::operator new;
static SerialArena::Memory AllocateMemory(const AllocationPolicy* policy_ptr,
size_t last_size, size_t min_bytes) {
AllocationPolicy policy; // default policy
if (policy_ptr) policy = *policy_ptr;
size_t size;
if (last_size != 0) {
// Double the current block size, up to a limit.
auto max_size = policy.max_block_size;
size = std::min(2 * last_size, max_size);
} else {
size = policy.start_block_size;
}
// Verify that min_bytes + kBlockHeaderSize won't overflow.
GOOGLE_CHECK_LE(min_bytes,
std::numeric_limits<size_t>::max() - SerialArena::kBlockHeaderSize);
size = std::max(size, SerialArena::kBlockHeaderSize + min_bytes);
void* mem;
if (policy.block_alloc == nullptr) {
mem = ::operator new(size);
} else {
mem = policy.block_alloc(size);
}
return {mem, size};
}
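// [Editor's sketch, not part of this change] Feeding last_size back into the
// next request makes block sizes grow geometrically until they saturate at
// max_block_size; with the stock AllocationPolicy defaults the sequence is
// roughly 256, 512, 1024, ..., up to the configured maximum. A hypothetical
// helper mirroring the sizing logic above:
static size_t NextBlockSize(size_t last_size, const AllocationPolicy& policy) {
  return last_size == 0 ? policy.start_block_size
                        : std::min(2 * last_size, policy.max_block_size);
}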
class GetDeallocator {
public:
GetDeallocator(const AllocationPolicy* policy, size_t* space_allocated)
: dealloc_(policy ? policy->block_dealloc : nullptr),
space_allocated_(space_allocated) {}
void operator()(SerialArena::Memory mem) const {
#ifdef ADDRESS_SANITIZER
// This memory was provided by the underlying allocator as unpoisoned,
// so return it in an unpoisoned state.
ASAN_UNPOISON_MEMORY_REGION(mem.ptr, mem.size);
#endif // ADDRESS_SANITIZER
if (dealloc_) {
dealloc_(mem.ptr, mem.size);
} else {
#if defined(__GXX_DELETE_WITH_SIZE__) || defined(__cpp_sized_deallocation)
::operator delete(mem.ptr, mem.size);
#else
::operator delete(mem.ptr);
#endif
}
*space_allocated_ += mem.size;
}
private:
void (*dealloc_)(void*, size_t);
size_t* space_allocated_;
};
SerialArena::SerialArena(Block* b, void* owner) : space_allocated_(b->size) {
owner_ = owner;
head_ = b;
ptr_ = b->Pointer(kBlockHeaderSize + ThreadSafeArena::kSerialArenaSize);
limit_ = b->Pointer(b->size & static_cast<size_t>(-8));
}
SerialArena* SerialArena::New(Memory mem, void* owner) {
GOOGLE_DCHECK_LE(kBlockHeaderSize + ThreadSafeArena::kSerialArenaSize, mem.size);
auto b = new (mem.ptr) Block{nullptr, mem.size};
return new (b->Pointer(kBlockHeaderSize)) SerialArena(b, owner);
}
template <typename Deallocator>
SerialArena::Memory SerialArena::Free(Deallocator deallocator) {
Block* b = head_;
Memory mem = {b, b->size};
while (b->next) {
b = b->next; // We must first advance before deleting this block
deallocator(mem);
mem = {b, b->size};
}
return mem;
}
PROTOBUF_NOINLINE
std::pair<void*, SerialArena::CleanupNode*>
SerialArena::AllocateAlignedWithCleanupFallback(
size_t n, const AllocationPolicy* policy) {
AllocateNewBlock(n + kCleanupSize, policy);
return AllocateAlignedWithCleanup(n, policy);
}
PROTOBUF_NOINLINE
void* SerialArena::AllocateAlignedFallback(size_t n,
const AllocationPolicy* policy) {
AllocateNewBlock(n, policy);
return AllocateAligned(n, policy);
}
void SerialArena::AllocateNewBlock(size_t n, const AllocationPolicy* policy) {
// Sync limit to block
head_->start = reinterpret_cast<CleanupNode*>(limit_);
// Record how much used in this block.
space_used_ += ptr_ - head_->Pointer(kBlockHeaderSize);
auto mem = AllocateMemory(policy, head_->size, n);
// We don't want to emit an expensive RMW instruction that requires
// exclusive access to a cacheline. Hence we write it in terms of a
// regular add.
auto relaxed = std::memory_order_relaxed;
space_allocated_.store(space_allocated_.load(relaxed) + mem.size, relaxed);
head_ = new (mem.ptr) Block{head_, mem.size};
ptr_ = head_->Pointer(kBlockHeaderSize);
limit_ = head_->Pointer(head_->size);
#ifdef ADDRESS_SANITIZER
ASAN_POISON_MEMORY_REGION(ptr_, limit_ - ptr_);
#endif // ADDRESS_SANITIZER
}
uint64 SerialArena::SpaceUsed() const {
uint64 space_used = ptr_ - head_->Pointer(kBlockHeaderSize);
space_used += space_used_;
// Remove the overhead of the SerialArena itself.
space_used -= ThreadSafeArena::kSerialArenaSize;
return space_used;
}
void SerialArena::CleanupList() {
Block* b = head_;
b->start = reinterpret_cast<CleanupNode*>(limit_);
do {
auto* limit = reinterpret_cast<CleanupNode*>(
b->Pointer(b->size & static_cast<size_t>(-8)));
auto it = b->start;
auto num = limit - it;
if (num > 0) {
for (; it < limit; it++) {
it->cleanup(it->elem);
}
}
b = b->next;
} while (b);
}
ThreadSafeArena::CacheAlignedLifecycleIdGenerator
ThreadSafeArena::lifecycle_id_generator_;
#if defined(GOOGLE_PROTOBUF_NO_THREADLOCAL)
ThreadSafeArena::ThreadCache& ThreadSafeArena::thread_cache() {
static internal::ThreadLocalStorage<ThreadCache>* thread_cache_ =
new internal::ThreadLocalStorage<ThreadCache>();
return *thread_cache_->Get();
}
#elif defined(PROTOBUF_USE_DLLS)
ThreadSafeArena::ThreadCache& ThreadSafeArena::thread_cache() {
static PROTOBUF_THREAD_LOCAL ThreadCache thread_cache_ = {
0, static_cast<LifecycleIdAtomic>(-1), nullptr};
return thread_cache_;
}
#else
PROTOBUF_THREAD_LOCAL ThreadSafeArena::ThreadCache
ThreadSafeArena::thread_cache_ = {0, static_cast<LifecycleIdAtomic>(-1),
nullptr};
#endif
void ThreadSafeArena::InitializeFrom(void* mem, size_t size) {
GOOGLE_DCHECK_EQ(reinterpret_cast<uintptr_t>(mem) & 7, 0u);
Init(false);
// Ignore initial block if it is too small.
if (mem != nullptr && size >= kBlockHeaderSize + kSerialArenaSize) {
alloc_policy_ |= kUserOwnedInitialBlock;
SetInitialBlock(mem, size);
}
}
void ThreadSafeArena::InitializeWithPolicy(void* mem, size_t size,
bool record_allocs,
AllocationPolicy policy) {
GOOGLE_DCHECK_EQ(reinterpret_cast<uintptr_t>(mem) & 7, 0u);
Init(record_allocs);
// Ignore initial block if it is too small. We include an optional
// AllocationPolicy in this check, so that this can be allocated on the
// first block.
constexpr size_t kAPSize = internal::AlignUpTo8(sizeof(AllocationPolicy));
constexpr size_t kMinimumSize = kBlockHeaderSize + kSerialArenaSize + kAPSize;
if (mem != nullptr && size >= kMinimumSize) {
alloc_policy_ = kUserOwnedInitialBlock;
} else {
alloc_policy_ = 0;
auto tmp = AllocateMemory(&policy, 0, kMinimumSize);
mem = tmp.ptr;
size = tmp.size;
}
SetInitialBlock(mem, size);
auto sa = threads_.load(std::memory_order_relaxed);
// We ensured enough space so this cannot fail.
void* p;
if (!sa || !sa->MaybeAllocateAligned(kAPSize, &p)) {
GOOGLE_LOG(FATAL) << "MaybeAllocateAligned cannot fail here.";
return;
}
new (p) AllocationPolicy{policy};
alloc_policy_ |= reinterpret_cast<intptr_t>(p);
}
void ThreadSafeArena::Init(bool record_allocs) {
ThreadCache& tc = thread_cache();
auto id = tc.next_lifecycle_id;
// We increment lifecycle_id's by multiples of two so we can use bit 0 as
// a tag.
constexpr uint64 kDelta = 2;
constexpr uint64 kInc = ThreadCache::kPerThreadIds * kDelta;
if (PROTOBUF_PREDICT_FALSE((id & (kInc - 1)) == 0)) {
constexpr auto relaxed = std::memory_order_relaxed;
// On platforms that don't support uint64 atomics we can certainly not
// afford to increment by large intervals and expect uniqueness due to
// wrapping, hence we only add by 1.
id = lifecycle_id_generator_.id.fetch_add(1, relaxed) * kInc;
}
tc.next_lifecycle_id = id + kDelta;
tag_and_id_ = id | (record_allocs ? kRecordAllocs : 0);
hint_.store(nullptr, std::memory_order_relaxed);
threads_.store(nullptr, std::memory_order_relaxed);
}
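// [Editor's sketch, not part of this change] Because ids advance in steps of
// kDelta == 2, bit 0 of tag_and_id_ is free to carry the record-allocs flag;
// these hypothetical standalone helpers show the masking that the members
// ShouldRecordAlloc() and LifeCycleId() (used below) perform:
constexpr uint64_t kRecordAllocsBit = 1;  // mirrors kRecordAllocs
constexpr bool TagSaysRecord(uint64_t tag_and_id) {
  return (tag_and_id & kRecordAllocsBit) != 0;
}
constexpr uint64_t TagToLifeCycleId(uint64_t tag_and_id) {
  return tag_and_id & ~kRecordAllocsBit;
}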
void ThreadSafeArena::SetInitialBlock(void* mem, size_t size) {
SerialArena* serial = SerialArena::New({mem, size}, &thread_cache());
serial->set_next(NULL);
threads_.store(serial, std::memory_order_relaxed);
CacheSerialArena(serial);
}
ThreadSafeArena::~ThreadSafeArena() {
// Have to do this in a first pass, because some of the destructors might
// refer to memory in other blocks.
CleanupList();
size_t space_allocated = 0;
auto mem = Free(&space_allocated);
// Policy is about to get deleted.
auto p = AllocPolicy();
ArenaMetricsCollector* collector = p ? p->metrics_collector : nullptr;
if (alloc_policy_ & kUserOwnedInitialBlock) {
space_allocated += mem.size;
} else {
GetDeallocator(AllocPolicy(), &space_allocated)(mem);
}
if (collector) collector->OnDestroy(space_allocated);
}
SerialArena::Memory ThreadSafeArena::Free(size_t* space_allocated) {
SerialArena::Memory mem = {nullptr, 0};
auto deallocator = GetDeallocator(AllocPolicy(), space_allocated);
PerSerialArena([deallocator, &mem](SerialArena* a) {
if (mem.ptr) deallocator(mem);
mem = a->Free(deallocator);
});
return mem;
}
uint64 ThreadSafeArena::Reset() {
// Have to do this in a first pass, because some of the destructors might
// refer to memory in other blocks.
CleanupList();
// Discard all blocks except the special block (if present).
size_t space_allocated = 0;
auto mem = Free(&space_allocated);
if (AllocPolicy()) {
auto saved_policy = *AllocPolicy();
if (alloc_policy_ & kUserOwnedInitialBlock) {
space_allocated += mem.size;
} else {
GetDeallocator(AllocPolicy(), &space_allocated)(mem);
mem.ptr = nullptr;
mem.size = 0;
}
ArenaMetricsCollector* collector = saved_policy.metrics_collector;
if (collector) collector->OnReset(space_allocated);
InitializeWithPolicy(mem.ptr, mem.size, ShouldRecordAlloc(), saved_policy);
} else {
// Nullptr policy
if (alloc_policy_ & kUserOwnedInitialBlock) {
space_allocated += mem.size;
InitializeFrom(mem.ptr, mem.size);
} else {
GetDeallocator(AllocPolicy(), &space_allocated)(mem);
Init(false);
}
}
return space_allocated;
}
std::pair<void*, SerialArena::CleanupNode*>
ThreadSafeArena::AllocateAlignedWithCleanup(size_t n,
const std::type_info* type) {
SerialArena* arena;
if (PROTOBUF_PREDICT_TRUE(GetSerialArenaFast(tag_and_id_, &arena))) {
return arena->AllocateAlignedWithCleanup(n, AllocPolicy());
} else {
return AllocateAlignedWithCleanupFallback(n, type);
}
}
void ThreadSafeArena::AddCleanup(void* elem, void (*cleanup)(void*)) {
SerialArena* arena;
if (PROTOBUF_PREDICT_TRUE(GetSerialArenaFast(LifeCycleId(), &arena))) {
arena->AddCleanup(elem, cleanup, AllocPolicy());
} else {
return AddCleanupFallback(elem, cleanup);
}
}
PROTOBUF_NOINLINE
void* ThreadSafeArena::AllocateAlignedFallback(size_t n,
const std::type_info* type) {
if (ShouldRecordAlloc()) {
RecordAlloc(type, n);
SerialArena* arena;
if (PROTOBUF_PREDICT_TRUE(GetSerialArenaFast(LifeCycleId(), &arena))) {
return arena->AllocateAligned(n, AllocPolicy());
}
}
return GetSerialArenaFallback(&thread_cache())
->AllocateAligned(n, AllocPolicy());
}
PROTOBUF_NOINLINE
std::pair<void*, SerialArena::CleanupNode*>
ThreadSafeArena::AllocateAlignedWithCleanupFallback(
size_t n, const std::type_info* type) {
if (ShouldRecordAlloc()) {
RecordAlloc(type, n);
SerialArena* arena;
if (GetSerialArenaFast(LifeCycleId(), &arena)) {
return arena->AllocateAlignedWithCleanup(n, AllocPolicy());
}
}
return GetSerialArenaFallback(&thread_cache())
->AllocateAlignedWithCleanup(n, AllocPolicy());
}
PROTOBUF_NOINLINE
void ThreadSafeArena::AddCleanupFallback(void* elem, void (*cleanup)(void*)) {
GetSerialArenaFallback(&thread_cache())
->AddCleanup(elem, cleanup, AllocPolicy());
}
uint64 ThreadSafeArena::SpaceAllocated() const {
SerialArena* serial = threads_.load(std::memory_order_acquire);
uint64 res = 0;
for (; serial; serial = serial->next()) {
res += serial->SpaceAllocated();
}
return res;
}
uint64 ThreadSafeArena::SpaceUsed() const {
SerialArena* serial = threads_.load(std::memory_order_acquire);
uint64 space_used = 0;
for (; serial; serial = serial->next()) {
space_used += serial->SpaceUsed();
}
return space_used - (AllocPolicy() ? sizeof(AllocationPolicy) : 0);
}
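// [Editor's sketch, not part of this change; assumes <cassert> and <string>
// are available] Both metrics walk the SerialArena list, so they cost
// O(number of threads that touched the arena), and SpaceUsed() never
// exceeds SpaceAllocated():
static void SpaceMetricsDemo() {
  google::protobuf::Arena arena;
  google::protobuf::Arena::Create<std::string>(&arena, "payload");
  // SpaceAllocated() sums whole block sizes; SpaceUsed() sums bytes actually
  // handed out, minus internal bookkeeping overhead.
  assert(arena.SpaceUsed() <= arena.SpaceAllocated());
}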
void ThreadSafeArena::CleanupList() {
PerSerialArena([](SerialArena* a) { a->CleanupList(); });
}
PROTOBUF_NOINLINE
SerialArena* ThreadSafeArena::GetSerialArenaFallback(void* me) {
// Look for this SerialArena in our linked list.
SerialArena* serial = threads_.load(std::memory_order_acquire);
for (; serial; serial = serial->next()) {
@ -422,8 +444,8 @@ SerialArena* ArenaImpl::GetSerialArenaFallback(void* me) {
if (!serial) {
// This thread doesn't have any SerialArena, which also means it doesn't
// have any blocks yet. So we'll allocate its first block now.
serial = SerialArena::New(
AllocateMemory(AllocPolicy(), 0, kSerialArenaSize), me);
SerialArena* head = threads_.load(std::memory_order_relaxed);
do {
@ -436,14 +458,25 @@ SerialArena* ArenaImpl::GetSerialArenaFallback(void* me) {
return serial;
}
ArenaMetricsCollector::~ArenaMetricsCollector() {}
} // namespace internal
PROTOBUF_FUNC_ALIGN(32)
void* Arena::AllocateAlignedNoHook(size_t n) {
return impl_.AllocateAligned(n, nullptr);
}
PROTOBUF_FUNC_ALIGN(32)
void* Arena::AllocateAlignedWithHook(size_t n, const std::type_info* type) {
return impl_.AllocateAligned(n, type);
}
PROTOBUF_FUNC_ALIGN(32)
std::pair<void*, internal::SerialArena::CleanupNode*>
Arena::AllocateAlignedWithCleanup(size_t n, const std::type_info* type) {
return impl_.AllocateAlignedWithCleanup(n, type);
}
} // namespace protobuf
} // namespace google
#include <google/protobuf/port_undef.inc>

@ -93,11 +93,28 @@ class EpsCopyInputStream; // defined in parse_context.h
template <typename Type>
class GenericTypeHandler; // defined in repeated_field.h
PROTOBUF_ALWAYS_INLINE
inline void* AlignTo(void* ptr, size_t align) {
return reinterpret_cast<void*>(
(reinterpret_cast<uintptr_t>(ptr) + align - 1) & (~align + 1));
}
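// [Editor's note] The mask arithmetic above relies on align being a power of
// two: (~align + 1) == -align, which clears the low log2(align) bits after
// the +align-1 bump. A worked check with hypothetical values:
static_assert(((0x1003u + 8 - 1) & (~8u + 1)) == 0x1008u,
              "0x1003 rounds up to 0x1008, the next 8-byte boundary");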
// Templated cleanup methods.
template <typename T>
void arena_destruct_object(void* object) {
reinterpret_cast<T*>(object)->~T();
}
template <bool destructor_skippable, typename T>
struct ObjectDestructor {
constexpr static void (*destructor)(void*) = &arena_destruct_object<T>;
};
template <typename T>
struct ObjectDestructor<true, T> {
constexpr static void (*destructor)(void*) = nullptr;
};
template <typename T>
void arena_delete_object(void* object) {
delete reinterpret_cast<T*>(object);
@ -138,34 +155,38 @@ struct ArenaOptions {
void (*block_dealloc)(void*, size_t);
ArenaOptions()
: start_block_size(internal::AllocationPolicy::kDefaultStartBlockSize),
max_block_size(internal::AllocationPolicy::kDefaultMaxBlockSize),
initial_block(NULL),
initial_block_size(0),
block_alloc(nullptr),
block_dealloc(nullptr),
make_metrics_collector(nullptr) {}
PROTOBUF_EXPORT static void* (*const kDefaultBlockAlloc)(size_t);
private:
// If make_metrics_collector is not nullptr, it will be called at Arena init
// time. It may return a pointer to a collector instance that will be notified
// of interesting events related to the arena.
internal::ArenaMetricsCollector* (*make_metrics_collector)();
internal::ArenaMetricsCollector* MetricsCollector() const {
return make_metrics_collector ? (*make_metrics_collector)() : nullptr;
}
internal::AllocationPolicy AllocationPolicy() const {
internal::AllocationPolicy res;
res.start_block_size = start_block_size;
res.max_block_size = max_block_size;
res.block_alloc = block_alloc;
res.block_dealloc = block_dealloc;
res.metrics_collector = MetricsCollector();
return res;
}
friend void arena_metrics::EnableArenaMetrics(ArenaOptions*);
friend class Arena;
friend class ArenaOptionsTestFriend;
};
// Support for non-RTTI environments. (The metrics hooks API uses type
@ -236,7 +257,9 @@ class PROTOBUF_EXPORT PROTOBUF_ALIGNAS(8) Arena final {
// Arena constructor taking custom options. See ArenaOptions above for
// descriptions of the options available.
explicit Arena(const ArenaOptions& options)
: impl_(options.initial_block, options.initial_block_size,
options.AllocationPolicy()) {}
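// [Editor's sketch, not part of this change] Typical use of this
// constructor: hand the arena a caller-owned initial block (8-byte aligned
// and outliving the arena) so small workloads avoid heap blocks entirely:
//
//   alignas(8) static char buffer[4096];
//   ArenaOptions options;
//   options.initial_block = buffer;
//   options.initial_block_size = sizeof(buffer);
//   Arena arena(options);  // options converted via AllocationPolicy() above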
// Block overhead. Use this as a guide for how much to over-allocate the
// initial block if you want an allocation of size N to fit inside it.
@ -244,8 +267,9 @@ class PROTOBUF_EXPORT PROTOBUF_ALIGNAS(8) Arena final {
// WARNING: if you allocate multiple objects, it is difficult to guarantee
// that a series of allocations will fit in the initial block, especially if
// Arena changes its alignment guarantees in the future!
static const size_t kBlockOverhead =
internal::ThreadSafeArena::kBlockHeaderSize +
internal::ThreadSafeArena::kSerialArenaSize;
inline ~Arena() {}
@ -290,8 +314,16 @@ class PROTOBUF_EXPORT PROTOBUF_ALIGNAS(8) Arena final {
// is obtained from the arena).
template <typename T, typename... Args>
PROTOBUF_ALWAYS_INLINE static T* Create(Arena* arena, Args&&... args) {
if (arena == NULL) {
return new T(std::forward<Args>(args)...);
} else {
auto destructor =
internal::ObjectDestructor<std::is_trivially_destructible<T>::value,
T>::destructor;
return new (arena->AllocateInternal(sizeof(T), alignof(T), destructor,
RTTI_TYPE_ID(T)))
T(std::forward<Args>(args)...);
}
}
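// [Editor's sketch, not part of this change] Trivially destructible types
// take the destructor == nullptr branch above and register no cleanup node:
//
//   Arena arena;
//   auto* s = Arena::Create<std::string>(&arena, "hello");  // cleanup node
//   auto* n = Arena::Create<int>(&arena, 42);               // no cleanup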
// Create an array of object type T on the arena *without* invoking the
@ -316,9 +348,12 @@ class PROTOBUF_EXPORT PROTOBUF_ALIGNAS(8) Arena final {
}
}
// The following routines are for monitoring. They approximate the total
// sum of allocated and used memory, but the exact value is an
// implementation detail. For instance, allocated space depends on growth
// policies. Do not use these in unit tests.
// Returns the total space allocated by the arena, which is the sum of the
// sizes of the underlying blocks.
uint64 SpaceAllocated() const { return impl_.SpaceAllocated(); }
// Returns the total space used by the arena. Similar to SpaceAllocated but
// does not include free space and block overhead. The total space returned
@ -336,8 +371,8 @@ class PROTOBUF_EXPORT PROTOBUF_ALIGNAS(8) Arena final {
// Adds |object| to a list of heap-allocated objects to be freed with |delete|
// when the arena is destroyed or reset.
template <typename T>
PROTOBUF_ALWAYS_INLINE void Own(T* object) {
OwnInternal(object, std::is_convertible<T*, MessageLite*>());
}
// Adds |object| to a list of objects whose destructors will be manually
@ -346,7 +381,7 @@ class PROTOBUF_EXPORT PROTOBUF_ALIGNAS(8) Arena final {
// normally only used for objects that are placement-newed into
// arena-allocated memory.
template <typename T>
PROTOBUF_ALWAYS_INLINE void OwnDestructor(T* object) {
if (object != NULL) {
impl_.AddCleanup(object, &internal::arena_destruct_object<T>);
}
@ -356,8 +391,8 @@ class PROTOBUF_EXPORT PROTOBUF_ALIGNAS(8) Arena final {
// will be manually called when the arena is destroyed or reset. This differs
// from OwnDestructor() in that any member function may be specified, not only
// the class destructor.
PROTOBUF_ALWAYS_INLINE void OwnCustomDestructor(void* object,
void (*destruct)(void*)) {
impl_.AddCleanup(object, destruct);
}
@ -436,6 +471,8 @@ class PROTOBUF_EXPORT PROTOBUF_ALIGNAS(8) Arena final {
};
private:
internal::ThreadSafeArena impl_;
template <typename T>
struct has_get_arena : InternalHelper<T>::has_get_arena {};
@ -467,41 +504,26 @@ class PROTOBUF_EXPORT PROTOBUF_ALIGNAS(8) Arena final {
}
}
inline void AllocHook(const std::type_info* allocated_type, size_t n) const {
impl_.RecordAlloc(allocated_type, n);
}
// Allocate and also optionally call collector with the allocated type info
// when allocation recording is enabled.
PROTOBUF_ALWAYS_INLINE void* AllocateInternal(size_t size, size_t align,
void (*destructor)(void*),
const std::type_info* type) {
// Monitor allocation if needed.
if (destructor == nullptr) {
return AllocateAlignedWithHook(size, align, type);
} else {
if (align <= 8) {
auto res = AllocateAlignedWithCleanup(internal::AlignUpTo8(size), type);
res.second->elem = res.first;
res.second->cleanup = destructor;
return res.first;
} else {
auto res = AllocateAlignedWithCleanup(size + align - 8, type);
auto ptr = internal::AlignTo(res.first, align);
res.second->elem = ptr;
res.second->cleanup = destructor;
return ptr;
}
}
}
@ -522,7 +544,7 @@ class PROTOBUF_EXPORT PROTOBUF_ALIGNAS(8) Arena final {
PROTOBUF_ALWAYS_INLINE static T* DoCreateMaybeMessage(Arena* arena,
std::false_type,
Args&&... args) {
return Create<T>(arena, std::forward<Args>(args)...);
}
template <typename T, typename... Args>
@ -532,25 +554,6 @@ class PROTOBUF_EXPORT PROTOBUF_ALIGNAS(8) Arena final {
std::forward<Args>(args)...);
}
// Just allocate the required size for the given type assuming the
// type has a trivial constructor.
template <typename T>
@ -559,22 +562,19 @@ class PROTOBUF_EXPORT PROTOBUF_ALIGNAS(8) Arena final {
<< "Requested size is too large to fit into size_t.";
// We count on compiler to realize that if sizeof(T) is a multiple of
// 8 AlignUpTo can be elided.
const size_t n = sizeof(T) * num_elements;
return static_cast<T*>(
AllocateAlignedWithHook(n, alignof(T), RTTI_TYPE_ID(T)));
}
template <typename T, typename... Args>
PROTOBUF_ALWAYS_INLINE T* DoCreateMessage(Args&&... args) {
AllocateInternal(sizeof(T), alignof(T),
internal::ObjectDestructor<
InternalHelper<T>::is_destructor_skippable::value,
T>::destructor,
RTTI_TYPE_ID(T)),
this, std::forward<Args>(args)...);
}
@ -619,7 +619,7 @@ class PROTOBUF_EXPORT PROTOBUF_ALIGNAS(8) Arena final {
template <typename T>
PROTOBUF_ALWAYS_INLINE void OwnInternal(T* object, std::true_type) {
if (object != NULL) {
impl_.AddCleanup(object, &internal::arena_delete_object<Message>);
impl_.AddCleanup(object, &internal::arena_delete_object<MessageLite>);
}
}
template <typename T>
@ -654,24 +654,38 @@ class PROTOBUF_EXPORT PROTOBUF_ALIGNAS(8) Arena final {
}
// For friends of arena.
void* AllocateAligned(size_t n) {
return AllocateAlignedNoHook(internal::AlignUpTo8(n));
void* AllocateAligned(size_t n, size_t align = 8) {
if (align <= 8) {
return AllocateAlignedNoHook(internal::AlignUpTo8(n));
} else {
// We waste space here by over-allocating align - 8 bytes. A dedicated
// function that took the current alignment into account would only waste
// (align - 8)/2 bytes on average, but would require another entry point
// among the out-of-line arena allocation functions. Possibly re-evaluate
// tradeoffs later.
return internal::AlignTo(AllocateAlignedNoHook(n + align - 8), align);
}
}
template<size_t Align>
void* AllocateAlignedTo(size_t n) {
static_assert(Align > 0, "Alignment must be greater than 0");
static_assert((Align & (Align - 1)) == 0, "Alignment must be power of two");
if (Align <= 8) return AllocateAligned(n);
// TODO(b/151247138): if the pointer would have been aligned already,
// this is wasting space. We should pass the alignment down.
uintptr_t ptr = reinterpret_cast<uintptr_t>(AllocateAligned(n + Align - 8));
ptr = (ptr + Align - 1) & (~Align + 1);
return reinterpret_cast<void*>(ptr);
void* AllocateAlignedWithHook(size_t n, size_t align,
const std::type_info* type) {
if (align <= 8) {
return AllocateAlignedWithHook(internal::AlignUpTo8(n), type);
} else {
// We waste space here by over-allocating align - 8 bytes. A dedicated
// function that took the current alignment into account would only waste
// (align - 8)/2 bytes on average, but would require another entry point
// among the out-of-line arena allocation functions. Possibly re-evaluate
// tradeoffs later.
return internal::AlignTo(AllocateAlignedWithHook(n + align - 8, type),
align);
}
}
void* AllocateAlignedNoHook(size_t n);
internal::ArenaImpl impl_;
void* AllocateAlignedWithHook(size_t n, const std::type_info* type);
std::pair<void*, internal::SerialArena::CleanupNode*>
AllocateAlignedWithCleanup(size_t n, const std::type_info* type);
template <typename Type>
friend class internal::GenericTypeHandler;

@ -35,6 +35,7 @@
#include <atomic>
#include <limits>
#include <typeinfo>
#include <google/protobuf/stubs/common.h>
#include <google/protobuf/stubs/logging.h>
@ -48,24 +49,19 @@
namespace google {
namespace protobuf {
struct ArenaOptions;
namespace internal {
inline size_t AlignUpTo8(size_t n) {
inline constexpr size_t AlignUpTo8(size_t n) {
// Align n to next multiple of 8 (from Hacker's Delight, Chapter 3.)
return (n + 7) & static_cast<size_t>(-8);
}
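// Editor's note: since AlignUpTo8 is now constexpr it can feed the
// compile-time constants below (kBlockHeaderSize, kCleanupSize). A few
// spot checks of the rounding identity, for illustration only:
static_assert(AlignUpTo8(0) == 0, "rounds 0 to 0");
static_assert(AlignUpTo8(1) == 8, "rounds 1 up to 8");
static_assert(AlignUpTo8(8) == 8, "multiples of 8 are unchanged");
static_assert(AlignUpTo8(13) == 16, "rounds 13 up to 16");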
using LifecycleIdAtomic = uint64_t;
void PROTOBUF_EXPORT ArenaFree(void* object, size_t size);
// MetricsCollector collects stats for a particular arena.
class PROTOBUF_EXPORT ArenaMetricsCollector {
public:
virtual ~ArenaMetricsCollector();
ArenaMetricsCollector(bool record_allocs) : record_allocs_(record_allocs) {}
// Invoked when the arena is about to be destroyed. This method will
// typically finalize any metric collection and delete the collector.
@ -76,10 +72,6 @@ class PROTOBUF_EXPORT ArenaMetricsCollector {
// space_allocated is the space used by the arena just before the reset.
virtual void OnReset(uint64 space_allocated) = 0;
// Does OnAlloc() need to be called? If false, metric collection overhead
// will be reduced since we will not do extra work per allocation.
virtual bool RecordAllocs() = 0;
// OnAlloc is called when an allocation happens.
// type_info is promised to be static - its lifetime extends to
// match the program's lifetime (it is given by the typeid operator).
@ -88,78 +80,79 @@ class PROTOBUF_EXPORT ArenaMetricsCollector {
// allocations for managing the arena)
virtual void OnAlloc(const std::type_info* allocated_type,
uint64 alloc_size) = 0;
};
class ArenaImpl;
// A thread-unsafe Arena that can only be used within its owning thread.
class PROTOBUF_EXPORT SerialArena {
public:
// Blocks are variable length malloc-ed objects. The following structure
// describes the common header for all blocks.
class PROTOBUF_EXPORT Block {
public:
Block(size_t size, Block* next, bool special, bool user_owned)
: next_and_bits_(reinterpret_cast<uintptr_t>(next) | (special ? 1 : 0) |
(user_owned ? 2 : 0)),
pos_(kBlockHeaderSize),
size_(size) {
GOOGLE_DCHECK_EQ(reinterpret_cast<uintptr_t>(next) & 3, 0u);
}
char* Pointer(size_t n) {
GOOGLE_DCHECK(n <= size_);
return reinterpret_cast<char*>(this) + n;
}
// One of the blocks may be special. This is either a user-supplied
// initial block, or a block we created at startup to hold Options info.
// A special block is not deleted by Reset.
bool special() const { return (next_and_bits_ & 1) != 0; }
// Whether or not this current block is owned by the user.
// Only special blocks can be user_owned.
bool user_owned() const { return (next_and_bits_ & 2) != 0; }
// Does OnAlloc() need to be called? If false, metric collection overhead
// will be reduced since we will not do extra work per allocation.
bool RecordAllocs() { return record_allocs_; }
Block* next() const {
const uintptr_t bottom_bits = 3;
return reinterpret_cast<Block*>(next_and_bits_ & ~bottom_bits);
}
protected:
// This class is destructed by the call to OnDestroy().
~ArenaMetricsCollector() = default;
const bool record_allocs_;
};
void clear_next() {
next_and_bits_ &= 3; // Set next to nullptr, preserve bottom bits.
}
struct AllocationPolicy {
static constexpr size_t kDefaultStartBlockSize = 256;
static constexpr size_t kDefaultMaxBlockSize = 8192;
size_t pos() const { return pos_; }
size_t size() const { return size_; }
void set_pos(size_t pos) { pos_ = pos; }
size_t start_block_size = kDefaultStartBlockSize;
size_t max_block_size = kDefaultMaxBlockSize;
void* (*block_alloc)(size_t) = nullptr;
void (*block_dealloc)(void*, size_t) = nullptr;
ArenaMetricsCollector* metrics_collector = nullptr;
private:
// Holds pointer to next block for this thread + special/user_owned bits.
uintptr_t next_and_bits_;
bool IsDefault() const {
return start_block_size == kDefaultStartBlockSize &&
max_block_size == kDefaultMaxBlockSize && block_alloc == nullptr &&
block_dealloc == nullptr && metrics_collector == nullptr;
}
};
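// Editor's note: a hypothetical sketch of filling in a non-default policy.
// Field names come from the struct above; the construction pattern itself
// is an illustration, not code from this change.
AllocationPolicy CustomPolicy() {
  AllocationPolicy policy;
  policy.start_block_size = 512;   // grow from 512 bytes instead of 256
  policy.max_block_size = 16384;   // cap blocks at 16 KiB instead of 8 KiB
  policy.block_alloc = ::operator new;
  policy.block_dealloc = [](void* p, size_t) { ::operator delete(p); };
  return policy;  // IsDefault() is now false, so the policy is retained
}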
size_t pos_;
size_t size_;
// data follows
// A simple arena allocator. Calls to allocate functions must be properly
// serialized by the caller, hence this class cannot be used as a general
// purpose allocator in a multi-threaded program. It serves as a building block
// for ThreadSafeArena, which provides a thread-safe arena allocator.
//
// This class manages
// 1) Arena bump allocation + owning memory blocks.
// 2) Maintaining a cleanup list.
// It delegates the actual memory allocation back to ThreadSafeArena, which
// holds the block growth policy and the backing memory allocator in use.
class PROTOBUF_EXPORT SerialArena {
public:
struct Memory {
void* ptr;
size_t size;
};
// The allocate/free methods here are a little strange, since SerialArena is
// allocated inside a Block which it also manages. This is to avoid doing
// an extra allocation for the SerialArena itself.
// Node contains the ptr of the object to be cleaned up and the associated
// cleanup function ptr.
struct CleanupNode {
void* elem; // Pointer to the object to be cleaned up.
void (*cleanup)(void*); // Function pointer to the destructor or deleter.
};
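// Editor's note: the cleanup pointer stored in a CleanupNode is typically a
// destructor thunk; internal::arena_destruct_object, referenced from
// arena.h above, has this shape:
// Runs ~T in place; the arena still owns (and later frees) the bytes.
template <typename T>
void arena_destruct_object(void* object) {
  reinterpret_cast<T*>(object)->~T();
}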
// Creates a new SerialArena inside Block* and returns it.
static SerialArena* New(Block* b, void* owner, ArenaImpl* arena);
// Creates a new SerialArena inside mem, using the remaining memory for
// future allocations.
static SerialArena* New(SerialArena::Memory mem, void* owner);
// Frees the SerialArena, returning the memory that was passed in to New.
template <typename Deallocator>
Memory Free(Deallocator deallocator);
void CleanupList();
uint64 SpaceAllocated() const {
return space_allocated_.load(std::memory_order_relaxed);
}
uint64 SpaceUsed() const;
bool HasSpace(size_t n) { return n <= static_cast<size_t>(limit_ - ptr_); }
void* AllocateAligned(size_t n) {
void* AllocateAligned(size_t n, const AllocationPolicy* policy) {
GOOGLE_DCHECK_EQ(internal::AlignUpTo8(n), n); // Must be already aligned.
GOOGLE_DCHECK_GE(limit_, ptr_);
if (PROTOBUF_PREDICT_FALSE(!HasSpace(n))) {
return AllocateAlignedFallback(n);
return AllocateAlignedFallback(n, policy);
}
void* ret = ptr_;
ptr_ += n;
@ -183,51 +176,52 @@ class PROTOBUF_EXPORT SerialArena {
return true;
}
void AddCleanup(void* elem, void (*cleanup)(void*)) {
if (PROTOBUF_PREDICT_FALSE(cleanup_ptr_ == cleanup_limit_)) {
AddCleanupFallback(elem, cleanup);
return;
std::pair<void*, CleanupNode*> AllocateAlignedWithCleanup(
size_t n, const AllocationPolicy* policy) {
if (PROTOBUF_PREDICT_FALSE(!HasSpace(n + kCleanupSize))) {
return AllocateAlignedWithCleanupFallback(n, policy);
}
cleanup_ptr_->elem = elem;
cleanup_ptr_->cleanup = cleanup;
cleanup_ptr_++;
void* ret = ptr_;
ptr_ += n;
limit_ -= kCleanupSize;
#ifdef ADDRESS_SANITIZER
ASAN_UNPOISON_MEMORY_REGION(ret, n);
ASAN_UNPOISON_MEMORY_REGION(limit_, kCleanupSize);
#endif // ADDRESS_SANITIZER
return CreatePair(ret, reinterpret_cast<CleanupNode*>(limit_));
}
void* AllocateAlignedAndAddCleanup(size_t n, void (*cleanup)(void*)) {
void* ret = AllocateAligned(n);
AddCleanup(ret, cleanup);
return ret;
void AddCleanup(void* elem, void (*cleanup)(void*),
const AllocationPolicy* policy) {
auto res = AllocateAlignedWithCleanup(0, policy);
res.second->elem = elem;
res.second->cleanup = cleanup;
}
Block* head() const { return head_; }
void* owner() const { return owner_; }
SerialArena* next() const { return next_; }
void set_next(SerialArena* next) { next_ = next; }
static Block* NewBlock(Block* last_block, size_t min_bytes, ArenaImpl* arena);
private:
// Node contains the ptr of the object to be cleaned up and the associated
// cleanup function ptr.
struct CleanupNode {
void* elem; // Pointer to the object to be cleaned up.
void (*cleanup)(void*); // Function pointer to the destructor or deleter.
};
// Cleanup uses a chunked linked list, to reduce pointer chasing.
struct CleanupChunk {
static size_t SizeOf(size_t i) {
return sizeof(CleanupChunk) + (sizeof(CleanupNode) * (i - 1));
// Blocks are variable length malloc-ed objects. The following structure
// describes the common header for all blocks.
struct Block {
char* Pointer(size_t n) {
GOOGLE_DCHECK(n <= size);
return reinterpret_cast<char*>(this) + n;
}
size_t size; // Total elements in the list.
CleanupChunk* next; // Next node in the list.
CleanupNode nodes[1]; // True length is |size|.
Block* next;
size_t size;
CleanupNode* start;
// data follows
};
ArenaImpl* arena_; // Containing arena.
void* owner_; // &ThreadCache of this thread;
Block* head_; // Head of linked list of blocks.
CleanupChunk* cleanup_; // Head of cleanup list.
SerialArena* next_; // Next SerialArena in this linked list.
size_t space_used_ = 0; // Necessary for metrics.
std::atomic<size_t> space_allocated_;
// Next pointer to allocate from. Always 8-byte aligned. Points inside
// head_ (and head_->pos will always be non-canonical). We keep these
@ -235,17 +229,20 @@ class PROTOBUF_EXPORT SerialArena {
char* ptr_;
char* limit_;
// Next CleanupList members to append to. These point inside cleanup_.
CleanupNode* cleanup_ptr_;
CleanupNode* cleanup_limit_;
// Constructor is private; only New() should be used to create a SerialArena.
inline SerialArena(Block* b, void* owner);
void* AllocateAlignedFallback(size_t n, const AllocationPolicy* policy);
std::pair<void*, CleanupNode*> AllocateAlignedWithCleanupFallback(
size_t n, const AllocationPolicy* policy);
void AllocateNewBlock(size_t n, const AllocationPolicy* policy);
void* AllocateAlignedFallback(size_t n);
void AddCleanupFallback(void* elem, void (*cleanup)(void*));
void CleanupListFallback();
std::pair<void*, CleanupNode*> CreatePair(void* ptr, CleanupNode* node) {
return {ptr, node};
}
public:
static constexpr size_t kBlockHeaderSize =
(sizeof(Block) + 7) & static_cast<size_t>(-8);
static constexpr size_t kBlockHeaderSize = AlignUpTo8(sizeof(Block));
static constexpr size_t kCleanupSize = AlignUpTo8(sizeof(CleanupNode));
};
// This class provides the core Arena memory allocation library. Different
@ -254,42 +251,43 @@ class PROTOBUF_EXPORT SerialArena {
// in turn would be templates, which will/cannot happen. However, by
// separating the memory allocation part from the cruft of the API users
// expect, we can use #ifdef to select the best implementation based on
// hardware / OS.
class PROTOBUF_EXPORT ArenaImpl {
class PROTOBUF_EXPORT ThreadSafeArena {
public:
static const size_t kDefaultStartBlockSize = 256;
static const size_t kDefaultMaxBlockSize = 8192;
ArenaImpl() { Init(false); }
ThreadSafeArena() { Init(false); }
ArenaImpl(char* mem, size_t size) {
GOOGLE_DCHECK_EQ(reinterpret_cast<uintptr_t>(mem) & 7, 0u);
Init(false);
ThreadSafeArena(char* mem, size_t size) { InitializeFrom(mem, size); }
// Ignore initial block if it is too small.
if (mem != nullptr && size >= kBlockHeaderSize + kSerialArenaSize) {
SetInitialBlock(new (mem) SerialArena::Block(size, nullptr, true, true));
explicit ThreadSafeArena(void* mem, size_t size,
const AllocationPolicy& policy) {
if (policy.IsDefault()) {
// Legacy code doesn't use the API above, but provides the initial block
// through ArenaOptions. I suspect most users do not touch the allocation
// policy parameters.
InitializeFrom(mem, size);
} else {
auto collector = policy.metrics_collector;
bool record_allocs = collector && collector->RecordAllocs();
InitializeWithPolicy(mem, size, record_allocs, policy);
}
}
explicit ArenaImpl(const ArenaOptions& options);
// Destructor deletes all owned heap allocated objects, and destructs objects
// that have non-trivial destructors, except for proto2 message objects whose
// destructors can be skipped. Also, frees all blocks except the initial block
// if it was passed in.
~ArenaImpl();
~ThreadSafeArena();
uint64 Reset();
uint64 SpaceAllocated() const;
uint64 SpaceUsed() const;
void* AllocateAligned(size_t n) {
void* AllocateAligned(size_t n, const std::type_info* type) {
SerialArena* arena;
if (PROTOBUF_PREDICT_TRUE(GetSerialArenaFast(&arena))) {
return arena->AllocateAligned(n);
if (PROTOBUF_PREDICT_TRUE(GetSerialArenaFast(tag_and_id_, &arena))) {
return arena->AllocateAligned(n, AllocPolicy());
} else {
return AllocateAlignedFallback(n);
return AllocateAlignedFallback(n, type);
}
}
@ -300,90 +298,74 @@ class PROTOBUF_EXPORT ArenaImpl {
// code for the happy path.
PROTOBUF_ALWAYS_INLINE bool MaybeAllocateAligned(size_t n, void** out) {
SerialArena* a;
if (PROTOBUF_PREDICT_TRUE(GetSerialArenaFromThreadCache(&a))) {
if (PROTOBUF_PREDICT_TRUE(GetSerialArenaFromThreadCache(tag_and_id_, &a))) {
return a->MaybeAllocateAligned(n, out);
}
return false;
}
void* AllocateAlignedAndAddCleanup(size_t n, void (*cleanup)(void*));
std::pair<void*, SerialArena::CleanupNode*> AllocateAlignedWithCleanup(
size_t n, const std::type_info* type);
// Add object pointer and cleanup function pointer to the list.
void AddCleanup(void* elem, void (*cleanup)(void*));
inline void RecordAlloc(const std::type_info* allocated_type,
size_t n) const {
if (PROTOBUF_PREDICT_FALSE(record_allocs())) {
options_->metrics_collector->OnAlloc(allocated_type, n);
}
}
private:
// Unique for each arena. Changes on Reset().
uint64 tag_and_id_;
// The LSB of tag_and_id_ indicates if allocs in this arena are recorded.
enum { kRecordAllocs = 1 };
std::pair<void*, size_t> NewBuffer(size_t last_size, size_t min_bytes);
intptr_t alloc_policy_ = 0; // Tagged pointer to AllocPolicy.
// The LSB of alloc_policy_ indicates if the user owns the initial block.
enum { kUserOwnedInitialBlock = 1 };
private:
// Pointer to a linked list of SerialArena.
std::atomic<SerialArena*> threads_;
std::atomic<SerialArena*> hint_; // Fast thread-local block access
std::atomic<size_t> space_allocated_; // Total size of all allocated blocks.
// Unique for each arena. Changes on Reset().
// Least-significant-bit is 1 iff allocations should be recorded.
uint64 lifecycle_id_;
struct Options {
size_t start_block_size;
size_t max_block_size;
void* (*block_alloc)(size_t);
void (*block_dealloc)(void*, size_t);
ArenaMetricsCollector* metrics_collector;
};
std::atomic<SerialArena*> hint_; // Fast thread-local block access
Options* options_ = nullptr;
void* AllocateAlignedFallback(size_t n);
void* AllocateAlignedAndAddCleanupFallback(size_t n, void (*cleanup)(void*));
const AllocationPolicy* AllocPolicy() const {
return reinterpret_cast<const AllocationPolicy*>(alloc_policy_ & -8);
}
void InitializeFrom(void* mem, size_t size);
void InitializeWithPolicy(void* mem, size_t size, bool record_allocs,
AllocationPolicy policy);
void* AllocateAlignedFallback(size_t n, const std::type_info* type);
std::pair<void*, SerialArena::CleanupNode*>
AllocateAlignedWithCleanupFallback(size_t n, const std::type_info* type);
void AddCleanupFallback(void* elem, void (*cleanup)(void*));
void Init(bool record_allocs);
void SetInitialBlock(
SerialArena::Block* block); // Can be called right after Init()
void SetInitialBlock(void* mem, size_t size);
// Return true iff allocations should be recorded in a metrics collector.
inline bool record_allocs() const { return lifecycle_id_ & 1; }
// Delete or Destruct all objects owned by the arena.
void CleanupList();
// Invoke fn(b) for every Block* b.
template <typename Functor>
void PerBlock(Functor fn) {
// By omitting an Acquire barrier we ensure that any user code that doesn't
// properly synchronize Reset() or the destructor will throw a TSAN warning.
SerialArena* serial = threads_.load(std::memory_order_relaxed);
while (serial) {
// fn() may delete blocks and arenas, so fetch next pointers before fn();
SerialArena* cur = serial;
serial = serial->next();
for (auto* block = cur->head(); block != nullptr;) {
auto* b = block;
block = b->next();
fn(b);
}
}
inline bool ShouldRecordAlloc() const { return tag_and_id_ & kRecordAllocs; }
inline uint64 LifeCycleId() const {
return tag_and_id_ & (-kRecordAllocs - 1);
}
// Delete or Destruct all objects owned by the arena.
void CleanupList();
inline void RecordAlloc(const std::type_info* allocated_type,
size_t n) const {
AllocPolicy()->metrics_collector->OnAlloc(allocated_type, n);
}
inline void CacheSerialArena(SerialArena* serial) {
thread_cache().last_serial_arena = serial;
thread_cache().last_lifecycle_id_seen = lifecycle_id_;
thread_cache().last_lifecycle_id_seen = LifeCycleId();
// TODO(haberman): evaluate whether we would gain efficiency by getting rid
// of hint_. It's the only write we do to ArenaImpl in the allocation path,
// which will dirty the cache line.
// of hint_. It's the only write we do to ThreadSafeArena in the allocation
// path, which will dirty the cache line.
hint_.store(serial, std::memory_order_release);
}
PROTOBUF_ALWAYS_INLINE bool GetSerialArenaFast(SerialArena** arena) {
if (GetSerialArenaFromThreadCache(arena)) return true;
PROTOBUF_ALWAYS_INLINE bool GetSerialArenaFast(uint64 lifecycle_id,
SerialArena** arena) {
if (GetSerialArenaFromThreadCache(lifecycle_id, arena)) return true;
if (lifecycle_id & kRecordAllocs) return false;
// Check whether we own the last accessed SerialArena on this arena. This
// fast path optimizes the case where a single thread uses multiple arenas.
@ -397,12 +379,12 @@ class PROTOBUF_EXPORT ArenaImpl {
}
PROTOBUF_ALWAYS_INLINE bool GetSerialArenaFromThreadCache(
SerialArena** arena) {
uint64 lifecycle_id, SerialArena** arena) {
// If this thread already owns a block in this arena then try to use that.
// This fast path optimizes the case where multiple threads allocate from
// the same arena.
ThreadCache* tc = &thread_cache();
if (PROTOBUF_PREDICT_TRUE(tc->last_lifecycle_id_seen == lifecycle_id_)) {
if (PROTOBUF_PREDICT_TRUE(tc->last_lifecycle_id_seen == lifecycle_id)) {
*arena = tc->last_serial_arena;
return true;
}
@ -410,6 +392,20 @@ class PROTOBUF_EXPORT ArenaImpl {
}
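// Editor's note: a sketch of the thread-cache structure this fast path
// reads. Member names match the uses above; the thread_local plumbing is
// simplified relative to the real header.
struct ThreadCache {
  // Arena id observed on this thread's last allocation. If it matches the
  // querying arena's lifecycle id, last_serial_arena belongs to that arena
  // and can be used without any atomic operations.
  uint64 last_lifecycle_id_seen;
  SerialArena* last_serial_arena;
};
static thread_local ThreadCache thread_cache_;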
SerialArena* GetSerialArenaFallback(void* me);
template <typename Functor>
void PerSerialArena(Functor fn) {
// By omitting an Acquire barrier we ensure that any user code that doesn't
// properly synchronize Reset() or the destructor will trigger a TSAN warning.
SerialArena* serial = threads_.load(std::memory_order_relaxed);
for (; serial; serial = serial->next()) fn(serial);
}
// Releases all memory except the first block, which it returns. The first
// block might be owned by the user and thus needs some extra checks before
// being deleted.
SerialArena::Memory Free(size_t* space_allocated);
#ifdef _MSC_VER
#pragma warning(disable : 4324)
#endif
@ -462,11 +458,11 @@ class PROTOBUF_EXPORT ArenaImpl {
static ThreadCache& thread_cache() { return thread_cache_; }
#endif
GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ArenaImpl);
GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ThreadSafeArena);
// All protos have pointers back to the arena hence Arena must have
// pointer stability.
ArenaImpl(ArenaImpl&&) = delete;
ArenaImpl& operator=(ArenaImpl&&) = delete;
ThreadSafeArena(ThreadSafeArena&&) = delete;
ThreadSafeArena& operator=(ThreadSafeArena&&) = delete;
public:
// kBlockHeaderSize is sizeof(Block), aligned up to the nearest multiple of 8
@ -474,8 +470,6 @@ class PROTOBUF_EXPORT ArenaImpl {
static constexpr size_t kBlockHeaderSize = SerialArena::kBlockHeaderSize;
static constexpr size_t kSerialArenaSize =
(sizeof(SerialArena) + 7) & static_cast<size_t>(-8);
static constexpr size_t kOptionsSize =
(sizeof(Options) + 7) & static_cast<size_t>(-8);
static_assert(kBlockHeaderSize % 8 == 0,
"kBlockHeaderSize must be a multiple of 8.");
static_assert(kSerialArenaSize % 8 == 0,

@ -1331,7 +1331,7 @@ void ClearHookCounts() {
} // namespace
// A helper utility class that handles arena callbacks.
class ArenaOptionsTestFriend : public internal::ArenaMetricsCollector {
class ArenaOptionsTestFriend final : public internal::ArenaMetricsCollector {
public:
static internal::ArenaMetricsCollector* NewWithAllocs() {
return new ArenaOptionsTestFriend(true);
@ -1352,7 +1352,7 @@ class ArenaOptionsTestFriend : public internal::ArenaMetricsCollector {
}
explicit ArenaOptionsTestFriend(bool record_allocs)
: record_allocs_(record_allocs) {
: ArenaMetricsCollector(record_allocs) {
++hooks_num_init;
}
void OnDestroy(uint64 space_allocated) override {
@ -1360,14 +1360,10 @@ class ArenaOptionsTestFriend : public internal::ArenaMetricsCollector {
delete this;
}
void OnReset(uint64 space_allocated) override { ++hooks_num_reset; }
bool RecordAllocs() override { return record_allocs_; }
void OnAlloc(const std::type_info* allocated_type,
uint64 alloc_size) override {
++hooks_num_allocations;
}
private:
bool record_allocs_;
};
// Test the hooks are correctly called.
@ -1408,3 +1404,5 @@ TEST(ArenaTest, ArenaHooksWhenAllocationsNotNeeded) {
} // namespace protobuf
} // namespace google
#include <google/protobuf/port_undef.inc>

@ -252,3 +252,5 @@ void ArenaStringPtr::ClearToDefault(const LazyString& default_value,
} // namespace internal
} // namespace protobuf
} // namespace google
#include <google/protobuf/port_undef.inc>

@ -173,3 +173,5 @@ TEST(ArenaStringPtrTest, ArenaStringPtrOnArenaNoSSO) {
} // namespace protobuf
} // namespace google
#include <google/protobuf/port_undef.inc>

@ -2018,130 +2018,76 @@ CommandLineInterface::InterpretArgument(const std::string& name,
void CommandLineInterface::PrintHelpText() {
// Sorry for indentation here; line wrapping would be uglier.
std::cout
<<
"Usage: " << executable_name_
<< " [OPTION] PROTO_FILES\n"
"Parse PROTO_FILES and generate output based on the options given:\n"
" -IPATH, --proto_path=PATH Specify the directory in which to "
"search for\n"
" imports. May be specified multiple "
"times;\n"
" directories will be searched in order. "
" If not\n"
" given, the current working directory "
"is used.\n"
" If not found in any of the these "
"directories,\n"
" the --descriptor_set_in descriptors "
"will be\n"
" checked for required proto file.\n"
" --version Show version info and exit.\n"
" -h, --help Show this text and exit.\n"
" --encode=MESSAGE_TYPE Read a text-format message of the "
"given type\n"
" from standard input and write it in "
"binary\n"
" to standard output. The message type "
"must\n"
" be defined in PROTO_FILES or their "
"imports.\n"
" --deterministic_output When using --encode, ensure map fields "
"are\n"
" deterministically ordered. Note that"
"this order is not\n"
" canonical, and changes across builds"
"or releases of protoc.\n"
" --decode=MESSAGE_TYPE Read a binary message of the given "
"type from\n"
" standard input and write it in text "
"format\n"
" to standard output. The message type "
"must\n"
" be defined in PROTO_FILES or their "
"imports.\n"
" --decode_raw Read an arbitrary protocol message "
"from\n"
" standard input and write the raw "
"tag/value\n"
" pairs in text format to standard "
"output. No\n"
" PROTO_FILES should be given when using "
"this\n"
" flag.\n"
" --descriptor_set_in=FILES Specifies a delimited list of FILES\n"
" each containing a FileDescriptorSet "
"(a\n"
" protocol buffer defined in "
"descriptor.proto).\n"
" The FileDescriptor for each of the "
"PROTO_FILES\n"
" provided will be loaded from these\n"
" FileDescriptorSets. If a "
"FileDescriptor\n"
" appears multiple times, the first "
"occurrence\n"
" will be used.\n"
" -oFILE, Writes a FileDescriptorSet (a protocol "
"buffer,\n"
" --descriptor_set_out=FILE defined in descriptor.proto) "
"containing all of\n"
" the input files to FILE.\n"
" --include_imports When using --descriptor_set_out, also "
"include\n"
" all dependencies of the input files in "
"the\n"
" set, so that the set is "
"self-contained.\n"
" --include_source_info When using --descriptor_set_out, do "
"not strip\n"
" SourceCodeInfo from the "
"FileDescriptorProto.\n"
" This results in vastly larger "
"descriptors that\n"
" include information about the "
"original\n"
" location of each decl in the source "
"file as\n"
" well as surrounding comments.\n"
" --dependency_out=FILE Write a dependency output file in the "
"format\n"
" expected by make. This writes the "
"transitive\n"
" set of input file paths to FILE\n"
" --error_format=FORMAT Set the format in which to print "
"errors.\n"
" FORMAT may be 'gcc' (the default) or "
"'msvs'\n"
" (Microsoft Visual Studio format).\n"
" --print_free_field_numbers Print the free field numbers of the "
"messages\n"
" defined in the given proto files. "
"Groups share\n"
" the same field number space with the "
"parent \n"
" message. Extension ranges are counted "
"as \n"
" occupied fields numbers.\n"
<< std::endl;
std::cout << "Usage: " << executable_name_ << " [OPTION] PROTO_FILES";
std::cout << R"(
Parse PROTO_FILES and generate output based on the options given:
-IPATH, --proto_path=PATH Specify the directory in which to search for
imports. May be specified multiple times;
directories will be searched in order. If not
given, the current working directory is used.
If not found in any of these directories,
the --descriptor_set_in descriptors will be
checked for required proto file.
--version Show version info and exit.
-h, --help Show this text and exit.
--encode=MESSAGE_TYPE Read a text-format message of the given type
from standard input and write it in binary
to standard output. The message type must
be defined in PROTO_FILES or their imports.
--deterministic_output When using --encode, ensure map fields are
deterministically ordered. Note that this order
is not canonical, and changes across builds or
releases of protoc.
--decode=MESSAGE_TYPE Read a binary message of the given type from
standard input and write it in text format
to standard output. The message type must
be defined in PROTO_FILES or their imports.
--decode_raw Read an arbitrary protocol message from
standard input and write the raw tag/value
pairs in text format to standard output. No
PROTO_FILES should be given when using this
flag.
--descriptor_set_in=FILES Specifies a delimited list of FILES
each containing a FileDescriptorSet (a
protocol buffer defined in descriptor.proto).
The FileDescriptor for each of the PROTO_FILES
provided will be loaded from these
FileDescriptorSets. If a FileDescriptor
appears multiple times, the first occurrence
will be used.
-oFILE, Writes a FileDescriptorSet (a protocol buffer,
--descriptor_set_out=FILE defined in descriptor.proto) containing all of
the input files to FILE.
--include_imports When using --descriptor_set_out, also include
all dependencies of the input files in the
set, so that the set is self-contained.
--include_source_info When using --descriptor_set_out, do not strip
SourceCodeInfo from the FileDescriptorProto.
This results in vastly larger descriptors that
include information about the original
location of each decl in the source file as
well as surrounding comments.
--dependency_out=FILE Write a dependency output file in the format
expected by make. This writes the transitive
set of input file paths to FILE
--error_format=FORMAT Set the format in which to print errors.
FORMAT may be 'gcc' (the default) or 'msvs'
(Microsoft Visual Studio format).
--print_free_field_numbers Print the free field numbers of the messages
defined in the given proto files. Groups share
the same field number space with the parent
message. Extension ranges are counted as
occupied field numbers.)";
if (!plugin_prefix_.empty()) {
std::cout
<< " --plugin=EXECUTABLE Specifies a plugin executable to "
"use.\n"
" Normally, protoc searches the PATH "
"for\n"
" plugins, but you may specify "
"additional\n"
" executables not in the path using "
"this flag.\n"
" Additionally, EXECUTABLE may be of "
"the form\n"
" NAME=PATH, in which case the given "
"plugin name\n"
" is mapped to the given executable "
"even if\n"
" the executable's own name differs."
<< std::endl;
std::cout << R"(
--plugin=EXECUTABLE Specifies a plugin executable to use.
Normally, protoc searches the PATH for
plugins, but you may specify additional
executables not in the path using this flag.
Additionally, EXECUTABLE may be of the form
NAME=PATH, in which case the given plugin name
is mapped to the given executable even if
the executable's own name differs.)";
}
for (GeneratorMap::iterator iter = generators_by_flag_name_.begin();
@ -2149,35 +2095,26 @@ void CommandLineInterface::PrintHelpText() {
// FIXME(kenton): If the text is long enough it will wrap, which is ugly,
// but fixing this nicely (e.g. splitting on spaces) is probably more
// trouble than it's worth.
std::cout << " " << iter->first << "=OUT_DIR "
std::cout << std::endl
<< " " << iter->first << "=OUT_DIR "
<< std::string(19 - iter->first.size(),
' ') // Spaces for alignment.
<< iter->second.help_text << std::endl;
}
std::cout << " @<filename> Read options and filenames from "
"file. If a\n"
" relative file path is specified, "
"the file\n"
" will be searched in the working "
"directory.\n"
" The --proto_path option will not "
"affect how\n"
" this argument file is searched. "
"Content of\n"
" the file will be expanded in the "
"position of\n"
" @<filename> as in the argument "
"list. Note\n"
" that shell expansion is not "
"applied to the\n"
" content of the file (i.e., you "
"cannot use\n"
" quotes, wildcards, escapes, "
"commands, etc.).\n"
" Each line corresponds to a "
"single argument,\n"
" even if it contains spaces."
<< std::endl;
<< iter->second.help_text;
}
std::cout << R"(
@<filename> Read options and filenames from file. If a
relative file path is specified, the file
will be searched in the working directory.
The --proto_path option will not affect how
this argument file is searched. Content of
the file will be expanded in the position of
@<filename> as in the argument list. Note
that shell expansion is not applied to the
content of the file (i.e., you cannot use
quotes, wildcards, escapes, commands, etc.).
Each line corresponds to a single argument,
even if it contains spaces.)";
std::cout << std::endl;
}
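// Editor's note: the help text above now uses C++11 raw string literals
// instead of dozens of manually wrapped, concatenated fragments. A minimal
// illustration of the syntax:
// Everything between R"( and )" is taken verbatim - newlines, quotes and
// backslashes included - so multi-line text prints exactly as written.
const char* usage = R"(
Usage: tool [OPTION] FILES
  -h, --help    Show this text and exit.)";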
bool CommandLineInterface::EnforceProto3OptionalSupport(

@ -99,8 +99,7 @@ ExtensionGenerator::ExtensionGenerator(const FieldDescriptor* descriptor,
std::string scope =
IsScoped() ? ClassName(descriptor_->extension_scope(), false) + "::" : "";
variables_["scope"] = scope;
std::string scoped_name = scope + ResolveKeyword(name);
variables_["scoped_name"] = scoped_name;
variables_["scoped_name"] = ExtensionName(descriptor_);
variables_["number"] = StrCat(descriptor_->number());
}
@ -175,6 +174,7 @@ void ExtensionGenerator::GenerateDefinition(io::Printer* printer) {
}
format(
"PROTOBUF_ATTRIBUTE_INIT_PRIORITY "
"::$proto_ns$::internal::ExtensionIdentifier< $extendee$,\n"
" ::$proto_ns$::internal::$type_traits$, $field_type$, $packed$ >\n"
" $scoped_name$($constant_name$, $1$);\n",

@ -429,6 +429,12 @@ void FileGenerator::GenerateSourceIncludes(io::Printer* printer) {
format("// @@protoc_insertion_point(includes)\n");
IncludeFile("net/proto2/public/port_def.inc", printer);
// For MSVC builds, we use #pragma init_seg to move the initialization of our
// libraries to happen before the user code.
// This works around the fact that MSVC does not perform constant
// initialization when required by the standard.
format("\nPROTOBUF_PRAGMA_INIT_SEG\n");
}
void FileGenerator::GenerateSourceDefaultInstance(int idx,
@ -916,8 +922,9 @@ void FileGenerator::GenerateReflectionInitializationCode(io::Printer* printer) {
if (file_->name() != "net/proto2/proto/descriptor.proto") {
format(
"// Force running AddDescriptors() at dynamic initialization time.\n"
"static bool $1$ = (static_cast<void>("
"::$proto_ns$::internal::AddDescriptors(&$desc_table$)), true);\n",
"PROTOBUF_ATTRIBUTE_INIT_PRIORITY "
"static ::$proto_ns$::internal::AddDescriptorsRunner "
"$1$(&$desc_table$);\n",
UniqueName("dynamic_init_dummy", file_, options_));
}
}
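// Editor's note: the generated registration switches from a dummy bool
// initialized via a comma expression to a small runner object whose
// constructor performs the call (its definition appears in the
// generated_message_reflection.cc hunk later in this change).
// Registration-object idiom: a namespace-scope instance runs
// AddDescriptors(table) during dynamic initialization, and
// PROTOBUF_ATTRIBUTE_INIT_PRIORITY orders it ahead of user initializers.
struct AddDescriptorsRunner {
  explicit AddDescriptorsRunner(const DescriptorTable* table) {
    AddDescriptors(table);
  }
};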

@ -362,10 +362,16 @@ std::string QualifiedClassName(const EnumDescriptor* d) {
return QualifiedClassName(d, Options());
}
std::string ExtensionName(const FieldDescriptor* d) {
if (const Descriptor* scope = d->extension_scope())
return StrCat(ClassName(scope), "::", ResolveKeyword(d->name()));
return ResolveKeyword(d->name());
}
std::string QualifiedExtensionName(const FieldDescriptor* d,
const Options& options) {
GOOGLE_DCHECK(d->is_extension());
return QualifiedFileLevelSymbol(d->file(), FieldName(d), options);
return QualifiedFileLevelSymbol(d->file(), ExtensionName(d), options);
}
std::string QualifiedExtensionName(const FieldDescriptor* d) {

@ -134,6 +134,10 @@ inline std::string ClassName(const EnumDescriptor* descriptor, bool qualified) {
: ClassName(descriptor);
}
// Returns the extension name prefixed with the class name if nested but without
// the package name.
std::string ExtensionName(const FieldDescriptor* d);
std::string QualifiedExtensionName(const FieldDescriptor* d,
const Options& options);
std::string QualifiedExtensionName(const FieldDescriptor* d);

@ -308,12 +308,7 @@ bool ShouldMarkIsInitializedAsFinal(const Descriptor* descriptor,
bool ShouldMarkNewAsFinal(const Descriptor* descriptor,
const Options& options) {
static std::set<std::string> exclusions{
};
const std::string name = ClassName(descriptor, true);
return exclusions.find(name) == exclusions.end() ||
options.opensource_runtime;
return true;
}
// Returns true to make the message serialize in order, decided by the following
@ -1232,13 +1227,13 @@ void MessageGenerator::GenerateClassDefinition(io::Printer* printer) {
"\n");
if (HasDescriptorMethods(descriptor_->file(), options_)) {
format(
"void PackFrom(const ::$proto_ns$::Message& message) {\n"
" _any_metadata_.PackFrom(message);\n"
"bool PackFrom(const ::$proto_ns$::Message& message) {\n"
" return _any_metadata_.PackFrom(message);\n"
"}\n"
"void PackFrom(const ::$proto_ns$::Message& message,\n"
"bool PackFrom(const ::$proto_ns$::Message& message,\n"
" ::PROTOBUF_NAMESPACE_ID::ConstStringParam "
"type_url_prefix) {\n"
" _any_metadata_.PackFrom(message, type_url_prefix);\n"
" return _any_metadata_.PackFrom(message, type_url_prefix);\n"
"}\n"
"bool UnpackTo(::$proto_ns$::Message* message) const {\n"
" return _any_metadata_.UnpackTo(message);\n"
@ -1250,16 +1245,16 @@ void MessageGenerator::GenerateClassDefinition(io::Printer* printer) {
"template <typename T, class = typename std::enable_if<"
"!std::is_convertible<T, const ::$proto_ns$::Message&>"
"::value>::type>\n"
"void PackFrom(const T& message) {\n"
" _any_metadata_.PackFrom<T>(message);\n"
"bool PackFrom(const T& message) {\n"
" return _any_metadata_.PackFrom<T>(message);\n"
"}\n"
"template <typename T, class = typename std::enable_if<"
"!std::is_convertible<T, const ::$proto_ns$::Message&>"
"::value>::type>\n"
"void PackFrom(const T& message,\n"
"bool PackFrom(const T& message,\n"
" ::PROTOBUF_NAMESPACE_ID::ConstStringParam "
"type_url_prefix) {\n"
" _any_metadata_.PackFrom<T>(message, type_url_prefix);"
" return _any_metadata_.PackFrom<T>(message, type_url_prefix);"
"}\n"
"template <typename T, class = typename std::enable_if<"
"!std::is_convertible<T, const ::$proto_ns$::Message&>"
@ -1270,14 +1265,14 @@ void MessageGenerator::GenerateClassDefinition(io::Printer* printer) {
} else {
format(
"template <typename T>\n"
"void PackFrom(const T& message) {\n"
" _any_metadata_.PackFrom(message);\n"
"bool PackFrom(const T& message) {\n"
" return _any_metadata_.PackFrom(message);\n"
"}\n"
"template <typename T>\n"
"void PackFrom(const T& message,\n"
"bool PackFrom(const T& message,\n"
" ::PROTOBUF_NAMESPACE_ID::ConstStringParam "
"type_url_prefix) {\n"
" _any_metadata_.PackFrom(message, type_url_prefix);\n"
" return _any_metadata_.PackFrom(message, type_url_prefix);\n"
"}\n"
"template <typename T>\n"
"bool UnpackTo(T* message) const {\n"
@ -2464,7 +2459,8 @@ void MessageGenerator::GenerateConstructorBody(io::Printer* printer,
} else {
pod_template =
"::memset(reinterpret_cast<char*>(this) + static_cast<size_t>(\n"
" reinterpret_cast<char*>(&$first$_) - reinterpret_cast<char*>(this)),\n"
" reinterpret_cast<char*>(&$first$_) - "
"reinterpret_cast<char*>(this)),\n"
" 0, static_cast<size_t>(reinterpret_cast<char*>(&$last$_) -\n"
" reinterpret_cast<char*>(&$first$_)) + sizeof($last$_));\n";
}
@ -3603,29 +3599,17 @@ void MessageGenerator::GenerateSerializeWithCachedSizesBodyShuffled(
"_weak_field_map_);\n");
}
format(
"static const int kStart = GetInvariantPerBuild($1$UL) % $2$;\n"
"bool first_pass = true;\n"
"for (int i = kStart; i != kStart || first_pass; i = ((i + $3$) % $2$)) "
"{\n",
0,
num_fields, kLargePrime);
format("for (int i = $1$; i >= 0; i-- ) {\n", num_fields - 1);
format.Indent();
format("switch(i) {\n");
format.Indent();
bool first_pass_set = false;
int index = 0;
for (const auto* f : ordered_fields) {
format("case $1$: {\n", index++);
format.Indent();
if (!first_pass_set) {
first_pass_set = true;
format("first_pass = false;\n");
}
GenerateSerializeOneField(printer, f, -1);
format("break;\n");
@ -3637,11 +3621,6 @@ void MessageGenerator::GenerateSerializeWithCachedSizesBodyShuffled(
format("case $1$: {\n", index++);
format.Indent();
if (!first_pass_set) {
first_pass_set = true;
format("first_pass = false;\n");
}
GenerateSerializeOneExtensionRange(printer, r);
format("break;\n");

@ -89,6 +89,17 @@ const std::unordered_set<std::string>* kReservedNames =
"transient", "try", "void", "volatile", "while",
});
// Names that should be avoided as field names in Kotlin.
// All Kotlin hard keywords are in this list.
const std::unordered_set<std::string>* kKotlinForbiddenNames =
new std::unordered_set<std::string>({
"as", "as?", "break", "class", "continue", "do", "else",
"false", "for", "fun", "if", "in", "!in", "interface",
"is", "!is", "null", "object", "package", "return", "super",
"this", "throw", "true", "try", "typealias", "typeof", "val",
"var", "when", "while",
});
bool IsForbidden(const std::string& field_name) {
for (int i = 0; i < GOOGLE_ARRAYSIZE(kForbiddenWordList); ++i) {
if (field_name == kForbiddenWordList[i]) {
@ -215,6 +226,7 @@ std::string UnderscoresToCamelCaseCheckReserved(const FieldDescriptor* field) {
return name;
}
std::string UniqueFileScopeIdentifier(const Descriptor* descriptor) {
return "static_" + StringReplace(descriptor->full_name(), ".", "_", true);
}

@ -51,6 +51,7 @@ namespace java {
extern const char kThickSeparator[];
extern const char kThinSeparator[];
// If annotation_file is non-empty, prints a javax.annotation.Generated
// annotation to the given Printer. annotation_file will be referenced in the
// annotation's comments field. delimiter should be the Printer's delimiter

@ -68,6 +68,7 @@ std::string ClassNameWithoutPackage(const Descriptor* descriptor,
return StripPackageName(descriptor->full_name(), descriptor->file());
}
// Get the name of an enum's Java class without package name prefix.
std::string ClassNameWithoutPackage(const EnumDescriptor* descriptor,
bool immutable) {

@ -3162,8 +3162,7 @@ void Generator::GenerateClassDeserializeBinaryField(
"fieldtype",
JSFieldTypeAnnotation(options, field, false, true,
/* singular_if_not_packed */ false, BYTES_U8),
"reader",
JSBinaryReaderMethodType(field));
"reader", JSBinaryReaderMethodType(field));
} else {
printer->Print(
" var value = /** @type {$fieldtype$} */ "
@ -3179,7 +3178,8 @@ void Generator::GenerateClassDeserializeBinaryField(
printer->Print(
" for (var i = 0; i < values.length; i++) {\n"
" msg.add$name$(values[i]);\n"
" }\n", "name",
" }\n",
"name",
JSGetterName(options, field, BYTES_DEFAULT, /* drop_list = */ true));
} else if (field->is_repeated()) {
printer->Print(

@ -14,6 +14,8 @@
#include <google/protobuf/wire_format.h>
// @@protoc_insertion_point(includes)
#include <google/protobuf/port_def.inc>
PROTOBUF_PRAGMA_INIT_SEG
extern PROTOBUF_INTERNAL_EXPORT_google_2fprotobuf_2fdescriptor_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<6> scc_info_FileDescriptorProto_google_2fprotobuf_2fdescriptor_2eproto;
extern PROTOBUF_INTERNAL_EXPORT_google_2fprotobuf_2fdescriptor_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<1> scc_info_GeneratedCodeInfo_google_2fprotobuf_2fdescriptor_2eproto;
extern PROTOBUF_INTERNAL_EXPORT_google_2fprotobuf_2fcompiler_2fplugin_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<1> scc_info_CodeGeneratorResponse_File_google_2fprotobuf_2fcompiler_2fplugin_2eproto;
@ -204,7 +206,7 @@ const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_google
};
// Force running AddDescriptors() at dynamic initialization time.
static bool dynamic_init_dummy_google_2fprotobuf_2fcompiler_2fplugin_2eproto = (static_cast<void>(::PROTOBUF_NAMESPACE_ID::internal::AddDescriptors(&descriptor_table_google_2fprotobuf_2fcompiler_2fplugin_2eproto)), true);
PROTOBUF_ATTRIBUTE_INIT_PRIORITY static ::PROTOBUF_NAMESPACE_ID::internal::AddDescriptorsRunner dynamic_init_dummy_google_2fprotobuf_2fcompiler_2fplugin_2eproto(&descriptor_table_google_2fprotobuf_2fcompiler_2fplugin_2eproto);
PROTOBUF_NAMESPACE_OPEN
namespace compiler {
const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* CodeGeneratorResponse_Feature_descriptor() {

@ -855,15 +855,15 @@ void DescriptorPool::Tables::RollbackToLastCheckpoint() {
GOOGLE_DCHECK(!checkpoints_.empty());
const CheckPoint& checkpoint = checkpoints_.back();
for (int i = checkpoint.pending_symbols_before_checkpoint;
for (size_t i = checkpoint.pending_symbols_before_checkpoint;
i < symbols_after_checkpoint_.size(); i++) {
symbols_by_name_.erase(symbols_after_checkpoint_[i]);
}
for (int i = checkpoint.pending_files_before_checkpoint;
for (size_t i = checkpoint.pending_files_before_checkpoint;
i < files_after_checkpoint_.size(); i++) {
files_by_name_.erase(files_after_checkpoint_[i]);
}
for (int i = checkpoint.pending_extensions_before_checkpoint;
for (size_t i = checkpoint.pending_extensions_before_checkpoint;
i < extensions_after_checkpoint_.size(); i++) {
extensions_.erase(extensions_after_checkpoint_[i]);
}
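// Editor's note: these loops switch their index type from int to size_t to
// match the unsigned container sizes; the pattern in isolation:
#include <string>
#include <vector>
// Comparing a signed int against size() (a size_t) triggers -Wsign-compare
// and can misbehave once sizes exceed INT_MAX; an unsigned index avoids both.
void Visit(const std::vector<std::string>& files) {
  for (size_t i = 0; i < files.size(); ++i) {
    // ... use files[i]
  }
}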
@ -4203,13 +4203,13 @@ void DescriptorBuilder::AllocateOptionsImpl(
void DescriptorBuilder::AddRecursiveImportError(
const FileDescriptorProto& proto, int from_here) {
std::string error_message("File recursively imports itself: ");
for (int i = from_here; i < tables_->pending_files_.size(); i++) {
for (size_t i = from_here; i < tables_->pending_files_.size(); i++) {
error_message.append(tables_->pending_files_[i]);
error_message.append(" -> ");
}
error_message.append(proto.name());
if (from_here < tables_->pending_files_.size() - 1) {
if (static_cast<size_t>(from_here) < tables_->pending_files_.size() - 1) {
AddError(tables_->pending_files_[from_here + 1], proto,
DescriptorPool::ErrorCollector::IMPORT, error_message);
} else {
@ -4282,7 +4282,7 @@ const FileDescriptor* DescriptorBuilder::BuildFile(
// mid-file, but that's pretty ugly, and I'm pretty sure there are
// some languages out there that do not allow recursive dependencies
// at all.
for (int i = 0; i < tables_->pending_files_.size(); i++) {
for (size_t i = 0; i < tables_->pending_files_.size(); i++) {
if (tables_->pending_files_[i] == proto.name()) {
AddRecursiveImportError(proto, i);
return nullptr;
@ -6702,10 +6702,10 @@ void DescriptorBuilder::OptionInterpreter::UpdateSourceCodeInfo(
if (matched) {
// see if this location is in the range to remove
bool loc_matches = true;
if (loc->path_size() < pathv.size()) {
if (loc->path_size() < static_cast<int64>(pathv.size())) {
loc_matches = false;
} else {
for (int j = 0; j < pathv.size(); j++) {
for (size_t j = 0; j < pathv.size(); j++) {
if (loc->path(j) != pathv[j]) {
loc_matches = false;
break;
@ -7444,3 +7444,5 @@ void LazyDescriptor::OnceInternal() {
} // namespace protobuf
} // namespace google
#include <google/protobuf/port_undef.inc>

@ -1491,7 +1491,8 @@ class PROTOBUF_EXPORT FileDescriptor {
Syntax syntax() const;
static const char* SyntaxName(Syntax syntax);
// Find a top-level message type by name. Returns nullptr if not found.
// Find a top-level message type by name (not full_name). Returns nullptr if
// not found.
const Descriptor* FindMessageTypeByName(ConstStringParam name) const;
// Find a top-level enum type by name. Returns nullptr if not found.
const EnumDescriptor* FindEnumTypeByName(ConstStringParam name) const;

@ -14,6 +14,8 @@
#include <google/protobuf/wire_format.h>
// @@protoc_insertion_point(includes)
#include <google/protobuf/port_def.inc>
PROTOBUF_PRAGMA_INIT_SEG
extern PROTOBUF_INTERNAL_EXPORT_google_2fprotobuf_2fdescriptor_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<6> scc_info_DescriptorProto_google_2fprotobuf_2fdescriptor_2eproto;
extern PROTOBUF_INTERNAL_EXPORT_google_2fprotobuf_2fdescriptor_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<1> scc_info_DescriptorProto_ExtensionRange_google_2fprotobuf_2fdescriptor_2eproto;
extern PROTOBUF_INTERNAL_EXPORT_google_2fprotobuf_2fdescriptor_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_DescriptorProto_ReservedRange_google_2fprotobuf_2fdescriptor_2eproto;
@ -1174,7 +1176,7 @@ const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_google
};
// Force running AddDescriptors() at dynamic initialization time.
static bool dynamic_init_dummy_google_2fprotobuf_2fdescriptor_2eproto = (static_cast<void>(::PROTOBUF_NAMESPACE_ID::internal::AddDescriptors(&descriptor_table_google_2fprotobuf_2fdescriptor_2eproto)), true);
PROTOBUF_ATTRIBUTE_INIT_PRIORITY static ::PROTOBUF_NAMESPACE_ID::internal::AddDescriptorsRunner dynamic_init_dummy_google_2fprotobuf_2fdescriptor_2eproto(&descriptor_table_google_2fprotobuf_2fdescriptor_2eproto);
PROTOBUF_NAMESPACE_OPEN
const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* FieldDescriptorProto_Type_descriptor() {
::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(&descriptor_table_google_2fprotobuf_2fdescriptor_2eproto);

@ -962,14 +962,14 @@ bool MergedDescriptorDatabase::FindFileByName(const std::string& filename,
bool MergedDescriptorDatabase::FindFileContainingSymbol(
const std::string& symbol_name, FileDescriptorProto* output) {
for (int i = 0; i < sources_.size(); i++) {
for (size_t i = 0; i < sources_.size(); i++) {
if (sources_[i]->FindFileContainingSymbol(symbol_name, output)) {
// The symbol was found in source i. However, if one of the previous
// sources defines a file with the same name (which presumably doesn't
// contain the symbol, since it wasn't found in that source), then we
// must hide it from the caller.
FileDescriptorProto temp;
for (int j = 0; j < i; j++) {
for (size_t j = 0; j < i; j++) {
if (sources_[j]->FindFileByName(output->name(), &temp)) {
// Found conflicting file in a previous source.
return false;
@ -984,7 +984,7 @@ bool MergedDescriptorDatabase::FindFileContainingSymbol(
bool MergedDescriptorDatabase::FindFileContainingExtension(
const std::string& containing_type, int field_number,
FileDescriptorProto* output) {
for (int i = 0; i < sources_.size(); i++) {
for (size_t i = 0; i < sources_.size(); i++) {
if (sources_[i]->FindFileContainingExtension(containing_type, field_number,
output)) {
// The symbol was found in source i. However, if one of the previous
@ -992,7 +992,7 @@ bool MergedDescriptorDatabase::FindFileContainingExtension(
// contain the symbol, since it wasn't found in that source), then we
// must hide it from the caller.
FileDescriptorProto temp;
for (int j = 0; j < i; j++) {
for (size_t j = 0; j < i; j++) {
if (sources_[j]->FindFileByName(output->name(), &temp)) {
// Found conflicting file in a previous source.
return false;

@ -1851,12 +1851,17 @@ class ExtensionDescriptorTest : public testing::Test {
// extensions 10 to 19;
// extensions 30 to 39;
// }
// extends Foo with optional int32 foo_int32 = 10;
// extends Foo with repeated TestEnum foo_enum = 19;
// extend Foo {
// optional int32 foo_int32 = 10;
// }
// extend Foo {
// repeated TestEnum foo_enum = 19;
// }
// message Bar {
// extends Foo with optional Qux foo_message = 30;
// // (using Qux as the group type)
// extends Foo with repeated group foo_group = 39;
// extend Foo {
// optional Qux foo_message = 30;
// repeated Qux foo_group = 39; // (but internally set to TYPE_GROUP)
// }
// }
FileDescriptorProto foo_file;
@ -8110,3 +8115,5 @@ TEST_F(LazilyBuildDependenciesTest, Dependency) {
} // namespace descriptor_unittest
} // namespace protobuf
} // namespace google
#include <google/protobuf/port_undef.inc>

@ -14,6 +14,8 @@
#include <google/protobuf/wire_format.h>
// @@protoc_insertion_point(includes)
#include <google/protobuf/port_def.inc>
PROTOBUF_PRAGMA_INIT_SEG
PROTOBUF_NAMESPACE_OPEN
class DurationDefaultTypeInternal {
public:
@ -76,7 +78,7 @@ const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_google
};
// Force running AddDescriptors() at dynamic initialization time.
static bool dynamic_init_dummy_google_2fprotobuf_2fduration_2eproto = (static_cast<void>(::PROTOBUF_NAMESPACE_ID::internal::AddDescriptors(&descriptor_table_google_2fprotobuf_2fduration_2eproto)), true);
PROTOBUF_ATTRIBUTE_INIT_PRIORITY static ::PROTOBUF_NAMESPACE_ID::internal::AddDescriptorsRunner dynamic_init_dummy_google_2fprotobuf_2fduration_2eproto(&descriptor_table_google_2fprotobuf_2fduration_2eproto);
PROTOBUF_NAMESPACE_OPEN
// ===================================================================

@ -509,7 +509,7 @@ DynamicMessage::~DynamicMessage() {
void* field_ptr =
OffsetToPointer(type_info_->oneof_case_offset +
sizeof(uint32) * field->containing_oneof()->index());
if (*(reinterpret_cast<const uint32*>(field_ptr)) == field->number()) {
if (*(reinterpret_cast<const int32*>(field_ptr)) == field->number()) {
field_ptr = OffsetToPointer(
type_info_->offsets[descriptor->field_count() +
field->containing_oneof()->index()]);

@ -14,6 +14,8 @@
#include <google/protobuf/wire_format.h>
// @@protoc_insertion_point(includes)
#include <google/protobuf/port_def.inc>
PROTOBUF_PRAGMA_INIT_SEG
PROTOBUF_NAMESPACE_OPEN
class EmptyDefaultTypeInternal {
public:
@ -73,7 +75,7 @@ const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_google
};
// Force running AddDescriptors() at dynamic initialization time.
static bool dynamic_init_dummy_google_2fprotobuf_2fempty_2eproto = (static_cast<void>(::PROTOBUF_NAMESPACE_ID::internal::AddDescriptors(&descriptor_table_google_2fprotobuf_2fempty_2eproto)), true);
PROTOBUF_ATTRIBUTE_INIT_PRIORITY static ::PROTOBUF_NAMESPACE_ID::internal::AddDescriptorsRunner dynamic_init_dummy_google_2fprotobuf_2fempty_2eproto(&descriptor_table_google_2fprotobuf_2fempty_2eproto);
PROTOBUF_NAMESPACE_OPEN
// ===================================================================

@ -180,7 +180,6 @@ void ExtensionSet::RegisterMessageExtension(const MessageLite* containing_type,
Register(containing_type, number, info);
}
// ===================================================================
// Constructors and basic methods.
@ -2141,3 +2140,5 @@ size_t ExtensionSet::MessageSetByteSize() const {
} // namespace internal
} // namespace protobuf
} // namespace google
#include <google/protobuf/port_undef.inc>

@ -1328,7 +1328,9 @@ RepeatedMessageTypeTraits<Type>::GetDefaultRepeatedField() {
// ExtensionIdentifier
// This is the type of actual extension objects. E.g. if you have:
// extends Foo with optional int32 bar = 1234;
// extend Foo {
// optional int32 bar = 1234;
// }
// then "bar" will be defined in C++ as:
// ExtensionIdentifier<Foo, PrimitiveTypeTraits<int32>, 5, false> bar(1234);
//

@ -534,3 +534,5 @@ bool ExtensionSet::ParseMessageSetItem(io::CodedInputStream* input,
} // namespace internal
} // namespace protobuf
} // namespace google
#include <google/protobuf/port_undef.inc>

@ -1335,3 +1335,5 @@ TEST(ExtensionSetTest, ConstInit) {
} // namespace internal
} // namespace protobuf
} // namespace google
#include <google/protobuf/port_undef.inc>

@ -14,6 +14,8 @@
#include <google/protobuf/wire_format.h>
// @@protoc_insertion_point(includes)
#include <google/protobuf/port_def.inc>
PROTOBUF_PRAGMA_INIT_SEG
PROTOBUF_NAMESPACE_OPEN
class FieldMaskDefaultTypeInternal {
public:
@ -75,7 +77,7 @@ const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_google
};
// Force running AddDescriptors() at dynamic initialization time.
static bool dynamic_init_dummy_google_2fprotobuf_2ffield_5fmask_2eproto = (static_cast<void>(::PROTOBUF_NAMESPACE_ID::internal::AddDescriptors(&descriptor_table_google_2fprotobuf_2ffield_5fmask_2eproto)), true);
PROTOBUF_ATTRIBUTE_INIT_PRIORITY static ::PROTOBUF_NAMESPACE_ID::internal::AddDescriptorsRunner dynamic_init_dummy_google_2fprotobuf_2ffield_5fmask_2eproto(&descriptor_table_google_2fprotobuf_2ffield_5fmask_2eproto);
PROTOBUF_NAMESPACE_OPEN
// ===================================================================

@ -83,7 +83,7 @@ int LookUpEnumName(const EnumEntry* enums, const int* sorted_indices,
bool InitializeEnumStrings(
const EnumEntry* enums, const int* sorted_indices, size_t size,
internal::ExplicitlyConstructed<std::string>* enum_strings) {
for (int i = 0; i < size; ++i) {
for (size_t i = 0; i < size; ++i) {
enum_strings[i].Construct(enums[sorted_indices[i]].name);
internal::OnShutdownDestroyString(enum_strings[i].get_mutable());
}

@ -836,7 +836,7 @@ void Reflection::ClearField(Message* message,
}
case FieldDescriptor::CPPTYPE_MESSAGE:
if (schema_.HasBitIndex(field) == -1) {
if (schema_.HasBitIndex(field) == static_cast<uint32>(-1)) {
// Proto3 does not have has-bits and we need to set a message field
// to nullptr in order to indicate its un-presence.
if (GetArena(message) == nullptr) {
@ -1046,7 +1046,8 @@ void Reflection::ListFieldsMayFailOnStripped(
schema_.HasHasbits() ? GetHasBits(message) : nullptr;
const uint32* const has_bits_indices = schema_.has_bit_indices_;
output->reserve(descriptor_->field_count());
for (int i = 0; i <= last_non_weak_field_index_; i++) {
const int last_non_weak_field_index = last_non_weak_field_index_;
for (int i = 0; i <= last_non_weak_field_index; i++) {
const FieldDescriptor* field = descriptor_->field(i);
if (!should_fail && schema_.IsFieldStripped(field)) {
continue;
@ -1061,10 +1062,11 @@ void Reflection::ListFieldsMayFailOnStripped(
const uint32* const oneof_case_array = GetConstPointerAtOffset<uint32>(
&message, schema_.oneof_case_offset_);
// Equivalent to: HasOneofField(message, field)
if (oneof_case_array[containing_oneof->index()] == field->number()) {
if (static_cast<int64>(oneof_case_array[containing_oneof->index()]) ==
field->number()) {
output->push_back(field);
}
} else if (has_bits && has_bits_indices[i] != -1) {
} else if (has_bits && has_bits_indices[i] != static_cast<uint32>(-1)) {
CheckInvalidAccess(schema_, field);
// Equivalent to: HasBit(message, field)
if (IsIndexInHasBitSet(has_bits, has_bits_indices[i])) {
@ -2005,7 +2007,7 @@ InternalMetadata* Reflection::MutableInternalMetadata(Message* message) const {
bool Reflection::HasBit(const Message& message,
const FieldDescriptor* field) const {
GOOGLE_DCHECK(!field->options().weak());
if (schema_.HasBitIndex(field) != -1) {
if (schema_.HasBitIndex(field) != static_cast<uint32>(-1)) {
return IsIndexInHasBitSet(GetHasBits(message), schema_.HasBitIndex(field));
}
@ -2064,7 +2066,7 @@ bool Reflection::HasBit(const Message& message,
void Reflection::SetBit(Message* message, const FieldDescriptor* field) const {
GOOGLE_DCHECK(!field->options().weak());
const uint32 index = schema_.HasBitIndex(field);
if (index == -1) return;
if (index == static_cast<uint32>(-1)) return;
MutableHasBits(message)[index / 32] |=
(static_cast<uint32>(1) << (index % 32));
}
@ -2073,7 +2075,7 @@ void Reflection::ClearBit(Message* message,
const FieldDescriptor* field) const {
GOOGLE_DCHECK(!field->options().weak());
const uint32 index = schema_.HasBitIndex(field);
if (index == -1) return;
if (index == static_cast<uint32>(-1)) return;
MutableHasBits(message)[index / 32] &=
~(static_cast<uint32>(1) << (index % 32));
}
@ -2107,7 +2109,8 @@ bool Reflection::HasOneof(const Message& message,
bool Reflection::HasOneofField(const Message& message,
const FieldDescriptor* field) const {
return (GetOneofCase(message, field->containing_oneof()) == field->number());
return (GetOneofCase(message, field->containing_oneof()) ==
static_cast<uint32>(field->number()));
}
void Reflection::SetOneofCase(Message* message,
@ -2416,6 +2419,8 @@ struct MetadataOwner {
std::vector<std::pair<const Metadata*, const Metadata*> > metadata_arrays_;
};
void AddDescriptors(const DescriptorTable* table);
void AssignDescriptorsImpl(const DescriptorTable* table, bool eager) {
// Ensure the file descriptor is added to the pool.
{
@ -2493,6 +2498,16 @@ void AddDescriptorsImpl(const DescriptorTable* table) {
MessageFactory::InternalRegisterGeneratedFile(table);
}
void AddDescriptors(const DescriptorTable* table) {
// AddDescriptors is not thread safe. Callers need to ensure calls are
// properly serialized. This function is only called pre-main by global
// descriptors and we can assume single-threaded access, or it's called
// by AssignDescriptorsImpl, which uses a mutex to sequence calls.
if (table->is_initialized) return;
table->is_initialized = true;
AddDescriptorsImpl(table);
}
} // namespace
// Separate function because it needs to be a friend of
@ -2513,14 +2528,8 @@ void AssignDescriptors(const DescriptorTable* table, bool eager) {
call_once(*table->once, AssignDescriptorsImpl, table, eager);
}
void AddDescriptors(const DescriptorTable* table) {
// AddDescriptors is not thread safe. Callers need to ensure calls are
// properly serialized. This function is only called pre-main by global
// descriptors and we can assume single-threaded access, or it's called
// by AssignDescriptorsImpl, which uses a mutex to sequence calls.
if (table->is_initialized) return;
table->is_initialized = true;
AddDescriptorsImpl(table);
AddDescriptorsRunner::AddDescriptorsRunner(const DescriptorTable* table) {
AddDescriptors(table);
}
void RegisterFileLevelMetadata(const DescriptorTable* table) {
@ -2544,3 +2553,5 @@ void UnknownFieldSetSerializer(const uint8* base, uint32 offset, uint32 tag,
} // namespace internal
} // namespace protobuf
} // namespace google
#include <google/protobuf/port_undef.inc>
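
Note on the casts above: the has-bit index is a uint32, and "no has-bit" is
represented by -1 wrapped around to 0xFFFFFFFF, so comparing against a bare
-1 trips sign-compare warnings. A minimal standalone sketch of the pattern
(illustrative names, not protobuf code):

#include <cstdint>
#include <iostream>

// "No has-bit" sentinel: -1 converted to the unsigned index type.
constexpr uint32_t kNoHasBit = static_cast<uint32_t>(-1);

// Mirrors SetBit above: one bit per field, packed into 32-bit words.
void SetBit(uint32_t* has_bits, uint32_t index) {
  if (index == kNoHasBit) return;  // field has no has-bit (e.g. proto3)
  has_bits[index / 32] |= (static_cast<uint32_t>(1) << (index % 32));
}

bool TestBit(const uint32_t* has_bits, uint32_t index) {
  return has_bits[index / 32] & (static_cast<uint32_t>(1) << (index % 32));
}

int main() {
  uint32_t bits[2] = {0, 0};
  SetBit(bits, 37);                        // word 1, bit 5
  std::cout << TestBit(bits, 37) << "\n";  // 1
  SetBit(bits, kNoHasBit);                 // silently ignored
}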

@ -291,18 +291,15 @@ enum {
void PROTOBUF_EXPORT AssignDescriptors(const DescriptorTable* table,
bool eager = false);
// AddDescriptors() is a file-level procedure which adds the encoded
// FileDescriptorProto for this .proto file to the global DescriptorPool for
// generated files (DescriptorPool::generated_pool()). It ordinarily runs at
// static initialization time, but is not used at all in LITE_RUNTIME mode.
// AddDescriptors() is *not* thread-safe.
void PROTOBUF_EXPORT AddDescriptors(const DescriptorTable* table);
// These cannot be in lite, so we put them in the reflection library.
PROTOBUF_EXPORT void UnknownFieldSetSerializer(const uint8* base, uint32 offset,
uint32 tag, uint32 has_offset,
io::CodedOutputStream* output);
struct PROTOBUF_EXPORT AddDescriptorsRunner {
explicit AddDescriptorsRunner(const DescriptorTable* table);
};
} // namespace internal
} // namespace protobuf
} // namespace google
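
The generated .pb.cc changes earlier in this diff replace the
dynamic_init_dummy_* bool trick with a static AddDescriptorsRunner, whose
constructor performs the registration and can therefore be ordered early via
PROTOBUF_ATTRIBUTE_INIT_PRIORITY. A standalone sketch of the runner idiom
(names are illustrative, not the real API):

#include <iostream>

struct DescriptorTable { const char* filename; };

void AddDescriptors(const DescriptorTable* table) {
  // Stand-in for the real registration.
  std::cout << "registered " << table->filename << "\n";
}

// A static object of this type turns registration into a dynamic
// initializer that an init-priority attribute can schedule ahead of
// ordinary globals.
struct AddDescriptorsRunner {
  explicit AddDescriptorsRunner(const DescriptorTable* table) {
    AddDescriptors(table);
  }
};

static const DescriptorTable kTable = {"google/protobuf/field_mask.proto"};
static AddDescriptorsRunner runner(&kTable);  // runs before main()

int main() {}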

@ -172,7 +172,7 @@ template <ProcessingType field_type>
inline void ResetOneofField(const ParseTable& table, int field_number,
Arena* arena, MessageLite* msg, uint32* oneof_case,
int64 offset, const void* default_ptr) {
if (*oneof_case == field_number) {
if (static_cast<int64>(*oneof_case) == field_number) {
// The oneof is already set to the right type, so there is no need to clear
// it.
return;

@ -52,10 +52,14 @@
#include <google/protobuf/message_lite.h>
#include <google/protobuf/metadata_lite.h>
#include <google/protobuf/stubs/mutex.h>
#include <google/protobuf/port_def.inc>
#include <google/protobuf/repeated_field.h>
#include <google/protobuf/wire_format_lite.h>
// Must be included last
#include <google/protobuf/port_def.inc>
PROTOBUF_PRAGMA_INIT_SEG
namespace google {
namespace protobuf {
@ -68,8 +72,9 @@ void DestroyString(const void* s) {
static_cast<const std::string*>(s)->~basic_string();
}
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT EmptyString
fixed_address_empty_string; // NOLINT
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT
PROTOBUF_ATTRIBUTE_INIT_PRIORITY EmptyString
fixed_address_empty_string; // NOLINT
PROTOBUF_CONSTINIT std::atomic<bool> init_protobuf_defaults_state{false};
@ -90,6 +95,11 @@ void InitProtobufDefaultsSlow() {
static bool is_inited = InitProtobufDefaultsImpl();
(void)is_inited;
}
// Force the initialization of the empty string.
// Normally, registration would do it, but we don't have any guarantee that
// there is any object with reflection.
PROTOBUF_ATTRIBUTE_INIT_PRIORITY static std::true_type init_empty_string =
(InitProtobufDefaultsSlow(), std::true_type{});
size_t StringSpaceUsedExcludingSelfLong(const std::string& str) {
const void* start = &str;
@ -802,3 +812,5 @@ void InitSCCImpl(SCCInfoBase* scc) {
} // namespace internal
} // namespace protobuf
} // namespace google
#include <google/protobuf/port_undef.inc>

@ -63,3 +63,5 @@ const ImplicitWeakMessage* ImplicitWeakMessage::default_instance() {
} // namespace internal
} // namespace protobuf
} // namespace google
#include <google/protobuf/port_undef.inc>

@ -312,7 +312,7 @@ bool CodedInputStream::ReadLittleEndian32Fallback(uint32* value) {
uint8 bytes[sizeof(*value)];
const uint8* ptr;
if (BufferSize() >= sizeof(*value)) {
if (BufferSize() >= static_cast<int64>(sizeof(*value))) {
// Fast path: Enough bytes in the buffer to read directly.
ptr = buffer_;
Advance(sizeof(*value));
@ -329,7 +329,7 @@ bool CodedInputStream::ReadLittleEndian64Fallback(uint64* value) {
uint8 bytes[sizeof(*value)];
const uint8* ptr;
if (BufferSize() >= sizeof(*value)) {
if (BufferSize() >= static_cast<int64>(sizeof(*value))) {
// Fast path: Enough bytes in the buffer to read directly.
ptr = buffer_;
Advance(sizeof(*value));
@ -351,7 +351,7 @@ template <size_t N>
const uint8* DecodeVarint64KnownSize(const uint8* buffer, uint64* value) {
GOOGLE_DCHECK_GT(N, 0);
uint64 result = static_cast<uint64>(buffer[N - 1]) << (7 * (N - 1));
for (int i = 0, offset = 0; i < N - 1; i++, offset += 7) {
for (size_t i = 0, offset = 0; i < N - 1; i++, offset += 7) {
result += static_cast<uint64>(buffer[i] - 0x80) << offset;
}
*value = result;
@ -954,3 +954,5 @@ uint8* CodedOutputStream::WriteStringWithSizeToArray(const std::string& str,
} // namespace io
} // namespace protobuf
} // namespace google
#include <google/protobuf/port_undef.inc>
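
For context on the loop above: DecodeVarint64KnownSize reconstructs a
little-endian base-128 varint whose byte length N is already known. Every
byte except the last carries the 0x80 continuation bit, which is why
"buffer[i] - 0x80" strips it before shifting. A standalone sketch with a
worked example:

#include <cstddef>
#include <cstdint>
#include <iostream>

template <size_t N>
uint64_t DecodeKnownSize(const uint8_t* buffer) {
  // The last byte has no continuation bit and lands at the highest offset.
  uint64_t result = static_cast<uint64_t>(buffer[N - 1]) << (7 * (N - 1));
  for (size_t i = 0, offset = 0; i < N - 1; i++, offset += 7) {
    result += static_cast<uint64_t>(buffer[i] - 0x80) << offset;
  }
  return result;
}

int main() {
  const uint8_t bytes[2] = {0xAC, 0x02};           // varint encoding of 300
  std::cout << DecodeKnownSize<2>(bytes) << "\n";  // (0xAC & 0x7F) + (2 << 7)
}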

@ -1344,3 +1344,5 @@ TEST_F(CodedStreamTest, InputOver2G) {
} // namespace io
} // namespace protobuf
} // namespace google
#include <google/protobuf/port_undef.inc>

@ -298,7 +298,7 @@ bool GzipOutputStream::Next(void** data, int* size) {
return true;
}
void GzipOutputStream::BackUp(int count) {
GOOGLE_CHECK_GE(zcontext_.avail_in, count);
GOOGLE_CHECK_GE(zcontext_.avail_in, static_cast<uInt>(count));
zcontext_.avail_in -= count;
}
int64_t GzipOutputStream::ByteCount() const {

@ -296,7 +296,7 @@ void Printer::FormatInternal(const std::vector<std::string>& args,
}
push_back(c);
}
if (arg_index != args.size()) {
if (arg_index != static_cast<int>(args.size())) {
GOOGLE_LOG(FATAL) << " Unused arguments. " << save;
}
if (!annotations.empty()) {
@ -324,7 +324,7 @@ const char* Printer::WriteVariable(
GOOGLE_CHECK(std::isdigit(start[1]));
GOOGLE_CHECK_EQ(end - start, 2);
int idx = start[1] - '1';
if (idx < 0 || idx >= args.size()) {
if (idx < 0 || static_cast<size_t>(idx) >= args.size()) {
GOOGLE_LOG(FATAL) << "Annotation ${" << idx + 1 << "$ is out of bounds.";
}
if (idx > *arg_index) {
@ -358,10 +358,10 @@ const char* Printer::WriteVariable(
start_var, static_cast<std::string::size_type>(end_var - start_var)};
std::string sub;
if (std::isdigit(var_name[0])) {
GOOGLE_CHECK_EQ(var_name.size(), 1); // No need for multi-digits
GOOGLE_CHECK_EQ(var_name.size(), 1U); // No need for multi-digits
int idx = var_name[0] - '1'; // Start counting at 1
GOOGLE_CHECK_GE(idx, 0);
if (idx >= args.size()) {
if (static_cast<size_t>(idx) >= args.size()) {
GOOGLE_LOG(FATAL) << "Argument $" << idx + 1 << "$ is out of bounds.";
}
if (idx > *arg_index) {

@ -888,7 +888,8 @@ bool Tokenizer::ParseInteger(const std::string& text, uint64 max_value,
// token, but the Tokenizer still thinks it's an integer.
return false;
}
if (digit > max_value || result > (max_value - digit) / base) {
if (static_cast<uint64>(digit) > max_value ||
result > (max_value - digit) / base) {
// Overflow.
return false;
}
@ -918,7 +919,8 @@ double Tokenizer::ParseFloat(const std::string& text) {
++end;
}
GOOGLE_LOG_IF(DFATAL, end - start != text.size() || *start == '-')
GOOGLE_LOG_IF(DFATAL,
static_cast<size_t>(end - start) != text.size() || *start == '-')
<< " Tokenizer::ParseFloat() passed text that could not have been"
" tokenized as a float: "
<< CEscape(text);
@ -1114,8 +1116,8 @@ void Tokenizer::ParseStringAppend(const std::string& text,
template <typename CharacterClass>
static bool AllInClass(const std::string& s) {
for (int i = 0; i < s.size(); ++i) {
if (!CharacterClass::InClass(s[i])) return false;
for (const char character : s) {
if (!CharacterClass::InClass(character)) return false;
}
return true;
}
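
The rewritten overflow check in ParseInteger above is the standard guard for
"result * base + digit would exceed max_value", using only operations that
cannot themselves overflow; the added cast merely makes the digit comparison
unsigned on both sides. A standalone sketch:

#include <cstdint>
#include <iostream>

// Returns false instead of overflowing when appending one more digit.
bool AccumulateDigit(uint64_t* result, uint64_t digit, uint64_t base,
                     uint64_t max_value) {
  if (digit > max_value || *result > (max_value - digit) / base) {
    return false;  // result * base + digit > max_value
  }
  *result = *result * base + digit;
  return true;
}

int main() {
  uint64_t value = UINT64_MAX / 10;  // one digit away from the limit
  std::cout << AccumulateDigit(&value, 5, 10, UINT64_MAX) << "\n";  // 1
  std::cout << AccumulateDigit(&value, 9, 10, UINT64_MAX) << "\n";  // 0
}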

@ -168,7 +168,7 @@ bool StringOutputStream::Next(void** data, int* size) {
void StringOutputStream::BackUp(int count) {
GOOGLE_CHECK_GE(count, 0);
GOOGLE_CHECK(target_ != NULL);
GOOGLE_CHECK_LE(count, target_->size());
GOOGLE_CHECK_LE(static_cast<size_t>(count), target_->size());
target_->resize(target_->size() - count);
}

@ -838,7 +838,7 @@ class Map {
// non-determinism to the map ordering.
bool ShouldInsertAfterHead(void* node) {
#ifdef NDEBUG
(void) node;
(void)node;
return false;
#else
// Doing modulo with a prime mixes the bits more.
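
The hunk above ends just before the debug-only branch that derives a
pseudo-random bit from the node's address, so new nodes land on either side
of the list head and map iteration order varies between runs, flushing out
code that accidentally depends on it. A sketch of the general idea (the
mixing function here is invented for illustration, not the one in map.h):

#include <cstdint>
#include <iostream>

bool ShouldInsertAfterHead(void* node) {
#ifdef NDEBUG
  (void)node;
  return false;  // fully deterministic in optimized builds
#else
  uintptr_t x = reinterpret_cast<uintptr_t>(node);
  // Doing modulo with a prime mixes the bits more.
  return ((x ^ (x >> 13)) % 13) > 6;
#endif
}

int main() {
  int probe;
  std::cout << ShouldInsertAfterHead(&probe) << "\n";  // 0 or 1
}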

@ -601,3 +601,5 @@ size_t DynamicMapField::SpaceUsedExcludingSelfNoLock() const {
} // namespace internal
} // namespace protobuf
} // namespace google
#include <google/protobuf/port_undef.inc>

@ -72,8 +72,8 @@ class MapIterator;
// map key.
class PROTOBUF_EXPORT MapKey {
public:
MapKey() : type_(0) {}
MapKey(const MapKey& other) : type_(0) { CopyFrom(other); }
MapKey() : type_() {}
MapKey(const MapKey& other) : type_() { CopyFrom(other); }
MapKey& operator=(const MapKey& other) {
CopyFrom(other);
@ -87,12 +87,12 @@ class PROTOBUF_EXPORT MapKey {
}
FieldDescriptor::CppType type() const {
if (type_ == 0) {
if (type_ == FieldDescriptor::CppType()) {
GOOGLE_LOG(FATAL) << "Protocol Buffer map usage error:\n"
<< "MapKey::type MapKey is not initialized. "
<< "Call set methods to initialize MapKey.";
}
return (FieldDescriptor::CppType)type_;
return type_;
}
void SetInt64Value(int64 value) {
@ -261,7 +261,8 @@ class PROTOBUF_EXPORT MapKey {
}
// type_ is 0 or a valid FieldDescriptor::CppType.
int type_;
// Use "CppType()" to indicate zero.
FieldDescriptor::CppType type_;
};
} // namespace protobuf
@ -329,7 +330,7 @@ class PROTOBUF_EXPORT MapFieldBase {
// It uses a linker-initialized mutex, so it is not compatible with regular
// runtime instances.
// Except in MSVC, where we can't have a constinit mutex.
explicit PROTOBUF_MAYBE_CONSTEXPR MapFieldBase(ConstantInitialized)
explicit constexpr MapFieldBase(ConstantInitialized)
: arena_(nullptr),
repeated_field_(nullptr),
mutex_(GOOGLE_PROTOBUF_LINKER_INITIALIZED),
@ -678,7 +679,7 @@ class PROTOBUF_EXPORT DynamicMapField
// the map value.
class PROTOBUF_EXPORT MapValueConstRef {
public:
MapValueConstRef() : data_(nullptr), type_(0) {}
MapValueConstRef() : data_(nullptr), type_() {}
int64 GetInt64Value() const {
TYPE_CHECK(FieldDescriptor::CPPTYPE_INT64,
@ -735,15 +736,16 @@ class PROTOBUF_EXPORT MapValueConstRef {
// own this value.
void* data_;
// type_ is 0 or a valid FieldDescriptor::CppType.
int type_;
// Use "CppType()" to indicate zero.
FieldDescriptor::CppType type_;
FieldDescriptor::CppType type() const {
if (type_ == 0 || data_ == nullptr) {
if (type_ == FieldDescriptor::CppType() || data_ == nullptr) {
GOOGLE_LOG(FATAL)
<< "Protocol Buffer map usage error:\n"
<< "MapValueConstRef::type MapValueConstRef is not initialized.";
}
return static_cast<FieldDescriptor::CppType>(type_);
return type_;
}
private:
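
The MapKey / MapValueConstRef change above replaces a raw "int type_" with
the enum itself, using value-initialization (FieldDescriptor::CppType(),
i.e. zero) as the "unset" sentinel; the CppType enumerators start at 1, so
zero can never collide with a real type. A standalone sketch:

#include <iostream>

// Mirrors FieldDescriptor::CppType: enumerators start at 1, leaving the
// value-initialized enum (zero) free to mean "not yet set".
enum CppType { CPPTYPE_INT32 = 1, CPPTYPE_STRING = 9 };

struct Key {
  CppType type_ = CppType();  // zero: names no enumerator, valid enum value

  bool initialized() const { return type_ != CppType(); }
};

int main() {
  Key k;
  std::cout << k.initialized() << "\n";  // 0
  k.type_ = CPPTYPE_INT32;
  std::cout << k.initialized() << "\n";  // 1
}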

@ -501,3 +501,5 @@ TEST(MapFieldTest, ConstInit) {
} // namespace internal
} // namespace protobuf
} // namespace google
#include <google/protobuf/port_undef.inc>

@ -3770,3 +3770,5 @@ TEST(MoveTest, MoveAssignmentWorks) {
} // namespace internal
} // namespace protobuf
} // namespace google
#include <google/protobuf/port_undef.inc>

@ -363,3 +363,5 @@ PROTOBUF_NOINLINE
} // namespace protobuf
} // namespace google
#include <google/protobuf/port_undef.inc>

@ -1252,7 +1252,8 @@ const T* DynamicCastToGenerated(const Message* from) {
#if PROTOBUF_RTTI
return dynamic_cast<const T*>(from);
#else
bool ok = T::default_instance().GetReflection() == from->GetReflection();
bool ok = from != nullptr &&
T::default_instance().GetReflection() == from->GetReflection();
return ok ? down_cast<const T*>(from) : nullptr;
#endif
}

@ -389,7 +389,7 @@ bool MessageLite::SerializePartialToCodedStream(
}
int final_byte_count = output->ByteCount();
if (final_byte_count - original_byte_count != size) {
if (final_byte_count - original_byte_count != static_cast<int64>(size)) {
ByteSizeConsistencyError(size, ByteSizeLong(),
final_byte_count - original_byte_count, *this);
}
@ -488,7 +488,7 @@ bool MessageLite::SerializePartialToArray(void* data, int size) const {
<< " exceeded maximum protobuf size of 2GB: " << byte_size;
return false;
}
if (size < byte_size) return false;
if (size < static_cast<int64>(byte_size)) return false;
uint8* start = reinterpret_cast<uint8*>(data);
SerializeToArrayImpl(*this, start, byte_size);
return true;
@ -581,3 +581,5 @@ void ShutdownProtobufLibrary() {
} // namespace protobuf
} // namespace google
#include <google/protobuf/port_undef.inc>

@ -151,7 +151,6 @@ class ExplicitlyConstructed {
} union_;
};
PROTOBUF_DISABLE_MSVC_UNION_WARNING
// We need a publicly accessible `value` object to allow constexpr
// support in C++11.
// A constexpr accessor does not work portably.
@ -163,7 +162,6 @@ union EmptyString {
std::false_type dummy;
std::string value;
};
PROTOBUF_ENABLE_MSVC_UNION_WARNING
// Default empty string object. Don't use this directly. Instead, call
// GetEmptyString() to get the reference.
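
The EmptyString union above (now covered by the file-wide MSVC pragmas from
port_def.inc instead of the removed PROTOBUF_DISABLE_MSVC_UNION_WARNING
pair) exists so a global std::string can be constant-initialized: the
union's constexpr constructor activates the trivial member, and the string
member is constructed by placement new later. A standalone sketch of the
idiom:

#include <new>
#include <string>

union LazyString {
  constexpr LazyString() : dummy{} {}
  ~LazyString() {}  // member lifetime is managed manually
  bool dummy;
  std::string value;
};

// Constant-initialized: immune to static-initialization-order problems.
LazyString fixed_empty;

int main() {
  // Activate the string member on first use; destroy it explicitly.
  ::new (static_cast<void*>(&fixed_empty.value)) std::string();
  const bool empty = fixed_empty.value.empty();
  fixed_empty.value.~basic_string();
  return empty ? 0 : 1;
}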

@ -235,6 +235,10 @@ TEST(MESSAGE_TEST_NAME, DynamicCastToGenerated) {
test_all_types_pointer_const));
EXPECT_EQ(nullptr, DynamicCastToGenerated<const UNITTEST::TestRequired>(
test_all_types_pointer_const));
Message* test_all_types_pointer_nullptr = nullptr;
EXPECT_EQ(nullptr, DynamicCastToGenerated<UNITTEST::TestAllTypes>(
test_all_types_pointer_nullptr));
}
#ifdef PROTOBUF_HAS_DEATH_TEST // death tests do not work on Windows yet.

@ -591,3 +591,5 @@ const char* UnknownFieldParse(uint32 tag, std::string* unknown, const char* ptr,
} // namespace internal
} // namespace protobuf
} // namespace google
#include <google/protobuf/port_undef.inc>

@ -39,5 +39,9 @@
#include <google/protobuf/stubs/port.h>
// Protobuf intends to move into the pb:: namespace.
namespace protobuf_future_namespace_placeholder {}
namespace pb = ::protobuf_future_namespace_placeholder;
#endif // GOOGLE_PROTOBUF_PORT_H__

@ -139,21 +139,21 @@
#ifdef PROTOBUF_FINAL
#error PROTOBUF_FINAL was previously defined
#endif
#ifdef PROTOBUF_DISABLE_MSVC_UNION_WARNING
#error PROTOBUF_DISABLE_MSVC_UNION_WARNING was previously defined
#endif
#ifdef PROTOBUF_ENABLE_MSVC_UNION_WARNING
#error PROTOBUF_ENABLE_MSVC_UNION_WARNING was previously defined
#endif
#ifdef PROTOBUF_CONSTINIT
#error PROTOBUF_CONSTINIT was previously defined
#endif
#ifdef PROTOBUF_MAYBE_CONSTEXPR
#error PROTOBUF_MAYBE_CONSTEXPR was previously defined
#endif
#ifdef PROTOBUF_ATTRIBUTE_NO_DESTROY
#error PROTOBUF_ATTRIBUTE_NO_DESTROY was previously defined
#endif
#ifdef PROTOBUF_ATTRIBUTE_INIT_PRIORITY
#error PROTOBUF_ATTRIBUTE_INIT_PRIORITY was previously defined
#endif
#ifdef PROTOBUF_PRAGMA_INIT_SEG
#error PROTOBUF_PRAGMA_INIT_SEG was previously defined
#endif
#define PROTOBUF_NAMESPACE "google::protobuf"
@ -464,10 +464,6 @@
// name.
#pragma push_macro("DEBUG")
#undef DEBUG
#pragma push_macro("TRUE")
#undef TRUE
#pragma push_macro("FALSE")
#undef FALSE
#endif // defined(__clang__) || defined(__GNUC__) || defined(_MSC_VER)
#if defined(__clang__)
@ -560,28 +556,6 @@
#define PROTOBUF_CONSTINIT
#endif
// Some constructors can't be constexpr under MSVC, but given that MSVC will not
// do constant initialization of globals anyway we can omit `constexpr` from
// them. These constructors are marked with PROTOBUF_MAYBE_CONSTEXPR
#if defined(_MSC_VER)
#define PROTOBUF_MAYBE_CONSTEXPR
#else
#define PROTOBUF_MAYBE_CONSTEXPR constexpr
#endif
#if _MSC_VER
#define PROTOBUF_DISABLE_MSVC_UNION_WARNING \
__pragma(warning(push)) \
__pragma(warning(disable : 4582)) \
__pragma(warning(disable : 4583))
#define PROTOBUF_ENABLE_MSVC_UNION_WARNING \
__pragma(warning(pop))
#else
#define PROTOBUF_DISABLE_MSVC_UNION_WARNING
#define PROTOBUF_ENABLE_MSVC_UNION_WARNING
#endif
#if defined(__cpp_constinit)
#define PROTOBUF_CONSTINIT constinit
#elif defined(__has_cpp_attribute)
@ -604,3 +578,34 @@
#if !defined(PROTOBUF_ATTRIBUTE_NO_DESTROY)
#define PROTOBUF_ATTRIBUTE_NO_DESTROY
#endif
#if defined(__GNUC__)
// Protobuf extensions and reflection require registration of the protos linked
// in the binary. Not until everything is registered does the runtime have a
// complete view of all protos. When code uses reflection or extensions
// in between registration calls, this can lead to surprising behavior. By
// having the registration run first we mitigate this scenario.
// The highest priority is 101. We use 102 to allow code that really wants a
// higher priority to still beat us.
#define PROTOBUF_ATTRIBUTE_INIT_PRIORITY __attribute__((init_priority((102))))
#else
#define PROTOBUF_ATTRIBUTE_INIT_PRIORITY
#endif
#if _MSC_VER
#define PROTOBUF_PRAGMA_INIT_SEG __pragma(init_seg(lib))
#else
#define PROTOBUF_PRAGMA_INIT_SEG
#endif
// Silence some MSVC warnings in all our code.
#if _MSC_VER
#pragma warning(push)
// For non-trivial unions
#pragma warning(disable : 4582)
#pragma warning(disable : 4583)
// For init_seg(lib)
#pragma warning(disable : 4073)
// Because the matching warning(pop) happens in another file (port_undef.inc)
#pragma warning(disable : 5031)
#endif
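
A standalone sketch of what the GCC/Clang branch above does: init_priority
orders a global's dynamic initializer ahead of all default-priority (65535)
initializers across the whole program, and 101 is the earliest priority
available to user code, hence the comment about choosing 102:

#include <iostream>

struct Early {
  Early() { std::cout << "prioritized initializer runs first\n"; }
};

struct Late {
  Late() { std::cout << "default-priority initializer runs second\n"; }
};

// Ordering holds across translation units, independent of link order.
static Early early __attribute__((init_priority(102)));
static Late late;  // default priority (65535)

int main() {}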

@ -75,8 +75,9 @@
#undef PROTOBUF_DISABLE_MSVC_UNION_WARNING
#undef PROTOBUF_ENABLE_MSVC_UNION_WARNING
#undef PROTOBUF_CONSTINIT
#undef PROTOBUF_MAYBE_CONSTEXPR
#undef PROTOBUF_ATTRIBUTE_NO_DESTROY
#undef PROTOBUF_ATTRIBUTE_INIT_PRIORITY
#undef PROTOBUF_PRAGMA_INIT_SEG
// Restore macro that may have been #undef'd in port_def.inc.
#ifdef _MSC_VER
@ -105,8 +106,6 @@
#if defined(__clang__) || defined(__GNUC__) || defined(_MSC_VER)
#pragma pop_macro("DEBUG")
#pragma pop_macro("TRUE")
#pragma pop_macro("FALSE")
#endif // defined(__clang__) || defined(__GNUC__) || defined(_MSC_VER)
#if defined(__clang__)
@ -114,3 +113,8 @@
#elif defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
// Pop the warning(push) from port_def.inc
#if _MSC_VER
#pragma warning(pop)
#endif

@ -436,3 +436,5 @@ void GenericSwap(Message* m1, Message* m2) {
} // namespace internal
} // namespace protobuf
} // namespace google
#include <google/protobuf/port_undef.inc>

@ -58,8 +58,10 @@ void** RepeatedPtrFieldBase::InternalExtend(int extend_amount) {
Arena* arena = GetArena();
new_size = std::max(internal::kRepeatedFieldLowerClampLimit,
std::max(total_size_ * 2, new_size));
GOOGLE_CHECK_LE(new_size, (std::numeric_limits<size_t>::max() - kRepHeaderSize) /
sizeof(old_rep->elements[0]))
GOOGLE_CHECK_LE(
static_cast<int64>(new_size),
static_cast<int64>((std::numeric_limits<size_t>::max() - kRepHeaderSize) /
sizeof(old_rep->elements[0])))
<< "Requested size is too large to fit into size_t.";
size_t bytes = kRepHeaderSize + sizeof(old_rep->elements[0]) * new_size;
if (arena == NULL) {
@ -134,3 +136,5 @@ template class PROTOBUF_EXPORT_TEMPLATE_DEFINE RepeatedPtrField<std::string>;
} // namespace protobuf
} // namespace google
#include <google/protobuf/port_undef.inc>
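
The widened GOOGLE_CHECK_LE above compares both sides as int64 to satisfy
sign-compare warnings; the underlying guard is a standard pre-multiplication
overflow check, bounding the element count by what still fits in size_t once
the header is accounted for. A standalone sketch:

#include <cstddef>
#include <cstdint>
#include <iostream>

constexpr size_t kRepHeaderSize = 16;  // illustrative header size

// True iff kRepHeaderSize + new_size * elem_size cannot overflow size_t.
bool SizeFits(size_t new_size, size_t elem_size) {
  return new_size <= (SIZE_MAX - kRepHeaderSize) / elem_size;
}

int main() {
  std::cout << SizeFits(1024, 8) << "\n";          // 1
  std::cout << SizeFits(SIZE_MAX / 2, 8) << "\n";  // 0
}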

@ -1206,6 +1206,12 @@ RepeatedField<Element>::RepeatedField(Iter begin, const Iter& end)
template <typename Element>
RepeatedField<Element>::~RepeatedField() {
#ifndef NDEBUG
// Try to trigger a segfault / ASan failure in non-opt builds if the
// arena_ lifetime has ended before this destructor runs.
auto arena = GetArena();
if (arena) (void)arena->SpaceAllocated();
#endif
if (total_size_ > 0) {
InternalDeallocate(rep(), total_size_);
}
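
The new !NDEBUG block above deliberately touches the arena so that, if the
arena was destroyed before the container, ASan (or a plain segfault) reports
the lifetime bug right here in the destructor instead of at some unrelated
later point. A standalone sketch of the trick:

#include <cstddef>

struct Arena {
  size_t SpaceAllocated() const { return 0; }
};

struct RepeatedThing {
  Arena* arena_ = nullptr;
  ~RepeatedThing() {
#ifndef NDEBUG
    // Harmless read in a correct program; a use-after-free report here
    // when the arena died first.
    if (arena_) (void)arena_->SpaceAllocated();
#endif
  }
};

int main() {
  Arena arena;
  RepeatedThing t;
  t.arena_ = &arena;
}  // t is destroyed while arena is still alive: no report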

@ -2090,3 +2090,5 @@ TEST_F(RepeatedFieldInsertionIteratorsTest, MoveProtos) {
} // namespace protobuf
} // namespace google
#include <google/protobuf/port_undef.inc>

@ -14,6 +14,8 @@
#include <google/protobuf/wire_format.h>
// @@protoc_insertion_point(includes)
#include <google/protobuf/port_def.inc>
PROTOBUF_PRAGMA_INIT_SEG
PROTOBUF_NAMESPACE_OPEN
class SourceContextDefaultTypeInternal {
public:
@ -75,7 +77,7 @@ const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_google
};
// Force running AddDescriptors() at dynamic initialization time.
static bool dynamic_init_dummy_google_2fprotobuf_2fsource_5fcontext_2eproto = (static_cast<void>(::PROTOBUF_NAMESPACE_ID::internal::AddDescriptors(&descriptor_table_google_2fprotobuf_2fsource_5fcontext_2eproto)), true);
PROTOBUF_ATTRIBUTE_INIT_PRIORITY static ::PROTOBUF_NAMESPACE_ID::internal::AddDescriptorsRunner dynamic_init_dummy_google_2fprotobuf_2fsource_5fcontext_2eproto(&descriptor_table_google_2fprotobuf_2fsource_5fcontext_2eproto);
PROTOBUF_NAMESPACE_OPEN
// ===================================================================

@ -14,6 +14,8 @@
#include <google/protobuf/wire_format.h>
// @@protoc_insertion_point(includes)
#include <google/protobuf/port_def.inc>
PROTOBUF_PRAGMA_INIT_SEG
extern PROTOBUF_INTERNAL_EXPORT_google_2fprotobuf_2fstruct_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_ListValue_google_2fprotobuf_2fstruct_2eproto;
PROTOBUF_NAMESPACE_OPEN
class Struct_FieldsEntry_DoNotUseDefaultTypeInternal {
@ -145,7 +147,7 @@ const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_google
};
// Force running AddDescriptors() at dynamic initialization time.
static bool dynamic_init_dummy_google_2fprotobuf_2fstruct_2eproto = (static_cast<void>(::PROTOBUF_NAMESPACE_ID::internal::AddDescriptors(&descriptor_table_google_2fprotobuf_2fstruct_2eproto)), true);
PROTOBUF_ATTRIBUTE_INIT_PRIORITY static ::PROTOBUF_NAMESPACE_ID::internal::AddDescriptorsRunner dynamic_init_dummy_google_2fprotobuf_2fstruct_2eproto(&descriptor_table_google_2fprotobuf_2fstruct_2eproto);
PROTOBUF_NAMESPACE_OPEN
const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* NullValue_descriptor() {
::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(&descriptor_table_google_2fprotobuf_2fstruct_2eproto);

@ -327,3 +327,5 @@ const char* FatalException::what() const throw() {
} // namespace protobuf
} // namespace google
#include <google/protobuf/port_undef.inc>

@ -190,3 +190,5 @@ std::ostream& operator<<(std::ostream& o, const uint128& b) {
} // namespace protobuf
} // namespace google
#include <google/protobuf/port_undef.inc> // NOLINT

@ -515,3 +515,5 @@ TEST(Int128, OStream) {
}
} // namespace protobuf
} // namespace google
#include <google/protobuf/port_undef.inc>

@ -90,12 +90,33 @@ class PROTOBUF_EXPORT CriticalSectionLock {
#endif
// In MSVC std::mutex does not have a constexpr constructor.
// This wrapper makes the constructor constexpr.
template <typename T>
class CallOnceInitializedMutex {
public:
constexpr CallOnceInitializedMutex() : flag_{}, buf_{} {}
~CallOnceInitializedMutex() { get().~T(); }
void lock() { get().lock(); }
void unlock() { get().unlock(); }
private:
T& get() {
std::call_once(flag_, [&] { ::new (static_cast<void*>(&buf_)) T(); });
return reinterpret_cast<T&>(buf_);
}
std::once_flag flag_;
alignas(T) char buf_[sizeof(T)];
};
// Mutex is a natural type to wrap, as both Google and other organizations
// have specialized mutexes; gRPC also provides an injection mechanism for
// custom mutexes.
class GOOGLE_PROTOBUF_CAPABILITY("mutex") PROTOBUF_EXPORT WrappedMutex {
public:
WrappedMutex() = default;
constexpr WrappedMutex() = default;
void Lock() GOOGLE_PROTOBUF_ACQUIRE() { mu_.lock(); }
void Unlock() GOOGLE_PROTOBUF_RELEASE() { mu_.unlock(); }
// Crash if this Mutex is not held exclusively by this thread.
@ -103,11 +124,13 @@ class GOOGLE_PROTOBUF_CAPABILITY("mutex") PROTOBUF_EXPORT WrappedMutex {
void AssertHeld() const {}
private:
#ifndef GOOGLE_PROTOBUF_SUPPORT_WINDOWS_XP
#if defined(GOOGLE_PROTOBUF_SUPPORT_WINDOWS_XP)
CallOnceInitializedMutex<CriticalSectionLock> mu_;
#elif defined(_MSC_VER)
CallOnceInitializedMutex<std::mutex> mu_;
#else
std::mutex mu_;
#else // ifndef GOOGLE_PROTOBUF_SUPPORT_WINDOWS_XP
CriticalSectionLock mu_;
#endif // #ifndef GOOGLE_PROTOBUF_SUPPORT_WINDOWS_XP
#endif
};
using Mutex = WrappedMutex;
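
With CallOnceInitializedMutex, WrappedMutex gains a constexpr constructor
even where std::mutex lacks one (MSVC), so global mutexes get constant
initialization and are safe to lock from other dynamic initializers. A
self-contained restatement of the wrapper with a usage example:

#include <mutex>
#include <new>

template <typename T>
class CallOnceInitializedMutex {
 public:
  constexpr CallOnceInitializedMutex() : flag_{}, buf_{} {}
  ~CallOnceInitializedMutex() { get().~T(); }
  void lock() { get().lock(); }
  void unlock() { get().unlock(); }

 private:
  T& get() {
    // Construct the real mutex lazily, exactly once, on first use.
    std::call_once(flag_, [&] { ::new (static_cast<void*>(&buf_)) T(); });
    return reinterpret_cast<T&>(buf_);
  }
  std::once_flag flag_;
  alignas(T) char buf_[sizeof(T)];
};

// Constant-initialized thanks to the constexpr constructor: no dynamic
// initializer, hence no static-initialization-order hazard.
static CallOnceInitializedMutex<std::mutex> g_mu;

int main() {
  g_mu.lock();
  g_mu.unlock();
}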

@ -51,11 +51,11 @@
#if !defined(PROTOBUF_DISABLE_LITTLE_ENDIAN_OPT_FOR_TEST)
#define PROTOBUF_LITTLE_ENDIAN 1
#endif
#if defined(_MSC_VER) && _MSC_VER >= 1300 && !defined(__INTEL_COMPILER)
// If MSVC has "/RTCc" set, it will complain about truncating casts at
// runtime. This file contains some intentional truncating casts.
#pragma runtime_checks("c", off)
#endif
#else
#include <sys/param.h> // __BYTE_ORDER
#if defined(__OpenBSD__)

@ -54,7 +54,6 @@
#include <google/protobuf/dynamic_message.h>
#include <google/protobuf/map_field.h>
#include <google/protobuf/message.h>
#include <google/protobuf/port_def.inc>
#include <google/protobuf/repeated_field.h>
#include <google/protobuf/unknown_field_set.h>
#include <google/protobuf/wire_format_lite.h>
@ -63,6 +62,9 @@
#include <google/protobuf/stubs/map_util.h>
#include <google/protobuf/stubs/stl_util.h>
// Must be included last.
#include <google/protobuf/port_def.inc>
namespace google {
namespace protobuf {
@ -161,7 +163,7 @@ TextFormat::ParseLocation TextFormat::ParseInfoTree::GetLocation(
const std::vector<TextFormat::ParseLocation>* locations =
FindOrNull(locations_, field);
if (locations == nullptr || index >= locations->size()) {
if (locations == nullptr || index >= static_cast<int64>(locations->size())) {
return TextFormat::ParseLocation();
}
@ -176,7 +178,7 @@ TextFormat::ParseInfoTree* TextFormat::ParseInfoTree::GetTreeForNested(
}
auto it = nested_.find(field);
if (it == nested_.end() || index >= it->second.size()) {
if (it == nested_.end() || index >= static_cast<int64>(it->second.size())) {
return nullptr;
}
@ -1321,7 +1323,7 @@ class TextFormat::Printer::TextGenerator
if (failed_) return;
}
while (size > buffer_size_) {
while (static_cast<int64>(size) > buffer_size_) {
// Data exceeds space in the buffer. Copy what we can and request a
// new buffer.
if (buffer_size_ > 0) {
@ -2393,7 +2395,8 @@ void TextFormat::Printer::PrintFieldValue(const Message& message,
const std::string* value_to_print = &value;
std::string truncated_value;
if (truncate_string_field_longer_than_ > 0 &&
truncate_string_field_longer_than_ < value.size()) {
static_cast<size_t>(truncate_string_field_longer_than_) <
value.size()) {
truncated_value = value.substr(0, truncate_string_field_longer_than_) +
"...<truncated>...";
value_to_print = &truncated_value;
@ -2576,3 +2579,5 @@ void TextFormat::Printer::PrintUnknownFields(
} // namespace protobuf
} // namespace google
#include <google/protobuf/port_undef.inc>

@ -2160,3 +2160,5 @@ TEST(TextFormatUnknownFieldTest, TestUnknownExtension) {
} // namespace text_format_unittest
} // namespace protobuf
} // namespace google
#include <google/protobuf/port_undef.inc>

@ -14,6 +14,8 @@
#include <google/protobuf/wire_format.h>
// @@protoc_insertion_point(includes)
#include <google/protobuf/port_def.inc>
PROTOBUF_PRAGMA_INIT_SEG
PROTOBUF_NAMESPACE_OPEN
class TimestampDefaultTypeInternal {
public:
@ -76,7 +78,7 @@ const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_google
};
// Force running AddDescriptors() at dynamic initialization time.
static bool dynamic_init_dummy_google_2fprotobuf_2ftimestamp_2eproto = (static_cast<void>(::PROTOBUF_NAMESPACE_ID::internal::AddDescriptors(&descriptor_table_google_2fprotobuf_2ftimestamp_2eproto)), true);
PROTOBUF_ATTRIBUTE_INIT_PRIORITY static ::PROTOBUF_NAMESPACE_ID::internal::AddDescriptorsRunner dynamic_init_dummy_google_2fprotobuf_2ftimestamp_2eproto(&descriptor_table_google_2fprotobuf_2ftimestamp_2eproto);
PROTOBUF_NAMESPACE_OPEN
// ===================================================================

@ -14,6 +14,8 @@
#include <google/protobuf/wire_format.h>
// @@protoc_insertion_point(includes)
#include <google/protobuf/port_def.inc>
PROTOBUF_PRAGMA_INIT_SEG
extern PROTOBUF_INTERNAL_EXPORT_google_2fprotobuf_2fany_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_Any_google_2fprotobuf_2fany_2eproto;
extern PROTOBUF_INTERNAL_EXPORT_google_2fprotobuf_2ftype_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<1> scc_info_EnumValue_google_2fprotobuf_2ftype_2eproto;
extern PROTOBUF_INTERNAL_EXPORT_google_2fprotobuf_2ftype_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<1> scc_info_Field_google_2fprotobuf_2ftype_2eproto;
@ -250,7 +252,7 @@ const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_google
};
// Force running AddDescriptors() at dynamic initialization time.
static bool dynamic_init_dummy_google_2fprotobuf_2ftype_2eproto = (static_cast<void>(::PROTOBUF_NAMESPACE_ID::internal::AddDescriptors(&descriptor_table_google_2fprotobuf_2ftype_2eproto)), true);
PROTOBUF_ATTRIBUTE_INIT_PRIORITY static ::PROTOBUF_NAMESPACE_ID::internal::AddDescriptorsRunner dynamic_init_dummy_google_2fprotobuf_2ftype_2eproto(&descriptor_table_google_2fprotobuf_2ftype_2eproto);
PROTOBUF_NAMESPACE_OPEN
const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* Field_Kind_descriptor() {
::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(&descriptor_table_google_2fprotobuf_2ftype_2eproto);

@ -183,7 +183,7 @@ void UnknownFieldSet::DeleteSubrange(int start, int num) {
(fields_)[i + start].Delete();
}
// Slide down the remaining fields.
for (int i = start + num; i < fields_.size(); ++i) {
for (size_t i = start + num; i < fields_.size(); ++i) {
(fields_)[i - num] = (fields_)[i];
}
// Pop off the # of deleted fields.
@ -193,8 +193,8 @@ void UnknownFieldSet::DeleteSubrange(int start, int num) {
}
void UnknownFieldSet::DeleteByNumber(int number) {
int left = 0; // The number of fields left after deletion.
for (int i = 0; i < fields_.size(); ++i) {
size_t left = 0; // The number of fields left after deletion.
for (size_t i = 0; i < fields_.size(); ++i) {
UnknownField* field = &(fields_)[i];
if (field->number() == number) {
field->Delete();
@ -324,3 +324,5 @@ const char* UnknownFieldParse(uint64 tag, UnknownFieldSet* unknown,
} // namespace internal
} // namespace protobuf
} // namespace google
#include <google/protobuf/port_undef.inc>

@ -74,12 +74,18 @@ bool ParseDelimitedFromCodedStream(MessageLite* message,
return false;
}
// Get the position after any size bytes have been read (and only the message
// itself remains).
int position_after_size = input->CurrentPosition();
// Tell the stream not to read beyond that size.
io::CodedInputStream::Limit limit = input->PushLimit(size);
// Parse the message.
if (!message->MergeFromCodedStream(input)) return false;
if (!input->ConsumedEntireMessage()) return false;
if (input->CurrentPosition() - position_after_size != static_cast<int>(size))
return false;
// Release the limit.
input->PopLimit(limit);

@ -82,6 +82,35 @@ TEST(DelimitedMessageUtilTest, DelimitedMessages) {
}
}
TEST(DelimitedMessageUtilTest, FailsAtEndOfStream) {
std::stringstream full_stream;
std::stringstream partial_stream;
{
protobuf_unittest::ForeignMessage message;
message.set_c(42);
message.set_d(24);
EXPECT_TRUE(SerializeDelimitedToOstream(message, &full_stream));
std::string full_output = full_stream.str();
ASSERT_GT(full_output.size(), size_t{2});
ASSERT_EQ(full_output[0], 4);
partial_stream << full_output[0] << full_output[1] << full_output[2];
}
{
bool clean_eof;
io::IstreamInputStream zstream(&partial_stream);
protobuf_unittest::ForeignMessage message;
clean_eof = true;
EXPECT_FALSE(ParseDelimitedFromZeroCopyStream(&message,
&zstream, &clean_eof));
EXPECT_FALSE(clean_eof);
}
}
} // namespace util
} // namespace protobuf
} // namespace google

@ -141,7 +141,7 @@ class MessageDifferencer::MultipleFieldsMapKeyComparator
int path_index) const {
const FieldDescriptor* field = key_field_path[path_index];
std::vector<SpecificField> current_parent_fields(parent_fields);
if (path_index == key_field_path.size() - 1) {
if (path_index == static_cast<int64>(key_field_path.size() - 1)) {
if (field->is_repeated()) {
if (!message_differencer_->CompareRepeatedField(
message1, message2, field, &current_parent_fields)) {
@ -187,7 +187,7 @@ class MessageDifferencer::MultipleFieldsMapKeyComparator
void MatchIndicesPostProcessorForSmartList(
std::vector<int>* match_list1, std::vector<int>* match_list2) {
int last_matched_index = -1;
for (int i = 0; i < match_list1->size(); ++i) {
for (size_t i = 0; i < match_list1->size(); ++i) {
if (match_list1->at(i) < 0) {
continue;
}
@ -395,7 +395,7 @@ void MessageDifferencer::TreatAsMapWithMultipleFieldPathsAsKey(
GOOGLE_CHECK_EQ(FieldDescriptor::CPPTYPE_MESSAGE, field->cpp_type())
<< "Field has to be message type. Field name is: " << field->full_name();
for (const auto& key_field_path : key_field_paths) {
for (int j = 0; j < key_field_path.size(); ++j) {
for (size_t j = 0; j < key_field_path.size(); ++j) {
const FieldDescriptor* parent_field =
j == 0 ? field : key_field_path[j - 1];
const FieldDescriptor* child_field = key_field_path[j];
@ -669,8 +669,8 @@ bool MessageDifferencer::CompareRequestedFieldsUsingSettings(
FieldDescriptorArray MessageDifferencer::CombineFields(
const FieldDescriptorArray& fields1, Scope fields1_scope,
const FieldDescriptorArray& fields2, Scope fields2_scope) {
int index1 = 0;
int index2 = 0;
size_t index1 = 0;
size_t index2 = 0;
tmp_message_fields_.clear();
@ -1417,8 +1417,8 @@ bool MessageDifferencer::CompareUnknownFields(
// Now that we have two sorted lists, we can detect fields which appear only
// in one list or the other by traversing them simultaneously.
int index1 = 0;
int index2 = 0;
size_t index1 = 0;
size_t index2 = 0;
while (index1 < fields1.size() || index2 < fields2.size()) {
enum {
ADDITION,
@ -1523,12 +1523,14 @@ bool MessageDifferencer::CompareUnknownFields(
if (IsUnknownFieldIgnored(message1, message2, specific_field,
*parent_field)) {
if (reporter_ != NULL) {
if (report_ignores_ && reporter_ != NULL) {
parent_field->push_back(specific_field);
reporter_->ReportUnknownFieldIgnored(message1, message2, *parent_field);
parent_field->pop_back();
}
return true;
if (change_type != ADDITION) ++index1;
if (change_type != DELETION) ++index2;
continue;
}
if (change_type == ADDITION || change_type == DELETION ||
@ -1881,7 +1883,7 @@ MessageDifferencer::StreamReporter::~StreamReporter() {
void MessageDifferencer::StreamReporter::PrintPath(
const std::vector<SpecificField>& field_path, bool left_side) {
for (int i = 0; i < field_path.size(); ++i) {
for (size_t i = 0; i < field_path.size(); ++i) {
if (i > 0) {
printer_->Print(".");
}
