Merge branch 'master' into kotlinProtos

deannagarcia 4 years ago committed by GitHub
commit e844a53716
  1. CHANGES.txt (12)
  2. Protobuf-C++.podspec (2)
  3. Protobuf.podspec (2)
  4. configure.ac (2)
  5. csharp/Google.Protobuf.Tools.nuspec (2)
  6. csharp/src/Google.Protobuf.Test/testprotos.pb (BIN)
  7. csharp/src/Google.Protobuf/Google.Protobuf.csproj (2)
  8. csharp/src/Google.Protobuf/Reflection/Descriptor.cs (16)
  9. examples/AddPerson.java (4)
  10. examples/ListPeople.java (4)
  11. examples/addressbook.proto (3)
  12. java/bom/pom.xml (2)
  13. java/core/pom.xml (2)
  14. java/core/src/main/java/com/google/protobuf/CodedInputStream.java (3)
  15. java/core/src/test/java/com/google/protobuf/CodedInputStreamTest.java (29)
  16. java/lite/pom.xml (2)
  17. java/pom.xml (2)
  18. java/util/pom.xml (2)
  19. js/package.json (2)
  20. kokoro/linux/aarch64/dockcross_helpers/run_dockcross_manylinux2014_aarch64.sh (26)
  21. kokoro/linux/aarch64/python_crosscompile_aarch64.sh (34)
  22. kokoro/linux/aarch64/python_run_tests_with_qemu_aarch64.sh (28)
  23. kokoro/linux/aarch64/test_python_aarch64.sh (19)
  24. php/ext/google/protobuf/package.xml (25)
  25. php/ext/google/protobuf/protobuf.h (2)
  26. php/src/Google/Protobuf/Internal/FileOptions.php (64)
  27. protoc-artifacts/pom.xml (2)
  28. python/google/protobuf/__init__.py (2)
  29. ruby/ext/google/protobuf_c/message.c (9)
  30. ruby/ext/google/protobuf_c/protobuf.c (85)
  31. ruby/ext/google/protobuf_c/protobuf.h (2)
  32. ruby/google-protobuf.gemspec (2)
  33. ruby/tests/common_tests.rb (3)
  34. src/Makefile.am (2)
  35. src/google/protobuf/any.pb.h (2)
  36. src/google/protobuf/api.pb.h (2)
  37. src/google/protobuf/compiler/plugin.pb.h (2)
  38. src/google/protobuf/descriptor.pb.h (2)
  39. src/google/protobuf/descriptor.proto (16)
  40. src/google/protobuf/duration.pb.h (2)
  41. src/google/protobuf/empty.pb.h (2)
  42. src/google/protobuf/field_mask.pb.h (2)
  43. src/google/protobuf/port_def.inc (2)
  44. src/google/protobuf/port_undef.inc (4)
  45. src/google/protobuf/source_context.pb.h (2)
  46. src/google/protobuf/struct.pb.h (2)
  47. src/google/protobuf/stubs/common.h (2)
  48. src/google/protobuf/stubs/int128.cc (15)
  49. src/google/protobuf/stubs/int128.h (127)
  50. src/google/protobuf/stubs/int128_unittest.cc (200)
  51. src/google/protobuf/stubs/logging.h (2)
  52. src/google/protobuf/timestamp.pb.h (2)
  53. src/google/protobuf/type.pb.h (2)
  54. src/google/protobuf/util/field_comparator.h (17)
  55. src/google/protobuf/util/message_differencer.cc (65)
  56. src/google/protobuf/util/message_differencer.h (15)
  57. src/google/protobuf/wrappers.pb.h (2)

@ -22,8 +22,11 @@ Unreleased Changes (C++/Java/Python/PHP/Objective-C/C#/Ruby/JavaScript)
on an error path.
* Avoid expensive inlined code space for encoding message length for messages
>= 128 bytes and instead do a procedure call to a shared out-of-line routine.
* util::DefaultFieldComparator will be final in a future version of protobuf.
Subclasses should inherit from SimpleFieldComparator instead.
Java:
* Detect invalid overflow of byteLimit and return InvalidProtocolBufferException as documented.
* Exceptions thrown while reading from an InputStream in parseFrom are now
included as causes.
* Support potentially more efficient proto parsing from RopeByteStrings.
@ -37,6 +40,15 @@ Unreleased Changes (C++/Java/Python/PHP/Objective-C/C#/Ruby/JavaScript)
JavaScript
* Make Any.pack() chainable.
2021-04-02 version 3.15.7 (C++/Java/Python/PHP/Objective-C/C#/Ruby/JavaScript)
C++
* Remove the ::pb namespace (alias) (#8423)
Ruby
* Fix unbounded memory growth for Ruby <2.7 (#8429)
* Fixed message equality in cases where the message type is different (#8434)
2021-03-10 version 3.15.6 (C++/Java/Python/PHP/Objective-C/C#/Ruby/JavaScript)
Ruby

@ -1,6 +1,6 @@
Pod::Spec.new do |s|
s.name = 'Protobuf-C++'
s.version = '3.15.6'
s.version = '3.15.7'
s.summary = 'Protocol Buffers v3 runtime library for C++.'
s.homepage = 'https://github.com/google/protobuf'
s.license = '3-Clause BSD License'

@ -5,7 +5,7 @@
# dependent projects use the :git notation to refer to the library.
Pod::Spec.new do |s|
s.name = 'Protobuf'
s.version = '3.15.6'
s.version = '3.15.7'
s.summary = 'Protocol Buffers v.3 runtime library for Objective-C.'
s.homepage = 'https://github.com/protocolbuffers/protobuf'
s.license = '3-Clause BSD License'

@ -17,7 +17,7 @@ AC_PREREQ(2.59)
# In the SVN trunk, the version should always be the next anticipated release
# version with the "-pre" suffix. (We used to use "-SNAPSHOT" but this pushed
# the size of one file name in the dist tarfile over the 99-char limit.)
AC_INIT([Protocol Buffers],[3.15.6],[protobuf@googlegroups.com],[protobuf])
AC_INIT([Protocol Buffers],[3.15.7],[protobuf@googlegroups.com],[protobuf])
AM_MAINTAINER_MODE([enable])

@ -5,7 +5,7 @@
<title>Google Protocol Buffers tools</title>
<summary>Tools for Protocol Buffers - Google's data interchange format.</summary>
<description>See project site for more info.</description>
<version>3.15.6</version>
<version>3.15.7</version>
<authors>Google Inc.</authors>
<owners>protobuf-packages</owners>
<licenseUrl>https://github.com/protocolbuffers/protobuf/blob/master/LICENSE</licenseUrl>

@ -4,7 +4,7 @@
<Description>C# runtime library for Protocol Buffers - Google's data interchange format.</Description>
<Copyright>Copyright 2015, Google Inc.</Copyright>
<AssemblyTitle>Google Protocol Buffers</AssemblyTitle>
<VersionPrefix>3.15.6</VersionPrefix>
<VersionPrefix>3.15.7</VersionPrefix>
<!-- C# 7.2 is required for Span/BufferWriter/ReadOnlySequence -->
<LangVersion>7.2</LangVersion>
<Authors>Google Inc.</Authors>

@ -4796,11 +4796,11 @@ namespace Google.Protobuf.Reflection {
private string javaOuterClassname_;
/// <summary>
/// If set, all the classes from the .proto file are wrapped in a single
/// outer class with the given name. This applies to both Proto1
/// (equivalent to the old "--one_java_file" option) and Proto2 (where
/// a .proto always translates to a single class, but you may want to
/// explicitly choose the class name).
/// Controls the name of the wrapper Java class generated for the .proto file.
/// That class will always contain the .proto file's getDescriptor() method as
/// well as any top-level extensions defined in the .proto file.
/// If java_multiple_files is disabled, then all the other classes from the
/// .proto file will be nested inside the single wrapper outer class.
/// </summary>
[global::System.Diagnostics.DebuggerNonUserCodeAttribute]
public string JavaOuterClassname {
@ -4826,10 +4826,10 @@ namespace Google.Protobuf.Reflection {
private bool javaMultipleFiles_;
/// <summary>
/// If set true, then the Java code generator will generate a separate .java
/// If enabled, then the Java code generator will generate a separate .java
/// file for each top-level message, enum, and service defined in the .proto
/// file. Thus, these types will *not* be nested inside the outer class
/// named by java_outer_classname. However, the outer class will still be
/// file. Thus, these types will *not* be nested inside the wrapper class
/// named by java_outer_classname. However, the wrapper class will still be
/// generated to contain the file's getDescriptor() method as well as any
/// top-level extensions defined in the file.
/// </summary>
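
The reworded java_outer_classname / java_multiple_files documentation above boils down to a simple generated-code layout. A minimal sketch with hypothetical file and message names (FooProtos, MyMessage), assuming the .proto file was compiled with `option java_outer_classname = "FooProtos";` and java_multiple_files left at its default of false:

```java
// Hypothetical usage; FooProtos and MyMessage stand in for whatever protoc
// generates from your .proto file.
import com.example.foo.FooProtos;
import com.google.protobuf.Descriptors.FileDescriptor;

public class WrapperClassSketch {
  public static void main(String[] args) {
    // With java_multiple_files disabled, message classes nest inside the
    // wrapper class named by java_outer_classname.
    FooProtos.MyMessage msg = FooProtos.MyMessage.getDefaultInstance();

    // The wrapper class is generated either way, and always carries the
    // file's descriptor and any top-level extensions.
    FileDescriptor file = FooProtos.getDescriptor();
    System.out.println(file.getName() + ": " + msg.getSerializedSize());
  }
}
```

With java_multiple_files = true, MyMessage would instead be generated as a top-level class in the same package, but FooProtos.getDescriptor() would still exist.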

@ -1,7 +1,7 @@
// See README.txt for information and build instructions.
import com.example.tutorial.AddressBookProtos.AddressBook;
import com.example.tutorial.AddressBookProtos.Person;
import com.example.tutorial.protos.AddressBook;
import com.example.tutorial.protos.Person;
import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.FileNotFoundException;

@ -1,7 +1,7 @@
// See README.txt for information and build instructions.
import com.example.tutorial.AddressBookProtos.AddressBook;
import com.example.tutorial.AddressBookProtos.Person;
import com.example.tutorial.protos.AddressBook;
import com.example.tutorial.protos.Person;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.PrintStream;

@ -14,7 +14,8 @@ import "google/protobuf/timestamp.proto";
// [END declaration]
// [START java_declaration]
option java_package = "com.example.tutorial";
option java_multiple_files = true;
option java_package = "com.example.tutorial.protos";
option java_outer_classname = "AddressBookProtos";
// [END java_declaration]

@ -4,7 +4,7 @@
<groupId>com.google.protobuf</groupId>
<artifactId>protobuf-bom</artifactId>
<version>3.15.6</version>
<version>3.15.7</version>
<packaging>pom</packaging>
<name>Protocol Buffers [BOM]</name>

@ -4,7 +4,7 @@
<parent>
<groupId>com.google.protobuf</groupId>
<artifactId>protobuf-parent</artifactId>
<version>3.15.6</version>
<version>3.15.7</version>
</parent>
<artifactId>protobuf-java</artifactId>

@ -1185,6 +1185,9 @@ public abstract class CodedInputStream {
throw InvalidProtocolBufferException.negativeSize();
}
byteLimit += getTotalBytesRead();
if (byteLimit < 0) {
throw InvalidProtocolBufferException.parseFailure();
}
final int oldLimit = currentLimit;
if (byteLimit > oldLimit) {
throw InvalidProtocolBufferException.truncatedMessage();
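
The added check guards against 32-bit wraparound: byteLimit is a length decoded from untrusted input, and adding getTotalBytesRead() to it can overflow int, producing a value that slips past the earlier negativeSize() test. A minimal sketch of the arithmetic, with hypothetical values:

```java
// Hypothetical values showing the wraparound the new `byteLimit < 0` check rejects.
public class ByteLimitOverflowSketch {
  public static void main(String[] args) {
    int byteLimit = Integer.MAX_VALUE - 5; // huge length decoded from the wire
    int totalBytesRead = 100;              // bytes already consumed from the stream
    byteLimit += totalBytesRead;           // int overflow: wraps to a negative value
    System.out.println(byteLimit);         // -2147483554
    // pushLimit() now reports this as InvalidProtocolBufferException
    // instead of silently continuing with a bogus limit.
  }
}
```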

@ -1284,4 +1284,33 @@ public class CodedInputStreamTest extends TestCase {
maliciousCapture.get(1)[0] = 0x9;
assertEquals(0x9, byteArray[0]); // MODIFICATION! Should we fix?
}
public void testInvalidInputYieldsInvalidProtocolBufferException_readTag() throws Exception {
byte[] input = new byte[] {0x0a, (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff, 0x77};
CodedInputStream inputStream = CodedInputStream.newInstance(input);
try {
inputStream.readTag();
int size = inputStream.readRawVarint32();
inputStream.pushLimit(size);
inputStream.readTag();
fail();
} catch (InvalidProtocolBufferException ex) {
// Expected.
}
}
public void testInvalidInputYieldsInvalidProtocolBufferException_readBytes() throws Exception {
byte[] input =
new byte[] {0x0a, (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff, 0x67, 0x1a, 0x1a};
CodedInputStream inputStream = CodedInputStream.newInstance(input);
try {
inputStream.readTag();
int size = inputStream.readRawVarint32();
inputStream.pushLimit(size);
inputStream.readBytes();
fail();
} catch (InvalidProtocolBufferException ex) {
// Expected.
}
}
}

@ -4,7 +4,7 @@
<parent>
<groupId>com.google.protobuf</groupId>
<artifactId>protobuf-parent</artifactId>
<version>3.15.6</version>
<version>3.15.7</version>
</parent>
<artifactId>protobuf-javalite</artifactId>

@ -4,7 +4,7 @@
<groupId>com.google.protobuf</groupId>
<artifactId>protobuf-parent</artifactId>
<version>3.15.6</version>
<version>3.15.7</version>
<packaging>pom</packaging>
<name>Protocol Buffers [Parent]</name>

@ -4,7 +4,7 @@
<parent>
<groupId>com.google.protobuf</groupId>
<artifactId>protobuf-parent</artifactId>
<version>3.15.6</version>
<version>3.15.7</version>
</parent>
<artifactId>protobuf-java-util</artifactId>

@ -1,6 +1,6 @@
{
"name": "google-protobuf",
"version": "3.15.6",
"version": "3.15.7",
"description": "Protocol Buffers for JavaScript",
"main": "google-protobuf.js",
"files": [

@ -0,0 +1,26 @@
#!/bin/bash
set -e
# go to the repo root
cd $(dirname $0)/../../../..
# running the dockcross image without any arguments generates a wrapper
# script that can be used to run commands under the dockcross image
# easily.
# See https://github.com/dockcross/dockcross#usage for details
docker run --rm -it dockcross/manylinux2014-aarch64 >dockcross-manylinux2014-aarch64.sh
chmod +x dockcross-manylinux2014-aarch64.sh
# the wrapper script has CRLF line endings and bash doesn't like that
# so we change CRLF line endings into LF.
sed -i 's/\r//g' dockcross-manylinux2014-aarch64.sh
# The dockcross wrapper script runs arbitrary commands under the selected dockcross
# image with the following properties which make its use very convenient:
# * the current working directory is mounted under /work so the container can easily
# access the current workspace
# * the processes in the container run under the same UID and GID as the host process so unlike
# vanilla "docker run" invocations, the workspace doesn't get polluted with files
# owned by root.
./dockcross-manylinux2014-aarch64.sh "$@"

@ -0,0 +1,34 @@
#!/bin/bash
#
# Builds protobuf C++ with aarch64 crosscompiler and runs a basic set of tests under an emulator.
# NOTE: This script is expected to run under the dockcross/linux-arm64 docker image.
set -ex
PYTHON="/opt/python/cp38-cp38/bin/python"
./autogen.sh
CXXFLAGS="-fPIC -g -O2" ./configure
make -j8
pushd python
# TODO: currently this step relies on qemu being registered with binfmt_misc so that
# aarch64 binaries are automatically run with an emulator. This works well once
# "sudo apt install qemu-user-static binfmt-support" is installed on the host machine.
${PYTHON} setup.py build_py
# when crosscompiling for aarch64, --plat-name needs to be set explicitly
# to end up with correctly named wheel file
# the value should be manylinuxABC_ARCH and dockcross docker image
# conveniently provides the value in the AUDITWHEEL_PLAT env
plat_name_flag="--plat-name=$AUDITWHEEL_PLAT"
# override the value of EXT_SUFFIX to make sure the crosscompiled .so files in the wheel have the correct filename suffix
export PROTOCOL_BUFFERS_OVERRIDE_EXT_SUFFIX="$(${PYTHON} -c 'import sysconfig; print(sysconfig.get_config_var("EXT_SUFFIX").replace("-x86_64-linux-gnu.so", "-aarch64-linux-gnu.so"))')"
# Build the python extension inplace to be able to run python unittests later
${PYTHON} setup.py build_ext --cpp_implementation --compile_static_extension --inplace
# Build the binary wheel (to check it with auditwheel)
${PYTHON} setup.py bdist_wheel --cpp_implementation --compile_static_extension $plat_name_flag

@ -0,0 +1,28 @@
#!/bin/bash
set -ex
# go to the repo root
cd $(dirname $0)/../../..
cd python
PYTHON="/opt/python/cp38-cp38/bin/python"
${PYTHON} -m pip install --user six pytest auditwheel
# check that we are really using aarch64 python
(${PYTHON} -c 'import sysconfig; print(sysconfig.get_platform())' | grep -q "linux-aarch64") || (echo "Wrong python platform, needs to be aarch64 python."; exit 1)
# step 1: run all python unittests
# we've built the python extension previously with --inplace option
# so we can just discover all the unittests and run them directly under
# the python/ directory.
LD_LIBRARY_PATH=../src/.libs PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp ${PYTHON} -m pytest google/protobuf
# step 2: run auditwheel show to check that the wheel is manylinux2014 compatible.
# auditwheel needs to run on wheel's target platform (or under an emulator)
${PYTHON} -m auditwheel show dist/protobuf-*-manylinux2014_aarch64.whl
# step 3: smoketest that the wheel can be installed and run a smokecheck
${PYTHON} -m pip install dist/protobuf-*-manylinux2014_aarch64.whl
# when python cpp extension is on, simply importing a message type will trigger loading the cpp extension
PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp ${PYTHON} -c 'import google.protobuf.timestamp_pb2; print("Successfully loaded the python cpp extension!")'

@ -0,0 +1,19 @@
#!/bin/bash
set -e
# go to the repo root
cd $(dirname $0)/../../..
# crosscompile python extension and the binary wheel under dockcross/manylinux2014-aarch64 image
kokoro/linux/aarch64/dockcross_helpers/run_dockcross_manylinux2014_aarch64.sh kokoro/linux/aarch64/python_crosscompile_aarch64.sh
# once crosscompilation is done, use an actual aarch64 docker image (with a real aarch64 python) to run all the tests under an emulator
# * mount the protobuf root as /work to be able to access the crosscompiled files
# * intentionally use a different image than manylinux2014 so that we don't build and test on the same linux distribution
# (manylinux_2_24 is debian-based while manylinux2014 is centos-based)
# * to avoid running the process inside docker as root (which can pollute the workspace with files owned by root), we force
# running under current user's UID and GID. To be able to do that, we need to provide a home directory for the user
# otherwise the UID would be homeless under the docker container and pip install wouldn't work. For simplicity,
# we just map the user's home to a throwaway temporary directory
docker run -it --rm --user "$(id -u):$(id -g)" -e "HOME=/home/fake-user" -v "$(mktemp -d):/home/fake-user" -v "$(pwd)":/work -w /work quay.io/pypa/manylinux_2_24_aarch64 kokoro/linux/aarch64/python_run_tests_with_qemu_aarch64.sh

@ -10,11 +10,11 @@
<email>protobuf-opensource@google.com</email>
<active>yes</active>
</lead>
<date>2021-03-10</date>
<time>10:11:34</time>
<date>2021-04-02</date>
<time>10:01:42</time>
<version>
<release>3.15.6</release>
<api>3.15.6</api>
<release>3.15.7</release>
<api>3.15.7</api>
</version>
<stability>
<release>stable</release>
@ -22,7 +22,7 @@
</stability>
<license uri="https://opensource.org/licenses/BSD-3-Clause">3-Clause BSD License</license>
<notes>
No new changes in 3.15.6
No new changes in 3.15.7
</notes>
<contents>
<dir baseinstalldir="/" name="/">
@ -887,5 +887,20 @@ G A release.
<notes>
</notes>
</release>
<release>
<version>
<release>3.15.7</release>
<api>3.15.7</api>
</version>
<stability>
<release>stable</release>
<api>stable</api>
</stability>
<date>2021-04-02</date>
<time>10:01:42</time>
<license uri="https://opensource.org/licenses/BSD-3-Clause">3-Clause BSD License</license>
<notes>
</notes>
</release>
</changelog>
</package>

@ -76,7 +76,7 @@ ZEND_BEGIN_ARG_INFO_EX(arginfo_setter, 0, 0, 1)
ZEND_ARG_INFO(0, value)
ZEND_END_ARG_INFO()
#define PHP_PROTOBUF_VERSION "3.15.6"
#define PHP_PROTOBUF_VERSION "3.15.7"
// ptr -> PHP object cache. This is a weak map that caches lazily-created
// wrapper objects around upb types:

@ -25,20 +25,20 @@ class FileOptions extends \Google\Protobuf\Internal\Message
*/
protected $java_package = null;
/**
* If set, all the classes from the .proto file are wrapped in a single
* outer class with the given name. This applies to both Proto1
* (equivalent to the old "--one_java_file" option) and Proto2 (where
* a .proto always translates to a single class, but you may want to
* explicitly choose the class name).
* Controls the name of the wrapper Java class generated for the .proto file.
* That class will always contain the .proto file's getDescriptor() method as
* well as any top-level extensions defined in the .proto file.
* If java_multiple_files is disabled, then all the other classes from the
* .proto file will be nested inside the single wrapper outer class.
*
* Generated from protobuf field <code>optional string java_outer_classname = 8;</code>
*/
protected $java_outer_classname = null;
/**
* If set true, then the Java code generator will generate a separate .java
* If enabled, then the Java code generator will generate a separate .java
* file for each top-level message, enum, and service defined in the .proto
* file. Thus, these types will *not* be nested inside the outer class
* named by java_outer_classname. However, the outer class will still be
* file. Thus, these types will *not* be nested inside the wrapper class
* named by java_outer_classname. However, the wrapper class will still be
* generated to contain the file's getDescriptor() method as well as any
* top-level extensions defined in the file.
*
@ -192,16 +192,16 @@ class FileOptions extends \Google\Protobuf\Internal\Message
* inappropriate because proto packages do not normally start with backwards
* domain names.
* @type string $java_outer_classname
* If set, all the classes from the .proto file are wrapped in a single
* outer class with the given name. This applies to both Proto1
* (equivalent to the old "--one_java_file" option) and Proto2 (where
* a .proto always translates to a single class, but you may want to
* explicitly choose the class name).
* Controls the name of the wrapper Java class generated for the .proto file.
* That class will always contain the .proto file's getDescriptor() method as
* well as any top-level extensions defined in the .proto file.
* If java_multiple_files is disabled, then all the other classes from the
* .proto file will be nested inside the single wrapper outer class.
* @type bool $java_multiple_files
* If set true, then the Java code generator will generate a separate .java
* If enabled, then the Java code generator will generate a separate .java
* file for each top-level message, enum, and service defined in the .proto
* file. Thus, these types will *not* be nested inside the outer class
* named by java_outer_classname. However, the outer class will still be
* file. Thus, these types will *not* be nested inside the wrapper class
* named by java_outer_classname. However, the wrapper class will still be
* generated to contain the file's getDescriptor() method as well as any
* top-level extensions defined in the file.
* @type bool $java_generate_equals_and_hash
@ -319,11 +319,11 @@ class FileOptions extends \Google\Protobuf\Internal\Message
}
/**
* If set, all the classes from the .proto file are wrapped in a single
* outer class with the given name. This applies to both Proto1
* (equivalent to the old "--one_java_file" option) and Proto2 (where
* a .proto always translates to a single class, but you may want to
* explicitly choose the class name).
* Controls the name of the wrapper Java class generated for the .proto file.
* That class will always contain the .proto file's getDescriptor() method as
* well as any top-level extensions defined in the .proto file.
* If java_multiple_files is disabled, then all the other classes from the
* .proto file will be nested inside the single wrapper outer class.
*
* Generated from protobuf field <code>optional string java_outer_classname = 8;</code>
* @return string
@ -344,11 +344,11 @@ class FileOptions extends \Google\Protobuf\Internal\Message
}
/**
* If set, all the classes from the .proto file are wrapped in a single
* outer class with the given name. This applies to both Proto1
* (equivalent to the old "--one_java_file" option) and Proto2 (where
* a .proto always translates to a single class, but you may want to
* explicitly choose the class name).
* Controls the name of the wrapper Java class generated for the .proto file.
* That class will always contain the .proto file's getDescriptor() method as
* well as any top-level extensions defined in the .proto file.
* If java_multiple_files is disabled, then all the other classes from the
* .proto file will be nested inside the single wrapper outer class.
*
* Generated from protobuf field <code>optional string java_outer_classname = 8;</code>
* @param string $var
@ -363,10 +363,10 @@ class FileOptions extends \Google\Protobuf\Internal\Message
}
/**
* If set true, then the Java code generator will generate a separate .java
* If enabled, then the Java code generator will generate a separate .java
* file for each top-level message, enum, and service defined in the .proto
* file. Thus, these types will *not* be nested inside the outer class
* named by java_outer_classname. However, the outer class will still be
* file. Thus, these types will *not* be nested inside the wrapper class
* named by java_outer_classname. However, the wrapper class will still be
* generated to contain the file's getDescriptor() method as well as any
* top-level extensions defined in the file.
*
@ -389,10 +389,10 @@ class FileOptions extends \Google\Protobuf\Internal\Message
}
/**
* If set true, then the Java code generator will generate a separate .java
* If enabled, then the Java code generator will generate a separate .java
* file for each top-level message, enum, and service defined in the .proto
* file. Thus, these types will *not* be nested inside the outer class
* named by java_outer_classname. However, the outer class will still be
* file. Thus, these types will *not* be nested inside the wrapper class
* named by java_outer_classname. However, the wrapper class will still be
* generated to contain the file's getDescriptor() method as well as any
* top-level extensions defined in the file.
*

@ -8,7 +8,7 @@
</parent>
<groupId>com.google.protobuf</groupId>
<artifactId>protoc</artifactId>
<version>3.15.6</version>
<version>3.15.7</version>
<packaging>pom</packaging>
<name>Protobuf Compiler</name>
<description>

@ -30,4 +30,4 @@
# Copyright 2007 Google Inc. All Rights Reserved.
__version__ = '3.15.6'
__version__ = '3.15.7'

@ -697,16 +697,13 @@ bool Message_Equal(const upb_msg *m1, const upb_msg *m2, const upb_msgdef *m) {
* field is of a primitive type).
*/
static VALUE Message_eq(VALUE _self, VALUE _other) {
if (TYPE(_self) != TYPE(_other)) {
return Qfalse;
}
if (CLASS_OF(_self) != CLASS_OF(_other)) return Qfalse;
Message* self = ruby_to_Message(_self);
Message* other = ruby_to_Message(_other);
assert(self->msgdef == other->msgdef);
return Message_Equal(self->msg, other->msg, self->msgdef)
? Qtrue
: Qfalse;
return Message_Equal(self->msg, other->msg, self->msgdef) ? Qtrue : Qfalse;
}
uint64_t Message_Hash(const upb_msg* msg, const upb_msgdef* m, uint64_t seed) {

@ -251,14 +251,80 @@ void Arena_register(VALUE module) {
// The object is used only for its identity; it does not contain any data.
VALUE secondary_map = Qnil;
// Mutations to the map are under a mutex, because SecondaryMap_MaybeGC()
// iterates over the map which cannot happen in parallel with insertions, or
// Ruby will throw:
// can't add a new key into hash during iteration (RuntimeError)
VALUE secondary_map_mutex = Qnil;
// Lambda that will GC entries from the secondary map that are no longer present
// in the primary map.
VALUE gc_secondary_map_lambda = Qnil;
ID length;
extern VALUE weak_obj_cache;
static void SecondaryMap_Init() {
rb_gc_register_address(&secondary_map);
rb_gc_register_address(&gc_secondary_map_lambda);
rb_gc_register_address(&secondary_map_mutex);
secondary_map = rb_hash_new();
gc_secondary_map_lambda = rb_eval_string(
"->(secondary, weak) {\n"
" secondary.delete_if { |k, v| !weak.key?(v) }\n"
"}\n");
secondary_map_mutex = rb_mutex_new();
length = rb_intern("length");
}
static VALUE SecondaryMap_Get(VALUE key) {
// The secondary map is a regular Hash, and will never shrink on its own.
// The main object cache is a WeakMap that will automatically remove entries
// when the target object is no longer reachable, but unless we manually
// remove the corresponding entries from the secondary map, it will grow
// without bound.
//
// To avoid this unbounded growth we periodically remove entries from the
// secondary map that are no longer present in the WeakMap. The logic of
// how often to perform this GC is an arbitrary tuning parameter that
// represents a straightforward CPU/memory tradeoff.
//
// Requires: secondary_map_mutex is held.
static void SecondaryMap_MaybeGC() {
PBRUBY_ASSERT(rb_mutex_locked_p(secondary_map_mutex) == Qtrue);
size_t weak_len = NUM2ULL(rb_funcall(weak_obj_cache, length, 0));
size_t secondary_len = RHASH_SIZE(secondary_map);
if (secondary_len < weak_len) {
// Logically this case should not be possible: a valid entry cannot exist in
// the weak table unless there is a corresponding entry in the secondary
// table. It should *always* be the case that secondary_len >= weak_len.
//
// However ObjectSpace::WeakMap#length (and therefore weak_len) is
// unreliable: it overreports its true length by including non-live objects.
// However these non-live objects are not yielded in iteration, so we may
// have previously deleted them from the secondary map in a previous
// invocation of SecondaryMap_MaybeGC().
//
// In this case, we can't measure any waste, so we just return.
return;
}
size_t waste = secondary_len - weak_len;
// GC if we could remove at least 2000 entries or 20% of the table size
// (whichever is greater). Since the cost of the GC pass is O(N), we
// want to make sure that we condition this on overall table size, to
// avoid O(N^2) CPU costs.
size_t threshold = PBRUBY_MAX(secondary_len * 0.2, 2000);
if (waste > threshold) {
rb_funcall(gc_secondary_map_lambda, rb_intern("call"), 2,
secondary_map, weak_obj_cache);
}
}
// Requires: secondary_map_mutex is held by this thread iff create == true.
static VALUE SecondaryMap_Get(VALUE key, bool create) {
PBRUBY_ASSERT(!create || rb_mutex_locked_p(secondary_map_mutex) == Qtrue);
VALUE ret = rb_hash_lookup(secondary_map, key);
if (ret == Qnil) {
if (ret == Qnil && create) {
SecondaryMap_MaybeGC();
ret = rb_eval_string("Object.new");
rb_hash_aset(secondary_map, key, ret);
}
@ -267,14 +333,15 @@ static VALUE SecondaryMap_Get(VALUE key) {
#endif
static VALUE ObjectCache_GetKey(const void* key) {
// Requires: secondary_map_mutex is held by this thread iff create == true.
static VALUE ObjectCache_GetKey(const void* key, bool create) {
char buf[sizeof(key)];
memcpy(&buf, &key, sizeof(key));
intptr_t key_int = (intptr_t)key;
PBRUBY_ASSERT((key_int & 3) == 0);
VALUE ret = LL2NUM(key_int >> 2);
#if USE_SECONDARY_MAP
ret = SecondaryMap_Get(ret);
ret = SecondaryMap_Get(ret, create);
#endif
return ret;
}
@ -298,14 +365,20 @@ static void ObjectCache_Init() {
void ObjectCache_Add(const void* key, VALUE val) {
PBRUBY_ASSERT(ObjectCache_Get(key) == Qnil);
VALUE key_rb = ObjectCache_GetKey(key);
#if USE_SECONDARY_MAP
rb_mutex_lock(secondary_map_mutex);
#endif
VALUE key_rb = ObjectCache_GetKey(key, true);
rb_funcall(weak_obj_cache, item_set, 2, key_rb, val);
#if USE_SECONDARY_MAP
rb_mutex_unlock(secondary_map_mutex);
#endif
PBRUBY_ASSERT(ObjectCache_Get(key) == val);
}
// Returns the cached object for this key, if any. Otherwise returns Qnil.
VALUE ObjectCache_Get(const void* key) {
VALUE key_rb = ObjectCache_GetKey(key);
VALUE key_rb = ObjectCache_GetKey(key, false);
return rb_funcall(weak_obj_cache, item_get, 1, key_rb);
}
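
The comments above describe the secondary-map GC trigger as an arbitrary CPU/memory tradeoff. A sketch of just that decision rule, transliterated into Java for illustration (the names are hypothetical; the real logic is the C code in SecondaryMap_MaybeGC above):

```java
// Illustrative transliteration of the SecondaryMap_MaybeGC trigger; the actual
// implementation lives in ruby/ext/google/protobuf_c/protobuf.c.
public class SecondaryMapGcSketch {
  static boolean shouldGc(long secondaryLen, long weakLen) {
    // WeakMap#length can overreport, so waste cannot be measured; skip the pass.
    if (secondaryLen < weakLen) {
      return false;
    }
    long waste = secondaryLen - weakLen;
    // GC when at least 2000 entries, or 20% of the table, could be reclaimed,
    // keeping the O(N) sweep proportional to the table size.
    long threshold = Math.max((long) (secondaryLen * 0.2), 2000L);
    return waste > threshold;
  }

  public static void main(String[] args) {
    System.out.println(shouldGc(12_000, 9_000));  // waste 3000 > max(2400, 2000) -> true
    System.out.println(shouldGc(12_000, 11_000)); // waste 1000 -> false
  }
}
```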

@ -106,6 +106,8 @@ extern VALUE cTypeError;
#define PBRUBY_ASSERT(expr) assert(expr)
#endif
#define PBRUBY_MAX(x, y) (((x) > (y)) ? (x) : (y))
#define UPB_UNUSED(var) (void)var
#endif // __GOOGLE_PROTOBUF_RUBY_PROTOBUF_H__

@ -1,6 +1,6 @@
Gem::Specification.new do |s|
s.name = "google-protobuf"
s.version = "3.15.6"
s.version = "3.15.7"
git_tag = "v#{s.version.to_s.sub('.rc.', '-rc')}" # Converts X.Y.Z.rc.N to vX.Y.Z-rcN, used for the git tag
s.licenses = ["BSD-3-Clause"]
s.summary = "Protocol Buffers"

@ -699,12 +699,13 @@ module CommonTests
assert m.repeated_msg[0].object_id != m2.repeated_msg[0].object_id
end
def test_eq
def test_message_eq
m = proto_module::TestMessage.new(:optional_int32 => 42,
:repeated_int32 => [1, 2, 3])
m2 = proto_module::TestMessage.new(:optional_int32 => 43,
:repeated_int32 => [1, 2, 3])
assert m != m2
assert_not_equal proto_module::TestMessage.new, proto_module::TestMessage2.new
end
def test_enum_lookup

@ -18,7 +18,7 @@ else
PTHREAD_DEF =
endif
PROTOBUF_VERSION = 26:6:0
PROTOBUF_VERSION = 26:7:0
if GCC
# Turn on all warnings except for sign comparison (we ignore sign comparison

@ -13,7 +13,7 @@
#error incompatible with your Protocol Buffer headers. Please update
#error your headers.
#endif
#if 3015006 < PROTOBUF_MIN_PROTOC_VERSION
#if 3015007 < PROTOBUF_MIN_PROTOC_VERSION
#error This file was generated by an older version of protoc which is
#error incompatible with your Protocol Buffer headers. Please
#error regenerate this file with a newer version of protoc.

@ -13,7 +13,7 @@
#error incompatible with your Protocol Buffer headers. Please update
#error your headers.
#endif
#if 3015006 < PROTOBUF_MIN_PROTOC_VERSION
#if 3015007 < PROTOBUF_MIN_PROTOC_VERSION
#error This file was generated by an older version of protoc which is
#error incompatible with your Protocol Buffer headers. Please
#error regenerate this file with a newer version of protoc.

@ -13,7 +13,7 @@
#error incompatible with your Protocol Buffer headers. Please update
#error your headers.
#endif
#if 3015006 < PROTOBUF_MIN_PROTOC_VERSION
#if 3015007 < PROTOBUF_MIN_PROTOC_VERSION
#error This file was generated by an older version of protoc which is
#error incompatible with your Protocol Buffer headers. Please
#error regenerate this file with a newer version of protoc.

@ -13,7 +13,7 @@
#error incompatible with your Protocol Buffer headers. Please update
#error your headers.
#endif
#if 3015006 < PROTOBUF_MIN_PROTOC_VERSION
#if 3015007 < PROTOBUF_MIN_PROTOC_VERSION
#error This file was generated by an older version of protoc which is
#error incompatible with your Protocol Buffer headers. Please
#error regenerate this file with a newer version of protoc.

@ -348,17 +348,17 @@ message FileOptions {
optional string java_package = 1;
// If set, all the classes from the .proto file are wrapped in a single
// outer class with the given name. This applies to both Proto1
// (equivalent to the old "--one_java_file" option) and Proto2 (where
// a .proto always translates to a single class, but you may want to
// explicitly choose the class name).
// Controls the name of the wrapper Java class generated for the .proto file.
// That class will always contain the .proto file's getDescriptor() method as
// well as any top-level extensions defined in the .proto file.
// If java_multiple_files is disabled, then all the other classes from the
// .proto file will be nested inside the single wrapper outer class.
optional string java_outer_classname = 8;
// If set true, then the Java code generator will generate a separate .java
// If enabled, then the Java code generator will generate a separate .java
// file for each top-level message, enum, and service defined in the .proto
// file. Thus, these types will *not* be nested inside the outer class
// named by java_outer_classname. However, the outer class will still be
// file. Thus, these types will *not* be nested inside the wrapper class
// named by java_outer_classname. However, the wrapper class will still be
// generated to contain the file's getDescriptor() method as well as any
// top-level extensions defined in the file.
optional bool java_multiple_files = 10 [default = false];

@ -13,7 +13,7 @@
#error incompatible with your Protocol Buffer headers. Please update
#error your headers.
#endif
#if 3015006 < PROTOBUF_MIN_PROTOC_VERSION
#if 3015007 < PROTOBUF_MIN_PROTOC_VERSION
#error This file was generated by an older version of protoc which is
#error incompatible with your Protocol Buffer headers. Please
#error regenerate this file with a newer version of protoc.

@ -13,7 +13,7 @@
#error incompatible with your Protocol Buffer headers. Please update
#error your headers.
#endif
#if 3015006 < PROTOBUF_MIN_PROTOC_VERSION
#if 3015007 < PROTOBUF_MIN_PROTOC_VERSION
#error This file was generated by an older version of protoc which is
#error incompatible with your Protocol Buffer headers. Please
#error regenerate this file with a newer version of protoc.

@ -13,7 +13,7 @@
#error incompatible with your Protocol Buffer headers. Please update
#error your headers.
#endif
#if 3015006 < PROTOBUF_MIN_PROTOC_VERSION
#if 3015007 < PROTOBUF_MIN_PROTOC_VERSION
#error This file was generated by an older version of protoc which is
#error incompatible with your Protocol Buffer headers. Please
#error regenerate this file with a newer version of protoc.

@ -337,7 +337,7 @@
// Shared google3/opensource definitions. //////////////////////////////////////
#define PROTOBUF_VERSION 3015006
#define PROTOBUF_VERSION 3015007
#define PROTOBUF_MIN_HEADER_VERSION_FOR_PROTOC 3015000
#define PROTOBUF_MIN_PROTOC_VERSION 3015000
#define PROTOBUF_VERSION_SUFFIX ""

@ -79,6 +79,10 @@
#undef PROTOBUF_ATTRIBUTE_INIT_PRIORITY
#undef PROTOBUF_PRAGMA_INIT_SEG
#ifdef PROTOBUF_FUTURE_BREAKING_CHANGES
#undef PROTOBUF_FUTURE_BREAKING_CHANGES
#endif
// Restore macro that may have been #undef'd in port_def.inc.
#ifdef _MSC_VER
#pragma pop_macro("CREATE_NEW")

@ -13,7 +13,7 @@
#error incompatible with your Protocol Buffer headers. Please update
#error your headers.
#endif
#if 3015006 < PROTOBUF_MIN_PROTOC_VERSION
#if 3015007 < PROTOBUF_MIN_PROTOC_VERSION
#error This file was generated by an older version of protoc which is
#error incompatible with your Protocol Buffer headers. Please
#error regenerate this file with a newer version of protoc.

@ -13,7 +13,7 @@
#error incompatible with your Protocol Buffer headers. Please update
#error your headers.
#endif
#if 3015006 < PROTOBUF_MIN_PROTOC_VERSION
#if 3015007 < PROTOBUF_MIN_PROTOC_VERSION
#error This file was generated by an older version of protoc which is
#error incompatible with your Protocol Buffer headers. Please
#error regenerate this file with a newer version of protoc.

@ -82,7 +82,7 @@ namespace internal {
// The current version, represented as a single integer to make comparison
// easier: major * 10^6 + minor * 10^3 + micro
#define GOOGLE_PROTOBUF_VERSION 3015006
#define GOOGLE_PROTOBUF_VERSION 3015007
// A suffix string for alpha, beta or rc releases. Empty for stable releases.
#define GOOGLE_PROTOBUF_VERSION_SUFFIX ""

@ -33,7 +33,6 @@
#include <iomanip>
#include <ostream> // NOLINT(readability/streams)
#include <sstream>
#include <string>
#include <google/protobuf/stubs/logging.h>
@ -41,7 +40,11 @@
namespace google {
namespace protobuf {
namespace int128_internal {
const uint128_pod kuint128max = {
static_cast<uint64>(PROTOBUF_LONGLONG(0xFFFFFFFFFFFFFFFF)),
static_cast<uint64>(PROTOBUF_LONGLONG(0xFFFFFFFFFFFFFFFF))
};
// Returns the 0-based position of the last set bit (i.e., most significant bit)
// in the given uint64. The argument may not be 0.
@ -185,14 +188,6 @@ std::ostream& operator<<(std::ostream& o, const uint128& b) {
return o << rep;
}
void VerifyValidShift(std::string op, int amount) {
// Shifting more than 127 is UB in Abseil, just crash for now to verify
// callers don't depend on it returning 0.
GOOGLE_CHECK_LT(amount, 128) << "Error executing operator " << op
<< ": shifts of more than 127 are undefined";
}
} // namespace int128_internal
} // namespace protobuf
} // namespace google

@ -33,33 +33,38 @@
#include <google/protobuf/stubs/common.h>
#include <iosfwd>
#include <limits>
#include <string>
#include <google/protobuf/port_def.inc>
namespace google {
namespace protobuf {
namespace int128_internal {
// An unsigned 128-bit integer type. Thread-compatible.
class PROTOBUF_EXPORT uint128 {
public:
uint128() = default;
struct uint128_pod;
private:
// Use `MakeUint128` instead.
constexpr uint128(uint64 top, uint64 bottom);
// TODO(xiaofeng): Define GOOGLE_PROTOBUF_HAS_CONSTEXPR when constexpr is
// available.
#ifdef GOOGLE_PROTOBUF_HAS_CONSTEXPR
# define UINT128_CONSTEXPR constexpr
#else
# define UINT128_CONSTEXPR
#endif
// An unsigned 128-bit integer type. Thread-compatible.
class PROTOBUF_EXPORT uint128 {
public:
UINT128_CONSTEXPR uint128(); // Sets to 0, but don't rely on this behavior.
UINT128_CONSTEXPR uint128(uint64 top, uint64 bottom);
#ifndef SWIG
constexpr uint128(int bottom);
constexpr uint128(uint32 bottom); // Top 96 bits = 0
UINT128_CONSTEXPR uint128(int bottom);
UINT128_CONSTEXPR uint128(uint32 bottom); // Top 96 bits = 0
#endif
constexpr uint128(uint64 bottom); // hi_ = 0
UINT128_CONSTEXPR uint128(uint64 bottom); // hi_ = 0
UINT128_CONSTEXPR uint128(const uint128_pod &val);
// Trivial copy constructor, assignment operator and destructor.
void Initialize(uint64 top, uint64 bottom);
// Arithmetic operators.
uint128& operator+=(const uint128& b);
uint128& operator-=(const uint128& b);
@ -77,10 +82,8 @@ class PROTOBUF_EXPORT uint128 {
uint128& operator++();
uint128& operator--();
friend constexpr uint64 Uint128Low64(const uint128& v);
friend constexpr uint64 Uint128High64(const uint128& v);
friend constexpr uint128 MakeUint128(uint64_t high, uint64_t low);
friend uint64 Uint128Low64(const uint128& v);
friend uint64 Uint128High64(const uint128& v);
// We add "std::" to avoid including all of port.h.
PROTOBUF_EXPORT friend std::ostream& operator<<(std::ostream& o,
@ -97,12 +100,26 @@ class PROTOBUF_EXPORT uint128 {
uint64 hi_;
// Not implemented, just declared for catching automatic type conversions.
uint128(uint8) = delete;
uint128(uint16) = delete;
uint128(float v) = delete;
uint128(double v) = delete;
uint128(uint8);
uint128(uint16);
uint128(float v);
uint128(double v);
};
// This is a POD form of uint128 which can be used for static variables which
// need to be operated on as uint128.
struct uint128_pod {
// Note: The ordering of fields is different than 'class uint128' but the
// same as its 2-arg constructor. This enables more obvious initialization
// of static instances, which is the primary reason for this struct in the
// first place. This does not seem to defeat any optimizations wrt
// operations involving this struct.
uint64 hi;
uint64 lo;
};
PROTOBUF_EXPORT extern const uint128_pod kuint128max;
// allow uint128 to be logged
PROTOBUF_EXPORT extern std::ostream& operator<<(std::ostream& o,
const uint128& b);
@ -110,12 +127,8 @@ PROTOBUF_EXPORT extern std::ostream& operator<<(std::ostream& o,
// Methods to access low and high pieces of 128-bit value.
// Defined externally from uint128 to facilitate conversion
// to native 128-bit types when compilers support them.
inline constexpr uint64 Uint128Low64(const uint128& v) { return v.lo_; }
inline constexpr uint64 Uint128High64(const uint128& v) { return v.hi_; }
constexpr uint128 MakeUint128(uint64_t high, uint64_t low) {
return uint128(high, low);
}
inline uint64 Uint128Low64(const uint128& v) { return v.lo_; }
inline uint64 Uint128High64(const uint128& v) { return v.hi_; }
// TODO: perhaps it would be nice to have int128, a signed 128-bit type?
@ -130,17 +143,27 @@ inline bool operator!=(const uint128& lhs, const uint128& rhs) {
return !(lhs == rhs);
}
inline constexpr uint128::uint128(uint64 top, uint64 bottom)
inline UINT128_CONSTEXPR uint128::uint128() : lo_(0), hi_(0) {}
inline UINT128_CONSTEXPR uint128::uint128(uint64 top, uint64 bottom)
: lo_(bottom), hi_(top) {}
inline constexpr uint128::uint128(uint64 bottom)
inline UINT128_CONSTEXPR uint128::uint128(const uint128_pod& v)
: lo_(v.lo), hi_(v.hi) {}
inline UINT128_CONSTEXPR uint128::uint128(uint64 bottom)
: lo_(bottom), hi_(0) {}
#ifndef SWIG
inline constexpr uint128::uint128(uint32 bottom)
inline UINT128_CONSTEXPR uint128::uint128(uint32 bottom)
: lo_(bottom), hi_(0) {}
inline constexpr uint128::uint128(int bottom)
inline UINT128_CONSTEXPR uint128::uint128(int bottom)
: lo_(bottom), hi_(static_cast<int64>((bottom < 0) ? -1 : 0)) {}
#endif
#undef UINT128_CONSTEXPR
inline void uint128::Initialize(uint64 top, uint64 bottom) {
hi_ = top;
lo_ = bottom;
}
// Comparison operators.
#define CMP128(op) \
@ -164,9 +187,9 @@ inline uint128 operator-(const uint128& val) {
const uint64 lo_flip = ~Uint128Low64(val);
const uint64 lo_add = lo_flip + 1;
if (lo_add < lo_flip) {
return MakeUint128(hi_flip + 1, lo_add);
return uint128(hi_flip + 1, lo_add);
}
return MakeUint128(hi_flip, lo_add);
return uint128(hi_flip, lo_add);
}
inline bool operator!(const uint128& val) {
@ -176,13 +199,13 @@ inline bool operator!(const uint128& val) {
// Logical operators.
inline uint128 operator~(const uint128& val) {
return MakeUint128(~Uint128High64(val), ~Uint128Low64(val));
return uint128(~Uint128High64(val), ~Uint128Low64(val));
}
#define LOGIC128(op) \
inline uint128 operator op(const uint128& lhs, const uint128& rhs) { \
return MakeUint128(Uint128High64(lhs) op Uint128High64(rhs), \
Uint128Low64(lhs) op Uint128Low64(rhs)); \
return uint128(Uint128High64(lhs) op Uint128High64(rhs), \
Uint128Low64(lhs) op Uint128Low64(rhs)); \
}
LOGIC128(|)
@ -206,11 +229,7 @@ LOGICASSIGN128(^=)
// Shift operators.
void VerifyValidShift(std::string op, int amount);
inline uint128 operator<<(const uint128& val, int amount) {
VerifyValidShift("<<", amount);
// uint64 shifts of >= 64 are undefined, so we will need some special-casing.
if (amount < 64) {
if (amount == 0) {
@ -219,14 +238,15 @@ inline uint128 operator<<(const uint128& val, int amount) {
uint64 new_hi = (Uint128High64(val) << amount) |
(Uint128Low64(val) >> (64 - amount));
uint64 new_lo = Uint128Low64(val) << amount;
return MakeUint128(new_hi, new_lo);
return uint128(new_hi, new_lo);
} else if (amount < 128) {
return uint128(Uint128Low64(val) << (amount - 64), 0);
} else {
return uint128(0, 0);
}
return MakeUint128(Uint128Low64(val) << (amount - 64), 0);
}
inline uint128 operator>>(const uint128& val, int amount) {
VerifyValidShift(">>", amount);
// uint64 shifts of >= 64 are undefined, so we will need some special-casing.
if (amount < 64) {
if (amount == 0) {
@ -235,10 +255,12 @@ inline uint128 operator>>(const uint128& val, int amount) {
uint64 new_hi = Uint128High64(val) >> amount;
uint64 new_lo = (Uint128Low64(val) >> amount) |
(Uint128High64(val) << (64 - amount));
return MakeUint128(new_hi, new_lo);
return uint128(new_hi, new_lo);
} else if (amount < 128) {
return uint128(0, Uint128High64(val) >> (amount - 64));
} else {
return uint128(0, 0);
}
return MakeUint128(0, Uint128High64(val) >> (amount - 64));
}
inline uint128& uint128::operator<<=(int amount) {
@ -357,17 +379,6 @@ inline uint128& uint128::operator--() {
return *this;
}
constexpr uint128 Uint128Max() {
return MakeUint128((std::numeric_limits<uint64>::max)(),
(std::numeric_limits<uint64>::max)());
}
} // namespace int128_internal
using google::protobuf::int128_internal::uint128;
using google::protobuf::int128_internal::Uint128Max;
using google::protobuf::int128_internal::MakeUint128;
} // namespace protobuf
} // namespace google

@ -33,7 +33,6 @@
#include <algorithm>
#include <sstream>
#include <utility>
#include <type_traits>
#include <google/protobuf/testing/googletest.h>
#include <gtest/gtest.h>
@ -45,18 +44,16 @@ namespace protobuf {
TEST(Int128, AllTests) {
uint128 zero(0);
EXPECT_EQ(zero, uint128());
uint128 one(1);
uint128 one_2arg = MakeUint128(0, 1);
uint128 two = MakeUint128(0, 2);
uint128 three = MakeUint128(0, 3);
uint128 big = MakeUint128(2000, 2);
uint128 big_minus_one = MakeUint128(2000, 1);
uint128 bigger = MakeUint128(2001, 1);
uint128 biggest(Uint128Max());
uint128 high_low = MakeUint128(1, 0);
uint128 low_high = MakeUint128(0, kuint64max);
uint128 one_2arg(0, 1);
uint128 two(0, 2);
uint128 three(0, 3);
uint128 big(2000, 2);
uint128 big_minus_one(2000, 1);
uint128 bigger(2001, 1);
uint128 biggest(kuint128max);
uint128 high_low(1, 0);
uint128 low_high(0, kuint64max);
EXPECT_LT(one, two);
EXPECT_GT(two, one);
EXPECT_LT(one, big);
@ -95,6 +92,8 @@ TEST(Int128, AllTests) {
EXPECT_EQ(big, (big >> 1) << 1);
EXPECT_EQ(one, (one << 80) >> 80);
EXPECT_EQ(zero, (one >> 80) << 80);
EXPECT_EQ(zero, big >> 128);
EXPECT_EQ(zero, big << 128);
// Shift assignments.
uint128 big_copy = big;
@ -118,9 +117,9 @@ TEST(Int128, AllTests) {
big_copy = big;
EXPECT_EQ(big >> 73, big_copy >>= 73);
big_copy = big;
EXPECT_EQ(big << 127, big_copy <<= 127);
EXPECT_EQ(big << 128, big_copy <<= 128);
big_copy = big;
EXPECT_EQ(big >> 127, big_copy >>= 127);
EXPECT_EQ(big >> 128, big_copy >>= 128);
EXPECT_EQ(Uint128High64(biggest), kuint64max);
EXPECT_EQ(Uint128Low64(biggest), kuint64max);
@ -171,13 +170,92 @@ TEST(Int128, AllTests) {
EXPECT_EQ(big, -(-big));
EXPECT_EQ(two, -((-one) - 1));
EXPECT_EQ(Uint128Max(), -one);
EXPECT_EQ(kuint128max, -one);
EXPECT_EQ(zero, -zero);
GOOGLE_LOG(INFO) << one;
GOOGLE_LOG(INFO) << big_minus_one;
}
TEST(Int128, PodTests) {
uint128_pod pod = { 12345, 67890 };
uint128 from_pod(pod);
EXPECT_EQ(12345, Uint128High64(from_pod));
EXPECT_EQ(67890, Uint128Low64(from_pod));
uint128 zero(0);
uint128_pod zero_pod = {0, 0};
uint128 one(1);
uint128_pod one_pod = {0, 1};
uint128 two(2);
uint128_pod two_pod = {0, 2};
uint128 three(3);
uint128_pod three_pod = {0, 3};
uint128 big(1, 0);
uint128_pod big_pod = {1, 0};
EXPECT_EQ(zero, zero_pod);
EXPECT_EQ(zero_pod, zero);
EXPECT_EQ(zero_pod, zero_pod);
EXPECT_EQ(one, one_pod);
EXPECT_EQ(one_pod, one);
EXPECT_EQ(one_pod, one_pod);
EXPECT_EQ(two, two_pod);
EXPECT_EQ(two_pod, two);
EXPECT_EQ(two_pod, two_pod);
EXPECT_NE(one, two_pod);
EXPECT_NE(one_pod, two);
EXPECT_NE(one_pod, two_pod);
EXPECT_LT(one, two_pod);
EXPECT_LT(one_pod, two);
EXPECT_LT(one_pod, two_pod);
EXPECT_LE(one, one_pod);
EXPECT_LE(one_pod, one);
EXPECT_LE(one_pod, one_pod);
EXPECT_LE(one, two_pod);
EXPECT_LE(one_pod, two);
EXPECT_LE(one_pod, two_pod);
EXPECT_GT(two, one_pod);
EXPECT_GT(two_pod, one);
EXPECT_GT(two_pod, one_pod);
EXPECT_GE(two, two_pod);
EXPECT_GE(two_pod, two);
EXPECT_GE(two_pod, two_pod);
EXPECT_GE(two, one_pod);
EXPECT_GE(two_pod, one);
EXPECT_GE(two_pod, one_pod);
EXPECT_EQ(three, one | two_pod);
EXPECT_EQ(three, one_pod | two);
EXPECT_EQ(three, one_pod | two_pod);
EXPECT_EQ(one, three & one_pod);
EXPECT_EQ(one, three_pod & one);
EXPECT_EQ(one, three_pod & one_pod);
EXPECT_EQ(two, three ^ one_pod);
EXPECT_EQ(two, three_pod ^ one);
EXPECT_EQ(two, three_pod ^ one_pod);
EXPECT_EQ(two, three & (~one));
EXPECT_EQ(three, ~~three);
EXPECT_EQ(two, two_pod << 0);
EXPECT_EQ(two, one_pod << 1);
EXPECT_EQ(big, one_pod << 64);
EXPECT_EQ(zero, one_pod << 128);
EXPECT_EQ(two, two_pod >> 0);
EXPECT_EQ(one, two_pod >> 1);
EXPECT_EQ(one, big_pod >> 64);
EXPECT_EQ(one, zero + one_pod);
EXPECT_EQ(one, zero_pod + one);
EXPECT_EQ(one, zero_pod + one_pod);
EXPECT_EQ(one, two - one_pod);
EXPECT_EQ(one, two_pod - one);
EXPECT_EQ(one, two_pod - one_pod);
}
TEST(Int128, OperatorAssignReturnRef) {
uint128 v(1);
(v += 4) -= 3;
@ -215,38 +293,38 @@ TEST(Int128, Multiply) {
}
// Verified with dc.
a = MakeUint128(PROTOBUF_ULONGLONG(0xffffeeeeddddcccc),
PROTOBUF_ULONGLONG(0xbbbbaaaa99998888));
b = MakeUint128(PROTOBUF_ULONGLONG(0x7777666655554444),
PROTOBUF_ULONGLONG(0x3333222211110000));
a = uint128(PROTOBUF_ULONGLONG(0xffffeeeeddddcccc),
PROTOBUF_ULONGLONG(0xbbbbaaaa99998888));
b = uint128(PROTOBUF_ULONGLONG(0x7777666655554444),
PROTOBUF_ULONGLONG(0x3333222211110000));
c = a * b;
EXPECT_EQ(MakeUint128(PROTOBUF_ULONGLONG(0x530EDA741C71D4C3),
PROTOBUF_ULONGLONG(0xBF25975319080000)),
EXPECT_EQ(uint128(PROTOBUF_ULONGLONG(0x530EDA741C71D4C3),
PROTOBUF_ULONGLONG(0xBF25975319080000)),
c);
EXPECT_EQ(0, c - b * a);
EXPECT_EQ(a * a - b * b, (a + b) * (a - b));
// Verified with dc.
a = MakeUint128(PROTOBUF_ULONGLONG(0x0123456789abcdef),
PROTOBUF_ULONGLONG(0xfedcba9876543210));
b = MakeUint128(PROTOBUF_ULONGLONG(0x02468ace13579bdf),
PROTOBUF_ULONGLONG(0xfdb97531eca86420));
a = uint128(PROTOBUF_ULONGLONG(0x0123456789abcdef),
PROTOBUF_ULONGLONG(0xfedcba9876543210));
b = uint128(PROTOBUF_ULONGLONG(0x02468ace13579bdf),
PROTOBUF_ULONGLONG(0xfdb97531eca86420));
c = a * b;
EXPECT_EQ(MakeUint128(PROTOBUF_ULONGLONG(0x97a87f4f261ba3f2),
PROTOBUF_ULONGLONG(0x342d0bbf48948200)),
EXPECT_EQ(uint128(PROTOBUF_ULONGLONG(0x97a87f4f261ba3f2),
PROTOBUF_ULONGLONG(0x342d0bbf48948200)),
c);
EXPECT_EQ(0, c - b * a);
EXPECT_EQ(a*a - b*b, (a+b) * (a-b));
}
TEST(Int128, AliasTests) {
uint128 x1 = MakeUint128(1, 2);
uint128 x2 = MakeUint128(2, 4);
uint128 x1(1, 2);
uint128 x2(2, 4);
x1 += x1;
EXPECT_EQ(x2, x1);
uint128 x3 = MakeUint128(1, static_cast<uint64>(1) << 63);
uint128 x4 = MakeUint128(3, 0);
uint128 x3(1, static_cast<uint64>(1) << 63);
uint128 x4(3, 0);
x3 += x3;
EXPECT_EQ(x4, x3);
}
@ -267,12 +345,6 @@ TEST(Int128, ModByZeroCheckFails) {
a = 123;
EXPECT_DEATH(a % b, "Division or mod by zero:");
}
TEST(Int128, ShiftGreater128) {
uint128 a;
EXPECT_DEATH(a << 128, "Left-shift greater or equal 128");
EXPECT_DEATH(a >> 128, "Right-shift greater or equal 128");
}
#endif // PROTOBUF_HAS_DEATH_TEST
TEST(Int128, DivideAndMod) {
@ -287,10 +359,10 @@ TEST(Int128, DivideAndMod) {
EXPECT_EQ(0, q);
EXPECT_EQ(0, r);
a = MakeUint128(PROTOBUF_ULONGLONG(0x530eda741c71d4c3),
PROTOBUF_ULONGLONG(0xbf25975319080000));
q = MakeUint128(PROTOBUF_ULONGLONG(0x4de2cab081),
PROTOBUF_ULONGLONG(0x14c34ab4676e4bab));
a = uint128(PROTOBUF_ULONGLONG(0x530eda741c71d4c3),
PROTOBUF_ULONGLONG(0xbf25975319080000));
q = uint128(PROTOBUF_ULONGLONG(0x4de2cab081),
PROTOBUF_ULONGLONG(0x14c34ab4676e4bab));
b = uint128(0x1110001);
r = uint128(0x3eb455);
ASSERT_EQ(a, q * b + r); // Sanity-check.
@ -328,8 +400,8 @@ TEST(Int128, DivideAndMod) {
// Try a large remainder.
b = a / 2 + 1;
uint128 expected_r = MakeUint128(PROTOBUF_ULONGLONG(0x29876d3a0e38ea61),
PROTOBUF_ULONGLONG(0xdf92cba98c83ffff));
uint128 expected_r(PROTOBUF_ULONGLONG(0x29876d3a0e38ea61),
PROTOBUF_ULONGLONG(0xdf92cba98c83ffff));
// Sanity checks.
ASSERT_EQ(a / 2 - 1, expected_r);
ASSERT_EQ(a, b + expected_r);
@ -349,8 +421,8 @@ static uint64 RandomUint64() {
TEST(Int128, DivideAndModRandomInputs) {
const int kNumIters = 1 << 18;
for (int i = 0; i < kNumIters; ++i) {
const uint128 a = MakeUint128(RandomUint64(), RandomUint64());
const uint128 b = MakeUint128(RandomUint64(), RandomUint64());
const uint128 a(RandomUint64(), RandomUint64());
const uint128 b(RandomUint64(), RandomUint64());
if (b == 0) {
continue; // Avoid a div-by-zero.
}
@ -360,22 +432,24 @@ TEST(Int128, DivideAndModRandomInputs) {
}
}
#ifdef GOOGLE_PROTOBUF_HAS_CONSTEXPR
TEST(Int128, ConstexprTest) {
constexpr uint128 zero;
constexpr uint128 one = 1;
constexpr uint128_pod pod = {2, 3};
constexpr uint128 from_pod = pod;
constexpr uint128 minus_two = -2;
EXPECT_EQ(one, uint128(1));
EXPECT_EQ(minus_two, MakeUint128(-1ULL, -2ULL));
EXPECT_EQ(from_pod, uint128(2, 3));
EXPECT_EQ(minus_two, uint128(-1ULL, -2ULL));
}
#if !defined(__GNUC__) || __GNUC__ > 4
// libstdc++ is missing the required type traits pre gcc-5.0.0
// https://gcc.gnu.org/onlinedocs/gcc-4.9.4/libstdc++/manual/manual/status.html#:~:text=20.9.4.3
TEST(Int128, Traits) {
EXPECT_TRUE(std::is_trivially_copy_constructible<uint128>::value);
EXPECT_TRUE(std::is_trivially_copy_assignable<uint128>::value);
EXPECT_TRUE(std::is_trivially_destructible<uint128>::value);
}
#endif // !defined(__GNUC__) || __GNUC__ > 4
#endif // GOOGLE_PROTOBUF_HAS_CONSTEXPR
TEST(Int128, OStream) {
struct {
@ -390,28 +464,28 @@ TEST(Int128, OStream) {
{uint128(0), std::ios::oct, 0, '_', "0"},
{uint128(0), std::ios::hex, 0, '_', "0"},
// crossover between lo_ and hi_
{MakeUint128(0, -1), std::ios::dec, 0, '_', "18446744073709551615"},
{MakeUint128(0, -1), std::ios::oct, 0, '_', "1777777777777777777777"},
{MakeUint128(0, -1), std::ios::hex, 0, '_', "ffffffffffffffff"},
{MakeUint128(1, 0), std::ios::dec, 0, '_', "18446744073709551616"},
{MakeUint128(1, 0), std::ios::oct, 0, '_', "2000000000000000000000"},
{MakeUint128(1, 0), std::ios::hex, 0, '_', "10000000000000000"},
{uint128(0, -1), std::ios::dec, 0, '_', "18446744073709551615"},
{uint128(0, -1), std::ios::oct, 0, '_', "1777777777777777777777"},
{uint128(0, -1), std::ios::hex, 0, '_', "ffffffffffffffff"},
{uint128(1, 0), std::ios::dec, 0, '_', "18446744073709551616"},
{uint128(1, 0), std::ios::oct, 0, '_', "2000000000000000000000"},
{uint128(1, 0), std::ios::hex, 0, '_', "10000000000000000"},
// just the top bit
{MakeUint128(PROTOBUF_ULONGLONG(0x8000000000000000), 0), std::ios::dec, 0,
{uint128(PROTOBUF_ULONGLONG(0x8000000000000000), 0), std::ios::dec, 0,
'_', "170141183460469231731687303715884105728"},
{MakeUint128(PROTOBUF_ULONGLONG(0x8000000000000000), 0), std::ios::oct, 0,
{uint128(PROTOBUF_ULONGLONG(0x8000000000000000), 0), std::ios::oct, 0,
'_', "2000000000000000000000000000000000000000000"},
{MakeUint128(PROTOBUF_ULONGLONG(0x8000000000000000), 0), std::ios::hex, 0,
{uint128(PROTOBUF_ULONGLONG(0x8000000000000000), 0), std::ios::hex, 0,
'_', "80000000000000000000000000000000"},
// maximum uint128 value
{MakeUint128(-1, -1), std::ios::dec, 0, '_',
{uint128(-1, -1), std::ios::dec, 0, '_',
"340282366920938463463374607431768211455"},
{MakeUint128(-1, -1), std::ios::oct, 0, '_',
{uint128(-1, -1), std::ios::oct, 0, '_',
"3777777777777777777777777777777777777777777"},
{MakeUint128(-1, -1), std::ios::hex, 0, '_',
{uint128(-1, -1), std::ios::hex, 0, '_',
"ffffffffffffffffffffffffffffffff"},
// uppercase
{MakeUint128(-1, -1), std::ios::hex | std::ios::uppercase, 0, '_',
{uint128(-1, -1), std::ios::hex | std::ios::uppercase, 0, '_',
"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"},
// showbase
{uint128(1), std::ios::dec | std::ios::showbase, 0, '_', "1"},

@ -31,7 +31,6 @@
#ifndef GOOGLE_PROTOBUF_STUBS_LOGGING_H_
#define GOOGLE_PROTOBUF_STUBS_LOGGING_H_
#include <google/protobuf/stubs/int128.h>
#include <google/protobuf/stubs/macros.h>
#include <google/protobuf/stubs/port.h>
#include <google/protobuf/stubs/status.h>
@ -66,6 +65,7 @@ enum LogLevel {
#endif
};
class uint128;
namespace internal {
class LogFinisher;

@ -13,7 +13,7 @@
#error incompatible with your Protocol Buffer headers. Please update
#error your headers.
#endif
#if 3015006 < PROTOBUF_MIN_PROTOC_VERSION
#if 3015007 < PROTOBUF_MIN_PROTOC_VERSION
#error This file was generated by an older version of protoc which is
#error incompatible with your Protocol Buffer headers. Please
#error regenerate this file with a newer version of protoc.

@ -13,7 +13,7 @@
#error incompatible with your Protocol Buffer headers. Please update
#error your headers.
#endif
#if 3015006 < PROTOBUF_MIN_PROTOC_VERSION
#if 3015007 < PROTOBUF_MIN_PROTOC_VERSION
#error This file was generated by an older version of protoc which is
#error incompatible with your Protocol Buffer headers. Please
#error regenerate this file with a newer version of protoc.

@ -146,15 +146,6 @@ class PROTOBUF_EXPORT SimpleFieldComparator : public FieldComparator {
void SetDefaultFractionAndMargin(double fraction, double margin);
protected:
// NOTE: this will go away.
ComparisonResult Compare(const Message& message_1, const Message& message_2,
const FieldDescriptor* field, int index_1,
int index_2,
const util::FieldContext* field_context) override {
return SimpleCompare(message_1, message_2, field, index_1, index_2,
field_context);
}
// Returns the comparison result for the given field in two messages.
//
// This function is called directly by DefaultFieldComparator::Compare.
@ -268,7 +259,13 @@ class PROTOBUF_EXPORT SimpleFieldComparator : public FieldComparator {
};
// Default field comparison: use the basic implementation of FieldComparator.
class PROTOBUF_EXPORT DefaultFieldComparator : public SimpleFieldComparator {
#ifdef PROTOBUF_FUTURE_BREAKING_CHANGES
class PROTOBUF_EXPORT DefaultFieldComparator final
: public SimpleFieldComparator
#else // PROTOBUF_FUTURE_BREAKING_CHANGES
class PROTOBUF_EXPORT DefaultFieldComparator : public SimpleFieldComparator
#endif // PROTOBUF_FUTURE_BREAKING_CHANGES
{
public:
ComparisonResult Compare(const Message& message_1, const Message& message_2,
const FieldDescriptor* field, int index_1,

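With DefaultFieldComparator becoming final under PROTOBUF_FUTURE_BREAKING_CHANGES, custom comparators derive from SimpleFieldComparator and override Compare, delegating to the protected SimpleCompare for fields they do not handle specially (the same delegation the removed override performed). A minimal sketch, assuming only the signatures shown in the hunk above; the class and namespace names are hypothetical.

// Sketch: a custom comparator built on SimpleFieldComparator instead of
// subclassing DefaultFieldComparator (which becomes final).
#include <google/protobuf/util/field_comparator.h>
#include <google/protobuf/descriptor.h>
#include <google/protobuf/message.h>

namespace example {  // hypothetical namespace

class MyFieldComparator : public google::protobuf::util::SimpleFieldComparator {
 public:
  ComparisonResult Compare(const google::protobuf::Message& message_1,
                           const google::protobuf::Message& message_2,
                           const google::protobuf::FieldDescriptor* field,
                           int index_1, int index_2,
                           const google::protobuf::util::FieldContext*
                               field_context) override {
    // A special case for particular fields could go here; otherwise fall
    // back to the basic element-wise comparison provided by the base class.
    return SimpleCompare(message_1, message_2, field, index_1, index_2,
                         field_context);
  }
};

}  // namespace example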
@ -148,18 +148,16 @@ class MessageDifferencer::MultipleFieldsMapKeyComparator
const FieldDescriptor* field = key_field_path[path_index];
std::vector<SpecificField> current_parent_fields(parent_fields);
if (path_index == static_cast<int64_t>(key_field_path.size() - 1)) {
if (field->is_repeated()) {
if (!message_differencer_->CompareRepeatedField(
message1, message2, field, &current_parent_fields)) {
return false;
}
if (field->is_map()) {
return message_differencer_->CompareMapField(message1, message2, field,
&current_parent_fields);
} else if (field->is_repeated()) {
return message_differencer_->CompareRepeatedField(
message1, message2, field, &current_parent_fields);
} else {
if (!message_differencer_->CompareFieldValueUsingParentFields(
message1, message2, field, -1, -1, &current_parent_fields)) {
return false;
}
return message_differencer_->CompareFieldValueUsingParentFields(
message1, message2, field, -1, -1, &current_parent_fields);
}
return true;
} else {
const Reflection* reflection1 = message1.GetReflection();
const Reflection* reflection2 = message2.GetReflection();
@ -830,24 +828,17 @@ bool MessageDifferencer::CompareWithFieldsInternal(
bool fieldDifferent = false;
assert(field1 != NULL);
if (field1->is_repeated()) {
if (field1->is_map()) {
fieldDifferent =
!CompareMapField(message1, message2, field1, parent_fields);
} else if (field1->is_repeated()) {
fieldDifferent =
!CompareRepeatedField(message1, message2, field1, parent_fields);
if (fieldDifferent) {
if (reporter_ == NULL) return false;
isDifferent = true;
}
} else {
fieldDifferent = !CompareFieldValueUsingParentFields(
message1, message2, field1, -1, -1, parent_fields);
// If we have found differences, either report them or terminate if
// no reporter is present.
if (fieldDifferent && reporter_ == NULL) {
return false;
}
if (reporter_ != NULL) {
if (reporter_ != nullptr) {
SpecificField specific_field;
specific_field.field = field1;
parent_fields->push_back(specific_field);
@ -860,6 +851,10 @@ bool MessageDifferencer::CompareWithFieldsInternal(
parent_fields->pop_back();
}
}
if (fieldDifferent) {
if (reporter_ == nullptr) return false;
isDifferent = true;
}
// Increment the field indices.
++field_index1;
++field_index2;
@ -1002,17 +997,19 @@ bool MessageDifferencer::CompareMapFieldByMapReflection(
return true;
}
bool MessageDifferencer::CompareRepeatedField(
bool MessageDifferencer::CompareMapField(
const Message& message1, const Message& message2,
const FieldDescriptor* repeated_field,
std::vector<SpecificField>* parent_fields) {
GOOGLE_DCHECK(repeated_field->is_map());
// The input FieldDescriptor is guaranteed to be a repeated field.
const Reflection* reflection1 = message1.GetReflection();
const Reflection* reflection2 = message2.GetReflection();
// When both map fields use the map representation, do not sync to the
// repeated representation.
// TODO(jieluo): Add support for reporter
if (repeated_field->is_map() && reporter_ == nullptr &&
if (reporter_ == nullptr &&
// Users didn't set custom map field key comparator
map_field_key_comparator_.find(repeated_field) ==
map_field_key_comparator_.end() &&
@ -1052,6 +1049,26 @@ bool MessageDifferencer::CompareRepeatedField(
}
}
return CompareRepeatedRep(message1, message2, repeated_field, parent_fields);
}
bool MessageDifferencer::CompareRepeatedField(
const Message& message1, const Message& message2,
const FieldDescriptor* repeated_field,
std::vector<SpecificField>* parent_fields) {
GOOGLE_DCHECK(!repeated_field->is_map());
return CompareRepeatedRep(message1, message2, repeated_field, parent_fields);
}
bool MessageDifferencer::CompareRepeatedRep(
const Message& message1, const Message& message2,
const FieldDescriptor* repeated_field,
std::vector<SpecificField>* parent_fields) {
// The input FieldDescriptor is guaranteed to be a repeated field.
GOOGLE_DCHECK(repeated_field->is_repeated());
const Reflection* reflection1 = message1.GetReflection();
const Reflection* reflection2 = message2.GetReflection();
const int count1 = reflection1->FieldSize(message1, repeated_field);
const int count2 = reflection2->FieldSize(message2, repeated_field);
const bool treated_as_subset = IsTreatedAsSubset(repeated_field);

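From the caller's point of view the public API is unchanged by the CompareMapField / CompareRepeatedRep split; the map-versus-repeated dispatch happens internally. A minimal usage sketch, where MyProto is a hypothetical generated message type with a map field:

// Sketch: public MessageDifferencer entry points sitting above the
// refactored internals shown in the hunks above.
#include <iostream>
#include <string>
#include <google/protobuf/util/message_differencer.h>
#include "my_proto.pb.h"  // hypothetical generated header

bool Diff(const MyProto& a, const MyProto& b) {
  google::protobuf::util::MessageDifferencer differencer;
  std::string report;
  differencer.ReportDifferencesToString(&report);  // attach a reporter
  bool equal = differencer.Compare(a, b);          // dispatch is internal
  if (!equal) std::cout << report;
  return equal;
}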
@ -779,7 +779,20 @@ class PROTOBUF_EXPORT MessageDifferencer {
const FieldDescriptor* field,
std::vector<SpecificField>* parent_fields);
// Compare the map fields using map reflection instead of sync to repeated.
// Compares map fields and reports any differences.
bool CompareMapField(const Message& message1, const Message& message2,
const FieldDescriptor* field,
std::vector<SpecificField>* parent_fields);
// Helper for CompareRepeatedField and CompareMapField: compares and reports
// differences element-wise. This is the implementation for non-map fields,
// and can also compare map fields by using the underlying representation.
bool CompareRepeatedRep(const Message& message1, const Message& message2,
const FieldDescriptor* field,
std::vector<SpecificField>* parent_fields);
// Helper for CompareMapField: compares the map fields using map reflection
// instead of syncing to the repeated representation.
bool CompareMapFieldByMapReflection(const Message& message1,
const Message& message2,
const FieldDescriptor* field,

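The CompareRepeatedRep comment above relies on the fact that a map field's underlying representation is a repeated, synthesized map-entry message with key/value fields, which is what the "sync to repeated" comments refer to. A short sketch inspecting that entry type via the descriptor API; the field name "m" is hypothetical.

// Sketch: a map field is backed by a repeated MapEntry message type.
#include <iostream>
#include <google/protobuf/descriptor.h>
#include <google/protobuf/message.h>

void DescribeMapField(const google::protobuf::Message& msg) {
  const google::protobuf::FieldDescriptor* field =
      msg.GetDescriptor()->FindFieldByName("m");  // hypothetical map field
  if (field != nullptr && field->is_map()) {
    // is_map() implies is_repeated(); the element type is the synthesized
    // MapEntry message carrying the key/value pair.
    const google::protobuf::Descriptor* entry = field->message_type();
    std::cout << entry->full_name() << ": "
              << entry->FindFieldByName("key")->type_name() << " -> "
              << entry->FindFieldByName("value")->type_name() << "\n";
  }
}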
@ -13,7 +13,7 @@
#error incompatible with your Protocol Buffer headers. Please update
#error your headers.
#endif
#if 3015006 < PROTOBUF_MIN_PROTOC_VERSION
#if 3015007 < PROTOBUF_MIN_PROTOC_VERSION
#error This file was generated by an older version of protoc which is
#error incompatible with your Protocol Buffer headers. Please
#error regenerate this file with a newer version of protoc.
