master · v1.41.0
captainfffsama committed 2 years ago
parent 8984f4533b · commit 1297acf91b
Changed files (100 shown; changed-line count in parentheses):

1. BUILD (2)
2. README.md (19)
3. abseil-cpp/.github/ISSUE_TEMPLATE/00-bug_report.md (41)
4. abseil-cpp/.github/ISSUE_TEMPLATE/90-question.md (7)
5. abseil-cpp/.github/ISSUE_TEMPLATE/config.yml (1)
6. abseil-cpp/.gitignore (15)
7. abseil-cpp/CMake/AbseilDll.cmake (24)
8. abseil-cpp/CMake/AbseilHelpers.cmake (77)
9. abseil-cpp/CMake/AbseilInstallDirs.cmake (20)
10. abseil-cpp/CMake/Googletest/CMakeLists.txt.in (30)
11. abseil-cpp/CMake/README.md (4)
12. abseil-cpp/CMake/install_test_project/test.sh (146)
13. abseil-cpp/CMakeLists.txt (48)
14. abseil-cpp/LTS.md (16)
15. abseil-cpp/README.md (39)
16. abseil-cpp/WORKSPACE (7)
17. abseil-cpp/absl/BUILD.bazel (38)
18. abseil-cpp/absl/abseil.podspec.gen.py (4)
19. abseil-cpp/absl/algorithm/container.h (6)
20. abseil-cpp/absl/base/BUILD.bazel (38)
21. abseil-cpp/absl/base/CMakeLists.txt (25)
22. abseil-cpp/absl/base/attributes.h (28)
23. abseil-cpp/absl/base/call_once.h (11)
24. abseil-cpp/absl/base/config.h (46)
25. abseil-cpp/absl/base/dynamic_annotations.h (34)
26. abseil-cpp/absl/base/internal/bits.h (219)
27. abseil-cpp/absl/base/internal/bits_test.cc (97)
28. abseil-cpp/absl/base/internal/direct_mmap.h (5)
29. abseil-cpp/absl/base/internal/endian.h (61)
30. abseil-cpp/absl/base/internal/exponential_biased_test.cc (2)
31. abseil-cpp/absl/base/internal/low_level_alloc_test.cc (19)
32. abseil-cpp/absl/base/internal/low_level_scheduling.h (5)
33. abseil-cpp/absl/base/internal/raw_logging.cc (66)
34. abseil-cpp/absl/base/internal/raw_logging.h (22)
35. abseil-cpp/absl/base/internal/spinlock.cc (13)
36. abseil-cpp/absl/base/internal/spinlock.h (19)
37. abseil-cpp/absl/base/internal/spinlock_akaros.inc (4)
38. abseil-cpp/absl/base/internal/spinlock_linux.inc (6)
39. abseil-cpp/absl/base/internal/spinlock_posix.inc (4)
40. abseil-cpp/absl/base/internal/spinlock_wait.h (22)
41. abseil-cpp/absl/base/internal/spinlock_win32.inc (10)
42. abseil-cpp/absl/base/internal/strerror.cc (2)
43. abseil-cpp/absl/base/internal/strerror_test.cc (6)
44. abseil-cpp/absl/base/internal/sysinfo.cc (2)
45. abseil-cpp/absl/base/internal/sysinfo_test.cc (23)
46. abseil-cpp/absl/base/internal/thread_identity.cc (7)
47. abseil-cpp/absl/base/internal/thread_identity.h (85)
48. abseil-cpp/absl/base/internal/throw_delegate.cc (118)
49. abseil-cpp/absl/base/internal/unaligned_access.h (76)
50. abseil-cpp/absl/base/internal/unscaledcycleclock.cc (4)
51. abseil-cpp/absl/base/log_severity.h (8)
52. abseil-cpp/absl/base/macros.h (11)
53. abseil-cpp/absl/base/optimization.h (17)
54. abseil-cpp/absl/base/options.h (2)
55. abseil-cpp/absl/base/port.h (1)
56. abseil-cpp/absl/base/spinlock_test_common.cc (1)
57. abseil-cpp/absl/base/thread_annotations.h (2)
58. abseil-cpp/absl/cleanup/BUILD.bazel (0)
59. abseil-cpp/absl/cleanup/CMakeLists.txt (0)
60. abseil-cpp/absl/cleanup/cleanup.h (0)
61. abseil-cpp/absl/cleanup/cleanup_test.cc (0)
62. abseil-cpp/absl/cleanup/internal/cleanup.h (0)
63. abseil-cpp/absl/compiler_config_setting.bzl (38)
64. abseil-cpp/absl/container/BUILD.bazel (57)
65. abseil-cpp/absl/container/CMakeLists.txt (9)
66. abseil-cpp/absl/container/btree_benchmark.cc (36)
67. abseil-cpp/absl/container/btree_map.h (7)
68. abseil-cpp/absl/container/btree_test.cc (383)
69. abseil-cpp/absl/container/fixed_array.h (4)
70. abseil-cpp/absl/container/flat_hash_set.h (2)
71. abseil-cpp/absl/container/inlined_vector.h (8)
72. abseil-cpp/absl/container/inlined_vector_benchmark.cc (22)
73. abseil-cpp/absl/container/inlined_vector_test.cc (36)
74. abseil-cpp/absl/container/internal/btree.h (472)
75. abseil-cpp/absl/container/internal/btree_container.h (99)
76. abseil-cpp/absl/container/internal/compressed_tuple.h (2)
77. abseil-cpp/absl/container/internal/container_memory_test.cc (5)
78. abseil-cpp/absl/container/internal/hashtablez_sampler.cc (6)
79. abseil-cpp/absl/container/internal/hashtablez_sampler.h (3)
80. abseil-cpp/absl/container/internal/hashtablez_sampler_force_weak_definition.cc (3)
81. abseil-cpp/absl/container/internal/hashtablez_sampler_test.cc (6)
82. abseil-cpp/absl/container/internal/inlined_vector.h (207)
83. abseil-cpp/absl/container/internal/layout.h (8)
84. abseil-cpp/absl/container/internal/layout_benchmark.cc (0)
85. abseil-cpp/absl/container/internal/layout_test.cc (560)
86. abseil-cpp/absl/container/internal/raw_hash_set.cc (15)
87. abseil-cpp/absl/container/internal/raw_hash_set.h (272)
88. abseil-cpp/absl/container/internal/raw_hash_set_allocator_test.cc (4)
89. abseil-cpp/absl/container/internal/raw_hash_set_benchmark.cc (0)
90. abseil-cpp/absl/container/internal/raw_hash_set_probe_benchmark.cc (0)
91. abseil-cpp/absl/container/internal/raw_hash_set_test.cc (172)
92. abseil-cpp/absl/container/internal/unordered_map_constructor_test.h (1)
93. abseil-cpp/absl/container/node_hash_set.h (30)
94. abseil-cpp/absl/copts/AbseilConfigureCopts.cmake (17)
95. abseil-cpp/absl/copts/GENERATED_AbseilCopts.cmake (110)
96. abseil-cpp/absl/copts/GENERATED_copts.bzl (110)
97. abseil-cpp/absl/copts/configure_copts.bzl (14)
98. abseil-cpp/absl/copts/copts.py (124)
99. abseil-cpp/absl/debugging/BUILD.bazel (20)
100. abseil-cpp/absl/debugging/failure_signal_handler.cc (36)

Some files were not shown because too many files have changed in this diff.

BUILD
@@ -12,6 +12,4 @@ exports_files([
     "futures.BUILD",
     "libuv.BUILD",
     "protobuf.patch",
-    "rules_python.patch",
-    "protoc-gen-validate.patch",
 ])

README.md
@@ -58,26 +58,26 @@ Updating some dependencies requires extra care.

 ### Updating third_party/boringssl-with-bazel

-- Update the `third_party/boringssl-with-bazel` submodule to the latest [`main-with-bazel`](https://github.com/google/boringssl/tree/main-with-bazel) branch
+- Update the `third_party/boringssl-with-bazel` submodule to the latest [`master-with-bazel`](https://github.com/google/boringssl/tree/master-with-bazel) branch
 ```
 git submodule update --init      # just to start in a clean state
 cd third_party/boringssl-with-bazel
 git fetch origin                 # fetch what's new in the boringssl repository
-git checkout origin/main-with-bazel    # checkout the current state of main-with-bazel branch in the boringssl repo
+git checkout origin/master-with-bazel  # checkout the current state of master-with-bazel branch in the boringssl repo
-# Note the latest commit SHA on main-with-bazel branch
+# Note the latest commit SHA on master-with-bazel branch
 cd ../..                         # go back to grpc repo root
 git status                       # will show that there are new commits in third_party/boringssl-with-bazel
 git add third_party/boringssl-with-bazel   # we actually want to update the changes to the submodule
-git commit -m "update submodule boringssl-with-bazel with origin/main-with-bazel"   # commit
+git commit -m "update submodule boringssl-with-bazel with origin/master-with-bazel"   # commit
 ```
-- Update boringssl dependency in `bazel/grpc_deps.bzl` to the same commit SHA as main-with-bazel branch
+- Update boringssl dependency in `bazel/grpc_deps.bzl` to the same commit SHA as master-with-bazel branch
   - Update `http_archive(name = "boringssl",` section by updating the sha in `strip_prefix` and `urls` fields.
   - Also, set `sha256` field to "" as the existing value is not valid. This will be added later once we know what that value is.
 - Update `tools/run_tests/sanity/check_submodules.sh` with the same commit
-- Commit these changes `git commit -m "update boringssl dependency to main-with-bazel commit SHA"`
+- Commit these changes `git commit -m "update boringssl dependency to master-with-bazel commit SHA"`
 - Run `tools/buildgen/generate_projects.sh` to regenerate the generated files
 - Because `sha256` in `bazel/grpc_deps.bzl` was left empty, you will get a DEBUG msg like this one:
@@ -118,16 +118,13 @@ Apart from the above steps, please perform the following two steps to generate t
 Since upb is vendored in the gRPC repo, you cannot use submodule to update it. Please follow the steps below.

 1. Update third_party/upb directory by running
-   - `export GRPC_ROOT=~/git/grpc`
-   - `wget https://github.com/protocolbuffers/upb/archive/refs/heads/main.zip`
-   - `rm -rf $GRPC_ROOT/third_party/upb`
-   - `unzip main.zip -d $GRPC_ROOT/third_party`
-   - `mv $GRPC_ROOT/third_party/upb-main $GRPC_ROOT/third_party/upb`
+   `git subtree pull --squash --prefix=third_party/upb https://github.com/protocolbuffers/upb.git master`
 2. Update the dependency in `grpc_deps.bzl` to the same commit
 3. Populate the bazel download mirror by running `bazel/update_mirror.sh`
 4. Update `src/upb/gen_build_yaml.py` for newly added or removed upb files
 5. Run `tools/buildgen/generate_projects.sh` to regenerate the generated files
 6. Run `tools/codegen/core/gen_upb_api.sh` to regenerate upb files.
+   If you see breaking changes here, you may want to import upb into Google3 along with gRPC.

 ### Updating third_party/xxhash

abseil-cpp/.github/ISSUE_TEMPLATE/00-bug_report.md
@@ -1,41 +0,0 @@
----
-name: Bug report
-about: Create a report to help us improve
-title: ''
-labels: 'bug'
-assignees: ''
----
-
-**Describe the bug**
-
-Include a clear and concise description of what the problem is, including what
-you expected to happen, and what actually happened.
-
-**Steps to reproduce the bug**
-
-It's important that we are able to reproduce the problem that you are
-experiencing. Please provide all code and relevant steps to reproduce the
-problem, including your `BUILD`/`CMakeLists.txt` file and build commands. Links
-to a GitHub branch or [godbolt.org](https://godbolt.org/) that demonstrate the
-problem are also helpful.
-
-**What version of Abseil are you using?**
-
-**What operating system and version are you using**
-
-If you are using a Linux distribution please include the name and version of the
-distribution as well.
-
-**What compiler and version are you using?**
-
-Please include the output of `gcc -v` or `clang -v`, or the equivalent for your
-compiler.
-
-**What build system are you using?**
-
-Please include the output of `bazel --version` or `cmake --version`, or the
-equivalent for your build system.
-
-**Additional context**
-
-Add any other context about the problem here.

abseil-cpp/.github/ISSUE_TEMPLATE/90-question.md
@@ -1,7 +0,0 @@
----
-name: Question
-about: Have a question? Ask us anything! :-)
-title: ''
-labels: 'question'
-assignees: ''
----

abseil-cpp/.github/ISSUE_TEMPLATE/config.yml
@@ -1 +0,0 @@
-blank_issues_enabled: true

abseil-cpp/.gitignore
@@ -1,15 +0,0 @@
-# Ignore all bazel-* symlinks.
-/bazel-*
-# Ignore Bazel verbose explanations
---verbose_explanations
-# Ignore CMake usual build directory
-build
-# Ignore Vim files
-*.swp
-# Ignore QtCreator Project file
-CMakeLists.txt.user
-# Ignore VS Code files
-.vscode/*
-# Ignore generated python artifacts
-*.pyc
-copts/__pycache__/

abseil-cpp/CMake/AbseilDll.cmake
@@ -1,4 +1,5 @@
 include(CMakeParseArguments)
+include(GNUInstallDirs)

 set(ABSL_INTERNAL_DLL_FILES
   "algorithm/algorithm.h"
@@ -10,7 +11,6 @@ set(ABSL_INTERNAL_DLL_FILES
   "base/const_init.h"
   "base/dynamic_annotations.h"
   "base/internal/atomic_hook.h"
-  "base/internal/bits.h"
   "base/internal/cycleclock.cc"
   "base/internal/cycleclock.h"
   "base/internal/direct_mmap.h"
@@ -61,6 +61,8 @@ set(ABSL_INTERNAL_DLL_FILES
   "base/policy_checks.h"
   "base/port.h"
   "base/thread_annotations.h"
+  "cleanup/cleanup.h"
+  "cleanup/internal/cleanup.h"
   "container/btree_map.h"
   "container/btree_set.h"
   "container/fixed_array.h"
@@ -122,10 +124,15 @@ set(ABSL_INTERNAL_DLL_FILES
   "hash/internal/hash.h"
   "hash/internal/hash.cc"
   "hash/internal/spy_hash_state.h"
+  "hash/internal/wyhash.h"
+  "hash/internal/wyhash.cc"
   "memory/memory.h"
   "meta/type_traits.h"
+  "numeric/bits.h"
   "numeric/int128.cc"
   "numeric/int128.h"
+  "numeric/internal/bits.h"
+  "numeric/internal/representation.h"
   "random/bernoulli_distribution.h"
   "random/beta_distribution.h"
   "random/bit_gen_ref.h"
@@ -190,12 +197,18 @@ set(ABSL_INTERNAL_DLL_FILES
   "strings/cord.h"
   "strings/escaping.cc"
   "strings/escaping.h"
+  "strings/internal/cord_internal.cc"
   "strings/internal/cord_internal.h"
+  "strings/internal/cord_rep_flat.h"
+  "strings/internal/cord_rep_ring.cc"
+  "strings/internal/cord_rep_ring.h"
+  "strings/internal/cord_rep_ring_reader.h"
   "strings/internal/charconv_bigint.cc"
   "strings/internal/charconv_bigint.h"
   "strings/internal/charconv_parse.cc"
   "strings/internal/charconv_parse.h"
   "strings/internal/stl_type_traits.h"
+  "strings/internal/string_constant.h"
   "strings/match.cc"
   "strings/match.h"
   "strings/numbers.cc"
@@ -250,6 +263,7 @@ set(ABSL_INTERNAL_DLL_FILES
   "synchronization/notification.h"
   "synchronization/internal/create_thread_identity.cc"
   "synchronization/internal/create_thread_identity.h"
+  "synchronization/internal/futex.h"
   "synchronization/internal/graphcycles.cc"
   "synchronization/internal/graphcycles.h"
   "synchronization/internal/kernel_timeout.h"
@@ -487,7 +501,7 @@ function(absl_make_dll)
     abseil_dll
     PUBLIC
       "$<BUILD_INTERFACE:${ABSL_COMMON_INCLUDE_DIRS}>"
-      $<INSTALL_INTERFACE:${ABSL_INSTALL_INCLUDEDIR}>
+      $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>
   )

   target_compile_options(
@@ -505,8 +519,8 @@ function(absl_make_dll)
       ${ABSL_CC_LIB_DEFINES}
   )
   install(TARGETS abseil_dll EXPORT ${PROJECT_NAME}Targets
-          RUNTIME DESTINATION ${ABSL_INSTALL_BINDIR}
-          LIBRARY DESTINATION ${ABSL_INSTALL_LIBDIR}
-          ARCHIVE DESTINATION ${ABSL_INSTALL_LIBDIR}
+          RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
+          LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
+          ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
   )
 endfunction()

abseil-cpp/CMake/AbseilHelpers.cmake
@@ -17,7 +17,6 @@
 include(CMakeParseArguments)
 include(AbseilConfigureCopts)
 include(AbseilDll)
-include(AbseilInstallDirs)

 # The IDE folder for Abseil that will be used if Abseil is included in a CMake
 # project that sets
@@ -41,7 +40,7 @@ endif()
 # LINKOPTS: List of link options
 # PUBLIC: Add this so that this library will be exported under absl::
 # Also in IDE, target will appear in Abseil folder while non PUBLIC will be in Abseil/internal.
-# TESTONLY: When added, this target will only be built if user passes -DABSL_RUN_TESTS=ON to CMake.
+# TESTONLY: When added, this target will only be built if BUILD_TESTING=ON.
 #
 # Note:
 # By default, absl_cc_library will always create a library named absl_${NAME},
@@ -83,7 +82,7 @@ function(absl_cc_library)
     ${ARGN}
   )

-  if(ABSL_CC_LIB_TESTONLY AND NOT ABSL_RUN_TESTS)
+  if(ABSL_CC_LIB_TESTONLY AND NOT BUILD_TESTING)
     return()
   endif()
@@ -104,7 +103,7 @@ function(absl_cc_library)
     endif()
   endforeach()

-  if("${ABSL_CC_SRCS}" STREQUAL "")
+  if(ABSL_CC_SRCS STREQUAL "")
     set(ABSL_CC_LIB_IS_INTERFACE 1)
   else()
     set(ABSL_CC_LIB_IS_INTERFACE 0)
@@ -122,7 +121,11 @@ function(absl_cc_library)
   # 4. "static"  -- This target does not depend on the DLL and should be built
   #    statically.
   if (${ABSL_BUILD_DLL})
-    absl_internal_dll_contains(TARGET ${_NAME} OUTPUT _in_dll)
+    if(ABSL_ENABLE_INSTALL)
+      absl_internal_dll_contains(TARGET ${_NAME} OUTPUT _in_dll)
+    else()
+      absl_internal_dll_contains(TARGET ${ABSL_CC_LIB_NAME} OUTPUT _in_dll)
+    endif()
     if (${_in_dll})
       # This target should be replaced by the DLL
       set(_build_type "dll")
@@ -137,8 +140,53 @@ function(absl_cc_library)
     set(_build_type "static")
   endif()

+  # Generate a pkg-config file for every library:
+  if(_build_type STREQUAL "static" OR _build_type STREQUAL "shared")
+    if(NOT ABSL_CC_LIB_TESTONLY)
+      if(absl_VERSION)
+        set(PC_VERSION "${absl_VERSION}")
+      else()
+        set(PC_VERSION "head")
+      endif()
+      foreach(dep ${ABSL_CC_LIB_DEPS})
+        if(${dep} MATCHES "^absl::(.*)")
+          # Join deps with commas.
+          if(PC_DEPS)
+            set(PC_DEPS "${PC_DEPS},")
+          endif()
+          set(PC_DEPS "${PC_DEPS} absl_${CMAKE_MATCH_1} = ${PC_VERSION}")
+        endif()
+      endforeach()
+      foreach(cflag ${ABSL_CC_LIB_COPTS})
+        if(${cflag} MATCHES "^(-Wno|/wd)")
+          # These flags are needed to suppress warnings that might fire in our headers.
+          set(PC_CFLAGS "${PC_CFLAGS} ${cflag}")
+        elseif(${cflag} MATCHES "^(-W|/w[1234eo])")
+          # Don't impose our warnings on others.
+        else()
+          set(PC_CFLAGS "${PC_CFLAGS} ${cflag}")
+        endif()
+      endforeach()
+      FILE(GENERATE OUTPUT "${CMAKE_BINARY_DIR}/lib/pkgconfig/absl_${_NAME}.pc" CONTENT "\
+prefix=${CMAKE_INSTALL_PREFIX}\n\
+exec_prefix=\${prefix}\n\
+libdir=\${prefix}/${CMAKE_INSTALL_LIBDIR}\n\
+includedir=\${prefix}/${CMAKE_INSTALL_INCLUDEDIR}\n\
+\n\
+Name: absl_${_NAME}\n\
+Description: Abseil ${_NAME} library\n\
+URL: https://abseil.io/\n\
+Version: ${PC_VERSION}\n\
+Requires:${PC_DEPS}\n\
+Libs: -L\${libdir} $<JOIN:${ABSL_CC_LIB_LINKOPTS}, > $<$<NOT:$<BOOL:${ABSL_CC_LIB_IS_INTERFACE}>>:-labsl_${_NAME}>\n\
+Cflags: -I\${includedir}${PC_CFLAGS}\n")
+      INSTALL(FILES "${CMAKE_BINARY_DIR}/lib/pkgconfig/absl_${_NAME}.pc"
+              DESTINATION "${CMAKE_INSTALL_PREFIX}/lib/pkgconfig")
+    endif()
+  endif()
+
   if(NOT ABSL_CC_LIB_IS_INTERFACE)
-    if(${_build_type} STREQUAL "dll_dep")
+    if(_build_type STREQUAL "dll_dep")
       # This target depends on the DLL. When adding dependencies to this target,
       # any depended-on-target which is contained inside the DLL is replaced
       # with a dependency on the DLL.
@@ -167,7 +215,7 @@ function(absl_cc_library)
         "${_gtest_link_define}"
     )

-  elseif(${_build_type} STREQUAL "static" OR ${_build_type} STREQUAL "shared")
+  elseif(_build_type STREQUAL "static" OR _build_type STREQUAL "shared")
     add_library(${_NAME} "")
     target_sources(${_NAME} PRIVATE ${ABSL_CC_LIB_SRCS} ${ABSL_CC_LIB_HDRS})
     target_link_libraries(${_NAME}
@@ -190,7 +238,7 @@ function(absl_cc_library)
     target_include_directories(${_NAME}
       PUBLIC
         "$<BUILD_INTERFACE:${ABSL_COMMON_INCLUDE_DIRS}>"
-        $<INSTALL_INTERFACE:${ABSL_INSTALL_INCLUDEDIR}>
+        $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>
     )
     target_compile_options(${_NAME}
       PRIVATE ${ABSL_CC_LIB_COPTS})
@@ -215,6 +263,7 @@ function(absl_cc_library)
     if(ABSL_ENABLE_INSTALL)
       set_target_properties(${_NAME} PROPERTIES
         OUTPUT_NAME "absl_${_NAME}"
+        SOVERSION "2103.0.0"
       )
     endif()
   else()
@@ -223,10 +272,10 @@ function(absl_cc_library)
     target_include_directories(${_NAME}
       INTERFACE
         "$<BUILD_INTERFACE:${ABSL_COMMON_INCLUDE_DIRS}>"
-        $<INSTALL_INTERFACE:${ABSL_INSTALL_INCLUDEDIR}>
+        $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>
     )

-    if (${_build_type} STREQUAL "dll")
+    if (_build_type STREQUAL "dll")
       set(ABSL_CC_LIB_DEPS abseil_dll)
     endif()
@@ -243,9 +292,9 @@ function(absl_cc_library)
   # installed abseil can't be tested.
   if(NOT ABSL_CC_LIB_TESTONLY AND ABSL_ENABLE_INSTALL)
     install(TARGETS ${_NAME} EXPORT ${PROJECT_NAME}Targets
-      RUNTIME DESTINATION ${ABSL_INSTALL_BINDIR}
-      LIBRARY DESTINATION ${ABSL_INSTALL_LIBDIR}
-      ARCHIVE DESTINATION ${ABSL_INSTALL_LIBDIR}
+      RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
+      LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
+      ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
     )
   endif()
@@ -290,7 +339,7 @@ endfunction()
 #     gtest_main
 #   )
 function(absl_cc_test)
-  if(NOT ABSL_RUN_TESTS)
+  if(NOT BUILD_TESTING)
     return()
   endif()

abseil-cpp/CMake/AbseilInstallDirs.cmake
@@ -1,20 +0,0 @@
-include(GNUInstallDirs)
-
-# absl_VERSION is only set if we are an LTS release being installed, in which
-# case it may be into a system directory and so we need to make subdirectories
-# for each installed version of Abseil. This mechanism is implemented in
-# Abseil's internal Copybara (https://github.com/google/copybara) workflows and
-# isn't visible in the CMake buildsystem itself.
-if(absl_VERSION)
-  set(ABSL_SUBDIR "${PROJECT_NAME}_${PROJECT_VERSION}")
-  set(ABSL_INSTALL_BINDIR "${CMAKE_INSTALL_BINDIR}/${ABSL_SUBDIR}")
-  set(ABSL_INSTALL_CONFIGDIR "${CMAKE_INSTALL_LIBDIR}/cmake/${ABSL_SUBDIR}")
-  set(ABSL_INSTALL_INCLUDEDIR "${CMAKE_INSTALL_INCLUDEDIR}/${ABSL_SUBDIR}")
-  set(ABSL_INSTALL_LIBDIR "${CMAKE_INSTALL_LIBDIR}/${ABSL_SUBDIR}")
-else()
-  set(ABSL_INSTALL_BINDIR "${CMAKE_INSTALL_BINDIR}")
-  set(ABSL_INSTALL_CONFIGDIR "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}")
-  set(ABSL_INSTALL_INCLUDEDIR "${CMAKE_INSTALL_INCLUDEDIR}")
-  set(ABSL_INSTALL_LIBDIR "${CMAKE_INSTALL_LIBDIR}")
-endif()

abseil-cpp/CMake/Googletest/CMakeLists.txt.in
@@ -3,24 +3,12 @@ cmake_minimum_required(VERSION 2.8.2)
 project(googletest-external NONE)

 include(ExternalProject)
-if(${ABSL_USE_GOOGLETEST_HEAD})
-  ExternalProject_Add(googletest
-    GIT_REPOSITORY    https://github.com/google/googletest.git
-    GIT_TAG           master
-    SOURCE_DIR        "${absl_gtest_src_dir}"
-    BINARY_DIR        "${absl_gtest_build_dir}"
-    CONFIGURE_COMMAND ""
-    BUILD_COMMAND     ""
-    INSTALL_COMMAND   ""
-    TEST_COMMAND      ""
-  )
-else()
-  ExternalProject_Add(googletest
-    SOURCE_DIR        "${absl_gtest_src_dir}"
-    BINARY_DIR        "${absl_gtest_build_dir}"
-    CONFIGURE_COMMAND ""
-    BUILD_COMMAND     ""
-    INSTALL_COMMAND   ""
-    TEST_COMMAND      ""
-  )
-endif()
+ExternalProject_Add(googletest
+  URL               "${absl_gtest_download_url}"  # May be empty
+  SOURCE_DIR        "${absl_gtest_src_dir}"
+  BINARY_DIR        "${absl_gtest_build_dir}"
+  CONFIGURE_COMMAND ""
+  BUILD_COMMAND     ""
+  INSTALL_COMMAND   ""
+  TEST_COMMAND      ""
+)

abseil-cpp/CMake/README.md
@@ -52,7 +52,7 @@ target_link_libraries(my_exe absl::base absl::synchronization absl::strings)

 ### Running Abseil Tests with CMake

-Use the `-DABSL_RUN_TESTS=ON` flag to run Abseil tests.  Note that if the `-DBUILD_TESTING=OFF` flag is passed then Abseil tests will not be run.
+Use the `-DBUILD_TESTING=ON` flag to run Abseil tests.

 You will need to provide Abseil with a Googletest dependency.  There are two
 options for how to do this:
@@ -70,7 +70,7 @@ For example, to run just the Abseil tests, you could use this script:
 cd path/to/abseil-cpp
 mkdir build
 cd build
-cmake -DABSL_USE_GOOGLETEST_HEAD=ON -DABSL_RUN_TESTS=ON ..
+cmake -DBUILD_TESTING=ON -DABSL_USE_GOOGLETEST_HEAD=ON ..
 make -j
 ctest
 ```

abseil-cpp/CMake/install_test_project/test.sh
@@ -13,70 +13,44 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-# "Unit" and integration tests for Absl CMake installation
+# Unit and integration tests for Abseil LTS CMake installation
+
+# TODO(absl-team): This script isn't fully hermetic because
+# -DABSL_USE_GOOGLETEST_HEAD=ON means that this script isn't pinned to a fixed
+# version of GoogleTest. This means that an upstream change to GoogleTest could
+# break this test. Fix this by allowing this script to pin to a known-good
+# version of GoogleTest.

 # Fail on any error. Treat unset variables an error. Print commands as executed.
 set -euox pipefail

-install_absl() {
-  pushd "${absl_build_dir}"
-  if [[ "${#}" -eq 1 ]]; then
-    cmake -DCMAKE_INSTALL_PREFIX="${1}" "${absl_dir}"
-  else
-    cmake "${absl_dir}"
-  fi
-  cmake --build . --target install -- -j
-  popd
-}
-
-uninstall_absl() {
-  xargs rm < "${absl_build_dir}"/install_manifest.txt
-  rm -rf "${absl_build_dir}"
-  mkdir -p "${absl_build_dir}"
-}
-
-lts_install=""
-
-while getopts ":l" lts; do
-  case "${lts}" in
-    l )
-      lts_install="true"
-      ;;
-  esac
-done
+source ci/cmake_common.sh

 absl_dir=/abseil-cpp
-absl_build_dir=/buildfs/absl-build
+absl_build_dir=/buildfs
 project_dir="${absl_dir}"/CMake/install_test_project
 project_build_dir=/buildfs/project-build

-mkdir -p "${absl_build_dir}"
-mkdir -p "${project_build_dir}"
-
-if [[ "${lts_install}" ]]; then
-  install_dir="/usr/local"
-else
-  install_dir="${project_build_dir}"/install
+build_shared_libs="OFF"
+if [ "${LINK_TYPE:-}" = "DYNAMIC" ]; then
+  build_shared_libs="ON"
 fi
-mkdir -p "${install_dir}"

-# Test build, install, and link against installed abseil
-pushd "${project_build_dir}"
-if [[ "${lts_install}" ]]; then
-  install_absl
-  cmake "${project_dir}"
-else
-  install_absl "${install_dir}"
-  cmake "${project_dir}" -DCMAKE_PREFIX_PATH="${install_dir}"
-fi
+# Run the LTS transformations
+./create_lts.py 99998877
+
+# Install Abseil
+pushd "${absl_build_dir}"
+cmake "${absl_dir}" \
+  -DABSL_GOOGLETEST_DOWNLOAD_URL="${ABSL_GOOGLETEST_DOWNLOAD_URL}" \
+  -DCMAKE_BUILD_TYPE=Release \
+  -DBUILD_TESTING=ON \
+  -DBUILD_SHARED_LIBS="${build_shared_libs}"
+make -j $(nproc)
+ctest -j $(nproc)
+make install
+ldconfig
+popd
+
+# Test the project against the installed Abseil
+mkdir -p "${project_build_dir}"
+pushd "${project_build_dir}"
+cmake "${project_dir}"

 cmake --build . --target simple

 output="$(${project_build_dir}/simple "printme" 2>&1)"
@@ -88,57 +62,35 @@ fi
 popd

-# Test that we haven't accidentally made absl::abslblah
-pushd "${install_dir}"
-
-# Starting in CMake 3.12 the default install dir is lib$bit_width
-if [[ -d lib64 ]]; then
-  libdir="lib64"
-elif [[ -d lib ]]; then
-  libdir="lib"
-else
-  echo "ls *, */*, */*/*:"
-  ls *
-  ls */*
-  ls */*/*
-  echo "unknown lib dir"
-fi
-
-if [[ "${lts_install}" ]]; then
-  # LTS versions append the date of the release to the subdir.
-  # 9999/99/99 is the dummy date used in the local_lts workflow.
-  absl_subdir="absl_99999999"
-else
-  absl_subdir="absl"
-fi
-
-if ! grep absl::strings "${libdir}/cmake/${absl_subdir}/abslTargets.cmake"; then
-  cat "${libdir}"/cmake/absl/abslTargets.cmake
+if ! grep absl::strings "/usr/local/lib/cmake/absl/abslTargets.cmake"; then
+  cat "/usr/local/lib/cmake/absl/abslTargets.cmake"
   echo "CMake targets named incorrectly"
   exit 1
 fi

-uninstall_absl
-popd
+pushd "${HOME}"
+cat > hello-abseil.cc << EOF
+#include <cstdlib>
+
+#include "absl/strings/str_format.h"
+
+int main(int argc, char **argv) {
+  absl::PrintF("Hello Abseil!\n");
+  return EXIT_SUCCESS;
+}
+EOF

-if [[ ! "${lts_install}" ]]; then
-  # Test that we warn if installed without a prefix or a system prefix
-  output="$(install_absl 2>&1)"
-  if [[ "${output}" != *"Please set CMAKE_INSTALL_PREFIX"* ]]; then
-    echo "Install without prefix didn't warn as expected. Output:"
-    echo "${output}"
-    exit 1
-  fi
-  uninstall_absl
-
-  output="$(install_absl /usr 2>&1)"
-  if [[ "${output}" != *"Please set CMAKE_INSTALL_PREFIX"* ]]; then
-    echo "Install with /usr didn't warn as expected. Output:"
-    echo "${output}"
-    exit 1
-  fi
-  uninstall_absl
+if [ "${LINK_TYPE:-}" != "DYNAMIC" ]; then
+  pc_args=($(pkg-config --cflags --libs --static absl_str_format))
+  g++ -static -o hello-abseil hello-abseil.cc "${pc_args[@]}"
+else
+  pc_args=($(pkg-config --cflags --libs absl_str_format))
+  g++ -o hello-abseil hello-abseil.cc "${pc_args[@]}"
 fi
+hello="$(./hello-abseil)"
+[[ "${hello}" == "Hello Abseil!" ]]
+
+popd

 echo "Install test complete!"
 exit 0

abseil-cpp/CMakeLists.txt
@@ -20,7 +20,6 @@
 # (https://fedoraproject.org/wiki/EPEL#Extra_Packages_for_Enterprise_Linux_.28EPEL.29)
 # and then issuing `yum install cmake3` on the command line.
 cmake_minimum_required(VERSION 3.5)
-set(CMAKE_CXX_STANDARD 17)

 # Compiler id for Apple Clang is now AppleClang.
 if (POLICY CMP0025)
@@ -42,7 +41,12 @@ if (POLICY CMP0077)
   cmake_policy(SET CMP0077 NEW)
 endif (POLICY CMP0077)

-project(absl CXX)
+# Set BUILD_TESTING to OFF by default.
+# This must come before the project() and include(CTest) lines.
+OPTION(BUILD_TESTING "Build tests" OFF)
+
+project(absl LANGUAGES CXX VERSION 20210324)
+include(CTest)

 # Output directory is correct by default for most build setups. However, when
 # building Abseil as a DLL, it is important to have the DLL in the same
@@ -52,7 +56,7 @@ set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
 # when absl is included as subproject (i.e. using add_subdirectory(abseil-cpp))
 # in the source tree of a project that uses it, install rules are disabled.
-if(NOT "^${CMAKE_SOURCE_DIR}$" STREQUAL "^${PROJECT_SOURCE_DIR}$")
+if(NOT CMAKE_SOURCE_DIR STREQUAL PROJECT_SOURCE_DIR)
   option(ABSL_ENABLE_INSTALL "Enable install rule" OFF)
 else()
   option(ABSL_ENABLE_INSTALL "Enable install rule" ON)
@@ -63,8 +67,8 @@ list(APPEND CMAKE_MODULE_PATH
   ${CMAKE_CURRENT_LIST_DIR}/absl/copts
 )

-include(AbseilInstallDirs)
 include(CMakePackageConfigHelpers)
+include(GNUInstallDirs)
 include(AbseilDll)
 include(AbseilHelpers)
@@ -96,25 +100,28 @@ find_package(Threads REQUIRED)
 option(ABSL_USE_EXTERNAL_GOOGLETEST
   "If ON, Abseil will assume that the targets for GoogleTest are already provided by the including project. This makes sense when Abseil is used with add_subproject." OFF)

 option(ABSL_USE_GOOGLETEST_HEAD
-  "If ON, abseil will download HEAD from googletest at config time." OFF)
+  "If ON, abseil will download HEAD from GoogleTest at config time." OFF)
+
+set(ABSL_GOOGLETEST_DOWNLOAD_URL "" CACHE STRING "If set, download GoogleTest from this URL")

 set(ABSL_LOCAL_GOOGLETEST_DIR "/usr/src/googletest" CACHE PATH
-  "If ABSL_USE_GOOGLETEST_HEAD is OFF, specifies the directory of a local googletest checkout."
+  "If ABSL_USE_GOOGLETEST_HEAD is OFF and ABSL_GOOGLETEST_URL is not set, specifies the directory of a local GoogleTest checkout."
 )

-option(ABSL_RUN_TESTS "If ON, Abseil tests will be run." OFF)
-
-if(${ABSL_RUN_TESTS})
-  # enable CTest.  This will set BUILD_TESTING to ON unless otherwise specified
-  # on the command line
-  include(CTest)
-
+if(BUILD_TESTING)
   ## check targets
   if (NOT ABSL_USE_EXTERNAL_GOOGLETEST)
     set(absl_gtest_build_dir ${CMAKE_BINARY_DIR}/googletest-build)
-    if(${ABSL_USE_GOOGLETEST_HEAD})
+
+    if(ABSL_USE_GOOGLETEST_HEAD AND ABSL_GOOGLETEST_DOWNLOAD_URL)
+      message(FATAL_ERROR "Do not set both ABSL_USE_GOOGLETEST_HEAD and ABSL_GOOGLETEST_DOWNLOAD_URL")
+    endif()
+
+    if(ABSL_USE_GOOGLETEST_HEAD)
+      set(absl_gtest_download_url "https://github.com/google/googletest/archive/master.zip")
+    elseif(ABSL_GOOGLETEST_DOWNLOAD_URL)
+      set(absl_gtest_download_url ${ABSL_GOOGLETEST_DOWNLOAD_URL})
+    endif()
+    if(absl_gtest_download_url)
       set(absl_gtest_src_dir ${CMAKE_BINARY_DIR}/googletest-src)
     else()
       set(absl_gtest_src_dir ${ABSL_LOCAL_GOOGLETEST_DIR})
@@ -137,20 +144,21 @@ endif()
 add_subdirectory(absl)

 if(ABSL_ENABLE_INSTALL)
-  # install as a subdirectory only
+
   install(EXPORT ${PROJECT_NAME}Targets
           NAMESPACE absl::
-          DESTINATION "${ABSL_INSTALL_CONFIGDIR}"
+          DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}"
   )

   configure_package_config_file(
     CMake/abslConfig.cmake.in
     "${PROJECT_BINARY_DIR}/${PROJECT_NAME}Config.cmake"
-    INSTALL_DESTINATION "${ABSL_INSTALL_CONFIGDIR}"
+    INSTALL_DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}"
   )

   install(FILES "${PROJECT_BINARY_DIR}/${PROJECT_NAME}Config.cmake"
-          DESTINATION "${ABSL_INSTALL_CONFIGDIR}"
+          DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}"
   )

   # Abseil only has a version in LTS releases.  This mechanism is accomplished
@@ -163,12 +171,12 @@ if(ABSL_ENABLE_INSTALL)
     )

     install(FILES "${PROJECT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake"
-            DESTINATION ${ABSL_INSTALL_CONFIGDIR}
+            DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}"
     )
   endif()  # absl_VERSION

   install(DIRECTORY absl
-          DESTINATION ${ABSL_INSTALL_INCLUDEDIR}
+          DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}
           FILES_MATCHING
             PATTERN "*.inc"
             PATTERN "*.h"

abseil-cpp/LTS.md
@@ -1,16 +0,0 @@
-# Long Term Support (LTS) Branches
-
-This repository contains periodic snapshots of the Abseil codebase that are
-Long Term Support (LTS) branches. An LTS branch allows you to use a known
-version of Abseil without interfering with other projects which may also, in
-turn, use Abseil. (For more information about our releases, see the
-[Abseil Release Management](https://abseil.io/about/releases) guide.)
-
-## LTS Branches
-
-The following lists LTS branches and the dates on which they have been released:
-
-* [LTS Branch December 18, 2018](https://github.com/abseil/abseil-cpp/tree/lts_2018_12_18/)
-* [LTS Branch June 20, 2018](https://github.com/abseil/abseil-cpp/tree/lts_2018_06_20/)
-* [LTS Branch August 8, 2019](https://github.com/abseil/abseil-cpp/tree/lts_2019_08_08/)
-* [LTS Branch February 25, 2020](https://github.com/abseil/abseil-cpp/tree/lts_2020_02_25/)

abseil-cpp/README.md
@@ -9,7 +9,9 @@ standard library.
 - [About Abseil](#about)
 - [Quickstart](#quickstart)
 - [Building Abseil](#build)
+- [Support](#support)
 - [Codemap](#codemap)
+- [Releases](#releases)
 - [License](#license)
 - [Links](#links)
@@ -42,14 +44,22 @@ the Abseil code, running tests, and getting a simple binary working.
 <a name="build"></a>
 ## Building Abseil

-[Bazel](https://bazel.build) is the official build system for Abseil,
-which is supported on most major platforms (Linux, Windows, macOS, for example)
-and compilers. See the [quickstart](https://abseil.io/docs/cpp/quickstart) for
-more information on building Abseil using the Bazel build system.
+[Bazel](https://bazel.build) and [CMake](https://cmake.org/) are the official
+build systems for Abseil.

-<a name="cmake"></a>
-If you require CMake support, please check the
-[CMake build instructions](CMake/README.md).
+See the [quickstart](https://abseil.io/docs/cpp/quickstart) for more information
+on building Abseil using the Bazel build system.
+
+If you require CMake support, please check the [CMake build
+instructions](CMake/README.md) and [CMake
+Quickstart](https://abseil.io/docs/cpp/quickstart-cmake).
+
+## Support
+
+Abseil is officially supported on many platforms. See the [Abseil
+platform support
+guide](https://abseil.io/docs/cpp/platforms/platforms) for details on
+supported operating systems, compilers, CPUs, etc.

 ## Codemap
@@ -62,6 +72,9 @@ Abseil contains the following C++ library components:
 * [`algorithm`](absl/algorithm/)
   <br /> The `algorithm` library contains additions to the C++ `<algorithm>`
   library and container-based versions of such algorithms.
+* [`cleanup`](absl/cleanup/)
+  <br /> The `cleanup` library contains the control-flow-construct-like type
+  `absl::Cleanup` which is used for executing a callback on scope exit.
 * [`container`](absl/container/)
   <br /> The `container` library contains additional STL-style containers,
   including Abseil's unordered "Swiss table" containers.
@@ -79,6 +92,9 @@ Abseil contains the following C++ library components:
   available within C++14 and C++17 versions of the C++ `<type_traits>` library.
 * [`numeric`](absl/numeric/)
   <br /> The `numeric` library contains C++11-compatible 128-bit integers.
+* [`status`](absl/status/)
+  <br /> The `status` contains abstractions for error handling, specifically
+  `absl::Status` and `absl::StatusOr<T>`.
 * [`strings`](absl/strings/)
   <br /> The `strings` library contains a variety of strings routines and
   utilities, including a C++11-compatible version of the C++17
@@ -97,6 +113,15 @@ Abseil contains the following C++ library components:
 * [`utility`](absl/utility/)
   <br /> The `utility` library contains utility and helper code.

+## Releases
+
+Abseil recommends users "live-at-head" (update to the latest commit from the
+master branch as often as possible). However, we realize this philosophy doesn't
+work for every project, so we also provide [Long Term Support
+Releases](https://github.com/abseil/abseil-cpp/releases) to which we backport
+fixes for severe bugs. See our [release
+management](https://abseil.io/about/releases) document for more details.
+
 ## License

 The Abseil C++ library is licensed under the terms of the Apache
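The README hunk above adds the new `cleanup` library to the codemap. For orientation, here is a minimal sketch of how `absl::Cleanup` (declared in the `cleanup/cleanup.h` header this commit adds to the DLL file list) is typically used; the file-handling scenario is illustrative, not taken from this diff:

```cpp
#include <cstdio>

#include "absl/cleanup/cleanup.h"

// Writes one line to `path`. The absl::Cleanup callback runs when `closer`
// goes out of scope, so the file is closed on every return path.
void WriteGreeting(const char* path) {
  std::FILE* f = std::fopen(path, "w");
  if (f == nullptr) return;
  absl::Cleanup closer = [f] { std::fclose(f); };
  std::fputs("Hello Abseil!\n", f);
}  // `closer` invokes std::fclose(f) here.
```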

abseil-cpp/WORKSPACE
@@ -20,6 +20,7 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
 # GoogleTest/GoogleMock framework. Used by most unit-tests.
 http_archive(
     name = "com_google_googletest",
+    # Keep this URL in sync with ABSL_GOOGLETEST_COMMIT in ci/cmake_common.sh.
     urls = ["https://github.com/google/googletest/archive/8567b09290fe402cf01923e2131c5635b8ed851b.zip"],  # 2020-06-12T22:24:28Z
     strip_prefix = "googletest-8567b09290fe402cf01923e2131c5635b8ed851b",
     sha256 = "9a8a166eb6a56c7b3d7b19dc2c946fe4778fd6f21c7a12368ad3b836d8f1be48",
@@ -28,9 +29,9 @@ http_archive(
 # Google benchmark.
 http_archive(
     name = "com_github_google_benchmark",
-    urls = ["https://github.com/google/benchmark/archive/16703ff83c1ae6d53e5155df3bb3ab0bc96083be.zip"],
-    strip_prefix = "benchmark-16703ff83c1ae6d53e5155df3bb3ab0bc96083be",
-    sha256 = "59f918c8ccd4d74b6ac43484467b500f1d64b40cc1010daa055375b322a43ba3",
+    urls = ["https://github.com/google/benchmark/archive/bf585a2789e30585b4e3ce6baf11ef2750b54677.zip"],  # 2020-11-26T11:14:03Z
+    strip_prefix = "benchmark-bf585a2789e30585b4e3ce6baf11ef2750b54677",
+    sha256 = "2a778d821997df7d8646c9c59b8edb9a573a6e04c534c01892a40aa524a7b68c",
 )

 # C++ rules for Bazel.
abseil-cpp/absl/BUILD.bazel
@@ -12,19 +12,32 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-load(
-    ":compiler_config_setting.bzl",
-    "create_llvm_config",
-)

 package(default_visibility = ["//visibility:public"])

 licenses(["notice"])

-create_llvm_config(
-    name = "llvm_compiler",
+config_setting(
+    name = "clang_compiler",
+    flag_values = {
+        "@bazel_tools//tools/cpp:compiler": "clang",
+    },
+    visibility = [":__subpackages__"],
+)
+
+config_setting(
+    name = "msvc_compiler",
+    flag_values = {
+        "@bazel_tools//tools/cpp:compiler": "msvc-cl",
+    },
+    visibility = [":__subpackages__"],
+)
+
+config_setting(
+    name = "clang-cl_compiler",
+    flag_values = {
+        "@bazel_tools//tools/cpp:compiler": "clang-cl",
+    },
     visibility = [":__subpackages__"],
 )
@@ -42,15 +55,6 @@ config_setting(
     ],
 )

-config_setting(
-    name = "windows",
-    constraint_values = [
-        "@bazel_tools//platforms:x86_64",
-        "@bazel_tools//platforms:windows",
-    ],
-    visibility = [":__subpackages__"],
-)
-
 config_setting(
     name = "ppc",
     values = {

abseil-cpp/absl/abseil.podspec.gen.py
@@ -40,8 +40,8 @@ Pod::Spec.new do |s|
     'USE_HEADERMAP' => 'NO',
     'ALWAYS_SEARCH_USER_PATHS' => 'NO',
   }
-  s.ios.deployment_target = '7.0'
-  s.osx.deployment_target = '10.9'
+  s.ios.deployment_target = '9.0'
+  s.osx.deployment_target = '10.10'
   s.tvos.deployment_target = '9.0'
   s.watchos.deployment_target = '2.0'
 """

abseil-cpp/absl/algorithm/container.h
@@ -90,10 +90,10 @@ using ContainerPointerType =
 // lookup of std::begin and std::end, i.e.
 //   using std::begin;
 //   using std::end;
-//   std::foo(begin(c), end(c);
+//   std::foo(begin(c), end(c));
 // becomes
 //   std::foo(container_algorithm_internal::begin(c),
 //            container_algorithm_internal::end(c));
 // These are meant for internal use only.

 template <typename C>
@@ -188,7 +188,7 @@ bool c_any_of(const C& c, Pred&& pred) {
 // c_none_of()
 //
 // Container-based version of the <algorithm> `std::none_of()` function to
-// test if no elements in a container fulfil a condition.
+// test if no elements in a container fulfill a condition.
 template <typename C, typename Pred>
 bool c_none_of(const C& c, Pred&& pred) {
   return std::none_of(container_algorithm_internal::c_begin(c),
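The comment fixed above documents the internal begin/end helpers behind Abseil's container-based algorithms. A small usage sketch of the public wrapper `absl::c_none_of`, whose docstring is corrected in this hunk (the predicate and data are hypothetical):

```cpp
#include <vector>

#include "absl/algorithm/container.h"

// absl::c_none_of wraps std::none_of over the whole container, so the
// begin()/end() pair never has to be spelled out at the call site.
bool NoZeros(const std::vector<int>& v) {
  return absl::c_none_of(v, [](int x) { return x == 0; });
}
```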

abseil-cpp/absl/base/BUILD.bazel
@@ -160,7 +160,8 @@ cc_library(
     ],
     copts = ABSL_DEFAULT_COPTS,
     linkopts = select({
-        "//absl:windows": [],
+        "//absl:msvc_compiler": [],
+        "//absl:clang-cl_compiler": [],
         "//absl:wasm": [],
         "//conditions:default": ["-pthread"],
     }) + ABSL_DEFAULT_LINKOPTS,
@@ -220,7 +221,10 @@ cc_library(
     ],
     copts = ABSL_DEFAULT_COPTS,
     linkopts = select({
-        "//absl:windows": [
+        "//absl:msvc_compiler": [
+            "-DEFAULTLIB:advapi32.lib",
+        ],
+        "//absl:clang-cl_compiler": [
             "-DEFAULTLIB:advapi32.lib",
         ],
         "//absl:wasm": [],
@@ -479,6 +483,7 @@ cc_library(
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
+        ":base",
         ":config",
         ":core_headers",
     ],
@@ -551,7 +556,9 @@ cc_test(
     srcs = ["internal/low_level_alloc_test.cc"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
-    tags = ["no_test_ios_x86_64"],
+    tags = [
+        "no_test_ios_x86_64",
+    ],
     deps = [
         ":malloc_internal",
         "//absl/container:node_hash_map",
@@ -586,31 +593,6 @@ cc_test(
     ],
 )

-cc_library(
-    name = "bits",
-    hdrs = ["internal/bits.h"],
-    linkopts = ABSL_DEFAULT_LINKOPTS,
-    visibility = [
-        "//absl:__subpackages__",
-    ],
-    deps = [
-        ":config",
-        ":core_headers",
-    ],
-)
-
-cc_test(
-    name = "bits_test",
-    size = "small",
-    srcs = ["internal/bits_test.cc"],
-    copts = ABSL_TEST_COPTS,
-    linkopts = ABSL_DEFAULT_LINKOPTS,
-    deps = [
-        ":bits",
-        "@com_google_googletest//:gtest_main",
-    ],
-)
-
 cc_library(
     name = "exponential_biased",
     srcs = ["internal/exponential_biased.cc"],

abseil-cpp/absl/base/CMakeLists.txt
@@ -418,6 +418,7 @@ absl_cc_library(
   COPTS
     ${ABSL_DEFAULT_COPTS}
   DEPS
+    absl::base
     absl::config
     absl::core_headers
   PUBLIC
@@ -518,30 +519,6 @@ absl_cc_test(
     gtest_main
 )

-absl_cc_library(
-  NAME
-    bits
-  HDRS
-    "internal/bits.h"
-  COPTS
-    ${ABSL_DEFAULT_COPTS}
-  DEPS
-    absl::config
-    absl::core_headers
-)
-
-absl_cc_test(
-  NAME
-    bits_test
-  SRCS
-    "internal/bits_test.cc"
-  COPTS
-    ${ABSL_TEST_COPTS}
-  DEPS
-    absl::bits
-    gtest_main
-)
-
 absl_cc_library(
   NAME
     exponential_biased

abseil-cpp/absl/base/attributes.h
@@ -18,8 +18,6 @@
 // These macros are used within Abseil and allow the compiler to optimize, where
 // applicable, certain function calls.
 //
-// This file is used for both C and C++!
-//
 // Most macros here are exposing GCC or Clang features, and are stubbed out for
 // other compilers.
 //
@@ -121,7 +119,7 @@
 #if ABSL_HAVE_ATTRIBUTE(disable_tail_calls)
 #define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 1
 #define ABSL_ATTRIBUTE_NO_TAIL_CALL __attribute__((disable_tail_calls))
-#elif defined(__GNUC__) && !defined(__clang__)
+#elif defined(__GNUC__) && !defined(__clang__) && !defined(__e2k__)
 #define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 1
 #define ABSL_ATTRIBUTE_NO_TAIL_CALL \
   __attribute__((optimize("no-optimize-sibling-calls")))
@@ -607,6 +605,7 @@
 // When used with unsupported compilers, the ABSL_FALLTHROUGH_INTENDED macro
 // has no effect on diagnostics. In any case this macro has no effect on runtime
 // behavior and performance of code.
+
 #ifdef ABSL_FALLTHROUGH_INTENDED
 #error "ABSL_FALLTHROUGH_INTENDED should not be defined."
 #endif
@@ -645,7 +644,7 @@
 // Every usage of a deprecated entity will trigger a warning when compiled with
 // clang's `-Wdeprecated-declarations` option. This option is turned off by
 // default, but the warnings will be reported by clang-tidy.
-#if defined(__clang__) && __cplusplus >= 201103L
+#if defined(__clang__) && defined(__cplusplus) && __cplusplus >= 201103L
 #define ABSL_DEPRECATED(message) __attribute__((deprecated(message)))
 #endif
@@ -679,4 +678,25 @@
 #define ABSL_CONST_INIT
 #endif  // ABSL_HAVE_CPP_ATTRIBUTE(clang::require_constant_initialization)

+// ABSL_ATTRIBUTE_PURE_FUNCTION
+//
+// ABSL_ATTRIBUTE_PURE_FUNCTION is used to annotate declarations of "pure"
+// functions. A function is pure if its return value is only a function of its
+// arguments. The pure attribute prohibits a function from modifying the state
+// of the program that is observable by means other than inspecting the
+// function's return value. Declaring such functions with the pure attribute
+// allows the compiler to avoid emitting some calls in repeated invocations of
+// the function with the same argument values.
+//
+// Example:
+//
+//  ABSL_ATTRIBUTE_PURE_FUNCTION int64_t ToInt64Milliseconds(Duration d);
+#if ABSL_HAVE_CPP_ATTRIBUTE(gnu::pure)
+#define ABSL_ATTRIBUTE_PURE_FUNCTION [[gnu::pure]]
+#elif ABSL_HAVE_ATTRIBUTE(pure)
+#define ABSL_ATTRIBUTE_PURE_FUNCTION __attribute__((pure))
+#else
+#define ABSL_ATTRIBUTE_PURE_FUNCTION
+#endif
+
 #endif  // ABSL_BASE_ATTRIBUTES_H_
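For reference, `ABSL_FALLTHROUGH_INTENDED` (whose surrounding documentation is touched above) annotates an intentional switch fall-through; a minimal sketch, with hypothetical case logic:

```cpp
#include "absl/base/attributes.h"

int Classify(int x) {
  switch (x) {
    case 0:
      x += 10;
      // Annotating the intentional fall-through keeps
      // -Wimplicit-fallthrough quiet on supporting compilers.
      ABSL_FALLTHROUGH_INTENDED;
    case 1:
      return x;
    default:
      return 0;
  }
}
```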

abseil-cpp/absl/base/call_once.h
@@ -177,15 +177,8 @@ void CallOnceImpl(std::atomic<uint32_t>* control,
                              scheduling_mode) == kOnceInit) {
     base_internal::invoke(std::forward<Callable>(fn),
                           std::forward<Args>(args)...);
-    // The call to SpinLockWake below is an optimization, because the waiter
-    // in SpinLockWait is waiting with a short timeout. The atomic load/store
-    // sequence is slightly faster than an atomic exchange:
-    //   old_control = control->exchange(base_internal::kOnceDone,
-    //                                   std::memory_order_release);
-    // We opt for a slightly faster case when there are no waiters, in spite
-    // of longer tail latency when there are waiters.
-    old_control = control->load(std::memory_order_relaxed);
-    control->store(base_internal::kOnceDone, std::memory_order_release);
+    old_control =
+        control->exchange(base_internal::kOnceDone, std::memory_order_release);
     if (old_control == base_internal::kOnceWaiter) {
       base_internal::SpinLockWake(control, true);
     }
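The hunk above swaps the relaxed load plus release store for a single release `exchange`. A standalone sketch of why the exchange matters (the constants and function here are simplified stand-ins for the real `base_internal` ones): a waiter that sets the control word between the load and the store would be missed, whereas `exchange` atomically observes the value it replaces.

```cpp
#include <atomic>
#include <cstdint>

constexpr uint32_t kOnceDone = 2;    // simplified stand-in values
constexpr uint32_t kOnceWaiter = 3;

void FinishOnce(std::atomic<uint32_t>* control) {
  // exchange() publishes kOnceDone and returns the previous value in one
  // atomic step, so a concurrent transition to kOnceWaiter cannot slip in
  // between a separate load() and store() and go unnoticed.
  uint32_t old_control =
      control->exchange(kOnceDone, std::memory_order_release);
  if (old_control == kOnceWaiter) {
    // Wake blocked waiters (SpinLockWake in the real implementation).
  }
}
```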

abseil-cpp/absl/base/config.h
@@ -121,10 +121,16 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
 #if ABSL_OPTION_USE_INLINE_NAMESPACE == 0
 #define ABSL_NAMESPACE_BEGIN
 #define ABSL_NAMESPACE_END
+#define ABSL_INTERNAL_C_SYMBOL(x) x
 #elif ABSL_OPTION_USE_INLINE_NAMESPACE == 1
 #define ABSL_NAMESPACE_BEGIN \
   inline namespace ABSL_OPTION_INLINE_NAMESPACE_NAME {
 #define ABSL_NAMESPACE_END }
+#define ABSL_INTERNAL_C_SYMBOL_HELPER_2(x, v) x##_##v
+#define ABSL_INTERNAL_C_SYMBOL_HELPER_1(x, v) \
+  ABSL_INTERNAL_C_SYMBOL_HELPER_2(x, v)
+#define ABSL_INTERNAL_C_SYMBOL(x) \
+  ABSL_INTERNAL_C_SYMBOL_HELPER_1(x, ABSL_OPTION_INLINE_NAMESPACE_NAME)
 #else
 #error options.h is misconfigured.
 #endif
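A sketch of what the new `ABSL_INTERNAL_C_SYMBOL` machinery expands to, assuming an inline namespace name of `lts_20210324` (the actual name is configured in `options.h`; the symbol used here is one plausible example):

```cpp
// With ABSL_OPTION_USE_INLINE_NAMESPACE == 1 and
// ABSL_OPTION_INLINE_NAMESPACE_NAME defined as lts_20210324:
//
//   ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)
//
// expands through the two helper macros to:
//
//   AbslInternalSpinLockWake_lts_20210324
//
// so C-linkage symbols carry a per-release suffix and two Abseil versions
// can be linked into one binary without symbol collisions.
```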
@@ -216,6 +222,8 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
 #if ABSL_INTERNAL_HAS_KEYWORD(__builtin_LINE) && \
     ABSL_INTERNAL_HAS_KEYWORD(__builtin_FILE)
 #define ABSL_HAVE_SOURCE_LOCATION_CURRENT 1
+#elif defined(__GNUC__) && __GNUC__ >= 5
+#define ABSL_HAVE_SOURCE_LOCATION_CURRENT 1
 #endif
 #endif
@@ -364,7 +372,7 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
 #elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \
     defined(__ros__) || defined(__native_client__) || defined(__asmjs__) || \
     defined(__wasm__) || defined(__Fuchsia__) || defined(__sun) || \
-    defined(__ASYLO__)
+    defined(__ASYLO__) || defined(__myriad2__)
 #define ABSL_HAVE_MMAP 1
 #endif
@@ -379,6 +387,15 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
 #define ABSL_HAVE_PTHREAD_GETSCHEDPARAM 1
 #endif

+// ABSL_HAVE_SCHED_GETCPU
+//
+// Checks whether sched_getcpu is available.
+#ifdef ABSL_HAVE_SCHED_GETCPU
+#error ABSL_HAVE_SCHED_GETCPU cannot be directly set
+#elif defined(__linux__)
+#define ABSL_HAVE_SCHED_GETCPU 1
+#endif
+
 // ABSL_HAVE_SCHED_YIELD
 //
 // Checks whether the platform implements sched_yield(2) as defined in
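Code can use the new macro to guard calls to the glibc extension. A small sketch (hedged: `sched_getcpu` may additionally require `_GNU_SOURCE` on some toolchains; `CurrentCpu` is a hypothetical helper):

```cpp
#include "absl/base/config.h"

#ifdef ABSL_HAVE_SCHED_GETCPU
#include <sched.h>
#endif

// Returns the CPU the calling thread is running on, or -1 where the
// platform does not expose sched_getcpu.
int CurrentCpu() {
#ifdef ABSL_HAVE_SCHED_GETCPU
  return sched_getcpu();
#else
  return -1;
#endif
}
```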
@@ -490,7 +507,7 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
 #endif

 #ifdef __has_include
-#if __has_include(<any>) && __cplusplus >= 201703L && \
+#if __has_include(<any>) && defined(__cplusplus) && __cplusplus >= 201703L && \
     !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
 #define ABSL_HAVE_STD_ANY 1
 #endif
@@ -504,8 +521,8 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
 #endif

 #ifdef __has_include
-#if __has_include(<optional>) && __cplusplus >= 201703L && \
-    !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
+#if __has_include(<optional>) && defined(__cplusplus) && \
+    __cplusplus >= 201703L && !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
 #define ABSL_HAVE_STD_OPTIONAL 1
 #endif
 #endif
@@ -518,8 +535,8 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
 #endif

 #ifdef __has_include
-#if __has_include(<variant>) && __cplusplus >= 201703L && \
-    !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
+#if __has_include(<variant>) && defined(__cplusplus) && \
+    __cplusplus >= 201703L && !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
 #define ABSL_HAVE_STD_VARIANT 1
 #endif
 #endif
@@ -532,7 +549,8 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
 #endif

 #ifdef __has_include
-#if __has_include(<string_view>) && __cplusplus >= 201703L
+#if __has_include(<string_view>) && defined(__cplusplus) && \
+    __cplusplus >= 201703L
 #define ABSL_HAVE_STD_STRING_VIEW 1
 #endif
 #endif
@@ -544,8 +562,9 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
 // not correctly set by MSVC, so we use `_MSVC_LANG` to check the language
 // version.
 // TODO(zhangxy): fix tests before enabling aliasing for `std::any`.
 #if defined(_MSC_VER) && _MSC_VER >= 1910 && \
-    ((defined(_MSVC_LANG) && _MSVC_LANG > 201402) || __cplusplus > 201402)
+    ((defined(_MSVC_LANG) && _MSVC_LANG > 201402) || \
+     (defined(__cplusplus) && __cplusplus > 201402))
 // #define ABSL_HAVE_STD_ANY 1
 #define ABSL_HAVE_STD_OPTIONAL 1
 #define ABSL_HAVE_STD_VARIANT 1
@@ -711,4 +730,13 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
 #define ABSL_HAVE_ADDRESS_SANITIZER 1
 #endif

+// ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION
+//
+// Class template argument deduction is a language feature added in C++17.
+#ifdef ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION
+#error "ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION cannot be directly set."
+#elif defined(__cpp_deduction_guides)
+#define ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION 1
+#endif
+
 #endif  // ABSL_BASE_CONFIG_H_
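A minimal illustration of what the new feature-test macro gates; the variable here is hypothetical:

```cpp
#include <utility>

#include "absl/base/config.h"

#ifdef ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION
// C++17 deduces std::pair<int, double> from the initializer.
std::pair deduced(1, 2.5);
#else
// Without CTAD the template arguments must be spelled out.
std::pair<int, double> deduced(1, 2.5);
#endif
```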

@@ -110,6 +110,9 @@
 // Define race annotations.

 #if ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 1

+// Some of the symbols used in this section (e.g. AnnotateBenignRaceSized) are
+// defined by the compiler-based sanitizer implementation, not by the Abseil
+// library. Therefore they do not use ABSL_INTERNAL_C_SYMBOL.
+
 // -------------------------------------------------------------
 // Annotations that suppress errors. It is usually better to express the
@@ -286,17 +289,22 @@ ABSL_INTERNAL_END_EXTERN_C
 // Define IGNORE_READS_BEGIN/_END annotations.

 #if ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED == 1

+// Some of the symbols used in this section (e.g. AnnotateIgnoreReadsBegin) are
+// defined by the compiler-based implementation, not by the Abseil
+// library. Therefore they do not use ABSL_INTERNAL_C_SYMBOL.
+
 // Request the analysis tool to ignore all reads in the current thread until
 // ABSL_ANNOTATE_IGNORE_READS_END is called. Useful to ignore intentional racey
 // reads, while still checking other reads and all writes.
 // See also ABSL_ANNOTATE_UNPROTECTED_READ.
-#define ABSL_ANNOTATE_IGNORE_READS_BEGIN() \
-  ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsBegin)(__FILE__, __LINE__)
+#define ABSL_ANNOTATE_IGNORE_READS_BEGIN()                \
+  ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsBegin) \
+  (__FILE__, __LINE__)

 // Stop ignoring reads.
-#define ABSL_ANNOTATE_IGNORE_READS_END() \
-  ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsEnd)(__FILE__, __LINE__)
+#define ABSL_ANNOTATE_IGNORE_READS_END()                \
+  ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsEnd) \
+  (__FILE__, __LINE__)

 // Function prototypes of annotations provided by the compiler-based sanitizer
 // implementation.
@@ -316,16 +324,22 @@ ABSL_INTERNAL_END_EXTERN_C
 // TODO(delesley) -- The exclusive lock here ignores writes as well, but
 // allows IGNORE_READS_AND_WRITES to work properly.

 #define ABSL_ANNOTATE_IGNORE_READS_BEGIN() \
-  ABSL_INTERNAL_GLOBAL_SCOPED(AbslInternalAnnotateIgnoreReadsBegin)()
+  ABSL_INTERNAL_GLOBAL_SCOPED( \
+      ABSL_INTERNAL_C_SYMBOL(AbslInternalAnnotateIgnoreReadsBegin)) \
+  ()

 #define ABSL_ANNOTATE_IGNORE_READS_END() \
-  ABSL_INTERNAL_GLOBAL_SCOPED(AbslInternalAnnotateIgnoreReadsEnd)()
+  ABSL_INTERNAL_GLOBAL_SCOPED( \
+      ABSL_INTERNAL_C_SYMBOL(AbslInternalAnnotateIgnoreReadsEnd)) \
+  ()

-ABSL_INTERNAL_STATIC_INLINE void AbslInternalAnnotateIgnoreReadsBegin()
+ABSL_INTERNAL_STATIC_INLINE void ABSL_INTERNAL_C_SYMBOL(
+    AbslInternalAnnotateIgnoreReadsBegin)()
     ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE {}

-ABSL_INTERNAL_STATIC_INLINE void AbslInternalAnnotateIgnoreReadsEnd()
+ABSL_INTERNAL_STATIC_INLINE void ABSL_INTERNAL_C_SYMBOL(
+    AbslInternalAnnotateIgnoreReadsEnd)()
     ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE {}

 #else

@@ -1,219 +0,0 @@
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef ABSL_BASE_INTERNAL_BITS_H_
#define ABSL_BASE_INTERNAL_BITS_H_
// This file contains bitwise ops which are implementation details of various
// absl libraries.
#include <cstdint>
#include "absl/base/config.h"
// Clang on Windows has __builtin_clzll; otherwise we need to use the
// windows intrinsic functions.
#if defined(_MSC_VER) && !defined(__clang__)
#include <intrin.h>
#if defined(_M_X64)
#pragma intrinsic(_BitScanReverse64)
#pragma intrinsic(_BitScanForward64)
#endif
#pragma intrinsic(_BitScanReverse)
#pragma intrinsic(_BitScanForward)
#endif
#include "absl/base/attributes.h"
#if defined(_MSC_VER) && !defined(__clang__)
// We can achieve something similar to attribute((always_inline)) with MSVC by
// using the __forceinline keyword; however, this is not perfect. MSVC is
// much less aggressive about inlining, even with the __forceinline keyword.
#define ABSL_BASE_INTERNAL_FORCEINLINE __forceinline
#else
// Use default attribute inline.
#define ABSL_BASE_INTERNAL_FORCEINLINE inline ABSL_ATTRIBUTE_ALWAYS_INLINE
#endif
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros64Slow(uint64_t n) {
int zeroes = 60;
if (n >> 32) {
zeroes -= 32;
n >>= 32;
}
if (n >> 16) {
zeroes -= 16;
n >>= 16;
}
if (n >> 8) {
zeroes -= 8;
n >>= 8;
}
if (n >> 4) {
zeroes -= 4;
n >>= 4;
}
return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[n] + zeroes;
}
ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros64(uint64_t n) {
#if defined(_MSC_VER) && !defined(__clang__) && defined(_M_X64)
// MSVC does not have __builtin_clzll. Use _BitScanReverse64.
unsigned long result = 0; // NOLINT(runtime/int)
if (_BitScanReverse64(&result, n)) {
return 63 - result;
}
return 64;
#elif defined(_MSC_VER) && !defined(__clang__)
// MSVC does not have __builtin_clzll. Compose two calls to _BitScanReverse
unsigned long result = 0; // NOLINT(runtime/int)
if ((n >> 32) &&
_BitScanReverse(&result, static_cast<unsigned long>(n >> 32))) {
return 31 - result;
}
if (_BitScanReverse(&result, static_cast<unsigned long>(n))) {
return 63 - result;
}
return 64;
#elif defined(__GNUC__) || defined(__clang__)
// Use __builtin_clzll, which uses the following instructions:
// x86: bsr
// ARM64: clz
// PPC: cntlzd
static_assert(sizeof(unsigned long long) == sizeof(n), // NOLINT(runtime/int)
"__builtin_clzll does not take 64-bit arg");
// Handle 0 as a special case because __builtin_clzll(0) is undefined.
if (n == 0) {
return 64;
}
return __builtin_clzll(n);
#else
return CountLeadingZeros64Slow(n);
#endif
}
ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros32Slow(uint64_t n) {
int zeroes = 28;
if (n >> 16) {
zeroes -= 16;
n >>= 16;
}
if (n >> 8) {
zeroes -= 8;
n >>= 8;
}
if (n >> 4) {
zeroes -= 4;
n >>= 4;
}
return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[n] + zeroes;
}
ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros32(uint32_t n) {
#if defined(_MSC_VER) && !defined(__clang__)
unsigned long result = 0; // NOLINT(runtime/int)
if (_BitScanReverse(&result, n)) {
return 31 - result;
}
return 32;
#elif defined(__GNUC__) || defined(__clang__)
// Use __builtin_clz, which uses the following instructions:
// x86: bsr
// ARM64: clz
// PPC: cntlzd
static_assert(sizeof(int) == sizeof(n),
"__builtin_clz does not take 32-bit arg");
// Handle 0 as a special case because __builtin_clz(0) is undefined.
if (n == 0) {
return 32;
}
return __builtin_clz(n);
#else
return CountLeadingZeros32Slow(n);
#endif
}
ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero64Slow(uint64_t n) {
int c = 63;
n &= ~n + 1;
if (n & 0x00000000FFFFFFFF) c -= 32;
if (n & 0x0000FFFF0000FFFF) c -= 16;
if (n & 0x00FF00FF00FF00FF) c -= 8;
if (n & 0x0F0F0F0F0F0F0F0F) c -= 4;
if (n & 0x3333333333333333) c -= 2;
if (n & 0x5555555555555555) c -= 1;
return c;
}
ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero64(uint64_t n) {
#if defined(_MSC_VER) && !defined(__clang__) && defined(_M_X64)
unsigned long result = 0; // NOLINT(runtime/int)
_BitScanForward64(&result, n);
return result;
#elif defined(_MSC_VER) && !defined(__clang__)
unsigned long result = 0; // NOLINT(runtime/int)
if (static_cast<uint32_t>(n) == 0) {
_BitScanForward(&result, static_cast<unsigned long>(n >> 32));
return result + 32;
}
_BitScanForward(&result, static_cast<unsigned long>(n));
return result;
#elif defined(__GNUC__) || defined(__clang__)
static_assert(sizeof(unsigned long long) == sizeof(n), // NOLINT(runtime/int)
"__builtin_ctzll does not take 64-bit arg");
return __builtin_ctzll(n);
#else
return CountTrailingZerosNonZero64Slow(n);
#endif
}
ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero32Slow(uint32_t n) {
int c = 31;
n &= ~n + 1;
if (n & 0x0000FFFF) c -= 16;
if (n & 0x00FF00FF) c -= 8;
if (n & 0x0F0F0F0F) c -= 4;
if (n & 0x33333333) c -= 2;
if (n & 0x55555555) c -= 1;
return c;
}
ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero32(uint32_t n) {
#if defined(_MSC_VER) && !defined(__clang__)
unsigned long result = 0; // NOLINT(runtime/int)
_BitScanForward(&result, n);
return result;
#elif defined(__GNUC__) || defined(__clang__)
static_assert(sizeof(int) == sizeof(n),
"__builtin_ctz does not take 32-bit arg");
return __builtin_ctz(n);
#else
return CountTrailingZerosNonZero32Slow(n);
#endif
}
#undef ABSL_BASE_INTERNAL_FORCEINLINE
} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace absl
#endif // ABSL_BASE_INTERNAL_BITS_H_
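This internal header appears to be superseded by the absl/numeric bit-manipulation facilities in the same release; the removal above drops the fallbacks shown. For readers puzzling over the string-literal lookup used in the slow paths, a standalone restatement of the trick:

```cpp
#include <cstdint>
#include <iostream>

// After the shifts in CountLeadingZeros64Slow, n is a value in [0, 15];
// the string literal maps each nibble to its count of leading zero bits.
int NibbleClz(uint64_t n) {
  return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[n];
}

int main() {
  std::cout << NibbleClz(0) << "\n";  // 4: nibble 0000
  std::cout << NibbleClz(1) << "\n";  // 3: nibble 0001
  std::cout << NibbleClz(9) << "\n";  // 0: nibble 1001
}
```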

@@ -1,97 +0,0 @@
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/base/internal/bits.h"
#include "gtest/gtest.h"
namespace {
int CLZ64(uint64_t n) {
int fast = absl::base_internal::CountLeadingZeros64(n);
int slow = absl::base_internal::CountLeadingZeros64Slow(n);
EXPECT_EQ(fast, slow) << n;
return fast;
}
TEST(BitsTest, CountLeadingZeros64) {
EXPECT_EQ(64, CLZ64(uint64_t{}));
EXPECT_EQ(0, CLZ64(~uint64_t{}));
for (int index = 0; index < 64; index++) {
uint64_t x = static_cast<uint64_t>(1) << index;
const auto cnt = 63 - index;
ASSERT_EQ(cnt, CLZ64(x)) << index;
ASSERT_EQ(cnt, CLZ64(x + x - 1)) << index;
}
}
int CLZ32(uint32_t n) {
int fast = absl::base_internal::CountLeadingZeros32(n);
int slow = absl::base_internal::CountLeadingZeros32Slow(n);
EXPECT_EQ(fast, slow) << n;
return fast;
}
TEST(BitsTest, CountLeadingZeros32) {
EXPECT_EQ(32, CLZ32(uint32_t{}));
EXPECT_EQ(0, CLZ32(~uint32_t{}));
for (int index = 0; index < 32; index++) {
uint32_t x = static_cast<uint32_t>(1) << index;
const auto cnt = 31 - index;
ASSERT_EQ(cnt, CLZ32(x)) << index;
ASSERT_EQ(cnt, CLZ32(x + x - 1)) << index;
ASSERT_EQ(CLZ64(x), CLZ32(x) + 32);
}
}
int CTZ64(uint64_t n) {
int fast = absl::base_internal::CountTrailingZerosNonZero64(n);
int slow = absl::base_internal::CountTrailingZerosNonZero64Slow(n);
EXPECT_EQ(fast, slow) << n;
return fast;
}
TEST(BitsTest, CountTrailingZerosNonZero64) {
EXPECT_EQ(0, CTZ64(~uint64_t{}));
for (int index = 0; index < 64; index++) {
uint64_t x = static_cast<uint64_t>(1) << index;
const auto cnt = index;
ASSERT_EQ(cnt, CTZ64(x)) << index;
ASSERT_EQ(cnt, CTZ64(~(x - 1))) << index;
}
}
int CTZ32(uint32_t n) {
int fast = absl::base_internal::CountTrailingZerosNonZero32(n);
int slow = absl::base_internal::CountTrailingZerosNonZero32Slow(n);
EXPECT_EQ(fast, slow) << n;
return fast;
}
TEST(BitsTest, CountTrailingZerosNonZero32) {
EXPECT_EQ(0, CTZ32(~uint32_t{}));
for (int index = 0; index < 32; index++) {
uint32_t x = static_cast<uint32_t>(1) << index;
const auto cnt = index;
ASSERT_EQ(cnt, CTZ32(x)) << index;
ASSERT_EQ(cnt, CTZ32(~(x - 1))) << index;
}
}
} // namespace

@@ -74,10 +74,13 @@ namespace base_internal {
 inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd,
                         off64_t offset) noexcept {
 #if defined(__i386__) || defined(__ARM_ARCH_3__) || defined(__ARM_EABI__) || \
+    defined(__m68k__) || defined(__sh__) ||                                  \
+    (defined(__hppa__) && !defined(__LP64__)) ||                             \
     (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32) ||                   \
     (defined(__PPC__) && !defined(__PPC64__)) ||                             \
     (defined(__riscv) && __riscv_xlen == 32) ||                              \
-    (defined(__s390__) && !defined(__s390x__))
+    (defined(__s390__) && !defined(__s390x__)) ||                            \
+    (defined(__sparc__) && !defined(__arch64__))
   // On these architectures, implement mmap with mmap2.
   static int pagesize = 0;
   if (pagesize == 0) {

@@ -26,6 +26,7 @@
 #endif

 #include <cstdint>

+#include "absl/base/casts.h"
 #include "absl/base/config.h"
 #include "absl/base/internal/unaligned_access.h"
 #include "absl/base/port.h"
@@ -173,6 +174,36 @@ inline constexpr bool IsLittleEndian() { return false; }
#endif /* ENDIAN */
inline uint8_t FromHost(uint8_t x) { return x; }
inline uint16_t FromHost(uint16_t x) { return FromHost16(x); }
inline uint32_t FromHost(uint32_t x) { return FromHost32(x); }
inline uint64_t FromHost(uint64_t x) { return FromHost64(x); }
inline uint8_t ToHost(uint8_t x) { return x; }
inline uint16_t ToHost(uint16_t x) { return ToHost16(x); }
inline uint32_t ToHost(uint32_t x) { return ToHost32(x); }
inline uint64_t ToHost(uint64_t x) { return ToHost64(x); }
inline int8_t FromHost(int8_t x) { return x; }
inline int16_t FromHost(int16_t x) {
return bit_cast<int16_t>(FromHost16(bit_cast<uint16_t>(x)));
}
inline int32_t FromHost(int32_t x) {
return bit_cast<int32_t>(FromHost32(bit_cast<uint32_t>(x)));
}
inline int64_t FromHost(int64_t x) {
return bit_cast<int64_t>(FromHost64(bit_cast<uint64_t>(x)));
}
inline int8_t ToHost(int8_t x) { return x; }
inline int16_t ToHost(int16_t x) {
return bit_cast<int16_t>(ToHost16(bit_cast<uint16_t>(x)));
}
inline int32_t ToHost(int32_t x) {
return bit_cast<int32_t>(ToHost32(bit_cast<uint32_t>(x)));
}
inline int64_t ToHost(int64_t x) {
return bit_cast<int64_t>(ToHost64(bit_cast<uint64_t>(x)));
}
// Functions to do unaligned loads and stores in little-endian order.
inline uint16_t Load16(const void *p) {
  return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
@@ -233,6 +264,36 @@ inline constexpr bool IsLittleEndian() { return false; }
#endif /* ENDIAN */
inline uint8_t FromHost(uint8_t x) { return x; }
inline uint16_t FromHost(uint16_t x) { return FromHost16(x); }
inline uint32_t FromHost(uint32_t x) { return FromHost32(x); }
inline uint64_t FromHost(uint64_t x) { return FromHost64(x); }
inline uint8_t ToHost(uint8_t x) { return x; }
inline uint16_t ToHost(uint16_t x) { return ToHost16(x); }
inline uint32_t ToHost(uint32_t x) { return ToHost32(x); }
inline uint64_t ToHost(uint64_t x) { return ToHost64(x); }
inline int8_t FromHost(int8_t x) { return x; }
inline int16_t FromHost(int16_t x) {
return bit_cast<int16_t>(FromHost16(bit_cast<uint16_t>(x)));
}
inline int32_t FromHost(int32_t x) {
return bit_cast<int32_t>(FromHost32(bit_cast<uint32_t>(x)));
}
inline int64_t FromHost(int64_t x) {
return bit_cast<int64_t>(FromHost64(bit_cast<uint64_t>(x)));
}
inline int8_t ToHost(int8_t x) { return x; }
inline int16_t ToHost(int16_t x) {
return bit_cast<int16_t>(ToHost16(bit_cast<uint16_t>(x)));
}
inline int32_t ToHost(int32_t x) {
return bit_cast<int32_t>(ToHost32(bit_cast<uint32_t>(x)));
}
inline int64_t ToHost(int64_t x) {
return bit_cast<int64_t>(ToHost64(bit_cast<uint64_t>(x)));
}
// Functions to do unaligned loads and stores in big-endian order.
inline uint16_t Load16(const void *p) {
  return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
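The new width-dispatching overloads let generic code convert any fixed-width integer without naming the size. A hedged usage sketch against the internal header (internal API, shown for illustration only; `ToLittleEndianWire` is hypothetical):

```cpp
#include <cstdint>

#include "absl/base/internal/endian.h"

// FromHost picks the overload matching T's width, so one template covers
// int16_t through uint64_t; ToHost(FromHost(x)) == x on any host byte order.
template <typename T>
T ToLittleEndianWire(T value) {
  return absl::little_endian::FromHost(value);
}
```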

@@ -185,7 +185,7 @@ TEST(ExponentialBiasedTest, InitializationModes) {
   ABSL_CONST_INIT static ExponentialBiased eb_static;
   EXPECT_THAT(eb_static.GetSkipCount(2), Ge(0));

-#if ABSL_HAVE_THREAD_LOCAL
+#ifdef ABSL_HAVE_THREAD_LOCAL
   thread_local ExponentialBiased eb_thread;
   EXPECT_THAT(eb_thread.GetSkipCount(2), Ge(0));
 #endif

@@ -21,6 +21,10 @@
 #include <unordered_map>
 #include <utility>

+#ifdef __EMSCRIPTEN__
+#include <emscripten.h>
+#endif
+
 #include "absl/container/node_hash_map.h"

 namespace absl {
@@ -158,5 +162,20 @@ ABSL_NAMESPACE_END
int main(int argc, char *argv[]) {
  // The actual test runs in the global constructor of `before_main`.
  printf("PASS\n");
#ifdef __EMSCRIPTEN__
// clang-format off
// This is JS here. Don't try to format it.
MAIN_THREAD_EM_ASM({
if (ENVIRONMENT_IS_WEB) {
if (typeof TEST_FINISH === 'function') {
TEST_FINISH($0);
} else {
console.error('Attempted to exit with status ' + $0);
console.error('But TEST_FINISH is not a function.');
}
}
}, 0);
// clang-format on
#endif
  return 0;
}

@@ -61,6 +61,8 @@ class SchedulingGuard {
  public:
   // Returns true iff the calling thread may be cooperatively rescheduled.
   static bool ReschedulingIsAllowed();
+  SchedulingGuard(const SchedulingGuard&) = delete;
+  SchedulingGuard& operator=(const SchedulingGuard&) = delete;

  private:
   // Disable cooperative rescheduling of the calling thread. It may still
@@ -101,9 +103,6 @@ class SchedulingGuard {
   friend class SchedulingHelper;
   friend class SpinLock;
   friend int absl::synchronization_internal::MutexDelay(int32_t c, int mode);
-
-  SchedulingGuard(const SchedulingGuard&) = delete;
-  SchedulingGuard& operator=(const SchedulingGuard&) = delete;
 };

 //------------------------------------------------------------------------------

@@ -67,28 +67,32 @@
 #undef ABSL_HAVE_RAW_IO
 #endif

+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace raw_logging_internal {
+namespace {
+
 // TODO(gfalcon): We want raw-logging to work on as many platforms as possible.
-// Explicitly #error out when not ABSL_LOW_LEVEL_WRITE_SUPPORTED, except for a
-// selected set of platforms for which we expect not to be able to raw log.
+// Explicitly `#error` out when not `ABSL_LOW_LEVEL_WRITE_SUPPORTED`, except for
+// a selected set of platforms for which we expect not to be able to raw log.

-ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static absl::base_internal::AtomicHook<
-    absl::raw_logging_internal::LogPrefixHook>
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
+absl::base_internal::AtomicHook<LogPrefixHook>
     log_prefix_hook;
-ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static absl::base_internal::AtomicHook<
-    absl::raw_logging_internal::AbortHook>
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
+absl::base_internal::AtomicHook<AbortHook>
     abort_hook;

 #ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
-static const char kTruncated[] = " ... (message truncated)\n";
+constexpr char kTruncated[] = " ... (message truncated)\n";

 // sprintf the format to the buffer, adjusting *buf and *size to reflect the
 // consumed bytes, and return whether the message fit without truncation. If
 // truncation occurred, if possible leave room in the buffer for the message
 // kTruncated[].
-inline static bool VADoRawLog(char** buf, int* size, const char* format,
-                              va_list ap) ABSL_PRINTF_ATTRIBUTE(3, 0);
-inline static bool VADoRawLog(char** buf, int* size,
-                              const char* format, va_list ap) {
+bool VADoRawLog(char** buf, int* size, const char* format, va_list ap)
+    ABSL_PRINTF_ATTRIBUTE(3, 0);
+bool VADoRawLog(char** buf, int* size, const char* format, va_list ap) {
   int n = vsnprintf(*buf, *size, format, ap);
   bool result = true;
   if (n < 0 || n > *size) {
@@ -96,7 +100,7 @@ inline static bool VADoRawLog(char** buf, int* size,
     if (static_cast<size_t>(*size) > sizeof(kTruncated)) {
       n = *size - sizeof(kTruncated);  // room for truncation message
     } else {
-      n = 0;                           // no room for truncation message
+      n = 0;  // no room for truncation message
     }
   }
   *size -= n;
@@ -105,9 +109,7 @@ inline static bool VADoRawLog(char** buf, int* size,
 }
 #endif  // ABSL_LOW_LEVEL_WRITE_SUPPORTED

-static constexpr int kLogBufSize = 3000;
-
-namespace {
+constexpr int kLogBufSize = 3000;

 // CAVEAT: vsnprintf called from *DoRawLog below has some (exotic) code paths
 // that invoke malloc() and getenv() that might acquire some locks.
@@ -166,7 +168,7 @@ void RawLogVA(absl::LogSeverity severity, const char* file, int line,
     } else {
       DoRawLog(&buf, &size, "%s", kTruncated);
     }
-    absl::raw_logging_internal::SafeWriteToStderr(buffer, strlen(buffer));
+    SafeWriteToStderr(buffer, strlen(buffer));
   }
 #else
   static_cast<void>(format);
@@ -181,11 +183,18 @@ void RawLogVA(absl::LogSeverity severity, const char* file, int line,
   }
 }

+// Non-formatting version of RawLog().
+//
+// TODO(gfalcon): When string_view no longer depends on base, change this
+// interface to take its message as a string_view instead.
+void DefaultInternalLog(absl::LogSeverity severity, const char* file, int line,
+                        const std::string& message) {
+  RawLog(severity, file, line, "%.*s", static_cast<int>(message.size()),
+         message.data());
+}
+
 }  // namespace

-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace raw_logging_internal {
-
 void SafeWriteToStderr(const char *s, size_t len) {
 #if defined(ABSL_HAVE_SYSCALL_WRITE)
   syscall(SYS_write, STDERR_FILENO, s, len);
@@ -200,8 +209,6 @@ void SafeWriteToStderr(const char *s, size_t len) {
 #endif
 }

-void RawLog(absl::LogSeverity severity, const char* file, int line,
-            const char* format, ...) ABSL_PRINTF_ATTRIBUTE(4, 5);
 void RawLog(absl::LogSeverity severity, const char* file, int line,
             const char* format, ...) {
   va_list ap;
@@ -210,15 +217,6 @@ void RawLog(absl::LogSeverity severity, const char* file, int line,
   va_end(ap);
 }

-// Non-formatting version of RawLog().
-//
-// TODO(gfalcon): When string_view no longer depends on base, change this
-// interface to take its message as a string_view instead.
-static void DefaultInternalLog(absl::LogSeverity severity, const char* file,
-                               int line, const std::string& message) {
-  RawLog(severity, file, line, "%s", message.c_str());
-}
-
 bool RawLoggingFullySupported() {
 #ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
   return true;
@@ -231,6 +229,10 @@ ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_DLL
     absl::base_internal::AtomicHook<InternalLogFunction>
         internal_log_function(DefaultInternalLog);

+void RegisterLogPrefixHook(LogPrefixHook func) { log_prefix_hook.Store(func); }
+
+void RegisterAbortHook(AbortHook func) { abort_hook.Store(func); }
+
 void RegisterInternalLogFunction(InternalLogFunction func) {
   internal_log_function.Store(func);
 }

@@ -72,12 +72,14 @@
 //
 // The API is a subset of the above: each macro only takes two arguments. Use
 // StrCat if you need to build a richer message.
 #define ABSL_INTERNAL_LOG(severity, message)                                 \
   do {                                                                       \
     constexpr const char* absl_raw_logging_internal_filename = __FILE__;     \
     ::absl::raw_logging_internal::internal_log_function(                     \
         ABSL_RAW_LOGGING_INTERNAL_##severity,                                \
         absl_raw_logging_internal_filename, __LINE__, message);              \
+    if (ABSL_RAW_LOGGING_INTERNAL_##severity == ::absl::LogSeverity::kFatal) \
+      ABSL_INTERNAL_UNREACHABLE;                                             \
   } while (0)

 #define ABSL_INTERNAL_CHECK(condition, message) \
@@ -176,6 +178,14 @@ ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_DLL extern base_internal::AtomicHook<
     InternalLogFunction>
     internal_log_function;

+// Registers hooks of the above types. Only a single hook of each type may be
+// registered. It is an error to call these functions multiple times with
+// different input arguments.
+//
+// These functions are safe to call at any point during initialization; they do
+// not block or malloc, and are async-signal safe.
+void RegisterLogPrefixHook(LogPrefixHook func);
+void RegisterAbortHook(AbortHook func);
 void RegisterInternalLogFunction(InternalLogFunction func);

 }  // namespace raw_logging_internal
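A hedged sketch of installing one of the newly exported hooks (internal API; the AbortHook parameter list follows the alias declared in raw_logging.h, and `MyAbortHook`/`InstallHooks` are hypothetical names):

```cpp
#include "absl/base/internal/raw_logging.h"

// Illustrative abort hook, called with the message buffer just before a
// FATAL raw log aborts. Must remain async-signal safe.
void MyAbortHook(const char* file, int line, const char* buf_start,
                 const char* prefix_end, const char* buf_end) {
  // e.g. flush crash-reporting state here.
}

void InstallHooks() {
  // Registering twice with different hooks is an error.
  absl::raw_logging_internal::RegisterAbortHook(&MyAbortHook);
}
```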

@@ -125,8 +125,9 @@ void SpinLock::SlowLock() {
     // it as having a sleeper.
     if ((lock_value & kWaitTimeMask) == 0) {
       // Here, just "mark" that the thread is going to sleep. Don't store the
-      // lock wait time in the lock as that will cause the current lock
-      // owner to think it experienced contention.
+      // lock wait time in the lock -- the lock word stores the amount of time
+      // that the current holder waited before acquiring the lock, not the wait
+      // time of any thread currently waiting to acquire it.
       if (lockword_.compare_exchange_strong(
               lock_value, lock_value | kSpinLockSleeper,
               std::memory_order_relaxed, std::memory_order_relaxed)) {
@@ -140,6 +141,14 @@ void SpinLock::SlowLock() {
         // this thread obtains the lock.
         lock_value = TryLockInternal(lock_value, wait_cycles);
         continue;  // Skip the delay at the end of the loop.
+      } else if ((lock_value & kWaitTimeMask) == 0) {
+        // The lock is still held, without a waiter being marked, but something
+        // else about the lock word changed, causing our CAS to fail. For
+        // example, a new lock holder may have acquired the lock with
+        // kSpinLockDisabledScheduling set, whereas the previous holder had not
+        // set that flag. In this case, attempt again to mark ourselves as a
+        // waiter.
+        continue;
       }
     }

@@ -15,11 +15,8 @@
 //
 //  Most users requiring mutual exclusion should use Mutex.
-//  SpinLock is provided for use in three situations:
+//  SpinLock is provided for use in two situations:
 //   - for use in code that Mutex itself depends on
-//   - to get a faster fast-path release under low contention (without an
-//     atomic read-modify-write) In return, SpinLock has worse behaviour under
-//     contention, which is why Mutex is preferred in most situations.
 //   - for async signal safety (see below)

 // SpinLock is async signal safe. If a spinlock is used within a signal
@@ -140,8 +137,20 @@ class ABSL_LOCKABLE SpinLock {
   //
   // bit[0] encodes whether a lock is being held.
   // bit[1] encodes whether a lock uses cooperative scheduling.
-  // bit[2] encodes whether a lock disables scheduling.
+  // bit[2] encodes whether the current lock holder disabled scheduling when
+  //        acquiring the lock. Only set when kSpinLockHeld is also set.
   // bit[3:31] encodes time a lock spent on waiting as a 29-bit unsigned int.
+  //        This is set by the lock holder to indicate how long it waited on
+  //        the lock before eventually acquiring it. The number of cycles is
+  //        encoded as a 29-bit unsigned int, or in the case that the current
+  //        holder did not wait but another waiter is queued, the LSB
+  //        (kSpinLockSleeper) is set. The implementation does not explicitly
+  //        track the number of queued waiters beyond this. It must always be
+  //        assumed that waiters may exist if the current holder was required to
+  //        queue.
+  //
+  // Invariant: if the lock is not held, the value is either 0 or
+  // kSpinLockCooperative.
   static constexpr uint32_t kSpinLockHeld = 1;
   static constexpr uint32_t kSpinLockCooperative = 2;
   static constexpr uint32_t kSpinLockDisabledScheduling = 4;
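How the expanded encoding reads in practice, restated as a small decoder (illustrative only; the real accessors are private to SpinLock, and kWaitTimeMask mirrors the mask this header derives from the three flag bits):

```cpp
#include <cstdint>

constexpr uint32_t kSpinLockHeld = 1;
constexpr uint32_t kSpinLockCooperative = 2;
constexpr uint32_t kSpinLockDisabledScheduling = 4;
constexpr uint32_t kWaitTimeMask =
    ~(kSpinLockHeld | kSpinLockCooperative | kSpinLockDisabledScheduling);

// Illustrative decoder for the lock word described above.
bool IsHeld(uint32_t word) { return (word & kSpinLockHeld) != 0; }
bool IsCooperative(uint32_t word) { return (word & kSpinLockCooperative) != 0; }
uint32_t WaitCycleBits(uint32_t word) { return word & kWaitTimeMask; }
```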

@@ -20,7 +20,7 @@

 extern "C" {

-ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay(
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
     std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */,
     int /* loop */, absl::base_internal::SchedulingMode /* mode */) {
   // In Akaros, one must take care not to call anything that could cause a
@@ -29,7 +29,7 @@ ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay(
   // arbitrary code.
 }

-ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockWake(
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
     std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}

 }  // extern "C"

@@ -56,7 +56,7 @@ static_assert(sizeof(std::atomic<uint32_t>) == sizeof(int),

 extern "C" {

-ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay(
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
     std::atomic<uint32_t> *w, uint32_t value, int loop,
     absl::base_internal::SchedulingMode) {
   absl::base_internal::ErrnoSaver errno_saver;
@@ -66,8 +66,8 @@ ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay(
   syscall(SYS_futex, w, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, value, &tm);
 }

-ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockWake(std::atomic<uint32_t> *w,
-                                                  bool all) {
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
+    std::atomic<uint32_t> *w, bool all) {
   syscall(SYS_futex, w, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, all ? INT_MAX : 1, 0);
 }

@@ -25,7 +25,7 @@

 extern "C" {

-ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay(
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
     std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */, int loop,
     absl::base_internal::SchedulingMode /* mode */) {
   absl::base_internal::ErrnoSaver errno_saver;
@@ -40,7 +40,7 @@ ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay(
   }
 }

-ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockWake(
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
     std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}

 }  // extern "C"

@@ -43,18 +43,16 @@ uint32_t SpinLockWait(std::atomic<uint32_t> *w, int n,
                       const SpinLockWaitTransition trans[],
                       SchedulingMode scheduling_mode);

-// If possible, wake some thread that has called SpinLockDelay(w, ...). If
-// "all" is true, wake all such threads. This call is a hint, and on some
-// systems it may be a no-op; threads calling SpinLockDelay() will always wake
-// eventually even if SpinLockWake() is never called.
+// If possible, wake some thread that has called SpinLockDelay(w, ...). If `all`
+// is true, wake all such threads. On some systems, this may be a no-op; on
+// those systems, threads calling SpinLockDelay() will always wake eventually
+// even if SpinLockWake() is never called.
 void SpinLockWake(std::atomic<uint32_t> *w, bool all);

 // Wait for an appropriate spin delay on iteration "loop" of a
 // spin loop on location *w, whose previously observed value was "value".
 // SpinLockDelay() may do nothing, may yield the CPU, may sleep a clock tick,
-// or may wait for a delay that can be truncated by a call to SpinLockWake(w).
-// In all cases, it must return in bounded time even if SpinLockWake() is not
-// called.
+// or may wait for a call to SpinLockWake(w).
 void SpinLockDelay(std::atomic<uint32_t> *w, uint32_t value, int loop,
                    base_internal::SchedulingMode scheduling_mode);
@@ -73,21 +71,23 @@ ABSL_NAMESPACE_END
 // By changing our extension points to be extern "C", we dodge this
 // check.
 extern "C" {
-void AbslInternalSpinLockWake(std::atomic<uint32_t> *w, bool all);
-void AbslInternalSpinLockDelay(
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(std::atomic<uint32_t> *w,
+                                                      bool all);
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
     std::atomic<uint32_t> *w, uint32_t value, int loop,
     absl::base_internal::SchedulingMode scheduling_mode);
 }

 inline void absl::base_internal::SpinLockWake(std::atomic<uint32_t> *w,
                                               bool all) {
-  AbslInternalSpinLockWake(w, all);
+  ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(w, all);
 }

 inline void absl::base_internal::SpinLockDelay(
     std::atomic<uint32_t> *w, uint32_t value, int loop,
     absl::base_internal::SchedulingMode scheduling_mode) {
-  AbslInternalSpinLockDelay(w, value, loop, scheduling_mode);
+  ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)
+  (w, value, loop, scheduling_mode);
 }

 #endif  // ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_

@@ -20,9 +20,9 @@

 extern "C" {

-void AbslInternalSpinLockDelay(std::atomic<uint32_t>* /* lock_word */,
-                               uint32_t /* value */, int loop,
-                               absl::base_internal::SchedulingMode /* mode */) {
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
+    std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */, int loop,
+    absl::base_internal::SchedulingMode /* mode */) {
   if (loop == 0) {
   } else if (loop == 1) {
     Sleep(0);
@@ -31,7 +31,7 @@ void AbslInternalSpinLockDelay(std::atomic<uint32_t>* /* lock_word */,
   }
 }

-void AbslInternalSpinLockWake(std::atomic<uint32_t>* /* lock_word */,
-                              bool /* all */) {}
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
+    std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}

 }  // extern "C"

@@ -51,7 +51,6 @@ const char* StrErrorAdaptor(int errnum, char* buf, size_t buflen) {
 }

 std::string StrErrorInternal(int errnum) {
-  absl::base_internal::ErrnoSaver errno_saver;
   char buf[100];
   const char* str = StrErrorAdaptor(errnum, buf, sizeof buf);
   if (*str == '\0') {
@@ -76,6 +75,7 @@ std::array<std::string, kSysNerr>* NewStrErrorTable() {
 }  // namespace

 std::string StrError(int errnum) {
+  absl::base_internal::ErrnoSaver errno_saver;
   static const auto* table = NewStrErrorTable();
   if (errnum >= 0 && errnum < static_cast<int>(table->size())) {
     return (*table)[errnum];
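Moving the ErrnoSaver from StrErrorInternal to StrError presumably widens the guarantee to the cached-table path as well: the saver snapshots errno on entry and restores it on return. From the caller's side (a sketch; `Describe` is a hypothetical wrapper):

```cpp
#include <cerrno>
#include <string>

#include "absl/base/internal/strerror.h"

std::string Describe(int err) {
  errno = ERANGE;  // caller's errno
  std::string s = absl::base_internal::StrError(err);
  // errno is still ERANGE here, even though StrError may have called
  // strerror-family routines that clobber it internally.
  return s;
}
```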

@@ -62,12 +62,14 @@ TEST(StrErrorTest, MultipleThreads) {
       ++counter;
       errno = ERANGE;
       const std::string value = absl::base_internal::StrError(i);
+      // EXPECT_* could change errno. Stash it first.
+      int check_err = errno;
+      EXPECT_THAT(check_err, Eq(ERANGE));
       // Only the GNU implementation is guaranteed to provide the
       // string "Unknown error nnn". POSIX doesn't say anything.
       if (!absl::StartsWith(value, "Unknown error ")) {
-        EXPECT_THAT(absl::base_internal::StrError(i), Eq(expected_strings[i]));
+        EXPECT_THAT(value, Eq(expected_strings[i]));
       }
-      EXPECT_THAT(errno, Eq(ERANGE));
     }
   };

@@ -426,7 +426,7 @@ pid_t GetTID() {
 // userspace construct) to avoid unnecessary system calls. Without this caching,
 // it can take roughly 98ns, while it takes roughly 1ns with this caching.
 pid_t GetCachedTID() {
-#if ABSL_HAVE_THREAD_LOCAL
+#ifdef ABSL_HAVE_THREAD_LOCAL
   static thread_local pid_t thread_id = GetTID();
   return thread_id;
 #else
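The caching idiom generalizes: a function-local `thread_local` initialized from an expensive call runs that call once per thread, after which the lookup is a plain TLS read. A standalone sketch with a hypothetical name:

```cpp
#include <sys/types.h>
#include <unistd.h>

// Hypothetical analogue of GetCachedTID: getpid() runs once per thread;
// subsequent calls read the cached value.
pid_t CachedPid() {
  static thread_local pid_t pid = getpid();
  return pid;
}
```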

@@ -37,17 +37,28 @@ TEST(SysinfoTest, NumCPUs) {
       << "NumCPUs() should not have the default value of 0";
 }

+// Ensure that NominalCPUFrequency returns a reasonable value, or 1.00 on
+// platforms where the CPU frequency is not available through sysfs.
+//
+// POWER is particularly problematic here; some Linux kernels expose the CPU
+// frequency, while others do not. Since we can't predict a priori what a given
+// machine is going to do, just disable this test on POWER on Linux.
+#if !(defined(__linux) && (defined(__ppc64__) || defined(__PPC64__)))
 TEST(SysinfoTest, NominalCPUFrequency) {
-#if !(defined(__aarch64__) && defined(__linux__)) && !defined(__EMSCRIPTEN__)
-  EXPECT_GE(NominalCPUFrequency(), 1000.0)
-      << "NominalCPUFrequency() did not return a reasonable value";
-#else
-  // Aarch64 cannot read the CPU frequency from sysfs, so we get back 1.0.
-  // Emscripten does not have a sysfs to read from at all.
+  // Linux only exposes the CPU frequency on certain architectures, and
+  // Emscripten doesn't expose it at all.
+#if defined(__linux__) &&                                                  \
+        (defined(__aarch64__) || defined(__hppa__) || defined(__mips__) || \
+         defined(__riscv) || defined(__s390x__)) ||                        \
+    defined(__EMSCRIPTEN__)
   EXPECT_EQ(NominalCPUFrequency(), 1.0)
       << "CPU frequency detection was fixed! Please update unittest.";
+#else
+  EXPECT_GE(NominalCPUFrequency(), 1000.0)
+      << "NominalCPUFrequency() did not return a reasonable value";
 #endif
 }
+#endif

 TEST(SysinfoTest, GetTID) {
   EXPECT_EQ(GetTID(), GetTID());  // Basic compile and equality test.

@@ -23,6 +23,7 @@
 #include <cassert>
 #include <memory>

+#include "absl/base/attributes.h"
 #include "absl/base/call_once.h"
 #include "absl/base/internal/raw_logging.h"
 #include "absl/base/internal/spinlock.h"
@@ -53,9 +54,11 @@ void AllocateThreadIdentityKey(ThreadIdentityReclaimerFunction reclaimer) {
 // exist within a process (via dlopen() or similar), references to
 // thread_identity_ptr from each instance of the code will refer to
 // *different* instances of this ptr.
-#ifdef __GNUC__
+// Apple platforms have the visibility attribute, but issue a compile warning
+// that protected visibility is unsupported.
+#if ABSL_HAVE_ATTRIBUTE(visibility) && !defined(__APPLE__)
 __attribute__((visibility("protected")))
-#endif  // __GNUC__
+#endif  // ABSL_HAVE_ATTRIBUTE(visibility) && !defined(__APPLE__)
 #if ABSL_PER_THREAD_TLS
 // Prefer __thread to thread_local as benchmarks indicate it is a bit faster.
 ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity* thread_identity_ptr = nullptr;

@@ -32,6 +32,7 @@
 #include "absl/base/config.h"
 #include "absl/base/internal/per_thread_tls.h"
+#include "absl/base/optimization.h"

 namespace absl {
 ABSL_NAMESPACE_BEGIN
@@ -69,30 +70,28 @@ struct PerThreadSynch {
                           // is using this PerThreadSynch as a terminator. Its
                           // skip field must not be filled in because the loop
                           // might then skip over the terminator.
+  bool wake;              // This thread is to be woken from a Mutex.

-  // The wait parameters of the current wait. waitp is null if the
-  // thread is not waiting. Transitions from null to non-null must
-  // occur before the enqueue commit point (state = kQueued in
-  // Enqueue() and CondVarEnqueue()). Transitions from non-null to
-  // null must occur after the wait is finished (state = kAvailable in
-  // Mutex::Block() and CondVar::WaitCommon()). This field may be
-  // changed only by the thread that describes this PerThreadSynch. A
-  // special case is Fer(), which calls Enqueue() on another thread,
-  // but with an identical SynchWaitParams pointer, thus leaving the
-  // pointer unchanged.
-  SynchWaitParams *waitp;
+  // If "x" is on a waiter list for a mutex, "x->cond_waiter" is true iff the
+  // waiter is waiting on the mutex as part of a CV Wait or Mutex Await.
+  //
+  // The value of "x->cond_waiter" is meaningless if "x" is not on a
+  // Mutex waiter list.
+  bool cond_waiter;

-  bool suppress_fatal_errors;  // If true, try to proceed even in the face of
-                               // broken invariants. This is used within fatal
-                               // signal handlers to improve the chances of
-                               // debug logging information being output
-                               // successfully.
+  bool maybe_unlocking;        // Valid at head of Mutex waiter queue;
+                               // true if UnlockSlow could be searching
+                               // for a waiter to wake. Used for an optimization
+                               // in Enqueue(). true is always a valid value.
+                               // Can be reset to false when the unlocker or any
+                               // writer releases the lock, or a reader fully
+                               // releases the lock. It may not be set to false
+                               // by a reader that decrements the count to
+                               // non-zero. protected by mutex spinlock
+  bool suppress_fatal_errors;  // If true, try to proceed even in the face
+                               // of broken invariants. This is used within
+                               // fatal signal handlers to improve the
+                               // chances of debug logging information being
+                               // output successfully.

-  intptr_t readers;  // Number of readers in mutex.
-  int priority;      // Priority of thread (updated every so often).
-
-  // When priority will next be read (cycles).
-  int64_t next_priority_read_cycles;
+  int priority;                // Priority of thread (updated every so often).

   // State values:
   // kAvailable: This PerThreadSynch is available.
@@ -111,30 +110,30 @@ struct PerThreadSynch {
   };
   std::atomic<State> state;

-  bool maybe_unlocking;  // Valid at head of Mutex waiter queue;
-                         // true if UnlockSlow could be searching
-                         // for a waiter to wake. Used for an optimization
-                         // in Enqueue(). true is always a valid value.
-                         // Can be reset to false when the unlocker or any
-                         // writer releases the lock, or a reader fully releases
-                         // the lock. It may not be set to false by a reader
-                         // that decrements the count to non-zero.
-                         // protected by mutex spinlock
+  // The wait parameters of the current wait. waitp is null if the
+  // thread is not waiting. Transitions from null to non-null must
+  // occur before the enqueue commit point (state = kQueued in
+  // Enqueue() and CondVarEnqueue()). Transitions from non-null to
+  // null must occur after the wait is finished (state = kAvailable in
+  // Mutex::Block() and CondVar::WaitCommon()). This field may be
+  // changed only by the thread that describes this PerThreadSynch. A
+  // special case is Fer(), which calls Enqueue() on another thread,
+  // but with an identical SynchWaitParams pointer, thus leaving the
+  // pointer unchanged.
+  SynchWaitParams* waitp;

-  bool wake;  // This thread is to be woken from a Mutex.
-
-  // If "x" is on a waiter list for a mutex, "x->cond_waiter" is true iff the
-  // waiter is waiting on the mutex as part of a CV Wait or Mutex Await.
-  //
-  // The value of "x->cond_waiter" is meaningless if "x" is not on a
-  // Mutex waiter list.
-  bool cond_waiter;
+  intptr_t readers;  // Number of readers in mutex.
+
+  // When priority will next be read (cycles).
+  int64_t next_priority_read_cycles;

   // Locks held; used during deadlock detection.
   // Allocated in Synch_GetAllLocks() and freed in ReclaimThreadIdentity().
   SynchLocksHeld *all_locks;
 };

+// The instances of this class are allocated in NewThreadIdentity() with an
+// alignment of PerThreadSynch::kAlignment.
 struct ThreadIdentity {
   // Must be the first member. The Mutex implementation requires that
   // the PerThreadSynch object associated with each thread is
@@ -144,7 +143,7 @@ struct ThreadIdentity {
   // Private: Reserved for absl::synchronization_internal::Waiter.
   struct WaiterState {
-    char data[128];
+    alignas(void*) char data[128];
   } waiter_state;

   // Used by PerThreadSem::{Get,Set}ThreadBlockedCounter().
@@ -212,7 +211,9 @@ void ClearCurrentThreadIdentity();
 #define ABSL_THREAD_IDENTITY_MODE ABSL_FORCE_THREAD_IDENTITY_MODE
 #elif defined(_WIN32) && !defined(__MINGW32__)
 #define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+#elif defined(__APPLE__) && defined(ABSL_HAVE_THREAD_LOCAL)
+#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11
 #elif ABSL_PER_THREAD_TLS && defined(__GOOGLE_GRTE_VERSION__) && \
     (__GOOGLE_GRTE_VERSION__ >= 20140228L)
 // Support for async-safe TLS was specifically added in GRTEv4. It's not
 // present in the upstream eglibc.

@@ -18,6 +18,7 @@
 #include <functional>
 #include <new>
 #include <stdexcept>
+
 #include "absl/base/config.h"
 #include "absl/base/internal/raw_logging.h"
@@ -25,83 +26,186 @@ namespace absl {
 ABSL_NAMESPACE_BEGIN
 namespace base_internal {

+// NOTE: The various STL exception throwing functions are placed within the
+// #ifdef blocks so the symbols aren't exposed on platforms that don't support
+// them, such as the Android NDK. For example, ANGLE fails to link when building
+// within AOSP without them, since the STL functions don't exist.
 namespace {
+#ifdef ABSL_HAVE_EXCEPTIONS
 template <typename T>
 [[noreturn]] void Throw(const T& error) {
-#ifdef ABSL_HAVE_EXCEPTIONS
   throw error;
-#else
-  ABSL_RAW_LOG(FATAL, "%s", error.what());
-  std::abort();
-#endif
 }
+#endif
 }  // namespace

 void ThrowStdLogicError(const std::string& what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
   Throw(std::logic_error(what_arg));
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+  std::abort();
+#endif
 }
 void ThrowStdLogicError(const char* what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
   Throw(std::logic_error(what_arg));
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg);
+  std::abort();
+#endif
 }
 void ThrowStdInvalidArgument(const std::string& what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
   Throw(std::invalid_argument(what_arg));
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+  std::abort();
+#endif
 }
 void ThrowStdInvalidArgument(const char* what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
   Throw(std::invalid_argument(what_arg));
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg);
+  std::abort();
+#endif
 }
 void ThrowStdDomainError(const std::string& what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
   Throw(std::domain_error(what_arg));
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+  std::abort();
+#endif
 }
 void ThrowStdDomainError(const char* what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
   Throw(std::domain_error(what_arg));
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg);
+  std::abort();
+#endif
 }
 void ThrowStdLengthError(const std::string& what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
   Throw(std::length_error(what_arg));
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+  std::abort();
+#endif
 }
 void ThrowStdLengthError(const char* what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
   Throw(std::length_error(what_arg));
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg);
+  std::abort();
+#endif
 }
 void ThrowStdOutOfRange(const std::string& what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
   Throw(std::out_of_range(what_arg));
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+  std::abort();
+#endif
 }
 void ThrowStdOutOfRange(const char* what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
   Throw(std::out_of_range(what_arg));
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg);
+  std::abort();
+#endif
 }
 void ThrowStdRuntimeError(const std::string& what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
   Throw(std::runtime_error(what_arg));
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+  std::abort();
+#endif
 }
 void ThrowStdRuntimeError(const char* what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
   Throw(std::runtime_error(what_arg));
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg);
+  std::abort();
+#endif
 }
 void ThrowStdRangeError(const std::string& what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
   Throw(std::range_error(what_arg));
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+  std::abort();
+#endif
 }
 void ThrowStdRangeError(const char* what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
   Throw(std::range_error(what_arg));
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg);
+  std::abort();
+#endif
 }
 void ThrowStdOverflowError(const std::string& what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
   Throw(std::overflow_error(what_arg));
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+  std::abort();
+#endif
 }
 void ThrowStdOverflowError(const char* what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
   Throw(std::overflow_error(what_arg));
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg);
+  std::abort();
+#endif
 }
 void ThrowStdUnderflowError(const std::string& what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
   Throw(std::underflow_error(what_arg));
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+  std::abort();
+#endif
 }
 void ThrowStdUnderflowError(const char* what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::underflow_error(what_arg)); Throw(std::underflow_error(what_arg));
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg);
std::abort();
#endif
} }
void ThrowStdBadFunctionCall() { Throw(std::bad_function_call()); } void ThrowStdBadFunctionCall() {
#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::bad_function_call());
#else
std::abort();
#endif
}
void ThrowStdBadAlloc() { Throw(std::bad_alloc()); } void ThrowStdBadAlloc() {
#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::bad_alloc());
#else
std::abort();
#endif
}
} // namespace base_internal } // namespace base_internal
ABSL_NAMESPACE_END ABSL_NAMESPACE_END
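A minimal caller-side sketch (Buffer is a hypothetical container) of why these delegates exist: the throw site is compiled once here, so headers can bounds-check without caring whether exceptions are enabled. The FixedArray::at() hunk later in this commit uses exactly this call.

#include <cstddef>

#include "absl/base/internal/throw_delegate.h"

class Buffer {  // Hypothetical container.
 public:
  char& at(std::size_t i) {
    if (i >= kSize) {
      // Throws std::out_of_range when exceptions are enabled; otherwise
      // logs fatally and aborts, per the #ifdef blocks above.
      absl::base_internal::ThrowStdOutOfRange("Buffer::at failed bounds check");
    }
    return data_[i];
  }

 private:
  static constexpr std::size_t kSize = 16;
  char data_[kSize] = {};
};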

@ -31,80 +31,6 @@
// The unaligned API is C++ only. The declarations use C++ features // The unaligned API is C++ only. The declarations use C++ features
// (namespaces, inline) which are absent or incompatible in C. // (namespaces, inline) which are absent or incompatible in C.
#if defined(__cplusplus) #if defined(__cplusplus)
#if defined(ABSL_HAVE_ADDRESS_SANITIZER) || \
defined(ABSL_HAVE_THREAD_SANITIZER) || defined(ABSL_HAVE_MEMORY_SANITIZER)
// Consider we have an unaligned load/store of 4 bytes from address 0x...05.
// AddressSanitizer will treat it as a 3-byte access to the range 05:07 and
// will miss a bug if 08 is the first unaddressable byte.
// ThreadSanitizer will also treat this as a 3-byte access to 05:07 and will
// miss a race between this access and some other accesses to 08.
// MemorySanitizer will correctly propagate the shadow on unaligned stores
// and correctly report bugs on unaligned loads, but it may not properly
// update and report the origin of the uninitialized memory.
// For all three tools, replacing an unaligned access with a tool-specific
// callback solves the problem.
// Make sure uint16_t/uint32_t/uint64_t are defined.
#include <stdint.h>
extern "C" {
uint16_t __sanitizer_unaligned_load16(const void *p);
uint32_t __sanitizer_unaligned_load32(const void *p);
uint64_t __sanitizer_unaligned_load64(const void *p);
void __sanitizer_unaligned_store16(void *p, uint16_t v);
void __sanitizer_unaligned_store32(void *p, uint32_t v);
void __sanitizer_unaligned_store64(void *p, uint64_t v);
} // extern "C"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
inline uint16_t UnalignedLoad16(const void *p) {
return __sanitizer_unaligned_load16(p);
}
inline uint32_t UnalignedLoad32(const void *p) {
return __sanitizer_unaligned_load32(p);
}
inline uint64_t UnalignedLoad64(const void *p) {
return __sanitizer_unaligned_load64(p);
}
inline void UnalignedStore16(void *p, uint16_t v) {
__sanitizer_unaligned_store16(p, v);
}
inline void UnalignedStore32(void *p, uint32_t v) {
__sanitizer_unaligned_store32(p, v);
}
inline void UnalignedStore64(void *p, uint64_t v) {
__sanitizer_unaligned_store64(p, v);
}
} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace absl
#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) \
(absl::base_internal::UnalignedLoad16(_p))
#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) \
(absl::base_internal::UnalignedLoad32(_p))
#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) \
(absl::base_internal::UnalignedLoad64(_p))
#define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \
(absl::base_internal::UnalignedStore16(_p, _val))
#define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \
(absl::base_internal::UnalignedStore32(_p, _val))
#define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \
(absl::base_internal::UnalignedStore64(_p, _val))
#else
namespace absl { namespace absl {
ABSL_NAMESPACE_BEGIN ABSL_NAMESPACE_BEGIN
namespace base_internal { namespace base_internal {
@ -151,8 +77,6 @@ ABSL_NAMESPACE_END
#define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \ #define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \
(absl::base_internal::UnalignedStore64(_p, _val)) (absl::base_internal::UnalignedStore64(_p, _val))
#endif
#endif // defined(__cplusplus), end of unaligned API #endif // defined(__cplusplus), end of unaligned API
#endif // ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_ #endif // ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_
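With the sanitizer-specific branch removed above, every build goes through the remaining plain definitions (whose macro tail is shown in the surviving context lines). A self-contained sketch of that memcpy-based fallback: memcpy is the portable way to express an unaligned access without undefined behavior, and compilers lower it to a single load/store where the target allows.

#include <cstdint>
#include <cstring>

inline std::uint32_t UnalignedLoad32(const void* p) {
  std::uint32_t t;
  std::memcpy(&t, p, sizeof(t));  // Safe regardless of p's alignment.
  return t;
}

inline void UnalignedStore32(void* p, std::uint32_t v) {
  std::memcpy(p, &v, sizeof(v));
}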

@ -123,9 +123,7 @@ double UnscaledCycleClock::Frequency() {
#pragma intrinsic(__rdtsc) #pragma intrinsic(__rdtsc)
int64_t UnscaledCycleClock::Now() { int64_t UnscaledCycleClock::Now() { return __rdtsc(); }
return __rdtsc();
}
double UnscaledCycleClock::Frequency() { double UnscaledCycleClock::Frequency() {
return base_internal::NominalCPUFrequency(); return base_internal::NominalCPUFrequency();
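A hypothetical usage sketch of this internal API: Frequency() reports cycles per second (NominalCPUFrequency), so a cycle delta taken from Now() divides directly to seconds.

#include <cstdint>

#include "absl/base/internal/unscaledcycleclock.h"

double ElapsedSeconds(int64_t start_cycles, int64_t end_cycles) {
  return (end_cycles - start_cycles) /
         absl::base_internal::UnscaledCycleClock::Frequency();
}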

@ -12,8 +12,8 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
#ifndef ABSL_BASE_INTERNAL_LOG_SEVERITY_H_ #ifndef ABSL_BASE_LOG_SEVERITY_H_
#define ABSL_BASE_INTERNAL_LOG_SEVERITY_H_ #define ABSL_BASE_LOG_SEVERITY_H_
#include <array> #include <array>
#include <ostream> #include <ostream>
@ -36,7 +36,7 @@ ABSL_NAMESPACE_BEGIN
// such values to a defined severity level; however, in some cases values other // such values to a defined severity level; however, in some cases values other

// than the defined levels are useful for comparison. // than the defined levels are useful for comparison.
// //
// Exmaple: // Example:
// //
// // Effectively disables all logging: // // Effectively disables all logging:
// SetMinLogLevel(static_cast<absl::LogSeverity>(100)); // SetMinLogLevel(static_cast<absl::LogSeverity>(100));
@ -118,4 +118,4 @@ std::ostream& operator<<(std::ostream& os, absl::LogSeverity s);
ABSL_NAMESPACE_END ABSL_NAMESPACE_END
} // namespace absl } // namespace absl
#endif // ABSL_BASE_INTERNAL_LOG_SEVERITY_H_ #endif // ABSL_BASE_LOG_SEVERITY_H_

@ -144,4 +144,15 @@ ABSL_NAMESPACE_END
#define ABSL_INTERNAL_RETHROW do {} while (false) #define ABSL_INTERNAL_RETHROW do {} while (false)
#endif // ABSL_HAVE_EXCEPTIONS #endif // ABSL_HAVE_EXCEPTIONS
// `ABSL_INTERNAL_UNREACHABLE` is an unreachable statement. A program which
// reaches one has undefined behavior, and the compiler may optimize
// accordingly.
#if defined(__GNUC__) || ABSL_HAVE_BUILTIN(__builtin_unreachable)
#define ABSL_INTERNAL_UNREACHABLE __builtin_unreachable()
#elif defined(_MSC_VER)
#define ABSL_INTERNAL_UNREACHABLE __assume(0)
#else
#define ABSL_INTERNAL_UNREACHABLE
#endif
#endif // ABSL_BASE_MACROS_H_ #endif // ABSL_BASE_MACROS_H_
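A short sketch (hypothetical enum and function) of where the new macro applies: after a switch covering every enumerator, marking the tail unreachable lets the compiler drop the fall-through path instead of warning about a missing return.

enum class Color { kRed, kGreen, kBlue };

int ToIndex(Color c) {
  switch (c) {
    case Color::kRed: return 0;
    case Color::kGreen: return 1;
    case Color::kBlue: return 2;
  }
  ABSL_INTERNAL_UNREACHABLE;  // Undefined behavior if ever reached.
}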

@ -22,13 +22,15 @@
#ifndef ABSL_BASE_OPTIMIZATION_H_ #ifndef ABSL_BASE_OPTIMIZATION_H_
#define ABSL_BASE_OPTIMIZATION_H_ #define ABSL_BASE_OPTIMIZATION_H_
#include <assert.h>
#include "absl/base/config.h" #include "absl/base/config.h"
// ABSL_BLOCK_TAIL_CALL_OPTIMIZATION // ABSL_BLOCK_TAIL_CALL_OPTIMIZATION
// //
// Instructs the compiler to avoid optimizing tail-call recursion. Use of this // Instructs the compiler to avoid optimizing tail-call recursion. This macro is
// macro is useful when you wish to preserve the existing function order within // useful when you wish to preserve the existing function order within a stack
// a stack trace for logging, debugging, or profiling purposes. // trace for logging, debugging, or profiling purposes.
// //
// Example: // Example:
// //
@ -104,9 +106,10 @@
// Cacheline aligning objects properly allows constructive memory sharing and // Cacheline aligning objects properly allows constructive memory sharing and
// prevents destructive (or "false") memory sharing. // prevents destructive (or "false") memory sharing.
// //
// NOTE: this macro should be replaced with usage of `alignas()` using // NOTE: callers should replace uses of this macro with `alignas()` using
// `std::hardware_constructive_interference_size` and/or // `std::hardware_constructive_interference_size` and/or
// `std::hardware_destructive_interference_size` when available within C++17. // `std::hardware_destructive_interference_size` when C++17 becomes available to
// them.
// //
// See http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0154r1.html // See http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0154r1.html
// for more information. // for more information.
@ -179,7 +182,7 @@
#endif #endif
// ABSL_INTERNAL_ASSUME(cond) // ABSL_INTERNAL_ASSUME(cond)
// Informs the compiler than a condition is always true and that it can assume // Informs the compiler that a condition is always true and that it can assume
// it to be true for optimization purposes. The call has undefined behavior if // it to be true for optimization purposes. The call has undefined behavior if
// the condition is false. // the condition is false.
// In !NDEBUG mode, the condition is checked with an assert(). // In !NDEBUG mode, the condition is checked with an assert().
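A minimal sketch of the documented contract (hypothetical function): the caller promises the divisor is nonzero, so the compiler may elide its zero check; in !NDEBUG builds the promise is enforced with assert(), per the note above.

int DivideByNonZero(int num, int denom) {
  ABSL_INTERNAL_ASSUME(denom != 0);  // Undefined behavior if the promise is broken.
  return num / denom;
}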
@ -216,7 +219,7 @@
// This macro forces a small unique name on static file-level symbols like // This macro forces a small unique name on static file-level symbols like
// static local variables or static functions. This is intended to be used in // static local variables or static functions. This is intended to be used in
// macro definitions to optimize the cost of generated code. Do NOT use it on // macro definitions to optimize the cost of generated code. Do NOT use it on
// symbols exported from translation unit since it may casue a link time // symbols exported from translation unit since it may cause a link time
// conflict. // conflict.
// //
// Example: // Example:

@ -206,7 +206,7 @@
// allowed. // allowed.
#define ABSL_OPTION_USE_INLINE_NAMESPACE 1 #define ABSL_OPTION_USE_INLINE_NAMESPACE 1
#define ABSL_OPTION_INLINE_NAMESPACE_NAME lts_2020_09_23 #define ABSL_OPTION_INLINE_NAMESPACE_NAME lts_20210324
// ABSL_OPTION_HARDENED // ABSL_OPTION_HARDENED
// //

@ -14,7 +14,6 @@
// //
// This file is a forwarding header for other headers containing various // This file is a forwarding header for other headers containing various
// portability macros and functions. // portability macros and functions.
// This file is used for both C and C++!
#ifndef ABSL_BASE_PORT_H_ #ifndef ABSL_BASE_PORT_H_
#define ABSL_BASE_PORT_H_ #define ABSL_BASE_PORT_H_

@ -92,6 +92,7 @@ static void TestFunction(int thread_salt, SpinLock* spinlock) {
static void ThreadedTest(SpinLock* spinlock) { static void ThreadedTest(SpinLock* spinlock) {
std::vector<std::thread> threads; std::vector<std::thread> threads;
threads.reserve(kNumThreads);
for (int i = 0; i < kNumThreads; ++i) { for (int i = 0; i < kNumThreads; ++i) {
threads.push_back(std::thread(TestFunction, i, spinlock)); threads.push_back(std::thread(TestFunction, i, spinlock));
} }

@ -317,7 +317,7 @@ namespace base_internal {
// Takes a reference to a guarded data member, and returns an unguarded // Takes a reference to a guarded data member, and returns an unguarded
// reference. // reference.
// Do not used this function directly, use ABSL_TS_UNCHECKED_READ instead. // Do not use this function directly, use ABSL_TS_UNCHECKED_READ instead.
template <typename T> template <typename T>
inline const T& ts_unchecked_read(const T& v) ABSL_NO_THREAD_SAFETY_ANALYSIS { inline const T& ts_unchecked_read(const T& v) ABSL_NO_THREAD_SAFETY_ANALYSIS {
return v; return v;
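A hypothetical sketch of the intended entry point: call the public ABSL_TS_UNCHECKED_READ macro on a guarded member rather than ts_unchecked_read() directly.

#include "absl/base/thread_annotations.h"
#include "absl/synchronization/mutex.h"

class Counter {  // Hypothetical class.
 public:
  // Deliberately unsynchronized read; the macro suppresses the analysis
  // warning that reading value_ without holding mu_ would otherwise raise.
  int UnsafePeek() const { return ABSL_TS_UNCHECKED_READ(value_); }

 private:
  mutable absl::Mutex mu_;
  int value_ ABSL_GUARDED_BY(mu_) = 0;
};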

@ -1,38 +0,0 @@
#
# Copyright 2018 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates config_setting that allows selecting based on 'compiler' value."""
def create_llvm_config(name, visibility):
# The "do_not_use_tools_cpp_compiler_present" attribute exists to
# distinguish between older versions of Bazel that do not support
# "@bazel_tools//tools/cpp:compiler" flag_value, and newer ones that do.
# In the future, the only way to select on the compiler will be through
# flag_values{"@bazel_tools//tools/cpp:compiler"} and the else branch can
# be removed.
if hasattr(cc_common, "do_not_use_tools_cpp_compiler_present"):
native.config_setting(
name = name,
flag_values = {
"@bazel_tools//tools/cpp:compiler": "llvm",
},
visibility = visibility,
)
else:
native.config_setting(
name = name,
values = {"compiler": "llvm"},
visibility = visibility,
)

@ -599,12 +599,12 @@ cc_library(
":hashtablez_sampler", ":hashtablez_sampler",
":have_sse", ":have_sse",
":layout", ":layout",
"//absl/base:bits",
"//absl/base:config", "//absl/base:config",
"//absl/base:core_headers", "//absl/base:core_headers",
"//absl/base:endian", "//absl/base:endian",
"//absl/memory", "//absl/memory",
"//absl/meta:type_traits", "//absl/meta:type_traits",
"//absl/numeric:bits",
"//absl/utility", "//absl/utility",
], ],
) )
@ -630,6 +630,45 @@ cc_test(
], ],
) )
cc_binary(
name = "raw_hash_set_benchmark",
testonly = 1,
srcs = ["internal/raw_hash_set_benchmark.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
tags = ["benchmark"],
visibility = ["//visibility:private"],
deps = [
":hash_function_defaults",
":raw_hash_set",
"//absl/base:raw_logging_internal",
"//absl/strings:str_format",
"@com_github_google_benchmark//:benchmark_main",
],
)
cc_binary(
name = "raw_hash_set_probe_benchmark",
testonly = 1,
srcs = ["internal/raw_hash_set_probe_benchmark.cc"],
copts = ABSL_TEST_COPTS,
linkopts = select({
"//conditions:default": [],
}) + ABSL_DEFAULT_LINKOPTS,
tags = ["benchmark"],
visibility = ["//visibility:private"],
deps = [
":flat_hash_map",
":hash_function_defaults",
":hashtable_debug",
":raw_hash_set",
"//absl/random",
"//absl/random:distributions",
"//absl/strings",
"//absl/strings:str_format",
],
)
cc_test( cc_test(
name = "raw_hash_set_allocator_test", name = "raw_hash_set_allocator_test",
size = "small", size = "small",
@ -677,6 +716,22 @@ cc_test(
], ],
) )
cc_binary(
name = "layout_benchmark",
testonly = 1,
srcs = ["internal/layout_benchmark.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
tags = ["benchmark"],
visibility = ["//visibility:private"],
deps = [
":layout",
"//absl/base:core_headers",
"//absl/base:raw_logging_internal",
"@com_github_google_benchmark//:benchmark_main",
],
)
cc_library( cc_library(
name = "tracked", name = "tracked",
testonly = 1, testonly = 1,

@ -14,15 +14,6 @@
# limitations under the License. # limitations under the License.
# #
# This is deprecated and will be removed in the future. It also doesn't do
# anything anyways. Prefer to use the library associated with the API you are
# using.
absl_cc_library(
NAME
container
PUBLIC
)
absl_cc_library( absl_cc_library(
NAME NAME
btree btree

@ -26,6 +26,7 @@
#include <unordered_set> #include <unordered_set>
#include <vector> #include <vector>
#include "benchmark/benchmark.h"
#include "absl/base/internal/raw_logging.h" #include "absl/base/internal/raw_logging.h"
#include "absl/container/btree_map.h" #include "absl/container/btree_map.h"
#include "absl/container/btree_set.h" #include "absl/container/btree_set.h"
@ -39,7 +40,6 @@
#include "absl/strings/cord.h" #include "absl/strings/cord.h"
#include "absl/strings/str_format.h" #include "absl/strings/str_format.h"
#include "absl/time/time.h" #include "absl/time/time.h"
#include "benchmark/benchmark.h"
namespace absl { namespace absl {
ABSL_NAMESPACE_BEGIN ABSL_NAMESPACE_BEGIN
@ -101,39 +101,6 @@ void BM_InsertSorted(benchmark::State& state) {
BM_InsertImpl<T>(state, true); BM_InsertImpl<T>(state, true);
} }
// container::insert sometimes returns a pair<iterator, bool> and sometimes
// returns an iterator (for multi- containers).
template <typename Iter>
Iter GetIterFromInsert(const std::pair<Iter, bool>& pair) {
return pair.first;
}
template <typename Iter>
Iter GetIterFromInsert(const Iter iter) {
return iter;
}
// Benchmark insertion of values into a container at the end.
template <typename T>
void BM_InsertEnd(benchmark::State& state) {
using V = typename remove_pair_const<typename T::value_type>::type;
typename KeyOfValue<typename T::key_type, V>::type key_of_value;
T container;
const int kSize = 10000;
for (int i = 0; i < kSize; ++i) {
container.insert(Generator<V>(kSize)(i));
}
V v = Generator<V>(kSize)(kSize - 1);
typename T::key_type k = key_of_value(v);
auto it = container.find(k);
while (state.KeepRunning()) {
// Repeatedly removing then adding v.
container.erase(it);
it = GetIterFromInsert(container.insert(v));
}
}
// Benchmark inserting the first few elements in a container. In b-tree, this is // Benchmark inserting the first few elements in a container. In b-tree, this is
// when the root node grows. // when the root node grows.
template <typename T> template <typename T>
@ -513,7 +480,6 @@ BTREE_TYPES(Time);
#define MY_BENCHMARK3(type) \ #define MY_BENCHMARK3(type) \
MY_BENCHMARK4(type, Insert); \ MY_BENCHMARK4(type, Insert); \
MY_BENCHMARK4(type, InsertSorted); \ MY_BENCHMARK4(type, InsertSorted); \
MY_BENCHMARK4(type, InsertEnd); \
MY_BENCHMARK4(type, InsertSmall); \ MY_BENCHMARK4(type, InsertSmall); \
MY_BENCHMARK4(type, Lookup); \ MY_BENCHMARK4(type, Lookup); \
MY_BENCHMARK4(type, FullLookup); \ MY_BENCHMARK4(type, FullLookup); \

@ -384,9 +384,8 @@ class btree_map
// btree_map::equal_range() // btree_map::equal_range()
// //
// Returns a closed range [first, last], defined by a `std::pair` of two // Returns a half-open range [first, last), defined by a `std::pair` of two
// iterators, containing all elements with the passed key in the // iterators, containing all elements with the passed key in the `btree_map`.
// `btree_map`.
using Base::equal_range; using Base::equal_range;
// btree_map::find() // btree_map::find()
@ -709,7 +708,7 @@ class btree_multimap
// btree_multimap::equal_range() // btree_multimap::equal_range()
// //
// Returns a closed range [first, last], defined by a `std::pair` of two // Returns a half-open range [first, last), defined by a `std::pair` of two
// iterators, containing all elements with the passed key in the // iterators, containing all elements with the passed key in the
// `btree_multimap`. // `btree_multimap`.
using Base::equal_range; using Base::equal_range;
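A small sketch (hypothetical contents) of the corrected contract: equal_range() returns a half-open range, so iterating [first, last) visits exactly the elements with the given key.

#include <string>

#include "absl/container/btree_map.h"

void Demo() {
  absl::btree_multimap<int, std::string> m = {{1, "a"}, {1, "b"}, {2, "c"}};
  const auto range = m.equal_range(1);
  for (auto it = range.first; it != range.second; ++it) {
    // Visits {1, "a"} and {1, "b"}; range.second already points past them.
  }
}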

@ -55,6 +55,7 @@ using ::testing::ElementsAreArray;
using ::testing::IsEmpty; using ::testing::IsEmpty;
using ::testing::IsNull; using ::testing::IsNull;
using ::testing::Pair; using ::testing::Pair;
using ::testing::SizeIs;
template <typename T, typename U> template <typename T, typename U>
void CheckPairEquals(const T &x, const U &y) { void CheckPairEquals(const T &x, const U &y) {
@ -1182,6 +1183,103 @@ TEST(Btree, RangeCtorSanity) {
EXPECT_EQ(1, tmap.size()); EXPECT_EQ(1, tmap.size());
} }
} // namespace
class BtreeNodePeer {
public:
// Yields the size of a leaf node with a specific number of values.
template <typename ValueType>
constexpr static size_t GetTargetNodeSize(size_t target_values_per_node) {
return btree_node<
set_params<ValueType, std::less<ValueType>, std::allocator<ValueType>,
/*TargetNodeSize=*/256, // This parameter isn't used here.
/*Multi=*/false>>::SizeWithNSlots(target_values_per_node);
}
// Yields the number of slots in a (non-root) leaf node for this btree.
template <typename Btree>
constexpr static size_t GetNumSlotsPerNode() {
return btree_node<typename Btree::params_type>::kNodeSlots;
}
template <typename Btree>
constexpr static size_t GetMaxFieldType() {
return std::numeric_limits<
typename btree_node<typename Btree::params_type>::field_type>::max();
}
template <typename Btree>
constexpr static bool UsesLinearNodeSearch() {
return btree_node<typename Btree::params_type>::use_linear_search::value;
}
};
namespace {
class BtreeMapTest : public ::testing::Test {
public:
struct Key {};
struct Cmp {
template <typename T>
bool operator()(T, T) const {
return false;
}
};
struct KeyLin {
using absl_btree_prefer_linear_node_search = std::true_type;
};
struct CmpLin : Cmp {
using absl_btree_prefer_linear_node_search = std::true_type;
};
struct KeyBin {
using absl_btree_prefer_linear_node_search = std::false_type;
};
struct CmpBin : Cmp {
using absl_btree_prefer_linear_node_search = std::false_type;
};
template <typename K, typename C>
static bool IsLinear() {
return BtreeNodePeer::UsesLinearNodeSearch<absl::btree_map<K, int, C>>();
}
};
TEST_F(BtreeMapTest, TestLinearSearchPreferredForKeyLinearViaAlias) {
// Test requesting linear search by directly exporting an alias.
EXPECT_FALSE((IsLinear<Key, Cmp>()));
EXPECT_TRUE((IsLinear<KeyLin, Cmp>()));
EXPECT_TRUE((IsLinear<Key, CmpLin>()));
EXPECT_TRUE((IsLinear<KeyLin, CmpLin>()));
}
TEST_F(BtreeMapTest, LinearChoiceTree) {
// Cmp has precedence, and is forcing binary
EXPECT_FALSE((IsLinear<Key, CmpBin>()));
EXPECT_FALSE((IsLinear<KeyLin, CmpBin>()));
EXPECT_FALSE((IsLinear<KeyBin, CmpBin>()));
EXPECT_FALSE((IsLinear<int, CmpBin>()));
EXPECT_FALSE((IsLinear<std::string, CmpBin>()));
// Cmp has precedence, and is forcing linear
EXPECT_TRUE((IsLinear<Key, CmpLin>()));
EXPECT_TRUE((IsLinear<KeyLin, CmpLin>()));
EXPECT_TRUE((IsLinear<KeyBin, CmpLin>()));
EXPECT_TRUE((IsLinear<int, CmpLin>()));
EXPECT_TRUE((IsLinear<std::string, CmpLin>()));
// Cmp has no preference, Key determines linear vs binary.
EXPECT_FALSE((IsLinear<Key, Cmp>()));
EXPECT_TRUE((IsLinear<KeyLin, Cmp>()));
EXPECT_FALSE((IsLinear<KeyBin, Cmp>()));
// arithmetic key w/ std::less or std::greater: linear
EXPECT_TRUE((IsLinear<int, std::less<int>>()));
EXPECT_TRUE((IsLinear<double, std::greater<double>>()));
// arithmetic key w/ custom compare: binary
EXPECT_FALSE((IsLinear<int, Cmp>()));
// non-arithmetic key: binary
EXPECT_FALSE((IsLinear<std::string, std::less<std::string>>()));
}
TEST(Btree, BtreeMapCanHoldMoveOnlyTypes) { TEST(Btree, BtreeMapCanHoldMoveOnlyTypes) {
absl::btree_map<std::string, std::unique_ptr<std::string>> m; absl::btree_map<std::string, std::unique_ptr<std::string>> m;
@ -1327,34 +1425,6 @@ TEST(Btree, RValueInsert) {
EXPECT_EQ(tracker.swaps(), 0); EXPECT_EQ(tracker.swaps(), 0);
} }
} // namespace
class BtreeNodePeer {
public:
// Yields the size of a leaf node with a specific number of values.
template <typename ValueType>
constexpr static size_t GetTargetNodeSize(size_t target_values_per_node) {
return btree_node<
set_params<ValueType, std::less<ValueType>, std::allocator<ValueType>,
/*TargetNodeSize=*/256, // This parameter isn't used here.
/*Multi=*/false>>::SizeWithNValues(target_values_per_node);
}
// Yields the number of values in a (non-root) leaf node for this set.
template <typename Set>
constexpr static size_t GetNumValuesPerNode() {
return btree_node<typename Set::params_type>::kNodeValues;
}
template <typename Set>
constexpr static size_t GetMaxFieldType() {
return std::numeric_limits<
typename btree_node<typename Set::params_type>::field_type>::max();
}
};
namespace {
// A btree set with a specific number of values per node. // A btree set with a specific number of values per node.
template <typename Key, int TargetValuesPerNode, typename Cmp = std::less<Key>> template <typename Key, int TargetValuesPerNode, typename Cmp = std::less<Key>>
class SizedBtreeSet class SizedBtreeSet
@ -1388,7 +1458,7 @@ void ExpectOperationCounts(const int expected_moves,
TEST(Btree, MovesComparisonsCopiesSwapsTracking) { TEST(Btree, MovesComparisonsCopiesSwapsTracking) {
InstanceTracker tracker; InstanceTracker tracker;
// Note: this is the minimum number of values per node. // Note: this is the minimum number of values per node.
SizedBtreeSet<MovableOnlyInstance, /*TargetValuesPerNode=*/3> set3; SizedBtreeSet<MovableOnlyInstance, /*TargetValuesPerNode=*/4> set4;
// Note: this is the default number of values per node for a set of int32s // Note: this is the default number of values per node for a set of int32s
// (with 64-bit pointers). // (with 64-bit pointers).
SizedBtreeSet<MovableOnlyInstance, /*TargetValuesPerNode=*/61> set61; SizedBtreeSet<MovableOnlyInstance, /*TargetValuesPerNode=*/61> set61;
@ -1399,28 +1469,28 @@ TEST(Btree, MovesComparisonsCopiesSwapsTracking) {
std::vector<int> values = std::vector<int> values =
GenerateValuesWithSeed<int>(10000, 1 << 22, /*seed=*/23); GenerateValuesWithSeed<int>(10000, 1 << 22, /*seed=*/23);
EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<decltype(set3)>(), 3); EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<decltype(set4)>(), 4);
EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<decltype(set61)>(), 61); EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<decltype(set61)>(), 61);
EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<decltype(set100)>(), 100); EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<decltype(set100)>(), 100);
if (sizeof(void *) == 8) { if (sizeof(void *) == 8) {
EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<absl::btree_set<int32_t>>(), EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<absl::btree_set<int32_t>>(),
BtreeNodePeer::GetNumValuesPerNode<decltype(set61)>()); BtreeNodePeer::GetNumSlotsPerNode<decltype(set61)>());
} }
// Test key insertion/deletion in random order. // Test key insertion/deletion in random order.
ExpectOperationCounts(45281, 132551, values, &tracker, &set3); ExpectOperationCounts(56540, 134212, values, &tracker, &set4);
ExpectOperationCounts(386718, 129807, values, &tracker, &set61); ExpectOperationCounts(386718, 129807, values, &tracker, &set61);
ExpectOperationCounts(586761, 130310, values, &tracker, &set100); ExpectOperationCounts(586761, 130310, values, &tracker, &set100);
// Test key insertion/deletion in sorted order. // Test key insertion/deletion in sorted order.
std::sort(values.begin(), values.end()); std::sort(values.begin(), values.end());
ExpectOperationCounts(26638, 92134, values, &tracker, &set3); ExpectOperationCounts(24972, 85563, values, &tracker, &set4);
ExpectOperationCounts(20208, 87757, values, &tracker, &set61); ExpectOperationCounts(20208, 87757, values, &tracker, &set61);
ExpectOperationCounts(20124, 96583, values, &tracker, &set100); ExpectOperationCounts(20124, 96583, values, &tracker, &set100);
// Test key insertion/deletion in reverse sorted order. // Test key insertion/deletion in reverse sorted order.
std::reverse(values.begin(), values.end()); std::reverse(values.begin(), values.end());
ExpectOperationCounts(49951, 119325, values, &tracker, &set3); ExpectOperationCounts(54949, 127531, values, &tracker, &set4);
ExpectOperationCounts(338813, 118266, values, &tracker, &set61); ExpectOperationCounts(338813, 118266, values, &tracker, &set61);
ExpectOperationCounts(534529, 125279, values, &tracker, &set100); ExpectOperationCounts(534529, 125279, values, &tracker, &set100);
} }
@ -1437,9 +1507,9 @@ struct MovableOnlyInstanceThreeWayCompare {
TEST(Btree, MovesComparisonsCopiesSwapsTrackingThreeWayCompare) { TEST(Btree, MovesComparisonsCopiesSwapsTrackingThreeWayCompare) {
InstanceTracker tracker; InstanceTracker tracker;
// Note: this is the minimum number of values per node. // Note: this is the minimum number of values per node.
SizedBtreeSet<MovableOnlyInstance, /*TargetValuesPerNode=*/3, SizedBtreeSet<MovableOnlyInstance, /*TargetValuesPerNode=*/4,
MovableOnlyInstanceThreeWayCompare> MovableOnlyInstanceThreeWayCompare>
set3; set4;
// Note: this is the default number of values per node for a set of int32s // Note: this is the default number of values per node for a set of int32s
// (with 64-bit pointers). // (with 64-bit pointers).
SizedBtreeSet<MovableOnlyInstance, /*TargetValuesPerNode=*/61, SizedBtreeSet<MovableOnlyInstance, /*TargetValuesPerNode=*/61,
@ -1454,28 +1524,28 @@ TEST(Btree, MovesComparisonsCopiesSwapsTrackingThreeWayCompare) {
std::vector<int> values = std::vector<int> values =
GenerateValuesWithSeed<int>(10000, 1 << 22, /*seed=*/23); GenerateValuesWithSeed<int>(10000, 1 << 22, /*seed=*/23);
EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<decltype(set3)>(), 3); EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<decltype(set4)>(), 4);
EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<decltype(set61)>(), 61); EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<decltype(set61)>(), 61);
EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<decltype(set100)>(), 100); EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<decltype(set100)>(), 100);
if (sizeof(void *) == 8) { if (sizeof(void *) == 8) {
EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<absl::btree_set<int32_t>>(), EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<absl::btree_set<int32_t>>(),
BtreeNodePeer::GetNumValuesPerNode<decltype(set61)>()); BtreeNodePeer::GetNumSlotsPerNode<decltype(set61)>());
} }
// Test key insertion/deletion in random order. // Test key insertion/deletion in random order.
ExpectOperationCounts(45281, 122560, values, &tracker, &set3); ExpectOperationCounts(56540, 124221, values, &tracker, &set4);
ExpectOperationCounts(386718, 119816, values, &tracker, &set61); ExpectOperationCounts(386718, 119816, values, &tracker, &set61);
ExpectOperationCounts(586761, 120319, values, &tracker, &set100); ExpectOperationCounts(586761, 120319, values, &tracker, &set100);
// Test key insertion/deletion in sorted order. // Test key insertion/deletion in sorted order.
std::sort(values.begin(), values.end()); std::sort(values.begin(), values.end());
ExpectOperationCounts(26638, 92134, values, &tracker, &set3); ExpectOperationCounts(24972, 85563, values, &tracker, &set4);
ExpectOperationCounts(20208, 87757, values, &tracker, &set61); ExpectOperationCounts(20208, 87757, values, &tracker, &set61);
ExpectOperationCounts(20124, 96583, values, &tracker, &set100); ExpectOperationCounts(20124, 96583, values, &tracker, &set100);
// Test key insertion/deletion in reverse sorted order. // Test key insertion/deletion in reverse sorted order.
std::reverse(values.begin(), values.end()); std::reverse(values.begin(), values.end());
ExpectOperationCounts(49951, 109326, values, &tracker, &set3); ExpectOperationCounts(54949, 117532, values, &tracker, &set4);
ExpectOperationCounts(338813, 108267, values, &tracker, &set61); ExpectOperationCounts(338813, 108267, values, &tracker, &set61);
ExpectOperationCounts(534529, 115280, values, &tracker, &set100); ExpectOperationCounts(534529, 115280, values, &tracker, &set100);
} }
@ -1968,6 +2038,30 @@ TEST(Btree, ExtractAndInsertNodeHandleMultiMap) {
EXPECT_EQ(res, ++other.begin()); EXPECT_EQ(res, ++other.begin());
} }
TEST(Btree, ExtractMultiMapEquivalentKeys) {
// Note: using string keys means a three-way comparator.
absl::btree_multimap<std::string, int> map;
for (int i = 0; i < 100; ++i) {
for (int j = 0; j < 100; ++j) {
map.insert({absl::StrCat(i), j});
}
}
for (int i = 0; i < 100; ++i) {
const std::string key = absl::StrCat(i);
auto node_handle = map.extract(key);
EXPECT_EQ(node_handle.key(), key);
EXPECT_EQ(node_handle.mapped(), 0) << i;
}
for (int i = 0; i < 100; ++i) {
const std::string key = absl::StrCat(i);
auto node_handle = map.extract(key);
EXPECT_EQ(node_handle.key(), key);
EXPECT_EQ(node_handle.mapped(), 1) << i;
}
}
// For multisets, insert with hint also affects correctness because we need to // For multisets, insert with hint also affects correctness because we need to
// insert immediately before the hint if possible. // insert immediately before the hint if possible.
struct InsertMultiHintData { struct InsertMultiHintData {
@ -2109,6 +2203,31 @@ TEST(Btree, MergeIntoMultiMapsWithDifferentComparators) {
Pair(4, 1), Pair(4, 4), Pair(5, 5))); Pair(4, 1), Pair(4, 4), Pair(5, 5)));
} }
TEST(Btree, MergeIntoSetMovableOnly) {
absl::btree_set<MovableOnlyInstance> src;
src.insert(MovableOnlyInstance(1));
absl::btree_multiset<MovableOnlyInstance> dst1;
dst1.insert(MovableOnlyInstance(2));
absl::btree_set<MovableOnlyInstance> dst2;
// Test merge into multiset.
dst1.merge(src);
EXPECT_TRUE(src.empty());
// ElementsAre/ElementsAreArray don't work with move-only types.
ASSERT_THAT(dst1, SizeIs(2));
EXPECT_EQ(*dst1.begin(), MovableOnlyInstance(1));
EXPECT_EQ(*std::next(dst1.begin()), MovableOnlyInstance(2));
// Test merge into set.
dst2.merge(dst1);
EXPECT_TRUE(dst1.empty());
ASSERT_THAT(dst2, SizeIs(2));
EXPECT_EQ(*dst2.begin(), MovableOnlyInstance(1));
EXPECT_EQ(*std::next(dst2.begin()), MovableOnlyInstance(2));
}
struct KeyCompareToWeakOrdering { struct KeyCompareToWeakOrdering {
template <typename T> template <typename T>
absl::weak_ordering operator()(const T &a, const T &b) const { absl::weak_ordering operator()(const T &a, const T &b) const {
@ -2585,6 +2704,12 @@ struct MultiKey {
int i2; int i2;
}; };
bool operator==(const MultiKey a, const MultiKey b) {
return a.i1 == b.i1 && a.i2 == b.i2;
}
// A heterogeneous comparator that has different equivalence classes for
// different lookup types.
struct MultiKeyComp { struct MultiKeyComp {
using is_transparent = void; using is_transparent = void;
bool operator()(const MultiKey a, const MultiKey b) const { bool operator()(const MultiKey a, const MultiKey b) const {
@ -2595,11 +2720,36 @@ struct MultiKeyComp {
bool operator()(const MultiKey a, const int b) const { return a.i1 < b; } bool operator()(const MultiKey a, const int b) const { return a.i1 < b; }
}; };
// Test that when there's a heterogeneous comparator that behaves differently // A heterogeneous, three-way comparator that has different equivalence classes
// for some heterogeneous operators, we get equal_range() right. // for different lookup types.
TEST(Btree, MultiKeyEqualRange) { struct MultiKeyThreeWayComp {
absl::btree_set<MultiKey, MultiKeyComp> set; using is_transparent = void;
absl::weak_ordering operator()(const MultiKey a, const MultiKey b) const {
if (a.i1 < b.i1) return absl::weak_ordering::less;
if (a.i1 > b.i1) return absl::weak_ordering::greater;
if (a.i2 < b.i2) return absl::weak_ordering::less;
if (a.i2 > b.i2) return absl::weak_ordering::greater;
return absl::weak_ordering::equivalent;
}
absl::weak_ordering operator()(const int a, const MultiKey b) const {
if (a < b.i1) return absl::weak_ordering::less;
if (a > b.i1) return absl::weak_ordering::greater;
return absl::weak_ordering::equivalent;
}
absl::weak_ordering operator()(const MultiKey a, const int b) const {
if (a.i1 < b) return absl::weak_ordering::less;
if (a.i1 > b) return absl::weak_ordering::greater;
return absl::weak_ordering::equivalent;
}
};
template <typename Compare>
class BtreeMultiKeyTest : public ::testing::Test {};
using MultiKeyComps = ::testing::Types<MultiKeyComp, MultiKeyThreeWayComp>;
TYPED_TEST_SUITE(BtreeMultiKeyTest, MultiKeyComps);
TYPED_TEST(BtreeMultiKeyTest, EqualRange) {
absl::btree_set<MultiKey, TypeParam> set;
for (int i = 0; i < 100; ++i) { for (int i = 0; i < 100; ++i) {
for (int j = 0; j < 100; ++j) { for (int j = 0; j < 100; ++j) {
set.insert({i, j}); set.insert({i, j});
@ -2609,11 +2759,140 @@ TEST(Btree, MultiKeyEqualRange) {
for (int i = 0; i < 100; ++i) { for (int i = 0; i < 100; ++i) {
auto equal_range = set.equal_range(i); auto equal_range = set.equal_range(i);
EXPECT_EQ(equal_range.first->i1, i); EXPECT_EQ(equal_range.first->i1, i);
EXPECT_EQ(equal_range.first->i2, 0); EXPECT_EQ(equal_range.first->i2, 0) << i;
EXPECT_EQ(std::distance(equal_range.first, equal_range.second), 100) << i; EXPECT_EQ(std::distance(equal_range.first, equal_range.second), 100) << i;
} }
} }
TYPED_TEST(BtreeMultiKeyTest, Extract) {
absl::btree_set<MultiKey, TypeParam> set;
for (int i = 0; i < 100; ++i) {
for (int j = 0; j < 100; ++j) {
set.insert({i, j});
}
}
for (int i = 0; i < 100; ++i) {
auto node_handle = set.extract(i);
EXPECT_EQ(node_handle.value().i1, i);
EXPECT_EQ(node_handle.value().i2, 0) << i;
}
for (int i = 0; i < 100; ++i) {
auto node_handle = set.extract(i);
EXPECT_EQ(node_handle.value().i1, i);
EXPECT_EQ(node_handle.value().i2, 1) << i;
}
}
TYPED_TEST(BtreeMultiKeyTest, Erase) {
absl::btree_set<MultiKey, TypeParam> set = {
{1, 1}, {2, 1}, {2, 2}, {3, 1}};
EXPECT_EQ(set.erase(2), 2);
EXPECT_THAT(set, ElementsAre(MultiKey{1, 1}, MultiKey{3, 1}));
}
TYPED_TEST(BtreeMultiKeyTest, Count) {
const absl::btree_set<MultiKey, TypeParam> set = {
{1, 1}, {2, 1}, {2, 2}, {3, 1}};
EXPECT_EQ(set.count(2), 2);
}
TEST(Btree, AllocConstructor) {
using Alloc = CountingAllocator<int>;
using Set = absl::btree_set<int, std::less<int>, Alloc>;
int64_t bytes_used = 0;
Alloc alloc(&bytes_used);
Set set(alloc);
set.insert({1, 2, 3});
EXPECT_THAT(set, ElementsAre(1, 2, 3));
EXPECT_GT(bytes_used, set.size() * sizeof(int));
}
TEST(Btree, AllocInitializerListConstructor) {
using Alloc = CountingAllocator<int>;
using Set = absl::btree_set<int, std::less<int>, Alloc>;
int64_t bytes_used = 0;
Alloc alloc(&bytes_used);
Set set({1, 2, 3}, alloc);
EXPECT_THAT(set, ElementsAre(1, 2, 3));
EXPECT_GT(bytes_used, set.size() * sizeof(int));
}
TEST(Btree, AllocRangeConstructor) {
using Alloc = CountingAllocator<int>;
using Set = absl::btree_set<int, std::less<int>, Alloc>;
int64_t bytes_used = 0;
Alloc alloc(&bytes_used);
std::vector<int> v = {1, 2, 3};
Set set(v.begin(), v.end(), alloc);
EXPECT_THAT(set, ElementsAre(1, 2, 3));
EXPECT_GT(bytes_used, set.size() * sizeof(int));
}
TEST(Btree, AllocCopyConstructor) {
using Alloc = CountingAllocator<int>;
using Set = absl::btree_set<int, std::less<int>, Alloc>;
int64_t bytes_used1 = 0;
Alloc alloc1(&bytes_used1);
Set set1(alloc1);
set1.insert({1, 2, 3});
int64_t bytes_used2 = 0;
Alloc alloc2(&bytes_used2);
Set set2(set1, alloc2);
EXPECT_THAT(set1, ElementsAre(1, 2, 3));
EXPECT_THAT(set2, ElementsAre(1, 2, 3));
EXPECT_GT(bytes_used1, set1.size() * sizeof(int));
EXPECT_EQ(bytes_used1, bytes_used2);
}
TEST(Btree, AllocMoveConstructor_SameAlloc) {
using Alloc = CountingAllocator<int>;
using Set = absl::btree_set<int, std::less<int>, Alloc>;
int64_t bytes_used = 0;
Alloc alloc(&bytes_used);
Set set1(alloc);
set1.insert({1, 2, 3});
const int64_t original_bytes_used = bytes_used;
EXPECT_GT(original_bytes_used, set1.size() * sizeof(int));
Set set2(std::move(set1), alloc);
EXPECT_THAT(set2, ElementsAre(1, 2, 3));
EXPECT_EQ(bytes_used, original_bytes_used);
}
TEST(Btree, AllocMoveConstructor_DifferentAlloc) {
using Alloc = CountingAllocator<int>;
using Set = absl::btree_set<int, std::less<int>, Alloc>;
int64_t bytes_used1 = 0;
Alloc alloc1(&bytes_used1);
Set set1(alloc1);
set1.insert({1, 2, 3});
const int64_t original_bytes_used = bytes_used1;
EXPECT_GT(original_bytes_used, set1.size() * sizeof(int));
int64_t bytes_used2 = 0;
Alloc alloc2(&bytes_used2);
Set set2(std::move(set1), alloc2);
EXPECT_THAT(set2, ElementsAre(1, 2, 3));
// We didn't free these bytes allocated by `set1` yet.
EXPECT_EQ(bytes_used1, original_bytes_used);
EXPECT_EQ(bytes_used2, original_bytes_used);
}
} // namespace } // namespace
} // namespace container_internal } // namespace container_internal
ABSL_NAMESPACE_END ABSL_NAMESPACE_END

@ -232,8 +232,8 @@ class FixedArray {
// FixedArray::at // FixedArray::at
// //
// Bounds-checked access. Returns a reference to the ith element of the // Bounds-checked access. Returns a reference to the ith element of the fixed
// fiexed array, or throws std::out_of_range // array, or throws std::out_of_range
reference at(size_type i) { reference at(size_type i) {
if (ABSL_PREDICT_FALSE(i >= size())) { if (ABSL_PREDICT_FALSE(i >= size())) {
base_internal::ThrowStdOutOfRange("FixedArray::at failed bounds check"); base_internal::ThrowStdOutOfRange("FixedArray::at failed bounds check");

@ -324,7 +324,7 @@ class flat_hash_set
// flat_hash_set::merge() // flat_hash_set::merge()
// //
// Extracts elements from a given `source` flat hash map into this // Extracts elements from a given `source` flat hash set into this
// `flat_hash_set`. If the destination `flat_hash_set` already contains an // `flat_hash_set`. If the destination `flat_hash_set` already contains an
// element with an equivalent key, that element is not extracted. // element with an equivalent key, that element is not extracted.
using Base::merge; using Base::merge;

@ -167,11 +167,13 @@ class InlinedVector {
// Creates an inlined vector by copying the contents of `other` using `alloc`. // Creates an inlined vector by copying the contents of `other` using `alloc`.
InlinedVector(const InlinedVector& other, const allocator_type& alloc) InlinedVector(const InlinedVector& other, const allocator_type& alloc)
: storage_(alloc) { : storage_(alloc) {
if (IsMemcpyOk::value && !other.storage_.GetIsAllocated()) { if (other.empty()) {
// Empty; nothing to do.
} else if (IsMemcpyOk::value && !other.storage_.GetIsAllocated()) {
// Memcpy-able and do not need allocation.
storage_.MemcpyFrom(other.storage_); storage_.MemcpyFrom(other.storage_);
} else { } else {
storage_.Initialize(IteratorValueAdapter<const_pointer>(other.data()), storage_.InitFrom(other.storage_);
other.size());
} }
} }

@ -534,6 +534,28 @@ void BM_ConstructFromMove(benchmark::State& state) {
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromMove, TrivialType); ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromMove, TrivialType);
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromMove, NontrivialType); ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromMove, NontrivialType);
// Measure cost of copy-constructor+destructor.
void BM_CopyTrivial(benchmark::State& state) {
const int n = state.range(0);
InlVec<int64_t> src(n);
for (auto s : state) {
InlVec<int64_t> copy(src);
benchmark::DoNotOptimize(copy);
}
}
BENCHMARK(BM_CopyTrivial)->Arg(0)->Arg(1)->Arg(kLargeSize);
// Measure cost of copy-constructor+destructor.
void BM_CopyNonTrivial(benchmark::State& state) {
const int n = state.range(0);
InlVec<InlVec<int64_t>> src(n);
for (auto s : state) {
InlVec<InlVec<int64_t>> copy(src);
benchmark::DoNotOptimize(copy);
}
}
BENCHMARK(BM_CopyNonTrivial)->Arg(0)->Arg(1)->Arg(kLargeSize);
template <typename T, size_t FromSize, size_t ToSize> template <typename T, size_t FromSize, size_t ToSize>
void BM_AssignSizeRef(benchmark::State& state) { void BM_AssignSizeRef(benchmark::State& state) {
auto size = ToSize; auto size = ToSize;

@ -736,22 +736,26 @@ TEST(OverheadTest, Storage) {
// In particular, ensure that std::allocator doesn't cost anything to store. // In particular, ensure that std::allocator doesn't cost anything to store.
// The union should be absorbing some of the allocation bookkeeping overhead // The union should be absorbing some of the allocation bookkeeping overhead
// in the larger vectors, leaving only the size_ field as overhead. // in the larger vectors, leaving only the size_ field as overhead.
EXPECT_EQ(2 * sizeof(int*),
sizeof(absl::InlinedVector<int*, 1>) - 1 * sizeof(int*)); struct T { void* val; };
EXPECT_EQ(1 * sizeof(int*), size_t expected_overhead = sizeof(T);
sizeof(absl::InlinedVector<int*, 2>) - 2 * sizeof(int*));
EXPECT_EQ(1 * sizeof(int*), EXPECT_EQ((2 * expected_overhead),
sizeof(absl::InlinedVector<int*, 3>) - 3 * sizeof(int*)); sizeof(absl::InlinedVector<T, 1>) - sizeof(T[1]));
EXPECT_EQ(1 * sizeof(int*), EXPECT_EQ(expected_overhead,
sizeof(absl::InlinedVector<int*, 4>) - 4 * sizeof(int*)); sizeof(absl::InlinedVector<T, 2>) - sizeof(T[2]));
EXPECT_EQ(1 * sizeof(int*), EXPECT_EQ(expected_overhead,
sizeof(absl::InlinedVector<int*, 5>) - 5 * sizeof(int*)); sizeof(absl::InlinedVector<T, 3>) - sizeof(T[3]));
EXPECT_EQ(1 * sizeof(int*), EXPECT_EQ(expected_overhead,
sizeof(absl::InlinedVector<int*, 6>) - 6 * sizeof(int*)); sizeof(absl::InlinedVector<T, 4>) - sizeof(T[4]));
EXPECT_EQ(1 * sizeof(int*), EXPECT_EQ(expected_overhead,
sizeof(absl::InlinedVector<int*, 7>) - 7 * sizeof(int*)); sizeof(absl::InlinedVector<T, 5>) - sizeof(T[5]));
EXPECT_EQ(1 * sizeof(int*), EXPECT_EQ(expected_overhead,
sizeof(absl::InlinedVector<int*, 8>) - 8 * sizeof(int*)); sizeof(absl::InlinedVector<T, 6>) - sizeof(T[6]));
EXPECT_EQ(expected_overhead,
sizeof(absl::InlinedVector<T, 7>) - sizeof(T[7]));
EXPECT_EQ(expected_overhead,
sizeof(absl::InlinedVector<T, 8>) - sizeof(T[8]));
} }
TEST(IntVec, Clear) { TEST(IntVec, Clear) {

@ -182,15 +182,44 @@ struct key_compare_to_adapter<std::greater<absl::Cord>> {
using type = StringBtreeDefaultGreater; using type = StringBtreeDefaultGreater;
}; };
// Detects an 'absl_btree_prefer_linear_node_search' member. This is
// a protocol used as an opt-in or opt-out of linear search.
//
// For example, this would be useful for key types that wrap an integer
// and define their own cheap operator<(). For example:
//
// class K {
// public:
// using absl_btree_prefer_linear_node_search = std::true_type;
// ...
// private:
// friend bool operator<(K a, K b) { return a.k_ < b.k_; }
// int k_;
// };
//
// btree_map<K, V> m; // Uses linear search
//
// If T has the preference tag, then it has a preference.
// Btree will use the tag's truth value.
template <typename T, typename = void>
struct has_linear_node_search_preference : std::false_type {};
template <typename T, typename = void>
struct prefers_linear_node_search : std::false_type {};
template <typename T>
struct has_linear_node_search_preference<
T, absl::void_t<typename T::absl_btree_prefer_linear_node_search>>
: std::true_type {};
template <typename T>
struct prefers_linear_node_search<
T, absl::void_t<typename T::absl_btree_prefer_linear_node_search>>
: T::absl_btree_prefer_linear_node_search {};
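A complementary sketch to the comment above (hypothetical types, mirroring the LinearChoiceTree test earlier in this commit): the same tag with std::false_type opts out of linear search, and per the decision order in btree_node below, a comparator's preference wins over the key's.

struct WrappedKey {  // Hypothetical wrapper that opts in to linear search.
  using absl_btree_prefer_linear_node_search = std::true_type;
  int v;
  friend bool operator<(WrappedKey a, WrappedKey b) { return a.v < b.v; }
};

struct ForceBinaryCmp {  // Hypothetical comparator that opts out.
  using absl_btree_prefer_linear_node_search = std::false_type;
  bool operator()(WrappedKey a, WrappedKey b) const { return a.v < b.v; }
};

// btree_set<WrappedKey> searches nodes linearly (the key's preference), while
// btree_set<WrappedKey, ForceBinaryCmp> searches them binarily, because the
// comparator's preference takes precedence.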
template <typename Key, typename Compare, typename Alloc, int TargetNodeSize, template <typename Key, typename Compare, typename Alloc, int TargetNodeSize,
bool Multi, typename SlotPolicy> bool Multi, typename SlotPolicy>
struct common_params { struct common_params {
// If Compare is a common comparator for a string-like type, then we adapt it // If Compare is a common comparator for a string-like type, then we adapt it
// to use heterogeneous lookup and to be a key-compare-to comparator. // to use heterogeneous lookup and to be a key-compare-to comparator.
using key_compare = typename key_compare_to_adapter<Compare>::type; using key_compare = typename key_compare_to_adapter<Compare>::type;
// True when key_compare has been adapted to StringBtreeDefault{Less,Greater}.
using is_key_compare_adapted =
absl::negation<std::is_same<key_compare, Compare>>;
// A type which indicates if we have a key-compare-to functor or a plain old // A type which indicates if we have a key-compare-to functor or a plain old
// key-compare functor. // key-compare functor.
using is_key_compare_to = btree_is_key_compare_to<key_compare, Key>; using is_key_compare_to = btree_is_key_compare_to<key_compare, Key>;
@ -200,9 +229,6 @@ struct common_params {
using size_type = std::make_signed<size_t>::type; using size_type = std::make_signed<size_t>::type;
using difference_type = ptrdiff_t; using difference_type = ptrdiff_t;
// True if this is a multiset or multimap.
using is_multi_container = std::integral_constant<bool, Multi>;
using slot_policy = SlotPolicy; using slot_policy = SlotPolicy;
using slot_type = typename slot_policy::slot_type; using slot_type = typename slot_policy::slot_type;
using value_type = typename slot_policy::value_type; using value_type = typename slot_policy::value_type;
@ -212,6 +238,23 @@ struct common_params {
using reference = value_type &; using reference = value_type &;
using const_reference = const value_type &; using const_reference = const value_type &;
// For the given lookup key type, returns whether we can have multiple
// equivalent keys in the btree. If this is a multi-container, then we can.
// Otherwise, we can have multiple equivalent keys only if all of the
// following conditions are met:
// - The comparator is transparent.
// - The lookup key type is not the same as key_type.
// - The comparator is not a StringBtreeDefault{Less,Greater} comparator
// that we know has the same equivalence classes for all lookup types.
template <typename LookupKey>
constexpr static bool can_have_multiple_equivalent_keys() {
return Multi ||
(IsTransparent<key_compare>::value &&
!std::is_same<LookupKey, Key>::value &&
!std::is_same<key_compare, StringBtreeDefaultLess>::value &&
!std::is_same<key_compare, StringBtreeDefaultGreater>::value);
}
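A worked instance of these conditions, using the MultiKey/MultiKeyComp types from the tests in this commit: the comparator is transparent, an int lookup key differs from the MultiKey key_type, and an int is compared against i1 only, so even a plain (non-multi) btree_set can hold several elements equivalent to one lookup key.

#include <cassert>
#include <iterator>

void Demo() {
  absl::btree_set<MultiKey, MultiKeyComp> set = {{0, 0}, {0, 1}};
  const auto range = set.equal_range(0);  // Both elements match the int key 0.
  assert(std::distance(range.first, range.second) == 2);
}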
enum { enum {
kTargetNodeSize = TargetNodeSize, kTargetNodeSize = TargetNodeSize,
@ -391,6 +434,10 @@ struct SearchResult {
// useful information. // useful information.
template <typename V> template <typename V>
struct SearchResult<V, false> { struct SearchResult<V, false> {
SearchResult() {}
explicit SearchResult(V value) : value(value) {}
SearchResult(V value, MatchKind /*match*/) : value(value) {}
V value; V value;
static constexpr bool HasMatch() { return false; } static constexpr bool HasMatch() { return false; }
@ -403,7 +450,6 @@ struct SearchResult<V, false> {
template <typename Params> template <typename Params>
class btree_node { class btree_node {
using is_key_compare_to = typename Params::is_key_compare_to; using is_key_compare_to = typename Params::is_key_compare_to;
using is_multi_container = typename Params::is_multi_container;
using field_type = typename Params::node_count_type; using field_type = typename Params::node_count_type;
using allocator_type = typename Params::allocator_type; using allocator_type = typename Params::allocator_type;
using slot_type = typename Params::slot_type; using slot_type = typename Params::slot_type;
@ -421,15 +467,22 @@ class btree_node {
using difference_type = typename Params::difference_type; using difference_type = typename Params::difference_type;
// Btree decides whether to use linear node search as follows: // Btree decides whether to use linear node search as follows:
// - If the comparator expresses a preference, use that.
// - If the key expresses a preference, use that.
// - If the key is arithmetic and the comparator is std::less or // - If the key is arithmetic and the comparator is std::less or
// std::greater, choose linear. // std::greater, choose linear.
// - Otherwise, choose binary. // - Otherwise, choose binary.
// TODO(ezb): Might make sense to add condition(s) based on node-size. // TODO(ezb): Might make sense to add condition(s) based on node-size.
using use_linear_search = std::integral_constant< using use_linear_search = std::integral_constant<
bool, bool,
std::is_arithmetic<key_type>::value && has_linear_node_search_preference<key_compare>::value
(std::is_same<std::less<key_type>, key_compare>::value || ? prefers_linear_node_search<key_compare>::value
std::is_same<std::greater<key_type>, key_compare>::value)>; : has_linear_node_search_preference<key_type>::value
? prefers_linear_node_search<key_type>::value
: std::is_arithmetic<key_type>::value &&
(std::is_same<std::less<key_type>, key_compare>::value ||
std::is_same<std::greater<key_type>,
key_compare>::value)>;
// This class is organized by gtl::Layout as if it had the following // This class is organized by gtl::Layout as if it had the following
// structure: // structure:
@ -446,23 +499,23 @@ class btree_node {
// // is the same as the count of values. // // is the same as the count of values.
// field_type finish; // field_type finish;
// // The maximum number of values the node can hold. This is an integer in // // The maximum number of values the node can hold. This is an integer in
// // [1, kNodeValues] for root leaf nodes, kNodeValues for non-root leaf // // [1, kNodeSlots] for root leaf nodes, kNodeSlots for non-root leaf
// // nodes, and kInternalNodeMaxCount (as a sentinel value) for internal // // nodes, and kInternalNodeMaxCount (as a sentinel value) for internal
// // nodes (even though there are still kNodeValues values in the node). // // nodes (even though there are still kNodeSlots values in the node).
// // TODO(ezb): make max_count use only 4 bits and record log2(capacity) // // TODO(ezb): make max_count use only 4 bits and record log2(capacity)
// // to free extra bits for is_root, etc. // // to free extra bits for is_root, etc.
// field_type max_count; // field_type max_count;
// //
// // The array of values. The capacity is `max_count` for leaf nodes and // // The array of values. The capacity is `max_count` for leaf nodes and
// // kNodeValues for internal nodes. Only the values in // // kNodeSlots for internal nodes. Only the values in
// // [start, finish) have been initialized and are valid. // // [start, finish) have been initialized and are valid.
// slot_type values[max_count]; // slot_type values[max_count];
// //
// // The array of child pointers. The keys in children[i] are all less // // The array of child pointers. The keys in children[i] are all less
// // than key(i). The keys in children[i + 1] are all greater than key(i). // // than key(i). The keys in children[i + 1] are all greater than key(i).
// // There are 0 children for leaf nodes and kNodeValues + 1 children for // // There are 0 children for leaf nodes and kNodeSlots + 1 children for
// // internal nodes. // // internal nodes.
// btree_node *children[kNodeValues + 1]; // btree_node *children[kNodeSlots + 1];
// //
// This class is only constructed by EmptyNodeType. Normally, pointers to the // This class is only constructed by EmptyNodeType. Normally, pointers to the
// layout above are allocated, cast to btree_node*, and de-allocated within // layout above are allocated, cast to btree_node*, and de-allocated within
@@ -484,57 +537,62 @@ class btree_node {
  private:
   using layout_type = absl::container_internal::Layout<btree_node *, field_type,
                                                        slot_type, btree_node *>;
-  constexpr static size_type SizeWithNValues(size_type n) {
+  constexpr static size_type SizeWithNSlots(size_type n) {
     return layout_type(/*parent*/ 1,
                        /*position, start, finish, max_count*/ 4,
-                       /*values*/ n,
+                       /*slots*/ n,
                        /*children*/ 0)
         .AllocSize();
   }
   // A lower bound for the overhead of fields other than values in a leaf node.
   constexpr static size_type MinimumOverhead() {
-    return SizeWithNValues(1) - sizeof(value_type);
+    return SizeWithNSlots(1) - sizeof(value_type);
   }

   // Compute how many values we can fit onto a leaf node taking into account
   // padding.
-  constexpr static size_type NodeTargetValues(const int begin, const int end) {
+  constexpr static size_type NodeTargetSlots(const int begin, const int end) {
     return begin == end ? begin
-                        : SizeWithNValues((begin + end) / 2 + 1) >
+                        : SizeWithNSlots((begin + end) / 2 + 1) >
                                   params_type::kTargetNodeSize
-                              ? NodeTargetValues(begin, (begin + end) / 2)
-                              : NodeTargetValues((begin + end) / 2 + 1, end);
+                              ? NodeTargetSlots(begin, (begin + end) / 2)
+                              : NodeTargetSlots((begin + end) / 2 + 1, end);
   }

   enum {
     kTargetNodeSize = params_type::kTargetNodeSize,
-    kNodeTargetValues = NodeTargetValues(0, params_type::kTargetNodeSize),
+    kNodeTargetSlots = NodeTargetSlots(0, params_type::kTargetNodeSize),

-    // We need a minimum of 3 values per internal node in order to perform
+    // We need a minimum of 3 slots per internal node in order to perform
     // splitting (1 value for the two nodes involved in the split and 1 value
-    // propagated to the parent as the delimiter for the split).
-    kNodeValues = kNodeTargetValues >= 3 ? kNodeTargetValues : 3,
+    // propagated to the parent as the delimiter for the split). For performance
+    // reasons, we don't allow 3 slots-per-node due to bad worst case occupancy
+    // of 1/3 (for a node, not a b-tree).
+    kMinNodeSlots = 4,
+    kNodeSlots =
+        kNodeTargetSlots >= kMinNodeSlots ? kNodeTargetSlots : kMinNodeSlots,

     // The node is internal (i.e. is not a leaf node) if and only if `max_count`
     // has this value.
     kInternalNodeMaxCount = 0,
   };

-  // Leaves can have less than kNodeValues values.
-  constexpr static layout_type LeafLayout(const int max_values = kNodeValues) {
+  // Leaves can have less than kNodeSlots values.
+  constexpr static layout_type LeafLayout(const int slot_count = kNodeSlots) {
     return layout_type(/*parent*/ 1,
                        /*position, start, finish, max_count*/ 4,
-                       /*values*/ max_values,
+                       /*slots*/ slot_count,
                        /*children*/ 0);
   }
   constexpr static layout_type InternalLayout() {
     return layout_type(/*parent*/ 1,
                        /*position, start, finish, max_count*/ 4,
-                       /*values*/ kNodeValues,
-                       /*children*/ kNodeValues + 1);
+                       /*slots*/ kNodeSlots,
+                       /*children*/ kNodeSlots + 1);
   }
-  constexpr static size_type LeafSize(const int max_values = kNodeValues) {
-    return LeafLayout(max_values).AllocSize();
+  constexpr static size_type LeafSize(const int slot_count = kNodeSlots) {
+    return LeafLayout(slot_count).AllocSize();
   }
   constexpr static size_type InternalSize() {
     return InternalLayout().AllocSize();
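NodeTargetSlots above is a compile-time binary search: it finds the largest slot count whose materialized layout still fits within kTargetNodeSize. A standalone sketch of the same recursion, with a made-up cost model in place of SizeWithNSlots:

    #include <cstddef>

    // Toy cost model standing in for SizeWithNSlots: 32 bytes of overhead
    // plus 8 bytes per slot.
    constexpr std::size_t SizeWithN(std::size_t n) { return 32 + 8 * n; }

    // Largest n in [begin, end] such that SizeWithN(n) <= target, found by
    // bisection exactly as NodeTargetSlots does.
    constexpr std::size_t TargetSlots(std::size_t begin, std::size_t end,
                                      std::size_t target) {
      return begin == end
                 ? begin
                 : SizeWithN((begin + end) / 2 + 1) > target
                       ? TargetSlots(begin, (begin + end) / 2, target)
                       : TargetSlots((begin + end) / 2 + 1, end, target);
    }

    static_assert(TargetSlots(0, 256, 256) == 28,
                  "32 + 8 * 28 == 256 fits; 29 slots would not");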
@@ -591,10 +649,10 @@ class btree_node {
   }
   field_type max_count() const {
     // Internal nodes have max_count==kInternalNodeMaxCount.
-    // Leaf nodes have max_count in [1, kNodeValues].
+    // Leaf nodes have max_count in [1, kNodeSlots].
     const field_type max_count = GetField<1>()[3];
     return max_count == field_type{kInternalNodeMaxCount}
-               ? field_type{kNodeValues}
+               ? field_type{kNodeSlots}
               : max_count;
   }
@@ -672,7 +730,7 @@ class btree_node {
       }
       ++s;
     }
-    return {s};
+    return SearchResult<int, false>{s};
   }

   // Returns the position of the first value whose key is not less than k using
@@ -707,7 +765,7 @@ class btree_node {
         e = mid;
       }
     }
-    return {s};
+    return SearchResult<int, false>{s};
   }

   // Returns the position of the first value whose key is not less than k using
@@ -716,7 +774,7 @@ class btree_node {
   SearchResult<int, true> binary_search_impl(
       const K &k, int s, int e, const CompareTo &comp,
       std::true_type /* IsCompareTo */) const {
-    if (is_multi_container::value) {
+    if (params_type::template can_have_multiple_equivalent_keys<K>()) {
       MatchKind exact_match = MatchKind::kNe;
       while (s != e) {
         const int mid = (s + e) >> 1;
@@ -727,14 +785,14 @@ class btree_node {
           e = mid;
           if (c == 0) {
             // Need to return the first value whose key is not less than k,
-            // which requires continuing the binary search if this is a
-            // multi-container.
+            // which requires continuing the binary search if there could be
+            // multiple equivalent keys.
             exact_match = MatchKind::kEq;
           }
         }
       }
       return {s, exact_match};
-    } else {  // Not a multi-container.
+    } else {  // Can't have multiple equivalent keys.
       while (s != e) {
         const int mid = (s + e) >> 1;
         const absl::weak_ordering c = comp(key(mid), k);
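The multi-key branch above keeps bisecting left after an equality hit so that the first of several equivalent keys is returned, while recording that a match was seen. The same control flow in isolation, over a plain sorted vector (a sketch, not the library's code):

    #include <string>
    #include <utility>
    #include <vector>

    // Returns the index of the first element not less than `key` plus whether
    // an exact match was seen, using one three-way comparison per step.
    std::pair<int, bool> LowerBound3Way(const std::vector<std::string> &v,
                                        const std::string &key) {
      int s = 0, e = static_cast<int>(v.size());
      bool exact = false;
      while (s != e) {
        const int mid = (s + e) >> 1;
        const int c = v[mid].compare(key);  // three-way: <0, ==0, >0
        if (c < 0) {
          s = mid + 1;
        } else {
          e = mid;
          if (c == 0) exact = true;  // keep going left for the first match
        }
      }
      return {s, exact};
    }

With a boolean less-than comparator the c == 0 case is unobservable, which is why the key-compare-to specialization exists at all.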
@@ -784,12 +842,12 @@ class btree_node {
                            start_slot(), max_count * sizeof(slot_type));
   }
   void init_internal(btree_node *parent) {
-    init_leaf(parent, kNodeValues);
+    init_leaf(parent, kNodeSlots);
     // Set `max_count` to a sentinel value to indicate that this node is
     // internal.
     set_max_count(kInternalNodeMaxCount);
     absl::container_internal::SanitizerPoisonMemoryRegion(
-        &mutable_child(start()), (kNodeValues + 1) * sizeof(btree_node *));
+        &mutable_child(start()), (kNodeSlots + 1) * sizeof(btree_node *));
   }

   static void deallocate(const size_type size, btree_node *node,
@@ -800,12 +858,6 @@ class btree_node {
   // Deletes a node and all of its children.
   static void clear_and_delete(btree_node *node, allocator_type *alloc);

- public:
-  // Exposed only for tests.
-  static bool testonly_uses_linear_node_search() {
-    return use_linear_search::value;
-  }
-
  private:
   template <typename... Args>
   void value_init(const field_type i, allocator_type *alloc, Args &&... args) {
@@ -873,6 +925,7 @@ struct btree_iterator {
   using key_type = typename Node::key_type;
   using size_type = typename Node::size_type;
   using params_type = typename Node::params_type;
+  using is_map_container = typename params_type::is_map_container;

   using node_type = Node;
   using normal_node = typename std::remove_const<Node>::type;
@@ -884,7 +937,7 @@ struct btree_iterator {
   using slot_type = typename params_type::slot_type;

   using iterator =
       btree_iterator<normal_node, normal_reference, normal_pointer>;
   using const_iterator =
       btree_iterator<const_node, const_reference, const_pointer>;
@@ -901,20 +954,19 @@ struct btree_iterator {
   btree_iterator(Node *n, int p) : node(n), position(p) {}

   // NOTE: this SFINAE allows for implicit conversions from iterator to
-  // const_iterator, but it specifically avoids defining copy constructors so
-  // that btree_iterator can be trivially copyable. This is for performance and
-  // binary size reasons.
+  // const_iterator, but it specifically avoids hiding the copy constructor so
+  // that the trivial one will be used when possible.
   template <typename N, typename R, typename P,
             absl::enable_if_t<
                 std::is_same<btree_iterator<N, R, P>, iterator>::value &&
                     std::is_same<btree_iterator, const_iterator>::value,
                 int> = 0>
-  btree_iterator(const btree_iterator<N, R, P> &other)  // NOLINT
+  btree_iterator(const btree_iterator<N, R, P> other)  // NOLINT
       : node(other.node), position(other.position) {}

  private:
   // This SFINAE allows explicit conversions from const_iterator to
-  // iterator, but also avoids defining a copy constructor.
+  // iterator, but also avoids hiding the copy constructor.
   // NOTE: the const_cast is safe because this constructor is only called by
   // non-const methods and the container owns the nodes.
   template <typename N, typename R, typename P,
@@ -922,7 +974,7 @@ struct btree_iterator {
                 std::is_same<btree_iterator<N, R, P>, const_iterator>::value &&
                     std::is_same<btree_iterator, iterator>::value,
                 int> = 0>
-  explicit btree_iterator(const btree_iterator<N, R, P> &other)
+  explicit btree_iterator(const btree_iterator<N, R, P> other)
       : node(const_cast<node_type *>(other.node)), position(other.position) {}

   // Increment/decrement the iterator.
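The by-value parameters in the two conversion constructors above are what keep btree_iterator trivially copyable: a user-declared constructor taking const btree_iterator& would be a copy constructor and would suppress the trivial one. A minimal sketch of the idiom (hypothetical type, not the real iterator):

    #include <type_traits>

    template <bool IsConst>
    struct Iter {
      int *p = nullptr;
      Iter() = default;
      // Implicit conversion from the non-const iterator. Because the template
      // takes its argument by value, it can never be selected as (or hide) the
      // trivial copy constructor.
      template <bool C = IsConst, typename std::enable_if<C, int>::type = 0>
      Iter(Iter<false> other) : p(other.p) {}  // NOLINT
    };

    static_assert(std::is_trivially_copyable<Iter<false>>::value, "");
    static_assert(std::is_trivially_copyable<Iter<true>>::value, "");

Trivially copyable iterators can be passed in registers and copied with memcpy, which is the performance and binary-size motivation the old comment alluded to.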
@@ -985,6 +1037,8 @@ struct btree_iterator {
   }

  private:
+  friend iterator;
+  friend const_iterator;
   template <typename Params>
   friend class btree;
   template <typename Tree>
@@ -995,8 +1049,6 @@ struct btree_iterator {
   friend class btree_map_container;
   template <typename Tree>
   friend class btree_multiset_container;
-  template <typename N, typename R, typename P>
-  friend struct btree_iterator;
   template <typename TreeType, typename CheckerType>
   friend class base_checker;
@@ -1017,8 +1069,6 @@ class btree {
   using is_key_compare_to = typename Params::is_key_compare_to;
   using init_type = typename Params::init_type;
   using field_type = typename node_type::field_type;
-  using is_multi_container = typename Params::is_multi_container;
-  using is_key_compare_adapted = typename Params::is_key_compare_adapted;

   // We use a static empty node for the root/leftmost/rightmost of empty btrees
   // in order to avoid branching in begin()/end().
@@ -1054,8 +1104,8 @@ class btree {
   }

   enum : uint32_t {
-    kNodeValues = node_type::kNodeValues,
-    kMinNodeValues = kNodeValues / 2,
+    kNodeSlots = node_type::kNodeSlots,
+    kMinNodeValues = kNodeSlots / 2,
   };

   struct node_stats {
@@ -1085,7 +1135,8 @@ class btree {
   using const_reference = typename Params::const_reference;
   using pointer = typename Params::pointer;
   using const_pointer = typename Params::const_pointer;
-  using iterator = btree_iterator<node_type, reference, pointer>;
+  using iterator =
+      typename btree_iterator<node_type, reference, pointer>::iterator;
   using const_iterator = typename iterator::const_iterator;
   using reverse_iterator = std::reverse_iterator<iterator>;
   using const_reverse_iterator = std::reverse_iterator<const_iterator>;
@@ -1098,28 +1149,46 @@ class btree {
  private:
   // For use in copy_or_move_values_in_order.
   const value_type &maybe_move_from_iterator(const_iterator it) { return *it; }
-  value_type &&maybe_move_from_iterator(iterator it) { return std::move(*it); }
+  value_type &&maybe_move_from_iterator(iterator it) {
+    // This is a destructive operation on the other container so it's safe for
+    // us to const_cast and move from the keys here even if it's a set.
+    return std::move(const_cast<value_type &>(*it));
+  }

   // Copies or moves (depending on the template parameter) the values in
   // other into this btree in their order in other. This btree must be empty
   // before this method is called. This method is used in copy construction,
   // copy assignment, and move assignment.
   template <typename Btree>
-  void copy_or_move_values_in_order(Btree *other);
+  void copy_or_move_values_in_order(Btree &other);

   // Validates that various assumptions/requirements are true at compile time.
   constexpr static bool static_assert_validation();

  public:
-  btree(const key_compare &comp, const allocator_type &alloc);
+  btree(const key_compare &comp, const allocator_type &alloc)
+      : root_(comp, alloc, EmptyNode()), rightmost_(EmptyNode()), size_(0) {}

-  btree(const btree &other);
+  btree(const btree &other) : btree(other, other.allocator()) {}
+  btree(const btree &other, const allocator_type &alloc)
+      : btree(other.key_comp(), alloc) {
+    copy_or_move_values_in_order(other);
+  }
   btree(btree &&other) noexcept
       : root_(std::move(other.root_)),
         rightmost_(absl::exchange(other.rightmost_, EmptyNode())),
         size_(absl::exchange(other.size_, 0)) {
     other.mutable_root() = EmptyNode();
   }
+  btree(btree &&other, const allocator_type &alloc)
+      : btree(other.key_comp(), alloc) {
+    if (alloc == other.allocator()) {
+      swap(other);
+    } else {
+      // Move values from `other` one at a time when allocators are different.
+      copy_or_move_values_in_order(other);
+    }
+  }

   ~btree() {
     // Put static_asserts in destructor to avoid triggering them before the type
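The new allocator-extended move constructor follows the standard allocator-aware container pattern: the representation can only be stolen when the allocators compare equal; otherwise every element has to be moved individually through the new allocator. A toy illustration of the same decision (illustrative, not abseil code):

    #include <iterator>
    #include <memory>
    #include <vector>

    template <typename T, typename Alloc = std::allocator<T>>
    class Bag {
     public:
      Bag(Bag &&other, const Alloc &alloc) : data_(alloc) {
        if (alloc == other.data_.get_allocator()) {
          data_.swap(other.data_);  // equal allocators: steal storage, O(1)
        } else {
          // Unequal allocators: storage cannot change hands, so move the
          // elements one at a time into memory owned by `alloc`.
          data_.assign(std::make_move_iterator(other.data_.begin()),
                       std::make_move_iterator(other.data_.end()));
        }
      }

     private:
      std::vector<T, Alloc> data_;
    };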
@@ -1147,17 +1216,22 @@ class btree {
     return const_reverse_iterator(begin());
   }

-  // Finds the first element whose key is not less than key.
+  // Finds the first element whose key is not less than `key`.
   template <typename K>
   iterator lower_bound(const K &key) {
-    return internal_end(internal_lower_bound(key));
+    return internal_end(internal_lower_bound(key).value);
   }
   template <typename K>
   const_iterator lower_bound(const K &key) const {
-    return internal_end(internal_lower_bound(key));
+    return internal_end(internal_lower_bound(key).value);
   }

-  // Finds the first element whose key is greater than key.
+  // Finds the first element whose key is not less than `key` and also returns
+  // whether that element is equal to `key`.
+  template <typename K>
+  std::pair<iterator, bool> lower_bound_equal(const K &key) const;
+
+  // Finds the first element whose key is greater than `key`.
   template <typename K>
   iterator upper_bound(const K &key) {
     return internal_end(internal_upper_bound(key));
@@ -1239,18 +1313,8 @@ class btree {
   // to the element after the last erased element.
   std::pair<size_type, iterator> erase_range(iterator begin, iterator end);

-  // Erases the specified key from the btree. Returns 1 if an element was
-  // erased and 0 otherwise.
-  template <typename K>
-  size_type erase_unique(const K &key);
-
-  // Erases all of the entries matching the specified key from the
-  // btree. Returns the number of elements erased.
-  template <typename K>
-  size_type erase_multi(const K &key);
-
-  // Finds the iterator corresponding to a key or returns end() if the key is
-  // not present.
+  // Finds an element with key equivalent to `key` or returns `end()` if `key`
+  // is not present.
   template <typename K>
   iterator find(const K &key) {
     return internal_end(internal_find(key));
@@ -1260,23 +1324,6 @@ class btree {
     return internal_end(internal_find(key));
   }

-  // Returns a count of the number of times the key appears in the btree.
-  template <typename K>
-  size_type count_unique(const K &key) const {
-    const iterator begin = internal_find(key);
-    if (begin.node == nullptr) {
-      // The key doesn't exist in the tree.
-      return 0;
-    }
-    return 1;
-  }
-  // Returns a count of the number of times the key appears in the btree.
-  template <typename K>
-  size_type count_multi(const K &key) const {
-    const auto range = equal_range(key);
-    return std::distance(range.first, range.second);
-  }

   // Clear the btree, deleting all of the values it contains.
   void clear();
@@ -1339,12 +1386,14 @@ class btree {
     }
   }

-  // The average number of bytes used per value stored in the btree.
+  // The average number of bytes used per value stored in the btree assuming
+  // random insertion order.
   static double average_bytes_per_value() {
-    // Returns the number of bytes per value on a leaf node that is 75%
-    // full. Experimentally, this matches up nicely with the computed number of
-    // bytes per value in trees that had their values inserted in random order.
-    return node_type::LeafSize() / (kNodeValues * 0.75);
+    // The expected number of values per node with random insertion order is the
+    // average of the maximum and minimum numbers of values per node.
+    const double expected_values_per_node =
+        (kNodeSlots + kMinNodeValues) / 2.0;
+    return node_type::LeafSize() / expected_values_per_node;
   }

   // The fullness of the btree. Computed as the number of elements in the btree
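A quick check with made-up numbers shows the new formula agrees with the old 75% heuristic whenever kMinNodeValues is exactly kNodeSlots / 2: with kNodeSlots = 8 we get kMinNodeValues = 4 and expected_values_per_node = (8 + 4) / 2 = 6, which equals 0.75 * 8. In general (k + k/2) / 2 = 0.75k, so the rewrite changes no numbers; it just states the underlying assumption (nodes uniformly between half-full and full) instead of hard-coding 0.75.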
@@ -1354,7 +1403,7 @@ class btree {
   // Returns 0 for empty trees.
   double fullness() const {
     if (empty()) return 0.0;
-    return static_cast<double>(size()) / (nodes() * kNodeValues);
+    return static_cast<double>(size()) / (nodes() * kNodeSlots);
   }
   // The overhead of the btree structure in bytes per node. Computed as the
   // total number of bytes used by the btree minus the number of bytes used for
@@ -1404,7 +1453,7 @@ class btree {
   }
   node_type *new_leaf_node(node_type *parent) {
     node_type *n = allocate(node_type::LeafSize());
-    n->init_leaf(parent, kNodeValues);
+    n->init_leaf(parent, kNodeSlots);
     return n;
   }
   node_type *new_leaf_root_node(const int max_count) {
@@ -1453,28 +1502,19 @@ class btree {
   static IterType internal_last(IterType iter);

   // Returns an iterator pointing to the leaf position at which key would
-  // reside in the tree. We provide 2 versions of internal_locate. The first
-  // version uses a less-than comparator and is incapable of distinguishing when
-  // there is an exact match. The second version is for the key-compare-to
-  // specialization and distinguishes exact matches. The key-compare-to
-  // specialization allows the caller to avoid a subsequent comparison to
-  // determine if an exact match was made, which is important for keys with
-  // expensive comparison, such as strings.
+  // reside in the tree, unless there is an exact match - in which case, the
+  // result may not be on a leaf. When there's a three-way comparator, we can
+  // return whether there was an exact match. This allows the caller to avoid a
+  // subsequent comparison to determine if an exact match was made, which is
+  // important for keys with expensive comparison, such as strings.
   template <typename K>
   SearchResult<iterator, is_key_compare_to::value> internal_locate(
       const K &key) const;

-  template <typename K>
-  SearchResult<iterator, false> internal_locate_impl(
-      const K &key, std::false_type /* IsCompareTo */) const;
-
-  template <typename K>
-  SearchResult<iterator, true> internal_locate_impl(
-      const K &key, std::true_type /* IsCompareTo */) const;
-
   // Internal routine which implements lower_bound().
   template <typename K>
-  iterator internal_lower_bound(const K &key) const;
+  SearchResult<iterator, is_key_compare_to::value> internal_lower_bound(
+      const K &key) const;

   // Internal routine which implements upper_bound().
   template <typename K>
@@ -1503,13 +1543,6 @@ class btree {
     return res;
   }

- public:
-  // Exposed only for tests.
-  static bool testonly_uses_linear_node_search() {
-    return node_type::testonly_uses_linear_node_search();
-  }
-
- private:
   // We use compressed tuple in order to save space because key_compare and
   // allocator_type are usually empty.
   absl::container_internal::CompressedTuple<key_compare, allocator_type,
@@ -1665,7 +1698,7 @@ template <typename P>
 void btree_node<P>::split(const int insert_position, btree_node *dest,
                           allocator_type *alloc) {
   assert(dest->count() == 0);
-  assert(max_count() == kNodeValues);
+  assert(max_count() == kNodeSlots);

   // We bias the split based on the position being inserted. If we're
   // inserting at the beginning of the left node then bias the split to put
@@ -1673,7 +1706,7 @@ void btree_node<P>::split(const int insert_position, btree_node *dest,
   // right node then bias the split to put more values on the left node.
   if (insert_position == start()) {
     dest->set_finish(dest->start() + finish() - 1);
-  } else if (insert_position == kNodeValues) {
+  } else if (insert_position == kNodeSlots) {
     dest->set_finish(dest->start());
   } else {
     dest->set_finish(dest->start() + count() / 2);
@@ -1744,7 +1777,7 @@ void btree_node<P>::clear_and_delete(btree_node *node, allocator_type *alloc) {
   // Navigate to the leftmost leaf under node, and then delete upwards.
   while (!node->leaf()) node = node->start_child();

-  // Use `int` because `pos` needs to be able to hold `kNodeValues+1`, which
+  // Use `int` because `pos` needs to be able to hold `kNodeSlots+1`, which
   // isn't guaranteed to be a valid `field_type`.
   int pos = node->position();
   btree_node *parent = node->parent();
@@ -1832,7 +1865,7 @@ void btree_iterator<N, R, P>::decrement_slow() {
 // btree methods
 template <typename P>
 template <typename Btree>
-void btree<P>::copy_or_move_values_in_order(Btree *other) {
+void btree<P>::copy_or_move_values_in_order(Btree &other) {
   static_assert(std::is_same<btree, Btree>::value ||
                     std::is_same<const btree, Btree>::value,
                 "Btree type must be same or const.");
@@ -1840,11 +1873,11 @@ void btree<P>::copy_or_move_values_in_order(Btree *other) {
   // We can avoid key comparisons because we know the order of the
   // values is the same order we'll store them in.
-  auto iter = other->begin();
-  if (iter == other->end()) return;
+  auto iter = other.begin();
+  if (iter == other.end()) return;
   insert_multi(maybe_move_from_iterator(iter));
   ++iter;
-  for (; iter != other->end(); ++iter) {
+  for (; iter != other.end(); ++iter) {
     // If the btree is not empty, we can just insert the new value at the end
     // of the tree.
     internal_emplace(end(), maybe_move_from_iterator(iter));
@@ -1863,7 +1896,7 @@ constexpr bool btree<P>::static_assert_validation() {
   // Note: We assert that kTargetValues, which is computed from
   // Params::kTargetNodeSize, must fit the node_type::field_type.
   static_assert(
-      kNodeValues < (1 << (8 * sizeof(typename node_type::field_type))),
+      kNodeSlots < (1 << (8 * sizeof(typename node_type::field_type))),
       "target node size too large");

   // Verify that key_compare returns an absl::{weak,strong}_ordering or bool.
@@ -1883,31 +1916,29 @@ constexpr bool btree<P>::static_assert_validation() {
 }

 template <typename P>
-btree<P>::btree(const key_compare &comp, const allocator_type &alloc)
-    : root_(comp, alloc, EmptyNode()), rightmost_(EmptyNode()), size_(0) {}
-
-template <typename P>
-btree<P>::btree(const btree &other)
-    : btree(other.key_comp(), other.allocator()) {
-  copy_or_move_values_in_order(&other);
+template <typename K>
+auto btree<P>::lower_bound_equal(const K &key) const
+    -> std::pair<iterator, bool> {
+  const SearchResult<iterator, is_key_compare_to::value> res =
+      internal_lower_bound(key);
+  const iterator lower = iterator(internal_end(res.value));
+  const bool equal = res.HasMatch()
+                         ? res.IsEq()
+                         : lower != end() && !compare_keys(key, lower.key());
+  return {lower, equal};
 }

 template <typename P>
 template <typename K>
 auto btree<P>::equal_range(const K &key) -> std::pair<iterator, iterator> {
-  const iterator lower = lower_bound(key);
-  // TODO(ezb): we should be able to avoid this comparison when there's a
-  // three-way comparator.
-  if (lower == end() || compare_keys(key, lower.key())) return {lower, lower};
+  const std::pair<iterator, bool> lower_and_equal = lower_bound_equal(key);
+  const iterator lower = lower_and_equal.first;
+  if (!lower_and_equal.second) {
+    return {lower, lower};
+  }

   const iterator next = std::next(lower);
-  // When the comparator is heterogeneous, we can't assume that comparison with
-  // non-`key_type` will be equivalent to `key_type` comparisons so there
-  // could be multiple equivalent keys even in a unique-container. But for
-  // heterogeneous comparisons from the default string adapted comparators, we
-  // don't need to worry about this.
-  if (!is_multi_container::value &&
-      (std::is_same<K, key_type>::value || is_key_compare_adapted::value)) {
+  if (!params_type::template can_have_multiple_equivalent_keys<K>()) {
     // The next iterator after lower must point to a key greater than `key`.
     // Note: if this assert fails, then it may indicate that the comparator does
     // not meet the equivalence requirements for Compare
@@ -1918,7 +1949,7 @@ auto btree<P>::equal_range(const K &key) -> std::pair<iterator, iterator> {
   // Try once more to avoid the call to upper_bound() if there's only one
   // equivalent key. This should prevent all calls to upper_bound() in cases of
   // unique-containers with heterogeneous comparators in which all comparison
-  // operators are equivalent.
+  // operators have the same equivalence classes.
   if (next == end() || compare_keys(key, next.key())) return {lower, next};

   // In this case, we need to call upper_bound() to avoid worst case O(N)
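From the caller's side, lower_bound_equal means equal_range usually costs one tree descent plus at most one extra comparison. A usage sketch (behavior, not implementation):

    #include <cstdio>
    #include <string>

    #include "absl/container/btree_map.h"

    int main() {
      absl::btree_multimap<std::string, int> m = {
          {"a", 1}, {"b", 2}, {"b", 3}, {"c", 4}};
      // One descent locates the first "b" and already reports that an exact
      // match exists; no second key comparison is needed before looking for
      // the end of the range.
      auto range = m.equal_range("b");
      for (auto it = range.first; it != range.second; ++it) {
        std::printf("%d\n", it->second);
      }
    }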
@@ -1934,8 +1965,8 @@ auto btree<P>::insert_unique(const K &key, Args &&... args)
     mutable_root() = rightmost_ = new_leaf_root_node(1);
   }

-  auto res = internal_locate(key);
-  iterator &iter = res.value;
+  SearchResult<iterator, is_key_compare_to::value> res = internal_locate(key);
+  iterator iter = res.value;

   if (res.HasMatch()) {
     if (res.IsEq()) {
@@ -2049,7 +2080,7 @@ auto btree<P>::operator=(const btree &other) -> btree & {
       *mutable_allocator() = other.allocator();
     }

-    copy_or_move_values_in_order(&other);
+    copy_or_move_values_in_order(other);
   }
   return *this;
 }
@@ -2079,7 +2110,7 @@ auto btree<P>::operator=(btree &&other) noexcept -> btree & {
       // comparator while moving the values so we can't swap the key
       // comparators.
       *mutable_key_comp() = other.key_comp();
-      copy_or_move_values_in_order(&other);
+      copy_or_move_values_in_order(other);
     }
   }
 }
@@ -2202,31 +2233,6 @@ auto btree<P>::erase_range(iterator begin, iterator end)
   return {count, begin};
 }

-template <typename P>
-template <typename K>
-auto btree<P>::erase_unique(const K &key) -> size_type {
-  const iterator iter = internal_find(key);
-  if (iter.node == nullptr) {
-    // The key doesn't exist in the tree, return nothing done.
-    return 0;
-  }
-  erase(iter);
-  return 1;
-}
-
-template <typename P>
-template <typename K>
-auto btree<P>::erase_multi(const K &key) -> size_type {
-  const iterator begin = internal_lower_bound(key);
-  if (begin.node == nullptr) {
-    // The key doesn't exist in the tree, return nothing done.
-    return 0;
-  }
-  // Delete all of the keys between begin and upper_bound(key).
-  const iterator end = internal_end(internal_upper_bound(key));
-  return erase_range(begin, end).first;
-}
-
 template <typename P>
 void btree<P>::clear() {
   if (!empty()) {
@@ -2271,7 +2277,7 @@ void btree<P>::rebalance_or_split(iterator *iter) {
   node_type *&node = iter->node;
   int &insert_position = iter->position;
   assert(node->count() == node->max_count());
-  assert(kNodeValues == node->max_count());
+  assert(kNodeSlots == node->max_count());

   // First try to make room on the node by rebalancing.
   node_type *parent = node->parent();
@@ -2279,17 +2285,17 @@ void btree<P>::rebalance_or_split(iterator *iter) {
     if (node->position() > parent->start()) {
       // Try rebalancing with our left sibling.
       node_type *left = parent->child(node->position() - 1);
-      assert(left->max_count() == kNodeValues);
-      if (left->count() < kNodeValues) {
+      assert(left->max_count() == kNodeSlots);
+      if (left->count() < kNodeSlots) {
         // We bias rebalancing based on the position being inserted. If we're
         // inserting at the end of the right node then we bias rebalancing to
         // fill up the left node.
-        int to_move = (kNodeValues - left->count()) /
-                      (1 + (insert_position < kNodeValues));
+        int to_move = (kNodeSlots - left->count()) /
+                      (1 + (insert_position < static_cast<int>(kNodeSlots)));
         to_move = (std::max)(1, to_move);

         if (insert_position - to_move >= node->start() ||
-            left->count() + to_move < kNodeValues) {
+            left->count() + to_move < static_cast<int>(kNodeSlots)) {
           left->rebalance_right_to_left(to_move, node, mutable_allocator());

           assert(node->max_count() - node->count() == to_move);
@@ -2308,17 +2314,17 @@ void btree<P>::rebalance_or_split(iterator *iter) {
     if (node->position() < parent->finish()) {
       // Try rebalancing with our right sibling.
       node_type *right = parent->child(node->position() + 1);
-      assert(right->max_count() == kNodeValues);
-      if (right->count() < kNodeValues) {
+      assert(right->max_count() == kNodeSlots);
+      if (right->count() < kNodeSlots) {
         // We bias rebalancing based on the position being inserted. If we're
         // inserting at the beginning of the left node then we bias rebalancing
         // to fill up the right node.
-        int to_move = (kNodeValues - right->count()) /
+        int to_move = (static_cast<int>(kNodeSlots) - right->count()) /
                       (1 + (insert_position > node->start()));
         to_move = (std::max)(1, to_move);

         if (insert_position <= node->finish() - to_move ||
-            right->count() + to_move < kNodeValues) {
+            right->count() + to_move < static_cast<int>(kNodeSlots)) {
           node->rebalance_left_to_right(to_move, right, mutable_allocator());

           if (insert_position > node->finish()) {
@@ -2334,8 +2340,8 @@ void btree<P>::rebalance_or_split(iterator *iter) {
   // Rebalancing failed, make sure there is room on the parent node for a new
   // value.
-  assert(parent->max_count() == kNodeValues);
-  if (parent->count() == kNodeValues) {
+  assert(parent->max_count() == kNodeSlots);
+  if (parent->count() == kNodeSlots) {
     iterator parent_iter(node->parent(), node->position());
     rebalance_or_split(&parent_iter);
   }
@@ -2380,8 +2386,8 @@ bool btree<P>::try_merge_or_rebalance(iterator *iter) {
   if (iter->node->position() > parent->start()) {
     // Try merging with our left sibling.
     node_type *left = parent->child(iter->node->position() - 1);
-    assert(left->max_count() == kNodeValues);
-    if (1 + left->count() + iter->node->count() <= kNodeValues) {
+    assert(left->max_count() == kNodeSlots);
+    if (1U + left->count() + iter->node->count() <= kNodeSlots) {
       iter->position += 1 + left->count();
       merge_nodes(left, iter->node);
       iter->node = left;
@@ -2391,8 +2397,8 @@ bool btree<P>::try_merge_or_rebalance(iterator *iter) {
   if (iter->node->position() < parent->finish()) {
     // Try merging with our right sibling.
     node_type *right = parent->child(iter->node->position() + 1);
-    assert(right->max_count() == kNodeValues);
-    if (1 + iter->node->count() + right->count() <= kNodeValues) {
+    assert(right->max_count() == kNodeSlots);
+    if (1U + iter->node->count() + right->count() <= kNodeSlots) {
       merge_nodes(iter->node, right);
       return true;
     }
@@ -2473,12 +2479,12 @@ inline auto btree<P>::internal_emplace(iterator iter, Args &&... args)
   allocator_type *alloc = mutable_allocator();
   if (iter.node->count() == max_count) {
     // Make room in the leaf for the new item.
-    if (max_count < kNodeValues) {
+    if (max_count < kNodeSlots) {
       // Insertion into the root where the root is smaller than the full node
       // size. Simply grow the size of the root node.
       assert(iter.node == root());
       iter.node =
-          new_leaf_root_node((std::min<int>)(kNodeValues, 2 * max_count));
+          new_leaf_root_node((std::min<int>)(kNodeSlots, 2 * max_count));
       // Transfer the values from the old root to the new root.
       node_type *old_root = root();
       node_type *new_root = iter.node;
@@ -2501,61 +2507,51 @@ template <typename P>
 template <typename K>
 inline auto btree<P>::internal_locate(const K &key) const
     -> SearchResult<iterator, is_key_compare_to::value> {
-  return internal_locate_impl(key, is_key_compare_to());
-}
-
-template <typename P>
-template <typename K>
-inline auto btree<P>::internal_locate_impl(
-    const K &key, std::false_type /* IsCompareTo */) const
-    -> SearchResult<iterator, false> {
-  iterator iter(const_cast<node_type *>(root()));
-  for (;;) {
-    iter.position = iter.node->lower_bound(key, key_comp()).value;
-    // NOTE: we don't need to walk all the way down the tree if the keys are
-    // equal, but determining equality would require doing an extra comparison
-    // on each node on the way down, and we will need to go all the way to the
-    // leaf node in the expected case.
-    if (iter.node->leaf()) {
-      break;
-    }
-    iter.node = iter.node->child(iter.position);
-  }
-  return {iter};
-}
-
-template <typename P>
-template <typename K>
-inline auto btree<P>::internal_locate_impl(
-    const K &key, std::true_type /* IsCompareTo */) const
-    -> SearchResult<iterator, true> {
   iterator iter(const_cast<node_type *>(root()));
   for (;;) {
-    SearchResult<int, true> res = iter.node->lower_bound(key, key_comp());
+    SearchResult<int, is_key_compare_to::value> res =
+        iter.node->lower_bound(key, key_comp());
     iter.position = res.value;
-    if (res.match == MatchKind::kEq) {
+    if (res.IsEq()) {
       return {iter, MatchKind::kEq};
     }
+    // Note: in the non-key-compare-to case, we don't need to walk all the way
+    // down the tree if the keys are equal, but determining equality would
+    // require doing an extra comparison on each node on the way down, and we
+    // will need to go all the way to the leaf node in the expected case.
     if (iter.node->leaf()) {
      break;
    }
    iter.node = iter.node->child(iter.position);
  }
+  // Note: in the non-key-compare-to case, the key may actually be equivalent
+  // here (and the MatchKind::kNe is ignored).
   return {iter, MatchKind::kNe};
 }

 template <typename P>
 template <typename K>
-auto btree<P>::internal_lower_bound(const K &key) const -> iterator {
+auto btree<P>::internal_lower_bound(const K &key) const
+    -> SearchResult<iterator, is_key_compare_to::value> {
+  if (!params_type::template can_have_multiple_equivalent_keys<K>()) {
+    SearchResult<iterator, is_key_compare_to::value> ret = internal_locate(key);
+    ret.value = internal_last(ret.value);
+    return ret;
+  }
   iterator iter(const_cast<node_type *>(root()));
+  SearchResult<int, is_key_compare_to::value> res;
+  bool seen_eq = false;
   for (;;) {
-    iter.position = iter.node->lower_bound(key, key_comp()).value;
+    res = iter.node->lower_bound(key, key_comp());
+    iter.position = res.value;
     if (iter.node->leaf()) {
       break;
     }
+    seen_eq = seen_eq || res.IsEq();
     iter.node = iter.node->child(iter.position);
   }
-  return internal_last(iter);
+  if (res.IsEq()) return {iter, MatchKind::kEq};
+  return {internal_last(iter), seen_eq ? MatchKind::kEq : MatchKind::kNe};
 }

 template <typename P>
@@ -2575,7 +2571,7 @@ auto btree<P>::internal_upper_bound(const K &key) const -> iterator {
 template <typename P>
 template <typename K>
 auto btree<P>::internal_find(const K &key) const -> iterator {
-  auto res = internal_locate(key);
+  SearchResult<iterator, is_key_compare_to::value> res = internal_locate(key);
   if (res.HasMatch()) {
     if (res.IsEq()) {
       return res.value;

@@ -23,6 +23,7 @@
 #include "absl/base/internal/throw_delegate.h"
 #include "absl/container/internal/btree.h"  // IWYU pragma: export
 #include "absl/container/internal/common.h"
+#include "absl/memory/memory.h"
 #include "absl/meta/type_traits.h"

 namespace absl {
@@ -68,8 +69,21 @@ class btree_container {
   explicit btree_container(const key_compare &comp,
                            const allocator_type &alloc = allocator_type())
       : tree_(comp, alloc) {}
-  btree_container(const btree_container &other) = default;
-  btree_container(btree_container &&other) noexcept = default;
+  explicit btree_container(const allocator_type &alloc)
+      : tree_(key_compare(), alloc) {}
+
+  btree_container(const btree_container &other)
+      : btree_container(other, absl::allocator_traits<allocator_type>::
+                                   select_on_container_copy_construction(
+                                       other.get_allocator())) {}
+  btree_container(const btree_container &other, const allocator_type &alloc)
+      : tree_(other.tree_, alloc) {}
+
+  btree_container(btree_container &&other) noexcept(
+      std::is_nothrow_move_constructible<Tree>::value) = default;
+  btree_container(btree_container &&other, const allocator_type &alloc)
+      : tree_(std::move(other.tree_), alloc) {}
+
   btree_container &operator=(const btree_container &other) = default;
   btree_container &operator=(btree_container &&other) noexcept(
       std::is_nothrow_move_assignable<Tree>::value) = default;
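The copy constructor above now consults select_on_container_copy_construction (SOCCC), matching the standard containers; absl::allocator_traits mirrors std::allocator_traits here. SOCCC lets a stateful allocator decide which allocator a copy should start with. A sketch of an allocator that opts out of sharing its state (hypothetical, not a real abseil type):

    #include <cstddef>
    #include <memory>

    template <typename T>
    struct PoolAlloc {
      using value_type = T;
      PoolAlloc() = default;
      template <typename U>
      PoolAlloc(const PoolAlloc<U> &) {}
      T *allocate(std::size_t n) { return std::allocator<T>().allocate(n); }
      void deallocate(T *p, std::size_t n) {
        std::allocator<T>().deallocate(p, n);
      }
      // Copies of a container using this allocator start with a fresh
      // default-constructed allocator instead of sharing this one's pool.
      PoolAlloc select_on_container_copy_construction() const {
        return PoolAlloc();
      }
    };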
@@ -90,6 +104,11 @@ class btree_container {
   // Lookup routines.
+  template <typename K = key_type>
+  size_type count(const key_arg<K> &key) const {
+    auto equal_range = this->equal_range(key);
+    return std::distance(equal_range.first, equal_range.second);
+  }
   template <typename K = key_type>
   iterator find(const key_arg<K> &key) {
     return tree_.find(key);
   }
@@ -138,6 +157,11 @@ class btree_container {
   iterator erase(const_iterator first, const_iterator last) {
     return tree_.erase_range(iterator(first), iterator(last)).second;
   }
+  template <typename K = key_type>
+  size_type erase(const key_arg<K> &key) {
+    auto equal_range = this->equal_range(key);
+    return tree_.erase_range(equal_range.first, equal_range.second).first;
+  }
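With count() and erase(key) defined once here in terms of equal_range, the unique- and multi-container subclasses no longer need the count_unique/count_multi and erase_unique/erase_multi plumbing deleted further down. Observable behavior is unchanged:

    #include <cassert>

    #include "absl/container/btree_set.h"

    int main() {
      absl::btree_multiset<int> s = {1, 2, 2, 2, 3};
      assert(s.count(2) == 3);  // std::distance over equal_range(2)
      assert(s.erase(2) == 3);  // erase_range over equal_range(2)
      assert(s.size() == 2);
      return 0;
    }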
   // Extract routines.
   node_type extract(iterator position) {
@@ -151,7 +175,6 @@ class btree_container {
     return extract(iterator(position));
   }

- public:
   // Utility routines.
   void clear() { tree_.clear(); }
   void swap(btree_container &other) { tree_.swap(other.tree_); }
@@ -235,7 +258,7 @@ class btree_set_container : public btree_container<Tree> {
   using super_type::super_type;
   btree_set_container() {}

-  // Range constructor.
+  // Range constructors.
   template <class InputIterator>
   btree_set_container(InputIterator b, InputIterator e,
                       const key_compare &comp = key_compare(),
@@ -243,18 +266,19 @@ class btree_set_container : public btree_container<Tree> {
       : super_type(comp, alloc) {
     insert(b, e);
   }
+  template <class InputIterator>
+  btree_set_container(InputIterator b, InputIterator e,
+                      const allocator_type &alloc)
+      : btree_set_container(b, e, key_compare(), alloc) {}

-  // Initializer list constructor.
+  // Initializer list constructors.
   btree_set_container(std::initializer_list<init_type> init,
                       const key_compare &comp = key_compare(),
                       const allocator_type &alloc = allocator_type())
       : btree_set_container(init.begin(), init.end(), comp, alloc) {}
-
-  // Lookup routines.
-  template <typename K = key_type>
-  size_type count(const key_arg<K> &key) const {
-    return this->tree_.count_unique(key);
-  }
+  btree_set_container(std::initializer_list<init_type> init,
+                      const allocator_type &alloc)
+      : btree_set_container(init.begin(), init.end(), alloc) {}

   // Insertion routines.
   std::pair<iterator, bool> insert(const value_type &v) {
@@ -313,20 +337,13 @@ class btree_set_container : public btree_container<Tree> {
     return res.first;
   }

-  // Deletion routines.
-  // TODO(ezb): we should support heterogeneous comparators that have different
-  // behavior for K!=key_type.
-  template <typename K = key_type>
-  size_type erase(const key_arg<K> &key) {
-    return this->tree_.erase_unique(key);
-  }
-  using super_type::erase;
-
   // Node extraction routines.
   template <typename K = key_type>
   node_type extract(const key_arg<K> &key) {
-    auto it = this->find(key);
-    return it == this->end() ? node_type() : extract(it);
+    const std::pair<iterator, bool> lower_and_equal =
+        this->tree_.lower_bound_equal(key);
+    return lower_and_equal.second ? extract(lower_and_equal.first)
+                                  : node_type();
   }
   using super_type::extract;
@@ -344,7 +361,7 @@ class btree_set_container : public btree_container<Tree> {
                 int> = 0>
   void merge(btree_container<T> &src) {  // NOLINT
     for (auto src_it = src.begin(); src_it != src.end();) {
-      if (insert(std::move(*src_it)).second) {
+      if (insert(std::move(params_type::element(src_it.slot()))).second) {
         src_it = src.erase(src_it);
       } else {
         ++src_it;
@@ -371,6 +388,7 @@ template <typename Tree>
 class btree_map_container : public btree_set_container<Tree> {
   using super_type = btree_set_container<Tree>;
   using params_type = typename Tree::params_type;
+  friend class BtreeNodePeer;

  private:
   template <class K>
@@ -535,7 +553,7 @@ class btree_multiset_container : public btree_container<Tree> {
   using super_type::super_type;
   btree_multiset_container() {}

-  // Range constructor.
+  // Range constructors.
   template <class InputIterator>
   btree_multiset_container(InputIterator b, InputIterator e,
                            const key_compare &comp = key_compare(),
@@ -543,18 +561,19 @@ class btree_multiset_container : public btree_container<Tree> {
       : super_type(comp, alloc) {
     insert(b, e);
   }
+  template <class InputIterator>
+  btree_multiset_container(InputIterator b, InputIterator e,
+                           const allocator_type &alloc)
+      : btree_multiset_container(b, e, key_compare(), alloc) {}

-  // Initializer list constructor.
+  // Initializer list constructors.
   btree_multiset_container(std::initializer_list<init_type> init,
                            const key_compare &comp = key_compare(),
                            const allocator_type &alloc = allocator_type())
       : btree_multiset_container(init.begin(), init.end(), comp, alloc) {}
-
-  // Lookup routines.
-  template <typename K = key_type>
-  size_type count(const key_arg<K> &key) const {
-    return this->tree_.count_multi(key);
-  }
+  btree_multiset_container(std::initializer_list<init_type> init,
+                           const allocator_type &alloc)
+      : btree_multiset_container(init.begin(), init.end(), alloc) {}

   // Insertion routines.
   iterator insert(const value_type &v) { return this->tree_.insert_multi(v); }
@@ -600,18 +619,13 @@ class btree_multiset_container : public btree_container<Tree> {
     return res;
   }

-  // Deletion routines.
-  template <typename K = key_type>
-  size_type erase(const key_arg<K> &key) {
-    return this->tree_.erase_multi(key);
-  }
-  using super_type::erase;
-
   // Node extraction routines.
   template <typename K = key_type>
   node_type extract(const key_arg<K> &key) {
-    auto it = this->find(key);
-    return it == this->end() ? node_type() : extract(it);
+    const std::pair<iterator, bool> lower_and_equal =
+        this->tree_.lower_bound_equal(key);
+    return lower_and_equal.second ? extract(lower_and_equal.first)
+                                  : node_type();
   }
   using super_type::extract;
@@ -627,8 +641,9 @@ class btree_multiset_container : public btree_container<Tree> {
                               typename T::params_type::is_map_container>>::value,
                 int> = 0>
   void merge(btree_container<T> &src) {  // NOLINT
-    insert(std::make_move_iterator(src.begin()),
-           std::make_move_iterator(src.end()));
+    for (auto src_it = src.begin(), end = src.end(); src_it != end; ++src_it) {
+      insert(std::move(params_type::element(src_it.slot())));
+    }
     src.clear();
   }

@@ -257,7 +257,7 @@ class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple
   template <int I>
   ElemT<I>& get() & {
-    return internal_compressed_tuple::Storage<ElemT<I>, I>::get();
+    return StorageT<I>::get();
   }

   template <int I>

@@ -166,7 +166,7 @@ TryDecomposeValue(F&& f, Arg&& arg) {
 }

 TEST(DecomposeValue, Decomposable) {
-  auto f = [](const int& x, int&& y) {
+  auto f = [](const int& x, int&& y) {  // NOLINT
     EXPECT_EQ(&x, &y);
     EXPECT_EQ(42, x);
     return 'A';
@@ -200,7 +200,8 @@ TryDecomposePair(F&& f, Args&&... args) {
 }

 TEST(DecomposePair, Decomposable) {
-  auto f = [](const int& x, std::piecewise_construct_t, std::tuple<int&&> k,
+  auto f = [](const int& x,  // NOLINT
+              std::piecewise_construct_t, std::tuple<int&&> k,
               std::tuple<double>&& v) {
     EXPECT_EQ(&x, &std::get<0>(k));
     EXPECT_EQ(42, x);

@ -72,6 +72,7 @@ void HashtablezInfo::PrepareForSampling() {
total_probe_length.store(0, std::memory_order_relaxed); total_probe_length.store(0, std::memory_order_relaxed);
hashes_bitwise_or.store(0, std::memory_order_relaxed); hashes_bitwise_or.store(0, std::memory_order_relaxed);
hashes_bitwise_and.store(~size_t{}, std::memory_order_relaxed); hashes_bitwise_and.store(~size_t{}, std::memory_order_relaxed);
hashes_bitwise_xor.store(0, std::memory_order_relaxed);
create_time = absl::Now(); create_time = absl::Now();
// The inliner makes hardcoded skip_count difficult (especially when combined // The inliner makes hardcoded skip_count difficult (especially when combined
@ -180,7 +181,9 @@ static bool ShouldForceSampling() {
if (ABSL_PREDICT_TRUE(state == kDontForce)) return false; if (ABSL_PREDICT_TRUE(state == kDontForce)) return false;
if (state == kUninitialized) { if (state == kUninitialized) {
state = AbslContainerInternalSampleEverything() ? kForce : kDontForce; state = ABSL_INTERNAL_C_SYMBOL(AbslContainerInternalSampleEverything)()
? kForce
: kDontForce;
global_state.store(state, std::memory_order_relaxed); global_state.store(state, std::memory_order_relaxed);
} }
return state == kForce; return state == kForce;
@@ -235,6 +238,7 @@ void RecordInsertSlow(HashtablezInfo* info, size_t hash,
   info->hashes_bitwise_and.fetch_and(hash, std::memory_order_relaxed);
   info->hashes_bitwise_or.fetch_or(hash, std::memory_order_relaxed);
+  info->hashes_bitwise_xor.fetch_xor(hash, std::memory_order_relaxed);
   info->max_probe_length.store(
       std::max(info->max_probe_length.load(std::memory_order_relaxed),
                probe_length),

@@ -78,6 +78,7 @@ struct HashtablezInfo {
   std::atomic<size_t> total_probe_length;
   std::atomic<size_t> hashes_bitwise_or;
   std::atomic<size_t> hashes_bitwise_and;
+  std::atomic<size_t> hashes_bitwise_xor;

   // `HashtablezSampler` maintains intrusive linked lists for all samples. See
   // comments on `HashtablezSampler::all_` for details on these. `init_mu`
@@ -312,7 +313,7 @@ void SetHashtablezMaxSamples(int32_t max);
 // initialization of static storage duration objects.
 // The definition of this constant is weak, which allows us to inject a
 // different value for it at link time.
-extern "C" bool AbslContainerInternalSampleEverything();
+extern "C" bool ABSL_INTERNAL_C_SYMBOL(AbslContainerInternalSampleEverything)();

 }  // namespace container_internal
 ABSL_NAMESPACE_END

@@ -21,7 +21,8 @@ ABSL_NAMESPACE_BEGIN
 namespace container_internal {

 // See hashtablez_sampler.h for details.
-extern "C" ABSL_ATTRIBUTE_WEAK bool AbslContainerInternalSampleEverything() {
+extern "C" ABSL_ATTRIBUTE_WEAK bool ABSL_INTERNAL_C_SYMBOL(
+    AbslContainerInternalSampleEverything)() {
   return false;
 }

@@ -89,6 +89,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
   EXPECT_EQ(info.total_probe_length.load(), 0);
   EXPECT_EQ(info.hashes_bitwise_or.load(), 0);
   EXPECT_EQ(info.hashes_bitwise_and.load(), ~size_t{});
+  EXPECT_EQ(info.hashes_bitwise_xor.load(), 0);
   EXPECT_GE(info.create_time, test_start);

   info.capacity.store(1, std::memory_order_relaxed);
@@ -98,6 +99,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
   info.total_probe_length.store(1, std::memory_order_relaxed);
   info.hashes_bitwise_or.store(1, std::memory_order_relaxed);
   info.hashes_bitwise_and.store(1, std::memory_order_relaxed);
+  info.hashes_bitwise_xor.store(1, std::memory_order_relaxed);
   info.create_time = test_start - absl::Hours(20);

   info.PrepareForSampling();
@@ -109,6 +111,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
   EXPECT_EQ(info.total_probe_length.load(), 0);
   EXPECT_EQ(info.hashes_bitwise_or.load(), 0);
   EXPECT_EQ(info.hashes_bitwise_and.load(), ~size_t{});
+  EXPECT_EQ(info.hashes_bitwise_xor.load(), 0);
   EXPECT_GE(info.create_time, test_start);
 }
@@ -133,14 +136,17 @@ TEST(HashtablezInfoTest, RecordInsert) {
   EXPECT_EQ(info.max_probe_length.load(), 6);
   EXPECT_EQ(info.hashes_bitwise_and.load(), 0x0000FF00);
   EXPECT_EQ(info.hashes_bitwise_or.load(), 0x0000FF00);
+  EXPECT_EQ(info.hashes_bitwise_xor.load(), 0x0000FF00);
   RecordInsertSlow(&info, 0x000FF000, 4 * kProbeLength);
   EXPECT_EQ(info.max_probe_length.load(), 6);
   EXPECT_EQ(info.hashes_bitwise_and.load(), 0x0000F000);
   EXPECT_EQ(info.hashes_bitwise_or.load(), 0x000FFF00);
+  EXPECT_EQ(info.hashes_bitwise_xor.load(), 0x000F0F00);
   RecordInsertSlow(&info, 0x00FF0000, 12 * kProbeLength);
   EXPECT_EQ(info.max_probe_length.load(), 12);
   EXPECT_EQ(info.hashes_bitwise_and.load(), 0x00000000);
   EXPECT_EQ(info.hashes_bitwise_or.load(), 0x00FFFF00);
+  EXPECT_EQ(info.hashes_bitwise_xor.load(), 0x00F00F00);
 }

 TEST(HashtablezInfoTest, RecordErase) {
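The three hashes_bitwise_* accumulators are a cheap fingerprint of hash
quality: AND keeps the bits set in every sampled hash, OR the bits set in any,
and the newly added XOR the per-bit parity, which helps distinguish a
genuinely varying bit from one that flipped an even number of times. A
standalone sketch that reproduces the numbers asserted in the test above
(HashBitSummary is our name, not Abseil's):

    #include <cstddef>
    #include <cstdio>

    struct HashBitSummary {
      size_t bits_and = ~size_t{};  // all-ones, like PrepareForSampling()
      size_t bits_or = 0;
      size_t bits_xor = 0;
      void Add(size_t hash) {
        bits_and &= hash;
        bits_or |= hash;
        bits_xor ^= hash;
      }
    };

    int main() {
      HashBitSummary s;
      for (size_t h : {0x0000FF00u, 0x000FF000u, 0x00FF0000u}) s.Add(h);
      // Prints and=0 or=ffff00 xor=f00f00, matching the final EXPECT_EQs.
      std::printf("and=%zx or=%zx xor=%zx\n", s.bits_and, s.bits_or,
                  s.bits_xor);
    }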

@@ -33,6 +33,12 @@ namespace absl {
 ABSL_NAMESPACE_BEGIN
 namespace inlined_vector_internal {

+// GCC does not deal very well with the below code
+#if !defined(__clang__) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
+#endif
+
 template <typename Iterator>
 using IsAtLeastForwardIterator = std::is_convertible<
     typename std::iterator_traits<Iterator>::iterator_category,
@@ -75,6 +81,23 @@ void DestroyElements(AllocatorType* alloc_ptr, Pointer destroy_first,
   }
 }

+// If kUseMemcpy is true, memcpy(dst, src, n); else do nothing.
+// Useful to avoid compiler warnings when memcpy() is used for T values
+// that are not trivially copyable in non-reachable code.
+template <bool kUseMemcpy>
+inline void MemcpyIfAllowed(void* dst, const void* src, size_t n);
+
+// memcpy when allowed.
+template <>
+inline void MemcpyIfAllowed<true>(void* dst, const void* src, size_t n) {
+  memcpy(dst, src, n);
+}
+
+// Do nothing for types that are not memcpy-able. This function is only
+// called from non-reachable branches.
+template <>
+inline void MemcpyIfAllowed<false>(void*, const void*, size_t) {}
+
 template <typename AllocatorType, typename Pointer, typename ValueAdapter,
           typename SizeType>
 void ConstructElements(AllocatorType* alloc_ptr, Pointer construct_first,
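MemcpyIfAllowed exists because this header still builds as C++11/14, where
both arms of an ordinary if are compiled even when one is unreachable, so a
direct memcpy() on a non-trivially-copyable T would at minimum draw warnings.
Routing the call through a <bool> specialization means the memcpy() only
exists in instantiations where it is legal. A self-contained usage sketch
(CopyN is our name, not part of Abseil):

    #include <cstddef>
    #include <cstring>
    #include <type_traits>

    template <bool kUseMemcpy>
    void MemcpyIfAllowed(void* dst, const void* src, size_t n);

    template <>
    void MemcpyIfAllowed<true>(void* dst, const void* src, size_t n) {
      std::memcpy(dst, src, n);
    }

    template <>
    void MemcpyIfAllowed<false>(void*, const void*, size_t) {}

    template <typename T>
    void CopyN(T* dst, const T* src, size_t n) {
      if (std::is_trivially_copyable<T>::value) {
        // Dead for non-trivial T, but still compiled: the <false>
        // specialization keeps memcpy() out of those instantiations.
        MemcpyIfAllowed<std::is_trivially_copyable<T>::value>(dst, src,
                                                              sizeof(T) * n);
      } else {
        for (size_t i = 0; i != n; ++i) dst[i] = src[i];
      }
    }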
@@ -298,14 +321,20 @@ class Storage {
   // Storage Constructors and Destructor
   // ---------------------------------------------------------------------------

-  Storage() : metadata_() {}
-  explicit Storage(const allocator_type& alloc) : metadata_(alloc, {}) {}
+  Storage() : metadata_(allocator_type(), /* size and is_allocated */ 0) {}
+
+  explicit Storage(const allocator_type& alloc)
+      : metadata_(alloc, /* size and is_allocated */ 0) {}

   ~Storage() {
-    pointer data = GetIsAllocated() ? GetAllocatedData() : GetInlinedData();
-    inlined_vector_internal::DestroyElements(GetAllocPtr(), data, GetSize());
-    DeallocateIfAllocated();
+    if (GetSizeAndIsAllocated() == 0) {
+      // Empty and not allocated; nothing to do.
+    } else if (IsMemcpyOk::value) {
+      // No destructors need to be run; just deallocate if necessary.
+      DeallocateIfAllocated();
+    } else {
+      DestroyContents();
+    }
   }

   // ---------------------------------------------------------------------------
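The destructor now checks the two cheap cases inline -- empty storage, and
element types that need no destructor calls -- and only falls back to the
out-of-line DestroyContents() (declared ABSL_ATTRIBUTE_NOINLINE below) for
types that really need per-element teardown. A compressed sketch of the same
layering with a hypothetical container (SmallBuffer is our name):

    #include <cstddef>
    #include <new>
    #include <type_traits>

    template <typename T>
    struct SmallBuffer {
      T* data = nullptr;
      size_t size = 0;

      ~SmallBuffer() {
        if (size == 0) return;  // common case: nothing to do
        if (!std::is_trivially_destructible<T>::value) DestroySlow();
        operator delete(data);
      }

      // Kept out of line so every destructor call site stays small.
      __attribute__((noinline)) void DestroySlow() {
        for (size_t i = size; i != 0; --i) data[i - 1].~T();
      }
    };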
@@ -363,6 +392,8 @@ class Storage {
   // Storage Member Mutators
   // ---------------------------------------------------------------------------

+  ABSL_ATTRIBUTE_NOINLINE void InitFrom(const Storage& other);
+
   template <typename ValueAdapter>
   void Initialize(ValueAdapter values, size_type new_size);
@@ -445,6 +476,8 @@ class Storage {
   }

  private:
+  ABSL_ATTRIBUTE_NOINLINE void DestroyContents();
+
   using Metadata =
       container_internal::CompressedTuple<allocator_type, size_type>;
@@ -462,10 +495,47 @@ class Storage {
     Inlined inlined;
   };

+  template <typename... Args>
+  ABSL_ATTRIBUTE_NOINLINE reference EmplaceBackSlow(Args&&... args);
+
   Metadata metadata_;
   Data data_;
 };

+template <typename T, size_t N, typename A>
+void Storage<T, N, A>::DestroyContents() {
+  pointer data = GetIsAllocated() ? GetAllocatedData() : GetInlinedData();
+  inlined_vector_internal::DestroyElements(GetAllocPtr(), data, GetSize());
+  DeallocateIfAllocated();
+}
+
+template <typename T, size_t N, typename A>
+void Storage<T, N, A>::InitFrom(const Storage& other) {
+  const auto n = other.GetSize();
+  assert(n > 0);  // Empty sources are handled in the caller.
+  const_pointer src;
+  pointer dst;
+  if (!other.GetIsAllocated()) {
+    dst = GetInlinedData();
+    src = other.GetInlinedData();
+  } else {
+    // Because this is only called from the `InlinedVector` constructors, it's
+    // safe to take on the allocation with size `0`. If `ConstructElements(...)`
+    // throws, deallocation will be automatically handled by `~Storage()`.
+    size_type new_capacity = ComputeCapacity(GetInlinedCapacity(), n);
+    dst = AllocatorTraits::allocate(*GetAllocPtr(), new_capacity);
+    SetAllocatedData(dst, new_capacity);
+    src = other.GetAllocatedData();
+  }
+  if (IsMemcpyOk::value) {
+    MemcpyIfAllowed<IsMemcpyOk::value>(dst, src, sizeof(dst[0]) * n);
+  } else {
+    auto values = IteratorValueAdapter<const_pointer>(src);
+    inlined_vector_internal::ConstructElements(GetAllocPtr(), dst, &values, n);
+  }
+  GetSizeAndIsAllocated() = other.GetSizeAndIsAllocated();
+}
+
 template <typename T, size_t N, typename A>
 template <typename ValueAdapter>
 auto Storage<T, N, A>::Initialize(ValueAdapter values, size_type new_size)
@@ -542,48 +612,42 @@ template <typename T, size_t N, typename A>
 template <typename ValueAdapter>
 auto Storage<T, N, A>::Resize(ValueAdapter values, size_type new_size) -> void {
   StorageView storage_view = MakeStorageView();
-
-  IteratorValueAdapter<MoveIterator> move_values(
-      MoveIterator(storage_view.data));
-
-  AllocationTransaction allocation_tx(GetAllocPtr());
-  ConstructionTransaction construction_tx(GetAllocPtr());
-
-  absl::Span<value_type> construct_loop;
-  absl::Span<value_type> move_construct_loop;
-  absl::Span<value_type> destroy_loop;
-
-  if (new_size > storage_view.capacity) {
+  auto* const base = storage_view.data;
+  const size_type size = storage_view.size;
+  auto* alloc = GetAllocPtr();
+  if (new_size <= size) {
+    // Destroy extra old elements.
+    inlined_vector_internal::DestroyElements(alloc, base + new_size,
+                                             size - new_size);
+  } else if (new_size <= storage_view.capacity) {
+    // Construct new elements in place.
+    inlined_vector_internal::ConstructElements(alloc, base + size, &values,
+                                               new_size - size);
+  } else {
+    // Steps:
+    //  a. Allocate new backing store.
+    //  b. Construct new elements in new backing store.
+    //  c. Move existing elements from old backing store to new backing store.
+    //  d. Destroy all elements in old backing store.
+    // Use transactional wrappers for the first two steps so we can roll
+    // back if necessary due to exceptions.
+    AllocationTransaction allocation_tx(alloc);
     size_type new_capacity = ComputeCapacity(storage_view.capacity, new_size);
     pointer new_data = allocation_tx.Allocate(new_capacity);
-    construct_loop = {new_data + storage_view.size,
-                      new_size - storage_view.size};
-    move_construct_loop = {new_data, storage_view.size};
-    destroy_loop = {storage_view.data, storage_view.size};
-  } else if (new_size > storage_view.size) {
-    construct_loop = {storage_view.data + storage_view.size,
-                      new_size - storage_view.size};
-  } else {
-    destroy_loop = {storage_view.data + new_size, storage_view.size - new_size};
-  }
-
-  construction_tx.Construct(construct_loop.data(), &values,
-                            construct_loop.size());
-
-  inlined_vector_internal::ConstructElements(
-      GetAllocPtr(), move_construct_loop.data(), &move_values,
-      move_construct_loop.size());
-
-  inlined_vector_internal::DestroyElements(GetAllocPtr(), destroy_loop.data(),
-                                           destroy_loop.size());
-
-  construction_tx.Commit();
-  if (allocation_tx.DidAllocate()) {
+    ConstructionTransaction construction_tx(alloc);
+    construction_tx.Construct(new_data + size, &values, new_size - size);
+    IteratorValueAdapter<MoveIterator> move_values((MoveIterator(base)));
+    inlined_vector_internal::ConstructElements(alloc, new_data, &move_values,
+                                               size);
+    inlined_vector_internal::DestroyElements(alloc, base, size);
+    construction_tx.Commit();
     DeallocateIfAllocated();
     AcquireAllocatedData(&allocation_tx);
     SetIsAllocated();
   }
-
   SetSize(new_size);
 }
@@ -684,44 +748,50 @@ template <typename T, size_t N, typename A>
 template <typename... Args>
 auto Storage<T, N, A>::EmplaceBack(Args&&... args) -> reference {
   StorageView storage_view = MakeStorageView();
+  const auto n = storage_view.size;
+  if (ABSL_PREDICT_TRUE(n != storage_view.capacity)) {
+    // Fast path; new element fits.
+    pointer last_ptr = storage_view.data + n;
+    AllocatorTraits::construct(*GetAllocPtr(), last_ptr,
+                               std::forward<Args>(args)...);
+    AddSize(1);
+    return *last_ptr;
+  }
+  // TODO(b/173712035): Annotate with musttail attribute to prevent regression.
+  return EmplaceBackSlow(std::forward<Args>(args)...);
+}

+template <typename T, size_t N, typename A>
+template <typename... Args>
+auto Storage<T, N, A>::EmplaceBackSlow(Args&&... args) -> reference {
+  StorageView storage_view = MakeStorageView();
   AllocationTransaction allocation_tx(GetAllocPtr());
   IteratorValueAdapter<MoveIterator> move_values(
       MoveIterator(storage_view.data));
-
-  pointer construct_data;
-  if (storage_view.size == storage_view.capacity) {
-    size_type new_capacity = NextCapacity(storage_view.capacity);
-    construct_data = allocation_tx.Allocate(new_capacity);
-  } else {
-    construct_data = storage_view.data;
-  }
-
+  size_type new_capacity = NextCapacity(storage_view.capacity);
+  pointer construct_data = allocation_tx.Allocate(new_capacity);
   pointer last_ptr = construct_data + storage_view.size;

+  // Construct new element.
   AllocatorTraits::construct(*GetAllocPtr(), last_ptr,
                              std::forward<Args>(args)...);
-
-  if (allocation_tx.DidAllocate()) {
-    ABSL_INTERNAL_TRY {
-      inlined_vector_internal::ConstructElements(
-          GetAllocPtr(), allocation_tx.GetData(), &move_values,
-          storage_view.size);
-    }
-    ABSL_INTERNAL_CATCH_ANY {
-      AllocatorTraits::destroy(*GetAllocPtr(), last_ptr);
-      ABSL_INTERNAL_RETHROW;
-    }
-
-    inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data,
-                                             storage_view.size);
-
-    DeallocateIfAllocated();
-    AcquireAllocatedData(&allocation_tx);
-    SetIsAllocated();
+  // Move elements from old backing store to new backing store.
+  ABSL_INTERNAL_TRY {
+    inlined_vector_internal::ConstructElements(
+        GetAllocPtr(), allocation_tx.GetData(), &move_values,
+        storage_view.size);
   }
-
+  ABSL_INTERNAL_CATCH_ANY {
+    AllocatorTraits::destroy(*GetAllocPtr(), last_ptr);
+    ABSL_INTERNAL_RETHROW;
+  }
+  // Destroy elements in old backing store.
+  inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data,
+                                           storage_view.size);
+  DeallocateIfAllocated();
+  AcquireAllocatedData(&allocation_tx);
+  SetIsAllocated();
   AddSize(1);
   return *last_ptr;
 }
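EmplaceBack() is now split in two: the predicted-likely "element fits" branch
stays small enough to inline at every call site, while the reallocation logic
moves to the non-inlined EmplaceBackSlow(). A sketch of the shape of that
optimization with a hypothetical fixed-type vector (IntVec is our name; the
attribute spelling is the GCC/Clang one that ABSL_ATTRIBUTE_NOINLINE wraps):

    #include <cstddef>

    struct IntVec {
      int* data = nullptr;
      size_t size = 0, capacity = 0;

      int& push_back(int v) {
        if (size != capacity) {  // hot path: one compare, one store
          data[size] = v;
          return data[size++];
        }
        return push_back_slow(v);  // cold path: grow, then insert
      }

      __attribute__((noinline)) int& push_back_slow(int v) {
        size_t new_capacity = capacity == 0 ? 4 : capacity * 2;
        int* new_data = new int[new_capacity];
        for (size_t i = 0; i != size; ++i) new_data[i] = data[i];
        delete[] data;
        data = new_data;
        capacity = new_capacity;
        return push_back(v);  // guaranteed to take the fast path now
      }
    };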
@@ -885,6 +955,11 @@ auto Storage<T, N, A>::Swap(Storage* other_storage_ptr) -> void {
   swap(*GetAllocPtr(), *other_storage_ptr->GetAllocPtr());
 }

+// End ignore "maybe-uninitialized"
+#if !defined(__clang__) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+
 }  // namespace inlined_vector_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
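This pop closes the diagnostic push added near the top of the header, so the
-Wmaybe-uninitialized suppression is scoped to this namespace and cannot leak
into code that merely includes the header. The general pattern, for reference:

    #if !defined(__clang__) && defined(__GNUC__)
    #pragma GCC diagnostic push
    #pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
    #endif

    // ... declarations that GCC flags with a false positive ...

    #if !defined(__clang__) && defined(__GNUC__)
    #pragma GCC diagnostic pop  // restore the includer's warning state
    #endif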

@@ -404,7 +404,7 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
   constexpr size_t Offset() const {
     static_assert(N < NumOffsets, "Index out of bounds");
     return adl_barrier::Align(
-        Offset<N - 1>() + SizeOf<ElementType<N - 1>>() * size_[N - 1],
+        Offset<N - 1>() + SizeOf<ElementType<N - 1>>::value * size_[N - 1],
         ElementAlignment<N>::value);
   }
@@ -597,7 +597,7 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
   constexpr size_t AllocSize() const {
     static_assert(NumTypes == NumSizes, "You must specify sizes of all fields");
     return Offset<NumTypes - 1>() +
-           SizeOf<ElementType<NumTypes - 1>>() * size_[NumTypes - 1];
+           SizeOf<ElementType<NumTypes - 1>>::value * size_[NumTypes - 1];
   }

   // If built with --config=asan, poisons padding bytes (if any) in the
// If built with --config=asan, poisons padding bytes (if any) in the // If built with --config=asan, poisons padding bytes (if any) in the
@ -621,7 +621,7 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
// The `if` is an optimization. It doesn't affect the observable behaviour. // The `if` is an optimization. It doesn't affect the observable behaviour.
if (ElementAlignment<N - 1>::value % ElementAlignment<N>::value) { if (ElementAlignment<N - 1>::value % ElementAlignment<N>::value) {
size_t start = size_t start =
Offset<N - 1>() + SizeOf<ElementType<N - 1>>() * size_[N - 1]; Offset<N - 1>() + SizeOf<ElementType<N - 1>>::value * size_[N - 1];
ASAN_POISON_MEMORY_REGION(p + start, Offset<N>() - start); ASAN_POISON_MEMORY_REGION(p + start, Offset<N>() - start);
} }
#endif #endif
@@ -645,7 +645,7 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
   // produce "unsigned*" where another produces "unsigned int *".
   std::string DebugString() const {
     const auto offsets = Offsets();
-    const size_t sizes[] = {SizeOf<ElementType<OffsetSeq>>()...};
+    const size_t sizes[] = {SizeOf<ElementType<OffsetSeq>>::value...};
     const std::string types[] = {
         adl_barrier::TypeName<ElementType<OffsetSeq>>()...};
     std::string res = absl::StrCat("@0", types[0], "(", sizes[0], ")");
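All four layout.h hunks make the same mechanical change: SizeOf<T> is read as
a trait (SizeOf<T>::value) rather than called as a constexpr function
(SizeOf<T>()). A sketch of the trait form -- our reconstruction; the real
definition also folds in an alignment check:

    #include <cstddef>
    #include <type_traits>

    template <typename T>
    struct SizeOf : std::integral_constant<size_t, sizeof(T)> {};

    static_assert(SizeOf<int>::value == sizeof(int),
                  "trait agrees with the sizeof operator");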

@@ -128,8 +128,10 @@ TEST(Layout, ElementTypes) {
   {
     using L = Layout<int32_t, int32_t>;
     SameType<std::tuple<int32_t, int32_t>, L::ElementTypes>();
-    SameType<std::tuple<int32_t, int32_t>, decltype(L::Partial())::ElementTypes>();
-    SameType<std::tuple<int32_t, int32_t>, decltype(L::Partial(0))::ElementTypes>();
+    SameType<std::tuple<int32_t, int32_t>,
+             decltype(L::Partial())::ElementTypes>();
+    SameType<std::tuple<int32_t, int32_t>,
+             decltype(L::Partial(0))::ElementTypes>();
   }
   {
     using L = Layout<int8_t, int32_t, Int128>;
@@ -368,18 +370,21 @@ TEST(Layout, PointerByIndex) {
   {
     using L = Layout<int32_t>;
     EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial().Pointer<0>(p))));
-    EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<0>(p))));
+    EXPECT_EQ(0,
+              Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<0>(p))));
     EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L(3).Pointer<0>(p))));
   }
   {
     using L = Layout<int32_t, int32_t>;
     EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial().Pointer<0>(p))));
-    EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<0>(p))));
-    EXPECT_EQ(12, Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<1>(p))));
     EXPECT_EQ(0,
-              Distance(p, Type<const int32_t*>(L::Partial(3, 5).Pointer<0>(p))));
+              Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<0>(p))));
     EXPECT_EQ(12,
-              Distance(p, Type<const int32_t*>(L::Partial(3, 5).Pointer<1>(p))));
+              Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<1>(p))));
+    EXPECT_EQ(
+        0, Distance(p, Type<const int32_t*>(L::Partial(3, 5).Pointer<0>(p))));
+    EXPECT_EQ(
+        12, Distance(p, Type<const int32_t*>(L::Partial(3, 5).Pointer<1>(p))));
     EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L(3, 5).Pointer<0>(p))));
     EXPECT_EQ(12, Distance(p, Type<const int32_t*>(L(3, 5).Pointer<1>(p))));
   }
@@ -387,39 +392,44 @@ TEST(Layout, PointerByIndex) {
     using L = Layout<int8_t, int32_t, Int128>;
     EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial().Pointer<0>(p))));
     EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial(0).Pointer<0>(p))));
-    EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial(0).Pointer<1>(p))));
+    EXPECT_EQ(0,
+              Distance(p, Type<const int32_t*>(L::Partial(0).Pointer<1>(p))));
     EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial(1).Pointer<0>(p))));
-    EXPECT_EQ(4, Distance(p, Type<const int32_t*>(L::Partial(1).Pointer<1>(p))));
+    EXPECT_EQ(4,
+              Distance(p, Type<const int32_t*>(L::Partial(1).Pointer<1>(p))));
     EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial(5).Pointer<0>(p))));
-    EXPECT_EQ(8, Distance(p, Type<const int32_t*>(L::Partial(5).Pointer<1>(p))));
+    EXPECT_EQ(8,
+              Distance(p, Type<const int32_t*>(L::Partial(5).Pointer<1>(p))));
     EXPECT_EQ(0,
               Distance(p, Type<const int8_t*>(L::Partial(0, 0).Pointer<0>(p))));
-    EXPECT_EQ(0,
-              Distance(p, Type<const int32_t*>(L::Partial(0, 0).Pointer<1>(p))));
+    EXPECT_EQ(
+        0, Distance(p, Type<const int32_t*>(L::Partial(0, 0).Pointer<1>(p))));
     EXPECT_EQ(0,
               Distance(p, Type<const Int128*>(L::Partial(0, 0).Pointer<2>(p))));
     EXPECT_EQ(0,
               Distance(p, Type<const int8_t*>(L::Partial(1, 0).Pointer<0>(p))));
-    EXPECT_EQ(4,
-              Distance(p, Type<const int32_t*>(L::Partial(1, 0).Pointer<1>(p))));
+    EXPECT_EQ(
+        4, Distance(p, Type<const int32_t*>(L::Partial(1, 0).Pointer<1>(p))));
     EXPECT_EQ(8,
               Distance(p, Type<const Int128*>(L::Partial(1, 0).Pointer<2>(p))));
     EXPECT_EQ(0,
               Distance(p, Type<const int8_t*>(L::Partial(5, 3).Pointer<0>(p))));
-    EXPECT_EQ(8,
-              Distance(p, Type<const int32_t*>(L::Partial(5, 3).Pointer<1>(p))));
+    EXPECT_EQ(
+        8, Distance(p, Type<const int32_t*>(L::Partial(5, 3).Pointer<1>(p))));
     EXPECT_EQ(24,
               Distance(p, Type<const Int128*>(L::Partial(5, 3).Pointer<2>(p))));
     EXPECT_EQ(
         0, Distance(p, Type<const int8_t*>(L::Partial(0, 0, 0).Pointer<0>(p))));
     EXPECT_EQ(
-        0, Distance(p, Type<const int32_t*>(L::Partial(0, 0, 0).Pointer<1>(p))));
+        0,
+        Distance(p, Type<const int32_t*>(L::Partial(0, 0, 0).Pointer<1>(p))));
     EXPECT_EQ(
         0, Distance(p, Type<const Int128*>(L::Partial(0, 0, 0).Pointer<2>(p))));
     EXPECT_EQ(
         0, Distance(p, Type<const int8_t*>(L::Partial(1, 0, 0).Pointer<0>(p))));
     EXPECT_EQ(
-        4, Distance(p, Type<const int32_t*>(L::Partial(1, 0, 0).Pointer<1>(p))));
+        4,
+        Distance(p, Type<const int32_t*>(L::Partial(1, 0, 0).Pointer<1>(p))));
     EXPECT_EQ(
         8, Distance(p, Type<const Int128*>(L::Partial(1, 0, 0).Pointer<2>(p))));
     EXPECT_EQ(
@@ -428,7 +438,8 @@ TEST(Layout, PointerByIndex) {
       24,
       Distance(p, Type<const Int128*>(L::Partial(5, 3, 1).Pointer<2>(p))));
   EXPECT_EQ(
-      8, Distance(p, Type<const int32_t*>(L::Partial(5, 3, 1).Pointer<1>(p))));
+      8,
+      Distance(p, Type<const int32_t*>(L::Partial(5, 3, 1).Pointer<1>(p))));
   EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L(5, 3, 1).Pointer<0>(p))));
   EXPECT_EQ(24, Distance(p, Type<const Int128*>(L(5, 3, 1).Pointer<2>(p))));
   EXPECT_EQ(8, Distance(p, Type<const int32_t*>(L(5, 3, 1).Pointer<1>(p))));
@@ -439,75 +450,78 @@ TEST(Layout, PointerByType) {
   alignas(max_align_t) const unsigned char p[100] = {};
   {
     using L = Layout<int32_t>;
-    EXPECT_EQ(0,
-              Distance(p, Type<const int32_t*>(L::Partial().Pointer<int32_t>(p))));
-    EXPECT_EQ(0,
-              Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<int32_t>(p))));
+    EXPECT_EQ(
+        0, Distance(p, Type<const int32_t*>(L::Partial().Pointer<int32_t>(p))));
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<int32_t>(p))));
     EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L(3).Pointer<int32_t>(p))));
   }
   {
     using L = Layout<int8_t, int32_t, Int128>;
-    EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial().Pointer<int8_t>(p))));
-    EXPECT_EQ(0,
-              Distance(p, Type<const int8_t*>(L::Partial(0).Pointer<int8_t>(p))));
-    EXPECT_EQ(0,
-              Distance(p, Type<const int32_t*>(L::Partial(0).Pointer<int32_t>(p))));
-    EXPECT_EQ(0,
-              Distance(p, Type<const int8_t*>(L::Partial(1).Pointer<int8_t>(p))));
-    EXPECT_EQ(4,
-              Distance(p, Type<const int32_t*>(L::Partial(1).Pointer<int32_t>(p))));
-    EXPECT_EQ(0,
-              Distance(p, Type<const int8_t*>(L::Partial(5).Pointer<int8_t>(p))));
-    EXPECT_EQ(8,
-              Distance(p, Type<const int32_t*>(L::Partial(5).Pointer<int32_t>(p))));
-    EXPECT_EQ(
-        0, Distance(p, Type<const int8_t*>(L::Partial(0, 0).Pointer<int8_t>(p))));
-    EXPECT_EQ(
-        0, Distance(p, Type<const int32_t*>(L::Partial(0, 0).Pointer<int32_t>(p))));
-    EXPECT_EQ(
-        0,
-        Distance(p, Type<const Int128*>(L::Partial(0, 0).Pointer<Int128>(p))));
-    EXPECT_EQ(
-        0, Distance(p, Type<const int8_t*>(L::Partial(1, 0).Pointer<int8_t>(p))));
-    EXPECT_EQ(
-        4, Distance(p, Type<const int32_t*>(L::Partial(1, 0).Pointer<int32_t>(p))));
-    EXPECT_EQ(
-        8,
-        Distance(p, Type<const Int128*>(L::Partial(1, 0).Pointer<Int128>(p))));
-    EXPECT_EQ(
-        0, Distance(p, Type<const int8_t*>(L::Partial(5, 3).Pointer<int8_t>(p))));
-    EXPECT_EQ(
-        8, Distance(p, Type<const int32_t*>(L::Partial(5, 3).Pointer<int32_t>(p))));
-    EXPECT_EQ(
-        24,
-        Distance(p, Type<const Int128*>(L::Partial(5, 3).Pointer<Int128>(p))));
-    EXPECT_EQ(
-        0,
-        Distance(p, Type<const int8_t*>(L::Partial(0, 0, 0).Pointer<int8_t>(p))));
-    EXPECT_EQ(
-        0,
-        Distance(p, Type<const int32_t*>(L::Partial(0, 0, 0).Pointer<int32_t>(p))));
-    EXPECT_EQ(0, Distance(p, Type<const Int128*>(
-                                 L::Partial(0, 0, 0).Pointer<Int128>(p))));
-    EXPECT_EQ(
-        0,
-        Distance(p, Type<const int8_t*>(L::Partial(1, 0, 0).Pointer<int8_t>(p))));
-    EXPECT_EQ(
-        4,
-        Distance(p, Type<const int32_t*>(L::Partial(1, 0, 0).Pointer<int32_t>(p))));
+    EXPECT_EQ(
+        0, Distance(p, Type<const int8_t*>(L::Partial().Pointer<int8_t>(p))));
+    EXPECT_EQ(
+        0, Distance(p, Type<const int8_t*>(L::Partial(0).Pointer<int8_t>(p))));
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<const int32_t*>(L::Partial(0).Pointer<int32_t>(p))));
+    EXPECT_EQ(
+        0, Distance(p, Type<const int8_t*>(L::Partial(1).Pointer<int8_t>(p))));
+    EXPECT_EQ(
+        4,
+        Distance(p, Type<const int32_t*>(L::Partial(1).Pointer<int32_t>(p))));
+    EXPECT_EQ(
+        0, Distance(p, Type<const int8_t*>(L::Partial(5).Pointer<int8_t>(p))));
+    EXPECT_EQ(
+        8,
+        Distance(p, Type<const int32_t*>(L::Partial(5).Pointer<int32_t>(p))));
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<const int8_t*>(L::Partial(0, 0).Pointer<int8_t>(p))));
+    EXPECT_EQ(0, Distance(p, Type<const int32_t*>(
+                                 L::Partial(0, 0).Pointer<int32_t>(p))));
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<const Int128*>(L::Partial(0, 0).Pointer<Int128>(p))));
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<const int8_t*>(L::Partial(1, 0).Pointer<int8_t>(p))));
+    EXPECT_EQ(4, Distance(p, Type<const int32_t*>(
+                                 L::Partial(1, 0).Pointer<int32_t>(p))));
+    EXPECT_EQ(
+        8,
+        Distance(p, Type<const Int128*>(L::Partial(1, 0).Pointer<Int128>(p))));
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<const int8_t*>(L::Partial(5, 3).Pointer<int8_t>(p))));
+    EXPECT_EQ(8, Distance(p, Type<const int32_t*>(
+                                 L::Partial(5, 3).Pointer<int32_t>(p))));
+    EXPECT_EQ(
+        24,
+        Distance(p, Type<const Int128*>(L::Partial(5, 3).Pointer<Int128>(p))));
+    EXPECT_EQ(0, Distance(p, Type<const int8_t*>(
+                                 L::Partial(0, 0, 0).Pointer<int8_t>(p))));
+    EXPECT_EQ(0, Distance(p, Type<const int32_t*>(
+                                 L::Partial(0, 0, 0).Pointer<int32_t>(p))));
+    EXPECT_EQ(0, Distance(p, Type<const Int128*>(
+                                 L::Partial(0, 0, 0).Pointer<Int128>(p))));
+    EXPECT_EQ(0, Distance(p, Type<const int8_t*>(
+                                 L::Partial(1, 0, 0).Pointer<int8_t>(p))));
+    EXPECT_EQ(4, Distance(p, Type<const int32_t*>(
+                                 L::Partial(1, 0, 0).Pointer<int32_t>(p))));
     EXPECT_EQ(8, Distance(p, Type<const Int128*>(
                                  L::Partial(1, 0, 0).Pointer<Int128>(p))));
-    EXPECT_EQ(
-        0,
-        Distance(p, Type<const int8_t*>(L::Partial(5, 3, 1).Pointer<int8_t>(p))));
+    EXPECT_EQ(0, Distance(p, Type<const int8_t*>(
+                                 L::Partial(5, 3, 1).Pointer<int8_t>(p))));
     EXPECT_EQ(24, Distance(p, Type<const Int128*>(
                                   L::Partial(5, 3, 1).Pointer<Int128>(p))));
-    EXPECT_EQ(
-        8,
-        Distance(p, Type<const int32_t*>(L::Partial(5, 3, 1).Pointer<int32_t>(p))));
+    EXPECT_EQ(8, Distance(p, Type<const int32_t*>(
+                                 L::Partial(5, 3, 1).Pointer<int32_t>(p))));
     EXPECT_EQ(24,
               Distance(p, Type<const Int128*>(L(5, 3, 1).Pointer<Int128>(p))));
-    EXPECT_EQ(8, Distance(p, Type<const int32_t*>(L(5, 3, 1).Pointer<int32_t>(p))));
+    EXPECT_EQ(
+        8, Distance(p, Type<const int32_t*>(L(5, 3, 1).Pointer<int32_t>(p))));
   }
 }
@@ -548,15 +562,18 @@ TEST(Layout, MutablePointerByIndex) {
     EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5, 3).Pointer<1>(p))));
     EXPECT_EQ(24, Distance(p, Type<Int128*>(L::Partial(5, 3).Pointer<2>(p))));
     EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0, 0, 0).Pointer<0>(p))));
-    EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(0, 0, 0).Pointer<1>(p))));
+    EXPECT_EQ(0,
+              Distance(p, Type<int32_t*>(L::Partial(0, 0, 0).Pointer<1>(p))));
     EXPECT_EQ(0, Distance(p, Type<Int128*>(L::Partial(0, 0, 0).Pointer<2>(p))));
     EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1, 0, 0).Pointer<0>(p))));
-    EXPECT_EQ(4, Distance(p, Type<int32_t*>(L::Partial(1, 0, 0).Pointer<1>(p))));
+    EXPECT_EQ(4,
+              Distance(p, Type<int32_t*>(L::Partial(1, 0, 0).Pointer<1>(p))));
     EXPECT_EQ(8, Distance(p, Type<Int128*>(L::Partial(1, 0, 0).Pointer<2>(p))));
     EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5, 3, 1).Pointer<0>(p))));
     EXPECT_EQ(24,
               Distance(p, Type<Int128*>(L::Partial(5, 3, 1).Pointer<2>(p))));
-    EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5, 3, 1).Pointer<1>(p))));
+    EXPECT_EQ(8,
+              Distance(p, Type<int32_t*>(L::Partial(5, 3, 1).Pointer<1>(p))));
     EXPECT_EQ(0, Distance(p, Type<int8_t*>(L(5, 3, 1).Pointer<0>(p))));
     EXPECT_EQ(24, Distance(p, Type<Int128*>(L(5, 3, 1).Pointer<2>(p))));
     EXPECT_EQ(8, Distance(p, Type<int32_t*>(L(5, 3, 1).Pointer<1>(p))));
@@ -568,48 +585,61 @@ TEST(Layout, MutablePointerByType) {
   {
     using L = Layout<int32_t>;
     EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial().Pointer<int32_t>(p))));
-    EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(3).Pointer<int32_t>(p))));
+    EXPECT_EQ(0,
+              Distance(p, Type<int32_t*>(L::Partial(3).Pointer<int32_t>(p))));
     EXPECT_EQ(0, Distance(p, Type<int32_t*>(L(3).Pointer<int32_t>(p))));
   }
   {
     using L = Layout<int8_t, int32_t, Int128>;
     EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial().Pointer<int8_t>(p))));
     EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0).Pointer<int8_t>(p))));
-    EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(0).Pointer<int32_t>(p))));
+    EXPECT_EQ(0,
+              Distance(p, Type<int32_t*>(L::Partial(0).Pointer<int32_t>(p))));
     EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1).Pointer<int8_t>(p))));
-    EXPECT_EQ(4, Distance(p, Type<int32_t*>(L::Partial(1).Pointer<int32_t>(p))));
+    EXPECT_EQ(4,
+              Distance(p, Type<int32_t*>(L::Partial(1).Pointer<int32_t>(p))));
     EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5).Pointer<int8_t>(p))));
-    EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5).Pointer<int32_t>(p))));
-    EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0, 0).Pointer<int8_t>(p))));
-    EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(0, 0).Pointer<int32_t>(p))));
+    EXPECT_EQ(8,
+              Distance(p, Type<int32_t*>(L::Partial(5).Pointer<int32_t>(p))));
+    EXPECT_EQ(0,
+              Distance(p, Type<int8_t*>(L::Partial(0, 0).Pointer<int8_t>(p))));
+    EXPECT_EQ(
+        0, Distance(p, Type<int32_t*>(L::Partial(0, 0).Pointer<int32_t>(p))));
     EXPECT_EQ(0,
               Distance(p, Type<Int128*>(L::Partial(0, 0).Pointer<Int128>(p))));
-    EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1, 0).Pointer<int8_t>(p))));
-    EXPECT_EQ(4, Distance(p, Type<int32_t*>(L::Partial(1, 0).Pointer<int32_t>(p))));
+    EXPECT_EQ(0,
+              Distance(p, Type<int8_t*>(L::Partial(1, 0).Pointer<int8_t>(p))));
+    EXPECT_EQ(
+        4, Distance(p, Type<int32_t*>(L::Partial(1, 0).Pointer<int32_t>(p))));
     EXPECT_EQ(8,
               Distance(p, Type<Int128*>(L::Partial(1, 0).Pointer<Int128>(p))));
-    EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5, 3).Pointer<int8_t>(p))));
-    EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5, 3).Pointer<int32_t>(p))));
+    EXPECT_EQ(0,
+              Distance(p, Type<int8_t*>(L::Partial(5, 3).Pointer<int8_t>(p))));
+    EXPECT_EQ(
+        8, Distance(p, Type<int32_t*>(L::Partial(5, 3).Pointer<int32_t>(p))));
     EXPECT_EQ(24,
               Distance(p, Type<Int128*>(L::Partial(5, 3).Pointer<Int128>(p))));
-    EXPECT_EQ(0,
-              Distance(p, Type<int8_t*>(L::Partial(0, 0, 0).Pointer<int8_t>(p))));
-    EXPECT_EQ(0,
-              Distance(p, Type<int32_t*>(L::Partial(0, 0, 0).Pointer<int32_t>(p))));
+    EXPECT_EQ(
+        0, Distance(p, Type<int8_t*>(L::Partial(0, 0, 0).Pointer<int8_t>(p))));
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<int32_t*>(L::Partial(0, 0, 0).Pointer<int32_t>(p))));
     EXPECT_EQ(
         0, Distance(p, Type<Int128*>(L::Partial(0, 0, 0).Pointer<Int128>(p))));
-    EXPECT_EQ(0,
-              Distance(p, Type<int8_t*>(L::Partial(1, 0, 0).Pointer<int8_t>(p))));
-    EXPECT_EQ(4,
-              Distance(p, Type<int32_t*>(L::Partial(1, 0, 0).Pointer<int32_t>(p))));
+    EXPECT_EQ(
+        0, Distance(p, Type<int8_t*>(L::Partial(1, 0, 0).Pointer<int8_t>(p))));
+    EXPECT_EQ(
+        4,
+        Distance(p, Type<int32_t*>(L::Partial(1, 0, 0).Pointer<int32_t>(p))));
     EXPECT_EQ(
         8, Distance(p, Type<Int128*>(L::Partial(1, 0, 0).Pointer<Int128>(p))));
-    EXPECT_EQ(0,
-              Distance(p, Type<int8_t*>(L::Partial(5, 3, 1).Pointer<int8_t>(p))));
+    EXPECT_EQ(
+        0, Distance(p, Type<int8_t*>(L::Partial(5, 3, 1).Pointer<int8_t>(p))));
     EXPECT_EQ(
         24, Distance(p, Type<Int128*>(L::Partial(5, 3, 1).Pointer<Int128>(p))));
-    EXPECT_EQ(8,
-              Distance(p, Type<int32_t*>(L::Partial(5, 3, 1).Pointer<int32_t>(p))));
+    EXPECT_EQ(
+        8,
+        Distance(p, Type<int32_t*>(L::Partial(5, 3, 1).Pointer<int32_t>(p))));
     EXPECT_EQ(0, Distance(p, Type<int8_t*>(L(5, 3, 1).Pointer<int8_t>(p))));
     EXPECT_EQ(24, Distance(p, Type<Int128*>(L(5, 3, 1).Pointer<Int128>(p))));
     EXPECT_EQ(8, Distance(p, Type<int32_t*>(L(5, 3, 1).Pointer<int32_t>(p))));
@@ -790,67 +820,72 @@ TEST(Layout, SliceByIndexData) {
   {
     using L = Layout<int32_t>;
     EXPECT_EQ(
-        0,
-        Distance(p, Type<Span<const int32_t>>(L::Partial(0).Slice<0>(p)).data()));
+        0, Distance(
+               p, Type<Span<const int32_t>>(L::Partial(0).Slice<0>(p)).data()));
     EXPECT_EQ(
-        0,
-        Distance(p, Type<Span<const int32_t>>(L::Partial(3).Slice<0>(p)).data()));
-    EXPECT_EQ(0, Distance(p, Type<Span<const int32_t>>(L(3).Slice<0>(p)).data()));
+        0, Distance(
+               p, Type<Span<const int32_t>>(L::Partial(3).Slice<0>(p)).data()));
+    EXPECT_EQ(0,
+              Distance(p, Type<Span<const int32_t>>(L(3).Slice<0>(p)).data()));
   }
   {
     using L = Layout<int32_t, int32_t>;
     EXPECT_EQ(
-        0,
-        Distance(p, Type<Span<const int32_t>>(L::Partial(3).Slice<0>(p)).data()));
+        0, Distance(
+               p, Type<Span<const int32_t>>(L::Partial(3).Slice<0>(p)).data()));
     EXPECT_EQ(
         0,
-        Distance(p,
-                 Type<Span<const int32_t>>(L::Partial(3, 5).Slice<0>(p)).data()));
+        Distance(
+            p, Type<Span<const int32_t>>(L::Partial(3, 5).Slice<0>(p)).data()));
     EXPECT_EQ(
         12,
-        Distance(p,
-                 Type<Span<const int32_t>>(L::Partial(3, 5).Slice<1>(p)).data()));
-    EXPECT_EQ(0,
-              Distance(p, Type<Span<const int32_t>>(L(3, 5).Slice<0>(p)).data()));
-    EXPECT_EQ(12,
-              Distance(p, Type<Span<const int32_t>>(L(3, 5).Slice<1>(p)).data()));
+        Distance(
+            p, Type<Span<const int32_t>>(L::Partial(3, 5).Slice<1>(p)).data()));
+    EXPECT_EQ(
+        0, Distance(p, Type<Span<const int32_t>>(L(3, 5).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        12, Distance(p, Type<Span<const int32_t>>(L(3, 5).Slice<1>(p)).data()));
   }
   {
     using L = Layout<int8_t, int32_t, Int128>;
     EXPECT_EQ(
-        0,
-        Distance(p, Type<Span<const int8_t>>(L::Partial(0).Slice<0>(p)).data()));
+        0, Distance(
+               p, Type<Span<const int8_t>>(L::Partial(0).Slice<0>(p)).data()));
     EXPECT_EQ(
-        0,
-        Distance(p, Type<Span<const int8_t>>(L::Partial(1).Slice<0>(p)).data()));
+        0, Distance(
+               p, Type<Span<const int8_t>>(L::Partial(1).Slice<0>(p)).data()));
     EXPECT_EQ(
-        0,
-        Distance(p, Type<Span<const int8_t>>(L::Partial(5).Slice<0>(p)).data()));
+        0, Distance(
+               p, Type<Span<const int8_t>>(L::Partial(5).Slice<0>(p)).data()));
     EXPECT_EQ(
-        0, Distance(
-               p, Type<Span<const int8_t>>(L::Partial(0, 0).Slice<0>(p)).data()));
+        0,
+        Distance(
+            p, Type<Span<const int8_t>>(L::Partial(0, 0).Slice<0>(p)).data()));
     EXPECT_EQ(
         0,
-        Distance(p,
-                 Type<Span<const int32_t>>(L::Partial(0, 0).Slice<1>(p)).data()));
+        Distance(
+            p, Type<Span<const int32_t>>(L::Partial(0, 0).Slice<1>(p)).data()));
     EXPECT_EQ(
-        0, Distance(
-               p, Type<Span<const int8_t>>(L::Partial(1, 0).Slice<0>(p)).data()));
+        0,
+        Distance(
+            p, Type<Span<const int8_t>>(L::Partial(1, 0).Slice<0>(p)).data()));
     EXPECT_EQ(
         4,
-        Distance(p,
-                 Type<Span<const int32_t>>(L::Partial(1, 0).Slice<1>(p)).data()));
+        Distance(
+            p, Type<Span<const int32_t>>(L::Partial(1, 0).Slice<1>(p)).data()));
     EXPECT_EQ(
-        0, Distance(
-               p, Type<Span<const int8_t>>(L::Partial(5, 3).Slice<0>(p)).data()));
+        0,
+        Distance(
+            p, Type<Span<const int8_t>>(L::Partial(5, 3).Slice<0>(p)).data()));
     EXPECT_EQ(
         8,
-        Distance(p,
-                 Type<Span<const int32_t>>(L::Partial(5, 3).Slice<1>(p)).data()));
+        Distance(
+            p, Type<Span<const int32_t>>(L::Partial(5, 3).Slice<1>(p)).data()));
     EXPECT_EQ(
         0,
         Distance(
-            p, Type<Span<const int8_t>>(L::Partial(0, 0, 0).Slice<0>(p)).data()));
+            p,
+            Type<Span<const int8_t>>(L::Partial(0, 0, 0).Slice<0>(p)).data()));
     EXPECT_EQ(
         0,
         Distance(
@@ -864,7 +899,8 @@ TEST(Layout, SliceByIndexData) {
     EXPECT_EQ(
         0,
         Distance(
-            p, Type<Span<const int8_t>>(L::Partial(1, 0, 0).Slice<0>(p)).data()));
+            p,
+            Type<Span<const int8_t>>(L::Partial(1, 0, 0).Slice<0>(p)).data()));
     EXPECT_EQ(
         4,
         Distance(
@@ -878,7 +914,8 @@ TEST(Layout, SliceByIndexData) {
     EXPECT_EQ(
         0,
         Distance(
-            p, Type<Span<const int8_t>>(L::Partial(5, 3, 1).Slice<0>(p)).data()));
+            p,
+            Type<Span<const int8_t>>(L::Partial(5, 3, 1).Slice<0>(p)).data()));
     EXPECT_EQ(
         24,
         Distance(
@@ -890,12 +927,14 @@ TEST(Layout, SliceByIndexData) {
             p,
             Type<Span<const int32_t>>(L::Partial(5, 3, 1).Slice<1>(p)).data()));
     EXPECT_EQ(
-        0, Distance(p, Type<Span<const int8_t>>(L(5, 3, 1).Slice<0>(p)).data()));
+        0,
+        Distance(p, Type<Span<const int8_t>>(L(5, 3, 1).Slice<0>(p)).data()));
     EXPECT_EQ(
         24,
         Distance(p, Type<Span<const Int128>>(L(5, 3, 1).Slice<2>(p)).data()));
     EXPECT_EQ(
-        8, Distance(p, Type<Span<const int32_t>>(L(5, 3, 1).Slice<1>(p)).data()));
+        8,
+        Distance(p, Type<Span<const int32_t>>(L(5, 3, 1).Slice<1>(p)).data()));
   }
 }
@@ -906,98 +945,94 @@ TEST(Layout, SliceByTypeData) {
     EXPECT_EQ(
         0,
         Distance(
-            p, Type<Span<const int32_t>>(L::Partial(0).Slice<int32_t>(p)).data()));
+            p,
+            Type<Span<const int32_t>>(L::Partial(0).Slice<int32_t>(p)).data()));
     EXPECT_EQ(
         0,
         Distance(
-            p, Type<Span<const int32_t>>(L::Partial(3).Slice<int32_t>(p)).data()));
+            p,
+            Type<Span<const int32_t>>(L::Partial(3).Slice<int32_t>(p)).data()));
     EXPECT_EQ(
-        0, Distance(p, Type<Span<const int32_t>>(L(3).Slice<int32_t>(p)).data()));
+        0,
+        Distance(p, Type<Span<const int32_t>>(L(3).Slice<int32_t>(p)).data()));
   }
   {
     using L = Layout<int8_t, int32_t, Int128>;
-    EXPECT_EQ(
-        0, Distance(
-               p, Type<Span<const int8_t>>(L::Partial(0).Slice<int8_t>(p)).data()));
-    EXPECT_EQ(
-        0, Distance(
-               p, Type<Span<const int8_t>>(L::Partial(1).Slice<int8_t>(p)).data()));
-    EXPECT_EQ(
-        0, Distance(
-               p, Type<Span<const int8_t>>(L::Partial(5).Slice<int8_t>(p)).data()));
-    EXPECT_EQ(
-        0,
-        Distance(
-            p, Type<Span<const int8_t>>(L::Partial(0, 0).Slice<int8_t>(p)).data()));
-    EXPECT_EQ(
-        0,
-        Distance(
-            p,
-            Type<Span<const int32_t>>(L::Partial(0, 0).Slice<int32_t>(p)).data()));
-    EXPECT_EQ(
-        0,
-        Distance(
-            p, Type<Span<const int8_t>>(L::Partial(1, 0).Slice<int8_t>(p)).data()));
-    EXPECT_EQ(
-        4,
-        Distance(
-            p,
-            Type<Span<const int32_t>>(L::Partial(1, 0).Slice<int32_t>(p)).data()));
-    EXPECT_EQ(
-        0,
-        Distance(
-            p, Type<Span<const int8_t>>(L::Partial(5, 3).Slice<int8_t>(p)).data()));
-    EXPECT_EQ(
-        8,
-        Distance(
-            p,
-            Type<Span<const int32_t>>(L::Partial(5, 3).Slice<int32_t>(p)).data()));
-    EXPECT_EQ(
-        0,
-        Distance(
-            p,
-            Type<Span<const int8_t>>(L::Partial(0, 0, 0).Slice<int8_t>(p)).data()));
-    EXPECT_EQ(
-        0,
-        Distance(p, Type<Span<const int32_t>>(L::Partial(0, 0, 0).Slice<int32_t>(p))
-                        .data()));
-    EXPECT_EQ(0, Distance(p, Type<Span<const Int128>>(
-                                 L::Partial(0, 0, 0).Slice<Int128>(p))
-                                 .data()));
-    EXPECT_EQ(
-        0,
-        Distance(
-            p,
-            Type<Span<const int8_t>>(L::Partial(1, 0, 0).Slice<int8_t>(p)).data()));
-    EXPECT_EQ(
-        4,
-        Distance(p, Type<Span<const int32_t>>(L::Partial(1, 0, 0).Slice<int32_t>(p))
-                        .data()));
+    EXPECT_EQ(
+        0,
+        Distance(
+            p,
+            Type<Span<const int8_t>>(L::Partial(0).Slice<int8_t>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(
+            p,
+            Type<Span<const int8_t>>(L::Partial(1).Slice<int8_t>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(
+            p,
+            Type<Span<const int8_t>>(L::Partial(5).Slice<int8_t>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<Span<const int8_t>>(L::Partial(0, 0).Slice<int8_t>(p))
+                        .data()));
+    EXPECT_EQ(0, Distance(p, Type<Span<const int32_t>>(
+                                 L::Partial(0, 0).Slice<int32_t>(p))
+                                 .data()));
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<Span<const int8_t>>(L::Partial(1, 0).Slice<int8_t>(p))
+                        .data()));
+    EXPECT_EQ(4, Distance(p, Type<Span<const int32_t>>(
+                                 L::Partial(1, 0).Slice<int32_t>(p))
+                                 .data()));
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<Span<const int8_t>>(L::Partial(5, 3).Slice<int8_t>(p))
+                        .data()));
+    EXPECT_EQ(8, Distance(p, Type<Span<const int32_t>>(
+                                 L::Partial(5, 3).Slice<int32_t>(p))
+                                 .data()));
+    EXPECT_EQ(0, Distance(p, Type<Span<const int8_t>>(
+                                 L::Partial(0, 0, 0).Slice<int8_t>(p))
+                                 .data()));
+    EXPECT_EQ(0, Distance(p, Type<Span<const int32_t>>(
+                                 L::Partial(0, 0, 0).Slice<int32_t>(p))
+                                 .data()));
+    EXPECT_EQ(0, Distance(p, Type<Span<const Int128>>(
+                                 L::Partial(0, 0, 0).Slice<Int128>(p))
+                                 .data()));
+    EXPECT_EQ(0, Distance(p, Type<Span<const int8_t>>(
+                                 L::Partial(1, 0, 0).Slice<int8_t>(p))
+                                 .data()));
+    EXPECT_EQ(4, Distance(p, Type<Span<const int32_t>>(
+                                 L::Partial(1, 0, 0).Slice<int32_t>(p))
+                                 .data()));
     EXPECT_EQ(8, Distance(p, Type<Span<const Int128>>(
                                  L::Partial(1, 0, 0).Slice<Int128>(p))
                                  .data()));
-    EXPECT_EQ(
-        0,
-        Distance(
-            p,
-            Type<Span<const int8_t>>(L::Partial(5, 3, 1).Slice<int8_t>(p)).data()));
+    EXPECT_EQ(0, Distance(p, Type<Span<const int8_t>>(
+                                 L::Partial(5, 3, 1).Slice<int8_t>(p))
+                                 .data()));
     EXPECT_EQ(24, Distance(p, Type<Span<const Int128>>(
                                   L::Partial(5, 3, 1).Slice<Int128>(p))
                                   .data()));
-    EXPECT_EQ(
-        8,
-        Distance(p, Type<Span<const int32_t>>(L::Partial(5, 3, 1).Slice<int32_t>(p))
-                        .data()));
+    EXPECT_EQ(8, Distance(p, Type<Span<const int32_t>>(
+                                 L::Partial(5, 3, 1).Slice<int32_t>(p))
+                                 .data()));
     EXPECT_EQ(
         0,
-        Distance(p, Type<Span<const int8_t>>(L(5, 3, 1).Slice<int8_t>(p)).data()));
+        Distance(p,
+                 Type<Span<const int8_t>>(L(5, 3, 1).Slice<int8_t>(p)).data()));
     EXPECT_EQ(
         24,
         Distance(p,
                  Type<Span<const Int128>>(L(5, 3, 1).Slice<Int128>(p)).data()));
     EXPECT_EQ(
-        8, Distance(
-               p, Type<Span<const int32_t>>(L(5, 3, 1).Slice<int32_t>(p)).data()));
+        8,
+        Distance(
+            p, Type<Span<const int32_t>>(L(5, 3, 1).Slice<int32_t>(p)).data()));
   }
 }
@@ -1005,18 +1040,19 @@ TEST(Layout, MutableSliceByIndexData) {
   alignas(max_align_t) unsigned char p[100];
   {
     using L = Layout<int32_t>;
-    EXPECT_EQ(0,
-              Distance(p, Type<Span<int32_t>>(L::Partial(0).Slice<0>(p)).data()));
-    EXPECT_EQ(0,
-              Distance(p, Type<Span<int32_t>>(L::Partial(3).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        0, Distance(p, Type<Span<int32_t>>(L::Partial(0).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        0, Distance(p, Type<Span<int32_t>>(L::Partial(3).Slice<0>(p)).data()));
     EXPECT_EQ(0, Distance(p, Type<Span<int32_t>>(L(3).Slice<0>(p)).data()));
   }
   {
     using L = Layout<int32_t, int32_t>;
-    EXPECT_EQ(0,
-              Distance(p, Type<Span<int32_t>>(L::Partial(3).Slice<0>(p)).data()));
     EXPECT_EQ(
-        0, Distance(p, Type<Span<int32_t>>(L::Partial(3, 5).Slice<0>(p)).data()));
+        0, Distance(p, Type<Span<int32_t>>(L::Partial(3).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<Span<int32_t>>(L::Partial(3, 5).Slice<0>(p)).data()));
     EXPECT_EQ(
         12,
         Distance(p, Type<Span<int32_t>>(L::Partial(3, 5).Slice<1>(p)).data()));
@@ -1025,55 +1061,63 @@ TEST(Layout, MutableSliceByIndexData) {
   }
   {
     using L = Layout<int8_t, int32_t, Int128>;
-    EXPECT_EQ(0,
-              Distance(p, Type<Span<int8_t>>(L::Partial(0).Slice<0>(p)).data()));
-    EXPECT_EQ(0,
-              Distance(p, Type<Span<int8_t>>(L::Partial(1).Slice<0>(p)).data()));
-    EXPECT_EQ(0,
-              Distance(p, Type<Span<int8_t>>(L::Partial(5).Slice<0>(p)).data()));
-    EXPECT_EQ(
-        0, Distance(p, Type<Span<int8_t>>(L::Partial(0, 0).Slice<0>(p)).data()));
-    EXPECT_EQ(
-        0, Distance(p, Type<Span<int32_t>>(L::Partial(0, 0).Slice<1>(p)).data()));
-    EXPECT_EQ(
-        0, Distance(p, Type<Span<int8_t>>(L::Partial(1, 0).Slice<0>(p)).data()));
-    EXPECT_EQ(
-        4, Distance(p, Type<Span<int32_t>>(L::Partial(1, 0).Slice<1>(p)).data()));
-    EXPECT_EQ(
-        0, Distance(p, Type<Span<int8_t>>(L::Partial(5, 3).Slice<0>(p)).data()));
-    EXPECT_EQ(
-        8, Distance(p, Type<Span<int32_t>>(L::Partial(5, 3).Slice<1>(p)).data()));
-    EXPECT_EQ(
-        0,
-        Distance(p, Type<Span<int8_t>>(L::Partial(0, 0, 0).Slice<0>(p)).data()));
-    EXPECT_EQ(
-        0,
-        Distance(p, Type<Span<int32_t>>(L::Partial(0, 0, 0).Slice<1>(p)).data()));
+    EXPECT_EQ(
+        0, Distance(p, Type<Span<int8_t>>(L::Partial(0).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        0, Distance(p, Type<Span<int8_t>>(L::Partial(1).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        0, Distance(p, Type<Span<int8_t>>(L::Partial(5).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<Span<int8_t>>(L::Partial(0, 0).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<Span<int32_t>>(L::Partial(0, 0).Slice<1>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<Span<int8_t>>(L::Partial(1, 0).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        4,
+        Distance(p, Type<Span<int32_t>>(L::Partial(1, 0).Slice<1>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<Span<int8_t>>(L::Partial(5, 3).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        8,
+        Distance(p, Type<Span<int32_t>>(L::Partial(5, 3).Slice<1>(p)).data()));
+    EXPECT_EQ(
+        0, Distance(
+               p, Type<Span<int8_t>>(L::Partial(0, 0, 0).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        0, Distance(
+               p, Type<Span<int32_t>>(L::Partial(0, 0, 0).Slice<1>(p)).data()));
     EXPECT_EQ(
         0, Distance(
                p, Type<Span<Int128>>(L::Partial(0, 0, 0).Slice<2>(p)).data()));
     EXPECT_EQ(
-        0,
-        Distance(p, Type<Span<int8_t>>(L::Partial(1, 0, 0).Slice<0>(p)).data()));
+        0, Distance(
+               p, Type<Span<int8_t>>(L::Partial(1, 0, 0).Slice<0>(p)).data()));
     EXPECT_EQ(
-        4,
-        Distance(p, Type<Span<int32_t>>(L::Partial(1, 0, 0).Slice<1>(p)).data()));
+        4, Distance(
+               p, Type<Span<int32_t>>(L::Partial(1, 0, 0).Slice<1>(p)).data()));
     EXPECT_EQ(
         8, Distance(
                p, Type<Span<Int128>>(L::Partial(1, 0, 0).Slice<2>(p)).data()));
     EXPECT_EQ(
-        0,
-        Distance(p, Type<Span<int8_t>>(L::Partial(5, 3, 1).Slice<0>(p)).data()));
+        0, Distance(
+               p, Type<Span<int8_t>>(L::Partial(5, 3, 1).Slice<0>(p)).data()));
     EXPECT_EQ(
         24, Distance(
                 p, Type<Span<Int128>>(L::Partial(5, 3, 1).Slice<2>(p)).data()));
     EXPECT_EQ(
-        8,
-        Distance(p, Type<Span<int32_t>>(L::Partial(5, 3, 1).Slice<1>(p)).data()));
-    EXPECT_EQ(0, Distance(p, Type<Span<int8_t>>(L(5, 3, 1).Slice<0>(p)).data()));
+        8, Distance(
+               p, Type<Span<int32_t>>(L::Partial(5, 3, 1).Slice<1>(p)).data()));
+    EXPECT_EQ(0,
+              Distance(p, Type<Span<int8_t>>(L(5, 3, 1).Slice<0>(p)).data()));
     EXPECT_EQ(24,
               Distance(p, Type<Span<Int128>>(L(5, 3, 1).Slice<2>(p)).data()));
-    EXPECT_EQ(8, Distance(p, Type<Span<int32_t>>(L(5, 3, 1).Slice<1>(p)).data()));
+    EXPECT_EQ(8,
+              Distance(p, Type<Span<int32_t>>(L(5, 3, 1).Slice<1>(p)).data()));
   }
 }
@@ -1082,66 +1126,84 @@ TEST(Layout, MutableSliceByTypeData) {
   {
     using L = Layout<int32_t>;
     EXPECT_EQ(
-        0,
-        Distance(p, Type<Span<int32_t>>(L::Partial(0).Slice<int32_t>(p)).data()));
+        0, Distance(
+               p, Type<Span<int32_t>>(L::Partial(0).Slice<int32_t>(p)).data()));
     EXPECT_EQ(
-        0,
-        Distance(p, Type<Span<int32_t>>(L::Partial(3).Slice<int32_t>(p)).data()));
-    EXPECT_EQ(0, Distance(p, Type<Span<int32_t>>(L(3).Slice<int32_t>(p)).data()));
+        0, Distance(
+               p, Type<Span<int32_t>>(L::Partial(3).Slice<int32_t>(p)).data()));
+    EXPECT_EQ(0,
+              Distance(p, Type<Span<int32_t>>(L(3).Slice<int32_t>(p)).data()));
   }
   {
     using L = Layout<int8_t, int32_t, Int128>;
     EXPECT_EQ(
-        0, Distance(p, Type<Span<int8_t>>(L::Partial(0).Slice<int8_t>(p)).data()));
+        0,
+        Distance(p, Type<Span<int8_t>>(L::Partial(0).Slice<int8_t>(p)).data()));
     EXPECT_EQ(
-        0, Distance(p, Type<Span<int8_t>>(L::Partial(1).Slice<int8_t>(p)).data()));
+        0,
+        Distance(p, Type<Span<int8_t>>(L::Partial(1).Slice<int8_t>(p)).data()));
     EXPECT_EQ(
-        0, Distance(p, Type<Span<int8_t>>(L::Partial(5).Slice<int8_t>(p)).data()));
+        0,
+        Distance(p, Type<Span<int8_t>>(L::Partial(5).Slice<int8_t>(p)).data()));
     EXPECT_EQ(
         0,
-        Distance(p, Type<Span<int8_t>>(L::Partial(0, 0).Slice<int8_t>(p)).data()));
+        Distance(p,
+                 Type<Span<int8_t>>(L::Partial(0, 0).Slice<int8_t>(p)).data()));
     EXPECT_EQ(
-        0, Distance(
-               p, Type<Span<int32_t>>(L::Partial(0, 0).Slice<int32_t>(p)).data()));
+        0,
+        Distance(
+            p, Type<Span<int32_t>>(L::Partial(0, 0).Slice<int32_t>(p)).data()));
     EXPECT_EQ(
         0,
-        Distance(p, Type<Span<int8_t>>(L::Partial(1, 0).Slice<int8_t>(p)).data()));
+        Distance(p,
+                 Type<Span<int8_t>>(L::Partial(1, 0).Slice<int8_t>(p)).data()));
     EXPECT_EQ(
-        4, Distance(
-               p, Type<Span<int32_t>>(L::Partial(1, 0).Slice<int32_t>(p)).data()));
+        4,
+        Distance(
+            p, Type<Span<int32_t>>(L::Partial(1, 0).Slice<int32_t>(p)).data()));
     EXPECT_EQ(
         0,
-        Distance(p, Type<Span<int8_t>>(L::Partial(5, 3).Slice<int8_t>(p)).data()));
+        Distance(p,
+                 Type<Span<int8_t>>(L::Partial(5, 3).Slice<int8_t>(p)).data()));
     EXPECT_EQ(
-        8, Distance(
-               p, Type<Span<int32_t>>(L::Partial(5, 3).Slice<int32_t>(p)).data()));
+        8,
+        Distance(
+            p, Type<Span<int32_t>>(L::Partial(5, 3).Slice<int32_t>(p)).data()));
     EXPECT_EQ(
-        0, Distance(
-               p, Type<Span<int8_t>>(L::Partial(0, 0, 0).Slice<int8_t>(p)).data()));
+        0,
+        Distance(
+            p,
+            Type<Span<int8_t>>(L::Partial(0, 0, 0).Slice<int8_t>(p)).data()));
     EXPECT_EQ(
         0,
         Distance(
-            p, Type<Span<int32_t>>(L::Partial(0, 0, 0).Slice<int32_t>(p)).data()));
+            p,
+            Type<Span<int32_t>>(L::Partial(0, 0, 0).Slice<int32_t>(p)).data()));
     EXPECT_EQ(
         0,
         Distance(
             p,
             Type<Span<Int128>>(L::Partial(0, 0, 0).Slice<Int128>(p)).data()));
     EXPECT_EQ(
-        0, Distance(
-               p, Type<Span<int8_t>>(L::Partial(1, 0, 0).Slice<int8_t>(p)).data()));
+        0,
+        Distance(
+            p,
+            Type<Span<int8_t>>(L::Partial(1, 0, 0).Slice<int8_t>(p)).data()));
     EXPECT_EQ(
         4,
         Distance(
-            p, Type<Span<int32_t>>(L::Partial(1, 0, 0).Slice<int32_t>(p)).data()));
+            p,
+            Type<Span<int32_t>>(L::Partial(1, 0, 0).Slice<int32_t>(p)).data()));
     EXPECT_EQ(
         8,
         Distance(
             p,
             Type<Span<Int128>>(L::Partial(1, 0, 0).Slice<Int128>(p)).data()));
     EXPECT_EQ(
-        0, Distance(
-               p, Type<Span<int8_t>>(L::Partial(5, 3, 1).Slice<int8_t>(p)).data()));
+        0,
+        Distance(
+            p,
+            Type<Span<int8_t>>(L::Partial(5, 3, 1).Slice<int8_t>(p)).data()));
     EXPECT_EQ(
         24,
         Distance(
@@ -1150,14 +1212,16 @@ TEST(Layout, MutableSliceByTypeData) {
     EXPECT_EQ(
         8,
         Distance(
-            p, Type<Span<int32_t>>(L::Partial(5, 3, 1).Slice<int32_t>(p)).data()));
-    EXPECT_EQ(0,
-              Distance(p, Type<Span<int8_t>>(L(5, 3, 1).Slice<int8_t>(p)).data()));
+            p,
+            Type<Span<int32_t>>(L::Partial(5, 3, 1).Slice<int32_t>(p)).data()));
+    EXPECT_EQ(
+        0, Distance(p, Type<Span<int8_t>>(L(5, 3, 1).Slice<int8_t>(p)).data()));
     EXPECT_EQ(
         24,
         Distance(p, Type<Span<Int128>>(L(5, 3, 1).Slice<Int128>(p)).data()));
     EXPECT_EQ(
-        8, Distance(p, Type<Span<int32_t>>(L(5, 3, 1).Slice<int32_t>(p)).data()));
+        8,
+        Distance(p, Type<Span<int32_t>>(L(5, 3, 1).Slice<int32_t>(p)).data()));
   }
 }
@@ -1256,17 +1320,17 @@ TEST(Layout, MutableSlices) {
   }
   {
     const auto x = L::Partial(1, 2, 3);
-    EXPECT_THAT(
-        (Type<std::tuple<Span<int8_t>, Span<int8_t>, Span<Int128>>>(x.Slices(p))),
-        Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
-              IsSameSlice(x.Slice<2>(p))));
+    EXPECT_THAT((Type<std::tuple<Span<int8_t>, Span<int8_t>, Span<Int128>>>(
+                    x.Slices(p))),
+                Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
+                      IsSameSlice(x.Slice<2>(p))));
   }
   {
     const L x(1, 2, 3);
-    EXPECT_THAT(
-        (Type<std::tuple<Span<int8_t>, Span<int8_t>, Span<Int128>>>(x.Slices(p))),
-        Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
-              IsSameSlice(x.Slice<2>(p))));
+    EXPECT_THAT((Type<std::tuple<Span<int8_t>, Span<int8_t>, Span<Int128>>>(
+                    x.Slices(p))),
+                Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
+                      IsSameSlice(x.Slice<2>(p))));
   }
 }
@@ -1398,7 +1462,8 @@ TEST(Layout, DebugString) {
         x.DebugString());
   }
   {
-    constexpr auto x = Layout<int8_t, int32_t, int8_t, Int128>::Partial(1, 2, 3);
+    constexpr auto x =
+        Layout<int8_t, int32_t, int8_t, Int128>::Partial(1, 2, 3);
     EXPECT_EQ(
         "@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)[3]; "
         "@16" +
@@ -1406,7 +1471,8 @@ TEST(Layout, DebugString) {
         x.DebugString());
   }
   {
-    constexpr auto x = Layout<int8_t, int32_t, int8_t, Int128>::Partial(1, 2, 3, 4);
+    constexpr auto x =
+        Layout<int8_t, int32_t, int8_t, Int128>::Partial(1, 2, 3, 4);
     EXPECT_EQ(
         "@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)[3]; "
         "@16" +

@@ -27,7 +27,7 @@ constexpr size_t Group::kWidth;
 // Returns "random" seed.
 inline size_t RandomSeed() {
-#if ABSL_HAVE_THREAD_LOCAL
+#ifdef ABSL_HAVE_THREAD_LOCAL
   static thread_local size_t counter = 0;
   size_t value = ++counter;
 #else   // ABSL_HAVE_THREAD_LOCAL
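The `#if` to `#ifdef` change matters because ABSL_HAVE_THREAD_LOCAL is defined with no value, and `#if` requires an expression. A sketch with a hypothetical macro (not the Abseil one) showing the difference:

// FEATURE is defined but expands to nothing, like ABSL_HAVE_THREAD_LOCAL.
#define FEATURE

#ifdef FEATURE  // fine: only asks whether the macro is defined
static thread_local int counter = 0;
#endif

// #if FEATURE   // would not compile: "#if" needs an integer expression,
//               // and FEATURE expands to nothing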
@@ -43,6 +43,19 @@ bool ShouldInsertBackwards(size_t hash, ctrl_t* ctrl) {
   return (H1(hash, ctrl) ^ RandomSeed()) % 13 > 6;
 }

+void ConvertDeletedToEmptyAndFullToDeleted(
+    ctrl_t* ctrl, size_t capacity) {
+  assert(ctrl[capacity] == kSentinel);
+  assert(IsValidCapacity(capacity));
+  for (ctrl_t* pos = ctrl; pos != ctrl + capacity + 1; pos += Group::kWidth) {
+    Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos);
+  }
+  // Copy the cloned ctrl bytes.
+  std::memcpy(ctrl + capacity + 1, ctrl, Group::kWidth);
+  ctrl[capacity] = kSentinel;
+}
+
 }  // namespace container_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
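Out of line or not, the helper rewrites every control byte the same way: full slots become tombstones and tombstones become empty, after which the rehash loop can refill the table in place. An illustrative sketch of the per-byte mapping, using made-up stand-in constants rather than the real control values (the sentinel is restored separately, as the memcpy above shows):

#include <cstdint>

// Stand-ins only; the real kEmpty/kDeleted constants live in raw_hash_set.h.
enum CtrlSketch : int8_t { kEmptySketch = -128, kDeletedSketch = -2 };

// FULL (>= 0) -> DELETED; DELETED and EMPTY (< 0) -> EMPTY.
inline int8_t ConvertOneSketch(int8_t c) {
  return c >= 0 ? kDeletedSketch : kEmptySketch;
}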

@@ -102,7 +102,6 @@
 #include <type_traits>
 #include <utility>

-#include "absl/base/internal/bits.h"
 #include "absl/base/internal/endian.h"
 #include "absl/base/optimization.h"
 #include "absl/base/port.h"
@@ -116,6 +115,7 @@
 #include "absl/container/internal/layout.h"
 #include "absl/memory/memory.h"
 #include "absl/meta/type_traits.h"
+#include "absl/numeric/bits.h"
 #include "absl/utility/utility.h"

 namespace absl {
@@ -189,18 +189,9 @@ constexpr bool IsNoThrowSwappable(std::false_type /* is_swappable */) {
 }

 template <typename T>
-int TrailingZeros(T x) {
-  return sizeof(T) == 8 ? base_internal::CountTrailingZerosNonZero64(
-                              static_cast<uint64_t>(x))
-                        : base_internal::CountTrailingZerosNonZero32(
-                              static_cast<uint32_t>(x));
-}
-
-template <typename T>
-int LeadingZeros(T x) {
-  return sizeof(T) == 8
-             ? base_internal::CountLeadingZeros64(static_cast<uint64_t>(x))
-             : base_internal::CountLeadingZeros32(static_cast<uint32_t>(x));
+uint32_t TrailingZeros(T x) {
+  ABSL_INTERNAL_ASSUME(x != 0);
+  return countr_zero(x);
 }

 // An abstraction over a bitmask. It provides an easy way to iterate through the
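The replacement helper defers to the C++20-style bit functions that absl/numeric/bits.h supplies. A rough standalone equivalent against the standard <bit> header (assuming, as the ASSUME macro asserts, a non-zero argument):

#include <bit>
#include <cstdint>

template <typename T>
uint32_t TrailingZerosSketch(T x) {
  // Precondition: x != 0. The real code conveys this to the optimizer with
  // ABSL_INTERNAL_ASSUME rather than with a runtime check.
  return static_cast<uint32_t>(std::countr_zero(x));
}

static_assert(std::countr_zero(8u) == 3);   // 0b1000
static_assert(std::countl_zero(1u) == 31);  // assuming 32-bit unsigned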
@@ -230,26 +221,24 @@ class BitMask {
   }
   explicit operator bool() const { return mask_ != 0; }
   int operator*() const { return LowestBitSet(); }
-  int LowestBitSet() const {
+  uint32_t LowestBitSet() const {
     return container_internal::TrailingZeros(mask_) >> Shift;
   }
-  int HighestBitSet() const {
-    return (sizeof(T) * CHAR_BIT - container_internal::LeadingZeros(mask_) -
-            1) >>
-           Shift;
+  uint32_t HighestBitSet() const {
+    return static_cast<uint32_t>((bit_width(mask_) - 1) >> Shift);
   }
   BitMask begin() const { return *this; }
   BitMask end() const { return BitMask(0); }
-  int TrailingZeros() const {
+  uint32_t TrailingZeros() const {
     return container_internal::TrailingZeros(mask_) >> Shift;
   }
-  int LeadingZeros() const {
+  uint32_t LeadingZeros() const {
     constexpr int total_significant_bits = SignificantBits << Shift;
     constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits;
-    return container_internal::LeadingZeros(mask_ << extra_bits) >> Shift;
+    return countl_zero(mask_ << extra_bits) >> Shift;
   }

  private:
@@ -380,8 +369,8 @@ struct GroupSse2Impl {
   // Returns the number of trailing empty or deleted elements in the group.
   uint32_t CountLeadingEmptyOrDeleted() const {
     auto special = _mm_set1_epi8(kSentinel);
-    return TrailingZeros(
-        _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1);
+    return TrailingZeros(static_cast<uint32_t>(
+        _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1));
   }

   void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
@@ -472,25 +461,23 @@ inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
 //   DELETED -> EMPTY
 //   EMPTY -> EMPTY
 //   FULL -> DELETED
-inline void ConvertDeletedToEmptyAndFullToDeleted(
-    ctrl_t* ctrl, size_t capacity) {
-  assert(ctrl[capacity] == kSentinel);
-  assert(IsValidCapacity(capacity));
-  for (ctrl_t* pos = ctrl; pos != ctrl + capacity + 1; pos += Group::kWidth) {
-    Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos);
-  }
-  // Copy the cloned ctrl bytes.
-  std::memcpy(ctrl + capacity + 1, ctrl, Group::kWidth);
-  ctrl[capacity] = kSentinel;
-}
+void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity);

 // Rounds up the capacity to the next power of 2 minus 1, with a minimum of 1.
 inline size_t NormalizeCapacity(size_t n) {
-  return n ? ~size_t{} >> LeadingZeros(n) : 1;
+  return n ? ~size_t{} >> countl_zero(n) : 1;
 }
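The branchless body computes the smallest value of the form 2^k - 1 that is at least n: shifting an all-ones word right by countl_zero(n) keeps exactly bit_width(n) low bits set. The same arithmetic, sketched against std::countl_zero:

#include <bit>
#include <cstddef>

constexpr size_t NormalizeSketch(size_t n) {
  return n ? ~size_t{} >> std::countl_zero(n) : 1;
}

static_assert(NormalizeSketch(0) == 1);
static_assert(NormalizeSketch(2) == 3);
static_assert(NormalizeSketch(5) == 7);
static_assert(NormalizeSketch(8) == 15);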
-// We use 7/8th as maximum load factor.
-// For 16-wide groups, that gives an average of two empty slots per group.
+// General notes on capacity/growth methods below:
+// - We use 7/8th as maximum load factor. For 16-wide groups, that gives an
+//   average of two empty slots per group.
+// - For (capacity+1) >= Group::kWidth, growth is 7/8*capacity.
+// - For (capacity+1) < Group::kWidth, growth == capacity. In this case, we
+//   never need to probe (the whole table fits in one group) so we don't need a
+//   load factor less than 1.
+
+// Given `capacity` of the table, returns the size (i.e. number of full slots)
+// at which we should grow the capacity.
 inline size_t CapacityToGrowth(size_t capacity) {
   assert(IsValidCapacity(capacity));
   // `capacity*7/8`
@@ -501,7 +488,7 @@ inline size_t CapacityToGrowth(size_t capacity) {
   return capacity - capacity / 8;
 }
 // From desired "growth" to a lowerbound of the necessary capacity.
-// Might not be a valid one and required NormalizeCapacity().
+// Might not be a valid one and requires NormalizeCapacity().
 inline size_t GrowthToLowerboundCapacity(size_t growth) {
   // `growth*8/7`
   if (Group::kWidth == 8 && growth == 7) {
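Concretely, the two functions are near-inverses around the 7/8 bound. A sketch of the arithmetic with hypothetical names, omitting the kWidth == 8 special case called out above:

#include <cstddef>
#include <cstdint>

constexpr size_t CapacityToGrowthSketch(size_t capacity) {
  return capacity - capacity / 8;  // ~ capacity * 7/8
}
constexpr size_t GrowthToLowerboundCapacitySketch(size_t growth) {
  return growth + static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7);
}

// A capacity-15 table may hold 14 full slots; asking for 14 slots yields a
// lower bound of 15, which NormalizeCapacity() leaves at 15 (already 2^4 - 1).
static_assert(CapacityToGrowthSketch(15) == 14);
static_assert(GrowthToLowerboundCapacitySketch(14) == 15);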
@@ -523,6 +510,64 @@ inline void AssertIsValid(ctrl_t* ctrl) {
          "been erased, or the table might have rehashed.");
 }

+struct FindInfo {
+  size_t offset;
+  size_t probe_length;
+};
+
+// The representation of the object has two modes:
+//  - small: For capacities < kWidth-1
+//  - large: For the rest.
+//
+// Differences:
+//  - In small mode we are able to use the whole capacity. The extra control
+//    bytes give us at least one "empty" control byte to stop the iteration.
+//    This is important to make 1 a valid capacity.
+//
+//  - In small mode only the first `capacity()` control bytes after the
+//    sentinel are valid. The rest contain dummy kEmpty values that do not
+//    represent a real slot. This is important to take into account on
+//    find_first_non_full(), where we never try ShouldInsertBackwards() for
+//    small tables.
+inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; }
+
+inline probe_seq<Group::kWidth> probe(ctrl_t* ctrl, size_t hash,
+                                      size_t capacity) {
+  return probe_seq<Group::kWidth>(H1(hash, ctrl), capacity);
+}
+
+// Probes the raw_hash_set with the probe sequence for hash and returns the
+// pointer to the first empty or deleted slot.
+// NOTE: this function must work with tables having both kEmpty and kDeleted
+// in one group. Such tables appear during drop_deletes_without_resize.
+//
+// This function is very useful when insertions happen and:
+// - the input is already a set
+// - there are enough slots
+// - the element with the hash is not in the table
+inline FindInfo find_first_non_full(ctrl_t* ctrl, size_t hash,
+                                    size_t capacity) {
+  auto seq = probe(ctrl, hash, capacity);
+  while (true) {
+    Group g{ctrl + seq.offset()};
+    auto mask = g.MatchEmptyOrDeleted();
+    if (mask) {
+#if !defined(NDEBUG)
+      // We want to add entropy even when ASLR is not enabled.
+      // In debug builds we will randomly insert at either the front or the
+      // back of the group.
+      // TODO(kfm,sbenza): revisit after we do unconditional mixing
+      if (!is_small(capacity) && ShouldInsertBackwards(hash, ctrl)) {
+        return {seq.offset(mask.HighestBitSet()), seq.index()};
+      }
+#endif
+      return {seq.offset(mask.LowestBitSet()), seq.index()};
+    }
+    seq.next();
+    assert(seq.index() < capacity && "full table!");
+  }
+}
+
 // Policy: a policy defines how to perform different operations on
 // the slots of the hashtable (see hash_policy_traits.h for the full interface
 // of policy).
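find_first_non_full() walks the same probe sequence that lookup uses. A standalone sketch (not the real probe_seq class) of the triangular probing it performs: the step grows by one group width each time, so with a power-of-two group count every group is visited exactly once before the sequence wraps.

#include <cstddef>

struct ProbeSeqSketch {
  size_t mask;    // capacity, a power of two minus one
  size_t offset;  // current group start
  size_t index = 0;
  ProbeSeqSketch(size_t hash, size_t mask) : mask(mask), offset(hash & mask) {}
  void next(size_t width) {
    index += width;                    // +1w, +2w, +3w, ... cumulatively
    offset = (offset + index) & mask;  // wrap within the table
  }
};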
@@ -747,10 +792,10 @@ class raw_hash_set {
   explicit raw_hash_set(size_t bucket_count, const hasher& hash = hasher(),
                         const key_equal& eq = key_equal(),
                         const allocator_type& alloc = allocator_type())
-      : ctrl_(EmptyGroup()), settings_(0, hash, eq, alloc) {
+      : ctrl_(EmptyGroup()),
+        settings_(0, HashtablezInfoHandle(), hash, eq, alloc) {
     if (bucket_count) {
       capacity_ = NormalizeCapacity(bucket_count);
-      reset_growth_left();
       initialize_slots();
     }
   }
@@ -856,10 +901,10 @@ class raw_hash_set {
     // than a full `insert`.
     for (const auto& v : that) {
       const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v);
-      auto target = find_first_non_full(hash);
+      auto target = find_first_non_full(ctrl_, hash, capacity_);
       set_ctrl(target.offset, H2(hash));
       emplace_at(target.offset, v);
-      infoz_.RecordInsert(hash, target.probe_length);
+      infoz().RecordInsert(hash, target.probe_length);
     }
     size_ = that.size();
     growth_left() -= that.size();
@@ -873,28 +918,27 @@ class raw_hash_set {
         slots_(absl::exchange(that.slots_, nullptr)),
         size_(absl::exchange(that.size_, 0)),
         capacity_(absl::exchange(that.capacity_, 0)),
-        infoz_(absl::exchange(that.infoz_, HashtablezInfoHandle())),
         // Hash, equality and allocator are copied instead of moved because
         // `that` must be left valid. If Hash is std::function<Key>, moving it
         // would create a nullptr functor that cannot be called.
-        settings_(that.settings_) {
-    // growth_left was copied above, reset the one from `that`.
-    that.growth_left() = 0;
-  }
+        settings_(absl::exchange(that.growth_left(), 0),
+                  absl::exchange(that.infoz(), HashtablezInfoHandle()),
+                  that.hash_ref(), that.eq_ref(), that.alloc_ref()) {}

   raw_hash_set(raw_hash_set&& that, const allocator_type& a)
       : ctrl_(EmptyGroup()),
         slots_(nullptr),
         size_(0),
         capacity_(0),
-        settings_(0, that.hash_ref(), that.eq_ref(), a) {
+        settings_(0, HashtablezInfoHandle(), that.hash_ref(), that.eq_ref(),
+                  a) {
     if (a == that.alloc_ref()) {
       std::swap(ctrl_, that.ctrl_);
       std::swap(slots_, that.slots_);
       std::swap(size_, that.size_);
       std::swap(capacity_, that.capacity_);
       std::swap(growth_left(), that.growth_left());
-      std::swap(infoz_, that.infoz_);
+      std::swap(infoz(), that.infoz());
     } else {
       reserve(that.size());
       // Note: this will copy elements of dense_set and unordered_set instead of
@@ -965,7 +1009,7 @@ class raw_hash_set {
       reset_growth_left();
     }
     assert(empty());
-    infoz_.RecordStorageChanged(0, capacity_);
+    infoz().RecordStorageChanged(0, capacity_);
   }

   // This overload kicks in when the argument is an rvalue of insertable and
@@ -1038,7 +1082,7 @@ class raw_hash_set {
   template <class InputIt>
   void insert(InputIt first, InputIt last) {
-    for (; first != last; ++first) insert(*first);
+    for (; first != last; ++first) emplace(*first);
   }

   template <class T, RequiresNotInit<T> = 0, RequiresInsertable<const T&> = 0>
@@ -1065,7 +1109,9 @@ class raw_hash_set {
   }

   iterator insert(const_iterator, node_type&& node) {
-    return insert(std::move(node)).first;
+    auto res = insert(std::move(node));
+    node = std::move(res.node);
+    return res.position;
   }
   // This overload kicks in if we can deduce the key from args. This enables us
@@ -1255,7 +1301,7 @@ class raw_hash_set {
     swap(growth_left(), that.growth_left());
     swap(hash_ref(), that.hash_ref());
     swap(eq_ref(), that.eq_ref());
-    swap(infoz_, that.infoz_);
+    swap(infoz(), that.infoz());
     SwapAlloc(alloc_ref(), that.alloc_ref(),
               typename AllocTraits::propagate_on_container_swap{});
   }
@@ -1264,7 +1310,7 @@ class raw_hash_set {
     if (n == 0 && capacity_ == 0) return;
     if (n == 0 && size_ == 0) {
       destroy_slots();
-      infoz_.RecordStorageChanged(0, 0);
+      infoz().RecordStorageChanged(0, 0);
       return;
     }
     // bitor is a faster way of doing `max` here. We will round up to the next
@@ -1276,7 +1322,12 @@ class raw_hash_set {
     }
   }

-  void reserve(size_t n) { rehash(GrowthToLowerboundCapacity(n)); }
+  void reserve(size_t n) {
+    size_t m = GrowthToLowerboundCapacity(n);
+    if (m > capacity_) {
+      resize(NormalizeCapacity(m));
+    }
+  }

   // Extension API: support for heterogeneous keys.
   //
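With this change reserve() becomes a no-op unless the table actually has to grow, instead of unconditionally rehashing. A usage sketch against the public Swiss-table containers built on raw_hash_set:

#include "absl/container/flat_hash_set.h"

void ReserveExample() {
  absl::flat_hash_set<int> s;
  s.reserve(100);  // one up-front allocation
  for (int i = 0; i < 100; ++i) s.insert(i);
  s.reserve(10);  // no-op under the new code: reserve never shrinks
}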
@@ -1301,7 +1352,7 @@ class raw_hash_set {
   void prefetch(const key_arg<K>& key) const {
     (void)key;
 #if defined(__GNUC__)
-    auto seq = probe(hash_ref()(key));
+    auto seq = probe(ctrl_, hash_ref()(key), capacity_);
     __builtin_prefetch(static_cast<const void*>(ctrl_ + seq.offset()));
     __builtin_prefetch(static_cast<const void*>(slots_ + seq.offset()));
 #endif  // __GNUC__
@@ -1316,7 +1367,7 @@ class raw_hash_set {
   // called heterogeneous key support.
   template <class K = key_type>
   iterator find(const key_arg<K>& key, size_t hash) {
-    auto seq = probe(hash);
+    auto seq = probe(ctrl_, hash, capacity_);
     while (true) {
       Group g{ctrl_ + seq.offset()};
       for (int i : g.Match(H2(hash))) {
@@ -1477,7 +1528,7 @@ class raw_hash_set {
     set_ctrl(index, was_never_full ? kEmpty : kDeleted);
     growth_left() += was_never_full;
-    infoz_.RecordErase();
+    infoz().RecordErase();
   }
   void initialize_slots() {
@@ -1494,7 +1545,7 @@ class raw_hash_set {
     //   bound more carefully.
     if (std::is_same<SlotAlloc, std::allocator<slot_type>>::value &&
         slots_ == nullptr) {
-      infoz_ = Sample();
+      infoz() = Sample();
     }

     auto layout = MakeLayout(capacity_);
@@ -1504,7 +1555,7 @@ class raw_hash_set {
     slots_ = layout.template Pointer<1>(mem);
     reset_ctrl();
     reset_growth_left();
-    infoz_.RecordStorageChanged(size_, capacity_);
+    infoz().RecordStorageChanged(size_, capacity_);
   }
   void destroy_slots() {
@@ -1538,7 +1589,7 @@ class raw_hash_set {
       if (IsFull(old_ctrl[i])) {
         size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
                                           PolicyTraits::element(old_slots + i));
-        auto target = find_first_non_full(hash);
+        auto target = find_first_non_full(ctrl_, hash, capacity_);
         size_t new_i = target.offset;
         total_probe_length += target.probe_length;
         set_ctrl(new_i, H2(hash));
@@ -1552,12 +1603,12 @@ class raw_hash_set {
       Deallocate<Layout::Alignment()>(&alloc_ref(), old_ctrl,
                                       layout.AllocSize());
     }
-    infoz_.RecordRehash(total_probe_length);
+    infoz().RecordRehash(total_probe_length);
   }
   void drop_deletes_without_resize() ABSL_ATTRIBUTE_NOINLINE {
     assert(IsValidCapacity(capacity_));
-    assert(!is_small());
+    assert(!is_small(capacity_));
     // Algorithm:
     // - mark all DELETED slots as EMPTY
     // - mark all FULL slots as DELETED
@@ -1582,7 +1633,7 @@ class raw_hash_set {
       if (!IsDeleted(ctrl_[i])) continue;
       size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
                                         PolicyTraits::element(slots_ + i));
-      auto target = find_first_non_full(hash);
+      auto target = find_first_non_full(ctrl_, hash, capacity_);
       size_t new_i = target.offset;
       total_probe_length += target.probe_length;
@@ -1590,7 +1641,8 @@ class raw_hash_set {
       // If they do, we don't need to move the object as it falls already in the
       // best probe we can.
       const auto probe_index = [&](size_t pos) {
-        return ((pos - probe(hash).offset()) & capacity_) / Group::kWidth;
+        return ((pos - probe(ctrl_, hash, capacity_).offset()) & capacity_) /
+               Group::kWidth;
       };

       // Element doesn't move.
@@ -1617,7 +1669,7 @@ class raw_hash_set {
       }
     }
     reset_growth_left();
-    infoz_.RecordRehash(total_probe_length);
+    infoz().RecordRehash(total_probe_length);
   }
   void rehash_and_grow_if_necessary() {
@@ -1634,7 +1686,7 @@ class raw_hash_set {
   bool has_element(const value_type& elem) const {
     size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, elem);
-    auto seq = probe(hash);
+    auto seq = probe(ctrl_, hash, capacity_);
     while (true) {
       Group g{ctrl_ + seq.offset()};
       for (int i : g.Match(H2(hash))) {
@@ -1649,41 +1701,6 @@ class raw_hash_set {
     return false;
   }

-  // Probes the raw_hash_set with the probe sequence for hash and returns the
-  // pointer to the first empty or deleted slot.
-  // NOTE: this function must work with tables having both kEmpty and kDeleted
-  // in one group. Such tables appear during drop_deletes_without_resize.
-  //
-  // This function is very useful when insertions happen and:
-  // - the input is already a set
-  // - there are enough slots
-  // - the element with the hash is not in the table
-  struct FindInfo {
-    size_t offset;
-    size_t probe_length;
-  };
-  FindInfo find_first_non_full(size_t hash) {
-    auto seq = probe(hash);
-    while (true) {
-      Group g{ctrl_ + seq.offset()};
-      auto mask = g.MatchEmptyOrDeleted();
-      if (mask) {
-#if !defined(NDEBUG)
-        // We want to add entropy even when ASLR is not enabled.
-        // In debug builds we will randomly insert at either the front or the
-        // back of the group.
-        // TODO(kfm,sbenza): revisit after we do unconditional mixing
-        if (!is_small() && ShouldInsertBackwards(hash, ctrl_)) {
-          return {seq.offset(mask.HighestBitSet()), seq.index()};
-        }
-#endif
-        return {seq.offset(mask.LowestBitSet()), seq.index()};
-      }
-      seq.next();
-      assert(seq.index() < capacity_ && "full table!");
-    }
-  }
-
   // TODO(alkis): Optimize this assuming *this and that don't overlap.
   raw_hash_set& move_assign(raw_hash_set&& that, std::true_type) {
     raw_hash_set tmp(std::move(that));
@@ -1700,7 +1717,7 @@ class raw_hash_set {
   template <class K>
   std::pair<size_t, bool> find_or_prepare_insert(const K& key) {
     auto hash = hash_ref()(key);
-    auto seq = probe(hash);
+    auto seq = probe(ctrl_, hash, capacity_);
     while (true) {
       Group g{ctrl_ + seq.offset()};
       for (int i : g.Match(H2(hash))) {
@@ -1717,16 +1734,16 @@ class raw_hash_set {
   }

   size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE {
-    auto target = find_first_non_full(hash);
+    auto target = find_first_non_full(ctrl_, hash, capacity_);
     if (ABSL_PREDICT_FALSE(growth_left() == 0 &&
                            !IsDeleted(ctrl_[target.offset]))) {
       rehash_and_grow_if_necessary();
-      target = find_first_non_full(hash);
+      target = find_first_non_full(ctrl_, hash, capacity_);
     }
     ++size_;
     growth_left() -= IsEmpty(ctrl_[target.offset]);
     set_ctrl(target.offset, H2(hash));
-    infoz_.RecordInsert(hash, target.probe_length);
+    infoz().RecordInsert(hash, target.probe_length);
     return target.offset;
   }
@@ -1754,10 +1771,6 @@ class raw_hash_set {
  private:
   friend struct RawHashSetTestOnlyAccess;

-  probe_seq<Group::kWidth> probe(size_t hash) const {
-    return probe_seq<Group::kWidth>(H1(hash, ctrl_), capacity_);
-  }
-
   // Reset all ctrl bytes back to kEmpty, except the sentinel.
   void reset_ctrl() {
     std::memset(ctrl_, kEmpty, capacity_ + Group::kWidth);
@@ -1787,29 +1800,15 @@ class raw_hash_set {
   size_t& growth_left() { return settings_.template get<0>(); }

-  // The representation of the object has two modes:
-  //  - small: For capacities < kWidth-1
-  //  - large: For the rest.
-  //
-  // Differences:
-  //  - In small mode we are able to use the whole capacity. The extra control
-  //    bytes give us at least one "empty" control byte to stop the iteration.
-  //    This is important to make 1 a valid capacity.
-  //
-  //  - In small mode only the first `capacity()` control bytes after the
-  //    sentinel are valid. The rest contain dummy kEmpty values that do not
-  //    represent a real slot. This is important to take into account on
-  //    find_first_non_full(), where we never try ShouldInsertBackwards() for
-  //    small tables.
-  bool is_small() const { return capacity_ < Group::kWidth - 1; }
-
-  hasher& hash_ref() { return settings_.template get<1>(); }
-  const hasher& hash_ref() const { return settings_.template get<1>(); }
-  key_equal& eq_ref() { return settings_.template get<2>(); }
-  const key_equal& eq_ref() const { return settings_.template get<2>(); }
-  allocator_type& alloc_ref() { return settings_.template get<3>(); }
+  HashtablezInfoHandle& infoz() { return settings_.template get<1>(); }
+
+  hasher& hash_ref() { return settings_.template get<2>(); }
+  const hasher& hash_ref() const { return settings_.template get<2>(); }
+  key_equal& eq_ref() { return settings_.template get<3>(); }
+  const key_equal& eq_ref() const { return settings_.template get<3>(); }
+  allocator_type& alloc_ref() { return settings_.template get<4>(); }
   const allocator_type& alloc_ref() const {
-    return settings_.template get<3>();
+    return settings_.template get<4>();
   }
   // TODO(alkis): Investigate removing some of these fields:
@@ -1819,10 +1818,11 @@ class raw_hash_set {
   slot_type* slots_ = nullptr;  // [capacity * slot_type]
   size_t size_ = 0;             // number of full slots
   size_t capacity_ = 0;         // total number of slots
-  HashtablezInfoHandle infoz_;
-  absl::container_internal::CompressedTuple<size_t /* growth_left */, hasher,
-                                            key_equal, allocator_type>
-      settings_{0, hasher{}, key_equal{}, allocator_type{}};
+  absl::container_internal::CompressedTuple<size_t /* growth_left */,
+                                            HashtablezInfoHandle, hasher,
+                                            key_equal, allocator_type>
+      settings_{0, HashtablezInfoHandle{}, hasher{}, key_equal{},
+                allocator_type{}};
 };

 // Erases all elements that satisfy the predicate `pred` from the container `c`.
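Folding infoz_ into settings_ pays off because CompressedTuple applies the empty-base optimization element by element, so a disabled (empty) HashtablezInfoHandle contributes no bytes. A self-contained illustration of the effect, with a hypothetical empty stand-in type:

#include <cstddef>

struct EmptyHandle {};  // stand-in for a disabled HashtablezInfoHandle

struct AsMember {
  size_t growth_left;
  EmptyHandle infoz;  // as a member it still occupies (padded) storage
};
struct AsBase : EmptyHandle {  // as a base it can occupy zero bytes,
  size_t growth_left;          // the trick CompressedTuple uses per element
};

static_assert(sizeof(AsMember) > sizeof(size_t));
static_assert(sizeof(AsBase) == sizeof(size_t));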
@@ -1846,7 +1846,7 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
                             const typename Set::key_type& key) {
     size_t num_probes = 0;
     size_t hash = set.hash_ref()(key);
-    auto seq = set.probe(hash);
+    auto seq = probe(set.ctrl_, hash, set.capacity_);
     while (true) {
       container_internal::Group g{set.ctrl_ + seq.offset()};
       for (int i : g.Match(container_internal::H2(hash))) {

@@ -466,6 +466,9 @@ class PAlloc {
   size_t id_ = std::numeric_limits<size_t>::max();
 };

+// This doesn't compile with GCC 5.4 and 5.5 due to a bug in noexcept handling.
+#if !defined(__GNUC__) || __GNUC__ != 5 || (__GNUC_MINOR__ != 4 && \
+                                            __GNUC_MINOR__ != 5)
 TEST(NoPropagateOn, Swap) {
   using PA = PAlloc<char>;
   using Table = raw_hash_set<Policy, Identity, std::equal_to<int32_t>, PA>;
@@ -475,6 +478,7 @@ TEST(NoPropagateOn, Swap) {
   EXPECT_EQ(t1.get_allocator(), PA(1));
   EXPECT_EQ(t2.get_allocator(), PA(2));
 }
+#endif

 TEST(NoPropagateOn, CopyConstruct) {
   using PA = PAlloc<char>;

@@ -14,6 +14,7 @@
 #include "absl/container/internal/raw_hash_set.h"

+#include <atomic>
 #include <cmath>
 #include <cstdint>
 #include <deque>
@@ -22,6 +23,8 @@
 #include <numeric>
 #include <random>
 #include <string>
+#include <unordered_map>
+#include <unordered_set>

 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
@@ -48,11 +51,10 @@ struct RawHashSetTestOnlyAccess {

 namespace {

-using ::testing::DoubleNear;
 using ::testing::ElementsAre;
+using ::testing::Eq;
 using ::testing::Ge;
 using ::testing::Lt;
-using ::testing::Optional;
 using ::testing::Pair;
 using ::testing::UnorderedElementsAre;
@@ -75,8 +77,14 @@ TEST(Util, GrowthAndCapacity) {
   for (size_t growth = 0; growth < 10000; ++growth) {
     SCOPED_TRACE(growth);
     size_t capacity = NormalizeCapacity(GrowthToLowerboundCapacity(growth));
-    // The capacity is large enough for `growth`
+    // The capacity is large enough for `growth`.
     EXPECT_THAT(CapacityToGrowth(capacity), Ge(growth));
+    // For (capacity+1) < kWidth, growth should equal capacity.
+    if (capacity + 1 < Group::kWidth) {
+      EXPECT_THAT(CapacityToGrowth(capacity), Eq(capacity));
+    } else {
+      EXPECT_THAT(CapacityToGrowth(capacity), Lt(capacity));
+    }
     if (growth != 0 && capacity > 1) {
       // There is no smaller capacity that works.
       EXPECT_THAT(CapacityToGrowth(capacity / 2), Lt(growth));
@@ -250,25 +258,43 @@ TEST(Group, CountLeadingEmptyOrDeleted) {
   }
 }

-struct IntPolicy {
-  using slot_type = int64_t;
-  using key_type = int64_t;
-  using init_type = int64_t;
-
-  static void construct(void*, int64_t* slot, int64_t v) { *slot = v; }
-  static void destroy(void*, int64_t*) {}
-  static void transfer(void*, int64_t* new_slot, int64_t* old_slot) {
-    *new_slot = *old_slot;
-  }
-
-  static int64_t& element(slot_type* slot) { return *slot; }
-
-  template <class F>
-  static auto apply(F&& f, int64_t x) -> decltype(std::forward<F>(f)(x, x)) {
-    return std::forward<F>(f)(x, x);
-  }
-};
+template <class T>
+struct ValuePolicy {
+  using slot_type = T;
+  using key_type = T;
+  using init_type = T;
+
+  template <class Allocator, class... Args>
+  static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
+    absl::allocator_traits<Allocator>::construct(*alloc, slot,
+                                                 std::forward<Args>(args)...);
+  }
+
+  template <class Allocator>
+  static void destroy(Allocator* alloc, slot_type* slot) {
+    absl::allocator_traits<Allocator>::destroy(*alloc, slot);
+  }
+
+  template <class Allocator>
+  static void transfer(Allocator* alloc, slot_type* new_slot,
+                       slot_type* old_slot) {
+    construct(alloc, new_slot, std::move(*old_slot));
+    destroy(alloc, old_slot);
+  }
+
+  static T& element(slot_type* slot) { return *slot; }
+
+  template <class F, class... Args>
+  static decltype(absl::container_internal::DecomposeValue(
+      std::declval<F>(), std::declval<Args>()...))
+  apply(F&& f, Args&&... args) {
+    return absl::container_internal::DecomposeValue(
+        std::forward<F>(f), std::forward<Args>(args)...);
+  }
+};
+
+using IntPolicy = ValuePolicy<int64_t>;
 class StringPolicy {
   template <class F, class K, class V,
             class = typename std::enable_if<
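Unlike the old IntPolicy, ValuePolicy routes slot lifetime through allocator_traits, which is what lets the new tests hold non-trivial element types. What its transfer() amounts to, sketched for an arbitrary allocator (hypothetical helper name):

#include <memory>
#include <utility>

template <class Alloc, class T>
void TransferSketch(Alloc* alloc, T* new_slot, T* old_slot) {
  // Move-construct the new slot, then destroy the moved-from old slot.
  std::allocator_traits<Alloc>::construct(*alloc, new_slot,
                                          std::move(*old_slot));
  std::allocator_traits<Alloc>::destroy(*alloc, old_slot);
}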
@@ -393,6 +419,13 @@ TEST(Table, EmptyFunctorOptimization) {
     size_t growth_left;
     void* infoz;
   };
+  struct MockTableInfozDisabled {
+    void* ctrl;
+    void* slots;
+    size_t size;
+    size_t capacity;
+    size_t growth_left;
+  };
   struct StatelessHash {
     size_t operator()(absl::string_view) const { return 0; }
   };
@@ -400,17 +433,27 @@ TEST(Table, EmptyFunctorOptimization) {
     size_t dummy;
   };

-  EXPECT_EQ(
-      sizeof(MockTable),
-      sizeof(
-          raw_hash_set<StringPolicy, StatelessHash,
-                       std::equal_to<absl::string_view>, std::allocator<int>>));
-
-  EXPECT_EQ(
-      sizeof(MockTable) + sizeof(StatefulHash),
-      sizeof(
-          raw_hash_set<StringPolicy, StatefulHash,
-                       std::equal_to<absl::string_view>, std::allocator<int>>));
+  if (std::is_empty<HashtablezInfoHandle>::value) {
+    EXPECT_EQ(sizeof(MockTableInfozDisabled),
+              sizeof(raw_hash_set<StringPolicy, StatelessHash,
+                                  std::equal_to<absl::string_view>,
+                                  std::allocator<int>>));
+    EXPECT_EQ(sizeof(MockTableInfozDisabled) + sizeof(StatefulHash),
+              sizeof(raw_hash_set<StringPolicy, StatefulHash,
+                                  std::equal_to<absl::string_view>,
+                                  std::allocator<int>>));
+  } else {
+    EXPECT_EQ(sizeof(MockTable),
+              sizeof(raw_hash_set<StringPolicy, StatelessHash,
+                                  std::equal_to<absl::string_view>,
+                                  std::allocator<int>>));
+    EXPECT_EQ(sizeof(MockTable) + sizeof(StatefulHash),
+              sizeof(raw_hash_set<StringPolicy, StatefulHash,
+                                  std::equal_to<absl::string_view>,
+                                  std::allocator<int>>));
+  }
 }

 TEST(Table, Empty) {
@@ -847,7 +890,8 @@ TEST(Table, EraseMaintainsValidIterator) {
 std::vector<int64_t> CollectBadMergeKeys(size_t N) {
   static constexpr int kGroupSize = Group::kWidth - 1;

-  auto topk_range = [](size_t b, size_t e, IntTable* t) -> std::vector<int64_t> {
+  auto topk_range = [](size_t b, size_t e,
+                       IntTable* t) -> std::vector<int64_t> {
     for (size_t i = b; i != e; ++i) {
       t->emplace(i);
     }
@@ -1001,8 +1045,8 @@ using ProbeStatsPerSize = std::map<size_t, ProbeStats>;
 // 1. Create new table and reserve it to keys.size() * 2
 // 2. Insert all keys xored with seed
 // 3. Collect ProbeStats from final table.
-ProbeStats CollectProbeStatsOnKeysXoredWithSeed(const std::vector<int64_t>& keys,
-                                                size_t num_iters) {
+ProbeStats CollectProbeStatsOnKeysXoredWithSeed(
+    const std::vector<int64_t>& keys, size_t num_iters) {
   const size_t reserve_size = keys.size() * 2;
   ProbeStats stats;
@@ -1656,6 +1700,38 @@ TEST(Table, Merge) {
   EXPECT_THAT(t2, UnorderedElementsAre(Pair("0", "~0")));
 }

+TEST(Table, IteratorEmplaceConstructibleRequirement) {
+  struct Value {
+    explicit Value(absl::string_view view) : value(view) {}
+    std::string value;
+
+    bool operator==(const Value& other) const { return value == other.value; }
+  };
+  struct H {
+    size_t operator()(const Value& v) const {
+      return absl::Hash<std::string>{}(v.value);
+    }
+  };
+
+  struct Table : raw_hash_set<ValuePolicy<Value>, H, std::equal_to<Value>,
+                              std::allocator<Value>> {
+    using Base = typename Table::raw_hash_set;
+    using Base::Base;
+  };
+
+  std::string input[3]{"A", "B", "C"};
+
+  Table t(std::begin(input), std::end(input));
+  EXPECT_THAT(t, UnorderedElementsAre(Value{"A"}, Value{"B"}, Value{"C"}));
+
+  input[0] = "D";
+  input[1] = "E";
+  input[2] = "F";
+  t.insert(std::begin(input), std::end(input));
+  EXPECT_THAT(t, UnorderedElementsAre(Value{"A"}, Value{"B"}, Value{"C"},
+                                      Value{"D"}, Value{"E"}, Value{"F"}));
+}
+
 TEST(Nodes, EmptyNodeType) {
   using node_type = StringTable::node_type;
   node_type n;
@@ -1710,6 +1786,26 @@ TEST(Nodes, ExtractInsert) {
   EXPECT_FALSE(node);
 }

+TEST(Nodes, HintInsert) {
+  IntTable t = {1, 2, 3};
+  auto node = t.extract(1);
+  EXPECT_THAT(t, UnorderedElementsAre(2, 3));
+  auto it = t.insert(t.begin(), std::move(node));
+  EXPECT_THAT(t, UnorderedElementsAre(1, 2, 3));
+  EXPECT_EQ(*it, 1);
+  EXPECT_FALSE(node);
+
+  node = t.extract(2);
+  EXPECT_THAT(t, UnorderedElementsAre(1, 3));
+  // Reinsert 2 to make the next insert fail.
+  t.insert(2);
+  EXPECT_THAT(t, UnorderedElementsAre(1, 2, 3));
+  it = t.insert(t.begin(), std::move(node));
+  EXPECT_EQ(*it, 2);
+  // The node was not emptied by the insert call.
+  EXPECT_TRUE(node);
+}
+
 IntTable MakeSimpleTable(size_t size) {
   IntTable t;
   while (t.size() < size) t.insert(t.size());
@@ -1804,18 +1900,34 @@ TEST(RawHashSamplerTest, Sample) {
   auto& sampler = HashtablezSampler::Global();
   size_t start_size = 0;
-  start_size += sampler.Iterate([&](const HashtablezInfo&) { ++start_size; });
+  std::unordered_set<const HashtablezInfo*> preexisting_info;
+  start_size += sampler.Iterate([&](const HashtablezInfo& info) {
+    preexisting_info.insert(&info);
+    ++start_size;
+  });

   std::vector<IntTable> tables;
   for (int i = 0; i < 1000000; ++i) {
     tables.emplace_back();
     tables.back().insert(1);
+    tables.back().insert(i % 5);
   }
   size_t end_size = 0;
-  end_size += sampler.Iterate([&](const HashtablezInfo&) { ++end_size; });
+  std::unordered_map<size_t, int> observed_checksums;
+  end_size += sampler.Iterate([&](const HashtablezInfo& info) {
+    if (preexisting_info.count(&info) == 0) {
+      observed_checksums[info.hashes_bitwise_xor.load(
+          std::memory_order_relaxed)]++;
+    }
+    ++end_size;
+  });

   EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()),
               0.01, 0.005);
+  EXPECT_EQ(observed_checksums.size(), 5);
+  for (const auto& [_, count] : observed_checksums) {
+    EXPECT_NEAR((100 * count) / static_cast<double>(tables.size()), 0.2, 0.05);
+  }
 }
 #endif  // ABSL_INTERNAL_HASHTABLEZ_SAMPLE

@@ -16,6 +16,7 @@
 #define ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_CONSTRUCTOR_TEST_H_

 #include <algorithm>
+#include <unordered_map>
 #include <vector>

 #include "gmock/gmock.h"

@@ -18,7 +18,7 @@
 //
 // An `absl::node_hash_set<T>` is an unordered associative container designed to
 // be a more efficient replacement for `std::unordered_set`. Like
-// `unordered_set`, search, insertion, and deletion of map elements can be done
+// `unordered_set`, search, insertion, and deletion of set elements can be done
 // as an `O(1)` operation. However, `node_hash_set` (and other unordered
 // associative containers known as the collection of Abseil "Swiss tables")
 // contain other optimizations that result in both memory and computation
@@ -60,7 +60,7 @@ struct NodeHashSetPolicy;
 // following notable differences:
 //
 // * Supports heterogeneous lookup, through `find()`, `operator[]()` and
-//   `insert()`, provided that the map is provided a compatible heterogeneous
+//   `insert()`, provided that the set is provided a compatible heterogeneous
 //   hashing function and equality operator.
 // * Contains a `capacity()` member function indicating the number of element
 //   slots (open, deleted, and empty) within the hash set.
@@ -76,13 +76,13 @@ struct NodeHashSetPolicy;
 // Example:
 //
 //   // Create a node hash set of three strings
-//   absl::node_hash_map<std::string, std::string> ducks =
+//   absl::node_hash_set<std::string> ducks =
 //     {"huey", "dewey", "louie"};
 //
-//  // Insert a new element into the node hash map
-//  ducks.insert("donald"};
+//   // Insert a new element into the node hash set
+//   ducks.insert("donald");
 //
-//  // Force a rehash of the node hash map
-//  ducks.rehash(0);
+//   // Force a rehash of the node hash set
+//   ducks.rehash(0);
 //
 //   // See if "dewey" is present
@@ -100,7 +100,7 @@ class node_hash_set
  public:
   // Constructors and Assignment Operators
   //
-  // A node_hash_set supports the same overload set as `std::unordered_map`
+  // A node_hash_set supports the same overload set as `std::unordered_set`
   // for construction and assignment:
   //
   // * Default constructor
@@ -167,7 +167,7 @@ class node_hash_set
   // available within the `node_hash_set`.
   //
   // NOTE: this member function is particular to `absl::node_hash_set` and is
-  // not provided in the `std::unordered_map` API.
+  // not provided in the `std::unordered_set` API.
   using Base::capacity;

   // node_hash_set::empty()
@@ -208,7 +208,7 @@ class node_hash_set
   //   `void`.
   //
   // NOTE: this return behavior is different than that of STL containers in
-  // general and `std::unordered_map` in particular.
+  // general and `std::unordered_set` in particular.
   //
   // iterator erase(const_iterator first, const_iterator last):
   //
@@ -314,7 +314,7 @@ class node_hash_set
   // node_hash_set::merge()
   //
-  // Extracts elements from a given `source` flat hash map into this
+  // Extracts elements from a given `source` node hash set into this
   // `node_hash_set`. If the destination `node_hash_set` already contains an
   // element with an equivalent key, that element is not extracted.
   using Base::merge;
@@ -322,15 +322,15 @@ class node_hash_set
   // node_hash_set::swap(node_hash_set& other)
   //
   // Exchanges the contents of this `node_hash_set` with those of the `other`
-  // flat hash map, avoiding invocation of any move, copy, or swap operations on
+  // node hash set, avoiding invocation of any move, copy, or swap operations on
   // individual elements.
   //
   // All iterators and references on the `node_hash_set` remain valid, excepting
   // for the past-the-end iterator, which is invalidated.
   //
-  // `swap()` requires that the flat hash set's hashing and key equivalence
-  // functions be Swappable, and are exchaged using unqualified calls to
-  // non-member `swap()`. If the map's allocator has
+  // `swap()` requires that the node hash set's hashing and key equivalence
+  // functions be Swappable, and are exchanged using unqualified calls to
+  // non-member `swap()`. If the set's allocator has
   // `std::allocator_traits<allocator_type>::propagate_on_container_swap::value`
   // set to `true`, the allocators are also exchanged using an unqualified call
   // to non-member `swap()`; otherwise, the allocators are not swapped.
@@ -385,14 +385,14 @@ class node_hash_set
   // node_hash_set::bucket_count()
   //
   // Returns the number of "buckets" within the `node_hash_set`. Note that
-  // because a flat hash map contains all elements within its internal storage,
+  // because a node hash set contains all elements within its internal storage,
   // this value simply equals the current capacity of the `node_hash_set`.
   using Base::bucket_count;

   // node_hash_set::load_factor()
   //
   // Returns the current load factor of the `node_hash_set` (the average number
-  // of slots occupied with a value within the hash map).
+  // of slots occupied with a value within the hash set).
   using Base::load_factor;

   // node_hash_set::max_load_factor()
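The corrected header example, assembled into a runnable form (contains() is part of the documented API):

#include <string>

#include "absl/container/node_hash_set.h"

int main() {
  absl::node_hash_set<std::string> ducks = {"huey", "dewey", "louie"};
  ducks.insert("donald");  // the old comment's `ducks.insert("donald"};` typo, fixed
  ducks.rehash(0);         // force a rehash
  return ducks.contains("dewey") ? 0 : 1;
}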

@@ -12,16 +12,16 @@ else()
   set(ABSL_BUILD_DLL FALSE)
 endif()

-if("${CMAKE_SYSTEM_PROCESSOR}" MATCHES "x86_64|amd64|AMD64")
+if(CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64|amd64|AMD64")
   if (MSVC)
     set(ABSL_RANDOM_RANDEN_COPTS "${ABSL_RANDOM_HWAES_MSVC_X64_FLAGS}")
   else()
     set(ABSL_RANDOM_RANDEN_COPTS "${ABSL_RANDOM_HWAES_X64_FLAGS}")
   endif()
-elseif("${CMAKE_SYSTEM_PROCESSOR}" MATCHES "arm.*|aarch64")
-  if ("${CMAKE_SIZEOF_VOID_P}" STREQUAL "8")
+elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "arm.*|aarch64")
+  if (CMAKE_SIZEOF_VOID_P STREQUAL "8")
     set(ABSL_RANDOM_RANDEN_COPTS "${ABSL_RANDOM_HWAES_ARM64_FLAGS}")
-  elseif("${CMAKE_SIZEOF_VOID_P}" STREQUAL "4")
+  elseif(CMAKE_SIZEOF_VOID_P STREQUAL "4")
     set(ABSL_RANDOM_RANDEN_COPTS "${ABSL_RANDOM_HWAES_ARM32_FLAGS}")
   else()
     message(WARNING "Value of CMAKE_SIZEOF_VOID_P (${CMAKE_SIZEOF_VOID_P}) is not supported.")
@@ -32,20 +32,19 @@ else()
 endif()

-if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
+if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
   set(ABSL_DEFAULT_COPTS "${ABSL_GCC_FLAGS}")
   set(ABSL_TEST_COPTS "${ABSL_GCC_FLAGS};${ABSL_GCC_TEST_FLAGS}")
-elseif("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
+elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
   # MATCHES so we get both Clang and AppleClang
   if(MSVC)
     # clang-cl is half MSVC, half LLVM
     set(ABSL_DEFAULT_COPTS "${ABSL_CLANG_CL_FLAGS}")
     set(ABSL_TEST_COPTS "${ABSL_CLANG_CL_FLAGS};${ABSL_CLANG_CL_TEST_FLAGS}")
+    set(ABSL_DEFAULT_LINKOPTS "${ABSL_MSVC_LINKOPTS}")
   else()
     set(ABSL_DEFAULT_COPTS "${ABSL_LLVM_FLAGS}")
     set(ABSL_TEST_COPTS "${ABSL_LLVM_FLAGS};${ABSL_LLVM_TEST_FLAGS}")
-    if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
+    if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
       # AppleClang doesn't have lsan
       # https://developer.apple.com/documentation/code_diagnostics
       if(NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 3.5)
@@ -54,7 +53,7 @@ elseif("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
     endif()
   endif()
-elseif("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC")
+elseif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
   set(ABSL_DEFAULT_COPTS "${ABSL_MSVC_FLAGS}")
   set(ABSL_TEST_COPTS "${ABSL_MSVC_FLAGS};${ABSL_MSVC_TEST_FLAGS}")
   set(ABSL_DEFAULT_LINKOPTS "${ABSL_MSVC_LINKOPTS}")

@@ -5,47 +5,6 @@
 list(APPEND ABSL_CLANG_CL_FLAGS
     "/W3"
-    "-Wno-c++98-compat-pedantic"
-    "-Wno-conversion"
-    "-Wno-covered-switch-default"
-    "-Wno-deprecated"
-    "-Wno-disabled-macro-expansion"
-    "-Wno-double-promotion"
-    "-Wno-comma"
-    "-Wno-extra-semi"
-    "-Wno-extra-semi-stmt"
-    "-Wno-packed"
-    "-Wno-padded"
-    "-Wno-sign-compare"
-    "-Wno-float-conversion"
-    "-Wno-float-equal"
-    "-Wno-format-nonliteral"
-    "-Wno-gcc-compat"
-    "-Wno-global-constructors"
-    "-Wno-exit-time-destructors"
-    "-Wno-non-modular-include-in-module"
-    "-Wno-old-style-cast"
-    "-Wno-range-loop-analysis"
-    "-Wno-reserved-id-macro"
-    "-Wno-shorten-64-to-32"
-    "-Wno-switch-enum"
-    "-Wno-thread-safety-negative"
-    "-Wno-unknown-warning-option"
-    "-Wno-unreachable-code"
-    "-Wno-unused-macros"
-    "-Wno-weak-vtables"
-    "-Wno-zero-as-null-pointer-constant"
-    "-Wbitfield-enum-conversion"
-    "-Wbool-conversion"
-    "-Wconstant-conversion"
-    "-Wenum-conversion"
-    "-Wint-conversion"
-    "-Wliteral-conversion"
-    "-Wnon-literal-null-conversion"
-    "-Wnull-conversion"
-    "-Wobjc-literal-conversion"
-    "-Wno-sign-conversion"
-    "-Wstring-conversion"
     "/DNOMINMAX"
     "/DWIN32_LEAN_AND_MEAN"
     "/D_CRT_SECURE_NO_WARNINGS"
@@ -78,6 +37,7 @@ list(APPEND ABSL_GCC_FLAGS
     "-Wextra"
     "-Wcast-qual"
    "-Wconversion-null"
+    "-Wformat-security"
     "-Wmissing-declarations"
     "-Woverlength-strings"
     "-Wpointer-arith"
@@ -87,8 +47,6 @@ list(APPEND ABSL_GCC_FLAGS
     "-Wvarargs"
     "-Wvla"
     "-Wwrite-strings"
-    "-Wno-missing-field-initializers"
-    "-Wno-sign-compare"
     "-DNOMINMAX"
 )
@@ -105,48 +63,36 @@ list(APPEND ABSL_GCC_TEST_FLAGS
 list(APPEND ABSL_LLVM_FLAGS
     "-Wall"
     "-Wextra"
-    "-Weverything"
-    "-Wno-c++98-compat-pedantic"
-    "-Wno-conversion"
-    "-Wno-covered-switch-default"
-    "-Wno-deprecated"
-    "-Wno-disabled-macro-expansion"
-    "-Wno-double-promotion"
-    "-Wno-comma"
-    "-Wno-extra-semi"
-    "-Wno-extra-semi-stmt"
-    "-Wno-packed"
-    "-Wno-padded"
-    "-Wno-sign-compare"
+    "-Wcast-qual"
+    "-Wconversion"
+    "-Wfloat-overflow-conversion"
+    "-Wfloat-zero-conversion"
+    "-Wfor-loop-analysis"
+    "-Wformat-security"
+    "-Wgnu-redeclared-enum"
+    "-Winfinite-recursion"
+    "-Wliteral-conversion"
+    "-Wmissing-declarations"
+    "-Woverlength-strings"
+    "-Wpointer-arith"
+    "-Wself-assign"
+    "-Wshadow"
+    "-Wstring-conversion"
+    "-Wtautological-overlap-compare"
+    "-Wundef"
+    "-Wuninitialized"
+    "-Wunreachable-code"
+    "-Wunused-comparison"
+    "-Wunused-local-typedefs"
+    "-Wunused-result"
+    "-Wvla"
+    "-Wwrite-strings"
     "-Wno-float-conversion"
-    "-Wno-float-equal"
-    "-Wno-format-nonliteral"
-    "-Wno-gcc-compat"
-    "-Wno-global-constructors"
-    "-Wno-exit-time-destructors"
-    "-Wno-non-modular-include-in-module"
-    "-Wno-old-style-cast"
-    "-Wno-range-loop-analysis"
-    "-Wno-reserved-id-macro"
+    "-Wno-implicit-float-conversion"
+    "-Wno-implicit-int-float-conversion"
+    "-Wno-implicit-int-conversion"
     "-Wno-shorten-64-to-32"
-    "-Wno-switch-enum"
-    "-Wno-thread-safety-negative"
-    "-Wno-unknown-warning-option"
-    "-Wno-unreachable-code"
-    "-Wno-unused-macros"
-    "-Wno-weak-vtables"
-    "-Wno-zero-as-null-pointer-constant"
-    "-Wbitfield-enum-conversion"
-    "-Wbool-conversion"
-    "-Wconstant-conversion"
-    "-Wenum-conversion"
-    "-Wint-conversion"
-    "-Wliteral-conversion"
-    "-Wnon-literal-null-conversion"
-    "-Wnull-conversion"
-    "-Wobjc-literal-conversion"
     "-Wno-sign-conversion"
-    "-Wstring-conversion"
     "-DNOMINMAX"
 )

@@ -6,47 +6,6 @@
 ABSL_CLANG_CL_FLAGS = [
     "/W3",
-    "-Wno-c++98-compat-pedantic",
-    "-Wno-conversion",
-    "-Wno-covered-switch-default",
-    "-Wno-deprecated",
-    "-Wno-disabled-macro-expansion",
-    "-Wno-double-promotion",
-    "-Wno-comma",
-    "-Wno-extra-semi",
-    "-Wno-extra-semi-stmt",
-    "-Wno-packed",
-    "-Wno-padded",
-    "-Wno-sign-compare",
-    "-Wno-float-conversion",
-    "-Wno-float-equal",
-    "-Wno-format-nonliteral",
-    "-Wno-gcc-compat",
-    "-Wno-global-constructors",
-    "-Wno-exit-time-destructors",
-    "-Wno-non-modular-include-in-module",
-    "-Wno-old-style-cast",
-    "-Wno-range-loop-analysis",
-    "-Wno-reserved-id-macro",
-    "-Wno-shorten-64-to-32",
-    "-Wno-switch-enum",
-    "-Wno-thread-safety-negative",
-    "-Wno-unknown-warning-option",
-    "-Wno-unreachable-code",
-    "-Wno-unused-macros",
-    "-Wno-weak-vtables",
-    "-Wno-zero-as-null-pointer-constant",
-    "-Wbitfield-enum-conversion",
-    "-Wbool-conversion",
-    "-Wconstant-conversion",
-    "-Wenum-conversion",
-    "-Wint-conversion",
-    "-Wliteral-conversion",
-    "-Wnon-literal-null-conversion",
-    "-Wnull-conversion",
-    "-Wobjc-literal-conversion",
-    "-Wno-sign-conversion",
-    "-Wstring-conversion",
     "/DNOMINMAX",
     "/DWIN32_LEAN_AND_MEAN",
     "/D_CRT_SECURE_NO_WARNINGS",
@@ -79,6 +38,7 @@ ABSL_GCC_FLAGS = [
     "-Wextra",
     "-Wcast-qual",
     "-Wconversion-null",
+    "-Wformat-security",
     "-Wmissing-declarations",
     "-Woverlength-strings",
     "-Wpointer-arith",
@@ -88,8 +48,6 @@ ABSL_GCC_FLAGS = [
     "-Wvarargs",
     "-Wvla",
     "-Wwrite-strings",
-    "-Wno-missing-field-initializers",
-    "-Wno-sign-compare",
     "-DNOMINMAX",
 ]
@ -106,48 +64,36 @@ ABSL_GCC_TEST_FLAGS = [
ABSL_LLVM_FLAGS = [ ABSL_LLVM_FLAGS = [
"-Wall", "-Wall",
"-Wextra", "-Wextra",
"-Weverything", "-Wcast-qual",
"-Wno-c++98-compat-pedantic", "-Wconversion",
"-Wno-conversion", "-Wfloat-overflow-conversion",
"-Wno-covered-switch-default", "-Wfloat-zero-conversion",
"-Wno-deprecated", "-Wfor-loop-analysis",
"-Wno-disabled-macro-expansion", "-Wformat-security",
"-Wno-double-promotion", "-Wgnu-redeclared-enum",
"-Wno-comma", "-Winfinite-recursion",
"-Wno-extra-semi", "-Wliteral-conversion",
"-Wno-extra-semi-stmt", "-Wmissing-declarations",
"-Wno-packed", "-Woverlength-strings",
"-Wno-padded", "-Wpointer-arith",
"-Wno-sign-compare", "-Wself-assign",
"-Wshadow",
"-Wstring-conversion",
"-Wtautological-overlap-compare",
"-Wundef",
"-Wuninitialized",
"-Wunreachable-code",
"-Wunused-comparison",
"-Wunused-local-typedefs",
"-Wunused-result",
"-Wvla",
"-Wwrite-strings",
"-Wno-float-conversion", "-Wno-float-conversion",
"-Wno-float-equal", "-Wno-implicit-float-conversion",
"-Wno-format-nonliteral", "-Wno-implicit-int-float-conversion",
"-Wno-gcc-compat", "-Wno-implicit-int-conversion",
"-Wno-global-constructors",
"-Wno-exit-time-destructors",
"-Wno-non-modular-include-in-module",
"-Wno-old-style-cast",
"-Wno-range-loop-analysis",
"-Wno-reserved-id-macro",
"-Wno-shorten-64-to-32", "-Wno-shorten-64-to-32",
"-Wno-switch-enum",
"-Wno-thread-safety-negative",
"-Wno-unknown-warning-option",
"-Wno-unreachable-code",
"-Wno-unused-macros",
"-Wno-weak-vtables",
"-Wno-zero-as-null-pointer-constant",
"-Wbitfield-enum-conversion",
"-Wbool-conversion",
"-Wconstant-conversion",
"-Wenum-conversion",
"-Wint-conversion",
"-Wliteral-conversion",
"-Wnon-literal-null-conversion",
"-Wnull-conversion",
"-Wobjc-literal-conversion",
"-Wno-sign-conversion", "-Wno-sign-conversion",
"-Wstring-conversion",
"-DNOMINMAX", "-DNOMINMAX",
] ]

abseil-cpp/absl/copts/configure_copts.bzl
@@ -22,21 +22,21 @@ load(
 )
 
 ABSL_DEFAULT_COPTS = select({
-    "//absl:windows": ABSL_MSVC_FLAGS,
-    "//absl:llvm_compiler": ABSL_LLVM_FLAGS,
+    "//absl:msvc_compiler": ABSL_MSVC_FLAGS,
+    "//absl:clang-cl_compiler": ABSL_CLANG_CL_FLAGS,
+    "//absl:clang_compiler": ABSL_LLVM_FLAGS,
     "//conditions:default": ABSL_GCC_FLAGS,
 })
 
 # in absence of modules (--compiler=gcc or -c opt), cc_tests leak their copts
 # to their (included header) dependencies and fail to build outside absl
 ABSL_TEST_COPTS = ABSL_DEFAULT_COPTS + select({
-    "//absl:windows": ABSL_MSVC_TEST_FLAGS,
-    "//absl:llvm_compiler": ABSL_LLVM_TEST_FLAGS,
+    "//absl:msvc_compiler": ABSL_MSVC_TEST_FLAGS,
+    "//absl:clang-cl_compiler": ABSL_CLANG_CL_TEST_FLAGS,
+    "//absl:clang_compiler": ABSL_LLVM_TEST_FLAGS,
     "//conditions:default": ABSL_GCC_TEST_FLAGS,
 })
 
 ABSL_DEFAULT_LINKOPTS = select({
-    "//absl:windows": ABSL_MSVC_LINKOPTS,
+    "//absl:msvc_compiler": ABSL_MSVC_LINKOPTS,
     "//conditions:default": [],
 })
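
These selects used to key on the OS-style //absl:windows setting (and //absl:llvm_compiler for Clang); they now key on the compiler actually in use, so clang-cl on Windows gets its own ABSL_CLANG_CL_FLAGS (MSVC-style /W3 plus the Windows defines) instead of inheriting the MSVC or GCC lists. The new labels are presumably backed by config_settings in absl/BUILD.bazel that match the C++ toolchain's reported compiler; a Starlark sketch of what those definitions plausibly look like, not the exact upstream text:

# Hypothetical config_setting definitions matching on the toolchain's
# compiler rather than the target OS.
config_setting(
    name = "msvc_compiler",
    flag_values = {"@bazel_tools//tools/cpp:compiler": "msvc-cl"},
)

config_setting(
    name = "clang-cl_compiler",
    flag_values = {"@bazel_tools//tools/cpp:compiler": "clang-cl"},
)

config_setting(
    name = "clang_compiler",
    flag_values = {"@bazel_tools//tools/cpp:compiler": "clang"},
)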

abseil-cpp/absl/copts/copts.py
@@ -16,77 +16,6 @@ MSVC_BIG_WARNING_FLAGS = [
     "/W3",
 ]
 
-LLVM_BIG_WARNING_FLAGS = [
-    "-Wall",
-    "-Wextra",
-    "-Weverything",
-]
-
-# Docs on single flags is preceded by a comment.
-# Docs on groups of flags is preceded by ###.
-LLVM_DISABLE_WARNINGS_FLAGS = [
-    # Abseil does not support C++98
-    "-Wno-c++98-compat-pedantic",
-    # Turns off all implicit conversion warnings. Most are re-enabled below.
-    "-Wno-conversion",
-    "-Wno-covered-switch-default",
-    "-Wno-deprecated",
-    "-Wno-disabled-macro-expansion",
-    "-Wno-double-promotion",
-    ###
-    # Turned off as they include valid C++ code.
-    "-Wno-comma",
-    "-Wno-extra-semi",
-    "-Wno-extra-semi-stmt",
-    "-Wno-packed",
-    "-Wno-padded",
-    ###
-    # Google style does not use unsigned integers, though STL containers
-    # have unsigned types.
-    "-Wno-sign-compare",
-    ###
-    "-Wno-float-conversion",
-    "-Wno-float-equal",
-    "-Wno-format-nonliteral",
-    # Too aggressive: warns on Clang extensions enclosed in Clang-only
-    # compilation paths.
-    "-Wno-gcc-compat",
-    ###
-    # Some internal globals are necessary. Don't do this at home.
-    "-Wno-global-constructors",
-    "-Wno-exit-time-destructors",
-    ###
-    "-Wno-non-modular-include-in-module",
-    "-Wno-old-style-cast",
-    # Warns on preferred usage of non-POD types such as string_view
-    "-Wno-range-loop-analysis",
-    "-Wno-reserved-id-macro",
-    "-Wno-shorten-64-to-32",
-    "-Wno-switch-enum",
-    "-Wno-thread-safety-negative",
-    "-Wno-unknown-warning-option",
-    "-Wno-unreachable-code",
-    # Causes warnings on include guards
-    "-Wno-unused-macros",
-    "-Wno-weak-vtables",
-    # Causes warnings on usage of types/compare.h comparison operators.
-    "-Wno-zero-as-null-pointer-constant",
-    ###
-    # Implicit conversion warnings turned off by -Wno-conversion
-    # which are re-enabled below.
-    "-Wbitfield-enum-conversion",
-    "-Wbool-conversion",
-    "-Wconstant-conversion",
-    "-Wenum-conversion",
-    "-Wint-conversion",
-    "-Wliteral-conversion",
-    "-Wnon-literal-null-conversion",
-    "-Wnull-conversion",
-    "-Wobjc-literal-conversion",
-    "-Wno-sign-conversion",
-    "-Wstring-conversion",
-]
-
 LLVM_TEST_DISABLE_WARNINGS_FLAGS = [
     "-Wno-c99-extensions",
     "-Wno-deprecated-declarations",
@@ -125,6 +54,7 @@ COPT_VARS = {
         "-Wextra",
         "-Wcast-qual",
         "-Wconversion-null",
+        "-Wformat-security",
         "-Wmissing-declarations",
         "-Woverlength-strings",
         "-Wpointer-arith",
@@ -134,13 +64,6 @@ COPT_VARS = {
         "-Wvarargs",
         "-Wvla",  # variable-length array
         "-Wwrite-strings",
-        # gcc-4.x has spurious missing field initializer warnings.
-        # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=36750
-        # Remove when gcc-4.x is no longer supported.
-        "-Wno-missing-field-initializers",
-        # Google style does not use unsigned integers, though STL containers
-        # have unsigned types.
-        "-Wno-sign-compare",
         # Don't define min and max macros (Build on Windows using gcc)
         "-DNOMINMAX",
     ],
@@ -153,15 +76,48 @@ COPT_VARS = {
         "-Wno-unused-parameter",
         "-Wno-unused-private-field",
     ],
-    "ABSL_LLVM_FLAGS":
-        LLVM_BIG_WARNING_FLAGS + LLVM_DISABLE_WARNINGS_FLAGS + [
-            # Don't define min and max macros (Build on Windows using clang)
-            "-DNOMINMAX",
-        ],
+    "ABSL_LLVM_FLAGS": [
+        "-Wall",
+        "-Wextra",
+        "-Wcast-qual",
+        "-Wconversion",
+        "-Wfloat-overflow-conversion",
+        "-Wfloat-zero-conversion",
+        "-Wfor-loop-analysis",
+        "-Wformat-security",
+        "-Wgnu-redeclared-enum",
+        "-Winfinite-recursion",
+        "-Wliteral-conversion",
+        "-Wmissing-declarations",
+        "-Woverlength-strings",
+        "-Wpointer-arith",
+        "-Wself-assign",
+        "-Wshadow",
+        "-Wstring-conversion",
+        "-Wtautological-overlap-compare",
+        "-Wundef",
+        "-Wuninitialized",
+        "-Wunreachable-code",
+        "-Wunused-comparison",
+        "-Wunused-local-typedefs",
+        "-Wunused-result",
+        "-Wvla",
+        "-Wwrite-strings",
+        # Warnings that are enabled by group warning flags like -Wall that we
+        # explicitly disable.
+        "-Wno-float-conversion",
+        "-Wno-implicit-float-conversion",
+        "-Wno-implicit-int-float-conversion",
+        "-Wno-implicit-int-conversion",
+        "-Wno-shorten-64-to-32",
+        "-Wno-sign-conversion",
+        # Don't define min and max macros (Build on Windows using clang)
+        "-DNOMINMAX",
+    ],
     "ABSL_LLVM_TEST_FLAGS":
         LLVM_TEST_DISABLE_WARNINGS_FLAGS,
     "ABSL_CLANG_CL_FLAGS":
-        (MSVC_BIG_WARNING_FLAGS + LLVM_DISABLE_WARNINGS_FLAGS + MSVC_DEFINES),
+        (MSVC_BIG_WARNING_FLAGS + MSVC_DEFINES),
     "ABSL_CLANG_CL_TEST_FLAGS":
         LLVM_TEST_DISABLE_WARNINGS_FLAGS,
     "ABSL_MSVC_FLAGS":

abseil-cpp/absl/debugging/BUILD.bazel
@@ -66,7 +66,8 @@ cc_library(
     ],
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS + select({
-        "//absl:windows": ["-DEFAULTLIB:dbghelp.lib"],
+        "//absl:msvc_compiler": ["-DEFAULTLIB:dbghelp.lib"],
+        "//absl:clang-cl_compiler": ["-DEFAULTLIB:dbghelp.lib"],
         "//conditions:default": [],
     }),
     deps = [
@@ -86,11 +87,13 @@ cc_test(
     name = "symbolize_test",
     srcs = ["symbolize_test.cc"],
     copts = ABSL_TEST_COPTS + select({
-        "//absl:windows": ["/Z7"],
+        "//absl:msvc_compiler": ["/Z7"],
+        "//absl:clang-cl_compiler": ["/Z7"],
         "//conditions:default": [],
     }),
     linkopts = ABSL_DEFAULT_LINKOPTS + select({
-        "//absl:windows": ["/DEBUG"],
+        "//absl:msvc_compiler": ["/DEBUG"],
+        "//absl:clang-cl_compiler": ["/DEBUG"],
         "//conditions:default": [],
     }),
     deps = [
@@ -148,7 +151,8 @@ cc_test(
     srcs = ["failure_signal_handler_test.cc"],
     copts = ABSL_TEST_COPTS,
     linkopts = select({
-        "//absl:windows": [],
+        "//absl:msvc_compiler": [],
+        "//absl:clang-cl_compiler": [],
         "//absl:wasm": [],
         "//conditions:default": ["-pthread"],
     }) + ABSL_DEFAULT_LINKOPTS,
@@ -239,7 +243,7 @@ cc_library(
 # These targets exists for use in tests only, explicitly configuring the
 # LEAK_SANITIZER macro. It must be linked with -fsanitize=leak for lsan.
 ABSL_LSAN_LINKOPTS = select({
-    "//absl:llvm_compiler": ["-fsanitize=leak"],
+    "//absl:clang_compiler": ["-fsanitize=leak"],
     "//conditions:default": [],
 })
@@ -249,13 +253,14 @@ cc_library(
     srcs = ["leak_check.cc"],
     hdrs = ["leak_check.h"],
     copts = select({
-        "//absl:llvm_compiler": ["-DLEAK_SANITIZER"],
+        "//absl:clang_compiler": ["-DLEAK_SANITIZER"],
         "//conditions:default": [],
     }),
     linkopts = ABSL_DEFAULT_LINKOPTS,
     visibility = ["//visibility:private"],
     deps = [
         "//absl/base:config",
+        "//absl/base:core_headers",
     ],
 )
@@ -269,6 +274,7 @@ cc_library(
     visibility = ["//visibility:private"],
     deps = [
         "//absl/base:config",
+        "//absl/base:core_headers",
     ],
 )
@@ -276,7 +282,7 @@ cc_test(
     name = "leak_check_test",
     srcs = ["leak_check_test.cc"],
     copts = select({
-        "//absl:llvm_compiler": ["-DABSL_EXPECT_LEAK_SANITIZER"],
+        "//absl:clang_compiler": ["-DABSL_EXPECT_LEAK_SANITIZER"],
         "//conditions:default": [],
     }),
     linkopts = ABSL_LSAN_LINKOPTS + ABSL_DEFAULT_LINKOPTS,

abseil-cpp/absl/debugging/failure_signal_handler.cc
@@ -21,6 +21,7 @@
 #ifdef _WIN32
 #include <windows.h>
 #else
+#include <sched.h>
 #include <unistd.h>
 #endif
@@ -219,17 +220,24 @@ static void WriteToStderr(const char* data) {
   absl::raw_logging_internal::SafeWriteToStderr(data, strlen(data));
 }
 
-static void WriteSignalMessage(int signo, void (*writerfn)(const char*)) {
-  char buf[64];
+static void WriteSignalMessage(int signo, int cpu,
+                               void (*writerfn)(const char*)) {
+  char buf[96];
+  char on_cpu[32] = {0};
+  if (cpu != -1) {
+    snprintf(on_cpu, sizeof(on_cpu), " on cpu %d", cpu);
+  }
   const char* const signal_string =
       debugging_internal::FailureSignalToString(signo);
   if (signal_string != nullptr && signal_string[0] != '\0') {
-    snprintf(buf, sizeof(buf), "*** %s received at time=%ld ***\n",
+    snprintf(buf, sizeof(buf), "*** %s received at time=%ld%s ***\n",
             signal_string,
-             static_cast<long>(time(nullptr)));  // NOLINT(runtime/int)
+             static_cast<long>(time(nullptr)),  // NOLINT(runtime/int)
+             on_cpu);
   } else {
-    snprintf(buf, sizeof(buf), "*** Signal %d received at time=%ld ***\n",
-             signo, static_cast<long>(time(nullptr)));  // NOLINT(runtime/int)
+    snprintf(buf, sizeof(buf), "*** Signal %d received at time=%ld%s ***\n",
+             signo, static_cast<long>(time(nullptr)),  // NOLINT(runtime/int)
+             on_cpu);
   }
   writerfn(buf);
 }
@@ -269,10 +277,10 @@ ABSL_ATTRIBUTE_NOINLINE static void WriteStackTrace(
 // Called by AbslFailureSignalHandler() to write the failure info. It is
 // called once with writerfn set to WriteToStderr() and then possibly
 // with writerfn set to the user provided function.
-static void WriteFailureInfo(int signo, void* ucontext,
+static void WriteFailureInfo(int signo, void* ucontext, int cpu,
                              void (*writerfn)(const char*)) {
   WriterFnStruct writerfn_struct{writerfn};
-  WriteSignalMessage(signo, writerfn);
+  WriteSignalMessage(signo, cpu, writerfn);
   WriteStackTrace(ucontext, fsh_options.symbolize_stacktrace, WriterFnWrapper,
                   &writerfn_struct);
 }
@@ -334,6 +342,14 @@ static void AbslFailureSignalHandler(int signo, siginfo_t*, void* ucontext) {
     }
   }
 
+  // Increase the chance that the CPU we report was the same CPU on which the
+  // signal was received by doing this as early as possible, i.e. after
+  // verifying that this is not a recursive signal handler invocation.
+  int my_cpu = -1;
+#ifdef ABSL_HAVE_SCHED_GETCPU
+  my_cpu = sched_getcpu();
+#endif
+
 #ifdef ABSL_HAVE_ALARM
   // Set an alarm to abort the program in case this code hangs or deadlocks.
   if (fsh_options.alarm_on_failure_secs > 0) {
@@ -344,12 +360,12 @@ static void AbslFailureSignalHandler(int signo, siginfo_t*, void* ucontext) {
 #endif
 
   // First write to stderr.
-  WriteFailureInfo(signo, ucontext, WriteToStderr);
+  WriteFailureInfo(signo, ucontext, my_cpu, WriteToStderr);
 
   // Riskier code (because it is less likely to be async-signal-safe)
   // goes after this point.
   if (fsh_options.writerfn != nullptr) {
-    WriteFailureInfo(signo, ucontext, fsh_options.writerfn);
+    WriteFailureInfo(signo, ucontext, my_cpu, fsh_options.writerfn);
   }
 
   if (fsh_options.call_previous_handler) {
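
The failure_signal_handler.cc hunks thread a CPU number, captured via sched_getcpu() as early as the handler safely can, into the banner line, which now reads like "*** SIGSEGV received at time=1609459200 on cpu 3 ***". A small Python emulation of the new message format (illustration only, not Abseil code; os.sched_getcpu() stands in for the C call behind the ABSL_HAVE_SCHED_GETCPU guard):

# Emulates the new banner format; prints " on cpu N" only when the
# CPU could be determined, mirroring the cpu != -1 check in the C++.
import os
import signal
import time

def format_failure_message(signo):
    cpu = os.sched_getcpu() if hasattr(os, "sched_getcpu") else -1
    on_cpu = " on cpu %d" % cpu if cpu != -1 else ""
    return "*** %s received at time=%d%s ***\n" % (
        signal.Signals(signo).name, int(time.time()), on_cpu)

print(format_failure_message(signal.SIGSEGV))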
