diff --git a/abseil-cpp/.github/ISSUE_TEMPLATE/00-bug_report.md b/abseil-cpp/.github/ISSUE_TEMPLATE/00-bug_report.md new file mode 100644 index 00000000..1edf3de0 --- /dev/null +++ b/abseil-cpp/.github/ISSUE_TEMPLATE/00-bug_report.md @@ -0,0 +1,41 @@ +--- +name: Bug report +about: Create a report to help us improve +title: '' +labels: 'bug' +assignees: '' +--- + +**Describe the bug** + +Include a clear and concise description of what the problem is, including what +you expected to happen, and what actually happened. + +**Steps to reproduce the bug** + +It's important that we are able to reproduce the problem that you are +experiencing. Please provide all code and relevant steps to reproduce the +problem, including your `BUILD`/`CMakeLists.txt` file and build commands. Links +to a GitHub branch or [godbolt.org](https://godbolt.org/) that demonstrate the +problem are also helpful. + +**What version of Abseil are you using?** + +**What operating system and version are you using** + +If you are using a Linux distribution please include the name and version of the +distribution as well. + +**What compiler and version are you using?** + +Please include the output of `gcc -v` or `clang -v`, or the equivalent for your +compiler. + +**What build system are you using?** + +Please include the output of `bazel --version` or `cmake --version`, or the +equivalent for your build system. + +**Additional context** + +Add any other context about the problem here. diff --git a/abseil-cpp/.github/ISSUE_TEMPLATE/90-question.md b/abseil-cpp/.github/ISSUE_TEMPLATE/90-question.md new file mode 100644 index 00000000..84cf3491 --- /dev/null +++ b/abseil-cpp/.github/ISSUE_TEMPLATE/90-question.md @@ -0,0 +1,7 @@ +--- +name: Question +about: Have a question? Ask us anything! 
:-) +title: '' +labels: 'question' +assignees: '' +--- diff --git a/abseil-cpp/.github/ISSUE_TEMPLATE/config.yml b/abseil-cpp/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 00000000..9794ae1d --- /dev/null +++ b/abseil-cpp/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1 @@ +blank_issues_enabled: true diff --git a/abseil-cpp/.gitignore b/abseil-cpp/.gitignore new file mode 100644 index 00000000..d54fa5a9 --- /dev/null +++ b/abseil-cpp/.gitignore @@ -0,0 +1,15 @@ +# Ignore all bazel-* symlinks. +/bazel-* +# Ignore Bazel verbose explanations +--verbose_explanations +# Ignore CMake usual build directory +build +# Ignore Vim files +*.swp +# Ignore QtCreator Project file +CMakeLists.txt.user +# Ignore VS Code files +.vscode/* +# Ignore generated python artifacts +*.pyc +copts/__pycache__/ diff --git a/abseil-cpp/CMake/AbseilDll.cmake b/abseil-cpp/CMake/AbseilDll.cmake index 00cddb84..cf6a8c9a 100644 --- a/abseil-cpp/CMake/AbseilDll.cmake +++ b/abseil-cpp/CMake/AbseilDll.cmake @@ -1,5 +1,4 @@ include(CMakeParseArguments) -include(GNUInstallDirs) set(ABSL_INTERNAL_DLL_FILES "algorithm/algorithm.h" @@ -11,12 +10,15 @@ set(ABSL_INTERNAL_DLL_FILES "base/const_init.h" "base/dynamic_annotations.h" "base/internal/atomic_hook.h" + "base/internal/bits.h" "base/internal/cycleclock.cc" "base/internal/cycleclock.h" "base/internal/direct_mmap.h" "base/internal/dynamic_annotations.h" "base/internal/endian.h" "base/internal/errno_saver.h" + "base/internal/exponential_biased.cc" + "base/internal/exponential_biased.h" "base/internal/fast_type_id.h" "base/internal/hide_ptr.h" "base/internal/identity.h" @@ -26,7 +28,8 @@ set(ABSL_INTERNAL_DLL_FILES "base/internal/low_level_alloc.h" "base/internal/low_level_scheduling.h" "base/internal/per_thread_tls.h" - "base/internal/prefetch.h" + "base/internal/periodic_sampler.cc" + "base/internal/periodic_sampler.h" "base/internal/pretty_function.h" "base/internal/raw_logging.cc" "base/internal/raw_logging.h" @@ -58,8 +61,6 @@ 
set(ABSL_INTERNAL_DLL_FILES "base/policy_checks.h" "base/port.h" "base/thread_annotations.h" - "cleanup/cleanup.h" - "cleanup/internal/cleanup.h" "container/btree_map.h" "container/btree_set.h" "container/fixed_array.h" @@ -79,9 +80,10 @@ set(ABSL_INTERNAL_DLL_FILES "container/internal/hashtablez_sampler.cc" "container/internal/hashtablez_sampler.h" "container/internal/hashtablez_sampler_force_weak_definition.cc" + "container/internal/have_sse.h" "container/internal/inlined_vector.h" "container/internal/layout.h" - "container/internal/node_slot_policy.h" + "container/internal/node_hash_policy.h" "container/internal/raw_hash_map.h" "container/internal/raw_hash_set.cc" "container/internal/raw_hash_set.h" @@ -91,6 +93,7 @@ set(ABSL_INTERNAL_DLL_FILES "debugging/failure_signal_handler.cc" "debugging/failure_signal_handler.h" "debugging/leak_check.h" + "debugging/leak_check_disable.cc" "debugging/stacktrace.cc" "debugging/stacktrace.h" "debugging/symbolize.cc" @@ -109,11 +112,9 @@ set(ABSL_INTERNAL_DLL_FILES "debugging/internal/symbolize.h" "debugging/internal/vdso_support.cc" "debugging/internal/vdso_support.h" - "functional/any_invocable.h" "functional/internal/front_binder.h" "functional/bind_front.h" "functional/function_ref.h" - "functional/internal/any_invocable.h" "functional/internal/function_ref.h" "hash/hash.h" "hash/internal/city.h" @@ -121,20 +122,10 @@ set(ABSL_INTERNAL_DLL_FILES "hash/internal/hash.h" "hash/internal/hash.cc" "hash/internal/spy_hash_state.h" - "hash/internal/low_level_hash.h" - "hash/internal/low_level_hash.cc" "memory/memory.h" "meta/type_traits.h" - "numeric/bits.h" "numeric/int128.cc" "numeric/int128.h" - "numeric/internal/bits.h" - "numeric/internal/representation.h" - "profiling/internal/exponential_biased.cc" - "profiling/internal/exponential_biased.h" - "profiling/internal/periodic_sampler.cc" - "profiling/internal/periodic_sampler.h" - "profiling/internal/sample_recorder.h" "random/bernoulli_distribution.h" 
"random/beta_distribution.h" "random/bit_gen_ref.h" @@ -197,46 +188,14 @@ set(ABSL_INTERNAL_DLL_FILES "strings/charconv.h" "strings/cord.cc" "strings/cord.h" - "strings/cord_analysis.cc" - "strings/cord_analysis.h" - "strings/cord_buffer.cc" - "strings/cord_buffer.h" "strings/escaping.cc" "strings/escaping.h" + "strings/internal/cord_internal.h" "strings/internal/charconv_bigint.cc" "strings/internal/charconv_bigint.h" "strings/internal/charconv_parse.cc" "strings/internal/charconv_parse.h" - "strings/internal/cord_data_edge.h" - "strings/internal/cord_internal.cc" - "strings/internal/cord_internal.h" - "strings/internal/cord_rep_btree.cc" - "strings/internal/cord_rep_btree.h" - "strings/internal/cord_rep_btree_navigator.cc" - "strings/internal/cord_rep_btree_navigator.h" - "strings/internal/cord_rep_btree_reader.cc" - "strings/internal/cord_rep_btree_reader.h" - "strings/internal/cord_rep_crc.cc" - "strings/internal/cord_rep_crc.h" - "strings/internal/cord_rep_consume.h" - "strings/internal/cord_rep_consume.cc" - "strings/internal/cord_rep_flat.h" - "strings/internal/cord_rep_ring.cc" - "strings/internal/cord_rep_ring.h" - "strings/internal/cord_rep_ring_reader.h" - "strings/internal/cordz_functions.cc" - "strings/internal/cordz_functions.h" - "strings/internal/cordz_handle.cc" - "strings/internal/cordz_handle.h" - "strings/internal/cordz_info.cc" - "strings/internal/cordz_info.h" - "strings/internal/cordz_sample_token.cc" - "strings/internal/cordz_sample_token.h" - "strings/internal/cordz_statistics.h" - "strings/internal/cordz_update_scope.h" - "strings/internal/cordz_update_tracker.h" "strings/internal/stl_type_traits.h" - "strings/internal/string_constant.h" "strings/match.cc" "strings/match.h" "strings/numbers.cc" @@ -291,7 +250,6 @@ set(ABSL_INTERNAL_DLL_FILES "synchronization/notification.h" "synchronization/internal/create_thread_identity.cc" "synchronization/internal/create_thread_identity.h" - "synchronization/internal/futex.h" 
"synchronization/internal/graphcycles.cc" "synchronization/internal/graphcycles.h" "synchronization/internal/kernel_timeout.h" @@ -349,7 +307,6 @@ set(ABSL_INTERNAL_DLL_FILES "types/internal/span.h" "types/variant.h" "utility/utility.h" - "debugging/leak_check.cc" ) set(ABSL_INTERNAL_DLL_TARGETS @@ -360,6 +317,7 @@ set(ABSL_INTERNAL_DLL_TARGETS "debugging_internal" "demangle_internal" "leak_check" + "leak_check_disable" "stack_consumption" "debugging" "hash" @@ -390,7 +348,6 @@ set(ABSL_INTERNAL_DLL_TARGETS "kernel_timeout_internal" "synchronization" "thread_pool" - "any_invocable" "bind_front" "function_ref" "atomic_hook" @@ -460,13 +417,13 @@ set(ABSL_INTERNAL_DLL_TARGETS "hashtablez_sampler" "hashtable_debug" "hashtable_debug_hooks" - "node_slot_policy" + "have_sse" + "node_hash_policy" "raw_hash_map" "container_common" "raw_hash_set" "layout" "tracked" - "sample_recorder" ) function(absl_internal_dll_contains) @@ -530,7 +487,7 @@ function(absl_make_dll) abseil_dll PUBLIC "$" - $ + $ ) target_compile_options( @@ -548,8 +505,8 @@ function(absl_make_dll) ${ABSL_CC_LIB_DEFINES} ) install(TARGETS abseil_dll EXPORT ${PROJECT_NAME}Targets - RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} - LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} - ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} + RUNTIME DESTINATION ${ABSL_INSTALL_BINDIR} + LIBRARY DESTINATION ${ABSL_INSTALL_LIBDIR} + ARCHIVE DESTINATION ${ABSL_INSTALL_LIBDIR} ) endfunction() diff --git a/abseil-cpp/CMake/AbseilHelpers.cmake b/abseil-cpp/CMake/AbseilHelpers.cmake index dbb09fe2..8b2925c5 100644 --- a/abseil-cpp/CMake/AbseilHelpers.cmake +++ b/abseil-cpp/CMake/AbseilHelpers.cmake @@ -17,6 +17,7 @@ include(CMakeParseArguments) include(AbseilConfigureCopts) include(AbseilDll) +include(AbseilInstallDirs) # The IDE folder for Abseil that will be used if Abseil is included in a CMake # project that sets @@ -40,8 +41,7 @@ endif() # LINKOPTS: List of link options # PUBLIC: Add this so that this library will be exported under 
absl:: # Also in IDE, target will appear in Abseil folder while non PUBLIC will be in Abseil/internal. -# TESTONLY: When added, this target will only be built if both -# BUILD_TESTING=ON and ABSL_BUILD_TESTING=ON. +# TESTONLY: When added, this target will only be built if user passes -DABSL_RUN_TESTS=ON to CMake. # # Note: # By default, absl_cc_library will always create a library named absl_${NAME}, @@ -83,8 +83,7 @@ function(absl_cc_library) ${ARGN} ) - if(NOT ABSL_CC_LIB_PUBLIC AND ABSL_CC_LIB_TESTONLY AND - NOT (BUILD_TESTING AND ABSL_BUILD_TESTING)) + if(ABSL_CC_LIB_TESTONLY AND NOT ABSL_RUN_TESTS) return() endif() @@ -105,7 +104,7 @@ function(absl_cc_library) endif() endforeach() - if(ABSL_CC_SRCS STREQUAL "") + if("${ABSL_CC_SRCS}" STREQUAL "") set(ABSL_CC_LIB_IS_INTERFACE 1) else() set(ABSL_CC_LIB_IS_INTERFACE 0) @@ -123,11 +122,7 @@ function(absl_cc_library) # 4. "static" -- This target does not depend on the DLL and should be built # statically. if (${ABSL_BUILD_DLL}) - if(ABSL_ENABLE_INSTALL) - absl_internal_dll_contains(TARGET ${_NAME} OUTPUT _in_dll) - else() - absl_internal_dll_contains(TARGET ${ABSL_CC_LIB_NAME} OUTPUT _in_dll) - endif() + absl_internal_dll_contains(TARGET ${_NAME} OUTPUT _in_dll) if (${_in_dll}) # This target should be replaced by the DLL set(_build_type "dll") @@ -142,55 +137,8 @@ function(absl_cc_library) set(_build_type "static") endif() - # Generate a pkg-config file for every library: - if((_build_type STREQUAL "static" OR _build_type STREQUAL "shared") - AND ABSL_ENABLE_INSTALL) - if(NOT ABSL_CC_LIB_TESTONLY) - if(absl_VERSION) - set(PC_VERSION "${absl_VERSION}") - else() - set(PC_VERSION "head") - endif() - foreach(dep ${ABSL_CC_LIB_DEPS}) - if(${dep} MATCHES "^absl::(.*)") - # Join deps with commas. 
- if(PC_DEPS) - set(PC_DEPS "${PC_DEPS},") - endif() - set(PC_DEPS "${PC_DEPS} absl_${CMAKE_MATCH_1} = ${PC_VERSION}") - endif() - endforeach() - foreach(cflag ${ABSL_CC_LIB_COPTS}) - if(${cflag} MATCHES "^(-Wno|/wd)") - # These flags are needed to suppress warnings that might fire in our headers. - set(PC_CFLAGS "${PC_CFLAGS} ${cflag}") - elseif(${cflag} MATCHES "^(-W|/w[1234eo])") - # Don't impose our warnings on others. - else() - set(PC_CFLAGS "${PC_CFLAGS} ${cflag}") - endif() - endforeach() - string(REPLACE ";" " " PC_LINKOPTS "${ABSL_CC_LIB_LINKOPTS}") - FILE(GENERATE OUTPUT "${CMAKE_BINARY_DIR}/lib/pkgconfig/absl_${_NAME}.pc" CONTENT "\ -prefix=${CMAKE_INSTALL_PREFIX}\n\ -exec_prefix=\${prefix}\n\ -libdir=${CMAKE_INSTALL_FULL_LIBDIR}\n\ -includedir=${CMAKE_INSTALL_FULL_INCLUDEDIR}\n\ -\n\ -Name: absl_${_NAME}\n\ -Description: Abseil ${_NAME} library\n\ -URL: https://abseil.io/\n\ -Version: ${PC_VERSION}\n\ -Requires:${PC_DEPS}\n\ -Libs: -L\${libdir} ${PC_LINKOPTS} $<$>:-labsl_${_NAME}>\n\ -Cflags: -I\${includedir}${PC_CFLAGS}\n") - INSTALL(FILES "${CMAKE_BINARY_DIR}/lib/pkgconfig/absl_${_NAME}.pc" - DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig") - endif() - endif() - if(NOT ABSL_CC_LIB_IS_INTERFACE) - if(_build_type STREQUAL "dll_dep") + if(${_build_type} STREQUAL "dll_dep") # This target depends on the DLL. When adding dependencies to this target, # any depended-on-target which is contained inside the DLL is replaced # with a dependency on the DLL. 
@@ -219,7 +167,7 @@ Cflags: -I\${includedir}${PC_CFLAGS}\n") "${_gtest_link_define}" ) - elseif(_build_type STREQUAL "static" OR _build_type STREQUAL "shared") + elseif(${_build_type} STREQUAL "static" OR ${_build_type} STREQUAL "shared") add_library(${_NAME} "") target_sources(${_NAME} PRIVATE ${ABSL_CC_LIB_SRCS} ${ABSL_CC_LIB_HDRS}) target_link_libraries(${_NAME} @@ -242,7 +190,7 @@ Cflags: -I\${includedir}${PC_CFLAGS}\n") target_include_directories(${_NAME} PUBLIC "$" - $ + $ ) target_compile_options(${_NAME} PRIVATE ${ABSL_CC_LIB_COPTS}) @@ -257,23 +205,9 @@ Cflags: -I\${includedir}${PC_CFLAGS}\n") set_property(TARGET ${_NAME} PROPERTY FOLDER ${ABSL_IDE_FOLDER}/internal) endif() - if(ABSL_PROPAGATE_CXX_STD) - # Abseil libraries require C++11 as the current minimum standard. - # Top-level application CMake projects should ensure a consistent C++ - # standard for all compiled sources by setting CMAKE_CXX_STANDARD. - target_compile_features(${_NAME} PUBLIC cxx_std_11) - else() - # Note: This is legacy (before CMake 3.8) behavior. Setting the - # target-level CXX_STANDARD property to ABSL_CXX_STANDARD (which is - # initialized by CMAKE_CXX_STANDARD) should have no real effect, since - # that is the default value anyway. - # - # CXX_STANDARD_REQUIRED does guard against the top-level CMake project - # not having enabled CMAKE_CXX_STANDARD_REQUIRED (which prevents - # "decaying" to an older standard if the requested one isn't available). - set_property(TARGET ${_NAME} PROPERTY CXX_STANDARD ${ABSL_CXX_STANDARD}) - set_property(TARGET ${_NAME} PROPERTY CXX_STANDARD_REQUIRED ON) - endif() + # INTERFACE libraries can't have the CXX_STANDARD property set + set_property(TARGET ${_NAME} PROPERTY CXX_STANDARD ${ABSL_CXX_STANDARD}) + set_property(TARGET ${_NAME} PROPERTY CXX_STANDARD_REQUIRED ON) # When being installed, we lose the absl_ prefix. We want to put it back # to have properly named lib files. 
This is a no-op when we are not being @@ -281,7 +215,6 @@ Cflags: -I\${includedir}${PC_CFLAGS}\n") if(ABSL_ENABLE_INSTALL) set_target_properties(${_NAME} PROPERTIES OUTPUT_NAME "absl_${_NAME}" - SOVERSION "2206.0.0" ) endif() else() @@ -290,10 +223,10 @@ Cflags: -I\${includedir}${PC_CFLAGS}\n") target_include_directories(${_NAME} INTERFACE "$" - $ + $ ) - if (_build_type STREQUAL "dll") + if (${_build_type} STREQUAL "dll") set(ABSL_CC_LIB_DEPS abseil_dll) endif() @@ -304,25 +237,15 @@ Cflags: -I\${includedir}${PC_CFLAGS}\n") ${ABSL_DEFAULT_LINKOPTS} ) target_compile_definitions(${_NAME} INTERFACE ${ABSL_CC_LIB_DEFINES}) - - if(ABSL_PROPAGATE_CXX_STD) - # Abseil libraries require C++11 as the current minimum standard. - # Top-level application CMake projects should ensure a consistent C++ - # standard for all compiled sources by setting CMAKE_CXX_STANDARD. - target_compile_features(${_NAME} INTERFACE cxx_std_11) - - # (INTERFACE libraries can't have the CXX_STANDARD property set, so there - # is no legacy behavior else case). - endif() endif() # TODO currently we don't install googletest alongside abseil sources, so # installed abseil can't be tested. if(NOT ABSL_CC_LIB_TESTONLY AND ABSL_ENABLE_INSTALL) install(TARGETS ${_NAME} EXPORT ${PROJECT_NAME}Targets - RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} - LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} - ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} + RUNTIME DESTINATION ${ABSL_INSTALL_BINDIR} + LIBRARY DESTINATION ${ABSL_INSTALL_LIBDIR} + ARCHIVE DESTINATION ${ABSL_INSTALL_LIBDIR} ) endif() @@ -363,11 +286,11 @@ endfunction() # "awesome_test.cc" # DEPS # absl::awesome -# GTest::gmock -# GTest::gtest_main +# gmock +# gtest_main # ) function(absl_cc_test) - if(NOT (BUILD_TESTING AND ABSL_BUILD_TESTING)) + if(NOT ABSL_RUN_TESTS) return() endif() @@ -417,23 +340,8 @@ function(absl_cc_test) # Add all Abseil targets to a folder in the IDE for organization. 
set_property(TARGET ${_NAME} PROPERTY FOLDER ${ABSL_IDE_FOLDER}/test) - if(ABSL_PROPAGATE_CXX_STD) - # Abseil libraries require C++11 as the current minimum standard. - # Top-level application CMake projects should ensure a consistent C++ - # standard for all compiled sources by setting CMAKE_CXX_STANDARD. - target_compile_features(${_NAME} PUBLIC cxx_std_11) - else() - # Note: This is legacy (before CMake 3.8) behavior. Setting the - # target-level CXX_STANDARD property to ABSL_CXX_STANDARD (which is - # initialized by CMAKE_CXX_STANDARD) should have no real effect, since - # that is the default value anyway. - # - # CXX_STANDARD_REQUIRED does guard against the top-level CMake project - # not having enabled CMAKE_CXX_STANDARD_REQUIRED (which prevents - # "decaying" to an older standard if the requested one isn't available). - set_property(TARGET ${_NAME} PROPERTY CXX_STANDARD ${ABSL_CXX_STANDARD}) - set_property(TARGET ${_NAME} PROPERTY CXX_STANDARD_REQUIRED ON) - endif() + set_property(TARGET ${_NAME} PROPERTY CXX_STANDARD ${ABSL_CXX_STANDARD}) + set_property(TARGET ${_NAME} PROPERTY CXX_STANDARD_REQUIRED ON) add_test(NAME ${_NAME} COMMAND ${_NAME}) endfunction() diff --git a/abseil-cpp/CMake/AbseilInstallDirs.cmake b/abseil-cpp/CMake/AbseilInstallDirs.cmake new file mode 100644 index 00000000..6fc914b6 --- /dev/null +++ b/abseil-cpp/CMake/AbseilInstallDirs.cmake @@ -0,0 +1,20 @@ +include(GNUInstallDirs) + +# absl_VERSION is only set if we are an LTS release being installed, in which +# case it may be into a system directory and so we need to make subdirectories +# for each installed version of Abseil. This mechanism is implemented in +# Abseil's internal Copybara (https://github.com/google/copybara) workflows and +# isn't visible in the CMake buildsystem itself. 
+ +if(absl_VERSION) + set(ABSL_SUBDIR "${PROJECT_NAME}_${PROJECT_VERSION}") + set(ABSL_INSTALL_BINDIR "${CMAKE_INSTALL_BINDIR}/${ABSL_SUBDIR}") + set(ABSL_INSTALL_CONFIGDIR "${CMAKE_INSTALL_LIBDIR}/cmake/${ABSL_SUBDIR}") + set(ABSL_INSTALL_INCLUDEDIR "${CMAKE_INSTALL_INCLUDEDIR}/${ABSL_SUBDIR}") + set(ABSL_INSTALL_LIBDIR "${CMAKE_INSTALL_LIBDIR}/${ABSL_SUBDIR}") +else() + set(ABSL_INSTALL_BINDIR "${CMAKE_INSTALL_BINDIR}") + set(ABSL_INSTALL_CONFIGDIR "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}") + set(ABSL_INSTALL_INCLUDEDIR "${CMAKE_INSTALL_INCLUDEDIR}") + set(ABSL_INSTALL_LIBDIR "${CMAKE_INSTALL_LIBDIR}") +endif() diff --git a/abseil-cpp/CMake/Googletest/CMakeLists.txt.in b/abseil-cpp/CMake/Googletest/CMakeLists.txt.in index 5769e3a9..994dac0b 100644 --- a/abseil-cpp/CMake/Googletest/CMakeLists.txt.in +++ b/abseil-cpp/CMake/Googletest/CMakeLists.txt.in @@ -3,12 +3,24 @@ cmake_minimum_required(VERSION 2.8.2) project(googletest-external NONE) include(ExternalProject) -ExternalProject_Add(googletest - URL "${absl_gtest_download_url}" # May be empty - SOURCE_DIR "${absl_gtest_src_dir}" - BINARY_DIR "${absl_gtest_build_dir}" - CONFIGURE_COMMAND "" - BUILD_COMMAND "" - INSTALL_COMMAND "" - TEST_COMMAND "" -) +if(${ABSL_USE_GOOGLETEST_HEAD}) + ExternalProject_Add(googletest + GIT_REPOSITORY https://github.com/google/googletest.git + GIT_TAG master + SOURCE_DIR "${absl_gtest_src_dir}" + BINARY_DIR "${absl_gtest_build_dir}" + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" + TEST_COMMAND "" + ) +else() + ExternalProject_Add(googletest + SOURCE_DIR "${absl_gtest_src_dir}" + BINARY_DIR "${absl_gtest_build_dir}" + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" + TEST_COMMAND "" + ) +endif() \ No newline at end of file diff --git a/abseil-cpp/CMake/README.md b/abseil-cpp/CMake/README.md index 8134615e..8f73475a 100644 --- a/abseil-cpp/CMake/README.md +++ b/abseil-cpp/CMake/README.md @@ -20,10 +20,8 @@ googletest framework ### Step-by-Step 
Instructions 1. If you want to build the Abseil tests, integrate the Abseil dependency -[Google Test](https://github.com/google/googletest) into your CMake -project. To disable Abseil tests, you have to pass either -`-DBUILD_TESTING=OFF` or `-DABSL_BUILD_TESTING=OFF` when configuring your -project with CMake. +[Google Test](https://github.com/google/googletest) into your CMake project. To disable Abseil tests, you have to pass +`-DBUILD_TESTING=OFF` when configuring your project with CMake. 2. Download Abseil and copy it into a subdirectory in your CMake project or add Abseil as a [git submodule](https://git-scm.com/docs/git-submodule) in your @@ -36,16 +34,15 @@ to include Abseil directly in your CMake project. 4. Add the **absl::** target you wish to use to the [`target_link_libraries()`](https://cmake.org/cmake/help/latest/command/target_link_libraries.html) section of your executable or of your library.
-Here is a short CMakeLists.txt example of an application project using Abseil. +Here is a short CMakeLists.txt example of a project file using Abseil. ```cmake -cmake_minimum_required(VERSION 3.8.2) -project(my_app_project) +cmake_minimum_required(VERSION 3.5) +project(my_project) # Pick the C++ standard to compile with. # Abseil currently supports C++11, C++14, and C++17. set(CMAKE_CXX_STANDARD 11) -set(CMAKE_CXX_STANDARD_REQUIRED ON) add_subdirectory(abseil-cpp) @@ -53,48 +50,9 @@ add_executable(my_exe source.cpp) target_link_libraries(my_exe absl::base absl::synchronization absl::strings) ``` -Note that if you are developing a library designed for use by other clients, you -should instead leave `CMAKE_CXX_STANDARD` unset (or only set if being built as -the current top-level CMake project) and configure the minimum required C++ -standard at the target level. If you require a later minimum C++ standard than -Abseil does, it's a good idea to also enforce that `CMAKE_CXX_STANDARD` (which -will control Abseil library targets) is set to at least that minimum. For -example: - -```cmake -cmake_minimum_required(VERSION 3.8.2) -project(my_lib_project) - -# Leave C++ standard up to the root application, so set it only if this is the -# current top-level CMake project. -if(CMAKE_SOURCE_DIR STREQUAL my_lib_project_SOURCE_DIR) - set(CMAKE_CXX_STANDARD 17) - set(CMAKE_CXX_STANDARD_REQUIRED ON) -endif() - -add_subdirectory(abseil-cpp) - -add_library(my_lib source.cpp) -target_link_libraries(my_lib absl::base absl::synchronization absl::strings) - -# Enforce that my_lib requires C++17. Important to document for clients that they -# must set CMAKE_CXX_STANDARD to 17 or higher for proper Abseil ABI compatibility -# (since otherwise, Abseil library targets could be compiled with a lower C++ -# standard than my_lib). 
-target_compile_features(my_lib PUBLIC cxx_std_17) -if(CMAKE_CXX_STANDARD LESS 17) - message(FATAL_ERROR - "my_lib_project requires CMAKE_CXX_STANDARD >= 17 (got: ${CMAKE_CXX_STANDARD})") -endif() -``` - -Then the top-level application project that uses your library is responsible for -setting a consistent `CMAKE_CXX_STANDARD` that is sufficiently high. - ### Running Abseil Tests with CMake -Use the `-DABSL_BUILD_TESTING=ON` flag to run Abseil tests. Note that -BUILD_TESTING must also be on (the default). +Use the `-DABSL_RUN_TESTS=ON` flag to run Abseil tests. Note that if the `-DBUILD_TESTING=OFF` flag is passed then Abseil tests will not be run. You will need to provide Abseil with a Googletest dependency. There are two options for how to do this: @@ -112,7 +70,7 @@ For example, to run just the Abseil tests, you could use this script: cd path/to/abseil-cpp mkdir build cd build -cmake -DABSL_BUILD_TESTING=ON -DABSL_USE_GOOGLETEST_HEAD=ON .. +cmake -DABSL_USE_GOOGLETEST_HEAD=ON -DABSL_RUN_TESTS=ON .. make -j ctest ``` @@ -141,48 +99,3 @@ absl::synchronization absl::time absl::utility ``` - -## Traditional CMake Set-Up - -For larger projects, it may make sense to use the traditional CMake set-up where you build and install projects separately. - -First, you'd need to build and install Google Test: -``` -cmake -S /source/googletest -B /build/googletest -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/installation/dir -DBUILD_GMOCK=ON -cmake --build /build/googletest --target install -``` - -Then you need to configure and build Abseil. Make sure you enable `ABSL_USE_EXTERNAL_GOOGLETEST` and `ABSL_FIND_GOOGLETEST`. You also need to enable `ABSL_ENABLE_INSTALL` so that you can install Abseil itself. 
-``` -cmake -S /source/abseil-cpp -B /build/abseil-cpp -DCMAKE_PREFIX_PATH=/installation/dir -DCMAKE_INSTALL_PREFIX=/installation/dir -DABSL_ENABLE_INSTALL=ON -DABSL_USE_EXTERNAL_GOOGLETEST=ON -DABSL_FIND_GOOGLETEST=ON -cmake --build /temporary/build/abseil-cpp -``` - -(`CMAKE_PREFIX_PATH` is where you already have Google Test installed; `CMAKE_INSTALL_PREFIX` is where you want to have Abseil installed; they can be different.) - -Run the tests: -``` -ctest --test-dir /temporary/build/abseil-cpp -``` - -And finally install: -``` -cmake --build /temporary/build/abseil-cpp --target install -``` - -# CMake Option Synposis - -## Enable Standard CMake Installation - -`-DABSL_ENABLE_INSTALL=ON` - -## Google Test Options - -`-DABSL_BUILD_TESTING=ON` must be set to enable testing - -- Have Abseil download and build Google Test for you: `-DABSL_USE_EXTERNAL_GOOGLETEST=OFF` (default) - - Download and build latest Google Test: `-DABSL_USE_GOOGLETEST_HEAD=ON` - - Download specific Google Test version (ZIP archive): `-DABSL_GOOGLETEST_DOWNLOAD_URL=https://.../version.zip` - - Use Google Test from specific local directory: `-DABSL_LOCAL_GOOGLETEST_DIR=/path/to/googletest` -- Use Google Test included elsewhere in your project: `-DABSL_USE_EXTERNAL_GOOGLETEST=ON` -- Use standard CMake `find_package(CTest)` to find installed Google Test: `-DABSL_USE_EXTERNAL_GOOGLETEST=ON -DABSL_FIND_GOOGLETEST=ON` diff --git a/abseil-cpp/CMake/install_test_project/CMakeLists.txt b/abseil-cpp/CMake/install_test_project/CMakeLists.txt index b865b2ec..06b797e9 100644 --- a/abseil-cpp/CMake/install_test_project/CMakeLists.txt +++ b/abseil-cpp/CMake/install_test_project/CMakeLists.txt @@ -18,8 +18,10 @@ cmake_minimum_required(VERSION 3.5) project(absl_cmake_testing CXX) +set(CMAKE_CXX_STANDARD 11) + add_executable(simple simple.cc) find_package(absl REQUIRED) -target_link_libraries(simple absl::strings absl::config) +target_link_libraries(simple absl::strings) diff --git 
a/abseil-cpp/CMake/install_test_project/simple.cc b/abseil-cpp/CMake/install_test_project/simple.cc index 7daa7f09..e9e35291 100644 --- a/abseil-cpp/CMake/install_test_project/simple.cc +++ b/abseil-cpp/CMake/install_test_project/simple.cc @@ -14,17 +14,8 @@ // limitations under the License. #include -#include "absl/base/config.h" #include "absl/strings/substitute.h" -#if !defined(ABSL_LTS_RELEASE_VERSION) || ABSL_LTS_RELEASE_VERSION != 99998877 -#error ABSL_LTS_RELEASE_VERSION is not set correctly. -#endif - -#if !defined(ABSL_LTS_RELEASE_PATCH_LEVEL) || ABSL_LTS_RELEASE_PATCH_LEVEL != 0 -#error ABSL_LTS_RELEASE_PATCH_LEVEL is not set correctly. -#endif - int main(int argc, char** argv) { for (int i = 0; i < argc; ++i) { std::cout << absl::Substitute("Arg $0: $1\n", i, argv[i]); diff --git a/abseil-cpp/CMake/install_test_project/test.sh b/abseil-cpp/CMake/install_test_project/test.sh index cc028bac..99989b03 100755 --- a/abseil-cpp/CMake/install_test_project/test.sh +++ b/abseil-cpp/CMake/install_test_project/test.sh @@ -13,60 +13,70 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -# -# Unit and integration tests for Abseil LTS CMake installation + +# "Unit" and integration tests for Absl CMake installation + +# TODO(absl-team): This script isn't fully hermetic because +# -DABSL_USE_GOOGLETEST_HEAD=ON means that this script isn't pinned to a fixed +# version of GoogleTest. This means that an upstream change to GoogleTest could +# break this test. Fix this by allowing this script to pin to a known-good +# version of GoogleTest. # Fail on any error. Treat unset variables an error. Print commands as executed. set -euox pipefail +install_absl() { + pushd "${absl_build_dir}" + if [[ "${#}" -eq 1 ]]; then + cmake -DCMAKE_INSTALL_PREFIX="${1}" "${absl_dir}" + else + cmake "${absl_dir}" + fi + cmake --build . 
--target install -- -j + popd +} + +uninstall_absl() { + xargs rm < "${absl_build_dir}"/install_manifest.txt + rm -rf "${absl_build_dir}" + mkdir -p "${absl_build_dir}" +} + +lts_install="" + +while getopts ":l" lts; do + case "${lts}" in + l ) + lts_install="true" + ;; + esac +done + absl_dir=/abseil-cpp -absl_build_dir=/buildfs -googletest_builddir=/googletest_builddir +absl_build_dir=/buildfs/absl-build project_dir="${absl_dir}"/CMake/install_test_project project_build_dir=/buildfs/project-build -build_shared_libs="OFF" -if [ "${LINK_TYPE:-}" = "DYNAMIC" ]; then - build_shared_libs="ON" -fi - -# Build and install GoogleTest -mkdir "${googletest_builddir}" -pushd "${googletest_builddir}" -curl -L "${ABSL_GOOGLETEST_DOWNLOAD_URL}" --output "${ABSL_GOOGLETEST_COMMIT}".zip -unzip "${ABSL_GOOGLETEST_COMMIT}".zip -pushd "googletest-${ABSL_GOOGLETEST_COMMIT}" -mkdir build -pushd build -cmake -DCMAKE_BUILD_TYPE=Release -DBUILD_SHARED_LIBS="${build_shared_libs}" .. -make -j $(nproc) -make install -ldconfig -popd -popd -popd +mkdir -p "${absl_build_dir}" +mkdir -p "${project_build_dir}" -# Run the LTS transformations -./create_lts.py 99998877 - -# Build and install Abseil -pushd "${absl_build_dir}" -cmake "${absl_dir}" \ - -DABSL_USE_EXTERNAL_GOOGLETEST=ON \ - -DABSL_FIND_GOOGLETEST=ON \ - -DCMAKE_BUILD_TYPE=Release \ - -DABSL_BUILD_TESTING=ON \ - -DBUILD_SHARED_LIBS="${build_shared_libs}" -make -j $(nproc) -ctest -j $(nproc) --output-on-failure -make install -ldconfig -popd +if [[ "${lts_install}" ]]; then + install_dir="/usr/local" +else + install_dir="${project_build_dir}"/install +fi +mkdir -p "${install_dir}" -# Test the project against the installed Abseil -mkdir -p "${project_build_dir}" +# Test build, install, and link against installed abseil pushd "${project_build_dir}" -cmake "${project_dir}" +if [[ "${lts_install}" ]]; then + install_absl + cmake "${project_dir}" +else + install_absl "${install_dir}" + cmake "${project_dir}" 
-DCMAKE_PREFIX_PATH="${install_dir}" +fi + cmake --build . --target simple output="$(${project_build_dir}/simple "printme" 2>&1)" @@ -78,35 +88,57 @@ fi popd -if ! grep absl::strings "/usr/local/lib/cmake/absl/abslTargets.cmake"; then - cat "/usr/local/lib/cmake/absl/abslTargets.cmake" - echo "CMake targets named incorrectly" - exit 1 -fi - -pushd "${HOME}" -cat > hello-abseil.cc << EOF -#include - -#include "absl/strings/str_format.h" +# Test that we haven't accidentally made absl::abslblah +pushd "${install_dir}" -int main(int argc, char **argv) { - absl::PrintF("Hello Abseil!\n"); - return EXIT_SUCCESS; -} -EOF +# Starting in CMake 3.12 the default install dir is lib$bit_width +if [[ -d lib64 ]]; then + libdir="lib64" +elif [[ -d lib ]]; then + libdir="lib" +else + echo "ls *, */*, */*/*:" + ls * + ls */* + ls */*/* + echo "unknown lib dir" +fi -if [ "${LINK_TYPE:-}" != "DYNAMIC" ]; then - pc_args=($(pkg-config --cflags --libs --static absl_str_format)) - g++ -static -o hello-abseil hello-abseil.cc "${pc_args[@]}" +if [[ "${lts_install}" ]]; then + # LTS versions append the date of the release to the subdir. + # 9999/99/99 is the dummy date used in the local_lts workflow. + absl_subdir="absl_99999999" else - pc_args=($(pkg-config --cflags --libs absl_str_format)) - g++ -o hello-abseil hello-abseil.cc "${pc_args[@]}" + absl_subdir="absl" fi -hello="$(./hello-abseil)" -[[ "${hello}" == "Hello Abseil!" ]] +if ! grep absl::strings "${libdir}/cmake/${absl_subdir}/abslTargets.cmake"; then + cat "${libdir}"/cmake/absl/abslTargets.cmake + echo "CMake targets named incorrectly" + exit 1 +fi + +uninstall_absl popd +if [[ ! "${lts_install}" ]]; then + # Test that we warn if installed without a prefix or a system prefix + output="$(install_absl 2>&1)" + if [[ "${output}" != *"Please set CMAKE_INSTALL_PREFIX"* ]]; then + echo "Install without prefix didn't warn as expected. 
Output:" + echo "${output}" + exit 1 + fi + uninstall_absl + + output="$(install_absl /usr 2>&1)" + if [[ "${output}" != *"Please set CMAKE_INSTALL_PREFIX"* ]]; then + echo "Install with /usr didn't warn as expected. Output:" + echo "${output}" + exit 1 + fi + uninstall_absl +fi + echo "Install test complete!" exit 0 diff --git a/abseil-cpp/CMakeLists.txt b/abseil-cpp/CMakeLists.txt index 79869ff5..f0af6f66 100644 --- a/abseil-cpp/CMakeLists.txt +++ b/abseil-cpp/CMakeLists.txt @@ -41,13 +41,7 @@ if (POLICY CMP0077) cmake_policy(SET CMP0077 NEW) endif (POLICY CMP0077) -# Allow the user to specify the MSVC runtime -if (POLICY CMP0091) - cmake_policy(SET CMP0091 NEW) -endif (POLICY CMP0091) - -project(absl LANGUAGES CXX VERSION 20220623) -include(CTest) +project(absl CXX) # Output directory is correct by default for most build setups. However, when # building Abseil as a DLL, it is important to have the DLL in the same @@ -57,26 +51,19 @@ set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin) # when absl is included as subproject (i.e. using add_subdirectory(abseil-cpp)) # in the source tree of a project that uses it, install rules are disabled. -if(NOT CMAKE_SOURCE_DIR STREQUAL PROJECT_SOURCE_DIR) +if(NOT "^${CMAKE_SOURCE_DIR}$" STREQUAL "^${PROJECT_SOURCE_DIR}$") option(ABSL_ENABLE_INSTALL "Enable install rule" OFF) else() option(ABSL_ENABLE_INSTALL "Enable install rule" ON) endif() -option(ABSL_PROPAGATE_CXX_STD - "Use CMake C++ standard meta features (e.g. cxx_std_11) that propagate to targets that link to Abseil" - OFF) # TODO: Default to ON for CMake 3.8 and greater. -if((${CMAKE_VERSION} VERSION_GREATER_EQUAL 3.8) AND (NOT ABSL_PROPAGATE_CXX_STD)) - message(WARNING "A future Abseil release will default ABSL_PROPAGATE_CXX_STD to ON for CMake 3.8 and up. 
We recommend enabling this option to ensure your project still builds correctly.") -endif() - list(APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_LIST_DIR}/CMake ${CMAKE_CURRENT_LIST_DIR}/absl/copts ) +include(AbseilInstallDirs) include(CMakePackageConfigHelpers) -include(GNUInstallDirs) include(AbseilDll) include(AbseilHelpers) @@ -105,57 +92,28 @@ endif() ## pthread find_package(Threads REQUIRED) -include(CMakeDependentOption) - -option(ABSL_BUILD_TESTING - "If ON, Abseil will build all of Abseil's own tests." OFF) - option(ABSL_USE_EXTERNAL_GOOGLETEST - "If ON, Abseil will assume that the targets for GoogleTest are already provided by the including project. This makes sense when Abseil is used with add_subdirectory." OFF) - -cmake_dependent_option(ABSL_FIND_GOOGLETEST - "If ON, Abseil will use find_package(GTest) rather than assuming that GoogleTest is already provided by the including project." - ON - "ABSL_USE_EXTERNAL_GOOGLETEST" - OFF) + "If ON, Abseil will assume that the targets for GoogleTest are already provided by the including project. This makes sense when Abseil is used with add_subproject." OFF) option(ABSL_USE_GOOGLETEST_HEAD - "If ON, abseil will download HEAD from GoogleTest at config time." OFF) - -set(ABSL_GOOGLETEST_DOWNLOAD_URL "" CACHE STRING "If set, download GoogleTest from this URL") + "If ON, abseil will download HEAD from googletest at config time." OFF) set(ABSL_LOCAL_GOOGLETEST_DIR "/usr/src/googletest" CACHE PATH - "If ABSL_USE_GOOGLETEST_HEAD is OFF and ABSL_GOOGLETEST_URL is not set, specifies the directory of a local GoogleTest checkout." + "If ABSL_USE_GOOGLETEST_HEAD is OFF, specifies the directory of a local googletest checkout." ) -if(BUILD_TESTING AND ABSL_BUILD_TESTING) +option(ABSL_RUN_TESTS "If ON, Abseil tests will be run." OFF) + +if(${ABSL_RUN_TESTS}) + # enable CTest. 
This will set BUILD_TESTING to ON unless otherwise specified + # on the command line + include(CTest) + ## check targets - if (ABSL_USE_EXTERNAL_GOOGLETEST) - if (ABSL_FIND_GOOGLETEST) - find_package(GTest REQUIRED) - elseif(NOT TARGET GTest::gtest) - if(TARGET gtest) - # When Google Test is included directly rather than through find_package, the aliases are missing. - add_library(GTest::gtest ALIAS gtest) - add_library(GTest::gtest_main ALIAS gtest_main) - add_library(GTest::gmock ALIAS gmock) - add_library(GTest::gmock_main ALIAS gmock_main) - else() - message(FATAL_ERROR "ABSL_USE_EXTERNAL_GOOGLETEST is ON and ABSL_FIND_GOOGLETEST is OFF, which means that the top-level project must build the Google Test project. However, the target gtest was not found.") - endif() - endif() - else() + if (NOT ABSL_USE_EXTERNAL_GOOGLETEST) set(absl_gtest_build_dir ${CMAKE_BINARY_DIR}/googletest-build) - if(ABSL_USE_GOOGLETEST_HEAD AND ABSL_GOOGLETEST_DOWNLOAD_URL) - message(FATAL_ERROR "Do not set both ABSL_USE_GOOGLETEST_HEAD and ABSL_GOOGLETEST_DOWNLOAD_URL") - endif() - if(ABSL_USE_GOOGLETEST_HEAD) - set(absl_gtest_download_url "https://github.com/google/googletest/archive/main.zip") - elseif(ABSL_GOOGLETEST_DOWNLOAD_URL) - set(absl_gtest_download_url ${ABSL_GOOGLETEST_DOWNLOAD_URL}) - endif() - if(absl_gtest_download_url) + if(${ABSL_USE_GOOGLETEST_HEAD}) set(absl_gtest_src_dir ${CMAKE_BINARY_DIR}/googletest-src) else() set(absl_gtest_src_dir ${ABSL_LOCAL_GOOGLETEST_DIR}) @@ -163,30 +121,35 @@ if(BUILD_TESTING AND ABSL_BUILD_TESTING) include(CMake/Googletest/DownloadGTest.cmake) endif() - check_target(GTest::gtest) - check_target(GTest::gtest_main) - check_target(GTest::gmock) - check_target(GTest::gmock_main) + check_target(gtest) + check_target(gtest_main) + check_target(gmock) + + list(APPEND ABSL_TEST_COMMON_LIBRARIES + gtest_main + gtest + gmock + ${CMAKE_THREAD_LIBS_INIT} + ) endif() add_subdirectory(absl) if(ABSL_ENABLE_INSTALL) - # install as a subdirectory only 
install(EXPORT ${PROJECT_NAME}Targets NAMESPACE absl:: - DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}" + DESTINATION "${ABSL_INSTALL_CONFIGDIR}" ) configure_package_config_file( CMake/abslConfig.cmake.in "${PROJECT_BINARY_DIR}/${PROJECT_NAME}Config.cmake" - INSTALL_DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}" + INSTALL_DESTINATION "${ABSL_INSTALL_CONFIGDIR}" ) install(FILES "${PROJECT_BINARY_DIR}/${PROJECT_NAME}Config.cmake" - DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}" + DESTINATION "${ABSL_INSTALL_CONFIGDIR}" ) # Abseil only has a version in LTS releases. This mechanism is accomplished @@ -199,12 +162,12 @@ if(ABSL_ENABLE_INSTALL) ) install(FILES "${PROJECT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake" - DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}" + DESTINATION ${ABSL_INSTALL_CONFIGDIR} ) endif() # absl_VERSION install(DIRECTORY absl - DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} + DESTINATION ${ABSL_INSTALL_INCLUDEDIR} FILES_MATCHING PATTERN "*.inc" PATTERN "*.h" diff --git a/abseil-cpp/FAQ.md b/abseil-cpp/FAQ.md index fbd92ce9..78028fc0 100644 --- a/abseil-cpp/FAQ.md +++ b/abseil-cpp/FAQ.md @@ -27,10 +27,7 @@ compiler, there several ways to do this: file](https://docs.bazel.build/versions/master/guide.html#bazelrc) If you are using CMake as the build system, you'll need to add a line like -`set(CMAKE_CXX_STANDARD 17)` to your top level `CMakeLists.txt` file. If you -are developing a library designed to be used by other clients, you should -instead leave `CMAKE_CXX_STANDARD` unset and configure the minimum C++ standard -required by each of your library targets via `target_compile_features`. See the +`set(CMAKE_CXX_STANDARD 17)` to your top level `CMakeLists.txt` file. See the [CMake build instructions](https://github.com/abseil/abseil-cpp/blob/master/CMake/README.md) for more information. 
diff --git a/abseil-cpp/LTS.md b/abseil-cpp/LTS.md new file mode 100644 index 00000000..ade8b17c --- /dev/null +++ b/abseil-cpp/LTS.md @@ -0,0 +1,16 @@ +# Long Term Support (LTS) Branches + +This repository contains periodic snapshots of the Abseil codebase that are +Long Term Support (LTS) branches. An LTS branch allows you to use a known +version of Abseil without interfering with other projects which may also, in +turn, use Abseil. (For more information about our releases, see the +[Abseil Release Management](https://abseil.io/about/releases) guide.) + +## LTS Branches + +The following lists LTS branches and the dates on which they have been released: + +* [LTS Branch December 18, 2018](https://github.com/abseil/abseil-cpp/tree/lts_2018_12_18/) +* [LTS Branch June 20, 2018](https://github.com/abseil/abseil-cpp/tree/lts_2018_06_20/) +* [LTS Branch August 8, 2019](https://github.com/abseil/abseil-cpp/tree/lts_2019_08_08/) +* [LTS Branch February 25, 2020](https://github.com/abseil/abseil-cpp/tree/lts_2020_02_25/) diff --git a/abseil-cpp/README.md b/abseil-cpp/README.md index db3a7b44..85de5696 100644 --- a/abseil-cpp/README.md +++ b/abseil-cpp/README.md @@ -9,9 +9,7 @@ standard library. - [About Abseil](#about) - [Quickstart](#quickstart) - [Building Abseil](#build) -- [Support](#support) - [Codemap](#codemap) -- [Releases](#releases) - [License](#license) - [Links](#links) @@ -44,22 +42,14 @@ the Abseil code, running tests, and getting a simple binary working. ## Building Abseil -[Bazel](https://bazel.build) and [CMake](https://cmake.org/) are the official -build systems for Abseil. +[Bazel](https://bazel.build) is the official build system for Abseil, +which is supported on most major platforms (Linux, Windows, macOS, for example) +and compilers. See the [quickstart](https://abseil.io/docs/cpp/quickstart) for +more information on building Abseil using the Bazel build system. 
-See the [quickstart](https://abseil.io/docs/cpp/quickstart) for more information -on building Abseil using the Bazel build system. - -If you require CMake support, please check the [CMake build -instructions](CMake/README.md) and [CMake -Quickstart](https://abseil.io/docs/cpp/quickstart-cmake). - -## Support - -Abseil is officially supported on many platforms. See the [Abseil -platform support -guide](https://abseil.io/docs/cpp/platforms/platforms) for details on -supported operating systems, compilers, CPUs, etc. + +If you require CMake support, please check the +[CMake build instructions](CMake/README.md). ## Codemap @@ -72,9 +62,6 @@ Abseil contains the following C++ library components: * [`algorithm`](absl/algorithm/)
The `algorithm` library contains additions to the C++ `` library and container-based versions of such algorithms. -* [`cleanup`](absl/cleanup/) -
The `cleanup` library contains the control-flow-construct-like type - `absl::Cleanup` which is used for executing a callback on scope exit. * [`container`](absl/container/)
The `container` library contains additional STL-style containers, including Abseil's unordered "Swiss table" containers. @@ -92,12 +79,6 @@ Abseil contains the following C++ library components: available within C++14 and C++17 versions of the C++ `` library. * [`numeric`](absl/numeric/)
The `numeric` library contains C++11-compatible 128-bit integers. -* [`profiling`](absl/profiling/) -
The `profiling` library contains utility code for profiling C++ - entities. It is currently a private dependency of other Abseil libraries. -* [`status`](absl/status/) -
The `status` contains abstractions for error handling, specifically - `absl::Status` and `absl::StatusOr`. * [`strings`](absl/strings/)
The `strings` library contains a variety of strings routines and utilities, including a C++11-compatible version of the C++17 @@ -116,15 +97,6 @@ Abseil contains the following C++ library components: * [`utility`](absl/utility/)
The `utility` library contains utility and helper code. -## Releases - -Abseil recommends users "live-at-head" (update to the latest commit from the -master branch as often as possible). However, we realize this philosophy doesn't -work for every project, so we also provide [Long Term Support -Releases](https://github.com/abseil/abseil-cpp/releases) to which we backport -fixes for severe bugs. See our [release -management](https://abseil.io/about/releases) document for more details. - ## License The Abseil C++ library is licensed under the terms of the Apache diff --git a/abseil-cpp/WORKSPACE b/abseil-cpp/WORKSPACE index c332ba4e..0b533562 100644 --- a/abseil-cpp/WORKSPACE +++ b/abseil-cpp/WORKSPACE @@ -15,47 +15,30 @@ # workspace(name = "com_google_absl") - load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") # GoogleTest/GoogleMock framework. Used by most unit-tests. http_archive( name = "com_google_googletest", - sha256 = "ce7366fe57eb49928311189cb0e40e0a8bf3d3682fca89af30d884c25e983786", - strip_prefix = "googletest-release-1.12.0", - # Keep this URL in sync with ABSL_GOOGLETEST_COMMIT in ci/cmake_common.sh. - urls = ["https://github.com/google/googletest/archive/refs/tags/release-1.12.0.zip"], -) - -# RE2 (the regular expression library used by GoogleTest) -# Note this must use a commit from the `abseil` branch of the RE2 project. 
-# https://github.com/google/re2/tree/abseil -http_archive( - name = "com_googlesource_code_re2", - sha256 = "0a890c2aa0bb05b2ce906a15efb520d0f5ad4c7d37b8db959c43772802991887", - strip_prefix = "re2-a427f10b9fb4622dd6d8643032600aa1b50fbd12", - urls = ["https://github.com/google/re2/archive/a427f10b9fb4622dd6d8643032600aa1b50fbd12.zip"], # 2022-06-09 + urls = ["https://github.com/google/googletest/archive/8567b09290fe402cf01923e2131c5635b8ed851b.zip"], # 2020-06-12T22:24:28Z + strip_prefix = "googletest-8567b09290fe402cf01923e2131c5635b8ed851b", + sha256 = "9a8a166eb6a56c7b3d7b19dc2c946fe4778fd6f21c7a12368ad3b836d8f1be48", ) # Google benchmark. http_archive( - name = "com_github_google_benchmark", # 2021-09-20T09:19:51Z - sha256 = "62e2f2e6d8a744d67e4bbc212fcfd06647080de4253c97ad5c6749e09faf2cb0", - strip_prefix = "benchmark-0baacde3618ca617da95375e0af13ce1baadea47", - urls = ["https://github.com/google/benchmark/archive/0baacde3618ca617da95375e0af13ce1baadea47.zip"], -) - -# Bazel Skylib. -http_archive( - name = "bazel_skylib", - urls = ["https://github.com/bazelbuild/bazel-skylib/releases/download/1.2.1/bazel-skylib-1.2.1.tar.gz"], - sha256 = "f7be3474d42aae265405a592bb7da8e171919d74c16f082a5457840f06054728", + name = "com_github_google_benchmark", + urls = ["https://github.com/google/benchmark/archive/16703ff83c1ae6d53e5155df3bb3ab0bc96083be.zip"], + strip_prefix = "benchmark-16703ff83c1ae6d53e5155df3bb3ab0bc96083be", + sha256 = "59f918c8ccd4d74b6ac43484467b500f1d64b40cc1010daa055375b322a43ba3", ) -# Bazel platform rules. +# C++ rules for Bazel. 
http_archive( - name = "platforms", - sha256 = "a879ea428c6d56ab0ec18224f976515948822451473a80d06c2e50af0bbe5121", - strip_prefix = "platforms-da5541f26b7de1dc8e04c075c99df5351742a4a2", - urls = ["https://github.com/bazelbuild/platforms/archive/da5541f26b7de1dc8e04c075c99df5351742a4a2.zip"], # 2022-05-27 + name = "rules_cc", + sha256 = "9a446e9dd9c1bb180c86977a8dc1e9e659550ae732ae58bd2e8fd51e15b2c91d", + strip_prefix = "rules_cc-262ebec3c2296296526740db4aefce68c80de7fa", + urls = [ + "https://github.com/bazelbuild/rules_cc/archive/262ebec3c2296296526740db4aefce68c80de7fa.zip", + ], ) diff --git a/abseil-cpp/absl/BUILD.bazel b/abseil-cpp/absl/BUILD.bazel index 7cccbbba..0b772df4 100644 --- a/abseil-cpp/absl/BUILD.bazel +++ b/abseil-cpp/absl/BUILD.bazel @@ -12,106 +12,57 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +# -load("@bazel_skylib//lib:selects.bzl", "selects") +load( + ":compiler_config_setting.bzl", + "create_llvm_config", +) package(default_visibility = ["//visibility:public"]) licenses(["notice"]) -config_setting( - name = "clang_compiler", - flag_values = { - "@bazel_tools//tools/cpp:compiler": "clang", - }, - visibility = [":__subpackages__"], -) - -config_setting( - name = "msvc_compiler", - flag_values = { - "@bazel_tools//tools/cpp:compiler": "msvc-cl", - }, - visibility = [":__subpackages__"], -) - -config_setting( - name = "clang-cl_compiler", - flag_values = { - "@bazel_tools//tools/cpp:compiler": "clang-cl", - }, +create_llvm_config( + name = "llvm_compiler", visibility = [":__subpackages__"], ) config_setting( name = "osx", constraint_values = [ - "@platforms//os:osx", + "@bazel_tools//platforms:osx", ], ) config_setting( name = "ios", constraint_values = [ - "@platforms//os:ios", + "@bazel_tools//platforms:ios", ], ) config_setting( - name = "ppc", - values = { - "cpu": "ppc", - }, - visibility = 
[":__subpackages__"], -) - -config_setting( - name = "cpu_wasm", - values = { - "cpu": "wasm", - }, + name = "windows", + constraint_values = [ + "@bazel_tools//platforms:x86_64", + "@bazel_tools//platforms:windows", + ], visibility = [":__subpackages__"], ) config_setting( - name = "cpu_wasm32", + name = "ppc", values = { - "cpu": "wasm32", + "cpu": "ppc", }, visibility = [":__subpackages__"], ) config_setting( - name = "platforms_wasm32", - constraint_values = [ - "@platforms//cpu:wasm32", - ], - visibility = [":__subpackages__"], -) - -config_setting( - name = "platforms_wasm64", - constraint_values = [ - "@platforms//cpu:wasm64", - ], - visibility = [":__subpackages__"], -) - -selects.config_setting_group( name = "wasm", - match_any = [ - ":cpu_wasm", - ":cpu_wasm32", - ":platforms_wasm32", - ":platforms_wasm64", - ], - visibility = [":__subpackages__"], -) - -config_setting( - name = "fuchsia", values = { - "cpu": "fuchsia", + "cpu": "wasm32", }, visibility = [":__subpackages__"], ) diff --git a/abseil-cpp/absl/CMakeLists.txt b/abseil-cpp/absl/CMakeLists.txt index b1715846..fbfa7822 100644 --- a/abseil-cpp/absl/CMakeLists.txt +++ b/abseil-cpp/absl/CMakeLists.txt @@ -16,7 +16,6 @@ add_subdirectory(base) add_subdirectory(algorithm) -add_subdirectory(cleanup) add_subdirectory(container) add_subdirectory(debugging) add_subdirectory(flags) @@ -25,7 +24,6 @@ add_subdirectory(hash) add_subdirectory(memory) add_subdirectory(meta) add_subdirectory(numeric) -add_subdirectory(profiling) add_subdirectory(random) add_subdirectory(status) add_subdirectory(strings) diff --git a/abseil-cpp/absl/abseil.podspec.gen.py b/abseil-cpp/absl/abseil.podspec.gen.py index 63752980..6aefb794 100755 --- a/abseil-cpp/absl/abseil.podspec.gen.py +++ b/abseil-cpp/absl/abseil.podspec.gen.py @@ -40,8 +40,8 @@ Pod::Spec.new do |s| 'USE_HEADERMAP' => 'NO', 'ALWAYS_SEARCH_USER_PATHS' => 'NO', } - s.ios.deployment_target = '9.0' - s.osx.deployment_target = '10.10' + s.ios.deployment_target = '7.0' 
+ s.osx.deployment_target = '10.9' s.tvos.deployment_target = '9.0' s.watchos.deployment_target = '2.0' """ diff --git a/abseil-cpp/absl/algorithm/BUILD.bazel b/abseil-cpp/absl/algorithm/BUILD.bazel index f6d74714..a3002b7d 100644 --- a/abseil-cpp/absl/algorithm/BUILD.bazel +++ b/abseil-cpp/absl/algorithm/BUILD.bazel @@ -14,6 +14,7 @@ # limitations under the License. # +load("@rules_cc//cc:defs.bzl", "cc_library", "cc_test") load( "//absl:copts/configure_copts.bzl", "ABSL_DEFAULT_COPTS", @@ -43,7 +44,6 @@ cc_test( linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":algorithm", - "//absl/base:config", "@com_google_googletest//:gtest_main", ], ) diff --git a/abseil-cpp/absl/algorithm/CMakeLists.txt b/abseil-cpp/absl/algorithm/CMakeLists.txt index 181b49ca..56cd0fb8 100644 --- a/abseil-cpp/absl/algorithm/CMakeLists.txt +++ b/abseil-cpp/absl/algorithm/CMakeLists.txt @@ -35,8 +35,7 @@ absl_cc_test( ${ABSL_TEST_COPTS} DEPS absl::algorithm - absl::config - GTest::gmock_main + gmock_main ) absl_cc_library( @@ -66,5 +65,5 @@ absl_cc_test( absl::core_headers absl::memory absl::span - GTest::gmock_main + gmock_main ) diff --git a/abseil-cpp/absl/algorithm/algorithm_test.cc b/abseil-cpp/absl/algorithm/algorithm_test.cc index d18df024..81fccb61 100644 --- a/abseil-cpp/absl/algorithm/algorithm_test.cc +++ b/abseil-cpp/absl/algorithm/algorithm_test.cc @@ -20,7 +20,6 @@ #include "gmock/gmock.h" #include "gtest/gtest.h" -#include "absl/base/config.h" namespace { @@ -51,15 +50,7 @@ TEST(EqualTest, EmptyRange) { std::vector empty1; std::vector empty2; - // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=105705 -#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wnonnull" -#endif EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), empty1.begin(), empty1.end())); -#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0) -#pragma GCC diagnostic pop -#endif EXPECT_FALSE(absl::equal(empty1.begin(), empty1.end(), v1.begin(), v1.end())); EXPECT_TRUE( 
absl::equal(empty1.begin(), empty1.end(), empty2.begin(), empty2.end())); diff --git a/abseil-cpp/absl/algorithm/container.h b/abseil-cpp/absl/algorithm/container.h index 26b19529..f0cee94f 100644 --- a/abseil-cpp/absl/algorithm/container.h +++ b/abseil-cpp/absl/algorithm/container.h @@ -90,10 +90,10 @@ using ContainerPointerType = // lookup of std::begin and std::end, i.e. // using std::begin; // using std::end; -// std::foo(begin(c), end(c)); +// std::foo(begin(c), end(c); // becomes // std::foo(container_algorithm_internal::begin(c), -// container_algorithm_internal::end(c)); +// container_algorithm_internal::end(c)); // These are meant for internal use only. template @@ -166,7 +166,7 @@ container_algorithm_internal::ContainerDifferenceType c_distance( // c_all_of() // // Container-based version of the `std::all_of()` function to -// test if all elements within a container satisfy a condition. +// test a condition on all elements within a container. template bool c_all_of(const C& c, Pred&& pred) { return std::all_of(container_algorithm_internal::c_begin(c), @@ -188,7 +188,7 @@ bool c_any_of(const C& c, Pred&& pred) { // c_none_of() // // Container-based version of the `std::none_of()` function to -// test if no elements in a container fulfill a condition. +// test if no elements in a container fulfil a condition. template bool c_none_of(const C& c, Pred&& pred) { return std::none_of(container_algorithm_internal::c_begin(c), @@ -905,11 +905,11 @@ void c_sort(C& c) { // Overload of c_sort() for performing a `comp` comparison other than the // default `operator<`. -template -void c_sort(C& c, LessThan&& comp) { +template +void c_sort(C& c, Compare&& comp) { std::sort(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), - std::forward(comp)); + std::forward(comp)); } // c_stable_sort() @@ -925,11 +925,11 @@ void c_stable_sort(C& c) { // Overload of c_stable_sort() for performing a `comp` comparison other than the // default `operator<`. 
-template -void c_stable_sort(C& c, LessThan&& comp) { +template +void c_stable_sort(C& c, Compare&& comp) { std::stable_sort(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), - std::forward(comp)); + std::forward(comp)); } // c_is_sorted() @@ -944,11 +944,11 @@ bool c_is_sorted(const C& c) { // c_is_sorted() overload for performing a `comp` comparison other than the // default `operator<`. -template -bool c_is_sorted(const C& c, LessThan&& comp) { +template +bool c_is_sorted(const C& c, Compare&& comp) { return std::is_sorted(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), - std::forward(comp)); + std::forward(comp)); } // c_partial_sort() @@ -966,14 +966,14 @@ void c_partial_sort( // Overload of c_partial_sort() for performing a `comp` comparison other than // the default `operator<`. -template +template void c_partial_sort( RandomAccessContainer& sequence, container_algorithm_internal::ContainerIter middle, - LessThan&& comp) { + Compare&& comp) { std::partial_sort(container_algorithm_internal::c_begin(sequence), middle, container_algorithm_internal::c_end(sequence), - std::forward(comp)); + std::forward(comp)); } // c_partial_sort_copy() @@ -994,15 +994,15 @@ c_partial_sort_copy(const C& sequence, RandomAccessContainer& result) { // Overload of c_partial_sort_copy() for performing a `comp` comparison other // than the default `operator<`. 
-template +template container_algorithm_internal::ContainerIter c_partial_sort_copy(const C& sequence, RandomAccessContainer& result, - LessThan&& comp) { + Compare&& comp) { return std::partial_sort_copy(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), container_algorithm_internal::c_begin(result), container_algorithm_internal::c_end(result), - std::forward(comp)); + std::forward(comp)); } // c_is_sorted_until() @@ -1018,12 +1018,12 @@ container_algorithm_internal::ContainerIter c_is_sorted_until(C& c) { // Overload of c_is_sorted_until() for performing a `comp` comparison other than // the default `operator<`. -template +template container_algorithm_internal::ContainerIter c_is_sorted_until( - C& c, LessThan&& comp) { + C& c, Compare&& comp) { return std::is_sorted_until(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), - std::forward(comp)); + std::forward(comp)); } // c_nth_element() @@ -1043,14 +1043,14 @@ void c_nth_element( // Overload of c_nth_element() for performing a `comp` comparison other than // the default `operator<`. -template +template void c_nth_element( RandomAccessContainer& sequence, container_algorithm_internal::ContainerIter nth, - LessThan&& comp) { + Compare&& comp) { std::nth_element(container_algorithm_internal::c_begin(sequence), nth, container_algorithm_internal::c_end(sequence), - std::forward(comp)); + std::forward(comp)); } //------------------------------------------------------------------------------ @@ -1072,12 +1072,12 @@ container_algorithm_internal::ContainerIter c_lower_bound( // Overload of c_lower_bound() for performing a `comp` comparison other than // the default `operator<`. 
-template +template container_algorithm_internal::ContainerIter c_lower_bound( - Sequence& sequence, T&& value, LessThan&& comp) { + Sequence& sequence, T&& value, Compare&& comp) { return std::lower_bound(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), - std::forward(value), std::forward(comp)); + std::forward(value), std::forward(comp)); } // c_upper_bound() @@ -1095,12 +1095,12 @@ container_algorithm_internal::ContainerIter c_upper_bound( // Overload of c_upper_bound() for performing a `comp` comparison other than // the default `operator<`. -template +template container_algorithm_internal::ContainerIter c_upper_bound( - Sequence& sequence, T&& value, LessThan&& comp) { + Sequence& sequence, T&& value, Compare&& comp) { return std::upper_bound(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), - std::forward(value), std::forward(comp)); + std::forward(value), std::forward(comp)); } // c_equal_range() @@ -1118,12 +1118,12 @@ c_equal_range(Sequence& sequence, T&& value) { // Overload of c_equal_range() for performing a `comp` comparison other than // the default `operator<`. -template +template container_algorithm_internal::ContainerIterPairType -c_equal_range(Sequence& sequence, T&& value, LessThan&& comp) { +c_equal_range(Sequence& sequence, T&& value, Compare&& comp) { return std::equal_range(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), - std::forward(value), std::forward(comp)); + std::forward(value), std::forward(comp)); } // c_binary_search() @@ -1140,12 +1140,12 @@ bool c_binary_search(Sequence&& sequence, T&& value) { // Overload of c_binary_search() for performing a `comp` comparison other than // the default `operator<`. 
-template -bool c_binary_search(Sequence&& sequence, T&& value, LessThan&& comp) { +template +bool c_binary_search(Sequence&& sequence, T&& value, Compare&& comp) { return std::binary_search(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(value), - std::forward(comp)); + std::forward(comp)); } //------------------------------------------------------------------------------ @@ -1166,14 +1166,14 @@ OutputIterator c_merge(const C1& c1, const C2& c2, OutputIterator result) { // Overload of c_merge() for performing a `comp` comparison other than // the default `operator<`. -template +template OutputIterator c_merge(const C1& c1, const C2& c2, OutputIterator result, - LessThan&& comp) { + Compare&& comp) { return std::merge(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), container_algorithm_internal::c_end(c2), result, - std::forward(comp)); + std::forward(comp)); } // c_inplace_merge() @@ -1189,13 +1189,13 @@ void c_inplace_merge(C& c, // Overload of c_inplace_merge() for performing a merge using a `comp` other // than `operator<`. -template +template void c_inplace_merge(C& c, container_algorithm_internal::ContainerIter middle, - LessThan&& comp) { + Compare&& comp) { std::inplace_merge(container_algorithm_internal::c_begin(c), middle, container_algorithm_internal::c_end(c), - std::forward(comp)); + std::forward(comp)); } // c_includes() @@ -1213,13 +1213,13 @@ bool c_includes(const C1& c1, const C2& c2) { // Overload of c_includes() for performing a merge using a `comp` other than // `operator<`. 
-template -bool c_includes(const C1& c1, const C2& c2, LessThan&& comp) { +template +bool c_includes(const C1& c1, const C2& c2, Compare&& comp) { return std::includes(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), container_algorithm_internal::c_end(c2), - std::forward(comp)); + std::forward(comp)); } // c_set_union() @@ -1243,7 +1243,7 @@ OutputIterator c_set_union(const C1& c1, const C2& c2, OutputIterator output) { // Overload of c_set_union() for performing a merge using a `comp` other than // `operator<`. -template ::value, void>::type, @@ -1251,18 +1251,18 @@ template ::value, void>::type> OutputIterator c_set_union(const C1& c1, const C2& c2, OutputIterator output, - LessThan&& comp) { + Compare&& comp) { return std::set_union(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), container_algorithm_internal::c_end(c2), output, - std::forward(comp)); + std::forward(comp)); } // c_set_intersection() // // Container-based version of the `std::set_intersection()` function -// to return an iterator containing the intersection of two sorted containers. +// to return an iterator containing the intersection of two containers. template ::value, @@ -1272,11 +1272,6 @@ template ::type> OutputIterator c_set_intersection(const C1& c1, const C2& c2, OutputIterator output) { - // In debug builds, ensure that both containers are sorted with respect to the - // default comparator. std::set_intersection requires the containers be sorted - // using operator<. 
- assert(absl::c_is_sorted(c1)); - assert(absl::c_is_sorted(c2)); return std::set_intersection(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), @@ -1285,7 +1280,7 @@ OutputIterator c_set_intersection(const C1& c1, const C2& c2, // Overload of c_set_intersection() for performing a merge using a `comp` other // than `operator<`. -template ::value, void>::type, @@ -1293,17 +1288,12 @@ template ::value, void>::type> OutputIterator c_set_intersection(const C1& c1, const C2& c2, - OutputIterator output, LessThan&& comp) { - // In debug builds, ensure that both containers are sorted with respect to the - // default comparator. std::set_intersection requires the containers be sorted - // using the same comparator. - assert(absl::c_is_sorted(c1, comp)); - assert(absl::c_is_sorted(c2, comp)); + OutputIterator output, Compare&& comp) { return std::set_intersection(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), container_algorithm_internal::c_end(c2), output, - std::forward(comp)); + std::forward(comp)); } // c_set_difference() @@ -1328,7 +1318,7 @@ OutputIterator c_set_difference(const C1& c1, const C2& c2, // Overload of c_set_difference() for performing a merge using a `comp` other // than `operator<`. 
-template ::value, void>::type, @@ -1336,12 +1326,12 @@ template ::value, void>::type> OutputIterator c_set_difference(const C1& c1, const C2& c2, - OutputIterator output, LessThan&& comp) { + OutputIterator output, Compare&& comp) { return std::set_difference(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), container_algorithm_internal::c_end(c2), output, - std::forward(comp)); + std::forward(comp)); } // c_set_symmetric_difference() @@ -1367,7 +1357,7 @@ OutputIterator c_set_symmetric_difference(const C1& c1, const C2& c2, // Overload of c_set_symmetric_difference() for performing a merge using a // `comp` other than `operator<`. -template ::value, void>::type, @@ -1376,13 +1366,13 @@ template ::type> OutputIterator c_set_symmetric_difference(const C1& c1, const C2& c2, OutputIterator output, - LessThan&& comp) { + Compare&& comp) { return std::set_symmetric_difference( container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), container_algorithm_internal::c_end(c2), output, - std::forward(comp)); + std::forward(comp)); } //------------------------------------------------------------------------------ @@ -1401,11 +1391,11 @@ void c_push_heap(RandomAccessContainer& sequence) { // Overload of c_push_heap() for performing a push operation on a heap using a // `comp` other than `operator<`. -template -void c_push_heap(RandomAccessContainer& sequence, LessThan&& comp) { +template +void c_push_heap(RandomAccessContainer& sequence, Compare&& comp) { std::push_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), - std::forward(comp)); + std::forward(comp)); } // c_pop_heap() @@ -1420,11 +1410,11 @@ void c_pop_heap(RandomAccessContainer& sequence) { // Overload of c_pop_heap() for performing a pop operation on a heap using a // `comp` other than `operator<`. 
-template -void c_pop_heap(RandomAccessContainer& sequence, LessThan&& comp) { +template +void c_pop_heap(RandomAccessContainer& sequence, Compare&& comp) { std::pop_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), - std::forward(comp)); + std::forward(comp)); } // c_make_heap() @@ -1439,11 +1429,11 @@ void c_make_heap(RandomAccessContainer& sequence) { // Overload of c_make_heap() for performing heap comparisons using a // `comp` other than `operator<` -template -void c_make_heap(RandomAccessContainer& sequence, LessThan&& comp) { +template +void c_make_heap(RandomAccessContainer& sequence, Compare&& comp) { std::make_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), - std::forward(comp)); + std::forward(comp)); } // c_sort_heap() @@ -1458,11 +1448,11 @@ void c_sort_heap(RandomAccessContainer& sequence) { // Overload of c_sort_heap() for performing heap comparisons using a // `comp` other than `operator<` -template -void c_sort_heap(RandomAccessContainer& sequence, LessThan&& comp) { +template +void c_sort_heap(RandomAccessContainer& sequence, Compare&& comp) { std::sort_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), - std::forward(comp)); + std::forward(comp)); } // c_is_heap() @@ -1477,11 +1467,11 @@ bool c_is_heap(const RandomAccessContainer& sequence) { // Overload of c_is_heap() for performing heap comparisons using a // `comp` other than `operator<` -template -bool c_is_heap(const RandomAccessContainer& sequence, LessThan&& comp) { +template +bool c_is_heap(const RandomAccessContainer& sequence, Compare&& comp) { return std::is_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), - std::forward(comp)); + std::forward(comp)); } // c_is_heap_until() @@ -1497,12 +1487,12 @@ c_is_heap_until(RandomAccessContainer& sequence) { // Overload of c_is_heap_until() for 
performing heap comparisons using a // `comp` other than `operator<` -template +template container_algorithm_internal::ContainerIter -c_is_heap_until(RandomAccessContainer& sequence, LessThan&& comp) { +c_is_heap_until(RandomAccessContainer& sequence, Compare&& comp) { return std::is_heap_until(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), - std::forward(comp)); + std::forward(comp)); } //------------------------------------------------------------------------------ @@ -1523,12 +1513,12 @@ container_algorithm_internal::ContainerIter c_min_element( // Overload of c_min_element() for performing a `comp` comparison other than // `operator<`. -template +template container_algorithm_internal::ContainerIter c_min_element( - Sequence& sequence, LessThan&& comp) { + Sequence& sequence, Compare&& comp) { return std::min_element(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), - std::forward(comp)); + std::forward(comp)); } // c_max_element() @@ -1545,12 +1535,12 @@ container_algorithm_internal::ContainerIter c_max_element( // Overload of c_max_element() for performing a `comp` comparison other than // `operator<`. -template +template container_algorithm_internal::ContainerIter c_max_element( - Sequence& sequence, LessThan&& comp) { + Sequence& sequence, Compare&& comp) { return std::max_element(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), - std::forward(comp)); + std::forward(comp)); } // c_minmax_element() @@ -1568,12 +1558,12 @@ c_minmax_element(C& c) { // Overload of c_minmax_element() for performing `comp` comparisons other than // `operator<`. 
-template +template container_algorithm_internal::ContainerIterPairType -c_minmax_element(C& c, LessThan&& comp) { +c_minmax_element(C& c, Compare&& comp) { return std::minmax_element(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), - std::forward(comp)); + std::forward(comp)); } //------------------------------------------------------------------------------ @@ -1598,15 +1588,15 @@ bool c_lexicographical_compare(Sequence1&& sequence1, Sequence2&& sequence2) { // Overload of c_lexicographical_compare() for performing a lexicographical // comparison using a `comp` operator instead of `operator<`. -template +template bool c_lexicographical_compare(Sequence1&& sequence1, Sequence2&& sequence2, - LessThan&& comp) { + Compare&& comp) { return std::lexicographical_compare( container_algorithm_internal::c_begin(sequence1), container_algorithm_internal::c_end(sequence1), container_algorithm_internal::c_begin(sequence2), container_algorithm_internal::c_end(sequence2), - std::forward(comp)); + std::forward(comp)); } // c_next_permutation() @@ -1622,11 +1612,11 @@ bool c_next_permutation(C& c) { // Overload of c_next_permutation() for performing a lexicographical // comparison using a `comp` operator instead of `operator<`. -template -bool c_next_permutation(C& c, LessThan&& comp) { +template +bool c_next_permutation(C& c, Compare&& comp) { return std::next_permutation(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), - std::forward(comp)); + std::forward(comp)); } // c_prev_permutation() @@ -1642,11 +1632,11 @@ bool c_prev_permutation(C& c) { // Overload of c_prev_permutation() for performing a lexicographical // comparison using a `comp` operator instead of `operator<`. 
-template -bool c_prev_permutation(C& c, LessThan&& comp) { +template +bool c_prev_permutation(C& c, Compare&& comp) { return std::prev_permutation(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), - std::forward(comp)); + std::forward(comp)); } //------------------------------------------------------------------------------ diff --git a/abseil-cpp/absl/base/BUILD.bazel b/abseil-cpp/absl/base/BUILD.bazel index bd023ad8..9d96abeb 100644 --- a/abseil-cpp/absl/base/BUILD.bazel +++ b/abseil-cpp/absl/base/BUILD.bazel @@ -14,6 +14,7 @@ # limitations under the License. # +load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_library", "cc_test") load( "//absl:copts/configure_copts.bzl", "ABSL_DEFAULT_COPTS", @@ -75,7 +76,6 @@ cc_library( ":atomic_hook", ":config", ":core_headers", - ":errno_saver", ":log_severity", ], ) @@ -158,12 +158,9 @@ cc_library( "internal/direct_mmap.h", "internal/low_level_alloc.h", ], - copts = ABSL_DEFAULT_COPTS + select({ - "//conditions:default": [], - }), + copts = ABSL_DEFAULT_COPTS, linkopts = select({ - "//absl:msvc_compiler": [], - "//absl:clang-cl_compiler": [], + "//absl:windows": [], "//absl:wasm": [], "//conditions:default": ["-pthread"], }) + ABSL_DEFAULT_LINKOPTS, @@ -223,10 +220,7 @@ cc_library( ], copts = ABSL_DEFAULT_COPTS, linkopts = select({ - "//absl:msvc_compiler": [ - "-DEFAULTLIB:advapi32.lib", - ], - "//absl:clang-cl_compiler": [ + "//absl:windows": [ "-DEFAULTLIB:advapi32.lib", ], "//absl:wasm": [], @@ -435,9 +429,6 @@ cc_test( srcs = ["spinlock_test_common.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, - tags = [ - "no_test_wasm", - ], deps = [ ":base", ":base_internal", @@ -488,7 +479,6 @@ cc_library( copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ - ":base", ":config", ":core_headers", ], @@ -561,10 +551,7 @@ cc_test( srcs = ["internal/low_level_alloc_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, - tags = [ - 
"no_test_ios_x86_64", - "no_test_wasm", - ], + tags = ["no_test_ios_x86_64"], deps = [ ":malloc_internal", "//absl/container:node_hash_map", @@ -577,9 +564,6 @@ cc_test( srcs = ["internal/thread_identity_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, - tags = [ - "no_test_wasm", - ], deps = [ ":base", ":core_headers", @@ -602,6 +586,100 @@ cc_test( ], ) +cc_library( + name = "bits", + hdrs = ["internal/bits.h"], + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = [ + "//absl:__subpackages__", + ], + deps = [ + ":config", + ":core_headers", + ], +) + +cc_test( + name = "bits_test", + size = "small", + srcs = ["internal/bits_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":bits", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "exponential_biased", + srcs = ["internal/exponential_biased.cc"], + hdrs = ["internal/exponential_biased.h"], + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = [ + "//absl:__subpackages__", + ], + deps = [ + ":config", + ":core_headers", + ], +) + +cc_test( + name = "exponential_biased_test", + size = "small", + srcs = ["internal/exponential_biased_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = ["//visibility:private"], + deps = [ + ":exponential_biased", + "//absl/strings", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "periodic_sampler", + srcs = ["internal/periodic_sampler.cc"], + hdrs = ["internal/periodic_sampler.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":core_headers", + ":exponential_biased", + ], +) + +cc_test( + name = "periodic_sampler_test", + size = "small", + srcs = ["internal/periodic_sampler_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = ["//visibility:private"], + deps = [ + ":core_headers", + ":periodic_sampler", + "@com_google_googletest//:gtest_main", + ], +) + +cc_binary( + name = 
"periodic_sampler_benchmark", + testonly = 1, + srcs = ["internal/periodic_sampler_benchmark.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + tags = ["benchmark"], + visibility = ["//visibility:private"], + deps = [ + ":core_headers", + ":periodic_sampler", + "@com_github_google_benchmark//:benchmark_main", + ], +) + cc_library( name = "scoped_set_env", testonly = 1, @@ -712,31 +790,6 @@ cc_test( ], ) -cc_library( - name = "prefetch", - hdrs = ["internal/prefetch.h"], - copts = ABSL_DEFAULT_COPTS, - linkopts = ABSL_DEFAULT_LINKOPTS, - visibility = [ - "//absl:__subpackages__", - ], - deps = [ - ":config", - ], -) - -cc_test( - name = "prefetch_test", - size = "small", - srcs = ["internal/prefetch_test.cc"], - copts = ABSL_TEST_COPTS, - linkopts = ABSL_DEFAULT_LINKOPTS, - deps = [ - ":prefetch", - "@com_google_googletest//:gtest_main", - ], -) - cc_test( name = "unique_small_name_test", size = "small", diff --git a/abseil-cpp/absl/base/CMakeLists.txt b/abseil-cpp/absl/base/CMakeLists.txt index ed55093a..9ff5aa24 100644 --- a/abseil-cpp/absl/base/CMakeLists.txt +++ b/abseil-cpp/absl/base/CMakeLists.txt @@ -16,7 +16,6 @@ find_library(LIBRT rt) -# Internal-only target, do not depend on directly. absl_cc_library( NAME atomic_hook @@ -29,7 +28,6 @@ absl_cc_library( ${ABSL_DEFAULT_COPTS} ) -# Internal-only target, do not depend on directly. absl_cc_library( NAME errno_saver @@ -54,7 +52,6 @@ absl_cc_library( ${ABSL_DEFAULT_COPTS} ) -# Internal-only target, do not depend on directly. absl_cc_library( NAME raw_logging_internal @@ -66,13 +63,11 @@ absl_cc_library( absl::atomic_hook absl::config absl::core_headers - absl::errno_saver absl::log_severity COPTS ${ABSL_DEFAULT_COPTS} ) -# Internal-only target, do not depend on directly. absl_cc_library( NAME spinlock_wait @@ -136,7 +131,6 @@ absl_cc_library( PUBLIC ) -# Internal-only target, do not depend on directly. 
absl_cc_library( NAME malloc_internal @@ -157,7 +151,6 @@ absl_cc_library( Threads::Threads ) -# Internal-only target, do not depend on directly. absl_cc_library( NAME base_internal @@ -214,7 +207,6 @@ absl_cc_library( PUBLIC ) -# Internal-only target, do not depend on directly. absl_cc_library( NAME throw_delegate @@ -229,7 +221,6 @@ absl_cc_library( absl::raw_logging_internal ) -# Internal-only target, do not depend on directly. absl_cc_library( NAME exception_testing @@ -239,11 +230,10 @@ absl_cc_library( ${ABSL_DEFAULT_COPTS} DEPS absl::config - GTest::gtest + gtest TESTONLY ) -# Internal-only target, do not depend on directly. absl_cc_library( NAME pretty_function @@ -253,7 +243,6 @@ absl_cc_library( ${ABSL_DEFAULT_COPTS} ) -# Internal-only target, do not depend on directly. absl_cc_library( NAME exception_safety_testing @@ -270,7 +259,7 @@ absl_cc_library( absl::meta absl::strings absl::utility - GTest::gtest + gtest TESTONLY ) @@ -284,10 +273,9 @@ absl_cc_test( DEPS absl::exception_safety_testing absl::memory - GTest::gtest_main + gtest_main ) -# Internal-only target, do not depend on directly. 
absl_cc_library( NAME atomic_hook_test_helper @@ -312,8 +300,8 @@ absl_cc_test( absl::atomic_hook_test_helper absl::atomic_hook absl::core_headers - GTest::gmock - GTest::gtest_main + gmock + gtest_main ) absl_cc_test( @@ -326,7 +314,7 @@ absl_cc_test( DEPS absl::base absl::core_headers - GTest::gtest_main + gtest_main ) absl_cc_test( @@ -339,8 +327,8 @@ absl_cc_test( DEPS absl::errno_saver absl::strerror - GTest::gmock - GTest::gtest_main + gmock + gtest_main ) absl_cc_test( @@ -354,7 +342,7 @@ absl_cc_test( absl::base absl::config absl::throw_delegate - GTest::gtest_main + gtest_main ) absl_cc_test( @@ -369,7 +357,7 @@ absl_cc_test( ${ABSL_TEST_COPTS} DEPS absl::base_internal - GTest::gtest_main + gtest_main ) absl_cc_test( @@ -383,11 +371,10 @@ absl_cc_test( absl::base_internal absl::memory absl::strings - GTest::gmock - GTest::gtest_main + gmock + gtest_main ) -# Internal-only target, do not depend on directly. absl_cc_library( NAME spinlock_test_common @@ -401,7 +388,7 @@ absl_cc_library( absl::base_internal absl::core_headers absl::synchronization - GTest::gtest + gtest TESTONLY ) @@ -419,10 +406,9 @@ absl_cc_test( absl::config absl::core_headers absl::synchronization - GTest::gtest_main + gtest_main ) -# Internal-only target, do not depend on directly. 
absl_cc_library( NAME endian @@ -432,7 +418,6 @@ absl_cc_library( COPTS ${ABSL_DEFAULT_COPTS} DEPS - absl::base absl::config absl::core_headers PUBLIC @@ -449,7 +434,7 @@ absl_cc_test( absl::base absl::config absl::endian - GTest::gtest_main + gtest_main ) absl_cc_test( @@ -462,7 +447,7 @@ absl_cc_test( DEPS absl::config absl::synchronization - GTest::gtest_main + gtest_main ) absl_cc_test( @@ -476,7 +461,7 @@ absl_cc_test( absl::base absl::core_headers absl::synchronization - GTest::gtest_main + gtest_main ) absl_cc_test( @@ -489,7 +474,7 @@ absl_cc_test( DEPS absl::raw_logging_internal absl::strings - GTest::gtest_main + gtest_main ) absl_cc_test( @@ -502,7 +487,7 @@ absl_cc_test( DEPS absl::base absl::synchronization - GTest::gtest_main + gtest_main ) absl_cc_test( @@ -530,10 +515,87 @@ absl_cc_test( absl::core_headers absl::synchronization Threads::Threads - GTest::gtest_main + gtest_main +) + +absl_cc_library( + NAME + bits + HDRS + "internal/bits.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::core_headers +) + +absl_cc_test( + NAME + bits_test + SRCS + "internal/bits_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::bits + gtest_main +) + +absl_cc_library( + NAME + exponential_biased + SRCS + "internal/exponential_biased.cc" + HDRS + "internal/exponential_biased.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::core_headers +) + +absl_cc_test( + NAME + exponential_biased_test + SRCS + "internal/exponential_biased_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::exponential_biased + absl::strings + gmock_main +) + +absl_cc_library( + NAME + periodic_sampler + SRCS + "internal/periodic_sampler.cc" + HDRS + "internal/periodic_sampler.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::core_headers + absl::exponential_biased +) + +absl_cc_test( + NAME + periodic_sampler_test + SRCS + "internal/periodic_sampler_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::core_headers + absl::periodic_sampler + gmock_main ) -# 
Internal-only target, do not depend on directly. absl_cc_library( NAME scoped_set_env @@ -557,7 +619,7 @@ absl_cc_test( ${ABSL_TEST_COPTS} DEPS absl::scoped_set_env - GTest::gtest_main + gtest_main ) absl_cc_test( @@ -581,11 +643,10 @@ absl_cc_test( absl::flags_marshalling absl::log_severity absl::strings - GTest::gmock - GTest::gtest_main + gmock + gtest_main ) -# Internal-only target, do not depend on directly. absl_cc_library( NAME strerror @@ -613,11 +674,10 @@ absl_cc_test( DEPS absl::strerror absl::strings - GTest::gmock - GTest::gtest_main + gmock + gtest_main ) -# Internal-only target, do not depend on directly. absl_cc_library( NAME fast_type_id @@ -640,33 +700,7 @@ absl_cc_test( ${ABSL_TEST_COPTS} DEPS absl::fast_type_id - GTest::gtest_main -) - -# Internal-only target, do not depend on directly. -absl_cc_library( - NAME - prefetch - HDRS - "internal/prefetch.h" - COPTS - ${ABSL_DEFAULT_COPTS} - LINKOPTS - ${ABSL_DEFAULT_LINKOPTS} - DEPS - absl::config -) - -absl_cc_test( - NAME - prefetch_test - SRCS - "internal/prefetch_test.cc" - COPTS - ${ABSL_TEST_COPTS} - DEPS - absl::prefetch - GTest::gtest_main + gtest_main ) absl_cc_test( @@ -679,5 +713,5 @@ absl_cc_test( DEPS absl::core_headers absl::optional - GTest::gtest_main + gtest_main ) diff --git a/abseil-cpp/absl/base/attributes.h b/abseil-cpp/absl/base/attributes.h index e4e7a3d8..046fbea3 100644 --- a/abseil-cpp/absl/base/attributes.h +++ b/abseil-cpp/absl/base/attributes.h @@ -18,6 +18,8 @@ // These macros are used within Abseil and allow the compiler to optimize, where // applicable, certain function calls. // +// This file is used for both C and C++! +// // Most macros here are exposing GCC or Clang features, and are stubbed out for // other compilers. 
// @@ -119,7 +121,7 @@ #if ABSL_HAVE_ATTRIBUTE(disable_tail_calls) #define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 1 #define ABSL_ATTRIBUTE_NO_TAIL_CALL __attribute__((disable_tail_calls)) -#elif defined(__GNUC__) && !defined(__clang__) && !defined(__e2k__) +#elif defined(__GNUC__) && !defined(__clang__) #define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 1 #define ABSL_ATTRIBUTE_NO_TAIL_CALL \ __attribute__((optimize("no-optimize-sibling-calls"))) @@ -131,15 +133,14 @@ // ABSL_ATTRIBUTE_WEAK // // Tags a function as weak for the purposes of compilation and linking. -// Weak attributes did not work properly in LLVM's Windows backend before -// 9.0.0, so disable them there. See https://bugs.llvm.org/show_bug.cgi?id=37598 +// Weak attributes currently do not work properly in LLVM's Windows backend, +// so disable them there. See https://bugs.llvm.org/show_bug.cgi?id=37598 // for further information. // The MinGW compiler doesn't complain about the weak attribute until the link // step, presumably because Windows doesn't use ELF binaries. 
-#if (ABSL_HAVE_ATTRIBUTE(weak) || \ - (defined(__GNUC__) && !defined(__clang__))) && \ - (!defined(_WIN32) || (defined(__clang__) && __clang_major__ >= 9)) && \ - !defined(__MINGW32__) +#if (ABSL_HAVE_ATTRIBUTE(weak) || \ + (defined(__GNUC__) && !defined(__clang__))) && \ + !(defined(__llvm__) && defined(_WIN32)) && !defined(__MINGW32__) #undef ABSL_ATTRIBUTE_WEAK #define ABSL_ATTRIBUTE_WEAK __attribute__((weak)) #define ABSL_HAVE_ATTRIBUTE_WEAK 1 @@ -213,9 +214,6 @@ // https://gcc.gnu.org/gcc-4.8/changes.html #if ABSL_HAVE_ATTRIBUTE(no_sanitize_address) #define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address)) -#elif defined(_MSC_VER) && _MSC_VER >= 1928 -// https://docs.microsoft.com/en-us/cpp/cpp/no-sanitize-address -#define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS __declspec(no_sanitize_address) #else #define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS #endif @@ -285,7 +283,10 @@ // ABSL_ATTRIBUTE_RETURNS_NONNULL // // Tells the compiler that a particular function never returns a null pointer. -#if ABSL_HAVE_ATTRIBUTE(returns_nonnull) +#if ABSL_HAVE_ATTRIBUTE(returns_nonnull) || \ + (defined(__GNUC__) && \ + (__GNUC__ > 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9)) && \ + !defined(__clang__)) #define ABSL_ATTRIBUTE_RETURNS_NONNULL __attribute__((returns_nonnull)) #else #define ABSL_ATTRIBUTE_RETURNS_NONNULL @@ -315,22 +316,15 @@ __attribute__((section(#name))) __attribute__((noinline)) #endif + // ABSL_ATTRIBUTE_SECTION_VARIABLE // // Tells the compiler/linker to put a given variable into a section and define // `__start_ ## name` and `__stop_ ## name` symbols to bracket the section. // This functionality is supported by GNU linker. #ifndef ABSL_ATTRIBUTE_SECTION_VARIABLE -#ifdef _AIX -// __attribute__((section(#name))) on AIX is achived by using the `.csect` psudo -// op which includes an additional integer as part of its syntax indcating -// alignment. 
If data fall under different alignments then you might get a -// compilation error indicating a `Section type conflict`. -#define ABSL_ATTRIBUTE_SECTION_VARIABLE(name) -#else #define ABSL_ATTRIBUTE_SECTION_VARIABLE(name) __attribute__((section(#name))) #endif -#endif // ABSL_DECLARE_ATTRIBUTE_SECTION_VARS // @@ -341,8 +335,8 @@ // a no-op on ELF but not on Mach-O. // #ifndef ABSL_DECLARE_ATTRIBUTE_SECTION_VARS -#define ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name) \ - extern char __start_##name[] ABSL_ATTRIBUTE_WEAK; \ +#define ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name) \ + extern char __start_##name[] ABSL_ATTRIBUTE_WEAK; \ extern char __stop_##name[] ABSL_ATTRIBUTE_WEAK #endif #ifndef ABSL_DEFINE_ATTRIBUTE_SECTION_VARS @@ -403,9 +397,6 @@ // // Tells the compiler to warn about unused results. // -// For code or headers that are assured to only build with C++17 and up, prefer -// just using the standard `[[nodiscard]]` directly over this macro. -// // When annotating a function, it must appear as the first part of the // declaration or definition. The compiler will warn if the return value from // such a function is unused: @@ -432,10 +423,9 @@ // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66425 // // Note: past advice was to place the macro after the argument list. -// -// TODO(b/176172494): Use ABSL_HAVE_CPP_ATTRIBUTE(nodiscard) when all code is -// compliant with the stricter [[nodiscard]]. 
-#if defined(__clang__) && ABSL_HAVE_ATTRIBUTE(warn_unused_result) +#if ABSL_HAVE_ATTRIBUTE(nodiscard) +#define ABSL_MUST_USE_RESULT [[nodiscard]] +#elif defined(__clang__) && ABSL_HAVE_ATTRIBUTE(warn_unused_result) #define ABSL_MUST_USE_RESULT __attribute__((warn_unused_result)) #else #define ABSL_MUST_USE_RESULT @@ -505,7 +495,7 @@ #define ABSL_XRAY_NEVER_INSTRUMENT [[clang::xray_never_instrument]] #if ABSL_HAVE_CPP_ATTRIBUTE(clang::xray_log_args) #define ABSL_XRAY_LOG_ARGS(N) \ - [[clang::xray_always_instrument, clang::xray_log_args(N)]] + [[clang::xray_always_instrument, clang::xray_log_args(N)]] #else #define ABSL_XRAY_LOG_ARGS(N) [[clang::xray_always_instrument]] #endif @@ -536,13 +526,6 @@ // ABSL_ATTRIBUTE_UNUSED // // Prevents the compiler from complaining about variables that appear unused. -// -// For code or headers that are assured to only build with C++17 and up, prefer -// just using the standard '[[maybe_unused]]' directly over this macro. -// -// Due to differences in positioning requirements between the old, compiler -// specific __attribute__ syntax and the now standard [[maybe_unused]], this -// macro does not attempt to take advantage of '[[maybe_unused]]'. #if ABSL_HAVE_ATTRIBUTE(unused) || (defined(__GNUC__) && !defined(__clang__)) #undef ABSL_ATTRIBUTE_UNUSED #define ABSL_ATTRIBUTE_UNUSED __attribute__((__unused__)) @@ -563,19 +546,13 @@ // ABSL_ATTRIBUTE_PACKED // // Instructs the compiler not to use natural alignment for a tagged data -// structure, but instead to reduce its alignment to 1. -// -// Therefore, DO NOT APPLY THIS ATTRIBUTE TO STRUCTS CONTAINING ATOMICS. Doing -// so can cause atomic variables to be mis-aligned and silently violate -// atomicity on x86. -// -// This attribute can either be applied to members of a structure or to a -// structure in its entirety. Applying this attribute (judiciously) to a -// structure in its entirety to optimize the memory footprint of very -// commonly-used structs is fine. 
Do not apply this attribute to a structure in -// its entirety if the purpose is to control the offsets of the members in the -// structure. Instead, apply this attribute only to structure members that need -// it. +// structure, but instead to reduce its alignment to 1. This attribute can +// either be applied to members of a structure or to a structure in its +// entirety. Applying this attribute (judiciously) to a structure in its +// entirety to optimize the memory footprint of very commonly-used structs is +// fine. Do not apply this attribute to a structure in its entirety if the +// purpose is to control the offsets of the members in the structure. Instead, +// apply this attribute only to structure members that need it. // // When applying ABSL_ATTRIBUTE_PACKED only to specific structure members the // natural alignment of structure members not annotated is preserved. Aligned @@ -620,24 +597,30 @@ // case 42: // ... // -// Notes: When supported, GCC and Clang can issue a warning on switch labels -// with unannotated fallthrough using the warning `-Wimplicit-fallthrough`. See -// clang documentation on language extensions for details: +// Notes: when compiled with clang in C++11 mode, the ABSL_FALLTHROUGH_INTENDED +// macro is expanded to the [[clang::fallthrough]] attribute, which is analysed +// when performing switch labels fall-through diagnostic +// (`-Wimplicit-fallthrough`). See clang documentation on language extensions +// for details: // https://clang.llvm.org/docs/AttributeReference.html#fallthrough-clang-fallthrough // -// When used with unsupported compilers, the ABSL_FALLTHROUGH_INTENDED macro has -// no effect on diagnostics. In any case this macro has no effect on runtime +// When used with unsupported compilers, the ABSL_FALLTHROUGH_INTENDED macro +// has no effect on diagnostics. In any case this macro has no effect on runtime // behavior and performance of code. 
- #ifdef ABSL_FALLTHROUGH_INTENDED #error "ABSL_FALLTHROUGH_INTENDED should not be defined." -#elif ABSL_HAVE_CPP_ATTRIBUTE(fallthrough) -#define ABSL_FALLTHROUGH_INTENDED [[fallthrough]] -#elif ABSL_HAVE_CPP_ATTRIBUTE(clang::fallthrough) +#endif + +// TODO(zhangxy): Use c++17 standard [[fallthrough]] macro, when supported. +#if defined(__clang__) && defined(__has_warning) +#if __has_feature(cxx_attributes) && __has_warning("-Wimplicit-fallthrough") #define ABSL_FALLTHROUGH_INTENDED [[clang::fallthrough]] -#elif ABSL_HAVE_CPP_ATTRIBUTE(gnu::fallthrough) +#endif +#elif defined(__GNUC__) && __GNUC__ >= 7 #define ABSL_FALLTHROUGH_INTENDED [[gnu::fallthrough]] -#else +#endif + +#ifndef ABSL_FALLTHROUGH_INTENDED #define ABSL_FALLTHROUGH_INTENDED \ do { \ } while (0) @@ -649,9 +632,6 @@ // declarations. The macro argument is used as a custom diagnostic message (e.g. // suggestion of a better alternative). // -// For code or headers that are assured to only build with C++14 and up, prefer -// just using the standard `[[deprecated("message")]]` directly over this macro. -// // Examples: // // class ABSL_DEPRECATED("Use Bar instead") Foo {...}; @@ -662,17 +642,14 @@ // ABSL_DEPRECATED("Use DoThat() instead") // void DoThis(); // -// enum FooEnum { -// kBar ABSL_DEPRECATED("Use kBaz instead"), -// }; -// // Every usage of a deprecated entity will trigger a warning when compiled with -// GCC/Clang's `-Wdeprecated-declarations` option. Google's production toolchain -// turns this warning off by default, instead relying on clang-tidy to report -// new uses of deprecated code. -#if ABSL_HAVE_ATTRIBUTE(deprecated) +// clang's `-Wdeprecated-declarations` option. This option is turned off by +// default, but the warnings will be reported by clang-tidy. 
+#if defined(__clang__) && __cplusplus >= 201103L #define ABSL_DEPRECATED(message) __attribute__((deprecated(message))) -#else +#endif + +#ifndef ABSL_DEPRECATED #define ABSL_DEPRECATED(message) #endif @@ -682,18 +659,9 @@ // not compile (on supported platforms) unless the variable has a constant // initializer. This is useful for variables with static and thread storage // duration, because it guarantees that they will not suffer from the so-called -// "static init order fiasco". -// -// This attribute must be placed on the initializing declaration of the -// variable. Some compilers will give a -Wmissing-constinit warning when this -// attribute is placed on some other declaration but missing from the -// initializing declaration. -// -// In some cases (notably with thread_local variables), `ABSL_CONST_INIT` can -// also be used in a non-initializing declaration to tell the compiler that a -// variable is already initialized, reducing overhead that would otherwise be -// incurred by a hidden guard variable. Thus annotating all declarations with -// this attribute is recommended to potentially enhance optimization. +// "static init order fiasco". Prefer to put this attribute on the most visible +// declaration of the variable, if there's more than one, because code that +// accesses the variable can then use the attribute for optimization. // // Example: // @@ -702,61 +670,13 @@ // ABSL_CONST_INIT static MyType my_var; // }; // -// ABSL_CONST_INIT MyType MyClass::my_var = MakeMyType(...); -// -// For code or headers that are assured to only build with C++20 and up, prefer -// just using the standard `constinit` keyword directly over this macro. +// MyType MyClass::my_var = MakeMyType(...); // // Note that this attribute is redundant if the variable is declared constexpr. 
-#if defined(__cpp_constinit) && __cpp_constinit >= 201907L -#define ABSL_CONST_INIT constinit -#elif ABSL_HAVE_CPP_ATTRIBUTE(clang::require_constant_initialization) +#if ABSL_HAVE_CPP_ATTRIBUTE(clang::require_constant_initialization) #define ABSL_CONST_INIT [[clang::require_constant_initialization]] #else #define ABSL_CONST_INIT -#endif - -// ABSL_ATTRIBUTE_PURE_FUNCTION -// -// ABSL_ATTRIBUTE_PURE_FUNCTION is used to annotate declarations of "pure" -// functions. A function is pure if its return value is only a function of its -// arguments. The pure attribute prohibits a function from modifying the state -// of the program that is observable by means other than inspecting the -// function's return value. Declaring such functions with the pure attribute -// allows the compiler to avoid emitting some calls in repeated invocations of -// the function with the same argument values. -// -// Example: -// -// ABSL_ATTRIBUTE_PURE_FUNCTION int64_t ToInt64Milliseconds(Duration d); -#if ABSL_HAVE_CPP_ATTRIBUTE(gnu::pure) -#define ABSL_ATTRIBUTE_PURE_FUNCTION [[gnu::pure]] -#elif ABSL_HAVE_ATTRIBUTE(pure) -#define ABSL_ATTRIBUTE_PURE_FUNCTION __attribute__((pure)) -#else -#define ABSL_ATTRIBUTE_PURE_FUNCTION -#endif - -// ABSL_ATTRIBUTE_LIFETIME_BOUND indicates that a resource owned by a function -// parameter or implicit object parameter is retained by the return value of the -// annotated function (or, for a parameter of a constructor, in the value of the -// constructed object). This attribute causes warnings to be produced if a -// temporary object does not live long enough. -// -// When applied to a reference parameter, the referenced object is assumed to be -// retained by the return value of the function. When applied to a non-reference -// parameter (for example, a pointer or a class type), all temporaries -// referenced by the parameter are assumed to be retained by the return value of -// the function. 
-// -// See also the upstream documentation: -// https://clang.llvm.org/docs/AttributeReference.html#lifetimebound -#if ABSL_HAVE_CPP_ATTRIBUTE(clang::lifetimebound) -#define ABSL_ATTRIBUTE_LIFETIME_BOUND [[clang::lifetimebound]] -#elif ABSL_HAVE_ATTRIBUTE(lifetimebound) -#define ABSL_ATTRIBUTE_LIFETIME_BOUND __attribute__((lifetimebound)) -#else -#define ABSL_ATTRIBUTE_LIFETIME_BOUND -#endif +#endif // ABSL_HAVE_CPP_ATTRIBUTE(clang::require_constant_initialization) #endif // ABSL_BASE_ATTRIBUTES_H_ diff --git a/abseil-cpp/absl/base/call_once.h b/abseil-cpp/absl/base/call_once.h index 96109f53..5b468af8 100644 --- a/abseil-cpp/absl/base/call_once.h +++ b/abseil-cpp/absl/base/call_once.h @@ -177,8 +177,15 @@ void CallOnceImpl(std::atomic* control, scheduling_mode) == kOnceInit) { base_internal::invoke(std::forward(fn), std::forward(args)...); - old_control = - control->exchange(base_internal::kOnceDone, std::memory_order_release); + // The call to SpinLockWake below is an optimization, because the waiter + // in SpinLockWait is waiting with a short timeout. The atomic load/store + // sequence is slightly faster than an atomic exchange: + // old_control = control->exchange(base_internal::kOnceDone, + // std::memory_order_release); + // We opt for a slightly faster case when there are no waiters, in spite + // of longer tail latency when there are waiters. + old_control = control->load(std::memory_order_relaxed); + control->store(base_internal::kOnceDone, std::memory_order_release); if (old_control == base_internal::kOnceWaiter) { base_internal::SpinLockWake(control, true); } diff --git a/abseil-cpp/absl/base/casts.h b/abseil-cpp/absl/base/casts.h index b99adb06..83c69126 100644 --- a/abseil-cpp/absl/base/casts.h +++ b/abseil-cpp/absl/base/casts.h @@ -29,10 +29,6 @@ #include #include -#if defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L -#include // For std::bit_cast. 
-#endif // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L - #include "absl/base/internal/identity.h" #include "absl/base/macros.h" #include "absl/meta/type_traits.h" @@ -40,6 +36,19 @@ namespace absl { ABSL_NAMESPACE_BEGIN +namespace internal_casts { + +template +struct is_bitcastable + : std::integral_constant< + bool, + sizeof(Dest) == sizeof(Source) && + type_traits_internal::is_trivially_copyable::value && + type_traits_internal::is_trivially_copyable::value && + std::is_default_constructible::value> {}; + +} // namespace internal_casts + // implicit_cast() // // Performs an implicit conversion between types following the language @@ -96,83 +105,81 @@ constexpr To implicit_cast(typename absl::internal::identity_t to) { // bit_cast() // -// Creates a value of the new type `Dest` whose representation is the same as -// that of the argument, which is of (deduced) type `Source` (a "bitwise cast"; -// every bit in the value representation of the result is equal to the -// corresponding bit in the object representation of the source). Source and -// destination types must be of the same size, and both types must be trivially -// copyable. +// Performs a bitwise cast on a type without changing the underlying bit +// representation of that type's value. The two types must be of the same size +// and both types must be trivially copyable. As with most casts, use with +// caution. A `bit_cast()` might be needed when you need to temporarily treat a +// type as some other type, such as in the following cases: // -// As with most casts, use with caution. 
A `bit_cast()` might be needed when you -// need to treat a value as the value of some other type, for example, to access -// the individual bits of an object which are not normally accessible through -// the object's type, such as for working with the binary representation of a -// floating point value: +// * Serialization (casting temporarily to `char *` for those purposes is +// always allowed by the C++ standard) +// * Managing the individual bits of a type within mathematical operations +// that are not normally accessible through that type +// * Casting non-pointer types to pointer types (casting the other way is +// allowed by `reinterpret_cast()` but round-trips cannot occur the other +// way). +// +// Example: // // float f = 3.14159265358979; -// int i = bit_cast(f); +// int i = bit_cast(f); // // i = 0x40490fdb // -// Reinterpreting and accessing a value directly as a different type (as shown -// below) usually results in undefined behavior. +// Casting non-pointer types to pointer types and then dereferencing them +// traditionally produces undefined behavior. // // Example: // // // WRONG -// float f = 3.14159265358979; -// int i = reinterpret_cast(f); // Wrong -// int j = *reinterpret_cast(&f); // Equally wrong -// int k = *bit_cast(&f); // Equally wrong +// float f = 3.14159265358979; // WRONG +// int i = * reinterpret_cast(&f); // WRONG // -// Reinterpret-casting results in undefined behavior according to the ISO C++ -// specification, section [basic.lval]. Roughly, this section says: if an object -// in memory has one type, and a program accesses it with a different type, the -// result is undefined behavior for most "different type". -// -// Using bit_cast on a pointer and then dereferencing it is no better than using -// reinterpret_cast. You should only use bit_cast on the value itself. +// The address-casting method produces undefined behavior according to the ISO +// C++ specification section [basic.lval]. 
Roughly, this section says: if an +// object in memory has one type, and a program accesses it with a different +// type, the result is undefined behavior for most values of "different type". // // Such casting results in type punning: holding an object in memory of one type // and reading its bits back using a different type. A `bit_cast()` avoids this -// issue by copying the object representation to a new value, which avoids -// introducing this undefined behavior (since the original value is never -// accessed in the wrong way). -// -// The requirements of `absl::bit_cast` are more strict than that of -// `std::bit_cast` unless compiler support is available. Specifically, without -// compiler support, this implementation also requires `Dest` to be -// default-constructible. In C++20, `absl::bit_cast` is replaced by -// `std::bit_cast`. -#if defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L - -using std::bit_cast; - -#else // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L - -template ::value && - type_traits_internal::is_trivially_copyable::value -#if !ABSL_HAVE_BUILTIN(__builtin_bit_cast) - && std::is_default_constructible::value -#endif // !ABSL_HAVE_BUILTIN(__builtin_bit_cast) - , - int>::type = 0> -#if ABSL_HAVE_BUILTIN(__builtin_bit_cast) -inline constexpr Dest bit_cast(const Source& source) { - return __builtin_bit_cast(Dest, source); -} -#else // ABSL_HAVE_BUILTIN(__builtin_bit_cast) +// issue by implementing its casts using `memcpy()`, which avoids introducing +// this undefined behavior. +// +// NOTE: The requirements here are more strict than the bit_cast of standard +// proposal p0476 due to the need for workarounds and lack of intrinsics. +// Specifically, this implementation also requires `Dest` to be +// default-constructible. 
+template < + typename Dest, typename Source, + typename std::enable_if::value, + int>::type = 0> inline Dest bit_cast(const Source& source) { Dest dest; memcpy(static_cast(std::addressof(dest)), static_cast(std::addressof(source)), sizeof(dest)); return dest; } -#endif // ABSL_HAVE_BUILTIN(__builtin_bit_cast) -#endif // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L +// NOTE: This overload is only picked if the requirements of bit_cast are +// not met. It is therefore UB, but is provided temporarily as previous +// versions of this function template were unchecked. Do not use this in +// new code. +template < + typename Dest, typename Source, + typename std::enable_if< + !internal_casts::is_bitcastable::value, + int>::type = 0> +ABSL_DEPRECATED( + "absl::bit_cast type requirements were violated. Update the types " + "being used such that they are the same size and are both " + "TriviallyCopyable.") +inline Dest bit_cast(const Source& source) { + static_assert(sizeof(Dest) == sizeof(Source), + "Source and destination types should have equal sizes."); + + Dest dest; + memcpy(&dest, &source, sizeof(dest)); + return dest; +} ABSL_NAMESPACE_END } // namespace absl diff --git a/abseil-cpp/absl/base/config.h b/abseil-cpp/absl/base/config.h index 8533aead..c1d0494e 100644 --- a/abseil-cpp/absl/base/config.h +++ b/abseil-cpp/absl/base/config.h @@ -56,25 +56,6 @@ #include #endif // __cplusplus -// ABSL_INTERNAL_CPLUSPLUS_LANG -// -// MSVC does not set the value of __cplusplus correctly, but instead uses -// _MSVC_LANG as a stand-in. -// https://docs.microsoft.com/en-us/cpp/preprocessor/predefined-macros -// -// However, there are reports that MSVC even sets _MSVC_LANG incorrectly at -// times, for example: -// https://github.com/microsoft/vscode-cpptools/issues/1770 -// https://reviews.llvm.org/D70996 -// -// For this reason, this symbol is considered INTERNAL and code outside of -// Abseil must not use it. 
-#if defined(_MSVC_LANG) -#define ABSL_INTERNAL_CPLUSPLUS_LANG _MSVC_LANG -#elif defined(__cplusplus) -#define ABSL_INTERNAL_CPLUSPLUS_LANG __cplusplus -#endif - #if defined(__APPLE__) // Included for TARGET_OS_IPHONE, __IPHONE_OS_VERSION_MIN_REQUIRED, // __IPHONE_8_0. @@ -85,35 +66,6 @@ #include "absl/base/options.h" #include "absl/base/policy_checks.h" -// Abseil long-term support (LTS) releases will define -// `ABSL_LTS_RELEASE_VERSION` to the integer representing the date string of the -// LTS release version, and will define `ABSL_LTS_RELEASE_PATCH_LEVEL` to the -// integer representing the patch-level for that release. -// -// For example, for LTS release version "20300401.2", this would give us -// ABSL_LTS_RELEASE_VERSION == 20300401 && ABSL_LTS_RELEASE_PATCH_LEVEL == 2 -// -// These symbols will not be defined in non-LTS code. -// -// Abseil recommends that clients live-at-head. Therefore, if you are using -// these symbols to assert a minimum version requirement, we recommend you do it -// as -// -// #if defined(ABSL_LTS_RELEASE_VERSION) && ABSL_LTS_RELEASE_VERSION < 20300401 -// #error Project foo requires Abseil LTS version >= 20300401 -// #endif -// -// The `defined(ABSL_LTS_RELEASE_VERSION)` part of the check excludes -// live-at-head clients from the minimum version assertion. -// -// See https://abseil.io/about/releases for more information on Abseil release -// management. -// -// LTS releases can be obtained from -// https://github.com/abseil/abseil-cpp/releases. -#define ABSL_LTS_RELEASE_VERSION 20220623 -#define ABSL_LTS_RELEASE_PATCH_LEVEL 0 - // Helper macro to convert a CPP variable to a string literal. 
#define ABSL_INTERNAL_DO_TOKEN_STR(x) #x #define ABSL_INTERNAL_TOKEN_STR(x) ABSL_INTERNAL_DO_TOKEN_STR(x) @@ -169,16 +121,10 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || #if ABSL_OPTION_USE_INLINE_NAMESPACE == 0 #define ABSL_NAMESPACE_BEGIN #define ABSL_NAMESPACE_END -#define ABSL_INTERNAL_C_SYMBOL(x) x #elif ABSL_OPTION_USE_INLINE_NAMESPACE == 1 #define ABSL_NAMESPACE_BEGIN \ inline namespace ABSL_OPTION_INLINE_NAMESPACE_NAME { #define ABSL_NAMESPACE_END } -#define ABSL_INTERNAL_C_SYMBOL_HELPER_2(x, v) x##_##v -#define ABSL_INTERNAL_C_SYMBOL_HELPER_1(x, v) \ - ABSL_INTERNAL_C_SYMBOL_HELPER_2(x, v) -#define ABSL_INTERNAL_C_SYMBOL(x) \ - ABSL_INTERNAL_C_SYMBOL_HELPER_1(x, ABSL_OPTION_INLINE_NAMESPACE_NAME) #else #error options.h is misconfigured. #endif @@ -202,35 +148,24 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || #define ABSL_HAVE_BUILTIN(x) 0 #endif -#ifdef __has_feature -#define ABSL_HAVE_FEATURE(f) __has_feature(f) -#else -#define ABSL_HAVE_FEATURE(f) 0 -#endif - -// Portable check for GCC minimum version: -// https://gcc.gnu.org/onlinedocs/cpp/Common-Predefined-Macros.html -#if defined(__GNUC__) && defined(__GNUC_MINOR__) -#define ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(x, y) \ - (__GNUC__ > (x) || __GNUC__ == (x) && __GNUC_MINOR__ >= (y)) +#if defined(__is_identifier) +#define ABSL_INTERNAL_HAS_KEYWORD(x) !(__is_identifier(x)) #else -#define ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(x, y) 0 +#define ABSL_INTERNAL_HAS_KEYWORD(x) 0 #endif -#if defined(__clang__) && defined(__clang_major__) && defined(__clang_minor__) -#define ABSL_INTERNAL_HAVE_MIN_CLANG_VERSION(x, y) \ - (__clang_major__ > (x) || __clang_major__ == (x) && __clang_minor__ >= (y)) +#ifdef __has_feature +#define ABSL_HAVE_FEATURE(f) __has_feature(f) #else -#define ABSL_INTERNAL_HAVE_MIN_CLANG_VERSION(x, y) 0 +#define ABSL_HAVE_FEATURE(f) 0 #endif // ABSL_HAVE_TLS is defined to 1 when __thread should be supported. 
-// We assume __thread is supported on Linux or Asylo when compiled with Clang or -// compiled against libstdc++ with _GLIBCXX_HAVE_TLS defined. +// We assume __thread is supported on Linux when compiled with Clang or compiled +// against libstdc++ with _GLIBCXX_HAVE_TLS defined. #ifdef ABSL_HAVE_TLS #error ABSL_HAVE_TLS cannot be directly set -#elif (defined(__linux__) || defined(__ASYLO__)) && \ - (defined(__clang__) || defined(_GLIBCXX_HAVE_TLS)) +#elif defined(__linux__) && (defined(__clang__) || defined(_GLIBCXX_HAVE_TLS)) #define ABSL_HAVE_TLS 1 #endif @@ -242,9 +177,10 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || // gcc >= 4.8.1 using libstdc++, and Visual Studio. #ifdef ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE #error ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE cannot be directly set -#elif defined(_LIBCPP_VERSION) || defined(_MSC_VER) || \ - (!defined(__clang__) && defined(__GLIBCXX__) && \ - ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(4, 8)) +#elif defined(_LIBCPP_VERSION) || \ + (!defined(__clang__) && defined(__GNUC__) && defined(__GLIBCXX__) && \ + (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))) || \ + defined(_MSC_VER) #define ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE 1 #endif @@ -257,22 +193,32 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || // // Checks whether `std::is_trivially_copy_assignable` is supported. -// Notes: Clang with libc++ supports these features, as does gcc >= 7.4 with -// libstdc++, or gcc >= 8.2 with libc++, and Visual Studio (but not NVCC). +// Notes: Clang with libc++ supports these features, as does gcc >= 5.1 with +// either libc++ or libstdc++, and Visual Studio (but not NVCC). 
#if defined(ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE) #error ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE cannot be directly set #elif defined(ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE) #error ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE cannot directly set -#elif (defined(__clang__) && defined(_LIBCPP_VERSION)) || \ - (!defined(__clang__) && \ - ((ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(7, 4) && defined(__GLIBCXX__)) || \ - (ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(8, 2) && \ - defined(_LIBCPP_VERSION)))) || \ +#elif (defined(__clang__) && defined(_LIBCPP_VERSION)) || \ + (!defined(__clang__) && defined(__GNUC__) && \ + (__GNUC__ > 7 || (__GNUC__ == 7 && __GNUC_MINOR__ >= 4)) && \ + (defined(_LIBCPP_VERSION) || defined(__GLIBCXX__))) || \ (defined(_MSC_VER) && !defined(__NVCC__)) #define ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE 1 #define ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE 1 #endif +// ABSL_HAVE_SOURCE_LOCATION_CURRENT +// +// Indicates whether `absl::SourceLocation::current()` will return useful +// information in some contexts. +#ifndef ABSL_HAVE_SOURCE_LOCATION_CURRENT +#if ABSL_INTERNAL_HAS_KEYWORD(__builtin_LINE) && \ + ABSL_INTERNAL_HAS_KEYWORD(__builtin_FILE) +#define ABSL_HAVE_SOURCE_LOCATION_CURRENT 1 +#endif +#endif + // ABSL_HAVE_THREAD_LOCAL // // Checks whether C++11's `thread_local` storage duration specifier is @@ -365,21 +311,25 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || // For further details, consult the compiler's documentation. #ifdef ABSL_HAVE_EXCEPTIONS #error ABSL_HAVE_EXCEPTIONS cannot be directly set. 
-#elif ABSL_INTERNAL_HAVE_MIN_CLANG_VERSION(3, 6) + +#elif defined(__clang__) + +#if __clang_major__ > 3 || (__clang_major__ == 3 && __clang_minor__ >= 6) // Clang >= 3.6 #if ABSL_HAVE_FEATURE(cxx_exceptions) #define ABSL_HAVE_EXCEPTIONS 1 #endif // ABSL_HAVE_FEATURE(cxx_exceptions) -#elif defined(__clang__) +#else // Clang < 3.6 // http://releases.llvm.org/3.6.0/tools/clang/docs/ReleaseNotes.html#the-exceptions-macro #if defined(__EXCEPTIONS) && ABSL_HAVE_FEATURE(cxx_exceptions) #define ABSL_HAVE_EXCEPTIONS 1 #endif // defined(__EXCEPTIONS) && ABSL_HAVE_FEATURE(cxx_exceptions) +#endif // __clang_major__ > 3 || (__clang_major__ == 3 && __clang_minor__ >= 6) + // Handle remaining special cases and default to exceptions being supported. -#elif !(defined(__GNUC__) && (__GNUC__ < 5) && !defined(__EXCEPTIONS)) && \ - !(ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(5, 0) && \ - !defined(__cpp_exceptions)) && \ +#elif !(defined(__GNUC__) && (__GNUC__ < 5) && !defined(__EXCEPTIONS)) && \ + !(defined(__GNUC__) && (__GNUC__ >= 5) && !defined(__cpp_exceptions)) && \ !(defined(_MSC_VER) && !defined(_CPPUNWIND)) #define ABSL_HAVE_EXCEPTIONS 1 #endif @@ -411,12 +361,10 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || // POSIX.1-2001. 
#ifdef ABSL_HAVE_MMAP #error ABSL_HAVE_MMAP cannot be directly set -#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \ - defined(_AIX) || defined(__ros__) || defined(__native_client__) || \ - defined(__asmjs__) || defined(__wasm__) || defined(__Fuchsia__) || \ - defined(__sun) || defined(__ASYLO__) || defined(__myriad2__) || \ - defined(__HAIKU__) || defined(__OpenBSD__) || defined(__NetBSD__) || \ - defined(__QNX__) +#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \ + defined(__ros__) || defined(__native_client__) || defined(__asmjs__) || \ + defined(__wasm__) || defined(__Fuchsia__) || defined(__sun) || \ + defined(__ASYLO__) #define ABSL_HAVE_MMAP 1 #endif @@ -427,20 +375,10 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || #ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM #error ABSL_HAVE_PTHREAD_GETSCHEDPARAM cannot be directly set #elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \ - defined(_AIX) || defined(__ros__) || defined(__OpenBSD__) || \ - defined(__NetBSD__) + defined(__ros__) #define ABSL_HAVE_PTHREAD_GETSCHEDPARAM 1 #endif -// ABSL_HAVE_SCHED_GETCPU -// -// Checks whether sched_getcpu is available. -#ifdef ABSL_HAVE_SCHED_GETCPU -#error ABSL_HAVE_SCHED_GETCPU cannot be directly set -#elif defined(__linux__) -#define ABSL_HAVE_SCHED_GETCPU 1 -#endif - // ABSL_HAVE_SCHED_YIELD // // Checks whether the platform implements sched_yield(2) as defined in @@ -523,41 +461,22 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || #error "absl endian detection needs to be set up for your compiler" #endif -// macOS < 10.13 and iOS < 11 don't let you use , , or -// even though the headers exist and are publicly noted to work, because the -// libc++ shared library shipped on the system doesn't have the requisite -// exported symbols. 
See https://github.com/abseil/abseil-cpp/issues/207 and +// macOS 10.13 and iOS 10.11 don't let you use , , or +// even though the headers exist and are publicly noted to work. See +// https://github.com/abseil/abseil-cpp/issues/207 and // https://developer.apple.com/documentation/xcode_release_notes/xcode_10_release_notes -// // libc++ spells out the availability requirements in the file // llvm-project/libcxx/include/__config via the #define // _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS. -// -// Unfortunately, Apple initially mis-stated the requirements as macOS < 10.14 -// and iOS < 12 in the libc++ headers. This was corrected by -// https://github.com/llvm/llvm-project/commit/7fb40e1569dd66292b647f4501b85517e9247953 -// which subsequently made it into the XCode 12.5 release. We need to match the -// old (incorrect) conditions when built with old XCode, but can use the -// corrected earlier versions with new XCode. -#if defined(__APPLE__) && defined(_LIBCPP_VERSION) && \ - ((_LIBCPP_VERSION >= 11000 && /* XCode 12.5 or later: */ \ - ((defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \ - __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101300) || \ - (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && \ - __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 110000) || \ - (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && \ - __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 40000) || \ - (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && \ - __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 110000))) || \ - (_LIBCPP_VERSION < 11000 && /* Pre-XCode 12.5: */ \ - ((defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \ - __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101400) || \ - (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && \ - __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 120000) || \ - (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && \ - __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 50000) || \ - 
(defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && \ - __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 120000)))) +#if defined(__APPLE__) && defined(_LIBCPP_VERSION) && \ + ((defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \ + __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101400) || \ + (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && \ + __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 120000) || \ + (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && \ + __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 50000) || \ + (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && \ + __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 120000)) #define ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE 1 #else #define ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE 0 @@ -571,7 +490,7 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || #endif #ifdef __has_include -#if __has_include() && defined(__cplusplus) && __cplusplus >= 201703L && \ +#if __has_include() && __cplusplus >= 201703L && \ !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE #define ABSL_HAVE_STD_ANY 1 #endif @@ -585,8 +504,8 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || #endif #ifdef __has_include -#if __has_include() && defined(__cplusplus) && \ - __cplusplus >= 201703L && !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE +#if __has_include() && __cplusplus >= 201703L && \ + !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE #define ABSL_HAVE_STD_OPTIONAL 1 #endif #endif @@ -599,8 +518,8 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || #endif #ifdef __has_include -#if __has_include() && defined(__cplusplus) && \ - __cplusplus >= 201703L && !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE +#if __has_include() && __cplusplus >= 201703L && \ + !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE #define ABSL_HAVE_STD_VARIANT 1 #endif #endif @@ -613,8 +532,7 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || #endif #ifdef __has_include -#if __has_include() && 
defined(__cplusplus) && \ - __cplusplus >= 201703L +#if __has_include() && __cplusplus >= 201703L #define ABSL_HAVE_STD_STRING_VIEW 1 #endif #endif @@ -626,9 +544,8 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || // not correctly set by MSVC, so we use `_MSVC_LANG` to check the language // version. // TODO(zhangxy): fix tests before enabling aliasing for `std::any`. -#if defined(_MSC_VER) && _MSC_VER >= 1910 && \ - ((defined(_MSVC_LANG) && _MSVC_LANG > 201402) || \ - (defined(__cplusplus) && __cplusplus > 201402)) +#if defined(_MSC_VER) && _MSC_VER >= 1910 && \ + ((defined(_MSVC_LANG) && _MSVC_LANG > 201402) || __cplusplus > 201402) // #define ABSL_HAVE_STD_ANY 1 #define ABSL_HAVE_STD_OPTIONAL 1 #define ABSL_HAVE_STD_VARIANT 1 @@ -727,6 +644,8 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || #endif #endif +#undef ABSL_INTERNAL_HAS_KEYWORD + // ABSL_DLL // // When building Abseil as a DLL, this macro expands to `__declspec(dllexport)` @@ -752,6 +671,12 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || // a compiler instrumentation module and a run-time library. #ifdef ABSL_HAVE_MEMORY_SANITIZER #error "ABSL_HAVE_MEMORY_SANITIZER cannot be directly set." +#elif defined(MEMORY_SANITIZER) +// The MEMORY_SANITIZER macro is deprecated but we will continue to honor it +// for now. +#define ABSL_HAVE_MEMORY_SANITIZER 1 +#elif defined(__SANITIZE_MEMORY__) +#define ABSL_HAVE_MEMORY_SANITIZER 1 #elif !defined(__native_client__) && ABSL_HAVE_FEATURE(memory_sanitizer) #define ABSL_HAVE_MEMORY_SANITIZER 1 #endif @@ -761,6 +686,10 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || // ThreadSanitizer (TSan) is a fast data race detector. #ifdef ABSL_HAVE_THREAD_SANITIZER #error "ABSL_HAVE_THREAD_SANITIZER cannot be directly set." +#elif defined(THREAD_SANITIZER) +// The THREAD_SANITIZER macro is deprecated but we will continue to honor it +// for now. 
+#define ABSL_HAVE_THREAD_SANITIZER 1 #elif defined(__SANITIZE_THREAD__) #define ABSL_HAVE_THREAD_SANITIZER 1 #elif ABSL_HAVE_FEATURE(thread_sanitizer) @@ -772,142 +701,14 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || // AddressSanitizer (ASan) is a fast memory error detector. #ifdef ABSL_HAVE_ADDRESS_SANITIZER #error "ABSL_HAVE_ADDRESS_SANITIZER cannot be directly set." +#elif defined(ADDRESS_SANITIZER) +// The ADDRESS_SANITIZER macro is deprecated but we will continue to honor it +// for now. +#define ABSL_HAVE_ADDRESS_SANITIZER 1 #elif defined(__SANITIZE_ADDRESS__) #define ABSL_HAVE_ADDRESS_SANITIZER 1 #elif ABSL_HAVE_FEATURE(address_sanitizer) #define ABSL_HAVE_ADDRESS_SANITIZER 1 #endif -// ABSL_HAVE_HWADDRESS_SANITIZER -// -// Hardware-Assisted AddressSanitizer (or HWASAN) is even faster than asan -// memory error detector which can use CPU features like ARM TBI, Intel LAM or -// AMD UAI. -#ifdef ABSL_HAVE_HWADDRESS_SANITIZER -#error "ABSL_HAVE_HWADDRESS_SANITIZER cannot be directly set." -#elif defined(__SANITIZE_HWADDRESS__) -#define ABSL_HAVE_HWADDRESS_SANITIZER 1 -#elif ABSL_HAVE_FEATURE(hwaddress_sanitizer) -#define ABSL_HAVE_HWADDRESS_SANITIZER 1 -#endif - -// ABSL_HAVE_LEAK_SANITIZER -// -// LeakSanitizer (or lsan) is a detector of memory leaks. -// https://clang.llvm.org/docs/LeakSanitizer.html -// https://github.com/google/sanitizers/wiki/AddressSanitizerLeakSanitizer -// -// The macro ABSL_HAVE_LEAK_SANITIZER can be used to detect at compile-time -// whether the LeakSanitizer is potentially available. However, just because the -// LeakSanitizer is available does not mean it is active. Use the -// always-available run-time interface in //absl/debugging/leak_check.h for -// interacting with LeakSanitizer. -#ifdef ABSL_HAVE_LEAK_SANITIZER -#error "ABSL_HAVE_LEAK_SANITIZER cannot be directly set." 
-#elif defined(LEAK_SANITIZER) -// GCC provides no method for detecting the presense of the standalone -// LeakSanitizer (-fsanitize=leak), so GCC users of -fsanitize=leak should also -// use -DLEAK_SANITIZER. -#define ABSL_HAVE_LEAK_SANITIZER 1 -// Clang standalone LeakSanitizer (-fsanitize=leak) -#elif ABSL_HAVE_FEATURE(leak_sanitizer) -#define ABSL_HAVE_LEAK_SANITIZER 1 -#elif defined(ABSL_HAVE_ADDRESS_SANITIZER) -// GCC or Clang using the LeakSanitizer integrated into AddressSanitizer. -#define ABSL_HAVE_LEAK_SANITIZER 1 -#endif - -// ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION -// -// Class template argument deduction is a language feature added in C++17. -#ifdef ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION -#error "ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION cannot be directly set." -#elif defined(__cpp_deduction_guides) -#define ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION 1 -#endif - -// ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL -// -// Prior to C++17, static constexpr variables defined in classes required a -// separate definition outside of the class body, for example: -// -// class Foo { -// static constexpr int kBar = 0; -// }; -// constexpr int Foo::kBar; -// -// In C++17, these variables defined in classes are considered inline variables, -// and the extra declaration is redundant. Since some compilers warn on the -// extra declarations, ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL can be used -// conditionally ignore them: -// -// #ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL -// constexpr int Foo::kBar; -// #endif -#if defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \ - ABSL_INTERNAL_CPLUSPLUS_LANG < 201703L -#define ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL 1 -#endif - -// `ABSL_INTERNAL_HAS_RTTI` determines whether abseil is being compiled with -// RTTI support. 
-#ifdef ABSL_INTERNAL_HAS_RTTI -#error ABSL_INTERNAL_HAS_RTTI cannot be directly set -#elif !defined(__GNUC__) || defined(__GXX_RTTI) -#define ABSL_INTERNAL_HAS_RTTI 1 -#endif // !defined(__GNUC__) || defined(__GXX_RTTI) - -// ABSL_INTERNAL_HAVE_SSE is used for compile-time detection of SSE support. -// See https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html for an overview of -// which architectures support the various x86 instruction sets. -#ifdef ABSL_INTERNAL_HAVE_SSE -#error ABSL_INTERNAL_HAVE_SSE cannot be directly set -#elif defined(__SSE__) -#define ABSL_INTERNAL_HAVE_SSE 1 -#elif defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 1) -// MSVC only defines _M_IX86_FP for x86 32-bit code, and _M_IX86_FP >= 1 -// indicates that at least SSE was targeted with the /arch:SSE option. -// All x86-64 processors support SSE, so support can be assumed. -// https://docs.microsoft.com/en-us/cpp/preprocessor/predefined-macros -#define ABSL_INTERNAL_HAVE_SSE 1 -#endif - -// ABSL_INTERNAL_HAVE_SSE2 is used for compile-time detection of SSE2 support. -// See https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html for an overview of -// which architectures support the various x86 instruction sets. -#ifdef ABSL_INTERNAL_HAVE_SSE2 -#error ABSL_INTERNAL_HAVE_SSE2 cannot be directly set -#elif defined(__SSE2__) -#define ABSL_INTERNAL_HAVE_SSE2 1 -#elif defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 2) -// MSVC only defines _M_IX86_FP for x86 32-bit code, and _M_IX86_FP >= 2 -// indicates that at least SSE2 was targeted with the /arch:SSE2 option. -// All x86-64 processors support SSE2, so support can be assumed. -// https://docs.microsoft.com/en-us/cpp/preprocessor/predefined-macros -#define ABSL_INTERNAL_HAVE_SSE2 1 -#endif - -// ABSL_INTERNAL_HAVE_SSSE3 is used for compile-time detection of SSSE3 support. -// See https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html for an overview of -// which architectures support the various x86 instruction sets. 
-// -// MSVC does not have a mode that targets SSSE3 at compile-time. To use SSSE3 -// with MSVC requires either assuming that the code will only every run on CPUs -// that support SSSE3, otherwise __cpuid() can be used to detect support at -// runtime and fallback to a non-SSSE3 implementation when SSSE3 is unsupported -// by the CPU. -#ifdef ABSL_INTERNAL_HAVE_SSSE3 -#error ABSL_INTERNAL_HAVE_SSSE3 cannot be directly set -#elif defined(__SSSE3__) -#define ABSL_INTERNAL_HAVE_SSSE3 1 -#endif - -// ABSL_INTERNAL_HAVE_ARM_NEON is used for compile-time detection of NEON (ARM -// SIMD). -#ifdef ABSL_INTERNAL_HAVE_ARM_NEON -#error ABSL_INTERNAL_HAVE_ARM_NEON cannot be directly set -#elif defined(__ARM_NEON) -#define ABSL_INTERNAL_HAVE_ARM_NEON 1 -#endif - #endif // ABSL_BASE_CONFIG_H_ diff --git a/abseil-cpp/absl/base/dynamic_annotations.h b/abseil-cpp/absl/base/dynamic_annotations.h index 3ea7c156..545f8cbc 100644 --- a/abseil-cpp/absl/base/dynamic_annotations.h +++ b/abseil-cpp/absl/base/dynamic_annotations.h @@ -110,9 +110,6 @@ // Define race annotations. #if ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 1 -// Some of the symbols used in this section (e.g. AnnotateBenignRaceSized) are -// defined by the compiler-based santizer implementation, not by the Abseil -// library. Therefore they do not use ABSL_INTERNAL_C_SYMBOL. // ------------------------------------------------------------- // Annotations that suppress errors. It is usually better to express the @@ -289,22 +286,17 @@ ABSL_INTERNAL_END_EXTERN_C // Define IGNORE_READS_BEGIN/_END annotations. #if ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED == 1 -// Some of the symbols used in this section (e.g. AnnotateIgnoreReadsBegin) are -// defined by the compiler-based implementation, not by the Abseil -// library. Therefore they do not use ABSL_INTERNAL_C_SYMBOL. // Request the analysis tool to ignore all reads in the current thread until // ABSL_ANNOTATE_IGNORE_READS_END is called. 
Useful to ignore intentional racey // reads, while still checking other reads and all writes. // See also ABSL_ANNOTATE_UNPROTECTED_READ. -#define ABSL_ANNOTATE_IGNORE_READS_BEGIN() \ - ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsBegin) \ - (__FILE__, __LINE__) +#define ABSL_ANNOTATE_IGNORE_READS_BEGIN() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsBegin)(__FILE__, __LINE__) // Stop ignoring reads. -#define ABSL_ANNOTATE_IGNORE_READS_END() \ - ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsEnd) \ - (__FILE__, __LINE__) +#define ABSL_ANNOTATE_IGNORE_READS_END() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsEnd)(__FILE__, __LINE__) // Function prototypes of annotations provided by the compiler-based sanitizer // implementation. @@ -324,22 +316,16 @@ ABSL_INTERNAL_END_EXTERN_C // TODO(delesley) -- The exclusive lock here ignores writes as well, but // allows IGNORE_READS_AND_WRITES to work properly. -#define ABSL_ANNOTATE_IGNORE_READS_BEGIN() \ - ABSL_INTERNAL_GLOBAL_SCOPED( \ - ABSL_INTERNAL_C_SYMBOL(AbslInternalAnnotateIgnoreReadsBegin)) \ - () +#define ABSL_ANNOTATE_IGNORE_READS_BEGIN() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AbslInternalAnnotateIgnoreReadsBegin)() -#define ABSL_ANNOTATE_IGNORE_READS_END() \ - ABSL_INTERNAL_GLOBAL_SCOPED( \ - ABSL_INTERNAL_C_SYMBOL(AbslInternalAnnotateIgnoreReadsEnd)) \ - () +#define ABSL_ANNOTATE_IGNORE_READS_END() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AbslInternalAnnotateIgnoreReadsEnd)() -ABSL_INTERNAL_STATIC_INLINE void ABSL_INTERNAL_C_SYMBOL( - AbslInternalAnnotateIgnoreReadsBegin)() +ABSL_INTERNAL_STATIC_INLINE void AbslInternalAnnotateIgnoreReadsBegin() ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE {} -ABSL_INTERNAL_STATIC_INLINE void ABSL_INTERNAL_C_SYMBOL( - AbslInternalAnnotateIgnoreReadsEnd)() +ABSL_INTERNAL_STATIC_INLINE void AbslInternalAnnotateIgnoreReadsEnd() ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE {} #else @@ -433,6 +419,31 @@ ABSL_NAMESPACE_END #endif +#ifdef __cplusplus +#ifdef ABSL_HAVE_THREAD_SANITIZER 
+ABSL_INTERNAL_BEGIN_EXTERN_C +int RunningOnValgrind(); +double ValgrindSlowdown(); +ABSL_INTERNAL_END_EXTERN_C +#else +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { +ABSL_DEPRECATED( + "Don't use this interface. It is misleading and is being deleted.") +ABSL_ATTRIBUTE_ALWAYS_INLINE inline int RunningOnValgrind() { return 0; } +ABSL_DEPRECATED( + "Don't use this interface. It is misleading and is being deleted.") +ABSL_ATTRIBUTE_ALWAYS_INLINE inline double ValgrindSlowdown() { return 1.0; } +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +using absl::base_internal::RunningOnValgrind; +using absl::base_internal::ValgrindSlowdown; +#endif +#endif + // ------------------------------------------------------------------------- // Address sanitizer annotations @@ -446,7 +457,7 @@ ABSL_NAMESPACE_END __sanitizer_annotate_contiguous_container(beg, end, old_mid, new_mid) #define ABSL_ADDRESS_SANITIZER_REDZONE(name) \ struct { \ - alignas(8) char x[8]; \ + char x[8] __attribute__((aligned(8))); \ } name #else diff --git a/abseil-cpp/absl/base/exception_safety_testing_test.cc b/abseil-cpp/absl/base/exception_safety_testing_test.cc index a87fd6a9..a59be29e 100644 --- a/abseil-cpp/absl/base/exception_safety_testing_test.cc +++ b/abseil-cpp/absl/base/exception_safety_testing_test.cc @@ -701,10 +701,7 @@ struct BasicGuaranteeWithExtraContracts : public NonNegative { static constexpr int kExceptionSentinel = 9999; }; - -#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL constexpr int BasicGuaranteeWithExtraContracts::kExceptionSentinel; -#endif TEST(ExceptionCheckTest, BasicGuaranteeWithExtraContracts) { auto tester_with_val = diff --git a/abseil-cpp/absl/base/internal/bits.h b/abseil-cpp/absl/base/internal/bits.h new file mode 100644 index 00000000..81648e2c --- /dev/null +++ b/abseil-cpp/absl/base/internal/bits.h @@ -0,0 +1,219 @@ +// Copyright 2018 The Abseil Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_BASE_INTERNAL_BITS_H_ +#define ABSL_BASE_INTERNAL_BITS_H_ + +// This file contains bitwise ops which are implementation details of various +// absl libraries. + +#include + +#include "absl/base/config.h" + +// Clang on Windows has __builtin_clzll; otherwise we need to use the +// windows intrinsic functions. +#if defined(_MSC_VER) && !defined(__clang__) +#include +#if defined(_M_X64) +#pragma intrinsic(_BitScanReverse64) +#pragma intrinsic(_BitScanForward64) +#endif +#pragma intrinsic(_BitScanReverse) +#pragma intrinsic(_BitScanForward) +#endif + +#include "absl/base/attributes.h" + +#if defined(_MSC_VER) && !defined(__clang__) +// We can achieve something similar to attribute((always_inline)) with MSVC by +// using the __forceinline keyword, however this is not perfect. MSVC is +// much less aggressive about inlining, and even with the __forceinline keyword. +#define ABSL_BASE_INTERNAL_FORCEINLINE __forceinline +#else +// Use default attribute inline. 
+#define ABSL_BASE_INTERNAL_FORCEINLINE inline ABSL_ATTRIBUTE_ALWAYS_INLINE +#endif + + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros64Slow(uint64_t n) { + int zeroes = 60; + if (n >> 32) { + zeroes -= 32; + n >>= 32; + } + if (n >> 16) { + zeroes -= 16; + n >>= 16; + } + if (n >> 8) { + zeroes -= 8; + n >>= 8; + } + if (n >> 4) { + zeroes -= 4; + n >>= 4; + } + return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[n] + zeroes; +} + +ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros64(uint64_t n) { +#if defined(_MSC_VER) && !defined(__clang__) && defined(_M_X64) + // MSVC does not have __buitin_clzll. Use _BitScanReverse64. + unsigned long result = 0; // NOLINT(runtime/int) + if (_BitScanReverse64(&result, n)) { + return 63 - result; + } + return 64; +#elif defined(_MSC_VER) && !defined(__clang__) + // MSVC does not have __buitin_clzll. Compose two calls to _BitScanReverse + unsigned long result = 0; // NOLINT(runtime/int) + if ((n >> 32) && + _BitScanReverse(&result, static_cast(n >> 32))) { + return 31 - result; + } + if (_BitScanReverse(&result, static_cast(n))) { + return 63 - result; + } + return 64; +#elif defined(__GNUC__) || defined(__clang__) + // Use __builtin_clzll, which uses the following instructions: + // x86: bsr + // ARM64: clz + // PPC: cntlzd + static_assert(sizeof(unsigned long long) == sizeof(n), // NOLINT(runtime/int) + "__builtin_clzll does not take 64-bit arg"); + + // Handle 0 as a special case because __builtin_clzll(0) is undefined. 
+ if (n == 0) { + return 64; + } + return __builtin_clzll(n); +#else + return CountLeadingZeros64Slow(n); +#endif +} + +ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros32Slow(uint64_t n) { + int zeroes = 28; + if (n >> 16) { + zeroes -= 16; + n >>= 16; + } + if (n >> 8) { + zeroes -= 8; + n >>= 8; + } + if (n >> 4) { + zeroes -= 4; + n >>= 4; + } + return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[n] + zeroes; +} + +ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros32(uint32_t n) { +#if defined(_MSC_VER) && !defined(__clang__) + unsigned long result = 0; // NOLINT(runtime/int) + if (_BitScanReverse(&result, n)) { + return 31 - result; + } + return 32; +#elif defined(__GNUC__) || defined(__clang__) + // Use __builtin_clz, which uses the following instructions: + // x86: bsr + // ARM64: clz + // PPC: cntlzd + static_assert(sizeof(int) == sizeof(n), + "__builtin_clz does not take 32-bit arg"); + + // Handle 0 as a special case because __builtin_clz(0) is undefined. + if (n == 0) { + return 32; + } + return __builtin_clz(n); +#else + return CountLeadingZeros32Slow(n); +#endif +} + +ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero64Slow(uint64_t n) { + int c = 63; + n &= ~n + 1; + if (n & 0x00000000FFFFFFFF) c -= 32; + if (n & 0x0000FFFF0000FFFF) c -= 16; + if (n & 0x00FF00FF00FF00FF) c -= 8; + if (n & 0x0F0F0F0F0F0F0F0F) c -= 4; + if (n & 0x3333333333333333) c -= 2; + if (n & 0x5555555555555555) c -= 1; + return c; +} + +ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero64(uint64_t n) { +#if defined(_MSC_VER) && !defined(__clang__) && defined(_M_X64) + unsigned long result = 0; // NOLINT(runtime/int) + _BitScanForward64(&result, n); + return result; +#elif defined(_MSC_VER) && !defined(__clang__) + unsigned long result = 0; // NOLINT(runtime/int) + if (static_cast(n) == 0) { + _BitScanForward(&result, static_cast(n >> 32)); + return result + 32; + } + _BitScanForward(&result, static_cast(n)); + return result; +#elif defined(__GNUC__) || 
defined(__clang__) + static_assert(sizeof(unsigned long long) == sizeof(n), // NOLINT(runtime/int) + "__builtin_ctzll does not take 64-bit arg"); + return __builtin_ctzll(n); +#else + return CountTrailingZerosNonZero64Slow(n); +#endif +} + +ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero32Slow(uint32_t n) { + int c = 31; + n &= ~n + 1; + if (n & 0x0000FFFF) c -= 16; + if (n & 0x00FF00FF) c -= 8; + if (n & 0x0F0F0F0F) c -= 4; + if (n & 0x33333333) c -= 2; + if (n & 0x55555555) c -= 1; + return c; +} + +ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero32(uint32_t n) { +#if defined(_MSC_VER) && !defined(__clang__) + unsigned long result = 0; // NOLINT(runtime/int) + _BitScanForward(&result, n); + return result; +#elif defined(__GNUC__) || defined(__clang__) + static_assert(sizeof(int) == sizeof(n), + "__builtin_ctz does not take 32-bit arg"); + return __builtin_ctz(n); +#else + return CountTrailingZerosNonZero32Slow(n); +#endif +} + +#undef ABSL_BASE_INTERNAL_FORCEINLINE + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_BITS_H_ diff --git a/abseil-cpp/absl/base/internal/bits_test.cc b/abseil-cpp/absl/base/internal/bits_test.cc new file mode 100644 index 00000000..7855fa62 --- /dev/null +++ b/abseil-cpp/absl/base/internal/bits_test.cc @@ -0,0 +1,97 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "absl/base/internal/bits.h" + +#include "gtest/gtest.h" + +namespace { + +int CLZ64(uint64_t n) { + int fast = absl::base_internal::CountLeadingZeros64(n); + int slow = absl::base_internal::CountLeadingZeros64Slow(n); + EXPECT_EQ(fast, slow) << n; + return fast; +} + +TEST(BitsTest, CountLeadingZeros64) { + EXPECT_EQ(64, CLZ64(uint64_t{})); + EXPECT_EQ(0, CLZ64(~uint64_t{})); + + for (int index = 0; index < 64; index++) { + uint64_t x = static_cast(1) << index; + const auto cnt = 63 - index; + ASSERT_EQ(cnt, CLZ64(x)) << index; + ASSERT_EQ(cnt, CLZ64(x + x - 1)) << index; + } +} + +int CLZ32(uint32_t n) { + int fast = absl::base_internal::CountLeadingZeros32(n); + int slow = absl::base_internal::CountLeadingZeros32Slow(n); + EXPECT_EQ(fast, slow) << n; + return fast; +} + +TEST(BitsTest, CountLeadingZeros32) { + EXPECT_EQ(32, CLZ32(uint32_t{})); + EXPECT_EQ(0, CLZ32(~uint32_t{})); + + for (int index = 0; index < 32; index++) { + uint32_t x = static_cast(1) << index; + const auto cnt = 31 - index; + ASSERT_EQ(cnt, CLZ32(x)) << index; + ASSERT_EQ(cnt, CLZ32(x + x - 1)) << index; + ASSERT_EQ(CLZ64(x), CLZ32(x) + 32); + } +} + +int CTZ64(uint64_t n) { + int fast = absl::base_internal::CountTrailingZerosNonZero64(n); + int slow = absl::base_internal::CountTrailingZerosNonZero64Slow(n); + EXPECT_EQ(fast, slow) << n; + return fast; +} + +TEST(BitsTest, CountTrailingZerosNonZero64) { + EXPECT_EQ(0, CTZ64(~uint64_t{})); + + for (int index = 0; index < 64; index++) { + uint64_t x = static_cast(1) << index; + const auto cnt = index; + ASSERT_EQ(cnt, CTZ64(x)) << index; + ASSERT_EQ(cnt, CTZ64(~(x - 1))) << index; + } +} + +int CTZ32(uint32_t n) { + int fast = absl::base_internal::CountTrailingZerosNonZero32(n); + int slow = absl::base_internal::CountTrailingZerosNonZero32Slow(n); + EXPECT_EQ(fast, slow) << n; + return fast; +} + +TEST(BitsTest, CountTrailingZerosNonZero32) { + EXPECT_EQ(0, CTZ32(~uint32_t{})); + + for (int index = 0; index < 32; index++) { + 
uint32_t x = static_cast(1) << index; + const auto cnt = index; + ASSERT_EQ(cnt, CTZ32(x)) << index; + ASSERT_EQ(cnt, CTZ32(~(x - 1))) << index; + } +} + + +} // namespace diff --git a/abseil-cpp/absl/base/internal/cycleclock.cc b/abseil-cpp/absl/base/internal/cycleclock.cc index 902e3f5e..0e65005b 100644 --- a/abseil-cpp/absl/base/internal/cycleclock.cc +++ b/abseil-cpp/absl/base/internal/cycleclock.cc @@ -25,8 +25,6 @@ #include #include // NOLINT(build/c++11) -#include "absl/base/attributes.h" -#include "absl/base/config.h" #include "absl/base/internal/unscaledcycleclock.h" namespace absl { @@ -35,20 +33,44 @@ namespace base_internal { #if ABSL_USE_UNSCALED_CYCLECLOCK -#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL -constexpr int32_t CycleClock::kShift; -constexpr double CycleClock::kFrequencyScale; +namespace { + +#ifdef NDEBUG +#ifdef ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY +// Not debug mode and the UnscaledCycleClock frequency is the CPU +// frequency. Scale the CycleClock to prevent overflow if someone +// tries to represent the time as cycles since the Unix epoch. +static constexpr int32_t kShift = 1; +#else +// Not debug mode and the UnscaledCycleClock isn't operating at the +// raw CPU frequency. There is no need to do any scaling, so don't +// needlessly sacrifice precision. +static constexpr int32_t kShift = 0; +#endif +#else +// In debug mode use a different shift to discourage depending on a +// particular shift value. +static constexpr int32_t kShift = 2; #endif -ABSL_CONST_INIT std::atomic - CycleClock::cycle_clock_source_{nullptr}; +static constexpr double kFrequencyScale = 1.0 / (1 << kShift); +static std::atomic cycle_clock_source; -void CycleClockSource::Register(CycleClockSourceFunc source) { - // Corresponds to the load(std::memory_order_acquire) in LoadCycleClockSource. 
- CycleClock::cycle_clock_source_.store(source, std::memory_order_release); +CycleClockSourceFunc LoadCycleClockSource() { + // Optimize for the common case (no callback) by first doing a relaxed load; + // this is significantly faster on non-x86 platforms. + if (cycle_clock_source.load(std::memory_order_relaxed) == nullptr) { + return nullptr; + } + // This corresponds to the store(std::memory_order_release) in + // CycleClockSource::Register, and makes sure that any updates made prior to + // registering the callback are visible to this thread before the callback is + // invoked. + return cycle_clock_source.load(std::memory_order_acquire); } -#ifdef _WIN32 +} // namespace + int64_t CycleClock::Now() { auto fn = LoadCycleClockSource(); if (fn == nullptr) { @@ -56,7 +78,15 @@ int64_t CycleClock::Now() { } return fn() >> kShift; } -#endif + +double CycleClock::Frequency() { + return kFrequencyScale * base_internal::UnscaledCycleClock::Frequency(); +} + +void CycleClockSource::Register(CycleClockSourceFunc source) { + // Corresponds to the load(std::memory_order_acquire) in LoadCycleClockSource. 
+ cycle_clock_source.store(source, std::memory_order_release); +} #else diff --git a/abseil-cpp/absl/base/internal/cycleclock.h b/abseil-cpp/absl/base/internal/cycleclock.h index 9704e388..a18b5844 100644 --- a/abseil-cpp/absl/base/internal/cycleclock.h +++ b/abseil-cpp/absl/base/internal/cycleclock.h @@ -42,19 +42,14 @@ #ifndef ABSL_BASE_INTERNAL_CYCLECLOCK_H_ #define ABSL_BASE_INTERNAL_CYCLECLOCK_H_ -#include #include -#include "absl/base/attributes.h" #include "absl/base/config.h" -#include "absl/base/internal/unscaledcycleclock.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { -using CycleClockSourceFunc = int64_t (*)(); - // ----------------------------------------------------------------------------- // CycleClock // ----------------------------------------------------------------------------- @@ -73,38 +68,13 @@ class CycleClock { static double Frequency(); private: -#if ABSL_USE_UNSCALED_CYCLECLOCK - static CycleClockSourceFunc LoadCycleClockSource(); - -#ifdef NDEBUG -#ifdef ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY - // Not debug mode and the UnscaledCycleClock frequency is the CPU - // frequency. Scale the CycleClock to prevent overflow if someone - // tries to represent the time as cycles since the Unix epoch. - static constexpr int32_t kShift = 1; -#else - // Not debug mode and the UnscaledCycleClock isn't operating at the - // raw CPU frequency. There is no need to do any scaling, so don't - // needlessly sacrifice precision. - static constexpr int32_t kShift = 0; -#endif -#else // NDEBUG - // In debug mode use a different shift to discourage depending on a - // particular shift value. 
- static constexpr int32_t kShift = 2; -#endif // NDEBUG - - static constexpr double kFrequencyScale = 1.0 / (1 << kShift); - ABSL_CONST_INIT static std::atomic cycle_clock_source_; -#endif // ABSL_USE_UNSCALED_CYCLECLOC - CycleClock() = delete; // no instances CycleClock(const CycleClock&) = delete; CycleClock& operator=(const CycleClock&) = delete; - - friend class CycleClockSource; }; +using CycleClockSourceFunc = int64_t (*)(); + class CycleClockSource { private: // CycleClockSource::Register() @@ -117,41 +87,6 @@ class CycleClockSource { static void Register(CycleClockSourceFunc source); }; -#if ABSL_USE_UNSCALED_CYCLECLOCK - -inline CycleClockSourceFunc CycleClock::LoadCycleClockSource() { -#if !defined(__x86_64__) - // Optimize for the common case (no callback) by first doing a relaxed load; - // this is significantly faster on non-x86 platforms. - if (cycle_clock_source_.load(std::memory_order_relaxed) == nullptr) { - return nullptr; - } -#endif // !defined(__x86_64__) - - // This corresponds to the store(std::memory_order_release) in - // CycleClockSource::Register, and makes sure that any updates made prior to - // registering the callback are visible to this thread before the callback - // is invoked. - return cycle_clock_source_.load(std::memory_order_acquire); -} - -// Accessing globals in inlined code in Window DLLs is problematic. 
-#ifndef _WIN32 -inline int64_t CycleClock::Now() { - auto fn = LoadCycleClockSource(); - if (fn == nullptr) { - return base_internal::UnscaledCycleClock::Now() >> kShift; - } - return fn() >> kShift; -} -#endif - -inline double CycleClock::Frequency() { - return kFrequencyScale * base_internal::UnscaledCycleClock::Frequency(); -} - -#endif // ABSL_USE_UNSCALED_CYCLECLOCK - } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl diff --git a/abseil-cpp/absl/base/internal/direct_mmap.h b/abseil-cpp/absl/base/internal/direct_mmap.h index e492bb00..16accf09 100644 --- a/abseil-cpp/absl/base/internal/direct_mmap.h +++ b/abseil-cpp/absl/base/internal/direct_mmap.h @@ -20,7 +20,7 @@ #include "absl/base/config.h" -#ifdef ABSL_HAVE_MMAP +#if ABSL_HAVE_MMAP #include @@ -41,13 +41,13 @@ #ifdef __mips__ // Include definitions of the ABI currently in use. -#if defined(__BIONIC__) || !defined(__GLIBC__) +#ifdef __BIONIC__ // Android doesn't have sgidefs.h, but does have asm/sgidefs.h, which has the // definitions we need. #include #else #include -#endif // __BIONIC__ || !__GLIBC__ +#endif // __BIONIC__ #endif // __mips__ // SYS_mmap and SYS_munmap are not defined in Android. @@ -74,13 +74,10 @@ namespace base_internal { inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd, off64_t offset) noexcept { #if defined(__i386__) || defined(__ARM_ARCH_3__) || defined(__ARM_EABI__) || \ - defined(__m68k__) || defined(__sh__) || \ - (defined(__hppa__) && !defined(__LP64__)) || \ (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32) || \ (defined(__PPC__) && !defined(__PPC64__)) || \ (defined(__riscv) && __riscv_xlen == 32) || \ - (defined(__s390__) && !defined(__s390x__)) || \ - (defined(__sparc__) && !defined(__arch64__)) + (defined(__s390__) && !defined(__s390x__)) // On these architectures, implement mmap with mmap2. 
static int pagesize = 0; if (pagesize == 0) { diff --git a/abseil-cpp/absl/base/internal/endian.h b/abseil-cpp/absl/base/internal/endian.h index 50747d75..9677530e 100644 --- a/abseil-cpp/absl/base/internal/endian.h +++ b/abseil-cpp/absl/base/internal/endian.h @@ -16,10 +16,16 @@ #ifndef ABSL_BASE_INTERNAL_ENDIAN_H_ #define ABSL_BASE_INTERNAL_ENDIAN_H_ -#include -#include +// The following guarantees declaration of the byte swap functions +#ifdef _MSC_VER +#include // NOLINT(build/include) +#elif defined(__FreeBSD__) +#include +#elif defined(__GLIBC__) +#include // IWYU pragma: export +#endif -#include "absl/base/casts.h" +#include #include "absl/base/config.h" #include "absl/base/internal/unaligned_access.h" #include "absl/base/port.h" @@ -27,11 +33,47 @@ namespace absl { ABSL_NAMESPACE_BEGIN +// Use compiler byte-swapping intrinsics if they are available. 32-bit +// and 64-bit versions are available in Clang and GCC as of GCC 4.3.0. +// The 16-bit version is available in Clang and GCC only as of GCC 4.8.0. +// For simplicity, we enable them all only for GCC 4.8.0 or later. +#if defined(__clang__) || \ + (defined(__GNUC__) && \ + ((__GNUC__ == 4 && __GNUC_MINOR__ >= 8) || __GNUC__ >= 5)) inline uint64_t gbswap_64(uint64_t host_int) { -#if ABSL_HAVE_BUILTIN(__builtin_bswap64) || defined(__GNUC__) return __builtin_bswap64(host_int); +} +inline uint32_t gbswap_32(uint32_t host_int) { + return __builtin_bswap32(host_int); +} +inline uint16_t gbswap_16(uint16_t host_int) { + return __builtin_bswap16(host_int); +} + #elif defined(_MSC_VER) +inline uint64_t gbswap_64(uint64_t host_int) { return _byteswap_uint64(host_int); +} +inline uint32_t gbswap_32(uint32_t host_int) { + return _byteswap_ulong(host_int); +} +inline uint16_t gbswap_16(uint16_t host_int) { + return _byteswap_ushort(host_int); +} + +#else +inline uint64_t gbswap_64(uint64_t host_int) { +#if defined(__GNUC__) && defined(__x86_64__) && !defined(__APPLE__) + // Adapted from /usr/include/byteswap.h. 
Not available on Mac. + if (__builtin_constant_p(host_int)) { + return __bswap_constant_64(host_int); + } else { + uint64_t result; + __asm__("bswap %0" : "=r"(result) : "0"(host_int)); + return result; + } +#elif defined(__GLIBC__) + return bswap_64(host_int); #else return (((host_int & uint64_t{0xFF}) << 56) | ((host_int & uint64_t{0xFF00}) << 40) | @@ -41,14 +83,12 @@ inline uint64_t gbswap_64(uint64_t host_int) { ((host_int & uint64_t{0xFF0000000000}) >> 24) | ((host_int & uint64_t{0xFF000000000000}) >> 40) | ((host_int & uint64_t{0xFF00000000000000}) >> 56)); -#endif +#endif // bswap_64 } inline uint32_t gbswap_32(uint32_t host_int) { -#if ABSL_HAVE_BUILTIN(__builtin_bswap32) || defined(__GNUC__) - return __builtin_bswap32(host_int); -#elif defined(_MSC_VER) - return _byteswap_ulong(host_int); +#if defined(__GLIBC__) + return bswap_32(host_int); #else return (((host_int & uint32_t{0xFF}) << 24) | ((host_int & uint32_t{0xFF00}) << 8) | @@ -58,29 +98,33 @@ inline uint32_t gbswap_32(uint32_t host_int) { } inline uint16_t gbswap_16(uint16_t host_int) { -#if ABSL_HAVE_BUILTIN(__builtin_bswap16) || defined(__GNUC__) - return __builtin_bswap16(host_int); -#elif defined(_MSC_VER) - return _byteswap_ushort(host_int); +#if defined(__GLIBC__) + return bswap_16(host_int); #else return (((host_int & uint16_t{0xFF}) << 8) | ((host_int & uint16_t{0xFF00}) >> 8)); #endif } +#endif // intrinsics available + #ifdef ABSL_IS_LITTLE_ENDIAN -// Portable definitions for htonl (host-to-network) and friends on little-endian -// architectures. +// Definitions for ntohl etc. that don't require us to include +// netinet/in.h. We wrap gbswap_32 and gbswap_16 in functions rather +// than just #defining them because in debug mode, gcc doesn't +// correctly handle the (rather involved) definitions of bswap_32. +// gcc guarantees that inline functions are as fast as macros, so +// this isn't a performance hit. 
inline uint16_t ghtons(uint16_t x) { return gbswap_16(x); } inline uint32_t ghtonl(uint32_t x) { return gbswap_32(x); } inline uint64_t ghtonll(uint64_t x) { return gbswap_64(x); } #elif defined ABSL_IS_BIG_ENDIAN -// Portable definitions for htonl (host-to-network) etc on big-endian -// architectures. These definitions are simpler since the host byte order is the -// same as network byte order. +// These definitions are simpler on big-endian machines +// These are functions instead of macros to avoid self-assignment warnings +// on calls such as "i = ghtnol(i);". This also provides type checking. inline uint16_t ghtons(uint16_t x) { return x; } inline uint32_t ghtonl(uint32_t x) { return x; } inline uint64_t ghtonll(uint64_t x) { return x; } @@ -129,36 +173,6 @@ inline constexpr bool IsLittleEndian() { return false; } #endif /* ENDIAN */ -inline uint8_t FromHost(uint8_t x) { return x; } -inline uint16_t FromHost(uint16_t x) { return FromHost16(x); } -inline uint32_t FromHost(uint32_t x) { return FromHost32(x); } -inline uint64_t FromHost(uint64_t x) { return FromHost64(x); } -inline uint8_t ToHost(uint8_t x) { return x; } -inline uint16_t ToHost(uint16_t x) { return ToHost16(x); } -inline uint32_t ToHost(uint32_t x) { return ToHost32(x); } -inline uint64_t ToHost(uint64_t x) { return ToHost64(x); } - -inline int8_t FromHost(int8_t x) { return x; } -inline int16_t FromHost(int16_t x) { - return bit_cast(FromHost16(bit_cast(x))); -} -inline int32_t FromHost(int32_t x) { - return bit_cast(FromHost32(bit_cast(x))); -} -inline int64_t FromHost(int64_t x) { - return bit_cast(FromHost64(bit_cast(x))); -} -inline int8_t ToHost(int8_t x) { return x; } -inline int16_t ToHost(int16_t x) { - return bit_cast(ToHost16(bit_cast(x))); -} -inline int32_t ToHost(int32_t x) { - return bit_cast(ToHost32(bit_cast(x))); -} -inline int64_t ToHost(int64_t x) { - return bit_cast(ToHost64(bit_cast(x))); -} - // Functions to do unaligned loads and stores in little-endian order. 
inline uint16_t Load16(const void *p) { return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p)); @@ -219,36 +233,6 @@ inline constexpr bool IsLittleEndian() { return false; } #endif /* ENDIAN */ -inline uint8_t FromHost(uint8_t x) { return x; } -inline uint16_t FromHost(uint16_t x) { return FromHost16(x); } -inline uint32_t FromHost(uint32_t x) { return FromHost32(x); } -inline uint64_t FromHost(uint64_t x) { return FromHost64(x); } -inline uint8_t ToHost(uint8_t x) { return x; } -inline uint16_t ToHost(uint16_t x) { return ToHost16(x); } -inline uint32_t ToHost(uint32_t x) { return ToHost32(x); } -inline uint64_t ToHost(uint64_t x) { return ToHost64(x); } - -inline int8_t FromHost(int8_t x) { return x; } -inline int16_t FromHost(int16_t x) { - return bit_cast(FromHost16(bit_cast(x))); -} -inline int32_t FromHost(int32_t x) { - return bit_cast(FromHost32(bit_cast(x))); -} -inline int64_t FromHost(int64_t x) { - return bit_cast(FromHost64(bit_cast(x))); -} -inline int8_t ToHost(int8_t x) { return x; } -inline int16_t ToHost(int16_t x) { - return bit_cast(ToHost16(bit_cast(x))); -} -inline int32_t ToHost(int32_t x) { - return bit_cast(ToHost32(bit_cast(x))); -} -inline int64_t ToHost(int64_t x) { - return bit_cast(ToHost64(bit_cast(x))); -} - // Functions to do unaligned loads and stores in big-endian order. 
inline uint16_t Load16(const void *p) { return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p)); diff --git a/abseil-cpp/absl/base/internal/exception_safety_testing.h b/abseil-cpp/absl/base/internal/exception_safety_testing.h index 77a5aec6..6ba89d05 100644 --- a/abseil-cpp/absl/base/internal/exception_safety_testing.h +++ b/abseil-cpp/absl/base/internal/exception_safety_testing.h @@ -536,22 +536,7 @@ class ThrowingValue : private exceptions_internal::TrackedObject { } // Memory management operators - static void* operator new(size_t s) noexcept( - IsSpecified(TypeSpec::kNoThrowNew)) { - if (!IsSpecified(TypeSpec::kNoThrowNew)) { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true); - } - return ::operator new(s); - } - - static void* operator new[](size_t s) noexcept( - IsSpecified(TypeSpec::kNoThrowNew)) { - if (!IsSpecified(TypeSpec::kNoThrowNew)) { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true); - } - return ::operator new[](s); - } - + // Args.. allows us to overload regular and placement new in one shot template static void* operator new(size_t s, Args&&... args) noexcept( IsSpecified(TypeSpec::kNoThrowNew)) { @@ -572,6 +557,12 @@ class ThrowingValue : private exceptions_internal::TrackedObject { // Abseil doesn't support throwing overloaded operator delete. These are // provided so a throwing operator-new can clean up after itself. + // + // We provide both regular and templated operator delete because if only the + // templated version is provided as we did with operator new, the compiler has + // no way of knowing which overload of operator delete to call. See + // https://en.cppreference.com/w/cpp/memory/new/operator_delete and + // https://en.cppreference.com/w/cpp/language/delete for the gory details. 
void operator delete(void* p) noexcept { ::operator delete(p); } template @@ -735,8 +726,9 @@ class ThrowingAllocator : private exceptions_internal::TrackedObject { ThrowingAllocator select_on_container_copy_construction() noexcept( IsSpecified(AllocSpec::kNoThrowAllocate)) { + auto& out = *this; ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION); - return *this; + return out; } template diff --git a/abseil-cpp/absl/profiling/internal/exponential_biased.cc b/abseil-cpp/absl/base/internal/exponential_biased.cc similarity index 95% rename from abseil-cpp/absl/profiling/internal/exponential_biased.cc rename to abseil-cpp/absl/base/internal/exponential_biased.cc index 81d9a757..1b30c061 100644 --- a/abseil-cpp/absl/profiling/internal/exponential_biased.cc +++ b/abseil-cpp/absl/base/internal/exponential_biased.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "absl/profiling/internal/exponential_biased.h" +#include "absl/base/internal/exponential_biased.h" #include @@ -26,7 +26,7 @@ namespace absl { ABSL_NAMESPACE_BEGIN -namespace profiling_internal { +namespace base_internal { // The algorithm generates a random number between 0 and 1 and applies the // inverse cumulative distribution function for an exponential. Specifically: @@ -64,7 +64,7 @@ int64_t ExponentialBiased::GetSkipCount(int64_t mean) { // Assume huge values are bias neutral, retain bias for next call. 
return std::numeric_limits::max() / 2; } - double value = std::rint(interval); + double value = std::round(interval); bias_ = interval - value; return value; } @@ -88,6 +88,6 @@ void ExponentialBiased::Initialize() { initialized_ = true; } -} // namespace profiling_internal +} // namespace base_internal ABSL_NAMESPACE_END } // namespace absl diff --git a/abseil-cpp/absl/profiling/internal/exponential_biased.h b/abseil-cpp/absl/base/internal/exponential_biased.h similarity index 94% rename from abseil-cpp/absl/profiling/internal/exponential_biased.h rename to abseil-cpp/absl/base/internal/exponential_biased.h index d31f7782..94f79a33 100644 --- a/abseil-cpp/absl/profiling/internal/exponential_biased.h +++ b/abseil-cpp/absl/base/internal/exponential_biased.h @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -#ifndef ABSL_PROFILING_INTERNAL_EXPONENTIAL_BIASED_H_ -#define ABSL_PROFILING_INTERNAL_EXPONENTIAL_BIASED_H_ +#ifndef ABSL_BASE_INTERNAL_EXPONENTIAL_BIASED_H_ +#define ABSL_BASE_INTERNAL_EXPONENTIAL_BIASED_H_ #include @@ -22,7 +22,7 @@ namespace absl { ABSL_NAMESPACE_BEGIN -namespace profiling_internal { +namespace base_internal { // ExponentialBiased provides a small and fast random number generator for a // rounded exponential distribution. 
This generator manages very little state, @@ -66,7 +66,7 @@ namespace profiling_internal { // Adjusting with rounding bias is relatively trivial: // // double value = bias_ + exponential_distribution(mean)(); -// double rounded_value = std::rint(value); +// double rounded_value = std::round(value); // bias_ = value - rounded_value; // return rounded_value; // @@ -123,8 +123,8 @@ inline uint64_t ExponentialBiased::NextRandom(uint64_t rnd) { return (prng_mult * rnd + prng_add) & prng_mod_mask; } -} // namespace profiling_internal +} // namespace base_internal ABSL_NAMESPACE_END } // namespace absl -#endif // ABSL_PROFILING_INTERNAL_EXPONENTIAL_BIASED_H_ +#endif // ABSL_BASE_INTERNAL_EXPONENTIAL_BIASED_H_ diff --git a/abseil-cpp/absl/profiling/internal/exponential_biased_test.cc b/abseil-cpp/absl/base/internal/exponential_biased_test.cc similarity index 97% rename from abseil-cpp/absl/profiling/internal/exponential_biased_test.cc rename to abseil-cpp/absl/base/internal/exponential_biased_test.cc index 6a6c317e..90a482d2 100644 --- a/abseil-cpp/absl/profiling/internal/exponential_biased_test.cc +++ b/abseil-cpp/absl/base/internal/exponential_biased_test.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "absl/profiling/internal/exponential_biased.h" +#include "absl/base/internal/exponential_biased.h" #include @@ -28,8 +28,7 @@ using ::testing::Ge; namespace absl { ABSL_NAMESPACE_BEGIN -namespace profiling_internal { -namespace { +namespace base_internal { MATCHER_P2(IsBetween, a, b, absl::StrCat(std::string(negation ? 
"isn't" : "is"), " between ", a, @@ -186,7 +185,7 @@ TEST(ExponentialBiasedTest, InitializationModes) { ABSL_CONST_INIT static ExponentialBiased eb_static; EXPECT_THAT(eb_static.GetSkipCount(2), Ge(0)); -#ifdef ABSL_HAVE_THREAD_LOCAL +#if ABSL_HAVE_THREAD_LOCAL thread_local ExponentialBiased eb_thread; EXPECT_THAT(eb_thread.GetSkipCount(2), Ge(0)); #endif @@ -195,7 +194,6 @@ TEST(ExponentialBiasedTest, InitializationModes) { EXPECT_THAT(eb_stack.GetSkipCount(2), Ge(0)); } -} // namespace -} // namespace profiling_internal +} // namespace base_internal ABSL_NAMESPACE_END } // namespace absl diff --git a/abseil-cpp/absl/base/internal/fast_type_id.h b/abseil-cpp/absl/base/internal/fast_type_id.h index a547b3a8..3db59e83 100644 --- a/abseil-cpp/absl/base/internal/fast_type_id.h +++ b/abseil-cpp/absl/base/internal/fast_type_id.h @@ -28,10 +28,8 @@ struct FastTypeTag { constexpr static char dummy_var = 0; }; -#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL template constexpr char FastTypeTag::dummy_var; -#endif // FastTypeId() evaluates at compile/link-time to a unique pointer for the // passed-in type. These are meant to be good match for keys into maps or diff --git a/abseil-cpp/absl/base/internal/invoke.h b/abseil-cpp/absl/base/internal/invoke.h index 643c2a42..5c71f328 100644 --- a/abseil-cpp/absl/base/internal/invoke.h +++ b/abseil-cpp/absl/base/internal/invoke.h @@ -14,8 +14,6 @@ // // absl::base_internal::invoke(f, args...) is an implementation of // INVOKE(f, args...) from section [func.require] of the C++ standard. -// When compiled as C++17 and later versions, it is implemented as an alias of -// std::invoke. 
// // [func.require] // Define INVOKE (f, t1, t2, ..., tN) as follows: @@ -37,26 +35,6 @@ #ifndef ABSL_BASE_INTERNAL_INVOKE_H_ #define ABSL_BASE_INTERNAL_INVOKE_H_ -#include "absl/base/config.h" - -#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L - -#include - -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace base_internal { - -using std::invoke; -using std::invoke_result_t; -using std::is_invocable_r; - -} // namespace base_internal -ABSL_NAMESPACE_END -} // namespace absl - -#else // ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L - #include #include #include @@ -102,18 +80,8 @@ struct MemFunAndRef : StrippedAccept { static decltype((std::declval().* std::declval())(std::declval()...)) Invoke(MemFun&& mem_fun, Obj&& obj, Args&&... args) { -// Ignore bogus GCC warnings on this line. -// See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101436 for similar example. -#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(11, 0) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Warray-bounds" -#pragma GCC diagnostic ignored "-Wmaybe-uninitialized" -#endif return (std::forward(obj).* std::forward(mem_fun))(std::forward(args)...); -#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(11, 0) -#pragma GCC diagnostic pop -#endif } }; @@ -212,30 +180,8 @@ invoke_result_t invoke(F&& f, Args&&... args) { return Invoker::type::Invoke(std::forward(f), std::forward(args)...); } - -template -struct IsInvocableRImpl : std::false_type {}; - -template -struct IsInvocableRImpl< - absl::void_t >, R, F, - Args...> - : std::integral_constant< - bool, - std::is_convertible, - R>::value || - std::is_void::value> {}; - -// Type trait whose member `value` is true if invoking `F` with `Args` is valid, -// and either the return type is convertible to `R`, or `R` is void. -// C++11-compatible version of `std::is_invocable_r`. 
-template -using is_invocable_r = IsInvocableRImpl; - } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl -#endif // ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L - #endif // ABSL_BASE_INTERNAL_INVOKE_H_ diff --git a/abseil-cpp/absl/base/internal/low_level_alloc_test.cc b/abseil-cpp/absl/base/internal/low_level_alloc_test.cc index 8fdec09e..2f2eaffa 100644 --- a/abseil-cpp/absl/base/internal/low_level_alloc_test.cc +++ b/abseil-cpp/absl/base/internal/low_level_alloc_test.cc @@ -21,10 +21,6 @@ #include #include -#ifdef __EMSCRIPTEN__ -#include -#endif - #include "absl/container/node_hash_map.h" namespace absl { @@ -86,7 +82,7 @@ static void Test(bool use_new_arena, bool call_malloc_hook, int n) { AllocMap::iterator it; BlockDesc block_desc; int rnd; - LowLevelAlloc::Arena *arena = nullptr; + LowLevelAlloc::Arena *arena = 0; if (use_new_arena) { int32_t flags = call_malloc_hook ? LowLevelAlloc::kCallMallocHook : 0; arena = LowLevelAlloc::NewArena(flags); @@ -101,10 +97,11 @@ static void Test(bool use_new_arena, bool call_malloc_hook, int n) { case 0: // coin came up heads: add a block using_low_level_alloc = true; block_desc.len = rand() & 0x3fff; - block_desc.ptr = reinterpret_cast( - arena == nullptr - ? LowLevelAlloc::Alloc(block_desc.len) - : LowLevelAlloc::AllocWithArena(block_desc.len, arena)); + block_desc.ptr = + reinterpret_cast( + arena == 0 + ? LowLevelAlloc::Alloc(block_desc.len) + : LowLevelAlloc::AllocWithArena(block_desc.len, arena)); using_low_level_alloc = false; RandomizeBlockDesc(&block_desc); rnd = rand(); @@ -161,20 +158,5 @@ ABSL_NAMESPACE_END int main(int argc, char *argv[]) { // The actual test runs in the global constructor of `before_main`. printf("PASS\n"); -#ifdef __EMSCRIPTEN__ - // clang-format off -// This is JS here. Don't try to format it. 
- MAIN_THREAD_EM_ASM({ - if (ENVIRONMENT_IS_WEB) { - if (typeof TEST_FINISH === 'function') { - TEST_FINISH($0); - } else { - console.error('Attempted to exit with status ' + $0); - console.error('But TEST_FINSIHED is not a function.'); - } - } - }, 0); -// clang-format on -#endif return 0; } diff --git a/abseil-cpp/absl/base/internal/low_level_scheduling.h b/abseil-cpp/absl/base/internal/low_level_scheduling.h index 9baccc06..6ef79fbf 100644 --- a/abseil-cpp/absl/base/internal/low_level_scheduling.h +++ b/abseil-cpp/absl/base/internal/low_level_scheduling.h @@ -61,8 +61,6 @@ class SchedulingGuard { public: // Returns true iff the calling thread may be cooperatively rescheduled. static bool ReschedulingIsAllowed(); - SchedulingGuard(const SchedulingGuard&) = delete; - SchedulingGuard& operator=(const SchedulingGuard&) = delete; private: // Disable cooperative rescheduling of the calling thread. It may still @@ -103,6 +101,9 @@ class SchedulingGuard { friend class SchedulingHelper; friend class SpinLock; friend int absl::synchronization_internal::MutexDelay(int32_t c, int mode); + + SchedulingGuard(const SchedulingGuard&) = delete; + SchedulingGuard& operator=(const SchedulingGuard&) = delete; }; //------------------------------------------------------------------------------ diff --git a/abseil-cpp/absl/profiling/internal/periodic_sampler.cc b/abseil-cpp/absl/base/internal/periodic_sampler.cc similarity index 88% rename from abseil-cpp/absl/profiling/internal/periodic_sampler.cc rename to abseil-cpp/absl/base/internal/periodic_sampler.cc index a738a82c..520dabba 100644 --- a/abseil-cpp/absl/profiling/internal/periodic_sampler.cc +++ b/abseil-cpp/absl/base/internal/periodic_sampler.cc @@ -12,15 +12,15 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "absl/profiling/internal/periodic_sampler.h" +#include "absl/base/internal/periodic_sampler.h" #include -#include "absl/profiling/internal/exponential_biased.h" +#include "absl/base/internal/exponential_biased.h" namespace absl { ABSL_NAMESPACE_BEGIN -namespace profiling_internal { +namespace base_internal { int64_t PeriodicSamplerBase::GetExponentialBiased(int period) noexcept { return rng_.GetStride(period); @@ -48,6 +48,6 @@ bool PeriodicSamplerBase::SubtleConfirmSample() noexcept { return true; } -} // namespace profiling_internal +} // namespace base_internal ABSL_NAMESPACE_END } // namespace absl diff --git a/abseil-cpp/absl/profiling/internal/periodic_sampler.h b/abseil-cpp/absl/base/internal/periodic_sampler.h similarity index 95% rename from abseil-cpp/absl/profiling/internal/periodic_sampler.h rename to abseil-cpp/absl/base/internal/periodic_sampler.h index 54f0af45..f8a86796 100644 --- a/abseil-cpp/absl/profiling/internal/periodic_sampler.h +++ b/abseil-cpp/absl/base/internal/periodic_sampler.h @@ -12,19 +12,19 @@ // See the License for the specific language governing permissions and // limitations under the License. -#ifndef ABSL_PROFILING_INTERNAL_PERIODIC_SAMPLER_H_ -#define ABSL_PROFILING_INTERNAL_PERIODIC_SAMPLER_H_ +#ifndef ABSL_BASE_INTERNAL_PERIODIC_SAMPLER_H_ +#define ABSL_BASE_INTERNAL_PERIODIC_SAMPLER_H_ #include #include +#include "absl/base/internal/exponential_biased.h" #include "absl/base/optimization.h" -#include "absl/profiling/internal/exponential_biased.h" namespace absl { ABSL_NAMESPACE_BEGIN -namespace profiling_internal { +namespace base_internal { // PeriodicSamplerBase provides the basic period sampler implementation. 
// @@ -149,7 +149,7 @@ class PeriodicSamplerBase { // ICC x64 (OK) : https://gcc.godbolt.org/z/ptTNfD // MSVC x64 (OK) : https://gcc.godbolt.org/z/76j4-5 uint64_t stride_ = 0; - absl::profiling_internal::ExponentialBiased rng_; + ExponentialBiased rng_; }; inline bool PeriodicSamplerBase::SubtleMaybeSample() noexcept { @@ -204,8 +204,8 @@ class PeriodicSampler final : public PeriodicSamplerBase { template std::atomic PeriodicSampler::period_(default_period); -} // namespace profiling_internal +} // namespace base_internal ABSL_NAMESPACE_END } // namespace absl -#endif // ABSL_PROFILING_INTERNAL_PERIODIC_SAMPLER_H_ +#endif // ABSL_BASE_INTERNAL_PERIODIC_SAMPLER_H_ diff --git a/abseil-cpp/absl/profiling/internal/periodic_sampler_benchmark.cc b/abseil-cpp/absl/base/internal/periodic_sampler_benchmark.cc similarity index 94% rename from abseil-cpp/absl/profiling/internal/periodic_sampler_benchmark.cc rename to abseil-cpp/absl/base/internal/periodic_sampler_benchmark.cc index 8f0e5574..5ad469ce 100644 --- a/abseil-cpp/absl/profiling/internal/periodic_sampler_benchmark.cc +++ b/abseil-cpp/absl/base/internal/periodic_sampler_benchmark.cc @@ -12,12 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "absl/profiling/internal/periodic_sampler.h" #include "benchmark/benchmark.h" +#include "absl/base/internal/periodic_sampler.h" namespace absl { ABSL_NAMESPACE_BEGIN -namespace profiling_internal { +namespace base_internal { namespace { template @@ -74,6 +74,6 @@ void BM_PeriodicSampler_Disabled(benchmark::State& state) { BENCHMARK(BM_PeriodicSampler_Disabled); } // namespace -} // namespace profiling_internal +} // namespace base_internal ABSL_NAMESPACE_END } // namespace absl diff --git a/abseil-cpp/absl/profiling/internal/periodic_sampler_test.cc b/abseil-cpp/absl/base/internal/periodic_sampler_test.cc similarity index 97% rename from abseil-cpp/absl/profiling/internal/periodic_sampler_test.cc rename to abseil-cpp/absl/base/internal/periodic_sampler_test.cc index ef986f38..3b301e37 100644 --- a/abseil-cpp/absl/profiling/internal/periodic_sampler_test.cc +++ b/abseil-cpp/absl/base/internal/periodic_sampler_test.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "absl/profiling/internal/periodic_sampler.h" +#include "absl/base/internal/periodic_sampler.h" #include // NOLINT(build/c++11) @@ -23,7 +23,7 @@ namespace absl { ABSL_NAMESPACE_BEGIN -namespace profiling_internal { +namespace base_internal { namespace { using testing::Eq; @@ -172,6 +172,6 @@ TEST(PeriodicSamplerTest, SetGlobalPeriod) { } } // namespace -} // namespace profiling_internal +} // namespace base_internal ABSL_NAMESPACE_END } // namespace absl diff --git a/abseil-cpp/absl/base/internal/prefetch.h b/abseil-cpp/absl/base/internal/prefetch.h deleted file mode 100644 index 06419283..00000000 --- a/abseil-cpp/absl/base/internal/prefetch.h +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright 2022 The Abseil Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#ifndef ABSL_BASE_INTERNAL_PREFETCH_H_ -#define ABSL_BASE_INTERNAL_PREFETCH_H_ - -#include "absl/base/config.h" - -#ifdef __SSE__ -#include -#endif - -#if defined(_MSC_VER) && defined(ABSL_INTERNAL_HAVE_SSE) -#include -#pragma intrinsic(_mm_prefetch) -#endif - -// Compatibility wrappers around __builtin_prefetch, to prefetch data -// for read if supported by the toolchain. - -// Move data into the cache before it is read, or "prefetch" it. -// -// The value of `addr` is the address of the memory to prefetch. If -// the target and compiler support it, data prefetch instructions are -// generated. If the prefetch is done some time before the memory is -// read, it may be in the cache by the time the read occurs. -// -// The function names specify the temporal locality heuristic applied, -// using the names of Intel prefetch instructions: -// -// T0 - high degree of temporal locality; data should be left in as -// many levels of the cache possible -// T1 - moderate degree of temporal locality -// T2 - low degree of temporal locality -// Nta - no temporal locality, data need not be left in the cache -// after the read -// -// Incorrect or gratuitous use of these functions can degrade -// performance, so use them only when representative benchmarks show -// an improvement. 
-// -// Example usage: -// -// absl::base_internal::PrefetchT0(addr); -// -// Currently, the different prefetch calls behave on some Intel -// architectures as follows: -// -// SNB..SKL SKX -// PrefetchT0() L1/L2/L3 L1/L2 -// PrefetchT1() L2/L3 L2 -// PrefetchT2() L2/L3 L2 -// PrefetchNta() L1/--/L3 L1* -// -// * On SKX PrefetchNta() will bring the line into L1 but will evict -// from L3 cache. This might result in surprising behavior. -// -// SNB = Sandy Bridge, SKL = Skylake, SKX = Skylake Xeon. -// -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace base_internal { - -void PrefetchT0(const void* addr); -void PrefetchT1(const void* addr); -void PrefetchT2(const void* addr); -void PrefetchNta(const void* addr); - -// Implementation details follow. - -#if ABSL_HAVE_BUILTIN(__builtin_prefetch) || defined(__GNUC__) - -#define ABSL_INTERNAL_HAVE_PREFETCH 1 - -// See __builtin_prefetch: -// https://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html. -// -// These functions speculatively load for read only. This is -// safe for all currently supported platforms. However, prefetch for -// store may have problems depending on the target platform. -// -inline void PrefetchT0(const void* addr) { - // Note: this uses prefetcht0 on Intel. - __builtin_prefetch(addr, 0, 3); -} -inline void PrefetchT1(const void* addr) { - // Note: this uses prefetcht1 on Intel. - __builtin_prefetch(addr, 0, 2); -} -inline void PrefetchT2(const void* addr) { - // Note: this uses prefetcht2 on Intel. - __builtin_prefetch(addr, 0, 1); -} -inline void PrefetchNta(const void* addr) { - // Note: this uses prefetchtnta on Intel. 
- __builtin_prefetch(addr, 0, 0); -} - -#elif defined(ABSL_INTERNAL_HAVE_SSE) - -#define ABSL_INTERNAL_HAVE_PREFETCH 1 - -inline void PrefetchT0(const void* addr) { - _mm_prefetch(reinterpret_cast(addr), _MM_HINT_T0); -} -inline void PrefetchT1(const void* addr) { - _mm_prefetch(reinterpret_cast(addr), _MM_HINT_T1); -} -inline void PrefetchT2(const void* addr) { - _mm_prefetch(reinterpret_cast(addr), _MM_HINT_T2); -} -inline void PrefetchNta(const void* addr) { - _mm_prefetch(reinterpret_cast(addr), _MM_HINT_NTA); -} - -#else -inline void PrefetchT0(const void*) {} -inline void PrefetchT1(const void*) {} -inline void PrefetchT2(const void*) {} -inline void PrefetchNta(const void*) {} -#endif - -} // namespace base_internal -ABSL_NAMESPACE_END -} // namespace absl - -#endif // ABSL_BASE_INTERNAL_PREFETCH_H_ diff --git a/abseil-cpp/absl/base/internal/prefetch_test.cc b/abseil-cpp/absl/base/internal/prefetch_test.cc deleted file mode 100644 index 7c1dae46..00000000 --- a/abseil-cpp/absl/base/internal/prefetch_test.cc +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2022 The Abseil Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#include "absl/base/internal/prefetch.h" - -#include "gtest/gtest.h" - -namespace { - -int number = 42; - -TEST(Prefetch, TemporalLocalityNone) { - absl::base_internal::PrefetchNta(&number); - EXPECT_EQ(number, 42); -} - -TEST(Prefetch, TemporalLocalityLow) { - absl::base_internal::PrefetchT2(&number); - EXPECT_EQ(number, 42); -} - -TEST(Prefetch, TemporalLocalityMedium) { - absl::base_internal::PrefetchT1(&number); - EXPECT_EQ(number, 42); -} - -TEST(Prefetch, TemporalLocalityHigh) { - absl::base_internal::PrefetchT0(&number); - EXPECT_EQ(number, 42); -} - -} // namespace diff --git a/abseil-cpp/absl/base/internal/raw_logging.cc b/abseil-cpp/absl/base/internal/raw_logging.cc index 54e71a3f..ae8754c6 100644 --- a/abseil-cpp/absl/base/internal/raw_logging.cc +++ b/abseil-cpp/absl/base/internal/raw_logging.cc @@ -14,17 +14,15 @@ #include "absl/base/internal/raw_logging.h" +#include #include -#include #include #include #include -#include #include "absl/base/attributes.h" #include "absl/base/config.h" #include "absl/base/internal/atomic_hook.h" -#include "absl/base/internal/errno_saver.h" #include "absl/base/log_severity.h" // We know how to perform low-level writes to stderr in POSIX and Windows. For @@ -38,8 +36,8 @@ // This preprocessor token is also defined in raw_io.cc. If you need to copy // this, consider moving both to config.h instead. #if defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \ - defined(__Fuchsia__) || defined(__native_client__) || \ - defined(__OpenBSD__) || defined(__EMSCRIPTEN__) || defined(__ASYLO__) + defined(__Fuchsia__) || defined(__native_client__) || \ + defined(__EMSCRIPTEN__) || defined(__ASYLO__) #include @@ -52,8 +50,7 @@ // ABSL_HAVE_SYSCALL_WRITE is defined when the platform provides the syscall // syscall(SYS_write, /*int*/ fd, /*char* */ buf, /*size_t*/ len); // for low level operations that want to avoid libc. 
-#if (defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__)) && \ - !defined(__ANDROID__) +#if (defined(__linux__) || defined(__FreeBSD__)) && !defined(__ANDROID__) #include #define ABSL_HAVE_SYSCALL_WRITE 1 #define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1 @@ -70,25 +67,28 @@ #undef ABSL_HAVE_RAW_IO #endif -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace raw_logging_internal { -namespace { - // TODO(gfalcon): We want raw-logging to work on as many platforms as possible. -// Explicitly `#error` out when not `ABSL_LOW_LEVEL_WRITE_SUPPORTED`, except for -// a selected set of platforms for which we expect not to be able to raw log. +// Explicitly #error out when not ABSL_LOW_LEVEL_WRITE_SUPPORTED, except for a +// selected set of platforms for which we expect not to be able to raw log. + +ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static absl::base_internal::AtomicHook< + absl::raw_logging_internal::LogPrefixHook> + log_prefix_hook; +ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static absl::base_internal::AtomicHook< + absl::raw_logging_internal::AbortHook> + abort_hook; #ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED -constexpr char kTruncated[] = " ... (message truncated)\n"; +static const char kTruncated[] = " ... (message truncated)\n"; // sprintf the format to the buffer, adjusting *buf and *size to reflect the // consumed bytes, and return whether the message fit without truncation. If // truncation occurred, if possible leave room in the buffer for the message // kTruncated[]. 
-bool VADoRawLog(char** buf, int* size, const char* format, va_list ap) - ABSL_PRINTF_ATTRIBUTE(3, 0); -bool VADoRawLog(char** buf, int* size, const char* format, va_list ap) { +inline static bool VADoRawLog(char** buf, int* size, const char* format, + va_list ap) ABSL_PRINTF_ATTRIBUTE(3, 0); +inline static bool VADoRawLog(char** buf, int* size, + const char* format, va_list ap) { int n = vsnprintf(*buf, *size, format, ap); bool result = true; if (n < 0 || n > *size) { @@ -96,7 +96,7 @@ bool VADoRawLog(char** buf, int* size, const char* format, va_list ap) { if (static_cast(*size) > sizeof(kTruncated)) { n = *size - sizeof(kTruncated); // room for truncation message } else { - n = 0; // no room for truncation message + n = 0; // no room for truncation message } } *size -= n; @@ -105,7 +105,9 @@ bool VADoRawLog(char** buf, int* size, const char* format, va_list ap) { } #endif // ABSL_LOW_LEVEL_WRITE_SUPPORTED -constexpr int kLogBufSize = 3000; +static constexpr int kLogBufSize = 3000; + +namespace { // CAVEAT: vsnprintf called from *DoRawLog below has some (exotic) code paths // that invoke malloc() and getenv() that might acquire some locks. @@ -126,18 +128,6 @@ bool DoRawLog(char** buf, int* size, const char* format, ...) 
{ return true; } -bool DefaultLogFilterAndPrefix(absl::LogSeverity, const char* file, int line, - char** buf, int* buf_size) { - DoRawLog(buf, buf_size, "[%s : %d] RAW: ", file, line); - return true; -} - -ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES -absl::base_internal::AtomicHook - log_filter_and_prefix_hook(DefaultLogFilterAndPrefix); -ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES -absl::base_internal::AtomicHook abort_hook; - void RawLogVA(absl::LogSeverity severity, const char* file, int line, const char* format, va_list ap) ABSL_PRINTF_ATTRIBUTE(4, 0); void RawLogVA(absl::LogSeverity severity, const char* file, int line, @@ -158,7 +148,14 @@ void RawLogVA(absl::LogSeverity severity, const char* file, int line, } #endif - enabled = log_filter_and_prefix_hook(severity, file, line, &buf, &size); + auto log_prefix_hook_ptr = log_prefix_hook.Load(); + if (log_prefix_hook_ptr) { + enabled = log_prefix_hook_ptr(severity, file, line, &buf, &size); + } else { + if (enabled) { + DoRawLog(&buf, &size, "[%s : %d] RAW: ", file, line); + } + } const char* const prefix_end = buf; #ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED @@ -169,12 +166,11 @@ void RawLogVA(absl::LogSeverity severity, const char* file, int line, } else { DoRawLog(&buf, &size, "%s", kTruncated); } - AsyncSignalSafeWriteToStderr(buffer, strlen(buffer)); + absl::raw_logging_internal::SafeWriteToStderr(buffer, strlen(buffer)); } #else static_cast(format); static_cast(ap); - static_cast(enabled); #endif // Abort the process after logging a FATAL message, even if the output itself @@ -185,23 +181,13 @@ void RawLogVA(absl::LogSeverity severity, const char* file, int line, } } -// Non-formatting version of RawLog(). -// -// TODO(gfalcon): When string_view no longer depends on base, change this -// interface to take its message as a string_view instead. 
-void DefaultInternalLog(absl::LogSeverity severity, const char* file, int line, - const std::string& message) { - RawLog(severity, file, line, "%.*s", static_cast(message.size()), - message.data()); -} - } // namespace -void AsyncSignalSafeWriteToStderr(const char* s, size_t len) { - absl::base_internal::ErrnoSaver errno_saver; +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace raw_logging_internal { +void SafeWriteToStderr(const char *s, size_t len) { #if defined(ABSL_HAVE_SYSCALL_WRITE) - // We prefer calling write via `syscall` to minimize the risk of libc doing - // something "helpful". syscall(SYS_write, STDERR_FILENO, s, len); #elif defined(ABSL_HAVE_POSIX_WRITE) write(STDERR_FILENO, s, len); @@ -214,6 +200,8 @@ void AsyncSignalSafeWriteToStderr(const char* s, size_t len) { #endif } +void RawLog(absl::LogSeverity severity, const char* file, int line, + const char* format, ...) ABSL_PRINTF_ATTRIBUTE(4, 5); void RawLog(absl::LogSeverity severity, const char* file, int line, const char* format, ...) { va_list ap; @@ -222,6 +210,15 @@ void RawLog(absl::LogSeverity severity, const char* file, int line, va_end(ap); } +// Non-formatting version of RawLog(). +// +// TODO(gfalcon): When string_view no longer depends on base, change this +// interface to take its message as a string_view instead. 
+static void DefaultInternalLog(absl::LogSeverity severity, const char* file, + int line, const std::string& message) { + RawLog(severity, file, line, "%s", message.c_str()); +} + bool RawLoggingFullySupported() { #ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED return true; @@ -234,12 +231,6 @@ ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_DLL absl::base_internal::AtomicHook internal_log_function(DefaultInternalLog); -void RegisterLogFilterAndPrefixHook(LogFilterAndPrefixHook func) { - log_filter_and_prefix_hook.Store(func); -} - -void RegisterAbortHook(AbortHook func) { abort_hook.Store(func); } - void RegisterInternalLogFunction(InternalLogFunction func) { internal_log_function.Store(func); } diff --git a/abseil-cpp/absl/base/internal/raw_logging.h b/abseil-cpp/absl/base/internal/raw_logging.h index 0747c9df..2508f3cf 100644 --- a/abseil-cpp/absl/base/internal/raw_logging.h +++ b/abseil-cpp/absl/base/internal/raw_logging.h @@ -72,14 +72,12 @@ // // The API is a subset of the above: each macro only takes two arguments. Use // StrCat if you need to build a richer message. -#define ABSL_INTERNAL_LOG(severity, message) \ - do { \ - constexpr const char* absl_raw_logging_internal_filename = __FILE__; \ - ::absl::raw_logging_internal::internal_log_function( \ - ABSL_RAW_LOGGING_INTERNAL_##severity, \ - absl_raw_logging_internal_filename, __LINE__, message); \ - if (ABSL_RAW_LOGGING_INTERNAL_##severity == ::absl::LogSeverity::kFatal) \ - ABSL_INTERNAL_UNREACHABLE; \ +#define ABSL_INTERNAL_LOG(severity, message) \ + do { \ + constexpr const char* absl_raw_logging_internal_filename = __FILE__; \ + ::absl::raw_logging_internal::internal_log_function( \ + ABSL_RAW_LOGGING_INTERNAL_##severity, \ + absl_raw_logging_internal_filename, __LINE__, message); \ } while (0) #define ABSL_INTERNAL_CHECK(condition, message) \ @@ -109,9 +107,12 @@ namespace raw_logging_internal { void RawLog(absl::LogSeverity severity, const char* file, int line, const char* format, ...) 
ABSL_PRINTF_ATTRIBUTE(4, 5); -// Writes the provided buffer directly to stderr, in a signal-safe, low-level -// manner. -void AsyncSignalSafeWriteToStderr(const char* s, size_t len); +// Writes the provided buffer directly to stderr, in a safe, low-level manner. +// +// In POSIX this means calling write(), which is async-signal safe and does +// not malloc. If the platform supports the SYS_write syscall, we invoke that +// directly to side-step any libc interception. +void SafeWriteToStderr(const char *s, size_t len); // compile-time function to get the "base" filename, that is, the part of // a filename after the last "/" or "\" path separator. The search starts at @@ -145,12 +146,11 @@ bool RawLoggingFullySupported(); // 'severity' is the severity level of the message being written. // 'file' and 'line' are the file and line number where the ABSL_RAW_LOG macro // was located. -// 'buf' and 'buf_size' are pointers to the buffer and buffer size. If the -// hook writes a prefix, it must increment *buf and decrement *buf_size +// 'buffer' and 'buf_size' are pointers to the buffer and buffer size. If the +// hook writes a prefix, it must increment *buffer and decrement *buf_size // accordingly. -using LogFilterAndPrefixHook = bool (*)(absl::LogSeverity severity, - const char* file, int line, char** buf, - int* buf_size); +using LogPrefixHook = bool (*)(absl::LogSeverity severity, const char* file, + int line, char** buffer, int* buf_size); // Function type for a raw_logging customization hook called to abort a process // when a FATAL message is logged. If the provided AbortHook() returns, the @@ -160,10 +160,7 @@ using LogFilterAndPrefixHook = bool (*)(absl::LogSeverity severity, // was located. // The NUL-terminated logged message lives in the buffer between 'buf_start' // and 'buf_end'. 'prefix_end' points to the first non-prefix character of the -// buffer (as written by the LogFilterAndPrefixHook.) 
-// -// The lifetime of the filename and message buffers will not end while the -// process remains alive. +// buffer (as written by the LogPrefixHook.) using AbortHook = void (*)(const char* file, int line, const char* buf_start, const char* prefix_end, const char* buf_end); @@ -179,14 +176,6 @@ ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_DLL extern base_internal::AtomicHook< InternalLogFunction> internal_log_function; -// Registers hooks of the above types. Only a single hook of each type may be -// registered. It is an error to call these functions multiple times with -// different input arguments. -// -// These functions are safe to call at any point during initialization; they do -// not block or malloc, and are async-signal safe. -void RegisterLogFilterAndPrefixHook(LogFilterAndPrefixHook func); -void RegisterAbortHook(AbortHook func); void RegisterInternalLogFunction(InternalLogFunction func); } // namespace raw_logging_internal diff --git a/abseil-cpp/absl/base/internal/spinlock.cc b/abseil-cpp/absl/base/internal/spinlock.cc index 9b5ed6e4..a7d44f3e 100644 --- a/abseil-cpp/absl/base/internal/spinlock.cc +++ b/abseil-cpp/absl/base/internal/spinlock.cc @@ -19,7 +19,6 @@ #include #include "absl/base/attributes.h" -#include "absl/base/config.h" #include "absl/base/internal/atomic_hook.h" #include "absl/base/internal/cycleclock.h" #include "absl/base/internal/spinlock_wait.h" @@ -67,14 +66,12 @@ void RegisterSpinLockProfiler(void (*fn)(const void *contendedlock, submit_profile_data.Store(fn); } -#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL // Static member variable definitions. constexpr uint32_t SpinLock::kSpinLockHeld; constexpr uint32_t SpinLock::kSpinLockCooperative; constexpr uint32_t SpinLock::kSpinLockDisabledScheduling; constexpr uint32_t SpinLock::kSpinLockSleeper; constexpr uint32_t SpinLock::kWaitTimeMask; -#endif // Uncommon constructors. 
SpinLock::SpinLock(base_internal::SchedulingMode mode) @@ -128,9 +125,8 @@ void SpinLock::SlowLock() { // it as having a sleeper. if ((lock_value & kWaitTimeMask) == 0) { // Here, just "mark" that the thread is going to sleep. Don't store the - // lock wait time in the lock -- the lock word stores the amount of time - // that the current holder waited before acquiring the lock, not the wait - // time of any thread currently waiting to acquire it. + // lock wait time in the lock as that will cause the current lock + // owner to think it experienced contention. if (lockword_.compare_exchange_strong( lock_value, lock_value | kSpinLockSleeper, std::memory_order_relaxed, std::memory_order_relaxed)) { @@ -144,14 +140,6 @@ void SpinLock::SlowLock() { // this thread obtains the lock. lock_value = TryLockInternal(lock_value, wait_cycles); continue; // Skip the delay at the end of the loop. - } else if ((lock_value & kWaitTimeMask) == 0) { - // The lock is still held, without a waiter being marked, but something - // else about the lock word changed, causing our CAS to fail. For - // example, a new lock holder may have acquired the lock with - // kSpinLockDisabledScheduling set, whereas the previous holder had not - // set that flag. In this case, attempt again to mark ourselves as a - // waiter. - continue; } } diff --git a/abseil-cpp/absl/base/internal/spinlock.h b/abseil-cpp/absl/base/internal/spinlock.h index 6d8d8ddd..e6ac9e64 100644 --- a/abseil-cpp/absl/base/internal/spinlock.h +++ b/abseil-cpp/absl/base/internal/spinlock.h @@ -15,16 +15,17 @@ // // Most users requiring mutual exclusion should use Mutex. 
-// SpinLock is provided for use in two situations: -// - for use by Abseil internal code that Mutex itself depends on +// SpinLock is provided for use in three situations: +// - for use in code that Mutex itself depends on +// - to get a faster fast-path release under low contention (without an +// atomic read-modify-write) In return, SpinLock has worse behaviour under +// contention, which is why Mutex is preferred in most situations. // - for async signal safety (see below) // SpinLock is async signal safe. If a spinlock is used within a signal // handler, all code that acquires the lock must ensure that the signal cannot // arrive while they are holding the lock. Typically, this is done by blocking // the signal. -// -// Threads waiting on a SpinLock may be woken in an arbitrary order. #ifndef ABSL_BASE_INTERNAL_SPINLOCK_H_ #define ABSL_BASE_INTERNAL_SPINLOCK_H_ @@ -120,14 +121,6 @@ class ABSL_LOCKABLE SpinLock { return (lockword_.load(std::memory_order_relaxed) & kSpinLockHeld) != 0; } - // Return immediately if this thread holds the SpinLock exclusively. - // Otherwise, report an error by crashing with a diagnostic. - inline void AssertHeld() const ABSL_ASSERT_EXCLUSIVE_LOCK() { - if (!IsHeld()) { - ABSL_RAW_LOG(FATAL, "thread should hold the lock on SpinLock"); - } - } - protected: // These should not be exported except for testing. @@ -147,20 +140,8 @@ class ABSL_LOCKABLE SpinLock { // // bit[0] encodes whether a lock is being held. // bit[1] encodes whether a lock uses cooperative scheduling. - // bit[2] encodes whether the current lock holder disabled scheduling when - // acquiring the lock. Only set when kSpinLockHeld is also set. + // bit[2] encodes whether a lock disables scheduling. // bit[3:31] encodes time a lock spent on waiting as a 29-bit unsigned int. - // This is set by the lock holder to indicate how long it waited on - // the lock before eventually acquiring it. 
The number of cycles is - // encoded as a 29-bit unsigned int, or in the case that the current - // holder did not wait but another waiter is queued, the LSB - // (kSpinLockSleeper) is set. The implementation does not explicitly - // track the number of queued waiters beyond this. It must always be - // assumed that waiters may exist if the current holder was required to - // queue. - // - // Invariant: if the lock is not held, the value is either 0 or - // kSpinLockCooperative. static constexpr uint32_t kSpinLockHeld = 1; static constexpr uint32_t kSpinLockCooperative = 2; static constexpr uint32_t kSpinLockDisabledScheduling = 4; diff --git a/abseil-cpp/absl/base/internal/spinlock_akaros.inc b/abseil-cpp/absl/base/internal/spinlock_akaros.inc index 7b0cada4..bc468940 100644 --- a/abseil-cpp/absl/base/internal/spinlock_akaros.inc +++ b/abseil-cpp/absl/base/internal/spinlock_akaros.inc @@ -20,7 +20,7 @@ extern "C" { -ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)( +ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay( std::atomic* /* lock_word */, uint32_t /* value */, int /* loop */, absl::base_internal::SchedulingMode /* mode */) { // In Akaros, one must take care not to call anything that could cause a @@ -29,7 +29,7 @@ ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)( // arbitrary code. 
} -ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)( +ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockWake( std::atomic* /* lock_word */, bool /* all */) {} } // extern "C" diff --git a/abseil-cpp/absl/base/internal/spinlock_linux.inc b/abseil-cpp/absl/base/internal/spinlock_linux.inc index fe8ba674..e31c6ed4 100644 --- a/abseil-cpp/absl/base/internal/spinlock_linux.inc +++ b/abseil-cpp/absl/base/internal/spinlock_linux.inc @@ -56,15 +56,18 @@ static_assert(sizeof(std::atomic) == sizeof(int), extern "C" { -ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)( - std::atomic *w, uint32_t value, int, +ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay( + std::atomic *w, uint32_t value, int loop, absl::base_internal::SchedulingMode) { absl::base_internal::ErrnoSaver errno_saver; - syscall(SYS_futex, w, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, value, nullptr); + struct timespec tm; + tm.tv_sec = 0; + tm.tv_nsec = absl::base_internal::SpinLockSuggestedDelayNS(loop); + syscall(SYS_futex, w, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, value, &tm); } -ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)( - std::atomic *w, bool all) { +ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockWake(std::atomic *w, + bool all) { syscall(SYS_futex, w, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, all ? 
INT_MAX : 1, 0); } diff --git a/abseil-cpp/absl/base/internal/spinlock_posix.inc b/abseil-cpp/absl/base/internal/spinlock_posix.inc index 4f6f887d..fcd21b15 100644 --- a/abseil-cpp/absl/base/internal/spinlock_posix.inc +++ b/abseil-cpp/absl/base/internal/spinlock_posix.inc @@ -25,7 +25,7 @@ extern "C" { -ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)( +ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay( std::atomic* /* lock_word */, uint32_t /* value */, int loop, absl::base_internal::SchedulingMode /* mode */) { absl::base_internal::ErrnoSaver errno_saver; @@ -40,7 +40,7 @@ ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)( } } -ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)( +ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockWake( std::atomic* /* lock_word */, bool /* all */) {} } // extern "C" diff --git a/abseil-cpp/absl/base/internal/spinlock_wait.h b/abseil-cpp/absl/base/internal/spinlock_wait.h index 9a1adcda..169bc749 100644 --- a/abseil-cpp/absl/base/internal/spinlock_wait.h +++ b/abseil-cpp/absl/base/internal/spinlock_wait.h @@ -39,22 +39,22 @@ struct SpinLockWaitTransition { // satisfying 0<=i *w, int n, const SpinLockWaitTransition trans[], SchedulingMode scheduling_mode); -// If possible, wake some thread that has called SpinLockDelay(w, ...). If `all` -// is true, wake all such threads. On some systems, this may be a no-op; on -// those systems, threads calling SpinLockDelay() will always wake eventually -// even if SpinLockWake() is never called. +// If possible, wake some thread that has called SpinLockDelay(w, ...). If +// "all" is true, wake all such threads. This call is a hint, and on some +// systems it may be a no-op; threads calling SpinLockDelay() will always wake +// eventually even if SpinLockWake() is never called. 
void SpinLockWake(std::atomic *w, bool all); // Wait for an appropriate spin delay on iteration "loop" of a // spin loop on location *w, whose previously observed value was "value". // SpinLockDelay() may do nothing, may yield the CPU, may sleep a clock tick, -// or may wait for a call to SpinLockWake(w). +// or may wait for a delay that can be truncated by a call to SpinLockWake(w). +// In all cases, it must return in bounded time even if SpinLockWake() is not +// called. void SpinLockDelay(std::atomic *w, uint32_t value, int loop, base_internal::SchedulingMode scheduling_mode); @@ -73,23 +73,21 @@ ABSL_NAMESPACE_END // By changing our extension points to be extern "C", we dodge this // check. extern "C" { -void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(std::atomic *w, - bool all); -void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)( +void AbslInternalSpinLockWake(std::atomic *w, bool all); +void AbslInternalSpinLockDelay( std::atomic *w, uint32_t value, int loop, absl::base_internal::SchedulingMode scheduling_mode); } inline void absl::base_internal::SpinLockWake(std::atomic *w, bool all) { - ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(w, all); + AbslInternalSpinLockWake(w, all); } inline void absl::base_internal::SpinLockDelay( std::atomic *w, uint32_t value, int loop, absl::base_internal::SchedulingMode scheduling_mode) { - ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay) - (w, value, loop, scheduling_mode); + AbslInternalSpinLockDelay(w, value, loop, scheduling_mode); } #endif // ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_ diff --git a/abseil-cpp/absl/base/internal/spinlock_win32.inc b/abseil-cpp/absl/base/internal/spinlock_win32.inc index 9d224813..78654b5b 100644 --- a/abseil-cpp/absl/base/internal/spinlock_win32.inc +++ b/abseil-cpp/absl/base/internal/spinlock_win32.inc @@ -20,9 +20,9 @@ extern "C" { -void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)( - std::atomic* /* lock_word */, uint32_t /* value */, int loop, - 
absl::base_internal::SchedulingMode /* mode */) { +void AbslInternalSpinLockDelay(std::atomic* /* lock_word */, + uint32_t /* value */, int loop, + absl::base_internal::SchedulingMode /* mode */) { if (loop == 0) { } else if (loop == 1) { Sleep(0); @@ -31,7 +31,7 @@ void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)( } } -void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)( - std::atomic* /* lock_word */, bool /* all */) {} +void AbslInternalSpinLockWake(std::atomic* /* lock_word */, + bool /* all */) {} } // extern "C" diff --git a/abseil-cpp/absl/base/internal/strerror.cc b/abseil-cpp/absl/base/internal/strerror.cc index 0d6226fd..d66ba120 100644 --- a/abseil-cpp/absl/base/internal/strerror.cc +++ b/abseil-cpp/absl/base/internal/strerror.cc @@ -51,6 +51,7 @@ const char* StrErrorAdaptor(int errnum, char* buf, size_t buflen) { } std::string StrErrorInternal(int errnum) { + absl::base_internal::ErrnoSaver errno_saver; char buf[100]; const char* str = StrErrorAdaptor(errnum, buf, sizeof buf); if (*str == '\0') { @@ -75,7 +76,6 @@ std::array* NewStrErrorTable() { } // namespace std::string StrError(int errnum) { - absl::base_internal::ErrnoSaver errno_saver; static const auto* table = NewStrErrorTable(); if (errnum >= 0 && errnum < static_cast(table->size())) { return (*table)[errnum]; diff --git a/abseil-cpp/absl/base/internal/strerror_test.cc b/abseil-cpp/absl/base/internal/strerror_test.cc index e32d5b5c..a53da97f 100644 --- a/abseil-cpp/absl/base/internal/strerror_test.cc +++ b/abseil-cpp/absl/base/internal/strerror_test.cc @@ -62,14 +62,12 @@ TEST(StrErrorTest, MultipleThreads) { ++counter; errno = ERANGE; const std::string value = absl::base_internal::StrError(i); - // EXPECT_* could change errno. Stash it first. - int check_err = errno; - EXPECT_THAT(check_err, Eq(ERANGE)); // Only the GNU implementation is guaranteed to provide the // string "Unknown error nnn". POSIX doesn't say anything. 
if (!absl::StartsWith(value, "Unknown error ")) { - EXPECT_THAT(value, Eq(expected_strings[i])); + EXPECT_THAT(absl::base_internal::StrError(i), Eq(expected_strings[i])); } + EXPECT_THAT(errno, Eq(ERANGE)); } }; diff --git a/abseil-cpp/absl/base/internal/sysinfo.cc b/abseil-cpp/absl/base/internal/sysinfo.cc index c8366df1..349d9268 100644 --- a/abseil-cpp/absl/base/internal/sysinfo.cc +++ b/abseil-cpp/absl/base/internal/sysinfo.cc @@ -61,77 +61,9 @@ namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { -namespace { - -#if defined(_WIN32) - -// Returns number of bits set in `bitMask` -DWORD Win32CountSetBits(ULONG_PTR bitMask) { - for (DWORD bitSetCount = 0; ; ++bitSetCount) { - if (bitMask == 0) return bitSetCount; - bitMask &= bitMask - 1; - } -} - -// Returns the number of logical CPUs using GetLogicalProcessorInformation(), or -// 0 if the number of processors is not available or can not be computed. -// https://docs.microsoft.com/en-us/windows/win32/api/sysinfoapi/nf-sysinfoapi-getlogicalprocessorinformation -int Win32NumCPUs() { -#pragma comment(lib, "kernel32.lib") - using Info = SYSTEM_LOGICAL_PROCESSOR_INFORMATION; - - DWORD info_size = sizeof(Info); - Info* info(static_cast(malloc(info_size))); - if (info == nullptr) return 0; - - bool success = GetLogicalProcessorInformation(info, &info_size); - if (!success && GetLastError() == ERROR_INSUFFICIENT_BUFFER) { - free(info); - info = static_cast(malloc(info_size)); - if (info == nullptr) return 0; - success = GetLogicalProcessorInformation(info, &info_size); - } - - DWORD logicalProcessorCount = 0; - if (success) { - Info* ptr = info; - DWORD byteOffset = 0; - while (byteOffset + sizeof(Info) <= info_size) { - switch (ptr->Relationship) { - case RelationProcessorCore: - logicalProcessorCount += Win32CountSetBits(ptr->ProcessorMask); - break; - - case RelationNumaNode: - case RelationCache: - case RelationProcessorPackage: - // Ignore other entries - break; - - default: - // Ignore unknown entries - 
break; - } - byteOffset += sizeof(Info); - ptr++; - } - } - free(info); - return logicalProcessorCount; -} - -#endif - -} // namespace - static int GetNumCPUs() { #if defined(__myriad2__) return 1; -#elif defined(_WIN32) - const unsigned hardware_concurrency = Win32NumCPUs(); - return hardware_concurrency ? hardware_concurrency : 1; -#elif defined(_AIX) - return sysconf(_SC_NPROCESSORS_ONLN); #else // Other possibilities: // - Read /sys/devices/system/cpu/online and use cpumask_parse() @@ -494,7 +426,7 @@ pid_t GetTID() { // userspace construct) to avoid unnecessary system calls. Without this caching, // it can take roughly 98ns, while it takes roughly 1ns with this caching. pid_t GetCachedTID() { -#ifdef ABSL_HAVE_THREAD_LOCAL +#if ABSL_HAVE_THREAD_LOCAL static thread_local pid_t thread_id = GetTID(); return thread_id; #else diff --git a/abseil-cpp/absl/base/internal/sysinfo_test.cc b/abseil-cpp/absl/base/internal/sysinfo_test.cc index f305b6c5..fa8b88b1 100644 --- a/abseil-cpp/absl/base/internal/sysinfo_test.cc +++ b/abseil-cpp/absl/base/internal/sysinfo_test.cc @@ -37,6 +37,18 @@ TEST(SysinfoTest, NumCPUs) { << "NumCPUs() should not have the default value of 0"; } +TEST(SysinfoTest, NominalCPUFrequency) { +#if !(defined(__aarch64__) && defined(__linux__)) && !defined(__EMSCRIPTEN__) + EXPECT_GE(NominalCPUFrequency(), 1000.0) + << "NominalCPUFrequency() did not return a reasonable value"; +#else + // Aarch64 cannot read the CPU frequency from sysfs, so we get back 1.0. + // Emscripten does not have a sysfs to read from at all. + EXPECT_EQ(NominalCPUFrequency(), 1.0) + << "CPU frequency detection was fixed! Please update unittest."; +#endif +} + TEST(SysinfoTest, GetTID) { EXPECT_EQ(GetTID(), GetTID()); // Basic compile and equality test. 
#ifdef __native_client__ diff --git a/abseil-cpp/absl/base/internal/thread_identity.cc b/abseil-cpp/absl/base/internal/thread_identity.cc index 79853f09..d63a04ae 100644 --- a/abseil-cpp/absl/base/internal/thread_identity.cc +++ b/abseil-cpp/absl/base/internal/thread_identity.cc @@ -14,7 +14,7 @@ #include "absl/base/internal/thread_identity.h" -#if !defined(_WIN32) || defined(__MINGW32__) +#ifndef _WIN32 #include #include #endif @@ -23,7 +23,6 @@ #include #include -#include "absl/base/attributes.h" #include "absl/base/call_once.h" #include "absl/base/internal/raw_logging.h" #include "absl/base/internal/spinlock.h" @@ -54,12 +53,9 @@ void AllocateThreadIdentityKey(ThreadIdentityReclaimerFunction reclaimer) { // exist within a process (via dlopen() or similar), references to // thread_identity_ptr from each instance of the code will refer to // *different* instances of this ptr. -// Apple platforms have the visibility attribute, but issue a compile warning -// that protected visibility is unsupported. -ABSL_CONST_INIT // Must come before __attribute__((visibility("protected"))) -#if ABSL_HAVE_ATTRIBUTE(visibility) && !defined(__APPLE__) +#ifdef __GNUC__ __attribute__((visibility("protected"))) -#endif // ABSL_HAVE_ATTRIBUTE(visibility) && !defined(__APPLE__) +#endif // __GNUC__ #if ABSL_PER_THREAD_TLS // Prefer __thread to thread_local as benchmarks indicate it is a bit faster. ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity* thread_identity_ptr = nullptr; @@ -121,10 +117,10 @@ void SetCurrentThreadIdentity( ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11 // Please see the comment on `CurrentThreadIdentityIfPresent` in -// thread_identity.h. When we cannot expose thread_local variables in -// headers, we opt for the correct-but-slower option of not inlining this -// function. -#ifndef ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT +// thread_identity.h. 
Because DLLs cannot expose thread_local variables in +// headers, we opt for the correct-but-slower option of placing the definition +// of this function only in a translation unit inside DLL. +#if defined(ABSL_BUILD_DLL) || defined(ABSL_CONSUME_DLL) ThreadIdentity* CurrentThreadIdentityIfPresent() { return thread_identity_ptr; } #endif #endif diff --git a/abseil-cpp/absl/base/internal/thread_identity.h b/abseil-cpp/absl/base/internal/thread_identity.h index 659694b3..ceb109b4 100644 --- a/abseil-cpp/absl/base/internal/thread_identity.h +++ b/abseil-cpp/absl/base/internal/thread_identity.h @@ -32,7 +32,6 @@ #include "absl/base/config.h" #include "absl/base/internal/per_thread_tls.h" -#include "absl/base/optimization.h" namespace absl { ABSL_NAMESPACE_BEGIN @@ -70,28 +69,30 @@ struct PerThreadSynch { // is using this PerThreadSynch as a terminator. Its // skip field must not be filled in because the loop // might then skip over the terminator. - bool wake; // This thread is to be woken from a Mutex. - // If "x" is on a waiter list for a mutex, "x->cond_waiter" is true iff the - // waiter is waiting on the mutex as part of a CV Wait or Mutex Await. - // - // The value of "x->cond_waiter" is meaningless if "x" is not on a - // Mutex waiter list. - bool cond_waiter; - bool maybe_unlocking; // Valid at head of Mutex waiter queue; - // true if UnlockSlow could be searching - // for a waiter to wake. Used for an optimization - // in Enqueue(). true is always a valid value. - // Can be reset to false when the unlocker or any - // writer releases the lock, or a reader fully - // releases the lock. It may not be set to false - // by a reader that decrements the count to - // non-zero. protected by mutex spinlock - bool suppress_fatal_errors; // If true, try to proceed even in the face - // of broken invariants. This is used within - // fatal signal handlers to improve the - // chances of debug logging information being - // output successfully. 
- int priority; // Priority of thread (updated every so often). + + // The wait parameters of the current wait. waitp is null if the + // thread is not waiting. Transitions from null to non-null must + // occur before the enqueue commit point (state = kQueued in + // Enqueue() and CondVarEnqueue()). Transitions from non-null to + // null must occur after the wait is finished (state = kAvailable in + // Mutex::Block() and CondVar::WaitCommon()). This field may be + // changed only by the thread that describes this PerThreadSynch. A + // special case is Fer(), which calls Enqueue() on another thread, + // but with an identical SynchWaitParams pointer, thus leaving the + // pointer unchanged. + SynchWaitParams *waitp; + + bool suppress_fatal_errors; // If true, try to proceed even in the face of + // broken invariants. This is used within fatal + // signal handlers to improve the chances of + // debug logging information being output + // successfully. + + intptr_t readers; // Number of readers in mutex. + int priority; // Priority of thread (updated every so often). + + // When priority will next be read (cycles). + int64_t next_priority_read_cycles; // State values: // kAvailable: This PerThreadSynch is available. @@ -110,30 +111,30 @@ struct PerThreadSynch { }; std::atomic state; - // The wait parameters of the current wait. waitp is null if the - // thread is not waiting. Transitions from null to non-null must - // occur before the enqueue commit point (state = kQueued in - // Enqueue() and CondVarEnqueue()). Transitions from non-null to - // null must occur after the wait is finished (state = kAvailable in - // Mutex::Block() and CondVar::WaitCommon()). This field may be - // changed only by the thread that describes this PerThreadSynch. A - // special case is Fer(), which calls Enqueue() on another thread, - // but with an identical SynchWaitParams pointer, thus leaving the - // pointer unchanged. 
- SynchWaitParams* waitp; + bool maybe_unlocking; // Valid at head of Mutex waiter queue; + // true if UnlockSlow could be searching + // for a waiter to wake. Used for an optimization + // in Enqueue(). true is always a valid value. + // Can be reset to false when the unlocker or any + // writer releases the lock, or a reader fully releases + // the lock. It may not be set to false by a reader + // that decrements the count to non-zero. + // protected by mutex spinlock - intptr_t readers; // Number of readers in mutex. + bool wake; // This thread is to be woken from a Mutex. - // When priority will next be read (cycles). - int64_t next_priority_read_cycles; + // If "x" is on a waiter list for a mutex, "x->cond_waiter" is true iff the + // waiter is waiting on the mutex as part of a CV Wait or Mutex Await. + // + // The value of "x->cond_waiter" is meaningless if "x" is not on a + // Mutex waiter list. + bool cond_waiter; // Locks held; used during deadlock detection. // Allocated in Synch_GetAllLocks() and freed in ReclaimThreadIdentity(). SynchLocksHeld *all_locks; }; -// The instances of this class are allocated in NewThreadIdentity() with an -// alignment of PerThreadSynch::kAlignment. struct ThreadIdentity { // Must be the first member. The Mutex implementation requires that // the PerThreadSynch object associated with each thread is @@ -143,7 +144,7 @@ struct ThreadIdentity { // Private: Reserved for absl::synchronization_internal::Waiter. struct WaiterState { - alignas(void*) char data[128]; + char data[128]; } waiter_state; // Used by PerThreadSem::{Get,Set}ThreadBlockedCounter(). 
@@ -188,32 +189,30 @@ void ClearCurrentThreadIdentity(); // May be chosen at compile time via: -DABSL_FORCE_THREAD_IDENTITY_MODE= #ifdef ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC -#error ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC cannot be directly set +#error ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC cannot be direcly set #else #define ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC 0 #endif #ifdef ABSL_THREAD_IDENTITY_MODE_USE_TLS -#error ABSL_THREAD_IDENTITY_MODE_USE_TLS cannot be directly set +#error ABSL_THREAD_IDENTITY_MODE_USE_TLS cannot be direcly set #else #define ABSL_THREAD_IDENTITY_MODE_USE_TLS 1 #endif #ifdef ABSL_THREAD_IDENTITY_MODE_USE_CPP11 -#error ABSL_THREAD_IDENTITY_MODE_USE_CPP11 cannot be directly set +#error ABSL_THREAD_IDENTITY_MODE_USE_CPP11 cannot be direcly set #else #define ABSL_THREAD_IDENTITY_MODE_USE_CPP11 2 #endif #ifdef ABSL_THREAD_IDENTITY_MODE -#error ABSL_THREAD_IDENTITY_MODE cannot be directly set +#error ABSL_THREAD_IDENTITY_MODE cannot be direcly set #elif defined(ABSL_FORCE_THREAD_IDENTITY_MODE) #define ABSL_THREAD_IDENTITY_MODE ABSL_FORCE_THREAD_IDENTITY_MODE #elif defined(_WIN32) && !defined(__MINGW32__) #define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11 -#elif defined(__APPLE__) && defined(ABSL_HAVE_THREAD_LOCAL) -#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11 -#elif ABSL_PER_THREAD_TLS && defined(__GOOGLE_GRTE_VERSION__) && \ +#elif ABSL_PER_THREAD_TLS && defined(__GOOGLE_GRTE_VERSION__) && \ (__GOOGLE_GRTE_VERSION__ >= 20140228L) // Support for async-safe TLS was specifically added in GRTEv4. It's not // present in the upstream eglibc. @@ -236,18 +235,13 @@ ABSL_CONST_INIT extern thread_local ThreadIdentity* thread_identity_ptr; #error Thread-local storage not detected on this platform #endif -// thread_local variables cannot be in headers exposed by DLLs or in certain -// build configurations on Apple platforms. 
However, it is important for -// performance reasons in general that `CurrentThreadIdentityIfPresent` be -// inlined. In the other cases we opt to have the function not be inlined. Note +// thread_local variables cannot be in headers exposed by DLLs. However, it is +// important for performance reasons in general that +// `CurrentThreadIdentityIfPresent` be inlined. This is not possible across a +// DLL boundary so, with DLLs, we opt to have the function not be inlined. Note // that `CurrentThreadIdentityIfPresent` is declared above so we can exclude -// this entire inline definition. -#if !defined(__APPLE__) && !defined(ABSL_BUILD_DLL) && \ - !defined(ABSL_CONSUME_DLL) -#define ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT 1 -#endif - -#ifdef ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT +// this entire inline definition when compiling as a DLL. +#if !defined(ABSL_BUILD_DLL) && !defined(ABSL_CONSUME_DLL) inline ThreadIdentity* CurrentThreadIdentityIfPresent() { return thread_identity_ptr; } diff --git a/abseil-cpp/absl/base/internal/throw_delegate.cc b/abseil-cpp/absl/base/internal/throw_delegate.cc index c260ff1e..c055f75d 100644 --- a/abseil-cpp/absl/base/internal/throw_delegate.cc +++ b/abseil-cpp/absl/base/internal/throw_delegate.cc @@ -18,7 +18,6 @@ #include #include #include - #include "absl/base/config.h" #include "absl/base/internal/raw_logging.h" @@ -26,186 +25,83 @@ namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { -// NOTE: The various STL exception throwing functions are placed within the -// #ifdef blocks so the symbols aren't exposed on platforms that don't support -// them, such as the Android NDK. For example, ANGLE fails to link when building -// within AOSP without them, since the STL functions don't exist. 
namespace { -#ifdef ABSL_HAVE_EXCEPTIONS template [[noreturn]] void Throw(const T& error) { +#ifdef ABSL_HAVE_EXCEPTIONS throw error; -} +#else + ABSL_RAW_LOG(FATAL, "%s", error.what()); + std::abort(); #endif +} } // namespace void ThrowStdLogicError(const std::string& what_arg) { -#ifdef ABSL_HAVE_EXCEPTIONS Throw(std::logic_error(what_arg)); -#else - ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str()); - std::abort(); -#endif } void ThrowStdLogicError(const char* what_arg) { -#ifdef ABSL_HAVE_EXCEPTIONS Throw(std::logic_error(what_arg)); -#else - ABSL_RAW_LOG(FATAL, "%s", what_arg); - std::abort(); -#endif } void ThrowStdInvalidArgument(const std::string& what_arg) { -#ifdef ABSL_HAVE_EXCEPTIONS Throw(std::invalid_argument(what_arg)); -#else - ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str()); - std::abort(); -#endif } void ThrowStdInvalidArgument(const char* what_arg) { -#ifdef ABSL_HAVE_EXCEPTIONS Throw(std::invalid_argument(what_arg)); -#else - ABSL_RAW_LOG(FATAL, "%s", what_arg); - std::abort(); -#endif } void ThrowStdDomainError(const std::string& what_arg) { -#ifdef ABSL_HAVE_EXCEPTIONS Throw(std::domain_error(what_arg)); -#else - ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str()); - std::abort(); -#endif } void ThrowStdDomainError(const char* what_arg) { -#ifdef ABSL_HAVE_EXCEPTIONS Throw(std::domain_error(what_arg)); -#else - ABSL_RAW_LOG(FATAL, "%s", what_arg); - std::abort(); -#endif } void ThrowStdLengthError(const std::string& what_arg) { -#ifdef ABSL_HAVE_EXCEPTIONS Throw(std::length_error(what_arg)); -#else - ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str()); - std::abort(); -#endif } void ThrowStdLengthError(const char* what_arg) { -#ifdef ABSL_HAVE_EXCEPTIONS Throw(std::length_error(what_arg)); -#else - ABSL_RAW_LOG(FATAL, "%s", what_arg); - std::abort(); -#endif } void ThrowStdOutOfRange(const std::string& what_arg) { -#ifdef ABSL_HAVE_EXCEPTIONS Throw(std::out_of_range(what_arg)); -#else - ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str()); - std::abort(); -#endif } void 
ThrowStdOutOfRange(const char* what_arg) { -#ifdef ABSL_HAVE_EXCEPTIONS Throw(std::out_of_range(what_arg)); -#else - ABSL_RAW_LOG(FATAL, "%s", what_arg); - std::abort(); -#endif } void ThrowStdRuntimeError(const std::string& what_arg) { -#ifdef ABSL_HAVE_EXCEPTIONS Throw(std::runtime_error(what_arg)); -#else - ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str()); - std::abort(); -#endif } void ThrowStdRuntimeError(const char* what_arg) { -#ifdef ABSL_HAVE_EXCEPTIONS Throw(std::runtime_error(what_arg)); -#else - ABSL_RAW_LOG(FATAL, "%s", what_arg); - std::abort(); -#endif } void ThrowStdRangeError(const std::string& what_arg) { -#ifdef ABSL_HAVE_EXCEPTIONS Throw(std::range_error(what_arg)); -#else - ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str()); - std::abort(); -#endif } void ThrowStdRangeError(const char* what_arg) { -#ifdef ABSL_HAVE_EXCEPTIONS Throw(std::range_error(what_arg)); -#else - ABSL_RAW_LOG(FATAL, "%s", what_arg); - std::abort(); -#endif } void ThrowStdOverflowError(const std::string& what_arg) { -#ifdef ABSL_HAVE_EXCEPTIONS Throw(std::overflow_error(what_arg)); -#else - ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str()); - std::abort(); -#endif } void ThrowStdOverflowError(const char* what_arg) { -#ifdef ABSL_HAVE_EXCEPTIONS Throw(std::overflow_error(what_arg)); -#else - ABSL_RAW_LOG(FATAL, "%s", what_arg); - std::abort(); -#endif } void ThrowStdUnderflowError(const std::string& what_arg) { -#ifdef ABSL_HAVE_EXCEPTIONS Throw(std::underflow_error(what_arg)); -#else - ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str()); - std::abort(); -#endif } void ThrowStdUnderflowError(const char* what_arg) { -#ifdef ABSL_HAVE_EXCEPTIONS Throw(std::underflow_error(what_arg)); -#else - ABSL_RAW_LOG(FATAL, "%s", what_arg); - std::abort(); -#endif } -void ThrowStdBadFunctionCall() { -#ifdef ABSL_HAVE_EXCEPTIONS - Throw(std::bad_function_call()); -#else - std::abort(); -#endif -} +void ThrowStdBadFunctionCall() { Throw(std::bad_function_call()); } -void ThrowStdBadAlloc() { -#ifdef 
ABSL_HAVE_EXCEPTIONS - Throw(std::bad_alloc()); -#else - std::abort(); -#endif -} +void ThrowStdBadAlloc() { Throw(std::bad_alloc()); } } // namespace base_internal ABSL_NAMESPACE_END diff --git a/abseil-cpp/absl/base/internal/unaligned_access.h b/abseil-cpp/absl/base/internal/unaligned_access.h index 093dd9b4..dd5250de 100644 --- a/abseil-cpp/absl/base/internal/unaligned_access.h +++ b/abseil-cpp/absl/base/internal/unaligned_access.h @@ -31,6 +31,80 @@ // The unaligned API is C++ only. The declarations use C++ features // (namespaces, inline) which are absent or incompatible in C. #if defined(__cplusplus) + +#if defined(ABSL_HAVE_ADDRESS_SANITIZER) || \ + defined(ABSL_HAVE_THREAD_SANITIZER) || defined(ABSL_HAVE_MEMORY_SANITIZER) +// Consider we have an unaligned load/store of 4 bytes from address 0x...05. +// AddressSanitizer will treat it as a 3-byte access to the range 05:07 and +// will miss a bug if 08 is the first unaddressable byte. +// ThreadSanitizer will also treat this as a 3-byte access to 05:07 and will +// miss a race between this access and some other accesses to 08. +// MemorySanitizer will correctly propagate the shadow on unaligned stores +// and correctly report bugs on unaligned loads, but it may not properly +// update and report the origin of the uninitialized memory. +// For all three tools, replacing an unaligned access with a tool-specific +// callback solves the problem. + +// Make sure uint16_t/uint32_t/uint64_t are defined. 
+#include + +extern "C" { +uint16_t __sanitizer_unaligned_load16(const void *p); +uint32_t __sanitizer_unaligned_load32(const void *p); +uint64_t __sanitizer_unaligned_load64(const void *p); +void __sanitizer_unaligned_store16(void *p, uint16_t v); +void __sanitizer_unaligned_store32(void *p, uint32_t v); +void __sanitizer_unaligned_store64(void *p, uint64_t v); +} // extern "C" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +inline uint16_t UnalignedLoad16(const void *p) { + return __sanitizer_unaligned_load16(p); +} + +inline uint32_t UnalignedLoad32(const void *p) { + return __sanitizer_unaligned_load32(p); +} + +inline uint64_t UnalignedLoad64(const void *p) { + return __sanitizer_unaligned_load64(p); +} + +inline void UnalignedStore16(void *p, uint16_t v) { + __sanitizer_unaligned_store16(p, v); +} + +inline void UnalignedStore32(void *p, uint32_t v) { + __sanitizer_unaligned_store32(p, v); +} + +inline void UnalignedStore64(void *p, uint64_t v) { + __sanitizer_unaligned_store64(p, v); +} + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) \ + (absl::base_internal::UnalignedLoad16(_p)) +#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) \ + (absl::base_internal::UnalignedLoad32(_p)) +#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) \ + (absl::base_internal::UnalignedLoad64(_p)) + +#define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \ + (absl::base_internal::UnalignedStore16(_p, _val)) +#define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \ + (absl::base_internal::UnalignedStore32(_p, _val)) +#define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \ + (absl::base_internal::UnalignedStore64(_p, _val)) + +#else + namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { @@ -77,6 +151,8 @@ ABSL_NAMESPACE_END #define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \ (absl::base_internal::UnalignedStore64(_p, _val)) +#endif + #endif // defined(__cplusplus), end of unaligned API #endif // 
ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_ diff --git a/abseil-cpp/absl/base/internal/unscaledcycleclock.cc b/abseil-cpp/absl/base/internal/unscaledcycleclock.cc index b1c396c6..f1e7bbef 100644 --- a/abseil-cpp/absl/base/internal/unscaledcycleclock.cc +++ b/abseil-cpp/absl/base/internal/unscaledcycleclock.cc @@ -24,13 +24,8 @@ #ifdef __GLIBC__ #include #elif defined(__FreeBSD__) -// clang-format off -// This order does actually matter =(. -#include #include -// clang-format on - -#include "absl/base/call_once.h" +#include #endif #endif @@ -54,6 +49,12 @@ double UnscaledCycleClock::Frequency() { #elif defined(__x86_64__) +int64_t UnscaledCycleClock::Now() { + uint64_t low, high; + __asm__ volatile("rdtsc" : "=a"(low), "=d"(high)); + return (high << 32) | low; +} + double UnscaledCycleClock::Frequency() { return base_internal::NominalCPUFrequency(); } @@ -86,10 +87,6 @@ int64_t UnscaledCycleClock::Now() { double UnscaledCycleClock::Frequency() { #ifdef __GLIBC__ return __ppc_get_timebase_freq(); -#elif defined(_AIX) - // This is the same constant value as returned by - // __ppc_get_timebase_freq(). 
- return static_cast(512000000); #elif defined(__FreeBSD__) static once_flag init_timebase_frequency_once; static double timebase_frequency = 0.0; @@ -122,23 +119,13 @@ double UnscaledCycleClock::Frequency() { return aarch64_timer_frequency; } -#elif defined(__riscv) - -int64_t UnscaledCycleClock::Now() { - int64_t virtual_timer_value; - asm volatile("rdcycle %0" : "=r"(virtual_timer_value)); - return virtual_timer_value; -} - -double UnscaledCycleClock::Frequency() { - return base_internal::NominalCPUFrequency(); -} - #elif defined(_M_IX86) || defined(_M_X64) #pragma intrinsic(__rdtsc) -int64_t UnscaledCycleClock::Now() { return __rdtsc(); } +int64_t UnscaledCycleClock::Now() { + return __rdtsc(); +} double UnscaledCycleClock::Frequency() { return base_internal::NominalCPUFrequency(); diff --git a/abseil-cpp/absl/base/internal/unscaledcycleclock.h b/abseil-cpp/absl/base/internal/unscaledcycleclock.h index 2cbeae31..82f2c87a 100644 --- a/abseil-cpp/absl/base/internal/unscaledcycleclock.h +++ b/abseil-cpp/absl/base/internal/unscaledcycleclock.h @@ -46,8 +46,8 @@ // The following platforms have an implementation of a hardware counter. #if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || \ - defined(__powerpc__) || defined(__ppc__) || defined(__riscv) || \ - defined(_M_IX86) || (defined(_M_X64) && !defined(_M_ARM64EC)) + defined(__powerpc__) || defined(__ppc__) || \ + defined(_M_IX86) || defined(_M_X64) #define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 1 #else #define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 0 @@ -59,7 +59,8 @@ // CycleClock that runs at atleast 1 MHz. We've found some Android // ARM64 devices where this is not the case, so we disable it by // default on Android ARM64. 
-#if defined(__native_client__) || (defined(__APPLE__)) || \ +#if defined(__native_client__) || \ + (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE) || \ (defined(__ANDROID__) && defined(__aarch64__)) #define ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT 0 #else @@ -79,8 +80,8 @@ // This macro can be used to test if UnscaledCycleClock::Frequency() // is NominalCPUFrequency() on a particular platform. -#if (defined(__i386__) || defined(__x86_64__) || defined(__riscv) || \ - defined(_M_IX86) || defined(_M_X64)) +#if (defined(__i386__) || defined(__x86_64__) || \ + defined(_M_IX86) || defined(_M_X64)) #define ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY #endif @@ -114,16 +115,6 @@ class UnscaledCycleClock { friend class base_internal::UnscaledCycleClockWrapperForInitializeFrequency; }; -#if defined(__x86_64__) - -inline int64_t UnscaledCycleClock::Now() { - uint64_t low, high; - __asm__ volatile("rdtsc" : "=a"(low), "=d"(high)); - return (high << 32) | low; -} - -#endif - } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl diff --git a/abseil-cpp/absl/base/invoke_test.cc b/abseil-cpp/absl/base/invoke_test.cc index 7be26f64..bcdef36c 100644 --- a/abseil-cpp/absl/base/invoke_test.cc +++ b/abseil-cpp/absl/base/invoke_test.cc @@ -31,14 +31,6 @@ namespace { int Function(int a, int b) { return a - b; } -void VoidFunction(int& a, int& b) { - a += b; - b = a - b; - a -= b; -} - -int ZeroArgFunction() { return -1937; } - int Sink(std::unique_ptr p) { return *p; } @@ -231,100 +223,6 @@ TEST(InvokeTest, SfinaeFriendly) { EXPECT_THAT(CallMaybeWithArg(Factory), ::testing::Pointee(42)); } -TEST(IsInvocableRTest, CallableExactMatch) { - static_assert( - base_internal::is_invocable_r::value, - "Should be true for exact match of types on a free function"); -} - -TEST(IsInvocableRTest, CallableArgumentConversionMatch) { - static_assert( - base_internal::is_invocable_r::value, - "Should be true for convertible argument type"); -} - -TEST(IsInvocableRTest, 
CallableReturnConversionMatch) { - static_assert(base_internal::is_invocable_r::value, - "Should be true for convertible return type"); -} - -TEST(IsInvocableRTest, CallableReturnVoid) { - static_assert(base_internal::is_invocable_r::value, - "Should be true for void expected and actual return types"); - static_assert( - base_internal::is_invocable_r::value, - "Should be true for void expected and non-void actual return types"); -} - -TEST(IsInvocableRTest, CallableRefQualifierMismatch) { - static_assert(!base_internal::is_invocable_r::value, - "Should be false for reference constness mismatch"); - static_assert(!base_internal::is_invocable_r::value, - "Should be false for reference value category mismatch"); -} - -TEST(IsInvocableRTest, CallableArgumentTypeMismatch) { - static_assert(!base_internal::is_invocable_r::value, - "Should be false for argument type mismatch"); -} - -TEST(IsInvocableRTest, CallableReturnTypeMismatch) { - static_assert(!base_internal::is_invocable_r::value, - "Should be false for return type mismatch"); -} - -TEST(IsInvocableRTest, CallableTooFewArgs) { - static_assert( - !base_internal::is_invocable_r::value, - "Should be false for too few arguments"); -} - -TEST(IsInvocableRTest, CallableTooManyArgs) { - static_assert(!base_internal::is_invocable_r::value, - "Should be false for too many arguments"); -} - -TEST(IsInvocableRTest, MemberFunctionAndReference) { - static_assert(base_internal::is_invocable_r::value, - "Should be true for exact match of types on a member function " - "and class reference"); -} - -TEST(IsInvocableRTest, MemberFunctionAndPointer) { - static_assert(base_internal::is_invocable_r::value, - "Should be true for exact match of types on a member function " - "and class pointer"); -} - -TEST(IsInvocableRTest, DataMemberAndReference) { - static_assert(base_internal::is_invocable_r::value, - "Should be true for exact match of types on a data member and " - "class reference"); -} - -TEST(IsInvocableRTest, 
DataMemberAndPointer) { - static_assert(base_internal::is_invocable_r::value, - "Should be true for exact match of types on a data member and " - "class pointer"); -} - -TEST(IsInvocableRTest, CallableZeroArgs) { - static_assert( - base_internal::is_invocable_r::value, - "Should be true for exact match for a zero-arg free function"); -} - } // namespace } // namespace base_internal ABSL_NAMESPACE_END diff --git a/abseil-cpp/absl/base/log_severity.cc b/abseil-cpp/absl/base/log_severity.cc index 60a8fc1f..72312afd 100644 --- a/abseil-cpp/absl/base/log_severity.cc +++ b/abseil-cpp/absl/base/log_severity.cc @@ -16,8 +16,6 @@ #include -#include "absl/base/attributes.h" - namespace absl { ABSL_NAMESPACE_BEGIN @@ -25,31 +23,5 @@ std::ostream& operator<<(std::ostream& os, absl::LogSeverity s) { if (s == absl::NormalizeLogSeverity(s)) return os << absl::LogSeverityName(s); return os << "absl::LogSeverity(" << static_cast(s) << ")"; } - -std::ostream& operator<<(std::ostream& os, absl::LogSeverityAtLeast s) { - switch (s) { - case absl::LogSeverityAtLeast::kInfo: - case absl::LogSeverityAtLeast::kWarning: - case absl::LogSeverityAtLeast::kError: - case absl::LogSeverityAtLeast::kFatal: - return os << ">=" << static_cast(s); - case absl::LogSeverityAtLeast::kInfinity: - return os << "INFINITY"; - } - return os; -} - -std::ostream& operator<<(std::ostream& os, absl::LogSeverityAtMost s) { - switch (s) { - case absl::LogSeverityAtMost::kInfo: - case absl::LogSeverityAtMost::kWarning: - case absl::LogSeverityAtMost::kError: - case absl::LogSeverityAtMost::kFatal: - return os << "<=" << static_cast(s); - case absl::LogSeverityAtMost::kNegativeInfinity: - return os << "NEGATIVE_INFINITY"; - } - return os; -} ABSL_NAMESPACE_END } // namespace absl diff --git a/abseil-cpp/absl/base/log_severity.h b/abseil-cpp/absl/base/log_severity.h index 8bdca38b..65a3b166 100644 --- a/abseil-cpp/absl/base/log_severity.h +++ b/abseil-cpp/absl/base/log_severity.h @@ -12,8 +12,8 @@ // See the 
License for the specific language governing permissions and // limitations under the License. -#ifndef ABSL_BASE_LOG_SEVERITY_H_ -#define ABSL_BASE_LOG_SEVERITY_H_ +#ifndef ABSL_BASE_INTERNAL_LOG_SEVERITY_H_ +#define ABSL_BASE_INTERNAL_LOG_SEVERITY_H_ #include #include @@ -36,7 +36,7 @@ ABSL_NAMESPACE_BEGIN // such values to a defined severity level, however in some cases values other // than the defined levels are useful for comparison. // -// Example: +// Exmaple: // // // Effectively disables all logging: // SetMinLogLevel(static_cast(100)); @@ -115,58 +115,7 @@ constexpr absl::LogSeverity NormalizeLogSeverity(int s) { // unspecified; do not rely on it. std::ostream& operator<<(std::ostream& os, absl::LogSeverity s); -// Enums representing a lower bound for LogSeverity. APIs that only operate on -// messages of at least a certain level (for example, `SetMinLogLevel()`) use -// this type to specify that level. absl::LogSeverityAtLeast::kInfinity is -// a level above all threshold levels and therefore no log message will -// ever meet this threshold. -enum class LogSeverityAtLeast : int { - kInfo = static_cast(absl::LogSeverity::kInfo), - kWarning = static_cast(absl::LogSeverity::kWarning), - kError = static_cast(absl::LogSeverity::kError), - kFatal = static_cast(absl::LogSeverity::kFatal), - kInfinity = 1000, -}; - -std::ostream& operator<<(std::ostream& os, absl::LogSeverityAtLeast s); - -// Enums representing an upper bound for LogSeverity. APIs that only operate on -// messages of at most a certain level (for example, buffer all messages at or -// below a certain level) use this type to specify that level. -// absl::LogSeverityAtMost::kNegativeInfinity is a level below all threshold -// levels and therefore will exclude all log messages. 
-enum class LogSeverityAtMost : int { - kNegativeInfinity = -1000, - kInfo = static_cast(absl::LogSeverity::kInfo), - kWarning = static_cast(absl::LogSeverity::kWarning), - kError = static_cast(absl::LogSeverity::kError), - kFatal = static_cast(absl::LogSeverity::kFatal), -}; - -std::ostream& operator<<(std::ostream& os, absl::LogSeverityAtMost s); - -#define COMPOP(op1, op2, T) \ - constexpr bool operator op1(absl::T lhs, absl::LogSeverity rhs) { \ - return static_cast(lhs) op1 rhs; \ - } \ - constexpr bool operator op2(absl::LogSeverity lhs, absl::T rhs) { \ - return lhs op2 static_cast(rhs); \ - } - -// Comparisons between `LogSeverity` and `LogSeverityAtLeast`/ -// `LogSeverityAtMost` are only supported in one direction. -// Valid checks are: -// LogSeverity >= LogSeverityAtLeast -// LogSeverity < LogSeverityAtLeast -// LogSeverity <= LogSeverityAtMost -// LogSeverity > LogSeverityAtMost -COMPOP(>, <, LogSeverityAtLeast) -COMPOP(<=, >=, LogSeverityAtLeast) -COMPOP(<, >, LogSeverityAtMost) -COMPOP(>=, <=, LogSeverityAtMost) -#undef COMPOP - ABSL_NAMESPACE_END } // namespace absl -#endif // ABSL_BASE_LOG_SEVERITY_H_ +#endif // ABSL_BASE_INTERNAL_LOG_SEVERITY_H_ diff --git a/abseil-cpp/absl/base/log_severity_test.cc b/abseil-cpp/absl/base/log_severity_test.cc index 16091a5b..2c6872b0 100644 --- a/abseil-cpp/absl/base/log_severity_test.cc +++ b/abseil-cpp/absl/base/log_severity_test.cc @@ -35,8 +35,7 @@ using ::testing::IsTrue; using ::testing::TestWithParam; using ::testing::Values; -template -std::string StreamHelper(T value) { +std::string StreamHelper(absl::LogSeverity value) { std::ostringstream stream; stream << value; return stream.str(); @@ -53,9 +52,9 @@ TEST(StreamTest, Works) { Eq("absl::LogSeverity(4)")); } -static_assert(absl::flags_internal::FlagUseValueAndInitBitStorage< - absl::LogSeverity>::value, - "Flags of type absl::LogSeverity ought to be lock-free."); +static_assert( + absl::flags_internal::FlagUseOneWordStorage::value, + "Flags of type 
absl::LogSeverity ought to be lock-free."); using ParseFlagFromOutOfRangeIntegerTest = TestWithParam; INSTANTIATE_TEST_SUITE_P( @@ -202,44 +201,4 @@ TEST_P(UnparseFlagToOtherIntegerTest, ReturnsExpectedValueAndRoundTrips) { IsTrue()); EXPECT_THAT(reparsed_value, Eq(to_unparse)); } - -TEST(LogThresholdTest, LogSeverityAtLeastTest) { - EXPECT_LT(absl::LogSeverity::kError, absl::LogSeverityAtLeast::kFatal); - EXPECT_GT(absl::LogSeverityAtLeast::kError, absl::LogSeverity::kInfo); - - EXPECT_LE(absl::LogSeverityAtLeast::kInfo, absl::LogSeverity::kError); - EXPECT_GE(absl::LogSeverity::kError, absl::LogSeverityAtLeast::kInfo); -} - -TEST(LogThresholdTest, LogSeverityAtMostTest) { - EXPECT_GT(absl::LogSeverity::kError, absl::LogSeverityAtMost::kWarning); - EXPECT_LT(absl::LogSeverityAtMost::kError, absl::LogSeverity::kFatal); - - EXPECT_GE(absl::LogSeverityAtMost::kFatal, absl::LogSeverity::kError); - EXPECT_LE(absl::LogSeverity::kWarning, absl::LogSeverityAtMost::kError); -} - -TEST(LogThresholdTest, Extremes) { - EXPECT_LT(absl::LogSeverity::kFatal, absl::LogSeverityAtLeast::kInfinity); - EXPECT_GT(absl::LogSeverity::kInfo, - absl::LogSeverityAtMost::kNegativeInfinity); -} - -TEST(LogThresholdTest, Output) { - EXPECT_THAT(StreamHelper(absl::LogSeverityAtLeast::kInfo), Eq(">=INFO")); - EXPECT_THAT(StreamHelper(absl::LogSeverityAtLeast::kWarning), - Eq(">=WARNING")); - EXPECT_THAT(StreamHelper(absl::LogSeverityAtLeast::kError), Eq(">=ERROR")); - EXPECT_THAT(StreamHelper(absl::LogSeverityAtLeast::kFatal), Eq(">=FATAL")); - EXPECT_THAT(StreamHelper(absl::LogSeverityAtLeast::kInfinity), - Eq("INFINITY")); - - EXPECT_THAT(StreamHelper(absl::LogSeverityAtMost::kInfo), Eq("<=INFO")); - EXPECT_THAT(StreamHelper(absl::LogSeverityAtMost::kWarning), Eq("<=WARNING")); - EXPECT_THAT(StreamHelper(absl::LogSeverityAtMost::kError), Eq("<=ERROR")); - EXPECT_THAT(StreamHelper(absl::LogSeverityAtMost::kFatal), Eq("<=FATAL")); - 
EXPECT_THAT(StreamHelper(absl::LogSeverityAtMost::kNegativeInfinity), - Eq("NEGATIVE_INFINITY")); -} - } // namespace diff --git a/abseil-cpp/absl/base/macros.h b/abseil-cpp/absl/base/macros.h index 3e085a91..02dd9ff4 100644 --- a/abseil-cpp/absl/base/macros.h +++ b/abseil-cpp/absl/base/macros.h @@ -144,15 +144,4 @@ ABSL_NAMESPACE_END #define ABSL_INTERNAL_RETHROW do {} while (false) #endif // ABSL_HAVE_EXCEPTIONS -// `ABSL_INTERNAL_UNREACHABLE` is an unreachable statement. A program which -// reaches one has undefined behavior, and the compiler may optimize -// accordingly. -#if defined(__GNUC__) || ABSL_HAVE_BUILTIN(__builtin_unreachable) -#define ABSL_INTERNAL_UNREACHABLE __builtin_unreachable() -#elif defined(_MSC_VER) -#define ABSL_INTERNAL_UNREACHABLE __assume(0) -#else -#define ABSL_INTERNAL_UNREACHABLE -#endif - #endif // ABSL_BASE_MACROS_H_ diff --git a/abseil-cpp/absl/base/optimization.h b/abseil-cpp/absl/base/optimization.h index db5cc097..2e31376c 100644 --- a/abseil-cpp/absl/base/optimization.h +++ b/abseil-cpp/absl/base/optimization.h @@ -22,15 +22,13 @@ #ifndef ABSL_BASE_OPTIMIZATION_H_ #define ABSL_BASE_OPTIMIZATION_H_ -#include - #include "absl/base/config.h" // ABSL_BLOCK_TAIL_CALL_OPTIMIZATION // -// Instructs the compiler to avoid optimizing tail-call recursion. This macro is -// useful when you wish to preserve the existing function order within a stack -// trace for logging, debugging, or profiling purposes. +// Instructs the compiler to avoid optimizing tail-call recursion. Use of this +// macro is useful when you wish to preserve the existing function order within +// a stack trace for logging, debugging, or profiling purposes. // // Example: // @@ -106,10 +104,9 @@ // Cacheline aligning objects properly allows constructive memory sharing and // prevents destructive (or "false") memory sharing. 
// -// NOTE: callers should replace uses of this macro with `alignas()` using +// NOTE: this macro should be replaced with usage of `alignas()` using // `std::hardware_constructive_interference_size` and/or -// `std::hardware_destructive_interference_size` when C++17 becomes available to -// them. +// `std::hardware_destructive_interference_size` when available within C++17. // // See http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0154r1.html // for more information. @@ -181,43 +178,35 @@ #define ABSL_PREDICT_TRUE(x) (x) #endif -// ABSL_ASSUME(cond) -// -// Informs the compiler that a condition is always true and that it can assume -// it to be true for optimization purposes. -// -// WARNING: If the condition is false, the program can produce undefined and -// potentially dangerous behavior. -// +// ABSL_INTERNAL_ASSUME(cond) +// Informs the compiler than a condition is always true and that it can assume +// it to be true for optimization purposes. The call has undefined behavior if +// the condition is false. // In !NDEBUG mode, the condition is checked with an assert(). -// -// NOTE: The expression must not have side effects, as it may only be evaluated -// in some compilation modes and not others. Some compilers may issue a warning -// if the compiler cannot prove the expression has no side effects. For example, -// the expression should not use a function call since the compiler cannot prove -// that a function call does not have side effects. +// NOTE: The expression must not have side effects, as it will only be evaluated +// in some compilation modes and not others. // // Example: // // int x = ...; -// ABSL_ASSUME(x >= 0); +// ABSL_INTERNAL_ASSUME(x >= 0); // // The compiler can optimize the division to a simple right shift using the // // assumption specified above. 
// int y = x / 16; // #if !defined(NDEBUG) -#define ABSL_ASSUME(cond) assert(cond) +#define ABSL_INTERNAL_ASSUME(cond) assert(cond) #elif ABSL_HAVE_BUILTIN(__builtin_assume) -#define ABSL_ASSUME(cond) __builtin_assume(cond) +#define ABSL_INTERNAL_ASSUME(cond) __builtin_assume(cond) #elif defined(__GNUC__) || ABSL_HAVE_BUILTIN(__builtin_unreachable) -#define ABSL_ASSUME(cond) \ +#define ABSL_INTERNAL_ASSUME(cond) \ do { \ if (!(cond)) __builtin_unreachable(); \ } while (0) #elif defined(_MSC_VER) -#define ABSL_ASSUME(cond) __assume(cond) +#define ABSL_INTERNAL_ASSUME(cond) __assume(cond) #else -#define ABSL_ASSUME(cond) \ +#define ABSL_INTERNAL_ASSUME(cond) \ do { \ static_cast(false && (cond)); \ } while (0) @@ -227,7 +216,7 @@ // This macro forces small unique name on a static file level symbols like // static local variables or static functions. This is intended to be used in // macro definitions to optimize the cost of generated code. Do NOT use it on -// symbols exported from translation unit since it may cause a link time +// symbols exported from translation unit since it may casue a link time // conflict. // // Example: diff --git a/abseil-cpp/absl/base/options.h b/abseil-cpp/absl/base/options.h index bc598470..6a0fb07e 100644 --- a/abseil-cpp/absl/base/options.h +++ b/abseil-cpp/absl/base/options.h @@ -206,7 +206,7 @@ // allowed. #define ABSL_OPTION_USE_INLINE_NAMESPACE 1 -#define ABSL_OPTION_INLINE_NAMESPACE_NAME lts_20220623 +#define ABSL_OPTION_INLINE_NAMESPACE_NAME lts_2020_09_23 // ABSL_OPTION_HARDENED // diff --git a/abseil-cpp/absl/base/port.h b/abseil-cpp/absl/base/port.h index 5bc4d6cd..6c28068d 100644 --- a/abseil-cpp/absl/base/port.h +++ b/abseil-cpp/absl/base/port.h @@ -14,6 +14,7 @@ // // This files is a forwarding header for other headers containing various // portability macros and functions. +// This file is used for both C and C++! 
#ifndef ABSL_BASE_PORT_H_ #define ABSL_BASE_PORT_H_ diff --git a/abseil-cpp/absl/base/spinlock_test_common.cc b/abseil-cpp/absl/base/spinlock_test_common.cc index 2b572c5b..dee266e4 100644 --- a/abseil-cpp/absl/base/spinlock_test_common.cc +++ b/abseil-cpp/absl/base/spinlock_test_common.cc @@ -92,7 +92,6 @@ static void TestFunction(int thread_salt, SpinLock* spinlock) { static void ThreadedTest(SpinLock* spinlock) { std::vector threads; - threads.reserve(kNumThreads); for (int i = 0; i < kNumThreads; ++i) { threads.push_back(std::thread(TestFunction, i, spinlock)); } diff --git a/abseil-cpp/absl/base/thread_annotations.h b/abseil-cpp/absl/base/thread_annotations.h index bc8a6203..e23fff1d 100644 --- a/abseil-cpp/absl/base/thread_annotations.h +++ b/abseil-cpp/absl/base/thread_annotations.h @@ -154,8 +154,8 @@ // ABSL_LOCKS_EXCLUDED() // -// Documents the locks that cannot be held by callers of this function, as they -// might be acquired by this function (Abseil's `Mutex` locks are +// Documents the locks acquired in the body of the function. These locks +// cannot be held when calling this function (as Abseil's `Mutex` locks are // non-reentrant). #if ABSL_HAVE_ATTRIBUTE(locks_excluded) #define ABSL_LOCKS_EXCLUDED(...) __attribute__((locks_excluded(__VA_ARGS__))) @@ -317,7 +317,7 @@ namespace base_internal { // Takes a reference to a guarded data member, and returns an unguarded // reference. -// Do not use this function directly, use ABSL_TS_UNCHECKED_READ instead. +// Do not used this function directly, use ABSL_TS_UNCHECKED_READ instead. template inline const T& ts_unchecked_read(const T& v) ABSL_NO_THREAD_SAFETY_ANALYSIS { return v; diff --git a/abseil-cpp/absl/cleanup/BUILD.bazel b/abseil-cpp/absl/cleanup/BUILD.bazel deleted file mode 100644 index 2154d9f1..00000000 --- a/abseil-cpp/absl/cleanup/BUILD.bazel +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright 2021 The Abseil Authors. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -load( - "//absl:copts/configure_copts.bzl", - "ABSL_DEFAULT_COPTS", - "ABSL_DEFAULT_LINKOPTS", - "ABSL_TEST_COPTS", -) - -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -cc_library( - name = "cleanup_internal", - hdrs = ["internal/cleanup.h"], - copts = ABSL_DEFAULT_COPTS, - linkopts = ABSL_DEFAULT_LINKOPTS, - deps = [ - "//absl/base:base_internal", - "//absl/base:core_headers", - "//absl/utility", - ], -) - -cc_library( - name = "cleanup", - hdrs = [ - "cleanup.h", - ], - copts = ABSL_DEFAULT_COPTS, - linkopts = ABSL_DEFAULT_LINKOPTS, - deps = [ - ":cleanup_internal", - "//absl/base:config", - "//absl/base:core_headers", - ], -) - -cc_test( - name = "cleanup_test", - size = "small", - srcs = [ - "cleanup_test.cc", - ], - copts = ABSL_TEST_COPTS, - deps = [ - ":cleanup", - "//absl/base:config", - "//absl/utility", - "@com_google_googletest//:gtest_main", - ], -) diff --git a/abseil-cpp/absl/cleanup/CMakeLists.txt b/abseil-cpp/absl/cleanup/CMakeLists.txt deleted file mode 100644 index f5af40b4..00000000 --- a/abseil-cpp/absl/cleanup/CMakeLists.txt +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright 2021 The Abseil Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Internal-only target, do not depend on directly. -absl_cc_library( - NAME - cleanup_internal - HDRS - "internal/cleanup.h" - COPTS - ${ABSL_DEFAULT_COPTS} - DEPS - absl::base_internal - absl::core_headers - absl::utility - PUBLIC -) - -absl_cc_library( - NAME - cleanup - HDRS - "cleanup.h" - COPTS - ${ABSL_DEFAULT_COPTS} - DEPS - absl::cleanup_internal - absl::config - absl::core_headers - PUBLIC -) - -absl_cc_test( - NAME - cleanup_test - SRCS - "cleanup_test.cc" - COPTS - ${ABSL_TEST_COPTS} - DEPS - absl::cleanup - absl::config - absl::utility - GTest::gmock_main -) diff --git a/abseil-cpp/absl/cleanup/cleanup.h b/abseil-cpp/absl/cleanup/cleanup.h deleted file mode 100644 index 960ccd08..00000000 --- a/abseil-cpp/absl/cleanup/cleanup.h +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2021 The Abseil Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// ----------------------------------------------------------------------------- -// File: cleanup.h -// ----------------------------------------------------------------------------- -// -// `absl::Cleanup` implements the scope guard idiom, invoking the contained -// callback's `operator()() &&` on scope exit. -// -// Example: -// -// ``` -// absl::Status CopyGoodData(const char* source_path, const char* sink_path) { -// FILE* source_file = fopen(source_path, "r"); -// if (source_file == nullptr) { -// return absl::NotFoundError("No source file"); // No cleanups execute -// } -// -// // C++17 style cleanup using class template argument deduction -// absl::Cleanup source_closer = [source_file] { fclose(source_file); }; -// -// FILE* sink_file = fopen(sink_path, "w"); -// if (sink_file == nullptr) { -// return absl::NotFoundError("No sink file"); // First cleanup executes -// } -// -// // C++11 style cleanup using the factory function -// auto sink_closer = absl::MakeCleanup([sink_file] { fclose(sink_file); }); -// -// Data data; -// while (ReadData(source_file, &data)) { -// if (!data.IsGood()) { -// absl::Status result = absl::FailedPreconditionError("Read bad data"); -// return result; // Both cleanups execute -// } -// SaveData(sink_file, &data); -// } -// -// return absl::OkStatus(); // Both cleanups execute -// } -// ``` -// -// Methods: -// -// `std::move(cleanup).Cancel()` will prevent the callback from executing. -// -// `std::move(cleanup).Invoke()` will execute the callback early, before -// destruction, and prevent the callback from executing in the destructor. -// -// Usage: -// -// `absl::Cleanup` is not an interface type. It is only intended to be used -// within the body of a function. It is not a value type and instead models a -// control flow construct. Check out `defer` in Golang for something similar. 
- -#ifndef ABSL_CLEANUP_CLEANUP_H_ -#define ABSL_CLEANUP_CLEANUP_H_ - -#include - -#include "absl/base/config.h" -#include "absl/base/macros.h" -#include "absl/cleanup/internal/cleanup.h" - -namespace absl { -ABSL_NAMESPACE_BEGIN - -template -class ABSL_MUST_USE_RESULT Cleanup final { - static_assert(cleanup_internal::WasDeduced(), - "Explicit template parameters are not supported."); - - static_assert(cleanup_internal::ReturnsVoid(), - "Callbacks that return values are not supported."); - - public: - Cleanup(Callback callback) : storage_(std::move(callback)) {} // NOLINT - - Cleanup(Cleanup&& other) = default; - - void Cancel() && { - ABSL_HARDENING_ASSERT(storage_.IsCallbackEngaged()); - storage_.DestroyCallback(); - } - - void Invoke() && { - ABSL_HARDENING_ASSERT(storage_.IsCallbackEngaged()); - storage_.InvokeCallback(); - storage_.DestroyCallback(); - } - - ~Cleanup() { - if (storage_.IsCallbackEngaged()) { - storage_.InvokeCallback(); - storage_.DestroyCallback(); - } - } - - private: - cleanup_internal::Storage storage_; -}; - -// `absl::Cleanup c = /* callback */;` -// -// C++17 type deduction API for creating an instance of `absl::Cleanup` -#if defined(ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION) -template -Cleanup(Callback callback) -> Cleanup; -#endif // defined(ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION) - -// `auto c = absl::MakeCleanup(/* callback */);` -// -// C++11 type deduction API for creating an instance of `absl::Cleanup` -template -absl::Cleanup MakeCleanup(Callback callback) { - static_assert(cleanup_internal::WasDeduced(), - "Explicit template parameters are not supported."); - - static_assert(cleanup_internal::ReturnsVoid(), - "Callbacks that return values are not supported."); - - return {std::move(callback)}; -} - -ABSL_NAMESPACE_END -} // namespace absl - -#endif // ABSL_CLEANUP_CLEANUP_H_ diff --git a/abseil-cpp/absl/cleanup/cleanup_test.cc b/abseil-cpp/absl/cleanup/cleanup_test.cc deleted file mode 100644 index 46b88589..00000000 --- 
a/abseil-cpp/absl/cleanup/cleanup_test.cc +++ /dev/null @@ -1,311 +0,0 @@ -// Copyright 2021 The Abseil Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "absl/cleanup/cleanup.h" - -#include -#include -#include - -#include "gtest/gtest.h" -#include "absl/base/config.h" -#include "absl/utility/utility.h" - -namespace { - -using Tag = absl::cleanup_internal::Tag; - -template -constexpr bool IsSame() { - return (std::is_same::value); -} - -struct IdentityFactory { - template - static Callback AsCallback(Callback callback) { - return Callback(std::move(callback)); - } -}; - -// `FunctorClass` is a type used for testing `absl::Cleanup`. It is intended to -// represent users that make their own move-only callback types outside of -// `std::function` and lambda literals. 
-class FunctorClass { - using Callback = std::function; - - public: - explicit FunctorClass(Callback callback) : callback_(std::move(callback)) {} - - FunctorClass(FunctorClass&& other) - : callback_(absl::exchange(other.callback_, Callback())) {} - - FunctorClass(const FunctorClass&) = delete; - - FunctorClass& operator=(const FunctorClass&) = delete; - - FunctorClass& operator=(FunctorClass&&) = delete; - - void operator()() const& = delete; - - void operator()() && { - ASSERT_TRUE(callback_); - callback_(); - callback_ = nullptr; - } - - private: - Callback callback_; -}; - -struct FunctorClassFactory { - template - static FunctorClass AsCallback(Callback callback) { - return FunctorClass(std::move(callback)); - } -}; - -struct StdFunctionFactory { - template - static std::function AsCallback(Callback callback) { - return std::function(std::move(callback)); - } -}; - -using CleanupTestParams = - ::testing::Types; -template -struct CleanupTest : public ::testing::Test {}; -TYPED_TEST_SUITE(CleanupTest, CleanupTestParams); - -bool fn_ptr_called = false; -void FnPtrFunction() { fn_ptr_called = true; } - -TYPED_TEST(CleanupTest, FactoryProducesCorrectType) { - { - auto callback = TypeParam::AsCallback([] {}); - auto cleanup = absl::MakeCleanup(std::move(callback)); - - static_assert( - IsSame, decltype(cleanup)>(), - ""); - } - - { - auto cleanup = absl::MakeCleanup(&FnPtrFunction); - - static_assert(IsSame, decltype(cleanup)>(), - ""); - } - - { - auto cleanup = absl::MakeCleanup(FnPtrFunction); - - static_assert(IsSame, decltype(cleanup)>(), - ""); - } -} - -#if defined(ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION) -TYPED_TEST(CleanupTest, CTADProducesCorrectType) { - { - auto callback = TypeParam::AsCallback([] {}); - absl::Cleanup cleanup = std::move(callback); - - static_assert( - IsSame, decltype(cleanup)>(), - ""); - } - - { - absl::Cleanup cleanup = &FnPtrFunction; - - static_assert(IsSame, decltype(cleanup)>(), - ""); - } - - { - absl::Cleanup cleanup = 
FnPtrFunction; - - static_assert(IsSame, decltype(cleanup)>(), - ""); - } -} - -TYPED_TEST(CleanupTest, FactoryAndCTADProduceSameType) { - { - auto callback = IdentityFactory::AsCallback([] {}); - auto factory_cleanup = absl::MakeCleanup(callback); - absl::Cleanup deduction_cleanup = callback; - - static_assert( - IsSame(), ""); - } - - { - auto factory_cleanup = - absl::MakeCleanup(FunctorClassFactory::AsCallback([] {})); - absl::Cleanup deduction_cleanup = FunctorClassFactory::AsCallback([] {}); - - static_assert( - IsSame(), ""); - } - - { - auto factory_cleanup = - absl::MakeCleanup(StdFunctionFactory::AsCallback([] {})); - absl::Cleanup deduction_cleanup = StdFunctionFactory::AsCallback([] {}); - - static_assert( - IsSame(), ""); - } - - { - auto factory_cleanup = absl::MakeCleanup(&FnPtrFunction); - absl::Cleanup deduction_cleanup = &FnPtrFunction; - - static_assert( - IsSame(), ""); - } - - { - auto factory_cleanup = absl::MakeCleanup(FnPtrFunction); - absl::Cleanup deduction_cleanup = FnPtrFunction; - - static_assert( - IsSame(), ""); - } -} -#endif // defined(ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION) - -TYPED_TEST(CleanupTest, BasicUsage) { - bool called = false; - - { - auto cleanup = - absl::MakeCleanup(TypeParam::AsCallback([&called] { called = true; })); - EXPECT_FALSE(called); // Constructor shouldn't invoke the callback - } - - EXPECT_TRUE(called); // Destructor should invoke the callback -} - -TYPED_TEST(CleanupTest, BasicUsageWithFunctionPointer) { - fn_ptr_called = false; - - { - auto cleanup = absl::MakeCleanup(TypeParam::AsCallback(&FnPtrFunction)); - EXPECT_FALSE(fn_ptr_called); // Constructor shouldn't invoke the callback - } - - EXPECT_TRUE(fn_ptr_called); // Destructor should invoke the callback -} - -TYPED_TEST(CleanupTest, Cancel) { - bool called = false; - - { - auto cleanup = - absl::MakeCleanup(TypeParam::AsCallback([&called] { called = true; })); - EXPECT_FALSE(called); // Constructor shouldn't invoke the callback - - 
std::move(cleanup).Cancel(); - EXPECT_FALSE(called); // Cancel shouldn't invoke the callback - } - - EXPECT_FALSE(called); // Destructor shouldn't invoke the callback -} - -TYPED_TEST(CleanupTest, Invoke) { - bool called = false; - - { - auto cleanup = - absl::MakeCleanup(TypeParam::AsCallback([&called] { called = true; })); - EXPECT_FALSE(called); // Constructor shouldn't invoke the callback - - std::move(cleanup).Invoke(); - EXPECT_TRUE(called); // Invoke should invoke the callback - - called = false; // Reset tracker before destructor runs - } - - EXPECT_FALSE(called); // Destructor shouldn't invoke the callback -} - -TYPED_TEST(CleanupTest, Move) { - bool called = false; - - { - auto moved_from_cleanup = - absl::MakeCleanup(TypeParam::AsCallback([&called] { called = true; })); - EXPECT_FALSE(called); // Constructor shouldn't invoke the callback - - { - auto moved_to_cleanup = std::move(moved_from_cleanup); - EXPECT_FALSE(called); // Move shouldn't invoke the callback - } - - EXPECT_TRUE(called); // Destructor should invoke the callback - - called = false; // Reset tracker before destructor runs - } - - EXPECT_FALSE(called); // Destructor shouldn't invoke the callback -} - -int DestructionCount = 0; - -struct DestructionCounter { - void operator()() {} - - ~DestructionCounter() { ++DestructionCount; } -}; - -TYPED_TEST(CleanupTest, DestructorDestroys) { - { - auto cleanup = - absl::MakeCleanup(TypeParam::AsCallback(DestructionCounter())); - DestructionCount = 0; - } - - EXPECT_EQ(DestructionCount, 1); // Engaged cleanup destroys -} - -TYPED_TEST(CleanupTest, CancelDestroys) { - { - auto cleanup = - absl::MakeCleanup(TypeParam::AsCallback(DestructionCounter())); - DestructionCount = 0; - - std::move(cleanup).Cancel(); - EXPECT_EQ(DestructionCount, 1); // Cancel destroys - } - - EXPECT_EQ(DestructionCount, 1); // Canceled cleanup does not double destroy -} - -TYPED_TEST(CleanupTest, InvokeDestroys) { - { - auto cleanup = - 
absl::MakeCleanup(TypeParam::AsCallback(DestructionCounter())); - DestructionCount = 0; - - std::move(cleanup).Invoke(); - EXPECT_EQ(DestructionCount, 1); // Invoke destroys - } - - EXPECT_EQ(DestructionCount, 1); // Invoked cleanup does not double destroy -} - -} // namespace diff --git a/abseil-cpp/absl/cleanup/internal/cleanup.h b/abseil-cpp/absl/cleanup/internal/cleanup.h deleted file mode 100644 index 2783fcb7..00000000 --- a/abseil-cpp/absl/cleanup/internal/cleanup.h +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2021 The Abseil Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#ifndef ABSL_CLEANUP_INTERNAL_CLEANUP_H_ -#define ABSL_CLEANUP_INTERNAL_CLEANUP_H_ - -#include -#include -#include - -#include "absl/base/internal/invoke.h" -#include "absl/base/macros.h" -#include "absl/base/thread_annotations.h" -#include "absl/utility/utility.h" - -namespace absl { -ABSL_NAMESPACE_BEGIN - -namespace cleanup_internal { - -struct Tag {}; - -template -constexpr bool WasDeduced() { - return (std::is_same::value) && - (sizeof...(Args) == 0); -} - -template -constexpr bool ReturnsVoid() { - return (std::is_same, void>::value); -} - -template -class Storage { - public: - Storage() = delete; - - explicit Storage(Callback callback) { - // Placement-new into a character buffer is used for eager destruction when - // the cleanup is invoked or cancelled. To ensure this optimizes well, the - // behavior is implemented locally instead of using an absl::optional. 
- ::new (GetCallbackBuffer()) Callback(std::move(callback)); - is_callback_engaged_ = true; - } - - Storage(Storage&& other) { - ABSL_HARDENING_ASSERT(other.IsCallbackEngaged()); - - ::new (GetCallbackBuffer()) Callback(std::move(other.GetCallback())); - is_callback_engaged_ = true; - - other.DestroyCallback(); - } - - Storage(const Storage& other) = delete; - - Storage& operator=(Storage&& other) = delete; - - Storage& operator=(const Storage& other) = delete; - - void* GetCallbackBuffer() { return static_cast(+callback_buffer_); } - - Callback& GetCallback() { - return *reinterpret_cast(GetCallbackBuffer()); - } - - bool IsCallbackEngaged() const { return is_callback_engaged_; } - - void DestroyCallback() { - is_callback_engaged_ = false; - GetCallback().~Callback(); - } - - void InvokeCallback() ABSL_NO_THREAD_SAFETY_ANALYSIS { - std::move(GetCallback())(); - } - - private: - bool is_callback_engaged_; - alignas(Callback) char callback_buffer_[sizeof(Callback)]; -}; - -} // namespace cleanup_internal - -ABSL_NAMESPACE_END -} // namespace absl - -#endif // ABSL_CLEANUP_INTERNAL_CLEANUP_H_ diff --git a/abseil-cpp/absl/compiler_config_setting.bzl b/abseil-cpp/absl/compiler_config_setting.bzl new file mode 100644 index 00000000..66962294 --- /dev/null +++ b/abseil-cpp/absl/compiler_config_setting.bzl @@ -0,0 +1,38 @@ +# +# Copyright 2018 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Creates config_setting that allows selecting based on 'compiler' value.""" + +def create_llvm_config(name, visibility): + # The "do_not_use_tools_cpp_compiler_present" attribute exists to + # distinguish between older versions of Bazel that do not support + # "@bazel_tools//tools/cpp:compiler" flag_value, and newer ones that do. + # In the future, the only way to select on the compiler will be through + # flag_values{"@bazel_tools//tools/cpp:compiler"} and the else branch can + # be removed. + if hasattr(cc_common, "do_not_use_tools_cpp_compiler_present"): + native.config_setting( + name = name, + flag_values = { + "@bazel_tools//tools/cpp:compiler": "llvm", + }, + visibility = visibility, + ) + else: + native.config_setting( + name = name, + values = {"compiler": "llvm"}, + visibility = visibility, + ) diff --git a/abseil-cpp/absl/container/BUILD.bazel b/abseil-cpp/absl/container/BUILD.bazel index d01d78e5..8e72ad03 100644 --- a/abseil-cpp/absl/container/BUILD.bazel +++ b/abseil-cpp/absl/container/BUILD.bazel @@ -14,6 +14,7 @@ # limitations under the License. 
# +load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_library", "cc_test") load( "//absl:copts/configure_copts.bzl", "ABSL_DEFAULT_COPTS", @@ -217,6 +218,11 @@ cc_test( ], ) +NOTEST_TAGS_NONMOBILE = [ + "no_test_darwin_x86_64", + "no_test_loonix", +] + NOTEST_TAGS_MOBILE = [ "no_test_android_arm", "no_test_android_arm64", @@ -224,6 +230,8 @@ NOTEST_TAGS_MOBILE = [ "no_test_ios_x86_64", ] +NOTEST_TAGS = NOTEST_TAGS_MOBILE + NOTEST_TAGS_NONMOBILE + cc_library( name = "flat_hash_map", hdrs = ["flat_hash_map.h"], @@ -234,7 +242,6 @@ cc_library( ":hash_function_defaults", ":raw_hash_map", "//absl/algorithm:container", - "//absl/base:core_headers", "//absl/memory", ], ) @@ -244,7 +251,7 @@ cc_test( srcs = ["flat_hash_map_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, - tags = ["no_test_loonix"], + tags = NOTEST_TAGS_NONMOBILE, deps = [ ":flat_hash_map", ":hash_generator_testing", @@ -278,7 +285,7 @@ cc_test( srcs = ["flat_hash_set_test.cc"], copts = ABSL_TEST_COPTS + ["-DUNORDERED_SET_CXX17"], linkopts = ABSL_DEFAULT_LINKOPTS, - tags = ["no_test_loonix"], + tags = NOTEST_TAGS_NONMOBILE, deps = [ ":flat_hash_set", ":hash_generator_testing", @@ -301,10 +308,9 @@ cc_library( deps = [ ":container_memory", ":hash_function_defaults", - ":node_slot_policy", + ":node_hash_policy", ":raw_hash_map", "//absl/algorithm:container", - "//absl/base:core_headers", "//absl/memory", ], ) @@ -314,7 +320,7 @@ cc_test( srcs = ["node_hash_map_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, - tags = ["no_test_loonix"], + tags = NOTEST_TAGS_NONMOBILE, deps = [ ":hash_generator_testing", ":node_hash_map", @@ -334,10 +340,9 @@ cc_library( linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":hash_function_defaults", - ":node_slot_policy", + ":node_hash_policy", ":raw_hash_set", "//absl/algorithm:container", - "//absl/base:core_headers", "//absl/memory", ], ) @@ -347,7 +352,7 @@ cc_test( srcs = ["node_hash_set_test.cc"], copts = ABSL_TEST_COPTS + 
["-DUNORDERED_SET_CXX17"], linkopts = ABSL_DEFAULT_LINKOPTS, - tags = ["no_test_loonix"], + tags = NOTEST_TAGS_NONMOBILE, deps = [ ":node_hash_set", ":unordered_set_constructor_test", @@ -376,7 +381,7 @@ cc_test( srcs = ["internal/container_memory_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, - tags = ["no_test_loonix"], + tags = NOTEST_TAGS_NONMOBILE, deps = [ ":container_memory", ":test_instance_tracker", @@ -403,7 +408,7 @@ cc_test( srcs = ["internal/hash_function_defaults_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, - tags = NOTEST_TAGS_MOBILE + ["no_test_loonix"], + tags = NOTEST_TAGS, deps = [ ":hash_function_defaults", "//absl/hash", @@ -502,13 +507,12 @@ cc_library( copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ + ":have_sse", "//absl/base", - "//absl/base:config", "//absl/base:core_headers", + "//absl/base:exponential_biased", "//absl/debugging:stacktrace", "//absl/memory", - "//absl/profiling:exponential_biased", - "//absl/profiling:sample_recorder", "//absl/synchronization", "//absl/utility", ], @@ -518,14 +522,10 @@ cc_test( name = "hashtablez_sampler_test", srcs = ["internal/hashtablez_sampler_test.cc"], linkopts = ABSL_DEFAULT_LINKOPTS, - tags = [ - "no_test_wasm", - ], deps = [ ":hashtablez_sampler", - "//absl/base:config", + ":have_sse", "//absl/base:core_headers", - "//absl/profiling:sample_recorder", "//absl/synchronization", "//absl/synchronization:thread_pool", "//absl/time", @@ -534,21 +534,21 @@ cc_test( ) cc_library( - name = "node_slot_policy", - hdrs = ["internal/node_slot_policy.h"], + name = "node_hash_policy", + hdrs = ["internal/node_hash_policy.h"], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = ["//absl/base:config"], ) cc_test( - name = "node_slot_policy_test", - srcs = ["internal/node_slot_policy_test.cc"], + name = "node_hash_policy_test", + srcs = ["internal/node_hash_policy_test.cc"], copts = ABSL_TEST_COPTS, linkopts = 
ABSL_DEFAULT_LINKOPTS, deps = [ ":hash_policy_traits", - ":node_slot_policy", + ":node_hash_policy", "@com_google_googletest//:gtest_main", ], ) @@ -565,6 +565,14 @@ cc_library( ], ) +cc_library( + name = "have_sse", + hdrs = ["internal/have_sse.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = ["//visibility:private"], +) + cc_library( name = "common", hdrs = ["internal/common.h"], @@ -589,13 +597,14 @@ cc_library( ":hash_policy_traits", ":hashtable_debug_hooks", ":hashtablez_sampler", + ":have_sse", + ":layout", + "//absl/base:bits", "//absl/base:config", "//absl/base:core_headers", "//absl/base:endian", - "//absl/base:prefetch", "//absl/memory", "//absl/meta:type_traits", - "//absl/numeric:bits", "//absl/utility", ], ) @@ -605,7 +614,7 @@ cc_test( srcs = ["internal/raw_hash_set_test.cc"], copts = ABSL_TEST_COPTS, linkstatic = 1, - tags = NOTEST_TAGS_MOBILE + ["no_test_loonix"], + tags = NOTEST_TAGS, deps = [ ":container_memory", ":hash_function_defaults", @@ -615,52 +624,12 @@ cc_test( "//absl/base", "//absl/base:config", "//absl/base:core_headers", - "//absl/base:prefetch", "//absl/base:raw_logging_internal", "//absl/strings", "@com_google_googletest//:gtest_main", ], ) -cc_binary( - name = "raw_hash_set_benchmark", - testonly = 1, - srcs = ["internal/raw_hash_set_benchmark.cc"], - copts = ABSL_TEST_COPTS, - linkopts = ABSL_DEFAULT_LINKOPTS, - tags = ["benchmark"], - visibility = ["//visibility:private"], - deps = [ - ":hash_function_defaults", - ":raw_hash_set", - "//absl/base:raw_logging_internal", - "//absl/strings:str_format", - "@com_github_google_benchmark//:benchmark_main", - ], -) - -cc_binary( - name = "raw_hash_set_probe_benchmark", - testonly = 1, - srcs = ["internal/raw_hash_set_probe_benchmark.cc"], - copts = ABSL_TEST_COPTS, - linkopts = select({ - "//conditions:default": [], - }) + ABSL_DEFAULT_LINKOPTS, - tags = ["benchmark"], - visibility = ["//visibility:private"], - deps = [ - ":flat_hash_map", - 
":hash_function_defaults", - ":hashtable_debug", - ":raw_hash_set", - "//absl/random", - "//absl/random:distributions", - "//absl/strings", - "//absl/strings:str_format", - ], -) - cc_test( name = "raw_hash_set_allocator_test", size = "small", @@ -696,7 +665,7 @@ cc_test( srcs = ["internal/layout_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, - tags = NOTEST_TAGS_MOBILE + ["no_test_loonix"], + tags = NOTEST_TAGS, visibility = ["//visibility:private"], deps = [ ":layout", @@ -708,22 +677,6 @@ cc_test( ], ) -cc_binary( - name = "layout_benchmark", - testonly = 1, - srcs = ["internal/layout_benchmark.cc"], - copts = ABSL_TEST_COPTS, - linkopts = ABSL_DEFAULT_LINKOPTS, - tags = ["benchmark"], - visibility = ["//visibility:private"], - deps = [ - ":layout", - "//absl/base:core_headers", - "//absl/base:raw_logging_internal", - "@com_github_google_benchmark//:benchmark_main", - ], -) - cc_library( name = "tracked", testonly = 1, @@ -843,7 +796,7 @@ cc_test( srcs = ["internal/unordered_set_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, - tags = ["no_test_loonix"], + tags = NOTEST_TAGS_NONMOBILE, deps = [ ":unordered_set_constructor_test", ":unordered_set_lookup_test", @@ -858,7 +811,7 @@ cc_test( srcs = ["internal/unordered_map_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, - tags = ["no_test_loonix"], + tags = NOTEST_TAGS_NONMOBILE, deps = [ ":unordered_map_constructor_test", ":unordered_map_lookup_test", @@ -868,22 +821,6 @@ cc_test( ], ) -cc_test( - name = "sample_element_size_test", - srcs = ["sample_element_size_test.cc"], - copts = ABSL_TEST_COPTS, - linkopts = ABSL_DEFAULT_LINKOPTS, - tags = ["no_test_loonix"], - visibility = ["//visibility:private"], - deps = [ - ":flat_hash_map", - ":flat_hash_set", - ":node_hash_map", - ":node_hash_set", - "@com_google_googletest//:gtest_main", - ], -) - cc_library( name = "btree", srcs = [ @@ -903,7 +840,6 @@ cc_library( ":container_memory", ":layout", 
"//absl/base:core_headers", - "//absl/base:raw_logging_internal", "//absl/base:throw_delegate", "//absl/memory", "//absl/meta:type_traits", @@ -939,10 +875,6 @@ cc_test( copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, shard_count = 10, - tags = [ - "no_test_ios", - "no_test_wasm", - ], visibility = ["//visibility:private"], deps = [ ":btree", diff --git a/abseil-cpp/absl/container/CMakeLists.txt b/abseil-cpp/absl/container/CMakeLists.txt index 9b5c59a4..eb202c45 100644 --- a/abseil-cpp/absl/container/CMakeLists.txt +++ b/abseil-cpp/absl/container/CMakeLists.txt @@ -14,6 +14,15 @@ # limitations under the License. # +# This is deprecated and will be removed in the future. It also doesn't do +# anything anyways. Prefer to use the library associated with the API you are +# using. +absl_cc_library( + NAME + container + PUBLIC +) + absl_cc_library( NAME btree @@ -35,14 +44,12 @@ absl_cc_library( absl::core_headers absl::layout absl::memory - absl::raw_logging_internal absl::strings absl::throw_delegate absl::type_traits absl::utility ) -# Internal-only target, do not depend on directly. absl_cc_library( NAME btree_test_common @@ -82,10 +89,9 @@ absl_cc_test( absl::strings absl::test_instance_tracker absl::type_traits - GTest::gmock_main + gmock_main ) -# Internal-only target, do not depend on directly. absl_cc_library( NAME compressed_tuple @@ -112,7 +118,7 @@ absl_cc_test( absl::optional absl::test_instance_tracker absl::utility - GTest::gmock_main + gmock_main ) absl_cc_library( @@ -147,7 +153,7 @@ absl_cc_test( absl::exception_testing absl::hash_testing absl::memory - GTest::gmock_main + gmock_main ) absl_cc_test( @@ -161,10 +167,9 @@ absl_cc_test( absl::fixed_array absl::config absl::exception_safety_testing - GTest::gmock_main + gmock_main ) -# Internal-only target, do not depend on directly. absl_cc_library( NAME inlined_vector_internal @@ -197,7 +202,6 @@ absl_cc_library( PUBLIC ) -# Internal-only target, do not depend on directly. 
absl_cc_library( NAME counting_allocator @@ -227,7 +231,7 @@ absl_cc_test( absl::memory absl::raw_logging_internal absl::strings - GTest::gmock_main + gmock_main ) absl_cc_test( @@ -241,10 +245,9 @@ absl_cc_test( absl::inlined_vector absl::config absl::exception_safety_testing - GTest::gmock_main + gmock_main ) -# Internal-only target, do not depend on directly. absl_cc_library( NAME test_instance_tracker @@ -268,7 +271,7 @@ absl_cc_test( ${ABSL_TEST_COPTS} DEPS absl::test_instance_tracker - GTest::gmock_main + gmock_main ) absl_cc_library( @@ -280,7 +283,6 @@ absl_cc_library( ${ABSL_DEFAULT_COPTS} DEPS absl::container_memory - absl::core_headers absl::hash_function_defaults absl::raw_hash_map absl::algorithm_container @@ -304,7 +306,7 @@ absl_cc_test( absl::unordered_map_modifiers_test absl::any absl::raw_logging_internal - GTest::gmock_main + gmock_main ) absl_cc_library( @@ -342,7 +344,7 @@ absl_cc_test( absl::memory absl::raw_logging_internal absl::strings - GTest::gmock_main + gmock_main ) absl_cc_library( @@ -354,9 +356,8 @@ absl_cc_library( ${ABSL_DEFAULT_COPTS} DEPS absl::container_memory - absl::core_headers absl::hash_function_defaults - absl::node_slot_policy + absl::node_hash_policy absl::raw_hash_map absl::algorithm_container absl::memory @@ -378,7 +379,7 @@ absl_cc_test( absl::unordered_map_lookup_test absl::unordered_map_members_test absl::unordered_map_modifiers_test - GTest::gmock_main + gmock_main ) absl_cc_library( @@ -389,9 +390,8 @@ absl_cc_library( COPTS ${ABSL_DEFAULT_COPTS} DEPS - absl::core_headers absl::hash_function_defaults - absl::node_slot_policy + absl::node_hash_policy absl::raw_hash_set absl::algorithm_container absl::memory @@ -413,10 +413,9 @@ absl_cc_test( absl::unordered_set_lookup_test absl::unordered_set_members_test absl::unordered_set_modifiers_test - GTest::gmock_main + gmock_main ) -# Internal-only target, do not depend on directly. 
absl_cc_library( NAME container_memory @@ -443,10 +442,9 @@ absl_cc_test( absl::container_memory absl::strings absl::test_instance_tracker - GTest::gmock_main + gmock_main ) -# Internal-only target, do not depend on directly. absl_cc_library( NAME hash_function_defaults @@ -476,10 +474,9 @@ absl_cc_test( absl::hash absl::random_random absl::strings - GTest::gmock_main + gmock_main ) -# Internal-only target, do not depend on directly. absl_cc_library( NAME hash_generator_testing @@ -497,7 +494,6 @@ absl_cc_library( TESTONLY ) -# Internal-only target, do not depend on directly. absl_cc_library( NAME hash_policy_testing @@ -520,10 +516,9 @@ absl_cc_test( ${ABSL_TEST_COPTS} DEPS absl::hash_policy_testing - GTest::gmock_main + gmock_main ) -# Internal-only target, do not depend on directly. absl_cc_library( NAME hash_policy_traits @@ -545,10 +540,9 @@ absl_cc_test( ${ABSL_TEST_COPTS} DEPS absl::hash_policy_traits - GTest::gmock_main + gmock_main ) -# Internal-only target, do not depend on directly. absl_cc_library( NAME hashtablez_sampler @@ -561,9 +555,8 @@ absl_cc_library( ${ABSL_DEFAULT_COPTS} DEPS absl::base - absl::config absl::exponential_biased - absl::sample_recorder + absl::have_sse absl::synchronization ) @@ -575,12 +568,11 @@ absl_cc_test( COPTS ${ABSL_TEST_COPTS} DEPS - absl::config absl::hashtablez_sampler - GTest::gmock_main + absl::have_sse + gmock_main ) -# Internal-only target, do not depend on directly. absl_cc_library( NAME hashtable_debug @@ -592,7 +584,6 @@ absl_cc_library( absl::hashtable_debug_hooks ) -# Internal-only target, do not depend on directly. absl_cc_library( NAME hashtable_debug_hooks @@ -605,12 +596,20 @@ absl_cc_library( PUBLIC ) -# Internal-only target, do not depend on directly. 
absl_cc_library( NAME - node_slot_policy + have_sse HDRS - "internal/node_slot_policy.h" + "internal/have_sse.h" + COPTS + ${ABSL_DEFAULT_COPTS} +) + +absl_cc_library( + NAME + node_hash_policy + HDRS + "internal/node_hash_policy.h" COPTS ${ABSL_DEFAULT_COPTS} DEPS @@ -620,18 +619,17 @@ absl_cc_library( absl_cc_test( NAME - node_slot_policy_test + node_hash_policy_test SRCS - "internal/node_slot_policy_test.cc" + "internal/node_hash_policy_test.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::hash_policy_traits - absl::node_slot_policy - GTest::gmock_main + absl::node_hash_policy + gmock_main ) -# Internal-only target, do not depend on directly. absl_cc_library( NAME raw_hash_map @@ -646,7 +644,6 @@ absl_cc_library( PUBLIC ) -# Internal-only target, do not depend on directly. absl_cc_library( NAME container_common @@ -658,7 +655,6 @@ absl_cc_library( absl::type_traits ) -# Internal-only target, do not depend on directly. absl_cc_library( NAME raw_hash_set @@ -678,10 +674,11 @@ absl_cc_library( absl::endian absl::hash_policy_traits absl::hashtable_debug_hooks + absl::have_sse + absl::layout absl::memory absl::meta absl::optional - absl::prefetch absl::utility absl::hashtablez_sampler PUBLIC @@ -703,10 +700,9 @@ absl_cc_test( absl::base absl::config absl::core_headers - absl::prefetch absl::raw_logging_internal absl::strings - GTest::gmock_main + gmock_main ) absl_cc_test( @@ -720,10 +716,9 @@ absl_cc_test( absl::raw_hash_set absl::tracked absl::core_headers - GTest::gmock_main + gmock_main ) -# Internal-only target, do not depend on directly. absl_cc_library( NAME layout @@ -754,10 +749,9 @@ absl_cc_test( absl::core_headers absl::raw_logging_internal absl::span - GTest::gmock_main + gmock_main ) -# Internal-only target, do not depend on directly. absl_cc_library( NAME tracked @@ -770,7 +764,6 @@ absl_cc_library( TESTONLY ) -# Internal-only target, do not depend on directly. 
absl_cc_library( NAME unordered_map_constructor_test @@ -781,11 +774,10 @@ absl_cc_library( DEPS absl::hash_generator_testing absl::hash_policy_testing - GTest::gmock + gmock TESTONLY ) -# Internal-only target, do not depend on directly. absl_cc_library( NAME unordered_map_lookup_test @@ -796,11 +788,10 @@ absl_cc_library( DEPS absl::hash_generator_testing absl::hash_policy_testing - GTest::gmock + gmock TESTONLY ) -# Internal-only target, do not depend on directly. absl_cc_library( NAME unordered_map_members_test @@ -810,11 +801,10 @@ absl_cc_library( ${ABSL_TEST_COPTS} DEPS absl::type_traits - GTest::gmock + gmock TESTONLY ) -# Internal-only target, do not depend on directly. absl_cc_library( NAME unordered_map_modifiers_test @@ -825,11 +815,10 @@ absl_cc_library( DEPS absl::hash_generator_testing absl::hash_policy_testing - GTest::gmock + gmock TESTONLY ) -# Internal-only target, do not depend on directly. absl_cc_library( NAME unordered_set_constructor_test @@ -840,11 +829,10 @@ absl_cc_library( DEPS absl::hash_generator_testing absl::hash_policy_testing - GTest::gmock + gmock TESTONLY ) -# Internal-only target, do not depend on directly. absl_cc_library( NAME unordered_set_lookup_test @@ -855,11 +843,10 @@ absl_cc_library( DEPS absl::hash_generator_testing absl::hash_policy_testing - GTest::gmock + gmock TESTONLY ) -# Internal-only target, do not depend on directly. absl_cc_library( NAME unordered_set_members_test @@ -869,11 +856,10 @@ absl_cc_library( ${ABSL_TEST_COPTS} DEPS absl::type_traits - GTest::gmock + gmock TESTONLY ) -# Internal-only target, do not depend on directly. 
absl_cc_library( NAME unordered_set_modifiers_test @@ -884,7 +870,7 @@ absl_cc_library( DEPS absl::hash_generator_testing absl::hash_policy_testing - GTest::gmock + gmock TESTONLY ) @@ -900,7 +886,7 @@ absl_cc_test( absl::unordered_set_lookup_test absl::unordered_set_members_test absl::unordered_set_modifiers_test - GTest::gmock_main + gmock_main ) absl_cc_test( @@ -915,20 +901,5 @@ absl_cc_test( absl::unordered_map_lookup_test absl::unordered_map_members_test absl::unordered_map_modifiers_test - GTest::gmock_main -) - -absl_cc_test( - NAME - sample_element_size_test - SRCS - "sample_element_size_test.cc" - COPTS - ${ABSL_TEST_COPTS} - DEPS - absl::flat_hash_map - absl::flat_hash_set - absl::node_hash_map - absl::node_hash_set - GTest::gmock_main + gmock_main ) diff --git a/abseil-cpp/absl/container/btree_benchmark.cc b/abseil-cpp/absl/container/btree_benchmark.cc index 0ca497c8..46798676 100644 --- a/abseil-cpp/absl/container/btree_benchmark.cc +++ b/abseil-cpp/absl/container/btree_benchmark.cc @@ -26,7 +26,6 @@ #include #include -#include "benchmark/benchmark.h" #include "absl/base/internal/raw_logging.h" #include "absl/container/btree_map.h" #include "absl/container/btree_set.h" @@ -40,6 +39,7 @@ #include "absl/strings/cord.h" #include "absl/strings/str_format.h" #include "absl/time/time.h" +#include "benchmark/benchmark.h" namespace absl { ABSL_NAMESPACE_BEGIN @@ -101,6 +101,39 @@ void BM_InsertSorted(benchmark::State& state) { BM_InsertImpl(state, true); } +// container::insert sometimes returns a pair and sometimes +// returns an iterator (for multi- containers). +template +Iter GetIterFromInsert(const std::pair& pair) { + return pair.first; +} +template +Iter GetIterFromInsert(const Iter iter) { + return iter; +} + +// Benchmark insertion of values into a container at the end. 
+template +void BM_InsertEnd(benchmark::State& state) { + using V = typename remove_pair_const::type; + typename KeyOfValue::type key_of_value; + + T container; + const int kSize = 10000; + for (int i = 0; i < kSize; ++i) { + container.insert(Generator(kSize)(i)); + } + V v = Generator(kSize)(kSize - 1); + typename T::key_type k = key_of_value(v); + + auto it = container.find(k); + while (state.KeepRunning()) { + // Repeatedly removing then adding v. + container.erase(it); + it = GetIterFromInsert(container.insert(v)); + } +} + // Benchmark inserting the first few elements in a container. In b-tree, this is // when the root node grows. template @@ -153,9 +186,9 @@ void BM_FullLookup(benchmark::State& state) { BM_LookupImpl(state, true); } -// Benchmark erasing values from a container. +// Benchmark deletion of values from a container. template -void BM_Erase(benchmark::State& state) { +void BM_Delete(benchmark::State& state) { using V = typename remove_pair_const::type; typename KeyOfValue::type key_of_value; std::vector values = GenerateValues(kBenchmarkValues); @@ -180,9 +213,9 @@ void BM_Erase(benchmark::State& state) { } } -// Benchmark erasing multiple values from a container. +// Benchmark deletion of multiple values from a container. template -void BM_EraseRange(benchmark::State& state) { +void BM_DeleteRange(benchmark::State& state) { using V = typename remove_pair_const::type; typename KeyOfValue::type key_of_value; std::vector values = GenerateValues(kBenchmarkValues); @@ -222,40 +255,6 @@ void BM_EraseRange(benchmark::State& state) { } } -// Predicate that erases every other element. We can't use a lambda because -// C++11 doesn't support generic lambdas. -// TODO(b/207389011): consider adding benchmarks that remove different fractions -// of keys (e.g. 10%, 90%). -struct EraseIfPred { - uint64_t i = 0; - template - bool operator()(const T&) { - return ++i % 2; - } -}; - -// Benchmark erasing multiple values from a container with a predicate. 
-template -void BM_EraseIf(benchmark::State& state) { - using V = typename remove_pair_const::type; - std::vector values = GenerateValues(kBenchmarkValues); - - // Removes half of the keys per batch. - const int batch_size = (kBenchmarkValues + 1) / 2; - EraseIfPred pred; - while (state.KeepRunningBatch(batch_size)) { - state.PauseTiming(); - { - T container(values.begin(), values.end()); - state.ResumeTiming(); - erase_if(container, pred); - benchmark::DoNotOptimize(container); - state.PauseTiming(); - } - state.ResumeTiming(); - } -} - // Benchmark steady-state insert (into first half of range) and remove (from // second half of range), treating the container approximately like a queue with // log-time access for all elements. This benchmark does not test the case where @@ -511,14 +510,15 @@ BTREE_TYPES(Time); void BM_##type##_##func(benchmark::State& state) { BM_##func(state); } \ BENCHMARK(BM_##type##_##func) -#define MY_BENCHMARK3_STL(type) \ +#define MY_BENCHMARK3(type) \ MY_BENCHMARK4(type, Insert); \ MY_BENCHMARK4(type, InsertSorted); \ + MY_BENCHMARK4(type, InsertEnd); \ MY_BENCHMARK4(type, InsertSmall); \ MY_BENCHMARK4(type, Lookup); \ MY_BENCHMARK4(type, FullLookup); \ - MY_BENCHMARK4(type, Erase); \ - MY_BENCHMARK4(type, EraseRange); \ + MY_BENCHMARK4(type, Delete); \ + MY_BENCHMARK4(type, DeleteRange); \ MY_BENCHMARK4(type, QueueAddRem); \ MY_BENCHMARK4(type, MixedAddRem); \ MY_BENCHMARK4(type, Fifo); \ @@ -526,13 +526,9 @@ BTREE_TYPES(Time); MY_BENCHMARK4(type, InsertRangeRandom); \ MY_BENCHMARK4(type, InsertRangeSorted) -#define MY_BENCHMARK3(type) \ - MY_BENCHMARK4(type, EraseIf); \ - MY_BENCHMARK3_STL(type) - #define MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(type) \ - MY_BENCHMARK3_STL(stl_##type); \ - MY_BENCHMARK3_STL(stl_unordered_##type); \ + MY_BENCHMARK3(stl_##type); \ + MY_BENCHMARK3(stl_unordered_##type); \ MY_BENCHMARK3(btree_256_##type) #define MY_BENCHMARK2(type) \ @@ -722,12 +718,12 @@ double ContainerInfo(const btree_map>& b) { btree_set>; \ 
using btree_256_map_size##SIZE##copies##SIZE##ptr = \ btree_map>; \ - MY_BENCHMARK3_STL(stl_set_size##SIZE##copies##SIZE##ptr); \ - MY_BENCHMARK3_STL(stl_unordered_set_size##SIZE##copies##SIZE##ptr); \ + MY_BENCHMARK3(stl_set_size##SIZE##copies##SIZE##ptr); \ + MY_BENCHMARK3(stl_unordered_set_size##SIZE##copies##SIZE##ptr); \ MY_BENCHMARK3(flat_hash_set_size##SIZE##copies##SIZE##ptr); \ MY_BENCHMARK3(btree_256_set_size##SIZE##copies##SIZE##ptr); \ - MY_BENCHMARK3_STL(stl_map_size##SIZE##copies##SIZE##ptr); \ - MY_BENCHMARK3_STL(stl_unordered_map_size##SIZE##copies##SIZE##ptr); \ + MY_BENCHMARK3(stl_map_size##SIZE##copies##SIZE##ptr); \ + MY_BENCHMARK3(stl_unordered_map_size##SIZE##copies##SIZE##ptr); \ MY_BENCHMARK3(flat_hash_map_size##SIZE##copies##SIZE##ptr); \ MY_BENCHMARK3(btree_256_map_size##SIZE##copies##SIZE##ptr) diff --git a/abseil-cpp/absl/container/btree_map.h b/abseil-cpp/absl/container/btree_map.h index 286817f1..abc09b0a 100644 --- a/abseil-cpp/absl/container/btree_map.h +++ b/abseil-cpp/absl/container/btree_map.h @@ -35,17 +35,14 @@ // // However, these types should not be considered drop-in replacements for // `std::map` and `std::multimap` as there are some API differences, which are -// noted in this header file. The most consequential differences with respect to -// migrating to b-tree from the STL types are listed in the next paragraph. -// Other API differences are minor. +// noted in this header file. // // Importantly, insertions and deletions may invalidate outstanding iterators, // pointers, and references to elements. Such invalidations are typically only // an issue if insertion and deletion operations are interleaved with the use of // more than one iterator, pointer, or reference simultaneously. For this // reason, `insert()` and `erase()` return a valid iterator at the current -// position. Another important difference is that key-types must be -// copy-constructible. +// position. 
#ifndef ABSL_CONTAINER_BTREE_MAP_H_ #define ABSL_CONTAINER_BTREE_MAP_H_ @@ -56,14 +53,6 @@ namespace absl { ABSL_NAMESPACE_BEGIN -namespace container_internal { - -template -struct map_params; - -} // namespace container_internal - // absl::btree_map<> // // An `absl::btree_map` is an ordered associative container of @@ -85,7 +74,7 @@ class btree_map : public container_internal::btree_map_container< container_internal::btree>> { + /*Multi=*/false>>> { using Base = typename btree_map::btree_map_container; public: @@ -377,8 +366,8 @@ class btree_map // Determines whether an element comparing equal to the given `key` exists // within the `btree_map`, returning `true` if so or `false` otherwise. // - // Supports heterogeneous lookup, provided that the map has a compatible - // heterogeneous comparator. + // Supports heterogeneous lookup, provided that the map is provided a + // compatible heterogeneous comparator. using Base::contains; // btree_map::count() @@ -389,14 +378,15 @@ class btree_map // the `btree_map`. Note that this function will return either `1` or `0` // since duplicate elements are not allowed within a `btree_map`. // - // Supports heterogeneous lookup, provided that the map has a compatible - // heterogeneous comparator. + // Supports heterogeneous lookup, provided that the map is provided a + // compatible heterogeneous comparator. using Base::count; // btree_map::equal_range() // - // Returns a half-open range [first, last), defined by a `std::pair` of two - // iterators, containing all elements with the passed key in the `btree_map`. + // Returns a closed range [first, last], defined by a `std::pair` of two + // iterators, containing all elements with the passed key in the + // `btree_map`. using Base::equal_range; // btree_map::find() @@ -406,34 +396,10 @@ class btree_map // // Finds an element with the passed `key` within the `btree_map`. // - // Supports heterogeneous lookup, provided that the map has a compatible - // heterogeneous comparator. 
+ // Supports heterogeneous lookup, provided that the map is provided a + // compatible heterogeneous comparator. using Base::find; - // btree_map::lower_bound() - // - // template iterator lower_bound(const K& key): - // template const_iterator lower_bound(const K& key) const: - // - // Finds the first element with a key that is not less than `key` within the - // `btree_map`. - // - // Supports heterogeneous lookup, provided that the map has a compatible - // heterogeneous comparator. - using Base::lower_bound; - - // btree_map::upper_bound() - // - // template iterator upper_bound(const K& key): - // template const_iterator upper_bound(const K& key) const: - // - // Finds the first element with a key that is greater than `key` within the - // `btree_map`. - // - // Supports heterogeneous lookup, provided that the map has a compatible - // heterogeneous comparator. - using Base::upper_bound; - // btree_map::operator[]() // // Returns a reference to the value mapped to the passed key within the @@ -478,11 +444,15 @@ void swap(btree_map &x, btree_map &y) { // absl::erase_if(absl::btree_map<>, Pred) // // Erases all elements that satisfy the predicate pred from the container. -// Returns the number of erased elements. template -typename btree_map::size_type erase_if( - btree_map &map, Pred pred) { - return container_internal::btree_access::erase_if(map, std::move(pred)); +void erase_if(btree_map &map, Pred pred) { + for (auto it = map.begin(); it != map.end();) { + if (pred(*it)) { + it = map.erase(it); + } else { + ++it; + } + } } // absl::btree_multimap @@ -507,7 +477,7 @@ class btree_multimap : public container_internal::btree_multimap_container< container_internal::btree>> { + /*Multi=*/true>>> { using Base = typename btree_multimap::btree_multimap_container; public: @@ -700,8 +670,9 @@ class btree_multimap // btree_multimap::merge() // - // Extracts all elements from a given `source` btree_multimap into this - // `btree_multimap`. 
+ // Extracts elements from a given `source` btree_multimap into this + // `btree_multimap`. If the destination `btree_multimap` already contains an + // element with an equivalent key, that element is not extracted. using Base::merge; // btree_multimap::swap(btree_multimap& other) @@ -721,8 +692,8 @@ class btree_multimap // Determines whether an element comparing equal to the given `key` exists // within the `btree_multimap`, returning `true` if so or `false` otherwise. // - // Supports heterogeneous lookup, provided that the map has a compatible - // heterogeneous comparator. + // Supports heterogeneous lookup, provided that the map is provided a + // compatible heterogeneous comparator. using Base::contains; // btree_multimap::count() @@ -732,13 +703,13 @@ class btree_multimap // Returns the number of elements comparing equal to the given `key` within // the `btree_multimap`. // - // Supports heterogeneous lookup, provided that the map has a compatible - // heterogeneous comparator. + // Supports heterogeneous lookup, provided that the map is provided a + // compatible heterogeneous comparator. using Base::count; // btree_multimap::equal_range() // - // Returns a half-open range [first, last), defined by a `std::pair` of two + // Returns a closed range [first, last], defined by a `std::pair` of two // iterators, containing all elements with the passed key in the // `btree_multimap`. using Base::equal_range; @@ -750,34 +721,10 @@ class btree_multimap // // Finds an element with the passed `key` within the `btree_multimap`. // - // Supports heterogeneous lookup, provided that the map has a compatible - // heterogeneous comparator. + // Supports heterogeneous lookup, provided that the map is provided a + // compatible heterogeneous comparator. 
using Base::find; - // btree_multimap::lower_bound() - // - // template iterator lower_bound(const K& key): - // template const_iterator lower_bound(const K& key) const: - // - // Finds the first element with a key that is not less than `key` within the - // `btree_multimap`. - // - // Supports heterogeneous lookup, provided that the map has a compatible - // heterogeneous comparator. - using Base::lower_bound; - - // btree_multimap::upper_bound() - // - // template iterator upper_bound(const K& key): - // template const_iterator upper_bound(const K& key) const: - // - // Finds the first element with a key that is greater than `key` within the - // `btree_multimap`. - // - // Supports heterogeneous lookup, provided that the map has a compatible - // heterogeneous comparator. - using Base::upper_bound; - // btree_multimap::get_allocator() // // Returns the allocator function associated with this `btree_multimap`. @@ -805,45 +752,16 @@ void swap(btree_multimap &x, btree_multimap &y) { // absl::erase_if(absl::btree_multimap<>, Pred) // // Erases all elements that satisfy the predicate pred from the container. -// Returns the number of erased elements. template -typename btree_multimap::size_type erase_if( - btree_multimap &map, Pred pred) { - return container_internal::btree_access::erase_if(map, std::move(pred)); -} - -namespace container_internal { - -// A parameters structure for holding the type parameters for a btree_map. -// Compare and Alloc should be nothrow copy-constructible. -template -struct map_params : common_params> { - using super_type = typename map_params::common_params; - using mapped_type = Data; - // This type allows us to move keys when it is safe to do so. It is safe - // for maps in which value_type and mutable_value_type are layout compatible. 
- using slot_policy = typename super_type::slot_policy; - using slot_type = typename super_type::slot_type; - using value_type = typename super_type::value_type; - using init_type = typename super_type::init_type; - - template - static auto key(const V &value) -> decltype(value.first) { - return value.first; +void erase_if(btree_multimap &map, Pred pred) { + for (auto it = map.begin(); it != map.end();) { + if (pred(*it)) { + it = map.erase(it); + } else { + ++it; + } } - static const Key &key(const slot_type *s) { return slot_policy::key(s); } - static const Key &key(slot_type *s) { return slot_policy::key(s); } - // For use in node handle. - static auto mutable_key(slot_type *s) - -> decltype(slot_policy::mutable_key(s)) { - return slot_policy::mutable_key(s); - } - static mapped_type &value(value_type *value) { return value->second; } -}; - -} // namespace container_internal +} ABSL_NAMESPACE_END } // namespace absl diff --git a/abseil-cpp/absl/container/btree_set.h b/abseil-cpp/absl/container/btree_set.h index 695b09f5..21ef0a03 100644 --- a/abseil-cpp/absl/container/btree_set.h +++ b/abseil-cpp/absl/container/btree_set.h @@ -35,9 +35,7 @@ // // However, these types should not be considered drop-in replacements for // `std::set` and `std::multiset` as there are some API differences, which are -// noted in this header file. The most consequential differences with respect to -// migrating to b-tree from the STL types are listed in the next paragraph. -// Other API differences are minor. +// noted in this header file. // // Importantly, insertions and deletions may invalidate outstanding iterators, // pointers, and references to elements. 
Such invalidations are typically only @@ -55,17 +53,6 @@ namespace absl { ABSL_NAMESPACE_BEGIN -namespace container_internal { - -template -struct set_slot_policy; - -template -struct set_params; - -} // namespace container_internal - // absl::btree_set<> // // An `absl::btree_set` is an ordered associative container of unique key @@ -87,7 +74,7 @@ class btree_set : public container_internal::btree_set_container< container_internal::btree>> { + /*Multi=*/false>>> { using Base = typename btree_set::btree_set_container; public: @@ -313,8 +300,8 @@ class btree_set // Determines whether an element comparing equal to the given `key` exists // within the `btree_set`, returning `true` if so or `false` otherwise. // - // Supports heterogeneous lookup, provided that the set has a compatible - // heterogeneous comparator. + // Supports heterogeneous lookup, provided that the set is provided a + // compatible heterogeneous comparator. using Base::contains; // btree_set::count() @@ -325,8 +312,8 @@ class btree_set // the `btree_set`. Note that this function will return either `1` or `0` // since duplicate elements are not allowed within a `btree_set`. // - // Supports heterogeneous lookup, provided that the set has a compatible - // heterogeneous comparator. + // Supports heterogeneous lookup, provided that the set is provided a + // compatible heterogeneous comparator. using Base::count; // btree_set::equal_range() @@ -343,32 +330,10 @@ class btree_set // // Finds an element with the passed `key` within the `btree_set`. // - // Supports heterogeneous lookup, provided that the set has a compatible - // heterogeneous comparator. + // Supports heterogeneous lookup, provided that the set is provided a + // compatible heterogeneous comparator. 
using Base::find; - // btree_set::lower_bound() - // - // template iterator lower_bound(const K& key): - // template const_iterator lower_bound(const K& key) const: - // - // Finds the first element that is not less than `key` within the `btree_set`. - // - // Supports heterogeneous lookup, provided that the set has a compatible - // heterogeneous comparator. - using Base::lower_bound; - - // btree_set::upper_bound() - // - // template iterator upper_bound(const K& key): - // template const_iterator upper_bound(const K& key) const: - // - // Finds the first element that is greater than `key` within the `btree_set`. - // - // Supports heterogeneous lookup, provided that the set has a compatible - // heterogeneous comparator. - using Base::upper_bound; - // btree_set::get_allocator() // // Returns the allocator function associated with this `btree_set`. @@ -398,11 +363,15 @@ void swap(btree_set &x, btree_set &y) { // absl::erase_if(absl::btree_set<>, Pred) // // Erases all elements that satisfy the predicate pred from the container. -// Returns the number of erased elements. template -typename btree_set::size_type erase_if(btree_set &set, - Pred pred) { - return container_internal::btree_access::erase_if(set, std::move(pred)); +void erase_if(btree_set &set, Pred pred) { + for (auto it = set.begin(); it != set.end();) { + if (pred(*it)) { + it = set.erase(it); + } else { + ++it; + } + } } // absl::btree_multiset<> @@ -427,7 +396,7 @@ class btree_multiset : public container_internal::btree_multiset_container< container_internal::btree>> { + /*Multi=*/true>>> { using Base = typename btree_multiset::btree_multiset_container; public: @@ -613,8 +582,9 @@ class btree_multiset // btree_multiset::merge() // - // Extracts all elements from a given `source` btree_multiset into this - // `btree_multiset`. + // Extracts elements from a given `source` btree_multiset into this + // `btree_multiset`. 
If the destination `btree_multiset` already contains an + // element with an equivalent key, that element is not extracted. using Base::merge; // btree_multiset::swap(btree_multiset& other) @@ -634,8 +604,8 @@ class btree_multiset // Determines whether an element comparing equal to the given `key` exists // within the `btree_multiset`, returning `true` if so or `false` otherwise. // - // Supports heterogeneous lookup, provided that the set has a compatible - // heterogeneous comparator. + // Supports heterogeneous lookup, provided that the set is provided a + // compatible heterogeneous comparator. using Base::contains; // btree_multiset::count() @@ -645,8 +615,8 @@ class btree_multiset // Returns the number of elements comparing equal to the given `key` within // the `btree_multiset`. // - // Supports heterogeneous lookup, provided that the set has a compatible - // heterogeneous comparator. + // Supports heterogeneous lookup, provided that the set is provided a + // compatible heterogeneous comparator. using Base::count; // btree_multiset::equal_range() @@ -663,34 +633,10 @@ class btree_multiset // // Finds an element with the passed `key` within the `btree_multiset`. // - // Supports heterogeneous lookup, provided that the set has a compatible - // heterogeneous comparator. + // Supports heterogeneous lookup, provided that the set is provided a + // compatible heterogeneous comparator. using Base::find; - // btree_multiset::lower_bound() - // - // template iterator lower_bound(const K& key): - // template const_iterator lower_bound(const K& key) const: - // - // Finds the first element that is not less than `key` within the - // `btree_multiset`. - // - // Supports heterogeneous lookup, provided that the set has a compatible - // heterogeneous comparator. 
- using Base::lower_bound; - - // btree_multiset::upper_bound() - // - // template iterator upper_bound(const K& key): - // template const_iterator upper_bound(const K& key) const: - // - // Finds the first element that is greater than `key` within the - // `btree_multiset`. - // - // Supports heterogeneous lookup, provided that the set has a compatible - // heterogeneous comparator. - using Base::upper_bound; - // btree_multiset::get_allocator() // // Returns the allocator function associated with this `btree_multiset`. @@ -720,72 +666,16 @@ void swap(btree_multiset &x, btree_multiset &y) { // absl::erase_if(absl::btree_multiset<>, Pred) // // Erases all elements that satisfy the predicate pred from the container. -// Returns the number of erased elements. template -typename btree_multiset::size_type erase_if( - btree_multiset & set, Pred pred) { - return container_internal::btree_access::erase_if(set, std::move(pred)); -} - -namespace container_internal { - -// This type implements the necessary functions from the -// absl::container_internal::slot_type interface for btree_(multi)set. 
-template -struct set_slot_policy { - using slot_type = Key; - using value_type = Key; - using mutable_value_type = Key; - - static value_type &element(slot_type *slot) { return *slot; } - static const value_type &element(const slot_type *slot) { return *slot; } - - template - static void construct(Alloc *alloc, slot_type *slot, Args &&...args) { - absl::allocator_traits::construct(*alloc, slot, - std::forward(args)...); - } - - template - static void construct(Alloc *alloc, slot_type *slot, slot_type *other) { - absl::allocator_traits::construct(*alloc, slot, std::move(*other)); - } - - template - static void construct(Alloc *alloc, slot_type *slot, const slot_type *other) { - absl::allocator_traits::construct(*alloc, slot, *other); - } - - template - static void destroy(Alloc *alloc, slot_type *slot) { - absl::allocator_traits::destroy(*alloc, slot); +void erase_if(btree_multiset &set, Pred pred) { + for (auto it = set.begin(); it != set.end();) { + if (pred(*it)) { + it = set.erase(it); + } else { + ++it; + } } - - template - static void transfer(Alloc *alloc, slot_type *new_slot, slot_type *old_slot) { - construct(alloc, new_slot, old_slot); - destroy(alloc, old_slot); - } -}; - -// A parameters structure for holding the type parameters for a btree_set. -// Compare and Alloc should be nothrow copy-constructible. 
-template -struct set_params : common_params> { - using value_type = Key; - using slot_type = typename set_params::common_params::slot_type; - - template - static const V &key(const V &value) { - return value; - } - static const Key &key(const slot_type *slot) { return *slot; } - static const Key &key(slot_type *slot) { return *slot; } -}; - -} // namespace container_internal +} ABSL_NAMESPACE_END } // namespace absl diff --git a/abseil-cpp/absl/container/btree_test.cc b/abseil-cpp/absl/container/btree_test.cc index f20f3430..1bfa0c20 100644 --- a/abseil-cpp/absl/container/btree_test.cc +++ b/abseil-cpp/absl/container/btree_test.cc @@ -14,14 +14,10 @@ #include "absl/container/btree_test.h" -#include -#include #include -#include #include #include #include -#include #include #include #include @@ -59,7 +55,6 @@ using ::testing::ElementsAreArray; using ::testing::IsEmpty; using ::testing::IsNull; using ::testing::Pair; -using ::testing::SizeIs; template void CheckPairEquals(const T &x, const U &y) { @@ -599,7 +594,7 @@ void BtreeTest() { using V = typename remove_pair_const::type; const std::vector random_values = GenerateValuesWithSeed( absl::GetFlag(FLAGS_test_values), 4 * absl::GetFlag(FLAGS_test_values), - GTEST_FLAG_GET(random_seed)); + testing::GTEST_FLAG(random_seed)); unique_checker container; @@ -623,7 +618,7 @@ void BtreeMultiTest() { using V = typename remove_pair_const::type; const std::vector random_values = GenerateValuesWithSeed( absl::GetFlag(FLAGS_test_values), 4 * absl::GetFlag(FLAGS_test_values), - GTEST_FLAG_GET(random_seed)); + testing::GTEST_FLAG(random_seed)); multi_checker container; @@ -1187,114 +1182,12 @@ TEST(Btree, RangeCtorSanity) { EXPECT_EQ(1, tmap.size()); } -} // namespace - -class BtreeNodePeer { - public: - // Yields the size of a leaf node with a specific number of values. 
- template - constexpr static size_t GetTargetNodeSize(size_t target_values_per_node) { - return btree_node< - set_params, std::allocator, - /*TargetNodeSize=*/256, // This parameter isn't used here. - /*Multi=*/false>>::SizeWithNSlots(target_values_per_node); - } - - // Yields the number of slots in a (non-root) leaf node for this btree. - template - constexpr static size_t GetNumSlotsPerNode() { - return btree_node::kNodeSlots; - } - - template - constexpr static size_t GetMaxFieldType() { - return std::numeric_limits< - typename btree_node::field_type>::max(); - } - - template - constexpr static bool UsesLinearNodeSearch() { - return btree_node::use_linear_search::value; - } - - template - constexpr static bool UsesGenerations() { - return Btree::params_type::kEnableGenerations; - } -}; - -namespace { - -class BtreeMapTest : public ::testing::Test { - public: - struct Key {}; - struct Cmp { - template - bool operator()(T, T) const { - return false; - } - }; - - struct KeyLin { - using absl_btree_prefer_linear_node_search = std::true_type; - }; - struct CmpLin : Cmp { - using absl_btree_prefer_linear_node_search = std::true_type; - }; - - struct KeyBin { - using absl_btree_prefer_linear_node_search = std::false_type; - }; - struct CmpBin : Cmp { - using absl_btree_prefer_linear_node_search = std::false_type; - }; - - template - static bool IsLinear() { - return BtreeNodePeer::UsesLinearNodeSearch>(); - } -}; - -TEST_F(BtreeMapTest, TestLinearSearchPreferredForKeyLinearViaAlias) { - // Test requesting linear search by directly exporting an alias. 
- EXPECT_FALSE((IsLinear())); - EXPECT_TRUE((IsLinear())); - EXPECT_TRUE((IsLinear())); - EXPECT_TRUE((IsLinear())); -} - -TEST_F(BtreeMapTest, LinearChoiceTree) { - // Cmp has precedence, and is forcing binary - EXPECT_FALSE((IsLinear())); - EXPECT_FALSE((IsLinear())); - EXPECT_FALSE((IsLinear())); - EXPECT_FALSE((IsLinear())); - EXPECT_FALSE((IsLinear())); - // Cmp has precedence, and is forcing linear - EXPECT_TRUE((IsLinear())); - EXPECT_TRUE((IsLinear())); - EXPECT_TRUE((IsLinear())); - EXPECT_TRUE((IsLinear())); - EXPECT_TRUE((IsLinear())); - // Cmp has no preference, Key determines linear vs binary. - EXPECT_FALSE((IsLinear())); - EXPECT_TRUE((IsLinear())); - EXPECT_FALSE((IsLinear())); - // arithmetic key w/ std::less or std::greater: linear - EXPECT_TRUE((IsLinear>())); - EXPECT_TRUE((IsLinear>())); - // arithmetic key w/ custom compare: binary - EXPECT_FALSE((IsLinear())); - // non-arithmetic key: binary - EXPECT_FALSE((IsLinear>())); -} - TEST(Btree, BtreeMapCanHoldMoveOnlyTypes) { absl::btree_map> m; std::unique_ptr &v = m["A"]; EXPECT_TRUE(v == nullptr); - v = absl::make_unique("X"); + v.reset(new std::string("X")); auto iter = m.find("A"); EXPECT_EQ("X", *iter->second); @@ -1353,34 +1246,38 @@ TEST(Btree, InitializerListInsert) { EXPECT_EQ(++it, range.second); } -template -void AssertKeyCompareStringAdapted() { - using Adapted = typename key_compare_adapter::type; +template +void AssertKeyCompareToAdapted() { + using Adapted = typename key_compare_to_adapter::type; + static_assert(!std::is_same::value, + "key_compare_to_adapter should have adapted this comparator."); static_assert( - std::is_same::value || - std::is_same::value, - "key_compare_adapter should have string-adapted this comparator."); + std::is_same>::value, + "Adapted comparator should be a key-compare-to comparator."); } -template -void AssertKeyCompareNotStringAdapted() { - using Adapted = typename key_compare_adapter::type; +template +void AssertKeyCompareToNotAdapted() { + using 
Unadapted = typename key_compare_to_adapter::type; static_assert( - !std::is_same::value && - !std::is_same::value, - "key_compare_adapter shouldn't have string-adapted this comparator."); + std::is_same::value, + "key_compare_to_adapter shouldn't have adapted this comparator."); + static_assert( + std::is_same>::value, + "Un-adapted comparator should return bool."); } -TEST(Btree, KeyCompareAdapter) { - AssertKeyCompareStringAdapted, std::string>(); - AssertKeyCompareStringAdapted, std::string>(); - AssertKeyCompareStringAdapted, - absl::string_view>(); - AssertKeyCompareStringAdapted, - absl::string_view>(); - AssertKeyCompareStringAdapted, absl::Cord>(); - AssertKeyCompareStringAdapted, absl::Cord>(); - AssertKeyCompareNotStringAdapted, int>(); - AssertKeyCompareNotStringAdapted, int>(); +TEST(Btree, KeyCompareToAdapter) { + AssertKeyCompareToAdapted, std::string>(); + AssertKeyCompareToAdapted, std::string>(); + AssertKeyCompareToAdapted, absl::string_view>(); + AssertKeyCompareToAdapted, + absl::string_view>(); + AssertKeyCompareToAdapted, absl::Cord>(); + AssertKeyCompareToAdapted, absl::Cord>(); + AssertKeyCompareToNotAdapted, int>(); + AssertKeyCompareToNotAdapted, int>(); } TEST(Btree, RValueInsert) { @@ -1430,19 +1327,39 @@ TEST(Btree, RValueInsert) { EXPECT_EQ(tracker.swaps(), 0); } -template -struct CheckedCompareOptedOutCmp : Cmp, BtreeTestOnlyCheckedCompareOptOutBase { - using Cmp::Cmp; - CheckedCompareOptedOutCmp() {} - CheckedCompareOptedOutCmp(Cmp cmp) : Cmp(std::move(cmp)) {} // NOLINT +} // namespace + +class BtreeNodePeer { + public: + // Yields the size of a leaf node with a specific number of values. + template + constexpr static size_t GetTargetNodeSize(size_t target_values_per_node) { + return btree_node< + set_params, std::allocator, + /*TargetNodeSize=*/256, // This parameter isn't used here. + /*Multi=*/false>>::SizeWithNValues(target_values_per_node); + } + + // Yields the number of values in a (non-root) leaf node for this set. 
+ template + constexpr static size_t GetNumValuesPerNode() { + return btree_node::kNodeValues; + } + + template + constexpr static size_t GetMaxFieldType() { + return std::numeric_limits< + typename btree_node::field_type>::max(); + } }; -// A btree set with a specific number of values per node. Opt out of -// checked_compare so that we can expect exact numbers of comparisons. +namespace { + +// A btree set with a specific number of values per node. template > class SizedBtreeSet : public btree_set_container, std::allocator, + set_params, BtreeNodePeer::GetTargetNodeSize(TargetValuesPerNode), /*Multi=*/false>>> { using Base = typename SizedBtreeSet::btree_set_container; @@ -1471,7 +1388,7 @@ void ExpectOperationCounts(const int expected_moves, TEST(Btree, MovesComparisonsCopiesSwapsTracking) { InstanceTracker tracker; // Note: this is minimum number of values per node. - SizedBtreeSet set4; + SizedBtreeSet set3; // Note: this is the default number of values per node for a set of int32s // (with 64-bit pointers). SizedBtreeSet set61; @@ -1482,30 +1399,28 @@ TEST(Btree, MovesComparisonsCopiesSwapsTracking) { std::vector values = GenerateValuesWithSeed(10000, 1 << 22, /*seed=*/23); - EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode(), 4); - EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode(), 61); - EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode(), 100); + EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode(), 3); + EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode(), 61); + EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode(), 100); if (sizeof(void *) == 8) { - EXPECT_EQ( - BtreeNodePeer::GetNumSlotsPerNode>(), - // When we have generations, there is one fewer slot. - BtreeNodePeer::UsesGenerations>() ? 60 : 61); + EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode>(), + BtreeNodePeer::GetNumValuesPerNode()); } // Test key insertion/deletion in random order. 
- ExpectOperationCounts(56540, 134212, values, &tracker, &set4); + ExpectOperationCounts(45281, 132551, values, &tracker, &set3); ExpectOperationCounts(386718, 129807, values, &tracker, &set61); ExpectOperationCounts(586761, 130310, values, &tracker, &set100); // Test key insertion/deletion in sorted order. std::sort(values.begin(), values.end()); - ExpectOperationCounts(24972, 85563, values, &tracker, &set4); + ExpectOperationCounts(26638, 92134, values, &tracker, &set3); ExpectOperationCounts(20208, 87757, values, &tracker, &set61); ExpectOperationCounts(20124, 96583, values, &tracker, &set100); // Test key insertion/deletion in reverse sorted order. std::reverse(values.begin(), values.end()); - ExpectOperationCounts(54949, 127531, values, &tracker, &set4); + ExpectOperationCounts(49951, 119325, values, &tracker, &set3); ExpectOperationCounts(338813, 118266, values, &tracker, &set61); ExpectOperationCounts(534529, 125279, values, &tracker, &set100); } @@ -1522,9 +1437,9 @@ struct MovableOnlyInstanceThreeWayCompare { TEST(Btree, MovesComparisonsCopiesSwapsTrackingThreeWayCompare) { InstanceTracker tracker; // Note: this is minimum number of values per node. - SizedBtreeSet - set4; + set3; // Note: this is the default number of values per node for a set of int32s // (with 64-bit pointers). SizedBtreeSet values = GenerateValuesWithSeed(10000, 1 << 22, /*seed=*/23); - EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode(), 4); - EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode(), 61); - EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode(), 100); + EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode(), 3); + EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode(), 61); + EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode(), 100); if (sizeof(void *) == 8) { - EXPECT_EQ( - BtreeNodePeer::GetNumSlotsPerNode>(), - // When we have generations, there is one fewer slot. - BtreeNodePeer::UsesGenerations>() ? 
60 : 61); + EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode>(), + BtreeNodePeer::GetNumValuesPerNode()); } // Test key insertion/deletion in random order. - ExpectOperationCounts(56540, 124221, values, &tracker, &set4); + ExpectOperationCounts(45281, 122560, values, &tracker, &set3); ExpectOperationCounts(386718, 119816, values, &tracker, &set61); ExpectOperationCounts(586761, 120319, values, &tracker, &set100); // Test key insertion/deletion in sorted order. std::sort(values.begin(), values.end()); - ExpectOperationCounts(24972, 85563, values, &tracker, &set4); + ExpectOperationCounts(26638, 92134, values, &tracker, &set3); ExpectOperationCounts(20208, 87757, values, &tracker, &set61); ExpectOperationCounts(20124, 96583, values, &tracker, &set100); // Test key insertion/deletion in reverse sorted order. std::reverse(values.begin(), values.end()); - ExpectOperationCounts(54949, 117532, values, &tracker, &set4); + ExpectOperationCounts(49951, 109326, values, &tracker, &set3); ExpectOperationCounts(338813, 108267, values, &tracker, &set61); ExpectOperationCounts(534529, 115280, values, &tracker, &set100); } @@ -1725,25 +1638,10 @@ TEST(Btree, StrSplitCompatible) { EXPECT_EQ(split_set, expected_set); } -TEST(Btree, KeyComp) { - absl::btree_set s; - EXPECT_TRUE(s.key_comp()(1, 2)); - EXPECT_FALSE(s.key_comp()(2, 2)); - EXPECT_FALSE(s.key_comp()(2, 1)); - - absl::btree_map m1; - EXPECT_TRUE(m1.key_comp()(1, 2)); - EXPECT_FALSE(m1.key_comp()(2, 2)); - EXPECT_FALSE(m1.key_comp()(2, 1)); - - // Even though we internally adapt the comparator of `m2` to be three-way and - // heterogeneous, the comparator we expose through key_comp() is the original - // unadapted comparator. - absl::btree_map m2; - EXPECT_TRUE(m2.key_comp()("a", "b")); - EXPECT_FALSE(m2.key_comp()("b", "b")); - EXPECT_FALSE(m2.key_comp()("b", "a")); -} +// We can't use EXPECT_EQ/etc. 
to compare absl::weak_ordering because they +// convert literal 0 to int and absl::weak_ordering can only be compared with +// literal 0. Defining this function allows for avoiding ClangTidy warnings. +bool Identity(const bool b) { return b; } TEST(Btree, ValueComp) { absl::btree_set s; @@ -1756,29 +1654,13 @@ TEST(Btree, ValueComp) { EXPECT_FALSE(m1.value_comp()(std::make_pair(2, 0), std::make_pair(2, 0))); EXPECT_FALSE(m1.value_comp()(std::make_pair(2, 0), std::make_pair(1, 0))); - // Even though we internally adapt the comparator of `m2` to be three-way and - // heterogeneous, the comparator we expose through value_comp() is based on - // the original unadapted comparator. absl::btree_map m2; - EXPECT_TRUE(m2.value_comp()(std::make_pair("a", 0), std::make_pair("b", 0))); - EXPECT_FALSE(m2.value_comp()(std::make_pair("b", 0), std::make_pair("b", 0))); - EXPECT_FALSE(m2.value_comp()(std::make_pair("b", 0), std::make_pair("a", 0))); -} - -// Test that we have the protected members from the std::map::value_compare API. -// See https://en.cppreference.com/w/cpp/container/map/value_compare. 
-TEST(Btree, MapValueCompProtected) { - struct key_compare { - bool operator()(int l, int r) const { return l < r; } - int id; - }; - using value_compare = absl::btree_map::value_compare; - struct value_comp_child : public value_compare { - explicit value_comp_child(key_compare kc) : value_compare(kc) {} - int GetId() const { return comp.id; } - }; - value_comp_child c(key_compare{10}); - EXPECT_EQ(c.GetId(), 10); + EXPECT_TRUE(Identity( + m2.value_comp()(std::make_pair("a", 0), std::make_pair("b", 0)) < 0)); + EXPECT_TRUE(Identity( + m2.value_comp()(std::make_pair("b", 0), std::make_pair("b", 0)) == 0)); + EXPECT_TRUE(Identity( + m2.value_comp()(std::make_pair("b", 0), std::make_pair("a", 0)) > 0)); } TEST(Btree, DefaultConstruction) { @@ -2086,30 +1968,6 @@ TEST(Btree, ExtractAndInsertNodeHandleMultiMap) { EXPECT_EQ(res, ++other.begin()); } -TEST(Btree, ExtractMultiMapEquivalentKeys) { - // Note: using string keys means a three-way comparator. - absl::btree_multimap map; - for (int i = 0; i < 100; ++i) { - for (int j = 0; j < 100; ++j) { - map.insert({absl::StrCat(i), j}); - } - } - - for (int i = 0; i < 100; ++i) { - const std::string key = absl::StrCat(i); - auto node_handle = map.extract(key); - EXPECT_EQ(node_handle.key(), key); - EXPECT_EQ(node_handle.mapped(), 0) << i; - } - - for (int i = 0; i < 100; ++i) { - const std::string key = absl::StrCat(i); - auto node_handle = map.extract(key); - EXPECT_EQ(node_handle.key(), key); - EXPECT_EQ(node_handle.mapped(), 1) << i; - } -} - // For multisets, insert with hint also affects correctness because we need to // insert immediately before the hint if possible. 
struct InsertMultiHintData { @@ -2251,31 +2109,6 @@ TEST(Btree, MergeIntoMultiMapsWithDifferentComparators) { Pair(4, 1), Pair(4, 4), Pair(5, 5))); } -TEST(Btree, MergeIntoSetMovableOnly) { - absl::btree_set src; - src.insert(MovableOnlyInstance(1)); - absl::btree_multiset dst1; - dst1.insert(MovableOnlyInstance(2)); - absl::btree_set dst2; - - // Test merge into multiset. - dst1.merge(src); - - EXPECT_TRUE(src.empty()); - // ElementsAre/ElementsAreArray don't work with move-only types. - ASSERT_THAT(dst1, SizeIs(2)); - EXPECT_EQ(*dst1.begin(), MovableOnlyInstance(1)); - EXPECT_EQ(*std::next(dst1.begin()), MovableOnlyInstance(2)); - - // Test merge into set. - dst2.merge(dst1); - - EXPECT_TRUE(dst1.empty()); - ASSERT_THAT(dst2, SizeIs(2)); - EXPECT_EQ(*dst2.begin(), MovableOnlyInstance(1)); - EXPECT_EQ(*std::next(dst2.begin()), MovableOnlyInstance(2)); -} - struct KeyCompareToWeakOrdering { template absl::weak_ordering operator()(const T &a, const T &b) const { @@ -2330,9 +2163,7 @@ TEST(Btree, TryEmplaceWithHintWorks) { }; using Cmp = decltype(cmp); - // Use a map that is opted out of key_compare being adapted so we can expect - // strict comparison call limits. - absl::btree_map> m(cmp); + absl::btree_map m(cmp); for (int i = 0; i < 128; ++i) { m.emplace(i, i); } @@ -2487,28 +2318,23 @@ TEST(Btree, EraseIf) { // Test that erase_if works with all the container types and supports lambdas. 
{ absl::btree_set s = {1, 3, 5, 6, 100}; - EXPECT_EQ(erase_if(s, [](int k) { return k > 3; }), 3); + erase_if(s, [](int k) { return k > 3; }); EXPECT_THAT(s, ElementsAre(1, 3)); } { absl::btree_multiset s = {1, 3, 3, 5, 6, 6, 100}; - EXPECT_EQ(erase_if(s, [](int k) { return k <= 3; }), 3); + erase_if(s, [](int k) { return k <= 3; }); EXPECT_THAT(s, ElementsAre(5, 6, 6, 100)); } { absl::btree_map m = {{1, 1}, {3, 3}, {6, 6}, {100, 100}}; - EXPECT_EQ( - erase_if(m, [](std::pair kv) { return kv.first > 3; }), - 2); + erase_if(m, [](std::pair kv) { return kv.first > 3; }); EXPECT_THAT(m, ElementsAre(Pair(1, 1), Pair(3, 3))); } { absl::btree_multimap m = {{1, 1}, {3, 3}, {3, 6}, {6, 6}, {6, 7}, {100, 6}}; - EXPECT_EQ( - erase_if(m, - [](std::pair kv) { return kv.second == 6; }), - 3); + erase_if(m, [](std::pair kv) { return kv.second == 6; }); EXPECT_THAT(m, ElementsAre(Pair(1, 1), Pair(3, 3), Pair(6, 7))); } // Test that erasing all elements from a large set works and test support for @@ -2516,29 +2342,15 @@ TEST(Btree, EraseIf) { { absl::btree_set s; for (int i = 0; i < 1000; ++i) s.insert(2 * i); - EXPECT_EQ(erase_if(s, IsEven), 1000); + erase_if(s, IsEven); EXPECT_THAT(s, IsEmpty()); } // Test that erase_if supports other format of function pointers. { absl::btree_set s = {1, 3, 5, 6, 100}; - EXPECT_EQ(erase_if(s, &IsEven), 2); + erase_if(s, &IsEven); EXPECT_THAT(s, ElementsAre(1, 3, 5)); } - // Test that erase_if invokes the predicate once per element. 
- { - absl::btree_set s; - for (int i = 0; i < 1000; ++i) s.insert(i); - int pred_calls = 0; - EXPECT_EQ(erase_if(s, - [&pred_calls](int k) { - ++pred_calls; - return k % 2; - }), - 500); - EXPECT_THAT(s, SizeIs(500)); - EXPECT_EQ(pred_calls, 1000); - } } TEST(Btree, InsertOrAssign) { @@ -2773,12 +2585,6 @@ struct MultiKey { int i2; }; -bool operator==(const MultiKey a, const MultiKey b) { - return a.i1 == b.i1 && a.i2 == b.i2; -} - -// A heterogeneous comparator that has different equivalence classes for -// different lookup types. struct MultiKeyComp { using is_transparent = void; bool operator()(const MultiKey a, const MultiKey b) const { @@ -2789,36 +2595,11 @@ struct MultiKeyComp { bool operator()(const MultiKey a, const int b) const { return a.i1 < b; } }; -// A heterogeneous, three-way comparator that has different equivalence classes -// for different lookup types. -struct MultiKeyThreeWayComp { - using is_transparent = void; - absl::weak_ordering operator()(const MultiKey a, const MultiKey b) const { - if (a.i1 < b.i1) return absl::weak_ordering::less; - if (a.i1 > b.i1) return absl::weak_ordering::greater; - if (a.i2 < b.i2) return absl::weak_ordering::less; - if (a.i2 > b.i2) return absl::weak_ordering::greater; - return absl::weak_ordering::equivalent; - } - absl::weak_ordering operator()(const int a, const MultiKey b) const { - if (a < b.i1) return absl::weak_ordering::less; - if (a > b.i1) return absl::weak_ordering::greater; - return absl::weak_ordering::equivalent; - } - absl::weak_ordering operator()(const MultiKey a, const int b) const { - if (a.i1 < b) return absl::weak_ordering::less; - if (a.i1 > b) return absl::weak_ordering::greater; - return absl::weak_ordering::equivalent; - } -}; - -template -class BtreeMultiKeyTest : public ::testing::Test {}; -using MultiKeyComps = ::testing::Types; -TYPED_TEST_SUITE(BtreeMultiKeyTest, MultiKeyComps); +// Test that when there's a heterogeneous comparator that behaves differently +// for some 
heterogeneous operators, we get equal_range() right. +TEST(Btree, MultiKeyEqualRange) { + absl::btree_set set; -TYPED_TEST(BtreeMultiKeyTest, EqualRange) { - absl::btree_set set; for (int i = 0; i < 100; ++i) { for (int j = 0; j < 100; ++j) { set.insert({i, j}); @@ -2828,426 +2609,11 @@ TYPED_TEST(BtreeMultiKeyTest, EqualRange) { for (int i = 0; i < 100; ++i) { auto equal_range = set.equal_range(i); EXPECT_EQ(equal_range.first->i1, i); - EXPECT_EQ(equal_range.first->i2, 0) << i; + EXPECT_EQ(equal_range.first->i2, 0); EXPECT_EQ(std::distance(equal_range.first, equal_range.second), 100) << i; } } -TYPED_TEST(BtreeMultiKeyTest, Extract) { - absl::btree_set set; - for (int i = 0; i < 100; ++i) { - for (int j = 0; j < 100; ++j) { - set.insert({i, j}); - } - } - - for (int i = 0; i < 100; ++i) { - auto node_handle = set.extract(i); - EXPECT_EQ(node_handle.value().i1, i); - EXPECT_EQ(node_handle.value().i2, 0) << i; - } - - for (int i = 0; i < 100; ++i) { - auto node_handle = set.extract(i); - EXPECT_EQ(node_handle.value().i1, i); - EXPECT_EQ(node_handle.value().i2, 1) << i; - } -} - -TYPED_TEST(BtreeMultiKeyTest, Erase) { - absl::btree_set set = { - {1, 1}, {2, 1}, {2, 2}, {3, 1}}; - EXPECT_EQ(set.erase(2), 2); - EXPECT_THAT(set, ElementsAre(MultiKey{1, 1}, MultiKey{3, 1})); -} - -TYPED_TEST(BtreeMultiKeyTest, Count) { - const absl::btree_set set = { - {1, 1}, {2, 1}, {2, 2}, {3, 1}}; - EXPECT_EQ(set.count(2), 2); -} - -TEST(Btree, AllocConstructor) { - using Alloc = CountingAllocator; - using Set = absl::btree_set, Alloc>; - int64_t bytes_used = 0; - Alloc alloc(&bytes_used); - Set set(alloc); - - set.insert({1, 2, 3}); - - EXPECT_THAT(set, ElementsAre(1, 2, 3)); - EXPECT_GT(bytes_used, set.size() * sizeof(int)); -} - -TEST(Btree, AllocInitializerListConstructor) { - using Alloc = CountingAllocator; - using Set = absl::btree_set, Alloc>; - int64_t bytes_used = 0; - Alloc alloc(&bytes_used); - Set set({1, 2, 3}, alloc); - - EXPECT_THAT(set, ElementsAre(1, 2, 3)); - 
EXPECT_GT(bytes_used, set.size() * sizeof(int)); -} - -TEST(Btree, AllocRangeConstructor) { - using Alloc = CountingAllocator; - using Set = absl::btree_set, Alloc>; - int64_t bytes_used = 0; - Alloc alloc(&bytes_used); - std::vector v = {1, 2, 3}; - Set set(v.begin(), v.end(), alloc); - - EXPECT_THAT(set, ElementsAre(1, 2, 3)); - EXPECT_GT(bytes_used, set.size() * sizeof(int)); -} - -TEST(Btree, AllocCopyConstructor) { - using Alloc = CountingAllocator; - using Set = absl::btree_set, Alloc>; - int64_t bytes_used1 = 0; - Alloc alloc1(&bytes_used1); - Set set1(alloc1); - - set1.insert({1, 2, 3}); - - int64_t bytes_used2 = 0; - Alloc alloc2(&bytes_used2); - Set set2(set1, alloc2); - - EXPECT_THAT(set1, ElementsAre(1, 2, 3)); - EXPECT_THAT(set2, ElementsAre(1, 2, 3)); - EXPECT_GT(bytes_used1, set1.size() * sizeof(int)); - EXPECT_EQ(bytes_used1, bytes_used2); -} - -TEST(Btree, AllocMoveConstructor_SameAlloc) { - using Alloc = CountingAllocator; - using Set = absl::btree_set, Alloc>; - int64_t bytes_used = 0; - Alloc alloc(&bytes_used); - Set set1(alloc); - - set1.insert({1, 2, 3}); - - const int64_t original_bytes_used = bytes_used; - EXPECT_GT(original_bytes_used, set1.size() * sizeof(int)); - - Set set2(std::move(set1), alloc); - - EXPECT_THAT(set2, ElementsAre(1, 2, 3)); - EXPECT_EQ(bytes_used, original_bytes_used); -} - -TEST(Btree, AllocMoveConstructor_DifferentAlloc) { - using Alloc = CountingAllocator; - using Set = absl::btree_set, Alloc>; - int64_t bytes_used1 = 0; - Alloc alloc1(&bytes_used1); - Set set1(alloc1); - - set1.insert({1, 2, 3}); - - const int64_t original_bytes_used = bytes_used1; - EXPECT_GT(original_bytes_used, set1.size() * sizeof(int)); - - int64_t bytes_used2 = 0; - Alloc alloc2(&bytes_used2); - Set set2(std::move(set1), alloc2); - - EXPECT_THAT(set2, ElementsAre(1, 2, 3)); - // We didn't free these bytes allocated by `set1` yet. 
- EXPECT_EQ(bytes_used1, original_bytes_used); - EXPECT_EQ(bytes_used2, original_bytes_used); -} - -bool IntCmp(const int a, const int b) { return a < b; } - -TEST(Btree, SupportsFunctionPtrComparator) { - absl::btree_set set(IntCmp); - set.insert({1, 2, 3}); - EXPECT_THAT(set, ElementsAre(1, 2, 3)); - EXPECT_TRUE(set.key_comp()(1, 2)); - EXPECT_TRUE(set.value_comp()(1, 2)); - - absl::btree_map map(&IntCmp); - map[1] = 1; - EXPECT_THAT(map, ElementsAre(Pair(1, 1))); - EXPECT_TRUE(map.key_comp()(1, 2)); - EXPECT_TRUE(map.value_comp()(std::make_pair(1, 1), std::make_pair(2, 2))); -} - -template -struct TransparentPassThroughComp { - using is_transparent = void; - - // This will fail compilation if we attempt a comparison that Compare does not - // support, and the failure will happen inside the function implementation so - // it can't be avoided by using SFINAE on this comparator. - template - bool operator()(const T &lhs, const U &rhs) const { - return Compare()(lhs, rhs); - } -}; - -TEST(Btree, - SupportsTransparentComparatorThatDoesNotImplementAllVisibleOperators) { - absl::btree_set> set; - set.insert(MultiKey{1, 2}); - EXPECT_TRUE(set.contains(1)); -} - -TEST(Btree, ConstructImplicitlyWithUnadaptedComparator) { - absl::btree_set set = {{}, MultiKeyComp{}}; -} - -#ifndef NDEBUG -TEST(Btree, InvalidComparatorsCaught) { - { - struct ZeroAlwaysLessCmp { - bool operator()(int lhs, int rhs) const { - if (lhs == 0) return true; - return lhs < rhs; - } - }; - absl::btree_set set; - EXPECT_DEATH(set.insert({0, 1, 2}), "is_self_equivalent"); - } - { - struct ThreeWayAlwaysLessCmp { - absl::weak_ordering operator()(int, int) const { - return absl::weak_ordering::less; - } - }; - absl::btree_set set; - EXPECT_DEATH(set.insert({0, 1, 2}), "is_self_equivalent"); - } - { - struct SumGreaterZeroCmp { - bool operator()(int lhs, int rhs) const { - // First, do equivalence correctly - so we can test later condition. 
- if (lhs == rhs) return false; - return lhs + rhs > 0; - } - }; - absl::btree_set set; - // Note: '!' only needs to be escaped when it's the first character. - EXPECT_DEATH(set.insert({0, 1, 2}), - R"regex(\!lhs_comp_rhs \|\| !comp\(\)\(rhs, lhs\))regex"); - } - { - struct ThreeWaySumGreaterZeroCmp { - absl::weak_ordering operator()(int lhs, int rhs) const { - // First, do equivalence correctly - so we can test later condition. - if (lhs == rhs) return absl::weak_ordering::equivalent; - - if (lhs + rhs > 0) return absl::weak_ordering::less; - if (lhs + rhs == 0) return absl::weak_ordering::equivalent; - return absl::weak_ordering::greater; - } - }; - absl::btree_set set; - EXPECT_DEATH(set.insert({0, 1, 2}), "lhs_comp_rhs < 0 -> rhs_comp_lhs > 0"); - } -} -#endif - -#ifndef _MSC_VER -// This test crashes on MSVC. -TEST(Btree, InvalidIteratorUse) { - if (!BtreeNodePeer::UsesGenerations>()) - GTEST_SKIP() << "Generation validation for iterators is disabled."; - - { - absl::btree_set set; - for (int i = 0; i < 10; ++i) set.insert(i); - auto it = set.begin(); - set.erase(it++); - EXPECT_DEATH(set.erase(it++), "invalidated iterator"); - } - { - absl::btree_set set; - for (int i = 0; i < 10; ++i) set.insert(i); - auto it = set.insert(20).first; - set.insert(30); - EXPECT_DEATH(*it, "invalidated iterator"); - } - { - absl::btree_set set; - for (int i = 0; i < 10000; ++i) set.insert(i); - auto it = set.find(5000); - ASSERT_NE(it, set.end()); - set.erase(1); - EXPECT_DEATH(*it, "invalidated iterator"); - } -} -#endif - -class OnlyConstructibleByAllocator { - explicit OnlyConstructibleByAllocator(int i) : i_(i) {} - - public: - OnlyConstructibleByAllocator(const OnlyConstructibleByAllocator &other) - : i_(other.i_) {} - OnlyConstructibleByAllocator &operator=( - const OnlyConstructibleByAllocator &other) { - i_ = other.i_; - return *this; - } - int Get() const { return i_; } - bool operator==(int i) const { return i_ == i; } - - private: - template - friend class 
OnlyConstructibleAllocator; - - int i_; -}; - -template -class OnlyConstructibleAllocator : public std::allocator { - public: - OnlyConstructibleAllocator() = default; - template - explicit OnlyConstructibleAllocator(const OnlyConstructibleAllocator &) {} - - void construct(OnlyConstructibleByAllocator *p, int i) { - new (p) OnlyConstructibleByAllocator(i); - } - template - void construct(Pair *p, const int i) { - OnlyConstructibleByAllocator only(i); - new (p) Pair(std::move(only), i); - } - - template - struct rebind { - using other = OnlyConstructibleAllocator; - }; -}; - -struct OnlyConstructibleByAllocatorComp { - using is_transparent = void; - bool operator()(OnlyConstructibleByAllocator a, - OnlyConstructibleByAllocator b) const { - return a.Get() < b.Get(); - } - bool operator()(int a, OnlyConstructibleByAllocator b) const { - return a < b.Get(); - } - bool operator()(OnlyConstructibleByAllocator a, int b) const { - return a.Get() < b; - } -}; - -TEST(Btree, OnlyConstructibleByAllocatorType) { - const std::array arr = {3, 4}; - { - absl::btree_set> - set; - set.emplace(1); - set.emplace_hint(set.end(), 2); - set.insert(arr.begin(), arr.end()); - EXPECT_THAT(set, ElementsAre(1, 2, 3, 4)); - } - { - absl::btree_multiset> - set; - set.emplace(1); - set.emplace_hint(set.end(), 2); - // TODO(ezb): fix insert_multi to allow this to compile. - // set.insert(arr.begin(), arr.end()); - EXPECT_THAT(set, ElementsAre(1, 2)); - } - { - absl::btree_map> - map; - map.emplace(1); - map.emplace_hint(map.end(), 2); - map.insert(arr.begin(), arr.end()); - EXPECT_THAT(map, - ElementsAre(Pair(1, 1), Pair(2, 2), Pair(3, 3), Pair(4, 4))); - } - { - absl::btree_multimap> - map; - map.emplace(1); - map.emplace_hint(map.end(), 2); - // TODO(ezb): fix insert_multi to allow this to compile. 
- // map.insert(arr.begin(), arr.end()); - EXPECT_THAT(map, ElementsAre(Pair(1, 1), Pair(2, 2))); - } -} - -class NotAssignable { - public: - explicit NotAssignable(int i) : i_(i) {} - NotAssignable(const NotAssignable &other) : i_(other.i_) {} - NotAssignable &operator=(NotAssignable &&other) = delete; - int Get() const { return i_; } - bool operator==(int i) const { return i_ == i; } - friend bool operator<(NotAssignable a, NotAssignable b) { - return a.i_ < b.i_; - } - - private: - int i_; -}; - -TEST(Btree, NotAssignableType) { - { - absl::btree_set set; - set.emplace(1); - set.emplace_hint(set.end(), 2); - set.insert(NotAssignable(3)); - set.insert(set.end(), NotAssignable(4)); - EXPECT_THAT(set, ElementsAre(1, 2, 3, 4)); - set.erase(set.begin()); - EXPECT_THAT(set, ElementsAre(2, 3, 4)); - } - { - absl::btree_multiset set; - set.emplace(1); - set.emplace_hint(set.end(), 2); - set.insert(NotAssignable(2)); - set.insert(set.end(), NotAssignable(3)); - EXPECT_THAT(set, ElementsAre(1, 2, 2, 3)); - set.erase(set.begin()); - EXPECT_THAT(set, ElementsAre(2, 2, 3)); - } - { - absl::btree_map map; - map.emplace(NotAssignable(1), 1); - map.emplace_hint(map.end(), NotAssignable(2), 2); - map.insert({NotAssignable(3), 3}); - map.insert(map.end(), {NotAssignable(4), 4}); - EXPECT_THAT(map, - ElementsAre(Pair(1, 1), Pair(2, 2), Pair(3, 3), Pair(4, 4))); - map.erase(map.begin()); - EXPECT_THAT(map, ElementsAre(Pair(2, 2), Pair(3, 3), Pair(4, 4))); - } - { - absl::btree_multimap map; - map.emplace(NotAssignable(1), 1); - map.emplace_hint(map.end(), NotAssignable(2), 2); - map.insert({NotAssignable(2), 3}); - map.insert(map.end(), {NotAssignable(3), 3}); - EXPECT_THAT(map, - ElementsAre(Pair(1, 1), Pair(2, 2), Pair(2, 3), Pair(3, 3))); - map.erase(map.begin()); - EXPECT_THAT(map, ElementsAre(Pair(2, 2), Pair(2, 3), Pair(3, 3))); - } -} - } // namespace } // namespace container_internal ABSL_NAMESPACE_END diff --git a/abseil-cpp/absl/container/fixed_array.h 
b/abseil-cpp/absl/container/fixed_array.h index 2aefae3b..c8fe8d96 100644 --- a/abseil-cpp/absl/container/fixed_array.h +++ b/abseil-cpp/absl/container/fixed_array.h @@ -73,6 +73,11 @@ constexpr static auto kFixedArrayUseDefault = static_cast(-1); // uninitialized (e.g. int, int[4], double), and others default-constructed. // This matches the behavior of c-style arrays and `std::array`, but not // `std::vector`. +// +// Note that `FixedArray` does not provide a public allocator; if it requires a +// heap allocation, it will do so with global `::operator new[]()` and +// `::operator delete[]()`, even if T provides class-scope overrides for these +// operators. template > class FixedArray { @@ -227,8 +232,8 @@ class FixedArray { // FixedArray::at // - // Bounds-checked access. Returns a reference to the ith element of the fixed - // array, or throws std::out_of_range + // Bounds-checked access. Returns a reference to the ith element of the + // fiexed array, or throws std::out_of_range reference at(size_type i) { if (ABSL_PREDICT_FALSE(i >= size())) { base_internal::ThrowStdOutOfRange("FixedArray::at failed bounds check"); @@ -489,14 +494,12 @@ class FixedArray { Storage storage_; }; -#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL template constexpr size_t FixedArray::kInlineBytesDefault; template constexpr typename FixedArray::size_type FixedArray::inline_elements; -#endif template void FixedArray::NonEmptyInlinedStorage::AnnotateConstruct( diff --git a/abseil-cpp/absl/container/flat_hash_map.h b/abseil-cpp/absl/container/flat_hash_map.h index e6bdbd9e..74def0df 100644 --- a/abseil-cpp/absl/container/flat_hash_map.h +++ b/abseil-cpp/absl/container/flat_hash_map.h @@ -36,7 +36,6 @@ #include #include "absl/algorithm/container.h" -#include "absl/base/macros.h" #include "absl/container/internal/container_memory.h" #include "absl/container/internal/hash_function_defaults.h" // IWYU pragma: export #include "absl/container/internal/raw_hash_map.h" // IWYU pragma: 
export @@ -76,10 +75,6 @@ struct FlatHashMapPolicy; // absl/hash/hash.h for information on extending Abseil hashing to user-defined // types. // -// Using `absl::flat_hash_map` at interface boundaries in dynamically loaded -// libraries (e.g. .dll, .so) is unsupported due to way `absl::Hash` values may -// be randomized across dynamically loaded libraries. -// // NOTE: A `flat_hash_map` stores its value types directly inside its // implementation array to avoid memory indirection. Because a `flat_hash_map` // is designed to move data when rehashed, map values will not retain pointer @@ -361,8 +356,8 @@ class flat_hash_map : public absl::container_internal::raw_hash_map< // `flat_hash_map`. // // iterator try_emplace(const_iterator hint, - // const key_type& k, Args&&... args): - // iterator try_emplace(const_iterator hint, key_type&& k, Args&&... args): + // const init_type& k, Args&&... args): + // iterator try_emplace(const_iterator hint, init_type&& k, Args&&... args): // // Inserts (via copy or move) the element of the specified key into the // `flat_hash_map` using the position of `hint` as a non-binding suggestion @@ -546,12 +541,10 @@ class flat_hash_map : public absl::container_internal::raw_hash_map< // erase_if(flat_hash_map<>, Pred) // // Erases all elements that satisfy the predicate `pred` from the container `c`. -// Returns the number of erased elements. template -typename flat_hash_map::size_type erase_if( - flat_hash_map& c, Predicate pred) { - return container_internal::EraseIf(pred, &c); +void erase_if(flat_hash_map& c, Predicate pred) { + container_internal::EraseIf(pred, &c); } namespace container_internal { diff --git a/abseil-cpp/absl/container/flat_hash_map_test.cc b/abseil-cpp/absl/container/flat_hash_map_test.cc index 263951f1..89ec60c9 100644 --- a/abseil-cpp/absl/container/flat_hash_map_test.cc +++ b/abseil-cpp/absl/container/flat_hash_map_test.cc @@ -236,36 +236,33 @@ TEST(FlatHashMap, EraseIf) { // Erase all elements. 
{ flat_hash_map s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}}; - EXPECT_EQ(erase_if(s, [](std::pair) { return true; }), 5); + erase_if(s, [](std::pair) { return true; }); EXPECT_THAT(s, IsEmpty()); } // Erase no elements. { flat_hash_map s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}}; - EXPECT_EQ(erase_if(s, [](std::pair) { return false; }), 0); + erase_if(s, [](std::pair) { return false; }); EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(2, 2), Pair(3, 3), Pair(4, 4), Pair(5, 5))); } // Erase specific elements. { flat_hash_map s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}}; - EXPECT_EQ(erase_if(s, - [](std::pair kvp) { - return kvp.first % 2 == 1; - }), - 3); + erase_if(s, + [](std::pair kvp) { return kvp.first % 2 == 1; }); EXPECT_THAT(s, UnorderedElementsAre(Pair(2, 2), Pair(4, 4))); } // Predicate is function reference. { flat_hash_map s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}}; - EXPECT_EQ(erase_if(s, FirstIsEven), 2); + erase_if(s, FirstIsEven); EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(3, 3), Pair(5, 5))); } // Predicate is function pointer. { flat_hash_map s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}}; - EXPECT_EQ(erase_if(s, &FirstIsEven), 2); + erase_if(s, &FirstIsEven); EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(3, 3), Pair(5, 5))); } } @@ -285,32 +282,6 @@ TEST(FlatHashMap, NodeHandleMutableKeyAccess) { } #endif -TEST(FlatHashMap, Reserve) { - // Verify that if we reserve(size() + n) then we can perform n insertions - // without a rehash, i.e., without invalidating any references. - for (size_t trial = 0; trial < 20; ++trial) { - for (size_t initial = 3; initial < 100; ++initial) { - // Fill in `initial` entries, then erase 2 of them, then reserve space for - // two inserts and check for reference stability while doing the inserts. 
- flat_hash_map map; - for (size_t i = 0; i < initial; ++i) { - map[i] = i; - } - map.erase(0); - map.erase(1); - map.reserve(map.size() + 2); - size_t& a2 = map[2]; - // In the event of a failure, asan will complain in one of these two - // assignments. - map[initial] = a2; - map[initial + 1] = a2; - // Fail even when not under asan: - size_t& a2new = map[2]; - EXPECT_EQ(&a2, &a2new); - } - } -} - } // namespace } // namespace container_internal ABSL_NAMESPACE_END diff --git a/abseil-cpp/absl/container/flat_hash_set.h b/abseil-cpp/absl/container/flat_hash_set.h index 4938c703..81e145aa 100644 --- a/abseil-cpp/absl/container/flat_hash_set.h +++ b/abseil-cpp/absl/container/flat_hash_set.h @@ -67,15 +67,11 @@ struct FlatHashSetPolicy; // // By default, `flat_hash_set` uses the `absl::Hash` hashing framework. All // fundamental and Abseil types that support the `absl::Hash` framework have a -// compatible equality operator for comparing insertions into `flat_hash_set`. +// compatible equality operator for comparing insertions into `flat_hash_map`. // If your type is not yet supported by the `absl::Hash` framework, see // absl/hash/hash.h for information on extending Abseil hashing to user-defined // types. // -// Using `absl::flat_hash_set` at interface boundaries in dynamically loaded -// libraries (e.g. .dll, .so) is unsupported due to way `absl::Hash` values may -// be randomized across dynamically loaded libraries. -// // NOTE: A `flat_hash_set` stores its keys directly inside its implementation // array to avoid memory indirection. Because a `flat_hash_set` is designed to // move data when rehashed, set keys will not retain pointer stability. 
If you @@ -110,7 +106,7 @@ class flat_hash_set public: // Constructors and Assignment Operators // - // A flat_hash_set supports the same overload set as `std::unordered_set` + // A flat_hash_set supports the same overload set as `std::unordered_map` // for construction and assignment: // // * Default constructor @@ -177,7 +173,7 @@ class flat_hash_set // available within the `flat_hash_set`. // // NOTE: this member function is particular to `absl::flat_hash_set` and is - // not provided in the `std::unordered_set` API. + // not provided in the `std::unordered_map` API. using Base::capacity; // flat_hash_set::empty() @@ -328,7 +324,7 @@ class flat_hash_set // flat_hash_set::merge() // - // Extracts elements from a given `source` flat hash set into this + // Extracts elements from a given `source` flat hash map into this // `flat_hash_set`. If the destination `flat_hash_set` already contains an // element with an equivalent key, that element is not extracted. using Base::merge; @@ -336,7 +332,7 @@ class flat_hash_set // flat_hash_set::swap(flat_hash_set& other) // // Exchanges the contents of this `flat_hash_set` with those of the `other` - // flat hash set, avoiding invocation of any move, copy, or swap operations on + // flat hash map, avoiding invocation of any move, copy, or swap operations on // individual elements. // // All iterators and references on the `flat_hash_set` remain valid, excepting @@ -344,7 +340,7 @@ class flat_hash_set // // `swap()` requires that the flat hash set's hashing and key equivalence // functions be Swappable, and are exchaged using unqualified calls to - // non-member `swap()`. If the set's allocator has + // non-member `swap()`. If the map's allocator has // `std::allocator_traits::propagate_on_container_swap::value` // set to `true`, the allocators are also exchanged using an unqualified call // to non-member `swap()`; otherwise, the allocators are not swapped. 
@@ -399,14 +395,14 @@ class flat_hash_set // flat_hash_set::bucket_count() // // Returns the number of "buckets" within the `flat_hash_set`. Note that - // because a flat hash set contains all elements within its internal storage, + // because a flat hash map contains all elements within its internal storage, // this value simply equals the current capacity of the `flat_hash_set`. using Base::bucket_count; // flat_hash_set::load_factor() // // Returns the current load factor of the `flat_hash_set` (the average number - // of slots occupied with a value within the hash set). + // of slots occupied with a value within the hash map). using Base::load_factor; // flat_hash_set::max_load_factor() @@ -447,11 +443,9 @@ class flat_hash_set // erase_if(flat_hash_set<>, Pred) // // Erases all elements that satisfy the predicate `pred` from the container `c`. -// Returns the number of erased elements. template -typename flat_hash_set::size_type erase_if( - flat_hash_set& c, Predicate pred) { - return container_internal::EraseIf(pred, &c); +void erase_if(flat_hash_set& c, Predicate pred) { + container_internal::EraseIf(pred, &c); } namespace container_internal { diff --git a/abseil-cpp/absl/container/flat_hash_set_test.cc b/abseil-cpp/absl/container/flat_hash_set_test.cc index b6a72a20..8f6f9944 100644 --- a/abseil-cpp/absl/container/flat_hash_set_test.cc +++ b/abseil-cpp/absl/container/flat_hash_set_test.cc @@ -143,31 +143,31 @@ TEST(FlatHashSet, EraseIf) { // Erase all elements. { flat_hash_set s = {1, 2, 3, 4, 5}; - EXPECT_EQ(erase_if(s, [](int) { return true; }), 5); + erase_if(s, [](int) { return true; }); EXPECT_THAT(s, IsEmpty()); } // Erase no elements. { flat_hash_set s = {1, 2, 3, 4, 5}; - EXPECT_EQ(erase_if(s, [](int) { return false; }), 0); + erase_if(s, [](int) { return false; }); EXPECT_THAT(s, UnorderedElementsAre(1, 2, 3, 4, 5)); } // Erase specific elements. 
{ flat_hash_set s = {1, 2, 3, 4, 5}; - EXPECT_EQ(erase_if(s, [](int k) { return k % 2 == 1; }), 3); + erase_if(s, [](int k) { return k % 2 == 1; }); EXPECT_THAT(s, UnorderedElementsAre(2, 4)); } // Predicate is function reference. { flat_hash_set s = {1, 2, 3, 4, 5}; - EXPECT_EQ(erase_if(s, IsEven), 2); + erase_if(s, IsEven); EXPECT_THAT(s, UnorderedElementsAre(1, 3, 5)); } // Predicate is function pointer. { flat_hash_set s = {1, 2, 3, 4, 5}; - EXPECT_EQ(erase_if(s, &IsEven), 2); + erase_if(s, &IsEven); EXPECT_THAT(s, UnorderedElementsAre(1, 3, 5)); } } diff --git a/abseil-cpp/absl/container/inlined_vector.h b/abseil-cpp/absl/container/inlined_vector.h index bc1c4a77..90bb96e8 100644 --- a/abseil-cpp/absl/container/inlined_vector.h +++ b/abseil-cpp/absl/container/inlined_vector.h @@ -36,6 +36,7 @@ #define ABSL_CONTAINER_INLINED_VECTOR_H_ #include +#include #include #include #include @@ -71,43 +72,37 @@ class InlinedVector { using Storage = inlined_vector_internal::Storage; - template - using AllocatorTraits = inlined_vector_internal::AllocatorTraits; - template - using MoveIterator = inlined_vector_internal::MoveIterator; - template - using IsMemcpyOk = inlined_vector_internal::IsMemcpyOk; + using AllocatorTraits = typename Storage::AllocatorTraits; + using RValueReference = typename Storage::RValueReference; + using MoveIterator = typename Storage::MoveIterator; + using IsMemcpyOk = typename Storage::IsMemcpyOk; - template + template using IteratorValueAdapter = - inlined_vector_internal::IteratorValueAdapter; - template - using CopyValueAdapter = inlined_vector_internal::CopyValueAdapter; - template - using DefaultValueAdapter = - inlined_vector_internal::DefaultValueAdapter; + typename Storage::template IteratorValueAdapter; + using CopyValueAdapter = typename Storage::CopyValueAdapter; + using DefaultValueAdapter = typename Storage::DefaultValueAdapter; template using EnableIfAtLeastForwardIterator = absl::enable_if_t< - 
inlined_vector_internal::IsAtLeastForwardIterator::value, int>; + inlined_vector_internal::IsAtLeastForwardIterator::value>; template using DisableIfAtLeastForwardIterator = absl::enable_if_t< - !inlined_vector_internal::IsAtLeastForwardIterator::value, int>; + !inlined_vector_internal::IsAtLeastForwardIterator::value>; public: - using allocator_type = A; - using value_type = inlined_vector_internal::ValueType; - using pointer = inlined_vector_internal::Pointer; - using const_pointer = inlined_vector_internal::ConstPointer; - using size_type = inlined_vector_internal::SizeType; - using difference_type = inlined_vector_internal::DifferenceType; - using reference = inlined_vector_internal::Reference; - using const_reference = inlined_vector_internal::ConstReference; - using iterator = inlined_vector_internal::Iterator; - using const_iterator = inlined_vector_internal::ConstIterator; - using reverse_iterator = inlined_vector_internal::ReverseIterator; - using const_reverse_iterator = - inlined_vector_internal::ConstReverseIterator; + using allocator_type = typename Storage::allocator_type; + using value_type = typename Storage::value_type; + using pointer = typename Storage::pointer; + using const_pointer = typename Storage::const_pointer; + using size_type = typename Storage::size_type; + using difference_type = typename Storage::difference_type; + using reference = typename Storage::reference; + using const_reference = typename Storage::const_reference; + using iterator = typename Storage::iterator; + using const_iterator = typename Storage::const_iterator; + using reverse_iterator = typename Storage::reverse_iterator; + using const_reverse_iterator = typename Storage::const_reverse_iterator; // --------------------------------------------------------------------------- // InlinedVector Constructors and Destructor @@ -116,28 +111,28 @@ class InlinedVector { // Creates an empty inlined vector with a value-initialized allocator. 
InlinedVector() noexcept(noexcept(allocator_type())) : storage_() {} - // Creates an empty inlined vector with a copy of `allocator`. - explicit InlinedVector(const allocator_type& allocator) noexcept - : storage_(allocator) {} + // Creates an empty inlined vector with a copy of `alloc`. + explicit InlinedVector(const allocator_type& alloc) noexcept + : storage_(alloc) {} // Creates an inlined vector with `n` copies of `value_type()`. explicit InlinedVector(size_type n, - const allocator_type& allocator = allocator_type()) - : storage_(allocator) { - storage_.Initialize(DefaultValueAdapter(), n); + const allocator_type& alloc = allocator_type()) + : storage_(alloc) { + storage_.Initialize(DefaultValueAdapter(), n); } // Creates an inlined vector with `n` copies of `v`. InlinedVector(size_type n, const_reference v, - const allocator_type& allocator = allocator_type()) - : storage_(allocator) { - storage_.Initialize(CopyValueAdapter(std::addressof(v)), n); + const allocator_type& alloc = allocator_type()) + : storage_(alloc) { + storage_.Initialize(CopyValueAdapter(v), n); } // Creates an inlined vector with copies of the elements of `list`. InlinedVector(std::initializer_list list, - const allocator_type& allocator = allocator_type()) - : InlinedVector(list.begin(), list.end(), allocator) {} + const allocator_type& alloc = allocator_type()) + : InlinedVector(list.begin(), list.end(), alloc) {} // Creates an inlined vector with elements constructed from the provided // forward iterator range [`first`, `last`). @@ -146,40 +141,37 @@ class InlinedVector { // this constructor with two integral arguments and a call to the above // `InlinedVector(size_type, const_reference)` constructor. 
template = 0> + EnableIfAtLeastForwardIterator* = nullptr> InlinedVector(ForwardIterator first, ForwardIterator last, - const allocator_type& allocator = allocator_type()) - : storage_(allocator) { - storage_.Initialize(IteratorValueAdapter(first), - static_cast(std::distance(first, last))); + const allocator_type& alloc = allocator_type()) + : storage_(alloc) { + storage_.Initialize(IteratorValueAdapter(first), + std::distance(first, last)); } // Creates an inlined vector with elements constructed from the provided input // iterator range [`first`, `last`). template = 0> + DisableIfAtLeastForwardIterator* = nullptr> InlinedVector(InputIterator first, InputIterator last, - const allocator_type& allocator = allocator_type()) - : storage_(allocator) { + const allocator_type& alloc = allocator_type()) + : storage_(alloc) { std::copy(first, last, std::back_inserter(*this)); } // Creates an inlined vector by copying the contents of `other` using // `other`'s allocator. InlinedVector(const InlinedVector& other) - : InlinedVector(other, other.storage_.GetAllocator()) {} - - // Creates an inlined vector by copying the contents of `other` using the - // provided `allocator`. - InlinedVector(const InlinedVector& other, const allocator_type& allocator) - : storage_(allocator) { - if (other.empty()) { - // Empty; nothing to do. - } else if (IsMemcpyOk::value && !other.storage_.GetIsAllocated()) { - // Memcpy-able and do not need allocation. + : InlinedVector(other, *other.storage_.GetAllocPtr()) {} + + // Creates an inlined vector by copying the contents of `other` using `alloc`. 
+ InlinedVector(const InlinedVector& other, const allocator_type& alloc) + : storage_(alloc) { + if (IsMemcpyOk::value && !other.storage_.GetIsAllocated()) { storage_.MemcpyFrom(other.storage_); } else { - storage_.InitFrom(other.storage_); + storage_.Initialize(IteratorValueAdapter(other.data()), + other.size()); } } @@ -200,23 +192,23 @@ class InlinedVector { InlinedVector(InlinedVector&& other) noexcept( absl::allocator_is_nothrow::value || std::is_nothrow_move_constructible::value) - : storage_(other.storage_.GetAllocator()) { - if (IsMemcpyOk::value) { + : storage_(*other.storage_.GetAllocPtr()) { + if (IsMemcpyOk::value) { storage_.MemcpyFrom(other.storage_); other.storage_.SetInlinedSize(0); } else if (other.storage_.GetIsAllocated()) { - storage_.SetAllocation({other.storage_.GetAllocatedData(), - other.storage_.GetAllocatedCapacity()}); + storage_.SetAllocatedData(other.storage_.GetAllocatedData(), + other.storage_.GetAllocatedCapacity()); storage_.SetAllocatedSize(other.storage_.GetSize()); other.storage_.SetInlinedSize(0); } else { - IteratorValueAdapter> other_values( - MoveIterator(other.storage_.GetInlinedData())); + IteratorValueAdapter other_values( + MoveIterator(other.storage_.GetInlinedData())); - inlined_vector_internal::ConstructElements( - storage_.GetAllocator(), storage_.GetInlinedData(), other_values, + inlined_vector_internal::ConstructElements( + storage_.GetAllocPtr(), storage_.GetInlinedData(), &other_values, other.storage_.GetSize()); storage_.SetInlinedSize(other.storage_.GetSize()); @@ -224,32 +216,30 @@ class InlinedVector { } // Creates an inlined vector by moving in the contents of `other` with a copy - // of `allocator`. + // of `alloc`. // - // NOTE: if `other`'s allocator is not equal to `allocator`, even if `other` + // NOTE: if `other`'s allocator is not equal to `alloc`, even if `other` // contains allocated memory, this move constructor will still allocate. 
Since // allocation is performed, this constructor can only be `noexcept` if the // specified allocator is also `noexcept`. - InlinedVector( - InlinedVector&& other, - const allocator_type& - allocator) noexcept(absl::allocator_is_nothrow::value) - : storage_(allocator) { - if (IsMemcpyOk::value) { + InlinedVector(InlinedVector&& other, const allocator_type& alloc) noexcept( + absl::allocator_is_nothrow::value) + : storage_(alloc) { + if (IsMemcpyOk::value) { storage_.MemcpyFrom(other.storage_); other.storage_.SetInlinedSize(0); - } else if ((storage_.GetAllocator() == other.storage_.GetAllocator()) && + } else if ((*storage_.GetAllocPtr() == *other.storage_.GetAllocPtr()) && other.storage_.GetIsAllocated()) { - storage_.SetAllocation({other.storage_.GetAllocatedData(), - other.storage_.GetAllocatedCapacity()}); + storage_.SetAllocatedData(other.storage_.GetAllocatedData(), + other.storage_.GetAllocatedCapacity()); storage_.SetAllocatedSize(other.storage_.GetSize()); other.storage_.SetInlinedSize(0); } else { - storage_.Initialize(IteratorValueAdapter>( - MoveIterator(other.data())), - other.size()); + storage_.Initialize( + IteratorValueAdapter(MoveIterator(other.data())), + other.size()); } } @@ -450,7 +440,7 @@ class InlinedVector { // `InlinedVector::get_allocator()` // // Returns a copy of the inlined vector's allocator. - allocator_type get_allocator() const { return storage_.GetAllocator(); } + allocator_type get_allocator() const { return *storage_.GetAllocPtr(); } // --------------------------------------------------------------------------- // InlinedVector Member Mutators @@ -484,16 +474,16 @@ class InlinedVector { // unspecified state. 
InlinedVector& operator=(InlinedVector&& other) { if (ABSL_PREDICT_TRUE(this != std::addressof(other))) { - if (IsMemcpyOk::value || other.storage_.GetIsAllocated()) { - inlined_vector_internal::DestroyAdapter::DestroyElements( - storage_.GetAllocator(), data(), size()); + if (IsMemcpyOk::value || other.storage_.GetIsAllocated()) { + inlined_vector_internal::DestroyElements(storage_.GetAllocPtr(), data(), + size()); storage_.DeallocateIfAllocated(); storage_.MemcpyFrom(other.storage_); other.storage_.SetInlinedSize(0); } else { - storage_.Assign(IteratorValueAdapter>( - MoveIterator(other.storage_.GetInlinedData())), + storage_.Assign(IteratorValueAdapter( + MoveIterator(other.storage_.GetInlinedData())), other.size()); } } @@ -505,7 +495,7 @@ class InlinedVector { // // Replaces the contents of the inlined vector with `n` copies of `v`. void assign(size_type n, const_reference v) { - storage_.Assign(CopyValueAdapter(std::addressof(v)), n); + storage_.Assign(CopyValueAdapter(v), n); } // Overload of `InlinedVector::assign(...)` that replaces the contents of the @@ -519,10 +509,10 @@ class InlinedVector { // // NOTE: this overload is for iterators that are "forward" category or better. template = 0> + EnableIfAtLeastForwardIterator* = nullptr> void assign(ForwardIterator first, ForwardIterator last) { - storage_.Assign(IteratorValueAdapter(first), - static_cast(std::distance(first, last))); + storage_.Assign(IteratorValueAdapter(first), + std::distance(first, last)); } // Overload of `InlinedVector::assign(...)` to replace the contents of the @@ -530,7 +520,7 @@ class InlinedVector { // // NOTE: this overload is for iterators that are "input" category. template = 0> + DisableIfAtLeastForwardIterator* = nullptr> void assign(InputIterator first, InputIterator last) { size_type i = 0; for (; i < size() && first != last; ++i, static_cast(++first)) { @@ -549,7 +539,7 @@ class InlinedVector { // is larger than `size()`, new elements are value-initialized. 
void resize(size_type n) { ABSL_HARDENING_ASSERT(n <= max_size()); - storage_.Resize(DefaultValueAdapter(), n); + storage_.Resize(DefaultValueAdapter(), n); } // Overload of `InlinedVector::resize(...)` that resizes the inlined vector to @@ -559,7 +549,7 @@ class InlinedVector { // is larger than `size()`, new elements are copied-constructed from `v`. void resize(size_type n, const_reference v) { ABSL_HARDENING_ASSERT(n <= max_size()); - storage_.Resize(CopyValueAdapter(std::addressof(v)), n); + storage_.Resize(CopyValueAdapter(v), n); } // `InlinedVector::insert(...)` @@ -572,7 +562,7 @@ class InlinedVector { // Overload of `InlinedVector::insert(...)` that inserts `v` at `pos` using // move semantics, returning an `iterator` to the newly inserted element. - iterator insert(const_iterator pos, value_type&& v) { + iterator insert(const_iterator pos, RValueReference v) { return emplace(pos, std::move(v)); } @@ -585,20 +575,7 @@ class InlinedVector { if (ABSL_PREDICT_TRUE(n != 0)) { value_type dealias = v; - // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=102329#c2 - // It appears that GCC thinks that since `pos` is a const pointer and may - // point to uninitialized memory at this point, a warning should be - // issued. But `pos` is actually only used to compute an array index to - // write to. -#if !defined(__clang__) && defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wmaybe-uninitialized" -#endif - return storage_.Insert(pos, CopyValueAdapter(std::addressof(dealias)), - n); -#if !defined(__clang__) && defined(__GNUC__) -#pragma GCC diagnostic pop -#endif + return storage_.Insert(pos, CopyValueAdapter(dealias), n); } else { return const_cast(pos); } @@ -617,15 +594,14 @@ class InlinedVector { // // NOTE: this overload is for iterators that are "forward" category or better. 
template = 0> + EnableIfAtLeastForwardIterator* = nullptr> iterator insert(const_iterator pos, ForwardIterator first, ForwardIterator last) { ABSL_HARDENING_ASSERT(pos >= begin()); ABSL_HARDENING_ASSERT(pos <= end()); if (ABSL_PREDICT_TRUE(first != last)) { - return storage_.Insert(pos, - IteratorValueAdapter(first), + return storage_.Insert(pos, IteratorValueAdapter(first), std::distance(first, last)); } else { return const_cast(pos); @@ -638,7 +614,7 @@ class InlinedVector { // // NOTE: this overload is for iterators that are "input" category. template = 0> + DisableIfAtLeastForwardIterator* = nullptr> iterator insert(const_iterator pos, InputIterator first, InputIterator last) { ABSL_HARDENING_ASSERT(pos >= begin()); ABSL_HARDENING_ASSERT(pos <= end()); @@ -662,8 +638,8 @@ class InlinedVector { value_type dealias(std::forward(args)...); return storage_.Insert(pos, - IteratorValueAdapter>( - MoveIterator(std::addressof(dealias))), + IteratorValueAdapter( + MoveIterator(std::addressof(dealias))), 1); } @@ -683,7 +659,7 @@ class InlinedVector { // Overload of `InlinedVector::push_back(...)` for inserting `v` at `end()` // using move semantics. - void push_back(value_type&& v) { + void push_back(RValueReference v) { static_cast(emplace_back(std::move(v))); } @@ -693,7 +669,7 @@ class InlinedVector { void pop_back() noexcept { ABSL_HARDENING_ASSERT(!empty()); - AllocatorTraits::destroy(storage_.GetAllocator(), data() + (size() - 1)); + AllocatorTraits::destroy(*storage_.GetAllocPtr(), data() + (size() - 1)); storage_.SubtractSize(1); } @@ -732,8 +708,8 @@ class InlinedVector { // Destroys all elements in the inlined vector, setting the size to `0` and // deallocating any held memory. 
void clear() noexcept { - inlined_vector_internal::DestroyAdapter::DestroyElements( - storage_.GetAllocator(), data(), size()); + inlined_vector_internal::DestroyElements(storage_.GetAllocPtr(), data(), + size()); storage_.DeallocateIfAllocated(); storage_.SetInlinedSize(0); @@ -746,12 +722,15 @@ class InlinedVector { // `InlinedVector::shrink_to_fit()` // - // Attempts to reduce memory usage by moving elements to (or keeping elements - // in) the smallest available buffer sufficient for containing `size()` - // elements. + // Reduces memory usage by freeing unused memory. After being called, calls to + // `capacity()` will be equal to `max(N, size())`. + // + // If `size() <= N` and the inlined vector contains allocated memory, the + // elements will all be moved to the inlined space and the allocated memory + // will be deallocated. // - // If `size()` is sufficiently small, the elements will be moved into (or kept - // in) the inlined space. + // If `size() > N` and `size() < capacity()`, the elements will be moved to a + // smaller allocation. void shrink_to_fit() { if (storage_.GetIsAllocated()) { storage_.ShrinkToFit(); diff --git a/abseil-cpp/absl/container/inlined_vector_benchmark.cc b/abseil-cpp/absl/container/inlined_vector_benchmark.cc index e256fad6..b8dafe93 100644 --- a/abseil-cpp/absl/container/inlined_vector_benchmark.cc +++ b/abseil-cpp/absl/container/inlined_vector_benchmark.cc @@ -534,28 +534,6 @@ void BM_ConstructFromMove(benchmark::State& state) { ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromMove, TrivialType); ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromMove, NontrivialType); -// Measure cost of copy-constructor+destructor. -void BM_CopyTrivial(benchmark::State& state) { - const int n = state.range(0); - InlVec src(n); - for (auto s : state) { - InlVec copy(src); - benchmark::DoNotOptimize(copy); - } -} -BENCHMARK(BM_CopyTrivial)->Arg(0)->Arg(1)->Arg(kLargeSize); - -// Measure cost of copy-constructor+destructor. 
-void BM_CopyNonTrivial(benchmark::State& state) { - const int n = state.range(0); - InlVec> src(n); - for (auto s : state) { - InlVec> copy(src); - benchmark::DoNotOptimize(copy); - } -} -BENCHMARK(BM_CopyNonTrivial)->Arg(0)->Arg(1)->Arg(kLargeSize); - template void BM_AssignSizeRef(benchmark::State& state) { auto size = ToSize; diff --git a/abseil-cpp/absl/container/inlined_vector_test.cc b/abseil-cpp/absl/container/inlined_vector_test.cc index 4c1ba04a..415c60d9 100644 --- a/abseil-cpp/absl/container/inlined_vector_test.cc +++ b/abseil-cpp/absl/container/inlined_vector_test.cc @@ -736,26 +736,22 @@ TEST(OverheadTest, Storage) { // In particular, ensure that std::allocator doesn't cost anything to store. // The union should be absorbing some of the allocation bookkeeping overhead // in the larger vectors, leaving only the size_ field as overhead. - - struct T { void* val; }; - size_t expected_overhead = sizeof(T); - - EXPECT_EQ((2 * expected_overhead), - sizeof(absl::InlinedVector) - sizeof(T[1])); - EXPECT_EQ(expected_overhead, - sizeof(absl::InlinedVector) - sizeof(T[2])); - EXPECT_EQ(expected_overhead, - sizeof(absl::InlinedVector) - sizeof(T[3])); - EXPECT_EQ(expected_overhead, - sizeof(absl::InlinedVector) - sizeof(T[4])); - EXPECT_EQ(expected_overhead, - sizeof(absl::InlinedVector) - sizeof(T[5])); - EXPECT_EQ(expected_overhead, - sizeof(absl::InlinedVector) - sizeof(T[6])); - EXPECT_EQ(expected_overhead, - sizeof(absl::InlinedVector) - sizeof(T[7])); - EXPECT_EQ(expected_overhead, - sizeof(absl::InlinedVector) - sizeof(T[8])); + EXPECT_EQ(2 * sizeof(int*), + sizeof(absl::InlinedVector) - 1 * sizeof(int*)); + EXPECT_EQ(1 * sizeof(int*), + sizeof(absl::InlinedVector) - 2 * sizeof(int*)); + EXPECT_EQ(1 * sizeof(int*), + sizeof(absl::InlinedVector) - 3 * sizeof(int*)); + EXPECT_EQ(1 * sizeof(int*), + sizeof(absl::InlinedVector) - 4 * sizeof(int*)); + EXPECT_EQ(1 * sizeof(int*), + sizeof(absl::InlinedVector) - 5 * sizeof(int*)); + EXPECT_EQ(1 * sizeof(int*), + 
sizeof(absl::InlinedVector) - 6 * sizeof(int*)); + EXPECT_EQ(1 * sizeof(int*), + sizeof(absl::InlinedVector) - 7 * sizeof(int*)); + EXPECT_EQ(1 * sizeof(int*), + sizeof(absl::InlinedVector) - 8 * sizeof(int*)); } TEST(IntVec, Clear) { @@ -1545,18 +1541,17 @@ TYPED_TEST_P(InstanceTest, InitializerListAssign) { } } -REGISTER_TYPED_TEST_SUITE_P(InstanceTest, Swap, CountConstructorsDestructors, - CountConstructorsDestructorsOnCopyConstruction, - CountConstructorsDestructorsOnMoveConstruction, - CountConstructorsDestructorsOnAssignment, - CountConstructorsDestructorsOnMoveAssignment, - CountElemAssignInlineBacking, RangedConstructor, - RangedAssign, InitializerListAssign); +REGISTER_TYPED_TEST_CASE_P(InstanceTest, Swap, CountConstructorsDestructors, + CountConstructorsDestructorsOnCopyConstruction, + CountConstructorsDestructorsOnMoveConstruction, + CountConstructorsDestructorsOnAssignment, + CountConstructorsDestructorsOnMoveAssignment, + CountElemAssignInlineBacking, RangedConstructor, + RangedAssign, InitializerListAssign); using InstanceTypes = ::testing::Types; -INSTANTIATE_TYPED_TEST_SUITE_P(InstanceTestOnTypes, InstanceTest, - InstanceTypes); +INSTANTIATE_TYPED_TEST_CASE_P(InstanceTestOnTypes, InstanceTest, InstanceTypes); TEST(DynamicVec, DynamicVecCompiles) { DynamicVec v; diff --git a/abseil-cpp/absl/container/internal/btree.h b/abseil-cpp/absl/container/internal/btree.h index 01f4e749..002ccc1e 100644 --- a/abseil-cpp/absl/container/internal/btree.h +++ b/abseil-cpp/absl/container/internal/btree.h @@ -58,7 +58,6 @@ #include #include -#include "absl/base/internal/raw_logging.h" #include "absl/base/macros.h" #include "absl/container/internal/common.h" #include "absl/container/internal/compressed_tuple.h" @@ -75,24 +74,12 @@ namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { -#ifdef ABSL_BTREE_ENABLE_GENERATIONS -#error ABSL_BTREE_ENABLE_GENERATIONS cannot be directly set -#elif defined(ABSL_HAVE_ADDRESS_SANITIZER) || \ - 
defined(ABSL_HAVE_MEMORY_SANITIZER) -// When compiled in sanitizer mode, we add generation integers to the nodes and -// iterators. When iterators are used, we validate that the container has not -// been mutated since the iterator was constructed. -#define ABSL_BTREE_ENABLE_GENERATIONS -#endif - -template -using compare_result_t = absl::result_of_t; - // A helper class that indicates if the Compare parameter is a key-compare-to // comparator. template using btree_is_key_compare_to = - std::is_convertible, absl::weak_ordering>; + std::is_convertible, + absl::weak_ordering>; struct StringBtreeDefaultLess { using is_transparent = void; @@ -101,12 +88,7 @@ struct StringBtreeDefaultLess { // Compatibility constructor. StringBtreeDefaultLess(std::less) {} // NOLINT - StringBtreeDefaultLess(std::less) {} // NOLINT - - // Allow converting to std::less for use in key_comp()/value_comp(). - explicit operator std::less() const { return {}; } - explicit operator std::less() const { return {}; } - explicit operator std::less() const { return {}; } + StringBtreeDefaultLess(std::less) {} // NOLINT absl::weak_ordering operator()(absl::string_view lhs, absl::string_view rhs) const { @@ -133,12 +115,7 @@ struct StringBtreeDefaultGreater { StringBtreeDefaultGreater() = default; StringBtreeDefaultGreater(std::greater) {} // NOLINT - StringBtreeDefaultGreater(std::greater) {} // NOLINT - - // Allow converting to std::greater for use in key_comp()/value_comp(). - explicit operator std::greater() const { return {}; } - explicit operator std::greater() const { return {}; } - explicit operator std::greater() const { return {}; } + StringBtreeDefaultGreater(std::greater) {} // NOLINT absl::weak_ordering operator()(absl::string_view lhs, absl::string_view rhs) const { @@ -159,241 +136,73 @@ struct StringBtreeDefaultGreater { } }; -// See below comments for checked_compare. 
-template ::value> -struct checked_compare_base : Compare { - using Compare::Compare; - explicit checked_compare_base(Compare c) : Compare(std::move(c)) {} - const Compare &comp() const { return *this; } -}; +// A helper class to convert a boolean comparison into a three-way "compare-to" +// comparison that returns an `absl::weak_ordering`. This helper +// class is specialized for less, greater, +// less, greater, less, and +// greater. +// +// key_compare_to_adapter is provided so that btree users +// automatically get the more efficient compare-to code when using common +// Abseil string types with common comparison functors. +// These string-like specializations also turn on heterogeneous lookup by +// default. template -struct checked_compare_base { - explicit checked_compare_base(Compare c) : compare(std::move(c)) {} - const Compare &comp() const { return compare; } - Compare compare; -}; - -// A mechanism for opting out of checked_compare for use only in btree_test.cc. -struct BtreeTestOnlyCheckedCompareOptOutBase {}; - -// A helper class to adapt the specified comparator for two use cases: -// (1) When using common Abseil string types with common comparison functors, -// convert a boolean comparison into a three-way comparison that returns an -// `absl::weak_ordering`. This helper class is specialized for -// less, greater, less, -// greater, less, and greater. -// (2) Adapt the comparator to diagnose cases of non-strict-weak-ordering (see -// https://en.cppreference.com/w/cpp/named_req/Compare) in debug mode. Whenever -// a comparison is made, we will make assertions to verify that the comparator -// is valid. -template -struct key_compare_adapter { - // Inherit from checked_compare_base to support function pointers and also - // keep empty-base-optimization (EBO) support for classes. - // Note: we can't use CompressedTuple here because that would interfere - // with the EBO for `btree::rightmost_`. 
`btree::rightmost_` is itself a - // CompressedTuple and nested `CompressedTuple`s don't support EBO. - // TODO(b/214288561): use CompressedTuple instead once it supports EBO for - // nested `CompressedTuple`s. - struct checked_compare : checked_compare_base { - private: - using Base = typename checked_compare::checked_compare_base; - using Base::comp; - - // If possible, returns whether `t` is equivalent to itself. We can only do - // this for `Key`s because we can't be sure that it's safe to call - // `comp()(k, k)` otherwise. Even if SFINAE allows it, there could be a - // compilation failure inside the implementation of the comparison operator. - bool is_self_equivalent(const Key &k) const { - // Note: this works for both boolean and three-way comparators. - return comp()(k, k) == 0; - } - // If we can't compare `t` with itself, returns true unconditionally. - template - bool is_self_equivalent(const T &) const { - return true; - } - - public: - using Base::Base; - checked_compare(Compare comp) : Base(std::move(comp)) {} // NOLINT - - // Allow converting to Compare for use in key_comp()/value_comp(). - explicit operator Compare() const { return comp(); } - - template >::value, - int> = 0> - bool operator()(const T &lhs, const U &rhs) const { - // NOTE: if any of these assertions fail, then the comparator does not - // establish a strict-weak-ordering (see - // https://en.cppreference.com/w/cpp/named_req/Compare). - assert(is_self_equivalent(lhs)); - assert(is_self_equivalent(rhs)); - const bool lhs_comp_rhs = comp()(lhs, rhs); - assert(!lhs_comp_rhs || !comp()(rhs, lhs)); - return lhs_comp_rhs; - } - - template < - typename T, typename U, - absl::enable_if_t, - absl::weak_ordering>::value, - int> = 0> - absl::weak_ordering operator()(const T &lhs, const U &rhs) const { - // NOTE: if any of these assertions fail, then the comparator does not - // establish a strict-weak-ordering (see - // https://en.cppreference.com/w/cpp/named_req/Compare). 
- assert(is_self_equivalent(lhs)); - assert(is_self_equivalent(rhs)); - const absl::weak_ordering lhs_comp_rhs = comp()(lhs, rhs); -#ifndef NDEBUG - const absl::weak_ordering rhs_comp_lhs = comp()(rhs, lhs); - if (lhs_comp_rhs > 0) { - assert(rhs_comp_lhs < 0 && "lhs_comp_rhs > 0 -> rhs_comp_lhs < 0"); - } else if (lhs_comp_rhs == 0) { - assert(rhs_comp_lhs == 0 && "lhs_comp_rhs == 0 -> rhs_comp_lhs == 0"); - } else { - assert(rhs_comp_lhs > 0 && "lhs_comp_rhs < 0 -> rhs_comp_lhs > 0"); - } -#endif - return lhs_comp_rhs; - } - }; - using type = absl::conditional_t< - std::is_base_of::value, - Compare, checked_compare>; +struct key_compare_to_adapter { + using type = Compare; }; template <> -struct key_compare_adapter, std::string> { +struct key_compare_to_adapter> { using type = StringBtreeDefaultLess; }; template <> -struct key_compare_adapter, std::string> { +struct key_compare_to_adapter> { using type = StringBtreeDefaultGreater; }; template <> -struct key_compare_adapter, absl::string_view> { +struct key_compare_to_adapter> { using type = StringBtreeDefaultLess; }; template <> -struct key_compare_adapter, absl::string_view> { +struct key_compare_to_adapter> { using type = StringBtreeDefaultGreater; }; template <> -struct key_compare_adapter, absl::Cord> { +struct key_compare_to_adapter> { using type = StringBtreeDefaultLess; }; template <> -struct key_compare_adapter, absl::Cord> { +struct key_compare_to_adapter> { using type = StringBtreeDefaultGreater; }; -// Detects an 'absl_btree_prefer_linear_node_search' member. This is -// a protocol used as an opt-in or opt-out of linear search. -// -// For example, this would be useful for key types that wrap an integer -// and define their own cheap operator<(). For example: -// -// class K { -// public: -// using absl_btree_prefer_linear_node_search = std::true_type; -// ... 
-// private: -// friend bool operator<(K a, K b) { return a.k_ < b.k_; } -// int k_; -// }; -// -// btree_map m; // Uses linear search -// -// If T has the preference tag, then it has a preference. -// Btree will use the tag's truth value. -template -struct has_linear_node_search_preference : std::false_type {}; -template -struct prefers_linear_node_search : std::false_type {}; -template -struct has_linear_node_search_preference< - T, absl::void_t> - : std::true_type {}; -template -struct prefers_linear_node_search< - T, absl::void_t> - : T::absl_btree_prefer_linear_node_search {}; - -template -constexpr bool compare_has_valid_result_type() { - using compare_result_type = compare_result_t; - return std::is_same::value || - std::is_convertible::value; -} - -template -class map_value_compare { - template - friend class btree; - - // Note: this `protected` is part of the API of std::map::value_compare. See - // https://en.cppreference.com/w/cpp/container/map/value_compare. - protected: - explicit map_value_compare(original_key_compare c) : comp(std::move(c)) {} - - original_key_compare comp; // NOLINT - - public: - auto operator()(const value_type &lhs, const value_type &rhs) const - -> decltype(comp(lhs.first, rhs.first)) { - return comp(lhs.first, rhs.first); - } -}; - template + bool Multi, typename SlotPolicy> struct common_params { - using original_key_compare = Compare; - // If Compare is a common comparator for a string-like type, then we adapt it // to use heterogeneous lookup and to be a key-compare-to comparator. - // We also adapt the comparator to diagnose invalid comparators in debug mode. - // We disable this when `Compare` is invalid in a way that will cause - // adaptation to fail (having invalid return type) so that we can give a - // better compilation failure in static_assert_validation. If we don't do - // this, then there will be cascading compilation failures that are confusing - // for users. 
- using key_compare = - absl::conditional_t(), - Compare, - typename key_compare_adapter::type>; - - static constexpr bool kIsKeyCompareStringAdapted = - std::is_same::value || - std::is_same::value; - static constexpr bool kIsKeyCompareTransparent = - IsTransparent::value || - kIsKeyCompareStringAdapted; - static constexpr bool kEnableGenerations = -#ifdef ABSL_BTREE_ENABLE_GENERATIONS - true; -#else - false; -#endif - + using key_compare = typename key_compare_to_adapter::type; + // True when key_compare has been adapted to StringBtreeDefault{Less,Greater}. + using is_key_compare_adapted = + absl::negation>; // A type which indicates if we have a key-compare-to functor or a plain old // key-compare functor. using is_key_compare_to = btree_is_key_compare_to; using allocator_type = Alloc; using key_type = Key; - using size_type = size_t; + using size_type = std::make_signed::type; using difference_type = ptrdiff_t; + // True if this is a multiset or multimap. + using is_multi_container = std::integral_constant; + using slot_policy = SlotPolicy; using slot_type = typename slot_policy::slot_type; using value_type = typename slot_policy::value_type; @@ -403,41 +212,20 @@ struct common_params { using reference = value_type &; using const_reference = const value_type &; - using value_compare = - absl::conditional_t, - original_key_compare>; - using is_map_container = std::integral_constant; - - // For the given lookup key type, returns whether we can have multiple - // equivalent keys in the btree. If this is a multi-container, then we can. - // Otherwise, we can have multiple equivalent keys only if all of the - // following conditions are met: - // - The comparator is transparent. - // - The lookup key type is not the same as key_type. - // - The comparator is not a StringBtreeDefault{Less,Greater} comparator - // that we know has the same equivalence classes for all lookup types. 
- template - constexpr static bool can_have_multiple_equivalent_keys() { - return IsMulti || (IsTransparent::value && - !std::is_same::value && - !kIsKeyCompareStringAdapted); - } - enum { kTargetNodeSize = TargetNodeSize, - // Upper bound for the available space for slots. This is largest for leaf + // Upper bound for the available space for values. This is largest for leaf // nodes, which have overhead of at least a pointer + 4 bytes (for storing // 3 field_types and an enum). - kNodeSlotSpace = + kNodeValueSpace = TargetNodeSize - /*minimum overhead=*/(sizeof(void *) + 4), }; - // This is an integral type large enough to hold as many slots as will fit a - // node of TargetNodeSize bytes. + // This is an integral type large enough to hold as many + // ValueSize-values as will fit a node of TargetNodeSize bytes. using node_count_type = - absl::conditional_t<(kNodeSlotSpace / sizeof(slot_type) > + absl::conditional_t<(kNodeValueSpace / sizeof(value_type) > (std::numeric_limits::max)()), uint16_t, uint8_t>; // NOLINT @@ -460,10 +248,116 @@ struct common_params { slot_policy::destroy(alloc, slot); } static void transfer(Alloc *alloc, slot_type *new_slot, slot_type *old_slot) { - slot_policy::transfer(alloc, new_slot, old_slot); + construct(alloc, new_slot, old_slot); + destroy(alloc, old_slot); + } + static void swap(Alloc *alloc, slot_type *a, slot_type *b) { + slot_policy::swap(alloc, a, b); + } + static void move(Alloc *alloc, slot_type *src, slot_type *dest) { + slot_policy::move(alloc, src, dest); } }; +// A parameters structure for holding the type parameters for a btree_map. +// Compare and Alloc should be nothrow copy-constructible. +template +struct map_params : common_params> { + using super_type = typename map_params::common_params; + using mapped_type = Data; + // This type allows us to move keys when it is safe to do so. It is safe + // for maps in which value_type and mutable_value_type are layout compatible. 
+ using slot_policy = typename super_type::slot_policy; + using slot_type = typename super_type::slot_type; + using value_type = typename super_type::value_type; + using init_type = typename super_type::init_type; + + using key_compare = typename super_type::key_compare; + // Inherit from key_compare for empty base class optimization. + struct value_compare : private key_compare { + value_compare() = default; + explicit value_compare(const key_compare &cmp) : key_compare(cmp) {} + + template + auto operator()(const T &left, const U &right) const + -> decltype(std::declval()(left.first, right.first)) { + return key_compare::operator()(left.first, right.first); + } + }; + using is_map_container = std::true_type; + + template + static auto key(const V &value) -> decltype(value.first) { + return value.first; + } + static const Key &key(const slot_type *s) { return slot_policy::key(s); } + static const Key &key(slot_type *s) { return slot_policy::key(s); } + // For use in node handle. + static auto mutable_key(slot_type *s) + -> decltype(slot_policy::mutable_key(s)) { + return slot_policy::mutable_key(s); + } + static mapped_type &value(value_type *value) { return value->second; } +}; + +// This type implements the necessary functions from the +// absl::container_internal::slot_type interface. +template +struct set_slot_policy { + using slot_type = Key; + using value_type = Key; + using mutable_value_type = Key; + + static value_type &element(slot_type *slot) { return *slot; } + static const value_type &element(const slot_type *slot) { return *slot; } + + template + static void construct(Alloc *alloc, slot_type *slot, Args &&... 
args) { + absl::allocator_traits::construct(*alloc, slot, + std::forward(args)...); + } + + template + static void construct(Alloc *alloc, slot_type *slot, slot_type *other) { + absl::allocator_traits::construct(*alloc, slot, std::move(*other)); + } + + template + static void destroy(Alloc *alloc, slot_type *slot) { + absl::allocator_traits::destroy(*alloc, slot); + } + + template + static void swap(Alloc * /*alloc*/, slot_type *a, slot_type *b) { + using std::swap; + swap(*a, *b); + } + + template + static void move(Alloc * /*alloc*/, slot_type *src, slot_type *dest) { + *dest = std::move(*src); + } +}; + +// A parameters structure for holding the type parameters for a btree_set. +// Compare and Alloc should be nothrow copy-constructible. +template +struct set_params : common_params> { + using value_type = Key; + using slot_type = typename set_params::common_params::slot_type; + using value_compare = typename set_params::common_params::key_compare; + using is_map_container = std::false_type; + + template + static const V &key(const V &value) { return value; } + static const Key &key(const slot_type *slot) { return *slot; } + static const Key &key(slot_type *slot) { return *slot; } +}; + // An adapter class that converts a lower-bound compare into an upper-bound // compare. Note: there is no need to make a version of this adapter specialized // for key-compare-to functors because the upper-bound (the first value greater @@ -497,10 +391,6 @@ struct SearchResult { // useful information. 
template struct SearchResult { - SearchResult() {} - explicit SearchResult(V v) : value(v) {} - SearchResult(V v, MatchKind /*match*/) : value(v) {} - V value; static constexpr bool HasMatch() { return false; } @@ -513,10 +403,10 @@ struct SearchResult { template class btree_node { using is_key_compare_to = typename Params::is_key_compare_to; + using is_multi_container = typename Params::is_multi_container; using field_type = typename Params::node_count_type; using allocator_type = typename Params::allocator_type; using slot_type = typename Params::slot_type; - using original_key_compare = typename Params::original_key_compare; public: using params_type = Params; @@ -531,35 +421,21 @@ class btree_node { using difference_type = typename Params::difference_type; // Btree decides whether to use linear node search as follows: - // - If the comparator expresses a preference, use that. - // - If the key expresses a preference, use that. // - If the key is arithmetic and the comparator is std::less or // std::greater, choose linear. // - Otherwise, choose binary. // TODO(ezb): Might make sense to add condition(s) based on node-size. using use_linear_search = std::integral_constant< - bool, has_linear_node_search_preference::value - ? prefers_linear_node_search::value - : has_linear_node_search_preference::value - ? prefers_linear_node_search::value - : std::is_arithmetic::value && - (std::is_same, - original_key_compare>::value || - std::is_same, - original_key_compare>::value)>; - - // This class is organized by absl::container_internal::Layout as if it had - // the following structure: + bool, + std::is_arithmetic::value && + (std::is_same, key_compare>::value || + std::is_same, key_compare>::value)>; + + // This class is organized by gtl::Layout as if it had the following + // structure: // // A pointer to the node's parent. 
// btree_node *parent; // - // // When ABSL_BTREE_ENABLE_GENERATIONS is defined, we also have a - // // generation integer in order to check that when iterators are - // // used, they haven't been invalidated already. Only the generation on - // // the root is used, but we have one on each node because whether a node - // // is root or not can change. - // uint32_t generation; - // // // The position of the node in the node's parent. // field_type position; // // The index of the first populated value in `values`. @@ -570,23 +446,23 @@ class btree_node { // // is the same as the count of values. // field_type finish; // // The maximum number of values the node can hold. This is an integer in - // // [1, kNodeSlots] for root leaf nodes, kNodeSlots for non-root leaf + // // [1, kNodeValues] for root leaf nodes, kNodeValues for non-root leaf // // nodes, and kInternalNodeMaxCount (as a sentinel value) for internal - // // nodes (even though there are still kNodeSlots values in the node). + // // nodes (even though there are still kNodeValues values in the node). // // TODO(ezb): make max_count use only 4 bits and record log2(capacity) // // to free extra bits for is_root, etc. // field_type max_count; // // // The array of values. The capacity is `max_count` for leaf nodes and - // // kNodeSlots for internal nodes. Only the values in + // // kNodeValues for internal nodes. Only the values in // // [start, finish) have been initialized and are valid. // slot_type values[max_count]; // // // The array of child pointers. The keys in children[i] are all less // // than key(i). The keys in children[i + 1] are all greater than key(i). - // // There are 0 children for leaf nodes and kNodeSlots + 1 children for + // // There are 0 children for leaf nodes and kNodeValues + 1 children for // // internal nodes. - // btree_node *children[kNodeSlots + 1]; + // btree_node *children[kNodeValues + 1]; // // This class is only constructed by EmptyNodeType. 
Normally, pointers to the // layout above are allocated, cast to btree_node*, and de-allocated within @@ -606,72 +482,59 @@ class btree_node { btree_node() = default; private: - using layout_type = - absl::container_internal::Layout; - constexpr static size_type SizeWithNSlots(size_type n) { - return layout_type( - /*parent*/ 1, - /*generation*/ params_type::kEnableGenerations ? 1 : 0, - /*position, start, finish, max_count*/ 4, - /*slots*/ n, - /*children*/ 0) + using layout_type = absl::container_internal::Layout; + constexpr static size_type SizeWithNValues(size_type n) { + return layout_type(/*parent*/ 1, + /*position, start, finish, max_count*/ 4, + /*values*/ n, + /*children*/ 0) .AllocSize(); } - // A lower bound for the overhead of fields other than slots in a leaf node. + // A lower bound for the overhead of fields other than values in a leaf node. constexpr static size_type MinimumOverhead() { - return SizeWithNSlots(1) - sizeof(slot_type); + return SizeWithNValues(1) - sizeof(value_type); } // Compute how many values we can fit onto a leaf node taking into account // padding. - constexpr static size_type NodeTargetSlots(const size_type begin, - const size_type end) { + constexpr static size_type NodeTargetValues(const int begin, const int end) { return begin == end ? begin - : SizeWithNSlots((begin + end) / 2 + 1) > + : SizeWithNValues((begin + end) / 2 + 1) > params_type::kTargetNodeSize - ? NodeTargetSlots(begin, (begin + end) / 2) - : NodeTargetSlots((begin + end) / 2 + 1, end); + ? 
NodeTargetValues(begin, (begin + end) / 2) + : NodeTargetValues((begin + end) / 2 + 1, end); } enum { kTargetNodeSize = params_type::kTargetNodeSize, - kNodeTargetSlots = NodeTargetSlots(0, params_type::kTargetNodeSize), + kNodeTargetValues = NodeTargetValues(0, params_type::kTargetNodeSize), - // We need a minimum of 3 slots per internal node in order to perform + // We need a minimum of 3 values per internal node in order to perform // splitting (1 value for the two nodes involved in the split and 1 value - // propagated to the parent as the delimiter for the split). For performance - // reasons, we don't allow 3 slots-per-node due to bad worst case occupancy - // of 1/3 (for a node, not a b-tree). - kMinNodeSlots = 4, - - kNodeSlots = - kNodeTargetSlots >= kMinNodeSlots ? kNodeTargetSlots : kMinNodeSlots, + // propagated to the parent as the delimiter for the split). + kNodeValues = kNodeTargetValues >= 3 ? kNodeTargetValues : 3, // The node is internal (i.e. is not a leaf node) if and only if `max_count` // has this value. kInternalNodeMaxCount = 0, }; - // Leaves can have less than kNodeSlots values. - constexpr static layout_type LeafLayout(const int slot_count = kNodeSlots) { - return layout_type( - /*parent*/ 1, - /*generation*/ params_type::kEnableGenerations ? 1 : 0, - /*position, start, finish, max_count*/ 4, - /*slots*/ slot_count, - /*children*/ 0); + // Leaves can have less than kNodeValues values. + constexpr static layout_type LeafLayout(const int max_values = kNodeValues) { + return layout_type(/*parent*/ 1, + /*position, start, finish, max_count*/ 4, + /*values*/ max_values, + /*children*/ 0); } constexpr static layout_type InternalLayout() { - return layout_type( - /*parent*/ 1, - /*generation*/ params_type::kEnableGenerations ? 
1 : 0, - /*position, start, finish, max_count*/ 4, - /*slots*/ kNodeSlots, - /*children*/ kNodeSlots + 1); + return layout_type(/*parent*/ 1, + /*position, start, finish, max_count*/ 4, + /*values*/ kNodeValues, + /*children*/ kNodeValues + 1); } - constexpr static size_type LeafSize(const int slot_count = kNodeSlots) { - return LeafLayout(slot_count).AllocSize(); + constexpr static size_type LeafSize(const int max_values = kNodeValues) { + return LeafLayout(max_values).AllocSize(); } constexpr static size_type InternalSize() { return InternalLayout().AllocSize(); @@ -682,47 +545,44 @@ class btree_node { template inline typename layout_type::template ElementType *GetField() { // We assert that we don't read from values that aren't there. - assert(N < 4 || is_internal()); + assert(N < 3 || !leaf()); return InternalLayout().template Pointer(reinterpret_cast(this)); } template inline const typename layout_type::template ElementType *GetField() const { - assert(N < 4 || is_internal()); + assert(N < 3 || !leaf()); return InternalLayout().template Pointer( reinterpret_cast(this)); } void set_parent(btree_node *p) { *GetField<0>() = p; } - field_type &mutable_finish() { return GetField<2>()[2]; } - slot_type *slot(int i) { return &GetField<3>()[i]; } + field_type &mutable_finish() { return GetField<1>()[2]; } + slot_type *slot(int i) { return &GetField<2>()[i]; } slot_type *start_slot() { return slot(start()); } slot_type *finish_slot() { return slot(finish()); } - const slot_type *slot(int i) const { return &GetField<3>()[i]; } - void set_position(field_type v) { GetField<2>()[0] = v; } - void set_start(field_type v) { GetField<2>()[1] = v; } - void set_finish(field_type v) { GetField<2>()[2] = v; } + const slot_type *slot(int i) const { return &GetField<2>()[i]; } + void set_position(field_type v) { GetField<1>()[0] = v; } + void set_start(field_type v) { GetField<1>()[1] = v; } + void set_finish(field_type v) { GetField<1>()[2] = v; } // This method is only called by 
the node init methods. - void set_max_count(field_type v) { GetField<2>()[3] = v; } + void set_max_count(field_type v) { GetField<1>()[3] = v; } public: // Whether this is a leaf node or not. This value doesn't change after the // node is created. - bool is_leaf() const { return GetField<2>()[3] != kInternalNodeMaxCount; } - // Whether this is an internal node or not. This value doesn't change after - // the node is created. - bool is_internal() const { return !is_leaf(); } + bool leaf() const { return GetField<1>()[3] != kInternalNodeMaxCount; } // Getter for the position of this node in its parent. - field_type position() const { return GetField<2>()[0]; } + field_type position() const { return GetField<1>()[0]; } // Getter for the offset of the first value in the `values` array. field_type start() const { - // TODO(ezb): when floating storage is implemented, return GetField<2>()[1]; - assert(GetField<2>()[1] == 0); + // TODO(ezb): when floating storage is implemented, return GetField<1>()[1]; + assert(GetField<1>()[1] == 0); return 0; } // Getter for the offset after the last value in the `values` array. - field_type finish() const { return GetField<2>()[2]; } + field_type finish() const { return GetField<1>()[2]; } // Getters for the number of values stored in this node. field_type count() const { @@ -731,10 +591,10 @@ class btree_node { } field_type max_count() const { // Internal nodes have max_count==kInternalNodeMaxCount. - // Leaf nodes have max_count in [1, kNodeSlots]. - const field_type max_count = GetField<2>()[3]; + // Leaf nodes have max_count in [1, kNodeValues]. + const field_type max_count = GetField<1>()[3]; return max_count == field_type{kInternalNodeMaxCount} - ? field_type{kNodeSlots} + ? field_type{kNodeValues} : max_count; } @@ -743,44 +603,21 @@ class btree_node { // Getter for whether the node is the root of the tree. The parent of the // root of the tree is the leftmost node in the tree which is guaranteed to // be a leaf. 
- bool is_root() const { return parent()->is_leaf(); } + bool is_root() const { return parent()->leaf(); } void make_root() { assert(parent()->is_root()); - set_generation(parent()->generation()); set_parent(parent()->parent()); } - // Gets the root node's generation integer, which is the one used by the tree. - uint32_t *get_root_generation() const { - assert(params_type::kEnableGenerations); - const btree_node *curr = this; - for (; !curr->is_root(); curr = curr->parent()) continue; - return const_cast(&curr->GetField<1>()[0]); - } - - // Returns the generation for iterator validation. - uint32_t generation() const { - return params_type::kEnableGenerations ? *get_root_generation() : 0; - } - // Updates generation. Should only be called on a root node or during node - // initialization. - void set_generation(uint32_t generation) { - if (params_type::kEnableGenerations) GetField<1>()[0] = generation; - } - // Updates the generation. We do this whenever the node is mutated. - void next_generation() { - if (params_type::kEnableGenerations) ++*get_root_generation(); - } - // Getters for the key/value at position i in the node. const key_type &key(int i) const { return params_type::key(slot(i)); } reference value(int i) { return params_type::element(slot(i)); } const_reference value(int i) const { return params_type::element(slot(i)); } // Getters/setter for the child at position i in the node. 
- btree_node *child(int i) const { return GetField<4>()[i]; } + btree_node *child(int i) const { return GetField<3>()[i]; } btree_node *start_child() const { return child(start()); } - btree_node *&mutable_child(int i) { return GetField<4>()[i]; } + btree_node *&mutable_child(int i) { return GetField<3>()[i]; } void clear_child(int i) { absl::container_internal::SanitizerPoisonObject(&mutable_child(i)); } @@ -835,7 +672,7 @@ class btree_node { } ++s; } - return SearchResult{s}; + return {s}; } // Returns the position of the first value whose key is not less than k using @@ -870,7 +707,7 @@ class btree_node { e = mid; } } - return SearchResult{s}; + return {s}; } // Returns the position of the first value whose key is not less than k using @@ -879,7 +716,7 @@ class btree_node { SearchResult binary_search_impl( const K &k, int s, int e, const CompareTo &comp, std::true_type /* IsCompareTo */) const { - if (params_type::template can_have_multiple_equivalent_keys()) { + if (is_multi_container::value) { MatchKind exact_match = MatchKind::kNe; while (s != e) { const int mid = (s + e) >> 1; @@ -890,14 +727,14 @@ class btree_node { e = mid; if (c == 0) { // Need to return the first value whose key is not less than k, - // which requires continuing the binary search if there could be - // multiple equivalent keys. + // which requires continuing the binary search if this is a + // multi-container. exact_match = MatchKind::kEq; } } } return {s, exact_match}; - } else { // Can't have multiple equivalent keys. + } else { // Not a multi-container. while (s != e) { const int mid = (s + e) >> 1; const absl::weak_ordering c = comp(key(mid), k); @@ -937,8 +774,7 @@ class btree_node { void merge(btree_node *src, allocator_type *alloc); // Node allocation/deletion routines. 
- void init_leaf(int max_count, btree_node *parent) { - set_generation(0); + void init_leaf(btree_node *parent, int max_count) { set_parent(parent); set_position(0); set_start(0); @@ -948,12 +784,12 @@ class btree_node { start_slot(), max_count * sizeof(slot_type)); } void init_internal(btree_node *parent) { - init_leaf(kNodeSlots, parent); + init_leaf(parent, kNodeValues); // Set `max_count` to a sentinel value to indicate that this node is // internal. set_max_count(kInternalNodeMaxCount); absl::container_internal::SanitizerPoisonMemoryRegion( - &mutable_child(start()), (kNodeSlots + 1) * sizeof(btree_node *)); + &mutable_child(start()), (kNodeValues + 1) * sizeof(btree_node *)); } static void deallocate(const size_type size, btree_node *node, @@ -964,21 +800,24 @@ class btree_node { // Deletes a node and all of its children. static void clear_and_delete(btree_node *node, allocator_type *alloc); + public: + // Exposed only for tests. + static bool testonly_uses_linear_node_search() { + return use_linear_search::value; + } + private: template void value_init(const field_type i, allocator_type *alloc, Args &&... args) { - next_generation(); absl::container_internal::SanitizerUnpoisonObject(slot(i)); params_type::construct(alloc, slot(i), std::forward(args)...); } void value_destroy(const field_type i, allocator_type *alloc) { - next_generation(); params_type::destroy(alloc, slot(i)); absl::container_internal::SanitizerPoisonObject(slot(i)); } void value_destroy_n(const field_type i, const field_type n, allocator_type *alloc) { - next_generation(); for (slot_type *s = slot(i), *end = slot(i + n); s != end; ++s) { params_type::destroy(alloc, s); absl::container_internal::SanitizerPoisonObject(s); @@ -994,7 +833,6 @@ class btree_node { // Transfers value from slot `src_i` in `src_node` to slot `dest_i` in `this`. 
void transfer(const size_type dest_i, const size_type src_i, btree_node *src_node, allocator_type *alloc) { - next_generation(); transfer(slot(dest_i), src_node->slot(src_i), alloc); } @@ -1003,7 +841,6 @@ class btree_node { void transfer_n(const size_type n, const size_type dest_i, const size_type src_i, btree_node *src_node, allocator_type *alloc) { - next_generation(); for (slot_type *src = src_node->slot(src_i), *end = src + n, *dest = slot(dest_i); src != end; ++src, ++dest) { @@ -1016,7 +853,6 @@ class btree_node { void transfer_n_backward(const size_type n, const size_type dest_i, const size_type src_i, btree_node *src_node, allocator_type *alloc) { - next_generation(); for (slot_type *src = src_node->slot(src_i + n - 1), *end = src - n, *dest = slot(dest_i + n - 1); src != end; --src, --dest) { @@ -1027,17 +863,16 @@ class btree_node { template friend class btree; template - friend class btree_iterator; + friend struct btree_iterator; friend class BtreeNodePeer; - friend struct btree_access; }; template -class btree_iterator { +struct btree_iterator { + private: using key_type = typename Node::key_type; using size_type = typename Node::size_type; using params_type = typename Node::params_type; - using is_map_container = typename params_type::is_map_container; using node_type = Node; using normal_node = typename std::remove_const::type; @@ -1049,7 +884,7 @@ class btree_iterator { using slot_type = typename params_type::slot_type; using iterator = - btree_iterator; + btree_iterator; using const_iterator = btree_iterator; @@ -1061,51 +896,72 @@ class btree_iterator { using reference = Reference; using iterator_category = std::bidirectional_iterator_tag; - btree_iterator() : btree_iterator(nullptr, -1) {} - explicit btree_iterator(Node *n) : btree_iterator(n, n->start()) {} - btree_iterator(Node *n, int p) : node_(n), position_(p) { -#ifdef ABSL_BTREE_ENABLE_GENERATIONS - // Use `~uint32_t{}` as a sentinel value for iterator generations so it - // doesn't match 
the initial value for the actual generation. - generation_ = n != nullptr ? n->generation() : ~uint32_t{}; -#endif - } + btree_iterator() : node(nullptr), position(-1) {} + explicit btree_iterator(Node *n) : node(n), position(n->start()) {} + btree_iterator(Node *n, int p) : node(n), position(p) {} // NOTE: this SFINAE allows for implicit conversions from iterator to - // const_iterator, but it specifically avoids hiding the copy constructor so - // that the trivial one will be used when possible. + // const_iterator, but it specifically avoids defining copy constructors so + // that btree_iterator can be trivially copyable. This is for performance and + // binary size reasons. template , iterator>::value && std::is_same::value, int> = 0> - btree_iterator(const btree_iterator other) // NOLINT - : node_(other.node_), position_(other.position_) { -#ifdef ABSL_BTREE_ENABLE_GENERATIONS - generation_ = other.generation_; -#endif + btree_iterator(const btree_iterator &other) // NOLINT + : node(other.node), position(other.position) {} + + private: + // This SFINAE allows explicit conversions from const_iterator to + // iterator, but also avoids defining a copy constructor. + // NOTE: the const_cast is safe because this constructor is only called by + // non-const methods and the container owns the nodes. + template , const_iterator>::value && + std::is_same::value, + int> = 0> + explicit btree_iterator(const btree_iterator &other) + : node(const_cast(other.node)), position(other.position) {} + + // Increment/decrement the iterator. 
+ void increment() { + if (node->leaf() && ++position < node->finish()) { + return; + } + increment_slow(); } + void increment_slow(); + + void decrement() { + if (node->leaf() && --position >= node->start()) { + return; + } + decrement_slow(); + } + void decrement_slow(); + public: bool operator==(const iterator &other) const { - return node_ == other.node_ && position_ == other.position_; + return node == other.node && position == other.position; } bool operator==(const const_iterator &other) const { - return node_ == other.node_ && position_ == other.position_; + return node == other.node && position == other.position; } bool operator!=(const iterator &other) const { - return node_ != other.node_ || position_ != other.position_; + return node != other.node || position != other.position; } bool operator!=(const const_iterator &other) const { - return node_ != other.node_ || position_ != other.position_; + return node != other.node || position != other.position; } // Accessors for the key/value the iterator is pointing at. 
reference operator*() const { - ABSL_HARDENING_ASSERT(node_ != nullptr); - ABSL_HARDENING_ASSERT(node_->start() <= position_); - ABSL_HARDENING_ASSERT(node_->finish() > position_); - assert_valid_generation(); - return node_->value(position_); + ABSL_HARDENING_ASSERT(node != nullptr); + ABSL_HARDENING_ASSERT(node->start() <= position); + ABSL_HARDENING_ASSERT(node->finish() > position); + return node->value(position); } pointer operator->() const { return &operator*(); } @@ -1129,8 +985,6 @@ class btree_iterator { } private: - friend iterator; - friend const_iterator; template friend class btree; template @@ -1141,96 +995,36 @@ class btree_iterator { friend class btree_map_container; template friend class btree_multiset_container; + template + friend struct btree_iterator; template friend class base_checker; - friend struct btree_access; - // This SFINAE allows explicit conversions from const_iterator to - // iterator, but also avoids hiding the copy constructor. - // NOTE: the const_cast is safe because this constructor is only called by - // non-const methods and the container owns the nodes. - template , const_iterator>::value && - std::is_same::value, - int> = 0> - explicit btree_iterator(const btree_iterator other) - : node_(const_cast(other.node_)), - position_(other.position_) { -#ifdef ABSL_BTREE_ENABLE_GENERATIONS - generation_ = other.generation_; -#endif - } - - // Increment/decrement the iterator. - void increment() { - assert_valid_generation(); - if (node_->is_leaf() && ++position_ < node_->finish()) { - return; - } - increment_slow(); - } - void increment_slow(); - - void decrement() { - assert_valid_generation(); - if (node_->is_leaf() && --position_ >= node_->start()) { - return; - } - decrement_slow(); - } - void decrement_slow(); - - // Updates the generation. For use internally right before we return an - // iterator to the user. 
- void update_generation() { -#ifdef ABSL_BTREE_ENABLE_GENERATIONS - if (node_ != nullptr) generation_ = node_->generation(); -#endif - } - - const key_type &key() const { return node_->key(position_); } - decltype(std::declval()->slot(0)) slot() { - return node_->slot(position_); - } - - void assert_valid_generation() const { -#ifdef ABSL_BTREE_ENABLE_GENERATIONS - if (node_ != nullptr && node_->generation() != generation_) { - ABSL_INTERNAL_LOG( - FATAL, - "Attempting to use an invalidated iterator. The corresponding b-tree " - "container has been mutated since this iterator was constructed."); - } -#endif - } + const key_type &key() const { return node->key(position); } + slot_type *slot() { return node->slot(position); } // The node in the tree the iterator is pointing at. - Node *node_; + Node *node; // The position within the node of the tree the iterator is pointing at. // NOTE: this is an int rather than a field_type because iterators can point // to invalid positions (such as -1) in certain circumstances. - int position_; -#ifdef ABSL_BTREE_ENABLE_GENERATIONS - // Used to check that the iterator hasn't been invalidated. - uint32_t generation_; -#endif + int position; }; template class btree { using node_type = btree_node; using is_key_compare_to = typename Params::is_key_compare_to; + using init_type = typename Params::init_type; using field_type = typename node_type::field_type; + using is_multi_container = typename Params::is_multi_container; + using is_key_compare_adapted = typename Params::is_key_compare_adapted; // We use a static empty node for the root/leftmost/rightmost of empty btrees // in order to avoid branching in begin()/end(). 
struct alignas(node_type::Alignment()) EmptyNodeType : node_type { using field_type = typename node_type::field_type; node_type *parent; -#ifdef ABSL_BTREE_ENABLE_GENERATIONS - uint32_t generation = 0; -#endif field_type position = 0; field_type start = 0; field_type finish = 0; @@ -1260,8 +1054,8 @@ class btree { } enum : uint32_t { - kNodeSlots = node_type::kNodeSlots, - kMinNodeValues = kNodeSlots / 2, + kNodeValues = node_type::kNodeValues, + kMinNodeValues = kNodeValues / 2, }; struct node_stats { @@ -1285,15 +1079,13 @@ class btree { using size_type = typename Params::size_type; using difference_type = typename Params::difference_type; using key_compare = typename Params::key_compare; - using original_key_compare = typename Params::original_key_compare; using value_compare = typename Params::value_compare; using allocator_type = typename Params::allocator_type; using reference = typename Params::reference; using const_reference = typename Params::const_reference; using pointer = typename Params::pointer; using const_pointer = typename Params::const_pointer; - using iterator = - typename btree_iterator::iterator; + using iterator = btree_iterator; using const_iterator = typename iterator::const_iterator; using reverse_iterator = std::reverse_iterator; using const_reverse_iterator = std::reverse_iterator; @@ -1304,39 +1096,29 @@ class btree { using slot_type = typename Params::slot_type; private: + // For use in copy_or_move_values_in_order. + const value_type &maybe_move_from_iterator(const_iterator it) { return *it; } + value_type &&maybe_move_from_iterator(iterator it) { return std::move(*it); } + // Copies or moves (depending on the template parameter) the values in // other into this btree in their order in other. This btree must be empty // before this method is called. This method is used in copy construction, // copy assignment, and move assignment. 
template - void copy_or_move_values_in_order(Btree &other); + void copy_or_move_values_in_order(Btree *other); // Validates that various assumptions/requirements are true at compile time. constexpr static bool static_assert_validation(); public: - btree(const key_compare &comp, const allocator_type &alloc) - : root_(EmptyNode()), rightmost_(comp, alloc, EmptyNode()), size_(0) {} + btree(const key_compare &comp, const allocator_type &alloc); - btree(const btree &other) : btree(other, other.allocator()) {} - btree(const btree &other, const allocator_type &alloc) - : btree(other.key_comp(), alloc) { - copy_or_move_values_in_order(other); - } + btree(const btree &other); btree(btree &&other) noexcept - : root_(absl::exchange(other.root_, EmptyNode())), - rightmost_(std::move(other.rightmost_)), + : root_(std::move(other.root_)), + rightmost_(absl::exchange(other.rightmost_, EmptyNode())), size_(absl::exchange(other.size_, 0)) { - other.mutable_rightmost() = EmptyNode(); - } - btree(btree &&other, const allocator_type &alloc) - : btree(other.key_comp(), alloc) { - if (alloc == other.allocator()) { - swap(other); - } else { - // Move values from `other` one at a time when allocators are different. 
- copy_or_move_values_in_order(other); - } + other.mutable_root() = EmptyNode(); } ~btree() { @@ -1352,9 +1134,9 @@ class btree { iterator begin() { return iterator(leftmost()); } const_iterator begin() const { return const_iterator(leftmost()); } - iterator end() { return iterator(rightmost(), rightmost()->finish()); } + iterator end() { return iterator(rightmost_, rightmost_->finish()); } const_iterator end() const { - return const_iterator(rightmost(), rightmost()->finish()); + return const_iterator(rightmost_, rightmost_->finish()); } reverse_iterator rbegin() { return reverse_iterator(end()); } const_reverse_iterator rbegin() const { @@ -1365,22 +1147,17 @@ class btree { return const_reverse_iterator(begin()); } - // Finds the first element whose key is not less than `key`. + // Finds the first element whose key is not less than key. template iterator lower_bound(const K &key) { - return internal_end(internal_lower_bound(key).value); + return internal_end(internal_lower_bound(key)); } template const_iterator lower_bound(const K &key) const { - return internal_end(internal_lower_bound(key).value); + return internal_end(internal_lower_bound(key)); } - // Finds the first element whose key is not less than `key` and also returns - // whether that element is equal to `key`. - template - std::pair lower_bound_equal(const K &key) const; - - // Finds the first element whose key is greater than `key`. + // Finds the first element whose key is greater than key. template iterator upper_bound(const K &key) { return internal_end(internal_upper_bound(key)); @@ -1462,8 +1239,18 @@ class btree { // to the element after the last erased element. std::pair erase_range(iterator begin, iterator end); - // Finds an element with key equivalent to `key` or returns `end()` if `key` - // is not present. + // Erases the specified key from the btree. Returns 1 if an element was + // erased and 0 otherwise. 
+ template + size_type erase_unique(const K &key); + + // Erases all of the entries matching the specified key from the + // btree. Returns the number of elements erased. + template + size_type erase_multi(const K &key); + + // Finds the iterator corresponding to a key or returns end() if the key is + // not present. template iterator find(const K &key) { return internal_end(internal_find(key)); @@ -1473,6 +1260,23 @@ class btree { return internal_end(internal_find(key)); } + // Returns a count of the number of times the key appears in the btree. + template + size_type count_unique(const K &key) const { + const iterator begin = internal_find(key); + if (begin.node == nullptr) { + // The key doesn't exist in the tree. + return 0; + } + return 1; + } + // Returns a count of the number of times the key appears in the btree. + template + size_type count_multi(const K &key) const { + const auto range = equal_range(key); + return std::distance(range.first, range.second); + } + // Clear the btree, deleting all of the values it contains. void clear(); @@ -1480,16 +1284,14 @@ class btree { void swap(btree &other); const key_compare &key_comp() const noexcept { - return rightmost_.template get<0>(); + return root_.template get<0>(); } template bool compare_keys(const K1 &a, const K2 &b) const { return compare_internal::compare_result_as_less_than(key_comp()(a, b)); } - value_compare value_comp() const { - return value_compare(original_key_compare(key_comp())); - } + value_compare value_comp() const { return value_compare(key_comp()); } // Verifies the structure of the btree. void verify() const; @@ -1527,7 +1329,6 @@ class btree { } // The total number of bytes used by the btree. - // TODO(b/169338300): update to support node_btree_*. 
size_type bytes_used() const { node_stats stats = internal_stats(root()); if (stats.leaf_nodes == 1 && stats.internal_nodes == 0) { @@ -1538,14 +1339,12 @@ class btree { } } - // The average number of bytes used per value stored in the btree assuming - // random insertion order. + // The average number of bytes used per value stored in the btree. static double average_bytes_per_value() { - // The expected number of values per node with random insertion order is the - // average of the maximum and minimum numbers of values per node. - const double expected_values_per_node = - (kNodeSlots + kMinNodeValues) / 2.0; - return node_type::LeafSize() / expected_values_per_node; + // Returns the number of bytes per value on a leaf node that is 75% + // full. Experimentally, this matches up nicely with the computed number of + // bytes per value in trees that had their values inserted in random order. + return node_type::LeafSize() / (kNodeValues * 0.75); } // The fullness of the btree. Computed as the number of elements in the btree @@ -1555,7 +1354,7 @@ class btree { // Returns 0 for empty trees. double fullness() const { if (empty()) return 0.0; - return static_cast(size()) / (nodes() * kNodeSlots); + return static_cast(size()) / (nodes() * kNodeValues); } // The overhead of the btree structure in bytes per node. Computed as the // total number of bytes used by the btree minus the number of bytes used for @@ -1571,20 +1370,11 @@ class btree { allocator_type get_allocator() const { return allocator(); } private: - friend struct btree_access; - // Internal accessor routines. 
- node_type *root() { return root_; } - const node_type *root() const { return root_; } - node_type *&mutable_root() noexcept { return root_; } - node_type *rightmost() { return rightmost_.template get<2>(); } - const node_type *rightmost() const { return rightmost_.template get<2>(); } - node_type *&mutable_rightmost() noexcept { - return rightmost_.template get<2>(); - } - key_compare *mutable_key_comp() noexcept { - return &rightmost_.template get<0>(); - } + node_type *root() { return root_.template get<2>(); } + const node_type *root() const { return root_.template get<2>(); } + node_type *&mutable_root() noexcept { return root_.template get<2>(); } + key_compare *mutable_key_comp() noexcept { return &root_.template get<0>(); } // The leftmost node is stored as the parent of the root node. node_type *leftmost() { return root()->parent(); } @@ -1592,10 +1382,10 @@ class btree { // Allocator routines. allocator_type *mutable_allocator() noexcept { - return &rightmost_.template get<1>(); + return &root_.template get<1>(); } const allocator_type &allocator() const noexcept { - return rightmost_.template get<1>(); + return root_.template get<1>(); } // Allocates a correctly aligned node of at least size bytes using the @@ -1614,12 +1404,12 @@ class btree { } node_type *new_leaf_node(node_type *parent) { node_type *n = allocate(node_type::LeafSize()); - n->init_leaf(kNodeSlots, parent); + n->init_leaf(parent, kNodeValues); return n; } node_type *new_leaf_root_node(const int max_count) { node_type *n = allocate(node_type::LeafSize(max_count)); - n->init_leaf(max_count, /*parent=*/n); + n->init_leaf(/*parent=*/n, max_count); return n; } @@ -1643,10 +1433,10 @@ class btree { void try_shrink(); iterator internal_end(iterator iter) { - return iter.node_ != nullptr ? iter : end(); + return iter.node != nullptr ? iter : end(); } const_iterator internal_end(const_iterator iter) const { - return iter.node_ != nullptr ? iter : end(); + return iter.node != nullptr ? 
iter : end(); } // Emplaces a value into the btree immediately before iter. Requires that @@ -1656,25 +1446,35 @@ class btree { // Returns an iterator pointing to the first value >= the value "iter" is // pointing at. Note that "iter" might be pointing to an invalid location such - // as iter.position_ == iter.node_->finish(). This routine simply moves iter - // up in the tree to a valid location. Requires: iter.node_ is non-null. + // as iter.position == iter.node->finish(). This routine simply moves iter up + // in the tree to a valid location. + // Requires: iter.node is non-null. template static IterType internal_last(IterType iter); // Returns an iterator pointing to the leaf position at which key would - // reside in the tree, unless there is an exact match - in which case, the - // result may not be on a leaf. When there's a three-way comparator, we can - // return whether there was an exact match. This allows the caller to avoid a - // subsequent comparison to determine if an exact match was made, which is - // important for keys with expensive comparison, such as strings. + // reside in the tree. We provide 2 versions of internal_locate. The first + // version uses a less-than comparator and is incapable of distinguishing when + // there is an exact match. The second version is for the key-compare-to + // specialization and distinguishes exact matches. The key-compare-to + // specialization allows the caller to avoid a subsequent comparison to + // determine if an exact match was made, which is important for keys with + // expensive comparison, such as strings. template SearchResult internal_locate( const K &key) const; + template + SearchResult internal_locate_impl( + const K &key, std::false_type /* IsCompareTo */) const; + + template + SearchResult internal_locate_impl( + const K &key, std::true_type /* IsCompareTo */) const; + // Internal routine which implements lower_bound(). 
template - SearchResult internal_lower_bound( - const K &key) const; + iterator internal_lower_bound(const K &key) const; // Internal routine which implements upper_bound(). template @@ -1693,7 +1493,7 @@ class btree { if (node == nullptr || (node == root() && empty())) { return node_stats(0, 0); } - if (node->is_leaf()) { + if (node->leaf()) { return node_stats(1, 0); } node_stats res(0, 1); @@ -1703,14 +1503,22 @@ class btree { return res; } - node_type *root_; + public: + // Exposed only for tests. + static bool testonly_uses_linear_node_search() { + return node_type::testonly_uses_linear_node_search(); + } - // A pointer to the rightmost node. Note that the leftmost node is stored as - // the root's parent. We use compressed tuple in order to save space because - // key_compare and allocator_type are usually empty. + private: + // We use compressed tuple in order to save space because key_compare and + // allocator_type are usually empty. absl::container_internal::CompressedTuple - rightmost_; + root_; + + // A pointer to the rightmost node. Note that the leftmost node is stored as + // the root's parent. + node_type *rightmost_; // Number of values. size_type size_; @@ -1734,8 +1542,8 @@ inline void btree_node

::emplace_value(const size_type i, value_init(i, alloc, std::forward(args)...); set_finish(finish() + 1); - if (is_internal() && finish() > i + 1) { - for (field_type j = finish(); j > i + 1; --j) { + if (!leaf() && finish() > i + 1) { + for (int j = finish(); j > i + 1; --j) { set_child(j, child(j - 1)); } clear_child(i + 1); @@ -1752,7 +1560,7 @@ inline void btree_node

::remove_values(const field_type i, const field_type src_i = i + to_erase; transfer_n(orig_finish - src_i, i, src_i, this, alloc); - if (is_internal()) { + if (!leaf()) { // Delete all children between begin and end. for (int j = 0; j < to_erase; ++j) { clear_and_delete(child(i + j + 1), alloc); @@ -1789,7 +1597,7 @@ void btree_node

::rebalance_right_to_left(const int to_move, right->transfer_n(right->count() - to_move, right->start(), right->start() + to_move, right, alloc); - if (is_internal()) { + if (!leaf()) { // Move the child pointers from the right to the left node. for (int i = 0; i < to_move; ++i) { init_child(finish() + i + 1, right->child(i)); @@ -1836,7 +1644,7 @@ void btree_node

::rebalance_left_to_right(const int to_move, // 4) Move the new delimiting value to the parent from the left node. parent()->transfer(position(), finish() - to_move, this, alloc); - if (is_internal()) { + if (!leaf()) { // Move the child pointers from the left to the right node. for (int i = right->finish(); i >= right->start(); --i) { right->init_child(i + to_move, right->child(i)); @@ -1857,7 +1665,7 @@ template void btree_node

::split(const int insert_position, btree_node *dest, allocator_type *alloc) { assert(dest->count() == 0); - assert(max_count() == kNodeSlots); + assert(max_count() == kNodeValues); // We bias the split based on the position being inserted. If we're // inserting at the beginning of the left node then bias the split to put @@ -1865,7 +1673,7 @@ void btree_node

::split(const int insert_position, btree_node *dest, // right node then bias the split to put more values on the left node. if (insert_position == start()) { dest->set_finish(dest->start() + finish() - 1); - } else if (insert_position == kNodeSlots) { + } else if (insert_position == kNodeValues) { dest->set_finish(dest->start()); } else { dest->set_finish(dest->start() + count() / 2); @@ -1882,7 +1690,7 @@ void btree_node

::split(const int insert_position, btree_node *dest, value_destroy(finish(), alloc); parent()->init_child(position() + 1, dest); - if (is_internal()) { + if (!leaf()) { for (int i = dest->start(), j = finish() + 1; i <= dest->finish(); ++i, ++j) { assert(child(j) != nullptr); @@ -1903,7 +1711,7 @@ void btree_node

::merge(btree_node *src, allocator_type *alloc) { // Move the values from the right to the left node. transfer_n(src->count(), finish() + 1, src->start(), src, alloc); - if (is_internal()) { + if (!leaf()) { // Move the child pointers from the right to the left node. for (int i = src->start(), j = finish() + 1; i <= src->finish(); ++i, ++j) { init_child(j, src->child(i)); @@ -1921,7 +1729,7 @@ void btree_node

::merge(btree_node *src, allocator_type *alloc) { template void btree_node

::clear_and_delete(btree_node *node, allocator_type *alloc) { - if (node->is_leaf()) { + if (node->leaf()) { node->value_destroy_n(node->start(), node->count(), alloc); deallocate(LeafSize(node->max_count()), node, alloc); return; @@ -1935,16 +1743,8 @@ void btree_node

::clear_and_delete(btree_node *node, allocator_type *alloc) { btree_node *delete_root_parent = node->parent(); // Navigate to the leftmost leaf under node, and then delete upwards. - while (node->is_internal()) node = node->start_child(); -#ifdef ABSL_BTREE_ENABLE_GENERATIONS - // When generations are enabled, we delete the leftmost leaf last in case it's - // the parent of the root and we need to check whether it's a leaf before we - // can update the root's generation. - // TODO(ezb): if we change btree_node::is_root to check a bool inside the node - // instead of checking whether the parent is a leaf, we can remove this logic. - btree_node *leftmost_leaf = node; -#endif - // Use `int` because `pos` needs to be able to hold `kNodeSlots+1`, which + while (!node->leaf()) node = node->start_child(); + // Use `int` because `pos` needs to be able to hold `kNodeValues+1`, which // isn't guaranteed to be a valid `field_type`. int pos = node->position(); btree_node *parent = node->parent(); @@ -1953,17 +1753,14 @@ void btree_node

::clear_and_delete(btree_node *node, allocator_type *alloc) { assert(pos <= parent->finish()); do { node = parent->child(pos); - if (node->is_internal()) { + if (!node->leaf()) { // Navigate to the leftmost leaf under node. - while (node->is_internal()) node = node->start_child(); + while (!node->leaf()) node = node->start_child(); pos = node->position(); parent = node->parent(); } node->value_destroy_n(node->start(), node->count(), alloc); -#ifdef ABSL_BTREE_ENABLE_GENERATIONS - if (leftmost_leaf != node) -#endif - deallocate(LeafSize(node->max_count()), node, alloc); + deallocate(LeafSize(node->max_count()), node, alloc); ++pos; } while (pos <= parent->finish()); @@ -1975,12 +1772,7 @@ void btree_node

::clear_and_delete(btree_node *node, allocator_type *alloc) { parent = node->parent(); node->value_destroy_n(node->start(), node->count(), alloc); deallocate(InternalSize(), node, alloc); - if (parent == delete_root_parent) { -#ifdef ABSL_BTREE_ENABLE_GENERATIONS - deallocate(LeafSize(leftmost_leaf->max_count()), leftmost_leaf, alloc); -#endif - return; - } + if (parent == delete_root_parent) return; ++pos; } while (pos > parent->finish()); } @@ -1990,49 +1782,49 @@ void btree_node

::clear_and_delete(btree_node *node, allocator_type *alloc) { // btree_iterator methods template void btree_iterator::increment_slow() { - if (node_->is_leaf()) { - assert(position_ >= node_->finish()); + if (node->leaf()) { + assert(position >= node->finish()); btree_iterator save(*this); - while (position_ == node_->finish() && !node_->is_root()) { - assert(node_->parent()->child(node_->position()) == node_); - position_ = node_->position(); - node_ = node_->parent(); + while (position == node->finish() && !node->is_root()) { + assert(node->parent()->child(node->position()) == node); + position = node->position(); + node = node->parent(); } // TODO(ezb): assert we aren't incrementing end() instead of handling. - if (position_ == node_->finish()) { + if (position == node->finish()) { *this = save; } } else { - assert(position_ < node_->finish()); - node_ = node_->child(position_ + 1); - while (node_->is_internal()) { - node_ = node_->start_child(); + assert(position < node->finish()); + node = node->child(position + 1); + while (!node->leaf()) { + node = node->start_child(); } - position_ = node_->start(); + position = node->start(); } } template void btree_iterator::decrement_slow() { - if (node_->is_leaf()) { - assert(position_ <= -1); + if (node->leaf()) { + assert(position <= -1); btree_iterator save(*this); - while (position_ < node_->start() && !node_->is_root()) { - assert(node_->parent()->child(node_->position()) == node_); - position_ = node_->position() - 1; - node_ = node_->parent(); + while (position < node->start() && !node->is_root()) { + assert(node->parent()->child(node->position()) == node); + position = node->position() - 1; + node = node->parent(); } // TODO(ezb): assert we aren't decrementing begin() instead of handling. 
- if (position_ < node_->start()) { + if (position < node->start()) { *this = save; } } else { - assert(position_ >= node_->start()); - node_ = node_->child(position_); - while (node_->is_internal()) { - node_ = node_->child(node_->finish()); + assert(position >= node->start()); + node = node->child(position); + while (!node->leaf()) { + node = node->child(node->finish()); } - position_ = node_->finish() - 1; + position = node->finish() - 1; } } @@ -2040,7 +1832,7 @@ void btree_iterator::decrement_slow() { // btree methods template template -void btree

::copy_or_move_values_in_order(Btree &other) { +void btree

::copy_or_move_values_in_order(Btree *other) { static_assert(std::is_same::value || std::is_same::value, "Btree type must be same or const."); @@ -2048,14 +1840,14 @@ void btree

::copy_or_move_values_in_order(Btree &other) { // We can avoid key comparisons because we know the order of the // values is the same order we'll store them in. - auto iter = other.begin(); - if (iter == other.end()) return; - insert_multi(iter.slot()); + auto iter = other->begin(); + if (iter == other->end()) return; + insert_multi(maybe_move_from_iterator(iter)); ++iter; - for (; iter != other.end(); ++iter) { + for (; iter != other->end(); ++iter) { // If the btree is not empty, we can just insert the new value at the end // of the tree. - internal_emplace(end(), iter.slot()); + internal_emplace(end(), maybe_move_from_iterator(iter)); } } @@ -2071,16 +1863,19 @@ constexpr bool btree

::static_assert_validation() { // Note: We assert that kTargetValues, which is computed from // Params::kTargetNodeSize, must fit the node_type::field_type. static_assert( - kNodeSlots < (1 << (8 * sizeof(typename node_type::field_type))), + kNodeValues < (1 << (8 * sizeof(typename node_type::field_type))), "target node size too large"); // Verify that key_compare returns an absl::{weak,strong}_ordering or bool. + using compare_result_type = + absl::result_of_t; static_assert( - compare_has_valid_result_type(), + std::is_same::value || + std::is_convertible::value, "key comparison function must return absl::{weak,strong}_ordering or " "bool."); - // Test the assumption made in setting kNodeSlotSpace. + // Test the assumption made in setting kNodeValueSpace. static_assert(node_type::MinimumOverhead() >= sizeof(void *) + 4, "node space assumption incorrect"); @@ -2088,29 +1883,31 @@ constexpr bool btree

::static_assert_validation() { } template -template -auto btree

::lower_bound_equal(const K &key) const - -> std::pair { - const SearchResult res = - internal_lower_bound(key); - const iterator lower = iterator(internal_end(res.value)); - const bool equal = res.HasMatch() - ? res.IsEq() - : lower != end() && !compare_keys(key, lower.key()); - return {lower, equal}; +btree

::btree(const key_compare &comp, const allocator_type &alloc) + : root_(comp, alloc, EmptyNode()), rightmost_(EmptyNode()), size_(0) {} + +template +btree

::btree(const btree &other) + : btree(other.key_comp(), other.allocator()) { + copy_or_move_values_in_order(&other); } template template auto btree

::equal_range(const K &key) -> std::pair { - const std::pair lower_and_equal = lower_bound_equal(key); - const iterator lower = lower_and_equal.first; - if (!lower_and_equal.second) { - return {lower, lower}; - } + const iterator lower = lower_bound(key); + // TODO(ezb): we should be able to avoid this comparison when there's a + // three-way comparator. + if (lower == end() || compare_keys(key, lower.key())) return {lower, lower}; const iterator next = std::next(lower); - if (!params_type::template can_have_multiple_equivalent_keys()) { + // When the comparator is heterogeneous, we can't assume that comparison with + // non-`key_type` will be equivalent to `key_type` comparisons so there + // could be multiple equivalent keys even in a unique-container. But for + // heterogeneous comparisons from the default string adapted comparators, we + // don't need to worry about this. + if (!is_multi_container::value && + (std::is_same::value || is_key_compare_adapted::value)) { // The next iterator after lower must point to a key greater than `key`. // Note: if this assert fails, then it may indicate that the comparator does // not meet the equivalence requirements for Compare @@ -2121,7 +1918,7 @@ auto btree

::equal_range(const K &key) -> std::pair { // Try once more to avoid the call to upper_bound() if there's only one // equivalent key. This should prevent all calls to upper_bound() in cases of // unique-containers with heterogeneous comparators in which all comparison - // operators have the same equivalence classes. + // operators are equivalent. if (next == end() || compare_keys(key, next.key())) return {lower, next}; // In this case, we need to call upper_bound() to avoid worst case O(N) @@ -2134,11 +1931,11 @@ template auto btree

::insert_unique(const K &key, Args &&... args) -> std::pair { if (empty()) { - mutable_root() = mutable_rightmost() = new_leaf_root_node(1); + mutable_root() = rightmost_ = new_leaf_root_node(1); } - SearchResult res = internal_locate(key); - iterator iter = res.value; + auto res = internal_locate(key); + iterator &iter = res.value; if (res.HasMatch()) { if (res.IsEq()) { @@ -2147,7 +1944,7 @@ auto btree

::insert_unique(const K &key, Args &&... args) } } else { iterator last = internal_last(iter); - if (last.node_ && !compare_keys(key, last.key())) { + if (last.node && !compare_keys(key, last.key())) { // The key already exists in the tree, do nothing. return {last, false}; } @@ -2192,11 +1989,8 @@ template template void btree

::insert_iterator_unique(InputIterator b, InputIterator e, char) { for (; b != e; ++b) { - // Use a node handle to manage a temp slot. - auto node_handle = - CommonAccess::Construct(get_allocator(), *b); - slot_type *slot = CommonAccess::GetSlot(node_handle); - insert_hint_unique(end(), params_type::key(slot), slot); + init_type value(*b); + insert_hint_unique(end(), params_type::key(value), std::move(value)); } } @@ -2204,11 +1998,11 @@ template template auto btree

::insert_multi(const key_type &key, ValueType &&v) -> iterator { if (empty()) { - mutable_root() = mutable_rightmost() = new_leaf_root_node(1); + mutable_root() = rightmost_ = new_leaf_root_node(1); } iterator iter = internal_upper_bound(key); - if (iter.node_ == nullptr) { + if (iter.node == nullptr) { iter = end(); } return internal_emplace(iter, std::forward(v)); @@ -2255,7 +2049,7 @@ auto btree

::operator=(const btree &other) -> btree & { *mutable_allocator() = other.allocator(); } - copy_or_move_values_in_order(other); + copy_or_move_values_in_order(&other); } return *this; } @@ -2268,15 +2062,15 @@ auto btree

::operator=(btree &&other) noexcept -> btree & { using std::swap; if (absl::allocator_traits< allocator_type>::propagate_on_container_copy_assignment::value) { + // Note: `root_` also contains the allocator and the key comparator. swap(root_, other.root_); - // Note: `rightmost_` also contains the allocator and the key comparator. swap(rightmost_, other.rightmost_); swap(size_, other.size_); } else { if (allocator() == other.allocator()) { swap(mutable_root(), other.mutable_root()); swap(*mutable_key_comp(), *other.mutable_key_comp()); - swap(mutable_rightmost(), other.mutable_rightmost()); + swap(rightmost_, other.rightmost_); swap(size_, other.size_); } else { // We aren't allowed to propagate the allocator and the allocator is @@ -2285,7 +2079,7 @@ auto btree

::operator=(btree &&other) noexcept -> btree & { // comparator while moving the values so we can't swap the key // comparators. *mutable_key_comp() = other.key_comp(); - copy_or_move_values_in_order(other); + copy_or_move_values_in_order(&other); } } } @@ -2294,29 +2088,22 @@ auto btree

::operator=(btree &&other) noexcept -> btree & { template auto btree

::erase(iterator iter) -> iterator { - iter.node_->value_destroy(iter.position_, mutable_allocator()); - iter.update_generation(); - - const bool internal_delete = iter.node_->is_internal(); - if (internal_delete) { - // Deletion of a value on an internal node. First, transfer the largest - // value from our left child here, then erase/rebalance from that position. - // We can get to the largest value from our left child by decrementing iter. + bool internal_delete = false; + if (!iter.node->leaf()) { + // Deletion of a value on an internal node. First, move the largest value + // from our left child here, then delete that position (in remove_values() + // below). We can get to the largest value from our left child by + // decrementing iter. iterator internal_iter(iter); --iter; - assert(iter.node_->is_leaf()); - internal_iter.node_->transfer(internal_iter.position_, iter.position_, - iter.node_, mutable_allocator()); - } else { - // Shift values after erased position in leaf. In the internal case, we - // don't need to do this because the leaf position is the end of the node. - const field_type transfer_from = iter.position_ + 1; - const field_type num_to_transfer = iter.node_->finish() - transfer_from; - iter.node_->transfer_n(num_to_transfer, iter.position_, transfer_from, - iter.node_, mutable_allocator()); - } - // Update node finish and container size. - iter.node_->set_finish(iter.node_->finish() - 1); + assert(iter.node->leaf()); + params_type::move(mutable_allocator(), iter.node->slot(iter.position), + internal_iter.node->slot(internal_iter.position)); + internal_delete = true; + } + + // Delete the key from the leaf. + iter.node->remove_values(iter.position, /*to_erase=*/1, mutable_allocator()); --size_; // We want to return the next value after the one we just erased. If we @@ -2324,7 +2111,7 @@ auto btree

::erase(iterator iter) -> iterator { // value is ++(++iter). If we erased from a leaf node (internal_delete == // false) then the next value is ++iter. Note that ++iter may point to an // internal node and the value in the internal node may move to a leaf node - // (iter.node_) when rebalancing is performed at the leaf level. + // (iter.node) when rebalancing is performed at the leaf level. iterator res = rebalance_after_delete(iter); @@ -2341,14 +2128,14 @@ auto btree

::rebalance_after_delete(iterator iter) -> iterator { iterator res(iter); bool first_iteration = true; for (;;) { - if (iter.node_ == root()) { + if (iter.node == root()) { try_shrink(); if (empty()) { return end(); } break; } - if (iter.node_->count() >= kMinNodeValues) { + if (iter.node->count() >= kMinNodeValues) { break; } bool merged = try_merge_or_rebalance(&iter); @@ -2361,15 +2148,14 @@ auto btree

::rebalance_after_delete(iterator iter) -> iterator { if (!merged) { break; } - iter.position_ = iter.node_->position(); - iter.node_ = iter.node_->parent(); + iter.position = iter.node->position(); + iter.node = iter.node->parent(); } - res.update_generation(); // Adjust our return value. If we're pointing at the end of a node, advance // the iterator. - if (res.position_ == res.node_->finish()) { - res.position_ = res.node_->finish() - 1; + if (res.position == res.node->finish()) { + res.position = res.node->finish() - 1; ++res; } @@ -2386,45 +2172,68 @@ auto btree

::erase_range(iterator begin, iterator end) return {0, begin}; } - if (static_cast(count) == size_) { + if (count == size_) { clear(); return {count, this->end()}; } - if (begin.node_ == end.node_) { - assert(end.position_ > begin.position_); - begin.node_->remove_values(begin.position_, end.position_ - begin.position_, - mutable_allocator()); + if (begin.node == end.node) { + assert(end.position > begin.position); + begin.node->remove_values(begin.position, end.position - begin.position, + mutable_allocator()); size_ -= count; return {count, rebalance_after_delete(begin)}; } const size_type target_size = size_ - count; while (size_ > target_size) { - if (begin.node_->is_leaf()) { + if (begin.node->leaf()) { const size_type remaining_to_erase = size_ - target_size; - const size_type remaining_in_node = - begin.node_->finish() - begin.position_; + const size_type remaining_in_node = begin.node->finish() - begin.position; const size_type to_erase = (std::min)(remaining_to_erase, remaining_in_node); - begin.node_->remove_values(begin.position_, to_erase, - mutable_allocator()); + begin.node->remove_values(begin.position, to_erase, mutable_allocator()); size_ -= to_erase; begin = rebalance_after_delete(begin); } else { begin = erase(begin); } } - begin.update_generation(); return {count, begin}; } +template +template +auto btree

::erase_unique(const K &key) -> size_type { + const iterator iter = internal_find(key); + if (iter.node == nullptr) { + // The key doesn't exist in the tree, return nothing done. + return 0; + } + erase(iter); + return 1; +} + +template +template +auto btree

::erase_multi(const K &key) -> size_type { + const iterator begin = internal_lower_bound(key); + if (begin.node == nullptr) { + // The key doesn't exist in the tree, return nothing done. + return 0; + } + // Delete all of the keys between begin and upper_bound(key). + const iterator end = internal_end(internal_upper_bound(key)); + return erase_range(begin, end).first; +} + template void btree

::clear() { if (!empty()) { node_type::clear_and_delete(root(), mutable_allocator()); } - mutable_root() = mutable_rightmost() = EmptyNode(); + mutable_root() = EmptyNode(); + rightmost_ = EmptyNode(); size_ = 0; } @@ -2433,15 +2242,15 @@ void btree

::swap(btree &other) { using std::swap; if (absl::allocator_traits< allocator_type>::propagate_on_container_swap::value) { - // Note: `rightmost_` also contains the allocator and the key comparator. - swap(rightmost_, other.rightmost_); + // Note: `root_` also contains the allocator and the key comparator. + swap(root_, other.root_); } else { // It's undefined behavior if the allocators are unequal here. assert(allocator() == other.allocator()); - swap(mutable_rightmost(), other.mutable_rightmost()); + swap(mutable_root(), other.mutable_root()); swap(*mutable_key_comp(), *other.mutable_key_comp()); } - swap(mutable_root(), other.mutable_root()); + swap(rightmost_, other.rightmost_); swap(size_, other.size_); } @@ -2449,20 +2258,20 @@ template void btree

::verify() const { assert(root() != nullptr); assert(leftmost() != nullptr); - assert(rightmost() != nullptr); + assert(rightmost_ != nullptr); assert(empty() || size() == internal_verify(root(), nullptr, nullptr)); - assert(leftmost() == (++const_iterator(root(), -1)).node_); - assert(rightmost() == (--const_iterator(root(), root()->finish())).node_); - assert(leftmost()->is_leaf()); - assert(rightmost()->is_leaf()); + assert(leftmost() == (++const_iterator(root(), -1)).node); + assert(rightmost_ == (--const_iterator(root(), root()->finish())).node); + assert(leftmost()->leaf()); + assert(rightmost_->leaf()); } template void btree

::rebalance_or_split(iterator *iter) { - node_type *&node = iter->node_; - int &insert_position = iter->position_; + node_type *&node = iter->node; + int &insert_position = iter->position; assert(node->count() == node->max_count()); - assert(kNodeSlots == node->max_count()); + assert(kNodeValues == node->max_count()); // First try to make room on the node by rebalancing. node_type *parent = node->parent(); @@ -2470,17 +2279,17 @@ void btree

::rebalance_or_split(iterator *iter) { if (node->position() > parent->start()) { // Try rebalancing with our left sibling. node_type *left = parent->child(node->position() - 1); - assert(left->max_count() == kNodeSlots); - if (left->count() < kNodeSlots) { + assert(left->max_count() == kNodeValues); + if (left->count() < kNodeValues) { // We bias rebalancing based on the position being inserted. If we're // inserting at the end of the right node then we bias rebalancing to // fill up the left node. - int to_move = (kNodeSlots - left->count()) / - (1 + (insert_position < static_cast(kNodeSlots))); + int to_move = (kNodeValues - left->count()) / + (1 + (insert_position < kNodeValues)); to_move = (std::max)(1, to_move); if (insert_position - to_move >= node->start() || - left->count() + to_move < static_cast(kNodeSlots)) { + left->count() + to_move < kNodeValues) { left->rebalance_right_to_left(to_move, node, mutable_allocator()); assert(node->max_count() - node->count() == to_move); @@ -2499,17 +2308,17 @@ void btree

::rebalance_or_split(iterator *iter) { if (node->position() < parent->finish()) { // Try rebalancing with our right sibling. node_type *right = parent->child(node->position() + 1); - assert(right->max_count() == kNodeSlots); - if (right->count() < kNodeSlots) { + assert(right->max_count() == kNodeValues); + if (right->count() < kNodeValues) { // We bias rebalancing based on the position being inserted. If we're // inserting at the beginning of the left node then we bias rebalancing // to fill up the right node. - int to_move = (static_cast(kNodeSlots) - right->count()) / + int to_move = (kNodeValues - right->count()) / (1 + (insert_position > node->start())); to_move = (std::max)(1, to_move); if (insert_position <= node->finish() - to_move || - right->count() + to_move < static_cast(kNodeSlots)) { + right->count() + to_move < kNodeValues) { node->rebalance_left_to_right(to_move, right, mutable_allocator()); if (insert_position > node->finish()) { @@ -2525,8 +2334,8 @@ void btree

::rebalance_or_split(iterator *iter) { // Rebalancing failed, make sure there is room on the parent node for a new // value. - assert(parent->max_count() == kNodeSlots); - if (parent->count() == kNodeSlots) { + assert(parent->max_count() == kNodeValues); + if (parent->count() == kNodeValues) { iterator parent_iter(node->parent(), node->position()); rebalance_or_split(&parent_iter); } @@ -2535,20 +2344,19 @@ void btree

::rebalance_or_split(iterator *iter) { // Create a new root node and set the current root node as the child of the // new root. parent = new_internal_node(parent); - parent->set_generation(root()->generation()); parent->init_child(parent->start(), root()); mutable_root() = parent; // If the former root was a leaf node, then it's now the rightmost node. - assert(parent->start_child()->is_internal() || - parent->start_child() == rightmost()); + assert(!parent->start_child()->leaf() || + parent->start_child() == rightmost_); } // Split the node. node_type *split_node; - if (node->is_leaf()) { + if (node->leaf()) { split_node = new_leaf_node(parent); node->split(insert_position, split_node, mutable_allocator()); - if (rightmost() == node) mutable_rightmost() = split_node; + if (rightmost_ == node) rightmost_ = split_node; } else { split_node = new_internal_node(parent); node->split(insert_position, split_node, mutable_allocator()); @@ -2563,56 +2371,55 @@ void btree

::rebalance_or_split(iterator *iter) { template void btree

::merge_nodes(node_type *left, node_type *right) { left->merge(right, mutable_allocator()); - if (rightmost() == right) mutable_rightmost() = left; + if (rightmost_ == right) rightmost_ = left; } template bool btree

::try_merge_or_rebalance(iterator *iter) { - node_type *parent = iter->node_->parent(); - if (iter->node_->position() > parent->start()) { + node_type *parent = iter->node->parent(); + if (iter->node->position() > parent->start()) { // Try merging with our left sibling. - node_type *left = parent->child(iter->node_->position() - 1); - assert(left->max_count() == kNodeSlots); - if (1U + left->count() + iter->node_->count() <= kNodeSlots) { - iter->position_ += 1 + left->count(); - merge_nodes(left, iter->node_); - iter->node_ = left; + node_type *left = parent->child(iter->node->position() - 1); + assert(left->max_count() == kNodeValues); + if (1 + left->count() + iter->node->count() <= kNodeValues) { + iter->position += 1 + left->count(); + merge_nodes(left, iter->node); + iter->node = left; return true; } } - if (iter->node_->position() < parent->finish()) { + if (iter->node->position() < parent->finish()) { // Try merging with our right sibling. - node_type *right = parent->child(iter->node_->position() + 1); - assert(right->max_count() == kNodeSlots); - if (1U + iter->node_->count() + right->count() <= kNodeSlots) { - merge_nodes(iter->node_, right); + node_type *right = parent->child(iter->node->position() + 1); + assert(right->max_count() == kNodeValues); + if (1 + iter->node->count() + right->count() <= kNodeValues) { + merge_nodes(iter->node, right); return true; } // Try rebalancing with our right sibling. We don't perform rebalancing if - // we deleted the first element from iter->node_ and the node is not + // we deleted the first element from iter->node and the node is not // empty. This is a small optimization for the common pattern of deleting // from the front of the tree. 
if (right->count() > kMinNodeValues && - (iter->node_->count() == 0 || iter->position_ > iter->node_->start())) { - int to_move = (right->count() - iter->node_->count()) / 2; + (iter->node->count() == 0 || iter->position > iter->node->start())) { + int to_move = (right->count() - iter->node->count()) / 2; to_move = (std::min)(to_move, right->count() - 1); - iter->node_->rebalance_right_to_left(to_move, right, mutable_allocator()); + iter->node->rebalance_right_to_left(to_move, right, mutable_allocator()); return false; } } - if (iter->node_->position() > parent->start()) { + if (iter->node->position() > parent->start()) { // Try rebalancing with our left sibling. We don't perform rebalancing if - // we deleted the last element from iter->node_ and the node is not + // we deleted the last element from iter->node and the node is not // empty. This is a small optimization for the common pattern of deleting // from the back of the tree. - node_type *left = parent->child(iter->node_->position() - 1); + node_type *left = parent->child(iter->node->position() - 1); if (left->count() > kMinNodeValues && - (iter->node_->count() == 0 || - iter->position_ < iter->node_->finish())) { - int to_move = (left->count() - iter->node_->count()) / 2; + (iter->node->count() == 0 || iter->position < iter->node->finish())) { + int to_move = (left->count() - iter->node->count()) / 2; to_move = (std::min)(to_move, left->count() - 1); - left->rebalance_left_to_right(to_move, iter->node_, mutable_allocator()); - iter->position_ += to_move; + left->rebalance_left_to_right(to_move, iter->node, mutable_allocator()); + iter->position += to_move; return false; } } @@ -2626,9 +2433,9 @@ void btree

::try_shrink() { return; } // Deleted the last item on the root node, shrink the height of the tree. - if (orig_root->is_leaf()) { + if (orig_root->leaf()) { assert(size() == 0); - mutable_root() = mutable_rightmost() = EmptyNode(); + mutable_root() = rightmost_ = EmptyNode(); } else { node_type *child = orig_root->start_child(); child->make_root(); @@ -2640,16 +2447,15 @@ void btree

::try_shrink() { template template inline IterType btree

::internal_last(IterType iter) { - assert(iter.node_ != nullptr); - while (iter.position_ == iter.node_->finish()) { - iter.position_ = iter.node_->position(); - iter.node_ = iter.node_->parent(); - if (iter.node_->is_leaf()) { - iter.node_ = nullptr; + assert(iter.node != nullptr); + while (iter.position == iter.node->finish()) { + iter.position = iter.node->position(); + iter.node = iter.node->parent(); + if (iter.node->leaf()) { + iter.node = nullptr; break; } } - iter.update_generation(); return iter; } @@ -2657,39 +2463,37 @@ template template inline auto btree

::internal_emplace(iterator iter, Args &&... args) -> iterator { - if (iter.node_->is_internal()) { + if (!iter.node->leaf()) { // We can't insert on an internal node. Instead, we'll insert after the // previous value which is guaranteed to be on a leaf node. --iter; - ++iter.position_; + ++iter.position; } - const field_type max_count = iter.node_->max_count(); + const field_type max_count = iter.node->max_count(); allocator_type *alloc = mutable_allocator(); - if (iter.node_->count() == max_count) { + if (iter.node->count() == max_count) { // Make room in the leaf for the new item. - if (max_count < kNodeSlots) { + if (max_count < kNodeValues) { // Insertion into the root where the root is smaller than the full node // size. Simply grow the size of the root node. - assert(iter.node_ == root()); - iter.node_ = - new_leaf_root_node((std::min)(kNodeSlots, 2 * max_count)); + assert(iter.node == root()); + iter.node = + new_leaf_root_node((std::min)(kNodeValues, 2 * max_count)); // Transfer the values from the old root to the new root. node_type *old_root = root(); - node_type *new_root = iter.node_; + node_type *new_root = iter.node; new_root->transfer_n(old_root->count(), new_root->start(), old_root->start(), old_root, alloc); new_root->set_finish(old_root->finish()); old_root->set_finish(old_root->start()); - new_root->set_generation(old_root->generation()); node_type::clear_and_delete(old_root, alloc); - mutable_root() = mutable_rightmost() = new_root; + mutable_root() = rightmost_ = new_root; } else { rebalance_or_split(&iter); } } - iter.node_->emplace_value(iter.position_, alloc, std::forward(args)...); + iter.node->emplace_value(iter.position, alloc, std::forward(args)...); ++size_; - iter.update_generation(); return iter; } @@ -2697,51 +2501,61 @@ template template inline auto btree

::internal_locate(const K &key) const -> SearchResult { + return internal_locate_impl(key, is_key_compare_to()); +} + +template +template +inline auto btree

::internal_locate_impl( + const K &key, std::false_type /* IsCompareTo */) const + -> SearchResult { iterator iter(const_cast(root())); for (;;) { - SearchResult res = - iter.node_->lower_bound(key, key_comp()); - iter.position_ = res.value; - if (res.IsEq()) { + iter.position = iter.node->lower_bound(key, key_comp()).value; + // NOTE: we don't need to walk all the way down the tree if the keys are + // equal, but determining equality would require doing an extra comparison + // on each node on the way down, and we will need to go all the way to the + // leaf node in the expected case. + if (iter.node->leaf()) { + break; + } + iter.node = iter.node->child(iter.position); + } + return {iter}; +} + +template +template +inline auto btree

::internal_locate_impl( + const K &key, std::true_type /* IsCompareTo */) const + -> SearchResult { + iterator iter(const_cast(root())); + for (;;) { + SearchResult res = iter.node->lower_bound(key, key_comp()); + iter.position = res.value; + if (res.match == MatchKind::kEq) { return {iter, MatchKind::kEq}; } - // Note: in the non-key-compare-to case, we don't need to walk all the way - // down the tree if the keys are equal, but determining equality would - // require doing an extra comparison on each node on the way down, and we - // will need to go all the way to the leaf node in the expected case. - if (iter.node_->is_leaf()) { + if (iter.node->leaf()) { break; } - iter.node_ = iter.node_->child(iter.position_); + iter.node = iter.node->child(iter.position); } - // Note: in the non-key-compare-to case, the key may actually be equivalent - // here (and the MatchKind::kNe is ignored). return {iter, MatchKind::kNe}; } template template -auto btree

::internal_lower_bound(const K &key) const - -> SearchResult { - if (!params_type::template can_have_multiple_equivalent_keys()) { - SearchResult ret = internal_locate(key); - ret.value = internal_last(ret.value); - return ret; - } +auto btree

::internal_lower_bound(const K &key) const -> iterator { iterator iter(const_cast(root())); - SearchResult res; - bool seen_eq = false; for (;;) { - res = iter.node_->lower_bound(key, key_comp()); - iter.position_ = res.value; - if (iter.node_->is_leaf()) { + iter.position = iter.node->lower_bound(key, key_comp()).value; + if (iter.node->leaf()) { break; } - seen_eq = seen_eq || res.IsEq(); - iter.node_ = iter.node_->child(iter.position_); + iter.node = iter.node->child(iter.position); } - if (res.IsEq()) return {iter, MatchKind::kEq}; - return {internal_last(iter), seen_eq ? MatchKind::kEq : MatchKind::kNe}; + return internal_last(iter); } template @@ -2749,11 +2563,11 @@ template auto btree

::internal_upper_bound(const K &key) const -> iterator { iterator iter(const_cast(root())); for (;;) { - iter.position_ = iter.node_->upper_bound(key, key_comp()); - if (iter.node_->is_leaf()) { + iter.position = iter.node->upper_bound(key, key_comp()); + if (iter.node->leaf()) { break; } - iter.node_ = iter.node_->child(iter.position_); + iter.node = iter.node->child(iter.position); } return internal_last(iter); } @@ -2761,14 +2575,14 @@ auto btree

::internal_upper_bound(const K &key) const -> iterator { template template auto btree

::internal_find(const K &key) const -> iterator { - SearchResult res = internal_locate(key); + auto res = internal_locate(key); if (res.HasMatch()) { if (res.IsEq()) { return res.value; } } else { const iterator iter = internal_last(res.value); - if (iter.node_ != nullptr && !compare_keys(key, iter.key())) { + if (iter.node != nullptr && !compare_keys(key, iter.key())) { return iter; } } @@ -2790,7 +2604,7 @@ int btree

::internal_verify(const node_type *node, const key_type *lo, assert(!compare_keys(node->key(i), node->key(i - 1))); } int count = node->count(); - if (node->is_internal()) { + if (!node->leaf()) { for (int i = node->start(); i <= node->finish(); ++i) { assert(node->child(i) != nullptr); assert(node->child(i)->parent() == node); @@ -2803,50 +2617,6 @@ int btree

::internal_verify(const node_type *node, const key_type *lo, return count; } -struct btree_access { - template - static auto erase_if(BtreeContainer &container, Pred pred) - -> typename BtreeContainer::size_type { - const auto initial_size = container.size(); - auto &tree = container.tree_; - auto *alloc = tree.mutable_allocator(); - for (auto it = container.begin(); it != container.end();) { - if (!pred(*it)) { - ++it; - continue; - } - auto *node = it.node_; - if (node->is_internal()) { - // Handle internal nodes normally. - it = container.erase(it); - continue; - } - // If this is a leaf node, then we do all the erases from this node - // at once before doing rebalancing. - - // The current position to transfer slots to. - int to_pos = it.position_; - node->value_destroy(it.position_, alloc); - while (++it.position_ < node->finish()) { - it.update_generation(); - if (pred(*it)) { - node->value_destroy(it.position_, alloc); - } else { - node->transfer(node->slot(to_pos++), node->slot(it.position_), alloc); - } - } - const int num_deleted = node->finish() - to_pos; - tree.size_ -= num_deleted; - node->set_finish(to_pos); - it.position_ = to_pos; - it = tree.rebalance_after_delete(it); - } - return initial_size - container.size(); - } -}; - -#undef ABSL_BTREE_ENABLE_GENERATIONS - } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl diff --git a/abseil-cpp/absl/container/internal/btree_container.h b/abseil-cpp/absl/container/internal/btree_container.h index fc2f740a..137614f8 100644 --- a/abseil-cpp/absl/container/internal/btree_container.h +++ b/abseil-cpp/absl/container/internal/btree_container.h @@ -20,11 +20,9 @@ #include #include -#include "absl/base/attributes.h" #include "absl/base/internal/throw_delegate.h" #include "absl/container/internal/btree.h" // IWYU pragma: export #include "absl/container/internal/common.h" -#include "absl/memory/memory.h" #include "absl/meta/type_traits.h" namespace absl { @@ -44,15 +42,15 @@ class 
btree_container { // transparent case. template using key_arg = - typename KeyArg::template type< - K, typename Tree::key_type>; + typename KeyArg::value>:: + template type; public: using key_type = typename Tree::key_type; using value_type = typename Tree::value_type; using size_type = typename Tree::size_type; using difference_type = typename Tree::difference_type; - using key_compare = typename Tree::original_key_compare; + using key_compare = typename Tree::key_compare; using value_compare = typename Tree::value_compare; using allocator_type = typename Tree::allocator_type; using reference = typename Tree::reference; @@ -70,21 +68,8 @@ class btree_container { explicit btree_container(const key_compare &comp, const allocator_type &alloc = allocator_type()) : tree_(comp, alloc) {} - explicit btree_container(const allocator_type &alloc) - : tree_(key_compare(), alloc) {} - - btree_container(const btree_container &other) - : btree_container(other, absl::allocator_traits:: - select_on_container_copy_construction( - other.get_allocator())) {} - btree_container(const btree_container &other, const allocator_type &alloc) - : tree_(other.tree_, alloc) {} - - btree_container(btree_container &&other) noexcept( - std::is_nothrow_move_constructible::value) = default; - btree_container(btree_container &&other, const allocator_type &alloc) - : tree_(std::move(other.tree_), alloc) {} - + btree_container(const btree_container &other) = default; + btree_container(btree_container &&other) noexcept = default; btree_container &operator=(const btree_container &other) = default; btree_container &operator=(btree_container &&other) noexcept( std::is_nothrow_move_assignable::value) = default; @@ -105,11 +90,6 @@ class btree_container { // Lookup routines. 
template - size_type count(const key_arg &key) const { - auto equal_range = this->equal_range(key); - return std::distance(equal_range.first, equal_range.second); - } - template iterator find(const key_arg &key) { return tree_.find(key); } @@ -158,18 +138,12 @@ class btree_container { iterator erase(const_iterator first, const_iterator last) { return tree_.erase_range(iterator(first), iterator(last)).second; } - template - size_type erase(const key_arg &key) { - auto equal_range = this->equal_range(key); - return tree_.erase_range(equal_range.first, equal_range.second).first; - } // Extract routines. node_type extract(iterator position) { - // Use Construct instead of Transfer because the rebalancing code will - // destroy the slot later. - auto node = - CommonAccess::Construct(get_allocator(), position.slot()); + // Use Move instead of Transfer, because the rebalancing code expects to + // have a valid object to scribble metadata bits on top of. + auto node = CommonAccess::Move(get_allocator(), position.slot()); erase(position); return node; } @@ -177,8 +151,9 @@ class btree_container { return extract(iterator(position)); } + public: // Utility routines. - ABSL_ATTRIBUTE_REINITIALIZES void clear() { tree_.clear(); } + void clear() { tree_.clear(); } void swap(btree_container &other) { tree_.swap(other.tree_); } void verify() const { tree_.verify(); } @@ -216,7 +191,7 @@ class btree_container { allocator_type get_allocator() const { return tree_.get_allocator(); } // The key comparator used by the btree. - key_compare key_comp() const { return key_compare(tree_.key_comp()); } + key_compare key_comp() const { return tree_.key_comp(); } value_compare value_comp() const { return tree_.value_comp(); } // Support absl::Hash. 
@@ -229,7 +204,6 @@ class btree_container { } protected: - friend struct btree_access; Tree tree_; }; @@ -250,7 +224,7 @@ class btree_set_container : public btree_container { using key_type = typename Tree::key_type; using value_type = typename Tree::value_type; using size_type = typename Tree::size_type; - using key_compare = typename Tree::original_key_compare; + using key_compare = typename Tree::key_compare; using allocator_type = typename Tree::allocator_type; using iterator = typename Tree::iterator; using const_iterator = typename Tree::const_iterator; @@ -261,7 +235,7 @@ class btree_set_container : public btree_container { using super_type::super_type; btree_set_container() {} - // Range constructors. + // Range constructor. template btree_set_container(InputIterator b, InputIterator e, const key_compare &comp = key_compare(), @@ -269,19 +243,18 @@ class btree_set_container : public btree_container { : super_type(comp, alloc) { insert(b, e); } - template - btree_set_container(InputIterator b, InputIterator e, - const allocator_type &alloc) - : btree_set_container(b, e, key_compare(), alloc) {} - // Initializer list constructors. + // Initializer list constructor. btree_set_container(std::initializer_list init, const key_compare &comp = key_compare(), const allocator_type &alloc = allocator_type()) : btree_set_container(init.begin(), init.end(), comp, alloc) {} - btree_set_container(std::initializer_list init, - const allocator_type &alloc) - : btree_set_container(init.begin(), init.end(), alloc) {} + + // Lookup routines. + template + size_type count(const key_arg &key) const { + return this->tree_.count_unique(key); + } // Insertion routines. std::pair insert(const value_type &v) { @@ -292,11 +265,8 @@ class btree_set_container : public btree_container { } template std::pair emplace(Args &&... args) { - // Use a node handle to manage a temp slot. 
- auto node = CommonAccess::Construct(this->get_allocator(), - std::forward(args)...); - auto *slot = CommonAccess::GetSlot(node); - return this->tree_.insert_unique(params_type::key(slot), slot); + init_type v(std::forward(args)...); + return this->tree_.insert_unique(params_type::key(v), std::move(v)); } iterator insert(const_iterator hint, const value_type &v) { return this->tree_ @@ -310,12 +280,9 @@ class btree_set_container : public btree_container { } template iterator emplace_hint(const_iterator hint, Args &&... args) { - // Use a node handle to manage a temp slot. - auto node = CommonAccess::Construct(this->get_allocator(), - std::forward(args)...); - auto *slot = CommonAccess::GetSlot(node); + init_type v(std::forward(args)...); return this->tree_ - .insert_hint_unique(iterator(hint), params_type::key(slot), slot) + .insert_hint_unique(iterator(hint), params_type::key(v), std::move(v)) .first; } template @@ -346,13 +313,20 @@ class btree_set_container : public btree_container { return res.first; } + // Deletion routines. + // TODO(ezb): we should support heterogeneous comparators that have different + // behavior for K!=key_type. + template + size_type erase(const key_arg &key) { + return this->tree_.erase_unique(key); + } + using super_type::erase; + // Node extraction routines. template node_type extract(const key_arg &key) { - const std::pair lower_and_equal = - this->tree_.lower_bound_equal(key); - return lower_and_equal.second ? extract(lower_and_equal.first) - : node_type(); + auto it = this->find(key); + return it == this->end() ? 
node_type() : extract(it); } using super_type::extract; @@ -370,7 +344,7 @@ class btree_set_container : public btree_container { int> = 0> void merge(btree_container &src) { // NOLINT for (auto src_it = src.begin(); src_it != src.end();) { - if (insert(std::move(params_type::element(src_it.slot()))).second) { + if (insert(std::move(*src_it)).second) { src_it = src.erase(src_it); } else { ++src_it; @@ -397,7 +371,6 @@ template class btree_map_container : public btree_set_container { using super_type = btree_set_container; using params_type = typename Tree::params_type; - friend class BtreeNodePeer; private: template @@ -407,7 +380,7 @@ class btree_map_container : public btree_set_container { using key_type = typename Tree::key_type; using mapped_type = typename params_type::mapped_type; using value_type = typename Tree::value_type; - using key_compare = typename Tree::original_key_compare; + using key_compare = typename Tree::key_compare; using allocator_type = typename Tree::allocator_type; using iterator = typename Tree::iterator; using const_iterator = typename Tree::const_iterator; @@ -544,7 +517,6 @@ class btree_multiset_container : public btree_container { using params_type = typename Tree::params_type; using init_type = typename params_type::init_type; using is_key_compare_to = typename params_type::is_key_compare_to; - friend class BtreeNodePeer; template using key_arg = typename super_type::template key_arg; @@ -553,7 +525,7 @@ class btree_multiset_container : public btree_container { using key_type = typename Tree::key_type; using value_type = typename Tree::value_type; using size_type = typename Tree::size_type; - using key_compare = typename Tree::original_key_compare; + using key_compare = typename Tree::key_compare; using allocator_type = typename Tree::allocator_type; using iterator = typename Tree::iterator; using const_iterator = typename Tree::const_iterator; @@ -563,7 +535,7 @@ class btree_multiset_container : public btree_container { using 
super_type::super_type; btree_multiset_container() {} - // Range constructors. + // Range constructor. template btree_multiset_container(InputIterator b, InputIterator e, const key_compare &comp = key_compare(), @@ -571,19 +543,18 @@ class btree_multiset_container : public btree_container { : super_type(comp, alloc) { insert(b, e); } - template - btree_multiset_container(InputIterator b, InputIterator e, - const allocator_type &alloc) - : btree_multiset_container(b, e, key_compare(), alloc) {} - // Initializer list constructors. + // Initializer list constructor. btree_multiset_container(std::initializer_list init, const key_compare &comp = key_compare(), const allocator_type &alloc = allocator_type()) : btree_multiset_container(init.begin(), init.end(), comp, alloc) {} - btree_multiset_container(std::initializer_list init, - const allocator_type &alloc) - : btree_multiset_container(init.begin(), init.end(), alloc) {} + + // Lookup routines. + template + size_type count(const key_arg &key) const { + return this->tree_.count_multi(key); + } // Insertion routines. iterator insert(const value_type &v) { return this->tree_.insert_multi(v); } @@ -605,18 +576,12 @@ class btree_multiset_container : public btree_container { } template iterator emplace(Args &&... args) { - // Use a node handle to manage a temp slot. - auto node = CommonAccess::Construct(this->get_allocator(), - std::forward(args)...); - return this->tree_.insert_multi(CommonAccess::GetSlot(node)); + return this->tree_.insert_multi(init_type(std::forward(args)...)); } template iterator emplace_hint(const_iterator hint, Args &&... args) { - // Use a node handle to manage a temp slot. 
- auto node = CommonAccess::Construct(this->get_allocator(), - std::forward(args)...); - return this->tree_.insert_hint_multi(iterator(hint), - CommonAccess::GetSlot(node)); + return this->tree_.insert_hint_multi( + iterator(hint), init_type(std::forward(args)...)); } iterator insert(node_type &&node) { if (!node) return this->end(); @@ -635,13 +600,18 @@ class btree_multiset_container : public btree_container { return res; } + // Deletion routines. + template + size_type erase(const key_arg &key) { + return this->tree_.erase_multi(key); + } + using super_type::erase; + // Node extraction routines. template node_type extract(const key_arg &key) { - const std::pair lower_and_equal = - this->tree_.lower_bound_equal(key); - return lower_and_equal.second ? extract(lower_and_equal.first) - : node_type(); + auto it = this->find(key); + return it == this->end() ? node_type() : extract(it); } using super_type::extract; @@ -657,9 +627,8 @@ class btree_multiset_container : public btree_container { typename T::params_type::is_map_container>>::value, int> = 0> void merge(btree_container &src) { // NOLINT - for (auto src_it = src.begin(), end = src.end(); src_it != end; ++src_it) { - insert(std::move(params_type::element(src_it.slot()))); - } + insert(std::make_move_iterator(src.begin()), + std::make_move_iterator(src.end())); src.clear(); } @@ -682,7 +651,6 @@ template class btree_multimap_container : public btree_multiset_container { using super_type = btree_multiset_container; using params_type = typename Tree::params_type; - friend class BtreeNodePeer; public: using mapped_type = typename params_type::mapped_type; diff --git a/abseil-cpp/absl/container/internal/common.h b/abseil-cpp/absl/container/internal/common.h index 416d9aa3..030e9d4a 100644 --- a/abseil-cpp/absl/container/internal/common.h +++ b/abseil-cpp/absl/container/internal/common.h @@ -84,11 +84,10 @@ class node_handle_base { PolicyTraits::transfer(alloc(), slot(), s); } - struct construct_tag_t {}; - template 
- node_handle_base(construct_tag_t, const allocator_type& a, Args&&... args) + struct move_tag_t {}; + node_handle_base(move_tag_t, const allocator_type& a, slot_type* s) : alloc_(a) { - PolicyTraits::construct(alloc(), slot(), std::forward(args)...); + PolicyTraits::construct(alloc(), slot(), s); } void destroy() { @@ -187,8 +186,8 @@ struct CommonAccess { } template - static T Construct(Args&&... args) { - return T(typename T::construct_tag_t{}, std::forward(args)...); + static T Move(Args&&... args) { + return T(typename T::move_tag_t{}, std::forward(args)...); } }; diff --git a/abseil-cpp/absl/container/internal/compressed_tuple.h b/abseil-cpp/absl/container/internal/compressed_tuple.h index 5ebe1649..02bfd03f 100644 --- a/abseil-cpp/absl/container/internal/compressed_tuple.h +++ b/abseil-cpp/absl/container/internal/compressed_tuple.h @@ -257,7 +257,7 @@ class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple template ElemT& get() & { - return StorageT::get(); + return internal_compressed_tuple::Storage, I>::get(); } template diff --git a/abseil-cpp/absl/container/internal/compressed_tuple_test.cc b/abseil-cpp/absl/container/internal/compressed_tuple_test.cc index 74111f97..62a7483e 100644 --- a/abseil-cpp/absl/container/internal/compressed_tuple_test.cc +++ b/abseil-cpp/absl/container/internal/compressed_tuple_test.cc @@ -403,16 +403,6 @@ TEST(CompressedTupleTest, EmptyFinalClass) { } #endif -// TODO(b/214288561): enable this test. -TEST(CompressedTupleTest, DISABLED_NestedEbo) { - struct Empty1 {}; - struct Empty2 {}; - CompressedTuple, int> x; - CompressedTuple y; - // Currently fails with sizeof(x) == 8, sizeof(y) == 4. 
- EXPECT_EQ(sizeof(x), sizeof(y)); -} - } // namespace } // namespace container_internal ABSL_NAMESPACE_END diff --git a/abseil-cpp/absl/container/internal/container_memory.h b/abseil-cpp/absl/container/internal/container_memory.h index 00e9f6d7..e67529ec 100644 --- a/abseil-cpp/absl/container/internal/container_memory.h +++ b/abseil-cpp/absl/container/internal/container_memory.h @@ -174,7 +174,7 @@ decltype(std::declval()(std::declval())) WithConstructed( // // 2. auto a = PairArgs(args...); // std::pair p(std::piecewise_construct, -// std::move(a.first), std::move(a.second)); +// std::move(p.first), std::move(p.second)); inline std::pair, std::tuple<>> PairArgs() { return {}; } template std::pair, std::tuple> PairArgs(F&& f, S&& s) { @@ -402,15 +402,6 @@ struct map_slot_policy { } } - // Construct this slot by copying from another slot. - template - static void construct(Allocator* alloc, slot_type* slot, - const slot_type* other) { - emplace(slot); - absl::allocator_traits::construct(*alloc, &slot->value, - other->value); - } - template static void destroy(Allocator* alloc, slot_type* slot) { if (kMutableKeys::value) { @@ -433,6 +424,33 @@ struct map_slot_policy { } destroy(alloc, old_slot); } + + template + static void swap(Allocator* alloc, slot_type* a, slot_type* b) { + if (kMutableKeys::value) { + using std::swap; + swap(a->mutable_value, b->mutable_value); + } else { + value_type tmp = std::move(a->value); + absl::allocator_traits::destroy(*alloc, &a->value); + absl::allocator_traits::construct(*alloc, &a->value, + std::move(b->value)); + absl::allocator_traits::destroy(*alloc, &b->value); + absl::allocator_traits::construct(*alloc, &b->value, + std::move(tmp)); + } + } + + template + static void move(Allocator* alloc, slot_type* src, slot_type* dest) { + if (kMutableKeys::value) { + dest->mutable_value = std::move(src->mutable_value); + } else { + absl::allocator_traits::destroy(*alloc, &dest->value); + absl::allocator_traits::construct(*alloc, 
&dest->value, + std::move(src->value)); + } + } }; } // namespace container_internal diff --git a/abseil-cpp/absl/container/internal/container_memory_test.cc b/abseil-cpp/absl/container/internal/container_memory_test.cc index fb9c4dde..6a7fcd29 100644 --- a/abseil-cpp/absl/container/internal/container_memory_test.cc +++ b/abseil-cpp/absl/container/internal/container_memory_test.cc @@ -166,7 +166,7 @@ TryDecomposeValue(F&& f, Arg&& arg) { } TEST(DecomposeValue, Decomposable) { - auto f = [](const int& x, int&& y) { // NOLINT + auto f = [](const int& x, int&& y) { EXPECT_EQ(&x, &y); EXPECT_EQ(42, x); return 'A'; @@ -200,8 +200,7 @@ TryDecomposePair(F&& f, Args&&... args) { } TEST(DecomposePair, Decomposable) { - auto f = [](const int& x, // NOLINT - std::piecewise_construct_t, std::tuple k, + auto f = [](const int& x, std::piecewise_construct_t, std::tuple k, std::tuple&& v) { EXPECT_EQ(&x, &std::get<0>(k)); EXPECT_EQ(42, x); diff --git a/abseil-cpp/absl/container/internal/counting_allocator.h b/abseil-cpp/absl/container/internal/counting_allocator.h index 66068a5a..927cf082 100644 --- a/abseil-cpp/absl/container/internal/counting_allocator.h +++ b/abseil-cpp/absl/container/internal/counting_allocator.h @@ -80,15 +80,7 @@ class CountingAllocator { template void destroy(U* p) { Allocator allocator; - // Ignore GCC warning bug. 
-#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wuse-after-free" -#endif AllocatorTraits::destroy(allocator, p); -#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0) -#pragma GCC diagnostic pop -#endif if (instance_count_ != nullptr) { *instance_count_ -= 1; } diff --git a/abseil-cpp/absl/container/internal/hash_function_defaults.h b/abseil-cpp/absl/container/internal/hash_function_defaults.h index 250e662c..0683422a 100644 --- a/abseil-cpp/absl/container/internal/hash_function_defaults.h +++ b/abseil-cpp/absl/container/internal/hash_function_defaults.h @@ -78,26 +78,24 @@ struct StringHash { } }; -struct StringEq { - using is_transparent = void; - bool operator()(absl::string_view lhs, absl::string_view rhs) const { - return lhs == rhs; - } - bool operator()(const absl::Cord& lhs, const absl::Cord& rhs) const { - return lhs == rhs; - } - bool operator()(const absl::Cord& lhs, absl::string_view rhs) const { - return lhs == rhs; - } - bool operator()(absl::string_view lhs, const absl::Cord& rhs) const { - return lhs == rhs; - } -}; - // Supports heterogeneous lookup for string-like elements. 
struct StringHashEq { using Hash = StringHash; - using Eq = StringEq; + struct Eq { + using is_transparent = void; + bool operator()(absl::string_view lhs, absl::string_view rhs) const { + return lhs == rhs; + } + bool operator()(const absl::Cord& lhs, const absl::Cord& rhs) const { + return lhs == rhs; + } + bool operator()(const absl::Cord& lhs, absl::string_view rhs) const { + return lhs == rhs; + } + bool operator()(absl::string_view lhs, const absl::Cord& rhs) const { + return lhs == rhs; + } + }; }; template <> diff --git a/abseil-cpp/absl/container/internal/hash_function_defaults_test.cc b/abseil-cpp/absl/container/internal/hash_function_defaults_test.cc index 9f0a4c72..59576b8e 100644 --- a/abseil-cpp/absl/container/internal/hash_function_defaults_test.cc +++ b/abseil-cpp/absl/container/internal/hash_function_defaults_test.cc @@ -310,7 +310,7 @@ struct StringLikeTest : public ::testing::Test { hash_default_hash hash; }; -TYPED_TEST_SUITE_P(StringLikeTest); +TYPED_TEST_CASE_P(StringLikeTest); TYPED_TEST_P(StringLikeTest, Eq) { EXPECT_TRUE(this->eq(this->a1, this->b1)); diff --git a/abseil-cpp/absl/container/internal/hash_generator_testing.h b/abseil-cpp/absl/container/internal/hash_generator_testing.h index f1f555a5..6869fe45 100644 --- a/abseil-cpp/absl/container/internal/hash_generator_testing.h +++ b/abseil-cpp/absl/container/internal/hash_generator_testing.h @@ -21,13 +21,11 @@ #include #include -#include #include #include #include #include #include -#include #include "absl/container/internal/hash_policy_testing.h" #include "absl/memory/memory.h" @@ -155,25 +153,6 @@ using GeneratedType = decltype( typename Container::value_type, typename Container::key_type>::type>&>()()); -// Naive wrapper that performs a linear search of previous values. -// Beware this is O(SQR), which is reasonable for smaller kMaxValues. 
-template -struct UniqueGenerator { - Generator gen; - std::vector values; - - T operator()() { - assert(values.size() < kMaxValues); - for (;;) { - T value = gen(); - if (std::find(values.begin(), values.end(), value) == values.end()) { - values.push_back(value); - return value; - } - } - } -}; - } // namespace hash_internal } // namespace container_internal ABSL_NAMESPACE_END diff --git a/abseil-cpp/absl/container/internal/hashtablez_sampler.cc b/abseil-cpp/absl/container/internal/hashtablez_sampler.cc index efc1be58..e4484fbb 100644 --- a/abseil-cpp/absl/container/internal/hashtablez_sampler.cc +++ b/abseil-cpp/absl/container/internal/hashtablez_sampler.cc @@ -21,55 +21,49 @@ #include #include "absl/base/attributes.h" -#include "absl/base/config.h" +#include "absl/base/internal/exponential_biased.h" +#include "absl/container/internal/have_sse.h" #include "absl/debugging/stacktrace.h" #include "absl/memory/memory.h" -#include "absl/profiling/internal/exponential_biased.h" -#include "absl/profiling/internal/sample_recorder.h" #include "absl/synchronization/mutex.h" -#include "absl/utility/utility.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { - -#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL constexpr int HashtablezInfo::kMaxStackDepth; -#endif namespace { ABSL_CONST_INIT std::atomic g_hashtablez_enabled{ false }; ABSL_CONST_INIT std::atomic g_hashtablez_sample_parameter{1 << 10}; -std::atomic g_hashtablez_config_listener{nullptr}; +ABSL_CONST_INIT std::atomic g_hashtablez_max_samples{1 << 20}; #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) -ABSL_PER_THREAD_TLS_KEYWORD absl::profiling_internal::ExponentialBiased +ABSL_PER_THREAD_TLS_KEYWORD absl::base_internal::ExponentialBiased g_exponential_biased_generator; #endif -void TriggerHashtablezConfigListener() { - auto* listener = g_hashtablez_config_listener.load(std::memory_order_acquire); - if (listener != nullptr) listener(); -} - } // namespace #if 
defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) -ABSL_PER_THREAD_TLS_KEYWORD SamplingState global_next_sample = {0, 0}; +ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample = 0; #endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) -HashtablezSampler& GlobalHashtablezSampler() { +HashtablezSampler& HashtablezSampler::Global() { static auto* sampler = new HashtablezSampler(); return *sampler; } -HashtablezInfo::HashtablezInfo() = default; +HashtablezSampler::DisposeCallback HashtablezSampler::SetDisposeCallback( + DisposeCallback f) { + return dispose_.exchange(f, std::memory_order_relaxed); +} + +HashtablezInfo::HashtablezInfo() { PrepareForSampling(); } HashtablezInfo::~HashtablezInfo() = default; -void HashtablezInfo::PrepareForSampling(int64_t stride, - size_t inline_element_size_value) { +void HashtablezInfo::PrepareForSampling() { capacity.store(0, std::memory_order_relaxed); size.store(0, std::memory_order_relaxed); num_erases.store(0, std::memory_order_relaxed); @@ -78,17 +72,100 @@ void HashtablezInfo::PrepareForSampling(int64_t stride, total_probe_length.store(0, std::memory_order_relaxed); hashes_bitwise_or.store(0, std::memory_order_relaxed); hashes_bitwise_and.store(~size_t{}, std::memory_order_relaxed); - hashes_bitwise_xor.store(0, std::memory_order_relaxed); - max_reserve.store(0, std::memory_order_relaxed); create_time = absl::Now(); - weight = stride; // The inliner makes hardcoded skip_count difficult (especially when combined // with LTO). We use the ability to exclude stacks by regex when encoding // instead. 
depth = absl::GetStackTrace(stack, HashtablezInfo::kMaxStackDepth, /* skip_count= */ 0); - inline_element_size = inline_element_size_value; + dead = nullptr; +} + +HashtablezSampler::HashtablezSampler() + : dropped_samples_(0), size_estimate_(0), all_(nullptr), dispose_(nullptr) { + absl::MutexLock l(&graveyard_.init_mu); + graveyard_.dead = &graveyard_; +} + +HashtablezSampler::~HashtablezSampler() { + HashtablezInfo* s = all_.load(std::memory_order_acquire); + while (s != nullptr) { + HashtablezInfo* next = s->next; + delete s; + s = next; + } +} + +void HashtablezSampler::PushNew(HashtablezInfo* sample) { + sample->next = all_.load(std::memory_order_relaxed); + while (!all_.compare_exchange_weak(sample->next, sample, + std::memory_order_release, + std::memory_order_relaxed)) { + } +} + +void HashtablezSampler::PushDead(HashtablezInfo* sample) { + if (auto* dispose = dispose_.load(std::memory_order_relaxed)) { + dispose(*sample); + } + + absl::MutexLock graveyard_lock(&graveyard_.init_mu); + absl::MutexLock sample_lock(&sample->init_mu); + sample->dead = graveyard_.dead; + graveyard_.dead = sample; +} + +HashtablezInfo* HashtablezSampler::PopDead() { + absl::MutexLock graveyard_lock(&graveyard_.init_mu); + + // The list is circular, so eventually it collapses down to + // graveyard_.dead == &graveyard_ + // when it is empty. 
+ HashtablezInfo* sample = graveyard_.dead; + if (sample == &graveyard_) return nullptr; + + absl::MutexLock sample_lock(&sample->init_mu); + graveyard_.dead = sample->dead; + sample->PrepareForSampling(); + return sample; +} + +HashtablezInfo* HashtablezSampler::Register() { + int64_t size = size_estimate_.fetch_add(1, std::memory_order_relaxed); + if (size > g_hashtablez_max_samples.load(std::memory_order_relaxed)) { + size_estimate_.fetch_sub(1, std::memory_order_relaxed); + dropped_samples_.fetch_add(1, std::memory_order_relaxed); + return nullptr; + } + + HashtablezInfo* sample = PopDead(); + if (sample == nullptr) { + // Resurrection failed. Hire a new warlock. + sample = new HashtablezInfo(); + PushNew(sample); + } + + return sample; +} + +void HashtablezSampler::Unregister(HashtablezInfo* sample) { + PushDead(sample); + size_estimate_.fetch_sub(1, std::memory_order_relaxed); +} + +int64_t HashtablezSampler::Iterate( + const std::function& f) { + HashtablezInfo* s = all_.load(std::memory_order_acquire); + while (s != nullptr) { + absl::MutexLock l(&s->init_mu); + if (s->dead == nullptr) { + f(*s); + } + s = s->next; + } + + return dropped_samples_.load(std::memory_order_relaxed); } static bool ShouldForceSampling() { @@ -103,40 +180,27 @@ static bool ShouldForceSampling() { if (ABSL_PREDICT_TRUE(state == kDontForce)) return false; if (state == kUninitialized) { - state = ABSL_INTERNAL_C_SYMBOL(AbslContainerInternalSampleEverything)() - ? kForce - : kDontForce; + state = AbslContainerInternalSampleEverything() ? 
kForce : kDontForce; global_state.store(state, std::memory_order_relaxed); } return state == kForce; } -HashtablezInfo* SampleSlow(SamplingState& next_sample, - size_t inline_element_size) { +HashtablezInfo* SampleSlow(int64_t* next_sample) { if (ABSL_PREDICT_FALSE(ShouldForceSampling())) { - next_sample.next_sample = 1; - const int64_t old_stride = exchange(next_sample.sample_stride, 1); - HashtablezInfo* result = - GlobalHashtablezSampler().Register(old_stride, inline_element_size); - return result; + *next_sample = 1; + return HashtablezSampler::Global().Register(); } #if !defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) - next_sample = { - std::numeric_limits::max(), - std::numeric_limits::max(), - }; + *next_sample = std::numeric_limits::max(); return nullptr; #else - bool first = next_sample.next_sample < 0; - - const int64_t next_stride = g_exponential_biased_generator.GetStride( + bool first = *next_sample < 0; + *next_sample = g_exponential_biased_generator.GetStride( g_hashtablez_sample_parameter.load(std::memory_order_relaxed)); - - next_sample.next_sample = next_stride; - const int64_t old_stride = exchange(next_sample.sample_stride, next_stride); // Small values of interval are equivalent to just sampling next time. - ABSL_ASSERT(next_stride >= 1); + ABSL_ASSERT(*next_sample >= 1); // g_hashtablez_enabled can be dynamically flipped, we need to set a threshold // low enough that we will start sampling in a reasonable time, so we just use @@ -146,16 +210,16 @@ HashtablezInfo* SampleSlow(SamplingState& next_sample, // We will only be negative on our first count, so we should just retry in // that case. 
if (first) { - if (ABSL_PREDICT_TRUE(--next_sample.next_sample > 0)) return nullptr; - return SampleSlow(next_sample, inline_element_size); + if (ABSL_PREDICT_TRUE(--*next_sample > 0)) return nullptr; + return SampleSlow(next_sample); } - return GlobalHashtablezSampler().Register(old_stride, inline_element_size); + return HashtablezSampler::Global().Register(); #endif } void UnsampleSlow(HashtablezInfo* info) { - GlobalHashtablezSampler().Unregister(info); + HashtablezSampler::Global().Unregister(info); } void RecordInsertSlow(HashtablezInfo* info, size_t hash, @@ -163,7 +227,7 @@ void RecordInsertSlow(HashtablezInfo* info, size_t hash, // SwissTables probe in groups of 16, so scale this to count items probes and // not offset from desired. size_t probe_length = distance_from_desired; -#ifdef ABSL_INTERNAL_HAVE_SSE2 +#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 probe_length /= 16; #else probe_length /= 8; @@ -171,7 +235,6 @@ void RecordInsertSlow(HashtablezInfo* info, size_t hash, info->hashes_bitwise_and.fetch_and(hash, std::memory_order_relaxed); info->hashes_bitwise_or.fetch_or(hash, std::memory_order_relaxed); - info->hashes_bitwise_xor.fetch_xor(hash, std::memory_order_relaxed); info->max_probe_length.store( std::max(info->max_probe_length.load(std::memory_order_relaxed), probe_length), @@ -180,33 +243,11 @@ void RecordInsertSlow(HashtablezInfo* info, size_t hash, info->size.fetch_add(1, std::memory_order_relaxed); } -void SetHashtablezConfigListener(HashtablezConfigListener l) { - g_hashtablez_config_listener.store(l, std::memory_order_release); -} - -bool IsHashtablezEnabled() { - return g_hashtablez_enabled.load(std::memory_order_acquire); -} - void SetHashtablezEnabled(bool enabled) { - SetHashtablezEnabledInternal(enabled); - TriggerHashtablezConfigListener(); -} - -void SetHashtablezEnabledInternal(bool enabled) { g_hashtablez_enabled.store(enabled, std::memory_order_release); } -int32_t GetHashtablezSampleParameter() { - return 
g_hashtablez_sample_parameter.load(std::memory_order_acquire); -} - void SetHashtablezSampleParameter(int32_t rate) { - SetHashtablezSampleParameterInternal(rate); - TriggerHashtablezConfigListener(); -} - -void SetHashtablezSampleParameterInternal(int32_t rate) { if (rate > 0) { g_hashtablez_sample_parameter.store(rate, std::memory_order_release); } else { @@ -215,18 +256,9 @@ void SetHashtablezSampleParameterInternal(int32_t rate) { } } -int32_t GetHashtablezMaxSamples() { - return GlobalHashtablezSampler().GetMaxSamples(); -} - void SetHashtablezMaxSamples(int32_t max) { - SetHashtablezMaxSamplesInternal(max); - TriggerHashtablezConfigListener(); -} - -void SetHashtablezMaxSamplesInternal(int32_t max) { if (max > 0) { - GlobalHashtablezSampler().SetMaxSamples(max); + g_hashtablez_max_samples.store(max, std::memory_order_release); } else { ABSL_RAW_LOG(ERROR, "Invalid hashtablez max samples: %lld", static_cast(max)); // NOLINT(runtime/int) diff --git a/abseil-cpp/absl/container/internal/hashtablez_sampler.h b/abseil-cpp/absl/container/internal/hashtablez_sampler.h index d4016d8a..394348da 100644 --- a/abseil-cpp/absl/container/internal/hashtablez_sampler.h +++ b/abseil-cpp/absl/container/internal/hashtablez_sampler.h @@ -44,10 +44,9 @@ #include #include -#include "absl/base/config.h" #include "absl/base/internal/per_thread_tls.h" #include "absl/base/optimization.h" -#include "absl/profiling/internal/sample_recorder.h" +#include "absl/container/internal/have_sse.h" #include "absl/synchronization/mutex.h" #include "absl/utility/utility.h" @@ -58,7 +57,7 @@ namespace container_internal { // Stores information about a sampled hashtable. All mutations to this *must* // be made through `Record*` functions below. All reads from this *must* only // occur in the callback to `HashtablezSampler::Iterate`. -struct HashtablezInfo : public profiling_internal::Sample { +struct HashtablezInfo { // Constructs the object but does not fill in any fields. 
HashtablezInfo(); ~HashtablezInfo(); @@ -67,8 +66,7 @@ struct HashtablezInfo : public profiling_internal::Sample { // Puts the object into a clean state, fills in the logically `const` members, // blocking for any readers that are currently sampling the object. - void PrepareForSampling(int64_t stride, size_t inline_element_size_value) - ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu); + void PrepareForSampling() ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu); // These fields are mutated by the various Record* APIs and need to be // thread-safe. @@ -80,23 +78,28 @@ struct HashtablezInfo : public profiling_internal::Sample { std::atomic total_probe_length; std::atomic hashes_bitwise_or; std::atomic hashes_bitwise_and; - std::atomic hashes_bitwise_xor; - std::atomic max_reserve; + + // `HashtablezSampler` maintains intrusive linked lists for all samples. See + // comments on `HashtablezSampler::all_` for details on these. `init_mu` + // guards the ability to restore the sample to a pristine state. This + // prevents races with sampling and resurrecting an object. + absl::Mutex init_mu; + HashtablezInfo* next; + HashtablezInfo* dead ABSL_GUARDED_BY(init_mu); // All of the fields below are set by `PrepareForSampling`, they must not be // mutated in `Record*` functions. They are logically `const` in that sense. - // These are guarded by init_mu, but that is not externalized to clients, - // which can read them only during `SampleRecorder::Iterate` which will hold - // the lock. + // These are guarded by init_mu, but that is not externalized to clients, who + // can only read them during `HashtablezSampler::Iterate` which will hold the + // lock. static constexpr int kMaxStackDepth = 64; absl::Time create_time; int32_t depth; void* stack[kMaxStackDepth]; - size_t inline_element_size; // How big is the slot? 
}; inline void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) { -#ifdef ABSL_INTERNAL_HAVE_SSE2 +#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 total_probe_length /= 16; #else total_probe_length /= 8; @@ -110,18 +113,6 @@ inline void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) { std::memory_order_relaxed); } -inline void RecordReservationSlow(HashtablezInfo* info, - size_t target_capacity) { - info->max_reserve.store( - (std::max)(info->max_reserve.load(std::memory_order_relaxed), - target_capacity), - std::memory_order_relaxed); -} - -inline void RecordClearedReservationSlow(HashtablezInfo* info) { - info->max_reserve.store(0, std::memory_order_relaxed); -} - inline void RecordStorageChangedSlow(HashtablezInfo* info, size_t size, size_t capacity) { info->size.store(size, std::memory_order_relaxed); @@ -145,15 +136,7 @@ inline void RecordEraseSlow(HashtablezInfo* info) { std::memory_order_relaxed); } -struct SamplingState { - int64_t next_sample; - // When we make a sampling decision, we record that distance so we can weight - // each sample. 
- int64_t sample_stride; -}; - -HashtablezInfo* SampleSlow(SamplingState& next_sample, - size_t inline_element_size); +HashtablezInfo* SampleSlow(int64_t* next_sample); void UnsampleSlow(HashtablezInfo* info); #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) @@ -193,16 +176,6 @@ class HashtablezInfoHandle { RecordRehashSlow(info_, total_probe_length); } - inline void RecordReservation(size_t target_capacity) { - if (ABSL_PREDICT_TRUE(info_ == nullptr)) return; - RecordReservationSlow(info_, target_capacity); - } - - inline void RecordClearedReservation() { - if (ABSL_PREDICT_TRUE(info_ == nullptr)) return; - RecordClearedReservationSlow(info_); - } - inline void RecordInsert(size_t hash, size_t distance_from_desired) { if (ABSL_PREDICT_TRUE(info_ == nullptr)) return; RecordInsertSlow(info_, hash, distance_from_desired); @@ -232,8 +205,6 @@ class HashtablezInfoHandle { inline void RecordStorageChanged(size_t /*size*/, size_t /*capacity*/) {} inline void RecordRehash(size_t /*total_probe_length*/) {} - inline void RecordReservation(size_t /*target_capacity*/) {} - inline void RecordClearedReservation() {} inline void RecordInsert(size_t /*hash*/, size_t /*distance_from_desired*/) {} inline void RecordErase() {} @@ -243,54 +214,105 @@ class HashtablezInfoHandle { #endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) -extern ABSL_PER_THREAD_TLS_KEYWORD SamplingState global_next_sample; +extern ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample; #endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) // Returns an RAII sampling handle that manages registration and unregistation // with the global sampler. 
-inline HashtablezInfoHandle Sample( - size_t inline_element_size ABSL_ATTRIBUTE_UNUSED) { +inline HashtablezInfoHandle Sample() { #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) - if (ABSL_PREDICT_TRUE(--global_next_sample.next_sample > 0)) { + if (ABSL_PREDICT_TRUE(--global_next_sample > 0)) { return HashtablezInfoHandle(nullptr); } - return HashtablezInfoHandle( - SampleSlow(global_next_sample, inline_element_size)); + return HashtablezInfoHandle(SampleSlow(&global_next_sample)); #else return HashtablezInfoHandle(nullptr); #endif // !ABSL_PER_THREAD_TLS } -using HashtablezSampler = - ::absl::profiling_internal::SampleRecorder; +// Holds samples and their associated stack traces with a soft limit of +// `SetHashtablezMaxSamples()`. +// +// Thread safe. +class HashtablezSampler { + public: + // Returns a global Sampler. + static HashtablezSampler& Global(); + + HashtablezSampler(); + ~HashtablezSampler(); -// Returns a global Sampler. -HashtablezSampler& GlobalHashtablezSampler(); + // Registers for sampling. Returns an opaque registration info. + HashtablezInfo* Register(); -using HashtablezConfigListener = void (*)(); -void SetHashtablezConfigListener(HashtablezConfigListener l); + // Unregisters the sample. + void Unregister(HashtablezInfo* sample); + + // The dispose callback will be called on all samples the moment they are + // being unregistered. Only affects samples that are unregistered after the + // callback has been set. + // Returns the previous callback. + using DisposeCallback = void (*)(const HashtablezInfo&); + DisposeCallback SetDisposeCallback(DisposeCallback f); + + // Iterates over all the registered `StackInfo`s. Returning the number of + // samples that have been dropped. 
+ int64_t Iterate(const std::function& f); + + private: + void PushNew(HashtablezInfo* sample); + void PushDead(HashtablezInfo* sample); + HashtablezInfo* PopDead(); + + std::atomic dropped_samples_; + std::atomic size_estimate_; + + // Intrusive lock free linked lists for tracking samples. + // + // `all_` records all samples (they are never removed from this list) and is + // terminated with a `nullptr`. + // + // `graveyard_.dead` is a circular linked list. When it is empty, + // `graveyard_.dead == &graveyard`. The list is circular so that + // every item on it (even the last) has a non-null dead pointer. This allows + // `Iterate` to determine if a given sample is live or dead using only + // information on the sample itself. + // + // For example, nodes [A, B, C, D, E] with [A, C, E] alive and [B, D] dead + // looks like this (G is the Graveyard): + // + // +---+ +---+ +---+ +---+ +---+ + // all -->| A |--->| B |--->| C |--->| D |--->| E | + // | | | | | | | | | | + // +---+ | | +->| |-+ | | +->| |-+ | | + // | G | +---+ | +---+ | +---+ | +---+ | +---+ + // | | | | | | + // | | --------+ +--------+ | + // +---+ | + // ^ | + // +--------------------------------------+ + // + std::atomic all_; + HashtablezInfo graveyard_; + + std::atomic dispose_; +}; // Enables or disables sampling for Swiss tables. -bool IsHashtablezEnabled(); void SetHashtablezEnabled(bool enabled); -void SetHashtablezEnabledInternal(bool enabled); // Sets the rate at which Swiss tables will be sampled. -int32_t GetHashtablezSampleParameter(); void SetHashtablezSampleParameter(int32_t rate); -void SetHashtablezSampleParameterInternal(int32_t rate); // Sets a soft max for the number of samples that will be kept. -int32_t GetHashtablezMaxSamples(); void SetHashtablezMaxSamples(int32_t max); -void SetHashtablezMaxSamplesInternal(int32_t max); // Configuration override. // This allows process-wide sampling without depending on order of // initialization of static storage duration objects. 
// The definition of this constant is weak, which allows us to inject a // different value for it at link time. -extern "C" bool ABSL_INTERNAL_C_SYMBOL(AbslContainerInternalSampleEverything)(); +extern "C" bool AbslContainerInternalSampleEverything(); } // namespace container_internal ABSL_NAMESPACE_END diff --git a/abseil-cpp/absl/container/internal/hashtablez_sampler_force_weak_definition.cc b/abseil-cpp/absl/container/internal/hashtablez_sampler_force_weak_definition.cc index ed35a7ee..78b9d362 100644 --- a/abseil-cpp/absl/container/internal/hashtablez_sampler_force_weak_definition.cc +++ b/abseil-cpp/absl/container/internal/hashtablez_sampler_force_weak_definition.cc @@ -21,8 +21,7 @@ ABSL_NAMESPACE_BEGIN namespace container_internal { // See hashtablez_sampler.h for details. -extern "C" ABSL_ATTRIBUTE_WEAK bool ABSL_INTERNAL_C_SYMBOL( - AbslContainerInternalSampleEverything)() { +extern "C" ABSL_ATTRIBUTE_WEAK bool AbslContainerInternalSampleEverything() { return false; } diff --git a/abseil-cpp/absl/container/internal/hashtablez_sampler_test.cc b/abseil-cpp/absl/container/internal/hashtablez_sampler_test.cc index 665d518f..8d10a1e9 100644 --- a/abseil-cpp/absl/container/internal/hashtablez_sampler_test.cc +++ b/abseil-cpp/absl/container/internal/hashtablez_sampler_test.cc @@ -21,8 +21,7 @@ #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/base/attributes.h" -#include "absl/base/config.h" -#include "absl/profiling/internal/sample_recorder.h" +#include "absl/container/internal/have_sse.h" #include "absl/synchronization/blocking_counter.h" #include "absl/synchronization/internal/thread_pool.h" #include "absl/synchronization/mutex.h" @@ -30,7 +29,7 @@ #include "absl/time/clock.h" #include "absl/time/time.h" -#ifdef ABSL_INTERNAL_HAVE_SSE2 +#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 constexpr int kProbeLength = 16; #else constexpr int kProbeLength = 8; @@ -70,9 +69,7 @@ std::vector GetSizes(HashtablezSampler* s) { } HashtablezInfo* 
Register(HashtablezSampler* s, size_t size) { - const int64_t test_stride = 123; - const size_t test_element_size = 17; - auto* info = s->Register(test_stride, test_element_size); + auto* info = s->Register(); assert(info != nullptr); info->size.store(size); return info; @@ -80,11 +77,9 @@ HashtablezInfo* Register(HashtablezSampler* s, size_t size) { TEST(HashtablezInfoTest, PrepareForSampling) { absl::Time test_start = absl::Now(); - const int64_t test_stride = 123; - const size_t test_element_size = 17; HashtablezInfo info; absl::MutexLock l(&info.init_mu); - info.PrepareForSampling(test_stride, test_element_size); + info.PrepareForSampling(); EXPECT_EQ(info.capacity.load(), 0); EXPECT_EQ(info.size.load(), 0); @@ -94,11 +89,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) { EXPECT_EQ(info.total_probe_length.load(), 0); EXPECT_EQ(info.hashes_bitwise_or.load(), 0); EXPECT_EQ(info.hashes_bitwise_and.load(), ~size_t{}); - EXPECT_EQ(info.hashes_bitwise_xor.load(), 0); - EXPECT_EQ(info.max_reserve.load(), 0); EXPECT_GE(info.create_time, test_start); - EXPECT_EQ(info.weight, test_stride); - EXPECT_EQ(info.inline_element_size, test_element_size); info.capacity.store(1, std::memory_order_relaxed); info.size.store(1, std::memory_order_relaxed); @@ -107,11 +98,9 @@ TEST(HashtablezInfoTest, PrepareForSampling) { info.total_probe_length.store(1, std::memory_order_relaxed); info.hashes_bitwise_or.store(1, std::memory_order_relaxed); info.hashes_bitwise_and.store(1, std::memory_order_relaxed); - info.hashes_bitwise_xor.store(1, std::memory_order_relaxed); - info.max_reserve.store(1, std::memory_order_relaxed); info.create_time = test_start - absl::Hours(20); - info.PrepareForSampling(test_stride * 2, test_element_size); + info.PrepareForSampling(); EXPECT_EQ(info.capacity.load(), 0); EXPECT_EQ(info.size.load(), 0); EXPECT_EQ(info.num_erases.load(), 0); @@ -120,19 +109,13 @@ TEST(HashtablezInfoTest, PrepareForSampling) { EXPECT_EQ(info.total_probe_length.load(), 0); 
EXPECT_EQ(info.hashes_bitwise_or.load(), 0); EXPECT_EQ(info.hashes_bitwise_and.load(), ~size_t{}); - EXPECT_EQ(info.hashes_bitwise_xor.load(), 0); - EXPECT_EQ(info.max_reserve.load(), 0); - EXPECT_EQ(info.weight, 2 * test_stride); - EXPECT_EQ(info.inline_element_size, test_element_size); EXPECT_GE(info.create_time, test_start); } TEST(HashtablezInfoTest, RecordStorageChanged) { HashtablezInfo info; absl::MutexLock l(&info.init_mu); - const int64_t test_stride = 21; - const size_t test_element_size = 19; - info.PrepareForSampling(test_stride, test_element_size); + info.PrepareForSampling(); RecordStorageChangedSlow(&info, 17, 47); EXPECT_EQ(info.size.load(), 17); EXPECT_EQ(info.capacity.load(), 47); @@ -144,33 +127,26 @@ TEST(HashtablezInfoTest, RecordStorageChanged) { TEST(HashtablezInfoTest, RecordInsert) { HashtablezInfo info; absl::MutexLock l(&info.init_mu); - const int64_t test_stride = 25; - const size_t test_element_size = 23; - info.PrepareForSampling(test_stride, test_element_size); + info.PrepareForSampling(); EXPECT_EQ(info.max_probe_length.load(), 0); RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength); EXPECT_EQ(info.max_probe_length.load(), 6); EXPECT_EQ(info.hashes_bitwise_and.load(), 0x0000FF00); EXPECT_EQ(info.hashes_bitwise_or.load(), 0x0000FF00); - EXPECT_EQ(info.hashes_bitwise_xor.load(), 0x0000FF00); RecordInsertSlow(&info, 0x000FF000, 4 * kProbeLength); EXPECT_EQ(info.max_probe_length.load(), 6); EXPECT_EQ(info.hashes_bitwise_and.load(), 0x0000F000); EXPECT_EQ(info.hashes_bitwise_or.load(), 0x000FFF00); - EXPECT_EQ(info.hashes_bitwise_xor.load(), 0x000F0F00); RecordInsertSlow(&info, 0x00FF0000, 12 * kProbeLength); EXPECT_EQ(info.max_probe_length.load(), 12); EXPECT_EQ(info.hashes_bitwise_and.load(), 0x00000000); EXPECT_EQ(info.hashes_bitwise_or.load(), 0x00FFFF00); - EXPECT_EQ(info.hashes_bitwise_xor.load(), 0x00F00F00); } TEST(HashtablezInfoTest, RecordErase) { - const int64_t test_stride = 31; - const size_t test_element_size = 29; 
HashtablezInfo info; absl::MutexLock l(&info.init_mu); - info.PrepareForSampling(test_stride, test_element_size); + info.PrepareForSampling(); EXPECT_EQ(info.num_erases.load(), 0); EXPECT_EQ(info.size.load(), 0); RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength); @@ -178,15 +154,12 @@ TEST(HashtablezInfoTest, RecordErase) { RecordEraseSlow(&info); EXPECT_EQ(info.size.load(), 0); EXPECT_EQ(info.num_erases.load(), 1); - EXPECT_EQ(info.inline_element_size, test_element_size); } TEST(HashtablezInfoTest, RecordRehash) { - const int64_t test_stride = 33; - const size_t test_element_size = 31; HashtablezInfo info; absl::MutexLock l(&info.init_mu); - info.PrepareForSampling(test_stride, test_element_size); + info.PrepareForSampling(); RecordInsertSlow(&info, 0x1, 0); RecordInsertSlow(&info, 0x2, kProbeLength); RecordInsertSlow(&info, 0x4, kProbeLength); @@ -205,67 +178,43 @@ TEST(HashtablezInfoTest, RecordRehash) { EXPECT_EQ(info.total_probe_length.load(), 3); EXPECT_EQ(info.num_erases.load(), 0); EXPECT_EQ(info.num_rehashes.load(), 1); - EXPECT_EQ(info.inline_element_size, test_element_size); -} - -TEST(HashtablezInfoTest, RecordReservation) { - HashtablezInfo info; - absl::MutexLock l(&info.init_mu); - const int64_t test_stride = 35; - const size_t test_element_size = 33; - info.PrepareForSampling(test_stride, test_element_size); - RecordReservationSlow(&info, 3); - EXPECT_EQ(info.max_reserve.load(), 3); - - RecordReservationSlow(&info, 2); - // High watermark does not change - EXPECT_EQ(info.max_reserve.load(), 3); - - RecordReservationSlow(&info, 10); - // High watermark does change - EXPECT_EQ(info.max_reserve.load(), 10); } #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) TEST(HashtablezSamplerTest, SmallSampleParameter) { - const size_t test_element_size = 31; SetHashtablezEnabled(true); SetHashtablezSampleParameter(100); for (int i = 0; i < 1000; ++i) { - SamplingState next_sample = {0, 0}; - HashtablezInfo* sample = SampleSlow(next_sample, test_element_size); - 
EXPECT_GT(next_sample.next_sample, 0); - EXPECT_EQ(next_sample.next_sample, next_sample.sample_stride); + int64_t next_sample = 0; + HashtablezInfo* sample = SampleSlow(&next_sample); + EXPECT_GT(next_sample, 0); EXPECT_NE(sample, nullptr); UnsampleSlow(sample); } } TEST(HashtablezSamplerTest, LargeSampleParameter) { - const size_t test_element_size = 31; SetHashtablezEnabled(true); SetHashtablezSampleParameter(std::numeric_limits::max()); for (int i = 0; i < 1000; ++i) { - SamplingState next_sample = {0, 0}; - HashtablezInfo* sample = SampleSlow(next_sample, test_element_size); - EXPECT_GT(next_sample.next_sample, 0); - EXPECT_EQ(next_sample.next_sample, next_sample.sample_stride); + int64_t next_sample = 0; + HashtablezInfo* sample = SampleSlow(&next_sample); + EXPECT_GT(next_sample, 0); EXPECT_NE(sample, nullptr); UnsampleSlow(sample); } } TEST(HashtablezSamplerTest, Sample) { - const size_t test_element_size = 31; SetHashtablezEnabled(true); SetHashtablezSampleParameter(100); int64_t num_sampled = 0; int64_t total = 0; double sample_rate = 0.0; for (int i = 0; i < 1000000; ++i) { - HashtablezInfoHandle h = Sample(test_element_size); + HashtablezInfoHandle h = Sample(); ++total; if (HashtablezInfoHandlePeer::IsSampled(h)) { ++num_sampled; @@ -277,17 +226,14 @@ TEST(HashtablezSamplerTest, Sample) { } TEST(HashtablezSamplerTest, Handle) { - auto& sampler = GlobalHashtablezSampler(); - const int64_t test_stride = 41; - const size_t test_element_size = 39; - HashtablezInfoHandle h(sampler.Register(test_stride, test_element_size)); + auto& sampler = HashtablezSampler::Global(); + HashtablezInfoHandle h(sampler.Register()); auto* info = HashtablezInfoHandlePeer::GetInfo(&h); info->hashes_bitwise_and.store(0x12345678, std::memory_order_relaxed); bool found = false; sampler.Iterate([&](const HashtablezInfo& h) { if (&h == info) { - EXPECT_EQ(h.weight, test_stride); EXPECT_EQ(h.hashes_bitwise_and.load(), 0x12345678); found = true; } @@ -353,20 +299,18 @@ 
TEST(HashtablezSamplerTest, MultiThreaded) { ThreadPool pool(10); for (int i = 0; i < 10; ++i) { - const int64_t sampling_stride = 11 + i % 3; - const size_t elt_size = 10 + i % 2; - pool.Schedule([&sampler, &stop, sampling_stride, elt_size]() { + pool.Schedule([&sampler, &stop]() { std::random_device rd; std::mt19937 gen(rd()); std::vector infoz; while (!stop.HasBeenNotified()) { if (infoz.empty()) { - infoz.push_back(sampler.Register(sampling_stride, elt_size)); + infoz.push_back(sampler.Register()); } switch (std::uniform_int_distribution<>(0, 2)(gen)) { case 0: { - infoz.push_back(sampler.Register(sampling_stride, elt_size)); + infoz.push_back(sampler.Register()); break; } case 1: { @@ -375,7 +319,6 @@ TEST(HashtablezSamplerTest, MultiThreaded) { HashtablezInfo* info = infoz[p]; infoz[p] = infoz.back(); infoz.pop_back(); - EXPECT_EQ(info->weight, sampling_stride); sampler.Unregister(info); break; } diff --git a/abseil-cpp/absl/container/internal/have_sse.h b/abseil-cpp/absl/container/internal/have_sse.h new file mode 100644 index 00000000..e75e1a16 --- /dev/null +++ b/abseil-cpp/absl/container/internal/have_sse.h @@ -0,0 +1,50 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Shared config probing for SSE instructions used in Swiss tables. 
+#ifndef ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_ +#define ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_ + +#ifndef ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 +#if defined(__SSE2__) || \ + (defined(_MSC_VER) && \ + (defined(_M_X64) || (defined(_M_IX86) && _M_IX86_FP >= 2))) +#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 1 +#else +#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 0 +#endif +#endif + +#ifndef ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 +#ifdef __SSSE3__ +#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 1 +#else +#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 0 +#endif +#endif + +#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 && \ + !ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 +#error "Bad configuration!" +#endif + +#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 +#include +#endif + +#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 +#include +#endif + +#endif // ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_ diff --git a/abseil-cpp/absl/container/internal/inlined_vector.h b/abseil-cpp/absl/container/internal/inlined_vector.h index 54c92a01..4d80b727 100644 --- a/abseil-cpp/absl/container/internal/inlined_vector.h +++ b/abseil-cpp/absl/container/internal/inlined_vector.h @@ -21,11 +21,8 @@ #include #include #include -#include -#include #include -#include "absl/base/attributes.h" #include "absl/base/macros.h" #include "absl/container/internal/compressed_tuple.h" #include "absl/memory/memory.h" @@ -36,148 +33,96 @@ namespace absl { ABSL_NAMESPACE_BEGIN namespace inlined_vector_internal { -// GCC does not deal very well with the below code -#if !defined(__clang__) && defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Warray-bounds" -#endif - -template -using AllocatorTraits = std::allocator_traits; -template -using ValueType = typename AllocatorTraits::value_type; -template -using SizeType = typename AllocatorTraits::size_type; -template -using Pointer = typename AllocatorTraits::pointer; -template -using ConstPointer = typename AllocatorTraits::const_pointer; -template -using SizeType = typename 
AllocatorTraits::size_type; -template -using DifferenceType = typename AllocatorTraits::difference_type; -template -using Reference = ValueType&; -template -using ConstReference = const ValueType&; -template -using Iterator = Pointer; -template -using ConstIterator = ConstPointer; -template -using ReverseIterator = typename std::reverse_iterator>; -template -using ConstReverseIterator = typename std::reverse_iterator>; -template -using MoveIterator = typename std::move_iterator>; - template using IsAtLeastForwardIterator = std::is_convertible< typename std::iterator_traits::iterator_category, std::forward_iterator_tag>; -template +template ::value_type> using IsMemcpyOk = - absl::conjunction>>, - absl::is_trivially_copy_constructible>, - absl::is_trivially_copy_assignable>, - absl::is_trivially_destructible>>; - -template -struct TypeIdentity { - using type = T; -}; - -// Used for function arguments in template functions to prevent ADL by forcing -// callers to explicitly specify the template parameter. 
-template -using NoTypeDeduction = typename TypeIdentity::type; - -template >::value> -struct DestroyAdapter; - -template -struct DestroyAdapter { - static void DestroyElements(A& allocator, Pointer destroy_first, - SizeType destroy_size) { - for (SizeType i = destroy_size; i != 0;) { + absl::conjunction>, + absl::is_trivially_copy_constructible, + absl::is_trivially_copy_assignable, + absl::is_trivially_destructible>; + +template +void DestroyElements(AllocatorType* alloc_ptr, Pointer destroy_first, + SizeType destroy_size) { + using AllocatorTraits = absl::allocator_traits; + + if (destroy_first != nullptr) { + for (auto i = destroy_size; i != 0;) { --i; - AllocatorTraits::destroy(allocator, destroy_first + i); + AllocatorTraits::destroy(*alloc_ptr, destroy_first + i); } - } -}; - -template -struct DestroyAdapter { - static void DestroyElements(A& allocator, Pointer destroy_first, - SizeType destroy_size) { - static_cast(allocator); - static_cast(destroy_first); - static_cast(destroy_size); - } -}; - -template -struct Allocation { - Pointer data; - SizeType capacity; -}; - -template ) > ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT)> -struct MallocAdapter { - static Allocation Allocate(A& allocator, SizeType requested_capacity) { - return {AllocatorTraits::allocate(allocator, requested_capacity), - requested_capacity}; - } - static void Deallocate(A& allocator, Pointer pointer, - SizeType capacity) { - AllocatorTraits::deallocate(allocator, pointer, capacity); +#if !defined(NDEBUG) + { + using ValueType = typename AllocatorTraits::value_type; + + // Overwrite unused memory with `0xab` so we can catch uninitialized + // usage. + // + // Cast to `void*` to tell the compiler that we don't care that we might + // be scribbling on a vtable pointer. 
+ void* memory_ptr = destroy_first; + auto memory_size = destroy_size * sizeof(ValueType); + std::memset(memory_ptr, 0xab, memory_size); + } +#endif // !defined(NDEBUG) } -}; +} -template -void ConstructElements(NoTypeDeduction& allocator, - Pointer construct_first, ValueAdapter& values, - SizeType construct_size) { - for (SizeType i = 0; i < construct_size; ++i) { - ABSL_INTERNAL_TRY { values.ConstructNext(allocator, construct_first + i); } +template +void ConstructElements(AllocatorType* alloc_ptr, Pointer construct_first, + ValueAdapter* values_ptr, SizeType construct_size) { + for (SizeType i = 0; i < construct_size; ++i) { + ABSL_INTERNAL_TRY { + values_ptr->ConstructNext(alloc_ptr, construct_first + i); + } ABSL_INTERNAL_CATCH_ANY { - DestroyAdapter::DestroyElements(allocator, construct_first, i); + inlined_vector_internal::DestroyElements(alloc_ptr, construct_first, i); ABSL_INTERNAL_RETHROW; } } } -template -void AssignElements(Pointer assign_first, ValueAdapter& values, - SizeType assign_size) { - for (SizeType i = 0; i < assign_size; ++i) { - values.AssignNext(assign_first + i); +template +void AssignElements(Pointer assign_first, ValueAdapter* values_ptr, + SizeType assign_size) { + for (SizeType i = 0; i < assign_size; ++i) { + values_ptr->AssignNext(assign_first + i); } } -template +template struct StorageView { - Pointer data; - SizeType size; - SizeType capacity; + using AllocatorTraits = absl::allocator_traits; + using Pointer = typename AllocatorTraits::pointer; + using SizeType = typename AllocatorTraits::size_type; + + Pointer data; + SizeType size; + SizeType capacity; }; -template +template class IteratorValueAdapter { + using AllocatorTraits = absl::allocator_traits; + using Pointer = typename AllocatorTraits::pointer; + public: explicit IteratorValueAdapter(const Iterator& it) : it_(it) {} - void ConstructNext(A& allocator, Pointer construct_at) { - AllocatorTraits::construct(allocator, construct_at, *it_); + void ConstructNext(AllocatorType* 
alloc_ptr, Pointer construct_at) { + AllocatorTraits::construct(*alloc_ptr, construct_at, *it_); ++it_; } - void AssignNext(Pointer assign_at) { + void AssignNext(Pointer assign_at) { *assign_at = *it_; ++it_; } @@ -186,123 +131,166 @@ class IteratorValueAdapter { Iterator it_; }; -template +template class CopyValueAdapter { + using AllocatorTraits = absl::allocator_traits; + using ValueType = typename AllocatorTraits::value_type; + using Pointer = typename AllocatorTraits::pointer; + using ConstPointer = typename AllocatorTraits::const_pointer; + public: - explicit CopyValueAdapter(ConstPointer p) : ptr_(p) {} + explicit CopyValueAdapter(const ValueType& v) : ptr_(std::addressof(v)) {} - void ConstructNext(A& allocator, Pointer construct_at) { - AllocatorTraits::construct(allocator, construct_at, *ptr_); + void ConstructNext(AllocatorType* alloc_ptr, Pointer construct_at) { + AllocatorTraits::construct(*alloc_ptr, construct_at, *ptr_); } - void AssignNext(Pointer assign_at) { *assign_at = *ptr_; } + void AssignNext(Pointer assign_at) { *assign_at = *ptr_; } private: - ConstPointer ptr_; + ConstPointer ptr_; }; -template +template class DefaultValueAdapter { + using AllocatorTraits = absl::allocator_traits; + using ValueType = typename AllocatorTraits::value_type; + using Pointer = typename AllocatorTraits::pointer; + public: explicit DefaultValueAdapter() {} - void ConstructNext(A& allocator, Pointer construct_at) { - AllocatorTraits::construct(allocator, construct_at); + void ConstructNext(AllocatorType* alloc_ptr, Pointer construct_at) { + AllocatorTraits::construct(*alloc_ptr, construct_at); } - void AssignNext(Pointer assign_at) { *assign_at = ValueType(); } + void AssignNext(Pointer assign_at) { *assign_at = ValueType(); } }; -template +template class AllocationTransaction { + using AllocatorTraits = absl::allocator_traits; + using Pointer = typename AllocatorTraits::pointer; + using SizeType = typename AllocatorTraits::size_type; + public: - explicit 
AllocationTransaction(A& allocator) - : allocator_data_(allocator, nullptr), capacity_(0) {} + explicit AllocationTransaction(AllocatorType* alloc_ptr) + : alloc_data_(*alloc_ptr, nullptr) {} ~AllocationTransaction() { if (DidAllocate()) { - MallocAdapter::Deallocate(GetAllocator(), GetData(), GetCapacity()); + AllocatorTraits::deallocate(GetAllocator(), GetData(), GetCapacity()); } } AllocationTransaction(const AllocationTransaction&) = delete; void operator=(const AllocationTransaction&) = delete; - A& GetAllocator() { return allocator_data_.template get<0>(); } - Pointer& GetData() { return allocator_data_.template get<1>(); } - SizeType& GetCapacity() { return capacity_; } + AllocatorType& GetAllocator() { return alloc_data_.template get<0>(); } + Pointer& GetData() { return alloc_data_.template get<1>(); } + SizeType& GetCapacity() { return capacity_; } bool DidAllocate() { return GetData() != nullptr; } - - Pointer Allocate(SizeType requested_capacity) { - Allocation result = - MallocAdapter::Allocate(GetAllocator(), requested_capacity); - GetData() = result.data; - GetCapacity() = result.capacity; - return result.data; + Pointer Allocate(SizeType capacity) { + GetData() = AllocatorTraits::allocate(GetAllocator(), capacity); + GetCapacity() = capacity; + return GetData(); } - ABSL_MUST_USE_RESULT Allocation Release() && { - Allocation result = {GetData(), GetCapacity()}; - Reset(); - return result; - } - - private: void Reset() { GetData() = nullptr; GetCapacity() = 0; } - container_internal::CompressedTuple> allocator_data_; - SizeType capacity_; + private: + container_internal::CompressedTuple alloc_data_; + SizeType capacity_ = 0; }; -template +template class ConstructionTransaction { + using AllocatorTraits = absl::allocator_traits; + using Pointer = typename AllocatorTraits::pointer; + using SizeType = typename AllocatorTraits::size_type; + public: - explicit ConstructionTransaction(A& allocator) - : allocator_data_(allocator, nullptr), size_(0) {} + 
explicit ConstructionTransaction(AllocatorType* alloc_ptr) + : alloc_data_(*alloc_ptr, nullptr) {} ~ConstructionTransaction() { if (DidConstruct()) { - DestroyAdapter::DestroyElements(GetAllocator(), GetData(), GetSize()); + inlined_vector_internal::DestroyElements(std::addressof(GetAllocator()), + GetData(), GetSize()); } } ConstructionTransaction(const ConstructionTransaction&) = delete; void operator=(const ConstructionTransaction&) = delete; - A& GetAllocator() { return allocator_data_.template get<0>(); } - Pointer& GetData() { return allocator_data_.template get<1>(); } - SizeType& GetSize() { return size_; } + AllocatorType& GetAllocator() { return alloc_data_.template get<0>(); } + Pointer& GetData() { return alloc_data_.template get<1>(); } + SizeType& GetSize() { return size_; } bool DidConstruct() { return GetData() != nullptr; } template - void Construct(Pointer data, ValueAdapter& values, SizeType size) { - ConstructElements(GetAllocator(), data, values, size); + void Construct(Pointer data, ValueAdapter* values_ptr, SizeType size) { + inlined_vector_internal::ConstructElements(std::addressof(GetAllocator()), + data, values_ptr, size); GetData() = data; GetSize() = size; } - void Commit() && { + void Commit() { GetData() = nullptr; GetSize() = 0; } private: - container_internal::CompressedTuple> allocator_data_; - SizeType size_; + container_internal::CompressedTuple alloc_data_; + SizeType size_ = 0; }; template class Storage { public: - static SizeType NextCapacity(SizeType current_capacity) { + using AllocatorTraits = absl::allocator_traits; + using allocator_type = typename AllocatorTraits::allocator_type; + using value_type = typename AllocatorTraits::value_type; + using pointer = typename AllocatorTraits::pointer; + using const_pointer = typename AllocatorTraits::const_pointer; + using size_type = typename AllocatorTraits::size_type; + using difference_type = typename AllocatorTraits::difference_type; + + using reference = value_type&; + using 
const_reference = const value_type&; + using RValueReference = value_type&&; + using iterator = pointer; + using const_iterator = const_pointer; + using reverse_iterator = std::reverse_iterator; + using const_reverse_iterator = std::reverse_iterator; + using MoveIterator = std::move_iterator; + using IsMemcpyOk = inlined_vector_internal::IsMemcpyOk; + + using StorageView = inlined_vector_internal::StorageView; + + template + using IteratorValueAdapter = + inlined_vector_internal::IteratorValueAdapter; + using CopyValueAdapter = + inlined_vector_internal::CopyValueAdapter; + using DefaultValueAdapter = + inlined_vector_internal::DefaultValueAdapter; + + using AllocationTransaction = + inlined_vector_internal::AllocationTransaction; + using ConstructionTransaction = + inlined_vector_internal::ConstructionTransaction; + + static size_type NextCapacity(size_type current_capacity) { return current_capacity * 2; } - static SizeType ComputeCapacity(SizeType current_capacity, - SizeType requested_capacity) { + static size_type ComputeCapacity(size_type current_capacity, + size_type requested_capacity) { return (std::max)(NextCapacity(current_capacity), requested_capacity); } @@ -310,138 +298,140 @@ class Storage { // Storage Constructors and Destructor // --------------------------------------------------------------------------- - Storage() : metadata_(A(), /* size and is_allocated */ 0u) {} + Storage() : metadata_() {} - explicit Storage(const A& allocator) - : metadata_(allocator, /* size and is_allocated */ 0u) {} + explicit Storage(const allocator_type& alloc) : metadata_(alloc, {}) {} ~Storage() { - if (GetSizeAndIsAllocated() == 0) { - // Empty and not allocated; nothing to do. - } else if (IsMemcpyOk::value) { - // No destructors need to be run; just deallocate if necessary. - DeallocateIfAllocated(); - } else { - DestroyContents(); - } + pointer data = GetIsAllocated() ? 
GetAllocatedData() : GetInlinedData(); + inlined_vector_internal::DestroyElements(GetAllocPtr(), data, GetSize()); + DeallocateIfAllocated(); } // --------------------------------------------------------------------------- // Storage Member Accessors // --------------------------------------------------------------------------- - SizeType& GetSizeAndIsAllocated() { return metadata_.template get<1>(); } + size_type& GetSizeAndIsAllocated() { return metadata_.template get<1>(); } - const SizeType& GetSizeAndIsAllocated() const { + const size_type& GetSizeAndIsAllocated() const { return metadata_.template get<1>(); } - SizeType GetSize() const { return GetSizeAndIsAllocated() >> 1; } + size_type GetSize() const { return GetSizeAndIsAllocated() >> 1; } bool GetIsAllocated() const { return GetSizeAndIsAllocated() & 1; } - Pointer GetAllocatedData() { return data_.allocated.allocated_data; } + pointer GetAllocatedData() { return data_.allocated.allocated_data; } - ConstPointer GetAllocatedData() const { + const_pointer GetAllocatedData() const { return data_.allocated.allocated_data; } - Pointer GetInlinedData() { - return reinterpret_cast>( + pointer GetInlinedData() { + return reinterpret_cast( std::addressof(data_.inlined.inlined_data[0])); } - ConstPointer GetInlinedData() const { - return reinterpret_cast>( + const_pointer GetInlinedData() const { + return reinterpret_cast( std::addressof(data_.inlined.inlined_data[0])); } - SizeType GetAllocatedCapacity() const { + size_type GetAllocatedCapacity() const { return data_.allocated.allocated_capacity; } - SizeType GetInlinedCapacity() const { return static_cast>(N); } + size_type GetInlinedCapacity() const { return static_cast(N); } - StorageView MakeStorageView() { - return GetIsAllocated() ? StorageView{GetAllocatedData(), GetSize(), - GetAllocatedCapacity()} - : StorageView{GetInlinedData(), GetSize(), - GetInlinedCapacity()}; + StorageView MakeStorageView() { + return GetIsAllocated() + ? 
StorageView{GetAllocatedData(), GetSize(), + GetAllocatedCapacity()} + : StorageView{GetInlinedData(), GetSize(), GetInlinedCapacity()}; } - A& GetAllocator() { return metadata_.template get<0>(); } + allocator_type* GetAllocPtr() { + return std::addressof(metadata_.template get<0>()); + } - const A& GetAllocator() const { return metadata_.template get<0>(); } + const allocator_type* GetAllocPtr() const { + return std::addressof(metadata_.template get<0>()); + } // --------------------------------------------------------------------------- // Storage Member Mutators // --------------------------------------------------------------------------- - ABSL_ATTRIBUTE_NOINLINE void InitFrom(const Storage& other); - template - void Initialize(ValueAdapter values, SizeType new_size); + void Initialize(ValueAdapter values, size_type new_size); template - void Assign(ValueAdapter values, SizeType new_size); + void Assign(ValueAdapter values, size_type new_size); template - void Resize(ValueAdapter values, SizeType new_size); + void Resize(ValueAdapter values, size_type new_size); template - Iterator Insert(ConstIterator pos, ValueAdapter values, - SizeType insert_count); + iterator Insert(const_iterator pos, ValueAdapter values, + size_type insert_count); template - Reference EmplaceBack(Args&&... args); + reference EmplaceBack(Args&&... 
args); - Iterator Erase(ConstIterator from, ConstIterator to); + iterator Erase(const_iterator from, const_iterator to); - void Reserve(SizeType requested_capacity); + void Reserve(size_type requested_capacity); void ShrinkToFit(); void Swap(Storage* other_storage_ptr); void SetIsAllocated() { - GetSizeAndIsAllocated() |= static_cast>(1); + GetSizeAndIsAllocated() |= static_cast(1); } void UnsetIsAllocated() { - GetSizeAndIsAllocated() &= ((std::numeric_limits>::max)() - 1); + GetSizeAndIsAllocated() &= ((std::numeric_limits::max)() - 1); } - void SetSize(SizeType size) { + void SetSize(size_type size) { GetSizeAndIsAllocated() = - (size << 1) | static_cast>(GetIsAllocated()); + (size << 1) | static_cast(GetIsAllocated()); } - void SetAllocatedSize(SizeType size) { - GetSizeAndIsAllocated() = (size << 1) | static_cast>(1); + void SetAllocatedSize(size_type size) { + GetSizeAndIsAllocated() = (size << 1) | static_cast(1); } - void SetInlinedSize(SizeType size) { - GetSizeAndIsAllocated() = size << static_cast>(1); + void SetInlinedSize(size_type size) { + GetSizeAndIsAllocated() = size << static_cast(1); } - void AddSize(SizeType count) { - GetSizeAndIsAllocated() += count << static_cast>(1); + void AddSize(size_type count) { + GetSizeAndIsAllocated() += count << static_cast(1); } - void SubtractSize(SizeType count) { - ABSL_HARDENING_ASSERT(count <= GetSize()); + void SubtractSize(size_type count) { + assert(count <= GetSize()); + + GetSizeAndIsAllocated() -= count << static_cast(1); + } - GetSizeAndIsAllocated() -= count << static_cast>(1); + void SetAllocatedData(pointer data, size_type capacity) { + data_.allocated.allocated_data = data; + data_.allocated.allocated_capacity = capacity; } - void SetAllocation(Allocation allocation) { - data_.allocated.allocated_data = allocation.data; - data_.allocated.allocated_capacity = allocation.capacity; + void AcquireAllocatedData(AllocationTransaction* allocation_tx_ptr) { + SetAllocatedData(allocation_tx_ptr->GetData(), 
+ allocation_tx_ptr->GetCapacity()); + + allocation_tx_ptr->Reset(); } void MemcpyFrom(const Storage& other_storage) { - ABSL_HARDENING_ASSERT(IsMemcpyOk::value || - other_storage.GetIsAllocated()); + assert(IsMemcpyOk::value || other_storage.GetIsAllocated()); GetSizeAndIsAllocated() = other_storage.GetSizeAndIsAllocated(); data_ = other_storage.data_; @@ -449,23 +439,22 @@ class Storage { void DeallocateIfAllocated() { if (GetIsAllocated()) { - MallocAdapter::Deallocate(GetAllocator(), GetAllocatedData(), - GetAllocatedCapacity()); + AllocatorTraits::deallocate(*GetAllocPtr(), GetAllocatedData(), + GetAllocatedCapacity()); } } private: - ABSL_ATTRIBUTE_NOINLINE void DestroyContents(); - - using Metadata = container_internal::CompressedTuple>; + using Metadata = + container_internal::CompressedTuple; struct Allocated { - Pointer allocated_data; - SizeType allocated_capacity; + pointer allocated_data; + size_type allocated_capacity; }; struct Inlined { - alignas(ValueType) char inlined_data[sizeof(ValueType[N])]; + alignas(value_type) char inlined_data[sizeof(value_type[N])]; }; union Data { @@ -473,75 +462,33 @@ class Storage { Inlined inlined; }; - template - ABSL_ATTRIBUTE_NOINLINE Reference EmplaceBackSlow(Args&&... args); - Metadata metadata_; Data data_; }; -template -void Storage::DestroyContents() { - Pointer data = GetIsAllocated() ? GetAllocatedData() : GetInlinedData(); - DestroyAdapter::DestroyElements(GetAllocator(), data, GetSize()); - DeallocateIfAllocated(); -} - -template -void Storage::InitFrom(const Storage& other) { - const SizeType n = other.GetSize(); - ABSL_HARDENING_ASSERT(n > 0); // Empty sources handled handled in caller. - ConstPointer src; - Pointer dst; - if (!other.GetIsAllocated()) { - dst = GetInlinedData(); - src = other.GetInlinedData(); - } else { - // Because this is only called from the `InlinedVector` constructors, it's - // safe to take on the allocation with size `0`. 
If `ConstructElements(...)` - // throws, deallocation will be automatically handled by `~Storage()`. - SizeType requested_capacity = ComputeCapacity(GetInlinedCapacity(), n); - Allocation allocation = - MallocAdapter::Allocate(GetAllocator(), requested_capacity); - SetAllocation(allocation); - dst = allocation.data; - src = other.GetAllocatedData(); - } - if (IsMemcpyOk::value) { - std::memcpy(reinterpret_cast(dst), - reinterpret_cast(src), n * sizeof(ValueType)); - } else { - auto values = IteratorValueAdapter>(src); - ConstructElements(GetAllocator(), dst, values, n); - } - GetSizeAndIsAllocated() = other.GetSizeAndIsAllocated(); -} - template template -auto Storage::Initialize(ValueAdapter values, SizeType new_size) +auto Storage::Initialize(ValueAdapter values, size_type new_size) -> void { // Only callable from constructors! - ABSL_HARDENING_ASSERT(!GetIsAllocated()); - ABSL_HARDENING_ASSERT(GetSize() == 0); + assert(!GetIsAllocated()); + assert(GetSize() == 0); - Pointer construct_data; + pointer construct_data; if (new_size > GetInlinedCapacity()) { // Because this is only called from the `InlinedVector` constructors, it's // safe to take on the allocation with size `0`. If `ConstructElements(...)` // throws, deallocation will be automatically handled by `~Storage()`. 
- SizeType requested_capacity = - ComputeCapacity(GetInlinedCapacity(), new_size); - Allocation allocation = - MallocAdapter::Allocate(GetAllocator(), requested_capacity); - construct_data = allocation.data; - SetAllocation(allocation); + size_type new_capacity = ComputeCapacity(GetInlinedCapacity(), new_size); + construct_data = AllocatorTraits::allocate(*GetAllocPtr(), new_capacity); + SetAllocatedData(construct_data, new_capacity); SetIsAllocated(); } else { construct_data = GetInlinedData(); } - ConstructElements(GetAllocator(), construct_data, values, new_size); + inlined_vector_internal::ConstructElements(GetAllocPtr(), construct_data, + &values, new_size); // Since the initial size was guaranteed to be `0` and the allocated bit is // already correct for either case, *adding* `new_size` gives us the correct @@ -551,20 +498,18 @@ auto Storage::Initialize(ValueAdapter values, SizeType new_size) template template -auto Storage::Assign(ValueAdapter values, SizeType new_size) - -> void { - StorageView storage_view = MakeStorageView(); +auto Storage::Assign(ValueAdapter values, size_type new_size) -> void { + StorageView storage_view = MakeStorageView(); - AllocationTransaction allocation_tx(GetAllocator()); + AllocationTransaction allocation_tx(GetAllocPtr()); - absl::Span> assign_loop; - absl::Span> construct_loop; - absl::Span> destroy_loop; + absl::Span assign_loop; + absl::Span construct_loop; + absl::Span destroy_loop; if (new_size > storage_view.capacity) { - SizeType requested_capacity = - ComputeCapacity(storage_view.capacity, new_size); - construct_loop = {allocation_tx.Allocate(requested_capacity), new_size}; + size_type new_capacity = ComputeCapacity(storage_view.capacity, new_size); + construct_loop = {allocation_tx.Allocate(new_capacity), new_size}; destroy_loop = {storage_view.data, storage_view.size}; } else if (new_size > storage_view.size) { assign_loop = {storage_view.data, storage_view.size}; @@ -575,17 +520,18 @@ auto 
Storage::Assign(ValueAdapter values, SizeType new_size) destroy_loop = {storage_view.data + new_size, storage_view.size - new_size}; } - AssignElements(assign_loop.data(), values, assign_loop.size()); + inlined_vector_internal::AssignElements(assign_loop.data(), &values, + assign_loop.size()); - ConstructElements(GetAllocator(), construct_loop.data(), values, - construct_loop.size()); + inlined_vector_internal::ConstructElements( + GetAllocPtr(), construct_loop.data(), &values, construct_loop.size()); - DestroyAdapter::DestroyElements(GetAllocator(), destroy_loop.data(), - destroy_loop.size()); + inlined_vector_internal::DestroyElements(GetAllocPtr(), destroy_loop.data(), + destroy_loop.size()); if (allocation_tx.DidAllocate()) { DeallocateIfAllocated(); - SetAllocation(std::move(allocation_tx).Release()); + AcquireAllocatedData(&allocation_tx); SetIsAllocated(); } @@ -594,120 +540,125 @@ auto Storage::Assign(ValueAdapter values, SizeType new_size) template template -auto Storage::Resize(ValueAdapter values, SizeType new_size) - -> void { - StorageView storage_view = MakeStorageView(); - Pointer const base = storage_view.data; - const SizeType size = storage_view.size; - A& alloc = GetAllocator(); - if (new_size <= size) { - // Destroy extra old elements. - DestroyAdapter::DestroyElements(alloc, base + new_size, size - new_size); - } else if (new_size <= storage_view.capacity) { - // Construct new elements in place. 
- ConstructElements(alloc, base + size, values, new_size - size); +auto Storage::Resize(ValueAdapter values, size_type new_size) -> void { + StorageView storage_view = MakeStorageView(); + + IteratorValueAdapter move_values( + MoveIterator(storage_view.data)); + + AllocationTransaction allocation_tx(GetAllocPtr()); + ConstructionTransaction construction_tx(GetAllocPtr()); + + absl::Span construct_loop; + absl::Span move_construct_loop; + absl::Span destroy_loop; + + if (new_size > storage_view.capacity) { + size_type new_capacity = ComputeCapacity(storage_view.capacity, new_size); + pointer new_data = allocation_tx.Allocate(new_capacity); + construct_loop = {new_data + storage_view.size, + new_size - storage_view.size}; + move_construct_loop = {new_data, storage_view.size}; + destroy_loop = {storage_view.data, storage_view.size}; + } else if (new_size > storage_view.size) { + construct_loop = {storage_view.data + storage_view.size, + new_size - storage_view.size}; } else { - // Steps: - // a. Allocate new backing store. - // b. Construct new elements in new backing store. - // c. Move existing elements from old backing store to new backing store. - // d. Destroy all elements in old backing store. - // Use transactional wrappers for the first two steps so we can roll - // back if necessary due to exceptions. 
- AllocationTransaction allocation_tx(alloc); - SizeType requested_capacity = - ComputeCapacity(storage_view.capacity, new_size); - Pointer new_data = allocation_tx.Allocate(requested_capacity); - - ConstructionTransaction construction_tx(alloc); - construction_tx.Construct(new_data + size, values, new_size - size); - - IteratorValueAdapter> move_values( - (MoveIterator(base))); - ConstructElements(alloc, new_data, move_values, size); - - DestroyAdapter::DestroyElements(alloc, base, size); - std::move(construction_tx).Commit(); + destroy_loop = {storage_view.data + new_size, storage_view.size - new_size}; + } + + construction_tx.Construct(construct_loop.data(), &values, + construct_loop.size()); + + inlined_vector_internal::ConstructElements( + GetAllocPtr(), move_construct_loop.data(), &move_values, + move_construct_loop.size()); + + inlined_vector_internal::DestroyElements(GetAllocPtr(), destroy_loop.data(), + destroy_loop.size()); + + construction_tx.Commit(); + if (allocation_tx.DidAllocate()) { DeallocateIfAllocated(); - SetAllocation(std::move(allocation_tx).Release()); + AcquireAllocatedData(&allocation_tx); SetIsAllocated(); } + SetSize(new_size); } template template -auto Storage::Insert(ConstIterator pos, ValueAdapter values, - SizeType insert_count) -> Iterator { - StorageView storage_view = MakeStorageView(); +auto Storage::Insert(const_iterator pos, ValueAdapter values, + size_type insert_count) -> iterator { + StorageView storage_view = MakeStorageView(); - SizeType insert_index = - std::distance(ConstIterator(storage_view.data), pos); - SizeType insert_end_index = insert_index + insert_count; - SizeType new_size = storage_view.size + insert_count; + size_type insert_index = + std::distance(const_iterator(storage_view.data), pos); + size_type insert_end_index = insert_index + insert_count; + size_type new_size = storage_view.size + insert_count; if (new_size > storage_view.capacity) { - AllocationTransaction allocation_tx(GetAllocator()); - 
ConstructionTransaction construction_tx(GetAllocator()); - ConstructionTransaction move_construction_tx(GetAllocator()); + AllocationTransaction allocation_tx(GetAllocPtr()); + ConstructionTransaction construction_tx(GetAllocPtr()); + ConstructionTransaction move_construciton_tx(GetAllocPtr()); - IteratorValueAdapter> move_values( - MoveIterator(storage_view.data)); + IteratorValueAdapter move_values( + MoveIterator(storage_view.data)); - SizeType requested_capacity = - ComputeCapacity(storage_view.capacity, new_size); - Pointer new_data = allocation_tx.Allocate(requested_capacity); + size_type new_capacity = ComputeCapacity(storage_view.capacity, new_size); + pointer new_data = allocation_tx.Allocate(new_capacity); - construction_tx.Construct(new_data + insert_index, values, insert_count); + construction_tx.Construct(new_data + insert_index, &values, insert_count); - move_construction_tx.Construct(new_data, move_values, insert_index); + move_construciton_tx.Construct(new_data, &move_values, insert_index); - ConstructElements(GetAllocator(), new_data + insert_end_index, - move_values, storage_view.size - insert_index); + inlined_vector_internal::ConstructElements( + GetAllocPtr(), new_data + insert_end_index, &move_values, + storage_view.size - insert_index); - DestroyAdapter::DestroyElements(GetAllocator(), storage_view.data, - storage_view.size); + inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data, + storage_view.size); - std::move(construction_tx).Commit(); - std::move(move_construction_tx).Commit(); + construction_tx.Commit(); + move_construciton_tx.Commit(); DeallocateIfAllocated(); - SetAllocation(std::move(allocation_tx).Release()); + AcquireAllocatedData(&allocation_tx); SetAllocatedSize(new_size); - return Iterator(new_data + insert_index); + return iterator(new_data + insert_index); } else { - SizeType move_construction_destination_index = + size_type move_construction_destination_index = (std::max)(insert_end_index, 
storage_view.size); - ConstructionTransaction move_construction_tx(GetAllocator()); + ConstructionTransaction move_construction_tx(GetAllocPtr()); - IteratorValueAdapter> move_construction_values( - MoveIterator(storage_view.data + - (move_construction_destination_index - insert_count))); - absl::Span> move_construction = { + IteratorValueAdapter move_construction_values( + MoveIterator(storage_view.data + + (move_construction_destination_index - insert_count))); + absl::Span move_construction = { storage_view.data + move_construction_destination_index, new_size - move_construction_destination_index}; - Pointer move_assignment_values = storage_view.data + insert_index; - absl::Span> move_assignment = { + pointer move_assignment_values = storage_view.data + insert_index; + absl::Span move_assignment = { storage_view.data + insert_end_index, move_construction_destination_index - insert_end_index}; - absl::Span> insert_assignment = {move_assignment_values, - move_construction.size()}; + absl::Span insert_assignment = {move_assignment_values, + move_construction.size()}; - absl::Span> insert_construction = { + absl::Span insert_construction = { insert_assignment.data() + insert_assignment.size(), insert_count - insert_assignment.size()}; move_construction_tx.Construct(move_construction.data(), - move_construction_values, + &move_construction_values, move_construction.size()); - for (Pointer - destination = move_assignment.data() + move_assignment.size(), - last_destination = move_assignment.data(), - source = move_assignment_values + move_assignment.size(); + for (pointer destination = move_assignment.data() + move_assignment.size(), + last_destination = move_assignment.data(), + source = move_assignment_values + move_assignment.size(); ;) { --destination; --source; @@ -715,164 +666,157 @@ auto Storage::Insert(ConstIterator pos, ValueAdapter values, *destination = std::move(*source); } - AssignElements(insert_assignment.data(), values, - insert_assignment.size()); + 
inlined_vector_internal::AssignElements(insert_assignment.data(), &values, + insert_assignment.size()); - ConstructElements(GetAllocator(), insert_construction.data(), values, - insert_construction.size()); + inlined_vector_internal::ConstructElements( + GetAllocPtr(), insert_construction.data(), &values, + insert_construction.size()); - std::move(move_construction_tx).Commit(); + move_construction_tx.Commit(); AddSize(insert_count); - return Iterator(storage_view.data + insert_index); + return iterator(storage_view.data + insert_index); } } template template -auto Storage::EmplaceBack(Args&&... args) -> Reference { - StorageView storage_view = MakeStorageView(); - const SizeType n = storage_view.size; - if (ABSL_PREDICT_TRUE(n != storage_view.capacity)) { - // Fast path; new element fits. - Pointer last_ptr = storage_view.data + n; - AllocatorTraits::construct(GetAllocator(), last_ptr, - std::forward(args)...); - AddSize(1); - return *last_ptr; - } - // TODO(b/173712035): Annotate with musttail attribute to prevent regression. - return EmplaceBackSlow(std::forward(args)...); -} +auto Storage::EmplaceBack(Args&&... args) -> reference { + StorageView storage_view = MakeStorageView(); -template -template -auto Storage::EmplaceBackSlow(Args&&... args) -> Reference { - StorageView storage_view = MakeStorageView(); - AllocationTransaction allocation_tx(GetAllocator()); - IteratorValueAdapter> move_values( - MoveIterator(storage_view.data)); - SizeType requested_capacity = NextCapacity(storage_view.capacity); - Pointer construct_data = allocation_tx.Allocate(requested_capacity); - Pointer last_ptr = construct_data + storage_view.size; - - // Construct new element. - AllocatorTraits::construct(GetAllocator(), last_ptr, - std::forward(args)...); - // Move elements from old backing store to new backing store. 
- ABSL_INTERNAL_TRY { - ConstructElements(GetAllocator(), allocation_tx.GetData(), move_values, - storage_view.size); + AllocationTransaction allocation_tx(GetAllocPtr()); + + IteratorValueAdapter move_values( + MoveIterator(storage_view.data)); + + pointer construct_data; + if (storage_view.size == storage_view.capacity) { + size_type new_capacity = NextCapacity(storage_view.capacity); + construct_data = allocation_tx.Allocate(new_capacity); + } else { + construct_data = storage_view.data; } - ABSL_INTERNAL_CATCH_ANY { - AllocatorTraits::destroy(GetAllocator(), last_ptr); - ABSL_INTERNAL_RETHROW; + + pointer last_ptr = construct_data + storage_view.size; + + AllocatorTraits::construct(*GetAllocPtr(), last_ptr, + std::forward(args)...); + + if (allocation_tx.DidAllocate()) { + ABSL_INTERNAL_TRY { + inlined_vector_internal::ConstructElements( + GetAllocPtr(), allocation_tx.GetData(), &move_values, + storage_view.size); + } + ABSL_INTERNAL_CATCH_ANY { + AllocatorTraits::destroy(*GetAllocPtr(), last_ptr); + ABSL_INTERNAL_RETHROW; + } + + inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data, + storage_view.size); + + DeallocateIfAllocated(); + AcquireAllocatedData(&allocation_tx); + SetIsAllocated(); } - // Destroy elements in old backing store. 
- DestroyAdapter::DestroyElements(GetAllocator(), storage_view.data, - storage_view.size); - DeallocateIfAllocated(); - SetAllocation(std::move(allocation_tx).Release()); - SetIsAllocated(); AddSize(1); return *last_ptr; } template -auto Storage::Erase(ConstIterator from, ConstIterator to) - -> Iterator { - StorageView storage_view = MakeStorageView(); +auto Storage::Erase(const_iterator from, const_iterator to) + -> iterator { + StorageView storage_view = MakeStorageView(); - SizeType erase_size = std::distance(from, to); - SizeType erase_index = - std::distance(ConstIterator(storage_view.data), from); - SizeType erase_end_index = erase_index + erase_size; + size_type erase_size = std::distance(from, to); + size_type erase_index = + std::distance(const_iterator(storage_view.data), from); + size_type erase_end_index = erase_index + erase_size; - IteratorValueAdapter> move_values( - MoveIterator(storage_view.data + erase_end_index)); + IteratorValueAdapter move_values( + MoveIterator(storage_view.data + erase_end_index)); - AssignElements(storage_view.data + erase_index, move_values, - storage_view.size - erase_end_index); + inlined_vector_internal::AssignElements(storage_view.data + erase_index, + &move_values, + storage_view.size - erase_end_index); - DestroyAdapter::DestroyElements( - GetAllocator(), storage_view.data + (storage_view.size - erase_size), + inlined_vector_internal::DestroyElements( + GetAllocPtr(), storage_view.data + (storage_view.size - erase_size), erase_size); SubtractSize(erase_size); - return Iterator(storage_view.data + erase_index); + return iterator(storage_view.data + erase_index); } template -auto Storage::Reserve(SizeType requested_capacity) -> void { - StorageView storage_view = MakeStorageView(); +auto Storage::Reserve(size_type requested_capacity) -> void { + StorageView storage_view = MakeStorageView(); if (ABSL_PREDICT_FALSE(requested_capacity <= storage_view.capacity)) return; - AllocationTransaction allocation_tx(GetAllocator()); 
+ AllocationTransaction allocation_tx(GetAllocPtr()); - IteratorValueAdapter> move_values( - MoveIterator(storage_view.data)); + IteratorValueAdapter move_values( + MoveIterator(storage_view.data)); - SizeType new_requested_capacity = + size_type new_capacity = ComputeCapacity(storage_view.capacity, requested_capacity); - Pointer new_data = allocation_tx.Allocate(new_requested_capacity); + pointer new_data = allocation_tx.Allocate(new_capacity); - ConstructElements(GetAllocator(), new_data, move_values, - storage_view.size); + inlined_vector_internal::ConstructElements(GetAllocPtr(), new_data, + &move_values, storage_view.size); - DestroyAdapter::DestroyElements(GetAllocator(), storage_view.data, - storage_view.size); + inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data, + storage_view.size); DeallocateIfAllocated(); - SetAllocation(std::move(allocation_tx).Release()); + AcquireAllocatedData(&allocation_tx); SetIsAllocated(); } template auto Storage::ShrinkToFit() -> void { // May only be called on allocated instances! - ABSL_HARDENING_ASSERT(GetIsAllocated()); + assert(GetIsAllocated()); - StorageView storage_view{GetAllocatedData(), GetSize(), - GetAllocatedCapacity()}; + StorageView storage_view{GetAllocatedData(), GetSize(), + GetAllocatedCapacity()}; if (ABSL_PREDICT_FALSE(storage_view.size == storage_view.capacity)) return; - AllocationTransaction allocation_tx(GetAllocator()); + AllocationTransaction allocation_tx(GetAllocPtr()); - IteratorValueAdapter> move_values( - MoveIterator(storage_view.data)); + IteratorValueAdapter move_values( + MoveIterator(storage_view.data)); - Pointer construct_data; + pointer construct_data; if (storage_view.size > GetInlinedCapacity()) { - SizeType requested_capacity = storage_view.size; - construct_data = allocation_tx.Allocate(requested_capacity); - if (allocation_tx.GetCapacity() >= storage_view.capacity) { - // Already using the smallest available heap allocation. 
- return; - } + size_type new_capacity = storage_view.size; + construct_data = allocation_tx.Allocate(new_capacity); } else { construct_data = GetInlinedData(); } ABSL_INTERNAL_TRY { - ConstructElements(GetAllocator(), construct_data, move_values, - storage_view.size); + inlined_vector_internal::ConstructElements(GetAllocPtr(), construct_data, + &move_values, storage_view.size); } ABSL_INTERNAL_CATCH_ANY { - SetAllocation({storage_view.data, storage_view.capacity}); + SetAllocatedData(storage_view.data, storage_view.capacity); ABSL_INTERNAL_RETHROW; } - DestroyAdapter::DestroyElements(GetAllocator(), storage_view.data, - storage_view.size); + inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data, + storage_view.size); - MallocAdapter::Deallocate(GetAllocator(), storage_view.data, - storage_view.capacity); + AllocatorTraits::deallocate(*GetAllocPtr(), storage_view.data, + storage_view.capacity); if (allocation_tx.DidAllocate()) { - SetAllocation(std::move(allocation_tx).Release()); + AcquireAllocatedData(&allocation_tx); } else { UnsetIsAllocated(); } @@ -881,7 +825,7 @@ auto Storage::ShrinkToFit() -> void { template auto Storage::Swap(Storage* other_storage_ptr) -> void { using std::swap; - ABSL_HARDENING_ASSERT(this != other_storage_ptr); + assert(this != other_storage_ptr); if (GetIsAllocated() && other_storage_ptr->GetIsAllocated()) { swap(data_.allocated, other_storage_ptr->data_.allocated); @@ -890,20 +834,20 @@ auto Storage::Swap(Storage* other_storage_ptr) -> void { Storage* large_ptr = other_storage_ptr; if (small_ptr->GetSize() > large_ptr->GetSize()) swap(small_ptr, large_ptr); - for (SizeType i = 0; i < small_ptr->GetSize(); ++i) { + for (size_type i = 0; i < small_ptr->GetSize(); ++i) { swap(small_ptr->GetInlinedData()[i], large_ptr->GetInlinedData()[i]); } - IteratorValueAdapter> move_values( - MoveIterator(large_ptr->GetInlinedData() + small_ptr->GetSize())); + IteratorValueAdapter move_values( + 
MoveIterator(large_ptr->GetInlinedData() + small_ptr->GetSize())); - ConstructElements(large_ptr->GetAllocator(), - small_ptr->GetInlinedData() + small_ptr->GetSize(), - move_values, - large_ptr->GetSize() - small_ptr->GetSize()); + inlined_vector_internal::ConstructElements( + large_ptr->GetAllocPtr(), + small_ptr->GetInlinedData() + small_ptr->GetSize(), &move_values, + large_ptr->GetSize() - small_ptr->GetSize()); - DestroyAdapter::DestroyElements( - large_ptr->GetAllocator(), + inlined_vector_internal::DestroyElements( + large_ptr->GetAllocPtr(), large_ptr->GetInlinedData() + small_ptr->GetSize(), large_ptr->GetSize() - small_ptr->GetSize()); } else { @@ -911,41 +855,36 @@ auto Storage::Swap(Storage* other_storage_ptr) -> void { Storage* inlined_ptr = other_storage_ptr; if (!allocated_ptr->GetIsAllocated()) swap(allocated_ptr, inlined_ptr); - StorageView allocated_storage_view{ - allocated_ptr->GetAllocatedData(), allocated_ptr->GetSize(), - allocated_ptr->GetAllocatedCapacity()}; + StorageView allocated_storage_view{allocated_ptr->GetAllocatedData(), + allocated_ptr->GetSize(), + allocated_ptr->GetAllocatedCapacity()}; - IteratorValueAdapter> move_values( - MoveIterator(inlined_ptr->GetInlinedData())); + IteratorValueAdapter move_values( + MoveIterator(inlined_ptr->GetInlinedData())); ABSL_INTERNAL_TRY { - ConstructElements(inlined_ptr->GetAllocator(), - allocated_ptr->GetInlinedData(), move_values, - inlined_ptr->GetSize()); + inlined_vector_internal::ConstructElements( + inlined_ptr->GetAllocPtr(), allocated_ptr->GetInlinedData(), + &move_values, inlined_ptr->GetSize()); } ABSL_INTERNAL_CATCH_ANY { - allocated_ptr->SetAllocation(Allocation{ - allocated_storage_view.data, allocated_storage_view.capacity}); + allocated_ptr->SetAllocatedData(allocated_storage_view.data, + allocated_storage_view.capacity); ABSL_INTERNAL_RETHROW; } - DestroyAdapter::DestroyElements(inlined_ptr->GetAllocator(), - inlined_ptr->GetInlinedData(), - inlined_ptr->GetSize()); + 
inlined_vector_internal::DestroyElements(inlined_ptr->GetAllocPtr(), + inlined_ptr->GetInlinedData(), + inlined_ptr->GetSize()); - inlined_ptr->SetAllocation(Allocation{allocated_storage_view.data, - allocated_storage_view.capacity}); + inlined_ptr->SetAllocatedData(allocated_storage_view.data, + allocated_storage_view.capacity); } swap(GetSizeAndIsAllocated(), other_storage_ptr->GetSizeAndIsAllocated()); - swap(GetAllocator(), other_storage_ptr->GetAllocator()); + swap(*GetAllocPtr(), *other_storage_ptr->GetAllocPtr()); } -// End ignore "array-bounds" -#if !defined(__clang__) && defined(__GNUC__) -#pragma GCC diagnostic pop -#endif - } // namespace inlined_vector_internal ABSL_NAMESPACE_END } // namespace absl diff --git a/abseil-cpp/absl/container/internal/layout.h b/abseil-cpp/absl/container/internal/layout.h index a59a2430..23367833 100644 --- a/abseil-cpp/absl/container/internal/layout.h +++ b/abseil-cpp/absl/container/internal/layout.h @@ -404,7 +404,7 @@ class LayoutImpl, absl::index_sequence, constexpr size_t Offset() const { static_assert(N < NumOffsets, "Index out of bounds"); return adl_barrier::Align( - Offset() + SizeOf>::value * size_[N - 1], + Offset() + SizeOf>() * size_[N - 1], ElementAlignment::value); } @@ -597,7 +597,7 @@ class LayoutImpl, absl::index_sequence, constexpr size_t AllocSize() const { static_assert(NumTypes == NumSizes, "You must specify sizes of all fields"); return Offset() + - SizeOf>::value * size_[NumTypes - 1]; + SizeOf>() * size_[NumTypes - 1]; } // If built with --config=asan, poisons padding bytes (if any) in the @@ -621,7 +621,7 @@ class LayoutImpl, absl::index_sequence, // The `if` is an optimization. It doesn't affect the observable behaviour. 
if (ElementAlignment::value % ElementAlignment::value) { size_t start = - Offset() + SizeOf>::value * size_[N - 1]; + Offset() + SizeOf>() * size_[N - 1]; ASAN_POISON_MEMORY_REGION(p + start, Offset() - start); } #endif @@ -645,7 +645,7 @@ class LayoutImpl, absl::index_sequence, // produce "unsigned*" where another produces "unsigned int *". std::string DebugString() const { const auto offsets = Offsets(); - const size_t sizes[] = {SizeOf>::value...}; + const size_t sizes[] = {SizeOf>()...}; const std::string types[] = { adl_barrier::TypeName>()...}; std::string res = absl::StrCat("@0", types[0], "(", sizes[0], ")"); diff --git a/abseil-cpp/absl/container/internal/layout_benchmark.cc b/abseil-cpp/absl/container/internal/layout_benchmark.cc deleted file mode 100644 index d8636e8d..00000000 --- a/abseil-cpp/absl/container/internal/layout_benchmark.cc +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2018 The Abseil Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Every benchmark should have the same performance as the corresponding -// headroom benchmark. - -#include "absl/base/internal/raw_logging.h" -#include "absl/container/internal/layout.h" -#include "benchmark/benchmark.h" - -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace container_internal { -namespace { - -using ::benchmark::DoNotOptimize; - -using Int128 = int64_t[2]; - -// This benchmark provides the upper bound on performance for BM_OffsetConstant. 
-template -void BM_OffsetConstantHeadroom(benchmark::State& state) { - for (auto _ : state) { - DoNotOptimize(Offset); - } -} - -template -void BM_OffsetConstant(benchmark::State& state) { - using L = Layout; - ABSL_RAW_CHECK(L::Partial(3, 5, 7).template Offset<3>() == Offset, - "Invalid offset"); - for (auto _ : state) { - DoNotOptimize(L::Partial(3, 5, 7).template Offset<3>()); - } -} - -template -size_t VariableOffset(size_t n, size_t m, size_t k); - -template <> -size_t VariableOffset(size_t n, size_t m, - size_t k) { - auto Align = [](size_t n, size_t m) { return (n + m - 1) & ~(m - 1); }; - return Align(Align(Align(n * 1, 2) + m * 2, 4) + k * 4, 8); -} - -template <> -size_t VariableOffset(size_t n, size_t m, - size_t k) { - // No alignment is necessary. - return n * 16 + m * 4 + k * 2; -} - -// This benchmark provides the upper bound on performance for BM_OffsetVariable. -template -void BM_OffsetVariableHeadroom(benchmark::State& state) { - size_t n = 3; - size_t m = 5; - size_t k = 7; - ABSL_RAW_CHECK(VariableOffset(n, m, k) == Offset, "Invalid offset"); - for (auto _ : state) { - DoNotOptimize(n); - DoNotOptimize(m); - DoNotOptimize(k); - DoNotOptimize(VariableOffset(n, m, k)); - } -} - -template -void BM_OffsetVariable(benchmark::State& state) { - using L = Layout; - size_t n = 3; - size_t m = 5; - size_t k = 7; - ABSL_RAW_CHECK(L::Partial(n, m, k).template Offset<3>() == Offset, - "Inavlid offset"); - for (auto _ : state) { - DoNotOptimize(n); - DoNotOptimize(m); - DoNotOptimize(k); - DoNotOptimize(L::Partial(n, m, k).template Offset<3>()); - } -} - -// Run all benchmarks in two modes: -// -// Layout with padding: int8_t[3], int16_t[5], int32_t[7], Int128[?]. -// Layout without padding: Int128[3], int32_t[5], int16_t[7], int8_t[?]. 
- -#define OFFSET_BENCHMARK(NAME, OFFSET, T1, T2, T3, T4) \ - auto& NAME##_##OFFSET##_##T1##_##T2##_##T3##_##T4 = \ - NAME; \ - BENCHMARK(NAME##_##OFFSET##_##T1##_##T2##_##T3##_##T4) - -OFFSET_BENCHMARK(BM_OffsetConstantHeadroom, 48, int8_t, int16_t, int32_t, - Int128); -OFFSET_BENCHMARK(BM_OffsetConstant, 48, int8_t, int16_t, int32_t, Int128); -OFFSET_BENCHMARK(BM_OffsetConstantHeadroom, 82, Int128, int32_t, int16_t, - int8_t); -OFFSET_BENCHMARK(BM_OffsetConstant, 82, Int128, int32_t, int16_t, int8_t); -OFFSET_BENCHMARK(BM_OffsetVariableHeadroom, 48, int8_t, int16_t, int32_t, - Int128); -OFFSET_BENCHMARK(BM_OffsetVariable, 48, int8_t, int16_t, int32_t, Int128); -OFFSET_BENCHMARK(BM_OffsetVariableHeadroom, 82, Int128, int32_t, int16_t, - int8_t); -OFFSET_BENCHMARK(BM_OffsetVariable, 82, Int128, int32_t, int16_t, int8_t); -} // namespace -} // namespace container_internal -ABSL_NAMESPACE_END -} // namespace absl diff --git a/abseil-cpp/absl/container/internal/layout_test.cc b/abseil-cpp/absl/container/internal/layout_test.cc index 54e5d5bb..757272f1 100644 --- a/abseil-cpp/absl/container/internal/layout_test.cc +++ b/abseil-cpp/absl/container/internal/layout_test.cc @@ -128,10 +128,8 @@ TEST(Layout, ElementTypes) { { using L = Layout; SameType, L::ElementTypes>(); - SameType, - decltype(L::Partial())::ElementTypes>(); - SameType, - decltype(L::Partial(0))::ElementTypes>(); + SameType, decltype(L::Partial())::ElementTypes>(); + SameType, decltype(L::Partial(0))::ElementTypes>(); } { using L = Layout; @@ -370,21 +368,18 @@ TEST(Layout, PointerByIndex) { { using L = Layout; EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer<0>(p)))); - EXPECT_EQ(0, - Distance(p, Type(L::Partial(3).Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(3).Pointer<0>(p)))); EXPECT_EQ(0, Distance(p, Type(L(3).Pointer<0>(p)))); } { using L = Layout; EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(3).Pointer<0>(p)))); + 
EXPECT_EQ(12, Distance(p, Type(L::Partial(3).Pointer<1>(p)))); EXPECT_EQ(0, - Distance(p, Type(L::Partial(3).Pointer<0>(p)))); + Distance(p, Type(L::Partial(3, 5).Pointer<0>(p)))); EXPECT_EQ(12, - Distance(p, Type(L::Partial(3).Pointer<1>(p)))); - EXPECT_EQ( - 0, Distance(p, Type(L::Partial(3, 5).Pointer<0>(p)))); - EXPECT_EQ( - 12, Distance(p, Type(L::Partial(3, 5).Pointer<1>(p)))); + Distance(p, Type(L::Partial(3, 5).Pointer<1>(p)))); EXPECT_EQ(0, Distance(p, Type(L(3, 5).Pointer<0>(p)))); EXPECT_EQ(12, Distance(p, Type(L(3, 5).Pointer<1>(p)))); } @@ -392,44 +387,39 @@ TEST(Layout, PointerByIndex) { using L = Layout; EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer<0>(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(0).Pointer<0>(p)))); - EXPECT_EQ(0, - Distance(p, Type(L::Partial(0).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0).Pointer<1>(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(1).Pointer<0>(p)))); - EXPECT_EQ(4, - Distance(p, Type(L::Partial(1).Pointer<1>(p)))); + EXPECT_EQ(4, Distance(p, Type(L::Partial(1).Pointer<1>(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(5).Pointer<0>(p)))); - EXPECT_EQ(8, - Distance(p, Type(L::Partial(5).Pointer<1>(p)))); + EXPECT_EQ(8, Distance(p, Type(L::Partial(5).Pointer<1>(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0).Pointer<0>(p)))); - EXPECT_EQ( - 0, Distance(p, Type(L::Partial(0, 0).Pointer<1>(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(0, 0).Pointer<1>(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0).Pointer<2>(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(1, 0).Pointer<0>(p)))); - EXPECT_EQ( - 4, Distance(p, Type(L::Partial(1, 0).Pointer<1>(p)))); + EXPECT_EQ(4, + Distance(p, Type(L::Partial(1, 0).Pointer<1>(p)))); EXPECT_EQ(8, Distance(p, Type(L::Partial(1, 0).Pointer<2>(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(5, 3).Pointer<0>(p)))); - EXPECT_EQ( - 8, Distance(p, Type(L::Partial(5, 3).Pointer<1>(p)))); + EXPECT_EQ(8, + Distance(p, Type(L::Partial(5, 
3).Pointer<1>(p)))); EXPECT_EQ(24, Distance(p, Type(L::Partial(5, 3).Pointer<2>(p)))); EXPECT_EQ( 0, Distance(p, Type(L::Partial(0, 0, 0).Pointer<0>(p)))); EXPECT_EQ( - 0, - Distance(p, Type(L::Partial(0, 0, 0).Pointer<1>(p)))); + 0, Distance(p, Type(L::Partial(0, 0, 0).Pointer<1>(p)))); EXPECT_EQ( 0, Distance(p, Type(L::Partial(0, 0, 0).Pointer<2>(p)))); EXPECT_EQ( 0, Distance(p, Type(L::Partial(1, 0, 0).Pointer<0>(p)))); EXPECT_EQ( - 4, - Distance(p, Type(L::Partial(1, 0, 0).Pointer<1>(p)))); + 4, Distance(p, Type(L::Partial(1, 0, 0).Pointer<1>(p)))); EXPECT_EQ( 8, Distance(p, Type(L::Partial(1, 0, 0).Pointer<2>(p)))); EXPECT_EQ( @@ -438,8 +428,7 @@ TEST(Layout, PointerByIndex) { 24, Distance(p, Type(L::Partial(5, 3, 1).Pointer<2>(p)))); EXPECT_EQ( - 8, - Distance(p, Type(L::Partial(5, 3, 1).Pointer<1>(p)))); + 8, Distance(p, Type(L::Partial(5, 3, 1).Pointer<1>(p)))); EXPECT_EQ(0, Distance(p, Type(L(5, 3, 1).Pointer<0>(p)))); EXPECT_EQ(24, Distance(p, Type(L(5, 3, 1).Pointer<2>(p)))); EXPECT_EQ(8, Distance(p, Type(L(5, 3, 1).Pointer<1>(p)))); @@ -450,78 +439,75 @@ TEST(Layout, PointerByType) { alignas(max_align_t) const unsigned char p[100] = {}; { using L = Layout; - EXPECT_EQ( - 0, Distance(p, Type(L::Partial().Pointer(p)))); - EXPECT_EQ( - 0, - Distance(p, Type(L::Partial(3).Pointer(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial().Pointer(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(3).Pointer(p)))); EXPECT_EQ(0, Distance(p, Type(L(3).Pointer(p)))); } { using L = Layout; + EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(0).Pointer(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(0).Pointer(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(1).Pointer(p)))); + EXPECT_EQ(4, + Distance(p, Type(L::Partial(1).Pointer(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(5).Pointer(p)))); + EXPECT_EQ(8, + Distance(p, Type(L::Partial(5).Pointer(p)))); EXPECT_EQ( - 0, Distance(p, 
Type(L::Partial().Pointer(p)))); + 0, Distance(p, Type(L::Partial(0, 0).Pointer(p)))); EXPECT_EQ( - 0, Distance(p, Type(L::Partial(0).Pointer(p)))); + 0, Distance(p, Type(L::Partial(0, 0).Pointer(p)))); EXPECT_EQ( 0, - Distance(p, Type(L::Partial(0).Pointer(p)))); - EXPECT_EQ( - 0, Distance(p, Type(L::Partial(1).Pointer(p)))); + Distance(p, Type(L::Partial(0, 0).Pointer(p)))); EXPECT_EQ( - 4, - Distance(p, Type(L::Partial(1).Pointer(p)))); + 0, Distance(p, Type(L::Partial(1, 0).Pointer(p)))); EXPECT_EQ( - 0, Distance(p, Type(L::Partial(5).Pointer(p)))); + 4, Distance(p, Type(L::Partial(1, 0).Pointer(p)))); EXPECT_EQ( 8, - Distance(p, Type(L::Partial(5).Pointer(p)))); - EXPECT_EQ( - 0, - Distance(p, Type(L::Partial(0, 0).Pointer(p)))); - EXPECT_EQ(0, Distance(p, Type( - L::Partial(0, 0).Pointer(p)))); + Distance(p, Type(L::Partial(1, 0).Pointer(p)))); EXPECT_EQ( - 0, - Distance(p, Type(L::Partial(0, 0).Pointer(p)))); + 0, Distance(p, Type(L::Partial(5, 3).Pointer(p)))); EXPECT_EQ( - 0, - Distance(p, Type(L::Partial(1, 0).Pointer(p)))); - EXPECT_EQ(4, Distance(p, Type( - L::Partial(1, 0).Pointer(p)))); + 8, Distance(p, Type(L::Partial(5, 3).Pointer(p)))); EXPECT_EQ( - 8, - Distance(p, Type(L::Partial(1, 0).Pointer(p)))); + 24, + Distance(p, Type(L::Partial(5, 3).Pointer(p)))); EXPECT_EQ( 0, - Distance(p, Type(L::Partial(5, 3).Pointer(p)))); - EXPECT_EQ(8, Distance(p, Type( - L::Partial(5, 3).Pointer(p)))); + Distance(p, Type(L::Partial(0, 0, 0).Pointer(p)))); EXPECT_EQ( - 24, - Distance(p, Type(L::Partial(5, 3).Pointer(p)))); - EXPECT_EQ(0, Distance(p, Type( - L::Partial(0, 0, 0).Pointer(p)))); - EXPECT_EQ(0, Distance(p, Type( - L::Partial(0, 0, 0).Pointer(p)))); + 0, + Distance(p, Type(L::Partial(0, 0, 0).Pointer(p)))); EXPECT_EQ(0, Distance(p, Type( L::Partial(0, 0, 0).Pointer(p)))); - EXPECT_EQ(0, Distance(p, Type( - L::Partial(1, 0, 0).Pointer(p)))); - EXPECT_EQ(4, Distance(p, Type( - L::Partial(1, 0, 0).Pointer(p)))); + EXPECT_EQ( + 0, + Distance(p, 
Type(L::Partial(1, 0, 0).Pointer(p)))); + EXPECT_EQ( + 4, + Distance(p, Type(L::Partial(1, 0, 0).Pointer(p)))); EXPECT_EQ(8, Distance(p, Type( L::Partial(1, 0, 0).Pointer(p)))); - EXPECT_EQ(0, Distance(p, Type( - L::Partial(5, 3, 1).Pointer(p)))); + EXPECT_EQ( + 0, + Distance(p, Type(L::Partial(5, 3, 1).Pointer(p)))); EXPECT_EQ(24, Distance(p, Type( L::Partial(5, 3, 1).Pointer(p)))); - EXPECT_EQ(8, Distance(p, Type( - L::Partial(5, 3, 1).Pointer(p)))); + EXPECT_EQ( + 8, + Distance(p, Type(L::Partial(5, 3, 1).Pointer(p)))); EXPECT_EQ(24, Distance(p, Type(L(5, 3, 1).Pointer(p)))); - EXPECT_EQ( - 8, Distance(p, Type(L(5, 3, 1).Pointer(p)))); + EXPECT_EQ(8, Distance(p, Type(L(5, 3, 1).Pointer(p)))); } } @@ -562,18 +548,15 @@ TEST(Layout, MutablePointerByIndex) { EXPECT_EQ(8, Distance(p, Type(L::Partial(5, 3).Pointer<1>(p)))); EXPECT_EQ(24, Distance(p, Type(L::Partial(5, 3).Pointer<2>(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0, 0).Pointer<0>(p)))); - EXPECT_EQ(0, - Distance(p, Type(L::Partial(0, 0, 0).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0, 0).Pointer<1>(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0, 0).Pointer<2>(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(1, 0, 0).Pointer<0>(p)))); - EXPECT_EQ(4, - Distance(p, Type(L::Partial(1, 0, 0).Pointer<1>(p)))); + EXPECT_EQ(4, Distance(p, Type(L::Partial(1, 0, 0).Pointer<1>(p)))); EXPECT_EQ(8, Distance(p, Type(L::Partial(1, 0, 0).Pointer<2>(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(5, 3, 1).Pointer<0>(p)))); EXPECT_EQ(24, Distance(p, Type(L::Partial(5, 3, 1).Pointer<2>(p)))); - EXPECT_EQ(8, - Distance(p, Type(L::Partial(5, 3, 1).Pointer<1>(p)))); + EXPECT_EQ(8, Distance(p, Type(L::Partial(5, 3, 1).Pointer<1>(p)))); EXPECT_EQ(0, Distance(p, Type(L(5, 3, 1).Pointer<0>(p)))); EXPECT_EQ(24, Distance(p, Type(L(5, 3, 1).Pointer<2>(p)))); EXPECT_EQ(8, Distance(p, Type(L(5, 3, 1).Pointer<1>(p)))); @@ -585,61 +568,48 @@ TEST(Layout, MutablePointerByType) { { using L = Layout; 
EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer(p)))); - EXPECT_EQ(0, - Distance(p, Type(L::Partial(3).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(3).Pointer(p)))); EXPECT_EQ(0, Distance(p, Type(L(3).Pointer(p)))); } { using L = Layout; EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(0).Pointer(p)))); - EXPECT_EQ(0, - Distance(p, Type(L::Partial(0).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0).Pointer(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(1).Pointer(p)))); - EXPECT_EQ(4, - Distance(p, Type(L::Partial(1).Pointer(p)))); + EXPECT_EQ(4, Distance(p, Type(L::Partial(1).Pointer(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(5).Pointer(p)))); - EXPECT_EQ(8, - Distance(p, Type(L::Partial(5).Pointer(p)))); - EXPECT_EQ(0, - Distance(p, Type(L::Partial(0, 0).Pointer(p)))); - EXPECT_EQ( - 0, Distance(p, Type(L::Partial(0, 0).Pointer(p)))); + EXPECT_EQ(8, Distance(p, Type(L::Partial(5).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0).Pointer(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0).Pointer(p)))); - EXPECT_EQ(0, - Distance(p, Type(L::Partial(1, 0).Pointer(p)))); - EXPECT_EQ( - 4, Distance(p, Type(L::Partial(1, 0).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(1, 0).Pointer(p)))); + EXPECT_EQ(4, Distance(p, Type(L::Partial(1, 0).Pointer(p)))); EXPECT_EQ(8, Distance(p, Type(L::Partial(1, 0).Pointer(p)))); - EXPECT_EQ(0, - Distance(p, Type(L::Partial(5, 3).Pointer(p)))); - EXPECT_EQ( - 8, Distance(p, Type(L::Partial(5, 3).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(5, 3).Pointer(p)))); + EXPECT_EQ(8, Distance(p, Type(L::Partial(5, 3).Pointer(p)))); EXPECT_EQ(24, Distance(p, Type(L::Partial(5, 3).Pointer(p)))); - EXPECT_EQ( - 0, Distance(p, Type(L::Partial(0, 0, 0).Pointer(p)))); - EXPECT_EQ( - 0, - Distance(p, Type(L::Partial(0, 0, 0).Pointer(p)))); + EXPECT_EQ(0, + 
Distance(p, Type(L::Partial(0, 0, 0).Pointer(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(0, 0, 0).Pointer(p)))); EXPECT_EQ( 0, Distance(p, Type(L::Partial(0, 0, 0).Pointer(p)))); - EXPECT_EQ( - 0, Distance(p, Type(L::Partial(1, 0, 0).Pointer(p)))); - EXPECT_EQ( - 4, - Distance(p, Type(L::Partial(1, 0, 0).Pointer(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(1, 0, 0).Pointer(p)))); + EXPECT_EQ(4, + Distance(p, Type(L::Partial(1, 0, 0).Pointer(p)))); EXPECT_EQ( 8, Distance(p, Type(L::Partial(1, 0, 0).Pointer(p)))); - EXPECT_EQ( - 0, Distance(p, Type(L::Partial(5, 3, 1).Pointer(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(5, 3, 1).Pointer(p)))); EXPECT_EQ( 24, Distance(p, Type(L::Partial(5, 3, 1).Pointer(p)))); - EXPECT_EQ( - 8, - Distance(p, Type(L::Partial(5, 3, 1).Pointer(p)))); + EXPECT_EQ(8, + Distance(p, Type(L::Partial(5, 3, 1).Pointer(p)))); EXPECT_EQ(0, Distance(p, Type(L(5, 3, 1).Pointer(p)))); EXPECT_EQ(24, Distance(p, Type(L(5, 3, 1).Pointer(p)))); EXPECT_EQ(8, Distance(p, Type(L(5, 3, 1).Pointer(p)))); @@ -820,72 +790,67 @@ TEST(Layout, SliceByIndexData) { { using L = Layout; EXPECT_EQ( - 0, Distance( - p, Type>(L::Partial(0).Slice<0>(p)).data())); + 0, + Distance(p, Type>(L::Partial(0).Slice<0>(p)).data())); EXPECT_EQ( - 0, Distance( - p, Type>(L::Partial(3).Slice<0>(p)).data())); - EXPECT_EQ(0, - Distance(p, Type>(L(3).Slice<0>(p)).data())); + 0, + Distance(p, Type>(L::Partial(3).Slice<0>(p)).data())); + EXPECT_EQ(0, Distance(p, Type>(L(3).Slice<0>(p)).data())); } { using L = Layout; EXPECT_EQ( - 0, Distance( - p, Type>(L::Partial(3).Slice<0>(p)).data())); + 0, + Distance(p, Type>(L::Partial(3).Slice<0>(p)).data())); EXPECT_EQ( 0, - Distance( - p, Type>(L::Partial(3, 5).Slice<0>(p)).data())); + Distance(p, + Type>(L::Partial(3, 5).Slice<0>(p)).data())); EXPECT_EQ( 12, - Distance( - p, Type>(L::Partial(3, 5).Slice<1>(p)).data())); - EXPECT_EQ( - 0, Distance(p, Type>(L(3, 5).Slice<0>(p)).data())); - EXPECT_EQ( - 12, Distance(p, 
Type>(L(3, 5).Slice<1>(p)).data())); + Distance(p, + Type>(L::Partial(3, 5).Slice<1>(p)).data())); + EXPECT_EQ(0, + Distance(p, Type>(L(3, 5).Slice<0>(p)).data())); + EXPECT_EQ(12, + Distance(p, Type>(L(3, 5).Slice<1>(p)).data())); } { using L = Layout; EXPECT_EQ( - 0, Distance( - p, Type>(L::Partial(0).Slice<0>(p)).data())); - EXPECT_EQ( - 0, Distance( - p, Type>(L::Partial(1).Slice<0>(p)).data())); - EXPECT_EQ( - 0, Distance( - p, Type>(L::Partial(5).Slice<0>(p)).data())); + 0, + Distance(p, Type>(L::Partial(0).Slice<0>(p)).data())); EXPECT_EQ( 0, - Distance( - p, Type>(L::Partial(0, 0).Slice<0>(p)).data())); + Distance(p, Type>(L::Partial(1).Slice<0>(p)).data())); EXPECT_EQ( 0, - Distance( - p, Type>(L::Partial(0, 0).Slice<1>(p)).data())); + Distance(p, Type>(L::Partial(5).Slice<0>(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(0, 0).Slice<0>(p)).data())); EXPECT_EQ( 0, - Distance( - p, Type>(L::Partial(1, 0).Slice<0>(p)).data())); + Distance(p, + Type>(L::Partial(0, 0).Slice<1>(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(1, 0).Slice<0>(p)).data())); EXPECT_EQ( 4, - Distance( - p, Type>(L::Partial(1, 0).Slice<1>(p)).data())); + Distance(p, + Type>(L::Partial(1, 0).Slice<1>(p)).data())); EXPECT_EQ( - 0, - Distance( - p, Type>(L::Partial(5, 3).Slice<0>(p)).data())); + 0, Distance( + p, Type>(L::Partial(5, 3).Slice<0>(p)).data())); EXPECT_EQ( 8, - Distance( - p, Type>(L::Partial(5, 3).Slice<1>(p)).data())); + Distance(p, + Type>(L::Partial(5, 3).Slice<1>(p)).data())); EXPECT_EQ( 0, Distance( - p, - Type>(L::Partial(0, 0, 0).Slice<0>(p)).data())); + p, Type>(L::Partial(0, 0, 0).Slice<0>(p)).data())); EXPECT_EQ( 0, Distance( @@ -899,8 +864,7 @@ TEST(Layout, SliceByIndexData) { EXPECT_EQ( 0, Distance( - p, - Type>(L::Partial(1, 0, 0).Slice<0>(p)).data())); + p, Type>(L::Partial(1, 0, 0).Slice<0>(p)).data())); EXPECT_EQ( 4, Distance( @@ -914,8 +878,7 @@ TEST(Layout, SliceByIndexData) { EXPECT_EQ( 0, Distance( - p, - 
Type>(L::Partial(5, 3, 1).Slice<0>(p)).data())); + p, Type>(L::Partial(5, 3, 1).Slice<0>(p)).data())); EXPECT_EQ( 24, Distance( @@ -927,14 +890,12 @@ TEST(Layout, SliceByIndexData) { p, Type>(L::Partial(5, 3, 1).Slice<1>(p)).data())); EXPECT_EQ( - 0, - Distance(p, Type>(L(5, 3, 1).Slice<0>(p)).data())); + 0, Distance(p, Type>(L(5, 3, 1).Slice<0>(p)).data())); EXPECT_EQ( 24, Distance(p, Type>(L(5, 3, 1).Slice<2>(p)).data())); EXPECT_EQ( - 8, - Distance(p, Type>(L(5, 3, 1).Slice<1>(p)).data())); + 8, Distance(p, Type>(L(5, 3, 1).Slice<1>(p)).data())); } } @@ -945,94 +906,98 @@ TEST(Layout, SliceByTypeData) { EXPECT_EQ( 0, Distance( - p, - Type>(L::Partial(0).Slice(p)).data())); + p, Type>(L::Partial(0).Slice(p)).data())); EXPECT_EQ( 0, Distance( - p, - Type>(L::Partial(3).Slice(p)).data())); + p, Type>(L::Partial(3).Slice(p)).data())); EXPECT_EQ( - 0, - Distance(p, Type>(L(3).Slice(p)).data())); + 0, Distance(p, Type>(L(3).Slice(p)).data())); } { using L = Layout; + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(0).Slice(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(1).Slice(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(5).Slice(p)).data())); EXPECT_EQ( 0, Distance( - p, - Type>(L::Partial(0).Slice(p)).data())); + p, Type>(L::Partial(0, 0).Slice(p)).data())); EXPECT_EQ( 0, Distance( p, - Type>(L::Partial(1).Slice(p)).data())); + Type>(L::Partial(0, 0).Slice(p)).data())); EXPECT_EQ( 0, + Distance( + p, Type>(L::Partial(1, 0).Slice(p)).data())); + EXPECT_EQ( + 4, Distance( p, - Type>(L::Partial(5).Slice(p)).data())); + Type>(L::Partial(1, 0).Slice(p)).data())); EXPECT_EQ( 0, - Distance(p, Type>(L::Partial(0, 0).Slice(p)) - .data())); - EXPECT_EQ(0, Distance(p, Type>( - L::Partial(0, 0).Slice(p)) - .data())); + Distance( + p, Type>(L::Partial(5, 3).Slice(p)).data())); + EXPECT_EQ( + 8, + Distance( + p, + Type>(L::Partial(5, 3).Slice(p)).data())); EXPECT_EQ( 0, - Distance(p, Type>(L::Partial(1, 0).Slice(p)) - .data())); - EXPECT_EQ(4, 
Distance(p, Type>( - L::Partial(1, 0).Slice(p)) - .data())); + Distance( + p, + Type>(L::Partial(0, 0, 0).Slice(p)).data())); EXPECT_EQ( 0, - Distance(p, Type>(L::Partial(5, 3).Slice(p)) + Distance(p, Type>(L::Partial(0, 0, 0).Slice(p)) .data())); - EXPECT_EQ(8, Distance(p, Type>( - L::Partial(5, 3).Slice(p)) - .data())); - EXPECT_EQ(0, Distance(p, Type>( - L::Partial(0, 0, 0).Slice(p)) - .data())); - EXPECT_EQ(0, Distance(p, Type>( - L::Partial(0, 0, 0).Slice(p)) - .data())); EXPECT_EQ(0, Distance(p, Type>( L::Partial(0, 0, 0).Slice(p)) .data())); - EXPECT_EQ(0, Distance(p, Type>( - L::Partial(1, 0, 0).Slice(p)) - .data())); - EXPECT_EQ(4, Distance(p, Type>( - L::Partial(1, 0, 0).Slice(p)) - .data())); + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(1, 0, 0).Slice(p)).data())); + EXPECT_EQ( + 4, + Distance(p, Type>(L::Partial(1, 0, 0).Slice(p)) + .data())); EXPECT_EQ(8, Distance(p, Type>( L::Partial(1, 0, 0).Slice(p)) .data())); - EXPECT_EQ(0, Distance(p, Type>( - L::Partial(5, 3, 1).Slice(p)) - .data())); + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(5, 3, 1).Slice(p)).data())); EXPECT_EQ(24, Distance(p, Type>( L::Partial(5, 3, 1).Slice(p)) .data())); - EXPECT_EQ(8, Distance(p, Type>( - L::Partial(5, 3, 1).Slice(p)) - .data())); + EXPECT_EQ( + 8, + Distance(p, Type>(L::Partial(5, 3, 1).Slice(p)) + .data())); EXPECT_EQ( 0, - Distance(p, - Type>(L(5, 3, 1).Slice(p)).data())); + Distance(p, Type>(L(5, 3, 1).Slice(p)).data())); EXPECT_EQ( 24, Distance(p, Type>(L(5, 3, 1).Slice(p)).data())); EXPECT_EQ( - 8, - Distance( - p, Type>(L(5, 3, 1).Slice(p)).data())); + 8, Distance( + p, Type>(L(5, 3, 1).Slice(p)).data())); } } @@ -1040,19 +1005,18 @@ TEST(Layout, MutableSliceByIndexData) { alignas(max_align_t) unsigned char p[100]; { using L = Layout; - EXPECT_EQ( - 0, Distance(p, Type>(L::Partial(0).Slice<0>(p)).data())); - EXPECT_EQ( - 0, Distance(p, Type>(L::Partial(3).Slice<0>(p)).data())); + EXPECT_EQ(0, + Distance(p, 
Type>(L::Partial(0).Slice<0>(p)).data())); + EXPECT_EQ(0, + Distance(p, Type>(L::Partial(3).Slice<0>(p)).data())); EXPECT_EQ(0, Distance(p, Type>(L(3).Slice<0>(p)).data())); } { using L = Layout; + EXPECT_EQ(0, + Distance(p, Type>(L::Partial(3).Slice<0>(p)).data())); EXPECT_EQ( - 0, Distance(p, Type>(L::Partial(3).Slice<0>(p)).data())); - EXPECT_EQ( - 0, - Distance(p, Type>(L::Partial(3, 5).Slice<0>(p)).data())); + 0, Distance(p, Type>(L::Partial(3, 5).Slice<0>(p)).data())); EXPECT_EQ( 12, Distance(p, Type>(L::Partial(3, 5).Slice<1>(p)).data())); @@ -1061,63 +1025,55 @@ TEST(Layout, MutableSliceByIndexData) { } { using L = Layout; + EXPECT_EQ(0, + Distance(p, Type>(L::Partial(0).Slice<0>(p)).data())); + EXPECT_EQ(0, + Distance(p, Type>(L::Partial(1).Slice<0>(p)).data())); + EXPECT_EQ(0, + Distance(p, Type>(L::Partial(5).Slice<0>(p)).data())); EXPECT_EQ( - 0, Distance(p, Type>(L::Partial(0).Slice<0>(p)).data())); - EXPECT_EQ( - 0, Distance(p, Type>(L::Partial(1).Slice<0>(p)).data())); + 0, Distance(p, Type>(L::Partial(0, 0).Slice<0>(p)).data())); EXPECT_EQ( - 0, Distance(p, Type>(L::Partial(5).Slice<0>(p)).data())); + 0, Distance(p, Type>(L::Partial(0, 0).Slice<1>(p)).data())); EXPECT_EQ( - 0, - Distance(p, Type>(L::Partial(0, 0).Slice<0>(p)).data())); + 0, Distance(p, Type>(L::Partial(1, 0).Slice<0>(p)).data())); EXPECT_EQ( - 0, - Distance(p, Type>(L::Partial(0, 0).Slice<1>(p)).data())); + 4, Distance(p, Type>(L::Partial(1, 0).Slice<1>(p)).data())); EXPECT_EQ( - 0, - Distance(p, Type>(L::Partial(1, 0).Slice<0>(p)).data())); + 0, Distance(p, Type>(L::Partial(5, 3).Slice<0>(p)).data())); EXPECT_EQ( - 4, - Distance(p, Type>(L::Partial(1, 0).Slice<1>(p)).data())); + 8, Distance(p, Type>(L::Partial(5, 3).Slice<1>(p)).data())); EXPECT_EQ( 0, - Distance(p, Type>(L::Partial(5, 3).Slice<0>(p)).data())); + Distance(p, Type>(L::Partial(0, 0, 0).Slice<0>(p)).data())); EXPECT_EQ( - 8, - Distance(p, Type>(L::Partial(5, 3).Slice<1>(p)).data())); - EXPECT_EQ( - 0, Distance( - p, 
Type>(L::Partial(0, 0, 0).Slice<0>(p)).data())); - EXPECT_EQ( - 0, Distance( - p, Type>(L::Partial(0, 0, 0).Slice<1>(p)).data())); + 0, + Distance(p, Type>(L::Partial(0, 0, 0).Slice<1>(p)).data())); EXPECT_EQ( 0, Distance( p, Type>(L::Partial(0, 0, 0).Slice<2>(p)).data())); EXPECT_EQ( - 0, Distance( - p, Type>(L::Partial(1, 0, 0).Slice<0>(p)).data())); + 0, + Distance(p, Type>(L::Partial(1, 0, 0).Slice<0>(p)).data())); EXPECT_EQ( - 4, Distance( - p, Type>(L::Partial(1, 0, 0).Slice<1>(p)).data())); + 4, + Distance(p, Type>(L::Partial(1, 0, 0).Slice<1>(p)).data())); EXPECT_EQ( 8, Distance( p, Type>(L::Partial(1, 0, 0).Slice<2>(p)).data())); EXPECT_EQ( - 0, Distance( - p, Type>(L::Partial(5, 3, 1).Slice<0>(p)).data())); + 0, + Distance(p, Type>(L::Partial(5, 3, 1).Slice<0>(p)).data())); EXPECT_EQ( 24, Distance( p, Type>(L::Partial(5, 3, 1).Slice<2>(p)).data())); EXPECT_EQ( - 8, Distance( - p, Type>(L::Partial(5, 3, 1).Slice<1>(p)).data())); - EXPECT_EQ(0, - Distance(p, Type>(L(5, 3, 1).Slice<0>(p)).data())); + 8, + Distance(p, Type>(L::Partial(5, 3, 1).Slice<1>(p)).data())); + EXPECT_EQ(0, Distance(p, Type>(L(5, 3, 1).Slice<0>(p)).data())); EXPECT_EQ(24, Distance(p, Type>(L(5, 3, 1).Slice<2>(p)).data())); - EXPECT_EQ(8, - Distance(p, Type>(L(5, 3, 1).Slice<1>(p)).data())); + EXPECT_EQ(8, Distance(p, Type>(L(5, 3, 1).Slice<1>(p)).data())); } } @@ -1126,84 +1082,66 @@ TEST(Layout, MutableSliceByTypeData) { { using L = Layout; EXPECT_EQ( - 0, Distance( - p, Type>(L::Partial(0).Slice(p)).data())); + 0, + Distance(p, Type>(L::Partial(0).Slice(p)).data())); EXPECT_EQ( - 0, Distance( - p, Type>(L::Partial(3).Slice(p)).data())); - EXPECT_EQ(0, - Distance(p, Type>(L(3).Slice(p)).data())); + 0, + Distance(p, Type>(L::Partial(3).Slice(p)).data())); + EXPECT_EQ(0, Distance(p, Type>(L(3).Slice(p)).data())); } { using L = Layout; EXPECT_EQ( - 0, - Distance(p, Type>(L::Partial(0).Slice(p)).data())); + 0, Distance(p, Type>(L::Partial(0).Slice(p)).data())); EXPECT_EQ( - 0, - 
Distance(p, Type>(L::Partial(1).Slice(p)).data())); + 0, Distance(p, Type>(L::Partial(1).Slice(p)).data())); EXPECT_EQ( - 0, - Distance(p, Type>(L::Partial(5).Slice(p)).data())); + 0, Distance(p, Type>(L::Partial(5).Slice(p)).data())); EXPECT_EQ( 0, - Distance(p, - Type>(L::Partial(0, 0).Slice(p)).data())); + Distance(p, Type>(L::Partial(0, 0).Slice(p)).data())); EXPECT_EQ( - 0, - Distance( - p, Type>(L::Partial(0, 0).Slice(p)).data())); + 0, Distance( + p, Type>(L::Partial(0, 0).Slice(p)).data())); EXPECT_EQ( 0, - Distance(p, - Type>(L::Partial(1, 0).Slice(p)).data())); + Distance(p, Type>(L::Partial(1, 0).Slice(p)).data())); EXPECT_EQ( - 4, - Distance( - p, Type>(L::Partial(1, 0).Slice(p)).data())); + 4, Distance( + p, Type>(L::Partial(1, 0).Slice(p)).data())); EXPECT_EQ( 0, - Distance(p, - Type>(L::Partial(5, 3).Slice(p)).data())); + Distance(p, Type>(L::Partial(5, 3).Slice(p)).data())); EXPECT_EQ( - 8, - Distance( - p, Type>(L::Partial(5, 3).Slice(p)).data())); + 8, Distance( + p, Type>(L::Partial(5, 3).Slice(p)).data())); EXPECT_EQ( - 0, - Distance( - p, - Type>(L::Partial(0, 0, 0).Slice(p)).data())); + 0, Distance( + p, Type>(L::Partial(0, 0, 0).Slice(p)).data())); EXPECT_EQ( 0, Distance( - p, - Type>(L::Partial(0, 0, 0).Slice(p)).data())); + p, Type>(L::Partial(0, 0, 0).Slice(p)).data())); EXPECT_EQ( 0, Distance( p, Type>(L::Partial(0, 0, 0).Slice(p)).data())); EXPECT_EQ( - 0, - Distance( - p, - Type>(L::Partial(1, 0, 0).Slice(p)).data())); + 0, Distance( + p, Type>(L::Partial(1, 0, 0).Slice(p)).data())); EXPECT_EQ( 4, Distance( - p, - Type>(L::Partial(1, 0, 0).Slice(p)).data())); + p, Type>(L::Partial(1, 0, 0).Slice(p)).data())); EXPECT_EQ( 8, Distance( p, Type>(L::Partial(1, 0, 0).Slice(p)).data())); EXPECT_EQ( - 0, - Distance( - p, - Type>(L::Partial(5, 3, 1).Slice(p)).data())); + 0, Distance( + p, Type>(L::Partial(5, 3, 1).Slice(p)).data())); EXPECT_EQ( 24, Distance( @@ -1212,16 +1150,14 @@ TEST(Layout, MutableSliceByTypeData) { EXPECT_EQ( 8, Distance( - 
p, - Type>(L::Partial(5, 3, 1).Slice(p)).data())); - EXPECT_EQ( - 0, Distance(p, Type>(L(5, 3, 1).Slice(p)).data())); + p, Type>(L::Partial(5, 3, 1).Slice(p)).data())); + EXPECT_EQ(0, + Distance(p, Type>(L(5, 3, 1).Slice(p)).data())); EXPECT_EQ( 24, Distance(p, Type>(L(5, 3, 1).Slice(p)).data())); EXPECT_EQ( - 8, - Distance(p, Type>(L(5, 3, 1).Slice(p)).data())); + 8, Distance(p, Type>(L(5, 3, 1).Slice(p)).data())); } } @@ -1320,17 +1256,17 @@ TEST(Layout, MutableSlices) { } { const auto x = L::Partial(1, 2, 3); - EXPECT_THAT((Type, Span, Span>>( - x.Slices(p))), - Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)), - IsSameSlice(x.Slice<2>(p)))); + EXPECT_THAT( + (Type, Span, Span>>(x.Slices(p))), + Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)), + IsSameSlice(x.Slice<2>(p)))); } { const L x(1, 2, 3); - EXPECT_THAT((Type, Span, Span>>( - x.Slices(p))), - Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)), - IsSameSlice(x.Slice<2>(p)))); + EXPECT_THAT( + (Type, Span, Span>>(x.Slices(p))), + Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)), + IsSameSlice(x.Slice<2>(p)))); } } @@ -1350,13 +1286,7 @@ TEST(Layout, CustomAlignment) { TEST(Layout, OverAligned) { constexpr size_t M = alignof(max_align_t); constexpr Layout> x(1, 3); -#ifdef __GNUC__ - // Using __attribute__ ((aligned ())) instead of alignas to bypass a gcc bug: - // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=89357 - __attribute__((aligned(2 * M))) unsigned char p[x.AllocSize()]; -#else alignas(2 * M) unsigned char p[x.AllocSize()]; -#endif EXPECT_EQ(2 * M + 3, x.AllocSize()); EXPECT_THAT(x.Pointers(p), Tuple(p + 0, p + 2 * M)); } @@ -1468,8 +1398,7 @@ TEST(Layout, DebugString) { x.DebugString()); } { - constexpr auto x = - Layout::Partial(1, 2, 3); + constexpr auto x = Layout::Partial(1, 2, 3); EXPECT_EQ( "@0(1)[1]; @4(4)[2]; @12(1)[3]; " "@16" + @@ -1477,8 +1406,7 @@ TEST(Layout, DebugString) { x.DebugString()); } { - constexpr auto x = - Layout::Partial(1, 
2, 3, 4); + constexpr auto x = Layout::Partial(1, 2, 3, 4); EXPECT_EQ( "@0(1)[1]; @4(4)[2]; @12(1)[3]; " "@16" + diff --git a/abseil-cpp/absl/container/internal/node_slot_policy.h b/abseil-cpp/absl/container/internal/node_hash_policy.h similarity index 93% rename from abseil-cpp/absl/container/internal/node_slot_policy.h rename to abseil-cpp/absl/container/internal/node_hash_policy.h index baba5743..4617162f 100644 --- a/abseil-cpp/absl/container/internal/node_slot_policy.h +++ b/abseil-cpp/absl/container/internal/node_hash_policy.h @@ -30,8 +30,8 @@ // It may also optionally define `value()` and `apply()`. For documentation on // these, see hash_policy_traits.h. -#ifndef ABSL_CONTAINER_INTERNAL_NODE_SLOT_POLICY_H_ -#define ABSL_CONTAINER_INTERNAL_NODE_SLOT_POLICY_H_ +#ifndef ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_ +#define ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_ #include #include @@ -46,7 +46,7 @@ ABSL_NAMESPACE_BEGIN namespace container_internal { template -struct node_slot_policy { +struct node_hash_policy { static_assert(std::is_lvalue_reference::value, ""); using slot_type = typename std::remove_cv< @@ -89,4 +89,4 @@ struct node_slot_policy { ABSL_NAMESPACE_END } // namespace absl -#endif // ABSL_CONTAINER_INTERNAL_NODE_SLOT_POLICY_H_ +#endif // ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_ diff --git a/abseil-cpp/absl/container/internal/node_slot_policy_test.cc b/abseil-cpp/absl/container/internal/node_hash_policy_test.cc similarity index 93% rename from abseil-cpp/absl/container/internal/node_slot_policy_test.cc rename to abseil-cpp/absl/container/internal/node_hash_policy_test.cc index 51b7467b..84aabba9 100644 --- a/abseil-cpp/absl/container/internal/node_slot_policy_test.cc +++ b/abseil-cpp/absl/container/internal/node_hash_policy_test.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "absl/container/internal/node_slot_policy.h" +#include "absl/container/internal/node_hash_policy.h" #include @@ -27,7 +27,7 @@ namespace { using ::testing::Pointee; -struct Policy : node_slot_policy { +struct Policy : node_hash_policy { using key_type = int; using init_type = int; diff --git a/abseil-cpp/absl/container/internal/raw_hash_map.h b/abseil-cpp/absl/container/internal/raw_hash_map.h index c7df2efc..0a02757d 100644 --- a/abseil-cpp/absl/container/internal/raw_hash_map.h +++ b/abseil-cpp/absl/container/internal/raw_hash_map.h @@ -51,9 +51,8 @@ class raw_hash_map : public raw_hash_set { using key_arg = typename KeyArgImpl::template type; static_assert(!std::is_reference::value, ""); - - // TODO(b/187807849): Evaluate whether to support reference mapped_type and - // remove this assertion if/when it is supported. + // TODO(alkis): remove this assertion and verify that reference mapped_type is + // supported. static_assert(!std::is_reference::value, ""); using iterator = typename raw_hash_map::raw_hash_set::iterator; diff --git a/abseil-cpp/absl/container/internal/raw_hash_set.cc b/abseil-cpp/absl/container/internal/raw_hash_set.cc index c63a2e02..919ac074 100644 --- a/abseil-cpp/absl/container/internal/raw_hash_set.cc +++ b/abseil-cpp/absl/container/internal/raw_hash_set.cc @@ -23,21 +23,11 @@ namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { -// A single block of empty control bytes for tables without any slots allocated. -// This enables removing a branch in the hot path of find(). 
-alignas(16) ABSL_CONST_INIT ABSL_DLL const ctrl_t kEmptyGroup[16] = { - ctrl_t::kSentinel, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, - ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, - ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, - ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty}; - -#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL constexpr size_t Group::kWidth; -#endif // Returns "random" seed. inline size_t RandomSeed() { -#ifdef ABSL_HAVE_THREAD_LOCAL +#if ABSL_HAVE_THREAD_LOCAL static thread_local size_t counter = 0; size_t value = ++counter; #else // ABSL_HAVE_THREAD_LOCAL @@ -47,25 +37,12 @@ inline size_t RandomSeed() { return value ^ static_cast(reinterpret_cast(&counter)); } -bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl) { +bool ShouldInsertBackwards(size_t hash, ctrl_t* ctrl) { // To avoid problems with weak hashes and single bit tests, we use % 13. // TODO(kfm,sbenza): revisit after we do unconditional mixing return (H1(hash, ctrl) ^ RandomSeed()) % 13 > 6; } -void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity) { - assert(ctrl[capacity] == ctrl_t::kSentinel); - assert(IsValidCapacity(capacity)); - for (ctrl_t* pos = ctrl; pos < ctrl + capacity; pos += Group::kWidth) { - Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos); - } - // Copy the cloned ctrl bytes. - std::memcpy(ctrl + capacity + 1, ctrl, NumClonedBytes()); - ctrl[capacity] = ctrl_t::kSentinel; -} -// Extern template instantiotion for inline function. 
-template FindInfo find_first_non_full(const ctrl_t*, size_t, size_t); - } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl diff --git a/abseil-cpp/absl/container/internal/raw_hash_set.h b/abseil-cpp/absl/container/internal/raw_hash_set.h index ea912f83..ec13a2f7 100644 --- a/abseil-cpp/absl/container/internal/raw_hash_set.h +++ b/abseil-cpp/absl/container/internal/raw_hash_set.h @@ -53,121 +53,40 @@ // // IMPLEMENTATION DETAILS // -// # Table Layout -// -// A raw_hash_set's backing array consists of control bytes followed by slots -// that may or may not contain objects. -// -// The layout of the backing array, for `capacity` slots, is thus, as a -// pseudo-struct: -// -// struct BackingArray { -// // Control bytes for the "real" slots. -// ctrl_t ctrl[capacity]; -// // Always `ctrl_t::kSentinel`. This is used by iterators to find when to -// // stop and serves no other purpose. -// ctrl_t sentinel; -// // A copy of the first `kWidth - 1` elements of `ctrl`. This is used so -// // that if a probe sequence picks a value near the end of `ctrl`, -// // `Group` will have valid control bytes to look at. -// ctrl_t clones[kWidth - 1]; -// // The actual slot data. -// slot_type slots[capacity]; -// }; -// -// The length of this array is computed by `AllocSize()` below. -// -// Control bytes (`ctrl_t`) are bytes (collected into groups of a -// platform-specific size) that define the state of the corresponding slot in -// the slot array. Group manipulation is tightly optimized to be as efficient -// as possible: SSE and friends on x86, clever bit operations on other arches. +// The table stores elements inline in a slot array. In addition to the slot +// array the table maintains some control state per slot. The extra state is one +// byte per slot and stores empty or deleted marks, or alternatively 7 bits from +// the hash of an occupied slot. 
The table is split into logical groups of +// slots, like so: // // Group 1 Group 2 Group 3 // +---------------+---------------+---------------+ // | | | | | | | | | | | | | | | | | | | | | | | | | // +---------------+---------------+---------------+ // -// Each control byte is either a special value for empty slots, deleted slots -// (sometimes called *tombstones*), and a special end-of-table marker used by -// iterators, or, if occupied, seven bits (H2) from the hash of the value in the -// corresponding slot. -// -// Storing control bytes in a separate array also has beneficial cache effects, -// since more logical slots will fit into a cache line. -// -// # Hashing -// -// We compute two separate hashes, `H1` and `H2`, from the hash of an object. -// `H1(hash(x))` is an index into `slots`, and essentially the starting point -// for the probe sequence. `H2(hash(x))` is a 7-bit value used to filter out -// objects that cannot possibly be the one we are looking for. -// -// # Table operations. -// -// The key operations are `insert`, `find`, and `erase`. -// -// Since `insert` and `erase` are implemented in terms of `find`, we describe -// `find` first. To `find` a value `x`, we compute `hash(x)`. From -// `H1(hash(x))` and the capacity, we construct a `probe_seq` that visits every -// group of slots in some interesting order. -// -// We now walk through these indices. At each index, we select the entire group -// starting with that index and extract potential candidates: occupied slots -// with a control byte equal to `H2(hash(x))`. If we find an empty slot in the -// group, we stop and return an error. Each candidate slot `y` is compared with -// `x`; if `x == y`, we are done and return `&y`; otherwise we contine to the -// next probe index. Tombstones effectively behave like full slots that never -// match the value we're looking for. -// -// The `H2` bits ensure when we compare a slot to an object with `==`, we are -// likely to have actually found the object. 
That is, the chance is low that -// `==` is called and returns `false`. Thus, when we search for an object, we -// are unlikely to call `==` many times. This likelyhood can be analyzed as -// follows (assuming that H2 is a random enough hash function). +// On lookup the hash is split into two parts: +// - H2: 7 bits (those stored in the control bytes) +// - H1: the rest of the bits +// The groups are probed using H1. For each group the slots are matched to H2 in +// parallel. Because H2 is 7 bits (128 states) and the number of slots per group +// is low (8 or 16) in almost all cases a match in H2 is also a lookup hit. // -// Let's assume that there are `k` "wrong" objects that must be examined in a -// probe sequence. For example, when doing a `find` on an object that is in the -// table, `k` is the number of objects between the start of the probe sequence -// and the final found object (not including the final found object). The -// expected number of objects with an H2 match is then `k/128`. Measurements -// and analysis indicate that even at high load factors, `k` is less than 32, -// meaning that the number of "false positive" comparisons we must perform is -// less than 1/8 per `find`. - -// `insert` is implemented in terms of `unchecked_insert`, which inserts a -// value presumed to not be in the table (violating this requirement will cause -// the table to behave erratically). Given `x` and its hash `hash(x)`, to insert -// it, we construct a `probe_seq` once again, and use it to find the first -// group with an unoccupied (empty *or* deleted) slot. We place `x` into the -// first such slot in the group and mark it as full with `x`'s H2. +// On insert, once the right group is found (as in lookup), its slots are +// filled in order. // -// To `insert`, we compose `unchecked_insert` with `find`. We compute `h(x)` and -// perform a `find` to see if it's already present; if it is, we're done. If -// it's not, we may decide the table is getting overcrowded (i.e. 
the load -// factor is greater than 7/8 for big tables; `is_small()` tables use a max load -// factor of 1); in this case, we allocate a bigger array, `unchecked_insert` -// each element of the table into the new array (we know that no insertion here -// will insert an already-present value), and discard the old backing array. At -// this point, we may `unchecked_insert` the value `x`. +// On erase a slot is cleared. In case the group did not have any empty slots +// before the erase, the erased slot is marked as deleted. // -// Below, `unchecked_insert` is partly implemented by `prepare_insert`, which -// presents a viable, initialized slot pointee to the caller. +// Groups without empty slots (but maybe with deleted slots) extend the probe +// sequence. The probing algorithm is quadratic. Given N the number of groups, +// the probing function for the i'th probe is: // -// `erase` is implemented in terms of `erase_at`, which takes an index to a -// slot. Given an offset, we simply create a tombstone and destroy its contents. -// If we can prove that the slot would not appear in a probe sequence, we can -// make the slot as empty, instead. We can prove this by observing that if a -// group has any empty slots, it has never been full (assuming we never create -// an empty slot in a group with no empties, which this heuristic guarantees we -// never do) and find would stop at this group anyways (since it does not probe -// beyond groups with empties). +// P(0) = H1 % N // -// `erase` is `erase_at` composed with `find`: if we -// have a value `x`, we can perform a `find`, and then `erase_at` the resulting -// slot. +// P(i) = (P(i - 1) + i) % N // -// To iterate, we simply traverse the array, skipping empty and deleted slots -// and stopping when we hit a `kSentinel`. +// This probing function guarantees that after N probes, all the groups of the +// table will be probed exactly once. 
#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_ #define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_ @@ -183,9 +102,8 @@ #include #include -#include "absl/base/config.h" +#include "absl/base/internal/bits.h" #include "absl/base/internal/endian.h" -#include "absl/base/internal/prefetch.h" #include "absl/base/optimization.h" #include "absl/base/port.h" #include "absl/container/internal/common.h" @@ -194,27 +112,12 @@ #include "absl/container/internal/hash_policy_traits.h" #include "absl/container/internal/hashtable_debug_hooks.h" #include "absl/container/internal/hashtablez_sampler.h" +#include "absl/container/internal/have_sse.h" +#include "absl/container/internal/layout.h" #include "absl/memory/memory.h" #include "absl/meta/type_traits.h" -#include "absl/numeric/bits.h" #include "absl/utility/utility.h" -#ifdef ABSL_INTERNAL_HAVE_SSE2 -#include -#endif - -#ifdef ABSL_INTERNAL_HAVE_SSSE3 -#include -#endif - -#ifdef _MSC_VER -#include -#endif - -#ifdef ABSL_INTERNAL_HAVE_ARM_NEON -#include -#endif - namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { @@ -229,40 +132,14 @@ template void SwapAlloc(AllocType& /*lhs*/, AllocType& /*rhs*/, std::false_type /* propagate_on_container_swap */) {} -// The state for a probe sequence. -// -// Currently, the sequence is a triangular progression of the form -// -// p(i) := Width * (i^2 + i)/2 + hash (mod mask + 1) -// -// The use of `Width` ensures that each probe step does not overlap groups; -// the sequence effectively outputs the addresses of *groups* (although not -// necessarily aligned to any boundary). The `Group` machinery allows us -// to check an entire group with minimal branching. -// -// Wrapping around at `mask + 1` is important, but not for the obvious reason. -// As described above, the first few entries of the control byte array -// are mirrored at the end of the array, which `Group` will find and use -// for selecting candidates. 
However, when those candidates' slots are -// actually inspected, there are no corresponding slots for the cloned bytes, -// so we need to make sure we've treated those offsets as "wrapping around". -// -// It turns out that this probe sequence visits every group exactly once if the -// number of groups is a power of two, since (i^2+i)/2 is a bijection in -// Z/(2^m). See https://en.wikipedia.org/wiki/Quadratic_probing template class probe_seq { public: - // Creates a new probe sequence using `hash` as the initial value of the - // sequence and `mask` (usually the capacity of the table) as the mask to - // apply to each value in the progression. probe_seq(size_t hash, size_t mask) { assert(((mask + 1) & mask) == 0 && "not a mask"); mask_ = mask; offset_ = hash & mask_; } - - // The offset within the table, i.e., the value `p(i)` above. size_t offset() const { return offset_; } size_t offset(size_t i) const { return (offset_ + i) & mask_; } @@ -271,7 +148,7 @@ class probe_seq { offset_ += index_; offset_ &= mask_; } - // 0-based probe index, a multiple of `Width`. + // 0-based probe index. The i-th probe in the probe sequence. size_t index() const { return index_; } private: @@ -295,9 +172,9 @@ struct IsDecomposable : std::false_type {}; template struct IsDecomposable< - absl::void_t(), - std::declval()...))>, + absl::void_t(), + std::declval()...))>, Policy, Hash, Eq, Ts...> : std::true_type {}; // TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it. @@ -312,85 +189,69 @@ constexpr bool IsNoThrowSwappable(std::false_type /* is_swappable */) { } template -uint32_t TrailingZeros(T x) { - ABSL_ASSUME(x != 0); - return static_cast(countr_zero(x)); +int TrailingZeros(T x) { + return sizeof(T) == 8 ? base_internal::CountTrailingZerosNonZero64( + static_cast(x)) + : base_internal::CountTrailingZerosNonZero32( + static_cast(x)); } -// An abstract bitmask, such as that emitted by a SIMD instruction. 
-// -// Specifically, this type implements a simple bitset whose representation is -// controlled by `SignificantBits` and `Shift`. `SignificantBits` is the number -// of abstract bits in the bitset, while `Shift` is the log-base-two of the -// width of an abstract bit in the representation. -// This mask provides operations for any number of real bits set in an abstract -// bit. To add iteration on top of that, implementation must guarantee no more -// than one real bit is set in an abstract bit. -template -class NonIterableBitMask { - public: - explicit NonIterableBitMask(T mask) : mask_(mask) {} - - explicit operator bool() const { return this->mask_ != 0; } - - // Returns the index of the lowest *abstract* bit set in `self`. - uint32_t LowestBitSet() const { - return container_internal::TrailingZeros(mask_) >> Shift; - } - - // Returns the index of the highest *abstract* bit set in `self`. - uint32_t HighestBitSet() const { - return static_cast((bit_width(mask_) - 1) >> Shift); - } - - // Return the number of trailing zero *abstract* bits. - uint32_t TrailingZeros() const { - return container_internal::TrailingZeros(mask_) >> Shift; - } - - // Return the number of leading zero *abstract* bits. - uint32_t LeadingZeros() const { - constexpr int total_significant_bits = SignificantBits << Shift; - constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits; - return static_cast(countl_zero(mask_ << extra_bits)) >> Shift; - } - - T mask_; -}; +template +int LeadingZeros(T x) { + return sizeof(T) == 8 + ? base_internal::CountLeadingZeros64(static_cast(x)) + : base_internal::CountLeadingZeros32(static_cast(x)); +} -// Mask that can be iterable -// -// For example, when `SignificantBits` is 16 and `Shift` is zero, this is just -// an ordinary 16-bit bitset occupying the low 16 bits of `mask`. When -// `SignificantBits` is 8 and `Shift` is 3, abstract bits are represented as -// the bytes `0x00` and `0x80`, and it occupies all 64 bits of the bitmask. 
+// An abstraction over a bitmask. It provides an easy way to iterate through the +// indexes of the set bits of a bitmask. When Shift=0 (platforms with SSE), +// this is a true bitmask. On non-SSE, platforms the arithematic used to +// emulate the SSE behavior works in bytes (Shift=3) and leaves each bytes as +// either 0x00 or 0x80. // // For example: -// for (int i : BitMask(0b101)) -> yields 0, 2 +// for (int i : BitMask(0x5)) -> yields 0, 2 // for (int i : BitMask(0x0000000080800000)) -> yields 2, 3 template -class BitMask : public NonIterableBitMask { - using Base = NonIterableBitMask; +class BitMask { static_assert(std::is_unsigned::value, ""); static_assert(Shift == 0 || Shift == 3, ""); public: - explicit BitMask(T mask) : Base(mask) {} - // BitMask is an iterator over the indices of its abstract bits. + // These are useful for unit tests (gunit). using value_type = int; using iterator = BitMask; using const_iterator = BitMask; + explicit BitMask(T mask) : mask_(mask) {} BitMask& operator++() { - this->mask_ &= (this->mask_ - 1); + mask_ &= (mask_ - 1); return *this; } - - uint32_t operator*() const { return Base::LowestBitSet(); } + explicit operator bool() const { return mask_ != 0; } + int operator*() const { return LowestBitSet(); } + int LowestBitSet() const { + return container_internal::TrailingZeros(mask_) >> Shift; + } + int HighestBitSet() const { + return (sizeof(T) * CHAR_BIT - container_internal::LeadingZeros(mask_) - + 1) >> + Shift; + } BitMask begin() const { return *this; } BitMask end() const { return BitMask(0); } + int TrailingZeros() const { + return container_internal::TrailingZeros(mask_) >> Shift; + } + + int LeadingZeros() const { + constexpr int total_significant_bits = SignificantBits << Shift; + constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits; + return container_internal::LeadingZeros(mask_ << extra_bits) >> Shift; + } + private: friend bool operator==(const BitMask& a, const BitMask& b) { return a.mask_ == 
b.mask_; @@ -398,127 +259,75 @@ class BitMask : public NonIterableBitMask { friend bool operator!=(const BitMask& a, const BitMask& b) { return a.mask_ != b.mask_; } + + T mask_; }; +using ctrl_t = signed char; using h2_t = uint8_t; // The values here are selected for maximum performance. See the static asserts // below for details. - -// A `ctrl_t` is a single control byte, which can have one of four -// states: empty, deleted, full (which has an associated seven-bit h2_t value) -// and the sentinel. They have the following bit patterns: -// -// empty: 1 0 0 0 0 0 0 0 -// deleted: 1 1 1 1 1 1 1 0 -// full: 0 h h h h h h h // h represents the hash bits. -// sentinel: 1 1 1 1 1 1 1 1 -// -// These values are specifically tuned for SSE-flavored SIMD. -// The static_asserts below detail the source of these choices. -// -// We use an enum class so that when strict aliasing is enabled, the compiler -// knows ctrl_t doesn't alias other types. -enum class ctrl_t : int8_t { +enum Ctrl : ctrl_t { kEmpty = -128, // 0b10000000 kDeleted = -2, // 0b11111110 kSentinel = -1, // 0b11111111 }; static_assert( - (static_cast(ctrl_t::kEmpty) & - static_cast(ctrl_t::kDeleted) & - static_cast(ctrl_t::kSentinel) & 0x80) != 0, + kEmpty & kDeleted & kSentinel & 0x80, "Special markers need to have the MSB to make checking for them efficient"); -static_assert( - ctrl_t::kEmpty < ctrl_t::kSentinel && ctrl_t::kDeleted < ctrl_t::kSentinel, - "ctrl_t::kEmpty and ctrl_t::kDeleted must be smaller than " - "ctrl_t::kSentinel to make the SIMD test of IsEmptyOrDeleted() efficient"); -static_assert( - ctrl_t::kSentinel == static_cast(-1), - "ctrl_t::kSentinel must be -1 to elide loading it from memory into SIMD " - "registers (pcmpeqd xmm, xmm)"); -static_assert(ctrl_t::kEmpty == static_cast(-128), - "ctrl_t::kEmpty must be -128 to make the SIMD check for its " +static_assert(kEmpty < kSentinel && kDeleted < kSentinel, + "kEmpty and kDeleted must be smaller than kSentinel to make the " + "SIMD test of 
IsEmptyOrDeleted() efficient"); +static_assert(kSentinel == -1, + "kSentinel must be -1 to elide loading it from memory into SIMD " + "registers (pcmpeqd xmm, xmm)"); +static_assert(kEmpty == -128, + "kEmpty must be -128 to make the SIMD check for its " "existence efficient (psignb xmm, xmm)"); -static_assert( - (~static_cast(ctrl_t::kEmpty) & - ~static_cast(ctrl_t::kDeleted) & - static_cast(ctrl_t::kSentinel) & 0x7F) != 0, - "ctrl_t::kEmpty and ctrl_t::kDeleted must share an unset bit that is not " - "shared by ctrl_t::kSentinel to make the scalar test for " - "MaskEmptyOrDeleted() efficient"); -static_assert(ctrl_t::kDeleted == static_cast(-2), - "ctrl_t::kDeleted must be -2 to make the implementation of " +static_assert(~kEmpty & ~kDeleted & kSentinel & 0x7F, + "kEmpty and kDeleted must share an unset bit that is not shared " + "by kSentinel to make the scalar test for MatchEmptyOrDeleted() " + "efficient"); +static_assert(kDeleted == -2, + "kDeleted must be -2 to make the implementation of " "ConvertSpecialToEmptyAndFullToDeleted efficient"); -ABSL_DLL extern const ctrl_t kEmptyGroup[16]; - -// Returns a pointer to a control byte group that can be used by empty tables. +// A single block of empty control bytes for tables without any slots allocated. +// This enables removing a branch in the hot path of find(). inline ctrl_t* EmptyGroup() { - // Const must be cast away here; no uses of this function will actually write - // to it, because it is only used for empty tables. - return const_cast(kEmptyGroup); + alignas(16) static constexpr ctrl_t empty_group[] = { + kSentinel, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, + kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty}; + return const_cast(empty_group); } // Mixes a randomly generated per-process seed with `hash` and `ctrl` to // randomize insertion order within groups. 
-bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl); +bool ShouldInsertBackwards(size_t hash, ctrl_t* ctrl); -// Returns a per-table, hash salt, which changes on resize. This gets mixed into -// H1 to randomize iteration order per-table. +// Returns a hash seed. // // The seed consists of the ctrl_ pointer, which adds enough entropy to ensure // non-determinism of iteration order in most cases. -inline size_t PerTableSalt(const ctrl_t* ctrl) { +inline size_t HashSeed(const ctrl_t* ctrl) { // The low bits of the pointer have little or no entropy because of // alignment. We shift the pointer to try to use higher entropy bits. A // good number seems to be 12 bits, because that aligns with page size. return reinterpret_cast(ctrl) >> 12; } -// Extracts the H1 portion of a hash: 57 bits mixed with a per-table salt. + inline size_t H1(size_t hash, const ctrl_t* ctrl) { - return (hash >> 7) ^ PerTableSalt(ctrl); + return (hash >> 7) ^ HashSeed(ctrl); } +inline ctrl_t H2(size_t hash) { return hash & 0x7F; } -// Extracts the H2 portion of a hash: the 7 bits not used for H1. -// -// These are used as an occupied control byte. -inline h2_t H2(size_t hash) { return hash & 0x7F; } - -// Helpers for checking the state of a control byte. -inline bool IsEmpty(ctrl_t c) { return c == ctrl_t::kEmpty; } -inline bool IsFull(ctrl_t c) { return c >= static_cast(0); } -inline bool IsDeleted(ctrl_t c) { return c == ctrl_t::kDeleted; } -inline bool IsEmptyOrDeleted(ctrl_t c) { return c < ctrl_t::kSentinel; } +inline bool IsEmpty(ctrl_t c) { return c == kEmpty; } +inline bool IsFull(ctrl_t c) { return c >= 0; } +inline bool IsDeleted(ctrl_t c) { return c == kDeleted; } +inline bool IsEmptyOrDeleted(ctrl_t c) { return c < kSentinel; } -#ifdef ABSL_INTERNAL_HAVE_SSE2 -// Quick reference guide for intrinsics used below: -// -// * __m128i: An XMM (128-bit) word. -// -// * _mm_setzero_si128: Returns a zero vector. -// * _mm_set1_epi8: Returns a vector with the same i8 in each lane. 
-// -// * _mm_subs_epi8: Saturating-subtracts two i8 vectors. -// * _mm_and_si128: Ands two i128s together. -// * _mm_or_si128: Ors two i128s together. -// * _mm_andnot_si128: And-nots two i128s together. -// -// * _mm_cmpeq_epi8: Component-wise compares two i8 vectors for equality, -// filling each lane with 0x00 or 0xff. -// * _mm_cmpgt_epi8: Same as above, but using > rather than ==. -// -// * _mm_loadu_si128: Performs an unaligned load of an i128. -// * _mm_storeu_si128: Performs an unaligned store of an i128. -// -// * _mm_sign_epi8: Retains, negates, or zeroes each i8 lane of the first -// argument if the corresponding lane of the second -// argument is positive, negative, or zero, respectively. -// * _mm_movemask_epi8: Selects the sign bit out of each i8 lane and produces a -// bitmask consisting of those bits. -// * _mm_shuffle_epi8: Selects i8s from the first argument, using the low -// four bits of each i8 lane in the second argument as -// indices. +#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 // https://github.com/abseil/abseil-cpp/issues/209 // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853 @@ -547,40 +356,38 @@ struct GroupSse2Impl { BitMask Match(h2_t hash) const { auto match = _mm_set1_epi8(hash); return BitMask( - static_cast(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl)))); + _mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))); } // Returns a bitmask representing the positions of empty slots. - NonIterableBitMask MaskEmpty() const { -#ifdef ABSL_INTERNAL_HAVE_SSSE3 - // This only works because ctrl_t::kEmpty is -128. - return NonIterableBitMask( - static_cast(_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl)))); + BitMask MatchEmpty() const { +#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 + // This only works because kEmpty is -128. 
+ return BitMask( + _mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl))); #else - auto match = _mm_set1_epi8(static_cast(ctrl_t::kEmpty)); - return NonIterableBitMask( - static_cast(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl)))); + return Match(static_cast(kEmpty)); #endif } // Returns a bitmask representing the positions of empty or deleted slots. - NonIterableBitMask MaskEmptyOrDeleted() const { - auto special = _mm_set1_epi8(static_cast(ctrl_t::kSentinel)); - return NonIterableBitMask(static_cast( - _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)))); + BitMask MatchEmptyOrDeleted() const { + auto special = _mm_set1_epi8(kSentinel); + return BitMask( + _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl))); } // Returns the number of trailing empty or deleted elements in the group. uint32_t CountLeadingEmptyOrDeleted() const { - auto special = _mm_set1_epi8(static_cast(ctrl_t::kSentinel)); - return TrailingZeros(static_cast( - _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1)); + auto special = _mm_set1_epi8(kSentinel); + return TrailingZeros( + _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1); } void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const { auto msbs = _mm_set1_epi8(static_cast(-128)); auto x126 = _mm_set1_epi8(126); -#ifdef ABSL_INTERNAL_HAVE_SSSE3 +#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs); #else auto zero = _mm_setzero_si128(); @@ -594,63 +401,6 @@ struct GroupSse2Impl { }; #endif // ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 -#if defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN) -struct GroupAArch64Impl { - static constexpr size_t kWidth = 8; - - explicit GroupAArch64Impl(const ctrl_t* pos) { - ctrl = vld1_u8(reinterpret_cast(pos)); - } - - BitMask Match(h2_t hash) const { - uint8x8_t dup = vdup_n_u8(hash); - auto mask = vceq_u8(ctrl, dup); - constexpr uint64_t msbs = 0x8080808080808080ULL; - return BitMask( - 
vget_lane_u64(vreinterpret_u64_u8(mask), 0) & msbs); - } - - NonIterableBitMask MaskEmpty() const { - uint64_t mask = - vget_lane_u64(vreinterpret_u64_u8( - vceq_s8(vdup_n_s8(static_cast(ctrl_t::kEmpty)), - vreinterpret_s8_u8(ctrl))), - 0); - return NonIterableBitMask(mask); - } - - NonIterableBitMask MaskEmptyOrDeleted() const { - uint64_t mask = - vget_lane_u64(vreinterpret_u64_u8(vcgt_s8( - vdup_n_s8(static_cast(ctrl_t::kSentinel)), - vreinterpret_s8_u8(ctrl))), - 0); - return NonIterableBitMask(mask); - } - - uint32_t CountLeadingEmptyOrDeleted() const { - uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0); - // ctrl | ~(ctrl >> 7) will have the lowest bit set to zero for kEmpty and - // kDeleted. We lower all other bits and count number of trailing zeros. - // Clang and GCC optimize countr_zero to rbit+clz without any check for 0, - // so we should be fine. - constexpr uint64_t bits = 0x0101010101010101ULL; - return countr_zero((mask | ~(mask >> 7)) & bits) >> 3; - } - - void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const { - uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0); - constexpr uint64_t msbs = 0x8080808080808080ULL; - constexpr uint64_t lsbs = 0x0101010101010101ULL; - auto x = mask & msbs; - auto res = (~x + (x >> 7)) & ~lsbs; - little_endian::Store64(dst, res); - } - - uint8x8_t ctrl; -}; -#endif // ABSL_INTERNAL_HAVE_ARM_NEON && ABSL_IS_LITTLE_ENDIAN - struct GroupPortableImpl { static constexpr size_t kWidth = 8; @@ -664,7 +414,7 @@ struct GroupPortableImpl { // // Caveat: there are false positives but: // - they only occur if there is a real match - // - they never occur on ctrl_t::kEmpty, ctrl_t::kDeleted, ctrl_t::kSentinel + // - they never occur on kEmpty, kDeleted, kSentinel // - they will be handled gracefully by subsequent checks in code // // Example: @@ -677,23 +427,19 @@ struct GroupPortableImpl { return BitMask((x - lsbs) & ~x & msbs); } - NonIterableBitMask MaskEmpty() const { + BitMask MatchEmpty() const { 
constexpr uint64_t msbs = 0x8080808080808080ULL; - return NonIterableBitMask((ctrl & (~ctrl << 6)) & - msbs); + return BitMask((ctrl & (~ctrl << 6)) & msbs); } - NonIterableBitMask MaskEmptyOrDeleted() const { + BitMask MatchEmptyOrDeleted() const { constexpr uint64_t msbs = 0x8080808080808080ULL; - return NonIterableBitMask((ctrl & (~ctrl << 7)) & - msbs); + return BitMask((ctrl & (~ctrl << 7)) & msbs); } uint32_t CountLeadingEmptyOrDeleted() const { - // ctrl | ~(ctrl >> 7) will have the lowest bit set to zero for kEmpty and - // kDeleted. We lower all other bits and count number of trailing zeros. - constexpr uint64_t bits = 0x0101010101010101ULL; - return countr_zero((ctrl | ~(ctrl >> 7)) & bits) >> 3; + constexpr uint64_t gaps = 0x00FEFEFEFEFEFEFEULL; + return (TrailingZeros(((~ctrl & (ctrl >> 7)) | gaps) + 1) + 7) >> 3; } void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const { @@ -707,54 +453,44 @@ struct GroupPortableImpl { uint64_t ctrl; }; -#ifdef ABSL_INTERNAL_HAVE_SSE2 +#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 using Group = GroupSse2Impl; -#elif defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN) -using Group = GroupAArch64Impl; #else using Group = GroupPortableImpl; #endif -// Returns he number of "cloned control bytes". -// -// This is the number of control bytes that are present both at the beginning -// of the control byte array and at the end, such that we can create a -// `Group::kWidth`-width probe window starting from any control byte. -constexpr size_t NumClonedBytes() { return Group::kWidth - 1; } - template class raw_hash_set; -// Returns whether `n` is a valid capacity (i.e., number of slots). -// -// A valid capacity is a non-zero integer `2^m - 1`. 
inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; } -// Applies the following mapping to every byte in the control array: -// * kDeleted -> kEmpty -// * kEmpty -> kEmpty -// * _ -> kDeleted // PRECONDITION: // IsValidCapacity(capacity) -// ctrl[capacity] == ctrl_t::kSentinel -// ctrl[i] != ctrl_t::kSentinel for all i < capacity -void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity); +// ctrl[capacity] == kSentinel +// ctrl[i] != kSentinel for all i < capacity +// Applies mapping for every byte in ctrl: +// DELETED -> EMPTY +// EMPTY -> EMPTY +// FULL -> DELETED +inline void ConvertDeletedToEmptyAndFullToDeleted( + ctrl_t* ctrl, size_t capacity) { + assert(ctrl[capacity] == kSentinel); + assert(IsValidCapacity(capacity)); + for (ctrl_t* pos = ctrl; pos != ctrl + capacity + 1; pos += Group::kWidth) { + Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos); + } + // Copy the cloned ctrl bytes. + std::memcpy(ctrl + capacity + 1, ctrl, Group::kWidth); + ctrl[capacity] = kSentinel; +} -// Converts `n` into the next valid capacity, per `IsValidCapacity`. +// Rounds up the capacity to the next power of 2 minus 1, with a minimum of 1. inline size_t NormalizeCapacity(size_t n) { - return n ? ~size_t{} >> countl_zero(n) : 1; + return n ? ~size_t{} >> LeadingZeros(n) : 1; } -// General notes on capacity/growth methods below: -// - We use 7/8th as maximum load factor. For 16-wide groups, that gives an -// average of two empty slots per group. -// - For (capacity+1) >= Group::kWidth, growth is 7/8*capacity. -// - For (capacity+1) < Group::kWidth, growth == capacity. In this case, we -// never need to probe (the whole table fits in one group) so we don't need a -// load factor less than 1. - -// Given `capacity`, applies the load factor; i.e., it returns the maximum -// number of values we should put into the table before a resizing rehash. +// We use 7/8th as maximum load factor. 
+// For 16-wide groups, that gives an average of two empty slots per group. inline size_t CapacityToGrowth(size_t capacity) { assert(IsValidCapacity(capacity)); // `capacity*7/8` @@ -764,12 +500,8 @@ inline size_t CapacityToGrowth(size_t capacity) { } return capacity - capacity / 8; } - -// Given `growth`, "unapplies" the load factor to find how large the capacity -// should be to stay within the load factor. -// -// This might not be a valid capacity and `NormalizeCapacity()` should be -// called on this. +// From desired "growth" to a lowerbound of the necessary capacity. +// Might not be a valid one and required NormalizeCapacity(). inline size_t GrowthToLowerboundCapacity(size_t growth) { // `growth*8/7` if (Group::kWidth == 8 && growth == 7) { @@ -779,145 +511,18 @@ inline size_t GrowthToLowerboundCapacity(size_t growth) { return growth + static_cast((static_cast(growth) - 1) / 7); } -template -size_t SelectBucketCountForIterRange(InputIter first, InputIter last, - size_t bucket_count) { - if (bucket_count != 0) { - return bucket_count; - } - using InputIterCategory = - typename std::iterator_traits::iterator_category; - if (std::is_base_of::value) { - return GrowthToLowerboundCapacity( - static_cast(std::distance(first, last))); - } - return 0; +inline void AssertIsFull(ctrl_t* ctrl) { + ABSL_HARDENING_ASSERT((ctrl != nullptr && IsFull(*ctrl)) && + "Invalid operation on iterator. The element might have " + "been erased, or the table might have rehashed."); } -#define ABSL_INTERNAL_ASSERT_IS_FULL(ctrl, msg) \ - ABSL_HARDENING_ASSERT((ctrl != nullptr && IsFull(*ctrl)) && msg) - inline void AssertIsValid(ctrl_t* ctrl) { - ABSL_HARDENING_ASSERT( - (ctrl == nullptr || IsFull(*ctrl)) && - "Invalid operation on iterator. The element might have " - "been erased, the table might have rehashed, or this may " - "be an end() iterator."); + ABSL_HARDENING_ASSERT((ctrl == nullptr || IsFull(*ctrl)) && + "Invalid operation on iterator. 
The element might have " + "been erased, or the table might have rehashed."); } -struct FindInfo { - size_t offset; - size_t probe_length; -}; - -// Whether a table is "small". A small table fits entirely into a probing -// group, i.e., has a capacity < `Group::kWidth`. -// -// In small mode we are able to use the whole capacity. The extra control -// bytes give us at least one "empty" control byte to stop the iteration. -// This is important to make 1 a valid capacity. -// -// In small mode only the first `capacity` control bytes after the sentinel -// are valid. The rest contain dummy ctrl_t::kEmpty values that do not -// represent a real slot. This is important to take into account on -// `find_first_non_full()`, where we never try -// `ShouldInsertBackwards()` for small tables. -inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; } - -// Begins a probing operation on `ctrl`, using `hash`. -inline probe_seq probe(const ctrl_t* ctrl, size_t hash, - size_t capacity) { - return probe_seq(H1(hash, ctrl), capacity); -} - -// Probes an array of control bits using a probe sequence derived from `hash`, -// and returns the offset corresponding to the first deleted or empty slot. -// -// Behavior when the entire table is full is undefined. -// -// NOTE: this function must work with tables having both empty and deleted -// slots in the same group. Such tables appear during `erase()`. -template -inline FindInfo find_first_non_full(const ctrl_t* ctrl, size_t hash, - size_t capacity) { - auto seq = probe(ctrl, hash, capacity); - while (true) { - Group g{ctrl + seq.offset()}; - auto mask = g.MaskEmptyOrDeleted(); - if (mask) { -#if !defined(NDEBUG) - // We want to add entropy even when ASLR is not enabled. - // In debug build we will randomly insert in either the front or back of - // the group. 
- // TODO(kfm,sbenza): revisit after we do unconditional mixing - if (!is_small(capacity) && ShouldInsertBackwards(hash, ctrl)) { - return {seq.offset(mask.HighestBitSet()), seq.index()}; - } -#endif - return {seq.offset(mask.LowestBitSet()), seq.index()}; - } - seq.next(); - assert(seq.index() <= capacity && "full table!"); - } -} - -// Extern template for inline function keep possibility of inlining. -// When compiler decided to not inline, no symbols will be added to the -// corresponding translation unit. -extern template FindInfo find_first_non_full(const ctrl_t*, size_t, size_t); - -// Sets `ctrl` to `{kEmpty, kSentinel, ..., kEmpty}`, marking the entire -// array as marked as empty. -inline void ResetCtrl(size_t capacity, ctrl_t* ctrl, const void* slot, - size_t slot_size) { - std::memset(ctrl, static_cast(ctrl_t::kEmpty), - capacity + 1 + NumClonedBytes()); - ctrl[capacity] = ctrl_t::kSentinel; - SanitizerPoisonMemoryRegion(slot, slot_size * capacity); -} - -// Sets `ctrl[i]` to `h`. -// -// Unlike setting it directly, this function will perform bounds checks and -// mirror the value to the cloned tail if necessary. -inline void SetCtrl(size_t i, ctrl_t h, size_t capacity, ctrl_t* ctrl, - const void* slot, size_t slot_size) { - assert(i < capacity); - - auto* slot_i = static_cast(slot) + i * slot_size; - if (IsFull(h)) { - SanitizerUnpoisonMemoryRegion(slot_i, slot_size); - } else { - SanitizerPoisonMemoryRegion(slot_i, slot_size); - } - - ctrl[i] = h; - ctrl[((i - NumClonedBytes()) & capacity) + (NumClonedBytes() & capacity)] = h; -} - -// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`. -inline void SetCtrl(size_t i, h2_t h, size_t capacity, ctrl_t* ctrl, - const void* slot, size_t slot_size) { - SetCtrl(i, static_cast(h), capacity, ctrl, slot, slot_size); -} - -// Given the capacity of a table, computes the offset (from the start of the -// backing allocation) at which the slots begin. 
-inline size_t SlotOffset(size_t capacity, size_t slot_align) { - assert(IsValidCapacity(capacity)); - const size_t num_control_bytes = capacity + 1 + NumClonedBytes(); - return (num_control_bytes + slot_align - 1) & (~slot_align + 1); -} - -// Given the capacity of a table, computes the total size of the backing -// array. -inline size_t AllocSize(size_t capacity, size_t slot_size, size_t slot_align) { - return SlotOffset(capacity, slot_align) + capacity * slot_size; -} - -// A SwissTable. -// // Policy: a policy defines how to perform different operations on // the slots of the hashtable (see hash_policy_traits.h for the full interface // of policy). @@ -974,6 +579,13 @@ class raw_hash_set { auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k)); auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k)); + using Layout = absl::container_internal::Layout; + + static Layout MakeLayout(size_t capacity) { + assert(IsValidCapacity(capacity)); + return Layout(capacity + Group::kWidth + 1, capacity); + } + using AllocTraits = absl::allocator_traits; using SlotAlloc = typename absl::allocator_traits< allocator_type>::template rebind_alloc; @@ -1032,22 +644,16 @@ class raw_hash_set { // PRECONDITION: not an end() iterator. reference operator*() const { - ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_, - "operator*() called on invalid iterator."); + AssertIsFull(ctrl_); return PolicyTraits::element(slot_); } // PRECONDITION: not an end() iterator. - pointer operator->() const { - ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_, - "operator-> called on invalid iterator."); - return &operator*(); - } + pointer operator->() const { return &operator*(); } // PRECONDITION: not an end() iterator. 
iterator& operator++() { - ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_, - "operator++ called on invalid iterator."); + AssertIsFull(ctrl_); ++ctrl_; ++slot_; skip_empty_or_deleted(); @@ -1073,20 +679,16 @@ class raw_hash_set { iterator(ctrl_t* ctrl, slot_type* slot) : ctrl_(ctrl), slot_(slot) { // This assumption helps the compiler know that any non-end iterator is // not equal to any end iterator. - ABSL_ASSUME(ctrl != nullptr); + ABSL_INTERNAL_ASSUME(ctrl != nullptr); } - // Fixes up `ctrl_` to point to a full by advancing it and `slot_` until - // they reach one. - // - // If a sentinel is reached, we null both of them out instead. void skip_empty_or_deleted() { while (IsEmptyOrDeleted(*ctrl_)) { uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted(); ctrl_ += shift; slot_ += shift; } - if (ABSL_PREDICT_FALSE(*ctrl_ == ctrl_t::kSentinel)) ctrl_ = nullptr; + if (ABSL_PREDICT_FALSE(*ctrl_ == kSentinel)) ctrl_ = nullptr; } ctrl_t* ctrl_ = nullptr; @@ -1145,10 +747,10 @@ class raw_hash_set { explicit raw_hash_set(size_t bucket_count, const hasher& hash = hasher(), const key_equal& eq = key_equal(), const allocator_type& alloc = allocator_type()) - : ctrl_(EmptyGroup()), - settings_(0, HashtablezInfoHandle(), hash, eq, alloc) { + : ctrl_(EmptyGroup()), settings_(0, hash, eq, alloc) { if (bucket_count) { capacity_ = NormalizeCapacity(bucket_count); + reset_growth_left(); initialize_slots(); } } @@ -1167,8 +769,7 @@ class raw_hash_set { raw_hash_set(InputIter first, InputIter last, size_t bucket_count = 0, const hasher& hash = hasher(), const key_equal& eq = key_equal(), const allocator_type& alloc = allocator_type()) - : raw_hash_set(SelectBucketCountForIterRange(first, last, bucket_count), - hash, eq, alloc) { + : raw_hash_set(bucket_count, hash, eq, alloc) { insert(first, last); } @@ -1255,11 +856,10 @@ class raw_hash_set { // than a full `insert`. 
for (const auto& v : that) { const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v); - auto target = find_first_non_full(ctrl_, hash, capacity_); - SetCtrl(target.offset, H2(hash), capacity_, ctrl_, slots_, - sizeof(slot_type)); + auto target = find_first_non_full(hash); + set_ctrl(target.offset, H2(hash)); emplace_at(target.offset, v); - infoz().RecordInsert(hash, target.probe_length); + infoz_.RecordInsert(hash, target.probe_length); } size_ = that.size(); growth_left() -= that.size(); @@ -1273,27 +873,28 @@ class raw_hash_set { slots_(absl::exchange(that.slots_, nullptr)), size_(absl::exchange(that.size_, 0)), capacity_(absl::exchange(that.capacity_, 0)), + infoz_(absl::exchange(that.infoz_, HashtablezInfoHandle())), // Hash, equality and allocator are copied instead of moved because // `that` must be left valid. If Hash is std::function, moving it // would create a nullptr functor that cannot be called. - settings_(absl::exchange(that.growth_left(), 0), - absl::exchange(that.infoz(), HashtablezInfoHandle()), - that.hash_ref(), that.eq_ref(), that.alloc_ref()) {} + settings_(that.settings_) { + // growth_left was copied above, reset the one from `that`. + that.growth_left() = 0; + } raw_hash_set(raw_hash_set&& that, const allocator_type& a) : ctrl_(EmptyGroup()), slots_(nullptr), size_(0), capacity_(0), - settings_(0, HashtablezInfoHandle(), that.hash_ref(), that.eq_ref(), - a) { + settings_(0, that.hash_ref(), that.eq_ref(), a) { if (a == that.alloc_ref()) { std::swap(ctrl_, that.ctrl_); std::swap(slots_, that.slots_); std::swap(size_, that.size_); std::swap(capacity_, that.capacity_); std::swap(growth_left(), that.growth_left()); - std::swap(infoz(), that.infoz()); + std::swap(infoz_, that.infoz_); } else { reserve(that.size()); // Note: this will copy elements of dense_set and unordered_set instead of @@ -1353,8 +954,6 @@ class raw_hash_set { // past that we simply deallocate the array. 
if (capacity_ > 127) { destroy_slots(); - - infoz().RecordClearedReservation(); } else if (capacity_) { for (size_t i = 0; i != capacity_; ++i) { if (IsFull(ctrl_[i])) { @@ -1362,11 +961,11 @@ class raw_hash_set { } } size_ = 0; - ResetCtrl(capacity_, ctrl_, slots_, sizeof(slot_type)); + reset_ctrl(); reset_growth_left(); } assert(empty()); - infoz().RecordStorageChanged(0, capacity_); + infoz_.RecordStorageChanged(0, capacity_); } // This overload kicks in when the argument is an rvalue of insertable and @@ -1376,7 +975,8 @@ class raw_hash_set { // m.insert(std::make_pair("abc", 42)); // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc // bug. - template = 0, class T2 = T, + template = 0, + class T2 = T, typename std::enable_if::value, int>::type = 0, T* = nullptr> std::pair insert(T&& value) { @@ -1438,7 +1038,7 @@ class raw_hash_set { template void insert(InputIt first, InputIt last) { - for (; first != last; ++first) emplace(*first); + for (; first != last; ++first) insert(*first); } template = 0, RequiresInsertable = 0> @@ -1465,9 +1065,7 @@ class raw_hash_set { } iterator insert(const_iterator, node_type&& node) { - auto res = insert(std::move(node)); - node = std::move(res.node); - return res.position; + return insert(std::move(node)).first; } // This overload kicks in if we can deduce the key from args. This enables us @@ -1596,8 +1194,7 @@ class raw_hash_set { // This overload is necessary because otherwise erase(const K&) would be // a better match if non-const iterator is passed as an argument. 
void erase(iterator it) { - ABSL_INTERNAL_ASSERT_IS_FULL(it.ctrl_, - "erase() called on invalid iterator."); + AssertIsFull(it.ctrl_); PolicyTraits::destroy(&alloc_ref(), it.slot_); erase_meta_only(it); } @@ -1631,8 +1228,7 @@ class raw_hash_set { } node_type extract(const_iterator position) { - ABSL_INTERNAL_ASSERT_IS_FULL(position.inner_.ctrl_, - "extract() called on invalid iterator."); + AssertIsFull(position.inner_.ctrl_); auto node = CommonAccess::Transfer(alloc_ref(), position.inner_.slot_); erase_meta_only(position); @@ -1659,7 +1255,7 @@ class raw_hash_set { swap(growth_left(), that.growth_left()); swap(hash_ref(), that.hash_ref()); swap(eq_ref(), that.eq_ref()); - swap(infoz(), that.infoz()); + swap(infoz_, that.infoz_); SwapAlloc(alloc_ref(), that.alloc_ref(), typename AllocTraits::propagate_on_container_swap{}); } @@ -1668,34 +1264,19 @@ class raw_hash_set { if (n == 0 && capacity_ == 0) return; if (n == 0 && size_ == 0) { destroy_slots(); - infoz().RecordStorageChanged(0, 0); - infoz().RecordClearedReservation(); + infoz_.RecordStorageChanged(0, 0); return; } - // bitor is a faster way of doing `max` here. We will round up to the next // power-of-2-minus-1, so bitor is good enough. auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size())); // n == 0 unconditionally rehashes as per the standard. if (n == 0 || m > capacity_) { resize(m); - - // This is after resize, to ensure that we have completed the allocation - // and have potentially sampled the hashtable. - infoz().RecordReservation(n); } } - void reserve(size_t n) { - if (n > size() + growth_left()) { - size_t m = GrowthToLowerboundCapacity(n); - resize(NormalizeCapacity(m)); - - // This is after resize, to ensure that we have completed the allocation - // and have potentially sampled the hashtable. - infoz().RecordReservation(n); - } - } + void reserve(size_t n) { rehash(GrowthToLowerboundCapacity(n)); } // Extension API: support for heterogeneous keys. 
// @@ -1719,13 +1300,11 @@ class raw_hash_set { template void prefetch(const key_arg& key) const { (void)key; - // Avoid probing if we won't be able to prefetch the addresses received. -#ifdef ABSL_INTERNAL_HAVE_PREFETCH - prefetch_heap_block(); - auto seq = probe(ctrl_, hash_ref()(key), capacity_); - base_internal::PrefetchT0(ctrl_ + seq.offset()); - base_internal::PrefetchT0(slots_ + seq.offset()); -#endif // ABSL_INTERNAL_HAVE_PREFETCH +#if defined(__GNUC__) + auto seq = probe(hash_ref()(key)); + __builtin_prefetch(static_cast(ctrl_ + seq.offset())); + __builtin_prefetch(static_cast(slots_ + seq.offset())); +#endif // __GNUC__ } // The API of find() has two extensions. @@ -1737,23 +1316,22 @@ class raw_hash_set { // called heterogeneous key support. template iterator find(const key_arg& key, size_t hash) { - auto seq = probe(ctrl_, hash, capacity_); + auto seq = probe(hash); while (true) { Group g{ctrl_ + seq.offset()}; - for (uint32_t i : g.Match(H2(hash))) { + for (int i : g.Match(H2(hash))) { if (ABSL_PREDICT_TRUE(PolicyTraits::apply( EqualElement{key, eq_ref()}, PolicyTraits::element(slots_ + seq.offset(i))))) return iterator_at(seq.offset(i)); } - if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return end(); + if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return end(); seq.next(); - assert(seq.index() <= capacity_ && "full table!"); + assert(seq.index() < capacity_ && "full table!"); } } template iterator find(const key_arg& key) { - prefetch_heap_block(); return find(key, hash_ref()(key)); } @@ -1763,7 +1341,6 @@ class raw_hash_set { } template const_iterator find(const key_arg& key) const { - prefetch_heap_block(); return find(key, hash_ref()(key)); } @@ -1813,14 +1390,6 @@ class raw_hash_set { return !(a == b); } - template - friend typename std::enable_if::value, - H>::type - AbslHashValue(H h, const raw_hash_set& s) { - return H::combine(H::combine_unordered(std::move(h), s.begin(), s.end()), - s.size()); - } - friend void swap(raw_hash_set& a, raw_hash_set& b) 
noexcept(noexcept(a.swap(b))) { a.swap(b); @@ -1886,17 +1455,17 @@ class raw_hash_set { slot_type&& slot; }; - // Erases, but does not destroy, the value pointed to by `it`. - // - // This merely updates the pertinent control byte. This can be used in - // conjunction with Policy::transfer to move the object to another place. + // "erases" the object from the container, except that it doesn't actually + // destroy the object. It only updates all the metadata of the class. + // This can be used in conjunction with Policy::transfer to move the object to + // another place. void erase_meta_only(const_iterator it) { assert(IsFull(*it.inner_.ctrl_) && "erasing a dangling iterator"); --size_; - const size_t index = static_cast(it.inner_.ctrl_ - ctrl_); + const size_t index = it.inner_.ctrl_ - ctrl_; const size_t index_before = (index - Group::kWidth) & capacity_; - const auto empty_after = Group(it.inner_.ctrl_).MaskEmpty(); - const auto empty_before = Group(ctrl_ + index_before).MaskEmpty(); + const auto empty_after = Group(it.inner_.ctrl_).MatchEmpty(); + const auto empty_before = Group(ctrl_ + index_before).MatchEmpty(); // We count how many consecutive non empties we have to the right and to the // left of `it`. If the sum is >= kWidth then there is at least one probe @@ -1906,17 +1475,11 @@ class raw_hash_set { static_cast(empty_after.TrailingZeros() + empty_before.LeadingZeros()) < Group::kWidth; - SetCtrl(index, was_never_full ? ctrl_t::kEmpty : ctrl_t::kDeleted, - capacity_, ctrl_, slots_, sizeof(slot_type)); + set_ctrl(index, was_never_full ? kEmpty : kDeleted); growth_left() += was_never_full; - infoz().RecordErase(); + infoz_.RecordErase(); } - // Allocates a backing array for `self` and initializes its control bytes. - // This reads `capacity_` and updates all other fields based on the result of - // the allocation. - // - // This does not free the currently held array; `capacity_` must be nonzero. 
void initialize_slots() { assert(capacity_); // Folks with custom allocators often make unwarranted assumptions about the @@ -1931,24 +1494,19 @@ class raw_hash_set { // bound more carefully. if (std::is_same>::value && slots_ == nullptr) { - infoz() = Sample(sizeof(slot_type)); + infoz_ = Sample(); } - char* mem = static_cast(Allocate( - &alloc_ref(), - AllocSize(capacity_, sizeof(slot_type), alignof(slot_type)))); - ctrl_ = reinterpret_cast(mem); - slots_ = reinterpret_cast( - mem + SlotOffset(capacity_, alignof(slot_type))); - ResetCtrl(capacity_, ctrl_, slots_, sizeof(slot_type)); + auto layout = MakeLayout(capacity_); + char* mem = static_cast( + Allocate(&alloc_ref(), layout.AllocSize())); + ctrl_ = reinterpret_cast(layout.template Pointer<0>(mem)); + slots_ = layout.template Pointer<1>(mem); + reset_ctrl(); reset_growth_left(); - infoz().RecordStorageChanged(size_, capacity_); + infoz_.RecordStorageChanged(size_, capacity_); } - // Destroys all slots in the backing array, frees the backing array, and - // clears all top-level book-keeping data. - // - // This essentially implements `map = raw_hash_set();`. void destroy_slots() { if (!capacity_) return; for (size_t i = 0; i != capacity_; ++i) { @@ -1956,12 +1514,10 @@ class raw_hash_set { PolicyTraits::destroy(&alloc_ref(), slots_ + i); } } - + auto layout = MakeLayout(capacity_); // Unpoison before returning the memory to the allocator. 
SanitizerUnpoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_); - Deallocate( - &alloc_ref(), ctrl_, - AllocSize(capacity_, sizeof(slot_type), alignof(slot_type))); + Deallocate(&alloc_ref(), ctrl_, layout.AllocSize()); ctrl_ = EmptyGroup(); slots_ = nullptr; size_ = 0; @@ -1982,29 +1538,26 @@ class raw_hash_set { if (IsFull(old_ctrl[i])) { size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, PolicyTraits::element(old_slots + i)); - auto target = find_first_non_full(ctrl_, hash, capacity_); + auto target = find_first_non_full(hash); size_t new_i = target.offset; total_probe_length += target.probe_length; - SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type)); + set_ctrl(new_i, H2(hash)); PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, old_slots + i); } } if (old_capacity) { SanitizerUnpoisonMemoryRegion(old_slots, sizeof(slot_type) * old_capacity); - Deallocate( - &alloc_ref(), old_ctrl, - AllocSize(old_capacity, sizeof(slot_type), alignof(slot_type))); + auto layout = MakeLayout(old_capacity); + Deallocate(&alloc_ref(), old_ctrl, + layout.AllocSize()); } - infoz().RecordRehash(total_probe_length); + infoz_.RecordRehash(total_probe_length); } - // Prunes control bytes to remove as many tombstones as possible. - // - // See the comment on `rehash_and_grow_if_necessary()`. 
void drop_deletes_without_resize() ABSL_ATTRIBUTE_NOINLINE { assert(IsValidCapacity(capacity_)); - assert(!is_small(capacity_)); + assert(!is_small()); // Algorithm: // - mark all DELETED slots as EMPTY // - mark all FULL slots as DELETED @@ -2027,35 +1580,34 @@ class raw_hash_set { slot_type* slot = reinterpret_cast(&raw); for (size_t i = 0; i != capacity_; ++i) { if (!IsDeleted(ctrl_[i])) continue; - const size_t hash = PolicyTraits::apply( - HashElement{hash_ref()}, PolicyTraits::element(slots_ + i)); - const FindInfo target = find_first_non_full(ctrl_, hash, capacity_); - const size_t new_i = target.offset; + size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, + PolicyTraits::element(slots_ + i)); + auto target = find_first_non_full(hash); + size_t new_i = target.offset; total_probe_length += target.probe_length; // Verify if the old and new i fall within the same group wrt the hash. // If they do, we don't need to move the object as it falls already in the // best probe we can. - const size_t probe_offset = probe(ctrl_, hash, capacity_).offset(); - const auto probe_index = [probe_offset, this](size_t pos) { - return ((pos - probe_offset) & capacity_) / Group::kWidth; + const auto probe_index = [&](size_t pos) { + return ((pos - probe(hash).offset()) & capacity_) / Group::kWidth; }; // Element doesn't move. if (ABSL_PREDICT_TRUE(probe_index(new_i) == probe_index(i))) { - SetCtrl(i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type)); + set_ctrl(i, H2(hash)); continue; } if (IsEmpty(ctrl_[new_i])) { // Transfer element to the empty spot. - // SetCtrl poisons/unpoisons the slots so we have to call it at the + // set_ctrl poisons/unpoisons the slots so we have to call it at the // right time. 
- SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type)); + set_ctrl(new_i, H2(hash)); PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slots_ + i); - SetCtrl(i, ctrl_t::kEmpty, capacity_, ctrl_, slots_, sizeof(slot_type)); + set_ctrl(i, kEmpty); } else { assert(IsDeleted(ctrl_[new_i])); - SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type)); + set_ctrl(new_i, H2(hash)); // Until we are done rehashing, DELETED marks previously FULL slots. // Swap i and new_i elements. PolicyTraits::transfer(&alloc_ref(), slot, slots_ + i); @@ -2065,61 +1617,14 @@ class raw_hash_set { } } reset_growth_left(); - infoz().RecordRehash(total_probe_length); + infoz_.RecordRehash(total_probe_length); } - // Called whenever the table *might* need to conditionally grow. - // - // This function is an optimization opportunity to perform a rehash even when - // growth is unnecessary, because vacating tombstones is beneficial for - // performance in the long-run. void rehash_and_grow_if_necessary() { if (capacity_ == 0) { resize(1); - } else if (capacity_ > Group::kWidth && - // Do these calcuations in 64-bit to avoid overflow. - size() * uint64_t{32} <= capacity_ * uint64_t{25}) { + } else if (size() <= CapacityToGrowth(capacity()) / 2) { // Squash DELETED without growing if there is enough capacity. - // - // Rehash in place if the current size is <= 25/32 of capacity_. - // Rationale for such a high factor: 1) drop_deletes_without_resize() is - // faster than resize, and 2) it takes quite a bit of work to add - // tombstones. In the worst case, seems to take approximately 4 - // insert/erase pairs to create a single tombstone and so if we are - // rehashing because of tombstones, we can afford to rehash-in-place as - // long as we are reclaiming at least 1/8 the capacity without doing more - // than 2X the work. (Where "work" is defined to be size() for rehashing - // or rehashing in place, and 1 for an insert or erase.) 
But rehashing in - // place is faster per operation than inserting or even doubling the size - // of the table, so we actually afford to reclaim even less space from a - // resize-in-place. The decision is to rehash in place if we can reclaim - // at about 1/8th of the usable capacity (specifically 3/28 of the - // capacity) which means that the total cost of rehashing will be a small - // fraction of the total work. - // - // Here is output of an experiment using the BM_CacheInSteadyState - // benchmark running the old case (where we rehash-in-place only if we can - // reclaim at least 7/16*capacity_) vs. this code (which rehashes in place - // if we can recover 3/32*capacity_). - // - // Note that although in the worst-case number of rehashes jumped up from - // 15 to 190, but the number of operations per second is almost the same. - // - // Abridged output of running BM_CacheInSteadyState benchmark from - // raw_hash_set_benchmark. N is the number of insert/erase operations. - // - // | OLD (recover >= 7/16 | NEW (recover >= 3/32) - // size | N/s LoadFactor NRehashes | N/s LoadFactor NRehashes - // 448 | 145284 0.44 18 | 140118 0.44 19 - // 493 | 152546 0.24 11 | 151417 0.48 28 - // 538 | 151439 0.26 11 | 151152 0.53 38 - // 583 | 151765 0.28 11 | 150572 0.57 50 - // 628 | 150241 0.31 11 | 150853 0.61 66 - // 672 | 149602 0.33 12 | 150110 0.66 90 - // 717 | 149998 0.35 12 | 149531 0.70 129 - // 762 | 149836 0.37 13 | 148559 0.74 190 - // 807 | 149736 0.39 14 | 151107 0.39 14 - // 852 | 150204 0.42 15 | 151019 0.42 15 drop_deletes_without_resize(); } else { // Otherwise grow the container. 
@@ -2129,21 +1634,56 @@ class raw_hash_set { bool has_element(const value_type& elem) const { size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, elem); - auto seq = probe(ctrl_, hash, capacity_); + auto seq = probe(hash); while (true) { Group g{ctrl_ + seq.offset()}; - for (uint32_t i : g.Match(H2(hash))) { + for (int i : g.Match(H2(hash))) { if (ABSL_PREDICT_TRUE(PolicyTraits::element(slots_ + seq.offset(i)) == elem)) return true; } - if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return false; + if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return false; seq.next(); - assert(seq.index() <= capacity_ && "full table!"); + assert(seq.index() < capacity_ && "full table!"); } return false; } + // Probes the raw_hash_set with the probe sequence for hash and returns the + // pointer to the first empty or deleted slot. + // NOTE: this function must work with tables having both kEmpty and kDelete + // in one group. Such tables appears during drop_deletes_without_resize. + // + // This function is very useful when insertions happen and: + // - the input is already a set + // - there are enough slots + // - the element with the hash is not in the table + struct FindInfo { + size_t offset; + size_t probe_length; + }; + FindInfo find_first_non_full(size_t hash) { + auto seq = probe(hash); + while (true) { + Group g{ctrl_ + seq.offset()}; + auto mask = g.MatchEmptyOrDeleted(); + if (mask) { +#if !defined(NDEBUG) + // We want to add entropy even when ASLR is not enabled. + // In debug build we will randomly insert in either the front or back of + // the group. + // TODO(kfm,sbenza): revisit after we do unconditional mixing + if (!is_small() && ShouldInsertBackwards(hash, ctrl_)) { + return {seq.offset(mask.HighestBitSet()), seq.index()}; + } +#endif + return {seq.offset(mask.LowestBitSet()), seq.index()}; + } + seq.next(); + assert(seq.index() < capacity_ && "full table!"); + } + } + // TODO(alkis): Optimize this assuming *this and that don't overlap. 
raw_hash_set& move_assign(raw_hash_set&& that, std::true_type) { raw_hash_set tmp(std::move(that)); @@ -2157,45 +1697,36 @@ class raw_hash_set { } protected: - // Attempts to find `key` in the table; if it isn't found, returns a slot that - // the value can be inserted into, with the control byte already set to - // `key`'s H2. template std::pair find_or_prepare_insert(const K& key) { - prefetch_heap_block(); auto hash = hash_ref()(key); - auto seq = probe(ctrl_, hash, capacity_); + auto seq = probe(hash); while (true) { Group g{ctrl_ + seq.offset()}; - for (uint32_t i : g.Match(H2(hash))) { + for (int i : g.Match(H2(hash))) { if (ABSL_PREDICT_TRUE(PolicyTraits::apply( EqualElement{key, eq_ref()}, PolicyTraits::element(slots_ + seq.offset(i))))) return {seq.offset(i), false}; } - if (ABSL_PREDICT_TRUE(g.MaskEmpty())) break; + if (ABSL_PREDICT_TRUE(g.MatchEmpty())) break; seq.next(); - assert(seq.index() <= capacity_ && "full table!"); + assert(seq.index() < capacity_ && "full table!"); } return {prepare_insert(hash), true}; } - // Given the hash of a value not currently in the table, finds the next - // viable slot index to insert it at. - // - // REQUIRES: At least one non-full slot available. 
size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE { - auto target = find_first_non_full(ctrl_, hash, capacity_); + auto target = find_first_non_full(hash); if (ABSL_PREDICT_FALSE(growth_left() == 0 && !IsDeleted(ctrl_[target.offset]))) { rehash_and_grow_if_necessary(); - target = find_first_non_full(ctrl_, hash, capacity_); + target = find_first_non_full(hash); } ++size_; growth_left() -= IsEmpty(ctrl_[target.offset]); - SetCtrl(target.offset, H2(hash), capacity_, ctrl_, slots_, - sizeof(slot_type)); - infoz().RecordInsert(hash, target.probe_length); + set_ctrl(target.offset, H2(hash)); + infoz_.RecordInsert(hash, target.probe_length); return target.offset; } @@ -2223,78 +1754,86 @@ class raw_hash_set { private: friend struct RawHashSetTestOnlyAccess; + probe_seq probe(size_t hash) const { + return probe_seq(H1(hash, ctrl_), capacity_); + } + + // Reset all ctrl bytes back to kEmpty, except the sentinel. + void reset_ctrl() { + std::memset(ctrl_, kEmpty, capacity_ + Group::kWidth); + ctrl_[capacity_] = kSentinel; + SanitizerPoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_); + } + void reset_growth_left() { growth_left() = CapacityToGrowth(capacity()) - size_; } - // The number of slots we can still fill without needing to rehash. - // - // This is stored separately due to tombstones: we do not include tombstones - // in the growth capacity, because we'd like to rehash when the table is - // otherwise filled with tombstones: otherwise, probe sequences might get - // unacceptably long without triggering a rehash. Callers can also force a - // rehash via the standard `rehash(0)`, which will recompute this value as a - // side-effect. - // - // See `CapacityToGrowth()`. - size_t& growth_left() { return settings_.template get<0>(); } + // Sets the control byte, and if `i < Group::kWidth`, set the cloned byte at + // the end too. 
+ void set_ctrl(size_t i, ctrl_t h) { + assert(i < capacity_); - // Prefetch the heap-allocated memory region to resolve potential TLB misses. - // This is intended to overlap with execution of calculating the hash for a - // key. - void prefetch_heap_block() const { - base_internal::PrefetchT2(ctrl_); + if (IsFull(h)) { + SanitizerUnpoisonObject(slots_ + i); + } else { + SanitizerPoisonObject(slots_ + i); + } + + ctrl_[i] = h; + ctrl_[((i - Group::kWidth) & capacity_) + 1 + + ((Group::kWidth - 1) & capacity_)] = h; } - HashtablezInfoHandle& infoz() { return settings_.template get<1>(); } + size_t& growth_left() { return settings_.template get<0>(); } - hasher& hash_ref() { return settings_.template get<2>(); } - const hasher& hash_ref() const { return settings_.template get<2>(); } - key_equal& eq_ref() { return settings_.template get<3>(); } - const key_equal& eq_ref() const { return settings_.template get<3>(); } - allocator_type& alloc_ref() { return settings_.template get<4>(); } + // The representation of the object has two modes: + // - small: For capacities < kWidth-1 + // - large: For the rest. + // + // Differences: + // - In small mode we are able to use the whole capacity. The extra control + // bytes give us at least one "empty" control byte to stop the iteration. + // This is important to make 1 a valid capacity. + // + // - In small mode only the first `capacity()` control bytes after the + // sentinel are valid. The rest contain dummy kEmpty values that do not + // represent a real slot. This is important to take into account on + // find_first_non_full(), where we never try ShouldInsertBackwards() for + // small tables. 
+ bool is_small() const { return capacity_ < Group::kWidth - 1; } + + hasher& hash_ref() { return settings_.template get<1>(); } + const hasher& hash_ref() const { return settings_.template get<1>(); } + key_equal& eq_ref() { return settings_.template get<2>(); } + const key_equal& eq_ref() const { return settings_.template get<2>(); } + allocator_type& alloc_ref() { return settings_.template get<3>(); } const allocator_type& alloc_ref() const { - return settings_.template get<4>(); + return settings_.template get<3>(); } // TODO(alkis): Investigate removing some of these fields: // - ctrl/slots can be derived from each other // - size can be moved into the slot array - - // The control bytes (and, also, a pointer to the base of the backing array). - // - // This contains `capacity_ + 1 + NumClonedBytes()` entries, even - // when the table is empty (hence EmptyGroup). - ctrl_t* ctrl_ = EmptyGroup(); - // The beginning of the slots, located at `SlotOffset()` bytes after - // `ctrl_`. May be null for empty tables. - slot_type* slots_ = nullptr; - - // The number of filled slots. - size_t size_ = 0; - - // The total number of available slots. - size_t capacity_ = 0; - absl::container_internal::CompressedTuple - settings_{0u, HashtablezInfoHandle{}, hasher{}, key_equal{}, - allocator_type{}}; + settings_{0, hasher{}, key_equal{}, allocator_type{}}; }; // Erases all elements that satisfy the predicate `pred` from the container `c`. 
template -typename raw_hash_set::size_type EraseIf( - Predicate& pred, raw_hash_set* c) { - const auto initial_size = c->size(); +void EraseIf(Predicate pred, raw_hash_set* c) { for (auto it = c->begin(), last = c->end(); it != last;) { - if (pred(*it)) { - c->erase(it++); - } else { - ++it; + auto copy_it = it++; + if (pred(*copy_it)) { + c->erase(copy_it); } } - return initial_size - c->size(); } namespace hashtable_debug_internal { @@ -2307,10 +1846,10 @@ struct HashtableDebugAccess> { const typename Set::key_type& key) { size_t num_probes = 0; size_t hash = set.hash_ref()(key); - auto seq = probe(set.ctrl_, hash, set.capacity_); + auto seq = set.probe(hash); while (true) { container_internal::Group g{set.ctrl_ + seq.offset()}; - for (uint32_t i : g.Match(container_internal::H2(hash))) { + for (int i : g.Match(container_internal::H2(hash))) { if (Traits::apply( typename Set::template EqualElement{ key, set.eq_ref()}, @@ -2318,7 +1857,7 @@ struct HashtableDebugAccess> { return num_probes; ++num_probes; } - if (g.MaskEmpty()) return num_probes; + if (g.MatchEmpty()) return num_probes; seq.next(); ++num_probes; } @@ -2327,7 +1866,8 @@ struct HashtableDebugAccess> { static size_t AllocatedByteSize(const Set& c) { size_t capacity = c.capacity_; if (capacity == 0) return 0; - size_t m = AllocSize(capacity, sizeof(Slot), alignof(Slot)); + auto layout = Set::MakeLayout(capacity); + size_t m = layout.AllocSize(); size_t per_slot = Traits::space_used(static_cast(nullptr)); if (per_slot != ~size_t{}) { @@ -2345,8 +1885,8 @@ struct HashtableDebugAccess> { static size_t LowerBoundAllocatedByteSize(size_t size) { size_t capacity = GrowthToLowerboundCapacity(size); if (capacity == 0) return 0; - size_t m = - AllocSize(NormalizeCapacity(capacity), sizeof(Slot), alignof(Slot)); + auto layout = Set::MakeLayout(NormalizeCapacity(capacity)); + size_t m = layout.AllocSize(); size_t per_slot = Traits::space_used(static_cast(nullptr)); if (per_slot != ~size_t{}) { m += per_slot * 
size; @@ -2360,6 +1900,4 @@ struct HashtableDebugAccess> { ABSL_NAMESPACE_END } // namespace absl -#undef ABSL_INTERNAL_ASSERT_IS_FULL - #endif // ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_ diff --git a/abseil-cpp/absl/container/internal/raw_hash_set_allocator_test.cc b/abseil-cpp/absl/container/internal/raw_hash_set_allocator_test.cc index e73f53fd..1a036085 100644 --- a/abseil-cpp/absl/container/internal/raw_hash_set_allocator_test.cc +++ b/abseil-cpp/absl/container/internal/raw_hash_set_allocator_test.cc @@ -466,9 +466,6 @@ class PAlloc { size_t id_ = std::numeric_limits::max(); }; -// This doesn't compile with GCC 5.4 and 5.5 due to a bug in noexcept handing. -#if !defined(__GNUC__) || __GNUC__ != 5 || (__GNUC_MINOR__ != 4 && \ - __GNUC_MINOR__ != 5) TEST(NoPropagateOn, Swap) { using PA = PAlloc; using Table = raw_hash_set, PA>; @@ -478,7 +475,6 @@ TEST(NoPropagateOn, Swap) { EXPECT_EQ(t1.get_allocator(), PA(1)); EXPECT_EQ(t2.get_allocator(), PA(2)); } -#endif TEST(NoPropagateOn, CopyConstruct) { using PA = PAlloc; diff --git a/abseil-cpp/absl/container/internal/raw_hash_set_benchmark.cc b/abseil-cpp/absl/container/internal/raw_hash_set_benchmark.cc deleted file mode 100644 index 47dc9048..00000000 --- a/abseil-cpp/absl/container/internal/raw_hash_set_benchmark.cc +++ /dev/null @@ -1,443 +0,0 @@ -// Copyright 2018 The Abseil Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#include "absl/container/internal/raw_hash_set.h" - -#include -#include - -#include "absl/base/internal/raw_logging.h" -#include "absl/container/internal/hash_function_defaults.h" -#include "absl/strings/str_format.h" -#include "benchmark/benchmark.h" - -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace container_internal { - -struct RawHashSetTestOnlyAccess { - template - static auto GetSlots(const C& c) -> decltype(c.slots_) { - return c.slots_; - } -}; - -namespace { - -struct IntPolicy { - using slot_type = int64_t; - using key_type = int64_t; - using init_type = int64_t; - - static void construct(void*, int64_t* slot, int64_t v) { *slot = v; } - static void destroy(void*, int64_t*) {} - static void transfer(void*, int64_t* new_slot, int64_t* old_slot) { - *new_slot = *old_slot; - } - - static int64_t& element(slot_type* slot) { return *slot; } - - template - static auto apply(F&& f, int64_t x) -> decltype(std::forward(f)(x, x)) { - return std::forward(f)(x, x); - } -}; - -class StringPolicy { - template ::value>::type> - decltype(std::declval()( - std::declval(), std::piecewise_construct, - std::declval>(), - std::declval())) static apply_impl(F&& f, - std::pair, V> p) { - const absl::string_view& key = std::get<0>(p.first); - return std::forward(f)(key, std::piecewise_construct, std::move(p.first), - std::move(p.second)); - } - - public: - struct slot_type { - struct ctor {}; - - template - slot_type(ctor, Ts&&... ts) : pair(std::forward(ts)...) {} - - std::pair pair; - }; - - using key_type = std::string; - using init_type = std::pair; - - template - static void construct(allocator_type* alloc, slot_type* slot, Args... 
args) { - std::allocator_traits::construct( - *alloc, slot, typename slot_type::ctor(), std::forward(args)...); - } - - template - static void destroy(allocator_type* alloc, slot_type* slot) { - std::allocator_traits::destroy(*alloc, slot); - } - - template - static void transfer(allocator_type* alloc, slot_type* new_slot, - slot_type* old_slot) { - construct(alloc, new_slot, std::move(old_slot->pair)); - destroy(alloc, old_slot); - } - - static std::pair& element(slot_type* slot) { - return slot->pair; - } - - template - static auto apply(F&& f, Args&&... args) - -> decltype(apply_impl(std::forward(f), - PairArgs(std::forward(args)...))) { - return apply_impl(std::forward(f), - PairArgs(std::forward(args)...)); - } -}; - -struct StringHash : container_internal::hash_default_hash { - using is_transparent = void; -}; -struct StringEq : std::equal_to { - using is_transparent = void; -}; - -struct StringTable - : raw_hash_set> { - using Base = typename StringTable::raw_hash_set; - StringTable() {} - using Base::Base; -}; - -struct IntTable - : raw_hash_set, - std::equal_to, std::allocator> { - using Base = typename IntTable::raw_hash_set; - IntTable() {} - using Base::Base; -}; - -struct string_generator { - template - std::string operator()(RNG& rng) const { - std::string res; - res.resize(12); - std::uniform_int_distribution printable_ascii(0x20, 0x7E); - std::generate(res.begin(), res.end(), [&] { return printable_ascii(rng); }); - return res; - } - - size_t size; -}; - -// Model a cache in steady state. -// -// On a table of size N, keep deleting the LRU entry and add a random one. 
-void BM_CacheInSteadyState(benchmark::State& state) { - std::random_device rd; - std::mt19937 rng(rd()); - string_generator gen{12}; - StringTable t; - std::deque keys; - while (t.size() < state.range(0)) { - auto x = t.emplace(gen(rng), gen(rng)); - if (x.second) keys.push_back(x.first->first); - } - ABSL_RAW_CHECK(state.range(0) >= 10, ""); - while (state.KeepRunning()) { - // Some cache hits. - std::deque::const_iterator it; - for (int i = 0; i != 90; ++i) { - if (i % 10 == 0) it = keys.end(); - ::benchmark::DoNotOptimize(t.find(*--it)); - } - // Some cache misses. - for (int i = 0; i != 10; ++i) ::benchmark::DoNotOptimize(t.find(gen(rng))); - ABSL_RAW_CHECK(t.erase(keys.front()), keys.front().c_str()); - keys.pop_front(); - while (true) { - auto x = t.emplace(gen(rng), gen(rng)); - if (x.second) { - keys.push_back(x.first->first); - break; - } - } - } - state.SetItemsProcessed(state.iterations()); - state.SetLabel(absl::StrFormat("load_factor=%.2f", t.load_factor())); -} - -template -void CacheInSteadyStateArgs(Benchmark* bm) { - // The default. - const float max_load_factor = 0.875; - // When the cache is at the steady state, the probe sequence will equal - // capacity if there is no reclamation of deleted slots. Pick a number large - // enough to make the benchmark slow for that case. - const size_t capacity = 1 << 10; - - // Check N data points to cover load factors in [0.4, 0.8). 
- const size_t kNumPoints = 10; - for (size_t i = 0; i != kNumPoints; ++i) - bm->Arg(std::ceil( - capacity * (max_load_factor + i * max_load_factor / kNumPoints) / 2)); -} -BENCHMARK(BM_CacheInSteadyState)->Apply(CacheInSteadyStateArgs); - -void BM_EndComparison(benchmark::State& state) { - std::random_device rd; - std::mt19937 rng(rd()); - string_generator gen{12}; - StringTable t; - while (t.size() < state.range(0)) { - t.emplace(gen(rng), gen(rng)); - } - - for (auto _ : state) { - for (auto it = t.begin(); it != t.end(); ++it) { - benchmark::DoNotOptimize(it); - benchmark::DoNotOptimize(t); - benchmark::DoNotOptimize(it != t.end()); - } - } -} -BENCHMARK(BM_EndComparison)->Arg(400); - -void BM_CopyCtor(benchmark::State& state) { - std::random_device rd; - std::mt19937 rng(rd()); - IntTable t; - std::uniform_int_distribution dist(0, ~uint64_t{}); - - while (t.size() < state.range(0)) { - t.emplace(dist(rng)); - } - - for (auto _ : state) { - IntTable t2 = t; - benchmark::DoNotOptimize(t2); - } -} -BENCHMARK(BM_CopyCtor)->Range(128, 4096); - -void BM_CopyAssign(benchmark::State& state) { - std::random_device rd; - std::mt19937 rng(rd()); - IntTable t; - std::uniform_int_distribution dist(0, ~uint64_t{}); - while (t.size() < state.range(0)) { - t.emplace(dist(rng)); - } - - IntTable t2; - for (auto _ : state) { - t2 = t; - benchmark::DoNotOptimize(t2); - } -} -BENCHMARK(BM_CopyAssign)->Range(128, 4096); - -void BM_RangeCtor(benchmark::State& state) { - std::random_device rd; - std::mt19937 rng(rd()); - std::uniform_int_distribution dist(0, ~uint64_t{}); - std::vector values; - const size_t desired_size = state.range(0); - while (values.size() < desired_size) { - values.emplace_back(dist(rng)); - } - - for (auto unused : state) { - IntTable t{values.begin(), values.end()}; - benchmark::DoNotOptimize(t); - } -} -BENCHMARK(BM_RangeCtor)->Range(128, 65536); - -void BM_NoOpReserveIntTable(benchmark::State& state) { - IntTable t; - t.reserve(100000); - for (auto _ : 
state) { - benchmark::DoNotOptimize(t); - t.reserve(100000); - } -} -BENCHMARK(BM_NoOpReserveIntTable); - -void BM_NoOpReserveStringTable(benchmark::State& state) { - StringTable t; - t.reserve(100000); - for (auto _ : state) { - benchmark::DoNotOptimize(t); - t.reserve(100000); - } -} -BENCHMARK(BM_NoOpReserveStringTable); - -void BM_ReserveIntTable(benchmark::State& state) { - int reserve_size = state.range(0); - for (auto _ : state) { - state.PauseTiming(); - IntTable t; - state.ResumeTiming(); - benchmark::DoNotOptimize(t); - t.reserve(reserve_size); - } -} -BENCHMARK(BM_ReserveIntTable)->Range(128, 4096); - -void BM_ReserveStringTable(benchmark::State& state) { - int reserve_size = state.range(0); - for (auto _ : state) { - state.PauseTiming(); - StringTable t; - state.ResumeTiming(); - benchmark::DoNotOptimize(t); - t.reserve(reserve_size); - } -} -BENCHMARK(BM_ReserveStringTable)->Range(128, 4096); - -// Like std::iota, except that ctrl_t doesn't support operator++. -template -void Iota(CtrlIter begin, CtrlIter end, int value) { - for (; begin != end; ++begin, ++value) { - *begin = static_cast(value); - } -} - -void BM_Group_Match(benchmark::State& state) { - std::array group; - Iota(group.begin(), group.end(), -4); - Group g{group.data()}; - h2_t h = 1; - for (auto _ : state) { - ::benchmark::DoNotOptimize(h); - ::benchmark::DoNotOptimize(g); - ::benchmark::DoNotOptimize(g.Match(h)); - } -} -BENCHMARK(BM_Group_Match); - -void BM_Group_MaskEmpty(benchmark::State& state) { - std::array group; - Iota(group.begin(), group.end(), -4); - Group g{group.data()}; - for (auto _ : state) { - ::benchmark::DoNotOptimize(g); - ::benchmark::DoNotOptimize(g.MaskEmpty()); - } -} -BENCHMARK(BM_Group_MaskEmpty); - -void BM_Group_MaskEmptyOrDeleted(benchmark::State& state) { - std::array group; - Iota(group.begin(), group.end(), -4); - Group g{group.data()}; - for (auto _ : state) { - ::benchmark::DoNotOptimize(g); - ::benchmark::DoNotOptimize(g.MaskEmptyOrDeleted()); - } -} 
-BENCHMARK(BM_Group_MaskEmptyOrDeleted); - -void BM_Group_CountLeadingEmptyOrDeleted(benchmark::State& state) { - std::array group; - Iota(group.begin(), group.end(), -2); - Group g{group.data()}; - for (auto _ : state) { - ::benchmark::DoNotOptimize(g); - ::benchmark::DoNotOptimize(g.CountLeadingEmptyOrDeleted()); - } -} -BENCHMARK(BM_Group_CountLeadingEmptyOrDeleted); - -void BM_Group_MatchFirstEmptyOrDeleted(benchmark::State& state) { - std::array group; - Iota(group.begin(), group.end(), -2); - Group g{group.data()}; - for (auto _ : state) { - ::benchmark::DoNotOptimize(g); - ::benchmark::DoNotOptimize(g.MaskEmptyOrDeleted().LowestBitSet()); - } -} -BENCHMARK(BM_Group_MatchFirstEmptyOrDeleted); - -void BM_DropDeletes(benchmark::State& state) { - constexpr size_t capacity = (1 << 20) - 1; - std::vector ctrl(capacity + 1 + Group::kWidth); - ctrl[capacity] = ctrl_t::kSentinel; - std::vector pattern = {ctrl_t::kEmpty, static_cast(2), - ctrl_t::kDeleted, static_cast(2), - ctrl_t::kEmpty, static_cast(1), - ctrl_t::kDeleted}; - for (size_t i = 0; i != capacity; ++i) { - ctrl[i] = pattern[i % pattern.size()]; - } - while (state.KeepRunning()) { - state.PauseTiming(); - std::vector ctrl_copy = ctrl; - state.ResumeTiming(); - ConvertDeletedToEmptyAndFullToDeleted(ctrl_copy.data(), capacity); - ::benchmark::DoNotOptimize(ctrl_copy[capacity]); - } -} -BENCHMARK(BM_DropDeletes); - -} // namespace -} // namespace container_internal -ABSL_NAMESPACE_END -} // namespace absl - -// These methods are here to make it easy to examine the assembly for targeted -// parts of the API. 
-auto CodegenAbslRawHashSetInt64Find(absl::container_internal::IntTable* table, - int64_t key) -> decltype(table->find(key)) { - return table->find(key); -} - -bool CodegenAbslRawHashSetInt64FindNeEnd( - absl::container_internal::IntTable* table, int64_t key) { - return table->find(key) != table->end(); -} - -auto CodegenAbslRawHashSetInt64Insert(absl::container_internal::IntTable* table, - int64_t key) - -> decltype(table->insert(key)) { - return table->insert(key); -} - -bool CodegenAbslRawHashSetInt64Contains( - absl::container_internal::IntTable* table, int64_t key) { - return table->contains(key); -} - -void CodegenAbslRawHashSetInt64Iterate( - absl::container_internal::IntTable* table) { - for (auto x : *table) benchmark::DoNotOptimize(x); -} - -int odr = - (::benchmark::DoNotOptimize(std::make_tuple( - &CodegenAbslRawHashSetInt64Find, &CodegenAbslRawHashSetInt64FindNeEnd, - &CodegenAbslRawHashSetInt64Insert, - &CodegenAbslRawHashSetInt64Contains, - &CodegenAbslRawHashSetInt64Iterate)), - 1); diff --git a/abseil-cpp/absl/container/internal/raw_hash_set_probe_benchmark.cc b/abseil-cpp/absl/container/internal/raw_hash_set_probe_benchmark.cc deleted file mode 100644 index 7169a2e2..00000000 --- a/abseil-cpp/absl/container/internal/raw_hash_set_probe_benchmark.cc +++ /dev/null @@ -1,590 +0,0 @@ -// Copyright 2018 The Abseil Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// Generates probe length statistics for many combinations of key types and key -// distributions, all using the default hash function for swisstable. - -#include -#include // NOLINT -#include - -#include "absl/container/flat_hash_map.h" -#include "absl/container/internal/hash_function_defaults.h" -#include "absl/container/internal/hashtable_debug.h" -#include "absl/container/internal/raw_hash_set.h" -#include "absl/random/distributions.h" -#include "absl/random/random.h" -#include "absl/strings/str_cat.h" -#include "absl/strings/str_format.h" -#include "absl/strings/string_view.h" -#include "absl/strings/strip.h" - -namespace { - -enum class OutputStyle { kRegular, kBenchmark }; - -// The --benchmark command line flag. -// This is populated from main(). -// When run in "benchmark" mode, we have different output. This allows -// A/B comparisons with tools like `benchy`. -absl::string_view benchmarks; - -OutputStyle output() { - return !benchmarks.empty() ? OutputStyle::kBenchmark : OutputStyle::kRegular; -} - -template -struct Policy { - using slot_type = T; - using key_type = T; - using init_type = T; - - template - static void construct(allocator_type* alloc, slot_type* slot, - const Arg& arg) { - std::allocator_traits::construct(*alloc, slot, arg); - } - - template - static void destroy(allocator_type* alloc, slot_type* slot) { - std::allocator_traits::destroy(*alloc, slot); - } - - static slot_type& element(slot_type* slot) { return *slot; } - - template - static auto apply(F&& f, const slot_type& arg) - -> decltype(std::forward(f)(arg, arg)) { - return std::forward(f)(arg, arg); - } -}; - -absl::BitGen& GlobalBitGen() { - static auto* value = new absl::BitGen; - return *value; -} - -// Keeps a pool of allocations and randomly gives one out. -// This introduces more randomization to the addresses given to swisstable and -// should help smooth out this factor from probe length calculation. 
-template -class RandomizedAllocator { - public: - using value_type = T; - - RandomizedAllocator() = default; - template - RandomizedAllocator(RandomizedAllocator) {} // NOLINT - - static T* allocate(size_t n) { - auto& pointers = GetPointers(n); - // Fill the pool - while (pointers.size() < kRandomPool) { - pointers.push_back(std::allocator{}.allocate(n)); - } - - // Choose a random one. - size_t i = absl::Uniform(GlobalBitGen(), 0, pointers.size()); - T* result = pointers[i]; - pointers[i] = pointers.back(); - pointers.pop_back(); - return result; - } - - static void deallocate(T* p, size_t n) { - // Just put it back on the pool. No need to release the memory. - GetPointers(n).push_back(p); - } - - private: - // We keep at least kRandomPool allocations for each size. - static constexpr size_t kRandomPool = 20; - - static std::vector& GetPointers(size_t n) { - static auto* m = new absl::flat_hash_map>(); - return (*m)[n]; - } -}; - -template -struct DefaultHash { - using type = absl::container_internal::hash_default_hash; -}; - -template -using DefaultHashT = typename DefaultHash::type; - -template -struct Table : absl::container_internal::raw_hash_set< - Policy, DefaultHashT, - absl::container_internal::hash_default_eq, - RandomizedAllocator> {}; - -struct LoadSizes { - size_t min_load; - size_t max_load; -}; - -LoadSizes GetMinMaxLoadSizes() { - static const auto sizes = [] { - Table t; - - // First, fill enough to have a good distribution. - constexpr size_t kMinSize = 10000; - while (t.size() < kMinSize) t.insert(t.size()); - - const auto reach_min_load_factor = [&] { - const double lf = t.load_factor(); - while (lf <= t.load_factor()) t.insert(t.size()); - }; - - // Then, insert until we reach min load factor. - reach_min_load_factor(); - const size_t min_load_size = t.size(); - - // Keep going until we hit min load factor again, then go back one. 
- t.insert(t.size()); - reach_min_load_factor(); - - return LoadSizes{min_load_size, t.size() - 1}; - }(); - return sizes; -} - -struct Ratios { - double min_load; - double avg_load; - double max_load; -}; - -// See absl/container/internal/hashtable_debug.h for details on -// probe length calculation. -template -Ratios CollectMeanProbeLengths() { - const auto min_max_sizes = GetMinMaxLoadSizes(); - - ElemFn elem; - using Key = decltype(elem()); - Table t; - - Ratios result; - while (t.size() < min_max_sizes.min_load) t.insert(elem()); - result.min_load = - absl::container_internal::GetHashtableDebugProbeSummary(t).mean; - - while (t.size() < (min_max_sizes.min_load + min_max_sizes.max_load) / 2) - t.insert(elem()); - result.avg_load = - absl::container_internal::GetHashtableDebugProbeSummary(t).mean; - - while (t.size() < min_max_sizes.max_load) t.insert(elem()); - result.max_load = - absl::container_internal::GetHashtableDebugProbeSummary(t).mean; - - return result; -} - -template -uintptr_t PointerForAlignment() { - alignas(Align) static constexpr uintptr_t kInitPointer = 0; - return reinterpret_cast(&kInitPointer); -} - -// This incomplete type is used for testing hash of pointers of different -// alignments. -// NOTE: We are generating invalid pointer values on the fly with -// reinterpret_cast. There are not "safely derived" pointers so using them is -// technically UB. It is unlikely to be a problem, though. -template -struct Ptr; - -template -Ptr* MakePtr(uintptr_t v) { - if (sizeof(v) == 8) { - constexpr int kCopyBits = 16; - // Ensure high bits are all the same. 
- v = static_cast(static_cast(v << kCopyBits) >> - kCopyBits); - } - return reinterpret_cast*>(v); -} - -struct IntIdentity { - uint64_t i; - friend bool operator==(IntIdentity a, IntIdentity b) { return a.i == b.i; } - IntIdentity operator++(int) { return IntIdentity{i++}; } -}; - -template -struct PtrIdentity { - explicit PtrIdentity(uintptr_t val = PointerForAlignment()) : i(val) {} - uintptr_t i; - friend bool operator==(PtrIdentity a, PtrIdentity b) { return a.i == b.i; } - PtrIdentity operator++(int) { - PtrIdentity p(i); - i += Align; - return p; - } -}; - -constexpr char kStringFormat[] = "/path/to/file/name-%07d-of-9999999.txt"; - -template -struct String { - std::string value; - static std::string Make(uint32_t v) { - return {small ? absl::StrCat(v) : absl::StrFormat(kStringFormat, v)}; - } -}; - -template <> -struct DefaultHash { - struct type { - size_t operator()(IntIdentity t) const { return t.i; } - }; -}; - -template -struct DefaultHash> { - struct type { - size_t operator()(PtrIdentity t) const { return t.i; } - }; -}; - -template -struct Sequential { - T operator()() const { return current++; } - mutable T current{}; -}; - -template -struct Sequential*> { - Ptr* operator()() const { - auto* result = MakePtr(current); - current += Align; - return result; - } - mutable uintptr_t current = PointerForAlignment(); -}; - - -template -struct Sequential> { - std::string operator()() const { return String::Make(current++); } - mutable uint32_t current = 0; -}; - -template -struct Sequential> { - mutable Sequential tseq; - mutable Sequential useq; - - using RealT = decltype(tseq()); - using RealU = decltype(useq()); - - mutable std::vector ts; - mutable std::vector us; - mutable size_t ti = 0, ui = 0; - - std::pair operator()() const { - std::pair value{get_t(), get_u()}; - if (ti == 0) { - ti = ui + 1; - ui = 0; - } else { - --ti; - ++ui; - } - return value; - } - - RealT get_t() const { - while (ti >= ts.size()) ts.push_back(tseq()); - return ts[ti]; - } 
- - RealU get_u() const { - while (ui >= us.size()) us.push_back(useq()); - return us[ui]; - } -}; - -template -struct AlmostSequential { - mutable Sequential current; - - auto operator()() const -> decltype(current()) { - while (absl::Uniform(GlobalBitGen(), 0.0, 1.0) <= percent_skip / 100.) - current(); - return current(); - } -}; - -struct Uniform { - template - T operator()(T) const { - return absl::Uniform(absl::IntervalClosed, GlobalBitGen(), T{0}, ~T{0}); - } -}; - -struct Gaussian { - template - T operator()(T) const { - double d; - do { - d = absl::Gaussian(GlobalBitGen(), 1e6, 1e4); - } while (d <= 0 || d > std::numeric_limits::max() / 2); - return static_cast(d); - } -}; - -struct Zipf { - template - T operator()(T) const { - return absl::Zipf(GlobalBitGen(), std::numeric_limits::max(), 1.6); - } -}; - -template -struct Random { - T operator()() const { return Dist{}(T{}); } -}; - -template -struct Random*, Dist> { - Ptr* operator()() const { - return MakePtr(Random{}() * Align); - } -}; - -template -struct Random { - IntIdentity operator()() const { - return IntIdentity{Random{}()}; - } -}; - -template -struct Random, Dist> { - PtrIdentity operator()() const { - return PtrIdentity{Random{}() * Align}; - } -}; - -template -struct Random, Dist> { - std::string operator()() const { - return String::Make(Random{}()); - } -}; - -template -struct Random, Dist> { - auto operator()() const - -> decltype(std::make_pair(Random{}(), Random{}())) { - return std::make_pair(Random{}(), Random{}()); - } -}; - -template -std::string Name(); - -std::string Name(uint32_t*) { return "u32"; } -std::string Name(uint64_t*) { return "u64"; } -std::string Name(IntIdentity*) { return "IntIdentity"; } - -template -std::string Name(Ptr**) { - return absl::StrCat("Ptr", Align); -} - -template -std::string Name(PtrIdentity*) { - return absl::StrCat("PtrIdentity", Align); -} - -template -std::string Name(String*) { - return small ? 
"StrS" : "StrL"; -} - -template -std::string Name(std::pair*) { - if (output() == OutputStyle::kBenchmark) - return absl::StrCat("P_", Name(), "_", Name()); - return absl::StrCat("P<", Name(), ",", Name(), ">"); -} - -template -std::string Name(Sequential*) { - return "Sequential"; -} - -template -std::string Name(AlmostSequential*) { - return absl::StrCat("AlmostSeq_", P); -} - -template -std::string Name(Random*) { - return "UnifRand"; -} - -template -std::string Name(Random*) { - return "GausRand"; -} - -template -std::string Name(Random*) { - return "ZipfRand"; -} - -template -std::string Name() { - return Name(static_cast(nullptr)); -} - -constexpr int kNameWidth = 15; -constexpr int kDistWidth = 16; - -bool CanRunBenchmark(absl::string_view name) { - static std::regex* const filter = []() -> std::regex* { - return benchmarks.empty() || benchmarks == "all" - ? nullptr - : new std::regex(std::string(benchmarks)); - }(); - return filter == nullptr || std::regex_search(std::string(name), *filter); -} - -struct Result { - std::string name; - std::string dist_name; - Ratios ratios; -}; - -template -void RunForTypeAndDistribution(std::vector& results) { - std::string name = absl::StrCat(Name(), "/", Name()); - // We have to check against all three names (min/avg/max) before we run it. - // If any of them is enabled, we run it. - if (!CanRunBenchmark(absl::StrCat(name, "/min")) && - !CanRunBenchmark(absl::StrCat(name, "/avg")) && - !CanRunBenchmark(absl::StrCat(name, "/max"))) { - return; - } - results.push_back({Name(), Name(), CollectMeanProbeLengths()}); -} - -template -void RunForType(std::vector& results) { - RunForTypeAndDistribution>(results); - RunForTypeAndDistribution>(results); - RunForTypeAndDistribution>(results); - RunForTypeAndDistribution>(results); -#ifdef NDEBUG - // Disable these in non-opt mode because they take too long. 
- RunForTypeAndDistribution>(results); - RunForTypeAndDistribution>(results); -#endif // NDEBUG -} - -} // namespace - -int main(int argc, char** argv) { - // Parse the benchmark flags. Ignore all of them except the regex pattern. - for (int i = 1; i < argc; ++i) { - absl::string_view arg = argv[i]; - const auto next = [&] { return argv[std::min(i + 1, argc - 1)]; }; - - if (absl::ConsumePrefix(&arg, "--benchmark_filter")) { - if (arg == "") { - // --benchmark_filter X - benchmarks = next(); - } else if (absl::ConsumePrefix(&arg, "=")) { - // --benchmark_filter=X - benchmarks = arg; - } - } - - // Any --benchmark flag turns on the mode. - if (absl::ConsumePrefix(&arg, "--benchmark")) { - if (benchmarks.empty()) benchmarks="all"; - } - } - - std::vector results; - RunForType(results); - RunForType(results); - RunForType*>(results); - RunForType*>(results); - RunForType*>(results); - RunForType*>(results); - RunForType>(results); - RunForType>(results); - RunForType>(results); - RunForType>(results); - RunForType>(results); - RunForType>(results); - RunForType>(results); - RunForType>>(results); - RunForType, uint64_t>>(results); - RunForType>>(results); - RunForType, uint64_t>>(results); - - switch (output()) { - case OutputStyle::kRegular: - absl::PrintF("%-*s%-*s Min Avg Max\n%s\n", kNameWidth, - "Type", kDistWidth, "Distribution", - std::string(kNameWidth + kDistWidth + 10 * 3, '-')); - for (const auto& result : results) { - absl::PrintF("%-*s%-*s %8.4f %8.4f %8.4f\n", kNameWidth, result.name, - kDistWidth, result.dist_name, result.ratios.min_load, - result.ratios.avg_load, result.ratios.max_load); - } - break; - case OutputStyle::kBenchmark: { - absl::PrintF("{\n"); - absl::PrintF(" \"benchmarks\": [\n"); - absl::string_view comma; - for (const auto& result : results) { - auto print = [&](absl::string_view stat, double Ratios::*val) { - std::string name = - absl::StrCat(result.name, "/", result.dist_name, "/", stat); - // Check the regex again. 
We might had have enabled only one of the - // stats for the benchmark. - if (!CanRunBenchmark(name)) return; - absl::PrintF(" %s{\n", comma); - absl::PrintF(" \"cpu_time\": %f,\n", 1e9 * result.ratios.*val); - absl::PrintF(" \"real_time\": %f,\n", 1e9 * result.ratios.*val); - absl::PrintF(" \"iterations\": 1,\n"); - absl::PrintF(" \"name\": \"%s\",\n", name); - absl::PrintF(" \"time_unit\": \"ns\"\n"); - absl::PrintF(" }\n"); - comma = ","; - }; - print("min", &Ratios::min_load); - print("avg", &Ratios::avg_load); - print("max", &Ratios::max_load); - } - absl::PrintF(" ],\n"); - absl::PrintF(" \"context\": {\n"); - absl::PrintF(" }\n"); - absl::PrintF("}\n"); - break; - } - } - - return 0; -} diff --git a/abseil-cpp/absl/container/internal/raw_hash_set_test.cc b/abseil-cpp/absl/container/internal/raw_hash_set_test.cc index f77ffbc1..f5ae83c4 100644 --- a/abseil-cpp/absl/container/internal/raw_hash_set_test.cc +++ b/abseil-cpp/absl/container/internal/raw_hash_set_test.cc @@ -14,7 +14,6 @@ #include "absl/container/internal/raw_hash_set.h" -#include #include #include #include @@ -23,15 +22,12 @@ #include #include #include -#include -#include #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/base/attributes.h" #include "absl/base/config.h" #include "absl/base/internal/cycleclock.h" -#include "absl/base/internal/prefetch.h" #include "absl/base/internal/raw_logging.h" #include "absl/container/internal/container_memory.h" #include "absl/container/internal/hash_function_defaults.h" @@ -52,16 +48,14 @@ struct RawHashSetTestOnlyAccess { namespace { +using ::testing::DoubleNear; using ::testing::ElementsAre; -using ::testing::Eq; using ::testing::Ge; using ::testing::Lt; +using ::testing::Optional; using ::testing::Pair; using ::testing::UnorderedElementsAre; -// Convenience function to static cast to ctrl_t. 
-ctrl_t CtrlT(int i) { return static_cast(i); } - TEST(Util, NormalizeCapacity) { EXPECT_EQ(1, NormalizeCapacity(0)); EXPECT_EQ(1, NormalizeCapacity(1)); @@ -81,14 +75,8 @@ TEST(Util, GrowthAndCapacity) { for (size_t growth = 0; growth < 10000; ++growth) { SCOPED_TRACE(growth); size_t capacity = NormalizeCapacity(GrowthToLowerboundCapacity(growth)); - // The capacity is large enough for `growth`. + // The capacity is large enough for `growth` EXPECT_THAT(CapacityToGrowth(capacity), Ge(growth)); - // For (capacity+1) < kWidth, growth should equal capacity. - if (capacity + 1 < Group::kWidth) { - EXPECT_THAT(CapacityToGrowth(capacity), Eq(capacity)); - } else { - EXPECT_THAT(CapacityToGrowth(capacity), Lt(capacity)); - } if (growth != 0 && capacity > 1) { // There is no smaller capacity that works. EXPECT_THAT(CapacityToGrowth(capacity / 2), Lt(growth)); @@ -174,19 +162,15 @@ TEST(Group, EmptyGroup) { TEST(Group, Match) { if (Group::kWidth == 16) { - ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted, CtrlT(3), - ctrl_t::kEmpty, CtrlT(5), ctrl_t::kSentinel, CtrlT(7), - CtrlT(7), CtrlT(5), CtrlT(3), CtrlT(1), - CtrlT(1), CtrlT(1), CtrlT(1), CtrlT(1)}; + ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7, + 7, 5, 3, 1, 1, 1, 1, 1}; EXPECT_THAT(Group{group}.Match(0), ElementsAre()); EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 11, 12, 13, 14, 15)); EXPECT_THAT(Group{group}.Match(3), ElementsAre(3, 10)); EXPECT_THAT(Group{group}.Match(5), ElementsAre(5, 9)); EXPECT_THAT(Group{group}.Match(7), ElementsAre(7, 8)); } else if (Group::kWidth == 8) { - ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), CtrlT(2), - ctrl_t::kDeleted, CtrlT(2), CtrlT(1), - ctrl_t::kSentinel, CtrlT(1)}; + ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1}; EXPECT_THAT(Group{group}.Match(0), ElementsAre()); EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 5, 7)); EXPECT_THAT(Group{group}.Match(2), ElementsAre(2, 4)); @@ -195,39 +179,27 @@ TEST(Group, Match) { } } 
-TEST(Group, MaskEmpty) { +TEST(Group, MatchEmpty) { if (Group::kWidth == 16) { - ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted, CtrlT(3), - ctrl_t::kEmpty, CtrlT(5), ctrl_t::kSentinel, CtrlT(7), - CtrlT(7), CtrlT(5), CtrlT(3), CtrlT(1), - CtrlT(1), CtrlT(1), CtrlT(1), CtrlT(1)}; - EXPECT_THAT(Group{group}.MaskEmpty().LowestBitSet(), 0); - EXPECT_THAT(Group{group}.MaskEmpty().HighestBitSet(), 4); + ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7, + 7, 5, 3, 1, 1, 1, 1, 1}; + EXPECT_THAT(Group{group}.MatchEmpty(), ElementsAre(0, 4)); } else if (Group::kWidth == 8) { - ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), CtrlT(2), - ctrl_t::kDeleted, CtrlT(2), CtrlT(1), - ctrl_t::kSentinel, CtrlT(1)}; - EXPECT_THAT(Group{group}.MaskEmpty().LowestBitSet(), 0); - EXPECT_THAT(Group{group}.MaskEmpty().HighestBitSet(), 0); + ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1}; + EXPECT_THAT(Group{group}.MatchEmpty(), ElementsAre(0)); } else { FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth; } } -TEST(Group, MaskEmptyOrDeleted) { +TEST(Group, MatchEmptyOrDeleted) { if (Group::kWidth == 16) { - ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kEmpty, CtrlT(3), - ctrl_t::kDeleted, CtrlT(5), ctrl_t::kSentinel, CtrlT(7), - CtrlT(7), CtrlT(5), CtrlT(3), CtrlT(1), - CtrlT(1), CtrlT(1), CtrlT(1), CtrlT(1)}; - EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().LowestBitSet(), 0); - EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().HighestBitSet(), 4); + ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7, + 7, 5, 3, 1, 1, 1, 1, 1}; + EXPECT_THAT(Group{group}.MatchEmptyOrDeleted(), ElementsAre(0, 2, 4)); } else if (Group::kWidth == 8) { - ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), CtrlT(2), - ctrl_t::kDeleted, CtrlT(2), CtrlT(1), - ctrl_t::kSentinel, CtrlT(1)}; - EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().LowestBitSet(), 0); - EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().HighestBitSet(), 3); + ctrl_t group[] = 
{kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1}; + EXPECT_THAT(Group{group}.MatchEmptyOrDeleted(), ElementsAre(0, 3)); } else { FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth; } @@ -237,32 +209,28 @@ TEST(Batch, DropDeletes) { constexpr size_t kCapacity = 63; constexpr size_t kGroupWidth = container_internal::Group::kWidth; std::vector ctrl(kCapacity + 1 + kGroupWidth); - ctrl[kCapacity] = ctrl_t::kSentinel; - std::vector pattern = { - ctrl_t::kEmpty, CtrlT(2), ctrl_t::kDeleted, CtrlT(2), - ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted}; + ctrl[kCapacity] = kSentinel; + std::vector pattern = {kEmpty, 2, kDeleted, 2, kEmpty, 1, kDeleted}; for (size_t i = 0; i != kCapacity; ++i) { ctrl[i] = pattern[i % pattern.size()]; if (i < kGroupWidth - 1) ctrl[i + kCapacity + 1] = pattern[i % pattern.size()]; } ConvertDeletedToEmptyAndFullToDeleted(ctrl.data(), kCapacity); - ASSERT_EQ(ctrl[kCapacity], ctrl_t::kSentinel); - for (size_t i = 0; i < kCapacity + kGroupWidth; ++i) { + ASSERT_EQ(ctrl[kCapacity], kSentinel); + for (size_t i = 0; i < kCapacity + 1 + kGroupWidth; ++i) { ctrl_t expected = pattern[i % (kCapacity + 1) % pattern.size()]; - if (i == kCapacity) expected = ctrl_t::kSentinel; - if (expected == ctrl_t::kDeleted) expected = ctrl_t::kEmpty; - if (IsFull(expected)) expected = ctrl_t::kDeleted; + if (i == kCapacity) expected = kSentinel; + if (expected == kDeleted) expected = kEmpty; + if (IsFull(expected)) expected = kDeleted; EXPECT_EQ(ctrl[i], expected) - << i << " " << static_cast(pattern[i % pattern.size()]); + << i << " " << int{pattern[i % pattern.size()]}; } } TEST(Group, CountLeadingEmptyOrDeleted) { - const std::vector empty_examples = {ctrl_t::kEmpty, ctrl_t::kDeleted}; - const std::vector full_examples = { - CtrlT(0), CtrlT(1), CtrlT(2), CtrlT(3), - CtrlT(5), CtrlT(9), CtrlT(127), ctrl_t::kSentinel}; + const std::vector empty_examples = {kEmpty, kDeleted}; + const std::vector full_examples = {0, 1, 2, 3, 5, 9, 127, kSentinel}; for (ctrl_t 
empty : empty_examples) { std::vector e(Group::kWidth, empty); @@ -282,44 +250,25 @@ TEST(Group, CountLeadingEmptyOrDeleted) { } } -template -struct ValuePolicy { - using slot_type = T; - using key_type = T; - using init_type = T; +struct IntPolicy { + using slot_type = int64_t; + using key_type = int64_t; + using init_type = int64_t; - template - static void construct(Allocator* alloc, slot_type* slot, Args&&... args) { - absl::allocator_traits::construct(*alloc, slot, - std::forward(args)...); + static void construct(void*, int64_t* slot, int64_t v) { *slot = v; } + static void destroy(void*, int64_t*) {} + static void transfer(void*, int64_t* new_slot, int64_t* old_slot) { + *new_slot = *old_slot; } - template - static void destroy(Allocator* alloc, slot_type* slot) { - absl::allocator_traits::destroy(*alloc, slot); - } + static int64_t& element(slot_type* slot) { return *slot; } - template - static void transfer(Allocator* alloc, slot_type* new_slot, - slot_type* old_slot) { - construct(alloc, new_slot, std::move(*old_slot)); - destroy(alloc, old_slot); - } - - static T& element(slot_type* slot) { return *slot; } - - template - static decltype(absl::container_internal::DecomposeValue( - std::declval(), std::declval()...)) - apply(F&& f, Args&&... 
args) { - return absl::container_internal::DecomposeValue( - std::forward(f), std::forward(args)...); + template + static auto apply(F&& f, int64_t x) -> decltype(std::forward(f)(x, x)) { + return std::forward(f)(x, x); } }; -using IntPolicy = ValuePolicy; -using Uint8Policy = ValuePolicy; - class StringPolicy { template , - std::equal_to, std::allocator> { - using Base = typename Uint8Table::raw_hash_set; - using Base::Base; -}; - template struct CustomAlloc : std::allocator { CustomAlloc() {} @@ -451,13 +393,6 @@ TEST(Table, EmptyFunctorOptimization) { size_t growth_left; void* infoz; }; - struct MockTableInfozDisabled { - void* ctrl; - void* slots; - size_t size; - size_t capacity; - size_t growth_left; - }; struct StatelessHash { size_t operator()(absl::string_view) const { return 0; } }; @@ -465,27 +400,17 @@ TEST(Table, EmptyFunctorOptimization) { size_t dummy; }; - if (std::is_empty::value) { - EXPECT_EQ(sizeof(MockTableInfozDisabled), - sizeof(raw_hash_set, - std::allocator>)); + EXPECT_EQ( + sizeof(MockTable), + sizeof( + raw_hash_set, std::allocator>)); - EXPECT_EQ(sizeof(MockTableInfozDisabled) + sizeof(StatefulHash), - sizeof(raw_hash_set, - std::allocator>)); - } else { - EXPECT_EQ(sizeof(MockTable), - sizeof(raw_hash_set, - std::allocator>)); - - EXPECT_EQ(sizeof(MockTable) + sizeof(StatefulHash), - sizeof(raw_hash_set, - std::allocator>)); - } + EXPECT_EQ( + sizeof(MockTable) + sizeof(StatefulHash), + sizeof( + raw_hash_set, std::allocator>)); } TEST(Table, Empty) { @@ -573,37 +498,6 @@ TEST(Table, InsertCollisionAndFindAfterDelete) { EXPECT_TRUE(t.empty()); } -TEST(Table, InsertWithinCapacity) { - IntTable t; - t.reserve(10); - const size_t original_capacity = t.capacity(); - const auto addr = [&](int i) { - return reinterpret_cast(&*t.find(i)); - }; - // Inserting an element does not change capacity. 
- t.insert(0); - EXPECT_THAT(t.capacity(), original_capacity); - const uintptr_t original_addr_0 = addr(0); - // Inserting another element does not rehash. - t.insert(1); - EXPECT_THAT(t.capacity(), original_capacity); - EXPECT_THAT(addr(0), original_addr_0); - // Inserting lots of duplicate elements does not rehash. - for (int i = 0; i < 100; ++i) { - t.insert(i % 10); - } - EXPECT_THAT(t.capacity(), original_capacity); - EXPECT_THAT(addr(0), original_addr_0); - // Inserting a range of duplicate elements does not rehash. - std::vector dup_range; - for (int i = 0; i < 100; ++i) { - dup_range.push_back(i % 10); - } - t.insert(dup_range.begin(), dup_range.end()); - EXPECT_THAT(t.capacity(), original_capacity); - EXPECT_THAT(addr(0), original_addr_0); -} - TEST(Table, LazyEmplace) { StringTable t; bool called = false; @@ -651,53 +545,28 @@ TEST(Table, Contains2) { } int decompose_constructed; -int decompose_copy_constructed; -int decompose_copy_assigned; -int decompose_move_constructed; -int decompose_move_assigned; struct DecomposeType { - DecomposeType(int i = 0) : i(i) { // NOLINT + DecomposeType(int i) : i(i) { // NOLINT ++decompose_constructed; } explicit DecomposeType(const char* d) : DecomposeType(*d) {} - DecomposeType(const DecomposeType& other) : i(other.i) { - ++decompose_copy_constructed; - } - DecomposeType& operator=(const DecomposeType& other) { - ++decompose_copy_assigned; - i = other.i; - return *this; - } - DecomposeType(DecomposeType&& other) : i(other.i) { - ++decompose_move_constructed; - } - DecomposeType& operator=(DecomposeType&& other) { - ++decompose_move_assigned; - i = other.i; - return *this; - } - int i; }; struct DecomposeHash { using is_transparent = void; - size_t operator()(const DecomposeType& a) const { return a.i; } + size_t operator()(DecomposeType a) const { return a.i; } size_t operator()(int a) const { return a; } size_t operator()(const char* a) const { return *a; } }; struct DecomposeEq { using is_transparent = void; - bool 
operator()(const DecomposeType& a, const DecomposeType& b) const { - return a.i == b.i; - } - bool operator()(const DecomposeType& a, int b) const { return a.i == b; } - bool operator()(const DecomposeType& a, const char* b) const { - return a.i == *b; - } + bool operator()(DecomposeType a, DecomposeType b) const { return a.i == b.i; } + bool operator()(DecomposeType a, int b) const { return a.i == b; } + bool operator()(DecomposeType a, const char* b) const { return a.i == *b; } }; struct DecomposePolicy { @@ -707,9 +576,9 @@ struct DecomposePolicy { template static void construct(void*, DecomposeType* slot, T&& v) { - ::new (slot) DecomposeType(std::forward(v)); + *slot = DecomposeType(std::forward(v)); } - static void destroy(void*, DecomposeType* slot) { slot->~DecomposeType(); } + static void destroy(void*, DecomposeType*) {} static DecomposeType& element(slot_type* slot) { return *slot; } template @@ -724,13 +593,8 @@ void TestDecompose(bool construct_three) { const int one = 1; const char* three_p = "3"; const auto& three = three_p; - const int elem_vector_count = 256; - std::vector elem_vector(elem_vector_count, DecomposeType{0}); - std::iota(elem_vector.begin(), elem_vector.end(), 0); - using DecomposeSet = - raw_hash_set>; - DecomposeSet set1; + raw_hash_set> set1; decompose_constructed = 0; int expected_constructed = 0; @@ -788,72 +652,20 @@ void TestDecompose(bool construct_three) { expected_constructed += construct_three; EXPECT_EQ(expected_constructed, decompose_constructed); } - - decompose_copy_constructed = 0; - decompose_copy_assigned = 0; - decompose_move_constructed = 0; - decompose_move_assigned = 0; - int expected_copy_constructed = 0; - int expected_move_constructed = 0; - { // raw_hash_set(first, last) with random-access iterators - DecomposeSet set2(elem_vector.begin(), elem_vector.end()); - // Expect exactly one copy-constructor call for each element if no - // rehashing is done. 
- expected_copy_constructed += elem_vector_count; - EXPECT_EQ(expected_copy_constructed, decompose_copy_constructed); - EXPECT_EQ(expected_move_constructed, decompose_move_constructed); - EXPECT_EQ(0, decompose_move_assigned); - EXPECT_EQ(0, decompose_copy_assigned); - } - - { // raw_hash_set(first, last) with forward iterators - std::list elem_list(elem_vector.begin(), elem_vector.end()); - expected_copy_constructed = decompose_copy_constructed; - DecomposeSet set2(elem_list.begin(), elem_list.end()); - // Expect exactly N elements copied into set, expect at most 2*N elements - // moving internally for all resizing needed (for a growth factor of 2). - expected_copy_constructed += elem_vector_count; - EXPECT_EQ(expected_copy_constructed, decompose_copy_constructed); - expected_move_constructed += elem_vector_count; - EXPECT_LT(expected_move_constructed, decompose_move_constructed); - expected_move_constructed += elem_vector_count; - EXPECT_GE(expected_move_constructed, decompose_move_constructed); - EXPECT_EQ(0, decompose_move_assigned); - EXPECT_EQ(0, decompose_copy_assigned); - expected_copy_constructed = decompose_copy_constructed; - expected_move_constructed = decompose_move_constructed; - } - - { // insert(first, last) - DecomposeSet set2; - set2.insert(elem_vector.begin(), elem_vector.end()); - // Expect exactly N elements copied into set, expect at most 2*N elements - // moving internally for all resizing needed (for a growth factor of 2). 
- const int expected_new_elements = elem_vector_count; - const int expected_max_element_moves = 2 * elem_vector_count; - expected_copy_constructed += expected_new_elements; - EXPECT_EQ(expected_copy_constructed, decompose_copy_constructed); - expected_move_constructed += expected_max_element_moves; - EXPECT_GE(expected_move_constructed, decompose_move_constructed); - EXPECT_EQ(0, decompose_move_assigned); - EXPECT_EQ(0, decompose_copy_assigned); - expected_copy_constructed = decompose_copy_constructed; - expected_move_constructed = decompose_move_constructed; - } } TEST(Table, Decompose) { TestDecompose(false); struct TransparentHashIntOverload { - size_t operator()(const DecomposeType& a) const { return a.i; } + size_t operator()(DecomposeType a) const { return a.i; } size_t operator()(int a) const { return a; } }; struct TransparentEqIntOverload { - bool operator()(const DecomposeType& a, const DecomposeType& b) const { + bool operator()(DecomposeType a, DecomposeType b) const { return a.i == b.i; } - bool operator()(const DecomposeType& a, int b) const { return a.i == b; } + bool operator()(DecomposeType a, int b) const { return a.i == b; } }; TestDecompose(true); TestDecompose(true); @@ -895,7 +707,7 @@ TEST(Table, RehashWithNoResize) { const size_t capacity = t.capacity(); // Remove elements from all groups except the first and the last one. - // All elements removed from full groups will be marked as ctrl_t::kDeleted. + // All elements removed from full groups will be marked as kDeleted. 
const size_t erase_begin = Group::kWidth / 2; const size_t erase_end = (t.size() / Group::kWidth - 1) * Group::kWidth; for (size_t i = erase_begin; i < erase_end; ++i) { @@ -1035,8 +847,7 @@ TEST(Table, EraseMaintainsValidIterator) { std::vector CollectBadMergeKeys(size_t N) { static constexpr int kGroupSize = Group::kWidth - 1; - auto topk_range = [](size_t b, size_t e, - IntTable* t) -> std::vector { + auto topk_range = [](size_t b, size_t e, IntTable* t) -> std::vector { for (size_t i = b; i != e; ++i) { t->emplace(i); } @@ -1190,8 +1001,8 @@ using ProbeStatsPerSize = std::map; // 1. Create new table and reserve it to keys.size() * 2 // 2. Insert all keys xored with seed // 3. Collect ProbeStats from final table. -ProbeStats CollectProbeStatsOnKeysXoredWithSeed( - const std::vector& keys, size_t num_iters) { +ProbeStats CollectProbeStatsOnKeysXoredWithSeed(const std::vector& keys, + size_t num_iters) { const size_t reserve_size = keys.size() * 2; ProbeStats stats; @@ -1249,7 +1060,7 @@ ExpectedStats XorSeedExpectedStats() { case 16: if (kRandomizesInserts) { return {0.1, - 2.0, + 1.0, {{0.95, 0.1}}, {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}}; } else { @@ -1263,7 +1074,6 @@ ExpectedStats XorSeedExpectedStats() { return {}; } -// TODO(b/80415403): Figure out why this test is so flaky, esp. 
on MSVC TEST(Table, DISABLED_EnsureNonQuadraticTopNXorSeedByProbeSeqLength) { ProbeStatsPerSize stats; std::vector sizes = {Group::kWidth << 5, Group::kWidth << 10}; @@ -1336,17 +1146,17 @@ ExpectedStats LinearTransformExpectedStats() { {{0.95, 0.3}}, {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}}; } else { - return {0.4, - 0.6, - {{0.95, 0.5}}, - {{0.95, 1}, {0.99, 14}, {0.999, 23}, {0.9999, 26}}}; + return {0.15, + 0.5, + {{0.95, 0.3}}, + {{0.95, 0}, {0.99, 3}, {0.999, 15}, {0.9999, 25}}}; } case 16: if (kRandomizesInserts) { return {0.1, 0.4, {{0.95, 0.3}}, - {{0.95, 1}, {0.99, 2}, {0.999, 9}, {0.9999, 15}}}; + {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}}; } else { return {0.05, 0.2, @@ -1358,7 +1168,6 @@ ExpectedStats LinearTransformExpectedStats() { return {}; } -// TODO(b/80415403): Figure out why this test is so flaky. TEST(Table, DISABLED_EnsureNonQuadraticTopNLinearTransformByProbeSeqLength) { ProbeStatsPerSize stats; std::vector sizes = {Group::kWidth << 5, Group::kWidth << 10}; @@ -1847,38 +1656,6 @@ TEST(Table, Merge) { EXPECT_THAT(t2, UnorderedElementsAre(Pair("0", "~0"))); } -TEST(Table, IteratorEmplaceConstructibleRequirement) { - struct Value { - explicit Value(absl::string_view view) : value(view) {} - std::string value; - - bool operator==(const Value& other) const { return value == other.value; } - }; - struct H { - size_t operator()(const Value& v) const { - return absl::Hash{}(v.value); - } - }; - - struct Table : raw_hash_set, H, std::equal_to, - std::allocator> { - using Base = typename Table::raw_hash_set; - using Base::Base; - }; - - std::string input[3]{"A", "B", "C"}; - - Table t(std::begin(input), std::end(input)); - EXPECT_THAT(t, UnorderedElementsAre(Value{"A"}, Value{"B"}, Value{"C"})); - - input[0] = "D"; - input[1] = "E"; - input[2] = "F"; - t.insert(std::begin(input), std::end(input)); - EXPECT_THAT(t, UnorderedElementsAre(Value{"A"}, Value{"B"}, Value{"C"}, - Value{"D"}, Value{"E"}, Value{"F"})); -} - TEST(Nodes, 
EmptyNodeType) { using node_type = StringTable::node_type; node_type n; @@ -1933,26 +1710,6 @@ TEST(Nodes, ExtractInsert) { EXPECT_FALSE(node); } -TEST(Nodes, HintInsert) { - IntTable t = {1, 2, 3}; - auto node = t.extract(1); - EXPECT_THAT(t, UnorderedElementsAre(2, 3)); - auto it = t.insert(t.begin(), std::move(node)); - EXPECT_THAT(t, UnorderedElementsAre(1, 2, 3)); - EXPECT_EQ(*it, 1); - EXPECT_FALSE(node); - - node = t.extract(2); - EXPECT_THAT(t, UnorderedElementsAre(1, 3)); - // reinsert 2 to make the next insert fail. - t.insert(2); - EXPECT_THAT(t, UnorderedElementsAre(1, 2, 3)); - it = t.insert(t.begin(), std::move(node)); - EXPECT_EQ(*it, 2); - // The node was not emptied by the insert call. - EXPECT_TRUE(node); -} - IntTable MakeSimpleTable(size_t size) { IntTable t; while (t.size() < size) t.insert(t.size()); @@ -2035,7 +1792,7 @@ TEST(TableDeathTest, EraseOfEndAsserts) { IntTable t; // Extra simple "regexp" as regexp support is highly varied across platforms. - constexpr char kDeathMsg[] = "erase.. called on invalid iterator"; + constexpr char kDeathMsg[] = "Invalid operation on iterator"; EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()), kDeathMsg); } @@ -2045,62 +1802,20 @@ TEST(RawHashSamplerTest, Sample) { SetHashtablezEnabled(true); SetHashtablezSampleParameter(100); - auto& sampler = GlobalHashtablezSampler(); + auto& sampler = HashtablezSampler::Global(); size_t start_size = 0; - std::unordered_set preexisting_info; - start_size += sampler.Iterate([&](const HashtablezInfo& info) { - preexisting_info.insert(&info); - ++start_size; - }); + start_size += sampler.Iterate([&](const HashtablezInfo&) { ++start_size; }); std::vector tables; for (int i = 0; i < 1000000; ++i) { tables.emplace_back(); - - const bool do_reserve = (i % 10 > 5); - const bool do_rehash = !do_reserve && (i % 10 > 0); - - if (do_reserve) { - // Don't reserve on all tables. 
- tables.back().reserve(10 * (i % 10)); - } - tables.back().insert(1); - tables.back().insert(i % 5); - - if (do_rehash) { - // Rehash some other tables. - tables.back().rehash(10 * (i % 10)); - } } size_t end_size = 0; - std::unordered_map observed_checksums; - std::unordered_map reservations; - end_size += sampler.Iterate([&](const HashtablezInfo& info) { - if (preexisting_info.count(&info) == 0) { - observed_checksums[info.hashes_bitwise_xor.load( - std::memory_order_relaxed)]++; - reservations[info.max_reserve.load(std::memory_order_relaxed)]++; - } - EXPECT_EQ(info.inline_element_size, sizeof(int64_t)); - ++end_size; - }); + end_size += sampler.Iterate([&](const HashtablezInfo&) { ++end_size; }); EXPECT_NEAR((end_size - start_size) / static_cast(tables.size()), 0.01, 0.005); - EXPECT_EQ(observed_checksums.size(), 5); - for (const auto& [_, count] : observed_checksums) { - EXPECT_NEAR((100 * count) / static_cast(tables.size()), 0.2, 0.05); - } - - EXPECT_EQ(reservations.size(), 10); - for (const auto& [reservation, count] : reservations) { - EXPECT_GE(reservation, 0); - EXPECT_LT(reservation, 100); - - EXPECT_NEAR((100 * count) / static_cast(tables.size()), 0.1, 0.05) - << reservation; - } } #endif // ABSL_INTERNAL_HASHTABLEZ_SAMPLE @@ -2109,7 +1824,7 @@ TEST(RawHashSamplerTest, DoNotSampleCustomAllocators) { SetHashtablezEnabled(true); SetHashtablezSampleParameter(100); - auto& sampler = GlobalHashtablezSampler(); + auto& sampler = HashtablezSampler::Global(); size_t start_size = 0; start_size += sampler.Iterate([&](const HashtablezInfo&) { ++start_size; }); @@ -2151,36 +1866,6 @@ TEST(Sanitizer, PoisoningOnErase) { } #endif // ABSL_HAVE_ADDRESS_SANITIZER -TEST(Table, AlignOne) { - // We previously had a bug in which we were copying a control byte over the - // first slot when alignof(value_type) is 1. We test repeated - // insertions/erases and verify that the behavior is correct. 
- Uint8Table t; - std::unordered_set verifier; // NOLINT - - // Do repeated insertions/erases from the table. - for (int64_t i = 0; i < 100000; ++i) { - SCOPED_TRACE(i); - const uint8_t u = (i * -i) & 0xFF; - auto it = t.find(u); - auto verifier_it = verifier.find(u); - if (it == t.end()) { - ASSERT_EQ(verifier_it, verifier.end()); - t.insert(u); - verifier.insert(u); - } else { - ASSERT_NE(verifier_it, verifier.end()); - t.erase(it); - verifier.erase(verifier_it); - } - } - - EXPECT_EQ(t.size(), verifier.size()); - for (uint8_t u : t) { - EXPECT_EQ(verifier.count(u), 1); - } -} - } // namespace } // namespace container_internal ABSL_NAMESPACE_END diff --git a/abseil-cpp/absl/container/internal/unordered_map_constructor_test.h b/abseil-cpp/absl/container/internal/unordered_map_constructor_test.h index 7e84dc25..76ee95e6 100644 --- a/abseil-cpp/absl/container/internal/unordered_map_constructor_test.h +++ b/abseil-cpp/absl/container/internal/unordered_map_constructor_test.h @@ -16,7 +16,6 @@ #define ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_CONSTRUCTOR_TEST_H_ #include -#include #include #include "gmock/gmock.h" @@ -179,7 +178,7 @@ TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashEqualAlloc) { A alloc(0); std::vector values; std::generate_n(std::back_inserter(values), 10, - hash_internal::UniqueGenerator()); + hash_internal::Generator()); TypeParam m(values.begin(), values.end(), 123, hasher, equal, alloc); EXPECT_EQ(m.hash_function(), hasher); EXPECT_EQ(m.key_eq(), equal); @@ -198,7 +197,7 @@ void InputIteratorBucketAllocTest(std::true_type) { A alloc(0); std::vector values; std::generate_n(std::back_inserter(values), 10, - hash_internal::UniqueGenerator()); + hash_internal::Generator()); TypeParam m(values.begin(), values.end(), 123, alloc); EXPECT_EQ(m.get_allocator(), alloc); EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); @@ -221,7 +220,7 @@ void InputIteratorBucketHashAllocTest(std::true_type) { A alloc(0); std::vector values; 
std::generate_n(std::back_inserter(values), 10, - hash_internal::UniqueGenerator()); + hash_internal::Generator()); TypeParam m(values.begin(), values.end(), 123, hasher, alloc); EXPECT_EQ(m.hash_function(), hasher); EXPECT_EQ(m.get_allocator(), alloc); @@ -241,9 +240,8 @@ TYPED_TEST_P(ConstructorTest, CopyConstructor) { H hasher; E equal; A alloc(0); - hash_internal::UniqueGenerator gen; TypeParam m(123, hasher, equal, alloc); - for (size_t i = 0; i != 10; ++i) m.insert(gen()); + for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator()()); TypeParam n(m); EXPECT_EQ(m.hash_function(), n.hash_function()); EXPECT_EQ(m.key_eq(), n.key_eq()); @@ -263,9 +261,8 @@ void CopyConstructorAllocTest(std::true_type) { H hasher; E equal; A alloc(0); - hash_internal::UniqueGenerator gen; TypeParam m(123, hasher, equal, alloc); - for (size_t i = 0; i != 10; ++i) m.insert(gen()); + for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator()()); TypeParam n(m, A(11)); EXPECT_EQ(m.hash_function(), n.hash_function()); EXPECT_EQ(m.key_eq(), n.key_eq()); @@ -287,9 +284,8 @@ TYPED_TEST_P(ConstructorTest, MoveConstructor) { H hasher; E equal; A alloc(0); - hash_internal::UniqueGenerator gen; TypeParam m(123, hasher, equal, alloc); - for (size_t i = 0; i != 10; ++i) m.insert(gen()); + for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator()()); TypeParam t(m); TypeParam n(std::move(t)); EXPECT_EQ(m.hash_function(), n.hash_function()); @@ -310,9 +306,8 @@ void MoveConstructorAllocTest(std::true_type) { H hasher; E equal; A alloc(0); - hash_internal::UniqueGenerator gen; TypeParam m(123, hasher, equal, alloc); - for (size_t i = 0; i != 10; ++i) m.insert(gen()); + for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator()()); TypeParam t(m); TypeParam n(std::move(t), A(1)); EXPECT_EQ(m.hash_function(), n.hash_function()); @@ -329,7 +324,7 @@ TYPED_TEST_P(ConstructorTest, MoveConstructorAlloc) { TYPED_TEST_P(ConstructorTest, 
InitializerListBucketHashEqualAlloc) { using T = hash_internal::GeneratedType; - hash_internal::UniqueGenerator gen; + hash_internal::Generator gen; std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; using H = typename TypeParam::hasher; using E = typename TypeParam::key_equal; @@ -352,7 +347,7 @@ template void InitializerListBucketAllocTest(std::true_type) { using T = hash_internal::GeneratedType; using A = typename TypeParam::allocator_type; - hash_internal::UniqueGenerator gen; + hash_internal::Generator gen; std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; A alloc(0); TypeParam m(values, 123, alloc); @@ -375,7 +370,7 @@ void InitializerListBucketHashAllocTest(std::true_type) { using A = typename TypeParam::allocator_type; H hasher; A alloc(0); - hash_internal::UniqueGenerator gen; + hash_internal::Generator gen; std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; TypeParam m(values, 123, hasher, alloc); EXPECT_EQ(m.hash_function(), hasher); @@ -396,7 +391,7 @@ TYPED_TEST_P(ConstructorTest, Assignment) { H hasher; E equal; A alloc(0); - hash_internal::UniqueGenerator gen; + hash_internal::Generator gen; TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc); TypeParam n; n = m; @@ -416,7 +411,7 @@ TYPED_TEST_P(ConstructorTest, MoveAssignment) { H hasher; E equal; A alloc(0); - hash_internal::UniqueGenerator gen; + hash_internal::Generator gen; TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc); TypeParam t(m); TypeParam n; @@ -428,7 +423,7 @@ TYPED_TEST_P(ConstructorTest, MoveAssignment) { TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerList) { using T = hash_internal::GeneratedType; - hash_internal::UniqueGenerator gen; + hash_internal::Generator gen; std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; TypeParam m; m = values; @@ -437,7 +432,7 @@ TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerList) { TYPED_TEST_P(ConstructorTest, AssignmentOverwritesExisting) { 
using T = hash_internal::GeneratedType; - hash_internal::UniqueGenerator gen; + hash_internal::Generator gen; TypeParam m({gen(), gen(), gen()}); TypeParam n({gen()}); n = m; @@ -446,7 +441,7 @@ TYPED_TEST_P(ConstructorTest, AssignmentOverwritesExisting) { TYPED_TEST_P(ConstructorTest, MoveAssignmentOverwritesExisting) { using T = hash_internal::GeneratedType; - hash_internal::UniqueGenerator gen; + hash_internal::Generator gen; TypeParam m({gen(), gen(), gen()}); TypeParam t(m); TypeParam n({gen()}); @@ -456,7 +451,7 @@ TYPED_TEST_P(ConstructorTest, MoveAssignmentOverwritesExisting) { TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerListOverwritesExisting) { using T = hash_internal::GeneratedType; - hash_internal::UniqueGenerator gen; + hash_internal::Generator gen; std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; TypeParam m; m = values; @@ -465,7 +460,7 @@ TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerListOverwritesExisting) { TYPED_TEST_P(ConstructorTest, AssignmentOnSelf) { using T = hash_internal::GeneratedType; - hash_internal::UniqueGenerator gen; + hash_internal::Generator gen; std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; TypeParam m(values); m = *&m; // Avoid -Wself-assign @@ -476,7 +471,7 @@ TYPED_TEST_P(ConstructorTest, AssignmentOnSelf) { // containers in unspecified state (and in practice in causes memory-leak // according to heap-checker!). 
-REGISTER_TYPED_TEST_SUITE_P( +REGISTER_TYPED_TEST_CASE_P( ConstructorTest, NoArgs, BucketCount, BucketCountHash, BucketCountHashEqual, BucketCountHashEqualAlloc, BucketCountAlloc, BucketCountHashAlloc, Alloc, InputIteratorBucketHashEqualAlloc, InputIteratorBucketAlloc, diff --git a/abseil-cpp/absl/container/internal/unordered_map_lookup_test.h b/abseil-cpp/absl/container/internal/unordered_map_lookup_test.h index 3713cd9a..e76421e5 100644 --- a/abseil-cpp/absl/container/internal/unordered_map_lookup_test.h +++ b/abseil-cpp/absl/container/internal/unordered_map_lookup_test.h @@ -107,8 +107,8 @@ TYPED_TEST_P(LookupTest, EqualRange) { } } -REGISTER_TYPED_TEST_SUITE_P(LookupTest, At, OperatorBracket, Count, Find, - EqualRange); +REGISTER_TYPED_TEST_CASE_P(LookupTest, At, OperatorBracket, Count, Find, + EqualRange); } // namespace container_internal ABSL_NAMESPACE_END diff --git a/abseil-cpp/absl/container/internal/unordered_map_modifiers_test.h b/abseil-cpp/absl/container/internal/unordered_map_modifiers_test.h index 4d9ab30f..8c9ca779 100644 --- a/abseil-cpp/absl/container/internal/unordered_map_modifiers_test.h +++ b/abseil-cpp/absl/container/internal/unordered_map_modifiers_test.h @@ -81,38 +81,6 @@ TYPED_TEST_P(ModifiersTest, InsertRange) { ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); } -TYPED_TEST_P(ModifiersTest, InsertWithinCapacity) { - using T = hash_internal::GeneratedType; - using V = typename TypeParam::mapped_type; - T val = hash_internal::Generator()(); - TypeParam m; - m.reserve(10); - const size_t original_capacity = m.bucket_count(); - m.insert(val); - EXPECT_EQ(m.bucket_count(), original_capacity); - T val2 = {val.first, hash_internal::Generator()()}; - m.insert(val2); - EXPECT_EQ(m.bucket_count(), original_capacity); -} - -TYPED_TEST_P(ModifiersTest, InsertRangeWithinCapacity) { -#if !defined(__GLIBCXX__) - using T = hash_internal::GeneratedType; - std::vector base_values; - std::generate_n(std::back_inserter(base_values), 
10, - hash_internal::Generator()); - std::vector values; - while (values.size() != 100) { - std::copy_n(base_values.begin(), 10, std::back_inserter(values)); - } - TypeParam m; - m.reserve(10); - const size_t original_capacity = m.bucket_count(); - m.insert(values.begin(), values.end()); - EXPECT_EQ(m.bucket_count(), original_capacity); -#endif -} - TYPED_TEST_P(ModifiersTest, InsertOrAssign) { #ifdef UNORDERED_MAP_CXX17 using std::get; @@ -297,12 +265,10 @@ TYPED_TEST_P(ModifiersTest, Swap) { // TODO(alkis): Write tests for extract. // TODO(alkis): Write tests for merge. -REGISTER_TYPED_TEST_SUITE_P(ModifiersTest, Clear, Insert, InsertHint, - InsertRange, InsertWithinCapacity, - InsertRangeWithinCapacity, InsertOrAssign, - InsertOrAssignHint, Emplace, EmplaceHint, - TryEmplace, TryEmplaceHint, Erase, EraseRange, - EraseKey, Swap); +REGISTER_TYPED_TEST_CASE_P(ModifiersTest, Clear, Insert, InsertHint, + InsertRange, InsertOrAssign, InsertOrAssignHint, + Emplace, EmplaceHint, TryEmplace, TryEmplaceHint, + Erase, EraseRange, EraseKey, Swap); template struct is_unique_ptr : std::false_type {}; diff --git a/abseil-cpp/absl/container/internal/unordered_set_constructor_test.h b/abseil-cpp/absl/container/internal/unordered_set_constructor_test.h index af1116e6..41165b05 100644 --- a/abseil-cpp/absl/container/internal/unordered_set_constructor_test.h +++ b/abseil-cpp/absl/container/internal/unordered_set_constructor_test.h @@ -478,7 +478,7 @@ TYPED_TEST_P(ConstructorTest, AssignmentOnSelf) { EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); } -REGISTER_TYPED_TEST_SUITE_P( +REGISTER_TYPED_TEST_CASE_P( ConstructorTest, NoArgs, BucketCount, BucketCountHash, BucketCountHashEqual, BucketCountHashEqualAlloc, BucketCountAlloc, BucketCountHashAlloc, Alloc, InputIteratorBucketHashEqualAlloc, InputIteratorBucketAlloc, diff --git a/abseil-cpp/absl/container/internal/unordered_set_lookup_test.h b/abseil-cpp/absl/container/internal/unordered_set_lookup_test.h index 
b35f766e..8f2f4b20 100644 --- a/abseil-cpp/absl/container/internal/unordered_set_lookup_test.h +++ b/abseil-cpp/absl/container/internal/unordered_set_lookup_test.h @@ -82,7 +82,7 @@ TYPED_TEST_P(LookupTest, EqualRange) { } } -REGISTER_TYPED_TEST_SUITE_P(LookupTest, Count, Find, EqualRange); +REGISTER_TYPED_TEST_CASE_P(LookupTest, Count, Find, EqualRange); } // namespace container_internal ABSL_NAMESPACE_END diff --git a/abseil-cpp/absl/container/internal/unordered_set_modifiers_test.h b/abseil-cpp/absl/container/internal/unordered_set_modifiers_test.h index d8864bb2..26be58d9 100644 --- a/abseil-cpp/absl/container/internal/unordered_set_modifiers_test.h +++ b/abseil-cpp/absl/container/internal/unordered_set_modifiers_test.h @@ -74,36 +74,6 @@ TYPED_TEST_P(ModifiersTest, InsertRange) { ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); } -TYPED_TEST_P(ModifiersTest, InsertWithinCapacity) { - using T = hash_internal::GeneratedType; - T val = hash_internal::Generator()(); - TypeParam m; - m.reserve(10); - const size_t original_capacity = m.bucket_count(); - m.insert(val); - EXPECT_EQ(m.bucket_count(), original_capacity); - m.insert(val); - EXPECT_EQ(m.bucket_count(), original_capacity); -} - -TYPED_TEST_P(ModifiersTest, InsertRangeWithinCapacity) { -#if !defined(__GLIBCXX__) - using T = hash_internal::GeneratedType; - std::vector base_values; - std::generate_n(std::back_inserter(base_values), 10, - hash_internal::Generator()); - std::vector values; - while (values.size() != 100) { - values.insert(values.end(), base_values.begin(), base_values.end()); - } - TypeParam m; - m.reserve(10); - const size_t original_capacity = m.bucket_count(); - m.insert(values.begin(), values.end()); - EXPECT_EQ(m.bucket_count(), original_capacity); -#endif -} - TYPED_TEST_P(ModifiersTest, Emplace) { using T = hash_internal::GeneratedType; T val = hash_internal::Generator()(); @@ -209,10 +179,9 @@ TYPED_TEST_P(ModifiersTest, Swap) { // TODO(alkis): Write tests for extract. 
// TODO(alkis): Write tests for merge. -REGISTER_TYPED_TEST_SUITE_P(ModifiersTest, Clear, Insert, InsertHint, - InsertRange, InsertWithinCapacity, - InsertRangeWithinCapacity, Emplace, EmplaceHint, - Erase, EraseRange, EraseKey, Swap); +REGISTER_TYPED_TEST_CASE_P(ModifiersTest, Clear, Insert, InsertHint, + InsertRange, Emplace, EmplaceHint, Erase, EraseRange, + EraseKey, Swap); } // namespace container_internal ABSL_NAMESPACE_END diff --git a/abseil-cpp/absl/container/node_hash_map.h b/abseil-cpp/absl/container/node_hash_map.h index 6868e63a..7a39f628 100644 --- a/abseil-cpp/absl/container/node_hash_map.h +++ b/abseil-cpp/absl/container/node_hash_map.h @@ -41,10 +41,9 @@ #include #include "absl/algorithm/container.h" -#include "absl/base/macros.h" #include "absl/container/internal/container_memory.h" #include "absl/container/internal/hash_function_defaults.h" // IWYU pragma: export -#include "absl/container/internal/node_slot_policy.h" +#include "absl/container/internal/node_hash_policy.h" #include "absl/container/internal/raw_hash_map.h" // IWYU pragma: export #include "absl/memory/memory.h" @@ -78,10 +77,6 @@ class NodeHashMapPolicy; // absl/hash/hash.h for information on extending Abseil hashing to user-defined // types. // -// Using `absl::node_hash_map` at interface boundaries in dynamically loaded -// libraries (e.g. .dll, .so) is unsupported due to way `absl::Hash` values may -// be randomized across dynamically loaded libraries. -// // Example: // // // Create a node hash map of three strings (that map to strings) @@ -352,8 +347,8 @@ class node_hash_map // `node_hash_map`. // // iterator try_emplace(const_iterator hint, - // const key_type& k, Args&&... args): - // iterator try_emplace(const_iterator hint, key_type&& k, Args&&... args): + // const init_type& k, Args&&... args): + // iterator try_emplace(const_iterator hint, init_type&& k, Args&&... 
args): // // Inserts (via copy or move) the element of the specified key into the // `node_hash_map` using the position of `hint` as a non-binding suggestion @@ -530,19 +525,17 @@ class node_hash_map // erase_if(node_hash_map<>, Pred) // // Erases all elements that satisfy the predicate `pred` from the container `c`. -// Returns the number of erased elements. template -typename node_hash_map::size_type erase_if( - node_hash_map& c, Predicate pred) { - return container_internal::EraseIf(pred, &c); +void erase_if(node_hash_map& c, Predicate pred) { + container_internal::EraseIf(pred, &c); } namespace container_internal { template class NodeHashMapPolicy - : public absl::container_internal::node_slot_policy< + : public absl::container_internal::node_hash_policy< std::pair&, NodeHashMapPolicy> { using value_type = std::pair; diff --git a/abseil-cpp/absl/container/node_hash_map_test.cc b/abseil-cpp/absl/container/node_hash_map_test.cc index e941a836..8f59a1e4 100644 --- a/abseil-cpp/absl/container/node_hash_map_test.cc +++ b/abseil-cpp/absl/container/node_hash_map_test.cc @@ -223,36 +223,33 @@ TEST(NodeHashMap, EraseIf) { // Erase all elements. { node_hash_map s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}}; - EXPECT_EQ(erase_if(s, [](std::pair) { return true; }), 5); + erase_if(s, [](std::pair) { return true; }); EXPECT_THAT(s, IsEmpty()); } // Erase no elements. { node_hash_map s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}}; - EXPECT_EQ(erase_if(s, [](std::pair) { return false; }), 0); + erase_if(s, [](std::pair) { return false; }); EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(2, 2), Pair(3, 3), Pair(4, 4), Pair(5, 5))); } // Erase specific elements. { node_hash_map s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}}; - EXPECT_EQ(erase_if(s, - [](std::pair kvp) { - return kvp.first % 2 == 1; - }), - 3); + erase_if(s, + [](std::pair kvp) { return kvp.first % 2 == 1; }); EXPECT_THAT(s, UnorderedElementsAre(Pair(2, 2), Pair(4, 4))); } // Predicate is function reference. 
{ node_hash_map s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}}; - EXPECT_EQ(erase_if(s, FirstIsEven), 2); + erase_if(s, FirstIsEven); EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(3, 3), Pair(5, 5))); } // Predicate is function pointer. { node_hash_map s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}}; - EXPECT_EQ(erase_if(s, &FirstIsEven), 2); + erase_if(s, &FirstIsEven); EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(3, 3), Pair(5, 5))); } } diff --git a/abseil-cpp/absl/container/node_hash_set.h b/abseil-cpp/absl/container/node_hash_set.h index f2cc70c3..56ce3b66 100644 --- a/abseil-cpp/absl/container/node_hash_set.h +++ b/abseil-cpp/absl/container/node_hash_set.h @@ -18,7 +18,7 @@ // // An `absl::node_hash_set` is an unordered associative container designed to // be a more efficient replacement for `std::unordered_set`. Like -// `unordered_set`, search, insertion, and deletion of set elements can be done +// `unordered_set`, search, insertion, and deletion of map elements can be done // as an `O(1)` operation. However, `node_hash_set` (and other unordered // associative containers known as the collection of Abseil "Swiss tables") // contain other optimizations that result in both memory and computation @@ -38,9 +38,8 @@ #include #include "absl/algorithm/container.h" -#include "absl/base/macros.h" #include "absl/container/internal/hash_function_defaults.h" // IWYU pragma: export -#include "absl/container/internal/node_slot_policy.h" +#include "absl/container/internal/node_hash_policy.h" #include "absl/container/internal/raw_hash_set.h" // IWYU pragma: export #include "absl/memory/memory.h" @@ -61,7 +60,7 @@ struct NodeHashSetPolicy; // following notable differences: // // * Supports heterogeneous lookup, through `find()`, `operator[]()` and -// `insert()`, provided that the set is provided a compatible heterogeneous +// `insert()`, provided that the map is provided a compatible heterogeneous // hashing function and equality operator. 
// * Contains a `capacity()` member function indicating the number of element // slots (open, deleted, and empty) within the hash set. @@ -74,20 +73,16 @@ struct NodeHashSetPolicy; // absl/hash/hash.h for information on extending Abseil hashing to user-defined // types. // -// Using `absl::node_hash_set` at interface boundaries in dynamically loaded -// libraries (e.g. .dll, .so) is unsupported due to way `absl::Hash` values may -// be randomized across dynamically loaded libraries. -// // Example: // // // Create a node hash set of three strings -// absl::node_hash_set ducks = +// absl::node_hash_map ducks = // {"huey", "dewey", "louie"}; // -// // Insert a new element into the node hash set -// ducks.insert("donald"); +// // Insert a new element into the node hash map +// ducks.insert("donald"}; // -// // Force a rehash of the node hash set +// // Force a rehash of the node hash map // ducks.rehash(0); // // // See if "dewey" is present @@ -105,7 +100,7 @@ class node_hash_set public: // Constructors and Assignment Operators // - // A node_hash_set supports the same overload set as `std::unordered_set` + // A node_hash_set supports the same overload set as `std::unordered_map` // for construction and assignment: // // * Default constructor @@ -172,7 +167,7 @@ class node_hash_set // available within the `node_hash_set`. // // NOTE: this member function is particular to `absl::node_hash_set` and is - // not provided in the `std::unordered_set` API. + // not provided in the `std::unordered_map` API. using Base::capacity; // node_hash_set::empty() @@ -213,7 +208,7 @@ class node_hash_set // `void`. // // NOTE: this return behavior is different than that of STL containers in - // general and `std::unordered_set` in particular. + // general and `std::unordered_map` in particular. 
// // iterator erase(const_iterator first, const_iterator last): // @@ -319,7 +314,7 @@ class node_hash_set // node_hash_set::merge() // - // Extracts elements from a given `source` node hash set into this + // Extracts elements from a given `source` flat hash map into this // `node_hash_set`. If the destination `node_hash_set` already contains an // element with an equivalent key, that element is not extracted. using Base::merge; @@ -327,15 +322,15 @@ class node_hash_set // node_hash_set::swap(node_hash_set& other) // // Exchanges the contents of this `node_hash_set` with those of the `other` - // node hash set, avoiding invocation of any move, copy, or swap operations on + // flat hash map, avoiding invocation of any move, copy, or swap operations on // individual elements. // // All iterators and references on the `node_hash_set` remain valid, excepting // for the past-the-end iterator, which is invalidated. // - // `swap()` requires that the node hash set's hashing and key equivalence + // `swap()` requires that the flat hash set's hashing and key equivalence // functions be Swappable, and are exchaged using unqualified calls to - // non-member `swap()`. If the set's allocator has + // non-member `swap()`. If the map's allocator has // `std::allocator_traits::propagate_on_container_swap::value` // set to `true`, the allocators are also exchanged using an unqualified call // to non-member `swap()`; otherwise, the allocators are not swapped. @@ -390,14 +385,14 @@ class node_hash_set // node_hash_set::bucket_count() // // Returns the number of "buckets" within the `node_hash_set`. Note that - // because a node hash set contains all elements within its internal storage, + // because a flat hash map contains all elements within its internal storage, // this value simply equals the current capacity of the `node_hash_set`. 
using Base::bucket_count; // node_hash_set::load_factor() // // Returns the current load factor of the `node_hash_set` (the average number - // of slots occupied with a value within the hash set). + // of slots occupied with a value within the hash map). using Base::load_factor; // node_hash_set::max_load_factor() @@ -438,18 +433,16 @@ class node_hash_set // erase_if(node_hash_set<>, Pred) // // Erases all elements that satisfy the predicate `pred` from the container `c`. -// Returns the number of erased elements. template -typename node_hash_set::size_type erase_if( - node_hash_set& c, Predicate pred) { - return container_internal::EraseIf(pred, &c); +void erase_if(node_hash_set& c, Predicate pred) { + container_internal::EraseIf(pred, &c); } namespace container_internal { template struct NodeHashSetPolicy - : absl::container_internal::node_slot_policy> { + : absl::container_internal::node_hash_policy> { using key_type = T; using init_type = T; using constant_iterators = std::true_type; diff --git a/abseil-cpp/absl/container/node_hash_set_test.cc b/abseil-cpp/absl/container/node_hash_set_test.cc index 98a8dbdd..7ddad202 100644 --- a/abseil-cpp/absl/container/node_hash_set_test.cc +++ b/abseil-cpp/absl/container/node_hash_set_test.cc @@ -108,31 +108,31 @@ TEST(NodeHashSet, EraseIf) { // Erase all elements. { node_hash_set s = {1, 2, 3, 4, 5}; - EXPECT_EQ(erase_if(s, [](int) { return true; }), 5); + erase_if(s, [](int) { return true; }); EXPECT_THAT(s, IsEmpty()); } // Erase no elements. { node_hash_set s = {1, 2, 3, 4, 5}; - EXPECT_EQ(erase_if(s, [](int) { return false; }), 0); + erase_if(s, [](int) { return false; }); EXPECT_THAT(s, UnorderedElementsAre(1, 2, 3, 4, 5)); } // Erase specific elements. { node_hash_set s = {1, 2, 3, 4, 5}; - EXPECT_EQ(erase_if(s, [](int k) { return k % 2 == 1; }), 3); + erase_if(s, [](int k) { return k % 2 == 1; }); EXPECT_THAT(s, UnorderedElementsAre(2, 4)); } // Predicate is function reference. 
{ node_hash_set s = {1, 2, 3, 4, 5}; - EXPECT_EQ(erase_if(s, IsEven), 2); + erase_if(s, IsEven); EXPECT_THAT(s, UnorderedElementsAre(1, 3, 5)); } // Predicate is function pointer. { node_hash_set s = {1, 2, 3, 4, 5}; - EXPECT_EQ(erase_if(s, &IsEven), 2); + erase_if(s, &IsEven); EXPECT_THAT(s, UnorderedElementsAre(1, 3, 5)); } } diff --git a/abseil-cpp/absl/container/sample_element_size_test.cc b/abseil-cpp/absl/container/sample_element_size_test.cc deleted file mode 100644 index b23626b4..00000000 --- a/abseil-cpp/absl/container/sample_element_size_test.cc +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2018 The Abseil Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "gmock/gmock.h" -#include "gtest/gtest.h" -#include "absl/container/flat_hash_map.h" -#include "absl/container/flat_hash_set.h" -#include "absl/container/node_hash_map.h" -#include "absl/container/node_hash_set.h" - -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace container_internal { -namespace { - -#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) -// Create some tables of type `Table`, then look at all the new -// `HashtablezInfo`s to make sure that the `inline_element_size == -// expected_element_size`. The `inline_element_size` is the amount of memory -// allocated for each slot of a hash table, that is `sizeof(slot_type)`. Add -// the new `HashtablezInfo`s to `preexisting_info`. Store all the new tables -// into `tables`. 
-template -void TestInlineElementSize( - HashtablezSampler& sampler, - // clang-tidy gives a false positive on this declaration. This unordered - // set cannot be flat_hash_set, however, since that would introduce a mutex - // deadlock. - std::unordered_set& preexisting_info, // NOLINT - std::vector& tables, const typename Table::value_type& elt, - size_t expected_element_size) { - for (int i = 0; i < 10; ++i) { - // We create a new table and must store it somewhere so that when we store - // a pointer to the resulting `HashtablezInfo` into `preexisting_info` - // that we aren't storing a dangling pointer. - tables.emplace_back(); - // We must insert an element to get a hashtablez to instantiate. - tables.back().insert(elt); - } - size_t new_count = 0; - sampler.Iterate([&](const HashtablezInfo& info) { - if (preexisting_info.insert(&info).second) { - EXPECT_EQ(info.inline_element_size, expected_element_size); - ++new_count; - } - }); - // Make sure we actually did get a new hashtablez. - EXPECT_GT(new_count, 0); -} - -struct bigstruct { - char a[1000]; - friend bool operator==(const bigstruct& x, const bigstruct& y) { - return memcmp(x.a, y.a, sizeof(x.a)) == 0; - } - template - friend H AbslHashValue(H h, const bigstruct& c) { - return H::combine_contiguous(std::move(h), c.a, sizeof(c.a)); - } -}; -#endif - -TEST(FlatHashMap, SampleElementSize) { -#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) - // Enable sampling even if the prod default is off. - SetHashtablezEnabled(true); - SetHashtablezSampleParameter(1); - - auto& sampler = GlobalHashtablezSampler(); - std::vector> flat_map_tables; - std::vector> flat_set_tables; - std::vector> node_map_tables; - std::vector> node_set_tables; - - // It takes thousands of new tables after changing the sampling parameters - // before you actually get some instrumentation. And if you must actually - // put something into those tables. 
- for (int i = 0; i < 10000; ++i) { - flat_map_tables.emplace_back(); - flat_map_tables.back()[i] = bigstruct{}; - } - - // clang-tidy gives a false positive on this declaration. This unordered set - // cannot be a flat_hash_set, however, since that would introduce a mutex - // deadlock. - std::unordered_set preexisting_info; // NOLINT - sampler.Iterate( - [&](const HashtablezInfo& info) { preexisting_info.insert(&info); }); - TestInlineElementSize(sampler, preexisting_info, flat_map_tables, - {0, bigstruct{}}, sizeof(int) + sizeof(bigstruct)); - TestInlineElementSize(sampler, preexisting_info, node_map_tables, - {0, bigstruct{}}, sizeof(void*)); - TestInlineElementSize(sampler, preexisting_info, flat_set_tables, // - bigstruct{}, sizeof(bigstruct)); - TestInlineElementSize(sampler, preexisting_info, node_set_tables, // - bigstruct{}, sizeof(void*)); -#endif -} - -} // namespace -} // namespace container_internal -ABSL_NAMESPACE_END -} // namespace absl diff --git a/abseil-cpp/absl/copts/AbseilConfigureCopts.cmake b/abseil-cpp/absl/copts/AbseilConfigureCopts.cmake index 73435e99..acd46d04 100644 --- a/abseil-cpp/absl/copts/AbseilConfigureCopts.cmake +++ b/abseil-cpp/absl/copts/AbseilConfigureCopts.cmake @@ -1,6 +1,8 @@ # See absl/copts/copts.py and absl/copts/generate_copts.py include(GENERATED_AbseilCopts) +set(ABSL_LSAN_LINKOPTS "") +set(ABSL_HAVE_LSAN OFF) set(ABSL_DEFAULT_LINKOPTS "") if (BUILD_SHARED_LIBS AND MSVC) @@ -10,58 +12,16 @@ else() set(ABSL_BUILD_DLL FALSE) endif() -if(APPLE AND CMAKE_CXX_COMPILER_ID MATCHES [[Clang]]) - # Some CMake targets (not known at the moment of processing) could be set to - # compile for multiple architectures as specified by the OSX_ARCHITECTURES - # property, which is target-specific. We should neither inspect nor rely on - # any CMake property or variable to detect an architecture, in particular: - # - # - CMAKE_OSX_ARCHITECTURES - # is just an initial value for OSX_ARCHITECTURES; set too early. 
- # - # - OSX_ARCHITECTURES - # is a per-target property; targets could be defined later, and their - # properties could be modified any time later. - # - # - CMAKE_SYSTEM_PROCESSOR - # does not reflect multiple architectures at all. - # - # When compiling for multiple architectures, a build system can invoke a - # compiler either - # - # - once: a single command line for multiple architectures (Ninja build) - # - twice: two command lines per each architecture (Xcode build system) - # - # If case of Xcode, it would be possible to set an Xcode-specific attributes - # like XCODE_ATTRIBUTE_OTHER_CPLUSPLUSFLAGS[arch=arm64] or similar. - # - # In both cases, the viable strategy is to pass all arguments at once, allowing - # the compiler to dispatch arch-specific arguments to a designated backend. - set(ABSL_RANDOM_RANDEN_COPTS "") - foreach(_arch IN ITEMS "x86_64" "arm64") - string(TOUPPER "${_arch}" _arch_uppercase) - string(REPLACE "X86_64" "X64" _arch_uppercase ${_arch_uppercase}) - foreach(_flag IN LISTS ABSL_RANDOM_HWAES_${_arch_uppercase}_FLAGS) - list(APPEND ABSL_RANDOM_RANDEN_COPTS "-Xarch_${_arch}" "${_flag}") - endforeach() - endforeach() - # If a compiler happens to deal with an argument for a currently unused - # architecture, it will warn about an unused command line argument. 
- option(ABSL_RANDOM_RANDEN_COPTS_WARNING OFF - "Warn if one of ABSL_RANDOM_RANDEN_COPTS is unused") - if(ABSL_RANDOM_RANDEN_COPTS AND NOT ABSL_RANDOM_RANDEN_COPTS_WARNING) - list(APPEND ABSL_RANDOM_RANDEN_COPTS "-Wno-unused-command-line-argument") - endif() -elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64|amd64|AMD64") +if("${CMAKE_SYSTEM_PROCESSOR}" MATCHES "x86_64|amd64|AMD64") if (MSVC) set(ABSL_RANDOM_RANDEN_COPTS "${ABSL_RANDOM_HWAES_MSVC_X64_FLAGS}") else() set(ABSL_RANDOM_RANDEN_COPTS "${ABSL_RANDOM_HWAES_X64_FLAGS}") endif() -elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "arm.*|aarch64") - if (CMAKE_SIZEOF_VOID_P STREQUAL "8") +elseif("${CMAKE_SYSTEM_PROCESSOR}" MATCHES "arm.*|aarch64") + if ("${CMAKE_SIZEOF_VOID_P}" STREQUAL "8") set(ABSL_RANDOM_RANDEN_COPTS "${ABSL_RANDOM_HWAES_ARM64_FLAGS}") - elseif(CMAKE_SIZEOF_VOID_P STREQUAL "4") + elseif("${CMAKE_SIZEOF_VOID_P}" STREQUAL "4") set(ABSL_RANDOM_RANDEN_COPTS "${ABSL_RANDOM_HWAES_ARM32_FLAGS}") else() message(WARNING "Value of CMAKE_SIZEOF_VOID_P (${CMAKE_SIZEOF_VOID_P}) is not supported.") @@ -72,19 +32,29 @@ else() endif() -if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") +if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU") set(ABSL_DEFAULT_COPTS "${ABSL_GCC_FLAGS}") set(ABSL_TEST_COPTS "${ABSL_GCC_FLAGS};${ABSL_GCC_TEST_FLAGS}") -elseif(CMAKE_CXX_COMPILER_ID MATCHES "Clang") # MATCHES so we get both Clang and AppleClang +elseif("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang") + # MATCHES so we get both Clang and AppleClang if(MSVC) # clang-cl is half MSVC, half LLVM set(ABSL_DEFAULT_COPTS "${ABSL_CLANG_CL_FLAGS}") set(ABSL_TEST_COPTS "${ABSL_CLANG_CL_FLAGS};${ABSL_CLANG_CL_TEST_FLAGS}") + set(ABSL_DEFAULT_LINKOPTS "${ABSL_MSVC_LINKOPTS}") else() set(ABSL_DEFAULT_COPTS "${ABSL_LLVM_FLAGS}") set(ABSL_TEST_COPTS "${ABSL_LLVM_FLAGS};${ABSL_LLVM_TEST_FLAGS}") + if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang") + # AppleClang doesn't have lsan + # https://developer.apple.com/documentation/code_diagnostics + if(NOT 
CMAKE_CXX_COMPILER_VERSION VERSION_LESS 3.5) + set(ABSL_LSAN_LINKOPTS "-fsanitize=leak") + set(ABSL_HAVE_LSAN ON) + endif() + endif() endif() -elseif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") +elseif("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC") set(ABSL_DEFAULT_COPTS "${ABSL_MSVC_FLAGS}") set(ABSL_TEST_COPTS "${ABSL_MSVC_FLAGS};${ABSL_MSVC_TEST_FLAGS}") set(ABSL_DEFAULT_LINKOPTS "${ABSL_MSVC_LINKOPTS}") diff --git a/abseil-cpp/absl/copts/GENERATED_AbseilCopts.cmake b/abseil-cpp/absl/copts/GENERATED_AbseilCopts.cmake index a4ab1aa2..97bd283e 100644 --- a/abseil-cpp/absl/copts/GENERATED_AbseilCopts.cmake +++ b/abseil-cpp/absl/copts/GENERATED_AbseilCopts.cmake @@ -5,6 +5,47 @@ list(APPEND ABSL_CLANG_CL_FLAGS "/W3" + "-Wno-c++98-compat-pedantic" + "-Wno-conversion" + "-Wno-covered-switch-default" + "-Wno-deprecated" + "-Wno-disabled-macro-expansion" + "-Wno-double-promotion" + "-Wno-comma" + "-Wno-extra-semi" + "-Wno-extra-semi-stmt" + "-Wno-packed" + "-Wno-padded" + "-Wno-sign-compare" + "-Wno-float-conversion" + "-Wno-float-equal" + "-Wno-format-nonliteral" + "-Wno-gcc-compat" + "-Wno-global-constructors" + "-Wno-exit-time-destructors" + "-Wno-non-modular-include-in-module" + "-Wno-old-style-cast" + "-Wno-range-loop-analysis" + "-Wno-reserved-id-macro" + "-Wno-shorten-64-to-32" + "-Wno-switch-enum" + "-Wno-thread-safety-negative" + "-Wno-unknown-warning-option" + "-Wno-unreachable-code" + "-Wno-unused-macros" + "-Wno-weak-vtables" + "-Wno-zero-as-null-pointer-constant" + "-Wbitfield-enum-conversion" + "-Wbool-conversion" + "-Wconstant-conversion" + "-Wenum-conversion" + "-Wint-conversion" + "-Wliteral-conversion" + "-Wnon-literal-null-conversion" + "-Wnull-conversion" + "-Wobjc-literal-conversion" + "-Wno-sign-conversion" + "-Wstring-conversion" "/DNOMINMAX" "/DWIN32_LEAN_AND_MEAN" "/D_CRT_SECURE_NO_WARNINGS" @@ -37,7 +78,6 @@ list(APPEND ABSL_GCC_FLAGS "-Wextra" "-Wcast-qual" "-Wconversion-null" - "-Wformat-security" "-Wmissing-declarations" "-Woverlength-strings" 
"-Wpointer-arith" @@ -47,6 +87,8 @@ list(APPEND ABSL_GCC_FLAGS "-Wvarargs" "-Wvla" "-Wwrite-strings" + "-Wno-missing-field-initializers" + "-Wno-sign-compare" "-DNOMINMAX" ) @@ -63,38 +105,48 @@ list(APPEND ABSL_GCC_TEST_FLAGS list(APPEND ABSL_LLVM_FLAGS "-Wall" "-Wextra" - "-Wcast-qual" - "-Wconversion" - "-Wfloat-overflow-conversion" - "-Wfloat-zero-conversion" - "-Wfor-loop-analysis" - "-Wformat-security" - "-Wgnu-redeclared-enum" - "-Winfinite-recursion" - "-Winvalid-constexpr" - "-Wliteral-conversion" - "-Wmissing-declarations" - "-Woverlength-strings" - "-Wpointer-arith" - "-Wself-assign" - "-Wshadow-all" - "-Wstring-conversion" - "-Wtautological-overlap-compare" - "-Wundef" - "-Wuninitialized" - "-Wunreachable-code" - "-Wunused-comparison" - "-Wunused-local-typedefs" - "-Wunused-result" - "-Wvla" - "-Wwrite-strings" + "-Weverything" + "-Wno-c++98-compat-pedantic" + "-Wno-conversion" + "-Wno-covered-switch-default" + "-Wno-deprecated" + "-Wno-disabled-macro-expansion" + "-Wno-double-promotion" + "-Wno-comma" + "-Wno-extra-semi" + "-Wno-extra-semi-stmt" + "-Wno-packed" + "-Wno-padded" + "-Wno-sign-compare" "-Wno-float-conversion" - "-Wno-implicit-float-conversion" - "-Wno-implicit-int-float-conversion" - "-Wno-implicit-int-conversion" + "-Wno-float-equal" + "-Wno-format-nonliteral" + "-Wno-gcc-compat" + "-Wno-global-constructors" + "-Wno-exit-time-destructors" + "-Wno-non-modular-include-in-module" + "-Wno-old-style-cast" + "-Wno-range-loop-analysis" + "-Wno-reserved-id-macro" "-Wno-shorten-64-to-32" - "-Wno-sign-conversion" + "-Wno-switch-enum" + "-Wno-thread-safety-negative" "-Wno-unknown-warning-option" + "-Wno-unreachable-code" + "-Wno-unused-macros" + "-Wno-weak-vtables" + "-Wno-zero-as-null-pointer-constant" + "-Wbitfield-enum-conversion" + "-Wbool-conversion" + "-Wconstant-conversion" + "-Wenum-conversion" + "-Wint-conversion" + "-Wliteral-conversion" + "-Wnon-literal-null-conversion" + "-Wnull-conversion" + "-Wobjc-literal-conversion" + 
"-Wno-sign-conversion" + "-Wstring-conversion" "-DNOMINMAX" ) diff --git a/abseil-cpp/absl/copts/GENERATED_copts.bzl b/abseil-cpp/absl/copts/GENERATED_copts.bzl index a6efc98e..bcdd61ef 100644 --- a/abseil-cpp/absl/copts/GENERATED_copts.bzl +++ b/abseil-cpp/absl/copts/GENERATED_copts.bzl @@ -6,6 +6,47 @@ ABSL_CLANG_CL_FLAGS = [ "/W3", + "-Wno-c++98-compat-pedantic", + "-Wno-conversion", + "-Wno-covered-switch-default", + "-Wno-deprecated", + "-Wno-disabled-macro-expansion", + "-Wno-double-promotion", + "-Wno-comma", + "-Wno-extra-semi", + "-Wno-extra-semi-stmt", + "-Wno-packed", + "-Wno-padded", + "-Wno-sign-compare", + "-Wno-float-conversion", + "-Wno-float-equal", + "-Wno-format-nonliteral", + "-Wno-gcc-compat", + "-Wno-global-constructors", + "-Wno-exit-time-destructors", + "-Wno-non-modular-include-in-module", + "-Wno-old-style-cast", + "-Wno-range-loop-analysis", + "-Wno-reserved-id-macro", + "-Wno-shorten-64-to-32", + "-Wno-switch-enum", + "-Wno-thread-safety-negative", + "-Wno-unknown-warning-option", + "-Wno-unreachable-code", + "-Wno-unused-macros", + "-Wno-weak-vtables", + "-Wno-zero-as-null-pointer-constant", + "-Wbitfield-enum-conversion", + "-Wbool-conversion", + "-Wconstant-conversion", + "-Wenum-conversion", + "-Wint-conversion", + "-Wliteral-conversion", + "-Wnon-literal-null-conversion", + "-Wnull-conversion", + "-Wobjc-literal-conversion", + "-Wno-sign-conversion", + "-Wstring-conversion", "/DNOMINMAX", "/DWIN32_LEAN_AND_MEAN", "/D_CRT_SECURE_NO_WARNINGS", @@ -38,7 +79,6 @@ ABSL_GCC_FLAGS = [ "-Wextra", "-Wcast-qual", "-Wconversion-null", - "-Wformat-security", "-Wmissing-declarations", "-Woverlength-strings", "-Wpointer-arith", @@ -48,6 +88,8 @@ ABSL_GCC_FLAGS = [ "-Wvarargs", "-Wvla", "-Wwrite-strings", + "-Wno-missing-field-initializers", + "-Wno-sign-compare", "-DNOMINMAX", ] @@ -64,38 +106,48 @@ ABSL_GCC_TEST_FLAGS = [ ABSL_LLVM_FLAGS = [ "-Wall", "-Wextra", - "-Wcast-qual", - "-Wconversion", - "-Wfloat-overflow-conversion", - 
"-Wfloat-zero-conversion", - "-Wfor-loop-analysis", - "-Wformat-security", - "-Wgnu-redeclared-enum", - "-Winfinite-recursion", - "-Winvalid-constexpr", - "-Wliteral-conversion", - "-Wmissing-declarations", - "-Woverlength-strings", - "-Wpointer-arith", - "-Wself-assign", - "-Wshadow-all", - "-Wstring-conversion", - "-Wtautological-overlap-compare", - "-Wundef", - "-Wuninitialized", - "-Wunreachable-code", - "-Wunused-comparison", - "-Wunused-local-typedefs", - "-Wunused-result", - "-Wvla", - "-Wwrite-strings", + "-Weverything", + "-Wno-c++98-compat-pedantic", + "-Wno-conversion", + "-Wno-covered-switch-default", + "-Wno-deprecated", + "-Wno-disabled-macro-expansion", + "-Wno-double-promotion", + "-Wno-comma", + "-Wno-extra-semi", + "-Wno-extra-semi-stmt", + "-Wno-packed", + "-Wno-padded", + "-Wno-sign-compare", "-Wno-float-conversion", - "-Wno-implicit-float-conversion", - "-Wno-implicit-int-float-conversion", - "-Wno-implicit-int-conversion", + "-Wno-float-equal", + "-Wno-format-nonliteral", + "-Wno-gcc-compat", + "-Wno-global-constructors", + "-Wno-exit-time-destructors", + "-Wno-non-modular-include-in-module", + "-Wno-old-style-cast", + "-Wno-range-loop-analysis", + "-Wno-reserved-id-macro", "-Wno-shorten-64-to-32", - "-Wno-sign-conversion", + "-Wno-switch-enum", + "-Wno-thread-safety-negative", "-Wno-unknown-warning-option", + "-Wno-unreachable-code", + "-Wno-unused-macros", + "-Wno-weak-vtables", + "-Wno-zero-as-null-pointer-constant", + "-Wbitfield-enum-conversion", + "-Wbool-conversion", + "-Wconstant-conversion", + "-Wenum-conversion", + "-Wint-conversion", + "-Wliteral-conversion", + "-Wnon-literal-null-conversion", + "-Wnull-conversion", + "-Wobjc-literal-conversion", + "-Wno-sign-conversion", + "-Wstring-conversion", "-DNOMINMAX", ] diff --git a/abseil-cpp/absl/copts/configure_copts.bzl b/abseil-cpp/absl/copts/configure_copts.bzl index 40d5849a..ff9a5ea9 100644 --- a/abseil-cpp/absl/copts/configure_copts.bzl +++ 
b/abseil-cpp/absl/copts/configure_copts.bzl @@ -22,21 +22,21 @@ load( ) ABSL_DEFAULT_COPTS = select({ - "//absl:msvc_compiler": ABSL_MSVC_FLAGS, - "//absl:clang-cl_compiler": ABSL_CLANG_CL_FLAGS, - "//absl:clang_compiler": ABSL_LLVM_FLAGS, + "//absl:windows": ABSL_MSVC_FLAGS, + "//absl:llvm_compiler": ABSL_LLVM_FLAGS, "//conditions:default": ABSL_GCC_FLAGS, }) +# in absence of modules (--compiler=gcc or -c opt), cc_tests leak their copts +# to their (included header) dependencies and fail to build outside absl ABSL_TEST_COPTS = ABSL_DEFAULT_COPTS + select({ - "//absl:msvc_compiler": ABSL_MSVC_TEST_FLAGS, - "//absl:clang-cl_compiler": ABSL_CLANG_CL_TEST_FLAGS, - "//absl:clang_compiler": ABSL_LLVM_TEST_FLAGS, + "//absl:windows": ABSL_MSVC_TEST_FLAGS, + "//absl:llvm_compiler": ABSL_LLVM_TEST_FLAGS, "//conditions:default": ABSL_GCC_TEST_FLAGS, }) ABSL_DEFAULT_LINKOPTS = select({ - "//absl:msvc_compiler": ABSL_MSVC_LINKOPTS, + "//absl:windows": ABSL_MSVC_LINKOPTS, "//conditions:default": [], }) @@ -50,7 +50,6 @@ ABSL_RANDOM_RANDEN_COPTS = select({ ":cpu_x64_windows": ABSL_RANDOM_HWAES_MSVC_X64_FLAGS, ":cpu_k8": ABSL_RANDOM_HWAES_X64_FLAGS, ":cpu_ppc": ["-mcrypto"], - ":cpu_aarch64": ABSL_RANDOM_HWAES_ARM64_FLAGS, # Supported by default or unsupported. "//conditions:default": [], @@ -71,7 +70,6 @@ def absl_random_randen_copts_init(): "darwin", "x64_windows_msvc", "x64_windows", - "aarch64", ] for cpu in cpu_configs: native.config_setting( diff --git a/abseil-cpp/absl/copts/copts.py b/abseil-cpp/absl/copts/copts.py index 0d6c1ec3..a3437c1b 100644 --- a/abseil-cpp/absl/copts/copts.py +++ b/abseil-cpp/absl/copts/copts.py @@ -16,6 +16,77 @@ MSVC_BIG_WARNING_FLAGS = [ "/W3", ] +LLVM_BIG_WARNING_FLAGS = [ + "-Wall", + "-Wextra", + "-Weverything", +] + +# Docs on single flags is preceded by a comment. +# Docs on groups of flags is preceded by ###. 
+LLVM_DISABLE_WARNINGS_FLAGS = [ + # Abseil does not support C++98 + "-Wno-c++98-compat-pedantic", + # Turns off all implicit conversion warnings. Most are re-enabled below. + "-Wno-conversion", + "-Wno-covered-switch-default", + "-Wno-deprecated", + "-Wno-disabled-macro-expansion", + "-Wno-double-promotion", + ### + # Turned off as they include valid C++ code. + "-Wno-comma", + "-Wno-extra-semi", + "-Wno-extra-semi-stmt", + "-Wno-packed", + "-Wno-padded", + ### + # Google style does not use unsigned integers, though STL containers + # have unsigned types. + "-Wno-sign-compare", + ### + "-Wno-float-conversion", + "-Wno-float-equal", + "-Wno-format-nonliteral", + # Too aggressive: warns on Clang extensions enclosed in Clang-only + # compilation paths. + "-Wno-gcc-compat", + ### + # Some internal globals are necessary. Don't do this at home. + "-Wno-global-constructors", + "-Wno-exit-time-destructors", + ### + "-Wno-non-modular-include-in-module", + "-Wno-old-style-cast", + # Warns on preferred usage of non-POD types such as string_view + "-Wno-range-loop-analysis", + "-Wno-reserved-id-macro", + "-Wno-shorten-64-to-32", + "-Wno-switch-enum", + "-Wno-thread-safety-negative", + "-Wno-unknown-warning-option", + "-Wno-unreachable-code", + # Causes warnings on include guards + "-Wno-unused-macros", + "-Wno-weak-vtables", + # Causes warnings on usage of types/compare.h comparison operators. + "-Wno-zero-as-null-pointer-constant", + ### + # Implicit conversion warnings turned off by -Wno-conversion + # which are re-enabled below. 
+ "-Wbitfield-enum-conversion", + "-Wbool-conversion", + "-Wconstant-conversion", + "-Wenum-conversion", + "-Wint-conversion", + "-Wliteral-conversion", + "-Wnon-literal-null-conversion", + "-Wnull-conversion", + "-Wobjc-literal-conversion", + "-Wno-sign-conversion", + "-Wstring-conversion", +] + LLVM_TEST_DISABLE_WARNINGS_FLAGS = [ "-Wno-c99-extensions", "-Wno-deprecated-declarations", @@ -54,7 +125,6 @@ COPT_VARS = { "-Wextra", "-Wcast-qual", "-Wconversion-null", - "-Wformat-security", "-Wmissing-declarations", "-Woverlength-strings", "-Wpointer-arith", @@ -64,6 +134,13 @@ COPT_VARS = { "-Wvarargs", "-Wvla", # variable-length array "-Wwrite-strings", + # gcc-4.x has spurious missing field initializer warnings. + # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=36750 + # Remove when gcc-4.x is no longer supported. + "-Wno-missing-field-initializers", + # Google style does not use unsigned integers, though STL containers + # have unsigned types. + "-Wno-sign-compare", # Don't define min and max macros (Build on Windows using gcc) "-DNOMINMAX", ], @@ -76,52 +153,15 @@ COPT_VARS = { "-Wno-unused-parameter", "-Wno-unused-private-field", ], - "ABSL_LLVM_FLAGS": [ - "-Wall", - "-Wextra", - "-Wcast-qual", - "-Wconversion", - "-Wfloat-overflow-conversion", - "-Wfloat-zero-conversion", - "-Wfor-loop-analysis", - "-Wformat-security", - "-Wgnu-redeclared-enum", - "-Winfinite-recursion", - "-Winvalid-constexpr", - "-Wliteral-conversion", - "-Wmissing-declarations", - "-Woverlength-strings", - "-Wpointer-arith", - "-Wself-assign", - "-Wshadow-all", - "-Wstring-conversion", - "-Wtautological-overlap-compare", - "-Wundef", - "-Wuninitialized", - "-Wunreachable-code", - "-Wunused-comparison", - "-Wunused-local-typedefs", - "-Wunused-result", - "-Wvla", - "-Wwrite-strings", - # Warnings that are enabled by group warning flags like -Wall that we - # explicitly disable. 
- "-Wno-float-conversion", - "-Wno-implicit-float-conversion", - "-Wno-implicit-int-float-conversion", - "-Wno-implicit-int-conversion", - "-Wno-shorten-64-to-32", - "-Wno-sign-conversion", - # Disable warnings on unknown warning flags (when warning flags are - # unknown on older compiler versions) - "-Wno-unknown-warning-option", - # Don't define min and max macros (Build on Windows using clang) - "-DNOMINMAX", - ], + "ABSL_LLVM_FLAGS": + LLVM_BIG_WARNING_FLAGS + LLVM_DISABLE_WARNINGS_FLAGS + [ + # Don't define min and max macros (Build on Windows using clang) + "-DNOMINMAX", + ], "ABSL_LLVM_TEST_FLAGS": LLVM_TEST_DISABLE_WARNINGS_FLAGS, "ABSL_CLANG_CL_FLAGS": - (MSVC_BIG_WARNING_FLAGS + MSVC_DEFINES), + (MSVC_BIG_WARNING_FLAGS + LLVM_DISABLE_WARNINGS_FLAGS + MSVC_DEFINES), "ABSL_CLANG_CL_TEST_FLAGS": LLVM_TEST_DISABLE_WARNINGS_FLAGS, "ABSL_MSVC_FLAGS": diff --git a/abseil-cpp/absl/copts/generate_copts.py b/abseil-cpp/absl/copts/generate_copts.py index 34be2fc2..0e5dc9fa 100755 --- a/abseil-cpp/absl/copts/generate_copts.py +++ b/abseil-cpp/absl/copts/generate_copts.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python3 +#!/usr/bin/python """Generate Abseil compile compile option configs. Usage: /copts/generate_copts.py diff --git a/abseil-cpp/absl/debugging/BUILD.bazel b/abseil-cpp/absl/debugging/BUILD.bazel index 932a8e9f..86faac9b 100644 --- a/abseil-cpp/absl/debugging/BUILD.bazel +++ b/abseil-cpp/absl/debugging/BUILD.bazel @@ -14,6 +14,7 @@ # limitations under the License. 
# +load("@rules_cc//cc:defs.bzl", "cc_library", "cc_test") load( "//absl:copts/configure_copts.bzl", "ABSL_DEFAULT_COPTS", @@ -33,10 +34,8 @@ cc_library( "internal/stacktrace_aarch64-inl.inc", "internal/stacktrace_arm-inl.inc", "internal/stacktrace_config.h", - "internal/stacktrace_emscripten-inl.inc", "internal/stacktrace_generic-inl.inc", "internal/stacktrace_powerpc-inl.inc", - "internal/stacktrace_riscv-inl.inc", "internal/stacktrace_unimplemented-inl.inc", "internal/stacktrace_win32-inl.inc", "internal/stacktrace_x86-inl.inc", @@ -58,7 +57,6 @@ cc_library( "symbolize.cc", "symbolize_darwin.inc", "symbolize_elf.inc", - "symbolize_emscripten.inc", "symbolize_unimplemented.inc", "symbolize_win32.inc", ], @@ -68,8 +66,7 @@ cc_library( ], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS + select({ - "//absl:msvc_compiler": ["-DEFAULTLIB:dbghelp.lib"], - "//absl:clang-cl_compiler": ["-DEFAULTLIB:dbghelp.lib"], + "//absl:windows": ["-DEFAULTLIB:dbghelp.lib"], "//conditions:default": [], }), deps = [ @@ -89,13 +86,11 @@ cc_test( name = "symbolize_test", srcs = ["symbolize_test.cc"], copts = ABSL_TEST_COPTS + select({ - "//absl:msvc_compiler": ["/Z7"], - "//absl:clang-cl_compiler": ["/Z7"], + "//absl:windows": ["/Z7"], "//conditions:default": [], }), linkopts = ABSL_DEFAULT_LINKOPTS + select({ - "//absl:msvc_compiler": ["/DEBUG"], - "//absl:clang-cl_compiler": ["/DEBUG"], + "//absl:windows": ["/DEBUG"], "//conditions:default": [], }), deps = [ @@ -143,6 +138,7 @@ cc_library( "//absl/base", "//absl/base:config", "//absl/base:core_headers", + "//absl/base:errno_saver", "//absl/base:raw_logging_internal", ], ) @@ -152,8 +148,7 @@ cc_test( srcs = ["failure_signal_handler_test.cc"], copts = ABSL_TEST_COPTS, linkopts = select({ - "//absl:msvc_compiler": [], - "//absl:clang-cl_compiler": [], + "//absl:windows": [], "//absl:wasm": [], "//conditions:default": ["-pthread"], }) + ABSL_DEFAULT_LINKOPTS, @@ -181,7 +176,6 @@ cc_library( ], copts = ABSL_DEFAULT_COPTS, 
linkopts = ABSL_DEFAULT_LINKOPTS, - visibility = ["//visibility:private"], deps = [ "//absl/base:config", "//absl/base:core_headers", @@ -196,8 +190,6 @@ cc_library( srcs = ["internal/demangle.cc"], hdrs = ["internal/demangle.h"], copts = ABSL_DEFAULT_COPTS, - linkopts = ABSL_DEFAULT_LINKOPTS, - visibility = ["//visibility:private"], deps = [ "//absl/base", "//absl/base:config", @@ -225,7 +217,6 @@ cc_library( name = "leak_check", srcs = ["leak_check.cc"], hdrs = ["leak_check.h"], - copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ "//absl/base:config", @@ -233,33 +224,96 @@ cc_library( ], ) +# Adding a dependency to leak_check_disable will disable +# sanitizer leak checking (asan/lsan) in a test without +# the need to mess around with build features. +cc_library( + name = "leak_check_disable", + srcs = ["leak_check_disable.cc"], + linkopts = ABSL_DEFAULT_LINKOPTS, + linkstatic = 1, + deps = ["//absl/base:config"], + alwayslink = 1, +) + +# These targets exists for use in tests only, explicitly configuring the +# LEAK_SANITIZER macro. It must be linked with -fsanitize=leak for lsan. 
+ABSL_LSAN_LINKOPTS = select({ + "//absl:llvm_compiler": ["-fsanitize=leak"], + "//conditions:default": [], +}) + +cc_library( + name = "leak_check_api_enabled_for_testing", + testonly = 1, + srcs = ["leak_check.cc"], + hdrs = ["leak_check.h"], + copts = select({ + "//absl:llvm_compiler": ["-DLEAK_SANITIZER"], + "//conditions:default": [], + }), + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = ["//visibility:private"], + deps = [ + "//absl/base:config", + ], +) + +cc_library( + name = "leak_check_api_disabled_for_testing", + testonly = 1, + srcs = ["leak_check.cc"], + hdrs = ["leak_check.h"], + copts = ["-ULEAK_SANITIZER"], + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = ["//visibility:private"], + deps = [ + "//absl/base:config", + ], +) + cc_test( name = "leak_check_test", srcs = ["leak_check_test.cc"], - copts = ABSL_TEST_COPTS, - linkopts = ABSL_DEFAULT_LINKOPTS, + copts = select({ + "//absl:llvm_compiler": ["-DABSL_EXPECT_LEAK_SANITIZER"], + "//conditions:default": [], + }), + linkopts = ABSL_LSAN_LINKOPTS + ABSL_DEFAULT_LINKOPTS, tags = ["notsan"], deps = [ - ":leak_check", - "//absl/base:config", - "//absl/base:raw_logging_internal", + ":leak_check_api_enabled_for_testing", + "//absl/base", "@com_google_googletest//:gtest_main", ], ) -# Binary that leaks memory and expects to fail on exit. This isn't a -# test that expected to pass on its own; it exists to be called by a -# script that checks exit status and output. -# TODO(absl-team): Write a test to run this with a script that -# verifies that it correctly fails. 
-cc_binary( - name = "leak_check_fail_test_binary", - srcs = ["leak_check_fail_test.cc"], - copts = ABSL_TEST_COPTS, +cc_test( + name = "leak_check_no_lsan_test", + srcs = ["leak_check_test.cc"], + copts = ["-UABSL_EXPECT_LEAK_SANITIZER"], linkopts = ABSL_DEFAULT_LINKOPTS, + tags = ["noasan"], deps = [ - ":leak_check", - "//absl/base:raw_logging_internal", + ":leak_check_api_disabled_for_testing", + "//absl/base", # for raw_logging + "@com_google_googletest//:gtest_main", + ], +) + +# Test that leak checking is skipped when lsan is enabled but +# ":leak_check_disable" is linked in. +# +# This test should fail in the absence of a dependency on ":leak_check_disable" +cc_test( + name = "disabled_leak_check_test", + srcs = ["leak_check_fail_test.cc"], + linkopts = ABSL_LSAN_LINKOPTS + ABSL_DEFAULT_LINKOPTS, + tags = ["notsan"], + deps = [ + ":leak_check_api_enabled_for_testing", + ":leak_check_disable", + "//absl/base", "@com_google_googletest//:gtest_main", ], ) @@ -284,7 +338,6 @@ cc_test( srcs = ["internal/stack_consumption_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, - tags = ["notsan"], deps = [ ":stack_consumption", "//absl/base:core_headers", @@ -292,18 +345,3 @@ cc_test( "@com_google_googletest//:gtest_main", ], ) - -cc_binary( - name = "stacktrace_benchmark", - testonly = 1, - srcs = ["stacktrace_benchmark.cc"], - copts = ABSL_TEST_COPTS, - linkopts = ABSL_DEFAULT_LINKOPTS, - tags = ["benchmark"], - deps = [ - ":stacktrace", - "//absl/base:config", - "//absl/base:core_headers", - "@com_github_google_benchmark//:benchmark_main", - ], -) diff --git a/abseil-cpp/absl/debugging/CMakeLists.txt b/abseil-cpp/absl/debugging/CMakeLists.txt index d8207d6a..074b44cf 100644 --- a/abseil-cpp/absl/debugging/CMakeLists.txt +++ b/abseil-cpp/absl/debugging/CMakeLists.txt @@ -14,8 +14,6 @@ # limitations under the License. 
# -find_library(EXECINFO_LIBRARY execinfo) - absl_cc_library( NAME stacktrace @@ -24,10 +22,8 @@ absl_cc_library( "internal/stacktrace_aarch64-inl.inc" "internal/stacktrace_arm-inl.inc" "internal/stacktrace_config.h" - "internal/stacktrace_emscripten-inl.inc" "internal/stacktrace_generic-inl.inc" "internal/stacktrace_powerpc-inl.inc" - "internal/stacktrace_riscv-inl.inc" "internal/stacktrace_unimplemented-inl.inc" "internal/stacktrace_win32-inl.inc" "internal/stacktrace_x86-inl.inc" @@ -35,8 +31,6 @@ absl_cc_library( "stacktrace.cc" COPTS ${ABSL_DEFAULT_COPTS} - LINKOPTS - $<$:${EXECINFO_LIBRARY}> DEPS absl::debugging_internal absl::config @@ -54,7 +48,6 @@ absl_cc_library( "symbolize.cc" "symbolize_darwin.inc" "symbolize_elf.inc" - "symbolize_emscripten.inc" "symbolize_unimplemented.inc" "symbolize_win32.inc" COPTS @@ -94,10 +87,9 @@ absl_cc_test( absl::memory absl::raw_logging_internal absl::strings - GTest::gmock + gmock ) -# Internal-only target, do not depend on directly. absl_cc_library( NAME examine_stack @@ -130,6 +122,7 @@ absl_cc_library( absl::base absl::config absl::core_headers + absl::errno_saver absl::raw_logging_internal PUBLIC ) @@ -148,10 +141,9 @@ absl_cc_test( absl::strings absl::raw_logging_internal Threads::Threads - GTest::gmock + gmock ) -# Internal-only target, do not depend on directly. absl_cc_library( NAME debugging_internal @@ -173,7 +165,6 @@ absl_cc_library( absl::raw_logging_internal ) -# Internal-only target, do not depend on directly. 
absl_cc_library( NAME demangle_internal @@ -203,7 +194,7 @@ absl_cc_test( absl::core_headers absl::memory absl::raw_logging_internal - GTest::gmock_main + gmock_main ) absl_cc_library( @@ -221,6 +212,42 @@ absl_cc_library( PUBLIC ) +absl_cc_library( + NAME + leak_check_disable + SRCS + "leak_check_disable.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + PUBLIC +) + +absl_cc_library( + NAME + leak_check_api_enabled_for_testing + HDRS + "leak_check.h" + SRCS + "leak_check.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + $<$:-DLEAK_SANITIZER> + TESTONLY +) + +absl_cc_library( + NAME + leak_check_api_disabled_for_testing + HDRS + "leak_check.h" + SRCS + "leak_check.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + "-ULEAK_SANITIZER" + TESTONLY +) + absl_cc_test( NAME leak_check_test @@ -228,15 +255,46 @@ absl_cc_test( "leak_check_test.cc" COPTS ${ABSL_TEST_COPTS} + "$<$:-DABSL_EXPECT_LEAK_SANITIZER>" LINKOPTS - ${ABSL_DEFAULT_LINKOPTS} + "${ABSL_LSAN_LINKOPTS}" DEPS - absl::leak_check + absl::leak_check_api_enabled_for_testing + absl::base + gmock_main +) + +absl_cc_test( + NAME + leak_check_no_lsan_test + SRCS + "leak_check_test.cc" + COPTS + ${ABSL_TEST_COPTS} + "-UABSL_EXPECT_LEAK_SANITIZER" + DEPS + absl::leak_check_api_disabled_for_testing + absl::base + gmock_main +) + +absl_cc_test( + NAME + disabled_leak_check_test + SRCS + "leak_check_fail_test.cc" + COPTS + ${ABSL_TEST_COPTS} + LINKOPTS + "${ABSL_LSAN_LINKOPTS}" + DEPS + absl::leak_check_api_enabled_for_testing + absl::leak_check_disable absl::base - GTest::gmock_main + absl::raw_logging_internal + gmock_main ) -# Internal-only target, do not depend on directly. 
absl_cc_library( NAME stack_consumption @@ -264,7 +322,7 @@ absl_cc_test( absl::stack_consumption absl::core_headers absl::raw_logging_internal - GTest::gmock_main + gmock_main ) # component target diff --git a/abseil-cpp/absl/debugging/failure_signal_handler.cc b/abseil-cpp/absl/debugging/failure_signal_handler.cc index affade3b..5d13bdbb 100644 --- a/abseil-cpp/absl/debugging/failure_signal_handler.cc +++ b/abseil-cpp/absl/debugging/failure_signal_handler.cc @@ -21,7 +21,6 @@ #ifdef _WIN32 #include #else -#include #include #endif @@ -42,6 +41,7 @@ #include #include "absl/base/attributes.h" +#include "absl/base/internal/errno_saver.h" #include "absl/base/internal/raw_logging.h" #include "absl/base/internal/sysinfo.h" #include "absl/debugging/internal/examine_stack.h" @@ -51,7 +51,7 @@ #define ABSL_HAVE_SIGACTION // Apple WatchOS and TVOS don't allow sigaltstack #if !(defined(TARGET_OS_WATCH) && TARGET_OS_WATCH) && \ - !(defined(TARGET_OS_TV) && TARGET_OS_TV) && !defined(__QNX__) + !(defined(TARGET_OS_TV) && TARGET_OS_TV) #define ABSL_HAVE_SIGALTSTACK #endif #endif @@ -135,8 +135,7 @@ static bool SetupAlternateStackOnce() { #else const size_t page_mask = sysconf(_SC_PAGESIZE) - 1; #endif - size_t stack_size = - (std::max(SIGSTKSZ, 65536) + page_mask) & ~page_mask; + size_t stack_size = (std::max(SIGSTKSZ, 65536) + page_mask) & ~page_mask; #if defined(ABSL_HAVE_ADDRESS_SANITIZER) || \ defined(ABSL_HAVE_MEMORY_SANITIZER) || defined(ABSL_HAVE_THREAD_SANITIZER) // Account for sanitizer instrumentation requiring additional stack space. 
@@ -216,27 +215,21 @@ static void InstallOneFailureHandler(FailureSignalData* data, #endif static void WriteToStderr(const char* data) { - absl::raw_logging_internal::AsyncSignalSafeWriteToStderr(data, strlen(data)); + absl::base_internal::ErrnoSaver errno_saver; + absl::raw_logging_internal::SafeWriteToStderr(data, strlen(data)); } -static void WriteSignalMessage(int signo, int cpu, - void (*writerfn)(const char*)) { - char buf[96]; - char on_cpu[32] = {0}; - if (cpu != -1) { - snprintf(on_cpu, sizeof(on_cpu), " on cpu %d", cpu); - } +static void WriteSignalMessage(int signo, void (*writerfn)(const char*)) { + char buf[64]; const char* const signal_string = debugging_internal::FailureSignalToString(signo); if (signal_string != nullptr && signal_string[0] != '\0') { - snprintf(buf, sizeof(buf), "*** %s received at time=%ld%s ***\n", + snprintf(buf, sizeof(buf), "*** %s received at time=%ld ***\n", signal_string, - static_cast(time(nullptr)), // NOLINT(runtime/int) - on_cpu); + static_cast(time(nullptr))); // NOLINT(runtime/int) } else { - snprintf(buf, sizeof(buf), "*** Signal %d received at time=%ld%s ***\n", - signo, static_cast(time(nullptr)), // NOLINT(runtime/int) - on_cpu); + snprintf(buf, sizeof(buf), "*** Signal %d received at time=%ld ***\n", + signo, static_cast(time(nullptr))); // NOLINT(runtime/int) } writerfn(buf); } @@ -276,10 +269,10 @@ ABSL_ATTRIBUTE_NOINLINE static void WriteStackTrace( // Called by AbslFailureSignalHandler() to write the failure info. It is // called once with writerfn set to WriteToStderr() and then possibly // with writerfn set to the user provided function. 
-static void WriteFailureInfo(int signo, void* ucontext, int cpu, +static void WriteFailureInfo(int signo, void* ucontext, void (*writerfn)(const char*)) { WriterFnStruct writerfn_struct{writerfn}; - WriteSignalMessage(signo, cpu, writerfn); + WriteSignalMessage(signo, writerfn); WriteStackTrace(ucontext, fsh_options.symbolize_stacktrace, WriterFnWrapper, &writerfn_struct); } @@ -341,14 +334,6 @@ static void AbslFailureSignalHandler(int signo, siginfo_t*, void* ucontext) { } } - // Increase the chance that the CPU we report was the same CPU on which the - // signal was received by doing this as early as possible, i.e. after - // verifying that this is not a recursive signal handler invocation. - int my_cpu = -1; -#ifdef ABSL_HAVE_SCHED_GETCPU - my_cpu = sched_getcpu(); -#endif - #ifdef ABSL_HAVE_ALARM // Set an alarm to abort the program in case this code hangs or deadlocks. if (fsh_options.alarm_on_failure_secs > 0) { @@ -359,13 +344,12 @@ static void AbslFailureSignalHandler(int signo, siginfo_t*, void* ucontext) { #endif // First write to stderr. - WriteFailureInfo(signo, ucontext, my_cpu, WriteToStderr); + WriteFailureInfo(signo, ucontext, WriteToStderr); // Riskier code (because it is less likely to be async-signal-safe) // goes after this point. if (fsh_options.writerfn != nullptr) { - WriteFailureInfo(signo, ucontext, my_cpu, fsh_options.writerfn); - fsh_options.writerfn(nullptr); + WriteFailureInfo(signo, ucontext, fsh_options.writerfn); } if (fsh_options.call_previous_handler) { diff --git a/abseil-cpp/absl/debugging/failure_signal_handler.h b/abseil-cpp/absl/debugging/failure_signal_handler.h index 500115c0..0c0f585d 100644 --- a/abseil-cpp/absl/debugging/failure_signal_handler.h +++ b/abseil-cpp/absl/debugging/failure_signal_handler.h @@ -90,7 +90,7 @@ struct FailureSignalHandlerOptions { // If non-null, indicates a pointer to a callback function that will be called // upon failure, with a string argument containing failure data. 
This function // may be used as a hook to write failure data to a secondary location, such - // as a log file. This function will also be called with null data, as a hint + // as a log file. This function may also be called with null data, as a hint // to flush any buffered data before the program may be terminated. Consider // flushing any buffered data in all calls to this function. // diff --git a/abseil-cpp/absl/debugging/failure_signal_handler_test.cc b/abseil-cpp/absl/debugging/failure_signal_handler_test.cc index 6a62428b..d8283b2f 100644 --- a/abseil-cpp/absl/debugging/failure_signal_handler_test.cc +++ b/abseil-cpp/absl/debugging/failure_signal_handler_test.cc @@ -122,12 +122,6 @@ TEST_P(FailureSignalHandlerDeathTest, AbslFatalSignalsWithWriterFn) { "*** ", absl::debugging_internal::FailureSignalToString(signo), " received at "))); - // On platforms where it is possible to get the current CPU, the - // CPU number is also logged. Check that it is present in output. -#if defined(__linux__) - EXPECT_THAT(error_line, testing::HasSubstr(" on cpu ")); -#endif - if (absl::debugging_internal::StackTraceWorksForTest()) { std::getline(error_output, error_line); EXPECT_THAT(error_line, StartsWith("PC: ")); diff --git a/abseil-cpp/absl/debugging/internal/address_is_readable.cc b/abseil-cpp/absl/debugging/internal/address_is_readable.cc index 4be6256b..329c285f 100644 --- a/abseil-cpp/absl/debugging/internal/address_is_readable.cc +++ b/abseil-cpp/absl/debugging/internal/address_is_readable.cc @@ -30,12 +30,16 @@ bool AddressIsReadable(const void* /* addr */) { return true; } ABSL_NAMESPACE_END } // namespace absl -#else // __linux__ && !__ANDROID__ +#else -#include -#include +#include +#include #include +#include +#include +#include + #include "absl/base/internal/errno_saver.h" #include "absl/base/internal/raw_logging.h" @@ -43,54 +47,93 @@ namespace absl { ABSL_NAMESPACE_BEGIN namespace debugging_internal { -// NOTE: be extra careful about adding any interposable 
function calls here -// (such as open(), read(), etc.). These symbols may be interposed and will get -// invoked in contexts they don't expect. -// -// NOTE: any new system calls here may also require sandbox reconfiguration. -// -bool AddressIsReadable(const void *addr) { - // Align address on 8-byte boundary. On aarch64, checking last - // byte before inaccessible page returned unexpected EFAULT. - const uintptr_t u_addr = reinterpret_cast(addr) & ~7; - addr = reinterpret_cast(u_addr); +// Pack a pid and two file descriptors into a 64-bit word, +// using 16, 24, and 24 bits for each respectively. +static uint64_t Pack(uint64_t pid, uint64_t read_fd, uint64_t write_fd) { + ABSL_RAW_CHECK((read_fd >> 24) == 0 && (write_fd >> 24) == 0, + "fd out of range"); + return (pid << 48) | ((read_fd & 0xffffff) << 24) | (write_fd & 0xffffff); +} - // rt_sigprocmask below will succeed for this input. - if (addr == nullptr) return false; +// Unpack x into a pid and two file descriptors, where x was created with +// Pack(). +static void Unpack(uint64_t x, int *pid, int *read_fd, int *write_fd) { + *pid = x >> 48; + *read_fd = (x >> 24) & 0xffffff; + *write_fd = x & 0xffffff; +} - absl::base_internal::ErrnoSaver errno_saver; +// Return whether the byte at *addr is readable, without faulting. +// Save and restores errno. Returns true on systems where +// unimplemented. +// This is a namespace-scoped variable for correct zero-initialization. +static std::atomic pid_and_fds; // initially 0, an invalid pid. - // Here we probe with some syscall which - // - accepts an 8-byte region of user memory as input - // - tests for EFAULT before other validation - // - has no problematic side-effects - // - // rt_sigprocmask(2) works for this. It copies sizeof(kernel_sigset_t)==8 - // bytes from the address into the kernel memory before any validation. - // - // The call can never succeed, since the `how` parameter is not one of - // SIG_BLOCK, SIG_UNBLOCK, SIG_SETMASK. 
- // - // This strategy depends on Linux implementation details, - // so we rely on the test to alert us if it stops working. - // - // Some discarded past approaches: - // - msync() doesn't reject PROT_NONE regions - // - write() on /dev/null doesn't return EFAULT - // - write() on a pipe requires creating it and draining the writes - // - connect() works but is problematic for sandboxes and needs a valid - // file descriptor - // - // This can never succeed (invalid first argument to sigprocmask). - ABSL_RAW_CHECK(syscall(SYS_rt_sigprocmask, ~0, addr, nullptr, - /*sizeof(kernel_sigset_t)*/ 8) == -1, - "unexpected success"); - ABSL_RAW_CHECK(errno == EFAULT || errno == EINVAL, "unexpected errno"); - return errno != EFAULT; +bool AddressIsReadable(const void *addr) { + absl::base_internal::ErrnoSaver errno_saver; + // We test whether a byte is readable by using write(). Normally, this would + // be done via a cached file descriptor to /dev/null, but linux fails to + // check whether the byte is readable when the destination is /dev/null, so + // we use a cached pipe. We store the pid of the process that created the + // pipe to handle the case where a process forks, and the child closes all + // the file descriptors and then calls this routine. This is not perfect: + // the child could use the routine, then close all file descriptors and then + // use this routine again. But the likely use of this routine is when + // crashing, to test the validity of pages when dumping the stack. Beware + // that we may leak file descriptors, but we're unlikely to leak many. 
+ int bytes_written; + int current_pid = getpid() & 0xffff; // we use only the low order 16 bits + do { // until we do not get EBADF trying to use file descriptors + int pid; + int read_fd; + int write_fd; + uint64_t local_pid_and_fds = pid_and_fds.load(std::memory_order_acquire); + Unpack(local_pid_and_fds, &pid, &read_fd, &write_fd); + while (current_pid != pid) { + int p[2]; + // new pipe + if (pipe(p) != 0) { + ABSL_RAW_LOG(FATAL, "Failed to create pipe, errno=%d", errno); + } + fcntl(p[0], F_SETFD, FD_CLOEXEC); + fcntl(p[1], F_SETFD, FD_CLOEXEC); + uint64_t new_pid_and_fds = Pack(current_pid, p[0], p[1]); + if (pid_and_fds.compare_exchange_strong( + local_pid_and_fds, new_pid_and_fds, std::memory_order_release, + std::memory_order_relaxed)) { + local_pid_and_fds = new_pid_and_fds; // fds exposed to other threads + } else { // fds not exposed to other threads; we can close them. + close(p[0]); + close(p[1]); + local_pid_and_fds = pid_and_fds.load(std::memory_order_acquire); + } + Unpack(local_pid_and_fds, &pid, &read_fd, &write_fd); + } + errno = 0; + // Use syscall(SYS_write, ...) instead of write() to prevent ASAN + // and other checkers from complaining about accesses to arbitrary + // memory. + do { + bytes_written = syscall(SYS_write, write_fd, addr, 1); + } while (bytes_written == -1 && errno == EINTR); + if (bytes_written == 1) { // remove the byte from the pipe + char c; + while (read(read_fd, &c, 1) == -1 && errno == EINTR) { + } + } + if (errno == EBADF) { // Descriptors invalid. + // If pid_and_fds contains the problematic file descriptors we just used, + // this call will forget them, and the loop will try again. 
+ pid_and_fds.compare_exchange_strong(local_pid_and_fds, 0, + std::memory_order_release, + std::memory_order_relaxed); + } + } while (errno == EBADF); + return bytes_written == 1; } } // namespace debugging_internal ABSL_NAMESPACE_END } // namespace absl -#endif // __linux__ && !__ANDROID__ +#endif diff --git a/abseil-cpp/absl/debugging/internal/demangle.cc b/abseil-cpp/absl/debugging/internal/demangle.cc index 93ae3279..46cdb67b 100644 --- a/abseil-cpp/absl/debugging/internal/demangle.cc +++ b/abseil-cpp/absl/debugging/internal/demangle.cc @@ -386,28 +386,24 @@ static bool IsDigit(char c) { return c >= '0' && c <= '9'; } // by GCC 4.5.x and later versions (and our locally-modified version of GCC // 4.4.x) to indicate functions which have been cloned during optimization. // We treat any sequence (.+.+)+ as a function clone suffix. -// Additionally, '_' is allowed along with the alphanumeric sequence. static bool IsFunctionCloneSuffix(const char *str) { size_t i = 0; while (str[i] != '\0') { - bool parsed = false; - // Consume a single [. | _]*[.]* sequence. - if (str[i] == '.' && (IsAlpha(str[i + 1]) || str[i + 1] == '_')) { - parsed = true; - i += 2; - while (IsAlpha(str[i]) || str[i] == '_') { - ++i; - } + // Consume a single .+.+ sequence. + if (str[i] != '.' || !IsAlpha(str[i + 1])) { + return false; } - if (str[i] == '.' && IsDigit(str[i + 1])) { - parsed = true; - i += 2; - while (IsDigit(str[i])) { - ++i; - } + i += 2; + while (IsAlpha(str[i])) { + ++i; } - if (!parsed) + if (str[i] != '.' || !IsDigit(str[i + 1])) { return false; + } + i += 2; + while (IsDigit(str[i])) { + ++i; + } } return true; // Consumed everything in "str". } @@ -1617,7 +1613,6 @@ static bool ParseUnresolvedName(State *state) { // ::= <2-ary operator-name> // ::= <3-ary operator-name> // ::= cl + E -// ::= cp * E # Clang-specific. 
// ::= cv # type (expression) // ::= cv _ * E # type (expr-list) // ::= st @@ -1640,23 +1635,14 @@ static bool ParseExpression(State *state) { return true; } - ParseState copy = state->parse_state; - // Object/function call expression. + ParseState copy = state->parse_state; if (ParseTwoCharToken(state, "cl") && OneOrMore(ParseExpression, state) && ParseOneCharToken(state, 'E')) { return true; } state->parse_state = copy; - // Clang-specific "cp * E" - // https://clang.llvm.org/doxygen/ItaniumMangle_8cpp_source.html#l04338 - if (ParseTwoCharToken(state, "cp") && ParseSimpleId(state) && - ZeroOrMore(ParseExpression, state) && ParseOneCharToken(state, 'E')) { - return true; - } - state->parse_state = copy; - // Function-param expression (level 0). if (ParseTwoCharToken(state, "fp") && Optional(ParseCVQualifiers(state)) && Optional(ParseNumber(state, nullptr)) && ParseOneCharToken(state, '_')) { diff --git a/abseil-cpp/absl/debugging/internal/demangle_test.cc b/abseil-cpp/absl/debugging/internal/demangle_test.cc index 6b142902..0bed7359 100644 --- a/abseil-cpp/absl/debugging/internal/demangle_test.cc +++ b/abseil-cpp/absl/debugging/internal/demangle_test.cc @@ -70,34 +70,12 @@ TEST(Demangle, Clones) { EXPECT_STREQ("Foo()", tmp); EXPECT_TRUE(Demangle("_ZL3Foov.isra.2.constprop.18", tmp, sizeof(tmp))); EXPECT_STREQ("Foo()", tmp); - // Demangle suffixes produced by -funique-internal-linkage-names. - EXPECT_TRUE(Demangle("_ZL3Foov.__uniq.12345", tmp, sizeof(tmp))); - EXPECT_STREQ("Foo()", tmp); - EXPECT_TRUE(Demangle("_ZL3Foov.__uniq.12345.isra.2.constprop.18", tmp, - sizeof(tmp))); - EXPECT_STREQ("Foo()", tmp); - // Suffixes without the number should also demangle. - EXPECT_TRUE(Demangle("_ZL3Foov.clo", tmp, sizeof(tmp))); - EXPECT_STREQ("Foo()", tmp); - // Suffixes with just the number should also demangle. - EXPECT_TRUE(Demangle("_ZL3Foov.123", tmp, sizeof(tmp))); - EXPECT_STREQ("Foo()", tmp); - // (.clone. followed by non-number), should also demangle. 
- EXPECT_TRUE(Demangle("_ZL3Foov.clone.foo", tmp, sizeof(tmp))); - EXPECT_STREQ("Foo()", tmp); - // (.clone. followed by multiple numbers), should also demangle. - EXPECT_TRUE(Demangle("_ZL3Foov.clone.123.456", tmp, sizeof(tmp))); - EXPECT_STREQ("Foo()", tmp); - // (a long valid suffix), should demangle. - EXPECT_TRUE(Demangle("_ZL3Foov.part.9.165493.constprop.775.31805", tmp, - sizeof(tmp))); - EXPECT_STREQ("Foo()", tmp); - // Invalid (. without anything else), should not demangle. - EXPECT_FALSE(Demangle("_ZL3Foov.", tmp, sizeof(tmp))); - // Invalid (. with mix of alpha and digits), should not demangle. - EXPECT_FALSE(Demangle("_ZL3Foov.abc123", tmp, sizeof(tmp))); + // Invalid (truncated), should not demangle. + EXPECT_FALSE(Demangle("_ZL3Foov.clo", tmp, sizeof(tmp))); // Invalid (.clone. not followed by number), should not demangle. EXPECT_FALSE(Demangle("_ZL3Foov.clone.", tmp, sizeof(tmp))); + // Invalid (.clone. followed by non-number), should not demangle. + EXPECT_FALSE(Demangle("_ZL3Foov.clone.foo", tmp, sizeof(tmp))); // Invalid (.constprop. not followed by number), should not demangle. 
EXPECT_FALSE(Demangle("_ZL3Foov.isra.2.constprop.", tmp, sizeof(tmp))); } diff --git a/abseil-cpp/absl/debugging/internal/elf_mem_image.cc b/abseil-cpp/absl/debugging/internal/elf_mem_image.cc index a9d66714..24cc0130 100644 --- a/abseil-cpp/absl/debugging/internal/elf_mem_image.cc +++ b/abseil-cpp/absl/debugging/internal/elf_mem_image.cc @@ -22,7 +22,6 @@ #include #include #include -#include "absl/base/config.h" #include "absl/base/internal/raw_logging.h" // From binutils/include/elf/common.h (this doesn't appear to be documented @@ -44,11 +43,11 @@ namespace debugging_internal { namespace { -#if __SIZEOF_POINTER__ == 4 +#if __WORDSIZE == 32 const int kElfClass = ELFCLASS32; int ElfBind(const ElfW(Sym) *symbol) { return ELF32_ST_BIND(symbol->st_info); } int ElfType(const ElfW(Sym) *symbol) { return ELF32_ST_TYPE(symbol->st_info); } -#elif __SIZEOF_POINTER__ == 8 +#elif __WORDSIZE == 64 const int kElfClass = ELFCLASS64; int ElfBind(const ElfW(Sym) *symbol) { return ELF64_ST_BIND(symbol->st_info); } int ElfType(const ElfW(Sym) *symbol) { return ELF64_ST_TYPE(symbol->st_info); } @@ -176,17 +175,17 @@ void ElfMemImage::Init(const void *base) { } switch (base_as_char[EI_DATA]) { case ELFDATA2LSB: { -#ifndef ABSL_IS_LITTLE_ENDIAN - assert(false); - return; -#endif + if (__LITTLE_ENDIAN != __BYTE_ORDER) { + assert(false); + return; + } break; } case ELFDATA2MSB: { -#ifndef ABSL_IS_BIG_ENDIAN - assert(false); - return; -#endif + if (__BIG_ENDIAN != __BYTE_ORDER) { + assert(false); + return; + } break; } default: { @@ -222,7 +221,7 @@ void ElfMemImage::Init(const void *base) { reinterpret_cast(dynamic_program_header->p_vaddr + relocation); for (; dynamic_entry->d_tag != DT_NULL; ++dynamic_entry) { - const auto value = dynamic_entry->d_un.d_val + relocation; + const ElfW(Xword) value = dynamic_entry->d_un.d_val + relocation; switch (dynamic_entry->d_tag) { case DT_HASH: hash_ = reinterpret_cast(value); @@ -351,11 +350,7 @@ void ElfMemImage::SymbolIterator::Update(int 
increment) { const ElfW(Versym) *version_symbol = image->GetVersym(index_); ABSL_RAW_CHECK(symbol && version_symbol, ""); const char *const symbol_name = image->GetDynstr(symbol->st_name); -#if defined(__NetBSD__) - const int version_index = version_symbol->vs_vers & VERSYM_VERSION; -#else const ElfW(Versym) version_index = version_symbol[0] & VERSYM_VERSION; -#endif const ElfW(Verdef) *version_definition = nullptr; const char *version_name = ""; if (symbol->st_shndx == SHN_UNDEF) { diff --git a/abseil-cpp/absl/debugging/internal/elf_mem_image.h b/abseil-cpp/absl/debugging/internal/elf_mem_image.h index 113071a9..46bfade3 100644 --- a/abseil-cpp/absl/debugging/internal/elf_mem_image.h +++ b/abseil-cpp/absl/debugging/internal/elf_mem_image.h @@ -31,9 +31,8 @@ #error ABSL_HAVE_ELF_MEM_IMAGE cannot be directly set #endif -#if defined(__ELF__) && !defined(__OpenBSD__) && !defined(__QNX__) && \ - !defined(__native_client__) && !defined(__asmjs__) && \ - !defined(__wasm__) && !defined(__HAIKU__) +#if defined(__ELF__) && defined(__GLIBC__) && !defined(__native_client__) && \ + !defined(__asmjs__) && !defined(__wasm__) #define ABSL_HAVE_ELF_MEM_IMAGE 1 #endif @@ -41,10 +40,6 @@ #include // for ElfW -#if defined(__FreeBSD__) && !defined(ElfW) -#define ElfW(x) __ElfN(x) -#endif - namespace absl { ABSL_NAMESPACE_BEGIN namespace debugging_internal { diff --git a/abseil-cpp/absl/debugging/internal/examine_stack.cc b/abseil-cpp/absl/debugging/internal/examine_stack.cc index 5bdd341e..6e5ff1fb 100644 --- a/abseil-cpp/absl/debugging/internal/examine_stack.cc +++ b/abseil-cpp/absl/debugging/internal/examine_stack.cc @@ -20,13 +20,7 @@ #include #endif -#include "absl/base/config.h" - -#ifdef ABSL_HAVE_MMAP -#include -#endif - -#if defined(__linux__) || defined(__APPLE__) +#ifdef __APPLE__ #include #endif @@ -43,158 +37,35 @@ namespace absl { ABSL_NAMESPACE_BEGIN namespace debugging_internal { -namespace { -constexpr int kDefaultDumpStackFramesLimit = 64; -// The %p field width for 
printf() functions is two characters per byte, -// and two extra for the leading "0x". -constexpr int kPrintfPointerFieldWidth = 2 + 2 * sizeof(void*); - -ABSL_CONST_INIT SymbolizeUrlEmitter debug_stack_trace_hook = nullptr; - -// Async-signal safe mmap allocator. -void* Allocate(size_t num_bytes) { -#ifdef ABSL_HAVE_MMAP - void* p = ::mmap(nullptr, num_bytes, PROT_READ | PROT_WRITE, - MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); - return p == MAP_FAILED ? nullptr : p; -#else - (void)num_bytes; - return nullptr; -#endif // ABSL_HAVE_MMAP -} - -void Deallocate(void* p, size_t size) { -#ifdef ABSL_HAVE_MMAP - ::munmap(p, size); -#else - (void)p; - (void)size; -#endif // ABSL_HAVE_MMAP -} - -// Print a program counter only. -void DumpPC(OutputWriter* writer, void* writer_arg, void* const pc, - const char* const prefix) { - char buf[100]; - snprintf(buf, sizeof(buf), "%s@ %*p\n", prefix, kPrintfPointerFieldWidth, pc); - writer(buf, writer_arg); -} - -// Print a program counter and the corresponding stack frame size. -void DumpPCAndFrameSize(OutputWriter* writer, void* writer_arg, void* const pc, - int framesize, const char* const prefix) { - char buf[100]; - if (framesize <= 0) { - snprintf(buf, sizeof(buf), "%s@ %*p (unknown)\n", prefix, - kPrintfPointerFieldWidth, pc); - } else { - snprintf(buf, sizeof(buf), "%s@ %*p %9d\n", prefix, - kPrintfPointerFieldWidth, pc, framesize); - } - writer(buf, writer_arg); -} - -// Print a program counter and the corresponding symbol. -void DumpPCAndSymbol(OutputWriter* writer, void* writer_arg, void* const pc, - const char* const prefix) { - char tmp[1024]; - const char* symbol = "(unknown)"; - // Symbolizes the previous address of pc because pc may be in the - // next function. The overrun happens when the function ends with - // a call to a function annotated noreturn (e.g. CHECK). 
- // If symbolization of pc-1 fails, also try pc on the off-chance - // that we crashed on the first instruction of a function (that - // actually happens very often for e.g. __restore_rt). - const uintptr_t prev_pc = reinterpret_cast(pc) - 1; - if (absl::Symbolize(reinterpret_cast(prev_pc), tmp, - sizeof(tmp)) || - absl::Symbolize(pc, tmp, sizeof(tmp))) { - symbol = tmp; - } - char buf[1024]; - snprintf(buf, sizeof(buf), "%s@ %*p %s\n", prefix, kPrintfPointerFieldWidth, - pc, symbol); - writer(buf, writer_arg); -} - -// Print a program counter, its stack frame size, and its symbol name. -// Note that there is a separate symbolize_pc argument. Return addresses may be -// at the end of the function, and this allows the caller to back up from pc if -// appropriate. -void DumpPCAndFrameSizeAndSymbol(OutputWriter* writer, void* writer_arg, - void* const pc, void* const symbolize_pc, - int framesize, const char* const prefix) { - char tmp[1024]; - const char* symbol = "(unknown)"; - if (absl::Symbolize(symbolize_pc, tmp, sizeof(tmp))) { - symbol = tmp; - } - char buf[1024]; - if (framesize <= 0) { - snprintf(buf, sizeof(buf), "%s@ %*p (unknown) %s\n", prefix, - kPrintfPointerFieldWidth, pc, symbol); - } else { - snprintf(buf, sizeof(buf), "%s@ %*p %9d %s\n", prefix, - kPrintfPointerFieldWidth, pc, framesize, symbol); - } - writer(buf, writer_arg); -} - -} // namespace - -void RegisterDebugStackTraceHook(SymbolizeUrlEmitter hook) { - debug_stack_trace_hook = hook; -} - -SymbolizeUrlEmitter GetDebugStackTraceHook() { return debug_stack_trace_hook; } - // Returns the program counter from signal context, nullptr if // unknown. vuc is a ucontext_t*. We use void* to avoid the use of // ucontext_t on non-POSIX systems. 
-void* GetProgramCounter(void* const vuc) { +void* GetProgramCounter(void* vuc) { #ifdef __linux__ if (vuc != nullptr) { ucontext_t* context = reinterpret_cast(vuc); #if defined(__aarch64__) return reinterpret_cast(context->uc_mcontext.pc); -#elif defined(__alpha__) - return reinterpret_cast(context->uc_mcontext.sc_pc); #elif defined(__arm__) return reinterpret_cast(context->uc_mcontext.arm_pc); -#elif defined(__hppa__) - return reinterpret_cast(context->uc_mcontext.sc_iaoq[0]); #elif defined(__i386__) if (14 < ABSL_ARRAYSIZE(context->uc_mcontext.gregs)) return reinterpret_cast(context->uc_mcontext.gregs[14]); -#elif defined(__ia64__) - return reinterpret_cast(context->uc_mcontext.sc_ip); -#elif defined(__m68k__) - return reinterpret_cast(context->uc_mcontext.gregs[16]); #elif defined(__mips__) return reinterpret_cast(context->uc_mcontext.pc); #elif defined(__powerpc64__) return reinterpret_cast(context->uc_mcontext.gp_regs[32]); #elif defined(__powerpc__) - return reinterpret_cast(context->uc_mcontext.uc_regs->gregs[32]); + return reinterpret_cast(context->uc_mcontext.regs->nip); #elif defined(__riscv) return reinterpret_cast(context->uc_mcontext.__gregs[REG_PC]); #elif defined(__s390__) && !defined(__s390x__) return reinterpret_cast(context->uc_mcontext.psw.addr & 0x7fffffff); #elif defined(__s390__) && defined(__s390x__) return reinterpret_cast(context->uc_mcontext.psw.addr); -#elif defined(__sh__) - return reinterpret_cast(context->uc_mcontext.pc); -#elif defined(__sparc__) && !defined(__arch64__) - return reinterpret_cast(context->uc_mcontext.gregs[19]); -#elif defined(__sparc__) && defined(__arch64__) - return reinterpret_cast(context->uc_mcontext.mc_gregs[19]); #elif defined(__x86_64__) if (16 < ABSL_ARRAYSIZE(context->uc_mcontext.gregs)) return reinterpret_cast(context->uc_mcontext.gregs[16]); -#elif defined(__e2k__) - return reinterpret_cast(context->uc_mcontext.cr0_hi); -#elif defined(__loongarch__) - return reinterpret_cast(context->uc_mcontext.__pc); 
#else #error "Undefined Architecture." #endif @@ -233,17 +104,59 @@ void* GetProgramCounter(void* const vuc) { return nullptr; } -void DumpPCAndFrameSizesAndStackTrace(void* const pc, void* const stack[], - int frame_sizes[], int depth, - int min_dropped_frames, - bool symbolize_stacktrace, - OutputWriter* writer, void* writer_arg) { +// The %p field width for printf() functions is two characters per byte, +// and two extra for the leading "0x". +static constexpr int kPrintfPointerFieldWidth = 2 + 2 * sizeof(void*); + +// Print a program counter, its stack frame size, and its symbol name. +// Note that there is a separate symbolize_pc argument. Return addresses may be +// at the end of the function, and this allows the caller to back up from pc if +// appropriate. +static void DumpPCAndFrameSizeAndSymbol(void (*writerfn)(const char*, void*), + void* writerfn_arg, void* pc, + void* symbolize_pc, int framesize, + const char* const prefix) { + char tmp[1024]; + const char* symbol = "(unknown)"; + if (absl::Symbolize(symbolize_pc, tmp, sizeof(tmp))) { + symbol = tmp; + } + char buf[1024]; + if (framesize <= 0) { + snprintf(buf, sizeof(buf), "%s@ %*p (unknown) %s\n", prefix, + kPrintfPointerFieldWidth, pc, symbol); + } else { + snprintf(buf, sizeof(buf), "%s@ %*p %9d %s\n", prefix, + kPrintfPointerFieldWidth, pc, framesize, symbol); + } + writerfn(buf, writerfn_arg); +} + +// Print a program counter and the corresponding stack frame size. 
+static void DumpPCAndFrameSize(void (*writerfn)(const char*, void*), + void* writerfn_arg, void* pc, int framesize, + const char* const prefix) { + char buf[100]; + if (framesize <= 0) { + snprintf(buf, sizeof(buf), "%s@ %*p (unknown)\n", prefix, + kPrintfPointerFieldWidth, pc); + } else { + snprintf(buf, sizeof(buf), "%s@ %*p %9d\n", prefix, + kPrintfPointerFieldWidth, pc, framesize); + } + writerfn(buf, writerfn_arg); +} + +void DumpPCAndFrameSizesAndStackTrace( + void* pc, void* const stack[], int frame_sizes[], int depth, + int min_dropped_frames, bool symbolize_stacktrace, + void (*writerfn)(const char*, void*), void* writerfn_arg) { if (pc != nullptr) { // We don't know the stack frame size for PC, use 0. if (symbolize_stacktrace) { - DumpPCAndFrameSizeAndSymbol(writer, writer_arg, pc, pc, 0, "PC: "); + DumpPCAndFrameSizeAndSymbol(writerfn, writerfn_arg, pc, pc, 0, "PC: "); } else { - DumpPCAndFrameSize(writer, writer_arg, pc, 0, "PC: "); + DumpPCAndFrameSize(writerfn, writerfn_arg, pc, 0, "PC: "); } } for (int i = 0; i < depth; i++) { @@ -253,61 +166,20 @@ void DumpPCAndFrameSizesAndStackTrace(void* const pc, void* const stack[], // call to a function annotated noreturn (e.g. CHECK). Note that we don't // do this for pc above, as the adjustment is only correct for return // addresses. - DumpPCAndFrameSizeAndSymbol(writer, writer_arg, stack[i], + DumpPCAndFrameSizeAndSymbol(writerfn, writerfn_arg, stack[i], reinterpret_cast(stack[i]) - 1, frame_sizes[i], " "); } else { - DumpPCAndFrameSize(writer, writer_arg, stack[i], frame_sizes[i], " "); + DumpPCAndFrameSize(writerfn, writerfn_arg, stack[i], frame_sizes[i], + " "); } } if (min_dropped_frames > 0) { char buf[100]; snprintf(buf, sizeof(buf), " @ ... and at least %d more frames\n", min_dropped_frames); - writer(buf, writer_arg); - } -} - -// Dump current stack trace as directed by writer. -// Make sure this function is not inlined to avoid skipping too many top frames. 
-ABSL_ATTRIBUTE_NOINLINE -void DumpStackTrace(int min_dropped_frames, int max_num_frames, - bool symbolize_stacktrace, OutputWriter* writer, - void* writer_arg) { - // Print stack trace - void* stack_buf[kDefaultDumpStackFramesLimit]; - void** stack = stack_buf; - int num_stack = kDefaultDumpStackFramesLimit; - int allocated_bytes = 0; - - if (num_stack >= max_num_frames) { - // User requested fewer frames than we already have space for. - num_stack = max_num_frames; - } else { - const size_t needed_bytes = max_num_frames * sizeof(stack[0]); - void* p = Allocate(needed_bytes); - if (p != nullptr) { // We got the space. - num_stack = max_num_frames; - stack = reinterpret_cast(p); - allocated_bytes = needed_bytes; - } + writerfn(buf, writerfn_arg); } - - size_t depth = absl::GetStackTrace(stack, num_stack, min_dropped_frames + 1); - for (size_t i = 0; i < depth; i++) { - if (symbolize_stacktrace) { - DumpPCAndSymbol(writer, writer_arg, stack[i], " "); - } else { - DumpPC(writer, writer_arg, stack[i], " "); - } - } - - auto hook = GetDebugStackTraceHook(); - if (hook != nullptr) { - (*hook)(stack, depth, writer, writer_arg); - } - - if (allocated_bytes != 0) Deallocate(stack, allocated_bytes); } } // namespace debugging_internal diff --git a/abseil-cpp/absl/debugging/internal/examine_stack.h b/abseil-cpp/absl/debugging/internal/examine_stack.h index 190af87f..39336913 100644 --- a/abseil-cpp/absl/debugging/internal/examine_stack.h +++ b/abseil-cpp/absl/debugging/internal/examine_stack.h @@ -23,39 +23,17 @@ namespace absl { ABSL_NAMESPACE_BEGIN namespace debugging_internal { -// Type of function used for printing in stack trace dumping, etc. -// We avoid closures to keep things simple. -typedef void OutputWriter(const char*, void*); - -// RegisterDebugStackTraceHook() allows to register a single routine -// `hook` that is called each time DumpStackTrace() is called. -// `hook` may be called from a signal handler. 
-typedef void (*SymbolizeUrlEmitter)(void* const stack[], int depth, - OutputWriter* writer, void* writer_arg); - -// Registration of SymbolizeUrlEmitter for use inside of a signal handler. -// This is inherently unsafe and must be signal safe code. -void RegisterDebugStackTraceHook(SymbolizeUrlEmitter hook); -SymbolizeUrlEmitter GetDebugStackTraceHook(); - // Returns the program counter from signal context, or nullptr if // unknown. `vuc` is a ucontext_t*. We use void* to avoid the use of // ucontext_t on non-POSIX systems. -void* GetProgramCounter(void* const vuc); +void* GetProgramCounter(void* vuc); -// Uses `writer` to dump the program counter, stack trace, and stack +// Uses `writerfn` to dump the program counter, stack trace, and stack // frame sizes. -void DumpPCAndFrameSizesAndStackTrace(void* const pc, void* const stack[], - int frame_sizes[], int depth, - int min_dropped_frames, - bool symbolize_stacktrace, - OutputWriter* writer, void* writer_arg); - -// Dump current stack trace omitting the topmost `min_dropped_frames` stack -// frames. -void DumpStackTrace(int min_dropped_frames, int max_num_frames, - bool symbolize_stacktrace, OutputWriter* writer, - void* writer_arg); +void DumpPCAndFrameSizesAndStackTrace( + void* pc, void* const stack[], int frame_sizes[], int depth, + int min_dropped_frames, bool symbolize_stacktrace, + void (*writerfn)(const char*, void*), void* writerfn_arg); } // namespace debugging_internal ABSL_NAMESPACE_END diff --git a/abseil-cpp/absl/debugging/internal/stack_consumption.cc b/abseil-cpp/absl/debugging/internal/stack_consumption.cc index 51348649..e3dd51c3 100644 --- a/abseil-cpp/absl/debugging/internal/stack_consumption.cc +++ b/abseil-cpp/absl/debugging/internal/stack_consumption.cc @@ -43,7 +43,7 @@ namespace { // unspecified. Therefore, instead we hardcode the direction of the // stack on platforms we know about. 
#if defined(__i386__) || defined(__x86_64__) || defined(__ppc__) || \ - defined(__aarch64__) || defined(__riscv) + defined(__aarch64__) constexpr bool kStackGrowsDown = true; #else #error Need to define kStackGrowsDown diff --git a/abseil-cpp/absl/debugging/internal/stack_consumption.h b/abseil-cpp/absl/debugging/internal/stack_consumption.h index f41b64c3..2b5e7151 100644 --- a/abseil-cpp/absl/debugging/internal/stack_consumption.h +++ b/abseil-cpp/absl/debugging/internal/stack_consumption.h @@ -26,7 +26,7 @@ #error ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION cannot be set directly #elif !defined(__APPLE__) && !defined(_WIN32) && \ (defined(__i386__) || defined(__x86_64__) || defined(__ppc__) || \ - defined(__aarch64__) || defined(__riscv)) + defined(__aarch64__)) #define ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION 1 namespace absl { diff --git a/abseil-cpp/absl/debugging/internal/stacktrace_aarch64-inl.inc b/abseil-cpp/absl/debugging/internal/stacktrace_aarch64-inl.inc index 4f9db9d6..14a76f1e 100644 --- a/abseil-cpp/absl/debugging/internal/stacktrace_aarch64-inl.inc +++ b/abseil-cpp/absl/debugging/internal/stacktrace_aarch64-inl.inc @@ -37,11 +37,8 @@ static const unsigned char* GetKernelRtSigreturnAddress() { absl::debugging_internal::VDSOSupport vdso; if (vdso.IsPresent()) { absl::debugging_internal::VDSOSupport::SymbolInfo symbol_info; - auto lookup = [&](int type) { - return vdso.LookupSymbol("__kernel_rt_sigreturn", "LINUX_2.6.39", type, - &symbol_info); - }; - if ((!lookup(STT_FUNC) && !lookup(STT_NOTYPE)) || + if (!vdso.LookupSymbol("__kernel_rt_sigreturn", "LINUX_2.6.39", STT_FUNC, + &symbol_info) || symbol_info.address == nullptr) { // Unexpected: VDSO is present, yet the expected symbol is missing // or null. @@ -176,17 +173,12 @@ static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count, // Implementation detail: we clamp the max of frames we are willing to // count, so as not to spend too much time in the loop below. 
const int kMaxUnwind = 200; - int num_dropped_frames = 0; - for (int j = 0; frame_pointer != nullptr && j < kMaxUnwind; j++) { - if (skip_count > 0) { - skip_count--; - } else { - num_dropped_frames++; - } + int j = 0; + for (; frame_pointer != nullptr && j < kMaxUnwind; j++) { frame_pointer = NextStackFrame(frame_pointer, ucp); } - *min_dropped_frames = num_dropped_frames; + *min_dropped_frames = j; } return n; } diff --git a/abseil-cpp/absl/debugging/internal/stacktrace_arm-inl.inc b/abseil-cpp/absl/debugging/internal/stacktrace_arm-inl.inc index 102a2a12..2a1bf2e8 100644 --- a/abseil-cpp/absl/debugging/internal/stacktrace_arm-inl.inc +++ b/abseil-cpp/absl/debugging/internal/stacktrace_arm-inl.inc @@ -112,16 +112,11 @@ static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count, // Implementation detail: we clamp the max of frames we are willing to // count, so as not to spend too much time in the loop below. const int kMaxUnwind = 200; - int num_dropped_frames = 0; - for (int j = 0; sp != nullptr && j < kMaxUnwind; j++) { - if (skip_count > 0) { - skip_count--; - } else { - num_dropped_frames++; - } + int j = 0; + for (; sp != nullptr && j < kMaxUnwind; j++) { sp = NextStackFrame(sp); } - *min_dropped_frames = num_dropped_frames; + *min_dropped_frames = j; } return n; } diff --git a/abseil-cpp/absl/debugging/internal/stacktrace_config.h b/abseil-cpp/absl/debugging/internal/stacktrace_config.h index 3929b1b7..90af8528 100644 --- a/abseil-cpp/absl/debugging/internal/stacktrace_config.h +++ b/abseil-cpp/absl/debugging/internal/stacktrace_config.h @@ -21,8 +21,6 @@ #ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_CONFIG_H_ #define ABSL_DEBUGGING_INTERNAL_STACKTRACE_CONFIG_H_ -#include "absl/base/config.h" - #if defined(ABSL_STACKTRACE_INL_HEADER) #error ABSL_STACKTRACE_INL_HEADER cannot be directly set @@ -31,16 +29,22 @@ "absl/debugging/internal/stacktrace_win32-inl.inc" #elif defined(__APPLE__) -#ifdef ABSL_HAVE_THREAD_LOCAL // Thread local support 
required for UnwindImpl. +// Notes: +// * Xcode's clang did not support `thread_local` until version 8, and +// even then not for all iOS < 9.0. +// * Xcode 9.3 started disallowing `thread_local` for 32-bit iOS simulator +// targeting iOS 9.x. +// * Xcode 10 moves the deployment target check for iOS < 9.0 to link time +// making __has_feature unreliable there. +// +// Otherwise, `__has_feature` is only supported by Clang so it has be inside +// `defined(__APPLE__)` check. +#if __has_feature(cxx_thread_local) && \ + !(TARGET_OS_IPHONE && __IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_9_0) #define ABSL_STACKTRACE_INL_HEADER \ "absl/debugging/internal/stacktrace_generic-inl.inc" -#endif // defined(ABSL_HAVE_THREAD_LOCAL) - -// Emscripten stacktraces rely on JS. Do not use them in standalone mode. -#elif defined(__EMSCRIPTEN__) && !defined(STANDALONE_WASM) -#define ABSL_STACKTRACE_INL_HEADER \ - "absl/debugging/internal/stacktrace_emscripten-inl.inc" +#endif #elif defined(__linux__) && !defined(__ANDROID__) @@ -56,7 +60,7 @@ // Note: When using glibc this may require -funwind-tables to function properly. #define ABSL_STACKTRACE_INL_HEADER \ "absl/debugging/internal/stacktrace_generic-inl.inc" -#endif // __has_include() +#endif #elif defined(__i386__) || defined(__x86_64__) #define ABSL_STACKTRACE_INL_HEADER \ "absl/debugging/internal/stacktrace_x86-inl.inc" @@ -66,18 +70,15 @@ #elif defined(__aarch64__) #define ABSL_STACKTRACE_INL_HEADER \ "absl/debugging/internal/stacktrace_aarch64-inl.inc" -#elif defined(__riscv) -#define ABSL_STACKTRACE_INL_HEADER \ - "absl/debugging/internal/stacktrace_riscv-inl.inc" #elif defined(__has_include) #if __has_include() // Note: When using glibc this may require -funwind-tables to function properly. 
#define ABSL_STACKTRACE_INL_HEADER \ "absl/debugging/internal/stacktrace_generic-inl.inc" -#endif // __has_include() -#endif // defined(__has_include) +#endif +#endif -#endif // defined(__linux__) && !defined(__ANDROID__) +#endif // Fallback to the empty implementation. #if !defined(ABSL_STACKTRACE_INL_HEADER) diff --git a/abseil-cpp/absl/debugging/internal/stacktrace_emscripten-inl.inc b/abseil-cpp/absl/debugging/internal/stacktrace_emscripten-inl.inc deleted file mode 100644 index 0f444514..00000000 --- a/abseil-cpp/absl/debugging/internal/stacktrace_emscripten-inl.inc +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2017 The Abseil Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Portable implementation - just use glibc -// -// Note: The glibc implementation may cause a call to malloc. -// This can cause a deadlock in HeapProfiler. - -#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_EMSCRIPTEN_INL_H_ -#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_EMSCRIPTEN_INL_H_ - -#include - -#include -#include - -#include "absl/base/attributes.h" -#include "absl/debugging/stacktrace.h" - -extern "C" { -uintptr_t emscripten_stack_snapshot(); -uint32_t emscripten_stack_unwind_buffer(uintptr_t pc, void *buffer, - uint32_t depth); -} - -// Sometimes, we can try to get a stack trace from within a stack -// trace, which can cause a self-deadlock. -// Protect against such reentrant call by failing to get a stack trace. 
-// -// We use __thread here because the code here is extremely low level -- it is -// called while collecting stack traces from within malloc and mmap, and thus -// can not call anything which might call malloc or mmap itself. -static __thread int recursive = 0; - -// The stack trace function might be invoked very early in the program's -// execution (e.g. from the very first malloc). -// As such, we suppress usage of backtrace during this early stage of execution. -static std::atomic disable_stacktraces(true); // Disabled until healthy. -// Waiting until static initializers run seems to be late enough. -// This file is included into stacktrace.cc so this will only run once. -ABSL_ATTRIBUTE_UNUSED static int stacktraces_enabler = []() { - // Check if we can even create stacktraces. If not, bail early and leave - // disable_stacktraces set as-is. - // clang-format off - if (!EM_ASM_INT({ return (typeof wasmOffsetConverter !== 'undefined'); })) { - return 0; - } - // clang-format on - disable_stacktraces.store(false, std::memory_order_relaxed); - return 0; -}(); - -template -static int UnwindImpl(void **result, int *sizes, int max_depth, int skip_count, - const void *ucp, int *min_dropped_frames) { - if (recursive || disable_stacktraces.load(std::memory_order_relaxed)) { - return 0; - } - ++recursive; - - static_cast(ucp); // Unused. - constexpr int kStackLength = 64; - void *stack[kStackLength]; - - int size; - uintptr_t pc = emscripten_stack_snapshot(); - size = emscripten_stack_unwind_buffer(pc, stack, kStackLength); - - int result_count = size - skip_count; - if (result_count < 0) result_count = 0; - if (result_count > max_depth) result_count = max_depth; - for (int i = 0; i < result_count; i++) result[i] = stack[i + skip_count]; - - if (IS_STACK_FRAMES) { - // No implementation for finding out the stack frame sizes yet. 
- memset(sizes, 0, sizeof(*sizes) * result_count); - } - if (min_dropped_frames != nullptr) { - if (size - skip_count - max_depth > 0) { - *min_dropped_frames = size - skip_count - max_depth; - } else { - *min_dropped_frames = 0; - } - } - - --recursive; - - return result_count; -} - -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace debugging_internal { -bool StackTraceWorksForTest() { return true; } -} // namespace debugging_internal -ABSL_NAMESPACE_END -} // namespace absl - -#endif // ABSL_DEBUGGING_INTERNAL_STACKTRACE_EMSCRIPTEN_INL_H_ diff --git a/abseil-cpp/absl/debugging/internal/stacktrace_powerpc-inl.inc b/abseil-cpp/absl/debugging/internal/stacktrace_powerpc-inl.inc index 085cef67..2e7c2f40 100644 --- a/abseil-cpp/absl/debugging/internal/stacktrace_powerpc-inl.inc +++ b/abseil-cpp/absl/debugging/internal/stacktrace_powerpc-inl.inc @@ -131,12 +131,7 @@ static void **NextStackFrame(void **old_sp, const void *uc) { const ucontext_t* signal_context = reinterpret_cast(uc); void **const sp_before_signal = -#if defined(__PPC64__) - reinterpret_cast(signal_context->uc_mcontext.gp_regs[PT_R1]); -#else - reinterpret_cast( - signal_context->uc_mcontext.uc_regs->gregs[PT_R1]); -#endif + reinterpret_cast(signal_context->uc_mcontext.gp_regs[PT_R1]); // Check that alleged sp before signal is nonnull and is reasonably // aligned. if (sp_before_signal != nullptr && @@ -231,16 +226,11 @@ static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count, // Implementation detail: we clamp the max of frames we are willing to // count, so as not to spend too much time in the loop below. 
const int kMaxUnwind = 1000; - int num_dropped_frames = 0; - for (int j = 0; next_sp != nullptr && j < kMaxUnwind; j++) { - if (skip_count > 0) { - skip_count--; - } else { - num_dropped_frames++; - } + int j = 0; + for (; next_sp != nullptr && j < kMaxUnwind; j++) { next_sp = NextStackFrame(next_sp, ucp); } - *min_dropped_frames = num_dropped_frames; + *min_dropped_frames = j; } return n; } diff --git a/abseil-cpp/absl/debugging/internal/stacktrace_riscv-inl.inc b/abseil-cpp/absl/debugging/internal/stacktrace_riscv-inl.inc deleted file mode 100644 index 7123b71b..00000000 --- a/abseil-cpp/absl/debugging/internal/stacktrace_riscv-inl.inc +++ /dev/null @@ -1,236 +0,0 @@ -// Copyright 2021 The Abseil Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_RISCV_INL_H_ -#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_RISCV_INL_H_ - -// Generate stack trace for riscv - -#include - -#include "absl/base/config.h" -#if defined(__linux__) -#include -#include -#include -#endif - -#include -#include -#include -#include - -#include "absl/base/attributes.h" -#include "absl/debugging/internal/address_is_readable.h" -#include "absl/debugging/internal/vdso_support.h" -#include "absl/debugging/stacktrace.h" - -static const uintptr_t kUnknownFrameSize = 0; - -#if defined(__linux__) -// Returns the address of the VDSO __kernel_rt_sigreturn function, if present. 
-static const unsigned char *GetKernelRtSigreturnAddress() { - constexpr uintptr_t kImpossibleAddress = 0; - ABSL_CONST_INIT static std::atomic memoized(kImpossibleAddress); - uintptr_t address = memoized.load(std::memory_order_relaxed); - if (address != kImpossibleAddress) { - return reinterpret_cast(address); - } - - address = reinterpret_cast(nullptr); - -#if ABSL_HAVE_VDSO_SUPPORT - absl::debugging_internal::VDSOSupport vdso; - if (vdso.IsPresent()) { - absl::debugging_internal::VDSOSupport::SymbolInfo symbol_info; - // Symbol versioning pulled from arch/riscv/kernel/vdso/vdso.lds at v5.10. - auto lookup = [&](int type) { - return vdso.LookupSymbol("__vdso_rt_sigreturn", "LINUX_4.15", type, - &symbol_info); - }; - if ((!lookup(STT_FUNC) && !lookup(STT_NOTYPE)) || - symbol_info.address == nullptr) { - // Unexpected: VDSO is present, yet the expected symbol is missing or - // null. - assert(false && "VDSO is present, but doesn't have expected symbol"); - } else { - if (reinterpret_cast(symbol_info.address) != - kImpossibleAddress) { - address = reinterpret_cast(symbol_info.address); - } else { - assert(false && "VDSO returned invalid address"); - } - } - } -#endif - - memoized.store(address, std::memory_order_relaxed); - return reinterpret_cast(address); -} -#endif // __linux__ - -// Compute the size of a stack frame in [low..high). We assume that low < high. -// Return size of kUnknownFrameSize. -template -static inline uintptr_t ComputeStackFrameSize(const T *low, const T *high) { - const char *low_char_ptr = reinterpret_cast(low); - const char *high_char_ptr = reinterpret_cast(high); - return low < high ? high_char_ptr - low_char_ptr : kUnknownFrameSize; -} - -// Given a pointer to a stack frame, locate and return the calling stackframe, -// or return null if no stackframe can be found. Perform sanity checks (the -// strictness of which is controlled by the boolean parameter -// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned. 
-template -ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack. -ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack. -static void ** NextStackFrame(void **old_frame_pointer, const void *uc) { - // . - // . - // . - // +-> +----------------+ - // | | return address | - // | | previous fp | - // | | ... | - // | +----------------+ <-+ - // | | return address | | - // +---|- previous fp | | - // | ... | | - // $fp ->|----------------+ | - // | return address | | - // | previous fp -|---+ - // $sp ->| ... | - // +----------------+ - void **new_frame_pointer = reinterpret_cast(old_frame_pointer[-2]); - bool check_frame_size = true; - -#if defined(__linux__) - if (WITH_CONTEXT && uc != nullptr) { - // Check to see if next frame's return address is __kernel_rt_sigreturn. - if (old_frame_pointer[-1] == GetKernelRtSigreturnAddress()) { - const ucontext_t *ucv = static_cast(uc); - // old_frame_pointer is not suitable for unwinding, look at ucontext to - // discover frame pointer before signal. - // - // RISCV ELF psABI has the frame pointer at x8/fp/s0. - // -- RISCV psABI Table 18.2 - void **const pre_signal_frame_pointer = - reinterpret_cast(ucv->uc_mcontext.__gregs[8]); - - // Check the alleged frame pointer is actually readable. This is to - // prevent "double fault" in case we hit the first fault due to stack - // corruption. - if (!absl::debugging_internal::AddressIsReadable( - pre_signal_frame_pointer)) - return nullptr; - - // Alleged frame pointer is readable, use it for further unwinding. - new_frame_pointer = pre_signal_frame_pointer; - - // Skip frame size check if we return from a signal. We may be using an - // alterate stack for signals. - check_frame_size = false; - } - } -#endif - - // The RISCV ELF psABI mandates that the stack pointer is always 16-byte - // aligned. - // FIXME(abdulras) this doesn't hold for ILP32E which only mandates a 4-byte - // alignment. 
- if ((reinterpret_cast(new_frame_pointer) & 15) != 0) - return nullptr; - - // Check frame size. In strict mode, we assume frames to be under 100,000 - // bytes. In non-strict mode, we relax the limit to 1MB. - if (check_frame_size) { - const uintptr_t max_size = STRICT_UNWINDING ? 100000 : 1000000; - const uintptr_t frame_size = - ComputeStackFrameSize(old_frame_pointer, new_frame_pointer); - if (frame_size == kUnknownFrameSize || frame_size > max_size) - return nullptr; - } - - return new_frame_pointer; -} - -template -ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack. -ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack. -static int UnwindImpl(void **result, int *sizes, int max_depth, int skip_count, - const void *ucp, int *min_dropped_frames) { - // The `frame_pointer` that is computed here points to the top of the frame. - // The two words preceding the address are the return address and the previous - // frame pointer. -#if defined(__GNUC__) - void **frame_pointer = reinterpret_cast(__builtin_frame_address(0)); -#else -#error reading stack pointer not yet supported on this platform -#endif - - int n = 0; - void *return_address = nullptr; - while (frame_pointer && n < max_depth) { - return_address = frame_pointer[-1]; - - // The absl::GetStackFrames routine is called when we are in some - // informational context (the failure signal handler for example). Use the - // non-strict unwinding rules to produce a stack trace that is as complete - // as possible (even if it contains a few bogus entries in some rare cases). 
- void **next_frame_pointer = - NextStackFrame(frame_pointer, ucp); - - if (skip_count > 0) { - skip_count--; - } else { - result[n] = return_address; - if (IS_STACK_FRAMES) { - sizes[n] = ComputeStackFrameSize(frame_pointer, next_frame_pointer); - } - n++; - } - - frame_pointer = next_frame_pointer; - } - - if (min_dropped_frames != nullptr) { - // Implementation detail: we clamp the max of frames we are willing to - // count, so as not to spend too much time in the loop below. - const int kMaxUnwind = 200; - int num_dropped_frames = 0; - for (int j = 0; frame_pointer != nullptr && j < kMaxUnwind; j++) { - if (skip_count > 0) { - skip_count--; - } else { - num_dropped_frames++; - } - frame_pointer = - NextStackFrame(frame_pointer, ucp); - } - *min_dropped_frames = num_dropped_frames; - } - - return n; -} - -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace debugging_internal { -bool StackTraceWorksForTest() { return true; } -} // namespace debugging_internal -ABSL_NAMESPACE_END -} // namespace absl - -#endif diff --git a/abseil-cpp/absl/debugging/internal/stacktrace_x86-inl.inc b/abseil-cpp/absl/debugging/internal/stacktrace_x86-inl.inc index 1b5d8235..bc320ff7 100644 --- a/abseil-cpp/absl/debugging/internal/stacktrace_x86-inl.inc +++ b/abseil-cpp/absl/debugging/internal/stacktrace_x86-inl.inc @@ -27,7 +27,6 @@ #include #include -#include #include "absl/base/macros.h" #include "absl/base/port.h" @@ -133,8 +132,9 @@ static uintptr_t GetFP(const void *vuc) { const uintptr_t bp = 0; const uintptr_t sp = 0; #endif - // Sanity-check that the base pointer is valid. It's possible that some - // code in the process is compiled with --copt=-fomit-frame-pointer or + // Sanity-check that the base pointer is valid. It should be as long as + // SHRINK_WRAP_FRAME_POINTER is not set, but it's possible that some code in + // the process is compiled with --copt=-fomit-frame-pointer or // --copt=-momit-leaf-frame-pointer. 
// // TODO(bcmills): -momit-leaf-frame-pointer is currently the default @@ -159,8 +159,7 @@ static uintptr_t GetFP(const void *vuc) { template ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack. ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack. -static void **NextStackFrame(void **old_fp, const void *uc, - size_t stack_low, size_t stack_high) { +static void **NextStackFrame(void **old_fp, const void *uc) { void **new_fp = (void **)*old_fp; #if defined(__linux__) && defined(__i386__) @@ -248,7 +247,7 @@ static void **NextStackFrame(void **old_fp, const void *uc, // using an alternate signal stack. // // TODO(bcmills): The GetFP call should be completely unnecessary when - // ENABLE_COMBINED_UNWINDER is set (because we should be back in the thread's + // SHRINK_WRAP_FRAME_POINTER is set (because we should be back in the thread's // stack by this point), but it is empirically still needed (e.g. when the // stack includes a call to abort). unw_get_reg returns UNW_EBADREG for some // frames. Figure out why GetValidFrameAddr and/or libunwind isn't doing what @@ -259,18 +258,6 @@ static void **NextStackFrame(void **old_fp, const void *uc, // at a greater address that the current one. if (new_fp_u <= old_fp_u) return nullptr; if (new_fp_u - old_fp_u > kMaxFrameBytes) return nullptr; - - if (stack_low < old_fp_u && old_fp_u <= stack_high) { - // Old BP was in the expected stack region... - if (!(stack_low < new_fp_u && new_fp_u <= stack_high)) { - // ... but new BP is outside of expected stack region. - // It is most likely bogus. - return nullptr; - } - } else { - // We may be here if we are executing in a co-routine with a - // separate stack. We can't do safety checks in this case. - } } else { if (new_fp == nullptr) return nullptr; // skip AddressIsReadable() below // In the non-strict mode, allow discontiguous stack frames. 
@@ -310,17 +297,13 @@ static int UnwindImpl(void **result, int *sizes, int max_depth, int skip_count, int n = 0; void **fp = reinterpret_cast(__builtin_frame_address(0)); - size_t stack_low = getpagesize(); // Assume that the first page is not stack. - size_t stack_high = std::numeric_limits::max() - sizeof(void *); - while (fp && n < max_depth) { if (*(fp + 1) == reinterpret_cast(0)) { // In 64-bit code, we often see a frame that // points to itself and has a return address of 0. break; } - void **next_fp = NextStackFrame( - fp, ucp, stack_low, stack_high); + void **next_fp = NextStackFrame(fp, ucp); if (skip_count > 0) { skip_count--; } else { @@ -341,17 +324,11 @@ static int UnwindImpl(void **result, int *sizes, int max_depth, int skip_count, // Implementation detail: we clamp the max of frames we are willing to // count, so as not to spend too much time in the loop below. const int kMaxUnwind = 1000; - int num_dropped_frames = 0; - for (int j = 0; fp != nullptr && j < kMaxUnwind; j++) { - if (skip_count > 0) { - skip_count--; - } else { - num_dropped_frames++; - } - fp = NextStackFrame(fp, ucp, stack_low, - stack_high); + int j = 0; + for (; fp != nullptr && j < kMaxUnwind; j++) { + fp = NextStackFrame(fp, ucp); } - *min_dropped_frames = num_dropped_frames; + *min_dropped_frames = j; } return n; } diff --git a/abseil-cpp/absl/debugging/internal/symbolize.h b/abseil-cpp/absl/debugging/internal/symbolize.h index 27d5e652..b3729af7 100644 --- a/abseil-cpp/absl/debugging/internal/symbolize.h +++ b/abseil-cpp/absl/debugging/internal/symbolize.h @@ -28,8 +28,8 @@ #ifdef ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE #error ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE cannot be directly set -#elif defined(__ELF__) && defined(__GLIBC__) && !defined(__native_client__) \ - && !defined(__asmjs__) && !defined(__wasm__) +#elif defined(__ELF__) && defined(__GLIBC__) && !defined(__native_client__) && \ + !defined(__asmjs__) && !defined(__wasm__) #define ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE 1 #include @@ 
-68,12 +68,6 @@ ABSL_NAMESPACE_END #define ABSL_INTERNAL_HAVE_DARWIN_SYMBOLIZE 1 #endif -#ifdef ABSL_INTERNAL_HAVE_EMSCRIPTEN_SYMBOLIZE -#error ABSL_INTERNAL_HAVE_EMSCRIPTEN_SYMBOLIZE cannot be directly set -#elif defined(__EMSCRIPTEN__) -#define ABSL_INTERNAL_HAVE_EMSCRIPTEN_SYMBOLIZE 1 -#endif - namespace absl { ABSL_NAMESPACE_BEGIN namespace debugging_internal { @@ -124,14 +118,16 @@ bool RemoveAllSymbolDecorators(void); // filename != nullptr // // Returns true if the file was successfully registered. -bool RegisterFileMappingHint(const void* start, const void* end, - uint64_t offset, const char* filename); +bool RegisterFileMappingHint( + const void* start, const void* end, uint64_t offset, const char* filename); // Looks up the file mapping registered by RegisterFileMappingHint for an // address range. If there is one, the file name is stored in *filename and // *start and *end are modified to reflect the registered mapping. Returns // whether any hint was found. -bool GetFileMappingHint(const void** start, const void** end, uint64_t* offset, +bool GetFileMappingHint(const void** start, + const void** end, + uint64_t * offset, const char** filename); } // namespace debugging_internal diff --git a/abseil-cpp/absl/debugging/internal/vdso_support.cc b/abseil-cpp/absl/debugging/internal/vdso_support.cc index 40eb055f..6be16d90 100644 --- a/abseil-cpp/absl/debugging/internal/vdso_support.cc +++ b/abseil-cpp/absl/debugging/internal/vdso_support.cc @@ -20,25 +20,12 @@ #ifdef ABSL_HAVE_VDSO_SUPPORT // defined in vdso_support.h -#if !defined(__has_include) -#define __has_include(header) 0 -#endif - #include #include -#if __has_include() -#include -#elif __has_include() #include -#endif #include -#if !defined(__UCLIBC__) && defined(__GLIBC__) && \ - (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 16)) -#define ABSL_HAVE_GETAUXVAL -#endif - -#ifdef ABSL_HAVE_GETAUXVAL +#if __GLIBC_PREREQ(2, 16) // GLIBC-2.16 implements getauxval. 
#include #endif @@ -50,17 +37,6 @@ #define AT_SYSINFO_EHDR 33 // for crosstoolv10 #endif -#if defined(__NetBSD__) -using Elf32_auxv_t = Aux32Info; -using Elf64_auxv_t = Aux64Info; -#endif -#if defined(__FreeBSD__) -#if defined(__ELF_WORD_SIZE) && __ELF_WORD_SIZE == 64 -using Elf64_auxv_t = Elf64_Auxinfo; -#endif -using Elf32_auxv_t = Elf32_Auxinfo; -#endif - namespace absl { ABSL_NAMESPACE_BEGIN namespace debugging_internal { @@ -69,9 +45,7 @@ ABSL_CONST_INIT std::atomic VDSOSupport::vdso_base_( debugging_internal::ElfMemImage::kInvalidBase); -ABSL_CONST_INIT std::atomic VDSOSupport::getcpu_fn_( - &InitAndGetCPU); - +std::atomic VDSOSupport::getcpu_fn_(&InitAndGetCPU); VDSOSupport::VDSOSupport() // If vdso_base_ is still set to kInvalidBase, we got here // before VDSOSupport::Init has been called. Call it now. @@ -91,7 +65,7 @@ VDSOSupport::VDSOSupport() // the operation should be idempotent. const void *VDSOSupport::Init() { const auto kInvalidBase = debugging_internal::ElfMemImage::kInvalidBase; -#ifdef ABSL_HAVE_GETAUXVAL +#if __GLIBC_PREREQ(2, 16) if (vdso_base_.load(std::memory_order_relaxed) == kInvalidBase) { errno = 0; const void *const sysinfo_ehdr = @@ -100,7 +74,7 @@ const void *VDSOSupport::Init() { vdso_base_.store(sysinfo_ehdr, std::memory_order_relaxed); } } -#endif // ABSL_HAVE_GETAUXVAL +#endif // __GLIBC_PREREQ(2, 16) if (vdso_base_.load(std::memory_order_relaxed) == kInvalidBase) { int fd = open("/proc/self/auxv", O_RDONLY); if (fd == -1) { @@ -112,13 +86,8 @@ const void *VDSOSupport::Init() { ElfW(auxv_t) aux; while (read(fd, &aux, sizeof(aux)) == sizeof(aux)) { if (aux.a_type == AT_SYSINFO_EHDR) { -#if defined(__NetBSD__) - vdso_base_.store(reinterpret_cast(aux.a_v), - std::memory_order_relaxed); -#else vdso_base_.store(reinterpret_cast(aux.a_un.a_val), std::memory_order_relaxed); -#endif break; } } diff --git a/abseil-cpp/absl/debugging/leak_check.cc b/abseil-cpp/absl/debugging/leak_check.cc index 195e82bf..ff904955 100644 --- 
a/abseil-cpp/absl/debugging/leak_check.cc +++ b/abseil-cpp/absl/debugging/leak_check.cc @@ -11,39 +11,33 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// -// Wrappers around lsan_interface functions. -// -// These are always-available run-time functions manipulating the LeakSanitizer, -// even when the lsan_interface (and LeakSanitizer) is not available. When -// LeakSanitizer is not linked in, these functions become no-op stubs. +// Wrappers around lsan_interface functions. +// When lsan is not linked in, these functions are not available, +// therefore Abseil code which depends on these functions is conditioned on the +// definition of LEAK_SANITIZER. #include "absl/debugging/leak_check.h" -#include "absl/base/attributes.h" -#include "absl/base/config.h" +#ifndef LEAK_SANITIZER -#if defined(ABSL_HAVE_LEAK_SANITIZER) +namespace absl { +ABSL_NAMESPACE_BEGIN +bool HaveLeakSanitizer() { return false; } +void DoIgnoreLeak(const void*) { } +void RegisterLivePointers(const void*, size_t) { } +void UnRegisterLivePointers(const void*, size_t) { } +LeakCheckDisabler::LeakCheckDisabler() { } +LeakCheckDisabler::~LeakCheckDisabler() { } +ABSL_NAMESPACE_END +} // namespace absl -#include +#else -#if ABSL_HAVE_ATTRIBUTE_WEAK -extern "C" ABSL_ATTRIBUTE_WEAK int __lsan_is_turned_off(); -#endif +#include namespace absl { ABSL_NAMESPACE_BEGIN bool HaveLeakSanitizer() { return true; } - -#if ABSL_HAVE_ATTRIBUTE_WEAK -bool LeakCheckerIsActive() { - return !(&__lsan_is_turned_off && __lsan_is_turned_off()); -} -#else -bool LeakCheckerIsActive() { return true; } -#endif - -bool FindAndReportLeaks() { return __lsan_do_recoverable_leak_check(); } void DoIgnoreLeak(const void* ptr) { __lsan_ignore_object(ptr); } void RegisterLivePointers(const void* ptr, size_t size) { __lsan_register_root_region(ptr, size); @@ -56,18 +50,4 @@ 
LeakCheckDisabler::~LeakCheckDisabler() { __lsan_enable(); } ABSL_NAMESPACE_END } // namespace absl -#else // defined(ABSL_HAVE_LEAK_SANITIZER) - -namespace absl { -ABSL_NAMESPACE_BEGIN -bool HaveLeakSanitizer() { return false; } -bool LeakCheckerIsActive() { return false; } -void DoIgnoreLeak(const void*) { } -void RegisterLivePointers(const void*, size_t) { } -void UnRegisterLivePointers(const void*, size_t) { } -LeakCheckDisabler::LeakCheckDisabler() { } -LeakCheckDisabler::~LeakCheckDisabler() { } -ABSL_NAMESPACE_END -} // namespace absl - -#endif // defined(ABSL_HAVE_LEAK_SANITIZER) +#endif // LEAK_SANITIZER diff --git a/abseil-cpp/absl/debugging/leak_check.h b/abseil-cpp/absl/debugging/leak_check.h index eff162f6..7a5a22dd 100644 --- a/abseil-cpp/absl/debugging/leak_check.h +++ b/abseil-cpp/absl/debugging/leak_check.h @@ -24,24 +24,7 @@ // Note: this leak checking API is not yet supported in MSVC. // Leak checking is enabled by default in all ASan builds. // -// https://clang.llvm.org/docs/LeakSanitizer.html -// https://github.com/google/sanitizers/wiki/AddressSanitizerLeakSanitizer -// -// GCC and Clang both automatically enable LeakSanitizer when AddressSanitizer -// is enabled. To use the mode, simply pass `-fsanitize=address` to both the -// compiler and linker. An example Bazel command could be -// -// $ bazel test --copt=-fsanitize=address --linkopt=-fsanitize=address ... -// -// GCC and Clang auto support a standalone LeakSanitizer mode (a mode which does -// not also use AddressSanitizer). To use the mode, simply pass -// `-fsanitize=leak` to both the compiler and linker. Since GCC does not -// currently provide a way of detecting this mode at compile-time, GCC users -// must also pass -DLEAK_SANIITIZER to the compiler. An example Bazel command -// could be -// -// $ bazel test --copt=-DLEAK_SANITIZER --copt=-fsanitize=leak -// --linkopt=-fsanitize=leak ... 
+// See https://github.com/google/sanitizers/wiki/AddressSanitizerLeakSanitizer // // ----------------------------------------------------------------------------- #ifndef ABSL_DEBUGGING_LEAK_CHECK_H_ @@ -60,12 +43,6 @@ ABSL_NAMESPACE_BEGIN // currently built into this target. bool HaveLeakSanitizer(); -// LeakCheckerIsActive() -// -// Returns true if a leak-checking sanitizer (either ASan or standalone LSan) is -// currently built into this target and is turned on. -bool LeakCheckerIsActive(); - // DoIgnoreLeak() // // Implements `IgnoreLeak()` below. This function should usually @@ -85,8 +62,7 @@ void DoIgnoreLeak(const void* ptr); // // If the passed `ptr` does not point to an actively allocated object at the // time `IgnoreLeak()` is called, the call is a no-op; if it is actively -// allocated, leak sanitizer will assume this object is referenced even if -// there is no actual reference in user memory. +// allocated, the object must not get deallocated later. // template T* IgnoreLeak(T* ptr) { @@ -94,19 +70,6 @@ T* IgnoreLeak(T* ptr) { return ptr; } -// FindAndReportLeaks() -// -// If any leaks are detected, prints a leak report and returns true. This -// function may be called repeatedly, and does not affect end-of-process leak -// checking. -// -// Example: -// if (FindAndReportLeaks()) { -// ... diagnostic already printed. Exit with failure code. -// exit(1) -// } -bool FindAndReportLeaks(); - // LeakCheckDisabler // // This helper class indicates that any heap allocations done in the code block diff --git a/abseil-cpp/absl/strings/cord_buffer.cc b/abseil-cpp/absl/debugging/leak_check_disable.cc similarity index 55% rename from abseil-cpp/absl/strings/cord_buffer.cc rename to abseil-cpp/absl/debugging/leak_check_disable.cc index fad6269c..924d6e3d 100644 --- a/abseil-cpp/absl/strings/cord_buffer.cc +++ b/abseil-cpp/absl/debugging/leak_check_disable.cc @@ -1,10 +1,10 @@ -// Copyright 2022 The Abseil Authors +// Copyright 2017 The Abseil Authors. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// https://www.apache.org/licenses/LICENSE-2.0 +// https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -12,19 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "absl/strings/cord_buffer.h" - -#include - -#include "absl/base/config.h" - -namespace absl { -ABSL_NAMESPACE_BEGIN - -#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL -constexpr size_t CordBuffer::kDefaultLimit; -constexpr size_t CordBuffer::kCustomLimit; -#endif - -ABSL_NAMESPACE_END -} // namespace absl +// Disable LeakSanitizer when this file is linked in. +// This function overrides __lsan_is_turned_off from sanitizer/lsan_interface.h +extern "C" int __lsan_is_turned_off(); +extern "C" int __lsan_is_turned_off() { + return 1; +} diff --git a/abseil-cpp/absl/debugging/leak_check_test.cc b/abseil-cpp/absl/debugging/leak_check_test.cc index 6a42e31b..b5cc4874 100644 --- a/abseil-cpp/absl/debugging/leak_check_test.cc +++ b/abseil-cpp/absl/debugging/leak_check_test.cc @@ -15,24 +15,25 @@ #include #include "gtest/gtest.h" -#include "absl/base/config.h" #include "absl/base/internal/raw_logging.h" #include "absl/debugging/leak_check.h" namespace { +TEST(LeakCheckTest, DetectLeakSanitizer) { +#ifdef ABSL_EXPECT_LEAK_SANITIZER + EXPECT_TRUE(absl::HaveLeakSanitizer()); +#else + EXPECT_FALSE(absl::HaveLeakSanitizer()); +#endif +} + TEST(LeakCheckTest, IgnoreLeakSuppressesLeakedMemoryErrors) { - if (!absl::LeakCheckerIsActive()) { - GTEST_SKIP() << "LeakChecker is not active"; - } auto foo = absl::IgnoreLeak(new std::string("some ignored leaked string")); ABSL_RAW_LOG(INFO, "Ignoring leaked string %s", foo->c_str()); } 
TEST(LeakCheckTest, LeakCheckDisablerIgnoresLeak) { - if (!absl::LeakCheckerIsActive()) { - GTEST_SKIP() << "LeakChecker is not active"; - } absl::LeakCheckDisabler disabler; auto foo = new std::string("some string leaked while checks are disabled"); ABSL_RAW_LOG(INFO, "Ignoring leaked string %s", foo->c_str()); diff --git a/abseil-cpp/absl/debugging/stacktrace.cc b/abseil-cpp/absl/debugging/stacktrace.cc index ff8069f8..1f7c7d82 100644 --- a/abseil-cpp/absl/debugging/stacktrace.cc +++ b/abseil-cpp/absl/debugging/stacktrace.cc @@ -49,10 +49,8 @@ # include "absl/debugging/internal/stacktrace_aarch64-inl.inc" # include "absl/debugging/internal/stacktrace_arm-inl.inc" -# include "absl/debugging/internal/stacktrace_emscripten-inl.inc" # include "absl/debugging/internal/stacktrace_generic-inl.inc" # include "absl/debugging/internal/stacktrace_powerpc-inl.inc" -# include "absl/debugging/internal/stacktrace_riscv-inl.inc" # include "absl/debugging/internal/stacktrace_unimplemented-inl.inc" # include "absl/debugging/internal/stacktrace_win32-inl.inc" # include "absl/debugging/internal/stacktrace_x86-inl.inc" diff --git a/abseil-cpp/absl/debugging/stacktrace_benchmark.cc b/abseil-cpp/absl/debugging/stacktrace_benchmark.cc deleted file mode 100644 index 9360bafe..00000000 --- a/abseil-cpp/absl/debugging/stacktrace_benchmark.cc +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2022 The Abseil Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#include "absl/base/attributes.h" -#include "absl/base/config.h" -#include "absl/base/optimization.h" -#include "absl/debugging/stacktrace.h" -#include "benchmark/benchmark.h" - -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace { - -static constexpr int kMaxStackDepth = 100; -static constexpr int kCacheSize = (1 << 16); -void* pcs[kMaxStackDepth]; - -ABSL_ATTRIBUTE_NOINLINE void func(benchmark::State& state, int x, int depth) { - if (x <= 0) { - // Touch a significant amount of memory so that the stack is likely to be - // not cached in the L1 cache. - state.PauseTiming(); - int* arr = new int[kCacheSize]; - for (int i = 0; i < kCacheSize; ++i) benchmark::DoNotOptimize(arr[i] = 100); - delete[] arr; - state.ResumeTiming(); - benchmark::DoNotOptimize(absl::GetStackTrace(pcs, depth, 0)); - return; - } - ABSL_BLOCK_TAIL_CALL_OPTIMIZATION(); - func(state, --x, depth); -} - -void BM_GetStackTrace(benchmark::State& state) { - int depth = state.range(0); - for (auto s : state) { - func(state, depth, depth); - } -} - -BENCHMARK(BM_GetStackTrace)->DenseRange(10, kMaxStackDepth, 10); -} // namespace -ABSL_NAMESPACE_END -} // namespace absl diff --git a/abseil-cpp/absl/debugging/symbolize.cc b/abseil-cpp/absl/debugging/symbolize.cc index 638d3954..5e4a25d6 100644 --- a/abseil-cpp/absl/debugging/symbolize.cc +++ b/abseil-cpp/absl/debugging/symbolize.cc @@ -23,11 +23,6 @@ #endif #endif -// Emscripten symbolization relies on JS. Do not use them in standalone mode. 
-#if defined(__EMSCRIPTEN__) && !defined(STANDALONE_WASM) -#define ABSL_INTERNAL_HAVE_SYMBOLIZE_WASM -#endif - #if defined(ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE) #include "absl/debugging/symbolize_elf.inc" #elif defined(ABSL_INTERNAL_HAVE_SYMBOLIZE_WIN32) @@ -36,8 +31,6 @@ #include "absl/debugging/symbolize_win32.inc" #elif defined(__APPLE__) #include "absl/debugging/symbolize_darwin.inc" -#elif defined(ABSL_INTERNAL_HAVE_SYMBOLIZE_WASM) -#include "absl/debugging/symbolize_emscripten.inc" #else #include "absl/debugging/symbolize_unimplemented.inc" #endif diff --git a/abseil-cpp/absl/debugging/symbolize_darwin.inc b/abseil-cpp/absl/debugging/symbolize_darwin.inc index 443ce9ef..cdadd40e 100644 --- a/abseil-cpp/absl/debugging/symbolize_darwin.inc +++ b/abseil-cpp/absl/debugging/symbolize_darwin.inc @@ -77,8 +77,8 @@ bool Symbolize(const void* pc, char* out, int out_size) { char tmp_buf[1024]; if (debugging_internal::Demangle(symbol.c_str(), tmp_buf, sizeof(tmp_buf))) { - size_t len = strlen(tmp_buf); - if (len + 1 <= static_cast(out_size)) { // +1 for '\0' + int len = strlen(tmp_buf); + if (len + 1 <= out_size) { // +1 for '\0' assert(len < sizeof(tmp_buf)); memmove(out, tmp_buf, len + 1); } diff --git a/abseil-cpp/absl/debugging/symbolize_elf.inc b/abseil-cpp/absl/debugging/symbolize_elf.inc index 9bfdd915..7c36fd13 100644 --- a/abseil-cpp/absl/debugging/symbolize_elf.inc +++ b/abseil-cpp/absl/debugging/symbolize_elf.inc @@ -77,10 +77,6 @@ #include "absl/debugging/internal/vdso_support.h" #include "absl/strings/string_view.h" -#if defined(__FreeBSD__) && !defined(ElfW) -#define ElfW(x) __ElfN(x) -#endif - namespace absl { ABSL_NAMESPACE_BEGIN @@ -323,7 +319,6 @@ class Symbolizer { const ptrdiff_t relocation, char *out, int out_size, char *tmp_buf, int tmp_buf_size); - const char *GetUncachedSymbol(const void *pc); enum { SYMBOL_BUF_SIZE = 3072, @@ -706,16 +701,6 @@ static ABSL_ATTRIBUTE_NOINLINE FindSymbolResult FindSymbol( const char *start_address = 
ComputeOffset(original_start_address, relocation); -#ifdef __arm__ - // ARM functions are always aligned to multiples of two bytes; the - // lowest-order bit in start_address is ignored by the CPU and indicates - // whether the function contains ARM (0) or Thumb (1) code. We don't care - // about what encoding is being used; we just want the real start address - // of the function. - start_address = reinterpret_cast( - reinterpret_cast(start_address) & ~1); -#endif - if (deref_function_descriptor_pointer && InSection(original_start_address, opd)) { // The opd section is mapped into memory. Just dereference @@ -1146,14 +1131,6 @@ bool Symbolizer::RegisterObjFile(const char *filename, reinterpret_cast(old->end_addr), old->filename); } return true; - } else if (old->end_addr == start_addr && - reinterpret_cast(old->start_addr) - old->offset == - reinterpret_cast(start_addr) - offset && - strcmp(old->filename, filename) == 0) { - // Two contiguous map entries that span a contiguous region of the file, - // perhaps because some part of the file was mlock()ed. Combine them. - old->end_addr = end_addr; - return true; } } ObjFile *obj = impl->addr_map_.Add(); @@ -1304,7 +1281,7 @@ static bool MaybeInitializeObjFile(ObjFile *obj) { const int phnum = obj->elf_header.e_phnum; const int phentsize = obj->elf_header.e_phentsize; size_t phoff = obj->elf_header.e_phoff; - size_t num_executable_load_segments = 0; + int num_executable_load_segments = 0; for (int j = 0; j < phnum; j++) { ElfW(Phdr) phdr; if (!ReadFromOffsetExact(obj->fd, &phdr, sizeof(phdr), phoff)) { @@ -1342,7 +1319,13 @@ static bool MaybeInitializeObjFile(ObjFile *obj) { // they are called here as well. // To keep stack consumption low, we would like this function to not // get inlined. 
-const char *Symbolizer::GetUncachedSymbol(const void *pc) { +const char *Symbolizer::GetSymbol(const void *const pc) { + const char *entry = FindSymbolInCache(pc); + if (entry != nullptr) { + return entry; + } + symbol_buf_[0] = '\0'; + ObjFile *const obj = FindObjFile(pc, 1); ptrdiff_t relocation = 0; int fd = -1; @@ -1359,7 +1342,7 @@ const char *Symbolizer::GetUncachedSymbol(const void *pc) { // Note: some binaries have multiple "rx" LOAD segments. We must // find the right one. ElfW(Phdr) *phdr = nullptr; - for (size_t j = 0; j < obj->phdr.size(); j++) { + for (int j = 0; j < obj->phdr.size(); j++) { ElfW(Phdr) &p = obj->phdr[j]; if (p.p_type != PT_LOAD) { // We only expect PT_LOADs. This must be PT_NULL that we didn't @@ -1430,42 +1413,6 @@ const char *Symbolizer::GetUncachedSymbol(const void *pc) { return InsertSymbolInCache(pc, symbol_buf_); } -const char *Symbolizer::GetSymbol(const void *pc) { - const char *entry = FindSymbolInCache(pc); - if (entry != nullptr) { - return entry; - } - symbol_buf_[0] = '\0'; - -#ifdef __hppa__ - { - // In some contexts (e.g., return addresses), PA-RISC uses the lowest two - // bits of the address to indicate the privilege level. Clear those bits - // before trying to symbolize. - const auto pc_bits = reinterpret_cast(pc); - const auto address = pc_bits & ~0x3; - entry = GetUncachedSymbol(reinterpret_cast(address)); - if (entry != nullptr) { - return entry; - } - - // In some contexts, PA-RISC also uses bit 1 of the address to indicate that - // this is a cross-DSO function pointer. Such function pointers actually - // point to a procedure label, a struct whose first 32-bit (pointer) element - // actually points to the function text. With no symbol found for this - // address so far, try interpreting it as a cross-DSO function pointer and - // see how that goes. 
- if (pc_bits & 0x2) { - return GetUncachedSymbol(*reinterpret_cast(address)); - } - - return nullptr; - } -#else - return GetUncachedSymbol(pc); -#endif -} - bool RemoveAllSymbolDecorators(void) { if (!g_decorators_mu.TryLock()) { // Someone else is using decorators. Get out. diff --git a/abseil-cpp/absl/debugging/symbolize_emscripten.inc b/abseil-cpp/absl/debugging/symbolize_emscripten.inc deleted file mode 100644 index c226c456..00000000 --- a/abseil-cpp/absl/debugging/symbolize_emscripten.inc +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2020 The Abseil Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include -#include - -#include -#include - -#include "absl/base/internal/raw_logging.h" -#include "absl/debugging/internal/demangle.h" -#include "absl/strings/numbers.h" -#include "absl/strings/str_cat.h" -#include "absl/strings/string_view.h" - -extern "C" { -const char* emscripten_pc_get_function(const void* pc); -} - -// clang-format off -EM_JS(bool, HaveOffsetConverter, (), - { return typeof wasmOffsetConverter !== 'undefined'; }); -// clang-format on - -namespace absl { -ABSL_NAMESPACE_BEGIN - -void InitializeSymbolizer(const char*) { - if (!HaveOffsetConverter()) { - ABSL_RAW_LOG(INFO, - "Symbolization unavailable. Rebuild with -sWASM=1 " - "and -sUSE_OFFSET_CONVERTER=1."); - } -} - -bool Symbolize(const void* pc, char* out, int out_size) { - // Check if we have the offset converter necessary for pc_get_function. 
- // Without it, the program will abort(). - if (!HaveOffsetConverter()) { - return false; - } - const char* func_name = emscripten_pc_get_function(pc); - if (func_name == nullptr) { - return false; - } - - strncpy(out, func_name, out_size); - - if (out[out_size - 1] != '\0') { - // strncpy() does not '\0' terminate when it truncates. - static constexpr char kEllipsis[] = "..."; - int ellipsis_size = std::min(sizeof(kEllipsis) - 1, out_size - 1); - memcpy(out + out_size - ellipsis_size - 1, kEllipsis, ellipsis_size); - out[out_size - 1] = '\0'; - } - - return true; -} - -ABSL_NAMESPACE_END -} // namespace absl diff --git a/abseil-cpp/absl/debugging/symbolize_test.cc b/abseil-cpp/absl/debugging/symbolize_test.cc index 3165c6ed..a2dd4956 100644 --- a/abseil-cpp/absl/debugging/symbolize_test.cc +++ b/abseil-cpp/absl/debugging/symbolize_test.cc @@ -146,22 +146,8 @@ static const char *TrySymbolize(void *pc) { return TrySymbolizeWithLimit(pc, sizeof(try_symbolize_buffer)); } -#if defined(ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE) || \ - defined(ABSL_INTERNAL_HAVE_DARWIN_SYMBOLIZE) || \ - defined(ABSL_INTERNAL_HAVE_EMSCRIPTEN_SYMBOLIZE) - -// Test with a return address. -void ABSL_ATTRIBUTE_NOINLINE TestWithReturnAddress() { -#if defined(ABSL_HAVE_ATTRIBUTE_NOINLINE) - void *return_address = __builtin_return_address(0); - const char *symbol = TrySymbolize(return_address); - ABSL_RAW_CHECK(symbol != nullptr, "TestWithReturnAddress failed"); - ABSL_RAW_CHECK(strcmp(symbol, "main") == 0, "TestWithReturnAddress failed"); - std::cout << "TestWithReturnAddress passed" << std::endl; -#endif -} - -#ifndef ABSL_INTERNAL_HAVE_EMSCRIPTEN_SYMBOLIZE +#if defined(ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE) || \ + defined(ABSL_INTERNAL_HAVE_DARWIN_SYMBOLIZE) TEST(Symbolize, Cached) { // Compilers should give us pointers to them. 
@@ -392,14 +378,12 @@ TEST(Symbolize, InstallAndRemoveSymbolDecorators) { DummySymbolDecorator, &c_message), 0); - // Use addresses 4 and 8 here to ensure that we always use valid addresses - // even on systems that require instructions to be 32-bit aligned. - char *address = reinterpret_cast(4); - EXPECT_STREQ("abc", TrySymbolize(address)); + char *address = reinterpret_cast(1); + EXPECT_STREQ("abc", TrySymbolize(address++)); EXPECT_TRUE(absl::debugging_internal::RemoveSymbolDecorator(ticket_b)); - EXPECT_STREQ("ac", TrySymbolize(address + 4)); + EXPECT_STREQ("ac", TrySymbolize(address++)); // Cleanup: remove all remaining decorators so other stack traces don't // get mystery "ac" decoration. @@ -434,7 +418,6 @@ TEST(Symbolize, ForEachSection) { close(fd); } #endif // !ABSL_INTERNAL_HAVE_DARWIN_SYMBOLIZE -#endif // !ABSL_INTERNAL_HAVE_EMSCRIPTEN_SYMBOLIZE // x86 specific tests. Uses some inline assembler. extern "C" { @@ -483,52 +466,17 @@ void ABSL_ATTRIBUTE_NOINLINE TestWithPCInsideInlineFunction() { } } -#if defined(__arm__) && ABSL_HAVE_ATTRIBUTE(target) && \ - ((__ARM_ARCH >= 7) || !defined(__ARM_PCS_VFP)) -// Test that we correctly identify bounds of Thumb functions on ARM. -// -// Thumb functions have the lowest-order bit set in their addresses in the ELF -// symbol table. This requires some extra logic to properly compute function -// bounds. To test this logic, nudge a Thumb function right up against an ARM -// function and try to symbolize the ARM function. -// -// A naive implementation will simply use the Thumb function's entry point as -// written in the symbol table and will therefore treat the Thumb function as -// extending one byte further in the instruction stream than it actually does. -// When asked to symbolize the start of the ARM function, it will identify an -// overlap between the Thumb and ARM functions, and it will return the name of -// the Thumb function. 
-// -// A correct implementation, on the other hand, will null out the lowest-order -// bit in the Thumb function's entry point. It will correctly compute the end of -// the Thumb function, it will find no overlap between the Thumb and ARM -// functions, and it will return the name of the ARM function. -// -// Unfortunately we cannot perform this test on armv6 or lower systems that use -// the hard float ABI because gcc refuses to compile thumb functions on such -// systems with a "sorry, unimplemented: Thumb-1 hard-float VFP ABI" error. - -__attribute__((target("thumb"))) int ArmThumbOverlapThumb(int x) { - return x * x * x; -} - -__attribute__((target("arm"))) int ArmThumbOverlapArm(int x) { - return x * x * x; -} - -void ABSL_ATTRIBUTE_NOINLINE TestArmThumbOverlap() { +// Test with a return address. +void ABSL_ATTRIBUTE_NOINLINE TestWithReturnAddress() { #if defined(ABSL_HAVE_ATTRIBUTE_NOINLINE) - const char *symbol = TrySymbolize((void *)&ArmThumbOverlapArm); - ABSL_RAW_CHECK(symbol != nullptr, "TestArmThumbOverlap failed"); - ABSL_RAW_CHECK(strcmp("ArmThumbOverlapArm()", symbol) == 0, - "TestArmThumbOverlap failed"); - std::cout << "TestArmThumbOverlap passed" << std::endl; + void *return_address = __builtin_return_address(0); + const char *symbol = TrySymbolize(return_address); + ABSL_RAW_CHECK(symbol != nullptr, "TestWithReturnAddress failed"); + ABSL_RAW_CHECK(strcmp(symbol, "main") == 0, "TestWithReturnAddress failed"); + std::cout << "TestWithReturnAddress passed" << std::endl; #endif } -#endif // defined(__arm__) && ABSL_HAVE_ATTRIBUTE(target) && ((__ARM_ARCH >= 7) - // || !defined(__ARM_PCS_VFP)) - #elif defined(_WIN32) #if !defined(ABSL_CONSUME_DLL) @@ -571,6 +519,7 @@ TEST(Symbolize, SymbolizeWithDemangling) { #endif // !defined(ABSL_CONSUME_DLL) #else // Symbolizer unimplemented + TEST(Symbolize, Unimplemented) { char buf[64]; EXPECT_FALSE(absl::Symbolize((void *)(&nonstatic_func), buf, sizeof(buf))); @@ -602,10 +551,6 @@ int main(int argc, char 
**argv) { TestWithPCInsideInlineFunction(); TestWithPCInsideNonInlineFunction(); TestWithReturnAddress(); -#if defined(__arm__) && ABSL_HAVE_ATTRIBUTE(target) && \ - ((__ARM_ARCH >= 7) || !defined(__ARM_PCS_VFP)) - TestArmThumbOverlap(); -#endif #endif return RUN_ALL_TESTS(); diff --git a/abseil-cpp/absl/flags/BUILD.bazel b/abseil-cpp/absl/flags/BUILD.bazel index 4ca687ee..62fb9a8b 100644 --- a/abseil-cpp/absl/flags/BUILD.bazel +++ b/abseil-cpp/absl/flags/BUILD.bazel @@ -14,6 +14,7 @@ # limitations under the License. # +load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_library", "cc_test") load( "//absl:copts/configure_copts.bzl", "ABSL_DEFAULT_COPTS", @@ -100,7 +101,6 @@ cc_library( "//absl/base:log_severity", "//absl/strings", "//absl/strings:str_format", - "//absl/types:optional", ], ) @@ -114,9 +114,7 @@ cc_library( ], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, - visibility = [ - "//visibility:private", - ], + visibility = ["//visibility:private"], deps = [ "//absl/base:config", "//absl/base:fast_type_id", @@ -194,7 +192,6 @@ cc_library( ], hdrs = [ "internal/flag.h", - "internal/sequence_lock.h", ], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, @@ -208,7 +205,6 @@ cc_library( "//absl/base", "//absl/base:config", "//absl/base:core_headers", - "//absl/base:dynamic_annotations", "//absl/memory", "//absl/meta:type_traits", "//absl/strings", @@ -221,7 +217,6 @@ cc_library( name = "flag", srcs = [ "flag.cc", - "internal/flag_msvc.inc", ], hdrs = [ "declare.h", @@ -264,7 +259,6 @@ cc_library( ":reflection", "//absl/base:config", "//absl/base:core_headers", - "//absl/container:flat_hash_map", "//absl/strings", ], ) @@ -326,11 +320,6 @@ cc_test( ], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, - tags = [ - "no_test_android", - "no_test_ios", - "no_test_wasm", - ], deps = [ ":commandlineflag", ":commandlineflag_internal", @@ -367,11 +356,6 @@ cc_test( ], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, - tags = [ - 
"no_test_android", - "no_test_ios", - "no_test_wasm", - ], deps = [ ":config", ":flag", @@ -393,17 +377,11 @@ cc_binary( "flag_benchmark.cc", ], copts = ABSL_TEST_COPTS, - linkopts = select({ - "//conditions:default": [], - }) + ABSL_DEFAULT_LINKOPTS, tags = ["benchmark"], visibility = ["//visibility:private"], deps = [ - "flag_benchmark.lds", ":flag", ":marshalling", - ":parse", - ":reflection", "//absl/strings", "//absl/time", "//absl/types:optional", @@ -433,16 +411,10 @@ cc_test( ], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, - tags = [ - "no_test_android", - "no_test_ios", - "no_test_wasm", - ], deps = [ ":flag", ":parse", ":reflection", - ":usage_internal", "//absl/base:raw_logging_internal", "//absl/base:scoped_set_env", "//absl/strings", @@ -473,7 +445,6 @@ cc_test( ], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, - tags = ["no_test_wasm"], deps = [ ":program_name", "//absl/strings", @@ -489,11 +460,6 @@ cc_test( ], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, - tags = [ - "no_test_android", - "no_test_ios", - "no_test_wasm", - ], deps = [ ":commandlineflag_internal", ":flag", @@ -506,26 +472,6 @@ cc_test( ], ) -cc_test( - name = "sequence_lock_test", - size = "small", - timeout = "moderate", - srcs = [ - "internal/sequence_lock_test.cc", - ], - copts = ABSL_TEST_COPTS, - linkopts = ABSL_DEFAULT_LINKOPTS, - shard_count = 31, - tags = ["no_test_wasm"], - deps = [ - ":flag_internal", - "//absl/base", - "//absl/container:fixed_array", - "//absl/time", - "@com_google_googletest//:gtest_main", - ], -) - cc_test( name = "usage_config_test", size = "small", @@ -551,11 +497,6 @@ cc_test( ], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, - tags = [ - "no_test_android", - "no_test_ios", - "no_test_wasm", - ], deps = [ ":config", ":flag", diff --git a/abseil-cpp/absl/flags/CMakeLists.txt b/abseil-cpp/absl/flags/CMakeLists.txt index 3e9d5adf..88551914 100644 --- a/abseil-cpp/absl/flags/CMakeLists.txt +++ 
b/abseil-cpp/absl/flags/CMakeLists.txt @@ -87,7 +87,6 @@ absl_cc_library( absl::config absl::core_headers absl::log_severity - absl::optional absl::strings absl::str_format ) @@ -106,7 +105,6 @@ absl_cc_library( ${ABSL_DEFAULT_LINKOPTS} DEPS absl::config - absl::dynamic_annotations absl::fast_type_id ) @@ -178,7 +176,6 @@ absl_cc_library( "internal/flag.cc" HDRS "internal/flag.h" - "internal/sequence_lock.h" COPTS ${ABSL_DEFAULT_COPTS} LINKOPTS @@ -204,7 +201,6 @@ absl_cc_library( HDRS "declare.h" "flag.h" - "internal/flag_msvc.inc" COPTS ${ABSL_DEFAULT_COPTS} LINKOPTS @@ -242,7 +238,6 @@ absl_cc_library( absl::flags_private_handle_accessor absl::flags_program_name absl::flags_reflection - absl::flat_hash_map absl::strings absl::synchronization ) @@ -313,7 +308,7 @@ absl_cc_test( absl::flags_reflection absl::memory absl::strings - GTest::gtest_main + gtest_main ) absl_cc_test( @@ -325,7 +320,7 @@ absl_cc_test( ${ABSL_TEST_COPTS} DEPS absl::flags_config - GTest::gtest_main + gtest_main ) absl_cc_test( @@ -345,7 +340,7 @@ absl_cc_test( absl::flags_reflection absl::strings absl::time - GTest::gtest_main + gtest_main ) absl_cc_test( @@ -357,7 +352,7 @@ absl_cc_test( ${ABSL_TEST_COPTS} DEPS absl::flags_marshalling - GTest::gtest_main + gtest_main ) absl_cc_test( @@ -371,12 +366,11 @@ absl_cc_test( absl::flags absl::flags_parse absl::flags_reflection - absl::flags_usage_internal absl::raw_logging_internal absl::scoped_set_env absl::span absl::strings - GTest::gmock_main + gmock_main ) absl_cc_test( @@ -388,7 +382,7 @@ absl_cc_test( ${ABSL_TEST_COPTS} DEPS absl::flags_path_util - GTest::gtest_main + gtest_main ) absl_cc_test( @@ -401,7 +395,7 @@ absl_cc_test( DEPS absl::flags_program_name absl::strings - GTest::gtest_main + gtest_main ) absl_cc_test( @@ -418,21 +412,7 @@ absl_cc_test( absl::flags_usage absl::memory absl::strings - GTest::gmock_main -) - -absl_cc_test( - NAME - flags_sequence_lock_test - SRCS - "internal/sequence_lock_test.cc" - COPTS - ${ABSL_TEST_COPTS} 
- DEPS - absl::base - absl::flags_internal - absl::time - GTest::gmock_main + gmock_main ) absl_cc_test( @@ -447,7 +427,7 @@ absl_cc_test( absl::flags_path_util absl::flags_program_name absl::strings - GTest::gtest_main + gtest_main ) absl_cc_test( @@ -466,5 +446,5 @@ absl_cc_test( absl::flags_reflection absl::flags_usage absl::strings - GTest::gmock + gtest ) diff --git a/abseil-cpp/absl/flags/config.h b/abseil-cpp/absl/flags/config.h index 14c4235b..813a9257 100644 --- a/abseil-cpp/absl/flags/config.h +++ b/abseil-cpp/absl/flags/config.h @@ -45,6 +45,25 @@ #define ABSL_FLAGS_STRIP_HELP ABSL_FLAGS_STRIP_NAMES #endif +// ABSL_FLAGS_INTERNAL_ATOMIC_DOUBLE_WORD macro is used for using atomics with +// double words, e.g. absl::Duration. +// For reasons in bug https://gcc.gnu.org/bugzilla/show_bug.cgi?id=80878, modern +// versions of GCC do not support cmpxchg16b instruction in standard atomics. +#ifdef ABSL_FLAGS_INTERNAL_ATOMIC_DOUBLE_WORD +#error "ABSL_FLAGS_INTERNAL_ATOMIC_DOUBLE_WORD should not be defined." +#elif defined(__clang__) && defined(__x86_64__) && \ + defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) +#define ABSL_FLAGS_INTERNAL_ATOMIC_DOUBLE_WORD 1 +#endif + +// ABSL_FLAGS_INTERNAL_HAS_RTTI macro is used for selecting if we can use RTTI +// for flag type identification. +#ifdef ABSL_FLAGS_INTERNAL_HAS_RTTI +#error ABSL_FLAGS_INTERNAL_HAS_RTTI cannot be directly set +#elif !defined(__GNUC__) || defined(__GXX_RTTI) +#define ABSL_FLAGS_INTERNAL_HAS_RTTI 1 +#endif // !defined(__GNUC__) || defined(__GXX_RTTI) + // These macros represent the "source of truth" for the list of supported // built-in types. 
#define ABSL_FLAGS_INTERNAL_BUILTIN_TYPES(A) \ diff --git a/abseil-cpp/absl/flags/declare.h b/abseil-cpp/absl/flags/declare.h index d1437bb9..b9794d8b 100644 --- a/abseil-cpp/absl/flags/declare.h +++ b/abseil-cpp/absl/flags/declare.h @@ -60,14 +60,6 @@ ABSL_NAMESPACE_END // The ABSL_DECLARE_FLAG(type, name) macro expands to: // // extern absl::Flag FLAGS_name; -#define ABSL_DECLARE_FLAG(type, name) ABSL_DECLARE_FLAG_INTERNAL(type, name) - -// Internal implementation of ABSL_DECLARE_FLAG to allow macro expansion of its -// arguments. Clients must use ABSL_DECLARE_FLAG instead. -#define ABSL_DECLARE_FLAG_INTERNAL(type, name) \ - extern absl::Flag FLAGS_##name; \ - namespace absl /* block flags in namespaces */ {} \ - /* second redeclaration is to allow applying attributes */ \ - extern absl::Flag FLAGS_##name +#define ABSL_DECLARE_FLAG(type, name) extern ::absl::Flag FLAGS_##name #endif // ABSL_FLAGS_DECLARE_H_ diff --git a/abseil-cpp/absl/flags/flag.h b/abseil-cpp/absl/flags/flag.h index b7f94be7..a9cb2b79 100644 --- a/abseil-cpp/absl/flags/flag.h +++ b/abseil-cpp/absl/flags/flag.h @@ -67,15 +67,105 @@ ABSL_NAMESPACE_BEGIN // ABSL_FLAG(int, count, 0, "Count of items to process"); // // No public methods of `absl::Flag` are part of the Abseil Flags API. -// -// For type support of Abseil Flags, see the marshalling.h header file, which -// discusses supported standard types, optional flags, and additional Abseil -// type support. #if !defined(_MSC_VER) || defined(__clang__) template using Flag = flags_internal::Flag; #else -#include "absl/flags/internal/flag_msvc.inc" +// MSVC debug builds do not implement initialization with constexpr constructors +// correctly. To work around this we add a level of indirection, so that the +// class `absl::Flag` contains an `internal::Flag*` (instead of being an alias +// to that class) and dynamically allocates an instance when necessary. We also +// forward all calls to internal::Flag methods via trampoline methods. 
In this +// setup the `absl::Flag` class does not have constructor and virtual methods, +// all the data members are public and thus MSVC is able to initialize it at +// link time. To deal with multiple threads accessing the flag for the first +// time concurrently we use an atomic boolean indicating if flag object is +// initialized. We also employ the double-checked locking pattern where the +// second level of protection is a global Mutex, so if two threads attempt to +// construct the flag concurrently only one wins. +// This solution is based on a recomendation here: +// https://developercommunity.visualstudio.com/content/problem/336946/class-with-constexpr-constructor-not-using-static.html?childToView=648454#comment-648454 + +namespace flags_internal { +absl::Mutex* GetGlobalConstructionGuard(); +} // namespace flags_internal + +template +class Flag { + public: + // No constructor and destructor to ensure this is an aggregate type. + // Visual Studio 2015 still requires the constructor for class to be + // constexpr initializable. 
+#if _MSC_VER <= 1900 + constexpr Flag(const char* name, const char* filename, + const flags_internal::HelpGenFunc help_gen, + const flags_internal::FlagDfltGenFunc default_value_gen) + : name_(name), + filename_(filename), + help_gen_(help_gen), + default_value_gen_(default_value_gen), + inited_(false), + impl_(nullptr) {} +#endif + + flags_internal::Flag& GetImpl() const { + if (!inited_.load(std::memory_order_acquire)) { + absl::MutexLock l(flags_internal::GetGlobalConstructionGuard()); + + if (inited_.load(std::memory_order_acquire)) { + return *impl_; + } + + impl_ = new flags_internal::Flag( + name_, filename_, + {flags_internal::FlagHelpMsg(help_gen_), + flags_internal::FlagHelpKind::kGenFunc}, + {flags_internal::FlagDefaultSrc(default_value_gen_), + flags_internal::FlagDefaultKind::kGenFunc}); + inited_.store(true, std::memory_order_release); + } + + return *impl_; + } + + // Public methods of `absl::Flag` are NOT part of the Abseil Flags API. + // See https://abseil.io/docs/cpp/guides/flags + bool IsRetired() const { return GetImpl().IsRetired(); } + absl::string_view Name() const { return GetImpl().Name(); } + std::string Help() const { return GetImpl().Help(); } + bool IsModified() const { return GetImpl().IsModified(); } + bool IsSpecifiedOnCommandLine() const { + return GetImpl().IsSpecifiedOnCommandLine(); + } + std::string Filename() const { return GetImpl().Filename(); } + std::string DefaultValue() const { return GetImpl().DefaultValue(); } + std::string CurrentValue() const { return GetImpl().CurrentValue(); } + template + inline bool IsOfType() const { + return GetImpl().template IsOfType(); + } + T Get() const { + return flags_internal::FlagImplPeer::InvokeGet(GetImpl()); + } + void Set(const T& v) { + flags_internal::FlagImplPeer::InvokeSet(GetImpl(), v); + } + void InvokeCallback() { GetImpl().InvokeCallback(); } + + const CommandLineFlag& Reflect() const { + return flags_internal::FlagImplPeer::InvokeReflect(GetImpl()); + } + + // The data 
members are logically private, but they need to be public for + // this to be an aggregate type. + const char* name_; + const char* filename_; + const flags_internal::HelpGenFunc help_gen_; + const flags_internal::FlagDfltGenFunc default_value_gen_; + + mutable std::atomic inited_; + mutable flags_internal::Flag* impl_; +}; #endif // GetFlag() @@ -175,8 +265,6 @@ ABSL_NAMESPACE_END // // ABSL_FLAG(T, name, default_value, help).OnUpdate(callback); // -// `callback` should be convertible to `void (*)()`. -// // After any setting of the flag value, the callback will be called at least // once. A rapid sequence of changes may be merged together into the same // callback. No concurrent calls to the callback will be made for the same @@ -191,6 +279,7 @@ ABSL_NAMESPACE_END // Note: ABSL_FLAG.OnUpdate() does not have a public definition. Hence, this // comment serves as its API documentation. + // ----------------------------------------------------------------------------- // Implementation details below this section // ----------------------------------------------------------------------------- @@ -212,15 +301,13 @@ ABSL_NAMESPACE_END #if ABSL_FLAGS_STRIP_NAMES #define ABSL_FLAG_IMPL_FLAGNAME(txt) "" #define ABSL_FLAG_IMPL_FILENAME() "" -#define ABSL_FLAG_IMPL_REGISTRAR(T, flag) \ - absl::flags_internal::FlagRegistrar(ABSL_FLAG_IMPL_FLAG_PTR(flag), \ - nullptr) +#define ABSL_FLAG_IMPL_REGISTRAR(T, flag) \ + absl::flags_internal::FlagRegistrar(ABSL_FLAG_IMPL_FLAG_PTR(flag)) #else #define ABSL_FLAG_IMPL_FLAGNAME(txt) txt #define ABSL_FLAG_IMPL_FILENAME() __FILE__ -#define ABSL_FLAG_IMPL_REGISTRAR(T, flag) \ - absl::flags_internal::FlagRegistrar(ABSL_FLAG_IMPL_FLAG_PTR(flag), \ - __FILE__) +#define ABSL_FLAG_IMPL_REGISTRAR(T, flag) \ + absl::flags_internal::FlagRegistrar(ABSL_FLAG_IMPL_FLAG_PTR(flag)) #endif // ABSL_FLAG_IMPL macro definition conditional on ABSL_FLAGS_STRIP_HELP @@ -245,8 +332,8 @@ ABSL_NAMESPACE_END /* default value argument. 
That keeps temporaries alive */ \ /* long enough for NonConst to work correctly. */ \ static constexpr absl::string_view Value( \ - absl::string_view absl_flag_help = ABSL_FLAG_IMPL_FLAGHELP(txt)) { \ - return absl_flag_help; \ + absl::string_view v = ABSL_FLAG_IMPL_FLAGHELP(txt)) { \ + return v; \ } \ static std::string NonConst() { return std::string(Value()); } \ }; \ @@ -258,8 +345,8 @@ ABSL_NAMESPACE_END #define ABSL_FLAG_IMPL_DECLARE_DEF_VAL_WRAPPER(name, Type, default_value) \ struct AbslFlagDefaultGenFor##name { \ Type value = absl::flags_internal::InitDefaultValue(default_value); \ - static void Gen(void* absl_flag_default_loc) { \ - new (absl_flag_default_loc) Type(AbslFlagDefaultGenFor##name{}.value); \ + static void Gen(void* p) { \ + new (p) Type(AbslFlagDefaultGenFor##name{}.value); \ } \ }; @@ -269,7 +356,6 @@ ABSL_NAMESPACE_END // global name for FLAGS_no symbol, thus preventing the possibility // of defining two flags with names foo and nofoo. #define ABSL_FLAG_IMPL(Type, name, default_value, help) \ - extern ::absl::Flag FLAGS_##name; \ namespace absl /* block flags in namespaces */ {} \ ABSL_FLAG_IMPL_DECLARE_DEF_VAL_WRAPPER(name, Type, default_value) \ ABSL_FLAG_IMPL_DECLARE_HELP_WRAPPER(name, help) \ diff --git a/abseil-cpp/absl/flags/flag_benchmark.cc b/abseil-cpp/absl/flags/flag_benchmark.cc index fc572d9c..7b52c9bc 100644 --- a/abseil-cpp/absl/flags/flag_benchmark.cc +++ b/abseil-cpp/absl/flags/flag_benchmark.cc @@ -20,8 +20,6 @@ #include "absl/flags/flag.h" #include "absl/flags/marshalling.h" -#include "absl/flags/parse.h" -#include "absl/flags/reflection.h" #include "absl/strings/string_view.h" #include "absl/time/time.h" #include "absl/types/optional.h" @@ -101,148 +99,26 @@ std::string AbslUnparseFlag(const UDT&) { return ""; } A(AbslDuration) \ A(UDT) -#define REPLICATE_0(A, T, name, index) A(T, name, index) -#define REPLICATE_1(A, T, name, index) \ - REPLICATE_0(A, T, name, index##0) REPLICATE_0(A, T, name, index##1) -#define 
REPLICATE_2(A, T, name, index) \ - REPLICATE_1(A, T, name, index##0) REPLICATE_1(A, T, name, index##1) -#define REPLICATE_3(A, T, name, index) \ - REPLICATE_2(A, T, name, index##0) REPLICATE_2(A, T, name, index##1) -#define REPLICATE_4(A, T, name, index) \ - REPLICATE_3(A, T, name, index##0) REPLICATE_3(A, T, name, index##1) -#define REPLICATE_5(A, T, name, index) \ - REPLICATE_4(A, T, name, index##0) REPLICATE_4(A, T, name, index##1) -#define REPLICATE_6(A, T, name, index) \ - REPLICATE_5(A, T, name, index##0) REPLICATE_5(A, T, name, index##1) -#define REPLICATE_7(A, T, name, index) \ - REPLICATE_6(A, T, name, index##0) REPLICATE_6(A, T, name, index##1) -#define REPLICATE_8(A, T, name, index) \ - REPLICATE_7(A, T, name, index##0) REPLICATE_7(A, T, name, index##1) -#define REPLICATE_9(A, T, name, index) \ - REPLICATE_8(A, T, name, index##0) REPLICATE_8(A, T, name, index##1) -#if defined(_MSC_VER) -#define REPLICATE(A, T, name) \ - REPLICATE_7(A, T, name, 0) REPLICATE_7(A, T, name, 1) -#define SINGLE_FLAG(T) FLAGS_##T##_flag_00000000 -#else -#define REPLICATE(A, T, name) \ - REPLICATE_9(A, T, name, 0) REPLICATE_9(A, T, name, 1) -#define SINGLE_FLAG(T) FLAGS_##T##_flag_0000000000 -#endif -#define REPLICATE_ALL(A, T, name) \ - REPLICATE_9(A, T, name, 0) REPLICATE_9(A, T, name, 1) - -#define COUNT(T, name, index) +1 -constexpr size_t kNumFlags = 0 REPLICATE(COUNT, _, _); - -#if defined(__clang__) && defined(__linux__) -// Force the flags used for benchmarks into a separate ELF section. -// This ensures that, even when other parts of the code might change size, -// the layout of the flags across cachelines is kept constant. This makes -// benchmark results more reproducible across unrelated code changes. 
-#pragma clang section data = ".benchmark_flags" -#endif -#define DEFINE_FLAG(T, name, index) ABSL_FLAG(T, name##_##index, {}, ""); -#define FLAG_DEF(T) REPLICATE(DEFINE_FLAG, T, T##_flag); +#define FLAG_DEF(T) ABSL_FLAG(T, T##_flag, {}, ""); + BENCHMARKED_TYPES(FLAG_DEF) -#if defined(__clang__) && defined(__linux__) -#pragma clang section data = "" -#endif -// Register thousands of flags to bloat up the size of the registry. -// This mimics real life production binaries. -#define BLOAT_FLAG(_unused1, _unused2, index) \ - ABSL_FLAG(int, bloat_flag_##index, 0, ""); -REPLICATE_ALL(BLOAT_FLAG, _, _) namespace { -#define FLAG_PTR(T, name, index) &FLAGS_##name##_##index, -#define FLAG_PTR_ARR(T) \ - static constexpr absl::Flag* FlagPtrs_##T[] = { \ - REPLICATE(FLAG_PTR, T, T##_flag)}; -BENCHMARKED_TYPES(FLAG_PTR_ARR) - -#define BM_SingleGetFlag(T) \ - void BM_SingleGetFlag_##T(benchmark::State& state) { \ - for (auto _ : state) { \ - benchmark::DoNotOptimize(absl::GetFlag(SINGLE_FLAG(T))); \ - } \ - } \ - BENCHMARK(BM_SingleGetFlag_##T)->ThreadRange(1, 16); - -BENCHMARKED_TYPES(BM_SingleGetFlag) - -template -struct Accumulator { - using type = T; -}; -template <> -struct Accumulator { - using type = size_t; -}; -template <> -struct Accumulator { - using type = size_t; -}; -template <> -struct Accumulator { - using type = bool; -}; -template <> -struct Accumulator { - using type = bool; -}; -template <> -struct Accumulator { - using type = bool; -}; +#define BM_GetFlag(T) \ + void BM_GetFlag_##T(benchmark::State& state) { \ + for (auto _ : state) { \ + benchmark::DoNotOptimize(absl::GetFlag(FLAGS_##T##_flag)); \ + } \ + } \ + BENCHMARK(BM_GetFlag_##T); -template -void Accumulate(typename Accumulator::type& a, const T& f) { - a += f; -} -void Accumulate(bool& a, bool f) { a = a || f; } -void Accumulate(size_t& a, const std::string& f) { a += f.size(); } -void Accumulate(size_t& a, const std::vector& f) { a += f.size(); } -void Accumulate(bool& a, const OptionalInt& f) { a 
|= f.has_value(); } -void Accumulate(bool& a, const OptionalString& f) { a |= f.has_value(); } -void Accumulate(bool& a, const UDT& f) { - a |= reinterpret_cast(&f) & 0x1; -} - -#define BM_ManyGetFlag(T) \ - void BM_ManyGetFlag_##T(benchmark::State& state) { \ - Accumulator::type res = {}; \ - while (state.KeepRunningBatch(kNumFlags)) { \ - for (auto* flag_ptr : FlagPtrs_##T) { \ - Accumulate(res, absl::GetFlag(*flag_ptr)); \ - } \ - } \ - benchmark::DoNotOptimize(res); \ - } \ - BENCHMARK(BM_ManyGetFlag_##T)->ThreadRange(1, 8); - -BENCHMARKED_TYPES(BM_ManyGetFlag) - -void BM_ThreadedFindCommandLineFlag(benchmark::State& state) { - char dummy[] = "dummy"; - char* argv[] = {dummy}; - // We need to ensure that flags have been parsed. That is where the registry - // is finalized. - absl::ParseCommandLine(1, argv); - - while (state.KeepRunningBatch(kNumFlags)) { - for (auto* flag_ptr : FlagPtrs_bool) { - benchmark::DoNotOptimize(absl::FindCommandLineFlag(flag_ptr->Name())); - } - } -} -BENCHMARK(BM_ThreadedFindCommandLineFlag)->ThreadRange(1, 16); +BENCHMARKED_TYPES(BM_GetFlag) } // namespace -#define InvokeGetFlag(T) \ - T AbslInvokeGetFlag##T() { return absl::GetFlag(SINGLE_FLAG(T)); } \ +#define InvokeGetFlag(T) \ + T AbslInvokeGetFlag##T() { return absl::GetFlag(FLAGS_##T##_flag); } \ int odr##T = (benchmark::DoNotOptimize(AbslInvokeGetFlag##T), 1); BENCHMARKED_TYPES(InvokeGetFlag) diff --git a/abseil-cpp/absl/flags/flag_benchmark.lds b/abseil-cpp/absl/flags/flag_benchmark.lds deleted file mode 100644 index af115dfc..00000000 --- a/abseil-cpp/absl/flags/flag_benchmark.lds +++ /dev/null @@ -1,13 +0,0 @@ -/* This linker script forces the flags used by flags_benchmark - * into a separate page-aligned section. This isn't necessary for - * correctness but ensures that the benchmark results are more - * reproducible across unrelated code changes. - */ -SECTIONS { - .benchmark_flags : { - . 
= ALIGN(0x1000); - * (.benchmark_flags); - } -} - -INSERT AFTER .data diff --git a/abseil-cpp/absl/flags/flag_test.cc b/abseil-cpp/absl/flags/flag_test.cc index 845b4eba..654c8122 100644 --- a/abseil-cpp/absl/flags/flag_test.cc +++ b/abseil-cpp/absl/flags/flag_test.cc @@ -18,7 +18,6 @@ #include #include -#include #include #include #include @@ -27,7 +26,6 @@ #include "gtest/gtest.h" #include "absl/base/attributes.h" -#include "absl/base/macros.h" #include "absl/flags/config.h" #include "absl/flags/declare.h" #include "absl/flags/internal/flag.h" @@ -61,7 +59,6 @@ void TestCallback() {} struct UDT { UDT() = default; UDT(const UDT&) = default; - UDT& operator=(const UDT&) = default; }; bool AbslParseFlag(absl::string_view, UDT*, std::string*) { return true; } std::string AbslUnparseFlag(const UDT&) { return ""; } @@ -103,24 +100,24 @@ struct S2 { TEST_F(FlagTest, Traits) { EXPECT_EQ(flags::StorageKind(), - flags::FlagValueStorageKind::kValueAndInitBit); + flags::FlagValueStorageKind::kOneWordAtomic); EXPECT_EQ(flags::StorageKind(), - flags::FlagValueStorageKind::kValueAndInitBit); + flags::FlagValueStorageKind::kOneWordAtomic); EXPECT_EQ(flags::StorageKind(), flags::FlagValueStorageKind::kOneWordAtomic); EXPECT_EQ(flags::StorageKind(), flags::FlagValueStorageKind::kOneWordAtomic); +#if defined(ABSL_FLAGS_INTERNAL_ATOMIC_DOUBLE_WORD) EXPECT_EQ(flags::StorageKind(), - flags::FlagValueStorageKind::kSequenceLocked); + flags::FlagValueStorageKind::kTwoWordsAtomic); EXPECT_EQ(flags::StorageKind(), - flags::FlagValueStorageKind::kSequenceLocked); -// Make sure absl::Duration uses the sequence-locked code path. MSVC 2015 -// doesn't consider absl::Duration to be trivially-copyable so we just -// restrict this to clang as it seems to be a well-behaved compiler. 
-#ifdef __clang__ - EXPECT_EQ(flags::StorageKind(), - flags::FlagValueStorageKind::kSequenceLocked); + flags::FlagValueStorageKind::kTwoWordsAtomic); +#else + EXPECT_EQ(flags::StorageKind(), + flags::FlagValueStorageKind::kAlignedBuffer); + EXPECT_EQ(flags::StorageKind(), + flags::FlagValueStorageKind::kAlignedBuffer); #endif EXPECT_EQ(flags::StorageKind(), @@ -178,7 +175,7 @@ bool TestConstructionFor(const absl::Flag& f1, absl::Flag& f2) { EXPECT_EQ(absl::GetFlagReflectionHandle(f1).Help(), "literal help"); EXPECT_EQ(absl::GetFlagReflectionHandle(f1).Filename(), "file"); - flags::FlagRegistrar(ABSL_FLAG_IMPL_FLAG_PTR(f2), nullptr) + flags::FlagRegistrar(ABSL_FLAG_IMPL_FLAG_PTR(f2)) .OnUpdate(TestCallback); EXPECT_EQ(absl::GetFlagReflectionHandle(f2).Name(), "f2"); @@ -586,43 +583,6 @@ TEST_F(FlagTest, TestGetViaReflection) { // -------------------------------------------------------------------- -TEST_F(FlagTest, ConcurrentSetAndGet) { - static constexpr int kNumThreads = 8; - // Two arbitrary durations. One thread will concurrently flip the flag - // between these two values, while the other threads read it and verify - // that no other value is seen. - static const absl::Duration kValidDurations[] = { - absl::Seconds(int64_t{0x6cebf47a9b68c802}) + absl::Nanoseconds(229702057), - absl::Seconds(int64_t{0x23fec0307e4e9d3}) + absl::Nanoseconds(44555374)}; - absl::SetFlag(&FLAGS_test_flag_12, kValidDurations[0]); - - std::atomic stop{false}; - std::vector threads; - auto* handle = absl::FindCommandLineFlag("test_flag_12"); - for (int i = 0; i < kNumThreads; i++) { - threads.emplace_back([&]() { - while (!stop.load(std::memory_order_relaxed)) { - // Try loading the flag both directly and via a reflection - // handle. 
- absl::Duration v = absl::GetFlag(FLAGS_test_flag_12); - EXPECT_TRUE(v == kValidDurations[0] || v == kValidDurations[1]); - v = *handle->TryGet(); - EXPECT_TRUE(v == kValidDurations[0] || v == kValidDurations[1]); - } - }); - } - absl::Time end_time = absl::Now() + absl::Seconds(1); - int i = 0; - while (absl::Now() < end_time) { - absl::SetFlag(&FLAGS_test_flag_12, - kValidDurations[i++ % ABSL_ARRAYSIZE(kValidDurations)]); - } - stop.store(true, std::memory_order_relaxed); - for (auto& t : threads) t.join(); -} - -// -------------------------------------------------------------------- - int GetDflt1() { return 1; } } // namespace @@ -724,8 +684,6 @@ ABSL_FLAG(CustomUDT, test_flag_custom_udt, CustomUDT(), "test flag custom UDT"); namespace { TEST_F(FlagTest, TestCustomUDT) { - EXPECT_EQ(flags::StorageKind(), - flags::FlagValueStorageKind::kOneWordAtomic); EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_custom_udt), CustomUDT(1, 1)); absl::SetFlag(&FLAGS_test_flag_custom_udt, CustomUDT(2, 3)); EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_custom_udt), CustomUDT(2, 3)); @@ -854,9 +812,7 @@ ABSL_RETIRED_FLAG(bool, old_bool_flag, true, "old descr"); ABSL_RETIRED_FLAG(int, old_int_flag, (int)std::sqrt(10), "old descr"); ABSL_RETIRED_FLAG(std::string, old_str_flag, "", absl::StrCat("old ", "descr")); -namespace { - -bool initialization_order_fiasco_test ABSL_ATTRIBUTE_UNUSED = [] { +bool initializaion_order_fiasco_test = [] { // Iterate over all the flags during static initialization. // This should not trigger ASan's initialization-order-fiasco. 
auto* handle1 = absl::FindCommandLineFlag("flag_on_separate_file"); @@ -867,6 +823,8 @@ bool initialization_order_fiasco_test ABSL_ATTRIBUTE_UNUSED = [] { return true; }(); +namespace { + TEST_F(FlagTest, TestRetiredFlagRegistration) { auto* handle = absl::FindCommandLineFlag("old_bool_flag"); EXPECT_TRUE(handle->IsOfType()); @@ -946,221 +904,3 @@ TEST_F(FlagTest, TestNonTriviallyCopyableUDT) { } } // namespace - -// -------------------------------------------------------------------- - -namespace { - -enum TestE { A = 1, B = 2, C = 3 }; - -struct EnumWrapper { - EnumWrapper() : e(A) {} - - TestE e; -}; - -bool AbslParseFlag(absl::string_view, EnumWrapper*, std::string*) { - return true; -} -std::string AbslUnparseFlag(const EnumWrapper&) { return ""; } - -} // namespace - -ABSL_FLAG(EnumWrapper, test_enum_wrapper_flag, {}, "help"); - -TEST_F(FlagTest, TesTypeWrappingEnum) { - EnumWrapper value = absl::GetFlag(FLAGS_test_enum_wrapper_flag); - EXPECT_EQ(value.e, A); - - value.e = B; - absl::SetFlag(&FLAGS_test_enum_wrapper_flag, value); - value = absl::GetFlag(FLAGS_test_enum_wrapper_flag); - EXPECT_EQ(value.e, B); -} - -// This is a compile test to ensure macros are expanded within ABSL_FLAG and -// ABSL_DECLARE_FLAG. 
-#define FLAG_NAME_MACRO(name) prefix_ ## name -ABSL_DECLARE_FLAG(int, FLAG_NAME_MACRO(test_macro_named_flag)); -ABSL_FLAG(int, FLAG_NAME_MACRO(test_macro_named_flag), 0, - "Testing macro expansion within ABSL_FLAG"); - -TEST_F(FlagTest, MacroWithinAbslFlag) { - EXPECT_EQ(absl::GetFlag(FLAGS_prefix_test_macro_named_flag), 0); - absl::SetFlag(&FLAGS_prefix_test_macro_named_flag, 1); - EXPECT_EQ(absl::GetFlag(FLAGS_prefix_test_macro_named_flag), 1); -} - -// -------------------------------------------------------------------- - -#if defined(__GNUC__) && !defined(__clang__) && __GNUC__ <= 5 -#define ABSL_SKIP_OPTIONAL_BOOL_TEST_DUE_TO_GCC_BUG -#endif - -#ifndef ABSL_SKIP_OPTIONAL_BOOL_TEST_DUE_TO_GCC_BUG -ABSL_FLAG(absl::optional, optional_bool, absl::nullopt, "help"); -#endif -ABSL_FLAG(absl::optional, optional_int, {}, "help"); -ABSL_FLAG(absl::optional, optional_double, 9.3, "help"); -ABSL_FLAG(absl::optional, optional_string, absl::nullopt, "help"); -ABSL_FLAG(absl::optional, optional_duration, absl::nullopt, - "help"); -ABSL_FLAG(absl::optional>, optional_optional_int, - absl::nullopt, "help"); -#if defined(ABSL_HAVE_STD_OPTIONAL) && !defined(ABSL_USES_STD_OPTIONAL) -ABSL_FLAG(std::optional, std_optional_int64, std::nullopt, "help"); -#endif - -namespace { - -#ifndef ABSL_SKIP_OPTIONAL_BOOL_TEST_DUE_TO_GCC_BUG -TEST_F(FlagTest, TestOptionalBool) { - EXPECT_FALSE(absl::GetFlag(FLAGS_optional_bool).has_value()); - EXPECT_EQ(absl::GetFlag(FLAGS_optional_bool), absl::nullopt); - - absl::SetFlag(&FLAGS_optional_bool, false); - EXPECT_TRUE(absl::GetFlag(FLAGS_optional_bool).has_value()); - EXPECT_EQ(absl::GetFlag(FLAGS_optional_bool), false); - - absl::SetFlag(&FLAGS_optional_bool, true); - EXPECT_TRUE(absl::GetFlag(FLAGS_optional_bool).has_value()); - EXPECT_EQ(absl::GetFlag(FLAGS_optional_bool), true); - - absl::SetFlag(&FLAGS_optional_bool, absl::nullopt); - EXPECT_FALSE(absl::GetFlag(FLAGS_optional_bool).has_value()); - EXPECT_EQ(absl::GetFlag(FLAGS_optional_bool), 
absl::nullopt); -} - -// -------------------------------------------------------------------- -#endif - -TEST_F(FlagTest, TestOptionalInt) { - EXPECT_FALSE(absl::GetFlag(FLAGS_optional_int).has_value()); - EXPECT_EQ(absl::GetFlag(FLAGS_optional_int), absl::nullopt); - - absl::SetFlag(&FLAGS_optional_int, 0); - EXPECT_TRUE(absl::GetFlag(FLAGS_optional_int).has_value()); - EXPECT_EQ(absl::GetFlag(FLAGS_optional_int), 0); - - absl::SetFlag(&FLAGS_optional_int, 10); - EXPECT_TRUE(absl::GetFlag(FLAGS_optional_int).has_value()); - EXPECT_EQ(absl::GetFlag(FLAGS_optional_int), 10); - - absl::SetFlag(&FLAGS_optional_int, absl::nullopt); - EXPECT_FALSE(absl::GetFlag(FLAGS_optional_int).has_value()); - EXPECT_EQ(absl::GetFlag(FLAGS_optional_int), absl::nullopt); -} - -// -------------------------------------------------------------------- - -TEST_F(FlagTest, TestOptionalDouble) { - EXPECT_TRUE(absl::GetFlag(FLAGS_optional_double).has_value()); - EXPECT_DOUBLE_EQ(*absl::GetFlag(FLAGS_optional_double), 9.3); - - absl::SetFlag(&FLAGS_optional_double, 0.0); - EXPECT_TRUE(absl::GetFlag(FLAGS_optional_double).has_value()); - EXPECT_EQ(absl::GetFlag(FLAGS_optional_double), 0.0); - - absl::SetFlag(&FLAGS_optional_double, 1.234); - EXPECT_TRUE(absl::GetFlag(FLAGS_optional_double).has_value()); - EXPECT_DOUBLE_EQ(*absl::GetFlag(FLAGS_optional_double), 1.234); - - absl::SetFlag(&FLAGS_optional_double, absl::nullopt); - EXPECT_FALSE(absl::GetFlag(FLAGS_optional_double).has_value()); - EXPECT_EQ(absl::GetFlag(FLAGS_optional_double), absl::nullopt); -} - -// -------------------------------------------------------------------- - -TEST_F(FlagTest, TestOptionalString) { - EXPECT_FALSE(absl::GetFlag(FLAGS_optional_string).has_value()); - EXPECT_EQ(absl::GetFlag(FLAGS_optional_string), absl::nullopt); - - // Setting optional string to "" leads to undefined behavior. 
- - absl::SetFlag(&FLAGS_optional_string, " "); - EXPECT_TRUE(absl::GetFlag(FLAGS_optional_string).has_value()); - EXPECT_EQ(absl::GetFlag(FLAGS_optional_string), " "); - - absl::SetFlag(&FLAGS_optional_string, "QWERTY"); - EXPECT_TRUE(absl::GetFlag(FLAGS_optional_string).has_value()); - EXPECT_EQ(absl::GetFlag(FLAGS_optional_string), "QWERTY"); - - absl::SetFlag(&FLAGS_optional_string, absl::nullopt); - EXPECT_FALSE(absl::GetFlag(FLAGS_optional_string).has_value()); - EXPECT_EQ(absl::GetFlag(FLAGS_optional_string), absl::nullopt); -} - -// -------------------------------------------------------------------- - -TEST_F(FlagTest, TestOptionalDuration) { - EXPECT_FALSE(absl::GetFlag(FLAGS_optional_duration).has_value()); - EXPECT_EQ(absl::GetFlag(FLAGS_optional_duration), absl::nullopt); - - absl::SetFlag(&FLAGS_optional_duration, absl::ZeroDuration()); - EXPECT_TRUE(absl::GetFlag(FLAGS_optional_duration).has_value()); - EXPECT_EQ(absl::GetFlag(FLAGS_optional_duration), absl::Seconds(0)); - - absl::SetFlag(&FLAGS_optional_duration, absl::Hours(3)); - EXPECT_TRUE(absl::GetFlag(FLAGS_optional_duration).has_value()); - EXPECT_EQ(absl::GetFlag(FLAGS_optional_duration), absl::Hours(3)); - - absl::SetFlag(&FLAGS_optional_duration, absl::nullopt); - EXPECT_FALSE(absl::GetFlag(FLAGS_optional_duration).has_value()); - EXPECT_EQ(absl::GetFlag(FLAGS_optional_duration), absl::nullopt); -} - -// -------------------------------------------------------------------- - -TEST_F(FlagTest, TestOptionalOptional) { - EXPECT_FALSE(absl::GetFlag(FLAGS_optional_optional_int).has_value()); - EXPECT_EQ(absl::GetFlag(FLAGS_optional_optional_int), absl::nullopt); - - absl::optional nullint{absl::nullopt}; - - absl::SetFlag(&FLAGS_optional_optional_int, nullint); - EXPECT_TRUE(absl::GetFlag(FLAGS_optional_optional_int).has_value()); - EXPECT_NE(absl::GetFlag(FLAGS_optional_optional_int), nullint); - EXPECT_EQ(absl::GetFlag(FLAGS_optional_optional_int), - absl::optional>{nullint}); - - 
absl::SetFlag(&FLAGS_optional_optional_int, 0); - EXPECT_TRUE(absl::GetFlag(FLAGS_optional_optional_int).has_value()); - EXPECT_EQ(absl::GetFlag(FLAGS_optional_optional_int), 0); - - absl::SetFlag(&FLAGS_optional_optional_int, absl::optional{0}); - EXPECT_TRUE(absl::GetFlag(FLAGS_optional_optional_int).has_value()); - EXPECT_EQ(absl::GetFlag(FLAGS_optional_optional_int), 0); - EXPECT_EQ(absl::GetFlag(FLAGS_optional_optional_int), absl::optional{0}); - - absl::SetFlag(&FLAGS_optional_optional_int, absl::nullopt); - EXPECT_FALSE(absl::GetFlag(FLAGS_optional_optional_int).has_value()); - EXPECT_EQ(absl::GetFlag(FLAGS_optional_optional_int), absl::nullopt); -} - -// -------------------------------------------------------------------- - -#if defined(ABSL_HAVE_STD_OPTIONAL) && !defined(ABSL_USES_STD_OPTIONAL) - -TEST_F(FlagTest, TestStdOptional) { - EXPECT_FALSE(absl::GetFlag(FLAGS_std_optional_int64).has_value()); - EXPECT_EQ(absl::GetFlag(FLAGS_std_optional_int64), std::nullopt); - - absl::SetFlag(&FLAGS_std_optional_int64, 0); - EXPECT_TRUE(absl::GetFlag(FLAGS_std_optional_int64).has_value()); - EXPECT_EQ(absl::GetFlag(FLAGS_std_optional_int64), 0); - - absl::SetFlag(&FLAGS_std_optional_int64, 0xFFFFFFFFFF16); - EXPECT_TRUE(absl::GetFlag(FLAGS_std_optional_int64).has_value()); - EXPECT_EQ(absl::GetFlag(FLAGS_std_optional_int64), 0xFFFFFFFFFF16); - - absl::SetFlag(&FLAGS_std_optional_int64, std::nullopt); - EXPECT_FALSE(absl::GetFlag(FLAGS_std_optional_int64).has_value()); - EXPECT_EQ(absl::GetFlag(FLAGS_std_optional_int64), std::nullopt); -} - -// -------------------------------------------------------------------- - -#endif - -} // namespace diff --git a/abseil-cpp/absl/flags/internal/commandlineflag.h b/abseil-cpp/absl/flags/internal/commandlineflag.h index ebfe81ba..cb46fe2e 100644 --- a/abseil-cpp/absl/flags/internal/commandlineflag.h +++ b/abseil-cpp/absl/flags/internal/commandlineflag.h @@ -24,7 +24,7 @@ ABSL_NAMESPACE_BEGIN namespace flags_internal { // An 
alias for flag fast type id. This value identifies the flag value type -// similarly to typeid(T), without relying on RTTI being available. In most +// simialarly to typeid(T), without relying on RTTI being available. In most // cases this id is enough to uniquely identify the flag's value type. In a few // cases we'll have to resort to using actual RTTI implementation if it is // available. diff --git a/abseil-cpp/absl/flags/internal/flag.cc b/abseil-cpp/absl/flags/internal/flag.cc index 55892d77..1502e7f1 100644 --- a/abseil-cpp/absl/flags/internal/flag.cc +++ b/abseil-cpp/absl/flags/internal/flag.cc @@ -30,7 +30,6 @@ #include "absl/base/call_once.h" #include "absl/base/casts.h" #include "absl/base/config.h" -#include "absl/base/dynamic_annotations.h" #include "absl/base/optimization.h" #include "absl/flags/config.h" #include "absl/flags/internal/commandlineflag.h" @@ -97,8 +96,7 @@ class FlagState : public flags_internal::FlagStateInterface { counter_(counter) {} ~FlagState() override { - if (flag_impl_.ValueStorageKind() != FlagValueStorageKind::kAlignedBuffer && - flag_impl_.ValueStorageKind() != FlagValueStorageKind::kSequenceLocked) + if (flag_impl_.ValueStorageKind() != FlagValueStorageKind::kAlignedBuffer) return; flags_internal::Delete(flag_impl_.op_, value_.heap_allocated); } @@ -120,9 +118,11 @@ class FlagState : public flags_internal::FlagStateInterface { union SavedValue { explicit SavedValue(void* v) : heap_allocated(v) {} explicit SavedValue(int64_t v) : one_word(v) {} + explicit SavedValue(flags_internal::AlignedTwoWords v) : two_words(v) {} void* heap_allocated; int64_t one_word; + flags_internal::AlignedTwoWords two_words; } value_; bool modified_; bool on_command_line_; @@ -146,7 +146,12 @@ void FlagImpl::Init() { auto def_kind = static_cast(def_kind_); switch (ValueStorageKind()) { - case FlagValueStorageKind::kValueAndInitBit: + case FlagValueStorageKind::kAlignedBuffer: + // For this storage kind the default_value_ always points to gen_func + 
// during initialization. + assert(def_kind == FlagDefaultKind::kGenFunc); + (*default_value_.gen_func)(AlignedBufferValue()); + break; case FlagValueStorageKind::kOneWordAtomic: { alignas(int64_t) std::array buf{}; if (def_kind == FlagDefaultKind::kGenFunc) { @@ -155,33 +160,21 @@ void FlagImpl::Init() { assert(def_kind != FlagDefaultKind::kDynamicValue); std::memcpy(buf.data(), &default_value_, Sizeof(op_)); } - if (ValueStorageKind() == FlagValueStorageKind::kValueAndInitBit) { - // We presume here the memory layout of FlagValueAndInitBit struct. - uint8_t initialized = 1; - std::memcpy(buf.data() + Sizeof(op_), &initialized, - sizeof(initialized)); - } - // Type can contain valid uninitialized bits, e.g. padding. - ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(buf.data(), buf.size()); OneWordValue().store(absl::bit_cast(buf), std::memory_order_release); break; } - case FlagValueStorageKind::kSequenceLocked: { + case FlagValueStorageKind::kTwoWordsAtomic: { // For this storage kind the default_value_ always points to gen_func // during initialization. assert(def_kind == FlagDefaultKind::kGenFunc); - (*default_value_.gen_func)(AtomicBufferValue()); + alignas(AlignedTwoWords) std::array buf{}; + (*default_value_.gen_func)(buf.data()); + auto atomic_value = absl::bit_cast(buf); + TwoWordsValue().store(atomic_value, std::memory_order_release); break; } - case FlagValueStorageKind::kAlignedBuffer: - // For this storage kind the default_value_ always points to gen_func - // during initialization. 
- assert(def_kind == FlagDefaultKind::kGenFunc); - (*default_value_.gen_func)(AlignedBufferValue()); - break; } - seq_lock_.MarkInitialized(); } absl::Mutex* FlagImpl::DataGuard() const { @@ -208,7 +201,7 @@ void FlagImpl::AssertValidType(FlagFastTypeId rhs_type_id, if (lhs_runtime_type_id == rhs_runtime_type_id) return; -#ifdef ABSL_INTERNAL_HAS_RTTI +#if defined(ABSL_FLAGS_INTERNAL_HAS_RTTI) if (*lhs_runtime_type_id == *rhs_runtime_type_id) return; #endif @@ -236,25 +229,25 @@ std::unique_ptr FlagImpl::MakeInitValue() const { void FlagImpl::StoreValue(const void* src) { switch (ValueStorageKind()) { - case FlagValueStorageKind::kValueAndInitBit: + case FlagValueStorageKind::kAlignedBuffer: + Copy(op_, src, AlignedBufferValue()); + break; case FlagValueStorageKind::kOneWordAtomic: { - // Load the current value to avoid setting 'init' bit manualy. - int64_t one_word_val = OneWordValue().load(std::memory_order_acquire); + int64_t one_word_val = 0; std::memcpy(&one_word_val, src, Sizeof(op_)); OneWordValue().store(one_word_val, std::memory_order_release); - seq_lock_.IncrementModificationCount(); break; } - case FlagValueStorageKind::kSequenceLocked: { - seq_lock_.Write(AtomicBufferValue(), src, Sizeof(op_)); + case FlagValueStorageKind::kTwoWordsAtomic: { + AlignedTwoWords two_words_val{0, 0}; + std::memcpy(&two_words_val, src, Sizeof(op_)); + TwoWordsValue().store(two_words_val, std::memory_order_release); break; } - case FlagValueStorageKind::kAlignedBuffer: - Copy(op_, src, AlignedBufferValue()); - seq_lock_.IncrementModificationCount(); - break; } + modified_ = true; + ++counter_; InvokeCallback(); } @@ -273,10 +266,6 @@ FlagFastTypeId FlagImpl::TypeId() const { return flags_internal::FastTypeId(op_); } -int64_t FlagImpl::ModificationCount() const { - return seq_lock_.ModificationCount(); -} - bool FlagImpl::IsSpecifiedOnCommandLine() const { absl::MutexLock l(DataGuard()); return on_command_line_; @@ -292,22 +281,21 @@ std::string FlagImpl::DefaultValue() const 
{ std::string FlagImpl::CurrentValue() const { auto* guard = DataGuard(); // Make sure flag initialized switch (ValueStorageKind()) { - case FlagValueStorageKind::kValueAndInitBit: + case FlagValueStorageKind::kAlignedBuffer: { + absl::MutexLock l(guard); + return flags_internal::Unparse(op_, AlignedBufferValue()); + } case FlagValueStorageKind::kOneWordAtomic: { const auto one_word_val = absl::bit_cast>( OneWordValue().load(std::memory_order_acquire)); return flags_internal::Unparse(op_, one_word_val.data()); } - case FlagValueStorageKind::kSequenceLocked: { - std::unique_ptr cloned(flags_internal::Alloc(op_), - DynValueDeleter{op_}); - ReadSequenceLockedData(cloned.get()); - return flags_internal::Unparse(op_, cloned.get()); - } - case FlagValueStorageKind::kAlignedBuffer: { - absl::MutexLock l(guard); - return flags_internal::Unparse(op_, AlignedBufferValue()); + case FlagValueStorageKind::kTwoWordsAtomic: { + const auto two_words_val = + absl::bit_cast>( + TwoWordsValue().load(std::memory_order_acquire)); + return flags_internal::Unparse(op_, two_words_val.data()); } } @@ -354,26 +342,20 @@ std::unique_ptr FlagImpl::SaveState() { bool modified = modified_; bool on_command_line = on_command_line_; switch (ValueStorageKind()) { - case FlagValueStorageKind::kValueAndInitBit: + case FlagValueStorageKind::kAlignedBuffer: { + return absl::make_unique( + *this, flags_internal::Clone(op_, AlignedBufferValue()), modified, + on_command_line, counter_); + } case FlagValueStorageKind::kOneWordAtomic: { return absl::make_unique( *this, OneWordValue().load(std::memory_order_acquire), modified, - on_command_line, ModificationCount()); - } - case FlagValueStorageKind::kSequenceLocked: { - void* cloned = flags_internal::Alloc(op_); - // Read is guaranteed to be successful because we hold the lock. 
- bool success = - seq_lock_.TryRead(cloned, AtomicBufferValue(), Sizeof(op_)); - assert(success); - static_cast(success); - return absl::make_unique(*this, cloned, modified, - on_command_line, ModificationCount()); + on_command_line, counter_); } - case FlagValueStorageKind::kAlignedBuffer: { + case FlagValueStorageKind::kTwoWordsAtomic: { return absl::make_unique( - *this, flags_internal::Clone(op_, AlignedBufferValue()), modified, - on_command_line, ModificationCount()); + *this, TwoWordsValue().load(std::memory_order_acquire), modified, + on_command_line, counter_); } } return nullptr; @@ -381,18 +363,20 @@ std::unique_ptr FlagImpl::SaveState() { bool FlagImpl::RestoreState(const FlagState& flag_state) { absl::MutexLock l(DataGuard()); - if (flag_state.counter_ == ModificationCount()) { + + if (flag_state.counter_ == counter_) { return false; } switch (ValueStorageKind()) { - case FlagValueStorageKind::kValueAndInitBit: + case FlagValueStorageKind::kAlignedBuffer: + StoreValue(flag_state.value_.heap_allocated); + break; case FlagValueStorageKind::kOneWordAtomic: StoreValue(&flag_state.value_.one_word); break; - case FlagValueStorageKind::kSequenceLocked: - case FlagValueStorageKind::kAlignedBuffer: - StoreValue(flag_state.value_.heap_allocated); + case FlagValueStorageKind::kTwoWordsAtomic: + StoreValue(&flag_state.value_.two_words); break; } @@ -416,17 +400,16 @@ void* FlagImpl::AlignedBufferValue() const { return OffsetValue(); } -std::atomic* FlagImpl::AtomicBufferValue() const { - assert(ValueStorageKind() == FlagValueStorageKind::kSequenceLocked); - return OffsetValue>(); -} - std::atomic& FlagImpl::OneWordValue() const { - assert(ValueStorageKind() == FlagValueStorageKind::kOneWordAtomic || - ValueStorageKind() == FlagValueStorageKind::kValueAndInitBit); + assert(ValueStorageKind() == FlagValueStorageKind::kOneWordAtomic); return OffsetValue()->value; } +std::atomic& FlagImpl::TwoWordsValue() const { + assert(ValueStorageKind() == 
FlagValueStorageKind::kTwoWordsAtomic); + return OffsetValue()->value; +} + // Attempts to parse supplied `value` string using parsing routine in the `flag` // argument. If parsing successful, this function replaces the dst with newly // parsed value. In case if any error is encountered in either step, the error @@ -449,56 +432,26 @@ std::unique_ptr FlagImpl::TryParse( void FlagImpl::Read(void* dst) const { auto* guard = DataGuard(); // Make sure flag initialized switch (ValueStorageKind()) { - case FlagValueStorageKind::kValueAndInitBit: + case FlagValueStorageKind::kAlignedBuffer: { + absl::MutexLock l(guard); + flags_internal::CopyConstruct(op_, AlignedBufferValue(), dst); + break; + } case FlagValueStorageKind::kOneWordAtomic: { const int64_t one_word_val = OneWordValue().load(std::memory_order_acquire); std::memcpy(dst, &one_word_val, Sizeof(op_)); break; } - case FlagValueStorageKind::kSequenceLocked: { - ReadSequenceLockedData(dst); - break; - } - case FlagValueStorageKind::kAlignedBuffer: { - absl::MutexLock l(guard); - flags_internal::CopyConstruct(op_, AlignedBufferValue(), dst); + case FlagValueStorageKind::kTwoWordsAtomic: { + const AlignedTwoWords two_words_val = + TwoWordsValue().load(std::memory_order_acquire); + std::memcpy(dst, &two_words_val, Sizeof(op_)); break; } } } -int64_t FlagImpl::ReadOneWord() const { - assert(ValueStorageKind() == FlagValueStorageKind::kOneWordAtomic || - ValueStorageKind() == FlagValueStorageKind::kValueAndInitBit); - auto* guard = DataGuard(); // Make sure flag initialized - (void)guard; - return OneWordValue().load(std::memory_order_acquire); -} - -bool FlagImpl::ReadOneBool() const { - assert(ValueStorageKind() == FlagValueStorageKind::kValueAndInitBit); - auto* guard = DataGuard(); // Make sure flag initialized - (void)guard; - return absl::bit_cast>( - OneWordValue().load(std::memory_order_acquire)) - .value; -} - -void FlagImpl::ReadSequenceLockedData(void* dst) const { - int size = Sizeof(op_); - // Attempt to 
read using the sequence lock. - if (ABSL_PREDICT_TRUE(seq_lock_.TryRead(dst, AtomicBufferValue(), size))) { - return; - } - // We failed due to contention. Acquire the lock to prevent contention - // and try again. - absl::ReaderMutexLock l(DataGuard()); - bool success = seq_lock_.TryRead(dst, AtomicBufferValue(), size); - assert(success); - static_cast(success); -} - void FlagImpl::Write(const void* src) { absl::MutexLock l(DataGuard()); diff --git a/abseil-cpp/absl/flags/internal/flag.h b/abseil-cpp/absl/flags/internal/flag.h index 6154638c..370d8a02 100644 --- a/abseil-cpp/absl/flags/internal/flag.h +++ b/abseil-cpp/absl/flags/internal/flag.h @@ -29,7 +29,6 @@ #include "absl/base/attributes.h" #include "absl/base/call_once.h" -#include "absl/base/casts.h" #include "absl/base/config.h" #include "absl/base/optimization.h" #include "absl/base/thread_annotations.h" @@ -37,7 +36,6 @@ #include "absl/flags/config.h" #include "absl/flags/internal/commandlineflag.h" #include "absl/flags/internal/registry.h" -#include "absl/flags/internal/sequence_lock.h" #include "absl/flags/marshalling.h" #include "absl/meta/type_traits.h" #include "absl/strings/string_view.h" @@ -163,7 +161,7 @@ inline ptrdiff_t ValueOffset(FlagOpFn op) { // Returns an address of RTTI's typeid(T). template inline const std::type_info* GenRuntimeTypeId() { -#ifdef ABSL_INTERNAL_HAS_RTTI +#if defined(ABSL_FLAGS_INTERNAL_HAS_RTTI) return &typeid(T); #else return nullptr; @@ -290,7 +288,7 @@ constexpr T InitDefaultValue(EmptyBraces) { template ::value, int>::type = - ((void)GenT{}, 0)> + (GenT{}, 0)> constexpr FlagDefaultArg DefaultArg(int) { return {FlagDefaultSrc(GenT{}.value), FlagDefaultKind::kOneWord}; } @@ -303,54 +301,79 @@ constexpr FlagDefaultArg DefaultArg(char) { /////////////////////////////////////////////////////////////////////////////// // Flag current value auxiliary structs. 
-constexpr int64_t UninitializedFlagValue() { - return static_cast(0xababababababababll); -} - -template -using FlagUseValueAndInitBitStorage = std::integral_constant< - bool, absl::type_traits_internal::is_trivially_copyable::value && - std::is_default_constructible::value && (sizeof(T) < 8)>; +constexpr int64_t UninitializedFlagValue() { return 0xababababababababll; } template using FlagUseOneWordStorage = std::integral_constant< bool, absl::type_traits_internal::is_trivially_copyable::value && (sizeof(T) <= 8)>; -template -using FlagUseSequenceLockStorage = std::integral_constant< +#if defined(ABSL_FLAGS_INTERNAL_ATOMIC_DOUBLE_WORD) +// Clang does not always produce cmpxchg16b instruction when alignment of a 16 +// bytes type is not 16. +struct alignas(16) AlignedTwoWords { + int64_t first; + int64_t second; + + bool IsInitialized() const { + return first != flags_internal::UninitializedFlagValue(); + } +}; + +template +using FlagUseTwoWordsStorage = std::integral_constant< bool, absl::type_traits_internal::is_trivially_copyable::value && - (sizeof(T) > 8)>; + (sizeof(T) > 8) && (sizeof(T) <= 16)>; +#else +// This is actually unused and only here to avoid ifdefs in other palces. +struct AlignedTwoWords { + constexpr AlignedTwoWords() noexcept : dummy() {} + constexpr AlignedTwoWords(int64_t, int64_t) noexcept : dummy() {} + char dummy; + + bool IsInitialized() const { + std::abort(); + return true; + } +}; + +// This trait should be type dependent, otherwise SFINAE below will fail +template +using FlagUseTwoWordsStorage = + std::integral_constant; +#endif + +template +using FlagUseBufferStorage = + std::integral_constant::value && + !FlagUseTwoWordsStorage::value>; enum class FlagValueStorageKind : uint8_t { - kValueAndInitBit = 0, + kAlignedBuffer = 0, kOneWordAtomic = 1, - kSequenceLocked = 2, - kAlignedBuffer = 3, + kTwoWordsAtomic = 2 }; template static constexpr FlagValueStorageKind StorageKind() { - return FlagUseValueAndInitBitStorage::value - ? 
FlagValueStorageKind::kValueAndInitBit - : FlagUseOneWordStorage::value - ? FlagValueStorageKind::kOneWordAtomic - : FlagUseSequenceLockStorage::value - ? FlagValueStorageKind::kSequenceLocked - : FlagValueStorageKind::kAlignedBuffer; + return FlagUseBufferStorage::value + ? FlagValueStorageKind::kAlignedBuffer + : FlagUseOneWordStorage::value + ? FlagValueStorageKind::kOneWordAtomic + : FlagValueStorageKind::kTwoWordsAtomic; } struct FlagOneWordValue { - constexpr explicit FlagOneWordValue(int64_t v) : value(v) {} + constexpr FlagOneWordValue() : value(UninitializedFlagValue()) {} + std::atomic value; }; -template -struct alignas(8) FlagValueAndInitBit { - T value; - // Use an int instead of a bool to guarantee that a non-zero value has - // a bit set. - uint8_t init; +struct FlagTwoWordsValue { + constexpr FlagTwoWordsValue() + : value(AlignedTwoWords{UninitializedFlagValue(), 0}) {} + + std::atomic value; }; template -struct FlagValue : FlagOneWordValue { - constexpr FlagValue() : FlagOneWordValue(0) {} - bool Get(const SequenceLock&, T& dst) const { - int64_t storage = value.load(std::memory_order_acquire); - if (ABSL_PREDICT_FALSE(storage == 0)) { - return false; - } - dst = absl::bit_cast>(storage).value; - return true; - } +struct FlagValue { + bool Get(T&) const { return false; } + + alignas(T) char value[sizeof(T)]; }; template struct FlagValue : FlagOneWordValue { - constexpr FlagValue() : FlagOneWordValue(UninitializedFlagValue()) {} - bool Get(const SequenceLock&, T& dst) const { + bool Get(T& dst) const { int64_t one_word_val = value.load(std::memory_order_acquire); if (ABSL_PREDICT_FALSE(one_word_val == UninitializedFlagValue())) { return false; @@ -384,23 +400,15 @@ struct FlagValue : FlagOneWordValue { }; template -struct FlagValue { - bool Get(const SequenceLock& lock, T& dst) const { - return lock.TryRead(&dst, value_words, sizeof(T)); +struct FlagValue : FlagTwoWordsValue { + bool Get(T& dst) const { + AlignedTwoWords two_words_val = 
value.load(std::memory_order_acquire); + if (ABSL_PREDICT_FALSE(!two_words_val.IsInitialized())) { + return false; + } + std::memcpy(&dst, static_cast(&two_words_val), sizeof(T)); + return true; } - - static constexpr int kNumWords = - flags_internal::AlignUp(sizeof(T), sizeof(uint64_t)) / sizeof(uint64_t); - - alignas(T) alignas( - std::atomic) std::atomic value_words[kNumWords]; -}; - -template -struct FlagValue { - bool Get(const SequenceLock&, T&) const { return false; } - - alignas(T) char value[sizeof(T)]; }; /////////////////////////////////////////////////////////////////////////////// @@ -443,32 +451,13 @@ class FlagImpl final : public CommandLineFlag { def_kind_(static_cast(default_arg.kind)), modified_(false), on_command_line_(false), + counter_(0), callback_(nullptr), default_value_(default_arg.source), data_guard_{} {} // Constant access methods - int64_t ReadOneWord() const ABSL_LOCKS_EXCLUDED(*DataGuard()); - bool ReadOneBool() const ABSL_LOCKS_EXCLUDED(*DataGuard()); void Read(void* dst) const override ABSL_LOCKS_EXCLUDED(*DataGuard()); - void Read(bool* value) const ABSL_LOCKS_EXCLUDED(*DataGuard()) { - *value = ReadOneBool(); - } - template () == - FlagValueStorageKind::kOneWordAtomic, - int> = 0> - void Read(T* value) const ABSL_LOCKS_EXCLUDED(*DataGuard()) { - int64_t v = ReadOneWord(); - std::memcpy(value, static_cast(&v), sizeof(T)); - } - template () == - FlagValueStorageKind::kValueAndInitBit, - int>::type = 0> - void Read(T* value) const ABSL_LOCKS_EXCLUDED(*DataGuard()) { - *value = absl::bit_cast>(ReadOneWord()).value; - } // Mutating access methods void Write(const void* src) ABSL_LOCKS_EXCLUDED(*DataGuard()); @@ -509,17 +498,15 @@ class FlagImpl final : public CommandLineFlag { // flag.cc, we can define it in that file as well. template StorageT* OffsetValue() const; - // This is an accessor for a value stored in an aligned buffer storage - // used for non-trivially-copyable data types. 
+ // This is an accessor for a value stored in an aligned buffer storage. // Returns a mutable pointer to the start of a buffer. void* AlignedBufferValue() const; - - // The same as above, but used for sequencelock-protected storage. - std::atomic* AtomicBufferValue() const; - // This is an accessor for a value stored as one word atomic. Returns a // mutable reference to an atomic value. std::atomic& OneWordValue() const; + // This is an accessor for a value stored as two words atomic. Returns a + // mutable reference to an atomic value. + std::atomic& TwoWordsValue() const; // Attempts to parse supplied `value` string. If parsing is successful, // returns new value. Otherwise returns nullptr. @@ -529,12 +516,6 @@ class FlagImpl final : public CommandLineFlag { // Stores the flag value based on the pointer to the source. void StoreValue(const void* src) ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard()); - // Copy the flag data, protected by `seq_lock_` into `dst`. - // - // REQUIRES: ValueStorageKind() == kSequenceLocked. - void ReadSequenceLockedData(void* dst) const - ABSL_LOCKS_EXCLUDED(*DataGuard()); - FlagHelpKind HelpSourceKind() const { return static_cast(help_source_kind_); } @@ -560,8 +541,6 @@ class FlagImpl final : public CommandLineFlag { void CheckDefaultValueParsingRoundtrip() const override ABSL_LOCKS_EXCLUDED(*DataGuard()); - int64_t ModificationCount() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard()); - // Interfaces to save and restore flags to/from persistent state. // Returns current flag state or nullptr if flag does not support // saving and restoring a state. @@ -608,9 +587,8 @@ class FlagImpl final : public CommandLineFlag { // Unique tag for absl::call_once call to initialize this flag. absl::once_flag init_control_; - // Sequence lock / mutation counter. - flags_internal::SequenceLock seq_lock_; - + // Mutation counter + int64_t counter_ ABSL_GUARDED_BY(*DataGuard()); // Optional flag's callback and absl::Mutex to guard the invocations. 
FlagCallback* callback_ ABSL_GUARDED_BY(*DataGuard()); // Either a pointer to the function generating the default value based on the @@ -671,9 +649,7 @@ class Flag { impl_.AssertValidType(base_internal::FastTypeId(), &GenRuntimeTypeId); #endif - if (ABSL_PREDICT_FALSE(!value_.Get(impl_.seq_lock_, u.value))) { - impl_.Read(&u.value); - } + if (!value_.Get(u.value)) impl_.Read(&u.value); return std::move(u.value); } void Set(const T& v) { @@ -757,8 +733,8 @@ void* FlagOps(FlagOp op, const void* v1, void* v2, void* v3) { case FlagOp::kValueOffset: { // Round sizeof(FlagImp) to a multiple of alignof(FlagValue) to get the // offset of the data. - size_t round_to = alignof(FlagValue); - size_t offset = + ptrdiff_t round_to = alignof(FlagValue); + ptrdiff_t offset = (sizeof(FlagImpl) + round_to - 1) / round_to * round_to; return reinterpret_cast(offset); } @@ -774,9 +750,8 @@ struct FlagRegistrarEmpty {}; template class FlagRegistrar { public: - explicit FlagRegistrar(Flag& flag, const char* filename) : flag_(flag) { - if (do_register) - flags_internal::RegisterCommandLineFlag(flag_.impl_, filename); + explicit FlagRegistrar(Flag& flag) : flag_(flag) { + if (do_register) flags_internal::RegisterCommandLineFlag(flag_.impl_); } FlagRegistrar OnUpdate(FlagCallbackFunc cb) && { diff --git a/abseil-cpp/absl/flags/internal/flag_msvc.inc b/abseil-cpp/absl/flags/internal/flag_msvc.inc deleted file mode 100644 index c31bd27f..00000000 --- a/abseil-cpp/absl/flags/internal/flag_msvc.inc +++ /dev/null @@ -1,116 +0,0 @@ -// -// Copyright 2021 The Abseil Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Do not include this file directly. -// Include absl/flags/flag.h instead. - -// MSVC debug builds do not implement initialization with constexpr constructors -// correctly. To work around this we add a level of indirection, so that the -// class `absl::Flag` contains an `internal::Flag*` (instead of being an alias -// to that class) and dynamically allocates an instance when necessary. We also -// forward all calls to internal::Flag methods via trampoline methods. In this -// setup the `absl::Flag` class does not have constructor and virtual methods, -// all the data members are public and thus MSVC is able to initialize it at -// link time. To deal with multiple threads accessing the flag for the first -// time concurrently we use an atomic boolean indicating if flag object is -// initialized. We also employ the double-checked locking pattern where the -// second level of protection is a global Mutex, so if two threads attempt to -// construct the flag concurrently only one wins. -// -// This solution is based on a recomendation here: -// https://developercommunity.visualstudio.com/content/problem/336946/class-with-constexpr-constructor-not-using-static.html?childToView=648454#comment-648454 - -namespace flags_internal { -absl::Mutex* GetGlobalConstructionGuard(); -} // namespace flags_internal - -// Public methods of `absl::Flag` are NOT part of the Abseil Flags API. -// See https://abseil.io/docs/cpp/guides/flags -template -class Flag { - public: - // No constructor and destructor to ensure this is an aggregate type. 
- // Visual Studio 2015 still requires the constructor for class to be - // constexpr initializable. -#if _MSC_VER <= 1900 - constexpr Flag(const char* name, const char* filename, - const flags_internal::HelpGenFunc help_gen, - const flags_internal::FlagDfltGenFunc default_value_gen) - : name_(name), - filename_(filename), - help_gen_(help_gen), - default_value_gen_(default_value_gen), - inited_(false), - impl_(nullptr) {} -#endif - - flags_internal::Flag& GetImpl() const { - if (!inited_.load(std::memory_order_acquire)) { - absl::MutexLock l(flags_internal::GetGlobalConstructionGuard()); - - if (inited_.load(std::memory_order_acquire)) { - return *impl_; - } - - impl_ = new flags_internal::Flag( - name_, filename_, - {flags_internal::FlagHelpMsg(help_gen_), - flags_internal::FlagHelpKind::kGenFunc}, - {flags_internal::FlagDefaultSrc(default_value_gen_), - flags_internal::FlagDefaultKind::kGenFunc}); - inited_.store(true, std::memory_order_release); - } - - return *impl_; - } - - // Public methods of `absl::Flag` are NOT part of the Abseil Flags API. 
- // See https://abseil.io/docs/cpp/guides/flags - bool IsRetired() const { return GetImpl().IsRetired(); } - absl::string_view Name() const { return GetImpl().Name(); } - std::string Help() const { return GetImpl().Help(); } - bool IsModified() const { return GetImpl().IsModified(); } - bool IsSpecifiedOnCommandLine() const { - return GetImpl().IsSpecifiedOnCommandLine(); - } - std::string Filename() const { return GetImpl().Filename(); } - std::string DefaultValue() const { return GetImpl().DefaultValue(); } - std::string CurrentValue() const { return GetImpl().CurrentValue(); } - template - inline bool IsOfType() const { - return GetImpl().template IsOfType(); - } - T Get() const { - return flags_internal::FlagImplPeer::InvokeGet(GetImpl()); - } - void Set(const T& v) { - flags_internal::FlagImplPeer::InvokeSet(GetImpl(), v); - } - void InvokeCallback() { GetImpl().InvokeCallback(); } - - const CommandLineFlag& Reflect() const { - return flags_internal::FlagImplPeer::InvokeReflect(GetImpl()); - } - - // The data members are logically private, but they need to be public for - // this to be an aggregate type. - const char* name_; - const char* filename_; - const flags_internal::HelpGenFunc help_gen_; - const flags_internal::FlagDfltGenFunc default_value_gen_; - - mutable std::atomic inited_; - mutable flags_internal::Flag* impl_; -}; diff --git a/abseil-cpp/absl/flags/internal/registry.h b/abseil-cpp/absl/flags/internal/registry.h index 4b68c85f..1df2db79 100644 --- a/abseil-cpp/absl/flags/internal/registry.h +++ b/abseil-cpp/absl/flags/internal/registry.h @@ -30,15 +30,16 @@ namespace absl { ABSL_NAMESPACE_BEGIN namespace flags_internal { +// Executes specified visitor for each non-retired flag in the registry. +// Requires the caller hold the registry lock. +void ForEachFlagUnlocked(std::function visitor); // Executes specified visitor for each non-retired flag in the registry. While // callback are executed, the registry is locked and can't be changed. 
void ForEachFlag(std::function visitor); //----------------------------------------------------------------------------- -bool RegisterCommandLineFlag(CommandLineFlag&, const char* filename); - -void FinalizeRegistry(); +bool RegisterCommandLineFlag(CommandLineFlag&); //----------------------------------------------------------------------------- // Retired registrations: diff --git a/abseil-cpp/absl/flags/internal/sequence_lock.h b/abseil-cpp/absl/flags/internal/sequence_lock.h deleted file mode 100644 index 36318ab9..00000000 --- a/abseil-cpp/absl/flags/internal/sequence_lock.h +++ /dev/null @@ -1,187 +0,0 @@ -// -// Copyright 2020 The Abseil Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#ifndef ABSL_FLAGS_INTERNAL_SEQUENCE_LOCK_H_ -#define ABSL_FLAGS_INTERNAL_SEQUENCE_LOCK_H_ - -#include -#include - -#include -#include -#include - -#include "absl/base/optimization.h" - -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace flags_internal { - -// Align 'x' up to the nearest 'align' bytes. -inline constexpr size_t AlignUp(size_t x, size_t align) { - return align * ((x + align - 1) / align); -} - -// A SequenceLock implements lock-free reads. A sequence counter is incremented -// before and after each write, and readers access the counter before and after -// accessing the protected data. 
If the counter is verified to not change during -// the access, and the sequence counter value was even, then the reader knows -// that the read was race-free and valid. Otherwise, the reader must fall back -// to a Mutex-based code path. -// -// This particular SequenceLock starts in an "uninitialized" state in which -// TryRead() returns false. It must be enabled by calling MarkInitialized(). -// This serves as a marker that the associated flag value has not yet been -// initialized and a slow path needs to be taken. -// -// The memory reads and writes protected by this lock must use the provided -// `TryRead()` and `Write()` functions. These functions behave similarly to -// `memcpy()`, with one oddity: the protected data must be an array of -// `std::atomic`. This is to comply with the C++ standard, which -// considers data races on non-atomic objects to be undefined behavior. See "Can -// Seqlocks Get Along With Programming Language Memory Models?"[1] by Hans J. -// Boehm for more details. -// -// [1] https://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf -class SequenceLock { - public: - constexpr SequenceLock() : lock_(kUninitialized) {} - - // Mark that this lock is ready for use. - void MarkInitialized() { - assert(lock_.load(std::memory_order_relaxed) == kUninitialized); - lock_.store(0, std::memory_order_release); - } - - // Copy "size" bytes of data from "src" to "dst", protected as a read-side - // critical section of the sequence lock. - // - // Unlike traditional sequence lock implementations which loop until getting a - // clean read, this implementation returns false in the case of concurrent - // calls to `Write`. In such a case, the caller should fall back to a - // locking-based slow path. - // - // Returns false if the sequence lock was not yet marked as initialized. - // - // NOTE: If this returns false, "dst" may be overwritten with undefined - // (potentially uninitialized) data. 
- bool TryRead(void* dst, const std::atomic* src, size_t size) const { - // Acquire barrier ensures that no loads done by f() are reordered - // above the first load of the sequence counter. - int64_t seq_before = lock_.load(std::memory_order_acquire); - if (ABSL_PREDICT_FALSE(seq_before & 1) == 1) return false; - RelaxedCopyFromAtomic(dst, src, size); - // Another acquire fence ensures that the load of 'lock_' below is - // strictly ordered after the RelaxedCopyToAtomic call above. - std::atomic_thread_fence(std::memory_order_acquire); - int64_t seq_after = lock_.load(std::memory_order_relaxed); - return ABSL_PREDICT_TRUE(seq_before == seq_after); - } - - // Copy "size" bytes from "src" to "dst" as a write-side critical section - // of the sequence lock. Any concurrent readers will be forced to retry - // until they get a read that does not conflict with this write. - // - // This call must be externally synchronized against other calls to Write, - // but may proceed concurrently with reads. - void Write(std::atomic* dst, const void* src, size_t size) { - // We can use relaxed instructions to increment the counter since we - // are extenally synchronized. The std::atomic_thread_fence below - // ensures that the counter updates don't get interleaved with the - // copy to the data. - int64_t orig_seq = lock_.load(std::memory_order_relaxed); - assert((orig_seq & 1) == 0); // Must be initially unlocked. - lock_.store(orig_seq + 1, std::memory_order_relaxed); - - // We put a release fence between update to lock_ and writes to shared data. - // Thus all stores to shared data are effectively release operations and - // update to lock_ above cannot be re-ordered past any of them. Note that - // this barrier is not for the fetch_add above. A release barrier for the - // fetch_add would be before it, not after. 
- std::atomic_thread_fence(std::memory_order_release); - RelaxedCopyToAtomic(dst, src, size); - // "Release" semantics ensure that none of the writes done by - // RelaxedCopyToAtomic() can be reordered after the following modification. - lock_.store(orig_seq + 2, std::memory_order_release); - } - - // Return the number of times that Write() has been called. - // - // REQUIRES: This must be externally synchronized against concurrent calls to - // `Write()` or `IncrementModificationCount()`. - // REQUIRES: `MarkInitialized()` must have been previously called. - int64_t ModificationCount() const { - int64_t val = lock_.load(std::memory_order_relaxed); - assert(val != kUninitialized && (val & 1) == 0); - return val / 2; - } - - // REQUIRES: This must be externally synchronized against concurrent calls to - // `Write()` or `ModificationCount()`. - // REQUIRES: `MarkInitialized()` must have been previously called. - void IncrementModificationCount() { - int64_t val = lock_.load(std::memory_order_relaxed); - assert(val != kUninitialized); - lock_.store(val + 2, std::memory_order_relaxed); - } - - private: - // Perform the equivalent of "memcpy(dst, src, size)", but using relaxed - // atomics. - static void RelaxedCopyFromAtomic(void* dst, const std::atomic* src, - size_t size) { - char* dst_byte = static_cast(dst); - while (size >= sizeof(uint64_t)) { - uint64_t word = src->load(std::memory_order_relaxed); - std::memcpy(dst_byte, &word, sizeof(word)); - dst_byte += sizeof(word); - src++; - size -= sizeof(word); - } - if (size > 0) { - uint64_t word = src->load(std::memory_order_relaxed); - std::memcpy(dst_byte, &word, size); - } - } - - // Perform the equivalent of "memcpy(dst, src, size)", but using relaxed - // atomics. 
- static void RelaxedCopyToAtomic(std::atomic* dst, const void* src, - size_t size) { - const char* src_byte = static_cast(src); - while (size >= sizeof(uint64_t)) { - uint64_t word; - std::memcpy(&word, src_byte, sizeof(word)); - dst->store(word, std::memory_order_relaxed); - src_byte += sizeof(word); - dst++; - size -= sizeof(word); - } - if (size > 0) { - uint64_t word = 0; - std::memcpy(&word, src_byte, size); - dst->store(word, std::memory_order_relaxed); - } - } - - static constexpr int64_t kUninitialized = -1; - std::atomic lock_; -}; - -} // namespace flags_internal -ABSL_NAMESPACE_END -} // namespace absl - -#endif // ABSL_FLAGS_INTERNAL_SEQUENCE_LOCK_H_ diff --git a/abseil-cpp/absl/flags/internal/sequence_lock_test.cc b/abseil-cpp/absl/flags/internal/sequence_lock_test.cc deleted file mode 100644 index c3ec372e..00000000 --- a/abseil-cpp/absl/flags/internal/sequence_lock_test.cc +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2020 The Abseil Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-#include "absl/flags/internal/sequence_lock.h" - -#include -#include -#include // NOLINT(build/c++11) -#include -#include - -#include "gtest/gtest.h" -#include "absl/base/internal/sysinfo.h" -#include "absl/container/fixed_array.h" -#include "absl/time/clock.h" - -namespace { - -namespace flags = absl::flags_internal; - -class ConcurrentSequenceLockTest - : public testing::TestWithParam> { - public: - ConcurrentSequenceLockTest() - : buf_bytes_(std::get<0>(GetParam())), - num_threads_(std::get<1>(GetParam())) {} - - protected: - const int buf_bytes_; - const int num_threads_; -}; - -TEST_P(ConcurrentSequenceLockTest, ReadAndWrite) { - const int buf_words = - flags::AlignUp(buf_bytes_, sizeof(uint64_t)) / sizeof(uint64_t); - - // The buffer that will be protected by the SequenceLock. - absl::FixedArray> protected_buf(buf_words); - for (auto& v : protected_buf) v = -1; - - flags::SequenceLock seq_lock; - std::atomic stop{false}; - std::atomic bad_reads{0}; - std::atomic good_reads{0}; - std::atomic unsuccessful_reads{0}; - - // Start a bunch of threads which read 'protected_buf' under the sequence - // lock. The main thread will concurrently update 'protected_buf'. The updates - // always consist of an array of identical integers. The reader ensures that - // any data it reads matches that pattern (i.e. the reads are not "torn"). 
- std::vector threads; - for (int i = 0; i < num_threads_; i++) { - threads.emplace_back([&]() { - absl::FixedArray local_buf(buf_bytes_); - while (!stop.load(std::memory_order_relaxed)) { - if (seq_lock.TryRead(local_buf.data(), protected_buf.data(), - buf_bytes_)) { - bool good = true; - for (const auto& v : local_buf) { - if (v != local_buf[0]) good = false; - } - if (good) { - good_reads.fetch_add(1, std::memory_order_relaxed); - } else { - bad_reads.fetch_add(1, std::memory_order_relaxed); - } - } else { - unsuccessful_reads.fetch_add(1, std::memory_order_relaxed); - } - } - }); - } - while (unsuccessful_reads.load(std::memory_order_relaxed) < num_threads_) { - absl::SleepFor(absl::Milliseconds(1)); - } - seq_lock.MarkInitialized(); - - // Run a maximum of 5 seconds. On Windows, the scheduler behavior seems - // somewhat unfair and without an explicit timeout for this loop, the tests - // can run a long time. - absl::Time deadline = absl::Now() + absl::Seconds(5); - for (int i = 0; i < 100 && absl::Now() < deadline; i++) { - absl::FixedArray writer_buf(buf_bytes_); - for (auto& v : writer_buf) v = i; - seq_lock.Write(protected_buf.data(), writer_buf.data(), buf_bytes_); - absl::SleepFor(absl::Microseconds(10)); - } - stop.store(true, std::memory_order_relaxed); - for (auto& t : threads) t.join(); - ASSERT_GE(good_reads, 0); - ASSERT_EQ(bad_reads, 0); -} - -// Simple helper for generating a range of thread counts. -// Generates [low, low*scale, low*scale^2, ...high) -// (even if high is between low*scale^k and low*scale^(k+1)). 
-std::vector MultiplicativeRange(int low, int high, int scale) { - std::vector result; - for (int current = low; current < high; current *= scale) { - result.push_back(current); - } - result.push_back(high); - return result; -} - -#ifndef ABSL_HAVE_THREAD_SANITIZER -const int kMaxThreads = absl::base_internal::NumCPUs(); -#else -// With TSAN, a lot of threads contending for atomic access on the sequence -// lock make this test run too slowly. -const int kMaxThreads = std::min(absl::base_internal::NumCPUs(), 4); -#endif - -// Return all of the interesting buffer sizes worth testing: -// powers of two and adjacent values. -std::vector InterestingBufferSizes() { - std::vector ret; - for (int v : MultiplicativeRange(1, 128, 2)) { - ret.push_back(v); - if (v > 1) { - ret.push_back(v - 1); - } - ret.push_back(v + 1); - } - return ret; -} - -INSTANTIATE_TEST_SUITE_P( - TestManyByteSizes, ConcurrentSequenceLockTest, - testing::Combine( - // Buffer size (bytes). - testing::ValuesIn(InterestingBufferSizes()), - // Number of reader threads. - testing::ValuesIn(MultiplicativeRange(1, kMaxThreads, 2)))); - -// Simple single-threaded test, parameterized by the size of the buffer to be -// protected. -class SequenceLockTest : public testing::TestWithParam {}; - -TEST_P(SequenceLockTest, SingleThreaded) { - const int size = GetParam(); - absl::FixedArray> protected_buf( - flags::AlignUp(size, sizeof(uint64_t)) / sizeof(uint64_t)); - - flags::SequenceLock seq_lock; - seq_lock.MarkInitialized(); - - std::vector src_buf(size, 'x'); - seq_lock.Write(protected_buf.data(), src_buf.data(), size); - - std::vector dst_buf(size, '0'); - ASSERT_TRUE(seq_lock.TryRead(dst_buf.data(), protected_buf.data(), size)); - ASSERT_EQ(src_buf, dst_buf); -} -INSTANTIATE_TEST_SUITE_P(TestManyByteSizes, SequenceLockTest, - // Buffer size (bytes). 
- testing::Range(1, 128)); - -} // namespace diff --git a/abseil-cpp/absl/flags/internal/usage.cc b/abseil-cpp/absl/flags/internal/usage.cc index 949709e8..0805df31 100644 --- a/abseil-cpp/absl/flags/internal/usage.cc +++ b/abseil-cpp/absl/flags/internal/usage.cc @@ -37,26 +37,26 @@ #include "absl/strings/str_split.h" #include "absl/strings/string_view.h" -// Dummy global variables to prevent anyone else defining these. -bool FLAGS_help = false; -bool FLAGS_helpfull = false; -bool FLAGS_helpshort = false; -bool FLAGS_helppackage = false; -bool FLAGS_version = false; -bool FLAGS_only_check_args = false; -bool FLAGS_helpon = false; -bool FLAGS_helpmatch = false; +ABSL_FLAG(bool, help, false, + "show help on important flags for this binary [tip: all flags can " + "have two dashes]"); +ABSL_FLAG(bool, helpfull, false, "show help on all flags"); +ABSL_FLAG(bool, helpshort, false, + "show help on only the main module for this program"); +ABSL_FLAG(bool, helppackage, false, + "show help on all modules in the main package"); +ABSL_FLAG(bool, version, false, "show version and build info and exit"); +ABSL_FLAG(bool, only_check_args, false, "exit after checking all flags"); +ABSL_FLAG(std::string, helpon, "", + "show help on the modules named by this flag value"); +ABSL_FLAG(std::string, helpmatch, "", + "show help on modules whose name contains the specified substr"); namespace absl { ABSL_NAMESPACE_BEGIN namespace flags_internal { namespace { -using PerFlagFilter = std::function; - -// Maximum length size in a human readable format. -constexpr size_t kHrfMaxLineLength = 80; - // This class is used to emit an XML element with `tag` and `text`. // It adds opening and closing tags and escapes special characters in the text. // For example: @@ -109,12 +109,9 @@ class FlagHelpPrettyPrinter { public: // Pretty printer holds on to the std::ostream& reference to direct an output // to that stream. 
- FlagHelpPrettyPrinter(size_t max_line_len, size_t min_line_len, - size_t wrapped_line_indent, std::ostream& out) + FlagHelpPrettyPrinter(int max_line_len, std::ostream& out) : out_(out), max_line_len_(max_line_len), - min_line_len_(min_line_len), - wrapped_line_indent_(wrapped_line_indent), line_len_(0), first_line_(true) {} @@ -148,8 +145,7 @@ class FlagHelpPrettyPrinter { } // Write the token, ending the string first if necessary/possible. - if (!new_line && - (line_len_ + static_cast(token.size()) >= max_line_len_)) { + if (!new_line && (line_len_ + token.size() >= max_line_len_)) { EndLine(); new_line = true; } @@ -168,12 +164,13 @@ class FlagHelpPrettyPrinter { void StartLine() { if (first_line_) { - line_len_ = min_line_len_; + out_ << " "; + line_len_ = 4; first_line_ = false; } else { - line_len_ = min_line_len_ + wrapped_line_indent_; + out_ << " "; + line_len_ = 6; } - out_ << std::string(line_len_, ' '); } void EndLine() { out_ << '\n'; @@ -182,15 +179,13 @@ class FlagHelpPrettyPrinter { private: std::ostream& out_; - const size_t max_line_len_; - const size_t min_line_len_; - const size_t wrapped_line_indent_; - size_t line_len_; + const int max_line_len_; + int line_len_; bool first_line_; }; void FlagHelpHumanReadable(const CommandLineFlag& flag, std::ostream& out) { - FlagHelpPrettyPrinter printer(kHrfMaxLineLength, 4, 2, out); + FlagHelpPrettyPrinter printer(80, out); // Max line length is 80. // Flag name. printer.Write(absl::StrCat("--", flag.Name())); @@ -226,7 +221,7 @@ void FlagHelpHumanReadable(const CommandLineFlag& flag, std::ostream& out) { // If a flag's help message has been stripped (e.g. by adding '#define // STRIP_FLAG_HELP 1' then this flag will not be displayed by '--help' // and its variants. 
-void FlagsHelpImpl(std::ostream& out, PerFlagFilter filter_cb, +void FlagsHelpImpl(std::ostream& out, flags_internal::FlagKindFilter filter_cb, HelpFormat format, absl::string_view program_usage_message) { if (format == HelpFormat::kHumanReadable) { out << flags_internal::ShortProgramInvocationName() << ": " @@ -245,7 +240,7 @@ void FlagsHelpImpl(std::ostream& out, PerFlagFilter filter_cb, << XMLElement("usage", program_usage_message) << '\n'; } - // Ordered map of package name to + // Map of package name to // map of file name to // vector of flags in the file. // This map is used to output matching flags grouped by package and file @@ -261,11 +256,11 @@ void FlagsHelpImpl(std::ostream& out, PerFlagFilter filter_cb, // If the flag has been stripped, pretend that it doesn't exist. if (flag.Help() == flags_internal::kStrippedFlagHelp) return; - // Make sure flag satisfies the filter - if (!filter_cb(flag)) return; - std::string flag_filename = flag.Filename(); + // Make sure flag satisfies the filter + if (!filter_cb || !filter_cb(flag_filename)) return; + matching_flags[std::string(flags_internal::Package(flag_filename))] [flag_filename] .push_back(&flag); @@ -273,26 +268,20 @@ void FlagsHelpImpl(std::ostream& out, PerFlagFilter filter_cb, absl::string_view package_separator; // controls blank lines between packages absl::string_view file_separator; // controls blank lines between files - for (auto& package : matching_flags) { + for (const auto& package : matching_flags) { if (format == HelpFormat::kHumanReadable) { out << package_separator; package_separator = "\n\n"; } file_separator = ""; - for (auto& flags_in_file : package.second) { + for (const auto& flags_in_file : package.second) { if (format == HelpFormat::kHumanReadable) { out << file_separator << " Flags from " << flags_in_file.first << ":\n"; file_separator = "\n"; } - std::sort(std::begin(flags_in_file.second), - std::end(flags_in_file.second), - [](const CommandLineFlag* lhs, const CommandLineFlag* 
rhs) { - return lhs->Name() < rhs->Name(); - }); - for (const auto* flag : flags_in_file.second) { flags_internal::FlagHelp(out, *flag, format); } @@ -300,34 +289,15 @@ void FlagsHelpImpl(std::ostream& out, PerFlagFilter filter_cb, } if (format == HelpFormat::kHumanReadable) { - FlagHelpPrettyPrinter printer(kHrfMaxLineLength, 0, 0, out); - if (filter_cb && matching_flags.empty()) { - printer.Write("No flags matched.\n", true); + out << " No modules matched: use -helpfull\n"; } - printer.EndLine(); - printer.Write( - "Try --helpfull to get a list of all flags or --help=substring " - "shows help for flags which include specified substring in either " - "in the name, or description or path.\n", - true); } else { // The end of the document. out << "\n"; } } -void FlagsHelpImpl(std::ostream& out, - flags_internal::FlagKindFilter filename_filter_cb, - HelpFormat format, absl::string_view program_usage_message) { - FlagsHelpImpl( - out, - [&](const absl::CommandLineFlag& flag) { - return filename_filter_cb && filename_filter_cb(flag.Filename()); - }, - format, program_usage_message); -} - } // namespace // -------------------------------------------------------------------- @@ -339,7 +309,7 @@ void FlagHelp(std::ostream& out, const CommandLineFlag& flag, } // -------------------------------------------------------------------- -// Produces the help messages for all flags matching the filename filter. +// Produces the help messages for all flags matching the filter. // If filter is empty produces help messages for all flags. void FlagsHelp(std::ostream& out, absl::string_view filter, HelpFormat format, absl::string_view program_usage_message) { @@ -354,169 +324,66 @@ void FlagsHelp(std::ostream& out, absl::string_view filter, HelpFormat format, // If so, handles them appropriately. 
int HandleUsageFlags(std::ostream& out, absl::string_view program_usage_message) { - switch (GetFlagsHelpMode()) { - case HelpMode::kNone: - break; - case HelpMode::kImportant: - flags_internal::FlagsHelpImpl( - out, flags_internal::GetUsageConfig().contains_help_flags, - GetFlagsHelpFormat(), program_usage_message); - return 1; - - case HelpMode::kShort: - flags_internal::FlagsHelpImpl( - out, flags_internal::GetUsageConfig().contains_helpshort_flags, - GetFlagsHelpFormat(), program_usage_message); - return 1; - - case HelpMode::kFull: - flags_internal::FlagsHelp(out, "", GetFlagsHelpFormat(), - program_usage_message); - return 1; - - case HelpMode::kPackage: - flags_internal::FlagsHelpImpl( - out, flags_internal::GetUsageConfig().contains_helppackage_flags, - GetFlagsHelpFormat(), program_usage_message); - - return 1; - - case HelpMode::kMatch: { - std::string substr = GetFlagsHelpMatchSubstr(); - if (substr.empty()) { - // show all options - flags_internal::FlagsHelp(out, substr, GetFlagsHelpFormat(), - program_usage_message); - } else { - auto filter_cb = [&substr](const absl::CommandLineFlag& flag) { - if (absl::StrContains(flag.Name(), substr)) return true; - if (absl::StrContains(flag.Filename(), substr)) return true; - if (absl::StrContains(flag.Help(), substr)) return true; - - return false; - }; - flags_internal::FlagsHelpImpl( - out, filter_cb, HelpFormat::kHumanReadable, program_usage_message); - } - - return 1; - } - case HelpMode::kVersion: - if (flags_internal::GetUsageConfig().version_string) - out << flags_internal::GetUsageConfig().version_string(); - // Unlike help, we may be asking for version in a script, so return 0 - return 0; - - case HelpMode::kOnlyCheckArgs: - return 0; + if (absl::GetFlag(FLAGS_helpshort)) { + flags_internal::FlagsHelpImpl( + out, flags_internal::GetUsageConfig().contains_helpshort_flags, + HelpFormat::kHumanReadable, program_usage_message); + return 1; } - return -1; -} - -// 
-------------------------------------------------------------------- -// Globals representing usage reporting flags - -namespace { - -ABSL_CONST_INIT absl::Mutex help_attributes_guard(absl::kConstInit); -ABSL_CONST_INIT std::string* match_substr - ABSL_GUARDED_BY(help_attributes_guard) = nullptr; -ABSL_CONST_INIT HelpMode help_mode ABSL_GUARDED_BY(help_attributes_guard) = - HelpMode::kNone; -ABSL_CONST_INIT HelpFormat help_format ABSL_GUARDED_BY(help_attributes_guard) = - HelpFormat::kHumanReadable; - -} // namespace - -std::string GetFlagsHelpMatchSubstr() { - absl::MutexLock l(&help_attributes_guard); - if (match_substr == nullptr) return ""; - return *match_substr; -} - -void SetFlagsHelpMatchSubstr(absl::string_view substr) { - absl::MutexLock l(&help_attributes_guard); - if (match_substr == nullptr) match_substr = new std::string; - match_substr->assign(substr.data(), substr.size()); -} - -HelpMode GetFlagsHelpMode() { - absl::MutexLock l(&help_attributes_guard); - return help_mode; -} - -void SetFlagsHelpMode(HelpMode mode) { - absl::MutexLock l(&help_attributes_guard); - help_mode = mode; -} - -HelpFormat GetFlagsHelpFormat() { - absl::MutexLock l(&help_attributes_guard); - return help_format; -} + if (absl::GetFlag(FLAGS_helpfull)) { + // show all options + flags_internal::FlagsHelp(out, "", HelpFormat::kHumanReadable, + program_usage_message); + return 1; + } -void SetFlagsHelpFormat(HelpFormat format) { - absl::MutexLock l(&help_attributes_guard); - help_format = format; -} + if (!absl::GetFlag(FLAGS_helpon).empty()) { + flags_internal::FlagsHelp( + out, absl::StrCat("/", absl::GetFlag(FLAGS_helpon), "."), + HelpFormat::kHumanReadable, program_usage_message); + return 1; + } -// Deduces usage flags from the input argument in a form --name=value or -// --name. argument is already split into name and value before we call this -// function. 
-bool DeduceUsageFlags(absl::string_view name, absl::string_view value) { - if (absl::ConsumePrefix(&name, "help")) { - if (name == "") { - if (value.empty()) { - SetFlagsHelpMode(HelpMode::kImportant); - } else { - SetFlagsHelpMode(HelpMode::kMatch); - SetFlagsHelpMatchSubstr(value); - } - return true; - } + if (!absl::GetFlag(FLAGS_helpmatch).empty()) { + flags_internal::FlagsHelp(out, absl::GetFlag(FLAGS_helpmatch), + HelpFormat::kHumanReadable, + program_usage_message); + return 1; + } - if (name == "match") { - SetFlagsHelpMode(HelpMode::kMatch); - SetFlagsHelpMatchSubstr(value); - return true; - } + if (absl::GetFlag(FLAGS_help)) { + flags_internal::FlagsHelpImpl( + out, flags_internal::GetUsageConfig().contains_help_flags, + HelpFormat::kHumanReadable, program_usage_message); - if (name == "on") { - SetFlagsHelpMode(HelpMode::kMatch); - SetFlagsHelpMatchSubstr(absl::StrCat("/", value, ".")); - return true; - } + out << "\nTry --helpfull to get a list of all flags.\n"; - if (name == "full") { - SetFlagsHelpMode(HelpMode::kFull); - return true; - } + return 1; + } - if (name == "short") { - SetFlagsHelpMode(HelpMode::kShort); - return true; - } + if (absl::GetFlag(FLAGS_helppackage)) { + flags_internal::FlagsHelpImpl( + out, flags_internal::GetUsageConfig().contains_helppackage_flags, + HelpFormat::kHumanReadable, program_usage_message); - if (name == "package") { - SetFlagsHelpMode(HelpMode::kPackage); - return true; - } + out << "\nTry --helpfull to get a list of all flags.\n"; - return false; + return 1; } - if (name == "version") { - SetFlagsHelpMode(HelpMode::kVersion); - return true; + if (absl::GetFlag(FLAGS_version)) { + if (flags_internal::GetUsageConfig().version_string) + out << flags_internal::GetUsageConfig().version_string(); + // Unlike help, we may be asking for version in a script, so return 0 + return 0; } - if (name == "only_check_args") { - SetFlagsHelpMode(HelpMode::kOnlyCheckArgs); - return true; + if 
(absl::GetFlag(FLAGS_only_check_args)) { + return 0; } - return false; + return -1; } } // namespace flags_internal diff --git a/abseil-cpp/absl/flags/internal/usage.h b/abseil-cpp/absl/flags/internal/usage.h index c0bcac57..0c62dc4b 100644 --- a/abseil-cpp/absl/flags/internal/usage.h +++ b/abseil-cpp/absl/flags/internal/usage.h @@ -36,8 +36,7 @@ enum class HelpFormat { kHumanReadable, }; -// Streams the help message describing `flag` to `out`. -// The default value for `flag` is included in the output. +// Outputs the help message describing specific flag. void FlagHelp(std::ostream& out, const CommandLineFlag& flag, HelpFormat format = HelpFormat::kHumanReadable); @@ -66,39 +65,17 @@ void FlagsHelp(std::ostream& out, absl::string_view filter, int HandleUsageFlags(std::ostream& out, absl::string_view program_usage_message); -// -------------------------------------------------------------------- -// Globals representing usage reporting flags - -enum class HelpMode { - kNone, - kImportant, - kShort, - kFull, - kPackage, - kMatch, - kVersion, - kOnlyCheckArgs -}; - -// Returns substring to filter help output (--help=substr argument) -std::string GetFlagsHelpMatchSubstr(); -// Returns the requested help mode. -HelpMode GetFlagsHelpMode(); -// Returns the requested help format. -HelpFormat GetFlagsHelpFormat(); - -// These are corresponding setters to the attributes above. -void SetFlagsHelpMatchSubstr(absl::string_view); -void SetFlagsHelpMode(HelpMode); -void SetFlagsHelpFormat(HelpFormat); - -// Deduces usage flags from the input argument in a form --name=value or -// --name. argument is already split into name and value before we call this -// function. 
-bool DeduceUsageFlags(absl::string_view name, absl::string_view value); - } // namespace flags_internal ABSL_NAMESPACE_END } // namespace absl +ABSL_DECLARE_FLAG(bool, help); +ABSL_DECLARE_FLAG(bool, helpfull); +ABSL_DECLARE_FLAG(bool, helpshort); +ABSL_DECLARE_FLAG(bool, helppackage); +ABSL_DECLARE_FLAG(bool, version); +ABSL_DECLARE_FLAG(bool, only_check_args); +ABSL_DECLARE_FLAG(std::string, helpon); +ABSL_DECLARE_FLAG(std::string, helpmatch); + #endif // ABSL_FLAGS_INTERNAL_USAGE_H_ diff --git a/abseil-cpp/absl/flags/internal/usage_test.cc b/abseil-cpp/absl/flags/internal/usage_test.cc index 209a7be9..6e583fbe 100644 --- a/abseil-cpp/absl/flags/internal/usage_test.cc +++ b/abseil-cpp/absl/flags/internal/usage_test.cc @@ -20,7 +20,6 @@ #include #include -#include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/flags/flag.h" #include "absl/flags/internal/parse.h" @@ -46,12 +45,9 @@ static const char kTestUsageMessage[] = "Custom usage message"; struct UDT { UDT() = default; UDT(const UDT&) = default; - UDT& operator=(const UDT&) = default; }; -static bool AbslParseFlag(absl::string_view, UDT*, std::string*) { - return true; -} -static std::string AbslUnparseFlag(const UDT&) { return "UDT{}"; } +bool AbslParseFlag(absl::string_view, UDT*, std::string*) { return true; } +std::string AbslUnparseFlag(const UDT&) { return "UDT{}"; } ABSL_FLAG(UDT, usage_reporting_test_flag_05, {}, "usage_reporting_test_flag_05 help message"); @@ -91,11 +87,6 @@ class UsageReportingTest : public testing::Test { default_config.normalize_filename = &NormalizeFileName; absl::SetFlagsUsageConfig(default_config); } - ~UsageReportingTest() override { - flags::SetFlagsHelpMode(flags::HelpMode::kNone); - flags::SetFlagsHelpMatchSubstr(""); - flags::SetFlagsHelpFormat(flags::HelpFormat::kHumanReadable); - } private: absl::FlagSaver flag_saver_; @@ -106,19 +97,14 @@ class UsageReportingTest : public testing::Test { using UsageReportingDeathTest = UsageReportingTest; 
TEST_F(UsageReportingDeathTest, TestSetProgramUsageMessage) { -#if !defined(GTEST_HAS_ABSL) || !GTEST_HAS_ABSL - // Check for kTestUsageMessage set in main() below. EXPECT_EQ(absl::ProgramUsageMessage(), kTestUsageMessage); -#else - // Check for part of the usage message set by GoogleTest. - EXPECT_THAT(absl::ProgramUsageMessage(), - ::testing::HasSubstr( - "This program contains tests written using Google Test")); -#endif +#ifndef _WIN32 + // TODO(rogeeff): figure out why this does not work on Windows. EXPECT_DEATH_IF_SUPPORTED( absl::SetProgramUsageMessage("custom usage message"), - ::testing::HasSubstr("SetProgramUsageMessage() called twice")); + ".*SetProgramUsageMessage\\(\\) called twice.*"); +#endif } // -------------------------------------------------------------------- @@ -205,10 +191,6 @@ TEST_F(UsageReportingTest, TestFlagsHelpHRF) { Some more help. Even more long long long long long long long long long long long long help message.); default: ""; - -Try --helpfull to get a list of all flags or --help=substring shows help for -flags which include specified substring in either in the name, or description or -path. )"; std::stringstream test_buf_01; @@ -232,11 +214,7 @@ path. EXPECT_EQ(test_buf_04.str(), R"(usage_test: Custom usage message -No flags matched. - -Try --helpfull to get a list of all flags or --help=substring shows help for -flags which include specified substring in either in the name, or description or -path. + No modules matched: use -helpfull )"); std::stringstream test_buf_05; @@ -248,8 +226,12 @@ path. 
absl::StartsWith(test_out_str, "usage_test: Custom usage message")); EXPECT_TRUE(absl::StrContains( test_out_str, "Flags from absl/flags/internal/usage_test.cc:")); + EXPECT_TRUE(absl::StrContains(test_out_str, + "Flags from absl/flags/internal/usage.cc:")); EXPECT_TRUE( absl::StrContains(test_out_str, "-usage_reporting_test_flag_01 ")); + EXPECT_TRUE(absl::StrContains(test_out_str, "-help (show help")) + << test_out_str; } // -------------------------------------------------------------------- @@ -262,40 +244,7 @@ TEST_F(UsageReportingTest, TestNoUsageFlags) { // -------------------------------------------------------------------- TEST_F(UsageReportingTest, TestUsageFlag_helpshort) { - flags::SetFlagsHelpMode(flags::HelpMode::kShort); - - std::stringstream test_buf; - EXPECT_EQ(flags::HandleUsageFlags(test_buf, kTestUsageMessage), 1); - EXPECT_EQ(test_buf.str(), - R"(usage_test: Custom usage message - - Flags from absl/flags/internal/usage_test.cc: - --usage_reporting_test_flag_01 (usage_reporting_test_flag_01 help message); - default: 101; - --usage_reporting_test_flag_02 (usage_reporting_test_flag_02 help message); - default: false; - --usage_reporting_test_flag_03 (usage_reporting_test_flag_03 help message); - default: 1.03; - --usage_reporting_test_flag_04 (usage_reporting_test_flag_04 help message); - default: 1000000000000004; - --usage_reporting_test_flag_05 (usage_reporting_test_flag_05 help message); - default: UDT{}; - --usage_reporting_test_flag_06 (usage_reporting_test_flag_06 help message. - - Some more help. - Even more long long long long long long long long long long long long help - message.); default: ""; - -Try --helpfull to get a list of all flags or --help=substring shows help for -flags which include specified substring in either in the name, or description or -path. 
-)"); -} - -// -------------------------------------------------------------------- - -TEST_F(UsageReportingTest, TestUsageFlag_help_simple) { - flags::SetFlagsHelpMode(flags::HelpMode::kImportant); + absl::SetFlag(&FLAGS_helpshort, true); std::stringstream test_buf; EXPECT_EQ(flags::HandleUsageFlags(test_buf, kTestUsageMessage), 1); @@ -318,42 +267,13 @@ TEST_F(UsageReportingTest, TestUsageFlag_help_simple) { Some more help. Even more long long long long long long long long long long long long help message.); default: ""; - -Try --helpfull to get a list of all flags or --help=substring shows help for -flags which include specified substring in either in the name, or description or -path. )"); } // -------------------------------------------------------------------- -TEST_F(UsageReportingTest, TestUsageFlag_help_one_flag) { - flags::SetFlagsHelpMode(flags::HelpMode::kMatch); - flags::SetFlagsHelpMatchSubstr("usage_reporting_test_flag_06"); - - std::stringstream test_buf; - EXPECT_EQ(flags::HandleUsageFlags(test_buf, kTestUsageMessage), 1); - EXPECT_EQ(test_buf.str(), - R"(usage_test: Custom usage message - - Flags from absl/flags/internal/usage_test.cc: - --usage_reporting_test_flag_06 (usage_reporting_test_flag_06 help message. - - Some more help. - Even more long long long long long long long long long long long long help - message.); default: ""; - -Try --helpfull to get a list of all flags or --help=substring shows help for -flags which include specified substring in either in the name, or description or -path. 
-)"); -} - -// -------------------------------------------------------------------- - -TEST_F(UsageReportingTest, TestUsageFlag_help_multiple_flag) { - flags::SetFlagsHelpMode(flags::HelpMode::kMatch); - flags::SetFlagsHelpMatchSubstr("test_flag"); +TEST_F(UsageReportingTest, TestUsageFlag_help) { + absl::SetFlag(&FLAGS_help, true); std::stringstream test_buf; EXPECT_EQ(flags::HandleUsageFlags(test_buf, kTestUsageMessage), 1); @@ -377,16 +297,14 @@ TEST_F(UsageReportingTest, TestUsageFlag_help_multiple_flag) { Even more long long long long long long long long long long long long help message.); default: ""; -Try --helpfull to get a list of all flags or --help=substring shows help for -flags which include specified substring in either in the name, or description or -path. +Try --helpfull to get a list of all flags. )"); } // -------------------------------------------------------------------- TEST_F(UsageReportingTest, TestUsageFlag_helppackage) { - flags::SetFlagsHelpMode(flags::HelpMode::kPackage); + absl::SetFlag(&FLAGS_helppackage, true); std::stringstream test_buf; EXPECT_EQ(flags::HandleUsageFlags(test_buf, kTestUsageMessage), 1); @@ -410,16 +328,14 @@ TEST_F(UsageReportingTest, TestUsageFlag_helppackage) { Even more long long long long long long long long long long long long help message.); default: ""; -Try --helpfull to get a list of all flags or --help=substring shows help for -flags which include specified substring in either in the name, or description or -path. +Try --helpfull to get a list of all flags. 
)"); } // -------------------------------------------------------------------- TEST_F(UsageReportingTest, TestUsageFlag_version) { - flags::SetFlagsHelpMode(flags::HelpMode::kVersion); + absl::SetFlag(&FLAGS_version, true); std::stringstream test_buf; EXPECT_EQ(flags::HandleUsageFlags(test_buf, kTestUsageMessage), 0); @@ -433,7 +349,7 @@ TEST_F(UsageReportingTest, TestUsageFlag_version) { // -------------------------------------------------------------------- TEST_F(UsageReportingTest, TestUsageFlag_only_check_args) { - flags::SetFlagsHelpMode(flags::HelpMode::kOnlyCheckArgs); + absl::SetFlag(&FLAGS_only_check_args, true); std::stringstream test_buf; EXPECT_EQ(flags::HandleUsageFlags(test_buf, kTestUsageMessage), 0); @@ -443,22 +359,17 @@ TEST_F(UsageReportingTest, TestUsageFlag_only_check_args) { // -------------------------------------------------------------------- TEST_F(UsageReportingTest, TestUsageFlag_helpon) { - flags::SetFlagsHelpMode(flags::HelpMode::kMatch); - flags::SetFlagsHelpMatchSubstr("/bla-bla."); + absl::SetFlag(&FLAGS_helpon, "bla-bla"); std::stringstream test_buf_01; EXPECT_EQ(flags::HandleUsageFlags(test_buf_01, kTestUsageMessage), 1); EXPECT_EQ(test_buf_01.str(), R"(usage_test: Custom usage message -No flags matched. - -Try --helpfull to get a list of all flags or --help=substring shows help for -flags which include specified substring in either in the name, or description or -path. + No modules matched: use -helpfull )"); - flags::SetFlagsHelpMatchSubstr("/usage_test."); + absl::SetFlag(&FLAGS_helpon, "usage_test"); std::stringstream test_buf_02; EXPECT_EQ(flags::HandleUsageFlags(test_buf_02, kTestUsageMessage), 1); @@ -481,10 +392,6 @@ path. Some more help. Even more long long long long long long long long long long long long help message.); default: ""; - -Try --helpfull to get a list of all flags or --help=substring shows help for -flags which include specified substring in either in the name, or description or -path. 
)"); } @@ -495,10 +402,8 @@ path. int main(int argc, char* argv[]) { (void)absl::GetFlag(FLAGS_undefok); // Force linking of parse.cc flags::SetProgramInvocationName("usage_test"); -#if !defined(GTEST_HAS_ABSL) || !GTEST_HAS_ABSL - // GoogleTest calls absl::SetProgramUsageMessage() already. absl::SetProgramUsageMessage(kTestUsageMessage); -#endif ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); } diff --git a/abseil-cpp/absl/flags/marshalling.h b/abseil-cpp/absl/flags/marshalling.h index b1e2ffa5..0b503354 100644 --- a/abseil-cpp/absl/flags/marshalling.h +++ b/abseil-cpp/absl/flags/marshalling.h @@ -33,7 +33,6 @@ // * `double` // * `std::string` // * `std::vector` -// * `std::optional` // * `absl::LogSeverity` (provided natively for layering reasons) // // Note that support for integral types is implemented using overloads for @@ -66,42 +65,6 @@ // below.) // // ----------------------------------------------------------------------------- -// Optional Flags -// ----------------------------------------------------------------------------- -// -// The Abseil flags library supports flags of type `std::optional` where -// `T` is a type of one of the supported flags. We refer to this flag type as -// an "optional flag." An optional flag is either "valueless", holding no value -// of type `T` (indicating that the flag has not been set) or a value of type -// `T`. The valueless state in C++ code is represented by a value of -// `std::nullopt` for the optional flag. -// -// Using `std::nullopt` as an optional flag's default value allows you to check -// whether such a flag was ever specified on the command line: -// -// if (absl::GetFlag(FLAGS_foo).has_value()) { -// // flag was set on command line -// } else { -// // flag was not passed on command line -// } -// -// Using an optional flag in this manner avoids common workarounds for -// indicating such an unset flag (such as using sentinal values to indicate this -// state). 
-// -// An optional flag also allows a developer to pass a flag in an "unset" -// valueless state on the command line, allowing the flag to later be set in -// binary logic. An optional flag's valueless state is indicated by the special -// notation of passing the value as an empty string through the syntax `--flag=` -// or `--flag ""`. -// -// $ binary_with_optional --flag_in_unset_state= -// $ binary_with_optional --flag_in_unset_state "" -// -// Note: as a result of the above syntax requirements, an optional flag cannot -// be set to a `T` of any value which unparses to the empty string. -// -// ----------------------------------------------------------------------------- // Adding Type Support for Abseil Flags // ----------------------------------------------------------------------------- // @@ -120,7 +83,7 @@ // // AbslParseFlag converts from a string to OutputMode. // // Must be in same namespace as OutputMode. // -// // Parses an OutputMode from the command line flag value `text`. Returns +// // Parses an OutputMode from the command line flag value `text. Returns // // `true` and sets `*mode` on success; returns `false` and sets `*error` // // on failure. // bool AbslParseFlag(absl::string_view text, @@ -176,7 +139,7 @@ // // // Within the implementation, `AbslParseFlag()` will, in turn invoke // // `absl::ParseFlag()` on its constituent `int` and `std::string` types -// // (which have built-in Abseil flag support). +// // (which have built-in Abseil flag support. 
// // bool AbslParseFlag(absl::string_view text, MyFlagType* flag, // std::string* err) { @@ -199,27 +162,14 @@ #ifndef ABSL_FLAGS_MARSHALLING_H_ #define ABSL_FLAGS_MARSHALLING_H_ -#include "absl/base/config.h" - -#if defined(ABSL_HAVE_STD_OPTIONAL) && !defined(ABSL_USES_STD_OPTIONAL) -#include -#endif #include #include +#include "absl/base/config.h" #include "absl/strings/string_view.h" -#include "absl/types/optional.h" namespace absl { ABSL_NAMESPACE_BEGIN - -// Forward declaration to be used inside composable flag parse/unparse -// implementations -template -inline bool ParseFlag(absl::string_view input, T* dst, std::string* error); -template -inline std::string UnparseFlag(const T& v); - namespace flags_internal { // Overloads of `AbslParseFlag()` and `AbslUnparseFlag()` for fundamental types. @@ -238,36 +188,6 @@ bool AbslParseFlag(absl::string_view, double*, std::string*); bool AbslParseFlag(absl::string_view, std::string*, std::string*); bool AbslParseFlag(absl::string_view, std::vector*, std::string*); -template -bool AbslParseFlag(absl::string_view text, absl::optional* f, - std::string* err) { - if (text.empty()) { - *f = absl::nullopt; - return true; - } - T value; - if (!absl::ParseFlag(text, &value, err)) return false; - - *f = std::move(value); - return true; -} - -#if defined(ABSL_HAVE_STD_OPTIONAL) && !defined(ABSL_USES_STD_OPTIONAL) -template -bool AbslParseFlag(absl::string_view text, std::optional* f, - std::string* err) { - if (text.empty()) { - *f = std::nullopt; - return true; - } - T value; - if (!absl::ParseFlag(text, &value, err)) return false; - - *f = std::move(value); - return true; -} -#endif - template bool InvokeParseFlag(absl::string_view input, T* dst, std::string* err) { // Comment on next line provides a good compiler error message if T @@ -281,18 +201,6 @@ bool InvokeParseFlag(absl::string_view input, T* dst, std::string* err) { std::string AbslUnparseFlag(absl::string_view v); std::string AbslUnparseFlag(const std::vector&); 
-template -std::string AbslUnparseFlag(const absl::optional& f) { - return f.has_value() ? absl::UnparseFlag(*f) : ""; -} - -#if defined(ABSL_HAVE_STD_OPTIONAL) && !defined(ABSL_USES_STD_OPTIONAL) -template -std::string AbslUnparseFlag(const std::optional& f) { - return f.has_value() ? absl::UnparseFlag(*f) : ""; -} -#endif - template std::string Unparse(const T& v) { // Comment on next line provides a good compiler error message if T does not diff --git a/abseil-cpp/absl/flags/marshalling_test.cc b/abseil-cpp/absl/flags/marshalling_test.cc index 7b6d2ad5..4a64ce11 100644 --- a/abseil-cpp/absl/flags/marshalling_test.cc +++ b/abseil-cpp/absl/flags/marshalling_test.cc @@ -659,88 +659,6 @@ TEST(MarshallingTest, TestVectorOfStringParsing) { // -------------------------------------------------------------------- -TEST(MarshallingTest, TestOptionalBoolParsing) { - std::string err; - absl::optional value; - - EXPECT_TRUE(absl::ParseFlag("", &value, &err)); - EXPECT_FALSE(value.has_value()); - - EXPECT_TRUE(absl::ParseFlag("true", &value, &err)); - EXPECT_TRUE(value.has_value()); - EXPECT_TRUE(*value); - - EXPECT_TRUE(absl::ParseFlag("false", &value, &err)); - EXPECT_TRUE(value.has_value()); - EXPECT_FALSE(*value); - - EXPECT_FALSE(absl::ParseFlag("nullopt", &value, &err)); -} - -// -------------------------------------------------------------------- - -TEST(MarshallingTest, TestOptionalIntParsing) { - std::string err; - absl::optional value; - - EXPECT_TRUE(absl::ParseFlag("", &value, &err)); - EXPECT_FALSE(value.has_value()); - - EXPECT_TRUE(absl::ParseFlag("10", &value, &err)); - EXPECT_TRUE(value.has_value()); - EXPECT_EQ(*value, 10); - - EXPECT_TRUE(absl::ParseFlag("0x1F", &value, &err)); - EXPECT_TRUE(value.has_value()); - EXPECT_EQ(*value, 31); - - EXPECT_FALSE(absl::ParseFlag("nullopt", &value, &err)); -} - -// -------------------------------------------------------------------- - -TEST(MarshallingTest, TestOptionalDoubleParsing) { - std::string err; - 
absl::optional value; - - EXPECT_TRUE(absl::ParseFlag("", &value, &err)); - EXPECT_FALSE(value.has_value()); - - EXPECT_TRUE(absl::ParseFlag("1.11", &value, &err)); - EXPECT_TRUE(value.has_value()); - EXPECT_EQ(*value, 1.11); - - EXPECT_TRUE(absl::ParseFlag("-0.12", &value, &err)); - EXPECT_TRUE(value.has_value()); - EXPECT_EQ(*value, -0.12); - - EXPECT_FALSE(absl::ParseFlag("nullopt", &value, &err)); -} - -// -------------------------------------------------------------------- - -TEST(MarshallingTest, TestOptionalStringParsing) { - std::string err; - absl::optional value; - - EXPECT_TRUE(absl::ParseFlag("", &value, &err)); - EXPECT_FALSE(value.has_value()); - - EXPECT_TRUE(absl::ParseFlag(" ", &value, &err)); - EXPECT_TRUE(value.has_value()); - EXPECT_EQ(*value, " "); - - EXPECT_TRUE(absl::ParseFlag("aqswde", &value, &err)); - EXPECT_TRUE(value.has_value()); - EXPECT_EQ(*value, "aqswde"); - - EXPECT_TRUE(absl::ParseFlag("nullopt", &value, &err)); - EXPECT_TRUE(value.has_value()); - EXPECT_EQ(*value, "nullopt"); -} - -// -------------------------------------------------------------------- - TEST(MarshallingTest, TestBoolUnparsing) { EXPECT_EQ(absl::UnparseFlag(true), "true"); EXPECT_EQ(absl::UnparseFlag(false), "false"); @@ -890,90 +808,6 @@ TEST(MarshallingTest, TestStringUnparsing) { // -------------------------------------------------------------------- -TEST(MarshallingTest, TestOptionalBoolUnparsing) { - absl::optional value; - - EXPECT_EQ(absl::UnparseFlag(value), ""); - value = true; - EXPECT_EQ(absl::UnparseFlag(value), "true"); - value = false; - EXPECT_EQ(absl::UnparseFlag(value), "false"); - value = absl::nullopt; - EXPECT_EQ(absl::UnparseFlag(value), ""); -} - -// -------------------------------------------------------------------- - -TEST(MarshallingTest, TestOptionalIntUnparsing) { - absl::optional value; - - EXPECT_EQ(absl::UnparseFlag(value), ""); - value = 0; - EXPECT_EQ(absl::UnparseFlag(value), "0"); - value = -12; - 
EXPECT_EQ(absl::UnparseFlag(value), "-12"); - value = absl::nullopt; - EXPECT_EQ(absl::UnparseFlag(value), ""); -} - -// -------------------------------------------------------------------- - -TEST(MarshallingTest, TestOptionalDoubleUnparsing) { - absl::optional value; - - EXPECT_EQ(absl::UnparseFlag(value), ""); - value = 1.; - EXPECT_EQ(absl::UnparseFlag(value), "1"); - value = -1.23; - EXPECT_EQ(absl::UnparseFlag(value), "-1.23"); - value = absl::nullopt; - EXPECT_EQ(absl::UnparseFlag(value), ""); -} - -// -------------------------------------------------------------------- - -TEST(MarshallingTest, TestOptionalStringUnparsing) { - absl::optional strvalue; - EXPECT_EQ(absl::UnparseFlag(strvalue), ""); - - strvalue = "asdfg"; - EXPECT_EQ(absl::UnparseFlag(strvalue), "asdfg"); - - strvalue = " "; - EXPECT_EQ(absl::UnparseFlag(strvalue), " "); - - strvalue = ""; // It is UB to set an optional string flag to "" - EXPECT_EQ(absl::UnparseFlag(strvalue), ""); -} - -// -------------------------------------------------------------------- - -#if defined(ABSL_HAVE_STD_OPTIONAL) && !defined(ABSL_USES_STD_OPTIONAL) - -TEST(MarshallingTest, TestStdOptionalUnparsing) { - std::optional strvalue; - EXPECT_EQ(absl::UnparseFlag(strvalue), ""); - - strvalue = "asdfg"; - EXPECT_EQ(absl::UnparseFlag(strvalue), "asdfg"); - - strvalue = " "; - EXPECT_EQ(absl::UnparseFlag(strvalue), " "); - - strvalue = ""; // It is UB to set an optional string flag to "" - EXPECT_EQ(absl::UnparseFlag(strvalue), ""); - - std::optional intvalue; - EXPECT_EQ(absl::UnparseFlag(intvalue), ""); - - intvalue = 10; - EXPECT_EQ(absl::UnparseFlag(intvalue), "10"); -} - -// -------------------------------------------------------------------- - -#endif - template void TestRoundtrip(T v) { T new_v; diff --git a/abseil-cpp/absl/flags/parse.cc b/abseil-cpp/absl/flags/parse.cc index dd1a6796..4f4bb3d5 100644 --- a/abseil-cpp/absl/flags/parse.cc +++ b/abseil-cpp/absl/flags/parse.cc @@ -611,11 +611,6 @@ std::vector 
ParseCommandLineImpl(int argc, char* argv[], OnUndefinedFlag on_undef_flag) { ABSL_INTERNAL_CHECK(argc > 0, "Missing argv[0]"); - // Once parsing has started we will not have more flag registrations. - // If we did, they would be missing during parsing, which is a problem on - // itself. - flags_internal::FinalizeRegistry(); - // This routine does not return anything since we abort on failure. CheckDefaultValuesParsingRoundtrip(); @@ -713,11 +708,6 @@ std::vector ParseCommandLineImpl(int argc, char* argv[], std::tie(flag, is_negative) = LocateFlag(flag_name); if (flag == nullptr) { - // Usage flags are not modeled as Abseil flags. Locate them separately. - if (flags_internal::DeduceUsageFlags(flag_name, value)) { - continue; - } - if (on_undef_flag != OnUndefinedFlag::kIgnoreUndefined) { undefined_flag_names.emplace_back(arg_from_argv, std::string(flag_name)); diff --git a/abseil-cpp/absl/flags/parse_test.cc b/abseil-cpp/absl/flags/parse_test.cc index 8dc91db2..d35a6e47 100644 --- a/abseil-cpp/absl/flags/parse_test.cc +++ b/abseil-cpp/absl/flags/parse_test.cc @@ -28,7 +28,6 @@ #include "absl/flags/declare.h" #include "absl/flags/flag.h" #include "absl/flags/internal/parse.h" -#include "absl/flags/internal/usage.h" #include "absl/flags/reflection.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" @@ -46,7 +45,6 @@ using absl::base_internal::ScopedSetEnv; struct UDT { UDT() = default; UDT(const UDT&) = default; - UDT& operator=(const UDT&) = default; UDT(int v) : value(v) {} // NOLINT int value; @@ -209,9 +207,6 @@ namespace flags = absl::flags_internal; using testing::ElementsAreArray; class ParseTest : public testing::Test { - public: - ~ParseTest() override { flags::SetFlagsHelpMode(flags::HelpMode::kNone); } - private: absl::FlagSaver flag_saver_; }; @@ -856,7 +851,7 @@ TEST_F(ParseTest, TestIgnoreUndefinedFlags) { // -------------------------------------------------------------------- -TEST_F(ParseDeathTest, TestSimpleHelpFlagHandling) { 
+TEST_F(ParseDeathTest, TestHelpFlagHandling) { const char* in_args1[] = { "testbin", "--help", @@ -875,38 +870,11 @@ TEST_F(ParseDeathTest, TestSimpleHelpFlagHandling) { flags::UsageFlagsAction::kIgnoreUsage, flags::OnUndefinedFlag::kAbortIfUndefined); - EXPECT_EQ(flags::GetFlagsHelpMode(), flags::HelpMode::kImportant); EXPECT_EQ(absl::GetFlag(FLAGS_int_flag), 3); } // -------------------------------------------------------------------- -TEST_F(ParseDeathTest, TestSubstringHelpFlagHandling) { - const char* in_args1[] = { - "testbin", - "--help=abcd", - }; - - auto out_args1 = flags::ParseCommandLineImpl( - 2, const_cast(in_args1), flags::ArgvListAction::kRemoveParsedArgs, - flags::UsageFlagsAction::kIgnoreUsage, - flags::OnUndefinedFlag::kAbortIfUndefined); - - EXPECT_EQ(flags::GetFlagsHelpMode(), flags::HelpMode::kMatch); - EXPECT_EQ(flags::GetFlagsHelpMatchSubstr(), "abcd"); - - const char* in_args2[] = {"testbin", "--help", "some_positional_arg"}; - - auto out_args2 = flags::ParseCommandLineImpl( - 3, const_cast(in_args2), flags::ArgvListAction::kRemoveParsedArgs, - flags::UsageFlagsAction::kIgnoreUsage, - flags::OnUndefinedFlag::kAbortIfUndefined); - - EXPECT_EQ(flags::GetFlagsHelpMode(), flags::HelpMode::kImportant); -} - -// -------------------------------------------------------------------- - TEST_F(ParseTest, WasPresentOnCommandLine) { const char* in_args1[] = { "testbin", "arg1", "--bool_flag", diff --git a/abseil-cpp/absl/flags/reflection.cc b/abseil-cpp/absl/flags/reflection.cc index dbce4032..d7060221 100644 --- a/abseil-cpp/absl/flags/reflection.cc +++ b/abseil-cpp/absl/flags/reflection.cc @@ -17,12 +17,11 @@ #include -#include +#include #include #include "absl/base/config.h" #include "absl/base/thread_annotations.h" -#include "absl/container/flat_hash_map.h" #include "absl/flags/commandlineflag.h" #include "absl/flags/internal/private_handle_accessor.h" #include "absl/flags/internal/registry.h" @@ -50,30 +49,28 @@ class FlagRegistry { 
~FlagRegistry() = default; // Store a flag in this registry. Takes ownership of *flag. - void RegisterFlag(CommandLineFlag& flag, const char* filename); + void RegisterFlag(CommandLineFlag& flag); void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION(lock_) { lock_.Lock(); } void Unlock() ABSL_UNLOCK_FUNCTION(lock_) { lock_.Unlock(); } // Returns the flag object for the specified name, or nullptr if not found. // Will emit a warning if a 'retired' flag is specified. - CommandLineFlag* FindFlag(absl::string_view name); + CommandLineFlag* FindFlagLocked(absl::string_view name); static FlagRegistry& GlobalRegistry(); // returns a singleton registry private: friend class flags_internal::FlagSaverImpl; // reads all the flags in order // to copy them - friend void ForEachFlag(std::function visitor); - friend void FinalizeRegistry(); + friend void ForEachFlagUnlocked( + std::function visitor); - // The map from name to flag, for FindFlag(). - using FlagMap = absl::flat_hash_map; + // The map from name to flag, for FindFlagLocked(). + using FlagMap = std::map; using FlagIterator = FlagMap::iterator; using FlagConstIterator = FlagMap::const_iterator; FlagMap flags_; - std::vector flat_flags_; - std::atomic finalized_flags_{false}; absl::Mutex lock_; @@ -82,6 +79,15 @@ class FlagRegistry { FlagRegistry& operator=(const FlagRegistry&); }; +CommandLineFlag* FlagRegistry::FindFlagLocked(absl::string_view name) { + FlagConstIterator i = flags_.find(name); + if (i == flags_.end()) { + return nullptr; + } + + return i->second; +} + namespace { class FlagRegistryLock { @@ -95,37 +101,8 @@ class FlagRegistryLock { } // namespace -CommandLineFlag* FlagRegistry::FindFlag(absl::string_view name) { - if (finalized_flags_.load(std::memory_order_acquire)) { - // We could save some gcus here if we make `Name()` be non-virtual. - // We could move the `const char*` name to the base class. 
- auto it = std::partition_point( - flat_flags_.begin(), flat_flags_.end(), - [=](CommandLineFlag* f) { return f->Name() < name; }); - if (it != flat_flags_.end() && (*it)->Name() == name) return *it; - } - - FlagRegistryLock frl(*this); - auto it = flags_.find(name); - return it != flags_.end() ? it->second : nullptr; -} - -void FlagRegistry::RegisterFlag(CommandLineFlag& flag, const char* filename) { - if (filename != nullptr && - flag.Filename() != GetUsageConfig().normalize_filename(filename)) { - flags_internal::ReportUsageError( - absl::StrCat( - "Inconsistency between flag object and registration for flag '", - flag.Name(), - "', likely due to duplicate flags or an ODR violation. Relevant " - "files: ", - flag.Filename(), " and ", filename), - true); - std::exit(1); - } - +void FlagRegistry::RegisterFlag(CommandLineFlag& flag) { FlagRegistryLock registry_lock(*this); - std::pair ins = flags_.insert(FlagMap::value_type(flag.Name(), &flag)); if (ins.second == false) { // means the name was already in the map @@ -175,43 +152,27 @@ FlagRegistry& FlagRegistry::GlobalRegistry() { // -------------------------------------------------------------------- -void ForEachFlag(std::function visitor) { +void ForEachFlagUnlocked(std::function visitor) { FlagRegistry& registry = FlagRegistry::GlobalRegistry(); - - if (registry.finalized_flags_.load(std::memory_order_acquire)) { - for (const auto& i : registry.flat_flags_) visitor(*i); + for (FlagRegistry::FlagConstIterator i = registry.flags_.begin(); + i != registry.flags_.end(); ++i) { + visitor(*i->second); } +} +void ForEachFlag(std::function visitor) { + FlagRegistry& registry = FlagRegistry::GlobalRegistry(); FlagRegistryLock frl(registry); - for (const auto& i : registry.flags_) visitor(*i.second); + ForEachFlagUnlocked(visitor); } // -------------------------------------------------------------------- -bool RegisterCommandLineFlag(CommandLineFlag& flag, const char* filename) { - 
FlagRegistry::GlobalRegistry().RegisterFlag(flag, filename); +bool RegisterCommandLineFlag(CommandLineFlag& flag) { + FlagRegistry::GlobalRegistry().RegisterFlag(flag); return true; } -void FinalizeRegistry() { - auto& registry = FlagRegistry::GlobalRegistry(); - FlagRegistryLock frl(registry); - if (registry.finalized_flags_.load(std::memory_order_relaxed)) { - // Was already finalized. Ignore the second time. - return; - } - registry.flat_flags_.reserve(registry.flags_.size()); - for (const auto& f : registry.flags_) { - registry.flat_flags_.push_back(f.second); - } - std::sort(std::begin(registry.flat_flags_), std::end(registry.flat_flags_), - [](const CommandLineFlag* lhs, const CommandLineFlag* rhs) { - return lhs->Name() < rhs->Name(); - }); - registry.flags_.clear(); - registry.finalized_flags_.store(true, std::memory_order_release); -} - // -------------------------------------------------------------------- namespace { @@ -283,7 +244,7 @@ void Retire(const char* name, FlagFastTypeId type_id, char* buf) { static_assert(alignof(RetiredFlagObj) == kRetiredFlagObjAlignment, ""); auto* flag = ::new (static_cast(buf)) flags_internal::RetiredFlagObj(name, type_id); - FlagRegistry::GlobalRegistry().RegisterFlag(*flag, nullptr); + FlagRegistry::GlobalRegistry().RegisterFlag(*flag); } // -------------------------------------------------------------------- @@ -337,7 +298,9 @@ CommandLineFlag* FindCommandLineFlag(absl::string_view name) { if (name.empty()) return nullptr; flags_internal::FlagRegistry& registry = flags_internal::FlagRegistry::GlobalRegistry(); - return registry.FindFlag(name); + flags_internal::FlagRegistryLock frl(registry); + + return registry.FindFlagLocked(name); } // -------------------------------------------------------------------- @@ -345,7 +308,7 @@ CommandLineFlag* FindCommandLineFlag(absl::string_view name) { absl::flat_hash_map GetAllFlags() { absl::flat_hash_map res; flags_internal::ForEachFlag([&](CommandLineFlag& flag) { - if 
(!flag.IsRetired()) res.insert({flag.Name(), &flag}); + res.insert({flag.Name(), &flag}); }); return res; } diff --git a/abseil-cpp/absl/flags/reflection.h b/abseil-cpp/absl/flags/reflection.h index e6baf5de..4ce0ab6c 100644 --- a/abseil-cpp/absl/flags/reflection.h +++ b/abseil-cpp/absl/flags/reflection.h @@ -64,7 +64,7 @@ absl::flat_hash_map GetAllFlags(); // void MyFunc() { // absl::FlagSaver fs; // ... -// absl::SetFlag(&FLAGS_myFlag, otherValue); +// absl::SetFlag(FLAGS_myFlag, otherValue); // ... // } // scope of FlagSaver left, flags return to previous state // diff --git a/abseil-cpp/absl/flags/reflection_test.cc b/abseil-cpp/absl/flags/reflection_test.cc index 79cfa90c..1a1dcb4a 100644 --- a/abseil-cpp/absl/flags/reflection_test.cc +++ b/abseil-cpp/absl/flags/reflection_test.cc @@ -32,8 +32,12 @@ ABSL_FLAG(int, int_flag, 1, "int_flag help"); ABSL_FLAG(std::string, string_flag, "dflt", "string_flag help"); ABSL_RETIRED_FLAG(bool, bool_retired_flag, false, "bool_retired_flag help"); +ABSL_DECLARE_FLAG(bool, help); + namespace { +namespace flags = absl::flags_internal; + class ReflectionTest : public testing::Test { protected: void SetUp() override { flag_saver_ = absl::make_unique(); } @@ -62,9 +66,12 @@ TEST_F(ReflectionTest, TestFindCommandLineFlag) { // -------------------------------------------------------------------- TEST_F(ReflectionTest, TestGetAllFlags) { + (void)absl::GetFlag(FLAGS_help); // Force linking of usage flags. 
+ auto all_flags = absl::GetAllFlags(); EXPECT_NE(all_flags.find("int_flag"), all_flags.end()); - EXPECT_EQ(all_flags.find("bool_retired_flag"), all_flags.end()); + EXPECT_NE(all_flags.find("bool_retired_flag"), all_flags.end()); + EXPECT_NE(all_flags.find("help"), all_flags.end()); EXPECT_EQ(all_flags.find("some_undefined_flag"), all_flags.end()); std::vector flag_names_first_attempt; diff --git a/abseil-cpp/absl/flags/usage_config.cc b/abseil-cpp/absl/flags/usage_config.cc index 5d7426db..ae2f548a 100644 --- a/abseil-cpp/absl/flags/usage_config.cc +++ b/abseil-cpp/absl/flags/usage_config.cc @@ -34,8 +34,7 @@ extern "C" { // Additional report of fatal usage error message before we std::exit. Error is // fatal if is_fatal argument to ReportUsageError is true. -ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL( - AbslInternalReportFatalUsageError)(absl::string_view) {} +ABSL_ATTRIBUTE_WEAK void AbslInternalReportFatalUsageError(absl::string_view) {} } // extern "C" @@ -129,7 +128,7 @@ void ReportUsageError(absl::string_view msg, bool is_fatal) { std::cerr << "ERROR: " << msg << std::endl; if (is_fatal) { - ABSL_INTERNAL_C_SYMBOL(AbslInternalReportFatalUsageError)(msg); + AbslInternalReportFatalUsageError(msg); } } diff --git a/abseil-cpp/absl/flags/usage_config.h b/abseil-cpp/absl/flags/usage_config.h index ded70300..96eecea2 100644 --- a/abseil-cpp/absl/flags/usage_config.h +++ b/abseil-cpp/absl/flags/usage_config.h @@ -127,8 +127,7 @@ extern "C" { // Additional report of fatal usage error message before we std::exit. Error is // fatal if is_fatal argument to ReportUsageError is true. 
-void ABSL_INTERNAL_C_SYMBOL(AbslInternalReportFatalUsageError)( - absl::string_view); +void AbslInternalReportFatalUsageError(absl::string_view); } // extern "C" diff --git a/abseil-cpp/absl/functional/BUILD.bazel b/abseil-cpp/absl/functional/BUILD.bazel index c4fbce98..ebd9b99b 100644 --- a/abseil-cpp/absl/functional/BUILD.bazel +++ b/abseil-cpp/absl/functional/BUILD.bazel @@ -14,6 +14,7 @@ # limitations under the License. # +load("@rules_cc//cc:defs.bzl", "cc_library", "cc_test") load( "//absl:copts/configure_copts.bzl", "ABSL_DEFAULT_COPTS", @@ -25,40 +26,6 @@ package(default_visibility = ["//visibility:public"]) licenses(["notice"]) -cc_library( - name = "any_invocable", - srcs = ["internal/any_invocable.h"], - hdrs = ["any_invocable.h"], - copts = ABSL_DEFAULT_COPTS, - linkopts = ABSL_DEFAULT_LINKOPTS, - deps = [ - "//absl/base:base_internal", - "//absl/base:config", - "//absl/base:core_headers", - "//absl/meta:type_traits", - "//absl/utility", - ], -) - -cc_test( - name = "any_invocable_test", - srcs = [ - "any_invocable_test.cc", - "internal/any_invocable.h", - ], - copts = ABSL_TEST_COPTS, - linkopts = ABSL_DEFAULT_LINKOPTS, - deps = [ - ":any_invocable", - "//absl/base:base_internal", - "//absl/base:config", - "//absl/base:core_headers", - "//absl/meta:type_traits", - "//absl/utility", - "@com_google_googletest//:gtest_main", - ], -) - cc_library( name = "bind_front", srcs = ["internal/front_binder.h"], @@ -93,7 +60,6 @@ cc_library( linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ "//absl/base:base_internal", - "//absl/base:core_headers", "//absl/meta:type_traits", ], ) @@ -112,15 +78,14 @@ cc_test( ) cc_test( - name = "function_type_benchmark", + name = "function_ref_benchmark", srcs = [ - "function_type_benchmark.cc", + "function_ref_benchmark.cc", ], copts = ABSL_TEST_COPTS, tags = ["benchmark"], visibility = ["//visibility:private"], deps = [ - ":any_invocable", ":function_ref", "//absl/base:core_headers", "@com_github_google_benchmark//:benchmark_main", 
diff --git a/abseil-cpp/absl/functional/CMakeLists.txt b/abseil-cpp/absl/functional/CMakeLists.txt index c0f6eaaa..cda914f2 100644 --- a/abseil-cpp/absl/functional/CMakeLists.txt +++ b/abseil-cpp/absl/functional/CMakeLists.txt @@ -14,42 +14,6 @@ # limitations under the License. # -absl_cc_library( - NAME - any_invocable - SRCS - "internal/any_invocable.h" - HDRS - "any_invocable.h" - COPTS - ${ABSL_DEFAULT_COPTS} - DEPS - absl::base_internal - absl::config - absl::core_headers - absl::type_traits - absl::utility - PUBLIC -) - -absl_cc_test( - NAME - any_invocable_test - SRCS - "any_invocable_test.cc" - "internal/any_invocable.h" - COPTS - ${ABSL_DEFAULT_COPTS} - DEPS - absl::any_invocable - absl::base_internal - absl::config - absl::core_headers - absl::type_traits - absl::utility - GTest::gmock_main -) - absl_cc_library( NAME bind_front @@ -75,7 +39,7 @@ absl_cc_test( DEPS absl::bind_front absl::memory - GTest::gmock_main + gmock_main ) absl_cc_library( @@ -89,7 +53,6 @@ absl_cc_library( ${ABSL_DEFAULT_COPTS} DEPS absl::base_internal - absl::core_headers absl::meta PUBLIC ) @@ -105,5 +68,5 @@ absl_cc_test( absl::function_ref absl::memory absl::test_instance_tracker - GTest::gmock_main + gmock_main ) diff --git a/abseil-cpp/absl/functional/any_invocable.h b/abseil-cpp/absl/functional/any_invocable.h deleted file mode 100644 index 0c5faca0..00000000 --- a/abseil-cpp/absl/functional/any_invocable.h +++ /dev/null @@ -1,313 +0,0 @@ -// Copyright 2022 The Abseil Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -// -// ----------------------------------------------------------------------------- -// File: any_invocable.h -// ----------------------------------------------------------------------------- -// -// This header file defines an `absl::AnyInvocable` type that assumes ownership -// and wraps an object of an invocable type. (Invocable types adhere to the -// concept specified in https://en.cppreference.com/w/cpp/concepts/invocable.) -// -// In general, prefer `absl::AnyInvocable` when you need a type-erased -// function parameter that needs to take ownership of the type. -// -// NOTE: `absl::AnyInvocable` is similar to the C++23 `std::move_only_function` -// abstraction, but has a slightly different API and is not designed to be a -// drop-in replacement or C++11-compatible backfill of that type. - -#ifndef ABSL_FUNCTIONAL_ANY_INVOCABLE_H_ -#define ABSL_FUNCTIONAL_ANY_INVOCABLE_H_ - -#include -#include -#include -#include - -#include "absl/base/config.h" -#include "absl/functional/internal/any_invocable.h" -#include "absl/meta/type_traits.h" -#include "absl/utility/utility.h" - -namespace absl { -ABSL_NAMESPACE_BEGIN - -// absl::AnyInvocable -// -// `absl::AnyInvocable` is a functional wrapper type, like `std::function`, that -// assumes ownership of an invocable object. Unlike `std::function`, an -// `absl::AnyInvocable` is more type-safe and provides the following additional -// benefits: -// -// * Properly adheres to const correctness of the underlying type -// * Is move-only so avoids concurrency problems with copied invocables and -// unnecessary copies in general. -// * Supports reference qualifiers allowing it to perform unique actions (noted -// below). -// -// `absl::AnyInvocable` is a template, and an `absl::AnyInvocable` instantiation -// may wrap any invocable object with a compatible function signature, e.g. 
-// having arguments and return types convertible to types matching the -// `absl::AnyInvocable` signature, and also matching any stated reference -// qualifiers, as long as that type is moveable. It therefore provides broad -// type erasure for functional objects. -// -// An `absl::AnyInvocable` is typically used as a type-erased function parameter -// for accepting various functional objects: -// -// // Define a function taking an AnyInvocable parameter. -// void my_func(absl::AnyInvocable f) { -// ... -// }; -// -// // That function can accept any invocable type: -// -// // Accept a function reference. We don't need to move a reference. -// int func1() { return 0; }; -// my_func(func1); -// -// // Accept a lambda. We use std::move here because otherwise my_func would -// // copy the lambda. -// auto lambda = []() { return 0; }; -// my_func(std::move(lambda)); -// -// // Accept a function pointer. We don't need to move a function pointer. -// func2 = &func1; -// my_func(func2); -// -// // Accept an std::function by moving it. Note that the lambda is copyable -// // (satisfying std::function requirements) and moveable (satisfying -// // absl::AnyInvocable requirements). -// std::function func6 = []() { return 0; }; -// my_func(std::move(func6)); -// -// `AnyInvocable` also properly respects `const` qualifiers, reference -// qualifiers, and the `noexcept` specification (only in C++ 17 and beyond) as -// part of the user-specified function type (e.g. -// `AnyInvocable`). These qualifiers will be applied to -// the `AnyInvocable` object's `operator()`, and the underlying invocable must -// be compatible with those qualifiers. -// -// Comparison of const and non-const function types: -// -// // Store a closure inside of `func` with the function type `int()`. -// // Note that we have made `func` itself `const`. -// const AnyInvocable func = [](){ return 0; }; -// -// func(); // Compile-error: the passed type `int()` isn't `const`. 
-// -// // Store a closure inside of `const_func` with the function type -// // `int() const`. -// // Note that we have also made `const_func` itself `const`. -// const AnyInvocable const_func = [](){ return 0; }; -// -// const_func(); // Fine: `int() const` is `const`. -// -// In the above example, the call `func()` would have compiled if -// `std::function` were used even though the types are not const compatible. -// This is a bug, and using `absl::AnyInvocable` properly detects that bug. -// -// In addition to affecting the signature of `operator()`, the `const` and -// reference qualifiers of the function type also appropriately constrain which -// kinds of invocable objects you are allowed to place into the `AnyInvocable` -// instance. If you specify a function type that is const-qualified, then -// anything that you attempt to put into the `AnyInvocable` must be callable on -// a `const` instance of that type. -// -// Constraint example: -// -// // Fine because the lambda is callable when `const`. -// AnyInvocable func = [=](){ return 0; }; -// -// // This is a compile-error because the lambda isn't callable when `const`. -// AnyInvocable error = [=]() mutable { return 0; }; -// -// An `&&` qualifier can be used to express that an `absl::AnyInvocable` -// instance should be invoked at most once: -// -// // Invokes `continuation` with the logical result of an operation when -// // that operation completes (common in asynchronous code). -// void CallOnCompletion(AnyInvocable continuation) { -// int result_of_foo = foo(); -// -// // `std::move` is required because the `operator()` of `continuation` is -// // rvalue-reference qualified. -// std::move(continuation)(result_of_foo); -// } -// -// Credits to Matt Calabrese (https://github.com/mattcalabrese) for the original -// implementation. 
-template -class AnyInvocable : private internal_any_invocable::Impl { - private: - static_assert( - std::is_function::value, - "The template argument of AnyInvocable must be a function type."); - - using Impl = internal_any_invocable::Impl; - - public: - // The return type of Sig - using result_type = typename Impl::result_type; - - // Constructors - - // Constructs the `AnyInvocable` in an empty state. - AnyInvocable() noexcept = default; - AnyInvocable(std::nullptr_t) noexcept {} // NOLINT - - // Constructs the `AnyInvocable` from an existing `AnyInvocable` by a move. - // Note that `f` is not guaranteed to be empty after move-construction, - // although it may be. - AnyInvocable(AnyInvocable&& /*f*/) noexcept = default; - - // Constructs an `AnyInvocable` from an invocable object. - // - // Upon construction, `*this` is only empty if `f` is a function pointer or - // member pointer type and is null, or if `f` is an `AnyInvocable` that is - // empty. - template ::value>> - AnyInvocable(F&& f) // NOLINT - : Impl(internal_any_invocable::ConversionConstruct(), - std::forward(f)) {} - - // Constructs an `AnyInvocable` that holds an invocable object of type `T`, - // which is constructed in-place from the given arguments. - // - // Example: - // - // AnyInvocable func( - // absl::in_place_type, arg1, arg2); - // - template ::value>> - explicit AnyInvocable(absl::in_place_type_t, Args&&... args) - : Impl(absl::in_place_type>, - std::forward(args)...) { - static_assert(std::is_same>::value, - "The explicit template argument of in_place_type is required " - "to be an unqualified object type."); - } - - // Overload of the above constructor to support list-initialization. - template &, Args...>::value>> - explicit AnyInvocable(absl::in_place_type_t, - std::initializer_list ilist, Args&&... args) - : Impl(absl::in_place_type>, ilist, - std::forward(args)...) 
{ - static_assert(std::is_same>::value, - "The explicit template argument of in_place_type is required " - "to be an unqualified object type."); - } - - // Assignment Operators - - // Assigns an `AnyInvocable` through move-assignment. - // Note that `f` is not guaranteed to be empty after move-assignment - // although it may be. - AnyInvocable& operator=(AnyInvocable&& /*f*/) noexcept = default; - - // Assigns an `AnyInvocable` from a nullptr, clearing the `AnyInvocable`. If - // not empty, destroys the target, putting `*this` into an empty state. - AnyInvocable& operator=(std::nullptr_t) noexcept { - this->Clear(); - return *this; - } - - // Assigns an `AnyInvocable` from an existing `AnyInvocable` instance. - // - // Upon assignment, `*this` is only empty if `f` is a function pointer or - // member pointer type and is null, or if `f` is an `AnyInvocable` that is - // empty. - template ::value>> - AnyInvocable& operator=(F&& f) { - *this = AnyInvocable(std::forward(f)); - return *this; - } - - // Assigns an `AnyInvocable` from a reference to an invocable object. - // Upon assignment, stores a reference to the invocable object in the - // `AnyInvocable` instance. - template < - class F, - typename = absl::enable_if_t< - internal_any_invocable::CanAssignReferenceWrapper::value>> - AnyInvocable& operator=(std::reference_wrapper f) noexcept { - *this = AnyInvocable(f); - return *this; - } - - // Destructor - - // If not empty, destroys the target. - ~AnyInvocable() = default; - - // absl::AnyInvocable::swap() - // - // Exchanges the targets of `*this` and `other`. - void swap(AnyInvocable& other) noexcept { std::swap(*this, other); } - - // abl::AnyInvocable::operator bool() - // - // Returns `true` if `*this` is not empty. - explicit operator bool() const noexcept { return this->HasValue(); } - - // Invokes the target object of `*this`. `*this` must not be empty. 
- // - // Note: The signature of this function call operator is the same as the - // template parameter `Sig`. - using Impl::operator(); - - // Equality operators - - // Returns `true` if `*this` is empty. - friend bool operator==(const AnyInvocable& f, std::nullptr_t) noexcept { - return !f.HasValue(); - } - - // Returns `true` if `*this` is empty. - friend bool operator==(std::nullptr_t, const AnyInvocable& f) noexcept { - return !f.HasValue(); - } - - // Returns `false` if `*this` is empty. - friend bool operator!=(const AnyInvocable& f, std::nullptr_t) noexcept { - return f.HasValue(); - } - - // Returns `false` if `*this` is empty. - friend bool operator!=(std::nullptr_t, const AnyInvocable& f) noexcept { - return f.HasValue(); - } - - // swap() - // - // Exchanges the targets of `f1` and `f2`. - friend void swap(AnyInvocable& f1, AnyInvocable& f2) noexcept { f1.swap(f2); } - - private: - // Friending other instantiations is necessary for conversions. - template - friend class internal_any_invocable::CoreImpl; -}; - -ABSL_NAMESPACE_END -} // namespace absl - -#endif // ABSL_FUNCTIONAL_ANY_INVOCABLE_H_ diff --git a/abseil-cpp/absl/functional/any_invocable_test.cc b/abseil-cpp/absl/functional/any_invocable_test.cc deleted file mode 100644 index fb5e7792..00000000 --- a/abseil-cpp/absl/functional/any_invocable_test.cc +++ /dev/null @@ -1,1696 +0,0 @@ -// Copyright 2022 The Abseil Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#include "absl/functional/any_invocable.h" - -#include -#include -#include -#include - -#include "gtest/gtest.h" -#include "absl/base/config.h" -#include "absl/meta/type_traits.h" -#include "absl/utility/utility.h" - -static_assert(absl::internal_any_invocable::kStorageSize >= sizeof(void*), - "These tests assume that the small object storage is at least " - "the size of a pointer."); - -namespace { - -// Helper macro used to avoid spelling `noexcept` in language versions older -// than C++17, where it is not part of the type system, in order to avoid -// compilation failures and internal compiler errors. -#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L -#define ABSL_INTERNAL_NOEXCEPT_SPEC(noex) noexcept(noex) -#else -#define ABSL_INTERNAL_NOEXCEPT_SPEC(noex) -#endif - -// A dummy type we use when passing qualifiers to metafunctions -struct _ {}; - -template -struct Wrapper { - template ::value>> - Wrapper(U&&); // NOLINT -}; - -// This will cause a recursive trait instantiation if the SFINAE checks are -// not ordered correctly for constructibility. -static_assert(std::is_constructible>, - Wrapper>>::value, - ""); - -// A metafunction that takes the cv and l-value reference qualifiers that were -// associated with a function type (here passed via qualifiers of an object -// type), and . -template -struct QualifiersForThisImpl { - static_assert(std::is_object::value, ""); - using type = - absl::conditional_t::value, const This, This>&; -}; - -template -struct QualifiersForThisImpl - : QualifiersForThisImpl {}; - -template -struct QualifiersForThisImpl { - static_assert(std::is_object::value, ""); - using type = - absl::conditional_t::value, const This, This>&&; -}; - -template -using QualifiersForThis = - typename QualifiersForThisImpl::type; - -// A metafunction that takes the cv and l-value reference qualifier of T and -// applies them to U's function type qualifiers. 
-template -struct GiveQualifiersToFunImpl; - -template -struct GiveQualifiersToFunImpl { - using type = - absl::conditional_t::value, R(P...) const, R(P...)>; -}; - -template -struct GiveQualifiersToFunImpl { - using type = - absl::conditional_t::value, R(P...) const&, R(P...)&>; -}; - -template -struct GiveQualifiersToFunImpl { - using type = - absl::conditional_t::value, R(P...) const&&, R(P...) &&>; -}; - -// If noexcept is a part of the type system, then provide the noexcept forms. -#if defined(__cpp_noexcept_function_type) - -template -struct GiveQualifiersToFunImpl { - using type = absl::conditional_t::value, - R(P...) const noexcept, R(P...) noexcept>; -}; - -template -struct GiveQualifiersToFunImpl { - using type = - absl::conditional_t::value, R(P...) const & noexcept, - R(P...) & noexcept>; -}; - -template -struct GiveQualifiersToFunImpl { - using type = - absl::conditional_t::value, R(P...) const && noexcept, - R(P...) && noexcept>; -}; - -#endif // defined(__cpp_noexcept_function_type) - -template -using GiveQualifiersToFun = typename GiveQualifiersToFunImpl::type; - -// This is used in template parameters to decide whether or not to use a type -// that fits in the small object optimization storage. -enum class ObjSize { small, large }; - -// A base type that is used with classes as a means to insert an -// appropriately-sized dummy datamember when Size is ObjSize::large so that the -// user's class type is guaranteed to not fit in small object storage. -template -struct TypeErasedPadding; - -template <> -struct TypeErasedPadding {}; - -template <> -struct TypeErasedPadding { - char dummy_data[absl::internal_any_invocable::kStorageSize + 1] = {}; -}; - -struct Int { - Int(int v) noexcept : value(v) {} // NOLINT -#ifndef _MSC_VER - Int(Int&&) noexcept { - // NOTE: Prior to C++17, this not being called requires optimizations to - // take place when performing the top-level invocation. 
In practice, - // most supported compilers perform this optimization prior to C++17. - std::abort(); - } -#else - Int(Int&& v) noexcept = default; -#endif - operator int() && noexcept { return value; } // NOLINT - - int MemberFunctionAdd(int const& b, int c) noexcept { // NOLINT - return value + b + c; - } - - int value; -}; - -enum class Movable { no, yes, nothrow, trivial }; - -enum class NothrowCall { no, yes }; - -enum class Destructible { nothrow, trivial }; - -enum class ObjAlign : std::size_t { - normal = absl::internal_any_invocable::kAlignment, - large = absl::internal_any_invocable::kAlignment * 2, -}; - -// A function-object template that has knobs for each property that can affect -// how the object is stored in AnyInvocable. -template -struct add; - -#define ABSL_INTERNALS_ADD(qual) \ - template \ - struct alignas(static_cast(Alignment)) \ - add : TypeErasedPadding { \ - explicit add(int state_init) : state(state_init) {} \ - explicit add(std::initializer_list state_init, int tail) \ - : state(std::accumulate(std::begin(state_init), std::end(state_init), \ - 0) + \ - tail) {} \ - add(add&& other) = default; /*NOLINT*/ \ - Int operator()(int a, int b, int c) qual \ - ABSL_INTERNAL_NOEXCEPT_SPEC(CallExceptionSpec == NothrowCall::yes) { \ - return state + a + b + c; \ - } \ - int state; \ - }; \ - \ - template \ - struct alignas(static_cast(Alignment)) \ - add : TypeErasedPadding { \ - explicit add(int state_init) : state(state_init) {} \ - explicit add(std::initializer_list state_init, int tail) \ - : state(std::accumulate(std::begin(state_init), std::end(state_init), \ - 0) + \ - tail) {} \ - ~add() noexcept {} \ - add(add&& other) = default; /*NOLINT*/ \ - Int operator()(int a, int b, int c) qual \ - ABSL_INTERNAL_NOEXCEPT_SPEC(CallExceptionSpec == NothrowCall::yes) { \ - return state + a + b + c; \ - } \ - int state; \ - } - -// Explicitly specify an empty argument. 
-// MSVC (at least up to _MSC_VER 1931, if not beyond) warns that -// ABSL_INTERNALS_ADD() is an undefined zero-arg overload. -#define ABSL_INTERNALS_NOARG -ABSL_INTERNALS_ADD(ABSL_INTERNALS_NOARG); -#undef ABSL_INTERNALS_NOARG - -ABSL_INTERNALS_ADD(const); -ABSL_INTERNALS_ADD(&); -ABSL_INTERNALS_ADD(const&); -ABSL_INTERNALS_ADD(&&); // NOLINT -ABSL_INTERNALS_ADD(const&&); // NOLINT - -#undef ABSL_INTERNALS_ADD - -template -struct add : private add { - using Base = add; - - explicit add(int state_init) : Base(state_init) {} - - explicit add(std::initializer_list state_init, int tail) - : Base(state_init, tail) {} - - add(add&&) = delete; - - using Base::operator(); - using Base::state; -}; - -template -struct add : private add { - using Base = add; - - explicit add(int state_init) : Base(state_init) {} - - explicit add(std::initializer_list state_init, int tail) - : Base(state_init, tail) {} - - add(add&& other) noexcept(false) : Base(other.state) {} // NOLINT - - using Base::operator(); - using Base::state; -}; - -template -struct add : private add { - using Base = add; - - explicit add(int state_init) : Base(state_init) {} - - explicit add(std::initializer_list state_init, int tail) - : Base(state_init, tail) {} - - add(add&& other) noexcept : Base(other.state) {} - - using Base::operator(); - using Base::state; -}; - -// Actual non-member functions rather than function objects -Int add_function(Int&& a, int b, int c) noexcept { return a.value + b + c; } - -Int mult_function(Int&& a, int b, int c) noexcept { return a.value * b * c; } - -Int square_function(Int const&& a) noexcept { return a.value * a.value; } - -template -using AnyInvocable = absl::AnyInvocable; - -// Instantiations of this template contains all of the compile-time parameters -// for a given instantiation of the AnyInvocable test suite. 
-template -struct TestParams { - static constexpr Movable kMovability = Movability; - static constexpr Destructible kDestructibility = Destructibility; - using Qualifiers = Qual; - static constexpr NothrowCall kCallExceptionSpec = CallExceptionSpec; - static constexpr bool kIsNoexcept = kCallExceptionSpec == NothrowCall::yes; - static constexpr bool kIsRvalueQualified = - std::is_rvalue_reference::value; - static constexpr ObjSize kSize = Size; - static constexpr ObjAlign kAlignment = Alignment; - - // These types are used when testing with member object pointer Invocables - using UnqualifiedUnaryFunType = int(Int const&&) - ABSL_INTERNAL_NOEXCEPT_SPEC(CallExceptionSpec == NothrowCall::yes); - using UnaryFunType = GiveQualifiersToFun; - using MemObjPtrType = int(Int::*); - using UnaryAnyInvType = AnyInvocable; - using UnaryThisParamType = QualifiersForThis; - - template - static UnaryThisParamType ToUnaryThisParam(T&& fun) { - return static_cast(fun); - } - - // This function type intentionally uses 3 "kinds" of parameter types. - // - A user-defined type - // - A reference type - // - A scalar type - // - // These were chosen because internal forwarding takes place on parameters - // differently depending based on type properties (scalars are forwarded by - // value). - using ResultType = Int; - using AnyInvocableFunTypeNotNoexcept = Int(Int, const int&, int); - using UnqualifiedFunType = - typename std::conditional::type; - using FunType = GiveQualifiersToFun; - using MemFunPtrType = - typename std::conditional::type; - using AnyInvType = AnyInvocable; - using AddType = add; - using ThisParamType = QualifiersForThis; - - template - static ThisParamType ToThisParam(T&& fun) { - return static_cast(fun); - } - - // These typedefs are used when testing void return type covariance. 
- using UnqualifiedVoidFunType = - typename std::conditional::type; - using VoidFunType = GiveQualifiersToFun; - using VoidAnyInvType = AnyInvocable; - using VoidThisParamType = QualifiersForThis; - - template - static VoidThisParamType ToVoidThisParam(T&& fun) { - return static_cast(fun); - } - - using CompatibleAnyInvocableFunType = - absl::conditional_t::value, - GiveQualifiersToFun, - GiveQualifiersToFun>; - - using CompatibleAnyInvType = AnyInvocable; - - using IncompatibleInvocable = - absl::conditional_t::value, - GiveQualifiersToFun<_&, UnqualifiedFunType>(_::*), - GiveQualifiersToFun<_&&, UnqualifiedFunType>(_::*)>; -}; - -// Given a member-pointer type, this metafunction yields the target type of the -// pointer, not including the class-type. It is used to verify that the function -// call operator of AnyInvocable has the proper signature, corresponding to the -// function type that the user provided. -template -struct MemberTypeOfImpl; - -template -struct MemberTypeOfImpl { - using type = T; -}; - -template -using MemberTypeOf = typename MemberTypeOfImpl::type; - -template -struct IsMemberSwappableImpl : std::false_type { - static constexpr bool kIsNothrow = false; -}; - -template -struct IsMemberSwappableImpl< - T, absl::void_t().swap(std::declval()))>> - : std::true_type { - static constexpr bool kIsNothrow = - noexcept(std::declval().swap(std::declval())); -}; - -template -using IsMemberSwappable = IsMemberSwappableImpl; - -template -using IsNothrowMemberSwappable = - std::integral_constant::kIsNothrow>; - -template -class AnyInvTestBasic : public ::testing::Test {}; - -TYPED_TEST_SUITE_P(AnyInvTestBasic); - -TYPED_TEST_P(AnyInvTestBasic, DefaultConstruction) { - using AnyInvType = typename TypeParam::AnyInvType; - - AnyInvType fun; - - EXPECT_FALSE(static_cast(fun)); - - EXPECT_TRUE(std::is_nothrow_default_constructible::value); -} - -TYPED_TEST_P(AnyInvTestBasic, ConstructionNullptr) { - using AnyInvType = typename TypeParam::AnyInvType; - - 
AnyInvType fun = nullptr; - - EXPECT_FALSE(static_cast(fun)); - - EXPECT_TRUE( - (std::is_nothrow_constructible::value)); -} - -TYPED_TEST_P(AnyInvTestBasic, ConstructionNullFunctionPtr) { - using AnyInvType = typename TypeParam::AnyInvType; - using UnqualifiedFunType = typename TypeParam::UnqualifiedFunType; - - UnqualifiedFunType* const null_fun_ptr = nullptr; - AnyInvType fun = null_fun_ptr; - - EXPECT_FALSE(static_cast(fun)); -} - -TYPED_TEST_P(AnyInvTestBasic, ConstructionNullMemberFunctionPtr) { - using AnyInvType = typename TypeParam::AnyInvType; - using MemFunPtrType = typename TypeParam::MemFunPtrType; - - const MemFunPtrType null_mem_fun_ptr = nullptr; - AnyInvType fun = null_mem_fun_ptr; - - EXPECT_FALSE(static_cast(fun)); -} - -TYPED_TEST_P(AnyInvTestBasic, ConstructionNullMemberObjectPtr) { - using UnaryAnyInvType = typename TypeParam::UnaryAnyInvType; - using MemObjPtrType = typename TypeParam::MemObjPtrType; - - const MemObjPtrType null_mem_obj_ptr = nullptr; - UnaryAnyInvType fun = null_mem_obj_ptr; - - EXPECT_FALSE(static_cast(fun)); -} - -TYPED_TEST_P(AnyInvTestBasic, ConstructionMemberFunctionPtr) { - using AnyInvType = typename TypeParam::AnyInvType; - - AnyInvType fun = &Int::MemberFunctionAdd; - - EXPECT_TRUE(static_cast(fun)); - EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value); -} - -TYPED_TEST_P(AnyInvTestBasic, ConstructionMemberObjectPtr) { - using UnaryAnyInvType = typename TypeParam::UnaryAnyInvType; - - UnaryAnyInvType fun = &Int::value; - - EXPECT_TRUE(static_cast(fun)); - EXPECT_EQ(13, TypeParam::ToUnaryThisParam(fun)(13)); -} - -TYPED_TEST_P(AnyInvTestBasic, ConstructionFunctionReferenceDecay) { - using AnyInvType = typename TypeParam::AnyInvType; - - AnyInvType fun = add_function; - - EXPECT_TRUE(static_cast(fun)); - EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value); -} - -TYPED_TEST_P(AnyInvTestBasic, ConstructionCompatibleAnyInvocableEmpty) { - using AnyInvType = typename TypeParam::AnyInvType; - using 
CompatibleAnyInvType = typename TypeParam::CompatibleAnyInvType; - - CompatibleAnyInvType other; - AnyInvType fun = std::move(other); - - EXPECT_FALSE(static_cast(other)); // NOLINT - EXPECT_EQ(other, nullptr); // NOLINT - EXPECT_EQ(nullptr, other); // NOLINT - - EXPECT_FALSE(static_cast(fun)); -} - -TYPED_TEST_P(AnyInvTestBasic, ConstructionCompatibleAnyInvocableNonempty) { - using AnyInvType = typename TypeParam::AnyInvType; - using CompatibleAnyInvType = typename TypeParam::CompatibleAnyInvType; - - CompatibleAnyInvType other = &add_function; - AnyInvType fun = std::move(other); - - EXPECT_FALSE(static_cast(other)); // NOLINT - EXPECT_EQ(other, nullptr); // NOLINT - EXPECT_EQ(nullptr, other); // NOLINT - - EXPECT_TRUE(static_cast(fun)); - EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value); -} - -TYPED_TEST_P(AnyInvTestBasic, ConversionToBool) { - using AnyInvType = typename TypeParam::AnyInvType; - - { - AnyInvType fun; - - // This tests contextually-convertible-to-bool. - EXPECT_FALSE(fun ? true : false); // NOLINT - - // Make sure that the conversion is not implicit. - EXPECT_TRUE( - (std::is_nothrow_constructible::value)); - EXPECT_FALSE((std::is_convertible::value)); - } - - { - AnyInvType fun = &add_function; - - // This tests contextually-convertible-to-bool. - EXPECT_TRUE(fun ? true : false); // NOLINT - } -} - -TYPED_TEST_P(AnyInvTestBasic, Invocation) { - using AnyInvType = typename TypeParam::AnyInvType; - - using FunType = typename TypeParam::FunType; - using AnyInvCallType = MemberTypeOf; - - // Make sure the function call operator of AnyInvocable always has the - // type that was specified via the template argument. 
- EXPECT_TRUE((std::is_same::value)); - - AnyInvType fun = &add_function; - - EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value); -} - -TYPED_TEST_P(AnyInvTestBasic, InPlaceConstruction) { - using AnyInvType = typename TypeParam::AnyInvType; - using AddType = typename TypeParam::AddType; - - AnyInvType fun(absl::in_place_type, 5); - - EXPECT_TRUE(static_cast(fun)); - EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value); -} - -TYPED_TEST_P(AnyInvTestBasic, InPlaceConstructionInitializerList) { - using AnyInvType = typename TypeParam::AnyInvType; - using AddType = typename TypeParam::AddType; - - AnyInvType fun(absl::in_place_type, {1, 2, 3, 4}, 5); - - EXPECT_TRUE(static_cast(fun)); - EXPECT_EQ(39, TypeParam::ToThisParam(fun)(7, 8, 9).value); -} - -TYPED_TEST_P(AnyInvTestBasic, InPlaceNullFunPtrConstruction) { - using AnyInvType = typename TypeParam::AnyInvType; - using UnqualifiedFunType = typename TypeParam::UnqualifiedFunType; - - AnyInvType fun(absl::in_place_type, nullptr); - - // In-place construction does not lead to empty. - EXPECT_TRUE(static_cast(fun)); -} - -TYPED_TEST_P(AnyInvTestBasic, InPlaceNullFunPtrConstructionValueInit) { - using AnyInvType = typename TypeParam::AnyInvType; - using UnqualifiedFunType = typename TypeParam::UnqualifiedFunType; - - AnyInvType fun(absl::in_place_type); - - // In-place construction does not lead to empty. - EXPECT_TRUE(static_cast(fun)); -} - -TYPED_TEST_P(AnyInvTestBasic, InPlaceNullMemFunPtrConstruction) { - using AnyInvType = typename TypeParam::AnyInvType; - using MemFunPtrType = typename TypeParam::MemFunPtrType; - - AnyInvType fun(absl::in_place_type, nullptr); - - // In-place construction does not lead to empty. 
- EXPECT_TRUE(static_cast(fun)); -} - -TYPED_TEST_P(AnyInvTestBasic, InPlaceNullMemFunPtrConstructionValueInit) { - using AnyInvType = typename TypeParam::AnyInvType; - using MemFunPtrType = typename TypeParam::MemFunPtrType; - - AnyInvType fun(absl::in_place_type); - - // In-place construction does not lead to empty. - EXPECT_TRUE(static_cast(fun)); -} - -TYPED_TEST_P(AnyInvTestBasic, InPlaceNullMemObjPtrConstruction) { - using UnaryAnyInvType = typename TypeParam::UnaryAnyInvType; - using MemObjPtrType = typename TypeParam::MemObjPtrType; - - UnaryAnyInvType fun(absl::in_place_type, nullptr); - - // In-place construction does not lead to empty. - EXPECT_TRUE(static_cast(fun)); -} - -TYPED_TEST_P(AnyInvTestBasic, InPlaceNullMemObjPtrConstructionValueInit) { - using UnaryAnyInvType = typename TypeParam::UnaryAnyInvType; - using MemObjPtrType = typename TypeParam::MemObjPtrType; - - UnaryAnyInvType fun(absl::in_place_type); - - // In-place construction does not lead to empty. - EXPECT_TRUE(static_cast(fun)); -} - -TYPED_TEST_P(AnyInvTestBasic, InPlaceVoidCovarianceConstruction) { - using VoidAnyInvType = typename TypeParam::VoidAnyInvType; - using AddType = typename TypeParam::AddType; - - VoidAnyInvType fun(absl::in_place_type, 5); - - EXPECT_TRUE(static_cast(fun)); -} - -TYPED_TEST_P(AnyInvTestBasic, MoveConstructionFromEmpty) { - using AnyInvType = typename TypeParam::AnyInvType; - - AnyInvType source_fun; - AnyInvType fun(std::move(source_fun)); - - EXPECT_FALSE(static_cast(fun)); - - EXPECT_TRUE(std::is_nothrow_move_constructible::value); -} - -TYPED_TEST_P(AnyInvTestBasic, MoveConstructionFromNonEmpty) { - using AnyInvType = typename TypeParam::AnyInvType; - using AddType = typename TypeParam::AddType; - - AnyInvType source_fun(absl::in_place_type, 5); - AnyInvType fun(std::move(source_fun)); - - EXPECT_TRUE(static_cast(fun)); - EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value); - - EXPECT_TRUE(std::is_nothrow_move_constructible::value); -} - 
-TYPED_TEST_P(AnyInvTestBasic, ComparisonWithNullptrEmpty) { - using AnyInvType = typename TypeParam::AnyInvType; - - AnyInvType fun; - - EXPECT_TRUE(fun == nullptr); - EXPECT_TRUE(nullptr == fun); - - EXPECT_FALSE(fun != nullptr); - EXPECT_FALSE(nullptr != fun); -} - -TYPED_TEST_P(AnyInvTestBasic, ComparisonWithNullptrNonempty) { - using AnyInvType = typename TypeParam::AnyInvType; - using AddType = typename TypeParam::AddType; - - AnyInvType fun(absl::in_place_type, 5); - - EXPECT_FALSE(fun == nullptr); - EXPECT_FALSE(nullptr == fun); - - EXPECT_TRUE(fun != nullptr); - EXPECT_TRUE(nullptr != fun); -} - -TYPED_TEST_P(AnyInvTestBasic, ResultType) { - using AnyInvType = typename TypeParam::AnyInvType; - using ExpectedResultType = typename TypeParam::ResultType; - - EXPECT_TRUE((std::is_same::value)); -} - -template -class AnyInvTestCombinatoric : public ::testing::Test {}; - -TYPED_TEST_SUITE_P(AnyInvTestCombinatoric); - -TYPED_TEST_P(AnyInvTestCombinatoric, MoveAssignEmptyEmptyLhsRhs) { - using AnyInvType = typename TypeParam::AnyInvType; - - AnyInvType source_fun; - AnyInvType fun; - - fun = std::move(source_fun); - - EXPECT_FALSE(static_cast(fun)); -} - -TYPED_TEST_P(AnyInvTestCombinatoric, MoveAssignEmptyLhsNonemptyRhs) { - using AnyInvType = typename TypeParam::AnyInvType; - using AddType = typename TypeParam::AddType; - - AnyInvType source_fun(absl::in_place_type, 5); - AnyInvType fun; - - fun = std::move(source_fun); - - EXPECT_TRUE(static_cast(fun)); - EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value); -} - -TYPED_TEST_P(AnyInvTestCombinatoric, MoveAssignNonemptyEmptyLhsRhs) { - using AnyInvType = typename TypeParam::AnyInvType; - using AddType = typename TypeParam::AddType; - - AnyInvType source_fun; - AnyInvType fun(absl::in_place_type, 5); - - fun = std::move(source_fun); - - EXPECT_FALSE(static_cast(fun)); -} - -TYPED_TEST_P(AnyInvTestCombinatoric, MoveAssignNonemptyLhsNonemptyRhs) { - using AnyInvType = typename TypeParam::AnyInvType; - using 
AddType = typename TypeParam::AddType; - - AnyInvType source_fun(absl::in_place_type, 5); - AnyInvType fun(absl::in_place_type, 20); - - fun = std::move(source_fun); - - EXPECT_TRUE(static_cast(fun)); - EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value); -} - -TYPED_TEST_P(AnyInvTestCombinatoric, SelfMoveAssignEmpty) { - using AnyInvType = typename TypeParam::AnyInvType; - - AnyInvType source_fun; - source_fun = std::move(source_fun); - - // This space intentionally left blank. -} - -TYPED_TEST_P(AnyInvTestCombinatoric, SelfMoveAssignNonempty) { - using AnyInvType = typename TypeParam::AnyInvType; - using AddType = typename TypeParam::AddType; - - AnyInvType source_fun(absl::in_place_type, 5); - source_fun = std::move(source_fun); - - // This space intentionally left blank. -} - -TYPED_TEST_P(AnyInvTestCombinatoric, AssignNullptrEmptyLhs) { - using AnyInvType = typename TypeParam::AnyInvType; - - AnyInvType fun; - fun = nullptr; - - EXPECT_FALSE(static_cast(fun)); -} - -TYPED_TEST_P(AnyInvTestCombinatoric, AssignNullFunctionPtrEmptyLhs) { - using AnyInvType = typename TypeParam::AnyInvType; - using UnqualifiedFunType = typename TypeParam::UnqualifiedFunType; - - UnqualifiedFunType* const null_fun_ptr = nullptr; - AnyInvType fun; - fun = null_fun_ptr; - - EXPECT_FALSE(static_cast(fun)); -} - -TYPED_TEST_P(AnyInvTestCombinatoric, AssignNullMemberFunctionPtrEmptyLhs) { - using AnyInvType = typename TypeParam::AnyInvType; - using MemFunPtrType = typename TypeParam::MemFunPtrType; - - const MemFunPtrType null_mem_fun_ptr = nullptr; - AnyInvType fun; - fun = null_mem_fun_ptr; - - EXPECT_FALSE(static_cast(fun)); -} - -TYPED_TEST_P(AnyInvTestCombinatoric, AssignNullMemberObjectPtrEmptyLhs) { - using UnaryAnyInvType = typename TypeParam::UnaryAnyInvType; - using MemObjPtrType = typename TypeParam::MemObjPtrType; - - const MemObjPtrType null_mem_obj_ptr = nullptr; - UnaryAnyInvType fun; - fun = null_mem_obj_ptr; - - EXPECT_FALSE(static_cast(fun)); -} - 
-TYPED_TEST_P(AnyInvTestCombinatoric, AssignMemberFunctionPtrEmptyLhs) { - using AnyInvType = typename TypeParam::AnyInvType; - - AnyInvType fun; - fun = &Int::MemberFunctionAdd; - - EXPECT_TRUE(static_cast(fun)); - EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value); -} - -TYPED_TEST_P(AnyInvTestCombinatoric, AssignMemberObjectPtrEmptyLhs) { - using UnaryAnyInvType = typename TypeParam::UnaryAnyInvType; - - UnaryAnyInvType fun; - fun = &Int::value; - - EXPECT_TRUE(static_cast(fun)); - EXPECT_EQ(13, TypeParam::ToUnaryThisParam(fun)(13)); -} - -TYPED_TEST_P(AnyInvTestCombinatoric, AssignFunctionReferenceDecayEmptyLhs) { - using AnyInvType = typename TypeParam::AnyInvType; - - AnyInvType fun; - fun = add_function; - - EXPECT_TRUE(static_cast(fun)); - EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value); -} - -TYPED_TEST_P(AnyInvTestCombinatoric, - AssignCompatibleAnyInvocableEmptyLhsEmptyRhs) { - using AnyInvType = typename TypeParam::AnyInvType; - using CompatibleAnyInvType = typename TypeParam::CompatibleAnyInvType; - - CompatibleAnyInvType other; - AnyInvType fun; - fun = std::move(other); - - EXPECT_FALSE(static_cast(other)); // NOLINT - EXPECT_EQ(other, nullptr); // NOLINT - EXPECT_EQ(nullptr, other); // NOLINT - - EXPECT_FALSE(static_cast(fun)); -} - -TYPED_TEST_P(AnyInvTestCombinatoric, - AssignCompatibleAnyInvocableEmptyLhsNonemptyRhs) { - using AnyInvType = typename TypeParam::AnyInvType; - using CompatibleAnyInvType = typename TypeParam::CompatibleAnyInvType; - - CompatibleAnyInvType other = &add_function; - AnyInvType fun; - fun = std::move(other); - - EXPECT_FALSE(static_cast(other)); // NOLINT - - EXPECT_TRUE(static_cast(fun)); - EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value); -} - -TYPED_TEST_P(AnyInvTestCombinatoric, AssignNullptrNonemptyLhs) { - using AnyInvType = typename TypeParam::AnyInvType; - - AnyInvType fun = &mult_function; - fun = nullptr; - - EXPECT_FALSE(static_cast(fun)); -} - -TYPED_TEST_P(AnyInvTestCombinatoric, 
AssignNullFunctionPtrNonemptyLhs) { - using AnyInvType = typename TypeParam::AnyInvType; - using UnqualifiedFunType = typename TypeParam::UnqualifiedFunType; - - UnqualifiedFunType* const null_fun_ptr = nullptr; - AnyInvType fun = &mult_function; - fun = null_fun_ptr; - - EXPECT_FALSE(static_cast(fun)); -} - -TYPED_TEST_P(AnyInvTestCombinatoric, AssignNullMemberFunctionPtrNonemptyLhs) { - using AnyInvType = typename TypeParam::AnyInvType; - using MemFunPtrType = typename TypeParam::MemFunPtrType; - - const MemFunPtrType null_mem_fun_ptr = nullptr; - AnyInvType fun = &mult_function; - fun = null_mem_fun_ptr; - - EXPECT_FALSE(static_cast(fun)); -} - -TYPED_TEST_P(AnyInvTestCombinatoric, AssignNullMemberObjectPtrNonemptyLhs) { - using UnaryAnyInvType = typename TypeParam::UnaryAnyInvType; - using MemObjPtrType = typename TypeParam::MemObjPtrType; - - const MemObjPtrType null_mem_obj_ptr = nullptr; - UnaryAnyInvType fun = &square_function; - fun = null_mem_obj_ptr; - - EXPECT_FALSE(static_cast(fun)); -} - -TYPED_TEST_P(AnyInvTestCombinatoric, AssignMemberFunctionPtrNonemptyLhs) { - using AnyInvType = typename TypeParam::AnyInvType; - - AnyInvType fun = &mult_function; - fun = &Int::MemberFunctionAdd; - - EXPECT_TRUE(static_cast(fun)); - EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value); -} - -TYPED_TEST_P(AnyInvTestCombinatoric, AssignMemberObjectPtrNonemptyLhs) { - using UnaryAnyInvType = typename TypeParam::UnaryAnyInvType; - - UnaryAnyInvType fun = &square_function; - fun = &Int::value; - - EXPECT_TRUE(static_cast(fun)); - EXPECT_EQ(13, TypeParam::ToUnaryThisParam(fun)(13)); -} - -TYPED_TEST_P(AnyInvTestCombinatoric, AssignFunctionReferenceDecayNonemptyLhs) { - using AnyInvType = typename TypeParam::AnyInvType; - - AnyInvType fun = &mult_function; - fun = add_function; - - EXPECT_TRUE(static_cast(fun)); - EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value); -} - -TYPED_TEST_P(AnyInvTestCombinatoric, - AssignCompatibleAnyInvocableNonemptyLhsEmptyRhs) { 
- using AnyInvType = typename TypeParam::AnyInvType; - using CompatibleAnyInvType = typename TypeParam::CompatibleAnyInvType; - - CompatibleAnyInvType other; - AnyInvType fun = &mult_function; - fun = std::move(other); - - EXPECT_FALSE(static_cast(other)); // NOLINT - EXPECT_EQ(other, nullptr); // NOLINT - EXPECT_EQ(nullptr, other); // NOLINT - - EXPECT_FALSE(static_cast(fun)); -} - -TYPED_TEST_P(AnyInvTestCombinatoric, - AssignCompatibleAnyInvocableNonemptyLhsNonemptyRhs) { - using AnyInvType = typename TypeParam::AnyInvType; - using CompatibleAnyInvType = typename TypeParam::CompatibleAnyInvType; - - CompatibleAnyInvType other = &add_function; - AnyInvType fun = &mult_function; - fun = std::move(other); - - EXPECT_FALSE(static_cast(other)); // NOLINT - - EXPECT_TRUE(static_cast(fun)); - EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value); -} - -TYPED_TEST_P(AnyInvTestCombinatoric, SwapEmptyLhsEmptyRhs) { - using AnyInvType = typename TypeParam::AnyInvType; - - // Swap idiom - { - AnyInvType fun; - AnyInvType other; - - using std::swap; - swap(fun, other); - - EXPECT_FALSE(static_cast(fun)); - EXPECT_FALSE(static_cast(other)); - - EXPECT_TRUE( - absl::type_traits_internal::IsNothrowSwappable::value); - } - - // Member swap - { - AnyInvType fun; - AnyInvType other; - - fun.swap(other); - - EXPECT_FALSE(static_cast(fun)); - EXPECT_FALSE(static_cast(other)); - - EXPECT_TRUE(IsNothrowMemberSwappable::value); - } -} - -TYPED_TEST_P(AnyInvTestCombinatoric, SwapEmptyLhsNonemptyRhs) { - using AnyInvType = typename TypeParam::AnyInvType; - using AddType = typename TypeParam::AddType; - - // Swap idiom - { - AnyInvType fun; - AnyInvType other(absl::in_place_type, 5); - - using std::swap; - swap(fun, other); - - EXPECT_TRUE(static_cast(fun)); - EXPECT_FALSE(static_cast(other)); - - EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value); - - EXPECT_TRUE( - absl::type_traits_internal::IsNothrowSwappable::value); - } - - // Member swap - { - AnyInvType fun; - 
AnyInvType other(absl::in_place_type, 5); - - fun.swap(other); - - EXPECT_TRUE(static_cast(fun)); - EXPECT_FALSE(static_cast(other)); - - EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value); - - EXPECT_TRUE(IsNothrowMemberSwappable::value); - } -} - -TYPED_TEST_P(AnyInvTestCombinatoric, SwapNonemptyLhsEmptyRhs) { - using AnyInvType = typename TypeParam::AnyInvType; - using AddType = typename TypeParam::AddType; - - // Swap idiom - { - AnyInvType fun(absl::in_place_type, 5); - AnyInvType other; - - using std::swap; - swap(fun, other); - - EXPECT_FALSE(static_cast(fun)); - EXPECT_TRUE(static_cast(other)); - - EXPECT_EQ(29, TypeParam::ToThisParam(other)(7, 8, 9).value); - - EXPECT_TRUE( - absl::type_traits_internal::IsNothrowSwappable::value); - } - - // Member swap - { - AnyInvType fun(absl::in_place_type, 5); - AnyInvType other; - - fun.swap(other); - - EXPECT_FALSE(static_cast(fun)); - EXPECT_TRUE(static_cast(other)); - - EXPECT_EQ(29, TypeParam::ToThisParam(other)(7, 8, 9).value); - - EXPECT_TRUE(IsNothrowMemberSwappable::value); - } -} - -TYPED_TEST_P(AnyInvTestCombinatoric, SwapNonemptyLhsNonemptyRhs) { - using AnyInvType = typename TypeParam::AnyInvType; - using AddType = typename TypeParam::AddType; - - // Swap idiom - { - AnyInvType fun(absl::in_place_type, 5); - AnyInvType other(absl::in_place_type, 6); - - using std::swap; - swap(fun, other); - - EXPECT_TRUE(static_cast(fun)); - EXPECT_TRUE(static_cast(other)); - - EXPECT_EQ(30, TypeParam::ToThisParam(fun)(7, 8, 9).value); - EXPECT_EQ(29, TypeParam::ToThisParam(other)(7, 8, 9).value); - - EXPECT_TRUE( - absl::type_traits_internal::IsNothrowSwappable::value); - } - - // Member swap - { - AnyInvType fun(absl::in_place_type, 5); - AnyInvType other(absl::in_place_type, 6); - - fun.swap(other); - - EXPECT_TRUE(static_cast(fun)); - EXPECT_TRUE(static_cast(other)); - - EXPECT_EQ(30, TypeParam::ToThisParam(fun)(7, 8, 9).value); - EXPECT_EQ(29, TypeParam::ToThisParam(other)(7, 8, 9).value); - - 
EXPECT_TRUE(IsNothrowMemberSwappable::value); - } -} - -template -class AnyInvTestMovable : public ::testing::Test {}; - -TYPED_TEST_SUITE_P(AnyInvTestMovable); - -TYPED_TEST_P(AnyInvTestMovable, ConversionConstructionUserDefinedType) { - using AnyInvType = typename TypeParam::AnyInvType; - using AddType = typename TypeParam::AddType; - - AnyInvType fun(AddType(5)); - - EXPECT_TRUE(static_cast(fun)); - EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value); - - EXPECT_TRUE(static_cast(fun)); - EXPECT_EQ(38, TypeParam::ToThisParam(fun)(10, 11, 12).value); -} - -TYPED_TEST_P(AnyInvTestMovable, ConversionConstructionVoidCovariance) { - using VoidAnyInvType = typename TypeParam::VoidAnyInvType; - using AddType = typename TypeParam::AddType; - - VoidAnyInvType fun(AddType(5)); - - EXPECT_TRUE(static_cast(fun)); -} - -TYPED_TEST_P(AnyInvTestMovable, ConversionAssignUserDefinedTypeEmptyLhs) { - using AnyInvType = typename TypeParam::AnyInvType; - using AddType = typename TypeParam::AddType; - - AnyInvType fun; - fun = AddType(5); - - EXPECT_TRUE(static_cast(fun)); - EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value); - - EXPECT_TRUE(static_cast(fun)); - EXPECT_EQ(38, TypeParam::ToThisParam(fun)(10, 11, 12).value); -} - -TYPED_TEST_P(AnyInvTestMovable, ConversionAssignUserDefinedTypeNonemptyLhs) { - using AnyInvType = typename TypeParam::AnyInvType; - using AddType = typename TypeParam::AddType; - - AnyInvType fun = &add_function; - fun = AddType(5); - - EXPECT_TRUE(static_cast(fun)); - EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value); - - EXPECT_TRUE(static_cast(fun)); - EXPECT_EQ(38, TypeParam::ToThisParam(fun)(10, 11, 12).value); -} - -TYPED_TEST_P(AnyInvTestMovable, ConversionAssignVoidCovariance) { - using VoidAnyInvType = typename TypeParam::VoidAnyInvType; - using AddType = typename TypeParam::AddType; - - VoidAnyInvType fun; - fun = AddType(5); - - EXPECT_TRUE(static_cast(fun)); -} - -template -class AnyInvTestNoexceptFalse : public 
::testing::Test {}; - -TYPED_TEST_SUITE_P(AnyInvTestNoexceptFalse); - -TYPED_TEST_P(AnyInvTestNoexceptFalse, ConversionConstructionConstraints) { - using AnyInvType = typename TypeParam::AnyInvType; - - EXPECT_TRUE((std::is_constructible< - AnyInvType, - typename TypeParam::AnyInvocableFunTypeNotNoexcept*>::value)); - EXPECT_FALSE(( - std::is_constructible::value)); -} - -TYPED_TEST_P(AnyInvTestNoexceptFalse, ConversionAssignConstraints) { - using AnyInvType = typename TypeParam::AnyInvType; - - EXPECT_TRUE((std::is_assignable< - AnyInvType&, - typename TypeParam::AnyInvocableFunTypeNotNoexcept*>::value)); - EXPECT_FALSE( - (std::is_assignable::value)); -} - -template -class AnyInvTestNoexceptTrue : public ::testing::Test {}; - -TYPED_TEST_SUITE_P(AnyInvTestNoexceptTrue); - -TYPED_TEST_P(AnyInvTestNoexceptTrue, ConversionConstructionConstraints) { -#if ABSL_INTERNAL_CPLUSPLUS_LANG < 201703L - GTEST_SKIP() << "Noexcept was not part of the type system before C++17."; -#else - using AnyInvType = typename TypeParam::AnyInvType; - -// TODO(b/217761454): Fix this and re-enable for MSVC. -#ifndef _MSC_VER - EXPECT_FALSE((std::is_constructible< - AnyInvType, - typename TypeParam::AnyInvocableFunTypeNotNoexcept*>::value)); -#endif - EXPECT_FALSE(( - std::is_constructible::value)); -#endif -} - -TYPED_TEST_P(AnyInvTestNoexceptTrue, ConversionAssignConstraints) { -#if ABSL_INTERNAL_CPLUSPLUS_LANG < 201703L - GTEST_SKIP() << "Noexcept was not part of the type system before C++17."; -#else - using AnyInvType = typename TypeParam::AnyInvType; - -// TODO(b/217761454): Fix this and re-enable for MSVC. 
-#ifndef _MSC_VER - EXPECT_FALSE((std::is_assignable< - AnyInvType&, - typename TypeParam::AnyInvocableFunTypeNotNoexcept*>::value)); -#endif - EXPECT_FALSE( - (std::is_assignable::value)); -#endif -} - -template -class AnyInvTestNonRvalue : public ::testing::Test {}; - -TYPED_TEST_SUITE_P(AnyInvTestNonRvalue); - -TYPED_TEST_P(AnyInvTestNonRvalue, ConversionConstructionReferenceWrapper) { - using AnyInvType = typename TypeParam::AnyInvType; - using AddType = typename TypeParam::AddType; - - AddType add(4); - AnyInvType fun = std::ref(add); - add.state = 5; - - EXPECT_TRUE(static_cast(fun)); - EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value); - - EXPECT_TRUE(static_cast(fun)); - EXPECT_EQ(38, TypeParam::ToThisParam(fun)(10, 11, 12).value); -} - -TYPED_TEST_P(AnyInvTestNonRvalue, NonMoveableResultType) { -#if ABSL_INTERNAL_CPLUSPLUS_LANG < 201703L - GTEST_SKIP() << "Copy/move elision was not standard before C++17"; -#else - // Define a result type that cannot be copy- or move-constructed. - struct Result { - int x; - - explicit Result(const int x_in) : x(x_in) {} - Result(Result&&) = delete; - }; - - static_assert(!std::is_move_constructible::value, ""); - static_assert(!std::is_copy_constructible::value, ""); - - // Assumption check: it should nevertheless be possible to use functors that - // return a Result struct according to the language rules. - const auto return_17 = []() noexcept { return Result(17); }; - EXPECT_EQ(17, return_17().x); - - // Just like plain functors, it should work fine to use an AnyInvocable that - // returns the non-moveable type. 
- using UnqualifiedFun = - absl::conditional_t; - - using Fun = - GiveQualifiersToFun; - - AnyInvocable any_inv(return_17); - EXPECT_EQ(17, any_inv().x); -#endif -} - -TYPED_TEST_P(AnyInvTestNonRvalue, ConversionAssignReferenceWrapperEmptyLhs) { - using AnyInvType = typename TypeParam::AnyInvType; - using AddType = typename TypeParam::AddType; - - AddType add(4); - AnyInvType fun; - fun = std::ref(add); - add.state = 5; - EXPECT_TRUE( - (std::is_nothrow_assignable>::value)); - - EXPECT_TRUE(static_cast(fun)); - EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value); - - EXPECT_TRUE(static_cast(fun)); - EXPECT_EQ(38, TypeParam::ToThisParam(fun)(10, 11, 12).value); -} - -TYPED_TEST_P(AnyInvTestNonRvalue, ConversionAssignReferenceWrapperNonemptyLhs) { - using AnyInvType = typename TypeParam::AnyInvType; - using AddType = typename TypeParam::AddType; - - AddType add(4); - AnyInvType fun = &mult_function; - fun = std::ref(add); - add.state = 5; - EXPECT_TRUE( - (std::is_nothrow_assignable>::value)); - - EXPECT_TRUE(static_cast(fun)); - EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value); - - EXPECT_TRUE(static_cast(fun)); - EXPECT_EQ(38, TypeParam::ToThisParam(fun)(10, 11, 12).value); -} - -template -class AnyInvTestRvalue : public ::testing::Test {}; - -TYPED_TEST_SUITE_P(AnyInvTestRvalue); - -TYPED_TEST_P(AnyInvTestRvalue, ConversionConstructionReferenceWrapper) { - using AnyInvType = typename TypeParam::AnyInvType; - using AddType = typename TypeParam::AddType; - - EXPECT_FALSE(( - std::is_convertible, AnyInvType>::value)); -} - -TYPED_TEST_P(AnyInvTestRvalue, NonMoveableResultType) { -#if ABSL_INTERNAL_CPLUSPLUS_LANG < 201703L - GTEST_SKIP() << "Copy/move elision was not standard before C++17"; -#else - // Define a result type that cannot be copy- or move-constructed. 
- struct Result { - int x; - - explicit Result(const int x_in) : x(x_in) {} - Result(Result&&) = delete; - }; - - static_assert(!std::is_move_constructible::value, ""); - static_assert(!std::is_copy_constructible::value, ""); - - // Assumption check: it should nevertheless be possible to use functors that - // return a Result struct according to the language rules. - const auto return_17 = []() noexcept { return Result(17); }; - EXPECT_EQ(17, return_17().x); - - // Just like plain functors, it should work fine to use an AnyInvocable that - // returns the non-moveable type. - using UnqualifiedFun = - absl::conditional_t; - - using Fun = - GiveQualifiersToFun; - - EXPECT_EQ(17, AnyInvocable(return_17)().x); -#endif -} - -TYPED_TEST_P(AnyInvTestRvalue, ConversionAssignReferenceWrapper) { - using AnyInvType = typename TypeParam::AnyInvType; - using AddType = typename TypeParam::AddType; - - EXPECT_FALSE(( - std::is_assignable>::value)); -} - -// NOTE: This test suite originally attempted to enumerate all possible -// combinations of type properties but the build-time started getting too large. -// Instead, it is now assumed that certain parameters are orthogonal and so -// some combinations are elided. - -// A metafunction to form a TypeList of all cv and non-rvalue ref combinations, -// coupled with all of the other explicitly specified parameters. -template -using NonRvalueQualifiedTestParams = ::testing::Types< // - TestParams, // - TestParams, // - TestParams, // - TestParams>; - -// A metafunction to form a TypeList of const and non-const rvalue ref -// qualifiers, coupled with all of the other explicitly specified parameters. 
-template -using RvalueQualifiedTestParams = ::testing::Types< - TestParams, // - TestParams // - >; - -// All qualifier combinations and a noexcept function type -using TestParameterListNonRvalueQualifiersNothrowCall = - NonRvalueQualifiedTestParams; -using TestParameterListRvalueQualifiersNothrowCall = - RvalueQualifiedTestParams; - -// All qualifier combinations and a non-noexcept function type -using TestParameterListNonRvalueQualifiersCallMayThrow = - NonRvalueQualifiedTestParams; -using TestParameterListRvalueQualifiersCallMayThrow = - RvalueQualifiedTestParams; - -// Lists of various cases that should lead to remote storage -using TestParameterListRemoteMovable = ::testing::Types< - // "Normal" aligned types that are large and have trivial destructors - TestParams, // - TestParams, // - TestParams, // - TestParams, // - - // Same as above but with non-trivial destructors - TestParams, // - TestParams, // - TestParams, // - TestParams // - -// Dynamic memory allocation for over-aligned data was introduced in C++17. -// See https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0035r4.html -#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L - // Types that must use remote storage because of a large alignment. - , - TestParams, // - TestParams, // - TestParams, // - TestParams // -#endif - >; -using TestParameterListRemoteNonMovable = ::testing::Types< - // "Normal" aligned types that are large and have trivial destructors - TestParams, // - TestParams, // - // Same as above but with non-trivial destructors - TestParams, // - TestParams // - >; - -// Parameters that lead to local storage -using TestParameterListLocal = ::testing::Types< - // Types that meet the requirements and have trivial destructors - TestParams, // - TestParams, // - - // Same as above but with non-trivial destructors - TestParams, // - TestParams // - >; - -// All of the tests that are run for every possible combination of types. 
-REGISTER_TYPED_TEST_SUITE_P( - AnyInvTestBasic, DefaultConstruction, ConstructionNullptr, - ConstructionNullFunctionPtr, ConstructionNullMemberFunctionPtr, - ConstructionNullMemberObjectPtr, ConstructionMemberFunctionPtr, - ConstructionMemberObjectPtr, ConstructionFunctionReferenceDecay, - ConstructionCompatibleAnyInvocableEmpty, - ConstructionCompatibleAnyInvocableNonempty, InPlaceConstruction, - ConversionToBool, Invocation, InPlaceConstructionInitializerList, - InPlaceNullFunPtrConstruction, InPlaceNullFunPtrConstructionValueInit, - InPlaceNullMemFunPtrConstruction, InPlaceNullMemFunPtrConstructionValueInit, - InPlaceNullMemObjPtrConstruction, InPlaceNullMemObjPtrConstructionValueInit, - InPlaceVoidCovarianceConstruction, MoveConstructionFromEmpty, - MoveConstructionFromNonEmpty, ComparisonWithNullptrEmpty, - ComparisonWithNullptrNonempty, ResultType); - -INSTANTIATE_TYPED_TEST_SUITE_P( - NonRvalueCallMayThrow, AnyInvTestBasic, - TestParameterListNonRvalueQualifiersCallMayThrow); -INSTANTIATE_TYPED_TEST_SUITE_P(RvalueCallMayThrow, AnyInvTestBasic, - TestParameterListRvalueQualifiersCallMayThrow); - -INSTANTIATE_TYPED_TEST_SUITE_P(RemoteMovable, AnyInvTestBasic, - TestParameterListRemoteMovable); -INSTANTIATE_TYPED_TEST_SUITE_P(RemoteNonMovable, AnyInvTestBasic, - TestParameterListRemoteNonMovable); - -INSTANTIATE_TYPED_TEST_SUITE_P(Local, AnyInvTestBasic, TestParameterListLocal); - -INSTANTIATE_TYPED_TEST_SUITE_P(NonRvalueCallNothrow, AnyInvTestBasic, - TestParameterListNonRvalueQualifiersNothrowCall); -INSTANTIATE_TYPED_TEST_SUITE_P(CallNothrowRvalue, AnyInvTestBasic, - TestParameterListRvalueQualifiersNothrowCall); - -// Tests for functions that take two operands. 
-REGISTER_TYPED_TEST_SUITE_P( - AnyInvTestCombinatoric, MoveAssignEmptyEmptyLhsRhs, - MoveAssignEmptyLhsNonemptyRhs, MoveAssignNonemptyEmptyLhsRhs, - MoveAssignNonemptyLhsNonemptyRhs, SelfMoveAssignEmpty, - SelfMoveAssignNonempty, AssignNullptrEmptyLhs, - AssignNullFunctionPtrEmptyLhs, AssignNullMemberFunctionPtrEmptyLhs, - AssignNullMemberObjectPtrEmptyLhs, AssignMemberFunctionPtrEmptyLhs, - AssignMemberObjectPtrEmptyLhs, AssignFunctionReferenceDecayEmptyLhs, - AssignCompatibleAnyInvocableEmptyLhsEmptyRhs, - AssignCompatibleAnyInvocableEmptyLhsNonemptyRhs, AssignNullptrNonemptyLhs, - AssignNullFunctionPtrNonemptyLhs, AssignNullMemberFunctionPtrNonemptyLhs, - AssignNullMemberObjectPtrNonemptyLhs, AssignMemberFunctionPtrNonemptyLhs, - AssignMemberObjectPtrNonemptyLhs, AssignFunctionReferenceDecayNonemptyLhs, - AssignCompatibleAnyInvocableNonemptyLhsEmptyRhs, - AssignCompatibleAnyInvocableNonemptyLhsNonemptyRhs, SwapEmptyLhsEmptyRhs, - SwapEmptyLhsNonemptyRhs, SwapNonemptyLhsEmptyRhs, - SwapNonemptyLhsNonemptyRhs); - -INSTANTIATE_TYPED_TEST_SUITE_P( - NonRvalueCallMayThrow, AnyInvTestCombinatoric, - TestParameterListNonRvalueQualifiersCallMayThrow); -INSTANTIATE_TYPED_TEST_SUITE_P(RvalueCallMayThrow, AnyInvTestCombinatoric, - TestParameterListRvalueQualifiersCallMayThrow); - -INSTANTIATE_TYPED_TEST_SUITE_P(RemoteMovable, AnyInvTestCombinatoric, - TestParameterListRemoteMovable); -INSTANTIATE_TYPED_TEST_SUITE_P(RemoteNonMovable, AnyInvTestCombinatoric, - TestParameterListRemoteNonMovable); - -INSTANTIATE_TYPED_TEST_SUITE_P(Local, AnyInvTestCombinatoric, - TestParameterListLocal); - -INSTANTIATE_TYPED_TEST_SUITE_P(NonRvalueCallNothrow, AnyInvTestCombinatoric, - TestParameterListNonRvalueQualifiersNothrowCall); -INSTANTIATE_TYPED_TEST_SUITE_P(RvalueCallNothrow, AnyInvTestCombinatoric, - TestParameterListRvalueQualifiersNothrowCall); - -REGISTER_TYPED_TEST_SUITE_P(AnyInvTestMovable, - ConversionConstructionUserDefinedType, - ConversionConstructionVoidCovariance, - 
ConversionAssignUserDefinedTypeEmptyLhs, - ConversionAssignUserDefinedTypeNonemptyLhs, - ConversionAssignVoidCovariance); - -INSTANTIATE_TYPED_TEST_SUITE_P( - NonRvalueCallMayThrow, AnyInvTestMovable, - TestParameterListNonRvalueQualifiersCallMayThrow); -INSTANTIATE_TYPED_TEST_SUITE_P(RvalueCallMayThrow, AnyInvTestMovable, - TestParameterListRvalueQualifiersCallMayThrow); - -INSTANTIATE_TYPED_TEST_SUITE_P(RemoteMovable, AnyInvTestMovable, - TestParameterListRemoteMovable); - -INSTANTIATE_TYPED_TEST_SUITE_P(Local, AnyInvTestMovable, - TestParameterListLocal); - -INSTANTIATE_TYPED_TEST_SUITE_P(NonRvalueCallNothrow, AnyInvTestMovable, - TestParameterListNonRvalueQualifiersNothrowCall); -INSTANTIATE_TYPED_TEST_SUITE_P(RvalueCallNothrow, AnyInvTestMovable, - TestParameterListRvalueQualifiersNothrowCall); - -REGISTER_TYPED_TEST_SUITE_P(AnyInvTestNoexceptFalse, - ConversionConstructionConstraints, - ConversionAssignConstraints); - -INSTANTIATE_TYPED_TEST_SUITE_P( - NonRvalueCallMayThrow, AnyInvTestNoexceptFalse, - TestParameterListNonRvalueQualifiersCallMayThrow); -INSTANTIATE_TYPED_TEST_SUITE_P(RvalueCallMayThrow, AnyInvTestNoexceptFalse, - TestParameterListRvalueQualifiersCallMayThrow); - -INSTANTIATE_TYPED_TEST_SUITE_P(RemoteMovable, AnyInvTestNoexceptFalse, - TestParameterListRemoteMovable); -INSTANTIATE_TYPED_TEST_SUITE_P(RemoteNonMovable, AnyInvTestNoexceptFalse, - TestParameterListRemoteNonMovable); - -INSTANTIATE_TYPED_TEST_SUITE_P(Local, AnyInvTestNoexceptFalse, - TestParameterListLocal); - -REGISTER_TYPED_TEST_SUITE_P(AnyInvTestNoexceptTrue, - ConversionConstructionConstraints, - ConversionAssignConstraints); - -INSTANTIATE_TYPED_TEST_SUITE_P(NonRvalueCallNothrow, AnyInvTestNoexceptTrue, - TestParameterListNonRvalueQualifiersNothrowCall); -INSTANTIATE_TYPED_TEST_SUITE_P(RvalueCallNothrow, AnyInvTestNoexceptTrue, - TestParameterListRvalueQualifiersNothrowCall); - -REGISTER_TYPED_TEST_SUITE_P(AnyInvTestNonRvalue, - ConversionConstructionReferenceWrapper, - 
NonMoveableResultType, - ConversionAssignReferenceWrapperEmptyLhs, - ConversionAssignReferenceWrapperNonemptyLhs); - -INSTANTIATE_TYPED_TEST_SUITE_P( - NonRvalueCallMayThrow, AnyInvTestNonRvalue, - TestParameterListNonRvalueQualifiersCallMayThrow); - -INSTANTIATE_TYPED_TEST_SUITE_P(RemoteMovable, AnyInvTestNonRvalue, - TestParameterListRemoteMovable); -INSTANTIATE_TYPED_TEST_SUITE_P(RemoteNonMovable, AnyInvTestNonRvalue, - TestParameterListRemoteNonMovable); - -INSTANTIATE_TYPED_TEST_SUITE_P(Local, AnyInvTestNonRvalue, - TestParameterListLocal); - -INSTANTIATE_TYPED_TEST_SUITE_P(NonRvalueCallNothrow, AnyInvTestNonRvalue, - TestParameterListNonRvalueQualifiersNothrowCall); - -REGISTER_TYPED_TEST_SUITE_P(AnyInvTestRvalue, - ConversionConstructionReferenceWrapper, - NonMoveableResultType, - ConversionAssignReferenceWrapper); - -INSTANTIATE_TYPED_TEST_SUITE_P(RvalueCallMayThrow, AnyInvTestRvalue, - TestParameterListRvalueQualifiersCallMayThrow); - -INSTANTIATE_TYPED_TEST_SUITE_P(CallNothrowRvalue, AnyInvTestRvalue, - TestParameterListRvalueQualifiersNothrowCall); - -// Minimal SFINAE testing for platforms where we can't run the tests, but we can -// build binaries for. -static_assert( - std::is_convertible>::value, ""); -static_assert(!std::is_convertible>::value, - ""); - -#undef ABSL_INTERNAL_NOEXCEPT_SPEC - -} // namespace diff --git a/abseil-cpp/absl/functional/bind_front.h b/abseil-cpp/absl/functional/bind_front.h index f9075bd1..5b47970e 100644 --- a/abseil-cpp/absl/functional/bind_front.h +++ b/abseil-cpp/absl/functional/bind_front.h @@ -30,10 +30,6 @@ #ifndef ABSL_FUNCTIONAL_BIND_FRONT_H_ #define ABSL_FUNCTIONAL_BIND_FRONT_H_ -#if defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L -#include // For std::bind_front. -#endif // defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L - #include "absl/functional/internal/front_binder.h" #include "absl/utility/utility.h" @@ -50,8 +46,7 @@ ABSL_NAMESPACE_BEGIN // specified. 
More importantly, it provides more reliable correctness guarantees // than `std::bind()`; while `std::bind()` will silently ignore passing more // parameters than expected, for example, `absl::bind_front()` will report such -// mis-uses as errors. In C++20, `absl::bind_front` is replaced by -// `std::bind_front`. +// mis-uses as errors. // // absl::bind_front(a...) can be seen as storing the results of // std::make_tuple(a...). @@ -175,9 +170,6 @@ ABSL_NAMESPACE_BEGIN // // Doesn't copy "hi". // absl::bind_front(Print, absl::string_view(hi))("Chuk"); // -#if defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L -using std::bind_front; -#else // defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L template constexpr functional_internal::bind_front_t bind_front( F&& func, BoundArgs&&... args) { @@ -185,7 +177,6 @@ constexpr functional_internal::bind_front_t bind_front( absl::in_place, absl::forward(func), absl::forward(args)...); } -#endif // defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L ABSL_NAMESPACE_END } // namespace absl diff --git a/abseil-cpp/absl/functional/function_ref.h b/abseil-cpp/absl/functional/function_ref.h index f9779607..6e03ac2e 100644 --- a/abseil-cpp/absl/functional/function_ref.h +++ b/abseil-cpp/absl/functional/function_ref.h @@ -50,7 +50,6 @@ #include #include -#include "absl/base/attributes.h" #include "absl/functional/internal/function_ref.h" #include "absl/meta/type_traits.h" @@ -69,8 +68,7 @@ class FunctionRef; // An `absl::FunctionRef` is a lightweight wrapper to any invokable object with // a compatible signature. Generally, an `absl::FunctionRef` should only be used // as an argument type and should be preferred as an argument over a const -// reference to a `std::function`. `absl::FunctionRef` itself does not allocate, -// although the wrapped invokable may. +// reference to a `std::function`. 
// // Example: // @@ -100,8 +98,7 @@ class FunctionRef { public: // Constructs a FunctionRef from any invokable type. template > - // NOLINTNEXTLINE(runtime/explicit) - FunctionRef(const F& f ABSL_ATTRIBUTE_LIFETIME_BOUND) + FunctionRef(const F& f) // NOLINT(runtime/explicit) : invoker_(&absl::functional_internal::InvokeObject) { absl::functional_internal::AssertNonNull(f); ptr_.obj = &f; @@ -125,7 +122,6 @@ class FunctionRef { // To help prevent subtle lifetime bugs, FunctionRef is not assignable. // Typically, it should only be used as an argument type. FunctionRef& operator=(const FunctionRef& rhs) = delete; - FunctionRef(const FunctionRef& rhs) = default; // Call the underlying object. R operator()(Args... args) const { diff --git a/abseil-cpp/absl/functional/function_type_benchmark.cc b/abseil-cpp/absl/functional/function_ref_benchmark.cc similarity index 78% rename from abseil-cpp/absl/functional/function_type_benchmark.cc rename to abseil-cpp/absl/functional/function_ref_benchmark.cc index 03dc31d8..045305bf 100644 --- a/abseil-cpp/absl/functional/function_type_benchmark.cc +++ b/abseil-cpp/absl/functional/function_ref_benchmark.cc @@ -1,4 +1,4 @@ -// Copyright 2022 The Abseil Authors. +// Copyright 2019 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,14 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include +#include "absl/functional/function_ref.h" + #include -#include #include "benchmark/benchmark.h" #include "absl/base/attributes.h" -#include "absl/functional/any_invocable.h" -#include "absl/functional/function_ref.h" namespace absl { ABSL_NAMESPACE_BEGIN @@ -63,12 +61,6 @@ void BM_TrivialFunctionRef(benchmark::State& state) { } BENCHMARK(BM_TrivialFunctionRef); -void BM_TrivialAnyInvocable(benchmark::State& state) { - ConstructAndCallFunctionBenchmark>(state, - TrivialFunctor{}); -} -BENCHMARK(BM_TrivialAnyInvocable); - void BM_LargeStdFunction(benchmark::State& state) { ConstructAndCallFunctionBenchmark>(state, LargeFunctor{}); @@ -80,13 +72,6 @@ void BM_LargeFunctionRef(benchmark::State& state) { } BENCHMARK(BM_LargeFunctionRef); - -void BM_LargeAnyInvocable(benchmark::State& state) { - ConstructAndCallFunctionBenchmark>(state, - LargeFunctor{}); -} -BENCHMARK(BM_LargeAnyInvocable); - void BM_FunPtrStdFunction(benchmark::State& state) { ConstructAndCallFunctionBenchmark>(state, FreeFunction); } @@ -97,11 +82,6 @@ void BM_FunPtrFunctionRef(benchmark::State& state) { } BENCHMARK(BM_FunPtrFunctionRef); -void BM_FunPtrAnyInvocable(benchmark::State& state) { - ConstructAndCallFunctionBenchmark>(state, FreeFunction); -} -BENCHMARK(BM_FunPtrAnyInvocable); - // Doesn't include construction or copy overhead in the loop. 
template void CallFunctionBenchmark(benchmark::State& state, const Callable& c, @@ -133,12 +113,6 @@ void BM_TrivialArgsFunctionRef(benchmark::State& state) { } BENCHMARK(BM_TrivialArgsFunctionRef); -void BM_TrivialArgsAnyInvocable(benchmark::State& state) { - CallFunctionBenchmark>( - state, FunctorWithTrivialArgs{}, 1, 2, 3); -} -BENCHMARK(BM_TrivialArgsAnyInvocable); - struct FunctorWithNonTrivialArgs { void operator()(std::string a, std::string b, std::string c) const { benchmark::DoNotOptimize(&a); @@ -163,14 +137,6 @@ void BM_NonTrivialArgsFunctionRef(benchmark::State& state) { } BENCHMARK(BM_NonTrivialArgsFunctionRef); -void BM_NonTrivialArgsAnyInvocable(benchmark::State& state) { - std::string a, b, c; - CallFunctionBenchmark< - AnyInvocable>( - state, FunctorWithNonTrivialArgs{}, a, b, c); -} -BENCHMARK(BM_NonTrivialArgsAnyInvocable); - } // namespace ABSL_NAMESPACE_END } // namespace absl diff --git a/abseil-cpp/absl/functional/function_ref_test.cc b/abseil-cpp/absl/functional/function_ref_test.cc index 412027cd..3aa59745 100644 --- a/abseil-cpp/absl/functional/function_ref_test.cc +++ b/abseil-cpp/absl/functional/function_ref_test.cc @@ -14,7 +14,6 @@ #include "absl/functional/function_ref.h" -#include #include #include "gmock/gmock.h" diff --git a/abseil-cpp/absl/functional/internal/any_invocable.h b/abseil-cpp/absl/functional/internal/any_invocable.h deleted file mode 100644 index f353139c..00000000 --- a/abseil-cpp/absl/functional/internal/any_invocable.h +++ /dev/null @@ -1,857 +0,0 @@ -// Copyright 2022 The Abseil Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Implementation details for `absl::AnyInvocable` - -#ifndef ABSL_FUNCTIONAL_INTERNAL_ANY_INVOCABLE_H_ -#define ABSL_FUNCTIONAL_INTERNAL_ANY_INVOCABLE_H_ - -//////////////////////////////////////////////////////////////////////////////// -// // -// This implementation of the proposed `any_invocable` uses an approach that // -// chooses between local storage and remote storage for the contained target // -// object based on the target object's size, alignment requirements, and // -// whether or not it has a nothrow move constructor. Additional optimizations // -// are performed when the object is a trivially copyable type [basic.types]. // -// // -// There are three datamembers per `AnyInvocable` instance // -// // -// 1) A union containing either // -// - A pointer to the target object referred to via a void*, or // -// - the target object, emplaced into a raw char buffer // -// // -// 2) A function pointer to a "manager" function operation that takes a // -// discriminator and logically branches to either perform a move operation // -// or destroy operation based on that discriminator. // -// // -// 3) A function pointer to an "invoker" function operation that invokes the // -// target object, directly returning the result. // -// // -// When in the logically empty state, the manager function is an empty // -// function and the invoker function is one that would be undefined-behavior // -// to call. 
// -// // -// An additional optimization is performed when converting from one // -// AnyInvocable to another where only the noexcept specification and/or the // -// cv/ref qualifiers of the function type differ. In these cases, the // -// conversion works by "moving the guts", similar to if they were the same // -// exact type, as opposed to having to perform an additional layer of // -// wrapping through remote storage. // -// // -//////////////////////////////////////////////////////////////////////////////// - -// IWYU pragma: private, include "absl/functional/any_invocable.h" - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "absl/base/config.h" -#include "absl/base/internal/invoke.h" -#include "absl/base/macros.h" -#include "absl/meta/type_traits.h" -#include "absl/utility/utility.h" - -namespace absl { -ABSL_NAMESPACE_BEGIN - -// Helper macro used to prevent spelling `noexcept` in language versions older -// than C++17, where it is not part of the type system, in order to avoid -// compilation failures and internal compiler errors. -#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L -#define ABSL_INTERNAL_NOEXCEPT_SPEC(noex) noexcept(noex) -#else -#define ABSL_INTERNAL_NOEXCEPT_SPEC(noex) -#endif - -// Defined in functional/any_invocable.h -template -class AnyInvocable; - -namespace internal_any_invocable { - -// Constants relating to the small-object-storage for AnyInvocable -enum StorageProperty : std::size_t { - kAlignment = alignof(std::max_align_t), // The alignment of the storage - kStorageSize = sizeof(void*) * 2 // The size of the storage -}; - -//////////////////////////////////////////////////////////////////////////////// -// -// A metafunction for checking if a type is an AnyInvocable instantiation. -// This is used during conversion operations. 
-template -struct IsAnyInvocable : std::false_type {}; - -template -struct IsAnyInvocable> : std::true_type {}; -// -//////////////////////////////////////////////////////////////////////////////// - -// A type trait that tells us whether or not a target function type should be -// stored locally in the small object optimization storage -template -using IsStoredLocally = std::integral_constant< - bool, sizeof(T) <= kStorageSize && alignof(T) <= kAlignment && - kAlignment % alignof(T) == 0 && - std::is_nothrow_move_constructible::value>; - -// An implementation of std::remove_cvref_t of C++20. -template -using RemoveCVRef = - typename std::remove_cv::type>::type; - -//////////////////////////////////////////////////////////////////////////////// -// -// An implementation of the C++ standard INVOKE pseudo-macro, operation is -// equivalent to std::invoke except that it forces an implicit conversion to the -// specified return type. If "R" is void, the function is executed and the -// return value is simply ignored. -template ::value>> -void InvokeR(F&& f, P&&... args) { - absl::base_internal::invoke(std::forward(f), std::forward

(args)...); -} - -template ::value, int> = 0> -ReturnType InvokeR(F&& f, P&&... args) { - return absl::base_internal::invoke(std::forward(f), - std::forward

(args)...); -} - -// -//////////////////////////////////////////////////////////////////////////////// - -//////////////////////////////////////////////////////////////////////////////// -/// -// A metafunction that takes a "T" corresponding to a parameter type of the -// user's specified function type, and yields the parameter type to use for the -// type-erased invoker. In order to prevent observable moves, this must be -// either a reference or, if the type is trivial, the original parameter type -// itself. Since the parameter type may be incomplete at the point that this -// metafunction is used, we can only do this optimization for scalar types -// rather than for any trivial type. -template -T ForwardImpl(std::true_type); - -template -T&& ForwardImpl(std::false_type); - -// NOTE: We deliberately use an intermediate struct instead of a direct alias, -// as a workaround for b/206991861 on MSVC versions < 1924. -template -struct ForwardedParameter { - using type = decltype(( - ForwardImpl)(std::integral_constant::value>())); -}; - -template -using ForwardedParameterType = typename ForwardedParameter::type; -// -//////////////////////////////////////////////////////////////////////////////// - -// A discriminator when calling the "manager" function that describes operation -// type-erased operation should be invoked. -// -// "relocate_from_to" specifies that the manager should perform a move. -// -// "dispose" specifies that the manager should perform a destroy. 
-enum class FunctionToCall : bool { relocate_from_to, dispose }; - -// The portion of `AnyInvocable` state that contains either a pointer to the -// target object or the object itself in local storage -union TypeErasedState { - struct { - // A pointer to the type-erased object when remotely stored - void* target; - // The size of the object for `RemoteManagerTrivial` - std::size_t size; - } remote; - - // Local-storage for the type-erased object when small and trivial enough - alignas(kAlignment) char storage[kStorageSize]; -}; - -// A typed accessor for the object in `TypeErasedState` storage -template -T& ObjectInLocalStorage(TypeErasedState* const state) { - // We launder here because the storage may be reused with the same type. -#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L - return *std::launder(reinterpret_cast(&state->storage)); -#elif ABSL_HAVE_BUILTIN(__builtin_launder) - return *__builtin_launder(reinterpret_cast(&state->storage)); -#else - - // When `std::launder` or equivalent are not available, we rely on undefined - // behavior, which works as intended on Abseil's officially supported - // platforms as of Q2 2022. -#if !defined(__clang__) && defined(__GNUC__) -#pragma GCC diagnostic ignored "-Wstrict-aliasing" -#pragma GCC diagnostic push -#endif - return *reinterpret_cast(&state->storage); -#if !defined(__clang__) && defined(__GNUC__) -#pragma GCC diagnostic pop -#endif - -#endif -} - -// The type for functions issuing lifetime-related operations: move and dispose -// A pointer to such a function is contained in each `AnyInvocable` instance. -// NOTE: When specifying `FunctionToCall::`dispose, the same state must be -// passed as both "from" and "to". -using ManagerType = void(FunctionToCall /*operation*/, - TypeErasedState* /*from*/, TypeErasedState* /*to*/) - ABSL_INTERNAL_NOEXCEPT_SPEC(true); - -// The type for functions issuing the actual invocation of the object -// A pointer to such a function is contained in each AnyInvocable instance. 
-template -using InvokerType = ReturnType(TypeErasedState*, ForwardedParameterType

...) - ABSL_INTERNAL_NOEXCEPT_SPEC(SigIsNoexcept); - -// The manager that is used when AnyInvocable is empty -inline void EmptyManager(FunctionToCall /*operation*/, - TypeErasedState* /*from*/, - TypeErasedState* /*to*/) noexcept {} - -// The manager that is used when a target function is in local storage and is -// a trivially copyable type. -inline void LocalManagerTrivial(FunctionToCall /*operation*/, - TypeErasedState* const from, - TypeErasedState* const to) noexcept { - // This single statement without branching handles both possible operations. - // - // For FunctionToCall::dispose, "from" and "to" point to the same state, and - // so this assignment logically would do nothing. - // - // Note: Correctness here relies on http://wg21.link/p0593, which has only - // become standard in C++20, though implementations do not break it in - // practice for earlier versions of C++. - // - // The correct way to do this without that paper is to first placement-new a - // default-constructed T in "to->storage" prior to the memmove, but doing so - // requires a different function to be created for each T that is stored - // locally, which can cause unnecessary bloat and be less cache friendly. - *to = *from; - - // Note: Because the type is trivially copyable, the destructor does not need - // to be called ("trivially copyable" requires a trivial destructor). -} - -// The manager that is used when a target function is in local storage and is -// not a trivially copyable type. 
-template -void LocalManagerNontrivial(FunctionToCall operation, - TypeErasedState* const from, - TypeErasedState* const to) noexcept { - static_assert(IsStoredLocally::value, - "Local storage must only be used for supported types."); - static_assert(!std::is_trivially_copyable::value, - "Locally stored types must be trivially copyable."); - - T& from_object = (ObjectInLocalStorage)(from); - - switch (operation) { - case FunctionToCall::relocate_from_to: - // NOTE: Requires that the left-hand operand is already empty. - ::new (static_cast(&to->storage)) T(std::move(from_object)); - ABSL_FALLTHROUGH_INTENDED; - case FunctionToCall::dispose: - from_object.~T(); // Must not throw. // NOLINT - return; - } - ABSL_INTERNAL_UNREACHABLE; -} - -// The invoker that is used when a target function is in local storage -// Note: QualTRef here is the target function type along with cv and reference -// qualifiers that must be used when calling the function. -template -ReturnType LocalInvoker( - TypeErasedState* const state, - ForwardedParameterType

... args) noexcept(SigIsNoexcept) { - using RawT = RemoveCVRef; - static_assert( - IsStoredLocally::value, - "Target object must be in local storage in order to be invoked from it."); - - auto& f = (ObjectInLocalStorage)(state); - return (InvokeR)(static_cast(f), - static_cast>(args)...); -} - -// The manager that is used when a target function is in remote storage and it -// has a trivial destructor -inline void RemoteManagerTrivial(FunctionToCall operation, - TypeErasedState* const from, - TypeErasedState* const to) noexcept { - switch (operation) { - case FunctionToCall::relocate_from_to: - // NOTE: Requires that the left-hand operand is already empty. - to->remote = from->remote; - return; - case FunctionToCall::dispose: -#if defined(__cpp_sized_deallocation) - ::operator delete(from->remote.target, from->remote.size); -#else // __cpp_sized_deallocation - ::operator delete(from->remote.target); -#endif // __cpp_sized_deallocation - return; - } - ABSL_INTERNAL_UNREACHABLE; -} - -// The manager that is used when a target function is in remote storage and the -// destructor of the type is not trivial -template -void RemoteManagerNontrivial(FunctionToCall operation, - TypeErasedState* const from, - TypeErasedState* const to) noexcept { - static_assert(!IsStoredLocally::value, - "Remote storage must only be used for types that do not " - "qualify for local storage."); - - switch (operation) { - case FunctionToCall::relocate_from_to: - // NOTE: Requires that the left-hand operand is already empty. - to->remote.target = from->remote.target; - return; - case FunctionToCall::dispose: - ::delete static_cast(from->remote.target); // Must not throw. - return; - } - ABSL_INTERNAL_UNREACHABLE; -} - -// The invoker that is used when a target function is in remote storage -template -ReturnType RemoteInvoker( - TypeErasedState* const state, - ForwardedParameterType

... args) noexcept(SigIsNoexcept) { - using RawT = RemoveCVRef; - static_assert(!IsStoredLocally::value, - "Target object must be in remote storage in order to be " - "invoked from it."); - - auto& f = *static_cast(state->remote.target); - return (InvokeR)(static_cast(f), - static_cast>(args)...); -} - -//////////////////////////////////////////////////////////////////////////////// -// -// A metafunction that checks if a type T is an instantiation of -// absl::in_place_type_t (needed for constructor constraints of AnyInvocable). -template -struct IsInPlaceType : std::false_type {}; - -template -struct IsInPlaceType> : std::true_type {}; -// -//////////////////////////////////////////////////////////////////////////////// - -// A constructor name-tag used with CoreImpl (below) to request the -// conversion-constructor. QualDecayedTRef is the decayed-type of the object to -// wrap, along with the cv and reference qualifiers that must be applied when -// performing an invocation of the wrapped object. -template -struct TypedConversionConstruct {}; - -// A helper base class for all core operations of AnyInvocable. Most notably, -// this class creates the function call operator and constraint-checkers so that -// the top-level class does not have to be a series of partial specializations. -// -// Note: This definition exists (as opposed to being a declaration) so that if -// the user of the top-level template accidentally passes a template argument -// that is not a function type, they will get a static_assert in AnyInvocable's -// class body rather than an error stating that Impl is not defined. -template -class Impl {}; // Note: This is partially-specialized later. - -// A std::unique_ptr deleter that deletes memory allocated via ::operator new. 
-#if defined(__cpp_sized_deallocation) -class TrivialDeleter { - public: - explicit TrivialDeleter(std::size_t size) : size_(size) {} - - void operator()(void* target) const { - ::operator delete(target, size_); - } - - private: - std::size_t size_; -}; -#else // __cpp_sized_deallocation -class TrivialDeleter { - public: - explicit TrivialDeleter(std::size_t) {} - - void operator()(void* target) const { ::operator delete(target); } -}; -#endif // __cpp_sized_deallocation - -template -class CoreImpl; - -constexpr bool IsCompatibleConversion(void*, void*) { return false; } -template -constexpr bool IsCompatibleConversion(CoreImpl*, - CoreImpl*) { - return !NoExceptDest || NoExceptSrc; -} - -// A helper base class for all core operations of AnyInvocable that do not -// depend on the cv/ref qualifiers of the function type. -template -class CoreImpl { - public: - using result_type = ReturnType; - - CoreImpl() noexcept : manager_(EmptyManager), invoker_(nullptr) {} - - enum class TargetType : int { - kPointer = 0, - kCompatibleAnyInvocable = 1, - kIncompatibleAnyInvocable = 2, - kOther = 3, - }; - - // Note: QualDecayedTRef here includes the cv-ref qualifiers associated with - // the invocation of the Invocable. The unqualified type is the target object - // type to be stored. - template - explicit CoreImpl(TypedConversionConstruct, F&& f) { - using DecayedT = RemoveCVRef; - - constexpr TargetType kTargetType = - (std::is_pointer::value || - std::is_member_pointer::value) - ? TargetType::kPointer - : IsCompatibleAnyInvocable::value - ? TargetType::kCompatibleAnyInvocable - : IsAnyInvocable::value - ? TargetType::kIncompatibleAnyInvocable - : TargetType::kOther; - // NOTE: We only use integers instead of enums as template parameters in - // order to work around a bug on C++14 under MSVC 2017. - // See b/236131881. 
- Initialize(kTargetType), QualDecayedTRef>( - std::forward(f)); - } - - // Note: QualTRef here includes the cv-ref qualifiers associated with the - // invocation of the Invocable. The unqualified type is the target object - // type to be stored. - template - explicit CoreImpl(absl::in_place_type_t, Args&&... args) { - InitializeStorage(std::forward(args)...); - } - - CoreImpl(CoreImpl&& other) noexcept { - other.manager_(FunctionToCall::relocate_from_to, &other.state_, &state_); - manager_ = other.manager_; - invoker_ = other.invoker_; - other.manager_ = EmptyManager; - other.invoker_ = nullptr; - } - - CoreImpl& operator=(CoreImpl&& other) noexcept { - // Put the left-hand operand in an empty state. - // - // Note: A full reset that leaves us with an object that has its invariants - // intact is necessary in order to handle self-move. This is required by - // types that are used with certain operations of the standard library, such - // as the default definition of std::swap when both operands target the same - // object. - Clear(); - - // Perform the actual move/destory operation on the target function. - other.manager_(FunctionToCall::relocate_from_to, &other.state_, &state_); - manager_ = other.manager_; - invoker_ = other.invoker_; - other.manager_ = EmptyManager; - other.invoker_ = nullptr; - - return *this; - } - - ~CoreImpl() { manager_(FunctionToCall::dispose, &state_, &state_); } - - // Check whether or not the AnyInvocable is in the empty state. - bool HasValue() const { return invoker_ != nullptr; } - - // Effects: Puts the object into its empty state. - void Clear() { - manager_(FunctionToCall::dispose, &state_, &state_); - manager_ = EmptyManager; - invoker_ = nullptr; - } - - template = 0> - void Initialize(F&& f) { -// This condition handles types that decay into pointers, which includes -// function references. Since function references cannot be null, GCC warns -// against comparing their decayed form with nullptr. 
-// Since this is template-heavy code, we prefer to disable these warnings -// locally instead of adding yet another overload of this function. -#if !defined(__clang__) && defined(__GNUC__) -#pragma GCC diagnostic ignored "-Wpragmas" -#pragma GCC diagnostic ignored "-Waddress" -#pragma GCC diagnostic ignored "-Wnonnull-compare" -#pragma GCC diagnostic push -#endif - if (static_cast>(f) == nullptr) { -#if !defined(__clang__) && defined(__GNUC__) -#pragma GCC diagnostic pop -#endif - manager_ = EmptyManager; - invoker_ = nullptr; - return; - } - InitializeStorage(std::forward(f)); - } - - template = 0> - void Initialize(F&& f) { - // In this case we can "steal the guts" of the other AnyInvocable. - f.manager_(FunctionToCall::relocate_from_to, &f.state_, &state_); - manager_ = f.manager_; - invoker_ = f.invoker_; - - f.manager_ = EmptyManager; - f.invoker_ = nullptr; - } - - template = 0> - void Initialize(F&& f) { - if (f.HasValue()) { - InitializeStorage(std::forward(f)); - } else { - manager_ = EmptyManager; - invoker_ = nullptr; - } - } - - template > - void Initialize(F&& f) { - InitializeStorage(std::forward(f)); - } - - // Use local (inline) storage for applicable target object types. - template >::value>> - void InitializeStorage(Args&&... args) { - using RawT = RemoveCVRef; - ::new (static_cast(&state_.storage)) - RawT(std::forward(args)...); - - invoker_ = LocalInvoker; - // We can simplify our manager if we know the type is trivially copyable. - InitializeLocalManager(); - } - - // Use remote storage for target objects that cannot be stored locally. - template >::value, - int> = 0> - void InitializeStorage(Args&&... args) { - InitializeRemoteManager>(std::forward(args)...); - // This is set after everything else in case an exception is thrown in an - // earlier step of the initialization. 
- invoker_ = RemoteInvoker; - } - - template ::value>> - void InitializeLocalManager() { - manager_ = LocalManagerTrivial; - } - - template ::value, int> = 0> - void InitializeLocalManager() { - manager_ = LocalManagerNontrivial; - } - - template - using HasTrivialRemoteStorage = - std::integral_constant::value && - alignof(T) <= - ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT>; - - template ::value>> - void InitializeRemoteManager(Args&&... args) { - // unique_ptr is used for exception-safety in case construction throws. - std::unique_ptr uninitialized_target( - ::operator new(sizeof(T)), TrivialDeleter(sizeof(T))); - ::new (uninitialized_target.get()) T(std::forward(args)...); - state_.remote.target = uninitialized_target.release(); - state_.remote.size = sizeof(T); - manager_ = RemoteManagerTrivial; - } - - template ::value, int> = 0> - void InitializeRemoteManager(Args&&... args) { - state_.remote.target = ::new T(std::forward(args)...); - manager_ = RemoteManagerNontrivial; - } - - ////////////////////////////////////////////////////////////////////////////// - // - // Type trait to determine if the template argument is an AnyInvocable whose - // function type is compatible enough with ours such that we can - // "move the guts" out of it when moving, rather than having to place a new - // object into remote storage. 
- - template - struct IsCompatibleAnyInvocable { - static constexpr bool value = false; - }; - - template - struct IsCompatibleAnyInvocable> { - static constexpr bool value = - (IsCompatibleConversion)(static_cast< - typename AnyInvocable::CoreImpl*>( - nullptr), - static_cast(nullptr)); - }; - - // - ////////////////////////////////////////////////////////////////////////////// - - TypeErasedState state_; - ManagerType* manager_; - InvokerType* invoker_; -}; - -// A constructor name-tag used with Impl to request the -// conversion-constructor -struct ConversionConstruct {}; - -//////////////////////////////////////////////////////////////////////////////// -// -// A metafunction that is normally an identity metafunction except that when -// given a std::reference_wrapper, it yields T&. This is necessary because -// currently std::reference_wrapper's operator() is not conditionally noexcept, -// so when checking if such an Invocable is nothrow-invocable, we must pull out -// the underlying type. -template -struct UnwrapStdReferenceWrapperImpl { - using type = T; -}; - -template -struct UnwrapStdReferenceWrapperImpl> { - using type = T&; -}; - -template -using UnwrapStdReferenceWrapper = - typename UnwrapStdReferenceWrapperImpl::type; -// -//////////////////////////////////////////////////////////////////////////////// - -// An alias that always yields std::true_type (used with constraints) where -// substitution failures happen when forming the template arguments. 
-template -using True = - std::integral_constant*) != 0>; - -/*SFINAE constraints for the conversion-constructor.*/ -template , AnyInvocable>::value>> -using CanConvert = - True>::value>, - absl::enable_if_t::template CallIsValid::value>, - absl::enable_if_t< - Impl::template CallIsNoexceptIfSigIsNoexcept::value>, - absl::enable_if_t, F>::value>>; - -/*SFINAE constraints for the std::in_place constructors.*/ -template -using CanEmplace = True< - absl::enable_if_t::template CallIsValid::value>, - absl::enable_if_t< - Impl::template CallIsNoexceptIfSigIsNoexcept::value>, - absl::enable_if_t, Args...>::value>>; - -/*SFINAE constraints for the conversion-assign operator.*/ -template , AnyInvocable>::value>> -using CanAssign = - True::template CallIsValid::value>, - absl::enable_if_t< - Impl::template CallIsNoexceptIfSigIsNoexcept::value>, - absl::enable_if_t, F>::value>>; - -/*SFINAE constraints for the reference-wrapper conversion-assign operator.*/ -template -using CanAssignReferenceWrapper = - True::template CallIsValid>::value>, - absl::enable_if_t::template CallIsNoexceptIfSigIsNoexcept< - std::reference_wrapper>::value>>; - -//////////////////////////////////////////////////////////////////////////////// -// -// The constraint for checking whether or not a call meets the noexcept -// callability requirements. This is a preprocessor macro because specifying it -// this way as opposed to a disjunction/branch can improve the user-side error -// messages and avoids an instantiation of std::is_nothrow_invocable_r in the -// cases where the user did not specify a noexcept function type. -// -#define ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT(inv_quals, noex) \ - ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT_##noex(inv_quals) - -// The disjunction below is because we can't rely on std::is_nothrow_invocable_r -// to give the right result when ReturnType is non-moveable in toolchains that -// don't treat non-moveable result types correctly. 
For example this was the -// case in libc++ before commit c3a24882 (2022-05). -#define ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT_true(inv_quals) \ - absl::enable_if_t> inv_quals, \ - P...>, \ - std::conjunction< \ - std::is_nothrow_invocable< \ - UnwrapStdReferenceWrapper> inv_quals, P...>, \ - std::is_same< \ - ReturnType, \ - absl::base_internal::invoke_result_t< \ - UnwrapStdReferenceWrapper> inv_quals, \ - P...>>>>::value> - -#define ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT_false(inv_quals) -// -//////////////////////////////////////////////////////////////////////////////// - -// A macro to generate partial specializations of Impl with the different -// combinations of supported cv/reference qualifiers and noexcept specifier. -// -// Here, `cv` are the cv-qualifiers if any, `ref` is the ref-qualifier if any, -// inv_quals is the reference type to be used when invoking the target, and -// noex is "true" if the function type is noexcept, or false if it is not. -// -// The CallIsValid condition is more complicated than simply using -// absl::base_internal::is_invocable_r because we can't rely on it to give the -// right result when ReturnType is non-moveable in toolchains that don't treat -// non-moveable result types correctly. For example this was the case in libc++ -// before commit c3a24882 (2022-05). 
-#define ABSL_INTERNAL_ANY_INVOCABLE_IMPL_(cv, ref, inv_quals, noex) \ - template \ - class Impl \ - : public CoreImpl { \ - public: \ - /*The base class, which contains the datamembers and core operations*/ \ - using Core = CoreImpl; \ - \ - /*SFINAE constraint to check if F is invocable with the proper signature*/ \ - template \ - using CallIsValid = True inv_quals, P...>, \ - std::is_same inv_quals, P...>>>::value>>; \ - \ - /*SFINAE constraint to check if F is nothrow-invocable when necessary*/ \ - template \ - using CallIsNoexceptIfSigIsNoexcept = \ - True; \ - \ - /*Put the AnyInvocable into an empty state.*/ \ - Impl() = default; \ - \ - /*The implementation of a conversion-constructor from "f*/ \ - /*This forwards to Core, attaching inv_quals so that the base class*/ \ - /*knows how to properly type-erase the invocation.*/ \ - template \ - explicit Impl(ConversionConstruct, F&& f) \ - : Core(TypedConversionConstruct< \ - typename std::decay::type inv_quals>(), \ - std::forward(f)) {} \ - \ - /*Forward along the in-place construction parameters.*/ \ - template \ - explicit Impl(absl::in_place_type_t, Args&&... args) \ - : Core(absl::in_place_type inv_quals>, \ - std::forward(args)...) {} \ - \ - /*The actual invocation operation with the proper signature*/ \ - ReturnType operator()(P... args) cv ref noexcept(noex) { \ - assert(this->invoker_ != nullptr); \ - return this->invoker_(const_cast(&this->state_), \ - static_cast>(args)...); \ - } \ - } - -// Define the `noexcept(true)` specialization only for C++17 and beyond, when -// `noexcept` is part of the type system. -#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L -// A convenience macro that defines specializations for the noexcept(true) and -// noexcept(false) forms, given the other properties. 
-#define ABSL_INTERNAL_ANY_INVOCABLE_IMPL(cv, ref, inv_quals) \ - ABSL_INTERNAL_ANY_INVOCABLE_IMPL_(cv, ref, inv_quals, false); \ - ABSL_INTERNAL_ANY_INVOCABLE_IMPL_(cv, ref, inv_quals, true) -#else -#define ABSL_INTERNAL_ANY_INVOCABLE_IMPL(cv, ref, inv_quals) \ - ABSL_INTERNAL_ANY_INVOCABLE_IMPL_(cv, ref, inv_quals, false) -#endif - -// Non-ref-qualified partial specializations -ABSL_INTERNAL_ANY_INVOCABLE_IMPL(, , &); -ABSL_INTERNAL_ANY_INVOCABLE_IMPL(const, , const&); - -// Lvalue-ref-qualified partial specializations -ABSL_INTERNAL_ANY_INVOCABLE_IMPL(, &, &); -ABSL_INTERNAL_ANY_INVOCABLE_IMPL(const, &, const&); - -// Rvalue-ref-qualified partial specializations -ABSL_INTERNAL_ANY_INVOCABLE_IMPL(, &&, &&); -ABSL_INTERNAL_ANY_INVOCABLE_IMPL(const, &&, const&&); - -// Undef the detail-only macros. -#undef ABSL_INTERNAL_ANY_INVOCABLE_IMPL -#undef ABSL_INTERNAL_ANY_INVOCABLE_IMPL_ -#undef ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT_false -#undef ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT_true -#undef ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT -#undef ABSL_INTERNAL_NOEXCEPT_SPEC - -} // namespace internal_any_invocable -ABSL_NAMESPACE_END -} // namespace absl - -#endif // ABSL_FUNCTIONAL_INTERNAL_ANY_INVOCABLE_H_ diff --git a/abseil-cpp/absl/hash/BUILD.bazel b/abseil-cpp/absl/hash/BUILD.bazel index bcc316f9..5b1e2d01 100644 --- a/abseil-cpp/absl/hash/BUILD.bazel +++ b/abseil-cpp/absl/hash/BUILD.bazel @@ -14,6 +14,7 @@ # limitations under the License. 
# +load("@rules_cc//cc:defs.bzl", "cc_library", "cc_test") load( "//absl:copts/configure_copts.bzl", "ABSL_DEFAULT_COPTS", @@ -36,12 +37,9 @@ cc_library( linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":city", - ":low_level_hash", - "//absl/base:config", "//absl/base:core_headers", "//absl/base:endian", "//absl/container:fixed_array", - "//absl/functional:function_ref", "//absl/meta:type_traits", "//absl/numeric:int128", "//absl/strings", @@ -75,11 +73,7 @@ cc_test( ":hash_testing", ":spy_hash_state", "//absl/base:core_headers", - "//absl/container:btree", - "//absl/container:flat_hash_map", "//absl/container:flat_hash_set", - "//absl/container:node_hash_map", - "//absl/container:node_hash_set", "//absl/meta:type_traits", "//absl/numeric:int128", "//absl/strings:cord_test_helpers", @@ -87,26 +81,6 @@ cc_test( ], ) -cc_binary( - name = "hash_benchmark", - testonly = 1, - srcs = ["hash_benchmark.cc"], - copts = ABSL_TEST_COPTS, - linkopts = ABSL_DEFAULT_LINKOPTS, - tags = ["benchmark"], - visibility = ["//visibility:private"], - deps = [ - ":hash", - "//absl/base:core_headers", - "//absl/container:flat_hash_set", - "//absl/random", - "//absl/strings", - "//absl/strings:cord", - "//absl/strings:cord_test_helpers", - "@com_github_google_benchmark//:benchmark_main", - ], -) - cc_library( name = "spy_hash_state", testonly = 1, @@ -146,31 +120,3 @@ cc_test( "@com_google_googletest//:gtest_main", ], ) - -cc_library( - name = "low_level_hash", - srcs = ["internal/low_level_hash.cc"], - hdrs = ["internal/low_level_hash.h"], - copts = ABSL_DEFAULT_COPTS, - linkopts = ABSL_DEFAULT_LINKOPTS, - visibility = ["//visibility:private"], - deps = [ - "//absl/base:config", - "//absl/base:endian", - "//absl/numeric:bits", - "//absl/numeric:int128", - ], -) - -cc_test( - name = "low_level_hash_test", - srcs = ["internal/low_level_hash_test.cc"], - copts = ABSL_TEST_COPTS, - linkopts = ABSL_DEFAULT_LINKOPTS, - visibility = ["//visibility:private"], - deps = [ - ":low_level_hash", - 
"//absl/strings", - "@com_google_googletest//:gtest_main", - ], -) diff --git a/abseil-cpp/absl/hash/CMakeLists.txt b/abseil-cpp/absl/hash/CMakeLists.txt index 423b74b5..61365e9b 100644 --- a/abseil-cpp/absl/hash/CMakeLists.txt +++ b/abseil-cpp/absl/hash/CMakeLists.txt @@ -24,20 +24,17 @@ absl_cc_library( "internal/hash.h" COPTS ${ABSL_DEFAULT_COPTS} - DEPS - absl::city - absl::config + DEPS absl::core_headers absl::endian absl::fixed_array - absl::function_ref absl::meta absl::int128 absl::strings absl::optional absl::variant absl::utility - absl::low_level_hash + absl::city PUBLIC ) @@ -53,7 +50,7 @@ absl_cc_library( absl::meta absl::strings absl::variant - GTest::gmock + gmock TESTONLY ) @@ -69,18 +66,13 @@ absl_cc_test( absl::hash absl::hash_testing absl::core_headers - absl::btree - absl::flat_hash_map absl::flat_hash_set - absl::node_hash_map - absl::node_hash_set absl::spy_hash_state absl::meta absl::int128 - GTest::gmock_main + gmock_main ) -# Internal-only target, do not depend on directly. absl_cc_library( NAME spy_hash_state @@ -95,7 +87,6 @@ absl_cc_library( TESTONLY ) -# Internal-only target, do not depend on directly. absl_cc_library( NAME city @@ -120,35 +111,6 @@ absl_cc_test( ${ABSL_TEST_COPTS} DEPS absl::city - GTest::gmock_main -) - -# Internal-only target, do not depend on directly. 
-absl_cc_library( - NAME - low_level_hash - HDRS - "internal/low_level_hash.h" - SRCS - "internal/low_level_hash.cc" - COPTS - ${ABSL_DEFAULT_COPTS} - DEPS - absl::bits - absl::config - absl::endian - absl::int128 + gmock_main ) -absl_cc_test( - NAME - low_level_hash_test - SRCS - "internal/low_level_hash_test.cc" - COPTS - ${ABSL_TEST_COPTS} - DEPS - absl::low_level_hash - absl::strings - GTest::gmock_main -) diff --git a/abseil-cpp/absl/hash/hash.h b/abseil-cpp/absl/hash/hash.h index 74e2d7c0..5de132ca 100644 --- a/abseil-cpp/absl/hash/hash.h +++ b/abseil-cpp/absl/hash/hash.h @@ -26,9 +26,9 @@ // support Abseil hashing without requiring you to define a hashing // algorithm. // * `HashState`, a type-erased class which implements the manipulation of the -// hash state (H) itself; contains member functions `combine()`, -// `combine_contiguous()`, and `combine_unordered()`; and which you can use -// to contribute to an existing hash state when hashing your types. +// hash state (H) itself, contains member functions `combine()` and +// `combine_contiguous()`, which you can use to contribute to an existing +// hash state when hashing your types. // // Unlike `std::hash` or other hashing frameworks, the Abseil hashing framework // provides most of its utility by abstracting away the hash algorithm (and its @@ -40,11 +40,6 @@ // each process. E.g., `absl::Hash{}(9)` in one process and // `absl::Hash{}(9)` in another process are likely to differ. // -// `absl::Hash` may also produce different values from different dynamically -// loaded libraries. For this reason, `absl::Hash` values must never cross -// boundries in dynamically loaded libraries (including when used in types like -// hash containers.) -// // `absl::Hash` is intended to strongly mix input bits with a target of passing // an [Avalanche Test](https://en.wikipedia.org/wiki/Avalanche_effect). 
// @@ -78,10 +73,6 @@ #ifndef ABSL_HASH_HASH_H_ #define ABSL_HASH_HASH_H_ -#include -#include - -#include "absl/functional/function_ref.h" #include "absl/hash/internal/hash.h" namespace absl { @@ -114,27 +105,14 @@ ABSL_NAMESPACE_BEGIN // * std::string_view (as well as any instance of std::basic_string that // uses char and std::char_traits) // * All the standard sequence containers (provided the elements are hashable) -// * All the standard associative containers (provided the elements are +// * All the standard ordered associative containers (provided the elements are // hashable) // * absl types such as the following: // * absl::string_view -// * absl::uint128 -// * absl::Time, absl::Duration, and absl::TimeZone -// * absl containers (provided the elements are hashable) such as the -// following: -// * absl::flat_hash_set, absl::node_hash_set, absl::btree_set -// * absl::flat_hash_map, absl::node_hash_map, absl::btree_map -// * absl::btree_multiset, absl::btree_multimap // * absl::InlinedVector // * absl::FixedArray -// -// When absl::Hash is used to hash an unordered container with a custom hash -// functor, the elements are hashed using default absl::Hash semantics, not -// the custom hash functor. This is consistent with the behavior of -// operator==() on unordered containers, which compares elements pairwise with -// operator==() rather than the custom equality functor. It is usually a -// mistake to use either operator==() or absl::Hash on unordered collections -// that use functors incompatible with operator==() equality. +// * absl::uint128 +// * absl::Time, absl::Duration, and absl::TimeZone // // Note: the list above is not meant to be exhaustive. Additional type support // may be added, in which case the above list will be updated. @@ -173,8 +151,7 @@ ABSL_NAMESPACE_BEGIN // that are otherwise difficult to extend using `AbslHashValue()`. (See the // `HashState` class below.) 
// -// The "hash state" concept contains three member functions for mixing hash -// state: +// The "hash state" concept contains two member functions for mixing hash state: // // * `H::combine(state, values...)` // @@ -208,15 +185,6 @@ ABSL_NAMESPACE_BEGIN // (it may perform internal optimizations). If you need this guarantee, use a // loop instead. // -// * `H::combine_unordered(state, begin, end)` -// -// Combines a set of elements denoted by an iterator pair into a hash -// state, returning the updated state. Note that the existing hash -// state is move-only and must be passed by value. -// -// Unlike the other two methods, the hashing is order-independent. -// This can be used to hash unordered collections. -// // ----------------------------------------------------------------------------- // Adding Type Support to `absl::Hash` // ----------------------------------------------------------------------------- @@ -246,26 +214,6 @@ ABSL_NAMESPACE_BEGIN template using Hash = absl::hash_internal::Hash; -// HashOf -// -// absl::HashOf() is a helper that generates a hash from the values of its -// arguments. It dispatches to absl::Hash directly, as follows: -// * HashOf(t) == absl::Hash{}(t) -// * HashOf(a, b, c) == HashOf(std::make_tuple(a, b, c)) -// -// HashOf(a1, a2, ...) == HashOf(b1, b2, ...) is guaranteed when -// * The argument lists have pairwise identical C++ types -// * a1 == b1 && a2 == b2 && ... -// -// The requirement that the arguments match in both type and value is critical. -// It means that `a == b` does not necessarily imply `HashOf(a) == HashOf(b)` if -// `a` and `b` have different types. For example, `HashOf(2) != HashOf(2.0)`. -template -size_t HashOf(const Types&... values) { - auto tuple = std::tie(values...); - return absl::Hash{}(tuple); -} - // HashState // // A type erased version of the hash state concept, for use in user-defined @@ -273,9 +221,8 @@ size_t HashOf(const Types&... values) { // classes, virtual functions, etc.). 
The type erasure adds overhead so it // should be avoided unless necessary. // -// Note: This wrapper will only erase calls to +// Note: This wrapper will only erase calls to: // combine_contiguous(H, const unsigned char*, size_t) -// RunCombineUnordered(H, CombinerF) // // All other calls will be handled internally and will not invoke overloads // provided by the wrapped class. @@ -349,8 +296,6 @@ class HashState : public hash_internal::HashStateBase { private: HashState() = default; - friend class HashState::HashStateBase; - template static void CombineContiguousImpl(void* p, const unsigned char* first, size_t size) { @@ -362,57 +307,16 @@ class HashState : public hash_internal::HashStateBase { void Init(T* state) { state_ = state; combine_contiguous_ = &CombineContiguousImpl; - run_combine_unordered_ = &RunCombineUnorderedImpl; - } - - template - struct CombineUnorderedInvoker { - template - void operator()(T inner_state, ConsumerT inner_cb) { - f(HashState::Create(&inner_state), - [&](HashState& inner_erased) { inner_cb(inner_erased.Real()); }); - } - - absl::FunctionRef)> f; - }; - - template - static HashState RunCombineUnorderedImpl( - HashState state, - absl::FunctionRef)> - f) { - // Note that this implementation assumes that inner_state and outer_state - // are the same type. This isn't true in the SpyHash case, but SpyHash - // types are move-convertible to each other, so this still works. - T& real_state = state.Real(); - real_state = T::RunCombineUnordered( - std::move(real_state), CombineUnorderedInvoker{f}); - return state; - } - - template - static HashState RunCombineUnordered(HashState state, CombinerT combiner) { - auto* run = state.run_combine_unordered_; - return run(std::move(state), std::ref(combiner)); } // Do not erase an already erased state. 
void Init(HashState* state) { state_ = state->state_; combine_contiguous_ = state->combine_contiguous_; - run_combine_unordered_ = state->run_combine_unordered_; - } - - template - T& Real() { - return *static_cast(state_); } void* state_; void (*combine_contiguous_)(void*, const unsigned char*, size_t); - HashState (*run_combine_unordered_)( - HashState state, - absl::FunctionRef)>); }; ABSL_NAMESPACE_END diff --git a/abseil-cpp/absl/hash/hash_benchmark.cc b/abseil-cpp/absl/hash/hash_benchmark.cc deleted file mode 100644 index 8712a01c..00000000 --- a/abseil-cpp/absl/hash/hash_benchmark.cc +++ /dev/null @@ -1,323 +0,0 @@ -// Copyright 2018 The Abseil Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include -#include -#include -#include -#include - -#include "absl/base/attributes.h" -#include "absl/container/flat_hash_set.h" -#include "absl/hash/hash.h" -#include "absl/random/random.h" -#include "absl/strings/cord.h" -#include "absl/strings/cord_test_helpers.h" -#include "absl/strings/string_view.h" -#include "benchmark/benchmark.h" - -namespace { - -using absl::Hash; - -template